commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
022e8a8099c154789d8c2cde925f54639c2ae1b6
|
Fix query
|
ogn/collect/ognrange.py
|
ogn/collect/ognrange.py
|
from celery.utils.log import get_task_logger
from sqlalchemy import String
from sqlalchemy import and_, insert, update, exists, between
from sqlalchemy.sql import func, null
from sqlalchemy.sql.expression import literal_column
from ogn.collect.celery import app
from ogn.model import AircraftBeacon, ReceiverCoverage
from ogn.utils import date_to_timestamps
logger = get_task_logger(__name__)
@app.task
def create_receiver_coverage(session=None, date=None):
"""Create receiver coverages."""
logger.info("Compute receiver coverages.")
if session is None:
session = app.session
if not date:
logger.warn("A date is needed for calculating stats. Exiting")
return None
else:
(start, end) = date_to_timestamps(date)
# Filter aircraft beacons and shrink precision of MGRS from 1m to 1km resolution: 30UXC 00061 18429 -> 30UXC 00 18
sq = session.query((func.left(AircraftBeacon.location_mgrs, 5, type_=String) + func.substring(AircraftBeacon.location_mgrs, 6, 2, type_=String) + func.substring(AircraftBeacon.location_mgrs, 11, 2, type_=String)).label('reduced_mgrs'),
AircraftBeacon.receiver_id,
AircraftBeacon.signal_quality,
AircraftBeacon.altitude,
AircraftBeacon.device_id) \
.filter(and_(between(AircraftBeacon.timestamp, start, end),
AircraftBeacon.location_mgrs != null(),
AircraftBeacon.receiver_id != null(),
AircraftBeacon.device_id != null())) \
.subquery()
# ... and group them by reduced MGRS, receiver and date
query = session.query(sq.c.reduced_mgrs,
sq.c.receiver_id,
literal_column("'{}'".format(date.strftime('%Y-%m-%d'))).label('date'),
func.max(sq.c.signal_quality).label('max_signal_quality'),
func.min(sq.c.altitude).label('min_altitude'),
func.max(sq.c.altitude).label('max_altitude'),
func.count(sq.c.altitude).label('aircraft_beacon_count'),
func.count(func.distinct(sq.c.device_id)).label('device_count')) \
.group_by(sq.c.reduced_mgrs,
sq.c.receiver_id) \
.subquery()
# if a receiver coverage entry exist --> update it
upd = update(ReceiverCoverage) \
.where(and_(ReceiverCoverage.location_mgrs == query.c.reduced_mgrs,
ReceiverCoverage.receiver_id == query.c.receiver_id,
ReceiverCoverage.date == query.c.date)) \
.values({"max_signal_quality": query.c.max_signal_quality,
"min_altitude": query.c.min_altitude,
"max_altitude": query.c.max_altitude,
"aircraft_beacon_count": query.c.aircraft_beacon_count,
"device_count": query.c.device_count})
result = session.execute(upd)
update_counter = result.rowcount
session.commit()
logger.debug("Updated receiver coverage entries: {}".format(update_counter))
# if a receiver coverage entry doesnt exist --> insert it
new_coverage_entries = session.query(query) \
.filter(~exists().where(
and_(ReceiverCoverage.location_mgrs == query.c.reduced_mgrs,
ReceiverCoverage.receiver_id == query.c.receiver_id,
ReceiverCoverage.date == query.c.date)))
ins = insert(ReceiverCoverage).from_select((
ReceiverCoverage.location_mgrs,
ReceiverCoverage.receiver_id,
ReceiverCoverage.date,
ReceiverCoverage.max_signal_quality,
ReceiverCoverage.min_altitude,
ReceiverCoverage.max_altitude,
ReceiverCoverage.aircraft_beacon_count,
ReceiverCoverage.device_count),
new_coverage_entries)
result = session.execute(ins)
insert_counter = result.rowcount
session.commit()
logger.debug("New receiver coverage entries: {}".format(insert_counter))
return "Receiver coverage entries: {} inserted, {} updated".format(insert_counter, update_counter)
|
Python
| 0.999801
|
@@ -68,16 +68,22 @@
t String
+, Date
%0Afrom sq
@@ -1781,63 +1781,28 @@
-literal_column(%22'%7B%7D'%22.format(date.strftime('%25Y-%25m-%25d'))
+func.cast(date, Date
).la
@@ -2600,32 +2600,24 @@
age.date ==
-query.c.
date)) %5C%0A
@@ -3374,32 +3374,32 @@
.c.receiver_id,%0A
+
@@ -3424,24 +3424,16 @@
date ==
-query.c.
date)))%0A
|
a5b5acb62466c77e665f81692970ce6b2976b778
|
extend handshake test
|
devp2p/tests/test_peer.py
|
devp2p/tests/test_peer.py
|
from devp2p import peermanager
from devp2p import crypto
from devp2p.app import BaseApp
import devp2p.muxsession
import rlp
import devp2p.p2p_protocol
import time
import gevent
import copy
def get_connected_apps():
a_config = dict(p2p=dict(listen_host='127.0.0.1', listen_port=3000),
node=dict(privkey_hex=crypto.sha3('a').encode('hex')))
b_config = copy.deepcopy(a_config)
b_config['p2p']['listen_port'] = 3001
b_config['node']['privkey_hex'] = crypto.sha3('b').encode('hex')
a_app = BaseApp(a_config)
peermanager.PeerManager.register_with_app(a_app)
a_app.start()
b_app = BaseApp(b_config)
peermanager.PeerManager.register_with_app(b_app)
b_app.start()
a_peermgr = a_app.services.peermanager
b_peermgr = b_app.services.peermanager
# connect
host = b_config['p2p']['listen_host']
port = b_config['p2p']['listen_port']
pubkey = crypto.privtopub(b_config['node']['privkey_hex'].decode('hex'))
a_peermgr.connect((host, port), remote_pubkey=pubkey)
return a_app, b_app
def test_handshake():
a_app, b_app = get_connected_apps()
gevent.sleep(1)
a_app.stop()
b_app.stop()
def test_big_transfer():
class transfer(devp2p.p2p_protocol.BaseProtocol.command):
cmd_id = 4
structure = [('raw_data', rlp.sedes.binary)]
def create(self, proto, raw_data=''):
return [raw_data]
# money patches
devp2p.p2p_protocol.P2PProtocol.transfer = transfer
devp2p.muxsession.MultiplexedSession.max_window_size = 8 * 1024
a_app, b_app = get_connected_apps()
gevent.sleep(.1)
a_protocol = a_app.services.peermanager.peers[0].protocols[devp2p.p2p_protocol.P2PProtocol]
b_protocol = b_app.services.peermanager.peers[0].protocols[devp2p.p2p_protocol.P2PProtocol]
st = time.time()
def cb(proto, **data):
print 'took', time.time() - st, len(data['raw_data'])
b_protocol.receive_transfer_callbacks.append(cb)
raw_data = '0' * 1 * 1000 * 100
a_protocol.send_transfer(raw_data=raw_data)
# 0.03 secs for 0.1mb
# 0.28 secs for 1mb
# 2.7 secs for 10mb
# 3.7 MB/s == 30Mbit
gevent.sleep(1)
a_app.stop()
b_app.stop()
gevent.sleep(0.1)
def connect_go():
a_config = dict(p2p=dict(listen_host='127.0.0.1', listen_port=3000),
node=dict(privkey_hex=crypto.sha3('a').encode('hex')))
a_app = BaseApp(a_config)
peermanager.PeerManager.register_with_app(a_app)
a_app.start()
a_peermgr = a_app.services.peermanager
# connect
pubkey = "6ed2fecb28ff17dec8647f08aa4368b57790000e0e9b33a7b91f32c41b6ca9ba21600e9a8c44248ce63a71544388c6745fa291f88f8b81e109ba3da11f7b41b9".decode(
'hex')
a_peermgr.connect(('127.0.0.1', 30303), remote_pubkey=pubkey)
gevent.sleep(50)
a_app.stop()
if __name__ == '__main__':
# ethereum -loglevel 5 --bootnodes ''
import ethereum.slogging
ethereum.slogging.configure(config_string=':debug')
# connect_go()
test_big_transfer()
|
Python
| 0.000002
|
@@ -1134,32 +1134,120 @@
gevent.sleep(1)%0A
+ assert a_app.services.peermanager.peers%0A assert b_app.services.peermanager.peers%0A
a_app.stop()
|
e844847323a39f8bfd1870a21071f9f07f110274
|
manage password
|
models/user.py
|
models/user.py
|
from peewee import CharField, DateTimeField
from flask_login import UserMixin
from hashlib import sha1
from time import mktime
import datetime
from models.base import BaseModel
class User(BaseModel, UserMixin):
created = DateTimeField(default=datetime.datetime.now)
email = CharField(max_length=50)
password = CharField(max_length=50)
@staticmethod
def create(email, password, fullname=None):
user = User(email=email)
user.password = sha1(password.encode('utf-8')).hexdigest()
return user
def check(self, password):
return self.password == sha1(password.encode('utf-8')).hexdigest()
def to_json(self):
return {
'id': self.id,
'created': mktime(self.created.timetuple()) * 1000,
'email': self.email
}
|
Python
| 0.000004
|
@@ -456,16 +456,117 @@
user.
+update_password(password)%0A return user%0A%0A def update_password(self, password):%0A self.
password
@@ -610,36 +610,16 @@
digest()
-%0A return user
%0A%0A de
|
648b4e6ed9f998c735226549b9cd2c62d82336d7
|
Make display of reference stack line defaulting to False
|
django/projects/mysite/settings_base.py
|
django/projects/mysite/settings_base.py
|
# General Django settings for mysite project.
import django.conf.global_settings as DEFAULT_SETTINGS
# A list of people who get code error notifications. They will get an email
# if DEBUG=False and a view raises an exception.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
# A tuple in the same format as ADMINS of people who get broken-link
# notifications when SEND_BROKEN_LINKS_EMAILS=True.
MANAGERS = ADMINS
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'catmaid.middleware.AnonymousAuthenticationMiddleware',
'catmaid.middleware.AjaxExceptionMiddleware',
'django.middleware.transaction.TransactionMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.staticfiles',
'devserver',
'djcelery',
'taggit',
'adminplus',
'catmaid',
'guardian',
'south',
)
# Use the default template context processors. If custom ones should be
# added, please append it to the tuple to make sure the default processors
# are still available. See this page for further detail:
# http://blog.madpython.com/2010/04/07/django-context-processors-best-practice/
TEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS
# The URL requests are redirected after login
LOGIN_REDIRECT_URL = '/'
# The URL where requests are redirected after login
LOGIN_URL = '/accounts/login'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
'guardian.backends.ObjectPermissionBackend',
)
# User-ID of the anonymous (i.e. not-logged-in) user. This is usualld -1.
ANONYMOUS_USER_ID = -1
# Project ID of a dummy project that will keep all ontologies and
# classifications that are shared between multiple projcts (and are
# thereby project independent).
ONTOLOGY_DUMMY_PROJECT_ID = -1
SOUTH_DATABASE_ADAPTERS = {'default': 'south.db.postgresql_psycopg2'}
# The current site in the django_site database table. This is used so that
# applications can hook into specific site(s) and a single database can manage
# content of multiple sites.
SITE_ID = 1
# Default user profile settings
PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL = False
PROFILE_DISPLAY_STACK_REFERENCE_LINES = True
PROFILE_INDEPENDENT_ONTOLOGY_WORKSPACE_IS_DEFAULT = False
PROFILE_SHOW_TEXT_LABEL_TOOL = False
PROFILE_SHOW_TAGGING_TOOL = False
PROFILE_SHOW_CROPPING_TOOL = False
PROFILE_SHOW_SEGMENTATION_TOOL = False
PROFILE_SHOW_TRACING_TOOL = False
PROFILE_SHOW_ONTOLOGY_TOOL = False
# Defines if a cropped image of a ROI should be created
# automatically when the ROI is created. If set to False
# such an image will be created when requested.
ROI_AUTO_CREATE_IMAGE = False
# Default importer tile width and height
IMPORTER_DEFAULT_TILE_WIDTH = 256
IMPORTER_DEFAULT_TILE_HEIGHT = 256
# A couple of functions useful for generating default directories to
# be used in the settings files:
import os, errno
def relative(*path_components):
'''Returns a path relative to the directory this file is in'''
base = os.path.abspath(os.path.dirname(__file__))
all_parts = [base] + list(path_components)
return os.path.realpath(os.path.join(*all_parts))
# From: http://stackoverflow.com/q/600268/223092
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
import sys
from os.path import realpath
PROJECT_ROOT = relative('..', '..')
for subdirectory in ('projects', 'applications', 'lib'):
full_path = os.path.join(PROJECT_ROOT, subdirectory)
sys.path.insert(0, full_path)
|
Python
| 0.999959
|
@@ -2868,19 +2868,20 @@
LINES =
-Tru
+Fals
e%0APROFIL
|
d2bf3f6d830fb5d4f929af324052f1699c82fcb7
|
fix modified files not being updated
|
mopidy/local/commands.py
|
mopidy/local/commands.py
|
from __future__ import print_function, unicode_literals
import logging
import os
import time
from mopidy import commands, exceptions
from mopidy.audio import scan
from mopidy.local import translator
from mopidy.utils import path
logger = logging.getLogger(__name__)
def _get_library(args, config):
libraries = dict((l.name, l) for l in args.registry['local:library'])
library_name = config['local']['library']
if library_name not in libraries:
logger.warning('Local library %s not found', library_name)
return 1
logger.debug('Using %s as the local library', library_name)
return libraries[library_name](config)
class LocalCommand(commands.Command):
def __init__(self):
super(LocalCommand, self).__init__()
self.add_child('scan', ScanCommand())
self.add_child('clear', ClearCommand())
class ClearCommand(commands.Command):
help = 'Clear local media files from the local library.'
def run(self, args, config):
library = _get_library(args, config)
prompt = '\nAre you sure you want to clear the library? [y/N] '
if raw_input(prompt).lower() != 'y':
print('Clearing library aborted.')
return 0
if library.clear():
print('Library successfully cleared.')
return 0
print('Unable to clear library.')
return 1
class ScanCommand(commands.Command):
help = 'Scan local media files and populate the local library.'
def __init__(self):
super(ScanCommand, self).__init__()
self.add_argument('--limit',
action='store', type=int, dest='limit', default=None,
help='Maxmimum number of tracks to scan')
def run(self, args, config):
media_dir = config['local']['media_dir']
scan_timeout = config['local']['scan_timeout']
flush_threshold = config['local']['scan_flush_threshold']
excluded_file_extensions = config['local']['excluded_file_extensions']
excluded_file_extensions = tuple(
bytes(file_ext.lower()) for file_ext in excluded_file_extensions)
library = _get_library(args, config)
uris_in_library = set()
uris_to_update = set()
uris_to_remove = set()
file_mtimes = path.find_mtimes(media_dir)
logger.info('Found %d files in media_dir.', len(file_mtimes))
num_tracks = library.load()
logger.info('Checking %d tracks from library.', num_tracks)
for track in library.begin():
abspath = translator.local_track_uri_to_path(track.uri, media_dir)
mtime = file_mtimes.pop(abspath, None)
if mtime is None:
logger.debug('Missing file %s', track.uri)
uris_to_remove.add(track.uri)
elif mtime > track.last_modified:
uris_in_library.add(track.uri)
logger.info('Removing %d missing tracks.', len(uris_to_remove))
for uri in uris_to_remove:
library.remove(uri)
for abspath in file_mtimes:
relpath = os.path.relpath(abspath, media_dir)
uri = translator.path_to_local_track_uri(relpath)
if relpath.lower().endswith(excluded_file_extensions):
logger.debug('Skipped %s: File extension excluded.', uri)
continue
uris_to_update.add(uri)
logger.info(
'Found %d tracks which need to be updated.', len(uris_to_update))
logger.info('Scanning...')
uris_to_update = sorted(uris_to_update, key=lambda v: v.lower())
uris_to_update = uris_to_update[:args.limit]
scanner = scan.Scanner(scan_timeout)
progress = _Progress(flush_threshold, len(uris_to_update))
for uri in uris_to_update:
try:
relpath = translator.local_track_uri_to_path(uri, media_dir)
file_uri = path.path_to_uri(os.path.join(media_dir, relpath))
data = scanner.scan(file_uri)
track = scan.audio_data_to_track(data).copy(uri=uri)
library.add(track)
logger.debug('Added %s', track.uri)
except exceptions.ScannerError as error:
logger.warning('Failed %s: %s', uri, error)
if progress.increment():
progress.log()
if library.flush():
logger.debug('Progress flushed.')
progress.log()
library.close()
logger.info('Done scanning.')
return 0
class _Progress(object):
def __init__(self, batch_size, total):
self.count = 0
self.batch_size = batch_size
self.total = total
self.start = time.time()
def increment(self):
self.count += 1
return self.batch_size and self.count % self.batch_size == 0
def log(self):
duration = time.time() - self.start
if self.count >= self.total or not self.count:
logger.info('Scanned %d of %d files in %ds.',
self.count, self.total, duration)
else:
remainder = duration / self.count * (self.total - self.count)
logger.info('Scanned %d of %d files in %ds, ~%ds left.',
self.count, self.total, duration, remainder)
|
Python
| 0
|
@@ -2193,40 +2193,8 @@
g)%0A%0A
- uris_in_library = set()%0A
@@ -2852,18 +2852,17 @@
ris_
-in_library
+to_update
.add
|
31381728cb8d76314c82833d4400b4140fcc573f
|
Change parameter name so it does not conflict with an url parameter called "name".
|
django_jinja/builtins/global_context.py
|
django_jinja/builtins/global_context.py
|
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.core.urlresolvers import reverse as django_reverse, NoReverseMatch
from django.contrib.staticfiles.storage import staticfiles_storage
JINJA2_MUTE_URLRESOLVE_EXCEPTIONS = getattr(settings, "JINJA2_MUTE_URLRESOLVE_EXCEPTIONS", False)
logger = logging.getLogger(__name__)
def url(name, *args, **kwargs):
"""
Shortcut filter for reverse url on templates. Is a alternative to
django {% url %} tag, but more simple.
Usage example:
{{ url('web:timeline', userid=2) }}
This is a equivalent to django:
{% url 'web:timeline' userid=2 %}
"""
try:
return django_reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch as exc:
logger.error('Error: %s', exc)
if not JINJA2_MUTE_URLRESOLVE_EXCEPTIONS:
raise
return ''
def static(path):
return staticfiles_storage.url(path)
|
Python
| 0.000001
|
@@ -359,16 +359,21 @@
def url(
+view_
name, *a
@@ -700,16 +700,21 @@
reverse(
+view_
name, ar
|
4ae3d012fefdf10448f88c9de34c182b0a6b9f9f
|
fix paths in test
|
marmot/features/tests/test_alignment_feature_extractor.py
|
marmot/features/tests/test_alignment_feature_extractor.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, glob
from subprocess import call
import unittest
from marmot.features.alignment_feature_extractor import AlignmentFeatureExtractor
class AlignmentFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.module_path = os.path.dirname(os.path.realpath(__file__))
self.src_name = os.path.join(self.module_path,'../../preprocessing/test_data/corpus.de.1000')
self.tg_name = os.path.join(self.module_path,'../../preprocessing/test_data/corpus.en.1000')
self.aligner_no_model = AlignmentFeatureExtractor()
self.aligner_no_model_2 = AlignmentFeatureExtractor(context_size=2)
def test_alignment_in_obj(self):
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos':['DT','NN','VBZ', 'DT', 'NN'], 'source_pos':['DT','NN','VBZ', 'DT', 'NN'], 'alignments':[[0],[1],[3],[2],[4]]}
(cont_word, left, right) = self.aligner_no_model.get_features(obj)
self.assertEqual(cont_word, u'un')
self.assertEqual(left, [u'frappe'])
self.assertEqual(right, [u'chien'])
(cont_word, left, right) = self.aligner_no_model_2.get_features(obj)
self.assertEqual(left, [u'garcon',u'frappe'])
self.assertEqual(right, [u'chien', '_END_'])
def test_alignment_on_the_fly(self):
obj = {'token':u'boy', 'index':1, 'source':[u'ein', u'junge', u'schlägt', u'einen', u'Hund'], 'target':[u'a', u'boy', u'hits', u'a', u'dog']}
aligner_corpus = AlignmentFeatureExtractor( src_file = self.src_name, tg_file = self.tg_name)
(cont_word, left, right) = aligner_corpus.get_features(obj)
self.assertTrue(obj.has_key('alignments'))
self.assertEqual(cont_word, u'junge')
for a_file in glob.glob('align_model.*'):
call(['rm', a_file])
for a_file in glob.glob(os.path.basename(self.src_name)+'_'+os.path.basename(self.tg_name)+'*'):
call(['rm', a_file])
def test_align_model_in_extractor(self):
obj = {'token':u'boy', 'index':1, 'source':[u'ein', u'junge', u'schlägt', u'einen', u'Hund'], 'target':[u'a', u'boy', u'hits', u'a', u'dog']}
aligner_model = AlignmentFeatureExtractor( align_model = os.path.join(self.module_path, 'test_data/alignments/align_model') )
(cont_word, left, right) = aligner_model.get_features(obj)
self.assertTrue(obj.has_key('alignments'))
self.assertEqual(cont_word, u'junge')
def test_unaligned(self):
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos':['DT','NN','VBZ', 'DT', 'NN'], 'source_pos':['DT','NN','VBZ', 'DT', 'NN'], 'alignments':[[0],[1],[],[2],[4]]}
(cont_word, left, right) = self.aligner_no_model.get_features(obj)
self.assertEqual(cont_word, u'Unaligned')
self.assertEqual(left, [u'Unaligned'])
self.assertEqual(right, [u'Unaligned'])
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000007
|
@@ -397,32 +397,38 @@
./preprocessing/
+tests/
test_data/corpus
@@ -508,24 +508,30 @@
eprocessing/
+tests/
test_data/co
@@ -1621,17 +1621,16 @@
tractor(
-
src_file
|
6dffa2d22fa5da3b2d8fbcdff04477ff0116bfc1
|
Resolve a bug in the write function
|
utilities.py
|
utilities.py
|
# Function to return a list of pvs from a given file
import pkg_resources
pkg_resources.require('aphla')
import aphla as ap
def get_pv_names(mode):
''' Given a certain ring mode as a string, return all available pvs '''
ap.machines.load(mode)
result = set()
elements = ap.getElements('*')
for element in elements:
pvs = element.pv()
if(len(pvs) > 0):
pv_name = pvs[0].split(':')[0]
result.add(pv_name)
return result
def get_pvs_from_file(filepath):
''' Return a list of pvs from a given file '''
with open(filepath) as f:
contents = f.read().splitlines()
return contents
def write_pvs_to_file(filename, data):
''' Write given pvs to file '''
f = open(filename, 'w')
for element in data:
f.write(element, '\n')
f.close()
|
Python
| 0.00024
|
@@ -809,17 +809,18 @@
(element
-,
+ +
'%5Cn')%0A
|
1002b81ba5d612271a1e4c33e411eed01398f6fa
|
Remove not needed env string
|
dhcpcanon/clientscript.py
|
dhcpcanon/clientscript.py
|
#
""""""
from __future__ import unicode_literals
import os
import logging
import attr
import subprocess
from constants import STATES2REASONS
logger = logging.getLogger('dhcpcanon')
@attr.s
class ClientScript(object):
"""Simulates the behaviour of isc-dhcp client-script or
Network Manager nm-dhcp-helper.
"""
scriptname = attr.ib(default=None)
env = attr.ib(default=attr.Factory(dict))
def script_init(self, lease, state, prefix='', medium=''):
logger.debug('script init with state %s', state)
if type(state) == int:
reason = STATES2REASONS[state]
else:
reason = state
self.env['reason'] = reason
# FIXME: what is medium?
self.env['medium'] = medium
self.env['client'] = 'dhcpcanon'
self.env['pid'] = str(os.getpid())
self.env['interface'] = str(lease.interface)
self.env['ip_address'] = str(lease.address)
self.env['subnet_mask'] = lease.subnet_mask
self.env['network_number'] = str(lease.network)
self.env['broadcast_address'] = lease.broadcast_address
self.env['domain_name_servers'] = lease.name_server
self.env['routers'] = lease.router
self.env['dhcp_server_identifier'] = str(lease.server_address)
self.env['next_server'] = lease.next_server
self.env['domain_name'] = lease.domain
# FIXME: what is expiry?
self.env['expiry'] = str(lease.lease_time)
self.env['dhcp_lease_time'] = str(lease.lease_time)
self.env['dhcp_renewal_time'] = str(lease.renewal_time)
self.env['dhcp_rebinding_time'] = str(lease.rebinding_time)
# logger.debug('env %s', self.env)
def script_go(self, scriptname=None, env=None):
scriptname = self.scriptname or scriptname
env = self.env or env
envstr = "\n".join(["%s='%s'" % (k, v) for (k, v) in
env.items()])
logger.debug('calling script %s', scriptname)
# with env %s', scriptname,
# envstr)
# os.execve(scriptname, [scriptname], clientenv)
p = subprocess.Popen([scriptname], shell=False,
stdin=None, stdout=None, stderr=None,
close_fds=True, env=env)
# FIXME: what to do with p?
return p
|
Python
| 0.00082
|
@@ -1837,111 +1837,8 @@
env%0A
- envstr = %22%5Cn%22.join(%5B%22%25s='%25s'%22 %25 (k, v) for (k, v) in%0A env.items()%5D)%0A
|
c829f5328065e4051160a3ee7d99c061bb41f08c
|
add additional args to yum commands
|
pyinfra/modules/yum.py
|
pyinfra/modules/yum.py
|
'''
Manage yum packages and repositories. Note that yum package names are case-sensitive.
'''
from __future__ import unicode_literals
from six import StringIO
from six.moves.urllib.parse import urlparse
from pyinfra.api import operation
from . import files
from .util.packaging import ensure_packages
@operation
def key(state, host, key):
'''
Add yum gpg keys with ``rpm``.
+ key: filename or URL
Note:
always returns one command, not state checking
'''
yield 'rpm --import {0}'.format(key)
@operation
def repo(
state, host, name, baseurl,
present=True, description=None, enabled=True, gpgcheck=True, gpgkey=None,
):
'''
Add/remove/update yum repositories.
+ name: filename for the repo (in ``/etc/yum/repos.d/``)
+ baseurl: the baseurl of the repo
+ present: whether the ``.repo`` file should be present
+ description: optional verbose description
+ gpgcheck: whether set ``gpgcheck=1``
+ gpgkey: the URL to the gpg key for this repo
'''
# Description defaults to name
description = description or name
filename = '/etc/yum.repos.d/{0}.repo'.format(name)
# If we don't want the repo, just remove any existing file
if not present:
yield files.file(state, host, filename, present=False)
return
# Build the repo file from string
repo_lines = [
'[{0}]'.format(name),
'name={0}'.format(description),
'baseurl={0}'.format(baseurl),
'enabled={0}'.format(1 if enabled else 0),
'gpgcheck={0}'.format(1 if gpgcheck else 0),
]
if gpgkey:
repo_lines.append('gpgkey={0}'.format(gpgkey))
repo_lines.append('')
repo = '\n'.join(repo_lines)
repo = StringIO(repo)
# Ensure this is the file on the server
yield files.put(state, host, repo, filename)
@operation
def rpm(state, host, source, present=True):
'''
Add/remove ``.rpm`` file packages.
+ source: filename or URL of the ``.rpm`` package
+ present: whether ore not the package should exist on the system
URL sources with ``present=False``:
If the ``.rpm`` file isn't downloaded, pyinfra can't remove any existing
package as the file won't exist until mid-deploy.
'''
# If source is a url
if urlparse(source).scheme:
# Generate a temp filename (with .rpm extension to please yum)
temp_filename = '{0}.rpm'.format(state.get_temp_filename(source))
# Ensure it's downloaded
yield files.download(state, host, source, temp_filename)
# Override the source with the downloaded file
source = temp_filename
# Check for file .rpm information
info = host.fact.rpm_package(source)
exists = False
# We have info!
if info:
current_packages = host.fact.rpm_packages
if (
info['name'] in current_packages
and info['version'] in current_packages[info['name']]
):
exists = True
# Package does not exist and we want?
if present and not exists:
# If we had info, always install
if info:
yield 'rpm -U {0}'.format(source)
# This happens if we download the package mid-deploy, so we have no info
# but also don't know if it's installed. So check at runtime, otherwise
# the install will fail.
else:
yield 'rpm -qa | grep `rpm -qp {0}` || rpm -U {0}'.format(source)
# Package exists but we don't want?
if exists and not present:
yield 'yum remove -y {0}'.format(info['name'])
@operation
def update(state, host):
'''
Updates all yum packages.
'''
yield 'yum update -y'
_update = update # noqa: E305 (for use below where update is a kwarg)
@operation
def packages(
state, host, packages=None,
present=True, latest=False, update=False, clean=False,
):
'''
Install/remove/update yum packages & updates.
+ packages: list of packages to ensure
+ present: whether the packages should be installed
+ latest: whether to upgrade packages without a specified version
+ update: run yum update
+ clean: run yum clean
Versions:
Package versions can be pinned like yum: ``<pkg>-<version>``
'''
if clean:
yield 'yum clean all'
if update:
yield _update(state, host)
yield ensure_packages(
packages, host.fact.rpm_packages, present,
install_command='yum install -y',
uninstall_command='yum remove -y',
upgrade_command='yum update -y',
version_join='-',
latest=latest,
)
|
Python
| 0.000001
|
@@ -3885,16 +3885,69 @@
=False,%0A
+ installArgs='', updateArgs='', uninstallArgs='',%0A
):%0A '
@@ -4224,16 +4224,219 @@
um clean
+%0A + installArgs: additional arguments to the yum install command%0A + updateArgs: additional arguments to the yum update command%0A + uninstallArgs: additional arguments to the yum uninstall command
%0A%0A Ve
@@ -4731,24 +4731,38 @@
install -y'
+ + installArgs
,%0A un
@@ -4788,24 +4788,40 @@
m remove -y'
+ + uninstallArgs
,%0A up
@@ -4849,16 +4849,29 @@
date -y'
+ + updateArgs
,%0A
|
d09fa37069dd6f107d464870d2c59c05fd9625d6
|
add tool menu flag
|
sansview/local_config.py
|
sansview/local_config.py
|
"""
Application settings
"""
import time
import os
from sans.guiframe.gui_style import GUIFRAME
# Version of the application
__appname__ = "SansView"
__version__ = '1.9_release_candidate'
__download_page__ = 'http://danse.chem.utk.edu'
__update_URL__ = 'http://danse.chem.utk.edu/sansview_version.php'
# Debug message flag
__EVT_DEBUG__ = False
# Flag for automated testing
__TEST__ = False
# Debug message should be written to a file?
__EVT_DEBUG_2_FILE__ = False
__EVT_DEBUG_FILENAME__ = "debug.log"
# About box info
_do_aboutbox=True
_acknowledgement = \
'''This software was developed by the University of Tennessee as part of the
Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
project funded by the US National Science Foundation.
'''
_homepage = "http://danse.chem.utk.edu"
_download = "http://danse.chem.utk.edu/sansview.html"
_authors = []
_paper = "http://danse.us/trac/sans/newticket"
_license = "mailto:sansdanse@gmail.com"
_nsf_logo = "images/nsf_logo.png"
_danse_logo = "images/danse_logo.png"
_inst_logo = "images/utlogo.gif"
_nsf_url = "http://www.nsf.gov"
_danse_url = "http://www.cacr.caltech.edu/projects/danse/release/index.html"
_inst_url = "http://www.utk.edu"
_corner_image = "images/angles_flat.png"
_welcome_image = "images/SVwelcome.png"
_copyright = "(c) 2009, University of Tennessee"
#edit the list of file state your plugin can read
APPLICATION_WLIST = 'SansView files (*.svs)|*.svs'
APPLICATION_STATE_EXTENSION = '.svs'
GUIFRAME_WIDTH = 1100
GUIFRAME_HEIGHT = 840
PLUGIN_STATE_EXTENSIONS = ['.fitv', '.inv', '.prv']
PLUGINS_WLIST = ['Fitting files (*.fitv)|*.fitv',
'Invariant files (*.inv)|*.inv',
'P(r) files (*.prv)|*.prv']
PLOPANEL_WIDTH = 450
PLOPANEL_HEIGTH = 400
SPLASH_SCREEN_PATH = os.path.join("images","SVwelcome_mini.png")
DEFAULT_STYLE = GUIFRAME.MULTIPLE_APPLICATIONS|GUIFRAME.MANAGER_ON
SPLASH_SCREEN_WIDTH = 512
SPLASH_SCREEN_HEIGHT = 366
SS_MAX_DISPLAY_TIME = 3000 #3 sec
SetupIconFile_win = os.path.join("images", "ball.ico")
SetupIconFile_mac = os.path.join("images", "ball.icns")
DefaultGroupName = "DANSE"
OutputBaseFilename = "setupSansView"
def printEVT(message):
if __EVT_DEBUG__:
print "%g: %s" % (time.clock(), message)
if __EVT_DEBUG_2_FILE__:
out = open(__EVT_DEBUG_FILENAME__, 'a')
out.write("%10g: %s\n" % (time.clock(), message))
out.close()
|
Python
| 0.000002
|
@@ -1958,16 +1958,39 @@
NAGER_ON
+%7CGUIFRAME.CALCULATOR_ON
%0D%0ASPLASH
|
f6a5bb4784bc069813e68278a8f78abacd49f4f6
|
raise exception when parsing XML failed and libxml2 was the backend
|
bakefile/src/xmlparser.py
|
bakefile/src/xmlparser.py
|
class Element:
    """A parsed XML element: tag name, text value, attributes, children,
    and the source location it was read from."""

    def __init__(self):
        self.name = None       # tag name
        self.value = None      # stripped text content (leaf nodes)
        self.props = {}        # attribute name -> value
        self.children = []     # nested Element objects
        self.filename = None   # source file the element came from
        self.lineno = None     # line number within that file, if known

    def __copy__(self):
        """Shallow copy: props and children are shared, not duplicated."""
        clone = Element()
        for attr in ('name', 'value', 'props', 'children',
                     'filename', 'lineno'):
            setattr(clone, attr, getattr(self, attr))
        return clone

    def location(self):
        """Return 'file:line' when the line is known, else just the file."""
        if self.lineno is None:
            return self.filename
        return "%s:%i" % (self.filename, self.lineno)
class ParsingError(Exception):
    """Raised when an XML input cannot be parsed.

    Carries no arguments; details are reported by the parsing backend
    through its own error output.
    """

    def __init__(self):
        # Deliberately a no-arg constructor: the exception is a pure signal.
        pass
def __parseFileLibxml2(filename):
    """Parse *filename* with libxml2 and return the root Element tree.

    Raises ParsingError if libxml2 reports a parser error.
    """
    def handleNode(filename, n):
        # Convert one libxml2 node into an Element; whitespace-only and
        # non-element nodes are dropped by returning None.
        if n.isBlankNode(): return None
        if n.type != 'element': return None
        e = Element()
        e.name = n.name
        e.filename = filename
        e.lineno = n.lineNo()
        # Attributes form a C-style linked list; walk it.
        prop = n.properties
        while prop != None:
            e.props[prop.name] = prop.content
            prop = prop.next
        # Children form a linked list as well; recurse over them.
        c = n.children
        while c != None:
            l = handleNode(filename, c)
            if l != None:
                e.children.append(l)
            c = c.next
        # Leaf nodes carry their stripped text content as the value.
        if len(e.children) == 0:
            e.value = n.content.strip()
        return e
    try:
        # Configure the parser context: expand entities, drop blank
        # nodes, skip DTD validation, but keep line numbers for errors.
        ctxt = libxml2.createFileParserCtxt(filename);
        ctxt.replaceEntities(1)
        ctxt.keepBlanks = 0
        ctxt.validate(0)
        ctxt.lineNumbers(1)
        ctxt.parseDocument()
        doc = ctxt.doc()
        t = handleNode(filename, doc.getRootElement())
        # Release the underlying C document explicitly; the Element tree
        # built above no longer references it.
        doc.freeDoc()
        return t
    except libxml2.parserError:
        raise ParsingError()
def __doParseMinidom(func, src):
    """Parse *src* via the given minidom entry point (parse/parseString)
    and return the document converted to an Element tree.

    Raises ParsingError on any DOM or SAX failure.
    """
    def _convert(filename, node):
        # Only element nodes are represented; everything else is dropped.
        if node.nodeType != node.ELEMENT_NODE:
            return None
        elem = Element()
        elem.name = node.tagName
        elem.filename = filename
        if node.hasAttributes():
            for attr_name in node.attributes.keys():
                elem.props[attr_name] = node.getAttribute(attr_name)
        for child in node.childNodes:
            converted = _convert(filename, child)
            if converted is not None:
                elem.children.append(converted)
        # A leading text node becomes the value; empty text means None.
        first = node.firstChild
        if first is not None and first.nodeType == node.TEXT_NODE:
            elem.value = first.data.strip()
            if elem.value == '':
                elem.value = None
        return elem

    try:
        dom = func(src)
        dom.normalize()
        tree = _convert(src, dom.documentElement)
        dom.unlink()
        return tree
    except xml.dom.DOMException:
        raise ParsingError()
    except xml.sax.SAXException:
        raise ParsingError()
def __parseFileMinidom(filename):
    # Minidom fallback used when libxml2 is unavailable (see selection below).
    return __doParseMinidom(xml.dom.minidom.parse, filename)

def __parseStringMinidom(data):
    # Parse XML held in a string rather than read from a file.
    return __doParseMinidom(xml.dom.minidom.parseString, data)
import xml.sax, xml.dom, xml.dom.minidom

# String parsing always goes through minidom; libxml2 is only wired up
# for file parsing below.
parseString = __parseStringMinidom

# Use libxml2 if available, it gives us better error checking than
# xml.dom.minidom (DTD validation, line numbers etc.)
try:
    import libxml2
    parseFile = __parseFileLibxml2
except(ImportError):
    # Fall back to the stdlib parser and warn on stderr.
    parseFile = __parseFileMinidom
    import sys
    sys.stderr.write("Warning: libxml2 missing, running in non-validating mode\n")
|
Python
| 0.000004
|
@@ -674,16 +674,84 @@
pass%0A%0A
+def __libxml2err(ctx, str):%0A print str%0A raise ParsingError()%0A%0A
def __pa
@@ -3178,16 +3178,70 @@
Libxml2%0A
+ libxml2.registerErrorHandler(__libxml2err, %22--%3E%22)%0A
except(I
|
3cf13b783a1aa3a5bd956d38ad2ca193bc67f1ae
|
Fix call
|
pyproteome/__init__.py
|
pyproteome/__init__.py
|
from .utils import DEFAULT_DPI
from .analysis import (
correlation, tables, volcano,
)
from .motifs import (
logo, motif, phosphosite,
)
from . import (
analysis, bca, data_sets, discoverer, levels,
loading, modification, motifs, paths, pride, protein, sequence,
utils, version,
)
from . import cluster
try:
from IPython import get_ipython
from IPython.core.magic import register_line_magic
except ImportError:
get_ipython = None
if get_ipython() is not None:
@register_line_magic
def import_all(line=None):
"""
Inialize and import many packages using IPython Notebooks magic.
Examples
--------
>>> from pyproteome import *
>>> %import_all
"""
ip = get_ipython()
ip.run_line_magic(
"config",
"InlineBackend.figure_formats = ['retina']",
)
ip.run_line_magic("load_ext", "autoreload")
ip.run_line_magic("autoreload", "2")
ip.run_line_magic("aimport", "pyproteome")
ip.run_line_magic("aimport", "brainrnaseq")
ip.run_line_magic("pylab", "inline")
ip.ex(
"\n".join([
"from collections import OrderedDict, Counter",
"import logging",
"import os",
"import pickle",
"from IPython.display import display, SVG, Image",
"import numpy as np",
"import pandas as pd",
"import seaborn as sns",
"import sklearn",
"pylab.rcParams['figure.figsize'] = (12, 8)",
"pylab.rcParams['mathtext.default'] = 'regular'",
"pylab.rcParams['figure.max_open_warning'] = 0",
"sns.set_style('white')",
"sns.set_context('notebook')",
'pd.set_option("display.max_colwidth", 500)',
'pd.set_option("display.max_rows", 500)',
"formatter = logging.Formatter('%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s')",
"root = logging.getLogger()",
"if not root.handlers: "
"handler = logging.StreamHandler(); "
"handler.setFormatter(formatter); "
"root.setLevel(logging.INFO); "
"root.addHandler(handler)",
])
)
# Public API of the pyproteome package; mirrors the imports above.
# NOTE(review): "import_all" is only defined when IPython is available —
# confirm whether it should be listed unconditionally.
__all__ = [
    "analysis",
    "bca",
    "cluster",
    "correlation",
    "data_sets",
    "discoverer",
    "levels",
    "loading",
    "modification",
    "motifs",
    "phosphosite",
    "paths",
    "pride",
    "protein",
    "sequence",
    "tables",
    "utils",
    "version",
    "volcano",
    "import_all",
    "DEFAULT_DPI",
    "logo",
    "motif",
]
|
Python
| 0.000001
|
@@ -462,16 +462,44 @@
ne%0A%0A%0Aif
+get_ipython is not None and
get_ipyt
|
55f4507c2285b5927e911a455065dd9c6d60112a
|
add a Node.__repr__ method
|
pypuppetdbquery/ast.py
|
pypuppetdbquery/ast.py
|
# -*- coding: utf-8 -*-
#
# This file is part of pypuppetdbquery.
# Copyright © 2016 Chris Boot <bootc@bootc.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Node(object):
    """Base class for every node in the query AST."""
    pass

class Literal(Node):
    """A literal scalar value appearing in a query."""
    def __init__(self, value):
        self.value = value

class Date(Literal):
    """A literal to be interpreted as a date."""
    pass

class Query(Node):
    """Root of the AST: wraps the top-level expression."""
    def __init__(self, expression):
        self.expression = expression

class Expression(Node):
    """Base class for expression nodes."""
    pass

class UnaryExpression(Node):
    """An expression with a single operand."""
    def __init__(self, expression):
        self.expression = expression

class BinaryExpression(Node):
    """An expression with a left and a right operand."""
    def __init__(self, left, right):
        self.left = left
        self.right = right

class AndExpression(BinaryExpression):
    """Logical conjunction of two expressions."""
    pass

class OrExpression(BinaryExpression):
    """Logical disjunction of two expressions."""
    pass

class NotExpression(UnaryExpression):
    """Logical negation of an expression."""
    pass

class ParenthesizedExpression(UnaryExpression):
    """An expression grouped by parentheses."""
    pass

class BlockExpression(UnaryExpression):
    """An expression grouped as a block."""
    pass

class Comparison(Expression):
    """A comparison: ``left operator right``."""
    def __init__(self, operator, left, right):
        self.operator = operator
        self.left = left
        self.right = right

class Identifier(Node):
    """A named identifier."""
    def __init__(self, name):
        self.name = name

class RegexpIdentifier(Identifier):
    """An identifier given as a regular expression."""
    pass

class IdentifierPath(Node):
    """A path of identifier components; starts with a single component."""
    def __init__(self, component):
        self.components = [component]

class Subquery(Node):
    """A query against another endpoint."""
    def __init__(self, endpoint, expression):
        self.endpoint = endpoint
        self.expression = expression

class Resource(Expression):
    """A resource reference, optionally restricted by parameters."""
    def __init__(self, res_type, title, exported, parameters=None):
        self.res_type = res_type
        self.title = title
        self.exported = exported
        self.parameters = parameters

class RegexpNodeMatch(Expression):
    """Match node names against a regular expression."""
    def __init__(self, value):
        self.value = value
|
Python
| 0
|
@@ -655,16 +655,32 @@
cense.%0A%0A
+import inspect%0A%0A
%0Aclass N
@@ -692,28 +692,520 @@
bject):%0A
-pass
+def __repr__(self):%0A # Represent the variables defined in the constructor in the same order%0A # that they are listed in the constructor.%0A members = %5B%5D%0A for var in inspect.getargspec(self.__init__).args:%0A if var == 'self':%0A continue%0A%0A members.append(repr(getattr(self, var)))%0A%0A # Put it together with the class name%0A return %22%7Bcls%7D(%7Bmembers%7D)%22.format(%0A cls=self.__class__.__name__, members=', '.join(members))
%0A%0A%0Aclass Lit
|
4be8aea26f5dbec2c93413f6e545a47e850a7382
|
Mark test as xfail due to new connection factory behavior
|
irc/tests/test_client.py
|
irc/tests/test_client.py
|
import datetime
import random
import pytest
import mock
import irc.client
def test_version():
    # The client module must expose a non-empty VERSION tuple.
    assert 'VERSION' in vars(irc.client)
    assert isinstance(irc.client.VERSION, tuple)
    assert irc.client.VERSION, "No VERSION detected."

def test_delayed_command_order():
    """
    delayed commands should be sorted by delay time
    """
    null = lambda: None
    # NOTE: xrange — this test module targets Python 2.
    delays = [random.randint(0, 99) for x in xrange(5)]
    cmds = sorted([
        irc.client.DelayedCommand(delay, null, tuple())
        for delay in delays
    ])
    # Sorting the commands must order them by their delay in seconds.
    assert [c.delay.seconds for c in cmds] == sorted(delays)
def test_periodic_command_fixed_delay():
    """
    Test that we can construct a periodic command with a fixed initial
    delay.
    """
    fd = irc.client.PeriodicCommandFixedDelay.at_time(
        at = datetime.datetime.now(),
        delay = datetime.timedelta(seconds=2),
        function = lambda: None,
        arguments = [],
    )
    # Idiom fix: assert truthiness directly instead of comparing to
    # True/False with == (flake8 E712).
    assert fd.due()
    assert not fd.next().due()
@mock.patch('irc.connection.socket')
def test_privmsg_sends_msg(socket_mod):
    # With the socket module mocked out, connecting and sending a privmsg
    # must produce exactly this wire-format write.
    server = irc.client.IRC().server()
    server.connect('foo', 6667, 'bestnick')
    server.privmsg('#best-channel', 'You are great')
    socket_mod.socket.return_value.send.assert_called_with(
        b'PRIVMSG #best-channel :You are great\r\n')

@mock.patch('irc.connection.socket')
def test_privmsg_fails_on_embedded_carriage_returns(socket_mod):
    # Embedded line breaks would allow IRC protocol injection, so
    # privmsg must reject them.
    server = irc.client.IRC().server()
    server.connect('foo', 6667, 'bestnick')
    with pytest.raises(ValueError):
        server.privmsg('#best-channel', 'You are great\nSo are you')
|
Python
| 0
|
@@ -974,32 +974,106 @@
sg(socket_mod):%0A
+%09pytest.xfail(%22Fails because server finds 'write' method on mock socket%22)%0A
%09server = irc.cl
|
0314334373b380c41e72ed41bfef1f7cbc65b894
|
Add CAN_DETECT
|
bears/yml/YAMLLintBear.py
|
bears/yml/YAMLLintBear.py
|
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.PipRequirement import PipRequirement
@linter(executable='yamllint',
        output_format="regex",
        # yamllint's "parsable" output format looks like:
        #   file.yaml:3:4: [error] trailing spaces
        output_regex=r'.+:(?P<line>\d+):(?P<column>\d+): '
                     r'\[(?P<severity>error|warning)\] (?P<message>.+)')
class YAMLLintBear:
    """
    Check yaml code for errors and possible problems.

    You can read more about capabilities at
    <http://yamllint.readthedocs.org/en/latest/rules.html>.
    """
    LANGUAGES = {"YAML"}
    REQUIREMENTS = {PipRequirement('yamllint', '1.*')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'

    @staticmethod
    def create_arguments(filename, file, config_file, yamllint_config: str=''):
        """
        :param yamllint_config: Path to a custom configuration file.
        """
        # '-f parsable' makes yamllint emit the line:column format that
        # output_regex above expects.
        args = ('-f', 'parsable', filename)
        if yamllint_config:
            # Forward the user-supplied configuration to yamllint.
            args += ('--config=' + yamllint_config,)
        return args
|
Python
| 0.000235
|
@@ -704,16 +704,58 @@
GPL-3.0'
+%0A CAN_DETECT = %7B'Syntax', 'Formatting'%7D
%0A%0A @s
|
0d9b29c80502f8c4f23920ec65bc89093d553e47
|
Corrige numero da versao do pacote
|
pysigep/__version__.py
|
pysigep/__version__.py
|
# Distribution metadata for the pysigep package (consumed by setup.py).
__title__ = 'pysigep'
__description__ = 'API python para uso dos serviços fornecidos pelo ' \
                  'SIGEPWeb dos Correios '
__version__ = '0.4.4'
__url__ = 'https://github.com/mstuttgart/pysigep'
__download_url__ = 'https://github.com/mstuttgart/pysigep'
__author__ = 'Michell Stuttgart'
__author_email__ = 'michellstut@gmail.com'
__maintainer__ = 'Michell Stuttgart'
__maintainer_email__ = 'michellstut@gmail.com'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2016-2017 Michell Stuttgart'
__status__ = 'Development'
|
Python
| 0
|
@@ -151,11 +151,11 @@
'0.
-4.4
+1.0
'%0A__
|
1a0c9bb4cf26e75745398cdfa38252c250267d9a
|
add show plot in example code
|
librosa/feature/rhythm.py
|
librosa/feature/rhythm.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Rhythmic feature extraction'''
import numpy as np
from .. import util
from ..core.audio import autocorrelate
from ..util.exceptions import ParameterError
from ..filters import get_window
__all__ = ['tempogram']
# -- Rhythmic features -- #
def tempogram(y=None, sr=22050, onset_envelope=None, hop_length=512,
              win_length=384, center=True, window='hann', norm=np.inf):
    '''Compute the tempogram: local autocorrelation of the onset strength envelope. [1]_

    .. [1] Grosche, Peter, Meinard Müller, and Frank Kurth.
        "Cyclic tempogram - A mid-level tempo representation for music signals."
        ICASSP, 2010.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        Audio time series.

    sr : number > 0 [scalar]
        sampling rate of `y`

    onset_envelope : np.ndarray [shape=(n,) or (m, n)] or None
        Optional pre-computed onset strength envelope as provided by
        `onset.onset_strength`.

        If multi-dimensional, tempograms are computed independently for each
        band (first dimension).

    hop_length : int > 0
        number of audio samples between successive onset measurements

    win_length : int > 0
        length of the onset autocorrelation window (in frames/onset measurements)
        The default settings (384) corresponds to `384 * hop_length / sr ~= 8.9s`.

    center : bool
        If `True`, onset autocorrelation windows are centered.
        If `False`, windows are left-aligned.

    window : string, function, number, tuple, or np.ndarray [shape=(win_length,)]
        A window specification as in `core.stft`.

    norm : {np.inf, -np.inf, 0, float > 0, None}
        Normalization mode.  Set to `None` to disable normalization.

    Returns
    -------
    tempogram : np.ndarray [shape=(win_length, n) or (m, win_length, n)]
        Localized autocorrelation of the onset strength envelope.

        If given multi-band input (`onset_envelope.shape==(m,n)`) then
        `tempogram[i]` is the tempogram of `onset_envelope[i]`.

    Raises
    ------
    ParameterError
        if neither `y` nor `onset_envelope` are provided

        if `win_length < 1`

    See Also
    --------
    librosa.onset.onset_strength
    librosa.util.normalize
    librosa.core.stft

    Examples
    --------
    >>> # Compute local onset autocorrelation
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> hop_length = 512
    >>> oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
    >>> tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=sr,
    ...                                       hop_length=hop_length)
    >>> # Compute global onset autocorrelation
    >>> ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
    >>> ac_global = librosa.util.normalize(ac_global)
    >>> # Estimate the global tempo for display purposes
    >>> tempo = librosa.beat.tempo(onset_envelope=oenv, sr=sr,
    ...                            hop_length=hop_length)[0]

    >>> import matplotlib.pyplot as plt
    >>> plt.figure(figsize=(8, 8))
    >>> plt.subplot(4, 1, 1)
    >>> plt.plot(oenv, label='Onset strength')
    >>> plt.xticks([])
    >>> plt.legend(frameon=True)
    >>> plt.axis('tight')
    >>> plt.subplot(4, 1, 2)
    >>> # We'll truncate the display to a narrower range of tempi
    >>> librosa.display.specshow(tempogram, sr=sr, hop_length=hop_length,
    ...                          x_axis='time', y_axis='tempo')
    >>> plt.axhline(tempo, color='w', linestyle='--', alpha=1,
    ...             label='Estimated tempo={:g}'.format(tempo))
    >>> plt.legend(frameon=True, framealpha=0.75)
    >>> plt.subplot(4, 1, 3)
    >>> x = np.linspace(0, tempogram.shape[0] * float(hop_length) / sr,
    ...                 num=tempogram.shape[0])
    >>> plt.plot(x, np.mean(tempogram, axis=1), label='Mean local autocorrelation')
    >>> plt.plot(x, ac_global, '--', alpha=0.75, label='Global autocorrelation')
    >>> plt.xlabel('Lag (seconds)')
    >>> plt.axis('tight')
    >>> plt.legend(frameon=True)
    >>> plt.subplot(4,1,4)
    >>> # We can also plot on a BPM axis
    >>> freqs = librosa.tempo_frequencies(tempogram.shape[0], hop_length=hop_length, sr=sr)
    >>> plt.semilogx(freqs[1:], np.mean(tempogram[1:], axis=1),
    ...              label='Mean local autocorrelation', basex=2)
    >>> plt.semilogx(freqs[1:], ac_global[1:], '--', alpha=0.75,
    ...              label='Global autocorrelation', basex=2)
    >>> plt.axvline(tempo, color='black', linestyle='--', alpha=.8,
    ...             label='Estimated tempo={:g}'.format(tempo))
    >>> plt.legend(frameon=True)
    >>> plt.xlabel('BPM')
    >>> plt.axis('tight')
    >>> plt.grid()
    >>> plt.tight_layout()
    '''

    from ..onset import onset_strength

    if win_length < 1:
        raise ParameterError('win_length must be a positive integer')

    ac_window = get_window(window, win_length, fftbins=True)

    if onset_envelope is None:
        if y is None:
            raise ParameterError('Either y or onset_envelope must be provided')

        onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)
    else:
        # Force row-contiguity to avoid framing errors below
        onset_envelope = np.ascontiguousarray(onset_envelope)

    if onset_envelope.ndim > 1:
        # If we have multi-band input, iterate over rows
        return np.asarray([tempogram(onset_envelope=oe_subband,
                                     hop_length=hop_length,
                                     win_length=win_length,
                                     center=center,
                                     window=window,
                                     norm=norm) for oe_subband in onset_envelope])

    # Center the autocorrelation windows
    n = len(onset_envelope)

    if center:
        # Pad with a linear ramp to zero so centered edge windows are defined.
        onset_envelope = np.pad(onset_envelope, int(win_length // 2),
                                mode='linear_ramp', end_values=[0, 0])

    # Carve onset envelope into frames
    odf_frame = util.frame(onset_envelope,
                           frame_length=win_length,
                           hop_length=1)

    # Truncate to the length of the original signal
    if center:
        odf_frame = odf_frame[:, :n]

    # Window, autocorrelate, and normalize
    return util.normalize(autocorrelate(odf_frame * ac_window[:, np.newaxis],
                                        axis=0),
                          norm=norm, axis=0)
|
Python
| 0
|
@@ -4795,16 +4795,35 @@
ayout()%0A
+ %3E%3E%3E plt.show()%0A
'''%0A
|
124d202ee6c18b79e5baf560b1cbfaddf47ed194
|
allow runtests.py to run only certain tests
|
mpmath/tests/runtests.py
|
mpmath/tests/runtests.py
|
#!/usr/bin/env python

"""
python runtests.py -py
  Use py.test to run tests (more useful for debugging)

python runtests.py -psyco
  Enable psyco to make tests run about 50% faster

python runtests.py -profile
  Generate profile stats (this is much slower)

python runtests.py -nogmpy
  Run tests without using GMPY even if it exists

python runtests.py -strict
  Enforce extra tests in normalize()

python runtests.py -local
  Insert '../..' at the beginning of sys.path to use local mpmath
"""

import sys, os

# Each recognized flag is consumed (removed from sys.argv) so that later
# argument handling never sees it.
if "-psyco" in sys.argv:
    sys.argv.remove('-psyco')
    import psyco
    psyco.full()

profile = False
if "-profile" in sys.argv:
    sys.argv.remove('-profile')
    profile = True

# These environment variables are read by mpmath at import time.
if "-nogmpy" in sys.argv:
    sys.argv.remove('-nogmpy')
    os.environ['MPMATH_NOGMPY'] = 'Y'

if "-strict" in sys.argv:
    sys.argv.remove('-strict')
    os.environ['MPMATH_STRICT'] = 'Y'

# Prefer the in-tree mpmath over any installed copy.
if "-local" in sys.argv:
    sys.argv.remove('-local')
    sys.path.insert(0, '../..')
def testit():
    """Run the test suite: via py.test when '-py' is given, otherwise with
    the built-in runner that imports and times every test_* function."""
    if "-py" in sys.argv:
        sys.argv.remove('-py')
        import py
        py.test.cmdline.main()
    else:
        import glob
        import os.path
        from time import clock
        modules = []
        # Import every test*.py module, ordered by its optional module-level
        # 'priority' attribute (default 100).
        for f in glob.glob("test*.py"):
            name = os.path.splitext(os.path.basename(f))[0]
            module = __import__(name)
            priority = module.__dict__.get('priority', 100)
            # Priority 666 means "run only this module".
            if priority == 666:
                modules = [[priority, name, module]]
                break
            modules.append([priority, name, module])
        modules.sort()
        tstart = clock()
        for priority, name, module in modules:
            print name
            # Run every function whose name starts with 'test_', timing each.
            for f in sorted(module.__dict__.keys()):
                if f.startswith('test_'):
                    print "    ", f[5:].ljust(25),
                    t1 = clock()
                    module.__dict__[f]()
                    t2 = clock()
                    print "ok", "      ", ("%.7f" % (t2-t1)), "s"
        tend = clock()
        print
        print "finished tests in", ("%.2f" % (tend-tstart)), "seconds"

# Entry point: optionally run the whole suite under cProfile.
if profile:
    import cProfile
    cProfile.run("testit()", sort=2)
else:
    testit()
|
Python
| 0.000004
|
@@ -504,24 +504,156 @@
l mpmath%0D%0A%0D%0A
+Additional arguments are used to filter the tests to run. Only files that have%0D%0Aone of the arguments in their name are executed.%0D%0A%0D%0A
%22%22%22%0D%0A%0D%0Aimpor
@@ -1387,16 +1387,45 @@
s = %5B%5D%0D%0A
+ args = sys.argv%5B1:%5D%0D%0A
@@ -1518,16 +1518,261 @@
f))%5B0%5D%0D%0A
+ if args:%0D%0A ok = False%0D%0A for arg in args:%0D%0A if arg in name:%0D%0A ok = True%0D%0A break%0D%0A if not ok:%0D%0A continue%0D%0A
|
e023c87a8ce566e6fd320677d0c9c7269665e6e4
|
add -r resolution argument
|
python/data_browser.py
|
python/data_browser.py
|
#!/usr/bin/python
import numpy as np
import argparse
import glob
#from obspy import Stream
'''
Author: Qingkai Kong, qingkai.kong@gmail.com
This is a script quickly view station on maps and the waveforms.
It plots map of the stations on the top, and the waveform data at bottom.
When you select the station on the map, the corresponding waveform will
show up in the bottom.
To do, add the command line arguments.
'''
class PointBrowser:
    """
    Click on a point to select and highlight it -- the data that
    generated the point will be shown in the lower axes.  Use the 'n'
    and 'p' keys to browse through the next and previous points

    NOTE(review): relies on module-level globals created in __main__
    (fig, ax, ax2, line, xs, ys, X, stnm, sample_rate) — confirm before
    reusing outside this script.
    """
    def __init__(self):
        self.lastind = 0

        # Text label showing the currently selected station name.
        self.text = ax.text(0.05, 0.95, 'selected: none',
                            transform=ax.transAxes, va='top')
        # Highlight marker over the selected station; hidden until a pick.
        self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.7,
                                 color='yellow', visible=False)

    def onpress(self, event):
        # 'n'/'p' step to the next/previous station, clamped to the valid range.
        if self.lastind is None: return
        if event.key not in ('n', 'p'): return
        if event.key=='n': inc = 1
        else: inc = -1

        self.lastind += inc
        self.lastind = np.clip(self.lastind, 0, len(xs)-1)
        self.update()

    def onpick(self, event):
        # Ignore picks on anything but the station scatter plot.
        if event.artist!=line: return True

        N = len(event.ind)
        if not N: return True

        # the click locations
        x = event.mouseevent.xdata
        y = event.mouseevent.ydata

        # Of all points within pick tolerance, select the closest one.
        distances = np.hypot(x-xs[event.ind], y-ys[event.ind])
        indmin = distances.argmin()
        dataind = event.ind[indmin]

        self.lastind = dataind
        self.update()

    def update(self):
        # Redraw the waveform panel for the currently selected station.
        if self.lastind is None: return

        dataind = self.lastind
        rec = X[dataind]
        # Convert sample index to seconds using this station's sampling rate.
        time = np.arange(len(X[dataind])) / sample_rate[dataind]

        ax2.cla()
        ax2.plot(time, rec)
        plt.xlabel('Time (sec)', fontsize = 14)
        #ax2.text(0.05, 0.9, 'mu=%1.3f\nsigma=%1.3f'%(xs[dataind], ys[dataind]),transform=ax2.transAxes, va='top')
        self.selected.set_visible(True)
        self.selected.set_data(xs[dataind], ys[dataind])
        self.text.set_text('selected: %s'%stnm[dataind])
        fig.canvas.draw()
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from obspy.core import read
    from mpl_toolkits.basemap import Basemap

    parser = argparse.ArgumentParser(description='data browser')

    # option for specify the files
    parser.add_argument('-f', '--files', action='store', dest='files',
                        help='specify input files as strings')

    # option for specify location of the event
    #parser.add_argument ('-e', '--event',nargs=2, action='append',
    #    help='specify the latitude and longitude of the event')
    parser.add_argument('-evla', action='store', type = float,
                        help='specify the latitude of the event as float number')
    parser.add_argument('-evlo', action='store', type = float,
                        help='specify the longitude of the event as float number')

    results = parser.parse_args()
    filename = results.files

    # Default to all SAC files in the current directory.
    if filename:
        st = read(filename)
    else:
        st = read('./*.sac')

    # Event coordinates: command line first, then the first trace's SAC
    # header, else unknown.
    if results.evla and results.evlo:
        evla = results.evla
        evlo = results.evlo
    else:
        try:
            tr = st[0]
            evla = tr.stats.sac.evla
            evlo = tr.stats.sac.evlo
        # NOTE(review): bare except silently hides header errors — confirm.
        except:
            evla = None
            evlo = None

    # Collect per-trace station coordinates, names, rates, and waveforms.
    data = []
    stla = []
    stlo = []
    kstnm = []
    sample_rate = []
    n = len(st)
    for i in range(0, n):
        stla.append(st[i].stats['sac']['stla'])
        stlo.append(st[i].stats['sac']['stlo'])
        kstnm.append(st[i].stats['station'])
        sample_rate.append(st[i].stats['sampling_rate'])
        data.append(st[i].data)

    max_lat = max(stla)
    max_lon = max(stlo)
    min_lat = min(stla)
    min_lon = min(stlo)

    # Module-level arrays consumed by PointBrowser above.
    ys = np.array(stla)
    xs = np.array(stlo)
    X = np.array(data)
    #t = np.range()
    stnm = np.array(kstnm)

    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax.set_title('select the seismic station to plot the waveform')
    #line, = ax.plot(xs, ys, 'o', picker=5) # 5 points tolerance
    #ax.set_ylim(33.7527, 33.848)
    #ax.set_xlim(-118.214, -118.125)

    # Map bounds: station extent plus a half-degree margin on each side.
    lat0 = min_lat - 0.5
    lat1 = max_lat + 0.5
    lon0 = min_lon - 0.5
    lon1 = max_lon + 0.5

    resolution = 'l'
    m = Basemap(lon0, lat0, lon1, lat1, resolution=resolution, ax=ax)
    m.drawcoastlines()
    m.drawcountries(color=(1,1,0)) # country boundaries yellow
    m.drawrivers(color=(0,1,1)) # rivers in cyan
    #m.bluemarble() # NASA bluemarble image
    #m.etopo()

    parallels = np.linspace(lat0, lat1, 3)
    meridians = np.linspace(lon0, lon1, 3)
    m.drawparallels(parallels, labels=[1,0,0,0], fmt='%.2f')
    m.drawmeridians(meridians, labels=[0,0,0,1], fmt='%.2f')

    # Pickable station markers; 5-point pick tolerance.
    line, = m.plot(xs, ys, 'o', picker=5)

    if evlo is not None and evla is not None:
        x_0, y_0 = m(evlo, evla)
        m.plot(x_0, y_0, 'r*', markersize=20, label = 'Event')

    ax2 = fig.add_subplot(212)
    browser = PointBrowser()

    # Wire the mouse pick and keyboard browsing callbacks.
    fig.canvas.mpl_connect('pick_event', browser.onpick)
    fig.canvas.mpl_connect('key_press_event', browser.onpress)

    plt.show()
|
Python
| 0.000001
|
@@ -3159,16 +3159,158 @@
umber')
+%0A %0A parser.add_argument('-r', '--resolution', action='store',%0A help='specify input files as strings')
@@ -3775,24 +3775,121 @@
%0A
+ resolution = results.resolution%0A if not resolution:%0A resolution = 'l'%0A %0A
data = %5B
@@ -4834,37 +4834,16 @@
n + 0.5%0A
- resolution = 'l'%0A
m =
|
c9157639b1e412d9d13fcfca8bf4e0f04858e323
|
Fix tempfile cleanup.
|
betago/corpora/archive.py
|
betago/corpora/archive.py
|
import os
import shutil
import tarfile
import tempfile
from contextlib import contextmanager
from operator import attrgetter
__all__ = [
'SGF',
'find_sgfs',
]
class SafetyError(Exception):
    """Raised when an archive contains unsafe member filenames (ones that
    could escape the extraction directory)."""
    pass
class SGF(object):
    """An SGF game record together with the locator it was loaded from."""

    def __init__(self, locator, contents):
        self.locator = locator    # where this game came from
        self.contents = contents  # raw SGF text

    def __str__(self):
        return 'SGF from %s' % self.locator
class SGFLocator(object):
    """Identifies one SGF game file inside a physical archive on disk."""
    # TODO Support zips and physical SGFs.

    def __init__(self, archive_path, archive_filename):
        self.archive_path = archive_path
        self.archive_filename = archive_filename

    @property
    def physical_file(self):
        """Path of the archive on disk."""
        return self.archive_path

    @property
    def game_file(self):
        """Name of the game file within the archive."""
        return self.archive_filename

    def __cmp__(self, other):
        # Python 2 ordering: compare (archive, game) pairs lexicographically.
        return cmp((self.physical_file, self.game_file),
                   (other.physical_file, other.game_file))

    def __str__(self):
        return '%s:%s' % (self.archive_path, self.archive_filename)

    def serialize(self):
        """Return a plain-dict representation (e.g. for JSON)."""
        return dict(
            archive_path=self.archive_path,
            archive_filename=self.archive_filename,
        )

    @classmethod
    def deserialize(cls, data):
        """Rebuild a locator from the dict produced by serialize()."""
        return SGFLocator(
            archive_path=data['archive_path'],
            archive_filename=data['archive_filename'],
        )
class TarballSGFLocator(SGFLocator):
    """Locator for an SGF stored inside a (possibly compressed) tarball."""

    def __init__(self, tarball_path, archive_filename):
        # NOTE(review): archive_path is never set, so the inherited
        # physical_file/serialize would raise AttributeError on this
        # subclass — confirm whether that is intentional.
        self.tarball_path = tarball_path
        self.archive_filename = archive_filename

    def __str__(self):
        return '%s:%s' % (self.tarball_path, self.archive_filename)

    def contents(self):
        """Read and return the raw contents of the game file.

        Bug fix: the original left both the TarFile and the extracted
        member handle open; close them deterministically.
        """
        with tarfile.open(self.tarball_path) as tf:
            member = tf.extractfile(self.archive_filename)
            try:
                return member.read()
            finally:
                member.close()
def find_sgfs(path):
    """Find all SGFs in a directory or archive.

    Returns an iterable of SGF objects.

    Robustness fix: raise ValueError when *path* is neither a directory
    nor a tarball. The original fell through and returned None, which
    made callers fail later with a confusing TypeError on iteration.
    """
    if os.path.isdir(path):
        return _walk_dir(path)
    if tarfile.is_tarfile(path):
        return _walk_tarball(path)
    raise ValueError('%s is neither a directory nor a tarball' % (path,))
def _walk_dir(path):
    """Yield every SGF found beneath *path*, visiting entries in sorted order."""
    for entry in sorted(os.listdir(path)):
        child_path = os.path.join(path, entry)
        for sgf in find_sgfs(child_path):
            yield sgf
@contextmanager
def tarball_iterator(tarball_path):
    """Context manager yielding the list of SGF objects in a tarball.

    The tarball is extracted into a private temporary directory, which
    is removed (and the TarFile closed) when the context exits.

    Bug fix (tempfile cleanup): the original ran cleanup after the
    yield with no try/finally, so the temp directory and TarFile leaked
    whenever the caller's ``with`` body raised — or when SafetyError or
    extraction itself raised.
    """
    tempdir = tempfile.mkdtemp(prefix='tmp-betago')
    tf = tarfile.open(tarball_path)
    try:
        # Check for unsafe filenames. Theoretically a tarball can contain
        # absolute filenames, or names like '../../whatever'
        def name_is_safe(filename):
            final_path = os.path.realpath(os.path.join(tempdir, filename))
            dir_to_check = os.path.join(os.path.realpath(tempdir), '')
            return os.path.commonprefix([final_path, dir_to_check]) == dir_to_check
        if not all(name_is_safe(tf_entry.name) for tf_entry in tf):
            raise SafetyError('Tarball %s contains unsafe filenames' % (tarball_path,))
        # Bug fix: isfile was referenced but never *called*, so the filter
        # was effectively name-only; call it so directories are excluded.
        sgf_names = [tf_entry.name for tf_entry in tf
                     if tf_entry.isfile() and tf_entry.name.endswith('.sgf')]
        sgf_names.sort()
        tf.extractall(tempdir)
        # Read each game with a closed file handle (the original leaked
        # one open handle per game).
        games = []
        for sgf_name in sgf_names:
            with open(os.path.join(tempdir, sgf_name)) as sgf_file:
                games.append(SGF(SGFLocator(tarball_path, sgf_name),
                                 sgf_file.read()))
        yield games
    finally:
        shutil.rmtree(tempdir)
        tf.close()
def _walk_tarball(path):
    # Yield the tarball's SGFs one at a time; the extraction directory
    # managed by tarball_iterator stays alive until iteration finishes.
    with tarball_iterator(path) as tarball:
        for sgf in tarball:
            yield sgf
|
Python
| 0
|
@@ -2970,24 +2970,37 @@
ll(tempdir)%0A
+ try:%0A
yield %5BS
@@ -3091,24 +3091,28 @@
%0A
+
for sgf_name
@@ -3126,16 +3126,33 @@
_names%5D%0A
+ finally:%0A
shut
@@ -3166,24 +3166,28 @@
ee(tempdir)%0A
+
tf.close
|
2373734b9eda5c887621ee64a2ca755850685699
|
test c-model
|
transiNXOR_modeling/transixor_predictor.py
|
transiNXOR_modeling/transixor_predictor.py
|
import sys
sys.path.append('../')  # make the repo-local pinn_api package importable

import numpy as np
from itertools import product
from pinn_api import predict_ids_grads, predict_ids
import matplotlib.pyplot as plt
import glob

## ------------ True data ---------------
# Measured drain currents; indexed below as [vds, vbg, vtg].
ids_file = glob.glob('./transiXOR_data/current_D9.npy')
# ids_file = glob.glob('./transiXOR_data/*_id_*.npy')
# vds, vbg, vtg, id
ids_data = np.load(ids_file[0])
print(ids_data.shape)

## ------------ Prediction ---------------
# Sweep vtg over [-0.1, 0.3] while holding vds=0.2 and vbg=0.1 fixed.
# vds = np.linspace(-0.1, 0.3, 41)
# vbg = np.linspace(0.1, 0.1, 1)
# vtg = np.linspace(0.2, 0.2, 1)
vds = np.linspace(0.2, 0.2, 1)
vbg = np.linspace(0.1, 0.1, 1)
vtg = np.linspace(-0.1, 0.3, 41)
# Cartesian product of the bias grids, split back into per-terminal columns.
iter_lst = list(product(vds, vbg, vtg))
vds_pred = np.expand_dims(np.array([e[0] for e in iter_lst], dtype=np.float32), axis=1)
vbg_pred = np.array([e[1] for e in iter_lst], dtype=np.float32)
vtg_pred = np.array([e[2] for e in iter_lst], dtype=np.float32)
# The model takes one gate input: vtg and vbg summed column-wise.
vg_pred = np.column_stack((vtg_pred, vbg_pred))
vg_pred = np.sum(vg_pred, axis=1, keepdims=True)
# vg_pred = np.sum(vg_pred, axis=1, keepdims=True)

## If trained with adjoint builder
# ids_pred, _, _ = predict_ids_grads(
#     './transiXOR_Models/bise_h16', vg_pred, vds_pred)

## If trained with origin builder
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0', vg_pred, vds_pred)

# ids_true = ids_data[:, 30, 20]
# vds_true = np.linspace(-0.1, 0.3, 41)
# plt.plot(vds, ids_pred, 'r')
# plt.plot(vds_true, ids_true)
# plt.show()
# plt.semilogy(vds, np.abs(ids_pred), 'r')
# plt.semilogy(vds_true, np.abs(ids_true))
# plt.show()

# Compare the prediction against the measured vtg sweep at fixed biases.
# NOTE(review): indices [30, 20, :] presumably select vds=0.2, vbg=0.1 —
# confirm against the data grid.
ids_true = ids_data[30, 20, :]
vtg_true = np.linspace(-0.1, 0.3, 41)
plt.plot(vtg, ids_pred, 'r')
plt.plot(vtg_true, ids_true)
plt.show()
plt.semilogy(vtg, np.abs(ids_pred), 'r')
plt.semilogy(vtg_true, np.abs(ids_true))
plt.show()

## Point test
# Single-bias sanity check: vg = vtg + vbg, vds = 0.2.
ids_pred = predict_ids(
    './transiXOR_Models/bise_ext_sym_h264_0',
    np.array([0.2+0.2]), np.array([0.2]))
print(ids_pred)
|
Python
| 0.000005
|
@@ -1547,16 +1547,18 @@
show()%0A%0A
+#
ids_true
@@ -1580,16 +1580,18 @@
20, :%5D%0A
+#
vtg_true
@@ -1620,16 +1620,18 @@
.3, 41)%0A
+#
plt.plot
@@ -1651,16 +1651,18 @@
d, 'r')%0A
+#
plt.plot
@@ -1682,16 +1682,18 @@
s_true)%0A
+#
plt.show
@@ -1695,16 +1695,18 @@
.show()%0A
+#
plt.semi
@@ -1739,16 +1739,18 @@
, 'r') %0A
+#
plt.semi
@@ -1782,16 +1782,18 @@
_true))%0A
+#
plt.show
@@ -1925,16 +1925,382 @@
print(ids_pred)%0A
+ids_pred = predict_ids(%0A%09'./transiXOR_Models/bise_ext_sym_h264_0',%0A%09np.array(%5B0.0+0.0%5D), np.array(%5B0.2%5D))%0Aprint(ids_pred)%0Aids_pred = predict_ids(%0A%09'./transiXOR_Models/bise_ext_sym_h264_0',%0A%09np.array(%5B0.0+0.1%5D), np.array(%5B0.2%5D))%0Aprint(ids_pred)%0Aids_pred = predict_ids(%0A%09'./transiXOR_Models/bise_ext_sym_h264_0',%0A%09np.array(%5B0.1+0.0%5D), np.array(%5B0.2%5D))%0Aprint(ids_pred)%0A
|
56a78baf677a345ae23035111f978dd695407d48
|
Bump version to 0.6.6 (#4621)
|
python/ray/__init__.py
|
python/ray/__init__.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys

# Ray must control which pyarrow gets loaded: it bundles a pinned copy and
# mixing it with a pre-imported system pyarrow would break serialization.
if "pyarrow" in sys.modules:
    raise ImportError("Ray must be imported before pyarrow because Ray "
                      "requires a specific version of pyarrow (which is "
                      "packaged along with Ray).")

# Add the directory containing pyarrow to the Python path so that we find the
# pyarrow version packaged with ray and not a pre-existing pyarrow.
pyarrow_path = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
sys.path.insert(0, pyarrow_path)

# See https://github.com/ray-project/ray/issues/131.
helpful_message = """
If you are using Anaconda, try fixing this problem by running:
    conda install libgcc
"""

try:
    import pyarrow  # noqa: F401
except ImportError as e:
    # Append the libgcc hint to the error, handling the attribute layout
    # of both Python 3 (.msg) and Python 2 (.message/.args) exceptions.
    if ((hasattr(e, "msg") and isinstance(e.msg, str)
         and ("libstdc++" in e.msg or "CXX" in e.msg))):
        # This code path should be taken with Python 3.
        e.msg += helpful_message
    elif (hasattr(e, "message") and isinstance(e.message, str)
          and ("libstdc++" in e.message or "CXX" in e.message)):
        # This code path should be taken with Python 2.
        condition = (hasattr(e, "args") and isinstance(e.args, tuple)
                     and len(e.args) == 1 and isinstance(e.args[0], str))
        if condition:
            e.args = (e.args[0] + helpful_message, )
        else:
            if not hasattr(e, "args"):
                e.args = ()
            elif not isinstance(e.args, tuple):
                e.args = (e.args, )
            e.args += (helpful_message, )
    raise

# Make the vendored modin importable as well.
modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
sys.path.append(modin_path)

# Core ID types exposed from the compiled extension.
from ray._raylet import (
    ActorCheckpointID,
    ActorClassID,
    ActorHandleID,
    ActorID,
    ClientID,
    Config as _Config,
    DriverID,
    FunctionID,
    ObjectID,
    TaskID,
    UniqueID,
)  # noqa: E402

_config = _Config()

from ray.profiling import profile  # noqa: E402
from ray.worker import (
    LOCAL_MODE,
    PYTHON_MODE,
    SCRIPT_MODE,
    WORKER_MODE,
    connect,
    disconnect,
    error_info,
    get,
    get_gpu_ids,
    get_resource_ids,
    get_webui_url,
    global_state,
    init,
    is_initialized,
    put,
    register_custom_serializer,
    remote,
    shutdown,
    wait,
)  # noqa: E402
import ray.internal  # noqa: E402
# We import ray.actor because some code is run in actor.py which initializes
# some functions in the worker.
import ray.actor  # noqa: F401
from ray.actor import method  # noqa: E402
from ray.runtime_context import _get_runtime_context  # noqa: E402

# Ray version string.
__version__ = "0.7.0.dev2"

__all__ = [
    "LOCAL_MODE",
    "PYTHON_MODE",
    "SCRIPT_MODE",
    "WORKER_MODE",
    "__version__",
    "_config",
    "_get_runtime_context",
    "actor",
    "connect",
    "disconnect",
    "error_info",
    "get",
    "get_gpu_ids",
    "get_resource_ids",
    "get_webui_url",
    "global_state",
    "init",
    "internal",
    "is_initialized",
    "method",
    "profile",
    "put",
    "register_custom_serializer",
    "remote",
    "shutdown",
    "wait",
]

# ID types
__all__ += [
    "ActorCheckpointID",
    "ActorClassID",
    "ActorHandleID",
    "ActorID",
    "ClientID",
    "DriverID",
    "FunctionID",
    "ObjectID",
    "TaskID",
    "UniqueID",
]

import ctypes  # noqa: E402
# Windows only
if hasattr(ctypes, "windll"):
    # Makes sure that all child processes die when we die. Also makes sure that
    # fatal crashes result in process termination rather than an error dialog
    # (the latter is annoying since we have a lot of processes). This is done
    # by associating all child processes with a "job" object that imposes this
    # behavior.
    # NOTE(review): the one-liner below creates a Win32 job object, packs a
    # JOBOBJECT limit-information struct with flags encoded in the string,
    # and assigns the current process to the job — confirm against the
    # Win32 job-object documentation before modifying.
    (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32)  # noqa: E501
(lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501
|
Python
| 0
|
@@ -2754,16 +2754,11 @@
%220.
-7.0.dev2
+6.6
%22%0A%0A_
|
99cd22c7a727b5a45ac0391f0d3aa0e79f7dd107
|
Return the actual status code that was returned
|
jacquard/commands_dev.py
|
jacquard/commands_dev.py
|
"""Useful commands for Jacquard development."""
import io
import json
import functools
import itertools
import contextlib
from werkzeug.test import Client
from jacquard.cli import main as run_command
from jacquard.service import get_wsgi_app
from jacquard.commands import BaseCommand, CommandError
from jacquard.utils_dev import shrink
from jacquard.storage.dummy import DummyStore
from jacquard.storage.utils import copy_data
class _DuplicateStorageConfig(object):
"""Dummy config emulator with a duplicate store."""
def __init__(self, config):
"""
Init from config.
Copies the storage but otherwise passes through.
"""
self.storage = DummyStore('')
print("Copying all data to a local version")
copy_data(config.storage, self.storage)
self.config = config
def __getattr__(self, item):
"""Attribute lookup pass-through."""
return getattr(self.config, item)
class Bugpoint(BaseCommand):
"""
Minimise error by reducing storage.
Drops keys from storage to minimise the size of test case needed to
reproduce an exception.
"""
help = "minimise test case from storage"
plumbing = True
def add_arguments(self, parser):
"""Add command-line arguments."""
target = parser.add_mutually_exclusive_group(required=True)
target.add_argument(
'--command',
type=str,
help="command to run",
nargs='*',
)
target.add_argument(
'--url',
type=str,
help="url to fetch",
)
def handle(self, config, options):
"""Run command."""
replacement_config = _DuplicateStorageConfig(config)
target = self._get_run_target(replacement_config, options)
target_failure_mode = functools.partial(
self._failure_mode,
target,
)
reference_failure_mode = target_failure_mode()
if reference_failure_mode is None:
raise CommandError("Target is not currently failing")
print("Failure mode: ", reference_failure_mode)
with self._backed_up_storage(replacement_config.storage):
def predicate():
"""Determine if the config maintains the original failure."""
return target_failure_mode() == reference_failure_mode
# Sequence 1: Simplify by dropping keys
print("Dropping keys")
self._progressively_simplify(
replacement_config.storage,
self._try_dropping_key,
predicate,
)
# Sequence 2: Progressively simplify all remaining keys
print("Simplifying keys")
self._progressively_simplify(
replacement_config.storage,
self._try_simplifying_key,
predicate,
)
print("Done bugpointing")
# Output storage state
run_command(["storage-dump"], replacement_config)
def _failure_mode(self, target):
"""
Get a representation of the failure mode of a run target.
The nature of this representation is unspecified other than that:
* In case of no exception, it is None;
* In case of an exception it is printable;
* In all cases it has functional equality.
In order to make life easier if bugpoint is feeling particularly slow,
we special-case KeyboardInterrupt and pass it straight through.
"""
try:
target()
except KeyboardInterrupt:
# Pass through ^C
raise
except Exception as exc:
return repr(exc)
else:
return None
def _progressively_simplify(self, storage, process, predicate):
"""
Repeatedly simplify storage, using `process`.
Process is a callable taking a storage engine and a key, and returning
whether it committed any changes or not. The `predicate` is an argument
to `process`, determining whether a simplification has been valid.
"""
pass_number = itertools.count(1)
any_changes = True
while any_changes:
print("Pass {}".format(next(pass_number)))
any_changes = False
# Get list of keys
storage.begin_read_only()
all_keys = list(storage.keys())
storage.rollback()
all_keys.sort()
for key in all_keys:
any_changes = process(storage, key, predicate) or any_changes
def _try_dropping_key(self, storage, key, predicate):
storage.begin()
old_value = storage.get(key)
storage.commit({}, (key,))
if not predicate():
# This either passes the tests or changes the failure mode,
# and so must be kept.
storage.begin()
storage.commit({key: old_value}, ())
return False
else:
print("Dropped key {}".format(key))
return True
def _try_simplifying_key(self, storage, key, predicate):
storage.begin()
old_value = storage.get(key)
storage.commit({}, (key,))
def test_validity(new_json):
dumped_json = json.dumps(new_json)
storage.begin()
storage.commit({key: dumped_json}, ())
return predicate()
parsed_old_value = json.loads(old_value)
shrunk_value = shrink(parsed_old_value, test_validity)
storage.begin()
storage.commit({key: json.dumps(shrunk_value)}, ())
if shrunk_value != parsed_old_value:
print("Shrunk key: {}".format(key))
return True
else:
return False
def _get_run_target(self, config, options):
"""
Get the 'run target' out from options.
This is a nullary callable which is expected to raise an exception -
the exception we are needing to debug.
For the command case it's a wrapped version of the command, which
silences stdout and stderr.
For the URL case it's a call with a WSGI client which catches 4xx and
5xx status codes turning them into ValueErrors.
"""
if options.command:
def target():
out_stream = io.StringIO()
with contextlib.ExitStack() as context:
context.enter_context(
contextlib.redirect_stdout(out_stream),
)
context.enter_context(
contextlib.redirect_stderr(out_stream),
)
run_command(options.command, config)
elif options.url:
app = get_wsgi_app(config)
test_client = Client(app)
def target():
result = test_client.get(options.url)
status_class = str(result.status_code)[0]
if status_class in ('4', '5'):
raise ValueError("Class 4 or 5 status")
else:
raise AssertionError("No target type")
return target
@contextlib.contextmanager
def _backed_up_storage(self, storage):
"""
Back up storage and restore it on exiting.
A convenience context manager.
"""
backup = DummyStore('')
copy_data(storage, backup)
try:
yield
finally:
copy_data(backup, storage)
|
Python
| 0.000097
|
@@ -7115,28 +7115,46 @@
or(%22
-Class 4 or 5 status%22
+Status: %7B%7D%22.format(result.status_code)
)%0A
|
168b29e28dd3b48f4b4fc3ce82daa0e13ffa7223
|
Use Cython i.o. setuptools
|
python/smurff/setup.py
|
python/smurff/setup.py
|
import subprocess
from setuptools import setup
from setuptools import Extension
from Cython.Build import build_ext
import numpy
import numpy.distutils.system_info as sysinfo
import sys
import os
lapack_opt_info = sysinfo.get_info("lapack_opt")
# {'libraries': ['mkl_rt', 'pthread'],
# 'library_dirs': ['/Users/vanderaa/miniconda3/lib'],
# 'define_macros': [('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)],
# 'include_dirs': ['/Users/vanderaa/miniconda3/include']
# }
SOURCES = ["smurff.pyx"]
INCLUDE_DIRS = [ numpy.get_include() ] + lapack_opt_info['include_dirs']
LIBRARY_DIRS = lapack_opt_info['library_dirs']
LIBRARIES = ["smurff-cpp" ] + lapack_opt_info['libraries']
EXTRA_COMPILE_ARGS = ['-std=c++11']
EXTRA_LINK_ARGS = []
# add --with-smurff-cpp option
for arg in sys.argv:
if arg.startswith("--with-smurff-cpp="):
smurff_cpp_dir=arg[len("--with-smurff-cpp="):]
INCLUDE_DIRS.append(os.path.join(smurff_cpp_dir, "include"))
LIBRARY_DIRS.append(os.path.join(smurff_cpp_dir, "lib"))
sys.argv.remove(arg)
# add cleanall option
for arg in sys.argv:
if (arg == "cleanall"):
print("Deleting cython files...")
subprocess.Popen("rm -rf build *.cpp *.so", shell=True, executable="/bin/bash")
sys.argv.remove(arg)
sys.argv.append("clean")
# add --with-openmp option
for arg in sys.argv:
if (arg == "--with-openmp"):
EXTRA_COMPILE_ARGS.append("-fopenmp")
EXTRA_LINK_ARGS.append("-fopenmp")
sys.argv.remove(arg)
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT",
"Programming Language :: C++",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Machine Learning",
"Topic :: Matrix Factorization",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
]
setup(cmdclass={'build_ext': build_ext},
name = 'smurff',
version = subprocess.check_output("git describe", shell=True).rstrip().decode(),
# packages = ["smurff"],
# package_dir = {'' : 'python'},
url = "http://github.com/ExaScience/smurff",
license = "MIT",
description = 'Bayesian Factorization Methods',
long_description = 'Highly optimized and parallelized methods for Bayesian Factorization, including BPMF and smurff. The package uses optimized OpenMP/C++ code with a Cython wrapper to factorize large scale matrices. smurff method provides also the ability to incorporate high-dimensional side information to the factorization.',
author = "Tom Vander Aa",
author_email = "Tom.VanderAa@imec.be",
ext_modules = [
Extension(
"smurff",
sources = SOURCES,
include_dirs = INCLUDE_DIRS,
libraries = LIBRARIES,
library_dirs = LIBRARY_DIRS,
extra_compile_args = EXTRA_COMPILE_ARGS,
extra_link_args = EXTRA_LINK_ARGS,
language = "c++",
)
],
# cythonize(ext_modules, compiler_directives={'c_string_type': 'str', 'c_string_encoding': 'default'}),
classifiers = CLASSIFIERS,
keywords = "bayesian factorization machine-learning high-dimensional side-information",
install_requires = ['numpy', 'scipy', 'pandas']
)
|
Python
| 0
|
@@ -42,32 +42,38 @@
setup%0Afrom
-setuptoo
+Cython.Distuti
ls import Ex
@@ -96,13 +96,17 @@
hon.
-Build
+Distutils
imp
|
d692ed6c48fc36b296b9a3e952dd1f70b133210c
|
add migrate script to remove ezid from suggestions
|
portality/migrate/p1p2/suggestionrestructure.py
|
portality/migrate/p1p2/suggestionrestructure.py
|
from portality import models, settings
import requests, json
# first thing to do is delete suggestions which are marked "waiting for answer"
q = {
"query" : {
"bool" : {
"must" : [
{"term" : {"admin.application_status.exact" : "waiting for answer"}}
]
}
}
}
url = settings.ELASTIC_SEARCH_HOST + "/" + settings.ELASTIC_SEARCH_DB + "/suggestion/_query"
resp = requests.delete(url, data=json.dumps(q))
deletable = models.Suggestion.iterate(q, page_size=15000, wrap=False)
for d in deletable:
id = d.get("id")
if id is not None:
models.Suggestion.remove_by_id(id)
print "removing", id
batch_size = 1000
total=0
batch = []
suggestion_iterator = models.Suggestion.iterall(page_size=10000)
for s in suggestion_iterator:
# remove any author-pays stuff
if "author_pays" in s.data.get("bibjson"):
del s.data["bibjson"]["author_pays"]
if "author_pays_url" in s.data.get("bibjson"):
del s.data["bibjson"]["author_pays_url"]
# normalise the application statuses
if s.application_status == "answer received":
s.set_application_status("in progress")
s.prep()
batch.append(s.data)
if len(batch) >= batch_size:
total += len(batch)
print "writing", len(batch), "; total so far", total
models.Suggestion.bulk(batch)
batch = []
if len(batch) > 0:
total += len(batch)
print "writing", len(batch), "; total so far", total
models.Suggestion.bulk(batch)
|
Python
| 0
|
@@ -1186,24 +1186,263 @@
%22)%0A %0A
+ # remove any EzID from the persistent identifier schemes%0A pids = s.bibjson().persistent_identifier_scheme%0A if %22EzID%22 in pids:%0A i = pids.index(%22EzID%22)%0A del pids%5Bi%5D%0A s.bibjson().persistent_identifier_scheme = pids%0A
%0A s.p
|
4bc871aaa72fa1d793203e5627a2ac5f859ae27d
|
add dependencies; still incomplete
|
tardis/montecarlo/setup_package.py
|
tardis/montecarlo/setup_package.py
|
#setting the right include
from setuptools import Extension
import numpy as np
import os
from astropy_helpers.setup_helpers import get_distutils_option
from glob import glob
if get_distutils_option('with_openmp', ['build', 'install', 'develop']) is not None:
compile_args = ['-fopenmp', '-W', '-Wall', '-Wmissing-prototypes', '-std=c99']
link_args = ['-fopenmp']
define_macros = [('WITHOPENMP', None)]
else:
compile_args = ['-W', '-Wall', '-Wmissing-prototypes', '-std=c99']
link_args = []
define_macros = []
def get_extensions():
sources = ['tardis/montecarlo/montecarlo.pyx']
sources += [os.path.relpath(fname) for fname in glob(
os.path.join(os.path.dirname(__file__), 'src', '*.c'))]
sources += [os.path.relpath(fname) for fname in glob(
os.path.join(os.path.dirname(__file__), 'src/randomkit', '*.c'))]
return [Extension('tardis.montecarlo.montecarlo', sources,
include_dirs=['tardis/montecarlo/src',
'tardis/montecarlo/src/randomkit',
np.get_include()],
extra_compile_args=compile_args,
extra_link_args=link_args,
define_macros=define_macros)]
|
Python
| 0
|
@@ -855,16 +855,263 @@
'*.c'))%5D
+%0A deps = %5Bos.path.relpath(fname) for fname in glob(%0A os.path.join(os.path.dirname(__file__), 'src', '*.h'))%5D%0A deps += %5Bos.path.relpath(fname) for fname in glob(%0A os.path.join(os.path.dirname(__file__), 'src/randomkit', '*.h'))%5D
%0A%0A re
@@ -1354,16 +1354,52 @@
ude()%5D,%0A
+ depends=deps,%0A
|
df16f3e9c49ba2fb3cdbfdc62e120c6358eb25f9
|
Add 'dump_header' function
|
edgedb/lang/common/markup/__init__.py
|
edgedb/lang/common/markup/__init__.py
|
##
# Copyright (c) 2011 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from . import elements, serializer, renderers
from .serializer import serialize
from .serializer import base as _base_serializer
from semantix.exceptions import ExceptionContext as _ExceptionContext
from semantix.utils import abc
@serializer.serializer(method='as_markup')
class MarkupExceptionContext(_ExceptionContext, metaclass=abc.AbstractMeta):
@abc.abstractclassmethod
def as_markup(cls, *, ctx):
pass
def _serialize(obj, trim=True):
ctx = _base_serializer.Context(trim=trim)
try:
return serialize(obj, ctx=ctx)
finally:
ctx.reset()
def dumps(obj, header=None, trim=True):
markup = _serialize(obj, trim=trim)
if header is not None:
markup = elements.doc.Section(title=header, body=[markup])
return renderers.terminal.renders(markup)
def _dump(markup, header, file):
if header is not None:
markup = elements.doc.Section(title=header, body=[markup])
renderers.terminal.render(markup, file=file)
def dump(obj, *, header=None, file=None, trim=True):
markup = _serialize(obj, trim=trim)
_dump(markup, header, file)
def dump_code(code:str, *, lexer='python', header=None, file=None):
markup = serializer.serialize_code(code, lexer=lexer)
_dump(markup, header, file)
|
Python
| 0.000005
|
@@ -1068,32 +1068,176 @@
p, file=file)%0A%0A%0A
+def dump_header(header, file=None):%0A markup = elements.doc.Section(title=header, body=%5B%5D)%0A renderers.terminal.render(markup, file=file)%0A%0A%0A
def dump(obj, *,
|
551c99864e6082aeb43e4e78e545720cecde5679
|
Use find for URL exclusions, rather than match
|
modules/url.py
|
modules/url.py
|
#!/usr/bin/env python
"""
url.py - Willie URL title module
Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham
Copyright 2012 Edward Powell
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
import re
from htmlentitydefs import name2codepoint
import web
import unicodedata
import urlparse
url_finder = None
r_entity = re.compile(r'&[A-Za-z0-9#]+;')
INVALID_WEBSITE = 0x01
def configure(config):
if config.option('Exclude certain URLs from automatic title display', True):
config.add_list('url_exclude', 'Enter regular expressions for each URL you would like to exclude.',
'Regex:')
config.interactive_add('url_exclusion_char',
'Prefix to suppress URL titling', '!')
chunk = ("url_exclude = %s\nurl_exclusion_char = '%s'\n" %
(config.url_exclude, config.url_exclusion_char))
return chunk
else: return ''
def setup(willie):
global url_finder
# Set up empty url exclusion list and default exclusion character
if not hasattr(willie.config, 'url_exclude'):
willie.config.set_attr('url_exclude', [])
else:
for s in willie.config.url_exclude:
if isinstance(s, basestring):
willie.config.url_exclude.remove(s)
willie.config.url_exclude.append(re.compile(s))
#Otherwise, it's probably already compiled by another module.
if not hasattr(willie.config, 'url_exclusion_char'):
willie.config.set_attr('url_exclusion_char', '!')
url_finder = re.compile(r'(?u)(%s?(http|https|ftp)(://\S+))' %
(willie.config.url_exclusion_char))
# We want the exclusion list to be pre-compiled, since url parsing gets
# called a /lot/, and it's annoying when it's laggy.
def find_title(url):
"""
This finds the title when provided with a string of a URL.
"""
uri = url
if not uri and hasattr(self, 'last_seen_uri'):
uri = self.last_seen_uri.get(origin.sender)
if not re.search('^((https?)|(ftp))://', uri):
uri = 'http://' + uri
if "twitter.com" in uri:
uri = uri.replace('#!', '?_escaped_fragment_=')
content = web.get(uri)
regex = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)
content = regex.sub(r'<\1title>',content)
regex = re.compile('[\'"]<title>[\'"]', re.IGNORECASE)
content = regex.sub('',content)
start = content.find('<title>')
if start == -1: return
end = content.find('</title>', start)
if end == -1: return
content = content[start+7:end]
content = content.strip('\n').rstrip().lstrip()
title = content
if len(title) > 200:
title = title[:200] + '[...]'
def e(m):
entity = m.group()
if entity.startswith('&#x'):
cp = int(entity[3:-1],16)
return unichr(cp).encode('utf-8')
elif entity.startswith('&#'):
cp = int(entity[2:-1])
return unichr(cp).encode('utf-8')
else:
char = name2codepoint[entity[1:-1]]
return unichr(char).encode('utf-8')
title = r_entity.sub(e, title)
if title:
title = uni_decode(title)
else: title = 'None'
title = title.replace('\n', '')
title = title.replace('\r', '')
def remove_spaces(x):
if " " in x:
x = x.replace(" ", " ")
return remove_spaces(x)
else:
return x
title = remove_spaces (title)
re_dcc = re.compile(r'(?i)dcc\ssend')
title = re.sub(re_dcc, '', title)
if title:
return title
def getTLD (url):
idx = 7
if url.startswith('https://'): idx = 8
elif url.startswith('ftp://'): idx = 6
u = url[idx:]
f = u.find('/')
if f == -1: u = url
else: u = url[0:idx] + u[0:f]
return u
def get_results(willie, text):
a = re.findall(url_finder, text)
display = [ ]
for match in a:
match = match[0]
if (match.startswith(willie.config.url_exclusion_char) or
any(pattern.match(match) for pattern in willie.config.url_exclude)):
continue
print 'no exclusion'
url = uni_encode(match)
url = uni_decode(url)
url = iriToUri(url)
try:
page_title = find_title(url)
except:
page_title = None # if it can't access the site fail silently
display.append([page_title, url])
return display
def show_title_auto (willie, trigger):
if trigger.startswith('.title '):
return
if len(re.findall("\([\d]+\sfiles\sin\s[\d]+\sdirs\)", trigger)) == 1: return
try:
results = get_results(willie, trigger)
except: return
if results is None: return
k = 1
for r in results:
if k > 3: break
k += 1
if r[0] is None:
continue
else: r[1] = getTLD(r[1])
willie.say('[ %s ] - %s' % (r[0], r[1]))
show_title_auto.rule = '(?u).*((http|https)(://\S+)).*'
show_title_auto.priority = 'high'
def show_title_demand (willie, trigger):
#try:
results = get_results(trigger)
#except: return
if results is None: return
for r in results:
if r[0] is None: continue
r[1] = getTLD(r[1])
willie.say('[ %s ] - %s' % (r[0], r[1]))
show_title_demand.commands = ['title']
show_title_demand.priority = 'high'
#Tools formerly in unicode.py
def uni_decode(bytes):
try:
text = bytes.decode('utf-8')
except UnicodeDecodeError:
try:
text = bytes.decode('iso-8859-1')
except UnicodeDecodeError:
text = bytes.decode('cp1252')
return text
def uni_encode(bytes):
try:
text = bytes.encode('utf-8')
except UnicodeEncodeError:
try:
text = bytes.encode('iso-8859-1')
except UnicodeEncodeError:
text = bytes.encode('cp1252')
return text
def urlEncodeNonAscii(b):
return re.sub('[\x80-\xFF]', lambda c: '%%%02x' % ord(c.group(0)), b)
def iriToUri(iri):
parts = urlparse.urlparse(iri)
return urlparse.urlunparse(
part.encode('idna') if parti == 1 else urlEncodeNonAscii(part.encode('utf-8'))
for parti, part in enumerate(parts)
)
if __name__ == '__main__':
print __doc__.strip()
|
Python
| 0
|
@@ -4050,21 +4050,20 @@
pattern.
-match
+find
(match)
|
9aeecaac014e67e5ad55b670be2dc4ab5dd95b5f
|
decrease max files size limit from 2mb to 250kb (#3602)
|
ecommerce/extensions/offer/constants.py
|
ecommerce/extensions/offer/constants.py
|
from django.utils.translation import ugettext_lazy as _
DYNAMIC_DISCOUNT_FLAG = 'offer.dynamic_discount'
# OfferAssignment status constants defined here to avoid circular dependency.
OFFER_ASSIGNMENT_EMAIL_PENDING = 'EMAIL_PENDING'
OFFER_ASSIGNED = 'ASSIGNED'
OFFER_REDEEMED = 'REDEEMED'
OFFER_ASSIGNMENT_EMAIL_BOUNCED = 'EMAIL_BOUNCED'
OFFER_ASSIGNMENT_REVOKED = 'REVOKED'
OFFER_MAX_USES_DEFAULT = 10000
# Coupon code filters
VOUCHER_NOT_ASSIGNED = 'unassigned'
VOUCHER_NOT_REDEEMED = 'unredeemed'
VOUCHER_PARTIAL_REDEEMED = 'partially-redeemed'
VOUCHER_REDEEMED = 'redeemed'
# Coupon visibility filters
VOUCHER_IS_PUBLIC = 'public'
VOUCHER_IS_PRIVATE = 'private'
OFFER_ASSIGNMENT_EMAIL_TEMPLATE_FIELD_LIMIT = 50000
OFFER_ASSIGNMENT_EMAIL_SUBJECT_LIMIT = 1000
# Code Assignment Nudge email templates.
DAY3, DAY10, DAY19 = ('Day3', 'Day10', 'Day19')
NUDGE_EMAIL_CYCLE = {'3': DAY3, '10': DAY10, '19': DAY19}
NUDGE_EMAIL_TEMPLATE_TYPES = (
(DAY3, _('Day 3')),
(DAY10, _('Day 10')),
(DAY19, _('Day 19')),
)
# Email Template Types
ASSIGN, REMIND, REVOKE = ('assign', 'remind', 'revoke')
EMAIL_TEMPLATE_TYPES = (
(ASSIGN, _('Assign')),
(REMIND, _('Remind')),
(REVOKE, _('Revoke')),
)
# Don't change it, These is being used in data migration '0047_codeassignmentnudgeemailtemplates'
TEMPLATES_NAME = ['Day 3 Nudge Email', 'Day 10 Nudge Email', 'Day 19 Nudge Email']
NUDGE_EMAIL_TEMPLATES = [
{
'email_type': DAY3,
'email_greeting': 'Remember when your organization gave you a code to learn on edX? We do, and we\'re glad to '
'have you! Come see what you can learn.',
'email_closing': 'Redeem your edX code and start learning today.',
'email_subject': 'Start learning on edX!',
'name': TEMPLATES_NAME[0],
},
{
'email_type': DAY10,
'email_greeting': 'Many learners from your organization are completing more problems every week, and are '
'learning new skills. What do you want to start learning?',
'email_closing': 'Join your peers, and start learning today.',
'email_subject': 'Join the learning on edX!',
'name': TEMPLATES_NAME[1],
},
{
'email_type': DAY19,
'email_greeting': 'Learners like you are earning certificates from some of the top universities and companies '
'in the world. Will you join them?',
'email_closing': 'Learn from the best, and redeem your code today.',
'email_subject': 'It\'s not too late to redeem your edX code!',
'name': TEMPLATES_NAME[2],
},
]
# Email Sender Category Types
AUTOMATIC_EMAIL, MANUAL_EMAIL = ('automatic', 'manual')
SENDER_CATEGORY_TYPES = (
(AUTOMATIC_EMAIL, _('Automatic')),
(MANUAL_EMAIL, _('Manual')),
)
# Max files size for coupon attachments
MAX_FILES_SIZE_FOR_COUPONS = 2097152
|
Python
| 0
|
@@ -2845,16 +2845,23 @@
achments
+: 250kb
%0AMAX_FIL
@@ -2887,11 +2887,10 @@
= 2
-097152
+56000
%0A
|
ab6e5754283999ece4e77da959c6f9c868b964a7
|
Add Security Manager information
|
variables.py
|
variables.py
|
"""
Define the variables in a module.
"""
NOT_EVALUATED_PHASE = 'Not Evaluated'
NOT_STARTED_PHASE = 'Not Started'
IN_PROGRESS_PHASE = 'In Progress'
DONE_PHASE = 'Done'
|
Python
| 0
|
@@ -1,11 +1,12 @@
%22%22%22
+%0D
%0ADefine
@@ -35,14 +35,18 @@
ule.
-%0A
+%0D%0A%0D
%0A%22%22%22
-%0A
+%0D%0A%0D
%0ANOT
@@ -79,16 +79,17 @@
aluated'
+%0D
%0ANOT_STA
@@ -114,16 +114,17 @@
Started'
+%0D
%0AIN_PROG
@@ -149,16 +149,17 @@
rogress'
+%0D
%0ADONE_PH
@@ -170,9 +170,151 @@
= 'Done'
+%0D%0A%0D%0ASECURITY_MANAGER_NAME = 'R%C3%A9mi Lavedrine'%0D%0ASECURITY_MANAGER_EMAIL = 'remi.lavedrine@orange.com'%0D%0ASECURITY_MANAGER_PHONE = '06 31 17 80 39'%0D
%0A
|
e44021fff840435fe49aaef1a1531cb2ccf44e43
|
Add back to "rebuild_data" command
|
project/api/management/commands/rebuild_data.py
|
project/api/management/commands/rebuild_data.py
|
# Django
from django.apps import apps
from django.core.management.base import BaseCommand
from django.utils import timezone
import datetime
class Command(BaseCommand):
help = "Command to rebuild denorms."
def add_arguments(self, parser):
parser.add_argument(
'--days',
type=int,
dest='days',
nargs='?',
const=1,
help='Number of days to update.',
)
parser.add_argument(
'--hours',
type=int,
dest='hours',
nargs='?',
const=1,
help='Number of hours to update.',
)
parser.add_argument(
'--minutes',
type=int,
dest='minutes',
nargs='?',
const=1,
help='Number of hours to update.',
)
def handle(self, *args, **options):
# Set Cursor
if options['days']:
cursor = timezone.now() - datetime.timedelta(days=options['days'], hours=1)
elif options['hours']:
cursor = timezone.now() - datetime.timedelta(hours=options['hours'], minutes=5)
elif options['minutes']:
cursor = timezone.now() - datetime.timedelta(minutes=options['minutes'], seconds=5)
else:
cursor = None
Group = apps.get_model('api.group')
# Group.objects.denormalize(cursor=cursor)
# Group.objects.sort_tree()
# Group.objects.update_seniors()
Award = apps.get_model('api.award')
Award.objects.sort_tree()
return
|
Python
| 0.000005
|
@@ -1361,34 +1361,32 @@
.group')%0A
- #
Group.objects.d
@@ -1410,34 +1410,32 @@
=cursor)%0A
- #
Group.objects.s
@@ -1452,18 +1452,16 @@
%0A
- #
Group.o
|
f728e80b97343aba18e39972b3208ccaafa43dd0
|
add test for create_schedule with "cron" = ""
|
tdclient/test/schedule_api_test.py
|
tdclient/test/schedule_api_test.py
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
try:
from unittest import mock
except ImportError:
import mock
import pytest
from tdclient import api
from tdclient.test.test_helper import *
def setup_function(function):
unset_environ()
def test_create_schedule_success():
td = api.API("APIKEY")
# TODO: should be replaced by wire dump
body = b"""
{
"start": "2015-01-24 04:34:51 UTC"
}
"""
td.post = mock.MagicMock(return_value=make_response(200, body))
start = td.create_schedule("bar", {"type": "presto"})
td.post.assert_called_with("/v3/schedule/create/bar", {"type": "presto"})
assert start.year == 2015
assert start.month == 1
assert start.day == 24
assert start.hour == 4
assert start.minute == 34
assert start.second == 51
assert start.utcoffset().seconds == 0 # UTC
def test_delete_schedule_success():
td = api.API("APIKEY")
# TODO: should be replaced by wire dump
body = b"""
{
"cron": "foo",
"query": "SELECT 1 FROM nasdaq"
}
"""
td.post = mock.MagicMock(return_value=make_response(200, body))
cron, query = td.delete_schedule("bar")
td.post.assert_called_with("/v3/schedule/delete/bar")
assert cron == "foo"
assert query == "SELECT 1 FROM nasdaq"
def test_list_schedules_success():
td = api.API("APIKEY")
# TODO: should be replaced by wire dump
body = b"""
{
"schedules":[
{"name":"foo","cron":"* * * * *","query":"SELECT COUNT(1) FROM nasdaq;","database":"sample_datasets","result":"","timezone":"UTC","delay":"","next_time":"","priority":"","retry_limit":""},
{"name":"bar","cron":"* * * * *","query":"SELECT COUNT(1) FROM nasdaq;","database":"sample_datasets","result":"","timezone":"UTC","delay":"","next_time":"","priority":"","retry_limit":""},
{"name":"baz","cron":"* * * * *","query":"SELECT COUNT(1) FROM nasdaq;","database":"sample_datasets","result":"","timezone":"UTC","delay":"","next_time":"","priority":"","retry_limit":""}
]
}
"""
td.get = mock.MagicMock(return_value=make_response(200, body))
schedules = td.list_schedules()
td.get.assert_called_with("/v3/schedule/list")
assert len(schedules) == 3
def test_list_schedules_failure():
td = api.API("APIKEY")
td.get = mock.MagicMock(return_value=make_response(500, b"error"))
with pytest.raises(api.APIError) as error:
td.list_schedules()
assert error.value.args == ("500: List schedules failed: error",)
def test_update_schedule_success():
td = api.API("APIKEY")
td.post = mock.MagicMock(return_value=make_response(200, b""))
td.update_schedule("foo")
td.post.assert_called_with("/v3/schedule/update/foo", {})
def test_history_success():
td = api.API("APIKEY")
# TODO: should be replaced by wire dump
body = b"""
{
"history": [
{"job_id":"12345"},
{"job_id":"67890"}
]
}
"""
td.get = mock.MagicMock(return_value=make_response(200, body))
history = td.history("foo", 0, 3)
td.get.assert_called_with("/v3/schedule/history/foo", {"from": "0", "to": "3"})
def test_run_schedule_success():
td = api.API("APIKEY")
# TODO: should be replaced by wire dump
body = b"""
{
"jobs": [
{"job_id":"12345","type":"hive"},
{"job_id":"67890","type":"hive"}
]
}
"""
td.post = mock.MagicMock(return_value=make_response(200, body))
jobs = td.run_schedule("name", "time", 1)
td.post.assert_called_with("/v3/schedule/run/name/time", {"num": 1})
|
Python
| 0
|
@@ -921,16 +921,458 @@
# UTC%0A%0A
+def test_create_schedule_without_cron_success():%0A td = api.API(%22APIKEY%22)%0A # TODO: should be replaced by wire dump%0A body = b%22%22%22%0A %7B%0A %22start%22: null%0A %7D%0A %22%22%22%0A td.post = mock.MagicMock(return_value=make_response(200, body))%0A start = td.create_schedule(%22bar%22, %7B%22type%22: %22presto%22, %22cron%22: %22%22%7D)%0A td.post.assert_called_with(%22/v3/schedule/create/bar%22, %7B%22type%22: %22presto%22, %22cron%22: %22%22%7D)%0A assert start == %22%22%0A%0A
def test
|
b24ed88670460f9037b6fbfa17a37d7912d45af9
|
Fix test that fails when you have a SMTP server on localhost
|
openspending/ui/test/functional/test_account.py
|
openspending/ui/test/functional/test_account.py
|
from .. import ControllerTestCase, url, helpers as h
from openspending.model import Account, meta as db
from openspending.lib.mailer import MailerException
import json
class TestAccountController(ControllerTestCase):
def test_login(self):
response = self.app.get(url(controller='account', action='login'))
def test_register(self):
response = self.app.get(url(controller='account', action='register'))
@h.patch('openspending.auth.account.update')
@h.patch('openspending.ui.lib.base.model.Account.by_name')
def test_settings(self, model_mock, update_mock):
account = Account()
account.name = 'mockaccount'
db.session.add(account)
db.session.commit()
model_mock.return_value = account
update_mock.return_value = True
response = self.app.get(url(controller='account', action='settings'),
extra_environ={'REMOTE_USER': 'mockaccount'})
def test_after_login(self):
response = self.app.get(url(controller='account', action='after_login'))
def test_after_logout(self):
response = self.app.get(url(controller='account', action='after_logout'))
def test_trigger_reset_get(self):
response = self.app.get(url(controller='account', action='trigger_reset'))
assert 'email address you used to register your account' in response.body, response.body
def test_trigger_reset_post_fail(self):
response = self.app.post(url(controller='account', action='trigger_reset'),
params={'emailx': "foo@bar"})
assert 'Please enter an email address' in response.body, response.body
response = self.app.post(url(controller='account', action='trigger_reset'),
params={'email': "foo@bar"})
assert 'No user is registered' in response.body, response.body
@h.raises(MailerException)
def test_trigger_reset_post_ok(self):
account = h.make_account()
response = self.app.post(url(controller='account', action='trigger_reset'),
params={'email': "test@example.com"})
def test_reset_get(self):
response = self.app.get(url(controller='account', action='do_reset',
token='huhu',
email='huhu@example.com'))
assert '/login' in response.headers['location'], response.headers
account = h.make_account()
response = self.app.get(url(controller='account', action='do_reset',
token=account.token,
email=account.email))
assert '/settings' in response.headers['location'], response.headers
def test_distinct_json(self):
h.make_account()
response = self.app.get(url(controller='account', action='complete'),
params={})
obj = json.loads(response.body)['results']
assert len(obj) == 1, obj
assert obj[0]['name'] == 'test', obj[0]
response = self.app.get(url(controller='account', action='complete'),
params={'q': 'tes'})
obj = json.loads(response.body)['results']
assert len(obj) == 1, obj
response = self.app.get(url(controller='account', action='complete'),
params={'q': 'foo'})
obj = json.loads(response.body)['results']
assert len(obj) == 0, obj
def test_dashboard_not_logged_in(self):
response = self.app.get(url(controller='account', action='dashboard'),
status=403)
assert '403' in response.status, response.status
def test_dashboard(self):
test = h.make_account('test')
cra = h.load_fixture('cra', manager=test)
response = self.app.get(url(controller='account', action='dashboard'),
extra_environ={'REMOTE_USER': str(test.name)})
assert '200' in response.status, response.status
assert cra.label in response, response
|
Python
| 0.000001
|
@@ -149,16 +149,42 @@
ception%0A
+from pylons import config%0A
import j
@@ -1947,32 +1947,173 @@
_post_ok(self):%0A
+ try:%0A original_smtp_server = config.get('smtp_server')%0A config%5B'smtp_server'%5D = 'non-existent-smtp-server'%0A
account
@@ -2123,32 +2123,36 @@
.make_account()%0A
+
response
@@ -2227,32 +2227,36 @@
+
params=%7B'email':
@@ -2276,16 +2276,90 @@
e.com%22%7D)
+%0A finally:%0A config%5B'smtp_server'%5D = original_smtp_server
%0A%0A de
|
d37a36cea14bbb9c050196ccf42251db6250f3c0
|
Add ManyToMAnyFields
|
project_template/project_name/dolphin/models.py
|
project_template/project_name/dolphin/models.py
|
from __future__ import unicode_literals
from django.db import models
class Actor(models.Model):
actor_id = models.IntegerField(primary_key=True)
first_name = models.CharField(max_length=45)
last_name = models.CharField(max_length=45)
last_update = models.DateTimeField()
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
class Meta:
db_table = 'actor'
class Address(models.Model):
address_id = models.IntegerField(primary_key=True)
address = models.CharField(max_length=50)
address2 = models.CharField(max_length=50, blank=True)
district = models.CharField(max_length=20)
city = models.ForeignKey('City')
postal_code = models.CharField(max_length=10, blank=True)
phone = models.CharField(max_length=20)
last_update = models.DateTimeField()
def __unicode__(self):
return u'%s %s, tel: %s' % (self.address, self.address2, self.phone)
class Meta:
db_table = 'address'
class Category(models.Model):
category_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=25)
last_update = models.DateTimeField()
def __unicode__(self):
return u'%s' % self.name
class Meta:
db_table = 'category'
class City(models.Model):
city_id = models.IntegerField(primary_key=True)
city = models.CharField(max_length=50)
country = models.ForeignKey('Country')
last_update = models.DateTimeField()
def __unicode__(self):
return u'%s, %s' % (self.city, self.country)
class Meta:
db_table = 'city'
class Country(models.Model):
country_id = models.IntegerField(primary_key=True)
country = models.CharField(max_length=50)
last_update = models.DateTimeField()
def __unicode__(self):
return u'%s' % self.country
class Meta:
db_table = 'country'
class Customer(models.Model):
customer_id = models.IntegerField(primary_key=True)
store = models.ForeignKey('Store')
first_name = models.CharField(max_length=45)
last_name = models.CharField(max_length=45)
email = models.CharField(max_length=50, blank=True)
address = models.ForeignKey(Address)
activebool = models.BooleanField()
create_date = models.DateField()
last_update = models.DateTimeField(blank=True, null=True)
active = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return u'%s %s' % (self.first_name, self.last_name)
class Meta:
db_table = 'customer'
class Film(models.Model):
film_id = models.IntegerField(primary_key=True)
title = models.CharField(max_length=255)
description = models.TextField(blank=True)
release_year = models.IntegerField(blank=True, null=True)
language = models.ForeignKey('Language')
original_language = models.ForeignKey('Language', blank=True, null=True, related_name='filmAsOriginalLanguage')
rental_duration = models.SmallIntegerField()
rental_rate = models.DecimalField(max_digits=4, decimal_places=2)
length = models.SmallIntegerField(blank=True, null=True)
replacement_cost = models.DecimalField(max_digits=5, decimal_places=2)
rating = models.TextField(blank=True) # This field type is a guess.
last_update = models.DateTimeField()
special_features = models.TextField(blank=True) # This field type is a guess.
fulltext = models.TextField() # This field type is a guess.
def __unicode__(self):
return u'%s' % self.title
class Meta:
db_table = 'film'
class FilmActor(models.Model):
actor = models.ForeignKey(Actor)
film = models.ForeignKey(Film)
last_update = models.DateTimeField()
class Meta:
db_table = 'film_actor'
class FilmCategory(models.Model):
film = models.ForeignKey(Film)
category = models.ForeignKey(Category)
last_update = models.DateTimeField()
class Meta:
db_table = 'film_category'
class Inventory(models.Model):
inventory_id = models.IntegerField(primary_key=True)
film = models.ForeignKey(Film)
store = models.ForeignKey('Store')
last_update = models.DateTimeField()
def __unicode__(self):
return u'No.%d' % self.inventory_id
class Meta:
db_table = 'inventory'
class Language(models.Model):
language_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=20)
last_update = models.DateTimeField()
def __unicode__(self):
return u'%s' % self.name
class Meta:
db_table = 'language'
class Payment(models.Model):
payment_id = models.IntegerField(primary_key=True)
customer = models.ForeignKey(Customer)
staff = models.ForeignKey('Staff')
rental = models.ForeignKey('Rental')
amount = models.DecimalField(max_digits=5, decimal_places=2)
payment_date = models.DateTimeField()
def __unicode__(self):
return u'%s' % self.payment_id
class Meta:
db_table = 'payment'
class Rental(models.Model):
rental_id = models.IntegerField(primary_key=True)
rental_date = models.DateTimeField()
inventory = models.ForeignKey(Inventory)
customer = models.ForeignKey(Customer)
return_date = models.DateTimeField(blank=True, null=True)
staff = models.ForeignKey('Staff')
last_update = models.DateTimeField()
def __unicode__(self):
return u'No.%d' % self.rental_id
class Meta:
db_table = 'rental'
class Staff(models.Model):
staff_id = models.IntegerField(primary_key=True)
first_name = models.CharField(max_length=45)
last_name = models.CharField(max_length=45)
address = models.ForeignKey(Address)
email = models.CharField(max_length=50, blank=True)
store = models.ForeignKey('Store')
active = models.BooleanField()
username = models.CharField(max_length=16)
password = models.CharField(max_length=40, blank=True)
last_update = models.DateTimeField()
picture = models.BinaryField(blank=True, null=True)
def __unicode__(self):
return u'%s %s (%s)' % (self.first_name, self.last_name, self.username)
class Meta:
db_table = 'staff'
class Store(models.Model):
store_id = models.IntegerField(primary_key=True)
manager_staff = models.ForeignKey(Staff, unique=True, related_name='store_managed_by_me')
address = models.ForeignKey(Address)
last_update = models.DateTimeField()
def __unicode__(self):
return u'No.%d' % self.store_id
class Meta:
db_table = 'store'
|
Python
| 0.000001
|
@@ -273,32 +273,96 @@
.DateTimeField()
+%0A films = models.ManyToManyField('Film', through='FilmActor')
%0A%0A def __unic
@@ -1213,32 +1213,99 @@
.DateTimeField()
+%0A films = models.ManyToManyField('Film', through='FilmCategory')
%0A%0A def __unic
@@ -3570,16 +3570,154 @@
a guess.
+%0A categories = models.ManyToManyField(Category, through='FilmCategory')%0A actors = models.ManyToManyField(Actor, through='FilmActor')
%0A%0A de
|
53cc5af4a43f0dd8a5a2943a1f62250914f4635d
|
exclude unnecessary paths from GA
|
ndohyep/settings/base.py
|
ndohyep/settings/base.py
|
"""
Django settings for base ndohyep.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
from os.path import abspath, dirname, join
from django.conf import global_settings
from django.utils.translation import ugettext_lazy as _
import os
import djcelery
djcelery.setup_loader()
# Absolute filesystem path to the Django project directory:
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v+*c@9@x%h%ou32gk58nv5=03dti0=z^g%296vcx*1alxg#m2)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Base URL to use when referring to full URLs
# within the Wagtail admin backend -
# e.g. in notification emails.
# Don't include '/admin' or a trailing slash
BASE_URL = 'http://example.com'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.sites',
'compressor',
'taggit',
'modelcluster',
'ndohyep',
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailsites',
'wagtail.wagtailimages',
'wagtail.wagtailembeds',
'wagtail.wagtailsearch',
'wagtail.wagtailredirects',
'wagtail.wagtailforms',
'raven.contrib.django.raven_compat',
# for molo.commenting
'mptt',
'django_comments',
'molo.core',
'molo.profiles',
'molo.commenting',
'polls',
'surveys',
'app',
'djcelery',
'django_extensions',
'google_analytics',
)
COMMENTS_APP = 'molo.commenting'
COMMENTS_FLAG_THRESHHOLD = 3
COMMENTS_HIDE_REMOVED = False
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
'google_analytics.middleware.GoogleAnalyticsMiddleware',
)
ROOT_URLCONF = 'ndohyep.urls'
WSGI_APPLICATION = 'ndohyep.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# SQLite (simplest install)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(PROJECT_ROOT, 'db.sqlite3'),
}
}
# PostgreSQL (Recommended, but requires the psycopg2 library
# and Postgresql development headers)
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'base',
# 'USER': 'postgres',
# 'PASSWORD': '',
# 'HOST': '', # Set to empty string for localhost.
# 'PORT': '', # Set to empty string for default.
# 'CONN_MAX_AGE': 600,
# number of seconds database connections should persist for
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Native South African languages are currently not included in the default
# list of languges in django
# https://github.com/django/django/blob/master/django/conf/global_settings.py#L50
LANGUAGES = global_settings.LANGUAGES + (
('zu', _('Zulu')),
('xh', _('Xhosa')),
('st', _('Sotho')),
('ve', _('Venda')),
('tn', _('Tswana')),
('ts', _('Tsonga')),
('ss', _('Swati')),
('nr', _('Ndebele')),
)
LOCALE_PATHS = (
join(PROJECT_ROOT, "locale"),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
MEDIA_ROOT = join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
# Django compressor settings
# http://django-compressor.readthedocs.org/en/latest/settings/
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Template configuration
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'molo.core.context_processors.locale',
'molo.profiles.context_processors.get_profile_data',
'ndohyep.context_processors.default_forms',
)
# Wagtail settings
LOGIN_URL = 'molo.profiles:auth_login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
WAGTAIL_SITE_NAME = "NDOH Youth Platform"
# Use Elasticsearch as the search backend for extra performance
# and better search results:
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#search
# http://wagtail.readthedocs.org/en/latest \
# /core_components/search/backends.html#elasticsearch-backend
#
WAGTAILSEARCH_RESULTS_TEMPLATE = 'search/search_results.html'
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': (
'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch'),
'INDEX': 'ndohyep',
'AUTO_UPDATE': True,
},
}
# Whether to use face/feature detection to improve
# image cropping - requires OpenCV
WAGTAILIMAGES_FEATURE_DETECTION_ENABLED = False
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'ndohyep'),
)
# Google Analytics
GOOGLE_ANALYTICS = {
'google_analytics_id': '',
}
GOOGLE_ANALYTICS_IGNORE_PATH = ['/health/', ]
# Celery
CELERY_IMPORTS = ('google_analytics.tasks',)
BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
REGISTRATION_OPEN = True
|
Python
| 0.000002
|
@@ -6159,16 +6159,21 @@
PATH = %5B
+%0A
'/health
@@ -6176,16 +6176,74 @@
alth/',
+'/favicon.ico', '/robots.txt', '/admin/', '/django-admin/'
%5D%0A%0A%0A# Ce
|
48b33bedda0da0ad324f8f7a3ac2fbafa8e6f665
|
change issue commit to markdown
|
moment/main.py
|
moment/main.py
|
from sanic import Sanic
from sanic.response import json as response_json
import aiohttp
import json
from moment.gitlab_message_dict import get_dingtalk_data
app = Sanic(__name__)
async def post(url, json_data):
headers = {
"Content-Type": "application/json"
}
conn = aiohttp.TCPConnector(verify_ssl=False)
async with aiohttp.ClientSession(connector=conn, headers=headers) as session:
async with session.post(url, data=json.dumps(json_data)) as resp:
return await resp.json()
@app.post("/gitlab")
async def test(request):
access_token = request.args.get('access_token')
request_data = request.json
print(f'request: {request.body}')
url = f'https://oapi.dingtalk.com/robot/send?access_token={access_token}'
data = get_dingtalk_data(request_data)
response = await post(url, data)
print(f'{url}: {response}')
return response_json(request.json)
|
Python
| 0
|
@@ -681,16 +681,25 @@
est.body
+.decode()
%7D')%0A
|
8ec6b8b6c2f099261f85a3f68b5d6e87cbdb1c25
|
set context to none for ws://
|
src/mattermostdriver/websocket.py
|
src/mattermostdriver/websocket.py
|
import json
import ssl
import asyncio
import logging
import websockets
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('mattermostdriver.websocket')
class Websocket:
def __init__(self, options, token):
self.options = options
self._token = token
@asyncio.coroutine
def connect(self, event_handler):
"""
Connect to the websocket and authenticate it.
When the authentication has finished, start the loop listening for messages,
sending a ping to the server to keep the connection alive.
:param event_handler: Every websocket event will be passed there
:type event_handler: Function
:return:
"""
context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
if not self.options['verify']:
context.verify_mode = ssl.CERT_NONE
scheme = 'wss://'
if self.options['scheme'] != 'https':
scheme = 'ws://'
url = scheme + self.options['url'] + ':' + str(self.options['port']) + self.options['basepath'] + '/websocket'
websocket = yield from websockets.connect(
url,
ssl=context,
)
yield from self._authenticate_websocket(websocket, event_handler)
yield from self._start_loop(websocket, event_handler)
@asyncio.coroutine
def _start_loop(self, websocket, event_handler):
"""
We will listen for websockets events, sending a heartbeat/pong everytime
we react a TimeoutError. If we don't the webserver would close the idle connection,
forcing us to reconnect.
"""
log.debug('Starting websocket loop')
while True:
try:
yield from asyncio.wait_for(
self._wait_for_message(websocket, event_handler),
timeout=self.options['timeout']
)
except asyncio.TimeoutError:
yield from websocket.pong()
log.debug("Sending heartbeat...")
continue
@asyncio.coroutine
def _authenticate_websocket(self, websocket, event_handler):
"""
Sends a authentication challenge over a websocket.
This is not needed when we just send the cookie we got on login
when connecting to the websocket.
"""
json_data = json.dumps({
"seq": 1,
"action": "authentication_challenge",
"data": {
"token": self._token
}
}).encode('utf8')
yield from websocket.send(json_data)
while True:
message = yield from websocket.recv()
status = json.loads(message)
log.debug(status)
# We want to pass the events to the event_handler already
# because the hello event could arrive before the authentication ok response
yield from event_handler(message)
if ('status' in status and status['status'] == 'OK') and \
('seq_reply' in status and status['seq_reply'] == 1):
log.info('Websocket authentification OK')
return True
@asyncio.coroutine
def _wait_for_message(self, websocket, event_handler):
while True:
message = yield from websocket.recv()
yield from event_handler(message)
|
Python
| 0.000014
|
@@ -852,16 +852,34 @@
'ws://'
+%0A%09%09%09context = None
%0A%0A%09%09url
@@ -2812,28 +2812,29 @@
from event_handler(message)
+%0A
|
63529c668acaa196ca8480c70a0d19edd2c7baa0
|
Replace Bounty.data with dict
|
common/bounty.py
|
common/bounty.py
|
import os, pickle, re, sys
from common.safeprint import safeprint
from multiprocessing import Lock
from hashlib import sha256
global bountyList
global bountyLock
global bountyPath
bountyList = []
bountyLock = Lock()
bountyPath = "data" + os.sep + "bounties.pickle"
class Bounty(object):
ip = ""
btc = ""
reward = 0
data = []
def __repr__(self):
return ("<Bounty: ip=" + str(self.ip) + ", btc=" + str(self.btc) + ", reward=" + str(self.reward) + ", data=" + str(self.data) + ">")
def __init__(self, ipAddress, btcAddress, rewardAmount, dataList=[]):
self.ip = ipAddress
self.btc = btcAddress
self.reward = rewardAmount
self.data = dataList
def isValid(self):
try:
safeprint("Testing IP address")
#is IP valid
b = int(self.ip.split(":")[1]) in range(1024,49152)
b = int(self.ip.split(":")[0].split(".")[0]) in range(0,256) and b
b = int(self.ip.split(":")[0].split(".")[1]) in range(0,256) and b
b = int(self.ip.split(":")[0].split(".")[2]) in range(0,256) and b
b = int(self.ip.split(":")[0].split(".")[3]) in range(0,256) and b
if not b:
return False
#ping IP
#is Bitcoin address valid
safeprint("Testing Bitcoin address")
address = str(self.btc)
#The following is a soft check
#A deeper check will need to be done in order to assure this is correct
if not checkAddressValid(address):
return False
#is reward valid
safeprint("Testing reward")
b = int(self.reward)
return (b >= 0)
except:
return False
def isPayable(self):
#check if address has enough
return True
def checkAddressValid(bc):
if not re.match(re.compile("^[a-zA-Z1-9]{26,35}$"),bc):
return False
n = 0
for char in bc:
n = n * 58 + '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'.index(char)
if sys.version_info[0] < 3:
bcbytes = (('%%0%dx' % (25 << 1) % n).decode('hex')[-25:])
return bcbytes[-4:] == sha256(sha256(bcbytes[:-4]).digest()).digest()[:4]
else:
bcbytes = n.to_bytes(25, 'big')
return bcbytes[-4:] == sha256(sha256(bcbytes[:-4]).digest()).digest()[:4]
def verify(string):
test = pickle.loads(string)
try:
safeprint("Testing IP address")
#is IP valid
b = int(test.ip.split(":")[1]) in range(1024,49152)
b = int(test.ip.split(":")[0].split(".")[0]) in range(0,256) and b
b = int(test.ip.split(":")[0].split(".")[1]) in range(0,256) and b
b = int(test.ip.split(":")[0].split(".")[2]) in range(0,256) and b
b = int(test.ip.split(":")[0].split(".")[3]) in range(0,256) and b
if not b:
return False
#ping IP
#is Bitcoin address valid
safeprint("Testing Bitcoin address")
address = str(test.btc)
#The following is a soft check
#A deeper check will need to be done in order to assure this is correct
if not checkAddressValid(address):
return False
#is reward valid
safeprint("Testing reward")
b = int(test.reward)
return (b >= 0)
except:
return False
def getBountyList():
a = []
with bountyLock:
safeprint(bountyList)
a = bountyList
safeprint(a)
return a
def saveToFile(bountyList):
if not os.path.exists(bountyPath.split(os.sep)[0]):
os.mkdir(bountyPath.split(os.sep)[0])
pickle.dump(bountyList,open(bountyPath, "wb"),1)
return True
def loadFromFile():
if os.path.exists(bountyPath):
with bountyLock:
bountyList = pickle.load(open(bountyPath,"rb"))
return True
return False
def addBounty(bounty):
a = False
safeprint((sys.version_info[0],sys.version_info[1],sys.version_info[2]))
if sys.version_info[0] == 2 and sys.version_info[1] == 6 and (type(bounty) == type("aaa") or type(bounty) == type(unicode("aaa"))):
safeprint("Fed as string in 2.6; encoding ascii and ignoring errors")
try:
bounty = bounty.encode('ascii','ignore')
except:
bounty = str(bounty)
elif type(bounty) == type("aaa") and sys.version_info[0] >= 3:
safeprint("Fed as string; encoding utf-8")
bounty = bounty.encode('utf-8')
safeprint("External verify")
a = verify(bounty)
bounty = pickle.loads(bounty)
safeprint("Internal verify")
b = bounty.isValid()
if a and b:
with bountyLock:
safeprint(bountyList)
bountyList.append(bounty)
return (a and b)
def getBounty(charity, factor):
a = getBountyList()
safeprint("bountyList = " + str(a))
if a == []:
return None
elif charity:
for bounty in a:
if bounty.isValid():
b = a.index(bounty)
return a.pop(b)
else:
best = None
for bounty in a:
if best is None:
best = bounty
elif best.reward < bounty.reward and bounty.isValid() and bounty.isPayable(factor):
best = bounty
return best
|
Python
| 0.999992
|
@@ -325,18 +325,18 @@
data =
-%5B%5D
+%7B%7D
%0A %0A de
@@ -560,15 +560,15 @@
data
-List=%5B%5D
+Dict=%7B%7D
):%0A
@@ -667,19 +667,19 @@
a = data
-Lis
+Dic
t%0A %0A
|
238c49d4fb1fe67ffd63ed7b9dc5dce0915ae389
|
remove internationalisation of uri. fix issue #2
|
django_authopenid/urls.py
|
django_authopenid/urls.py
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext as _
urlpatterns = patterns('django_authopenid.views',
# yadis rdf
url(r'^yadis.xrdf$', 'xrdf', name='yadis_xrdf'),
# manage account registration
url(r'^%s$' % _('signin/'), 'signin', name='user_signin'),
url(r'^%s$' % _('signout/'), 'signout', name='user_signout'),
url(r'^%s%s$' % (_('signin/'), _('complete/')), 'complete_signin',
name='user_complete_signin'),
url(r'^%s$' % _('register/'), 'register', name='user_register'),
)
|
Python
| 0.000001
|
@@ -290,27 +290,17 @@
(r'%5E
-%25s$' %25 _('
signin/
+$
'
-)
, 's
@@ -343,26 +343,16 @@
(r'%5E
-%25s$' %25 _('
signout/
'),
@@ -347,18 +347,18 @@
signout/
+$
'
-)
, 'signo
@@ -399,35 +399,15 @@
(r'%5E
-%25s%25s$' %25 (_('signin/'), _('
+signin/
comp
@@ -415,11 +415,10 @@
ete/
+$
'
-))
, 'c
@@ -438,17 +438,8 @@
n',
-%0A
name
@@ -479,18 +479,8 @@
(r'%5E
-%25s$' %25 _('
regi
@@ -484,18 +484,18 @@
egister/
+$
'
-)
, 'regis
|
1e0079b168a598ca2e5dcbb97503d08b7f927a09
|
Update check_permissions to better reflect what the server requires.
|
montage/api.py
|
montage/api.py
|
import mimetypes
__all__ = ('DataAPI', 'FileAPI', 'RoleAPI', 'SchemaAPI')
class DocumentsAPI(object):
def __init__(self, client):
self.client = client
def save(self, schema, *documents):
endpoint = 'schemas/{0}/documents'.format(schema, schema)
return self.client.request(endpoint, method='post', json=documents)
def get(self, schema, document_id):
endpoint = 'schemas/{0}/documents/{1}'.format(schema, document_id)
return self.client.request(endpoint)
def replace(self, schema, document):
endpoint = 'schemas/{0}/documents/{1}'.format(schema, document['id'])
return self.client.request(endpoint, method='put', json=document)
def update(self, schema, document):
endpoint = 'schemas/{0}/documents/{1}'.format(schema, document['id'])
return self.client.request(endpoint, method='patch', json=document)
def remove(self, schema, document_id):
endpoint = 'schemas/{0}/documents/{1}'.format(schema, document_id)
return self.client.request(endpoint, method='delete')
class FileAPI(object):
def __init__(self, client):
self.client = client
def list(self, **kwargs):
return self.client.request('files', params=kwargs)
def get(self, file_id):
endpoint = 'files/{0}'.format(file_id)
return self.client.request(endpoint)
def remove(self, file_id):
endpoint = 'files/{0}'.format(file_id)
return self.client.request(endpoint, method='delete')
def save(self, *files):
'''
Each file is extected to be a tuple of (name, content), where
content is a file-like object or the contents as a string.
client.files.save(('foo.txt', open('/path/to/foo.txt')))
client.files.save(('foo.txt', StringIO('This is foo.txt')))
client.files.save(('foo.txt', 'This is foo.txt'))
'''
file_list = []
for name, contents in files:
content_type = mimetypes.guess_type(name)[0]
file_list.append(('file', (name, contents, content_type)))
return self.client.request('files', 'post', files=file_list)
class PolicyAPI(object):
def __init__(self, client):
self.client = client
def create(self, description, policy):
payload = {
'description': description,
'policy': policy
}
return self.client.request('policy', method='post', json=payload)
def list(self, **kwargs):
return self.client.request('policy', params=kwargs)
def get(self, policy_id):
return self.client.request('policy/{0}'.format(policy_id))
def update(self, policy_id, description=None, policy=None):
payload = {}
if description:
payload['description'] = description
if policy:
payload['policy'] = policy
if payload:
return self.client.request('policy/{0}'.format(policy_id),
method='patch', json=payload)
def remove(self, policy_id):
return self.client.request('policy/{0}'.format(policy_id), method='delete')
def check_permission(self, action=None, resource=None):
payload = {}
if action:
payload['action'] = action
if resource:
payload['action'] = resource
return self.client.request('policy/check/', params=payload)
class RoleAPI(object):
def __init__(self, client):
self.client = client
def create(self, name, add_users=None):
payload = {
'name': name,
'add_users': users or []
}
return self.client.request('roles', method='post', json=payload)
def list(self, **kwargs):
return self.client.request('roles', params=kwargs)
def get(self, role):
return self.client.request('roles/{0}'.format(role))
def update(self, role, name=None, add_users=None, remove_users=None):
payload = {}
if name:
payload['name'] = name
if add_users:
payload['add_users'] = add_users
if remove_users:
payload['remove_users'] = remove_users
if payload:
return self.client.request('roles/{0}'.format(role),
method='patch', json=payload)
def remove(self, role):
return self.client.request('roles/{0}'.format(role), method='delete')
class SchemaAPI(object):
def __init__(self, client):
self.client = client
def create(self, name, fields=None):
payload = {
'name': name,
'fields': fields or []
}
return self.client.request('schemas', method='post', json=payload)
def list(self, **kwargs):
return self.client.request('schemas', params=kwargs)
def get(self, schema):
return self.client.request('schemas/{0}'.format(schema))
def update(self, schema, name=None, fields=None):
payload = {}
if name:
payload['name'] = name
if fields:
payload['fields'] = fields
if payload:
return self.client.request('schemas/{0}'.format(schema),
method='patch', json=payload)
def remove(self, schema):
return self.client.request('schemas/{0}'.format(schema), method='delete')
class UserAPI(object):
attributes = ('email', 'full_name', 'password')
def __init__(self, client):
self.client = client
def list(self, **kwargs):
return self.client.request('users', params=kwargs)
def create(self, full_name, email, password):
payload = {
'full_name': full_name,
'email': email,
'password': password,
}
return self.client.request('users', method='post', json=payload)
def get(self, user_id):
return self.client.request('users/{0}'.format(user_id))
def update(self, user_id, full_name=None, email=None, password=None):
payload = {}
if full_name:
payload['full_name'] = full_name
if email:
payload['email'] = email
if password:
payload['password'] = password
if payload:
return self.client.request('users/{0}'.format(user_id),
method='patch', json=payload)
def remove(self, user_id):
return self.client.request('users/{0}'.format(user_id), method='delete')
|
Python
| 0
|
@@ -3175,21 +3175,16 @@
, action
-=None
, resour
@@ -3204,33 +3204,32 @@
payload = %7B
-%7D
%0A if acti
@@ -3225,47 +3225,20 @@
-if action:%0A payload%5B
+
'action'
%5D =
@@ -3237,27 +3237,27 @@
ion'
-%5D =
+:
action
-%0A
+,%0A
if r
@@ -3256,19 +3256,20 @@
-if
+ '
resource
:%0A
@@ -3268,50 +3268,30 @@
urce
-:%0A payload%5B'action'%5D = resource
+': resource,%0A %7D
%0A
|
62ef1602ab3caa0b0730a19d9c5e146d2d320c0c
|
update url
|
osmaxx/excerptexport/models/extraction_order.py
|
osmaxx/excerptexport/models/extraction_order.py
|
import json
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django_enumfield import enum
from osmaxx.excerptexport.models.output_file import OutputFile
from .excerpt import Excerpt
class ExtractionOrderState(enum.Enum):
UNDEFINED = 0
INITIALIZED = 1
QUEUED = 2
PROCESSING = 3
FINISHED = 4
FAILED = 6
FINAL_STATES = {ExtractionOrderState.FINISHED, ExtractionOrderState.FAILED}
CONVERSION_PROGRESS_TO_EXTRACTION_ORDER_STATE_MAPPING = {
'new': ExtractionOrderState.INITIALIZED,
'received': ExtractionOrderState.QUEUED,
'started': ExtractionOrderState.PROCESSING,
'successful': ExtractionOrderState.FINISHED,
'error': ExtractionOrderState.FAILED,
}
def get_order_status_from_conversion_progress(progress):
return CONVERSION_PROGRESS_TO_EXTRACTION_ORDER_STATE_MAPPING.get(progress, ExtractionOrderState.UNDEFINED)
class ExtractionOrder(models.Model):
DOWNLOAD_STATUS_NOT_DOWNLOADED = 0
DOWNLOAD_STATUS_DOWNLOADING = 1
DOWNLOAD_STATUS_AVAILABLE = 2
DOWNLOAD_STATUSES = (
(DOWNLOAD_STATUS_NOT_DOWNLOADED, 'unknown'),
(DOWNLOAD_STATUS_DOWNLOADING, 'downloading'),
(DOWNLOAD_STATUS_AVAILABLE, 'received'),
)
state = enum.EnumField(ExtractionOrderState, default=ExtractionOrderState.INITIALIZED, verbose_name=_('state'))
_extraction_configuration = models.TextField(
blank=True, null=True, default='', verbose_name=_('extraction options')
)
process_id = models.TextField(blank=True, null=True, verbose_name=_('process link'))
orderer = models.ForeignKey(User, related_name='extraction_orders', verbose_name=_('orderer'))
excerpt = models.ForeignKey(Excerpt, related_name='extraction_orders', verbose_name=_('excerpt'), null=True)
progress_url = models.URLField(verbose_name=_('progress URL'), null=True, blank=True)
process_start_time = models.DateTimeField(verbose_name=_('process start time'), null=True, blank=True)
download_status = models.IntegerField(
_('file status'),
choices=DOWNLOAD_STATUSES,
default=DOWNLOAD_STATUS_NOT_DOWNLOADED
)
def forward_to_conversion_service(self, *, incoming_request):
clipping_area_json = self.excerpt.send_to_conversion_service()
jobs_json = [
export.send_to_conversion_service(clipping_area_json, incoming_request)
for export in self.exports.all()
]
return jobs_json
def __str__(self):
return ', '.join(
[
'[{order_id}] orderer: {orderer_name}'.format(
order_id=self.id,
orderer_name=self.orderer.get_username(),
),
'excerpt: {}'.format(str(self.excerpt_name)),
'state: {}'.format(self.get_state_display()),
]
)
@property
def excerpt_name(self):
"""
Returns:
user-given excerpt name for user-defined excerpts,
country name for countries,
None if order has no excerpt (neither country nor user-defined)
"""
if self.excerpt:
return self.excerpt.name
@property
def output_files(self):
return OutputFile.objects.filter(export__extraction_order=self)
@property
def are_downloads_ready(self):
return self.state == ExtractionOrderState.FINISHED
@property
def extraction_configuration(self):
if self._extraction_configuration and not self._extraction_configuration == '':
return json.loads(self._extraction_configuration)
else:
return None
@extraction_configuration.setter
def extraction_configuration(self, value):
if not value:
value = {}
else:
value = dict(value)
assert 'gis_formats' not in value
self._extraction_configuration = json.dumps(value)
@property
def extraction_formats(self):
return self.exports.values_list('file_format', flat=True)
@extraction_formats.setter
def extraction_formats(self, value):
new_formats = frozenset(value)
previous_formats = self.exports.values_list('file_format', flat=True)
assert new_formats.issuperset(previous_formats)
self._new_formats = new_formats # Will be collected and cleaned up by attach_new_formats.
if self.id is not None:
attach_new_formats(self.__class__, instance=self)
@property
def process_due_time(self):
from django.conf import settings # import locally, so migrations can't depend on settings
return self.process_start_time + settings.OSMAXX.get('EXTRACTION_PROCESSING_TIMEOUT_TIMEDELTA')
def set_status_from_conversion_progress(self, job_overall_progress):
if self.state not in [ExtractionOrderState.FINISHED, ExtractionOrderState.FAILED]:
self.state = get_order_status_from_conversion_progress(job_overall_progress)
def get_absolute_url(self):
from django.core.urlresolvers import reverse
return reverse('excerptexport:status', kwargs={'extraction_order_id': self.id})
def send_email_if_all_exports_done(self, incoming_request):
if all(export.is_status_final for export in self.exports.all()):
from osmaxx.utilities.shortcuts import Emissary
emissary = Emissary(recipient=self.orderer)
emissary.inform_mail(
subject=self._get_all_exports_done_email_subject(),
mail_body=self._get_all_exports_done_mail_body(incoming_request)
)
def _get_all_exports_done_email_subject(self):
view_context = dict(
extraction_order=self,
successful_exports_count=self.exports.filter(output_file__isnull=False).count(),
failed_exports_count=self.exports.filter(output_file__isnull=True).count(),
)
return render_to_string(
'excerptexport/email/all_exports_of_extraction_order_done_subject.txt',
context=view_context,
).strip()
def _get_all_exports_done_mail_body(self, incoming_request):
view_context = dict(
extraction_order=self,
successful_exports=self.exports.filter(output_file__isnull=False),
failed_exports=self.exports.filter(output_file__isnull=True),
request=incoming_request,
)
return render_to_string(
'excerptexport/email/all_exports_of_extraction_order_done_body.txt',
context=view_context,
).strip()
@receiver(post_save, sender=ExtractionOrder)
def attach_new_formats(sender, instance, **kwargs):
if hasattr(instance, '_new_formats'):
for format in instance._new_formats:
instance.exports.get_or_create(file_format=format)
del instance._new_formats
|
Python
| 0.000001
|
@@ -5316,56 +5316,20 @@
ort:
-status', kwargs=%7B'extraction_order_id': self.id%7D
+export_list'
)%0A%0A
|
fe314468c4a8c02650b3b983a239acd06bfc003f
|
Improve config file handling on the job.
|
lobster/cmssw/data/job.py
|
lobster/cmssw/data/job.py
|
#!/usr/bin/env python
import base64
import json
import os
import pickle
import subprocess
import sys
def edit_process_source(cmssw_config_file, config_params):
(dataset_files, lumis) = config_params
config = open(cmssw_config_file, 'a')
with open(cmssw_config_file, 'a') as config:
fragment = ('import FWCore.ParameterSet.Config as cms'
'\nprocess.source.fileNames = cms.untracked.vstring({input_files})'
'\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))'
'\nprocess.source.lumisToProcess = cms.untracked.VLuminosityBlockRange({lumis})')
config.write(fragment.format(input_files=repr([str(f) for f in dataset_files]), lumis=[str(l) for l in lumis]))
(configfile, inputs) = sys.argv[1:3]
args = sys.argv[3:]
for d in os.listdir('.'):
if d.startswith('CMSSW'):
break
env = os.environ
env['X509_USER_PROXY'] = os.path.join(d, 'proxy')
edit_process_source(configfile, pickle.loads(base64.b64decode(inputs)))
exit_code = subprocess.call('cmsRun -j report.xml "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(args)), shell=True, env=env)
sys.exit(exit_code)
|
Python
| 0
|
@@ -66,16 +66,30 @@
pickle%0A
+import shutil%0A
import s
@@ -110,16 +110,288 @@
rt sys%0A%0A
+fragment = %22%22%22import FWCore.ParameterSet.Config as cms%0Aprocess.source.fileNames = cms.untracked.vstring(%7Binput_files%7D)%0Aprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))%0Aprocess.source.lumisToProcess = cms.untracked.VLuminosityBlockRange(%7Blumis%7D)%22%22%22%0A%0A
def edit
@@ -488,50 +488,8 @@
ams%0A
- config = open(cmssw_config_file, 'a')%0A
@@ -549,463 +549,215 @@
frag
-ment = ('import FWCore.ParameterSet.Config as cms'%0A '%5Cnprocess.source.fileNames = cms.untracked.vstring(%7Binput_files%7D)'%0A '%5Cnprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))'%0A '%5Cnprocess.source.lumisToProcess = cms.untracked.VLuminosityBlockRange(%7Blumis%7D)')%0A config.write(fragment.format(input_files=repr(%5Bstr(f) for f in dataset_files%5D), lumis=%5Bstr(l) for l in lumis%5D)
+ = fragment.format(input_files=repr(%5Bstr(f) for f in dataset_files%5D), lumis=%5Bstr(l) for l in lumis%5D)%0A print %22--- config file fragment:%22%0A print frag%0A print %22---%22%0A config.write(frag
)%0A%0A(
@@ -762,20 +762,16 @@
%0A(config
-file
, inputs
@@ -809,16 +809,96 @@
gv%5B3:%5D%0A%0A
+configfile = config.replace(%22.py%22, %22_mod.py%22)%0Ashutil.copy2(config, configfile)%0A%0A
for d in
|
95a21d9c758e471f7d458f6dc597d615605afe73
|
Add function to derive iRODS zone name and use to make correct collection
|
jicirodsmanager/irods.py
|
jicirodsmanager/irods.py
|
"""Module for storing irods specific code."""
import logging
from jicirodsmanager import StorageManager, CommandWrapper
logger = logging.getLogger(__name__)
def string_to_list(s):
"""Return a list of items.
:param s: string with white space separated items
:returns: list of items
"""
return s.strip().split()
def nbi_zone_user_name(user_name):
"Return nbi zone user name."""
return "{}#nbi".format(user_name)
class IrodsStorageManager(StorageManager):
"""Class for adding users/groups/namespaces to an irods storage system."""
command_prefix = "irods"
def group_exists(self, group_name):
"""Return true if the group exists."""
logger.info("Calling group exists")
lg = CommandWrapper(["iadmin", "lg"])
groups = string_to_list(lg())
return group_name in groups
def create_group_without_quota(self, group_name):
"""Add the group without setting a quota."""
logger.info("Calling create_group_without_quota")
mkgroup = CommandWrapper(["iadmin", "mkgroup", group_name])
mkgroup()
if mkgroup.returncode == 0:
collection = "/{}".format(group_name)
imkdir = CommandWrapper(
["imkdir", collection])
imkdir()
if imkdir.returncode == 0:
ichmod_own = CommandWrapper(
["ichmod", "own", group_name, collection])
ichmod_own()
ichmod_inherit = CommandWrapper(
["ichmod", "inherit", collection])
ichmod_inherit()
return mkgroup.success()
def create_group_with_quota(self, group_name, quota):
"""Add the group and set quota."""
logger.info("Calling create_group_with_quota")
created = self.create_group_without_quota(group_name)
if created:
sgq = CommandWrapper(
["iadmin", "sgq, group_name", "total", quota])
sgq()
def create_user(self, user_name):
"""Create the user and return True if successful."""
logger.info("Calling create_user")
mkuser = CommandWrapper(
["iadmin", "mkuser", nbi_zone_user_name(user_name)])
mkuser()
return mkuser.success()
def add_user_to_group(self, user_name, group_name):
"""Add the user to the group."""
logger.info("Calling add_user_to_group")
atg = CommandWrapper(
["iadmin", "atg", group_name, nbi_zone_user_name(user_name)])
atg()
|
Python
| 0
|
@@ -40,16 +40,38 @@
de.%22%22%22%0A%0A
+import os%0Aimport json%0A
import l
@@ -77,16 +77,16 @@
logging%0A
-
%0Afrom ji
@@ -349,24 +349,379 @@
).split()%0A%0A%0A
+def irods_zone_collection_name(group_name):%0A %22%22%22Return iRODS collection name derived from group_name including working%0A out the iRODS zone.%22%22%22%0A%0A irods_envfile = os.path.expanduser('~/.irods/irods_environment.json')%0A irods_zone_name = json.load(open(irods_envfile))%5B'irods_zone_name'%5D%0A%0A return %22/%7B%7D/%7B%7D%22.format(irods_zone_name, group_name)%0A%0A%0A
def nbi_zone
@@ -743,20 +743,22 @@
_name):%0A
-
+%22%22
%22Return
@@ -1541,20 +1541,34 @@
n =
-%22/%7B%7D%22.format
+irods_zone_collection_name
(gro
|
74a59179311e456e3ad5a01e7320284aa819d7b6
|
patch prev. commit
|
conductor/cli.py
|
conductor/cli.py
|
import json
import click
from click import echo
from click_spinner import spinner
from prettytable import PrettyTable
from tabulate import tabulate
from .config import get_config, new_config, set_config
from .feeds import run_price_feeds
from .markets import Markets
from .utils import generate_signing_key
from .watchdog import (
watchdog,
enable_witness,
disable_witness,
is_witness_enabled,
current_signing_key,
total_missed,
get_witness,
witness_create,
witness_set_props,
)
def heading(title):
echo('%s:\n' % title + (len(title) + 1) * '-')
def output(data, title=None):
if title:
heading(title)
if type(data) == dict:
print(json.dumps(data, indent=4))
else:
echo(data)
echo('')
context_settings = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=context_settings)
def conductor():
"""Steem Witness Toolkit."""
pass
# Config Commands
# ---------------
@conductor.command()
def init():
"""Add your witness account."""
account = click.prompt('What is your witness account name?', type=str)
witness = get_witness(account)
if witness:
c = new_config()
c['witness']['name'] = account
c['witness']['url'] = witness['url']
c['props'] = witness['props']
set_config(c)
echo('Imported a witness %s from its existing settings.' % account)
else:
click.confirm('Witness %s does not exist. Would you like to create it?' % account, abort=True)
c = new_config()
c['witness']['name'] = account
c['witness']['url'] = click.prompt(
'What should be your witness URL?',
default=c['witness']['url'],
)
c['props']['account_creation_fee'] = click.prompt(
'How much should be account creation fee?',
default=c['props']['account_creation_fee'],
)
c['props']['maximum_block_size'] = click.prompt(
'What should be the maximum block size?',
default=c['props']['maximum_block_size'],
)
c['props']['sbd_interest_rate'] = click.prompt(
'What should be the SBD interest rate?',
default=c['props']['sbd_interest_rate'],
)
set_config(c)
witness_create(c)
echo('Witness %s created!' % account)
@conductor.command()
def update():
"""Update witness properties."""
c = get_config()
c['witness']['url'] = click.prompt(
'What should be your witness URL?',
default=c['witness']['url'],
)
c['props']['account_creation_fee'] = click.prompt(
'How much should be account creation fee?',
default=c['props']['account_creation_fee'],
)
c['props']['maximum_block_size'] = click.prompt(
'What should be the maximum block size?',
default=c['props']['maximum_block_size'],
)
c['props']['sbd_interest_rate'] = click.prompt(
'What should be the SBD interest rate?',
default=c['props']['sbd_interest_rate'],
)
# verify
output(c, '\nConfiguration')
click.confirm('Do you want to commit the updated values?', abort=True)
# update
set_config(c)
witness_set_props(c['witness']['url'], c['props'])
output('Witness %s Updated' % c['witness']['name'])
@conductor.command(name='key-gen')
def feed():
"""Generate a random signing key-pair."""
pk, pub = generate_signing_key()
t = PrettyTable(["Private (install on your witness node)",
"Public (publish with 'conductor enable' command)"])
t.align = "l"
t.add_row([pk, pub])
output(t, '')
# Operational Commands
# --------------------
@conductor.command()
@click.option('--sbd-peg/--no-sbd-peg', default=True)
def feed(sbd_peg):
"""Update Price Feeds."""
run_price_feeds(support_peg=sbd_peg)
@conductor.command()
@click.argument('signing_key')
def enable(signing_key):
"""Enable a witness, or change key."""
tx = enable_witness(signing_key) or 'This key is already set'
output(tx)
@conductor.command()
@click.confirmation_option(help='Are you sure you want to stop the witness?')
def disable():
"""Disable a witness."""
tx = disable_witness() or 'Witness already disabled'
output(tx)
@conductor.command(name='kill-switch')
@click.option('--disable-after', '-n', default=10)
@click.option('--second-key', '-k', default=None)
def kill_switch(disable_after, second_key):
"""Monitor for misses w/ disable."""
watchdog(disable_after, second_key)
# Status Commands
# ---------------
@conductor.command()
def tickers():
"""Print Tickers."""
with spinner():
m = Markets()
data = {
"BTC/USD": round(m.btc_usd(), 2),
"SBD/USD": round(m.sbd_usd_implied(), 3),
"STEEM/USD": round(m.steem_usd_implied(), 3),
}
echo(tabulate(data.items(), headers=['Symbol', 'Price'], numalign="right", tablefmt='orgtbl'))
@conductor.command(name='status')
def status():
"""Print basic witness info."""
with spinner():
is_enabled = is_witness_enabled()
signing_key = current_signing_key()
misses = total_missed()
t = PrettyTable(["Enabled", "Misses", "Key"])
t.align = "l"
t.add_row([is_enabled, misses, signing_key])
output(t, 'Status')
output(get_config(), 'Configuration')
|
Python
| 0.000006
|
@@ -3773,19 +3773,20 @@
default=
-Tru
+Fals
e)%0Adef f
|
4645f904d0f522d51148d9fde3f50da6a619c6a8
|
add forms widget factory beginnings
|
examples/djangowanted/wanted/forms.py
|
examples/djangowanted/wanted/forms.py
|
from django.forms import ModelForm
from django import forms
from wanted.models import *
class FlagForm(ModelForm):
item = forms.ModelChoiceField(queryset=Item.objects.all())
type = forms.ModelChoiceField(queryset=FlagType.objects.all())
value = forms.CharField(max_length=255)
class ItemForm(ModelForm):
class Meta:
model = Item
name = forms.CharField(max_length=50, label="Name")
short_description = forms.CharField(max_length=100, label="Description")
description = forms.CharField(max_length=1000)
price = forms.FloatField(label="Price")
def _is_valid(self):
res = ModelForm.is_valid(self)
if self.instance.id is None:
return res
for f in FlagType.objects.all():
fv = self.data[f.name]
d = {'item': self.instance.id, 'type': f.id, 'value': fv}
try:
fg = Flag.objects.get(item=self.instance.id, type=f.id)
except Flag.DoesNotExist:
continue
ff = FlagForm(d, instance=fg)
if not ff.is_valid():
res = False
self.errors[f.name] = ff.errors
return res
def delete(self, idx=None):
if idx is None:
instance = self.instance
else:
instance = Item.objects.get(id=idx)
for f in FlagType.objects.all():
try:
fg = Flag.objects.get(item=instance.id, type=f.id)
except Flag.DoesNotExist:
continue
fg.delete()
instance.delete()
def save(self):
res = ModelForm.save(self)
for f in FlagType.objects.all():
fv = self.data[f.name]
d = {'item': self.instance.id, 'type': f.id, 'value': fv}
try:
fg = Flag.objects.get(item=self.instance.id, type=f.id)
except Flag.DoesNotExist:
fg = Flag()
ff = FlagForm(d, instance=fg)
fv = ff.save()
setattr(res, f.name, fv)
return res
for f in FlagType.objects.all():
locals()[f.name] = forms.CharField(max_length=100, label=f.description)
#ItemForm.Meta.fields.append(str(f.name))
def test_item_form():
for idx in range(1,314):
try:
i = Item.objects.get(id=idx)
except Item.DoesNotExist:
i = Item()
i.id = idx
d = {'id': idx, 'name': 'fred %d' % (idx % 10), 'short_description': 'joe', 'description': 'longer', 'price': 20, 'vehicletype': 'a car', 'numdoors': 5}
f = ItemForm(d, instance=i)
if not f.is_valid():
for (e, k) in f.errors.items():
print e, k
it = f.save()
print it, it.id, it.price, it.vehicletype.id, it.vehicletype.value, it.numdoors.value
it.price = 25
if not f.is_valid():
for (e, k) in f.errors.items():
print e, k
it = f.save()
print it, it.id, it.price, it.vehicletype.id, it.vehicletype.value, it.numdoors.value
|
Python
| 0.000001
|
@@ -531,16 +531,33 @@
gth=1000
+, label=%22Details%22
)%0A pr
|
a441ed9b5ccd3eaf74c235a60ba5ab3771533db7
|
Add __str__ and __unicode__ to BlacklistedGuest
|
SigmaPi/PartyList/models.py
|
SigmaPi/PartyList/models.py
|
from django.db import models
from django.contrib.auth.models import User
from django import forms
from datetime import datetime
import editdistance
def timeStamped(fname, fmt='%Y-%m-%d_{fname}'):
"""
Utility function to add a timestamp to uploaded files.
"""
return datetime.now().strftime(fmt).format(fname=fname)
def partyjobspath(_, filename):
"""
Defines where party job information should be stored
"""
return "parties/partyjobs/" + timeStamped(filename)
class Party(models.Model):
"""
Model to represent a party.
"""
# NOTE: In the future, this path should be changed to be in protected file space so it is not
# accessible to the public.
name = models.CharField(max_length=100)
date = models.DateField()
guycount = models.IntegerField(default=0)
girlcount = models.IntegerField(default=0)
guy_delta = models.IntegerField(default=0)
girl_delta = models.IntegerField(default=0)
jobs = models.FileField(upload_to=partyjobspath, blank=True, null=True)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
def isPartyMode(self):
# Find out if we are in party mode
closedatetime = datetime(self.date.year, self.date.month, self.date.day, 20)
return closedatetime < datetime.now()
#Setup meta info about this model
class Meta:
verbose_name_plural = "Parties"
verbose_name = "Party"
permissions = (
("manage_parties", "Can manage Parties"),
)
class BlacklistedGuest(models.Model):
name = models.CharField(max_length=100, db_index=True)
details = models.TextField()
MAX_MATCH_EDIT_DISTANCE = 5
def check_match(self, to_check):
check_name = ''.join(c.lower() for c in to_check if not c.isspace())
bl_name = ''.join(c.lower() for c in self.name if not c.isspace())
edit_distance = editdistance.eval(check_name, bl_name)
return (
self
if edit_distance <= self.MAX_MATCH_EDIT_DISTANCE
else None
)
class Meta:
permissions = (
("manage_blacklist", "Can manage the blacklist"),
)
class Guest(models.Model):
"""
Model to represent a party guest
"""
name = models.CharField(max_length=100, db_index=True)
birthDate = models.DateField(blank=True,auto_now=True)
gender = models.CharField(max_length=100)
cardID = models.CharField(max_length=100, blank=True)
createdAt = models.DateTimeField(auto_now_add=True)
updatedAt = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
def __iter__(self):
"""return a ** iterator of field,value"""
for i in self._meta.get_all_field_names():
yield (i, getattr(self,i))
def __cmp__(self,other):
pass
#apparently django does not use this during the order_by query
#Setup meta info about this model
class Meta:
verbose_name_plural = "Guests"
verbose_name = "Guest"
class PartyGuest(models.Model):
"""
Model to represent a guest for a specific party.
"""
party = models.ForeignKey(Party, related_name="party_for_guest", default=1)
guest = models.ForeignKey(Guest, related_name="guest", default=1, db_index=True)
addedBy = models.ForeignKey(User, related_name="added_by", default=1)
createdAt = models.DateTimeField(auto_now_add=True, db_index=True)
signedIn = models.BooleanField(default=False)
everSignedIn = models.BooleanField(default=False)
timeFirstSignedIn = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.guest.name
def __str__(self):
return self.guest.name
def __iter__(self):
"""return a ** iterator of field,value"""
for i in self._meta.get_all_field_names():
yield (i, getattr(self,i))
#Setup meta info about this model
class Meta:
verbose_name_plural = "Party Guests"
verbose_name = "Party Guest"
permissions = (("can_destroy_any_party_guest", "Can Remove Any Party Guest"),)
def toJSON(self):
data = {}
data['id'] = self.id
data['name'] = self.guest.name
data['addedByName'] = self.addedBy.first_name + " " + self.addedBy.last_name
data['addedByID'] = self.addedBy.id
data['signedIn'] = self.signedIn
return data
def check_blacklisted(self):
matches = (
blacklisted.check_match(self.guest.name)
for blacklisted in BlacklistedGuest.objects.all()
)
return matches.next() if matches else None
|
Python
| 0.999977
|
@@ -1603,24 +1603,47 @@
els.Model):%0A
+ #TODO- DOC_UPDATE!%0A
name = m
@@ -1756,16 +1756,118 @@
CE = 5%0A%0A
+ def __unicode__(self):%0A return self.name%0A%0A def __str__(self):%0A return self.name%0A%0A
def
|
abffd85d6038494eea93b277b2d25af816dc2b78
|
Enable bidi tests for Firefox 86+
|
py/test/selenium/webdriver/common/bidi_tests.py
|
py/test/selenium/webdriver/common/bidi_tests.py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import pytest
@pytest.mark.xfail_safari
@pytest.mark.xfail_firefox(reason="This is not in release firefox yet")
@pytest.mark.xfail_remote
async def test_check_console_messages(driver, pages):
pages.load("javascriptPage.html")
from selenium.webdriver.common.bidi.console import Console
async with driver.log.add_listener(Console.ALL) as messages:
driver.execute_script("console.log('I love cheese')")
assert messages["message"] == "I love cheese"
@pytest.mark.xfail_safari
@pytest.mark.xfail_firefox(reason="This is not in release firefox yet")
@pytest.mark.xfail_remote
async def test_check_error_console_messages(driver, pages):
pages.load("javascriptPage.html")
from selenium.webdriver.common.bidi.console import Console
async with driver.log.add_listener(Console.ERROR) as messages:
driver.execute_script("console.error(\"I don't cheese\")")
driver.execute_script("console.log('I love cheese')")
assert messages["message"] == "I don't cheese"
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
async def test_collect_js_exceptions(driver, pages):
pages.load("javascriptPage.html")
async with driver.log.add_js_error_listener() as exceptions:
driver.find_element(By.ID, "throwing-mouseover").click()
assert exceptions is not None
assert exceptions.exception_details.stack_trace.call_frames[0].function_name == "onmouseover"
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
async def test_collect_log_mutations(driver, pages):
async with driver.log.mutation_events() as event:
pages.load("dynamic.html")
driver.find_element(By.ID, "reveal").click()
WebDriverWait(driver, 5, ignored_exceptions=InvalidSelectorException)\
.until(EC.visibility_of(driver.find_element(By.ID, "revealed")))
assert event["attribute_name"] == "style"
assert event["current_value"] == ""
assert event["old_value"] == "display:none;"
|
Python
| 0
|
@@ -1056,80 +1056,8 @@
ari%0A
-@pytest.mark.xfail_firefox(reason=%22This is not in release firefox yet%22)%0A
@pyt
@@ -1442,80 +1442,8 @@
ari%0A
-@pytest.mark.xfail_firefox(reason=%22This is not in release firefox yet%22)%0A
@pyt
|
e0681bcee248e409dcec9f0918a8cd8101cb1c0d
|
Set terminal width for Vyatta Driver
|
netmiko/vyos/vyos_ssh.py
|
netmiko/vyos/vyos_ssh.py
|
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
class VyOSSSH(CiscoSSHConnection):
"""Implement methods for interacting with VyOS network devices."""
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read()
self.set_base_prompt()
self.disable_paging(command="set terminal length 0")
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
def check_enable_mode(self, *args, **kwargs):
"""No enable mode on VyOS."""
pass
def enable(self, *args, **kwargs):
"""No enable mode on VyOS."""
pass
def exit_enable_mode(self, *args, **kwargs):
"""No enable mode on VyOS."""
pass
def check_config_mode(self, check_string="#"):
"""Checks if the device is in configuration mode"""
return super().check_config_mode(check_string=check_string)
def config_mode(self, config_command="configure", pattern=r"[edit]"):
"""Enter configuration mode."""
return super().config_mode(config_command=config_command, pattern=pattern)
def exit_config_mode(self, exit_config="exit", pattern=r"exit"):
"""Exit configuration mode"""
output = ""
if self.check_config_mode():
output = self.send_command_timing(
exit_config, strip_prompt=False, strip_command=False
)
if "Cannot exit: configuration modified" in output:
output += self.send_command_timing(
"exit discard", strip_prompt=False, strip_command=False
)
if self.check_config_mode():
raise ValueError("Failed to exit configuration mode")
return output
def commit(self, comment="", delay_factor=0.1):
"""
Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
default:
command_string = commit
comment:
command_string = commit comment <comment>
"""
delay_factor = self.select_delay_factor(delay_factor)
error_marker = ["Failed to generate committed config", "Commit failed"]
command_string = "commit"
if comment:
command_string += f' comment "{comment}"'
output = self.config_mode()
output += self.send_command_expect(
command_string,
strip_prompt=False,
strip_command=False,
delay_factor=delay_factor,
)
if any(x in output for x in error_marker):
raise ValueError(f"Commit failed with following errors:\n\n{output}")
return output
def set_base_prompt(
self, pri_prompt_terminator="$", alt_prompt_terminator="#", delay_factor=1
):
"""Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output."""
prompt = super().set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor,
)
# Set prompt to user@hostname (remove two additional characters)
self.base_prompt = prompt[:-2].strip()
return self.base_prompt
def send_config_set(self, config_commands=None, exit_config_mode=False, **kwargs):
"""Remain in configuration mode."""
return super().send_config_set(
config_commands=config_commands, exit_config_mode=exit_config_mode, **kwargs
)
def save_config(self, *args, **kwargs):
"""Not Implemented"""
raise NotImplementedError
|
Python
| 0
|
@@ -413,16 +413,82 @@
gth 0%22)%0A
+ self.set_terminal_width(command=%22set terminal width 512%22)%0A
|
f2c9a930a3f9f8dc0b7904f1d490b2665979d768
|
Update forward compatibility horizon to 2020-09-10
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 9, 9)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1444,17 +1444,18 @@
020, 9,
-9
+10
)%0A_FORWA
|
420807ff32a9c6327e233a5512204fe53083603c
|
Fix bug.
|
SigmaPi/Standards/models.py
|
SigmaPi/Standards/models.py
|
from django.db import models
from django.contrib.auth.models import User
class Bone(models.Model):
"""
Model for a single bone that is given to a User
"""
bonee = models.ForeignKey(User, related_name='+')
boner = models.ForeignKey(User, related_name='+')
reason = models.TextField()
dateReceived = models.DateField()
expirationDate = models.DateField()
value = models.PositiveIntegerField(default=0)
def __unicode__(self):
return self.reason
def __str__(self):
return self.reason
class Meta:
verbose_name = "Bone"
verbose_name_plural = "Bones"
class BoneChangeRecord(models.Model):
"""
Model for a bone change history record
"""
bone = models.ForeignKey(Bone)
modifier = models.ForeignKey(User, related_name='+')
dateChangeMade = models.DateTimeField()
previousReason = models.TextField()
newReason = models.TextField()
previousExpirationDate = models.DateField()
newExpirationDate = models.DateField()
def __unicode__(self):
return self.bone
def __str__(self):
return self.bone
class Meta:
verbose_name = "Bone Change Record"
verbose_name_plural = "Bone Change Records"
class Probation(models.Model):
"""
Model for a probation punishment that a user will receive.
"""
recipient = models.ForeignKey(User, related_name='+')
giver = models.ForeignKey(User, related_name='+')
dateReceived = models.DateField()
expirationDate = models.DateField()
def __unicode__(self):
return self.recipient
def __str__(self):
return self.recipient
class Meta:
verbose_name = "Probation"
verbose_name_plural = "Probations"
class PiPointsRecord(models.Model):
"""
Model for a pipoint record for a user
"""
brother = models.OneToOneField(User, primary_key=True)
jobsTaken = models.PositiveIntegerField(default=0)
points = models.PositiveIntegerField(default=0)
def __unicode__(self):
return self.user
def __str__(self):
return self.user
class Meta:
verbose_name = "Pi Points Record"
verbose_name_plural = "Pi Points Records"
class PiPointsChangeRecord(models.Model):
"""
Model for a PiPoint change history record
"""
brother = models.ForeignKey(PiPointsRecord)
modifier = models.ForeignKey(User, related_name='+')
dateChanged = models.DateTimeField()
oldValue = models.PositiveIntegerField(default=0)
newValue = models.PositiveIntegerField(default=0)
def __unicode__(self):
return self.dateChanged
def __str__(self):
return self.dateChanged
class Meta:
verbose_name = "Pi Points Change Record"
verbose_name_plural = "Pi Points Change Records"
class PiPointsRequest(models.Model):
"""
Model for a request for pi points
"""
REASON_CHOICES = (
('P', 'Pre/Post Party Job'),
('F', 'First Shift Party Job'),
('S', 'Second Shift Party Job'),
('H', 'House Job'),
('M', 'Meal Crew')
)
REASON_POINTS = { 'P': 10, 'F': 30, 'S': 40, 'H': 20, 'M': 20,}
requester = models.ForeignKey(User, related_name='+')
date = models.DateTimeField()
reason = models.TextField(max_length=1, choices=REASON_CHOICES)
witness = models.CharField(max_length=100, default="None")
def pointsForReason(self, reason):
return self.REASON_POINTS[reason]
def __unicode__(self):
return self.requester
def __str__(self):
return self.requester
class Meta:
verbose_name = "Pi Points Request"
verbose_name_plural = "Pi Points Request"
class JobRequest(models.Model):
REASON_CHOICES = (
('P', 'Pre/Post Party Job (10)'),
('F', 'First Shift Party Job (30)'),
('S', 'Second Shift Party Job (40)'),
('H', 'House Job (20)'),
('M', 'Meal Crew (20)')
)
REASON_POINTS = { 'P': 10, 'F': 30, 'S': 40, 'H': 20, 'M': 20,}
requester = models.ForeignKey(User, related_name='+')
date = models.DateTimeField()
job = models.TextField(max_length=1, choices=REASON_CHOICES)
details = models.TextField()
takingJob = models.BooleanField(default=False)
def pointsForReason(self, reason):
return self.REASON_POINTS[job]
def __unicode__(self):
return self.requester
def __str__(self):
return self.requester
class Meta:
verbose_name = "Job Request"
verbose_name_plural = "Job Requests"
class SummonsRequest(models.Model):
"""
Model for a request to summons a user.
"""
summoner = models.ForeignKey(User, related_name='+')
summonee = models.ForeignKey(User, related_name='+')
reason = models.TextField()
dateRequestSent = models.DateField()
def __unicode__(self):
return summoner + " wants to summon " + summonee + " for " + self.reason
def __str__(self):
return summoner + " wants to summon " + summonee + " for " + self.reason
class Meta:
verbose_name = "Summons Request"
verbose_name_plural = "Summons Requests"
class Summons(models.Model):
"""
Model for a summons that is given to a User.
"""
summoner = models.ForeignKey(User, related_name='+')
summonee = models.ForeignKey(User, related_name='+')
approver = models.ForeignKey(User, related_name='+')
reason = models.TextField()
dateSummonsSent = models.DateField()
def __unicode__(self):
return summoner + " has summoned " + summonee + " for " + self.reason
def __str__(self):
return summoner + " has summoned " + summonee + " for " + self.reason
class Meta:
verbose_name = "Summons"
verbose_name_plural = "Summonses"
class SummonsHistoryRecord(models.Model):
"""
Model for a summons history record.
"""
summoner = models.ForeignKey(User, related_name='+')
summonee = models.ForeignKey(User, related_name='+')
details = models.TextField()
resultReason = models.TextField()
date = models.DateField()
hasBone = models.BooleanField(default=False)
boneID = models.PositiveIntegerField()
|
Python
| 0
|
@@ -1854,20 +1854,32 @@
rn self.
-user
+brother.username
%0A%0A%09def _
@@ -1906,20 +1906,32 @@
rn self.
-user
+brother.username
%0A%0A%09class
|
9722016a0117682fa7d0d5599a8dc2f1a75f7c6a
|
remove softmax / centroidloss
|
pyannote/audio/embedding/approaches/__init__.py
|
pyannote/audio/embedding/approaches/__init__.py
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2018 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from .triplet_loss import TripletLoss
from .centroid_loss import CentroidLoss
from .softmax import Softmax
# from .agg_triplet_loss import AggTripletLoss
|
Python
| 0.00003
|
@@ -1230,24 +1230,26 @@
TripletLoss%0A
+#
from .centro
@@ -1276,16 +1276,18 @@
oidLoss%0A
+#
from .so
@@ -1311,51 +1311,4 @@
max%0A
-# from .agg_triplet_loss import AggTripletLoss%0A
|
57a1e59f034b0edbabaa76376ba6475d6e4d0297
|
Add code to work out the Julian representation of a date.
|
qual/calendars/main.py
|
qual/calendars/main.py
|
from datetime import date, timedelta
from qual.helpers import ordinal, month_string
from date import DateWithCalendar, InvalidDate
from base import Calendar
class ProlepticGregorianCalendar(Calendar):
display_name = "Proleptic Gregorian Calendar"
def date(self, year, month, day):
try:
d = date(year, month, day)
except ValueError as e:
raise InvalidDate(e.message)
return self.from_date(d)
@staticmethod
def date_display_string(d):
return "%s %s %s" % (ordinal(d.day), month_string(d.month), d.year)
class JulianCalendar(Calendar):
display_name = "Julian Calendar"
@staticmethod
def is_julian_leap_year(y):
return (y % 4) == 0
@staticmethod
def is_gregorian_leap_year(y):
if (y % 400) == 0:
return True
if (y % 100) == 0:
return False
if (y % 4) == 0:
return True
return False
def number_of_extra_leap_days(self, end, start=date(200, 3, 1)):
count = 0
for x in range(start.year, end.year + 1, 100):
if not self.is_gregorian_leap_year(x):
leap_day = date(x, 2, 28)
if start < leap_day < end:
count = count + 1
return count
def date(self, year, month, day):
if day == 29 and month == 2 and self.is_julian_leap_year(year):
d = date(year, 2, 28)
offset = self.number_of_extra_leap_days(d) + 1
else:
d = date(year, month, day)
offset = self.number_of_extra_leap_days(d)
d = d + timedelta(days=offset)
return self.from_date(d)
|
Python
| 0.000001
|
@@ -947,24 +947,431 @@
False%0A%0A
+@staticmethod%0A def date_display_string(d):%0A year, month, day = JulianCalendar.julian_representation(d)%0A return %22%25s %25s %25s%22 %25 (ordinal(day), month_string(month), year)%0A%0A @staticmethod%0A def julian_representation(d):%0A offset = JulianCalendar.number_of_extra_leap_days(d)%0A d = d - timedelta(days=offset)%0A return (d.year, d.month, d.day)%0A %0A @staticmethod%0A
def number_o
@@ -1388,22 +1388,16 @@
ap_days(
-self,
end, sta
@@ -1509,20 +1509,30 @@
if not
-self
+JulianCalendar
.is_greg
|
692d35dfd92c9a3c4697a74bb45fe8963d7e39f3
|
Update forward compatibility horizon to 2022-02-01
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 1, 31)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1335,12 +1335,11 @@
22,
-1
+2
,
-3
1)%0A_
|
82f563d7ed8dc53d00edf361af1f607f9a89b918
|
Add the rv32mi tests.
|
Simulation/core/conftest.py
|
Simulation/core/conftest.py
|
#!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<angelterrones@gmail.com>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
def auto_int(value):
return int(value, 0)
def pytest_addoption(parser):
parser.addoption('--hex_file', type=str, action='append', default=[],
help='Memory image in HEX format')
parser.addoption('--all', action='store_true', default=False, help='Run all RV32 tests')
parser.addoption('--vcd', action='store_true', default=False, help='Generate VCD files')
def pytest_generate_tests(metafunc):
if 'hex_file' in metafunc.fixturenames:
if metafunc.config.option.all:
list_hex = glob.glob("Simulation/tests/rv32ui-*.hex")
metafunc.parametrize('hex_file', list_hex)
else:
metafunc.parametrize('hex_file', metafunc.config.option.hex_file)
if 'vcd' in metafunc.fixturenames:
metafunc.parametrize('vcd', [metafunc.config.option.vcd])
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
Python
| 0
|
@@ -1688,16 +1688,95 @@
st_hex =
+ glob.glob(%22Simulation/tests/rv32mi-p-*.hex%22)%0A list_hex = list_hex +
glob.gl
|
55fbe047e091669d005a73ebb333392954186ace
|
Compact test false negative fix
|
jpp/cli_test/cli_test.py
|
jpp/cli_test/cli_test.py
|
import os
import shutil
import subprocess
import unittest
from collections import namedtuple
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
class TestCli(unittest.TestCase):
TMP_TEST_FILES = os.path.join(CURR_DIR, '__tmp__')
@classmethod
def setUpClass(cls):
FileDef = namedtuple('FileDef', ('name', 'contents', 'sub_path'))
required_files = (
FileDef('compact_test.jpp', '{\n"many": 1, \n"lines": 2\n}', ''),
FileDef('main.jpp', '', ''),
FileDef('other.jpp', '', ''),
FileDef('user_input_test.jpp', '{"foo": user_input["bar"]}', ''),
FileDef('sub_other.jpp', '', 'sub_path'),
)
os.mkdir(cls.TMP_TEST_FILES)
for file_def in required_files:
if file_def.sub_path:
os.mkdir(os.path.join(cls.TMP_TEST_FILES, file_def.sub_path))
file_path = os.path.join(cls.TMP_TEST_FILES, file_def.sub_path, file_def.name)
else:
file_path = os.path.join(cls.TMP_TEST_FILES, file_def.name)
with open(file_path, 'w') as fp:
fp.write(file_def.contents)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.TMP_TEST_FILES)
# Naming makes sure this test is run first. If --failfast option is specified the rest of the tests will no run
def test00_installation(self):
try:
subprocess.call(['jpp', '--version'])
except FileNotFoundError:
installed = False
else:
installed = True
if not installed:
self.fail('jpp not installed. Please run "pip install jpp" and try again.')
def test_help(self):
help_message = subprocess.check_output(['jpp', '-h'])
self.assertRegex(help_message, '^usage:')
def test_no_args(self):
subprocess.check_call(['jpp'], cwd=self.TMP_TEST_FILES)
def test_parse_specific_file(self):
subprocess.check_call(['jpp', 'other.jpp'], cwd=self.TMP_TEST_FILES)
def test_path_option(self):
subprocess.check_call(['jpp', '--path', os.path.join(CURR_DIR, 'sub_path'), 'sub_other.jpp'],
cwd=self.TMP_TEST_FILES)
def test_compact_path(self):
cmd_out = subprocess.check_output(['jpp', '--compact-print', 'compact_test.jpp'], cwd=self.TMP_TEST_FILES)
# Make sure output is a one-liner
self.assertEqual(cmd_out.count(b'\n'), 0)
def test_user_input(self):
subprocess.check_call(['jpp', '--user-input', '{"bar": "baz"}', 'user_input_test.jpp'], cwd=self.TMP_TEST_FILES)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
Python
| 0.999412
|
@@ -2401,16 +2401,24 @@
ne-liner
+ at most
%0A
@@ -2429,16 +2429,20 @@
f.assert
+Less
Equal(cm
@@ -2461,17 +2461,17 @@
b'%5Cn'),
-0
+1
)%0A%0A d
|
03baff9626f1c163e99e53eb9a9cf4f981c79121
|
Update forward compatibility horizon to 2020-09-15
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 9, 14)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1449,9 +1449,9 @@
9, 1
-4
+5
)%0A_F
|
ab5c36e8d50eacf7c13234c75e17b606c0d97758
|
convert HTTP request arguments to lowercase
|
webserver.py
|
webserver.py
|
import threading
__author__ = 'bawki'
from http.server import BaseHTTPRequestHandler, HTTPServer
import socket
import multiprocessing
import json
from database import CatDb
class CatHandler(BaseHTTPRequestHandler):
def servePingData(self, arguments):
self.sendSuccessHeader()
print('servePingData: arguments->', arguments)
self.wfile.write(bytes(self.statsToJson(), "UTF-8"))
servlets = [("pingdata", servePingData), ]
def __init__(self, request, client_address, server):
self.db = CatDb()
self.db.connect()
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
print(multiprocessing.current_process().name, 'Get request received. Path: ', self.path)
path = self.path
req = path[1:].split("/")
print('parsed req: ', req)
function = [item[1] for item in self.servlets if item[0] == req[0]]
if len(function) == 1:
function[0](self, req)
else:
self.sendNotFoundHeader()
return
def statsToJson(self):
self.db.c.execute("SELECT * FROM 'pingdata'")
results = self.db.c.fetchall()
data = []
for result in results:
rdata = {
'date': result[0],
'thisIP': result[1],
'pktsSend': result[2],
'pktsRcvd': result[3],
'minTime': result[4],
'maxTime': result[5],
'totTime': result[6],
'fracLoss': result[7]
}
data.append(rdata)
return json.dumps(data, indent=4)
def sendSuccessHeader(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
# TODO: set to production url after testing!
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
def sendNotFoundHeader(self):
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
class CatServer(HTTPServer):
def __init__(self):
try:
self = HTTPServer(("::", 8042), CatHandler, False)
self.address_family = socket.AF_INET6
self.socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.socket.setsockopt(41, socket.IPV6_V6ONLY, 0)
self.server_bind()
self.server_activate()
print("Started webserver on: ", self.socket.getsockname())
self.serve_forever()
except KeyboardInterrupt:
self.server_close()
if __name__ == '__main__':
c = CatServer()
|
Python
| 0.999999
|
@@ -805,16 +805,24 @@
ath%5B1:%5D.
+lower().
split(%22/
|
b193f9ccabb1093db8a803f7994adb14a85caf5a
|
Update __init__.py
|
djconnectwise/__init__.py
|
djconnectwise/__init__.py
|
# -*- coding: utf-8 -*-
VERSION = (0, 3, 31, 'final')
# pragma: no cover
if VERSION[-1] != "final":
__version__ = '.'.join(map(str, VERSION))
else:
# pragma: no cover
__version__ = '.'.join(map(str, VERSION[:-1]))
default_app_config = 'djconnectwise.apps.DjangoConnectwiseConfig'
|
Python
| 0.000005
|
@@ -35,17 +35,17 @@
(0, 3, 3
-1
+2
, 'final
|
062924016bc5be483bbd477bc5e2aaaa37ede66b
|
Update forward compatibility horizon to 2021-03-21
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 3, 20)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1445,17 +1445,17 @@
21, 3, 2
-0
+1
)%0A_FORWA
|
0e9d3f5c2bae999dc71c8f7bb62e380faac5dec7
|
improve example
|
examples/widgets/tabbed_panel_test.py
|
examples/widgets/tabbed_panel_test.py
|
'''
TabbedPannel
======
Test of the widget TabbedPannel.
'''
from kivy.app import App
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.tabbedpannel import TabbedPannel
from kivy.properties import ObjectProperty
from kivy.lang import Builder
Builder.load_string('''
<cut_copy_paste>
size_hint: (None, None)
size: (350, 250)
pos_hint: {'center_x': .25, 'y': .55}
tab_pos: 'top_left'
tab_height: 20
tab_width: 70
default_tab_text: 'tab1'
default_content: cut
FloatLayout:
BubbleButton:
id: cut
pos:self.parent.pos
size: self.parent.size
text: 'Cut'
Image:
id: copy
color: 1, 1, 1, 0
pos:self.parent.pos
size: self.parent.size
source: 'data/images/defaulttheme-0.png'
Image:
id: paste
color: 1, 1, 1, 0
pos:self.parent.pos
size: self.parent.size
source: 'data/images/image-loading.gif'
Tab_Heading:
text: 'tab2'
on_release: root.change_tab_contents(copy)
Tab_Heading:
text: 'tab3'
on_release: root.change_tab_contents(paste)
#Tab_Heading:
# text: 'tab4'
# on_release: root.clear_widgets()
#Tab_Heading:
# text: 'tab5'
# on_release: root.clear_widgets()
''')
class cut_copy_paste(TabbedPannel):
default_content = ObjectProperty(None)
def on_default_tab(self, *l):
self.change_tab_contents(self.default_content)
def change_tab_contents(self, *l):
anim = Animation(color=(1, 1, 1, 0), d =.24, t = 'in_back')
def start_anim(_anim, *lt):
_anim.start(l[0])
def _on_complete(*lt):
if l[0].parent:
l[0].parent.remove_widget(l[0])
self.clear_widgets()
self.add_widget(l[0])
anim = Animation(color = (1, 1, 1, 1), d =.23, t = 'in_quad')
start_anim(anim)
anim.bind(on_complete = _on_complete)
start_anim(anim)
class TabShowcase(FloatLayout):
def __init__(self, **kwargs):
super(TabShowcase, self).__init__(**kwargs)
self.but = Button(text='Press to show Tabbed Pannel')
self.but.bind(on_release=self.show_tab)
self.add_widget(self.but)
def show_tab(self, *l):
if not hasattr(self, 'tab'):
self.tab = tab = cut_copy_paste()
self.add_widget(tab)
else:
values = ('left_top', 'left_mid', 'left_bottom', 'top_left',
'top_mid', 'top_right', 'right_top', 'right_mid',
'right_bottom', 'bottom_left', 'bottom_mid', 'bottom_right')
index = values.index(self.tab.tab_pos)
self.tab.tab_pos = values[(index + 1) % len(values)]
self.but.text = 'Tabs in\'%s\' position,\n press to change to next pos'\
%self.tab.tab_pos
class TestTabApp(App):
def build(self):
return TabShowcase()
if __name__ in ('__main__', '__android__'):
TestTabApp().run()
|
Python
| 0.000031
|
@@ -625,20 +625,16 @@
B
-ubbleButton:
+oxLayout
%0A
@@ -733,18 +733,143 @@
-text: 'Cut
+padding: 3%0A TextInput:%0A text: 'everything is relative!'%0A BubbleButton:%0A text:'dummy
'%0A
@@ -1901,42 +1901,172 @@
im,
-*lt):%0A _anim.start(l%5B0%5D
+child, in_complete, *lt):%0A if hasattr(child, 'color'):%0A _anim.start(child)%0A elif not in_complete:%0A _on_complete(
)%0A%0A
@@ -2339,16 +2339,28 @@
nim(anim
+, l%5B0%5D, True
)%0A%0A
@@ -2396,24 +2396,24 @@
n_complete)%0A
-
star
@@ -2423,16 +2423,49 @@
nim(anim
+, self.content.children%5B0%5D, False
)%0A%0A%0Aclas
|
b8e1d2419a1dbe065e1828599e60867bc845f0e3
|
Add some docs to nm root package
|
neuralmonkey/__init__.py
|
neuralmonkey/__init__.py
|
Python
| 0
|
@@ -0,0 +1,69 @@
+%22%22%22The neuralmonkey package is the root package of this project.%22%22%22%0D%0A
|
|
aa741b5a2b18a7df402325b53476eba36e448b40
|
Update to 0.0.49
|
djconnectwise/__init__.py
|
djconnectwise/__init__.py
|
# -*- coding: utf-8 -*-
VERSION = (0, 0, 48, 'alpha')
# pragma: no cover
if VERSION[-1] != "final":
__version__ = '.'.join(map(str, VERSION))
else:
# pragma: no cover
__version__ = '.'.join(map(str, VERSION[:-1]))
|
Python
| 0.000001
|
@@ -35,17 +35,17 @@
(0, 0, 4
-8
+9
, 'alpha
|
3726ed139ff990899a582325950a5721cc8c49f3
|
Update forward compatibility horizon to 2022-07-23
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 7, 22)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://www.tensorflow.org/guide/versions#backward_and_partial_forward_compatibility).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1335,17 +1335,17 @@
22, 7, 2
-2
+3
)%0A_FORWA
|
818de1beb4dc2d5986e1affa661af06b7c9df37c
|
Update forward compatibility horizon to 2019-07-27
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 7, 26)
_FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN = False
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
def _get_forward_compatibility_date():
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days is not None and not _FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN:
return date + datetime.timedelta(days=int(delta_days))
else:
return date
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibiltiy, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _get_forward_compatibility_date() > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
global _FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN = True
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN = False
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
Python
| 0
|
@@ -1387,9 +1387,9 @@
7, 2
-6
+7
)%0A%0A_
|
6c4e55e05cd719a52c6ddba376cada5142a03e9c
|
Update forward compatibility horizon to 2022-09-28
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 9, 27)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://www.tensorflow.org/guide/versions#backward_and_partial_forward_compatibility).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1339,9 +1339,9 @@
9, 2
-7
+8
)%0A_F
|
90df282f121422049b65efd09378d48ff080cf1e
|
Update forward compatibility horizon to 2019-07-15
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 7, 14)
_FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN = False
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
def _get_forward_compatibility_date():
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days is not None and not _FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN:
return date + datetime.timedelta(days=int(delta_days))
else:
return date
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibiltiy, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _get_forward_compatibility_date() > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager that temporarily overrides the forward-compatibility horizon.

  Within the ``with`` block, `forward_compatible` compares against
  ``datetime.date(year, month, day)`` instead of the library default, and the
  module-level "overridden" flag is set. This lets new graph features be
  unit-tested before their forward-compatibility window expires.

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Yields:
    Nothing.
  """
  global _FORWARD_COMPATIBILITY_HORIZON
  global _FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN
  saved_horizon = _FORWARD_COMPATIBILITY_HORIZON
  try:
    _FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN = True
    _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
    yield
  finally:
    # Restore both the flag and the previous horizon, even on error.
    _FORWARD_COMPATIBILITY_HORIZON_OVERRIDDEN = False
    _FORWARD_COMPATIBILITY_HORIZON = saved_horizon
|
Python
| 0
|
@@ -1387,9 +1387,9 @@
7, 1
-4
+5
)%0A%0A_
|
ef8b8fd6b6446b00334a80809a65e51ab6f78dff
|
Create a method for getting a list of the items in a particular config section.
|
quickrelease/config.py
|
quickrelease/config.py
|
import ConfigParser
import os
from quickrelease import constants
from quickrelease.exception import ReleaseFrameworkError
class ConfigSpecError(ReleaseFrameworkError):
    """Raised for configuration-spec problems (missing options, interpolation
    failures, bad coercion types)."""

    # Error codes passed as the `details` argument by ConfigSpec methods.
    NO_OPTION_ERROR = 0
    INTERPOLATION_MISSING_OPTION_ERROR = 1
    INTERPOLATION_SYNTAX_ERROR = 2
    COERCION_TYPE_ERROR = 3

    def __init__(self, errorStr, details=None):
        ReleaseFrameworkError.__init__(self, errorStr, details)

    def __str__(self):
        # Prefix the base error text so log output identifies the subsystem.
        return "ConfigSpec Error: " + ReleaseFrameworkError.__str__(self)
class ConfigSpec:
    """A release configuration file wrapper around ConfigParser.

    Tracks a "current" section so that Get() reads from it, and offers
    typed (bool/int/float/list) and interpolated access to option values.
    """

    # Section used when the caller does not name one explicitly.
    DEFAULT_STARTING_SECTION = 'DEFAULT'
    # Delimiter for compound section names, e.g. "partner:<name>".
    CONFIG_SECTION_DELIMETER = ':'

    @staticmethod
    def GetConstant(name):
        # Environment variables override built-in constants; some env values
        # are run through a registered conversion handler first.
        value = os.getenv(name)
        if value is not None:
            if name in constants.CONSTANTS_FROM_ENV_HANDLERS:
                return constants.CONSTANTS_FROM_ENV_HANDLERS[name](value)
            else:
                return value
        if name in constants.QUICKRELEASE_CONSTANTS:
            return constants.QUICKRELEASE_CONSTANTS[name]
        raise ConfigSpecError("Undefined constant '%s'" % (name))

    @staticmethod
    def GetDefinedConstants():
        # Names of the built-in constants only (env-only values not listed).
        return constants.QUICKRELEASE_CONSTANTS.keys()

    def __init__(self, configFile, rootDir=os.getcwd(),
                 section=DEFAULT_STARTING_SECTION):
        """Parse configFile and position the spec at the given section.

        Raises ConfigSpecError for a missing/invalid file, a parse error,
        or an unknown starting section.
        """
        # NOTE(review): the rootDir default os.getcwd() is evaluated once,
        # when the class body executes, not on every call.
        if configFile is None:
            raise ConfigSpecError("No config file specified.")
        elif not os.path.isfile(configFile):
            raise ConfigSpecError("Invalid config file specified.")

        self.configSpec = ConfigParser.SafeConfigParser()
        self.rootDirectory = rootDir
        self.currentSection = section
        self.defaultSection = section

        try:
            self.configSpec.read(configFile)
        except ConfigParser.ParsingError, ex:
            raise ConfigSpecError(str(ex))

        # DEFAULT always exists; any other starting section must be present.
        if section != ConfigSpec.DEFAULT_STARTING_SECTION:
            if self.GetSection() not in self.GetSectionList():
                raise ConfigSpecError("Invalid initial section '%s'" %
                                      (self.GetSection()))

    def GetRootDir(self):
        """Root directory this release config operates from."""
        return self.rootDirectory

    def GetRawConfig(self):
        """The underlying SafeConfigParser instance."""
        return self.configSpec

    def GetSectionList(self):
        """All section names defined in the config file."""
        return self.configSpec.sections()

    def GetSection(self):
        """Name of the section Get() currently reads from."""
        return self.currentSection

    def GetDefaultSection(self):
        """The section this spec was initialized with."""
        return self.defaultSection

    def SetSection(self, newSection):
        """Switch the current section, validating that it exists."""
        if self.GetSection() == newSection:
            return
        if (newSection.lower() != 'default' and
            (not self.configSpec.has_section(newSection))):
            raise ConfigSpecError("Non-existent config spec section: %s" %
                                  (newSection), 'INVALID_SECTION')
        self.currentSection = newSection

    def SetPartnerSection(self, partner):
        """Switch the current section to the named partner's section."""
        if not self.ValidPartner(partner):
            raise ConfigSpecError("Invalid/unknown partner: %s" % (partner))
        self.SetSection(self._GetPartnerSectionName(partner))

    @staticmethod
    def _GetPartnerSectionName(partnerName):
        # e.g. "partner:acme"
        return 'partner' + ConfigSpec.CONFIG_SECTION_DELIMETER + partnerName

    @staticmethod
    def _GetDeliverableSectionName(delivName):
        # e.g. "deliverable:installer"
        return 'deliverable' + ConfigSpec.CONFIG_SECTION_DELIMETER + delivName

    def ValidDeliverable(self, deliverable):
        """True if a "deliverable:<name>" section exists for this name."""
        return (self._GetDeliverableSectionName(deliverable) in
                self.GetSectionList())

    def ValidPartner(self, partner):
        """True if a "partner:<name>" section exists for this name."""
        return self._GetPartnerSectionName(partner) in self.GetSectionList()

    def PartnerGet(self, partner, name, coercion=None, interpolation=()):
        """Get() an option out of the named partner's section."""
        return self.SectionGet(self._GetPartnerSectionName(partner),
                               name,
                               coercion,
                               interpolation)

    def SectionGet(self, section, name, coercion=None, interpOverrides=()):
        """Get() an option from an arbitrary section, then restore the
        previously current section (even if the lookup raised)."""
        origSection = self.GetSection()
        self.SetSection(section)
        try:
            value = self.Get(name, coercion, interpOverrides)
        except ConfigSpecError, ex:
            raise ex
        finally:
            self.SetSection(origSection)
        return value

    def Get(self, name, coercion=None, interpOverrides=()):
        """Fetch option *name* from the current section.

        coercion selects the return type (bool/int/float/list, or None for a
        plain string); interpOverrides supplies extra interpolation values,
        or None to return the raw, uninterpolated value.
        """
        getRawValues = False
        overrides = None

        if interpOverrides is None:
            # None disables ConfigParser interpolation entirely.
            getRawValues = True
        else:
            try:
                # Attempt to convert our argument to a tuple for type-checking
                # purposes.
                overrides = tuple(interpOverrides)
            except TypeError:
                raise ConfigSpecError("Invalid interpolation overrides "
                                      "specified; must be convertable to a tuple.",
                                      ConfigSpecError.COERCION_TYPE_ERROR)

        # NOTE(review): overrides is passed as ConfigParser's "vars" argument;
        # confirm a tuple (rather than a dict) is accepted by SafeConfigParser.
        try:
            if coercion is bool:
                return self.configSpec.getboolean(self.currentSection, name)
            elif coercion is int:
                return self.configSpec.getint(self.currentSection, name)
            elif coercion is float:
                return self.configSpec.getfloat(self.currentSection, name)
            elif coercion is list:
                # A "list" is a whitespace-separated option value.
                return self.configSpec.get(self.currentSection, name,
                                           getRawValues, overrides).split()
            elif coercion is None:
                return self.configSpec.get(self.currentSection, name,
                                           getRawValues, overrides)

            raise ConfigSpecError("Invalid coercion type specified: %s" %
                                  (coercion), ConfigSpecError.COERCION_TYPE_ERROR)
        except ConfigParser.NoOptionError, ex:
            raise ConfigSpecError("Undefined config variable '%s' requested "
                                  "from section %s" % (name, self.currentSection),
                                  ConfigSpecError.NO_OPTION_ERROR)
        except ConfigParser.InterpolationMissingOptionError, ex:
            raise ConfigSpecError(str(ex),
                                  ConfigSpecError.INTERPOLATION_MISSING_OPTION_ERROR)
        except ConfigParser.InterpolationSyntaxError, ex:
            raise ConfigSpecError(str(ex),
                                  ConfigSpecError.INTERPOLATION_SYNTAX_ERROR)

    def GetAll(self):
        """All (name, value) pairs in the current section."""
        return self.configSpec.items(self.currentSection)
|
Python
| 0
|
@@ -2194,24 +2194,262 @@
entSection%0A%0A
+ def GetSectionItems(self, sectionName):%0A try:%0A return list(x%5B0%5D for x in self.GetRawConfig().items(sectionName))%0A except ConfigParser.NoSectionError:%0A raise ValueError(%22No config section '%25s'%22 %25 sectionName)%0A%0A
def GetDe
|
95f09bc7d61d6ea0a1228229a5092e2bff889855
|
make website_multi_company_demo hidden
|
website_multi_company_demo/__manifest__.py
|
website_multi_company_demo/__manifest__.py
|
# -*- coding: utf-8 -*-
# Odoo manifest: demo-data companion module for website_multi_company.
{
    "name": """Demo Data for \"Real Multi Website\"""",
    "summary": """Provides demo websites""",
    "category": "eCommerce",
    # "live_test_URL": "",
    "images": [],
    "version": "1.0.0",
    "application": False,

    "author": "IT-Projects LLC, Ivan Yelizariev",
    "support": "apps@it-projects.info",
    "website": "https://it-projects.info",
    "license": "LGPL-3",
    # "price": 9.00,
    # "currency": "EUR",

    "depends": [
        "website_multi_company",
        "website_sale",
        "theme_bootswatch",
    ],
    "external_dependencies": {"python": [], "bin": []},
    "data": [
    ],
    "qweb": [
    ],
    # Demo records only — this module ships no regular data files.
    "demo": [
        "demo/res.company.csv",
        "demo/website.csv",
        "demo/product.template.csv",
        "demo/ir.ui.view.csv",
        "demo/website.menu.csv",
        "demo/website_templates.xml",
    ],

    "post_load": None,
    "pre_init_hook": None,
    "post_init_hook": None,

    "auto_install": False,
    "installable": True,
}
|
Python
| 0
|
@@ -141,17 +141,14 @@
%22: %22
-eCommerce
+Hidden
%22,%0A
|
03931ff739f8a3a051ef15d1c63795ef10ab0c12
|
Update forward compatibility horizon to 2021-08-07
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 8, 6)
# Name of the env var holding an integer day offset added to the horizon.
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
# Packed (year<<9 | month<<5 | day) integer; set by
# _update_forward_compatibility_date_number() below.
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Refresh the packed base date consulted by `forward_compatible`.

  When `date_to_override` is given it is used directly; otherwise the module
  horizon is used, optionally shifted by the TF_FORWARD_COMPATIBILITY_DELTA_DAYS
  environment variable. Dates before the horizon are rejected with a warning.
  """
  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  if date_to_override:
    date = date_to_override
  else:
    date = _FORWARD_COMPATIBILITY_HORIZON
    # The env delta only applies when no explicit override was requested.
    delta = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta:
      date += datetime.timedelta(days=int(delta))

  if date < _FORWARD_COMPATIBILITY_HORIZON:
    logging.warning("Trying to set the forward compatibility date to the past"
                    " date %s. This will be ignored by TensorFlow." % (date))
    return
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      date.year, date.month, date.day)
# Initialize the packed date number once at import time.
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return true if the forward compatibility window has expired.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Forward compatibility covers the case where the producer of a TensorFlow
  model (a GraphDef or SavedModel) is compiled against a newer TensorFlow
  than its consumers. TensorFlow supports a 3-week forward-compatibility
  window for programs compiled from source at HEAD: gate the use of a newly
  added op on `forward_compatible(year, month, day)` — with the date at least
  3 weeks after the op's commit — and fall back to the old implementation
  until the window has passed.

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Returns:
    True if the caller can expect that serialized TensorFlow graphs produced
    can be consumed by programs that are compiled with the TensorFlow library
    source code after (year, month, day).
  """
  # Compare packed YYYY/MM/DD integers; the module-level number already
  # reflects any TF_FORWARD_COMPATIBILITY_DELTA_DAYS offset.
  requested = _date_to_date_number(year, month, day)
  return _FORWARD_COMPATIBILITY_DATE_NUMBER > requested
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager for testing forward compatibility of generated graphs.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Within the ``with`` block, `forward_compatible` compares against
  ``datetime.date(year, month, day)`` instead of the library default. This
  lets features gated on `forward_compatible` be unit-tested before their
  forward-compatibility window expires.

  Args:
    year: A year (e.g., 2018). Must be an `int`.
    month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
      `int`.

  Yields:
    Nothing.
  """
  try:
    # Point forward_compatible() at the test date for the body of the block.
    _update_forward_compatibility_date_number(datetime.date(year, month, day))
    yield
  finally:
    # Recompute from the default horizon (and any env delta) on exit.
    _update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1448,9 +1448,9 @@
8,
-6
+7
)%0A_F
|
cbc3be65f46bf6ec2dc12157c019921e82791412
|
Remove blank line
|
jsonrpcserver/request.py
|
jsonrpcserver/request.py
|
"""A JSON-RPC request object. Used internally by the library, but class
attributes can be modified to configure various options for handling requests.
"""
import json
import logging
import re
import pkgutil
try:
# Python 2
from collections import Mapping, Sequence
except ImportError:
# Python 3
from collections.abc import Mapping, Sequence
from funcsigs import signature
import jsonschema
from jsonrpcserver import config
from jsonrpcserver.response import RequestResponse, NotificationResponse, \
ExceptionResponse
from jsonrpcserver.exceptions import JsonRpcServerError, InvalidRequest, \
InvalidParams, MethodNotFound
logger = logging.getLogger(__name__)

# Draft-4 validator compiled once from the JSON-RPC request schema bundled
# with the package (request-schema.json).
json_validator = jsonschema.Draft4Validator(json.loads(pkgutil.get_data(
    __name__, 'request-schema.json').decode('utf-8')))
def _get_method(methods, name):
"""Finds a method in a list (or dictionary).
:param methods: List or dictionary of named functions.
:param name: Name of the method to find.
:raises MethodNotFound: If the method wasn't in the list.
:returns: The method from the list.
"""
# If it's a Mapping (dict-like), search for the key
if isinstance(methods, Mapping):
try:
return methods[name]
except KeyError:
raise MethodNotFound(name)
# Otherwise it must be a Sequence (list-like), search the __name__
# attributes
elif isinstance(methods, Sequence):
try:
return next(m for m in methods if m.__name__ == name)
except StopIteration:
raise MethodNotFound(name)
def _convert_camel_case(name):
"""Convert a camelCase string to under_score"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def _convert_camel_case_keys(original_dict):
    """Return a copy of *original_dict* with every key converted from
    camelCase to under_score, recursing into nested dicts."""
    return {
        _convert_camel_case(key): (_convert_camel_case_keys(value)
                                   if isinstance(value, dict) else value)
        for key, value in original_dict.items()
    }
def _validate_against_schema(request):
    """Validate *request* against the bundled JSON-RPC schema.

    :param request: JSON-RPC request dict.
    :raises InvalidRequest: If the request does not conform to the schema.
    :returns: None
    """
    try:
        json_validator.validate(request)
    except jsonschema.ValidationError as exc:
        # Surface schema violations as JSON-RPC "Invalid request" errors.
        raise InvalidRequest(exc.message)
def _validate_arguments_against_signature(func, args, kwargs):
"""Check if arguments match a function signature and can therefore be passed
to it.
:param func: The function object.
:param args: List of positional arguments (or None).
:param kwargs: Dict of keyword arguments (or None).
:raises InvalidParams: If the arguments cannot be passed to the function.
"""
try:
if not args and not kwargs:
signature(func).bind()
elif args:
signature(func).bind(*args)
elif kwargs:
signature(func).bind(**kwargs)
except TypeError as e:
raise InvalidParams(str(e))
def _call(methods, method_name, args=None, kwargs=None):
    """Resolve *method_name* from *methods*, validate the arguments, call it.

    :param methods: The list of methods - either a python list, or Methods obj.
    :param args: Positional arguments (list)
    :param kwargs: Keyword arguments (dict)
    :raises MethodNotFound: If the method is not in the list.
    :raises InvalidParams: If the arguments don't match the method signature.
    :returns: The return value from the method called.
    """
    method = _get_method(methods, method_name)
    _validate_arguments_against_signature(method, args, kwargs)
    if args and kwargs:
        # JSON-RPC forbids mixing positional and keyword parameters.
        raise InvalidParams()
    if args:
        return method(*args)
    if kwargs:
        return method(**kwargs)
    return method()
def _get_arguments(request):
"""Takes the 'params' part of a JSON-RPC request and converts it to either
positional or keyword arguments usable in Python. The value can be a JSON
array (python list), object (python dict), or omitted. There are no other
acceptable options. Note that a JSON-RPC request can have positional or
keyword arguments, but not both! See
http://www.jsonrpc.org/specification#parameter_structures
:param request: JSON-RPC request in dict form.
:raises InvalidParams: If 'params' was present but was not a list or dict.
:returns: A tuple containing the positionals (in a list, or None) and
keywords (in a dict, or None) extracted from the 'params' part of the
request.
"""
positionals = keywords = None
params = request.get('params')
# Params was omitted from the request. Taken as no arguments.
if 'params' not in request:
pass
# Params is a list. Taken as positional arguments.
elif isinstance(params, list):
positionals = params
# Params is a dict. Taken as keyword arguments.
elif isinstance(params, dict):
keywords = params
# Anything else is invalid. (This should never happen if the request has
# passed the schema validation.)
else:
raise InvalidParams('Params of type %s is not allowed' % \
type(params).__name__)
return (positionals, keywords)
class Request(object):
    """JSON-RPC Request object.

    Encapsulates a JSON-RPC request, providing details such as the method name,
    arguments, and whether it's a request or a notification, and provides a
    ``process`` method to execute the request.
    """

    def __init__(self, request):
        """
        :param request: JSON-RPC request, in dict or string form
        """
        # Validate against the JSON-RPC schema (skippable via config).
        if config.schema_validation:
            _validate_against_schema(request)
        # Get method name from the request. We can assume the key exists because
        # the request passed the schema.
        self.method_name = request['method']
        # Get arguments from the request, if any
        self.args, self.kwargs = _get_arguments(request)
        # Get request id, if any (absent id makes this a notification).
        self.request_id = request.get('id')
        # Convert camelCase to underscore, for both the method name and any
        # keyword-argument keys.
        if config.convert_camel_case:
            self.method_name = _convert_camel_case(self.method_name)
            if self.kwargs:
                self.kwargs = _convert_camel_case_keys(self.kwargs)

    @property
    def is_notification(self):
        """Returns True if the request is a JSON-RPC notification (ie. No
        response is required, False if it's a request.
        """
        return self.request_id is None

    def process(self, methods):
        """Calls the method and returns a Response object."""
        error = None
        try:
            result = _call(methods, self.method_name, self.args, self.kwargs)
        # Catch any JsonRpcServerError raised (Invalid Request, etc)
        except JsonRpcServerError as e:
            error = e
        # Catch uncaught exceptions and respond with ServerError
        except Exception as e: # pylint: disable=broad-except
            # Log the uncaught exception
            logger.exception(e)
            error = e # pylint: disable=redefined-variable-type
        if error:
            # Notifications swallow errors unless configured otherwise.
            if self.is_notification and not config.notification_errors:
                return NotificationResponse()
            else:
                return ExceptionResponse(error, self.request_id)
        # Success
        if self.is_notification:
            return NotificationResponse()
        else:
            return RequestResponse(self.request_id, result)
|
Python
| 0.999999
|
@@ -6834,17 +6834,16 @@
wargs)%0A%0A
-%0A
@pro
|
f6b80d5cd39d5764174a8e05f3b4f73d1ea70827
|
Update forward compatibility horizon to 2018-11-26
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Horizon date compared against by forward_compatible(); overridable via
# the forward_compatibility_horizon() context manager below.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 11, 25)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
  """Return true if the forward compatibility window has expired.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Forward compatibility covers the case where the producer of a TensorFlow
  model (a GraphDef or SavedModel) is compiled against a newer TensorFlow
  than its consumers. TensorFlow supports a 3-week forward-compatibility
  window for programs compiled from source at HEAD: gate the use of a newly
  added op on `forward_compatible(year, month, day)` — with the date at least
  3 weeks after the op's commit — and fall back to the old implementation
  until the window has passed.

  Args:
    year: A year (e.g., 2018).
    month: A month (1 <= month <= 12) in year.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.

  Returns:
    True if the caller can expect that serialized TensorFlow graphs produced
    can be consumed by programs that are compiled with the TensorFlow library
    source code after (year, month, day).
  """
  cutoff = datetime.date(year, month, day)
  return _FORWARD_COMPATIBILITY_HORIZON > cutoff
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
  """Context manager for testing forward compatibility of generated graphs.

  See [Version
  compatibility](https://tensorflow.org/guide/version_compat#backward_forward).

  Within the ``with`` block, `forward_compatible` compares against
  ``datetime.date(year, month, day)`` instead of the library default. This
  lets features gated on `forward_compatible` be unit-tested before their
  forward-compatibility window expires.

  Args :
    year: A year (e.g. 2018).
    month: A month (1 <= month <= 12) in year.
    day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.

  Yields:
    Nothing.
  """
  global _FORWARD_COMPATIBILITY_HORIZON
  saved_horizon = _FORWARD_COMPATIBILITY_HORIZON
  try:
    _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
    yield
  finally:
    # Always restore the previous horizon, even if the body raised.
    _FORWARD_COMPATIBILITY_HORIZON = saved_horizon
|
Python
| 0
|
@@ -1143,9 +1143,9 @@
1, 2
-5
+6
)%0A%0A%0A
|
a8333a5c3e9c6b07df2b04782c9e0cc3c4b6e60c
|
Bump Version
|
common.py
|
common.py
|
# Release version, bumped manually.
VERSION_YEAR = 2017
VERSION_MONTH = 10
VERSION_DAY = 2
VERSION_REV = 1

# Shared runtime handles; initialized to placeholders here.
whos_in = None
twitter = None
# Per-display-name stats, e.g. {'name': {'drinks_owed': 2}} (see add_drink).
users = {}
twilio_client = None
ARGS = {}
smmry_api_key = None

# Variable hold trumps last tweet id
last_id = 0
trump_chance_roll_rdy = False

# Runtime stats
duels_conducted = 0
items_awarded = 0
trump_tweets_seen = 0

# Shot_duel acceptance and active
accepted = False
shot_duel_running = False
vict_name = ""

# News handles to pull from
news_handles = ['mashable', 'cnnbrk', 'whitehouse', 'cnn', 'nytimes',
                'foxnews', 'reuters', 'npr', 'usatoday', 'cbsnews',
                'abc', 'washingtonpost', 'msnbc', 'ap', 'aphealthscience',
                'lifehacker', 'cnnnewsroom', 'theonion']

# Delays for Newsfeed and Check_trump, These are in minutes
# remember that news_del is fuzzed + (0-10)
trump_del = 15
news_del = 55

# Location of db.json and tokens.config
data_dir = "/data"

# Create/Load Local Database
db_file = '{}/db.json'.format(data_dir)
db = {}

# Global toggle for news feed
NEWS_FEED_ON = False
NEWS_FEED_CREATED = False
async def trigger_social(ctx):
    """Announce a social and hand a drink to every online member.

    :param ctx: bot command context; its bot supplies the member list
        and the announcement channel.
    """
    for member in ctx.bot.get_all_members():
        # The bot itself never owes drinks; offline members are spared too.
        if member.display_name != 'brochat-bot' and member.status == 'online':
            add_drink(member.display_name)
    glass = ":tumbler_glass:"
    await ctx.bot.say("Ah shit that's three in a row! ITS A SOCIAL! SHOTS! "
                      "SHOTS! SHOTS!\n{}{}{}".format(glass, glass, glass))
def add_drink(user):
    """
    Adds a drink for the user.

    :param user: users display name
    :return: the user's updated drinks_owed count
    """
    # Create the user's record on first sight, then bump the tally.
    record = users.setdefault(user, {})
    record['drinks_owed'] = record.get('drinks_owed', 0) + 1
    return record['drinks_owed']
|
Python
| 0
|
@@ -46,17 +46,17 @@
N_DAY =
-2
+5
%0AVERSION
@@ -62,17 +62,17 @@
N_REV =
-1
+0
%0A%0Awhos_i
|
086b5a028d3d85f9fa6e71a5fc08b61ae9426e7b
|
Copy the right file :)
|
readthedocs/doc_builder/backends/sphinx_dash.py
|
readthedocs/doc_builder/backends/sphinx_dash.py
|
from glob import glob
import logging
import os
import shutil
import tarfile
from django.conf import settings
from django.template import Template, Context
from doc_builder.base import restoring_chdir
from doc_builder.backends.sphinx import Builder as HtmlBuilder
from projects.utils import run
from core.utils import copy_file_to_app_servers
log = logging.getLogger(__name__)
FEED_TEMPLATE = """<entry>
<version>{{ version.slug }}</version>
<url>{{ media_url_prefix }}{{ version.project.get_dash_url }}</url>
</entry>
"""
class Builder(HtmlBuilder):
    """Builds a Dash docset from the Sphinx HTML output and publishes the
    resulting tarball plus its version feed entry."""

    @restoring_chdir
    def build(self, **kwargs):
        """Run doc2dash over the HTML build, then package the docset and
        write the feed entry. Returns the doc2dash run result."""
        project = self.version.project
        os.chdir(project.conf_dir(self.version.slug))
        # Start from a clean _build/dash directory each time.
        if os.path.exists('_build/dash'):
            shutil.rmtree('_build/dash')
        os.makedirs('_build/dash')
        dash_build_command = ("doc2dash --name=\"%s\" --force "
                              "--destination=_build/dash _build/html"
                              % project.name)
        dash_build_results = run(dash_build_command, shell=True)
        self._zip_dash()
        self._write_feed()
        return dash_build_results

    def _write_feed(self):
        """Render the docset feed entry XML into the checkout directory."""
        # Protocol-relative media URLs need an explicit scheme in the feed.
        if settings.MEDIA_URL.startswith('//'):
            media_url_prefix = 'http:'
        else:
            media_url_prefix = ''
        context = Context({
            'version': self.version,
            'media_url_prefix': media_url_prefix,
        })
        feed_content = Template(FEED_TEMPLATE).render(context)
        # (Removed a dead assignment: to_file was previously set from
        # get_dash_feed_path() and immediately overwritten below.)
        to_path = self.version.project.checkout_path(self.version.slug)
        to_file = os.path.join(to_path, '%s.xml' % self.version.project.doc_name)
        if not os.path.exists(to_path):
            os.makedirs(to_path)
        with open(to_file, 'w') as feed_file:
            feed_file.write(feed_content)

    @restoring_chdir
    def _zip_dash(self, **kwargs):
        """Tar/gzip the generated docset into the checkout directory and
        return the tarball path."""
        from_path = self.version.project.full_dash_path(self.version.slug)
        to_path = self.version.project.checkout_path(self.version.slug)
        to_file = os.path.join(to_path,
                               '%s.tgz' % self.version.project.doc_name)
        log.info("Creating dash tarball from %s at %s" %
                 (from_path, to_file))
        # Create a <slug>.tgz file containing all files in file_path
        os.chdir(from_path)
        archive = tarfile.open(to_file, "w:gz")
        for root, subfolders, files in os.walk('.'):
            for file in files:
                to_write = os.path.join(root, file)
                archive.add(to_write)
        archive.close()
        return to_file

    def move(self, **kwargs):
        """Publish the docset tarball and the feed XML into MEDIA_ROOT (or
        to the app servers when MULTIPLE_APP_SERVERS is configured)."""
        project = self.version.project
        outputted_path = self.version.project.checkout_path(self.version.slug)
        to_path = os.path.join(settings.MEDIA_ROOT,
                               'dash',
                               project.slug,
                               self.version.slug)
        from_globs = glob(os.path.join(outputted_path, "*.tgz"))
        if from_globs:
            from_file = from_globs[0]
            to_file = os.path.join(to_path, "%s.tgz" % project.doc_name)
            self._deploy_file(from_file, to_file, to_path)
        xml_globs = glob(os.path.join(outputted_path, "*.xml"))
        if xml_globs:
            # Bug fix: this branch previously copied from_globs[0] (the
            # tarball) instead of the XML feed, and crashed with IndexError
            # when no tarball existed.
            from_file = xml_globs[0]
            to_file = os.path.join(to_path, "%s.xml" % project.doc_name)
            self._deploy_file(from_file, to_file, to_path)

    def _deploy_file(self, from_file, to_file, to_path):
        """Send one artifact to the app servers, or move it into local media."""
        if getattr(settings, "MULTIPLE_APP_SERVERS", None):
            copy_file_to_app_servers(from_file, to_file)
        else:
            if not os.path.exists(to_path):
                os.makedirs(to_path)
            run('mv -f %s %s' % (from_file, to_file))
|
Python
| 0
|
@@ -3074,64 +3074,8 @@
-from_globs = glob(os.path.join(outputted_path, %22*.tgz%22))
%0A
@@ -3079,19 +3079,20 @@
-xml
+from
_globs =
@@ -3129,19 +3129,19 @@
ath, %22*.
-xml
+tgz
%22))%0A
@@ -3551,32 +3551,97 @@
_file, to_file))
+%0A%0A xml_globs = glob(os.path.join(outputted_path, %22*.xml%22))
%0A if xml_
@@ -3663,36 +3663,35 @@
from_file =
-from
+xml
_globs%5B0%5D%0A
|
7047816b5edc7911685219d53970c892728d0220
|
add os to config
|
config.py
|
config.py
|
# -*- encoding: utf-8 -*-
# Flask application settings, sourced from environment variables.
import datetime
import os  # Bug fix: os.environ is used throughout but os was never imported.

# -----------------------------------------------------
# Application configurations
# ------------------------------------------------------
DEBUG = True

SECRET_KEY = os.environ['SECRET_KEY']
# Bug fix: cast to int — ESI_CALLBACK below formats the port with %d, which
# raises TypeError for a string value.
PORT = int(os.environ['PORT'])
HOST = os.environ['HOST']

# -----------------------------------------------------
# SQL Alchemy configs
# -----------------------------------------------------
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']

# -----------------------------------------------------
# ESI Configs
# -----------------------------------------------------
ESI_DATASOURCE = 'tranquility'  # Change it to 'singularity' to use the test server
ESI_SWAGGER_JSON = 'https://esi.tech.ccp.is/latest/swagger.json?datasource=%s' % ESI_DATASOURCE
ESI_SECRET_KEY = os.environ['ESI_SECRET_KEY']  # your secret key
ESI_CLIENT_ID = os.environ['ESI_CLIENT_ID']  # your client ID
ESI_CALLBACK = 'http://%s:%d/sso/callback' % (HOST, PORT)  # the callback URI you gave CCP
ESI_USER_AGENT = 'hauler-packing-tool'

# ------------------------------------------------------
# Session settings for flask login
# ------------------------------------------------------
PERMANENT_SESSION_LIFETIME = datetime.timedelta(days=30)

# ------------------------------------------------------
# DO NOT EDIT
# Fix warnings from flask-sqlalchemy / others
# ------------------------------------------------------
SQLALCHEMY_TRACK_MODIFICATIONS = True
|
Python
| 0.000001
|
@@ -34,16 +34,26 @@
datetime
+%0Aimport os
%0A%0A# ----
|
4748a984b2e594c2e92b02eaac3b27457ec1d023
|
reorder l2 lambda
|
config.py
|
config.py
|
import tensorflow as tf
from classes.model import Layer
class BaseConfig():
TRAINING_DATA = './assignment/train_potus_by_county.csv'
TESTING_DATA = './assignment/train_potus_by_county.csv'
TARGET_LABEL = 'Winner'
OUTFILES = {'targets': './targets.csv',
'preprocessing_means': './preprocessing_means.csv',
'preprocessing_stddevs': './preprocessing_stddevs.csv',
'graph_def': './graph_def.bin',
'performance': './performance.txt',
'predictions': './predictions.txt'}
# defaults to random (non-persistent) seed if None
SEED = 47
# defaults to all detected if 0
NUM_CORES = 3
VERBOSE = False
class Config(BaseConfig):
def __init__(self, hyperparams, layers):
self.HYPERPARAMS = hyperparams
self.LAYERS = layers
class GridSearchConfig(BaseConfig):
def __init__(self, hyperparam_grid, hidden_layer_grid):
self.HYPERPARAM_GRID = hyperparam_grid
self.HIDDEN_LAYER_GRID = hidden_layer_grid
########################################################################################
HYPERPARAM_GRID = {'learning_rate': [0.05, 0.01, 0.1],
# keep probability for dropout (1 for none)
'dropout': [0.5, 0.7, 1],
# lambda for L2 regularization (0 for none)
'lambda_l2_reg': [1E-5, 1E-4, 1E-3, 0],
'n_minibatch': [100],
'epochs': [100]}
HIDDEN_LAYER_GRID = {'activation': [tf.nn.relu],# tf.nn.tanh, tf.nn.sigmoid],
'hidden_nodes': [[14],
[12],
[10],
[8],
[10, 8]]}
HYPERPARAMS = {'learning_rate': 0.05,
'dropout': 0.7,
'lambda_l2_reg': 1E-5,
'n_minibatch': 100,
'epochs': 100}
ARCHITECTURE = [
# input & output nodes will be sized by data shape
Layer('input', None, None),
Layer('hidden_1', 12, tf.nn.relu),
#Layer('hidden_2', 10, tf.nn.relu),
Layer('output', None, tf.nn.softmax)
]
#config = GridSearchConfig(HYPERPARAM_GRID, HIDDEN_LAYER_GRID)
config = Config(HYPERPARAMS, ARCHITECTURE)
|
Python
| 0.999999
|
@@ -1397,16 +1397,19 @@
_reg': %5B
+0,
1E-5, 1E
@@ -1416,19 +1416,16 @@
-4, 1E-3
-, 0
%5D,%0A
|
9a09b6fdcd26fbacfa73574835da1fe27a8760f6
|
Add separate config for preview.
|
config.py
|
config.py
|
class Config(object):
DEBUG = False
class DevelopmentConfig(object):
DEBUG = True
RULES_ENGINE_URL = "http://localhost:5005"
BANKRUPTCY_DATABASE_API = "http://localhost:5004"
CASEWORK_DATABASE_API = "http://localhost:5006"
|
Python
| 0
|
@@ -57,30 +57,30 @@
pmentConfig(
-object
+Config
):%0A DEBUG
@@ -239,8 +239,192 @@
st:5006%22
+%0A%0A%0Aclass PreviewConfig(Config):%0A RULES_ENGINE_URL = %22http://localhost:5005%22%0A BANKRUPTCY_DATABASE_API = %22http://localhost:5004%22%0A CASEWORK_DATABASE_API = %22http://localhost:5006%22
|
dbf8a78c9506ad316d4d02cec14dd77ce3c0402c
|
Fix timeout
|
src/checker/net.py
|
src/checker/net.py
|
import requests
from requests.exceptions import InvalidSchema, ConnectionError, MissingSchema, Timeout
from urllib.parse import urlparse, urlencode
import os
import tempfile
import magic
import logging
import math
import time
class NetworkError(Exception):
pass
class ConditionError(Exception):
pass
class UrlError(ConditionError):
pass
class StatusError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Network(object):
__allowed_schemata = ['http', 'https']
@staticmethod
def getLink(linkedTransaction, acceptedTypes, conf, journal, session):
log = logging.getLogger(__name__)
try:
acc_header = Network.__create_accept_header(acceptedTypes)
log.debug("Accept header: "+acc_header)
r = Network.__conditional_fetch(linkedTransaction, acc_header, conf, session)
Network.__store_cookies(linkedTransaction, r.cookies, journal)
name = Network.__save_content(r.text, conf.getProperty("tmpPrefix"), conf.getProperty("tmpSuffix"))
match, mime = Network.__test_content_type(linkedTransaction.type, name)
if not match:
journal.foundDefect(linkedTransaction.idno, "type-mishmash", "Declared content-type doesn't match detected one", "Declared "+linkedTransaction.type+", detected "+mime, 0.5)
return name
except ConnectionError as e:
log.debug("Connection error: "+format(e))
journal.foundDefect(linkedTransaction.srcId, "badlink", "Invalid link", linkedTransaction.uri, 1.0)
raise NetworkError(e)
except Timeout as e:
log.error("Timeout")
journal.foundDefect(linkedTransaction.srcId, "timeout", "Link timed out", linkedTransaction.uri, 0.9)
raise NetworkError() from e
@staticmethod
def check_link(linkedTransaction, journal, conf, session, verify=False):
log = logging.getLogger(__name__)
s = urlparse(linkedTransaction.uri).scheme
if s not in Network.__allowed_schemata:
raise UrlError(s+" is not an allowed schema")
try:
r = session.head(linkedTransaction.uri, headers={ "user-agent": conf.getProperty("agent") }, timeout = conf.getProperty("time"), verify=verify) #TODO: accept
except Timeout as e:
log.error("Timeout")
journal.foundDefect(linkedTransaction.srcId, "timeout", "Link timed out", linkedTransaction.uri, 0.9)
raise NetworkError() from e
except ConnectionError as e:
log.debug("Connection error: "+format(e))
journal.foundDefect(linkedTransaction.srcId, "badlink", "Invalid link", linkedTransaction.uri, 1.0)
raise NetworkError(e) from e
linkedTransaction.status = r.status_code
if r.status_code >= 400:
journal.foundDefect(linkedTransaction.srcId, "badlink", "Invalid link", linkedTransaction.uri, 1.0)
raise StatusError(r.status_code)
lst = list(r.headers.keys())
if 'content-type' in lst:
ct = r.headers['content-type']
elif 'Content-Type' in lst:
ct = r.headers['Content-Type']
else:
ct = ''
if not ct.strip():
journal.foundDefect(linkedTransaction.idno, "badtype", "Content-type empty", None, 0.5)
if ';' in ct: #text/html;charset=utf-8 -> text/html
ct = ct.split(';')[0]
return ct, r
@staticmethod
def __conditional_fetch(transaction, accept, conf, session):
if not transaction.isWorthIt(conf):
logging.getLogger(__name__).debug("Uri not accepted: "+transaction.uri)
raise ConditionError
elif not conf.type_acceptor.mightAccept(transaction.type):
logging.getLogger(__name__).debug("Content-type not accepted: "+transaction.type+" ("+transaction.uri+")")
raise ConditionError
else:
return Network.__fetch_response(transaction, conf.getProperty("agent"), accept, conf.getProperty('timeout'), session, conf.getProperty("verifyHttps"), conf.getProperty("maxAttempts"))
@staticmethod
def __fetch_response(transaction, agent, accept, timeout, session, verify=False, max_attempts=3):
r = None
head = {"user-agent" : agent, "accept" : accept }
log = logging.getLogger(__name__)
log.debug("Fetching "+transaction.uri)
log.debug("Data: "+str(transaction.data))
#if not allowed to send cookies or don't have any, then cookies are None -> should be safe to use them; maybe filter which to use?
attempt = 0
while attempt < max_attempts:
try:
if transaction.method == 'GET':
r = session.get(transaction.uri+Network.__gen_param(transaction), allow_redirects=False, headers = head, timeout = timeout, cookies = transaction.cookies, verify=verify)
elif transaction.method == 'POST':
r = session.post(transaction.uri, allow_redirects=False, headers = head, data = transaction.data, timeout = timeout, cookies = transaction.cookies, verify=verify)
except ConnectionError as e:
if (attempt + 1) < max_attempts:
wait = math.pow(10, attempt)
time.sleep(wait)
else:
raise
attempt = attempt + 1
except Timeout as e:
if (attempt + 1) < max_attempts:
wait = math.pow(10, attempt)
time.sleep(wait)
else:
raise
attempt = attempt + 1
else:
transaction.status = r.status_code
if transaction.uri != r.url:
logging.getLogger(__name__).debug("Redirection: "+transaction.uri+" -> "+r.url)
transaction.changePrimaryUri(r.url)
return r
return None
@staticmethod
def __gen_param(transaction):
if transaction.data is not None:
param = "?"+urlencode(transaction.data)
else:
param = ""
return param
@staticmethod
def __save_content(content, prefix=None, suffix=None):
with tempfile.NamedTemporaryFile(delete=False, prefix=prefix, suffix=suffix) as tmp:
tmp.write(content.encode('utf-8'))
name = tmp.name
return name
@staticmethod
def __test_content_type(ctype, fname):
mime = magic.from_file(fname, mime=True)
return (mime == ctype), mime
@staticmethod
def __create_accept_header(acceptedTypes):
#see RFC 2616, section 14.1
if len(acceptedTypes) > 0:
string = acceptedTypes[0]
for aType in acceptedTypes[2:]:
string += ", "+aType
return string
else:
return ""
@staticmethod
def __store_cookies(transaction, cookies, journal):
for name, value in cookies.items():
journal.gotCookie(transaction, name, value)
transaction.cookies = cookies
|
Python
| 0.001785
|
@@ -2205,32 +2205,109 @@
)%0A%0A try:%0A
+ log.debug(%22Timeout set to: %22 + str(conf.getProperty(%22timeout%22)))%0A
r =
@@ -2402,19 +2402,17 @@
timeout
- =
+=
conf.get
@@ -2425,16 +2425,19 @@
ty(%22time
+out
%22), veri
|
e87562d15de92bac443be418d45dce0fe47cb4e1
|
switch to utc
|
events.py
|
events.py
|
#!/usr/bin/env python
import requests
import time
import math
import os
import shutil
import tempfile
from six.moves import configparser
from yattag import Doc
# Read config file in
mydir = os.path.dirname(os.path.realpath(__file__))
configReader = configparser.RawConfigParser()
configReader.read(mydir + "/config.txt")
config = {
'outputdir': "./",
'customtext': "Zone events running on and around Telara",
'name': "Simple RIFT Event Tracker",
}
for var in ["outputdir","name","customtext"]:
try:
config[var] = configReader.get("Tracker",var)
except ConfigParser.NoOptionError:
pass
allshards = {
'us': {
1704: 'Deepwood',
1707: 'Faeblight',
1702: 'Greybriar',
1721: 'Hailol',
1708: 'Laethys',
1701: 'Seastone',
1706: 'Wolfsbane',
},
'eu': {
2702: 'Bloodiron',
2714: 'Brisesol',
2711: 'Brutwacht',
2721: 'Gelidra',
2741: 'Typhiria',
2722: 'Zaviel',
}
}
os.environ['TZ'] = 'America/Los_Angeles'
for dc in allshards:
# Construct a page at a time
doc, tag, text = Doc().tagtext()
with tag('html'):
with tag('head'):
doc.stag('meta', ('http-equiv', "Refresh"), ('content', 60))
doc.stag('meta', ('http-equiv', "Content-Type"), ('content', "text/html; charset=UTF-8"))
doc.stag('link', ('rel', "stylesheet"), ('type', "text/css"), ('href', "style.css"))
with tag('title'):
text(config['name'])
with tag('body'):
with tag('h2'):
text(config['name'], ' - ', dc.upper())
# Links to other DCs
with tag('p'):
for otherdc in allshards:
if (otherdc != dc):
with tag('a', href = otherdc + ".html"):
text(otherdc.upper())
with tag('p'):
text(config['customtext'])
# Event table
with tag('table'):
with tag('thead'):
with tag('tr'):
for title in ['Shard', 'Zone', 'Event Name', 'Elapsed Time']:
with tag('th'):
text(title)
with tag('tbody'):
# Get each shard's events
for shardid in sorted(allshards[dc], key=allshards[dc].get):
r = requests.get("https://web-api-" + dc + ".riftgame.com/chatservice/zoneevent/list?shardId=" + str(shardid))
r.raise_for_status() # fail
data = r.json()["data"]
data.reverse()
# Print any events
displayshard = allshards[dc][shardid]
for zone in data:
# An event is running in a zone, so add a table row
if "name" in zone:
with tag('tr'):
with tag('td', klass = "bold"):
text(displayshard)
zoneclass = "secondary"
# Starfall zone IDs
if zone['zoneId'] in [788055204, 2007770238, 1208799201, 2066418614]:
zoneclass = "bold"
for display in [zone['zone'], zone['name'], str(int( math.floor((time.time() - zone['started']) / 60) )) + " min" ]:
with tag('td', klass = zoneclass):
text(display)
# already printed the shard name once, so clear it
displayshard = ""
with tag('p', klass = 'small tertiary'):
text(time.strftime("%x %X %Z"))
with tag('p', klass = 'small tertiary'):
text("Trion, Trion Worlds, RIFT, Storm Legion, Nightmare Tide, Starfall Prophecy, Telara, and their respective logos, are trademarks or registered trademarks of Trion Worlds, Inc. in the U.S. and other countries. This site is not affiliated with Trion Worlds or any of its affiliates.")
# Write page then move it over the old one
with tempfile.NamedTemporaryFile(delete=False) as outfile:
outfile.write(doc.getvalue().encode('utf8'))
os.chmod(outfile.name, 0o0644)
os.rename(outfile.name, config['outputdir'] + dc + ".html")
if not os.path.exists(config['outputdir'] + "index.html"):
os.symlink(config['outputdir'] + dc + ".html", config['outputdir'] + "index.html")
if not os.path.exists(config['outputdir'] + "style.css"):
shutil.copy2(mydir + "/style.css",config['outputdir'] + "style.css")
|
Python
| 0.000982
|
@@ -14,16 +14,17 @@
v python
+3
%0Aimport
@@ -96,16 +96,46 @@
empfile%0A
+import asyncio%0Aimport aiohttp%0A
from six
@@ -985,27 +985,11 @@
= '
-America/Los_Angeles
+UTC
'%0A%0Af
|
1c895f37f3b3090f1f53ab9d01bc639758f14a2f
|
refine coding style
|
nthuoj/urls.py
|
nthuoj/urls.py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.http import HttpResponseRedirect
from ckeditor.views import upload, browse
from utils.user_info import validate_user
import autocomplete_light
# OP autodiscover
autocomplete_light.autodiscover()
def judge_auth_required(view):
"""A decorator to ensure user has judge auth."""
def f(request, *args, **kwargs):
user = validate_user(request.user)
if user.has_judge_auth():
return view(request, *args, **kwargs)
return HttpResponseRedirect(settings.LOGIN_URL)
return f
urlpatterns = patterns('',
url(r'^ckeditor/upload/', judge_auth_required(upload), name='ckeditor_upload'),
url(r'^ckeditor/browse/', judge_auth_required(browse), name='ckeditor_browse'),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
url(r'^autocomplete/', include('autocomplete_light.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^get_time/', 'index.views.get_time'),
url(r'^', include('index.urls', namespace='index')),
url(r'^problem/', include('problem.urls', namespace='problem')),
url(r'^contest/', include('contest.urls', namespace='contest')),
url(r'^users/', include('users.urls', namespace='users')),
url(r'^team/', include('team.urls', namespace='team')),
url(r'^group/', include('group.urls', namespace='group')),
url(r'^status/', include('status.urls', namespace='status')),
)
handler400 = 'index.views.custom_400'
handler403 = 'index.views.custom_403'
handler404 = 'index.views.custom_404'
handler500 = 'index.views.custom_500'
|
Python
| 0.908904
|
@@ -662,16 +662,21 @@
atterns(
+%0A
'',%0A
@@ -729,16 +729,24 @@
upload),
+%0A
name='c
@@ -821,16 +821,24 @@
browse),
+%0A
name='c
@@ -916,16 +916,24 @@
.serve',
+%0A
%7B'docum
|
7f13b29cc918f63c4d1fc24717c0a0b5d2f5f8ad
|
Fix problem with array values.
|
filter.py
|
filter.py
|
import numpy as np
class LowPassFilter(object):
'''
First order discrete IIR filter.
'''
def __init__(self, feedback_gain, initial_value=0.0):
self.feedback_gain = np.ones_like(initial_value) * feedback_gain
self.initial_value = initial_value
self.output_gain = 1.0 - feedback_gain
self.input = np.nan
self.output = initial_value
self.feedback_value = initial_value / self.output_gain
def filter(self, value):
#if not math.isanan(value) and math.isinf(value):
self.input = value
self.feedback_value = value + self.feedback_gain * self.feedback_value
self.output = self.output_gain * self.feedback_value
return self.output
class MovingAverage(object):
'''
Moving average filter.
'''
def __init__(self, lifetime, sampling_time):
self.lifetime = lifetime
self.sampling_time = sampling_time
self.exp = np.exp(-sampling_time / lifetime)
self.last_value = np.nan
self.mean_value = np.nan
def filter(self, value):
self.last_value = value
if np.isnan(self.mean_value):
self.mean_value = value
else:
self.mean_value = value + self.exp * (self.mean_value - value)
return self.mean_value
|
Python
| 0.000021
|
@@ -1011,22 +1011,20 @@
value =
-np.nan
+None
%0A
@@ -1042,22 +1042,20 @@
value =
-np.nan
+None
%0A%0A de
@@ -1124,17 +1124,8 @@
if
-np.isnan(
self
@@ -1131,25 +1131,32 @@
f.mean_value
-)
+ is None
:%0A
|
bb8e3163920bb81998bc9851a3abceac498e0b0e
|
add coding:utf-8 comment to finder.py
|
finder.py
|
finder.py
|
from design import FindDesigns
class Finder(object):
def __init__(self, payload, preferred_radial_size, delta_vs, accelerations, pressures, gimbal,
boosters, electricity, length):
"""Initializes this finder.
Args:
payload (Int) - Payload size in kilograms.
preferred_radial_size (RadialSize) - The preferred radial size.
delta_vs ([float]) - Array of delta-V requirements.
accelerations ([float]) - Array of acceleration requirements.
pressures ([float]) - Array of pressure requirements.
gimbal (boolean) - Whether or not to prefer thrust vectoring engines.
boosters (boolean) - Whether or not to include solid boosters.
electricity (boolean) - Whether or not to prefer engines that generate power.
length (boolean) - Whether or not to prefer shorter engines.
"""
if payload < 0.0:
raise ValueError("Invalid payload")
for i in range(len(delta_vs)):
# because of Eve, we have to support up to 5 ATM
if delta_vs[i] <= 0.0 or accelerations[i] < 0.0 or \
pressures[i] < 0.0 or pressures[i] > 5.0:
raise ValueError("Invalid Delta-v tuple")
self.payload = payload
self.preferred_radial_size = preferred_radial_size
self.delta_vs = delta_vs
self.accelerations = accelerations
self.pressures = pressures
self.gimbal = gimbal
self.boosters = boosters
self.electricity = electricity
self.length = length
def lint(self):
"""Check input values for common mistakes and return a list of warnings."""
warnings = []
if max(self.accelerations) == 0.0:
warnings.append("No minimum acceleration in any phase given. Very weak engines could "
"be presented.")
elif max(self.accelerations) > 22.5:
warnings.append("Very high minimum acceleration required. Overthink whether you really "
"need such a strong engine.")
if max(self.pressures) > 2.5:
warnings.append("Very high pressure required. If you are going to land on Eve, "
"consider landing on a mountain.")
if self.payload > 115500:
# 2/3 * ((670000*8 * 195/220) / 13 - 8*24000)
# Two thirds of maximum weight for eight kickbacks to accelerate with 13 m/s² at 1 ATM
warnings.append("Your rocket is very heavy.")
if sum(self.delta_vs) > 7300:
warnings.append("You require too much Delta-v for most conventional engines. Overthink "
"your mission planning.")
if not self.boosters and sum(self.delta_vs) > 3000 and max(self.pressures) > 0.75 \
and max(self.accelerations) > 9.8:
warnings.append("Enable solid fuel boosters if you are building a launcher.")
return warnings
def Find(self, best_only=True, order_by_cost=False):
all_designs = FindDesigns(self.payload,
self.pressures,
self.delta_vs,
self.accelerations,
self.preferred_radial_size,
self.gimbal,
self.boosters,
self.electricity,
self.length)
if best_only:
designs = [d for d in all_designs if d.IsBest]
else:
designs = all_designs
if order_by_cost:
return sorted(designs, key=lambda dsg: dsg.cost)
return sorted(designs, key=lambda dsg: dsg.mass)
|
Python
| 0
|
@@ -1,8 +1,33 @@
+# -*- coding: utf-8 -*-%0A%0A
from des
|
1f78da6be6aa0aaa2d361eaa3994488f1a8b4a07
|
add nodesMentioned, edgesMentioned
|
src/dig/outline.py
|
src/dig/outline.py
|
#!/usr/bin/env python
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from pprint import pprint
from collections import defaultdict
iii = None
class Outline(object):
def __init__(self, graph, subgraph, query, root, **kwargs):
self.graph = graph
self.subgraph = subgraph
self.query = query
self.root = root
def intermediate(self):
global iii
i = defaultdict(list)
i["root"] = self.root
# to begin with, no terms are covered
covered = dict([(term, set()) for term in self.query.terms])
for a in self.query.ngrams.values():
for cand in a["candidates"]:
if cand.referentType == 'node':
i["must"].append(cand.binding())
# required[truenodeDesig(cand.referent)].append(cand)
for w in a["words"]:
covered[w].add(cand)
elif cand.referentType == 'edge':
i["must"].append(cand.binding())
# required[truenodeDesig(cand.referent)].append(cand)
for w in a["words"]:
covered[w].add(cand)
# now we have all known candidates
for term in self.query.terms:
if not covered[term]:
i["should"].append(("match", term))
i["covered"] = covered
iii = i
return i
def detail(self, file=sys.stdout):
# print (root,g,q,s,m,wg,sg)
print("\nDetail of outline {}".format(self), file=file)
print("Input Graph: {}".format(self.graph), file=file)
print("Input Keywords: {}".format(self.query.terms), file=file)
print("Input Keyword Coloring: \n{}".format(self.query.dumpToString(indent=2)), file=file)
print("Relevant Subgraph: {}".format(self.subgraph), file=file)
print("Intermediate Repn:", file=file)
pprint(self.intermediate(), file)
|
Python
| 0.000036
|
@@ -175,16 +175,38 @@
aultdict
+%0Afrom util import info
%0A%0Aiii =
@@ -458,16 +458,110 @@
bal iii%0A
+ edgesMentioned = %5B%5D%0A nodesMentioned = %5B%5D%0A must = %5B%5D%0A should = %5B%5D%0A
@@ -666,23 +666,23 @@
-covered
+touches
= dict(
@@ -877,33 +877,225 @@
-i%5B%22must%22%5D
+print(%22node:%22)%0A pprint(cand)%0A info(cand)%0A info(cand.referent)%0A must.append(cand.binding())%0A nodesMentioned
.append(cand
@@ -1237,39 +1237,39 @@
-covered
+touches
%5Bw%5D.add(cand)%0A
@@ -1340,17 +1340,22 @@
-i%5B%22must%22%5D
+edgesMentioned
.app
@@ -1513,23 +1513,23 @@
-covered
+touches
%5Bw%5D.add(
@@ -1634,23 +1634,23 @@
if not
-covered
+touches
%5Bterm%5D:%0A
@@ -1669,19 +1669,14 @@
-i%5B%22
should
-%22%5D
.app
@@ -1711,26 +1711,170 @@
i%5B%22
-covered%22%5D = covere
+touches%22%5D = touches%0A i%5B%22edgesMentioned%22%5D = edgesMentioned%0A i%5B%22nodesMentioned%22%5D = nodesMentioned%0A i%5B%22must%22%5D = must%0A i%5B%22should%22%5D = shoul
d%0A
|
9378ee0d414321bd557b478ffb6725ee899bc9b0
|
simplify code and add comment
|
TaskList/FileInfo/FileInfo.py
|
TaskList/FileInfo/FileInfo.py
|
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage blender file info'''
import xml.etree.ElementTree as xmlMod
from TaskList.FileInfo.Scene import *
from usefullFunctions import XML
import os
class FileInfo:
'''class to manage blender file info'''
def __init__(self, xml):
'''initialize blender file info with default settings or saved settings'''
self.fromXml(xml)
def fromXml(self, xml):
'''initialize blender file info with savedd settings'''
self.active = XML.decode(xml.get('active'))
self.scenes = {}
for scene in xml.findall('scene'):
self.scenes[scene.get('name')] = Scene(scene)
def toXml(self):
'''export blender file info into xml syntaxed string'''
xml = ' <fileInfo active="'+XML.encode(self.active)+'">\n'
for scene in self.scenes.values():
xml += scene.toXml()
xml += ' </fileInfo>\n'
return xml
def sceneChoice(self, log):
'''choose between render the active scene or all the scene'''
scenes = len(self.scenes)
if scenes == 0:
log.error(' no scene in this file… Abort')
return None
if scenes == 1:
log.write(' Only one scene in file. All scene will be rendered.')
return True
log.menuIn('Scene Choice')
while True:
choice = input('there is '+str(scenes)+''' scenes in this file. Do you want to:
1- Render all scenes
2- Render active scene «'''+self.active+'''»
0- Abort''').strip().lower()
if choice in [ '', 'q', '0' ]:
log.menuOut()
log.write(' Abort task adding')
return None
elif choice == '1':
log.menuOut()
log.write(' Set to render all task scene')
return True
elif choice == '2':
log.menuOut()
log.write(' Set to render task active scene only')
return False
else:
log.error('unvalid choice')
|
Python
| 0.000001
|
@@ -1010,24 +1010,54 @@
.scenes)%0A%09%09%0A
+%09%09# can't add empty task file%0A
%09%09if scenes
@@ -1127,16 +1127,77 @@
None%0A%09%09%0A
+%09%09# no need to choose if there is only one scene in the file%0A
%09%09if sce
@@ -1241,43 +1241,25 @@
ene
-in file. All scene will be rendered
+to render in file
.')%0A
@@ -1276,16 +1276,36 @@
True%0A%09%09%0A
+%09%09# get user choice%0A
%09%09log.me
@@ -1524,16 +1524,48 @@
r()%0A%09%09%09%0A
+%09%09%09# quit and abort task adding%0A
%09%09%09if ch
@@ -1660,26 +1660,59 @@
urn None%0A%09%09%09
-el
+%0A%09%09%09# quit and render all scene%0A%09%09%09
if choice ==
@@ -1802,18 +1802,59 @@
True%0A%09%09%09
-el
+%0A%09%09%09# quit and render only active scene%0A%09%09%09
if choic
@@ -1961,15 +1961,9 @@
%0A%09%09%09
-else:%0A%09
+%0A
%09%09%09l
|
87d026455424be83346019e0a6e75c16810ccc39
|
add some debug messages for superuser auth/acl
|
backend/src/gosa/backend/plugins/mqtt/mosquitto_auth.py
|
backend/src/gosa/backend/plugins/mqtt/mosquitto_auth.py
|
# This file is part of the GOsa project.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import logging
from gosa.backend.utils import BackendTypes
from gosa.common import Environment
from gosa.backend.utils.ldap import check_auth
import paho.mqtt.client as mqtt
from gosa.common.components import PluginRegistry
from gosa.common.hsts_request_handler import HSTSRequestHandler
class BaseMosquittoClass(HSTSRequestHandler):
def __init__(self, application, request, **kwargs):
super(BaseMosquittoClass, self).__init__(application, request, **kwargs)
self.env = Environment.getInstance()
self.log = logging.getLogger(__name__)
self.superuser = self.env.config.get("mqtt.superuser")
def initialize(self):
self.set_header('Content-Type', 'text/plain')
self.set_header('Cache-Control', 'no-cache')
def send_result(self, result):
if result is True:
self.set_status(200)
else:
self.set_status(403)
self.finish('')
def check_xsrf_cookie(self): # pragma: nocover
pass
def data_received(self, chunk): # pragma: nocover
pass
class MosquittoAuthHandler(BaseMosquittoClass):
"""
Handles Mosquitto auth plugins http authentification requests and checks them against ldap
"""
def post(self, *args, **kwargs):
username = self.get_argument('username', '')
if self.superuser is not None and username == self.superuser:
self.send_result(True)
else:
password = self.get_argument('password')
# backend self authentification mode
is_backend = PluginRegistry.getInstance("BackendRegistry").check_auth(username, password)
is_allowed = is_backend or check_auth(username, password)
self.log.debug("MQTT AUTH request from '%s' ['%s'] => %s" %
(username, "backend" if is_backend else "client", "GRANTED" if is_allowed else "DENIED"))
self.send_result(is_allowed)
class MosquittoAclHandler(BaseMosquittoClass):
"""
Handles Mosquitto auth plugins http authorization (ACL) requests
"""
def post(self, *args, **kwargs):
"""
Handle incoming acl post request from the mosquitto auth plugin.
Available parameters are:
username: current username
topic: mqtt topic
clientid: client id
acc: (1 == subscribe, 2 == publish)
"""
uuid = self.get_argument('username', '')
topic = self.get_argument('topic')
# 1 == SUB, 2 == PUB
acc = self.get_argument('acc')
backend_type = PluginRegistry.getInstance("BackendRegistry").get_type(uuid)
client_channel = "%s/client/%s" % (self.env.domain, uuid)
event_channel = "%s/events" % self.env.domain
if backend_type is not None:
client_channel = "%s/client/+" % self.env.domain
if topic == event_channel:
# backend can publish/subscribe to event channel
is_allowed = True
elif topic == "%s/client/broadcast" % self.env.domain:
# backend can publish/subscribe on client broadcast channel
is_allowed = True
elif topic == "%s/client/#" % self.env.domain:
# proxy can publish/subscribe on all client subtopics
is_allowed = backend_type == BackendTypes.proxy
elif topic == "%s/proxy" % self.env.domain:
# proxy and backend can publish/subscribe on /proxy topic
is_allowed = True
elif mqtt.topic_matches_sub(client_channel, topic):
# backend can publish/subscribe (send ClientPoll, receive ClientPing)
is_allowed = True
elif topic.startswith("%s/client/" % self.env.domain) and topic.endswith("/request"):
# the temporary RPC request channel: backend can send
is_allowed = acc == "2"
elif topic.startswith("%s/client/" % self.env.domain) and topic.endswith("/response"):
# the temporary RPC response channel: backend can receive
is_allowed = acc == "1"
elif topic.startswith("%s/proxy/" % self.env.domain) and topic.endswith("/request"):
# the temporary RPC request channel from proxy: backend can receive, proxy can publish
if backend_type == BackendTypes.proxy:
is_allowed = acc == "2"
else:
is_allowed = acc == "1"
elif topic.startswith("%s/proxy/" % self.env.domain) and topic.endswith("/response"):
# the temporary RPC response channel to proxy: backend can publish, proxy can receive
if backend_type == BackendTypes.proxy:
is_allowed = acc == "1"
else:
is_allowed = acc == "2"
else:
is_allowed = False
else:
if topic == event_channel:
# global event topic -> check acls
acl = PluginRegistry.getInstance("ACLResolver")
topic = ".".join([self.env.domain, 'event'])
is_allowed = acl.check(uuid, topic, "x")
elif topic == "%s/client/broadcast" % self.env.domain:
# client can listen on client broadcast channel
is_allowed = acc == "1"
elif topic == client_channel:
# client can do both on own channel
is_allowed = True
elif topic.startswith("%s/client/" % self.env.domain) and topic.endswith("/request"):
# the temporary RPC request channel: client can subscribe
is_allowed = acc == "1"
elif topic.startswith("%s/client/" % self.env.domain) and topic.endswith("/response"):
# the temporary RPC response channel: client can publish
is_allowed = acc == "2"
else:
is_allowed = False
self.log.debug("MQTT ACL request: '%s'|->%s from '%s' ['%s'] => %s" %
(topic, "PUB" if acc == "2" else "SUB" if acc == "1" else "BOTH" if acc == "0" else "UNKOWN",
uuid, backend_type if backend_type is not None else "client", "GRANTED" if is_allowed else "DENIED"))
self.send_result(is_allowed)
class MosquittoSuperuserHandler(BaseMosquittoClass):
"""
Handles Mosquitto auth plugins http superuser authentication requests
"""
def post(self, *args, **kwargs):
if self.superuser is not None:
self.send_result(self.get_argument('username', '') == self.superuser)
else:
self.send_result(False)
|
Python
| 0
|
@@ -1597,16 +1597,108 @@
eruser:%0A
+ self.log.debug(%22MQTT AUTH request from '%25s' %5BSUPERUSER%5D =%3E GRANTED%22 %25 username)%0A
@@ -6879,91 +6879,425 @@
-self.send_result(self.get_argument('username', '') == self.superuser)%0A else:
+is_allowed = self.get_argument('username', '') == self.superuser%0A self.log.debug(%22MQTT Superuser ACL request for '%25s': %25s%22 %25 (self.get_argument('username', ''), %22GRANTED%22 if is_allowed else %22DENIED%22))%0A self.send_result(self.get_argument('username', '') == self.superuser)%0A else:%0A self.log.debug(%22MQTT Superuser ACL request for '%25s': DENIED%22 %25 self.get_argument('username', ''))
%0A
|
3dc58e0467a1be86208fffd08097652c6fe46f94
|
Rename Tor to control_agent
|
ooni/templates/httpt.py
|
ooni/templates/httpt.py
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
import copy
import random
import struct
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.internet import protocol, defer
from twisted.internet.ssl import ClientContextFactory
from twisted.internet import reactor
from twisted.internet.error import ConnectionRefusedError
from twisted.web._newclient import Request
from twisted.web.http_headers import Headers
from ooni.nettest import NetTestCase
from ooni.utils import log
from ooni import config
from ooni.utils.net import BodyReceiver, StringProducer, userAgents
from ooni.lib.txagentwithsocks import Agent, SOCKSError
class HTTPTest(NetTestCase):
"""
A utility class for dealing with HTTP based testing. It provides methods to
be overriden for dealing with HTTP based testing.
The main functions to look at are processResponseBody and
processResponseHeader that are invoked once the headers have been received
and once the request body has been received.
To perform requests over Tor you will have to use the special URL schema
"shttp". For example to request / on example.com you will have to do
specify as URL "shttp://example.com/".
XXX all of this requires some refactoring.
"""
name = "HTTP Test"
version = "0.1.1"
randomizeUA = True
followRedirects = False
baseParameters = [['socksproxy', 's', None,
'Specify a socks proxy to use for requests (ip:port)']]
request = {}
response = {}
def _setUp(self):
log.debug("Setting up HTTPTest")
try:
import OpenSSL
except:
log.err("Warning! pyOpenSSL is not installed. https websites will"
"not work")
self.control_agent = Agent(reactor, sockshost="127.0.0.1",
socksport=config.advanced.tor_socksport)
sockshost, socksport = (None, None)
if self.localOptions['socksproxy']:
self.report['socksproxy'] = self.localOptions['socksproxy']
sockshost, socksport = self.localOptions['socksproxy'].split(':')
socksport = int(socksport)
self.agent = Agent(reactor, sockshost=sockshost,
socksport=socksport)
if self.followRedirects:
try:
from twisted.web.client import RedirectAgent
self.control_agent = RedirectAgent(self.control_agent)
self.agent = RedirectAgent(self.agent)
except:
log.err("Warning! You are running an old version of twisted"\
"(<= 10.1). I will not be able to follow redirects."\
"This may make the testing less precise.")
self.report['errors'].append("Could not import RedirectAgent")
self.processInputs()
log.debug("Finished test setup")
def processInputs(self):
pass
def _processResponseBody(self, response_body, request, response, body_processor):
log.debug("Processing response body")
self.report['requests'].append({
'request': {
'headers': request['headers'],
'body': request['body'],
'url': request['url'],
'method': request['method']
},
'response': {
'headers': list(response.headers.getAllRawHeaders()),
'body': response_body,
'code': response.code
}
})
if body_processor:
body_processor(response_body)
else:
self.processResponseBody(response_body)
def processResponseBody(self, data):
"""
This should handle all the response body smushing for getting it ready
to be passed onto the control.
@param data: The content of the body returned.
"""
pass
def processResponseHeaders(self, headers):
"""
This should take care of dealing with the returned HTTP headers.
@param headers: The content of the returned headers.
"""
pass
def processRedirect(self, location):
"""
Handle a redirection via a 3XX HTTP status code.
@param location: the url that is being redirected to.
"""
pass
def doRequest(self, url, method="GET",
headers={}, body=None, headers_processor=None,
body_processor=None, use_tor=False):
"""
Perform an HTTP request with the specified method.
url: the full url path of the request
method: the HTTP Method to be used
headers: the request headers to be sent as a dict
body: the request body
headers_processor: a function to be used for processing the HTTP header
responses (defaults to self.processResponseHeaders).
This function takes as argument the HTTP headers as a
dict.
body_processory: a function to be used for processing the HTTP response
body (defaults to self.processResponseBody).
This function takes the response body as an argument.
"""
# We prefix the URL with 's' to make the connection go over the
# configured socks proxy
if use_tor:
log.debug("Using tor for the request")
url = 's'+url
agent = self.tor_agent
else:
agent = self.agent
if self.localOptions['socksproxy']:
log.debug("Using SOCKS proxy %s for request" % (self.localOptions['socksproxy']))
url = 's'+url
log.debug("Performing request %s %s %s" % (url, method, headers))
request = {}
request['method'] = method
request['url'] = url
request['headers'] = headers
request['body'] = body
if self.randomizeUA:
log.debug("Randomizing user agent")
self.randomize_useragent(request)
log.debug("Writing to report the request")
if 'requests' not in self.report:
self.report['requests'] = []
# If we have a request body payload, set the request body to such
# content
if body:
body_producer = StringProducer(request['body'])
else:
body_producer = None
headers = Headers(request['headers'])
def errback(failure):
failure.trap(ConnectionRefusedError, SOCKSError)
if type(failure.value) is ConnectionRefusedError:
log.err("Connection refused. The backend may be down")
else:
log.err("Sock error. The SOCK proxy may be down")
self.report["failure"] = str(failure.value)
def finished(data):
return
d = agent.request(request['method'], request['url'], headers,
body_producer)
d.addErrback(errback)
d.addCallback(self._cbResponse, request, headers_processor, body_processor)
d.addCallback(finished)
return d
def _cbResponse(self, response, request, headers_processor,
body_processor):
if not response:
log.err("Got no response")
return
else:
log.debug("Got response %s" % response)
if str(response.code).startswith('3'):
self.processRedirect(response.headers.getRawHeaders('Location')[0])
# [!] We are passing to the headers_processor the headers dict and
# not the Headers() object
response_headers_dict = list(response.headers.getAllRawHeaders())
if headers_processor:
headers_processor(response_headers_dict)
else:
self.processResponseHeaders(response_headers_dict)
finished = defer.Deferred()
response.deliverBody(BodyReceiver(finished))
finished.addCallback(self._processResponseBody, request,
response, body_processor)
return finished
def randomize_useragent(self, request):
user_agent = random.choice(userAgents)
request['headers']['User-Agent'] = [user_agent]
|
Python
| 0.000004
|
@@ -5419,19 +5419,29 @@
(%22Using
-tor
+control agent
for the
@@ -5502,19 +5502,23 @@
= self.
-tor
+control
_agent%0A
|
b0f25b7263a42fbd1e90cf7ebe3dcba50f9cfe42
|
use the correct class name
|
amiconfig/plugins/rmakeserver.py
|
amiconfig/plugins/rmakeserver.py
|
#
# Copyright (c) 2008 rPath, Inc.
#
import os
from rmakeplugin import rMakePlugin
class rMakeServer(rMakePlugin):
name = 'rmakeserver'
def pluginMethod(self):
self._setupProxy()
self._setuprBuilder()
self._setupRepoUrl()
def _setupProxy(self):
proxycfg = '/etc/rmake/server.d/proxy'
if 'conaryproxy' in self.rmakecfg:
proxy = self.rmakecfg['conaryproxy']
else:
host = self.id.getLocalHostname()
fh = open(proxycfg, 'w')
fh.write('proxy http://%s:7778/\n' % host)
def _setuprBuilder(self):
rbuildercfg = '/etc/rmake/server.d/rbuilder'
if 'rbuilderurl' in self.rmakecfg:
fh = open(rbuildercfg, 'w')
fh.write('rbuilderUrl %s\n' % self.rmakecfg['rbuilderurl'])
def _setupRepoUrl(self):
repourlcfg = '/etc/rmake/server.d/serverurl'
if 'serverurl' in self.rmakecfg:
url = self.rmakecfg['serverurl']
else:
url = 'http://%s/conary/' % self.id.getLocalHostname()
fh = open(repourlcfg, 'w')
fh.write('serverUrl %s\n' % url)
|
Python
| 0.999948
|
@@ -89,19 +89,23 @@
ass
-rMakeServer
+AMIConfigPlugin
(rMa
|
28b535ef3db6eb79b6cb0f0c86e30b78899d31d9
|
Rewrite .current_translation using a window function.
|
amnesia/translations/__init__.py
|
amnesia/translations/__init__.py
|
# -*- coding: utf-8 -*-
import logging
from pyramid.i18n import default_locale_negotiator
from pyramid.threadlocal import get_current_registry
from pyramid.threadlocal import get_current_request
from sqlalchemy import orm
from sqlalchemy import sql
from sqlalchemy import event
from sqlalchemy.types import String
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.ext.hybrid import hybrid_property
from amnesia.modules.content import Content
log = logging.getLogger(__name__)
def _localizer(request=None):
if not request:
request = get_current_request()
return request.locale_name
def setup_translation(content_cls, translation_cls, localizer=None, **kwargs):
'''Helper to setup translations'''
log.debug('Adding translation properties: %s to %s', content_cls,
translation_cls)
if not localizer:
localizer = _localizer
content_mapper = orm.class_mapper(content_cls)
translation_mapper = orm.class_mapper(translation_cls)
content_mapper.add_properties({
'current_translation': orm.relationship(
lambda: translation_cls,
primaryjoin=lambda: sql.and_(
# XXX: use base_mapper.class_
# pylint: disable=no-member
orm.foreign(translation_cls.content_id) == Content.id,
translation_cls.language_id == sql.bindparam(
None,
callable_=lambda: localizer(),
type_=String()
)
),
#lazy='joined',
uselist=False,
innerjoin=True,
viewonly=True,
bake_queries=False,
back_populates='content'
),
'translations': orm.relationship(
lambda: translation_cls,
cascade='all, delete-orphan',
lazy='subquery',
innerjoin=True,
back_populates='content',
collection_class=attribute_mapped_collection('language_id')
)
})
translation_mapper.add_properties({
'content': orm.relationship(
lambda: content_cls,
back_populates='translations',
innerjoin=True,
uselist=False
),
})
def make_hybrid(cls, name, translation_cls):
@hybrid_property
def _column(self):
locale_name = _localizer()
try:
return getattr(self.translations[locale_name], name)
except KeyError:
return getattr(self.translations['en'], name)
@_column.setter
def _column(self, value):
locale_name = _localizer()
trans = self.translations.setdefault(
locale_name, translation_cls(language_id=locale_name)
)
setattr(trans, name, value)
#@_column.expression
#def _column(cls):
# return cls.current_translation.has()
_column.__name__ = name
return _column
_TRANSLATIONS_KEY = 'amnesia.translations'
def _setup_translation():
log.info('SQLAlchemy after_configured handler _setup_translation called')
registry = get_current_registry()
if _TRANSLATIONS_KEY not in registry:
return
_cfg = registry[_TRANSLATIONS_KEY]
if 'mappings' in _cfg:
for cls, tr_cls in _cfg['mappings'].items():
setup_translation(cls, tr_cls)
if 'attrs' in _cfg:
for cls, cols in _cfg['attrs'].items():
translation_cls = _cfg['mappings'][cls]
for col in cols:
log.debug('Adding hybrid attribute: %s.%s', cls, col)
setattr(cls, col, make_hybrid(cls, col, translation_cls))
def set_translatable_attrs(config, cls, cols):
_attrs = config.registry.\
setdefault(_TRANSLATIONS_KEY, {}).\
setdefault('attrs', {})
_attrs[cls] = cols
def set_translatable_mapping(config, cls, trans_cls):
_mappings = config.registry.\
setdefault(_TRANSLATIONS_KEY, {}).\
setdefault('mappings', {})
_mappings[cls] = trans_cls
def my_locale_negotiator(request):
return default_locale_negotiator(request)
def includeme(config):
event.listen(orm.mapper, 'after_configured', _setup_translation)
config.add_directive('set_translatable_attrs', set_translatable_attrs)
config.add_directive('set_translatable_mapping', set_translatable_mapping)
config.set_locale_negotiator(my_locale_negotiator)
config.add_translation_dirs('amnesia:locale/')
config.add_tween('amnesia.translations.tweens.path_info_lang_tween')
|
Python
| 0
|
@@ -1027,257 +1027,341 @@
-content_mapper.add_properties(%7B%0A 'current_translation': orm.relationship(%0A lambda: translation_cls,%0A primaryjoin=lambda: sql.and_(%0A # XXX: use base_mapper.class_%0A # pylint: disable=no-member
+partition = sql.select(%5B%0A translation_cls,%0A sql.func.row_number().over(%0A order_by=%5B%0A sql.desc(translation_cls.language_id == sql.bindparam(%0A None, callable_=lambda: localizer(), type_=String()%0A )),%0A sql.desc(translation_cls.language_id == 'en')
%0A
@@ -1373,24 +1373,36 @@
+%5D,%0A
-orm.foreign(
+ partition_by=
tran
@@ -1427,41 +1427,91 @@
t_id
-) == Content.id,%0A
+%0A ).label('index')%0A %5D, use_labels=True).where(%0A sql.and_(%0A
tran
@@ -1498,32 +1498,36 @@
l.and_(%0A
+
translation_cls.
@@ -1529,35 +1529,54 @@
_cls.language_id
- ==
+.in_((%0A
sql.bindparam(%0A
@@ -1604,112 +1604,460 @@
one,
-%0A callable_=lambda: localizer(),%0A type_=String()%0A )
+ callable_=lambda: localizer(), type_=String()%0A ),%0A 'en'%0A ))%0A )%0A ).alias()%0A%0A partition_alias = orm.aliased(translation_cls, partition)%0A%0A content_mapper.add_properties(%7B%0A 'current_translation': orm.relationship(%0A partition_alias,%0A primaryjoin=sql.and_(%0A orm.foreign(partition_alias.content_id) == content_cls.id,%0A partition.c.index == 1,
%0A
@@ -2080,17 +2080,16 @@
-#
lazy='jo
@@ -2213,32 +2213,33 @@
se,%0A
+#
back_populates='
@@ -2388,24 +2388,25 @@
+#
lazy='subque
@@ -2895,194 +2895,61 @@
-locale_name = _localizer()%0A try:%0A return getattr(self.translations%5Blocale_name%5D, name)%0A except KeyError:%0A return getattr(self.translations%5B'en'%5D, name
+return getattr(self.current_translation, name, 'NONE'
)%0A%0A
|
ba31be554d3cc4fd51b7189434071596143b686c
|
add audio.load.readrecf
|
hvc/audio/load.py
|
hvc/audio/load.py
|
import numpy as np
def read_cbin(filename):
"""
loads .cbin files output by EvTAF
"""
data = np.fromfile(filename,dtype=">d") # ">d" means big endian, double
return data
def readrecf(filename):
"""
reads .rec files output by EvTAF
"""
|
Python
| 0
|
@@ -243,28 +243,2639 @@
les output by EvTAF%0A %22%22%22%0A
+%0A rec_dict = %7B%7D%0A with open(filename,'r') as recfile:%0A line_tmp = %22%22%0A while 1:%0A if line_tmp == %22%22:%0A line = recfile.readline()%0A else:%0A line = lime_tmp%0A line_tmp = %22%22%0A %0A if line == %22%22: # if End Of File%0A break%0A elif line == %22%5Cn%22: # if blank line%0A continue%0A elif %22Catch%22 in line:%0A ind = line.find('=')%0A rec_dict%5B'iscatch'%5D = line%5Bind+1:%5D%0A elif %22Chans%22 in line:%0A ind = line.find('=')%0A rec_dict%5B'num_channels'%5D = int(line%5Bind+1:%5D)%0A elif %22ADFREQ%22 in line:%0A ind = line.find('=')%0A rec_dict%5B'sample_freq'%5D = int(line%5Bind+1:%5D)%0A elif %22Samples%22 in line:%0A ind = line.find('=')%0A rec_dict%5B'num_samples'%5D = int(line%5Bind+1:%5D)%0A elif %22T after%22 in line:%0A ind = line.find('=')%0A rec_dict%5B'time_after'%5D = float(line%5Bind+1:%5D)%0A elif %22T Before%22 in line:%0A ind = line.find('=')%0A rec_dict%5B'time before'%5D = float(line%5Bind+1:%5D)%0A elif %22Output Sound File%22 in line:%0A ind = line.find('=')%0A rec_dict%5B'outfile'%5D = int(line%5Bind+1:%5D)%0A elif %22thresholds%22 in line:%0A th_list = %5B%5D%0A while 1:%0A line = recfile.line()%0A if line == %22%22:%0A break%0A try:%0A th_list.append(float)%0A except ValueError: # because we reached next section%0A line_tmp = line%0A break%0A rec_dict%5B'thresholds'%5D = th_list%0A if line = %22%22:%0A break%0A elif %22feedback information%22 in line:%0A fb_dict = %7B%7D%0A while 1:%0A line = recfile.readline()%0A if line = %22%22:%0A break%0A elif line = %22%5Cn%22:%0A continue%0A ind = line.find(%22msec%22)%0A time = float(line%5B:ind-1%5D)%0A ind = line.find(%22:%22)%0A fb_type = line%5Bind+2:%5D%0A fb_dict%5Btime%5D = fb_type%0A rec_dict%5B'feedback_info'%5D = fb_dict%0A if line = %22%22:%0A break%0A elif %22trigger times%22 in line:%0A pass%0A elif %22file created%22 in line:%0A pass%0A return rec_dict%0A
|
8d1e13301de96eb2a4caee2e53b7691ee4d7dab5
|
add some array functions
|
benchmarks/__init__.py
|
benchmarks/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2012 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
import time
def run(load_gi, backend=None):
if not load_gi:
import pgi
pgi.install_as_gi()
try:
pgi.set_backend(backend)
except LookupError:
print("Couldn't load backend: %r" % backend)
return
import gi
if load_gi:
assert gi.__name__ == "gi"
hl = "### GI " + "#" * 100
else:
assert gi.__name__ == "pgi"
if backend:
hl = "### PGI (%s) " % backend + "#" * 100
else:
hl = "### PGI " + "#" * 100
print(hl[:80])
t = time.time()
from gi.repository import Gtk, GObject, GLib, Gio, Pango, Gdk
GLib = GLib
Gio = Gio
Pango = Pango
Gdk = Gdk
t = time.time() - t
print("%20s: %6.2f ms" % ("import", t * (10 ** 3)))
def bench_func(n):
times = []
for i in xrange(n):
t = time.time()
Gtk.get_current_event_time()
Gtk.rc_get_theme_dir()[:]
t = time.time() - t
times.append(t)
return times
def bench_gvalue(n):
times = []
b = Gtk.Button()
for i in xrange(n):
t = time.time()
value = GObject.Value()
value.init(GObject.TYPE_INT)
value.set_int(42)
value.get_int()
value.unset()
value = GObject.Value()
value.init(GObject.TYPE_STRING)
value.set_string("foobar")
value.get_string()
value.unset()
value = GObject.Value()
value.init(GObject.TYPE_OBJECT)
value.set_object(b)
value.get_object()
value.unset()
t = time.time() - t
times.append(t)
return times
def bench_object(n):
times = []
for i in xrange(n):
t = time.time()
w = Gtk.Window()
w.props.title = "this"
t = time.time() - t
times.append(t)
return times
def bench_method(n):
times = []
b = Gtk.Button()
for i in xrange(n):
t = time.time()
b.set_name("foobar")
b.get_name()
b.set_relief(Gtk.ReliefStyle.NORMAL)
b.get_relief()
b.set_use_stock(True)
b.get_use_stock()
b.set_alignment(0.2, 0.4)
b.get_alignment()
t = time.time() - t
times.append(t)
return times
def torture_signature_0(rounds):
test = Regress.TestObj()
func = test.torture_signature_0
times = []
for i in xrange(rounds):
t0 = time.time()
func(5000, "foobar", 12345)
times.append(time.time() - t0)
return times
def torture_signature_1(rounds):
test = Regress.TestObj()
func = test.torture_signature_1
times = []
for i in xrange(rounds):
t0 = time.time()
func(5000, "foobar", 12344)
times.append(time.time() - t0)
return times
def torture_signature_1e(rounds):
test = Regress.TestObj()
func = test.torture_signature_1
times = []
for i in xrange(rounds):
t0 = time.time()
try:
func(5000, "foobar", 12345)
except:
pass
times.append(time.time() - t0)
return times
bench = [
(bench_func, 100000),
(bench_method, 100000),
(bench_gvalue, 10000),
(bench_object, 10000),
]
try:
from gi.repository import Regress
except ImportError:
pass
else:
bench.extend([
(torture_signature_0, 10000),
(torture_signature_1, 10000),
(torture_signature_1e, 10000),
])
for b, n in bench:
min_time = min(b(n))
print("%20s: %6.2f µs" % (b.__name__, min_time * (10 ** 6)))
|
Python
| 0.000034
|
@@ -3742,24 +3742,655 @@
turn times%0A%0A
+ def bench_arrays(rounds):%0A times = %5B%5D%0A for i in xrange(rounds):%0A t0 = time.time()%0A GIMarshallingTests.array_fixed_int_return()%0A GIMarshallingTests.array_fixed_short_return()%0A GIMarshallingTests.array_fixed_int_in(%5B-1, 0, 1, 2%5D)%0A GIMarshallingTests.array_fixed_out()%0A GIMarshallingTests.array_fixed_inout(%5B-1, 0, 1, 2%5D)%0A GIMarshallingTests.array_return()%0A GIMarshallingTests.array_return_etc(5, 9)%0A GIMarshallingTests.array_string_in(%5B'foo', 'bar'%5D)%0A times.append(time.time() - t0)%0A return times%0A%0A
bench =
@@ -4778,24 +4778,203 @@
%5D)%0A%0A
+ try:%0A from gi.repository import GIMarshallingTests%0A except ImportError:%0A pass%0A else:%0A bench.extend(%5B%0A (bench_arrays, 10000),%0A %5D)%0A%0A
for b, n
|
4b46c07b795e3e16c16a8897ac42a0755e88c213
|
Use trial logging.
|
analysis/sanity-count-markers.py
|
analysis/sanity-count-markers.py
|
#!/usr/bin/env python
import climate
import collections
import joblib
import lmj.cubes
import lmj.plot
import numpy as np
logging = climate.get_logger('count')
def count(trial):
trial.load()
trial.mask_dropouts()
total = len(trial.df)
markers = {m: trial.df[m + '-c'].count() / total for m in trial.marker_columns}
full = len(trial.df[[m + '-c' for m in markers]].dropna(axis=0))
logging.info('%s %s %s: %d rows, %d full (%.1f%%)',
trial.subject.key, trial.block.key, trial.key,
total, full, 100 * full / total)
return markers
PERCENTILES = [1, 2, 5, 10, 20, 50, 80, 90, 95, 98, 99]
def main(root):
trials = lmj.cubes.Experiment(root).trials_matching('*')
counts = collections.defaultdict(int)
percents = collections.defaultdict(list)
f = joblib.delayed(count)
for markers in joblib.Parallel(-1)(f(t) for t in trials):
for m in markers:
counts[m] += markers[m] > 0.1
percents[m].append(markers[m])
for m, c in counts.items():
print(m, c, *np.percentile(percents[m], PERCENTILES), sep='\t')
return
with lmj.plot.axes(spines=True) as ax:
for m, values in percents.items():
ax.hist(values, bins=np.linspace(0, 1, 127), alpha=0.5, lw=0, label=m[9:])
ax.legend(ncol=3, loc=0)
if __name__ == '__main__':
climate.call(main)
|
Python
| 0
|
@@ -405,32 +405,19 @@
-logging.info('%25s %25s %25s:
+trial.log('
%25d r
@@ -443,89 +443,8 @@
%25)',
-%0A trial.subject.key, trial.block.key, trial.key,%0A
tot
|
8c651899be8eab478d0cc6da22f695ecd3b33313
|
Add parents
|
anybox/recipe/openerp/vcs/git.py
|
anybox/recipe/openerp/vcs/git.py
|
import os
import subprocess
import logging
from ..utils import working_directory_keeper
from .base import BaseRepo
from .base import SUBPROCESS_ENV
logger = logging.getLogger(__name__)
class GitRepo(BaseRepo):
"""Represent a Git clone tied to a reference branch."""
vcs_control_dir = '.git'
def uncommitted_changes(self):
"""True if we have uncommitted changes."""
os.chdir(self.target_dir)
p = subprocess.Popen(['git', 'status', '--short'],
stdout=subprocess.PIPE, env=SUBPROCESS_ENV)
return bool(p.communicate()[0])
def get_update(self, revision):
"""Ensure that target_dir is a branch of url at specified revision.
If target_dir already exists, does a simple pull.
Offline-mode: no branch nor pull, but update.
"""
target_dir = self.target_dir
url = self.url
offline = self.offline
rev_str = revision
with working_directory_keeper:
if not os.path.exists(target_dir):
# TODO case of local url ?
if offline:
raise IOError(
"git repository %s does not exist; cannot clone it "
"from %s (offline mode)" % (target_dir, url))
os.chdir(os.path.split(target_dir)[0])
logger.info("Cloning %s ...", url)
subprocess.check_call(['git', 'clone', '-b',
rev_str, url, target_dir])
else:
os.chdir(target_dir)
# TODO what if remote repo is actually local fs ?
if not offline:
logger.info("Pull for git repo %s (rev %s)...",
target_dir, rev_str)
subprocess.check_call(['git', 'pull', url])
if revision:
logger.info("Checkout %s to revision %s",
target_dir, revision)
subprocess.check_call(['git', 'checkout', rev_str])
|
Python
| 0.000069
|
@@ -298,16 +298,309 @@
'.git'%0A%0A
+ def parents(self):%0A %22%22%22Return full hash of parent nodes. %22%22%22%0A os.chdir(self.target_dir)%0A p = subprocess.Popen(%5B'git', 'rev-parse', '--verify', 'HEAD'%5D,%0A stdout=subprocess.PIPE, env=SUBPROCESS_ENV)%0A return p.communicate()%5B0%5D.split()%0A%0A
def
|
be291475601657cbcd3903679c77c2860b543308
|
fix doc
|
deepchem/feat/tests/test_dummy_featurizer.py
|
deepchem/feat/tests/test_dummy_featurizer.py
|
import unittest
import deepchem as dc
import numpy as np
class TestDummyFeaturizer(unittest.TestCase):
"""
Test for DummyFeaturizer.
"""
def test_featurize(self):
"""
Test the featurize method on a list of inputs.
"""
input_array = np.array([[
"N#C[S-].O=C(CBr)c1ccc(C(F)(F)F)cc1>CCO.[K+]",
"N#CSCC(=O)c1ccc(C(F)(F)F)cc1"
], [
"C1COCCN1.FCC(Br)c1cccc(Br)n1>CCN(C(C)C)C(C)C.CN(C)C=O.O",
"FCC(c1cccc(Br)n1)N1CCOCC1"
]])
featurizer = dc.feat.DummyFeaturizer()
out = featurizer.featurize(input_array)
assert (type(out) == np.ndarray)
assert (out.shape == input_array.shape)
|
Python
| 0.000001
|
@@ -104,18 +104,16 @@
:%0A %22%22%22%0A
-
Test f
@@ -132,18 +132,16 @@
urizer.%0A
-
%22%22%22%0A%0A
@@ -175,20 +175,16 @@
%22%22%22%0A
-
Test
@@ -213,13 +213,15 @@
on a
- list
+n array
of
@@ -228,20 +228,16 @@
inputs.%0A
-
%22%22%22%0A
|
c8a97a33449eedc110169cb9b3f0120124d95e49
|
Add tiny test for ToPickle (#6021)
|
distributed/protocol/tests/test_to_pickle.py
|
distributed/protocol/tests/test_to_pickle.py
|
from typing import Dict
import dask.config
from dask.highlevelgraph import HighLevelGraph, MaterializedLayer
from distributed.client import Client
from distributed.protocol.serialize import ToPickle
from distributed.utils_test import gen_cluster
class NonMsgPackSerializableLayer(MaterializedLayer):
"""Layer that uses non-msgpack-serializable data"""
def __dask_distributed_pack__(self, *args, **kwargs):
ret = super().__dask_distributed_pack__(*args, **kwargs)
# Some info that contains a `list`, which msgpack will convert to
# a tuple if getting the chance.
ret["myinfo"] = ["myinfo"]
return ToPickle(ret)
@classmethod
def __dask_distributed_unpack__(cls, state, *args, **kwargs):
assert state["myinfo"] == ["myinfo"]
return super().__dask_distributed_unpack__(state, *args, **kwargs)
@gen_cluster(client=True)
async def test_non_msgpack_serializable_layer(c: Client, s, w1, w2):
with dask.config.set({"distributed.scheduler.allowed-imports": "test_to_pickle"}):
a = NonMsgPackSerializableLayer({"x": 42})
layers = {"a": a}
dependencies: Dict[str, set] = {"a": set()}
hg = HighLevelGraph(layers, dependencies)
res = await c.get(hg, "x", sync=False)
assert res == 42
|
Python
| 0
|
@@ -142,16 +142,62 @@
Client%0A
+from distributed.protocol import dumps, loads%0A
from dis
@@ -289,16 +289,234 @@
uster%0A%0A%0A
+def test_ToPickle():%0A class Foo:%0A def __init__(self, data):%0A self.data = data%0A%0A msg = %7B%22x%22: ToPickle(Foo(123))%7D%0A frames = dumps(msg)%0A out = loads(frames)%0A assert out%5B%22x%22%5D.data == 123%0A%0A%0A
class No
|
63b20c15d3749fc60fcb7e1e43fbbc8832b50354
|
Alphabetize imports
|
django_graph_api/tests/graphql/test_types.py
|
django_graph_api/tests/graphql/test_types.py
|
import pytest
from unittest import mock
from django_graph_api.graphql.schema import Schema
from django_graph_api.graphql.types import Boolean, Float, Field, Int, String, List
schema = Schema()
def test_field_get_value_calls_coerce():
field = Field()
field.type_ = mock.Mock()
field.name = 'foo'
field.obj = mock.Mock()
field.obj.get_foo.return_value = 'bar'
field.get_value()
field.type_.coerce_result.assert_called_once_with('bar')
def test_boolean_coerce():
assert Boolean.coerce_result(1) is True
assert Boolean.coerce_result(None) is None
def test_int_coerce():
assert Int.coerce_result(True) is 1
assert Int.coerce_result(4.9) is 4
assert Int.coerce_result(None) is None
with pytest.raises(ValueError):
Int.coerce_result('2.0')
def test_float_coerce():
assert Float.coerce_result(-10) == -10.0
assert Float.coerce_result(None) is None
with pytest.raises(ValueError):
Float.coerce_result('abc')
def test_string_coerce():
assert String.coerce_result(True) == 'True'
assert String.coerce_result(4.9) == '4.9'
assert String.coerce_result(None) is None
def test_list_coerce():
assert List.coerce_result({True}) == [True]
assert List.coerce_result((1, 2, 3)) == [1, 2, 3]
assert List.coerce_result(None) is None
|
Python
| 0.999913
|
@@ -161,20 +161,20 @@
nt,
+List,
String
-, List
%0A%0Asc
|
288d02bccf08ff0498767aafca9bd37509213ec3
|
Update forms.py
|
djforms/communications/printrequest/forms.py
|
djforms/communications/printrequest/forms.py
|
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from localflavor.us.forms import USPhoneNumberField
from djforms.communications.printrequest.models import PrintRequest, FORMATS
class PrintRequestForm(forms.ModelForm):
phone = USPhoneNumberField(
label = "Phone number",
max_length=12,
required=True,
widget=forms.TextInput(attrs={'class': 'required phoneUS'})
)
print_format = forms.MultipleChoiceField(
label = "What is the format of your finished piece",
choices=FORMATS,
help_text="Check all that apply"
)
def clean(self):
cleaned_data = super(PrintRequestForm, self).clean()
is_mailing = cleaned_data.get("is_mailing")
who_mailing = cleaned_data.get("who_mailing")
how_mailing = cleaned_data.get("how_mailing")
speed_mailing = cleaned_data.get("speed_mailing")
if is_mailing == "Yes":
msg = "Required"
if who_mailing == "":
self._errors["who_mailing"] = self.error_class(["Required field."])
if how_mailing == "":
self._errors["how_mailing"] = self.error_class(["Required field."])
if speed_mailing == "":
self._errors["speed_mailing"] = self.error_class(["Required field."])
return cleaned_data
class Meta:
model = PrintRequest
widgets = {
'phone': forms.TextInput(attrs={
'placeholder': 'eg. 123-456-7890', 'class': 'phoneUS'
}),
}
exclude = (
'user','updated_by','date_created','date_updated'
)
|
Python
| 0
|
@@ -940,16 +940,152 @@
iling%22)%0D
+%0A %0D%0A print_format = cleaned_data.get(%22print_format%22)%0D%0A print_format_other = cleaned_data.get(%22print_format_other%22)%0D
%0A%0D%0A
@@ -1504,24 +1504,213 @@
ield.%22%5D)%0D%0A%0D%0A
+ if print_format == %22Other%22:%0D%0A if print_format_other == %22%22:%0D%0A self._errors%5B%22print_format_other%22%5D = self.error_class(%5B%22Required field.%22%5D)%0D%0A %0D%0A
retu
|
1b13a929122c2bcb7e524b39183610ac3e57f191
|
Mark Show.upcoming as @staticmethod
|
karspexet/show/models.py
|
karspexet/show/models.py
|
from django.db import models
import datetime
class Production(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
def __str__(self):
return self.name
class Show(models.Model):
production = models.ForeignKey(Production, on_delete=models.PROTECT)
date = models.DateTimeField()
venue = models.ForeignKey('venue.Venue', on_delete=models.PROTECT)
def upcoming():
return Show.objects.filter(date__gte=datetime.date.today())
def date_string(self):
return self.date.strftime("%Y-%m-%d %H:%M")
def __str__(self):
return self.production.name + " " + self.date_string()
class Meta:
ordering = ('date',)
|
Python
| 0
|
@@ -419,16 +419,34 @@
OTECT)%0A%0A
+ @staticmethod%0A
def
|
f79bb6e549c311e7b45bebba58e223826c713132
|
Fix ContenderSerializer
|
driver27/api.py
|
driver27/api.py
|
from .models import Competition, Contender, Driver, Race, Result, Season, Seat, Team
from rest_framework import routers, serializers, viewsets
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from django_countries.serializer_fields import CountryField
class DriverSerializer(serializers.HyperlinkedModelSerializer):
country = CountryField()
class Meta:
model = Driver
fields = ('url', 'last_name', 'first_name', 'year_of_birth', 'country', 'competitions')
class NestedDriverSerializer(DriverSerializer):
class Meta:
model = Driver
fields = ('url', 'last_name', 'first_name', 'year_of_birth', 'country')
class TeamSerializer(serializers.HyperlinkedModelSerializer):
country = CountryField()
class Meta:
model = Team
fields = ('url', 'name', 'full_name', 'competitions', 'country')
class NestedTeamSerializer(TeamSerializer):
class Meta:
model = Team
fields = ('url', 'name', 'full_name', 'country')
class ContenderSerializer(serializers.HyperlinkedModelSerializer):
driver = DriverSerializer(many=False)
teams = TeamSerializer(many=True)
class Meta:
model = Contender
fields = ('url', 'driver', 'competition', 'teams')
class NestedContenderSerializer(ContenderSerializer):
driver = NestedDriverSerializer(many=False)
class Meta:
model = Contender
fields = ('url', 'driver')
class SeatSerializer(serializers.HyperlinkedModelSerializer):
team = NestedTeamSerializer(many=False)
contender = NestedContenderSerializer(many=False)
class Meta:
model = Seat
fields = ('url', 'team', 'contender', 'current', 'seasons')
class NestedSeatSerializer(SeatSerializer):
class Meta:
model = Seat
fields = ('url', 'team', 'contender', 'current')
class ResultSerializer(serializers.HyperlinkedModelSerializer):
seat = SeatSerializer(many=False)
class Meta:
model = Result
fields = ('url', 'race', 'seat', 'qualifying', 'finish', 'fastest_lap', 'wildcard',
'retired', 'comment')
class NestedResultSerializer(ResultSerializer):
class Meta:
model = Result
fields = ('url', 'seat', 'qualifying', 'finish', 'fastest_lap', 'wildcard',
'retired', 'comment')
class CompetitionSerializer(serializers.HyperlinkedModelSerializer):
# https://github.com/SmileyChris/django-countries/issues/106
country = CountryField()
class Meta:
model = Competition
fields = ('url', 'name', 'full_name', 'country', 'slug')
class SeasonSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Season
fields = ('url', 'year', 'competition', 'rounds', 'punctuation', 'races')
class NestedSeasonSerializer(SeasonSerializer):
competition = CompetitionSerializer(many=False)
class Meta:
model = Season
fields = ('url', 'year', 'competition', 'rounds', 'punctuation')
class RaceSerializer(serializers.HyperlinkedModelSerializer):
# results = NestedResultSerializer(many=True)
season = NestedSeasonSerializer(many=False)
class Meta:
model = Race
fields = ('url', 'season', 'round', 'date', 'alter_punctuation')
# ViewSets define the view behavior.
class RaceViewSet(viewsets.ModelViewSet):
queryset = Race.objects.all()
serializer_class = RaceSerializer
@detail_route(methods=['get'])
def results(self, request, pk=None):
race = self.get_object()
self.queryset = race.results.all()
serializer = ResultSerializer(instance=self.queryset, many=True, context={'request': request})
return Response(serializer.data)
# ViewSets define the view behavior.
class SeasonViewSet(viewsets.ModelViewSet):
queryset = Season.objects.all()
serializer_class = SeasonSerializer
@detail_route(methods=['get'])
def races(self, request, pk=None):
season = self.get_object()
self.queryset = season.races.all()
serializer = RaceSerializer(instance=self.queryset, many=True, context={'request': request})
return Response(serializer.data)
@detail_route(methods=['get'])
def seats(self, request, pk=None):
season = self.get_object()
self.queryset = season.seats.all()
serializer = SeatSerializer(instance=self.queryset, many=True, context={'request': request})
return Response(serializer.data)
# ViewSets define the view behavior.
class CompetitionViewSet(viewsets.ModelViewSet):
queryset = Competition.objects.all()
serializer_class = CompetitionSerializer
# ViewSets define the view behavior.
class DriverViewSet(viewsets.ModelViewSet):
queryset = Driver.objects.all()
serializer_class = DriverSerializer
# ViewSets define the view behavior.
class TeamViewSet(viewsets.ModelViewSet):
queryset = Team.objects.all()
serializer_class = TeamSerializer
# ViewSets define the view behavior.
class ContenderViewSet(viewsets.ModelViewSet):
queryset = Contender.objects.all()
serializer_class = ContenderSerializer
# ViewSets define the view behavior.
class SeatViewSet(viewsets.ModelViewSet):
queryset = Seat.objects.all()
serializer_class = SeatSerializer
# ViewSets define the view behavior.
class ResultViewSet(viewsets.ModelViewSet):
queryset = Result.objects.all()
serializer_class = ResultSerializer
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'competitions', CompetitionViewSet)
router.register(r'contenders', ContenderViewSet)
router.register(r'drivers', DriverViewSet)
router.register(r'races', RaceViewSet)
router.register(r'results', ResultViewSet)
router.register(r'seasons', SeasonViewSet)
router.register(r'seats', SeatViewSet)
router.register(r'teams', TeamViewSet)
|
Python
| 0.000003
|
@@ -306,16 +306,291 @@
Field%0A%0A%0A
+class CompetitionSerializer(serializers.HyperlinkedModelSerializer):%0A # https://github.com/SmileyChris/django-countries/issues/106%0A country = CountryField()%0A%0A class Meta:%0A model = Competition%0A fields = ('url', 'name', 'full_name', 'country', 'slug')%0A%0A%0A
class Dr
@@ -637,32 +637,32 @@
delSerializer):%0A
-
country = Co
@@ -1407,16 +1407,22 @@
river =
+Nested
DriverSe
@@ -1450,24 +1450,30 @@
teams =
+Nested
TeamSerializ
@@ -1485,16 +1485,68 @@
ny=True)
+%0A competition = CompetitionSerializer(many=False)
%0A%0A cl
@@ -2672,32 +2672,32 @@
p', 'wildcard',%0A
+
@@ -2726,283 +2726,8 @@
)%0A%0A%0A
-class CompetitionSerializer(serializers.HyperlinkedModelSerializer):%0A # https://github.com/SmileyChris/django-countries/issues/106%0A country = CountryField()%0A%0A class Meta:%0A model = Competition%0A fields = ('url', 'name', 'full_name', 'country', 'slug')%0A%0A%0A
clas
|
8ad3308738890d6f4301c7b306afc95d480930ef
|
Fix spurious headers in ListBucket requests
|
euca2ools/commands/walrus/listbucket.py
|
euca2ools/commands/walrus/listbucket.py
|
# Copyright 2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from euca2ools.commands.walrus import (WalrusRequest,
validate_generic_bucket_name)
from requestbuilder import Arg
from requestbuilder.exceptions import ArgumentError
from requestbuilder.mixins import TabifyingMixin
from requestbuilder.response import PaginatedResponse
from requestbuilder.xmlparse import parse_aws_xml
class ListBucket(WalrusRequest, TabifyingMixin):
DESCRIPTION = 'List keys in one or more buckets'
ARGS = [Arg('paths', metavar='BUCKET[/KEY]', nargs='+', route_to=None),
Arg('--max-keys-per-request', dest='max-keys', type=int,
help=argparse.SUPPRESS)]
# noinspection PyExceptionInherit
def configure(self):
WalrusRequest.configure(self)
for path in self.args['paths']:
if path.startswith('/'):
raise ArgumentError((
'argument \'{0}\' must not start with '
'"/"; format is BUCKET[/KEY]').format(path))
bucket = path.split('/', 1)[0]
try:
validate_generic_bucket_name(bucket)
except ValueError as err:
raise ArgumentError(
'bucket "{0}": {1}'.format(bucket, err.message))
def main(self):
self.method = 'GET'
pages = [(path, {}) for path in self.args['paths']]
return PaginatedResponse(self, pages, ('Contents',))
def get_next_page(self, response):
if response.get('IsTruncated') == 'true':
return self.path, {'marker': response['Contents'][-1]['Key']}
def prepare_for_page(self, page):
bucket, __, prefix = page[0].partition('/')
markers = page[1]
self.path = bucket
if prefix:
self.params['prefix'] = prefix
elif 'prefix' in self.params:
del self.params['prefix']
if markers is not None and markers.get('marker'):
self.params['marker'] = markers['marker']
elif 'marker' in self.params:
del self.params['marker']
def parse_response(self, response):
response_dict = self.log_and_parse_response(
response, parse_aws_xml,
list_item_tags=('Contents', 'CommonPrefixes'))
return response_dict['ListBucketResult']
def print_result(self, result):
for obj in result.get('Contents', []):
print obj.get('Key')
|
Python
| 0.000003
|
@@ -1974,16 +1974,43 @@
+ default=argparse.SUPPRESS,
help=ar
|
92008233d8418e1166b3aab18d93f91e552f4a8f
|
Order elections by election ID
|
every_election/apps/elections/models.py
|
every_election/apps/elections/models.py
|
from django.db import models
from django.core.urlresolvers import reverse
from suggested_content.models import SuggestedByPublicMixin
from .managers import ElectionManager
class ElectionType(models.Model):
"""
As defined at https://democracyclub.org.uk/projects/election-ids/reference/
"""
name = models.CharField(blank=True, max_length=100)
election_type = models.CharField(blank=True, max_length=100, unique=True)
default_voting_system = models.ForeignKey(
'elections.VotingSystem', null=True)
def __str__(self):
return self.name
class ElectionSubType(models.Model):
"""
As defined at https://democracyclub.org.uk/projects/election-ids/reference/
"""
name = models.CharField(blank=True, max_length=100)
election_type = models.ForeignKey('ElectionType', related_name="subtype")
election_subtype = models.CharField(blank=True, max_length=100)
def __str__(self):
return "{} ({})".format(self.name, self.election_type)
class ElectedRole(models.Model):
"""
M2M through table between Organisation <-> ElectionType that defines
the role of the job that the elected person will have. e.g:
"Councillor for Trumpton" or "Mayor of London"
"""
election_type = models.ForeignKey('ElectionType')
organisation = models.ForeignKey('organisations.Organisation')
elected_title = models.CharField(blank=True, max_length=255)
elected_role_name = models.CharField(blank=True, max_length=255)
def __str__(self):
return "{} ({})".format(self.elected_title, self.organisation)
class Election(SuggestedByPublicMixin, models.Model):
"""
An election.
This model should contain everything needed to make the election ID,
plus extra information about this election.
"""
election_id = models.CharField(
blank=True, null=True, max_length=250, unique=True)
tmp_election_id = models.CharField(blank=True, null=True, max_length=250)
election_title = models.CharField(blank=True, max_length=255)
election_type = models.ForeignKey(ElectionType)
election_subtype = models.ForeignKey(ElectionSubType, null=True)
poll_open_date = models.DateField(blank=True, null=True)
organisation = models.ForeignKey('organisations.Organisation', null=True)
elected_role = models.ForeignKey(ElectedRole, null=True)
division = models.ForeignKey('organisations.OrganisationDivision', null=True)
geography = models.ForeignKey('organisations.DivisionGeography', null=True)
seats_contested = models.IntegerField(blank=False, null=True)
seats_total = models.IntegerField(blank=False, null=True)
group = models.ForeignKey('Election', null=True, related_name="children")
group_type = models.CharField(blank=True, max_length=100, null=True)
voting_system = models.ForeignKey('elections.VotingSystem', null=True)
objects = ElectionManager.as_manager()
def get_absolute_url(self):
return reverse("single_election_view", args=(self.election_id,))
# TODO:
# Notice of election
# Reason for election
# Link to legislation
# hashtags? Other names?
# Discription
def __str__(self):
return self.get_id()
def get_id(self):
if self.election_id:
return self.election_id
else:
return self.tmp_election_id
class VotingSystem(models.Model):
slug = models.SlugField(primary_key=True)
name = models.CharField(blank=True, max_length=100)
wikipedia_url = models.URLField(blank=True)
description = models.TextField(blank=True)
uses_party_lists = models.BooleanField(default=False)
def __str__(self):
return self.name
|
Python
| 0
|
@@ -2917,24 +2917,77 @@
_manager()%0A%0A
+ class Meta:%0A ordering = ('election_id',)%0A%0A
def get_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.