commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
7aca118be8db36370ff793b6cbedecb050dc869d | Change control plugin name to 'control'. | twisted/plugins/automatron_control.py | twisted/plugins/automatron_control.py | from twisted.internet import defer
from zope.interface import classProvides, implements
from automatron.command import IAutomatronCommandHandler
from automatron.plugin import IAutomatronPluginFactory, STOP
class AutomatronControlPlugin(object):
classProvides(IAutomatronPluginFactory)
implements(IAutomatronCommandHandler)
name = 'control'
priority = 100
def __init__(self, controller):
self.controller = controller
command_map = {
#command: (help, min_args, max_args, permission)
'join': ('<channel> [key]', 1, 2, 'channel'),
'leave': ('<channel> [reason]', 1, 2, 'channel'),
'say': ('<channel> <message>', 2, 2, 'say'),
'nickname': ('<nickname>', 1, 1, 'admin'),
'identify': ('[channel]', 0, 1, None),
}
def on_command(self, client, user, command, args):
if command in self.command_map:
self._on_command(client, user, command, args)
return STOP
@defer.inlineCallbacks
def _on_command(self, client, user, command, args):
config = self.command_map[command]
if config[3] is not None:
if not (yield self.controller.config.has_permission(client.server, None, user, config[3])):
client.msg(user, 'You\'re not authorized to do that.')
return
if not config[1] <= len(args) <= config[2]:
client.msg(user, 'Invalid syntax. Use: %s %s' % (command, config[0]))
return
getattr(self, '_on_command_%s' % command)(client, user, *args)
def _on_command_join(self, client, user, channel, key=None):
if key is not None:
self.controller.config.update_value('channel', client.server, channel, 'key', key)
client.join(channel, key)
else:
d = self.controller.config.get_value('channel', client.server, channel, 'key')
d.addCallback(lambda (channel_key, _): client.join(channel, channel_key))
def _on_command_leave(self, client, user, channel, reason='Leaving...'):
client.leave(channel, reason)
def _on_command_say(self, client, user, channel, message):
client.msg(channel, message)
def _on_command_nickname(self, client, user, nickname):
client.setNick(nickname)
@defer.inlineCallbacks
def _on_command_identify(self, client, user, channel=None):
username, username_relevance = yield self.controller.config.get_username_by_hostmask(client.server, user)
if username is not None:
if username_relevance == 0:
identity = 'You are globally known as %s' % username
else:
identity = 'You are known as %s' % username
role, role_relevance = yield self.controller.config.get_role_by_username(client.server, channel, username)
if role_relevance is not None and role_relevance < username_relevance:
role = role_relevance = None
if role_relevance is None:
client.msg(user, identity)
elif role_relevance in (2, 3):
client.msg(user, '%s and your role in %s is %s' % (identity, channel, role))
else:
client.msg(user, '%s and your role is %s' % (identity, role))
else:
client.msg(user, 'I don\'t know you...')
| from twisted.internet import defer
from zope.interface import classProvides, implements
from automatron.command import IAutomatronCommandHandler
from automatron.plugin import IAutomatronPluginFactory, STOP
class AutomatronControlPlugin(object):
classProvides(IAutomatronPluginFactory)
implements(IAutomatronCommandHandler)
name = 'notify_control'
priority = 100
def __init__(self, controller):
self.controller = controller
command_map = {
#command: (help, min_args, max_args, permission)
'join': ('<channel> [key]', 1, 2, 'channel'),
'leave': ('<channel> [reason]', 1, 2, 'channel'),
'say': ('<channel> <message>', 2, 2, 'say'),
'nickname': ('<nickname>', 1, 1, 'admin'),
'identify': ('[channel]', 0, 1, None),
}
def on_command(self, client, user, command, args):
if command in self.command_map:
self._on_command(client, user, command, args)
return STOP
@defer.inlineCallbacks
def _on_command(self, client, user, command, args):
config = self.command_map[command]
if config[3] is not None:
if not (yield self.controller.config.has_permission(client.server, None, user, config[3])):
client.msg(user, 'You\'re not authorized to do that.')
return
if not config[1] <= len(args) <= config[2]:
client.msg(user, 'Invalid syntax. Use: %s %s' % (command, config[0]))
return
getattr(self, '_on_command_%s' % command)(client, user, *args)
def _on_command_join(self, client, user, channel, key=None):
if key is not None:
self.controller.config.update_value('channel', client.server, channel, 'key', key)
client.join(channel, key)
else:
d = self.controller.config.get_value('channel', client.server, channel, 'key')
d.addCallback(lambda (channel_key, _): client.join(channel, channel_key))
def _on_command_leave(self, client, user, channel, reason='Leaving...'):
client.leave(channel, reason)
def _on_command_say(self, client, user, channel, message):
client.msg(channel, message)
def _on_command_nickname(self, client, user, nickname):
client.setNick(nickname)
@defer.inlineCallbacks
def _on_command_identify(self, client, user, channel=None):
username, username_relevance = yield self.controller.config.get_username_by_hostmask(client.server, user)
if username is not None:
if username_relevance == 0:
identity = 'You are globally known as %s' % username
else:
identity = 'You are known as %s' % username
role, role_relevance = yield self.controller.config.get_role_by_username(client.server, channel, username)
if role_relevance is not None and role_relevance < username_relevance:
role = role_relevance = None
if role_relevance is None:
client.msg(user, identity)
elif role_relevance in (2, 3):
client.msg(user, '%s and your role in %s is %s' % (identity, channel, role))
else:
client.msg(user, '%s and your role is %s' % (identity, role))
else:
client.msg(user, 'I don\'t know you...')
| Python | 0 |
1519d9dc2f483671aee0f92252dd839a4d7af9c3 | Add About page TemplateView | painindex_app/urls.py | painindex_app/urls.py | from django.conf.urls import patterns, url
from django.views.generic import TemplateView, FormView
from painindex_app import views
urlpatterns = patterns('',
url(r'^$', views.homepage, name='homepage'),
url(r'^about/$', TemplateView.as_view(template_name='painindex_app/about.html'), name='about'),
url(r'^painsource/(?P<painsource_id>\d+)/$', views.painsource_detail, name='painsource_detail'),
# url(r'^painreport/new/$', views.painreport_form, name='painreport'),
url(r'^painreport/new/$', views.PainReportView.as_view(), name='painreport'),
) | from django.conf.urls import patterns, url
from django.views.generic import TemplateView, FormView
from painindex_app import views
urlpatterns = patterns('',
url(r'^$', views.homepage, name='homepage'),
url(r'^painsource/(?P<painsource_id>\d+)/$', views.painsource_detail, name='painsource_detail'),
# url(r'^painreport/new/$', views.painreport_form, name='painreport'),
url(r'^painreport/new/$', views.PainReportView.as_view(), name='painreport'),
) | Python | 0 |
40705a39292d0080126933b2318d20ef1a4499a2 | Remove obsolete input. | lobster/cmssw/data/job.py | lobster/cmssw/data/job.py | #!/usr/bin/env python
import json
import os
import pickle
import shutil
import subprocess
import sys
fragment = """import FWCore.ParameterSet.Config as cms
process.source.fileNames = cms.untracked.vstring({input_files})
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange({lumis})"""
def edit_process_source(cmssw_config_file, files, lumis):
with open(cmssw_config_file, 'a') as config:
frag = fragment.format(input_files=repr([str(f) for f in files]), lumis=[str(l) for l in lumis])
print "--- config file fragment:"
print frag
print "---"
config.write(frag)
(config, data) = sys.argv[1:]
with open(data, 'rb') as f:
(args, files, lumis) = pickle.load(f)
configfile = config.replace(".py", "_mod.py")
shutil.copy2(config, configfile)
env = os.environ
env['X509_USER_PROXY'] = 'proxy'
edit_process_source(configfile, files, lumis)
# exit_code = subprocess.call('python "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(map(repr, args))), shell=True, env=env)
exit_code = subprocess.call('cmsRun -j report.xml "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(map(repr, args))), shell=True, env=env)
sys.exit(exit_code)
| #!/usr/bin/env python
import base64
import json
import os
import pickle
import shutil
import subprocess
import sys
fragment = """import FWCore.ParameterSet.Config as cms
process.source.fileNames = cms.untracked.vstring({input_files})
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange({lumis})"""
def edit_process_source(cmssw_config_file, files, lumis):
with open(cmssw_config_file, 'a') as config:
frag = fragment.format(input_files=repr([str(f) for f in files]), lumis=[str(l) for l in lumis])
print "--- config file fragment:"
print frag
print "---"
config.write(frag)
(config, data) = sys.argv[1:]
with open(data, 'rb') as f:
(args, files, lumis) = pickle.load(f)
configfile = config.replace(".py", "_mod.py")
shutil.copy2(config, configfile)
env = os.environ
env['X509_USER_PROXY'] = 'proxy'
edit_process_source(configfile, files, lumis)
# exit_code = subprocess.call('python "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(map(repr, args))), shell=True, env=env)
exit_code = subprocess.call('cmsRun -j report.xml "{0}" {1} > cmssw.log 2>&1'.format(configfile, ' '.join(map(repr, args))), shell=True, env=env)
sys.exit(exit_code)
| Python | 0.000183 |
efe75d083093bf7a421234831e27d661ef93dfdb | add labels and kwargs' | cupyx/time.py | cupyx/time.py | import time
import numpy
import cupy
from cupy import util
class _PerfCaseResult(object):
def __init__(self, name, ts):
assert ts.ndim == 2 and ts.shape[0] == 2 and ts.shape[1] > 0
self.name = name
self._ts = ts
@property
def cpu_times(self):
return self._ts[0]
@property
def gpu_times(self):
return self._ts[1]
@staticmethod
def _to_str_per_item(t):
assert t.size > 0
t *= 1e6
s = ' {:9.03f} us'.format(t.mean())
if t.size > 1:
s += ' +/-{:6.03f} (min:{:9.03f} / max:{:9.03f}) us'.format(
t.std(), t.min(), t.max())
return s
def to_str(self, show_gpu=False):
ts = self._ts if show_gpu else self._ts[[0]]
devices = ["CPU", "GPU"]
return '{:<20s}:{}'.format(
self.name, ' '.join([devices[i] + ": "
+ self._to_str_per_item(ts[i]) for i in range(len(ts))]))
def __str__(self):
return self.to_str(show_gpu=True)
def repeat(func, args=(), kwargs={}, n=10000, *, name=None, n_warmup=10):
util.experimental('cupyx.time.repeat')
if name is None:
name = func.__name__
if not callable(func):
raise ValueError('`func` should be a callable object.')
if not isinstance(args, tuple):
raise ValueError('`args` should be of tuple type.')
if not isinstance(kwargs, dict):
raise ValueError('`args` should be of tuple type.')
if not isinstance(n, int):
raise ValueError('`n` should be an integer.')
if not isinstance(name, str):
raise ValueError('`str` should be a string.')
if not isinstance(n_warmup, int):
raise ValueError('`n_warmup` should be an integer.')
ts = numpy.empty((2, n,), dtype=numpy.float64)
ev1 = cupy.cuda.stream.Event()
ev2 = cupy.cuda.stream.Event()
for i in range(n_warmup):
func(*args, **kwargs)
ev1.record()
ev1.synchronize()
for i in range(n):
ev1.record()
t1 = time.perf_counter()
func(*args, **kwargs)
t2 = time.perf_counter()
ev2.record()
ev2.synchronize()
cpu_time = t2 - t1
gpu_time = cupy.cuda.get_elapsed_time(ev1, ev2) * 1e-3
ts[0, i] = cpu_time
ts[1, i] = gpu_time
return _PerfCaseResult(name, ts)
| import time
import numpy
import cupy
from cupy import util
class _PerfCaseResult(object):
def __init__(self, name, ts):
assert ts.ndim == 2 and ts.shape[0] == 2 and ts.shape[1] > 0
self.name = name
self._ts = ts
@property
def cpu_times(self):
return self._ts[0]
@property
def gpu_times(self):
return self._ts[1]
@staticmethod
def _to_str_per_item(t):
assert t.size > 0
t *= 1e6
s = ' {:9.03f} us'.format(t.mean())
if t.size > 1:
s += ' +/-{:6.03f} (min:{:9.03f} / max:{:9.03f}) us'.format(
t.std(), t.min(), t.max())
return s
def to_str(self, show_gpu=False):
ts = self._ts if show_gpu else self._ts[[0]]
return '{:<20s}:{}'.format(
self.name, ' '.join([self._to_str_per_item(t) for t in ts]))
def __str__(self):
return self.to_str(show_gpu=True)
def repeat(func, args=(), n=10000, *, name=None, n_warmup=10):
util.experimental('cupyx.time.repeat')
if name is None:
name = func.__name__
if not callable(func):
raise ValueError('`func` should be a callable object.')
if not isinstance(args, tuple):
raise ValueError('`args` should be of tuple type.')
if not isinstance(n, int):
raise ValueError('`n` should be an integer.')
if not isinstance(name, str):
raise ValueError('`str` should be a string.')
if not isinstance(n_warmup, int):
raise ValueError('`n_warmup` should be an integer.')
ts = numpy.empty((2, n,), dtype=numpy.float64)
ev1 = cupy.cuda.stream.Event()
ev2 = cupy.cuda.stream.Event()
for i in range(n_warmup):
func(*args)
ev1.record()
ev1.synchronize()
for i in range(n):
ev1.record()
t1 = time.perf_counter()
func(*args)
t2 = time.perf_counter()
ev2.record()
ev2.synchronize()
cpu_time = t2 - t1
gpu_time = cupy.cuda.get_elapsed_time(ev1, ev2) * 1e-3
ts[0, i] = cpu_time
ts[1, i] = gpu_time
return _PerfCaseResult(name, ts)
| Python | 0 |
85769162560d83a58ccc92f818559ddd3dce2a09 | Fix another bug in the authentication | pages/index.py | pages/index.py | import web
from modules.base import renderer
from modules.login import loginInstance
from modules.courses import Course
#Index page
class IndexPage:
#Simply display the page
def GET(self):
if loginInstance.isLoggedIn():
userInput = web.input();
if "logoff" in userInput:
loginInstance.disconnect();
return renderer.index(False)
else:
return renderer.main(Course.GetAllCoursesIds())
else:
return renderer.index(False)
#Try to log in
def POST(self):
userInput = web.input();
if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
return renderer.main(Course.GetAllCoursesIds())
else:
return renderer.index(True) | import web
from modules.base import renderer
from modules.login import loginInstance
from modules.courses import Course
#Index page
class IndexPage:
#Simply display the page
def GET(self):
if loginInstance.isLoggedIn():
userInput = web.input();
if "logoff" in userInput:
loginInstance.disconnect();
return renderer.index(False)
else:
courses = Course.GetAllCoursesIds()
return renderer.main(courses)
else:
return renderer.index(False)
#Try to log in
def POST(self):
userInput = web.input();
if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
return renderer.main()
else:
return renderer.index(True) | Python | 0.000005 |
f7cfa7ec75243dbc4dc6cf75155d8083df504692 | extend benchmark for lights | mpf/benchmarks/test_benchmark_light_shows.py | mpf/benchmarks/test_benchmark_light_shows.py | import time
from functools import partial
from mpf.core.logging import LogMixin
from mpf.tests.MpfGameTestCase import MpfGameTestCase
class BenchmarkLightShows(MpfGameTestCase):
def getConfigFile(self):
return 'config.yaml'
def getMachinePath(self):
return 'benchmarks/machine_files/shows/'
def getOptions(self):
options = super().getOptions()
if self.unittest_verbosity() <= 1:
options["production"] = True
return options
def get_platform(self):
return 'virtual'
def setUp(self):
LogMixin.unit_test = False
super().setUp()
def _output(self, name, start, end, end2, num):
print("Duration {} {:.5f}ms Processing {:.5f}ms Total: {:5f}ms Per second: {:2f}".format(
name,
(1000 * (end - start) / num), ((end2 - end) * 1000) / num, (1000 * (end2 - start)) / num,
(num * 1) / (end2 - start)
))
def _benchmark(self, function, name, num=10000, iterations=10):
function(num, True)
total = 0
for i in range(iterations):
start, end, end2 = function(num, False)
total += (end2 - start) / num
self._output(name, start, end, end2, num)
print("Total average {:.5f}ms".format(total * 1000/ iterations))
return total/iterations
def testBenchmark(self):
baseline = self._benchmark(partial(self._event_and_run, "random_event", "random_event2"), "baseline")
minimal_show = self._benchmark(partial(self._event_and_run, "play_minimal_light_show", "stop_minimal_light_show"), "minimal_show")
all_leds = self._benchmark(partial(self._event_and_run, "play_single_step_tag_playfield", "stop_single_step_tag_playfield"), "all_leds_tag")
multi_step = self._benchmark(partial(self._event_and_run, "play_multi_step", "stop_multi_step"), "multi_step", num=500)
print("Baseline: {:.5f}ms One LED: +{:.5f}ms 30 LEDs: +{:.5f}ms Multi Step: +{:.5f}".format(
baseline * 1000,
(minimal_show - baseline) * 1000,
(all_leds - baseline) * 1000,
(multi_step - baseline) * 1000
))
def _event_and_run(self, event, event2, num, test):
channel_list = []
for light in self.machine.lights.values():
for color, channels in light.hw_drivers.items():
channel_list.extend(channels)
start = time.time()
for i in range(num):
self.post_event(event)
for channel in channel_list:
brightness = channel.current_brightness
self.advance_time_and_run(.01)
for channel in channel_list:
brightness = channel.current_brightness
end = time.time()
self.advance_time_and_run()
end2 = time.time()
self.post_event(event2)
return start, end, end2
| import time
from functools import partial
from mpf.core.logging import LogMixin
from mpf.tests.MpfGameTestCase import MpfGameTestCase
class BenchmarkLightShows(MpfGameTestCase):
def getConfigFile(self):
return 'config.yaml'
def getMachinePath(self):
return 'benchmarks/machine_files/shows/'
def getOptions(self):
options = super().getOptions()
if self.unittest_verbosity() <= 1:
options["production"] = True
return options
def get_platform(self):
return 'virtual'
def setUp(self):
LogMixin.unit_test = False
super().setUp()
def _output(self, name, start, end, end2, num):
print("Duration {} {:.5f}ms Processing {:.5f}ms Total: {:5f}ms Per second: {:2f}".format(
name,
(1000 * (end - start) / num), ((end2 - end) * 1000) / num, (1000 * (end2 - start)) / num,
(num * 1) / (end2 - start)
))
def _benchmark(self, function, name, num=10000, iterations=10):
function(num, True)
total = 0
for i in range(iterations):
start, end, end2 = function(num, False)
total += (end2 - start) / num
self._output(name, start, end, end2, num)
print("Total average {:.5f}ms".format(total * 1000/ iterations))
return total/iterations
def testBenchmark(self):
baseline = self._benchmark(partial(self._event_and_run, "random_event", "random_event2"), "baseline")
minimal_show = self._benchmark(partial(self._event_and_run, "play_minimal_light_show", "stop_minimal_light_show"), "minimal_show")
all_leds = self._benchmark(partial(self._event_and_run, "play_single_step_tag_playfield", "stop_single_step_tag_playfield"), "all_leds_tag")
multi_step = self._benchmark(partial(self._event_and_run, "play_multi_step", "stop_multi_step"), "multi_step", num=500)
print("Baseline: {:.5f}ms One LED: +{:.5f}ms 30 LEDs: +{:.5f}ms Multi Step: +{:.5f}".format(
baseline * 1000,
(minimal_show - baseline) * 1000,
(all_leds - baseline) * 1000,
(multi_step - baseline) * 1000
))
def _event_and_run(self, event, event2, num, test):
start = time.time()
for i in range(num):
self.post_event(event)
self.advance_time_and_run(.01)
end = time.time()
self.advance_time_and_run()
end2 = time.time()
self.post_event(event2)
return start, end, end2
| Python | 0 |
1363c12251cb6aaad37f2b3be6890f70e7f80a66 | Fix invalid syntax | location_field/widgets.py | location_field/widgets.py | from django.conf import settings
from django.forms import widgets
from django.utils.safestring import mark_safe
GOOGLE_MAPS_V3_APIKEY = getattr(settings, 'GOOGLE_MAPS_V3_APIKEY', None)
GOOGLE_API_JS = '//maps.google.com/maps/api/js?sensor=false'
if GOOGLE_MAPS_V3_APIKEY:
GOOGLE_API_JS = '{0}&key={0}'.format(GOOGLE_API_JS, GOOGLE_MAPS_V3_APIKEY)
class LocationWidget(widgets.TextInput):
def __init__(self, attrs=None, based_fields=None, zoom=None, suffix='', **kwargs):
self.based_fields = based_fields
self.zoom = zoom
self.suffix = suffix
super(LocationWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if value is not None:
if isinstance(value, basestring):
lat, lng = value.split(',')
else:
lng = value.x
lat = value.y
value = '%s,%s' % (
float(lat),
float(lng),
)
else:
value = ''
if '-' not in name:
prefix = ''
else:
prefix = name[:name.rindex('-') + 1]
based_fields = ','.join(
map(lambda f: '#id_' + prefix + f.name, self.based_fields))
attrs = attrs or {}
attrs['data-location-widget'] = name
attrs['data-based-fields'] = based_fields
attrs['data-zoom'] = self.zoom
attrs['data-suffix'] = self.suffix
attrs['data-map'] = '#map_' + name
text_input = super(LocationWidget, self).render(name, value, attrs)
map_div = u'''
<div style="margin:4px 0 0 0">
<label></label>
<div id="map_%(name)s" style="width: 500px; height: 250px"></div>
</div>
'''
return mark_safe(text_input + map_div % {'name': name})
class Media:
# Use schemaless URL so it works with both, http and https websites
js = (
GOOGLE_API_JS,
settings.STATIC_URL + 'location_field/js/form.js',
)
| from django.conf import settings
from django.forms import widgets
from django.utils.safestring import mark_safe
GOOGLE_MAPS_V3_APIKEY = getattr(settings, 'GOOGLE_MAPS_V3_APIKEY', None)
GOOGLE_API_JS = '//maps.google.com/maps/api/js?sensor=false'
if GOOGLE_MAPS_V3_APIKEY:
GOOGLE_API_JS = '{0}&key={0}'.format(GOOGLE_API_JS, GOOGLE_MAPS_V3_APIKEY))
class LocationWidget(widgets.TextInput):
def __init__(self, attrs=None, based_fields=None, zoom=None, suffix='', **kwargs):
self.based_fields = based_fields
self.zoom = zoom
self.suffix = suffix
super(LocationWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if value is not None:
if isinstance(value, basestring):
lat, lng = value.split(',')
else:
lng = value.x
lat = value.y
value = '%s,%s' % (
float(lat),
float(lng),
)
else:
value = ''
if '-' not in name:
prefix = ''
else:
prefix = name[:name.rindex('-') + 1]
based_fields = ','.join(
map(lambda f: '#id_' + prefix + f.name, self.based_fields))
attrs = attrs or {}
attrs['data-location-widget'] = name
attrs['data-based-fields'] = based_fields
attrs['data-zoom'] = self.zoom
attrs['data-suffix'] = self.suffix
attrs['data-map'] = '#map_' + name
text_input = super(LocationWidget, self).render(name, value, attrs)
map_div = u'''
<div style="margin:4px 0 0 0">
<label></label>
<div id="map_%(name)s" style="width: 500px; height: 250px"></div>
</div>
'''
return mark_safe(text_input + map_div % {'name': name})
class Media:
# Use schemaless URL so it works with both, http and https websites
js = (
GOOGLE_API_JS,
settings.STATIC_URL + 'location_field/js/form.js',
)
| Python | 0.999586 |
b177629c1869fe707ec69ade0927bf9769e1cbe6 | Freeze sql parse to 0.2.4 | djongo/__init__.py | djongo/__init__.py |
__version__ = '1.2.32'
|
__version__ = '1.2.31'
| Python | 0.999989 |
30c8e4d7a1e6e237772aa89256b83ec37a015803 | increment version | terminalone/metadata.py | terminalone/metadata.py | # -*- coding: utf-8 -*-
__name__ = 'TerminalOne'
__author__ = 'MediaMath'
__copyright__ = 'Copyright 2015, MediaMath'
__license__ = 'Apache License, Version 2.0'
__version__ = '1.9.9'
__maintainer__ = 'MediaMath Developer Relations'
__email__ = 'developers@mediamath.com'
__status__ = 'Stable'
__url__ = 'http://www.mediamath.com'
__description__ = "A package for interacting with MediaMath's TerminalOne API."
| # -*- coding: utf-8 -*-
__name__ = 'TerminalOne'
__author__ = 'MediaMath'
__copyright__ = 'Copyright 2015, MediaMath'
__license__ = 'Apache License, Version 2.0'
__version__ = '1.9.8'
__maintainer__ = 'MediaMath Developer Relations'
__email__ = 'developers@mediamath.com'
__status__ = 'Stable'
__url__ = 'http://www.mediamath.com'
__description__ = "A package for interacting with MediaMath's TerminalOne API."
| Python | 0.000004 |
c8445a938d9bd9512b8af40ac8e9465a3ab9f04d | Fix exception handling error. | d2to1/core.py | d2to1/core.py | import os
import sys
import warnings
from distutils.core import Distribution as _Distribution
from distutils.errors import DistutilsFileError, DistutilsSetupError
from setuptools.dist import _get_unpatched
from .extern import six
from .util import DefaultGetDict, IgnoreDict, cfg_to_args
_Distribution = _get_unpatched(_Distribution)
def d2to1(dist, attr, value):
"""Implements the actual d2to1 setup() keyword. When used, this should be
the only keyword in your setup() aside from `setup_requires`.
If given as a string, the value of d2to1 is assumed to be the relative path
to the setup.cfg file to use. Otherwise, if it evaluates to true, it
simply assumes that d2to1 should be used, and the default 'setup.cfg' is
used.
This works by reading the setup.cfg file, parsing out the supported
metadata and command options, and using them to rebuild the
`DistributionMetadata` object and set the newly added command options.
The reason for doing things this way is that a custom `Distribution` class
will not play nicely with setup_requires; however, this implementation may
not work well with distributions that do use a `Distribution` subclass.
"""
if not value:
return
if isinstance(value, six.string_types):
path = os.path.abspath(value)
else:
path = os.path.abspath('setup.cfg')
if not os.path.exists(path):
raise DistutilsFileError(
'The setup.cfg file %s does not exist.' % path)
# Converts the setup.cfg file to setup() arguments
try:
attrs = cfg_to_args(path)
except:
e = sys.exc_info()[1]
raise DistutilsSetupError(
'Error parsing %s: %s: %s' % (path, e.__class__.__name__,
six.u(str(e))))
# Repeat some of the Distribution initialization code with the newly
# provided attrs
if attrs:
# Skips 'options' and 'licence' support which are rarely used; may add
# back in later if demanded
for key, val in six.iteritems(attrs):
if hasattr(dist.metadata, 'set_' + key):
getattr(dist.metadata, 'set_' + key)(val)
elif hasattr(dist.metadata, key):
setattr(dist.metadata, key, val)
elif hasattr(dist, key):
setattr(dist, key, val)
else:
msg = 'Unknown distribution option: %s' % repr(key)
warnings.warn(msg)
# Re-finalize the underlying Distribution
_Distribution.finalize_options(dist)
# This bit comes out of distribute/setuptools
if isinstance(dist.metadata.version, six.integer_types + (float,)):
# Some people apparently take "version number" too literally :)
dist.metadata.version = str(dist.metadata.version)
# This bit of hackery is necessary so that the Distribution will ignore
# normally unsupport command options (namely pre-hooks and post-hooks).
# dist.command_options is normally a dict mapping command names to dicts of
# their options. Now it will be a defaultdict that returns IgnoreDicts for
# the each command's options so we can pass through the unsupported options
ignore = ['pre_hook.*', 'post_hook.*']
dist.command_options = DefaultGetDict(lambda: IgnoreDict(ignore))
| import os
import sys
import warnings
from distutils.core import Distribution as _Distribution
from distutils.errors import DistutilsFileError, DistutilsSetupError
from setuptools.dist import _get_unpatched
from .extern import six
from .util import DefaultGetDict, IgnoreDict, cfg_to_args
_Distribution = _get_unpatched(_Distribution)
def d2to1(dist, attr, value):
"""Implements the actual d2to1 setup() keyword. When used, this should be
the only keyword in your setup() aside from `setup_requires`.
If given as a string, the value of d2to1 is assumed to be the relative path
to the setup.cfg file to use. Otherwise, if it evaluates to true, it
simply assumes that d2to1 should be used, and the default 'setup.cfg' is
used.
This works by reading the setup.cfg file, parsing out the supported
metadata and command options, and using them to rebuild the
`DistributionMetadata` object and set the newly added command options.
The reason for doing things this way is that a custom `Distribution` class
will not play nicely with setup_requires; however, this implementation may
not work well with distributions that do use a `Distribution` subclass.
"""
if not value:
return
if isinstance(value, six.string_types):
path = os.path.abspath(value)
else:
path = os.path.abspath('setup.cfg')
if not os.path.exists(path):
raise DistutilsFileError(
'The setup.cfg file %s does not exist.' % path)
# Converts the setup.cfg file to setup() arguments
try:
attrs = cfg_to_args(path)
except:
e = sys.exc_info()[1]
raise DistutilsSetupError(
'Error parsing %s: %s: %s' % (path, e.__class__.__name__,
six.u(e)))
# Repeat some of the Distribution initialization code with the newly
# provided attrs
if attrs:
# Skips 'options' and 'licence' support which are rarely used; may add
# back in later if demanded
for key, val in six.iteritems(attrs):
if hasattr(dist.metadata, 'set_' + key):
getattr(dist.metadata, 'set_' + key)(val)
elif hasattr(dist.metadata, key):
setattr(dist.metadata, key, val)
elif hasattr(dist, key):
setattr(dist, key, val)
else:
msg = 'Unknown distribution option: %s' % repr(key)
warnings.warn(msg)
# Re-finalize the underlying Distribution
_Distribution.finalize_options(dist)
# This bit comes out of distribute/setuptools
if isinstance(dist.metadata.version, six.integer_types + (float,)):
# Some people apparently take "version number" too literally :)
dist.metadata.version = str(dist.metadata.version)
# This bit of hackery is necessary so that the Distribution will ignore
# normally unsupport command options (namely pre-hooks and post-hooks).
# dist.command_options is normally a dict mapping command names to dicts of
# their options. Now it will be a defaultdict that returns IgnoreDicts for
# the each command's options so we can pass through the unsupported options
ignore = ['pre_hook.*', 'post_hook.*']
dist.command_options = DefaultGetDict(lambda: IgnoreDict(ignore))
| Python | 0.000011 |
1fca3a48b0617b19554ab55c54db322090a69c3d | Add with statement tests | magic/tests/test_magic.py | magic/tests/test_magic.py | import unittest
import magic
import magic.flags
class MagicTestCase(unittest.TestCase):
def setUp(self):
self.magic = magic.Magic()
def test_get_version(self):
self.assertTrue(isinstance(self.magic.version, int))
def test_from_buffer(self):
mimetype = self.magic.from_buffer("ehlo")
self.assertEqual(mimetype, "ASCII text, with no line terminators")
def test_from_file(self):
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "ASCII text")
def test_with(self):
with magic.Magic(mimetype=True) as m:
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "text/plain")
def test_set_flags(self):
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "ASCII text")
self.magic.set_flags(magic.flags.MAGIC_MIME_TYPE)
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "text/plain")
| import unittest
import magic
import magic.flags
class MagicTestCase(unittest.TestCase):
def setUp(self):
self.magic = magic.Magic()
def test_get_version(self):
self.assertTrue(isinstance(self.magic.version, int))
def test_from_buffer(self):
mimetype = self.magic.from_buffer("ehlo")
self.assertEqual(mimetype, "ASCII text, with no line terminators")
def test_from_file(self):
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "ASCII text")
def test_set_flags(self):
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "ASCII text")
self.magic.set_flags(magic.flags.MAGIC_MIME_TYPE)
mimetype = self.magic.from_file("/etc/passwd")
self.assertEqual(mimetype, "text/plain")
| Python | 0.000004 |
436aa56758403b96aa4c0038db6d2a24047cfa16 | fix bug | monthertree/monthertree/wsgi.py | monthertree/monthertree/wsgi.py | """
WSGI config for monthertree project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import sys
from django.conf import settings
sys.path.append(settings.PROJECT_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "monthertree.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| """
WSGI config for monthertree project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import sys
sys.path.append('/home/jinxp/Documents/shell/mothertree/monthertree/')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "monthertree.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| Python | 0.000001 |
3d7e7c53c5c2809df74a8f8d61a7f929e02a1ce6 | Require libunwind | var/spack/packages/gperftools/package.py | var/spack/packages/gperftools/package.py | ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gperftools(Package):
"""Google's fast malloc/free implementation, especially for multi-threaded applications.
Contains tcmalloc, heap-checker, heap-profiler, and cpu-profiler."""
homepage = "https://code.google.com/p/gperftools"
url = "https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz"
version('2.4', '2171cea3bbe053036fb5d5d25176a160', url="https://github.com/gperftools/gperftools/releases/download/gperftools-2.4/gperftools-2.4.tar.gz")
version('2.3', 'f54dd119f0e46ac1f13264f8d97adf90', url="https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz")
depends_on("libunwind")
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
| ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gperftools(Package):
"""Google's fast malloc/free implementation, especially for multi-threaded applications.
Contains tcmalloc, heap-checker, heap-profiler, and cpu-profiler."""
homepage = "https://code.google.com/p/gperftools"
url = "https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz"
version('2.4', '2171cea3bbe053036fb5d5d25176a160', url="https://github.com/gperftools/gperftools/releases/download/gperftools-2.4/gperftools-2.4.tar.gz")
version('2.3', 'f54dd119f0e46ac1f13264f8d97adf90', url="https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz")
def install(self, spec, prefix):
configure("--prefix=" + prefix)
make()
make("install")
| Python | 0.998829 |
6d8dbb6621da2ddfffd58303131eb6cda345e37c | Make person experience the default tab for ZA | pombola/south_africa/urls.py | pombola/south_africa/urls.py | from django.conf.urls import patterns, include, url
from pombola.core.views import PersonDetailSub
from pombola.south_africa.views import LatLonDetailView,SAPlaceDetailSub
urlpatterns = patterns('pombola.south_africa.views',
url(r'^place/latlon/(?P<lat>[0-9\.-]+),(?P<lon>[0-9\.-]+)/', LatLonDetailView.as_view(), name='latlon'),
url(r'^place/(?P<slug>[-\w]+)/places/', SAPlaceDetailSub.as_view(), {'sub_page': 'places'}, name='place_places'),
url(r'^person/(?P<slug>[-\w]+)/$', PersonDetailSub.as_view(), { 'sub_page': 'experience' }, name='person'),
)
| from django.conf.urls import patterns, include, url
from pombola.south_africa.views import LatLonDetailView,SAPlaceDetailSub
urlpatterns = patterns('pombola.south_africa.views',
url(r'^place/latlon/(?P<lat>[0-9\.-]+),(?P<lon>[0-9\.-]+)/', LatLonDetailView.as_view(), name='latlon'),
url(r'^place/(?P<slug>[-\w]+)/places/', SAPlaceDetailSub.as_view(), {'sub_page': 'places'}, name='place_places'),
)
| Python | 0.000001 |
80618891a11c90bdfc763d9a00e9bdcf2e9302fd | Create resources on deploy | noopy/project_template/deploy.py | noopy/project_template/deploy.py | #!/usr/bin/python
import glob
import importlib
import os
import sys
import zipfile
from StringIO import StringIO
import boto3
import noopy
from noopy.endpoint import Endpoint
from noopy.endpoint.resource import Resource
from noopy.utils import to_pascal_case
import settings
def main():
target_dir = 'src'
zip_bytes = make_zip(target_dir)
for endpoint in settings.ENDPOINTS:
importlib.import_module('src.{}'.format(endpoint))
for func in Endpoint.endpoints.values():
create_lambda_function(zip_bytes, func)
def make_zip(target_dir):
f = StringIO()
zip_file = zipfile.ZipFile(f, 'w')
file_names = glob.glob('{}/*.py'.format(target_dir))
if not file_names:
sys.stderr.write('There is no python file in src directory')
sys.exit(1)
for file_name in file_names:
zip_file.write(file_name, os.path.split(file_name)[1])
noopy_parent = os.path.split(noopy.__path__[0])[0]
for root, _, file_names in os.walk(noopy.__path__[0]):
for file_name in file_names:
full_path = os.path.join(root, file_name)
local_path = full_path[len(noopy_parent):]
zip_file.write(full_path, local_path)
zip_file.close()
f.seek(0)
bytes_ = f.read()
f.close()
return bytes_
def create_lambda_function(zip_bytes, func):
lambda_settings = settings.LAMBDA
client = boto3.client('lambda')
function_prefix = 'arn:aws:lambda:{}:{}:{}'.format(
client._client_config.region_name,
settings.ACCOUNT_ID,
lambda_settings['Prefix']
)
func_module = os.path.split(func.func_code.co_filename)[1].split('.')[0]
print client.create_function(
FunctionName='{}{}'.format(function_prefix, to_pascal_case(func.func_name)),
Runtime='python2.7',
Role=lambda_settings['Role'],
Handler='{}.{}'.format(func_module, func.func_name),
Code={
'ZipFile': zip_bytes
}
)
class ApiGatewayDeployer(object):
def __init__(self):
self.client = boto3.client('apigateway')
apis = self.client.get_rest_apis()['items']
filtered_apis = [api for api in apis if api['name'] == settings.PROJECT_NAME]
if filtered_apis:
self.api_id = filtered_apis[0]['id']
else:
self.api_id = self.client.create_rest_api(name=settings.PROJECT_NAME)['id']
def prepare_resources(self):
aws_resources = self.client.get_resources(restApiId=self.api_id, limit=500)['items']
aws_resource_by_path = dict((r['path'], r) for r in aws_resources)
for path, noopy_resource in Resource.resources.iteritems():
aws_resource = aws_resource_by_path.get(path)
if aws_resource:
noopy_resource.id = aws_resource['id']
self.create_omitted_resources(aws_resource_by_path.keys(), Resource.resources['/'])
def create_omitted_resources(self, exist_path, parent):
for child in parent.children:
if child.path not in exist_path:
created = self.client.create_resource(
restApiId=self.api_id,
parentId=parent.id,
pathPart=child.path.split('/')[-1]
)
child.id = created['id']
if child.children:
self.create_omitted_resources(exist_path, child)
if __name__ == '__main__':
main()
| #!/usr/bin/python
import glob
import importlib
import os
import sys
import zipfile
from StringIO import StringIO
import boto3
import noopy
from noopy.endpoint import Endpoint
from noopy.utils import to_pascal_case
import settings
def main():
target_dir = 'src'
zip_bytes = make_zip(target_dir)
for endpoint in settings.ENDPOINTS:
importlib.import_module('src.{}'.format(endpoint))
for func in Endpoint.endpoints.values():
create_lambda_function(zip_bytes, func)
def make_zip(target_dir):
f = StringIO()
zip_file = zipfile.ZipFile(f, 'w')
file_names = glob.glob('{}/*.py'.format(target_dir))
if not file_names:
sys.stderr.write('There is no python file in src directory')
sys.exit(1)
for file_name in file_names:
zip_file.write(file_name, os.path.split(file_name)[1])
noopy_parent = os.path.split(noopy.__path__[0])[0]
for root, _, file_names in os.walk(noopy.__path__[0]):
for file_name in file_names:
full_path = os.path.join(root, file_name)
local_path = full_path[len(noopy_parent):]
zip_file.write(full_path, local_path)
zip_file.close()
f.seek(0)
bytes_ = f.read()
f.close()
return bytes_
def create_lambda_function(zip_bytes, func):
lambda_settings = settings.LAMBDA
client = boto3.client('lambda')
function_prefix = 'arn:aws:lambda:{}:{}:{}'.format(
client._client_config.region_name,
settings.ACCOUNT_ID,
lambda_settings['Prefix']
)
func_module = os.path.split(func.func_code.co_filename)[1].split('.')[0]
print client.create_function(
FunctionName='{}{}'.format(function_prefix, to_pascal_case(func.func_name)),
Runtime='python2.7',
Role=lambda_settings['Role'],
Handler='{}.{}'.format(func_module, func.func_name),
Code={
'ZipFile': zip_bytes
}
)
if __name__ == '__main__':
main()
| Python | 0.000001 |
76289f734f622227c44487d8f44879e078dbdcb3 | Improve gzweb launcher | src/deedee_tutorials/src/deedee_tutorials/launcher.py | src/deedee_tutorials/src/deedee_tutorials/launcher.py | #! /usr/bin/env python
import rospy
import subprocess
class MainLauncher:
''' Node spawning the environment with respect to the global configs
'''
def __init__(self):
rospy.init_node("middleware_spawner")
rospy.sleep(0.5)
# Configs
self.configs = {"robot_name": "deedee",
"sim_plant": "true",
"autonomous": "true"}
self.cmd = ""
self.retrieve_config()
self.build_cmd()
self.spawn()
def retrieve_config(self):
for setting in self.configs.keys():
self.configs[setting] = rospy.get_param("/{}".format(setting))
def build_cmd(self):
self.cmd = "roslaunch deedee_tutorials follow_waypoints.launch"
for setting in self.configs.keys():
self.cmd += " {}:={}".format(setting, self.configs[setting])
def spawn(self):
subprocess.call(self.cmd, shell=True)
class GzwebManager:
''' Node spawning the environment with respect to the global configs
'''
def __init__(self):
rospy.init_node("gzweb_manager")
rospy.sleep(0.5)
# Configs
self.configs = {"gzweb_enable": "true",
"gzweb_path": ""}
self.retrieve_config()
if self.configs["gzweb_enable"]:
self.cmd = "{}/start_gzweb.sh".format(self.configs["gzweb_path"])
subprocess.call("{}/start_gzweb.sh".format(self.configs["gzweb_path"]),
shell=True)
rospy.on_shutdown(self.shutdown_hook)
rospy.spin()
def retrieve_config(self):
gzweb_params = rospy.get_param("gzweb")
for setting in self.configs.keys():
self.configs[setting] = gzweb_params[setting]
def shutdown_hook(self):
print "Stopping webserver!"
subprocess.call("{}/stop_gzweb.sh".format(self.configs["gzweb_path"]),
shell=True)
| #! /usr/bin/env python
import rospy
import subprocess
class MainLauncher:
''' Node spawning the environment with respect to the global configs
'''
def __init__(self):
rospy.init_node("middleware_spawner")
rospy.sleep(0.5)
# Configs
self.configs = {"robot_name": "deedee",
"sim_plant": "true",
"autonomous": "true"}
self.cmd = ""
self.retrieve_config()
self.build_cmd()
self.spawn()
def retrieve_config(self):
for setting in self.configs.keys():
self.configs[setting] = rospy.get_param("/{}".format(setting))
def build_cmd(self):
self.cmd = "roslaunch deedee_tutorials follow_waypoints.launch"
for setting in self.configs.keys():
self.cmd += " {}:={}".format(setting, self.configs[setting])
def spawn(self):
subprocess.call(self.cmd, shell=True)
class GzwebManager:
''' Node spawning the environment with respect to the global configs
'''
def __init__(self):
rospy.init_node("gzweb_manager")
rospy.sleep(0.5)
# Configs
self.configs = {"gzweb_enable": "true",
"gzweb_path": ""}
self.retrieve_config()
self.cmd = "{}/start_gzweb.sh".format(self.configs["gzweb_path"])
if self.configs["gzweb_enable"]:
subprocess.call("{}/start_gzweb.sh".format(self.configs["gzweb_path"]),
shell=True)
rospy.on_shutdown(self.shutdown_hook)
rospy.spin()
def retrieve_config(self):
gzweb_params = rospy.get_param("gzweb")
for setting in self.configs.keys():
self.configs[setting] = gzweb_params[setting]
def shutdown_hook(self):
print "Stopping webserver!"
subprocess.call("{}/stop_gzweb.sh".format(self.configs["gzweb_path"]),
shell=True)
| Python | 0 |
61c6f174b1e406955c3e881217ff863d6ff6c3ce | Fix validate/sanitize functions for click | pathvalidate/click.py | pathvalidate/click.py | """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import click
from ._common import PathType
from ._file import sanitize_filename, sanitize_filepath, validate_filename, validate_filepath
from .error import ValidationError
def validate_filename_arg(ctx, param, value) -> str:
if not value:
return ""
try:
validate_filename(value)
except ValidationError as e:
raise click.BadParameter(str(e))
return value
def validate_filepath_arg(ctx, param, value) -> str:
if not value:
return ""
try:
validate_filepath(value)
except ValidationError as e:
raise click.BadParameter(str(e))
return value
def sanitize_filename_arg(ctx, param, value) -> PathType:
if not value:
return ""
return sanitize_filename(value)
def sanitize_filepath_arg(ctx, param, value) -> PathType:
if not value:
return ""
return sanitize_filepath(value)
def filename(ctx, param, value):
# Deprecated
if not value:
return None
try:
validate_filename(value)
except ValidationError as e:
raise click.BadParameter(e)
return sanitize_filename(value)
def filepath(ctx, param, value):
# Deprecated
if not value:
return None
try:
validate_filepath(value)
except ValidationError as e:
raise click.BadParameter(e)
return sanitize_filepath(value)
| """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import click
from ._common import PathType
from ._file import sanitize_filename, sanitize_filepath, validate_filename, validate_filepath
from .error import ValidationError
def validate_filename_arg(ctx, param, value) -> None:
if not value:
return
try:
validate_filename(value)
except ValidationError as e:
raise click.BadParameter(str(e))
def validate_filepath_arg(ctx, param, value) -> None:
if not value:
return
try:
validate_filepath(value)
except ValidationError as e:
raise click.BadParameter(str(e))
def sanitize_filename_arg(ctx, param, value) -> PathType:
if not value:
return ""
return sanitize_filename(value)
def sanitize_filepath_arg(ctx, param, value) -> PathType:
if not value:
return ""
return sanitize_filepath(value)
def filename(ctx, param, value):
# Deprecated
if not value:
return None
try:
validate_filename(value)
except ValidationError as e:
raise click.BadParameter(e)
return sanitize_filename(value)
def filepath(ctx, param, value):
# Deprecated
if not value:
return None
try:
validate_filepath(value)
except ValidationError as e:
raise click.BadParameter(e)
return sanitize_filepath(value)
| Python | 0 |
b65283984b1be7e8bb88d3281bb3654a3dd12233 | Make sure test setup is run for subdirectories | nova/tests/scheduler/__init__.py | nova/tests/scheduler/__init__.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Openstack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
from nova.tests import *
| Python | 0.000001 | |
fd74c411fefac7a633627fd3eb5a3c194e6e2b1c | add never_cache for people_admin views | people_admin/views.py | people_admin/views.py | from django.shortcuts import render, get_object_or_404
from django.db.models import Count
from openstates.data.models import LegislativeSession, Person
from utils.common import abbr_to_jid, sessions_with_bills, states
from people_admin.models import UnmatchedName, NameStatus
from django.views.decorators.http import require_http_methods, never_cache
from django.contrib.auth.decorators import user_passes_test
from django.http import JsonResponse
import json
def person_data(person):
""" similar to utils.people.person_as_dict but customized for editable fields """
return {
"id": person.id,
"name": person.name,
"title": person.current_role["title"],
"district": person.current_role["district"],
"party": person.primary_party,
"image": person.image,
}
@user_passes_test(lambda u: u.is_staff)
def jurisdiction_list(request):
state_people_data = {}
unmatched_by_state = dict(
UnmatchedName.objects.filter(status="U")
.values_list("session__jurisdiction__name")
.annotate(number=Count("id"))
)
for state in states:
state_people_data[state.abbr.lower()] = {
"state": state.name,
"unmatched": unmatched_by_state.get(state.name, 0),
}
return render(
request,
"people_admin/jurisdiction_list.html",
{"state_people_data": state_people_data},
)
@never_cache
@user_passes_test(lambda u: u.is_staff)
def people_list(request, state):
jid = abbr_to_jid(state)
current_people = [
person_data(p)
for p in Person.objects.filter(
current_jurisdiction_id=jid, current_role__isnull=False
).order_by("family_name", "name")
]
context = {
"current_people": current_people,
}
return render(request, "people_admin/person_list.html", {"context": context})
@never_cache
@user_passes_test(lambda u: u.is_staff)
def people_matcher(request, state, session=None):
jid = abbr_to_jid(state)
all_sessions = sessions_with_bills(jid)
if all_sessions:
session = all_sessions[0]
else:
session = get_object_or_404(
LegislativeSession, identifier=session, jurisdiction_id=jid
)
unmatched = UnmatchedName.objects.filter(session_id=session, status="U").order_by(
"-sponsorships_count"
)
state_sponsors = Person.objects.filter(current_jurisdiction_id=jid)
unmatched_total = unmatched.count()
context = {
"state": state,
"session": session,
"all_sessions": all_sessions,
"unmatched": unmatched,
"state_sponsors": state_sponsors,
"unmatched_total": unmatched_total,
}
return render(request, "people_admin/people_matcher.html", context)
@user_passes_test(lambda u: u.is_staff)
@require_http_methods(["POST"])
def apply_match(request):
form_data = json.load(request)["match_data"]
button = form_data["button"]
match_id = form_data["matchedId"]
unmatched_id = form_data["unmatchedId"]
unmatched_name = get_object_or_404(UnmatchedName, pk=unmatched_id)
if button == "Match":
unmatched_name.matched_person_id = match_id
unmatched_name.status = NameStatus.MATCHED_PERSON
elif button == "Source Error":
unmatched_name.status = NameStatus.SOURCE_ERROR
elif button == "Ignore":
unmatched_name.status = NameStatus.IGNORED
else:
unmatched_name.status = NameStatus.UNMATCHED
unmatched_name.save()
return JsonResponse({"status": "success"})
| from django.shortcuts import render, get_object_or_404
from django.db.models import Count
from openstates.data.models import LegislativeSession, Person
from utils.common import abbr_to_jid, sessions_with_bills, states
from people_admin.models import UnmatchedName, NameStatus
from django.views.decorators.http import require_http_methods
from django.contrib.auth.decorators import user_passes_test
from django.http import JsonResponse
import json
def person_data(person):
""" similar to utils.people.person_as_dict but customized for editable fields """
return {
"id": person.id,
"name": person.name,
"title": person.current_role["title"],
"district": person.current_role["district"],
"party": person.primary_party,
"image": person.image,
}
@user_passes_test(lambda u: u.is_staff)
def jurisdiction_list(request):
state_people_data = {}
unmatched_by_state = dict(
UnmatchedName.objects.filter(status="U")
.values_list("session__jurisdiction__name")
.annotate(number=Count("id"))
)
for state in states:
state_people_data[state.abbr.lower()] = {
"state": state.name,
"unmatched": unmatched_by_state.get(state.name, 0),
}
return render(
request,
"people_admin/jurisdiction_list.html",
{"state_people_data": state_people_data},
)
@user_passes_test(lambda u: u.is_staff)
def people_list(request, state):
jid = abbr_to_jid(state)
current_people = [
person_data(p)
for p in Person.objects.filter(
current_jurisdiction_id=jid, current_role__isnull=False
).order_by("family_name", "name")
]
context = {
"current_people": current_people,
}
return render(request, "people_admin/person_list.html", {"context": context})
@user_passes_test(lambda u: u.is_staff)
def people_matcher(request, state, session=None):
jid = abbr_to_jid(state)
all_sessions = sessions_with_bills(jid)
if all_sessions:
session = all_sessions[0]
else:
session = get_object_or_404(
LegislativeSession, identifier=session, jurisdiction_id=jid
)
unmatched = UnmatchedName.objects.filter(session_id=session, status="U").order_by(
"-sponsorships_count"
)
state_sponsors = Person.objects.filter(current_jurisdiction_id=jid)
unmatched_total = unmatched.count()
context = {
"state": state,
"session": session,
"all_sessions": all_sessions,
"unmatched": unmatched,
"state_sponsors": state_sponsors,
"unmatched_total": unmatched_total,
}
return render(request, "people_admin/people_matcher.html", context)
@user_passes_test(lambda u: u.is_staff)
@require_http_methods(["POST"])
def apply_match(request):
form_data = json.load(request)["match_data"]
button = form_data["button"]
match_id = form_data["matchedId"]
unmatched_id = form_data["unmatchedId"]
unmatched_name = get_object_or_404(UnmatchedName, pk=unmatched_id)
if button == "Match":
unmatched_name.matched_person_id = match_id
unmatched_name.status = NameStatus.MATCHED_PERSON
elif button == "Source Error":
unmatched_name.status = NameStatus.SOURCE_ERROR
elif button == "Ignore":
unmatched_name.status = NameStatus.IGNORED
else:
unmatched_name.status = NameStatus.UNMATCHED
unmatched_name.save()
return JsonResponse({"status": "success"})
| Python | 0 |
aeb69479a6bf5492411e82bbcb77331daa8da819 | add a test to test the monitor | tests/test_bzoing.py | tests/test_bzoing.py | """
test_bzoing
----------------------------------
Tests for `bzoing` module.
"""
import unittest
from bzoing.tasks import Bzoinq, Monitor
import time
class TestTasksAndMonitor(unittest.TestCase):
def test_creating_task(self):
a = Bzoinq()
a.create_task()
self.assertTrue(len(a.task_list) == 1)
def test_delete_task(self):
a = Bzoinq()
a.create_task()
the_id = a.task_list[0].id
a.remove_task(the_id)
self.assertTrue(len(a.task_list) == 0)
def test_monitor(self):
import datetime
a = Bzoinq()
b = Monitor(a)
b.start()
first_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
a.create_task("My test task", alarm=first_time)
# sleep a bit to see if alarm works
time.sleep(15)
# check the if task was removed from task list
self.assertTrue(len(a.task_list) == 0)
# kill Monitor
b.stop()
if __name__ == '__main__':
unittest.main()
| """
test_bzoing
----------------------------------
Tests for `bzoing` module.
"""
import unittest
from bzoing.tasks import Bzoinq, Monitor
import time
class TestTasksAndMonitor(unittest.TestCase):
def test_creating_task(self):
a = Bzoinq()
a.create_task()
self.assertTrue(len(a.task_list) == 1)
def test_delete_task(self):
a = Bzoinq()
a.create_task()
the_id = a.task_list[0].id
a.remove_task(the_id)
self.assertTrue(len(a.task_list) == 0)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
148826f75072576d7f0d0f206e3d1dba34688720 | Refactor getLongestWord to simplify maximum collection and reduce number of conditionals | stream_processor/stream_processor.py | stream_processor/stream_processor.py | '''
Created on Aug 7, 2017
@author: alkaitz
'''
import heapq
'''
You have a function that will be called with a stream of strings.
Every time you receive a new word, you should return the length of the longest
word that you have received that has showed in the string only once. Ex:
f("Yes") -> 3
f("No") -> 3
f("Yes") -> 2
'''
working_set = []
heapq.heapify(working_set)
repeated = set()
def process(str):
includeWord(str)
return getLongestWord()
'''
Structure will be sorted by negative numbers to transform it from a min heap to a max heap.
Storing the tuple, to provide right sorting.
None returned if data set is empty (all received words have appeared more than once)
'''
def includeWord(str):
if str not in repeated:
lenPlusStringTuple = (-len(str),str)
if lenPlusStringTuple not in working_set:
heapq.heappush(working_set, lenPlusStringTuple)
else:
working_set.remove(lenPlusStringTuple)
repeated.add(str)
def getLongestWord():
if len(working_set) > 0:
(length, _) = (working_set[0])
return -length
return None
if __name__ == '__main__':
assert(process("Hello") == 5)
assert(process("Hello") == None)
assert(process("Hello") == None)
assert(process("Hallo") == 5)
assert(process("Bye") == 5)
assert(process("By") == 5)
assert(process("B") == 5)
assert(process("Hallo") == 3)
assert(process("By") == 3)
assert(process("Bye") == 1)
print "Successful" | '''
Created on Aug 7, 2017
@author: alkaitz
'''
import heapq
'''
You have a function that will be called with a stream of strings.
Every time you receive a new word, you should return the length of the longest
word that you have received that has showed in the string only once. Ex:
f("Yes") -> 3
f("No") -> 3
f("Yes") -> 2
'''
working_set = []
heapq.heapify(working_set)
repeated = set()
def process(str):
includeWord(str)
return getLongestWord(str)
'''
Structure will be sorted by negative numbers to transform it from a min heap to a max heap.
Storing the tuple, to provide right sorting.
None returned if data set is empty (all received words have appeared more than once)
'''
def includeWord(str):
if str not in repeated:
lenPlusStringTuple = (-len(str),str)
if lenPlusStringTuple not in working_set:
heapq.heappush(working_set, lenPlusStringTuple)
else:
working_set.remove(lenPlusStringTuple)
repeated.add(str)
def getLongestWord(str):
(length, _) = (working_set[0]) if working_set else (None, None)
return -length if length else None
if __name__ == '__main__':
assert(process("Hello") == 5)
assert(process("Hello") == None)
assert(process("Hello") == None)
assert(process("Hallo") == 5)
assert(process("Bye") == 5)
assert(process("By") == 5)
assert(process("B") == 5)
assert(process("Hallo") == 3)
assert(process("By") == 3)
assert(process("Bye") == 1)
print "Successful" | Python | 0 |
6d267faaf9d18e58b24cf93906961b152ef0fcb7 | build vehicle list based on if make is provided | src/vehicles/views.py | src/vehicles/views.py | from django.shortcuts import render, render_to_response, RequestContext
# import the custom context processor
from vehicles.context_processor import global_context_processor
from vehicles.models import Vehicle, VehicleMake, Category
def home_page(request):
return render_to_response("home_page.html", locals(),
context_instance=RequestContext(request, processors=[global_context_processor]))
def category_page(request, slug):
# check if make slug parameter is passed into the url
vehicle_make_slug = request.GET.get('make', None)
# get category by slug
category = Category.objects.get_category_by_slug(slug)
# get all the vehicles by the category and make (if provided)
vehicles_list = None
if vehicle_make_slug is not None:
# get make by slug
make = VehicleMake.objects.get_make_by_slug(vehicle_make_slug)
vehicles_list = Vehicle.objects.get_vehicles_by_category_and_make(
category, make)
else:
vehicles_list = Vehicle.objects.get_vehicles_by_category(category)
return render_to_response("home_page.html", locals(),
context_instance=RequestContext(request, processors=[global_context_processor]))
def get_makes_in_category(category):
makes_in_category = []
# get all the vehicle objects by category
vehicles_in_category = Vehicle.objects.get_vehicles_by_category(category=category)
for vehicle in vehicles_in_category:
makes_in_category.append(vehicle.make)
# remove duplicate makes from the list
makes_in_category = list(set(makes_in_category))
makes_in_category = sorted(makes_in_category, key=lambda x:x.v_make)
return makes_in_category | from django.shortcuts import render, render_to_response, RequestContext
# import the custom context processor
from vehicles.context_processor import global_context_processor
from vehicles.models import Vehicle, Category
def home_page(request):
return render_to_response("home_page.html", locals(),
context_instance=RequestContext(request, processors=[global_context_processor]))
def category_page(request, slug):
# check if make parameter is passed into the url
vehicle_make = request.GET.get('make', None)
# get category by slug
category = Category.objects.get_category_by_slug(slug)
# get all the vehicles by the category and make (if provided)
vehicles_list = None
if vehicle_make is not None:
vehicles_list = Vehicle.objects.get_vehicles_by_category_and_make(
category, vehicle_make)
else:
vehicles_list = Vehicle.objects.get_vehicles_by_category(category)
return render_to_response("home_page.html", locals(),
context_instance=RequestContext(request, processors=[global_context_processor]))
def get_makes_in_category(category):
makes_in_category = []
# get all the vehicle objects by category
vehicles_in_category = Vehicle.objects.get_vehicles_by_category(category=category)
for vehicle in vehicles_in_category:
makes_in_category.append(vehicle.make)
# remove duplicate makes from the list
makes_in_category = list(set(makes_in_category))
makes_in_category = sorted(makes_in_category, key=lambda x:x.v_make)
return makes_in_category | Python | 0.000001 |
39ab86b500cc28420aa0062395adc9e6ddf2017c | allow reading fom multiple configuration files | src/vsphere/config.py | src/vsphere/config.py | import sys
from os import path
from ConfigParser import ConfigParser
VSPHERE_CFG_FILE = "vsphere.conf"
unix_platforms = [
"darwin",
"Linux"
]
class EsxConfig:
def __init__(self):
ok = False
# specific configuration
local_cfg = VSPHERE_CFG_FILE
# user-global configuration
user_cfg = ""
if sys.platform in unix_platforms:
user_cfg = path.join(path.expanduser("~"), '.{0}'.format(VSPHERE_CFG_FILE))
# system-wide configuration
system_cfg = ""
if sys.platform in unix_platforms:
system_cfg = path.join(path.expanduser("/etc/vsphere"), VSPHERE_CFG_FILE)
files = [ local_cfg, user_cfg, system_cfg ]
for f in files:
if path.exists(f):
parser = ConfigParser()
parser.read(f)
ok = True
break
if ok:
self.vs_host = parser.get('server', 'host')
self.vs_user = parser.get('server', 'user')
self.vs_password = parser.get('server', 'password')
self.vs_dc = parser.get('server', 'dc')
| from ConfigParser import ConfigParser
class EsxConfig:
def __init__(self):
parser = ConfigParser()
parser.read("vsphere.conf")
self.vs_host = parser.get('server', 'host')
self.vs_user = parser.get('server', 'user')
self.vs_password = parser.get('server', 'password')
self.vs_dc = parser.get('server', 'dc')
| Python | 0 |
170a50eeca4249a488cc9d0c69876c5f2708b743 | use two-tail for testing significance and right_tail for redundancy checking | stats/significance.py | stats/significance.py | '''
Significance testing methods.
@author: anze.vavpetic@ijs.si
'''
from fisher import pvalue
def is_redundant(rule, new_rule):
    '''
    Computes the redundancy of a new rule compared to its
    immediate generalization.

    The specialization is deemed non-redundant only when its right-tail
    p-value strictly exceeds that of the rule it was derived from.
    '''
    specialized_tail = _fisher(new_rule).right_tail
    general_tail = _fisher(rule).right_tail
    return specialized_tail > general_tail
def fisher(rule):
    '''
    Two-tailed Fisher's exact-test p-value for one rule.

    Used for significance testing (the one-tailed value is reserved for
    redundancy checking in is_redundant).
    '''
    return _fisher(rule).two_tail
def _fisher(rule):
    '''
    Run Fisher's exact test for one rule and return the full pvalue
    object (callers pick .two_tail or .right_tail as needed).
    '''
    # Build the 2x2 contingency table between rule coverage (X) and the
    # rule's target class (Y):
    #   N         - all examples in the knowledge base
    #   nX        - examples covered by the rule
    #   nY        - examples belonging to the target class
    #   nXY       - covered examples that belong to the target class
    N = float(len(rule.kb.examples))
    nX = float(rule.coverage)
    nY = rule.kb.distribution[rule.target]
    nXY = rule.distribution[rule.target]
    # Remaining cells derived from the marginals.
    nXnotY = nX - nXY
    nnotXY = nY - nXY
    nnotXnotY = N - nXnotY - nnotXY
    return pvalue(nXY, nXnotY, nnotXY, nnotXnotY)
def apply_fisher(ruleset):
    '''
    Fisher's exact test to test rule significance.

    Annotates each rule in `ruleset` in place with its two-tailed
    p-value (stored as `rule.pval`); returns nothing.
    '''
    for rule in ruleset:
        rule.pval = fisher(rule)
| '''
Significance testing methods.
@author: anze.vavpetic@ijs.si
'''
from fisher import pvalue
def is_redundant(rule, new_rule):
'''
Computes the redundancy coefficient of a new rule compared to its
immediate generalization.
Rules with a coeff > 1 are deemed non-redundant.
'''
return fisher(new_rule) > fisher(rule)
def fisher(rule):
'''
Fisher's p-value for one rule.
'''
N = float(len(rule.kb.examples))
nX = float(rule.coverage)
nY = rule.kb.distribution[rule.target]
nXY = rule.distribution[rule.target]
nXnotY = nX - nXY
nnotXY = nY - nXY
nnotXnotY = N - nXnotY - nnotXY
return pvalue(nXY, nXnotY, nnotXY, nnotXnotY).right_tail
def apply_fisher(ruleset):
'''
Fisher's exact test to test rule significance.
'''
for rule in ruleset:
rule.pval = fisher(rule)
| Python | 0 |
8907993e48a59ce39dab1cdb359e287f527b7642 | Add --verbose parameter | stbt_control_relay.py | stbt_control_relay.py | #!/usr/bin/python
"""
Allows using any of the stbt remote control backends remotely using the lirc
protocol.
Presents the same socket protocol as lircd but sending keypresses using any of
stbt's controls. This allows for example controlling a roku over its HTTP
interface from some software that only speaks lirc.
Example usage:
$ stbt control-relay file:example
Listens on `/var/run/lirc/lircd` for lirc clients. Keypress sent will be
written to the file example. So
$ irsend SEND_ONCE stbt KEY_UP
Will write the text "KEY_UP" to the file `example`.
$ stbt control-relay --input=lircd:lircd.sock \\
roku:192.168.1.13 samsung:192.168.1.14
Listens on lircd.sock and will forward keypresses to the roku at 192.168.1.13
using its HTTP protocol and to the Samsung TV at 192.168.1.14 using its TCP
protocol. So
$ irsend -d lircd.sock SEND_ONCE stbt KEY_OK
Will press KEY_OK on both the Samsung and the roku devices simultaneously.
"""
import argparse
import signal
import sys
from _stbt.control import MultiRemote, uri_to_remote, uri_to_remote_recorder
from _stbt.logging import argparser_add_verbose_argument, debug
def main(argv):
    """Relay keypresses from a lirc-style input to one or more stbt controls.

    Parses the command line, builds the output remotes and the input
    listener, then forwards every received keypress until interrupted.
    """
    parser = argparse.ArgumentParser(
        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "--input", default="lircd", help="""The source of remote control
        presses. Values are the same as stbt record's --control-recorder.""")
    parser.add_argument("output", nargs="+", help="""One or more remote control
        configurations. Values are the same as stbt run's --control.""")
    argparser_add_verbose_argument(parser)
    args = parser.parse_args(argv[1:])

    # Exit cleanly (status 0) on SIGTERM instead of dying on the signal.
    signal.signal(signal.SIGTERM, lambda _signo, _stack_frame: sys.exit(0))

    # Fan each keypress out to every configured output control.
    r = MultiRemote(uri_to_remote(x) for x in args.output)
    listener = uri_to_remote_recorder(args.input)

    # Best-effort relay loop: a failure on one key is reported but does
    # not stop forwarding of subsequent keys.
    for key in listener:
        debug("Received %s" % key)
        try:
            r.press(key)
        except Exception as e:  # pylint: disable=broad-except
            sys.stderr.write("Error pressing key %r: %s\n" % (key, e))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| #!/usr/bin/python
"""
Allows using any of the stbt remote control backends remotely using the lirc
protocol.
Presents the same socket protocol as lircd but sending keypresses using any of
stbt's controls. This allows for example controlling a roku over its HTTP
interface from some software that only speaks lirc.
Example usage:
$ stbt control-relay file:example
Listens on `/var/run/lirc/lircd` for lirc clients. Keypress sent will be
written to the file example. So
$ irsend SEND_ONCE stbt KEY_UP
Will write the text "KEY_UP" to the file `example`.
$ stbt control-relay --input=lircd:lircd.sock \\
roku:192.168.1.13 samsung:192.168.1.14
Listens on lircd.sock and will forward keypresses to the roku at 192.168.1.13
using its HTTP protocol and to the Samsung TV at 192.168.1.14 using its TCP
protocol. So
$ irsend -d lircd.sock SEND_ONCE stbt KEY_OK
Will press KEY_OK on both the Samsung and the roku devices simultaneously.
"""
import argparse
import signal
import sys
from _stbt.control import MultiRemote, uri_to_remote, uri_to_remote_recorder
def main(argv):
parser = argparse.ArgumentParser(
epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--input", default="lircd", help="""The source of remote control
presses. Values are the same as stbt record's --control-recorder.""")
parser.add_argument("output", nargs="+", help="""One or more remote control
configurations. Values are the same as stbt run's --control.""")
args = parser.parse_args(argv[1:])
signal.signal(signal.SIGTERM, lambda _signo, _stack_frame: sys.exit(0))
r = MultiRemote(uri_to_remote(x) for x in args.output)
listener = uri_to_remote_recorder(args.input)
for key in listener:
sys.stderr.write("Received %s\n" % key)
try:
r.press(key)
except Exception as e: # pylint: disable=broad-except
sys.stderr.write("Error pressing key %r: %s\n" % (key, e))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| Python | 0 |
84ee720fd2d8403de5f49c54fc41bfcb67a78f78 | Add missing vat alias for Turkey | stdnum/tr/__init__.py | stdnum/tr/__init__.py | # __init__.py - collection of Turkish numbers
# coding: utf-8
#
# Copyright (C) 2016 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Collection of Turkish numbers."""
from stdnum.tr import vkn as vat # noqa: F401
| # __init__.py - collection of Turkish numbers
# coding: utf-8
#
# Copyright (C) 2016 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Collection of Turkish numbers."""
| Python | 0.000355 |
e0bd89115c4d103334fe0c751cdbc96a9f005ba6 | version up | substance/_version.py | substance/_version.py | __version__ = '1.1.beta.4'
| __version__ = '1.1.beta.3'
| Python | 0.998795 |
f02eb748d33b621368198c10a965b27ee31effca | update tutorial section link | swagger/yamlscript.py | swagger/yamlscript.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# This script, when run, parses the file "swagger.yaml" and strips it down to only
# include those paths and methods specified in the included variable.
#
# As of now, it is called with every "jekyll build" - see jekyll-freme/_plugins/jekyll-pages-directory.rb
# line: "exec(python swagger/yamlscript.py)"
#
# To be able to import yaml, on linux, run "sudo pip install PyYAML"
#
# Author: Jonathan Sauder (jonathan_paul.sauder@dfki.de)
#
def main():
    """Strip swagger.yaml down to a whitelist of paths/methods, write simple.yaml.

    Reads swagger.yaml next to this script, keeps only the endpoints listed
    in `included_paths`, rewrites the tags and description for the simplified
    doc, and dumps the result to simple.yaml.

    Returns:
        0 on success.
    Raises:
        Exception: if swagger.yaml is missing or is not valid YAML.
    """
    import yaml,os,sys
    try:
        with open(os.path.dirname(__file__)+"/swagger.yaml","r") as f:
            full=yaml.safe_load(f.read())
    except IOError:
        # NOTE: the original followed each raise with sys.exit(1); those
        # calls were unreachable dead code and have been removed.
        raise Exception("\n\tException Handled in /swagger/yamlscript.py:"+ os.path.dirname(__file__)+"/swagger.yaml could not be found. The generation of a simple API-Doc was skipped")
    except yaml.scanner.ScannerError:
        raise Exception("\n\tException Handled in /swagger/yamlscript.py: The YAML File at "+ os.path.dirname(__file__)+"/swagger.yaml is invalid! The generation of a simple API-Doc was skipped")

    # Whitelist: path -> HTTP methods to keep in the simplified doc.
    included_paths={
        "/e-entity/freme-ner/documents": ["post"],
        "/e-entity/dbpedia-spotlight/documents": ["post"],
        "/e-publishing/html": ["post"],
        "/e-link/documents/": ["post"],
        "/e-translation/tilde": ["post"],
        "/e-terminology/tilde": ["post"],
        "/e-link/explore": ["post"]
    }

    # Snapshot keys with list() because entries are deleted while looping
    # (required on Python 3, harmless on Python 2).
    for path in list(full["paths"]):
        if path not in included_paths:
            del full["paths"][path]
        else:
            # BUG FIX: the original iterated the *whitelisted* methods and
            # deleted the ones missing from the spec, which (a) raises
            # KeyError for a genuinely missing method and (b) never strips
            # non-whitelisted methods, contradicting the module docstring.
            # Correct direction: drop spec methods that are not whitelisted.
            for method in list(full["paths"][path]):
                if method not in included_paths[path]:
                    del full["paths"][path][method]

    # Drop the "General Information" tag and replace the doc description.
    full["tags"]=[x for x in full["tags"] if x["name"]!="General Information"]
    full['info']['description']="This section only covers the most important endpoints of FREME: the enrichment endpoints.<br><br> The endpoints can be used to access FREME e-Services via common HTTP requests.<br><br> A full documentation of all e-Service endpoints, including all parameters, is provided <a href=\"full.html\">here</a>. For usage examples, see the <a href=\"../tutorials/overview.html\">tutorial section</a>."

    with open(os.path.dirname(__file__)+"/simple.yaml",'w') as f:
        f.write(yaml.dump(full))
    return 0
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# This script, when run, parses the file "swagger.yaml" and strips it down to only
# include those paths and methods specified in the included variable.
#
# As of now, it is called with every "jekyll build" - see jekyll-freme/_plugins/jekyll-pages-directory.rb
# line: "exec(python swagger/yamlscript.py)"
#
# To be able to import yaml, on linux, run "sudo pip install PyYAML"
#
# Author: Jonathan Sauder (jonathan_paul.sauder@dfki.de)
#
def main():
import yaml,os,sys
try:
with open(os.path.dirname(__file__)+"/swagger.yaml","r") as f:
full=yaml.safe_load(f.read())
except IOError:
raise Exception("\n\tException Handled in /swagger/yamlscript.py:"+ os.path.dirname(__file__)+"/swagger.yaml could not be found. The generation of a simple API-Doc was skipped")
sys.exit(1)
except yaml.scanner.ScannerError:
raise Exception("\n\tException Handled in /swagger/yamlscript.py: The YAML File at "+ os.path.dirname(__file__)+"/swagger.yaml is invalid! The generation of a simple API-Doc was skipped")
sys.exit(1)
included_paths={
"/e-entity/freme-ner/documents": ["post"],
"/e-entity/dbpedia-spotlight/documents": ["post"],
"/e-publishing/html": ["post"],
"/e-link/documents/": ["post"],
"/e-translation/tilde": ["post"],
"/e-terminology/tilde": ["post"],
"/e-link/explore": ["post"]
}
for path in full["paths"].keys():
if path not in included_paths:
del full["paths"][path]
else:
for method in included_paths[path]:
if method not in full["paths"][path].keys():
del full["paths"][path][method]
# else:
# full["paths"][path][method]['tags']=["Enrichment Endpoints"]
full["tags"]=[x for x in full["tags"] if x["name"]!="General Information"]
full['info']['description']="This section only covers the most important endpoints of FREME: the enrichment endpoints.<br><br> The endpoints can be used to access FREME e-Services via common HTTP requests.<br><br> A full documentation of all e-Service endpoints, including all parameters, is provided <a href=\"full.html\">here</a>. For usage examples, see the <a href=\"../Tutorials/overview.html\">tutorial section</a>."
with open(os.path.dirname(__file__)+"/simple.yaml",'w') as f:
f.write(yaml.dump(full))
return 0
if __name__ == '__main__':
main()
| Python | 0 |
86b698a228ddf1309e8f2006726724af05c5fca1 | bump version | symposion/__init__.py | symposion/__init__.py | __version__ = "1.0b1.dev12"
| __version__ = "1.0b1.dev11"
| Python | 0 |
18d551d2495fc122edb142e416a06ce4129da1f7 | Update urls.py | life3/config/urls.py | life3/config/urls.py | """life3.0 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls import include
from life3.dashboard import views as dashboard_view
# Route the site root to the dashboard API landing view; the dashboard API
# and login apps own their respective URL sub-trees via include().
urlpatterns = [
    url(r'^$', dashboard_view.api_home),
    url(r'^dashboard/api/', include('life3.dashboard.urls')),
    url(r'^login/', include('life3.login.urls')),
]
| """life3.0 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls import include
from life3.dashboard import views as dashboard_view
urlpatterns = [
url(r'^$/', dashboard_view.api_home),
url(r'^dashboard/api/', include('life3.dashboard.urls')),
url(r'^login/', include('life3.login.urls')),
]
| Python | 0.000002 |
ac3a9211725a0538c8c8f7899d86e4e22ceebb71 | Update binary_search.py | aids/sorting_and_searching/binary_search.py | aids/sorting_and_searching/binary_search.py | '''
In this module, we implement binary search in Python both
recrusively and iteratively
Assumption: Array is sorted
Time complexity: O(log n)
'''
def binary_search_recursive(arr, left, right, value):
    '''
    Recursive implementation of binary search of a sorted array

    Return index of the value found else return None
    '''
    if arr and left <= right:
        # BUG FIX: use floor division. On Python 3, '/' yields a float,
        # which is not a valid list index. The overflow-safe midpoint
        # form (left + (right - left) // 2) is kept as-is.
        middle = left + (right - left) // 2
        if arr[middle] == value:
            return middle
        if arr[middle] > value:
            # value, if present, is in the left half
            return binary_search_recursive(arr, left, middle - 1, value)
        # value, if present, is in the right half
        return binary_search_recursive(arr, middle + 1, right, value)
    return None
def binary_search_iterative(arr, left, right, value):
    '''
    Iterative implementation of binary search of a sorted array

    Return index of the value of found else return None
    '''
    if arr:
        while left <= right:
            # BUG FIX: use floor division. On Python 3, '/' yields a
            # float, which is not a valid list index.
            middle = left + (right - left) // 2
            if arr[middle] == value:
                return middle
            elif arr[middle] > value:
                # value, if present, is in the left half
                right = middle - 1
            else:
                # value, if present, is in the right half
                left = middle + 1
    return None
| '''
In this module, we implement binary search in Python both
recrusively and iteratively
Assumption: Array is sorted
Time complexity: O(log n)
'''
def binary_search_recursive(arr, left, right, value):
    '''
    Recursive implementation of binary search of a sorted array

    Return index of the value found else return None
    '''
    if arr and left <= right:
        # BUG FIX: use floor division. On Python 3, '/' yields a float,
        # which is not a valid list index.
        middle = (left + right) // 2
        if arr[middle] == value:
            return middle
        if arr[middle] > value:
            # value, if present, is in the left half
            return binary_search_recursive(arr, left, middle - 1, value)
        # value, if present, is in the right half
        return binary_search_recursive(arr, middle + 1, right, value)
    return None
def binary_search_iterative(arr, left, right, value):
    '''
    Iterative implementation of binary search of a sorted array

    Return index of the value of found else return None
    '''
    if arr:
        while left <= right:
            # BUG FIX: use floor division. On Python 3, '/' yields a
            # float, which is not a valid list index.
            # (left + (right - left) // 2 would avoid overflow in
            # fixed-width languages; Python ints do not overflow.)
            middle = (left + right) // 2
            if arr[middle] == value:
                return middle
            elif arr[middle] > value:
                # value, if present, is in the left half
                right = middle - 1
            else:
                # value, if present, is in the right half
                left = middle + 1
    return None
| Python | 0.000002 |
1433106d2e36a08f79b4b2c67e07c1fdd361bda6 | fix MAINTENANCE_MODE logic | electionleaflets/urls.py | electionleaflets/urls.py | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.views.generic import TemplateView
admin.autodiscover()
from leaflets.feeds import *
from core.views import HomeView, MaintenanceView
# When MAINTENANCE_MODE is enabled in settings, every URL is routed to the
# maintenance page; otherwise the full site URLconf is installed.
MAINTENANCE_MODE = getattr(settings, 'MAINTENANCE_MODE', False)

if MAINTENANCE_MODE:
    urlpatterns = patterns(
        '',
        url(r'.*', MaintenanceView.as_view(), name='maintenance_view'),
    )
else:
    urlpatterns = patterns(
        '',
        url(r'^$', HomeView.as_view(), name='home'),
        url(r'^leaflets', include('leaflets.urls')),
        url(r'^parties', include('parties.urls')),
        url(r'^constituencies', include('constituencies.urls')),
        url(r'^analysis', include('analysis.urls')),
        url(r'^tags', include('tags.urls')),
        url(r'^categories', include('categories.urls')),
        url(r'^api/', include('api.urls')),

        # Feeds
        url(r'^feeds/latest/$', LatestLeafletsFeed(), name='latest_feed'),
        # url(r'^feeds/party/(?P<party_slug>[\w_\-\.]+)/$', PartyFeed(), name='party_feed'),
        # url(r'^feeds/attacking/(?P<party_slug>[\w_\-\.]+)/$', AttackingPartyFeed(), name='attacking_party_feed'),
        url(r'^feeds/constituency/(?P<cons_slug>[\w_\-\.]+)/$', ConstituencyFeed(), name='constituency_feed'),
        url(r'^feeds/category/(?P<cat_slug>[\w_\-\.]+)/$', CategoryFeed(), name='category_feed'),
        url(r'^feeds/tag/(?P<tag_slug>[\w_\-\.]+)/$', TagFeed(), name='tag_feed'),

        # Individual urls
        url(r'^about/$', TemplateView.as_view(template_name='core/about.html'), name='about'),
        url(r'^report/(?P<id>\d+)/sent/$', TemplateView.as_view(template_name='core/report_sent.html'), name='report_abuse_sent'),
        url(r'^report/(?P<id>\d+)/$', 'core.views.report_abuse', name='report_abuse'),

        # Administration URLS
        (r'^admin/', include(admin.site.urls)),
        url(r'^accounts/', include('allauth.urls')),
    )

# BUG FIX: the original evaluated `urlpatterns + static(...)` and discarded
# the result, so static and media routes were never actually registered.
# Append them and rebind.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.views.generic import TemplateView
admin.autodiscover()
from leaflets.feeds import *
from core.views import HomeView, MaintenanceView
if getattr(settings, 'MAINTENANCE_MODE', None):
urlpatterns = patterns(
'',
url(r'.*', MaintenanceView.as_view(), name='maintenance_view'),
)
else:
urlpatterns = patterns(
'',
url(r'^$', HomeView.as_view(), name='home'),
url(r'^leaflets', include('leaflets.urls')),
url(r'^parties', include('parties.urls')),
url(r'^constituencies', include('constituencies.urls')),
url(r'^analysis', include('analysis.urls')),
url(r'^tags', include('tags.urls')),
url(r'^categories', include('categories.urls')),
url(r'^api/', include('api.urls')),
# Feeds
url(r'^feeds/latest/$', LatestLeafletsFeed(), name='latest_feed'),
# url(r'^feeds/party/(?P<party_slug>[\w_\-\.]+)/$', PartyFeed(), name='party_feed'),
# url(r'^feeds/attacking/(?P<party_slug>[\w_\-\.]+)/$', AttackingPartyFeed(), name='attacking_party_feed'),
url(r'^feeds/constituency/(?P<cons_slug>[\w_\-\.]+)/$', ConstituencyFeed(), name='constituency_feed'),
url(r'^feeds/category/(?P<cat_slug>[\w_\-\.]+)/$', CategoryFeed(), name='category_feed'),
url(r'^feeds/tag/(?P<tag_slug>[\w_\-\.]+)/$', TagFeed(), name='tag_feed'),
# Individual urls
url(r'^about/$', TemplateView.as_view(template_name='core/about.html'), name='about'),
url(r'^report/(?P<id>\d+)/sent/$', TemplateView.as_view(template_name='core/report_sent.html'), name='report_abuse_sent'),
url(r'^report/(?P<id>\d+)/$', 'core.views.report_abuse', name='report_abuse'),
# Administration URLS
(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('allauth.urls')),
)
urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | Python | 0.000011 |
5605dd1f37f91d0fa627d49332f5550c90e6d2e7 | Check child is element before inspecting name | mammoth/docx/xmlparser.py | mammoth/docx/xmlparser.py | import collections
import xml.sax
XmlElementBase = collections.namedtuple("XmlElement", ["name", "attributes", "children"])
class XmlElement(XmlElementBase):
    """An XML element node: (name, attributes, children)."""

    def find_child_or_null(self, name):
        """Like find_child, but return the shared null element instead of None."""
        return self.find_child(name) or _null_xml_element

    def find_child(self, name):
        """Return the first direct child *element* called `name`, or None.

        BUG FIX: text children must be skipped — XmlText has no `name`
        attribute, so inspecting it here raised AttributeError on mixed
        content. This applies the same guard find_children already uses.
        """
        for child in self.children:
            if child.node_type == node_types.element and child.name == name:
                return child

    def find_children(self, name):
        """Return all direct child elements called `name` as an XmlElementList."""
        return XmlElementList(filter(
            lambda child: child.node_type == node_types.element and child.name == name,
            self.children
        ))
class XmlElementList(object):
    """A flat collection of XML elements supporting chained child lookups."""

    def __init__(self, elements):
        self._elements = elements

    def __iter__(self):
        return iter(self._elements)

    def find_children(self, name):
        """Collect matching children of every element, preserving order."""
        matches = [
            child
            for element in self._elements
            for child in element.find_children(name)
        ]
        return XmlElementList(matches)
class NullXmlElement(object):
    """Null-object stand-in for a missing element: empty and childless."""

    # A null element carries no attributes.
    attributes = {}

    def find_child_or_null(self, name):
        # Lookups on the null element yield the null element again, so
        # chained find_child_or_null calls never raise.
        return self

    def find_child(self, name):
        # The null element has no children to find.
        return None
_null_xml_element = NullXmlElement()
XmlText = collections.namedtuple("XmlText", ["value"])
def element(name, attributes=None, children=None):
return XmlElement(name, attributes or {}, children or [])
text = XmlText
class node_types(object):
element = 1
text = 3
XmlElement.node_type = node_types.element
XmlText.node_type = node_types.text
def parse_xml(fileobj, namespace_mapping=None):
    """Parse XML from `fileobj` into the XmlElement tree; return the root element.

    namespace_mapping -- optional iterable of (prefix, uri) pairs; names in
    mapped namespaces are rendered as "prefix:local", unmapped ones as
    "{uri}local".
    """
    if namespace_mapping is None:
        namespace_prefixes = {}
    else:
        # Invert (prefix, uri) into uri -> prefix for lookup during parsing.
        namespace_prefixes = dict((uri, prefix) for prefix, uri in namespace_mapping)
    handler = Handler(namespace_prefixes)
    parser = xml.sax.make_parser()
    parser.setFeature(xml.sax.handler.feature_namespaces, True)
    parser.setContentHandler(handler)
    parser.parse(fileobj)
    return handler.root()
class Handler(xml.sax.handler.ContentHandler):
    """SAX content handler that builds the XmlElement/XmlText tree."""

    def __init__(self, namespace_prefixes):
        # uri -> prefix mapping used when rendering qualified names.
        self._namespace_prefixes = namespace_prefixes
        # Stack of currently open elements; the RootElement sentinel at the
        # bottom collects the document's top-level element.
        self._element_stack = [RootElement()]
        # Text chunks accumulated between element events.
        self._character_buffer = []

    def root(self):
        """Return the document's top-level element."""
        return self._element_stack[0].children[0]

    def startElementNS(self, name, qname, attrs):
        # Flush pending text first so it attaches to the correct parent.
        self._flush_character_buffer()
        attributes = dict((self._read_name(key), value) for key, value in attrs.items())
        element = XmlElement(self._read_name(name), attributes, [])
        self._element_stack[-1].children.append(element)
        self._element_stack.append(element)

    def endElementNS(self, name, qname):
        self._flush_character_buffer()
        self._element_stack.pop()

    def characters(self, content):
        # SAX may deliver one text run in several chunks; buffer until flushed.
        self._character_buffer.append(content)

    def _flush_character_buffer(self):
        # Join the buffered chunks into a single XmlText child, if any.
        if self._character_buffer:
            text = "".join(self._character_buffer)
            self._element_stack[-1].children.append(XmlText(text))
            self._character_buffer = []

    def _read_name(self, name):
        """Render a SAX (uri, local_name) pair as a readable string."""
        uri, local_name = name
        if uri is None:
            return local_name
        else:
            prefix = self._namespace_prefixes.get(uri)
            if prefix is None:
                # Unknown namespace: Clark notation "{uri}local".
                return "{%s}%s" % (uri, local_name)
            else:
                return "%s:%s" % (prefix, local_name)
class RootElement(object):
    """Sentinel parent for the document: its children hold the top-level element."""
    def __init__(self):
        self.children = []
| import collections
import xml.sax
XmlElementBase = collections.namedtuple("XmlElement", ["name", "attributes", "children"])
class XmlElement(XmlElementBase):
def find_child_or_null(self, name):
return self.find_child(name) or _null_xml_element
def find_child(self, name):
for child in self.children:
if child.name == name:
return child
def find_children(self, name):
return XmlElementList(filter(lambda child: child.name == name, self.children))
class XmlElementList(object):
def __init__(self, elements):
self._elements = elements
def __iter__(self):
return iter(self._elements)
def find_children(self, name):
children = []
for element in self._elements:
for child in element.find_children(name):
children.append(child)
return XmlElementList(children)
class NullXmlElement(object):
attributes = {}
def find_child_or_null(self, name):
return self
def find_child(self, name):
return None
_null_xml_element = NullXmlElement()
XmlText = collections.namedtuple("XmlText", ["value"])
def element(name, attributes=None, children=None):
return XmlElement(name, attributes or {}, children or [])
text = XmlText
class node_types(object):
element = 1
text = 3
XmlElement.node_type = node_types.element
XmlText.node_type = node_types.text
def parse_xml(fileobj, namespace_mapping=None):
if namespace_mapping is None:
namespace_prefixes = {}
else:
namespace_prefixes = dict((uri, prefix) for prefix, uri in namespace_mapping)
handler = Handler(namespace_prefixes)
parser = xml.sax.make_parser()
parser.setFeature(xml.sax.handler.feature_namespaces, True)
parser.setContentHandler(handler)
parser.parse(fileobj)
return handler.root()
class Handler(xml.sax.handler.ContentHandler):
def __init__(self, namespace_prefixes):
self._namespace_prefixes = namespace_prefixes
self._element_stack = [RootElement()]
self._character_buffer = []
def root(self):
return self._element_stack[0].children[0]
def startElementNS(self, name, qname, attrs):
self._flush_character_buffer()
attributes = dict((self._read_name(key), value) for key, value in attrs.items())
element = XmlElement(self._read_name(name), attributes, [])
self._element_stack[-1].children.append(element)
self._element_stack.append(element)
def endElementNS(self, name, qname):
self._flush_character_buffer()
self._element_stack.pop()
def characters(self, content):
self._character_buffer.append(content)
def _flush_character_buffer(self):
if self._character_buffer:
text = "".join(self._character_buffer)
self._element_stack[-1].children.append(XmlText(text))
self._character_buffer = []
def _read_name(self, name):
uri, local_name = name
if uri is None:
return local_name
else:
prefix = self._namespace_prefixes.get(uri)
if prefix is None:
return "{%s}%s" % (uri, local_name)
else:
return "%s:%s" % (prefix, local_name)
class RootElement(object):
def __init__(self):
self.children = []
| Python | 0.000001 |
bc9bbe0075f8a6571179e2310a9cfeaff89652b2 | Remove unused argument | modules/pipeunion.py | modules/pipeunion.py | # pipeunion.py
#
from pipe2py import util
def pipe_union(context, _INPUT, **kwargs):
    """This operator merges up to 5 sources together.

    Keyword arguments:
    context -- pipeline context
    _INPUT -- source generator
    kwargs -- _OTHER1 - another source generator
              _OTHER2 etc.

    Yields (_OUTPUT):
    union of all source items: everything from _INPUT first, then each
    _OTHERn source in turn
    """
    #TODO the multiple sources should be pulled in parallel
    # check David Beazely for suggestions (co-routines with queues?)
    # or maybe use multiprocessing and Queues (perhaps over multiple servers too)
    #Single thread and sequential pulling will do for now...
    for item in _INPUT:
        # A literal True marks a forever-fed pseudo-source: stop reading it
        # and fall through to the real _OTHERn sources.
        if item == True: #i.e. this is being fed forever, i.e. not a real source so just use _OTHERs
            break
        yield item

    # Drain the remaining sources sequentially, in kwargs iteration order.
    for other in kwargs:
        if other.startswith('_OTHER'):
            for item in kwargs[other]:
                yield item
| # pipeunion.py
#
from pipe2py import util
def pipe_union(context, _INPUT, conf, **kwargs):
"""This operator merges up to 5 source together.
Keyword arguments:
context -- pipeline context
_INPUT -- source generator
kwargs -- _OTHER1 - another source generator
_OTHER2 etc.
conf:
Yields (_OUTPUT):
union of all source items
"""
#TODO the multiple sources should be pulled in parallel
# check David Beazely for suggestions (co-routines with queues?)
# or maybe use multiprocessing and Queues (perhaps over multiple servers too)
#Single thread and sequential pulling will do for now...
for item in _INPUT:
if item == True: #i.e. this is being fed forever, i.e. not a real source so just use _OTHERs
break
yield item
for other in kwargs:
if other.startswith('_OTHER'):
for item in kwargs[other]:
yield item
| Python | 0.000009 |
3053c57a67c4dfb5e20bb93d6a586c7acf84275e | Prepare release v1.3.5. | monitoring/nagios/__init__.py | monitoring/nagios/__init__.py | import monitoring.nagios.logger
__version__ = '1.3.5'
| import monitoring.nagios.logger
__version__ = '1.3.2'
| Python | 0 |
cf07c34fe3a3d7b8767e50e77e609253dd177cff | Use isoformat date RFC 3339 | moulinette/utils/serialize.py | moulinette/utils/serialize.py | import logging
from json.encoder import JSONEncoder
import datetime
logger = logging.getLogger('moulinette.utils.serialize')
# JSON utilities -------------------------------------------------------
class JSONExtendedEncoder(JSONEncoder):
    """Extended JSON encoder

    Extend default JSON encoder to recognize more types and classes. It
    will never raise if the object can't be encoded and return its repr
    instead.

    The following objects and types are supported:
        - set and Python-2-style iterators: converted into list
        - datetime.date / datetime.datetime: converted into ISO-8601
          (RFC 3339) strings
    """
    def default(self, o):
        """Return a serializable object for `o`, falling back to repr()."""
        # Convert compatible containers into list.
        # NOTE(review): the 'next' attribute check only matches Python 2
        # iterators (Python 3 uses '__next__') — confirm intent before
        # relying on it for generators.
        if isinstance(o, set) or (
                hasattr(o, '__iter__') and hasattr(o, 'next')):
            return list(o)

        # Serialize dates and datetimes as ISO-8601 strings.
        # datetime.datetime is a subclass of datetime.date, so a single
        # isinstance check covers both (the original tested each in turn).
        if isinstance(o, datetime.date):
            return o.isoformat()

        # Return the repr for object that json can't encode
        logger.warning('cannot properly encode in JSON the object %s, '
                       'returned repr is: %r', type(o), o)
        return repr(o)
| import logging
from json.encoder import JSONEncoder
import datetime
logger = logging.getLogger('moulinette.utils.serialize')
# JSON utilities -------------------------------------------------------
class JSONExtendedEncoder(JSONEncoder):
"""Extended JSON encoder
Extend default JSON encoder to recognize more types and classes. It
will never raise if the object can't be encoded and return its repr
instead.
The following objects and types are supported:
- set: converted into list
"""
def default(self, o):
"""Return a serializable object"""
# Convert compatible containers into list
if isinstance(o, set) or (
hasattr(o, '__iter__') and hasattr(o, 'next')):
return list(o)
# Convert compatible containers into list
if isinstance(o, datetime.datetime) or isinstance(o, datetime.date):
return str(o)
# Return the repr for object that json can't encode
logger.warning('cannot properly encode in JSON the object %s, '
'returned repr is: %r', type(o), o)
return repr(o)
| Python | 0 |
5d8b2224bf2864ad7e4bacb0624542dec8549b57 | add mpf-mc entry points in machine test | mpf/tests/MpfMachineTestCase.py | mpf/tests/MpfMachineTestCase.py | import inspect
from mpf.core.machine import MachineController
from mpf.tests.MpfTestCase import MpfTestCase
class MpfMachineTestCase(MpfTestCase):
    """Machine-wide MPF test case: runs the real machine config on the
    smart_virtual platform with plugins enabled and only BCP disabled,
    and installs the mpf-mc config players so mc-side entries validate.
    """

    def __init__(self, methodName='runTest'):
        super().__init__(methodName)
        # only disable bcp. everything else should run
        self.machine_config_patches = dict()
        self.machine_config_patches['bcp'] = []

        # increase test expected duration
        self.expected_duration = 5.0

    @staticmethod
    def _load_mc_players(cls):
        """Import the mpf-mc config players and attach one instance of each
        to the machine controller as '<name>_player'.

        Installed as MachineController's plugin-player loader in setUp().
        """
        # player name -> module path of the mpf-mc config player
        mc_players = {
            "sound_player": "mpfmc.config_players.sound_player",
            "widget_player": "mpfmc.config_players.widget_player",
            "slide_player": "mpfmc.config_players.slide_player"
        }

        for name, module in mc_players.items():
            # NOTE(review): importlib is reached through inspect's own
            # import here; a direct `import importlib` would be clearer —
            # confirm before changing.
            imported_module = inspect.importlib.import_module(module)
            setattr(cls, '{}_player'.format(name),
                    imported_module.player_cls(cls))

    def setUp(self):
        # Replace the stock plugin-player registration with the mc loader
        # before the machine boots.
        MachineController._register_plugin_config_players = self._load_mc_players
        super().setUp()

    def get_enable_plugins(self):
        # Machine tests exercise the full plugin stack.
        return True

    def getConfigFile(self):
        return "config.yaml"

    def getMachinePath(self):
        return ""

    def getAbsoluteMachinePath(self):
        # do not use path relative to MPF folder
        return self.getMachinePath()

    def get_platform(self):
        return 'smart_virtual'
| from mpf.tests.MpfTestCase import MpfTestCase
class MpfMachineTestCase(MpfTestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
# only disable bcp. everything else should run
self.machine_config_patches = dict()
self.machine_config_patches['bcp'] = []
# increase test expected duration
self.expected_duration = 5.0
def getConfigFile(self):
return "config.yaml"
def getMachinePath(self):
return ""
def getAbsoluteMachinePath(self):
# do not use path relative to MPF folder
return self.getMachinePath()
def get_platform(self):
return 'smart_virtual'
| Python | 0 |
04745c9c4074ee44e2cfd7ef5fecae1eb796b109 | Fix now_utc() to return aware datetime | mycroft/util/time.py | mycroft/util/time.py | # -*- coding: utf-8 -*-
#
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from dateutil.tz import gettz, tzlocal
def default_timezone():
""" Get the default timezone
Based on user location settings location.timezone.code or
the default system value if no setting exists.
Returns:
(datetime.tzinfo): Definition of the default timezone
"""
try:
# Obtain from user's configurated settings
# location.timezone.code (e.g. "America/Chicago")
# location.timezone.name (e.g. "Central Standard Time")
# location.timezone.offset (e.g. -21600000)
from mycroft.configuration import Configuration
config = Configuration.get()
code = config["location"]["timezone"]["code"]
return gettz(code)
except Exception:
# Just go with system default timezone
return tzlocal()
def now_utc():
""" Retrieve the current time in UTC
Returns:
(datetime): The current time in Universal Time, aka GMT
"""
return to_utc(datetime.utcnow())
def now_local(tz=None):
""" Retrieve the current time
Args:
tz (datetime.tzinfo, optional): Timezone, default to user's settings
Returns:
(datetime): The current time
"""
if not tz:
tz = default_timezone()
return datetime.now(tz)
def to_utc(dt):
""" Convert a datetime with timezone info to a UTC datetime
Args:
dt (datetime): A datetime (presumably in some local zone)
Returns:
(datetime): time converted to UTC
"""
tzUTC = gettz("UTC")
if dt.tzinfo:
return dt.astimezone(tzUTC)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tzUTC)
def to_local(dt):
""" Convert a datetime to the user's local timezone
Args:
dt (datetime): A datetime (if no timezone, defaults to UTC)
Returns:
(datetime): time converted to the local timezone
"""
tz = default_timezone()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
| # -*- coding: utf-8 -*-
#
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
from dateutil.tz import gettz, tzlocal
def default_timezone():
""" Get the default timezone
Based on user location settings location.timezone.code or
the default system value if no setting exists.
Returns:
(datetime.tzinfo): Definition of the default timezone
"""
try:
# Obtain from user's configurated settings
# location.timezone.code (e.g. "America/Chicago")
# location.timezone.name (e.g. "Central Standard Time")
# location.timezone.offset (e.g. -21600000)
from mycroft.configuration import Configuration
config = Configuration.get()
code = config["location"]["timezone"]["code"]
return gettz(code)
except Exception:
# Just go with system default timezone
return tzlocal()
def now_utc():
""" Retrieve the current time in UTC
Returns:
(datetime): The current time in Universal Time, aka GMT
"""
return datetime.utcnow()
def now_local(tz=None):
""" Retrieve the current time
Args:
tz (datetime.tzinfo, optional): Timezone, default to user's settings
Returns:
(datetime): The current time
"""
if not tz:
tz = default_timezone()
return datetime.now(tz)
def to_utc(dt):
""" Convert a datetime with timezone info to a UTC datetime
Args:
dt (datetime): A datetime (presumably in some local zone)
Returns:
(datetime): time converted to UTC
"""
tzUTC = gettz("UTC")
if dt.tzinfo:
return dt.astimezone(tzUTC)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tzUTC)
def to_local(dt):
""" Convert a datetime to the user's local timezone
Args:
dt (datetime): A datetime (if no timezone, defaults to UTC)
Returns:
(datetime): time converted to the local timezone
"""
tz = default_timezone()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
| Python | 0 |
e4ccfdb49951ed9c4073ba389421d89fea273288 | make test more robust | mpfmc/tests/MpfSlideTestCase.py | mpfmc/tests/MpfSlideTestCase.py | from mpf.tests.MpfTestCase import MpfTestCase
class MpfSlideTestCase(MpfTestCase):
def assertSlideOnTop(self, slide_name, target="default"):
self.assertEqual(slide_name, self.mc.targets[target].current_slide.name)
def assertTextOnTopSlide(self, text, target="default"):
self.assertTextInSlide(text, self.mc.targets[target].current_slide.name)
def assertTextNotOnTopSlide(self, text, target="default"):
self.assertTextNotInSlide(text, self.mc.targets[target].current_slide.name)
def assertSlideActive(self, slide_name):
self.assertIn(slide_name, self.mc.active_slides, "Slide {} is not active.".format(slide_name))
def assertSlideNotActive(self, slide_name):
self.assertNotIn(slide_name, self.mc.active_slides, "Slide {} is active but should not.".format(slide_name))
def _get_texts_from_slide(self, slide):
texts = []
for children in slide.children:
if children.children:
texts.extend(self._get_texts_from_slide(children))
if hasattr(children, "text"):
texts.append(children.text)
return texts
def assertTextInSlide(self, text, slide_name):
self.assertSlideActive(slide_name)
self.assertIn(text, self._get_texts_from_slide(self.mc.active_slides[slide_name]),
"Text {} not found in slide {}.".format(text, slide_name))
def assertTextNotInSlide(self, text, slide_name):
self.assertSlideActive(slide_name)
self.assertNotIn(text, self._get_texts_from_slide(self.mc.active_slides[slide_name]),
"Text {} found in slide {} but should not be there.".format(text, slide_name)) | from mpf.tests.MpfTestCase import MpfTestCase
class MpfSlideTestCase(MpfTestCase):
def assertSlideOnTop(self, slide_name, target="default"):
self.assertEqual(slide_name, self.mc.targets[target].current_slide.name)
def assertTextOnTopSlide(self, text, target="default"):
self.assertTextInSlide(text, self.mc.targets[target].current_slide.name)
def assertTextNotOnTopSlide(self, text, target="default"):
self.assertTextNotInSlide(text, self.mc.targets[target].current_slide.name)
def assertSlideActive(self, slide_name):
self.assertIn(slide_name, self.mc.active_slides, "Slide {} is not active.".format(slide_name))
def assertSlideNotActive(self, slide_name):
self.assertNotIn(slide_name, self.mc.active_slides, "Slide {} is active but should not.".format(slide_name))
def assertTextInSlide(self, text, slide_name):
self.assertSlideActive(slide_name)
self.assertIn(text, [x.text for x in self.mc.active_slides[slide_name].children[0].children],
"Text {} not found in slide {}.".format(text, slide_name))
def assertTextNotInSlide(self, text, slide_name):
self.assertSlideActive(slide_name)
self.assertNotIn(text, [x.text for x in self.mc.active_slides[slide_name].children[0].children],
"Text {} found in slide {} but should not be there.".format(text, slide_name)) | Python | 0.000329 |
6ad60176892df0eabb7faf96277c792c742fc9f0 | simplify some codes in _build_request | mechanicalsoup/browser.py | mechanicalsoup/browser.py | import requests
import bs4
from six.moves import urllib
from six import string_types
from .form import Form
class Browser:
def __init__(self, session=None, soup_config=None):
self.session = session or requests.Session()
self.soup_config = soup_config or dict()
@staticmethod
def add_soup(response, soup_config):
if "text/html" in response.headers.get("Content-Type", ""):
response.soup = bs4.BeautifulSoup(
response.content, **soup_config)
def request(self, *args, **kwargs):
response = self.session.request(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def get(self, *args, **kwargs):
response = self.session.get(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def post(self, *args, **kwargs):
response = self.session.post(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def _build_request(self, form, url=None, **kwargs):
method = form["method"]
action = form["action"]
url = urllib.parse.urljoin(url, action)
# read http://www.w3.org/TR/html5/forms.html
data = kwargs.get("data") or dict()
files = kwargs.get("files") or dict()
for input in form.select("input"):
name = input.get("name")
# web browsers use empty string for inputs with missing values
value = input.get("value", "")
if not name:
continue
if input.get("type") in ("radio", "checkbox") \
and "checked" not in input.attrs:
continue
if input.get("type") == "checkbox":
data.setdefault(name, []).append(value)
elif input.get("type") == "file":
# read http://www.cs.tut.fi/~jkorpela/forms/file.html
# in web browsers, file upload only happens if the form"s (or
# submit button"s) enctype attribute is set to
# "multipart/form-data". we don"t care, simplify.
if not value:
continue
if isinstance(value, string_types):
value = open(value, "rb")
files[name] = value
else:
data[name] = value
for textarea in form.select("textarea"):
name = textarea.get("name")
if not name:
continue
data[name] = textarea.text
for select in form.select("select"):
name = select.get("name")
if not name:
continue
for i, option in enumerate(select.select("option")):
if i == 0 or "selected" in option.attrs:
data[name] = option.get("value", "")
return requests.Request(method, url, data=data, files=files, **kwargs)
def _prepare_request(self, form, url=None, **kwargs):
request = self._build_request(form, url, **kwargs)
return self.session.prepare_request(request)
def submit(self, form, url=None, **kwargs):
if isinstance(form, Form):
form = form.form
request = self._prepare_request(form, url, **kwargs)
response = self.session.send(request)
Browser.add_soup(response, self.soup_config)
return response
| import requests
import bs4
from six.moves import urllib
from six import string_types
from .form import Form
class Browser:
def __init__(self, session=None, soup_config=None):
self.session = session or requests.Session()
self.soup_config = soup_config or dict()
@staticmethod
def add_soup(response, soup_config):
if "text/html" in response.headers.get("Content-Type", ""):
response.soup = bs4.BeautifulSoup(
response.content, **soup_config)
def request(self, *args, **kwargs):
response = self.session.request(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def get(self, *args, **kwargs):
response = self.session.get(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def post(self, *args, **kwargs):
response = self.session.post(*args, **kwargs)
Browser.add_soup(response, self.soup_config)
return response
def _build_request(self, form, url=None, **kwargs):
method = form["method"]
action = form["action"]
url = urllib.parse.urljoin(url, action)
# read http://www.w3.org/TR/html5/forms.html
data = kwargs.get("data") or dict()
files = kwargs.get("files") or dict()
for input in form.select("input"):
name = input.get("name")
# web browsers use empty string for inputs with missing values
value = input.get("value", "")
if not name:
continue
if input.get("type") in ("radio", "checkbox") and "checked" not in input.attrs:
continue
if input.get("type") == "checkbox":
if not name in data:
data[name] = list()
data[name].append(value)
elif input.get("type") == "file":
# read http://www.cs.tut.fi/~jkorpela/forms/file.html
# in web browsers, file upload only happens if the form"s (or
# submit button"s) enctype attribute is set to
# "multipart/form-data". we don"t care, simplify.
if not value:
continue
if isinstance(value, string_types):
value = open(value, "rb")
files[name] = value
else:
data[name] = value
for textarea in form.select("textarea"):
name = textarea.get("name")
if not name:
continue
data[name] = textarea.text
for select in form.select("select"):
name = select.get("name")
if not name:
continue
for i, option in enumerate(select.select("option")):
if i == 0 or "selected" in option.attrs:
data[name] = option.get("value", "")
return requests.Request(method, url, data=data, files=files, **kwargs)
def _prepare_request(self, form, url=None, **kwargs):
request = self._build_request(form, url, **kwargs)
return self.session.prepare_request(request)
def submit(self, form, url=None, **kwargs):
if isinstance(form, Form):
form = form.form
request = self._prepare_request(form, url, **kwargs)
response = self.session.send(request)
Browser.add_soup(response, self.soup_config)
return response
| Python | 0.000117 |
8a006ecff95e7699a4ca65f2af5ff566648c3a0d | Add norhh suggestion #1 | mythril/analysis/modules/dos.py | mythril/analysis/modules/dos.py | """This module contains the detection code SWC-128 - DOS with block gas limit."""
import logging
from typing import Dict, cast, List
from mythril.analysis.swc_data import DOS_WITH_BLOCK_GAS_LIMIT
from mythril.laser.ethereum.strategy.custom import JUMPDEST_LIMIT
from mythril.analysis.report import Issue
from mythril.analysis.modules.base import DetectionModule
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.state.annotation import StateAnnotation
from mythril.laser.ethereum import util
log = logging.getLogger(__name__)
class LoopAnnotation(StateAnnotation):
def __init__(self, loop_start: int, loop_end: int) -> None:
self.loop_start = loop_start
self.loop_end = loop_end
def contains(self, address: int) -> bool:
return self.loop_start < address < self.loop_end
class DOS(DetectionModule):
"""This module consists of a makeshift loop detector that annotates the state with
a list of byte ranges likely to be loops. If a CALL or SSTORE detection is found in
one of the ranges it creates a low-severity issue. This is not super precise but
good enough to identify places that warrant a closer look. Checking the loop condition
would be a possible improvement.
"""
def __init__(self) -> None:
""""""
super().__init__(
name="DOS",
swc_id=DOS_WITH_BLOCK_GAS_LIMIT,
description="Check for DOS",
entrypoint="callback",
pre_hooks=["JUMPI", "CALL", "SSTORE"],
)
"""Keeps track of how often jump destinations are reached."""
self._jumpdest_count = {} # type: Dict[object, dict]
def _execute(self, state: GlobalState) -> None:
"""
:param state:
:return:
"""
self._issues.extend(self._analyze_states(state))
def _analyze_states(self, state: GlobalState) -> List[Issue]:
"""
:param state: the current state
:return: returns the issues for that corresponding state
"""
opcode = state.get_current_instruction()["opcode"]
address = state.get_current_instruction()["address"]
if opcode == "JUMPI":
target = util.get_concrete_int(state.mstate.stack[-1])
transaction = state.current_transaction
if state.current_transaction in self._jumpdest_count:
try:
self._jumpdest_count[transaction][target] += 1
if self._jumpdest_count[transaction][target] == JUMPDEST_LIMIT - 1:
annotation = (
LoopAnnotation(address, target)
if target > address
else LoopAnnotation(target, address)
)
state.annotate(annotation)
except KeyError:
self._jumpdest_count[transaction][target] = 0
else:
self._jumpdest_count[transaction] = {}
self._jumpdest_count[transaction][target] = 0
else:
annotations = cast(
List[LoopAnnotation], list(state.get_annotations(LoopAnnotation))
)
for annotation in annotations:
if annotation.contains(address):
operation = (
"A storage modification"
if opcode == "SSTORE"
else "An external call"
)
description_head = (
"Potential denial-of-service if block gas limit is reached."
)
description_tail = "{} is executed in a loop.".format(operation)
issue = Issue(
contract=state.environment.active_account.contract_name,
function_name=state.environment.active_function_name,
address=annotation.loop_start,
swc_id=DOS_WITH_BLOCK_GAS_LIMIT,
bytecode=state.environment.code.bytecode,
title="Potential denial-of-service if block gas limit is reached",
severity="Low",
description_head=description_head,
description_tail=description_tail,
gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
)
return [issue]
return []
detector = DOS()
| """This module contains the detection code SWC-128 - DOS with block gas limit."""
import logging
from typing import Dict, cast, List
from mythril.analysis.swc_data import DOS_WITH_BLOCK_GAS_LIMIT
from mythril.analysis.report import Issue
from mythril.analysis.modules.base import DetectionModule
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.state.annotation import StateAnnotation
from mythril.laser.ethereum import util
log = logging.getLogger(__name__)
class LoopAnnotation(StateAnnotation):
def __init__(self, loop_start: int, loop_end: int) -> None:
self.loop_start = loop_start
self.loop_end = loop_end
def contains(self, address: int) -> bool:
return self.loop_start < address < self.loop_end
class DOS(DetectionModule):
"""This module consists of a makeshift loop detector that annotates the state with
a list of byte ranges likely to be loops. If a CALL or SSTORE detection is found in
one of the ranges it creates a low-severity issue. This is not super precise but
good enough to identify places that warrant a closer look. Checking the loop condition
would be a possible improvement.
"""
def __init__(self) -> None:
""""""
super().__init__(
name="DOS",
swc_id=DOS_WITH_BLOCK_GAS_LIMIT,
description="Check for DOS",
entrypoint="callback",
pre_hooks=["JUMPI", "CALL", "SSTORE"],
)
"""Keeps track of how often jump destinations are reached."""
self._jumpdest_count = {} # type: Dict[object, dict]
def _execute(self, state: GlobalState) -> None:
"""
:param state:
:return:
"""
self._issues.extend(self._analyze_states(state))
def _analyze_states(self, state: GlobalState) -> List[Issue]:
"""
:param state: the current state
:return: returns the issues for that corresponding state
"""
opcode = state.get_current_instruction()["opcode"]
address = state.get_current_instruction()["address"]
if opcode == "JUMPI":
target = util.get_concrete_int(state.mstate.stack[-1])
transaction = state.current_transaction
if state.current_transaction in self._jumpdest_count:
try:
self._jumpdest_count[transaction][target] += 1
if self._jumpdest_count[transaction][target] == 3:
annotation = (
LoopAnnotation(address, target)
if target > address
else LoopAnnotation(target, address)
)
state.annotate(annotation)
except KeyError:
self._jumpdest_count[transaction][target] = 0
else:
self._jumpdest_count[transaction] = {}
self._jumpdest_count[transaction][target] = 0
else:
annotations = cast(
List[LoopAnnotation], list(state.get_annotations(LoopAnnotation))
)
for annotation in annotations:
if annotation.contains(address):
operation = (
"A storage modification"
if opcode == "SSTORE"
else "An external call"
)
description_head = (
"Potential denial-of-service if block gas limit is reached."
)
description_tail = "{} is executed in a loop.".format(operation)
issue = Issue(
contract=state.environment.active_account.contract_name,
function_name=state.environment.active_function_name,
address=annotation.loop_start,
swc_id=DOS_WITH_BLOCK_GAS_LIMIT,
bytecode=state.environment.code.bytecode,
title="Potential denial-of-service if block gas limit is reached",
severity="Low",
description_head=description_head,
description_tail=description_tail,
gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
)
return [issue]
return []
detector = DOS()
| Python | 0 |
39a23d06cc09a9dbf0802740aaca8854bfd64b04 | Add check for directory access rights in LocalStorage | onitu/drivers/local_storage/local_storage.py | onitu/drivers/local_storage/local_storage.py | import os
from path import path
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from onitu.api import Plug
plug = Plug()
# Ignore the next Watchdog event concerning those files
events_to_ignore = set()
# Store the mtime of the last write of each transfered file
last_mtime = {}
root = None
@plug.handler()
def get_chunk(filename, offset, size):
filename = root.joinpath(filename)
try:
with open(filename, 'rb') as f:
f.seek(offset)
return f.read(size)
except IOError as e:
plug.logger.warn("Error getting file `{}`: {}", filename, e)
@plug.handler()
def start_upload(metadata):
filename = root.joinpath(metadata.filename)
# We ignore the next Watchdog events concerning this file
events_to_ignore.add(metadata.filename)
try:
if not filename.exists():
filename.dirname().makedirs_p()
filename.open('wb').close()
except IOError as e:
plug.logger.warn("Error creating file `{}`: {}", filename, e)
@plug.handler()
def end_upload(metadata):
filename = root.joinpath(metadata.filename)
# this is to make sure that no further event concerning
# this set of writes will be propagated to the Referee
last_mtime[metadata.filename] = filename.mtime
metadata.revision = filename.mtime
metadata.write_revision()
if metadata.filename in events_to_ignore:
events_to_ignore.remove(metadata.filename)
@plug.handler()
def upload_chunk(filename, offset, chunk):
abs_path = root.joinpath(filename)
# We make sure events are ignored for this file
events_to_ignore.add(filename)
try:
# We should not append the file but seek to the right
# position.
# However, the behavior of `offset` isn't well defined
with open(abs_path, 'ab') as f:
f.write(chunk)
except IOError as e:
plug.logger.warn("Error writting file `{}`: {}", filename, e)
def check_changes():
for abs_path in root.walkfiles():
filename = abs_path.relpath(root).normpath()
metadata = plug.get_metadata(filename)
revision = metadata.revision
revision = float(revision) if revision else .0
if abs_path.mtime > revision:
update_file(metadata, abs_path)
def update_file(metadata, path):
if metadata.filename in events_to_ignore:
return
if metadata.filename in last_mtime:
if last_mtime[metadata.filename] >= path.mtime:
# We're about to send an event for a file that hasn't changed
# since the last upload, we stop here
return
else:
del last_mtime[metadata.filename]
metadata.size = path.size
metadata.revision = path.mtime
plug.update_file(metadata)
class EventHandler(FileSystemEventHandler):
def on_moved(self, event):
def handle_move(event):
if event.is_directory:
return
#if event.src_path:
#self._handle_deletion(event.src_path.decode())
self._handle_update(event.dest_path.decode())
handle_move(event)
if event.is_directory:
for subevent in event.sub_moved_events():
handle_move(subevent)
def on_modified(self, event):
if event.is_directory:
return
self._handle_update(event.src_path.decode())
def _handle_update(self, abs_path):
abs_path = path(abs_path)
filename = root.relpathto(abs_path)
metadata = plug.get_metadata(filename)
update_file(metadata, abs_path)
def start(*args, **kwargs):
plug.start(*args, **kwargs)
global root
root = path(plug.options['root'])
if not root.access(os.W_OK | os.R_OK):
plug.logger.error("Can't access directory `{}`.", root)
return
observer = Observer()
observer.schedule(EventHandler(), path=root, recursive=True)
observer.start()
check_changes()
plug.wait()
| from path import path
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from onitu.api import Plug
plug = Plug()
# Ignore the next Watchdog event concerning those files
events_to_ignore = set()
# Store the mtime of the last write of each transfered file
last_mtime = {}
root = None
@plug.handler()
def get_chunk(filename, offset, size):
filename = root.joinpath(filename)
try:
with open(filename, 'rb') as f:
f.seek(offset)
return f.read(size)
except IOError as e:
plug.logger.warn("Error getting file `{}`: {}", filename, e)
@plug.handler()
def start_upload(metadata):
filename = root.joinpath(metadata.filename)
# We ignore the next Watchdog events concerning this file
events_to_ignore.add(metadata.filename)
try:
if not filename.exists():
filename.dirname().makedirs_p()
filename.open('wb').close()
except IOError as e:
plug.logger.warn("Error creating file `{}`: {}", filename, e)
@plug.handler()
def end_upload(metadata):
filename = root.joinpath(metadata.filename)
# this is to make sure that no further event concerning
# this set of writes will be propagated to the Referee
last_mtime[metadata.filename] = filename.mtime
metadata.revision = filename.mtime
metadata.write_revision()
if metadata.filename in events_to_ignore:
events_to_ignore.remove(metadata.filename)
@plug.handler()
def upload_chunk(filename, offset, chunk):
abs_path = root.joinpath(filename)
# We make sure events are ignored for this file
events_to_ignore.add(filename)
try:
# We should not append the file but seek to the right
# position.
# However, the behavior of `offset` isn't well defined
with open(abs_path, 'ab') as f:
f.write(chunk)
except IOError as e:
plug.logger.warn("Error writting file `{}`: {}", filename, e)
def check_changes():
for abs_path in root.walkfiles():
filename = abs_path.relpath(root).normpath()
metadata = plug.get_metadata(filename)
revision = metadata.revision
revision = float(revision) if revision else .0
if abs_path.mtime > revision:
update_file(metadata, abs_path)
def update_file(metadata, path):
if metadata.filename in events_to_ignore:
return
if metadata.filename in last_mtime:
if last_mtime[metadata.filename] >= path.mtime:
# We're about to send an event for a file that hasn't changed
# since the last upload, we stop here
return
else:
del last_mtime[metadata.filename]
metadata.size = path.size
metadata.revision = path.mtime
plug.update_file(metadata)
class EventHandler(FileSystemEventHandler):
def on_moved(self, event):
def handle_move(event):
if event.is_directory:
return
#if event.src_path:
#self._handle_deletion(event.src_path.decode())
self._handle_update(event.dest_path.decode())
handle_move(event)
if event.is_directory:
for subevent in event.sub_moved_events():
handle_move(subevent)
def on_modified(self, event):
if event.is_directory:
return
self._handle_update(event.src_path.decode())
def _handle_update(self, abs_path):
abs_path = path(abs_path)
filename = root.relpathto(abs_path)
metadata = plug.get_metadata(filename)
update_file(metadata, abs_path)
def start(*args, **kwargs):
plug.start(*args, **kwargs)
global root
root = path(plug.options['root'])
observer = Observer()
observer.schedule(EventHandler(), path=root, recursive=True)
observer.start()
check_changes()
plug.wait()
| Python | 0 |
2d95b9a4b6d87e9f630c59995403988dee390c20 | Fix simple typo: utilty -> utility (#5182) | doc/sphinx_util.py | doc/sphinx_util.py | # -*- coding: utf-8 -*-
"""Helper utility function for customization."""
import sys
import os
import docutils
import subprocess
READTHEDOCS_BUILD = (os.environ.get('READTHEDOCS', None) is not None)
if not os.path.exists('web-data'):
subprocess.call('rm -rf web-data;' +
'git clone https://github.com/dmlc/web-data', shell = True)
else:
subprocess.call('cd web-data; git pull', shell=True)
sys.stderr.write('READTHEDOCS=%s\n' % (READTHEDOCS_BUILD))
| # -*- coding: utf-8 -*-
"""Helper utilty function for customization."""
import sys
import os
import docutils
import subprocess
READTHEDOCS_BUILD = (os.environ.get('READTHEDOCS', None) is not None)
if not os.path.exists('web-data'):
subprocess.call('rm -rf web-data;' +
'git clone https://github.com/dmlc/web-data', shell = True)
else:
subprocess.call('cd web-data; git pull', shell=True)
sys.stderr.write('READTHEDOCS=%s\n' % (READTHEDOCS_BUILD))
| Python | 0.999997 |
d806cc19e058ad63c6be47d8e616b0c869549db7 | FIX remote does not have test file wired... | sklearn/decomposition/tests/test_spectra_embedding.py | sklearn/decomposition/tests/test_spectra_embedding.py | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn.decomposition.spectra_embedding import SpectralEmbedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.pipeline import Pipeline
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans, SpectralClustering
S = np.array([[1, 5, 2, 1, 0, 0, 0],
[5, 1, 3, 1, 0, 0, 0],
[2, 3, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 2, 1, 1],
[0, 0, 0, 2, 2, 3, 2],
[0, 0, 0, 1, 3, 1, 4],
[0, 0, 0, 1, 2, 4, 1],
])
def test_spectra_embedding_precomputed_graph(seed=36):
"""Test spectral embedding with precomputed kernel"""
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=3, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=3, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
se_knn = SpectralEmbedding(n_components=3, affinity="nearest_neighbors",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
embed_knn = se_knn.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(np.abs(embed_precomp), np.abs(embed_rbf), 0)
def test_spectra_embedding_knn_graph(seed=36):
"""Test spectral embedding with knn graph"""
def test_spectra_embedding_callable_graph(seed=36):
"""Test spectral embedding with knn graph"""
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=3,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=3, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
def test_pipline_spectra_clustering():
"""Test using pipline to do spectral clustering"""
spectral_clustering = Pipeline([
('se', SpectralEmbedding()),
('km', KMeans()),
])
for n_cluster in range(1, 5):
n_cluster = 3
spectral_clustering.set_params(km__n_clusters=n_cluster)
spectral_clustering.set_params(se__n_components=n_cluster)
spectral_clustering.set_params(se__gamma=1.0)
spectral_clustering.fit(S)
SC = SpectralClustering(n_clusters=n_cluster)
SC.fit(S)
assert_array_almost_equal(
normalized_mutual_info_score(
spectral_clustering.steps[1][1].labels_,
SC.labels_), 0.0, 0)
| import numpy as np
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn.decomposition.spectra_embedding import SpectralEmbedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.pipeline import Pipeline
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans, SpectralClustering
S = np.array([[1, 5, 2, 1, 0, 0, 0],
[5, 1, 3, 1, 0, 0, 0],
[2, 3, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 2, 1, 1],
[0, 0, 0, 2, 2, 3, 2],
[0, 0, 0, 1, 3, 1, 4],
[0, 0, 0, 1, 2, 4, 1],
])
def test_spectra_embedding_precomputed_graph(seed=36):
"""Test spectral embedding with precomputed kernel"""
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=3, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=3, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
se_knn = SpectralEmbedding(n_components=3, affinity="nearest_neighbors",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
embed_knn = se_knn.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(np.abs(embed_precomp), np.abs(embed_rbf), 0)
def test_spectra_embedding_knn_graph(seed=36):
"""Test spectral embedding with knn graph"""
def test_spectra_embedding_callable_graph(seed=36):
"""Test spectral embedding with knn graph"""
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=3,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=3, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
def test_pipline_spectra_clustering():
"""Test using pipline to do spectral clustering"""
spectral_clustering = Pipeline([
('se', SpectralEmbedding()),
('km', KMeans()),
])
for n_cluster in range(1, 5):
n_cluster = 3
spectral_clustering.set_params(km__n_clusters=n_cluster)
spectral_clustering.set_params(se__n_components=n_cluster)
spectral_clustering.set_params(se__gamma=1.0)
spectral_clustering.fit(S)
SC = SpectralClustering(n_clusters=n_cluster)
SC.fit(S)
assert_array_almost_equal(
normalized_mutual_info_score(
spectral_clustering.steps[1][1].labels_,
SC.labels_), 0.0, 0)
| Python | 0 |
8784162eb60cd23bbbe669c698e9406d43c1a7ff | Explicitly set allow_empty = True | nextcloudappstore/core/views.py | nextcloudappstore/core/views.py | from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from nextcloudappstore.core.models import App, Category
from django.http import Http404
from django.db.models import Q
class AppDetailView(DetailView):
model = App
template_name = 'app/detail.html'
slug_field = 'id'
slug_url_kwarg = 'id'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.all()
return context
class CategoryAppListView(ListView):
model = App
template_name = 'app/list.html'
allow_empty = True
def get_queryset(self):
category_id = self.kwargs['id']
queryset = super().get_queryset()
if category_id:
queryset = queryset.filter(categories__id=category_id)
if self.has_search_terms():
query = None
for term in self.get_search_terms():
q = Q(translations__name__contains=term) | \
Q(translations__description__contains=term)
if query is None:
query = q
else:
query = query | q
queryset = queryset.filter(query)
# Remove duplicates that for some reason sometimes occur
queryset = list(set(queryset))
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.all()
category_id = self.kwargs['id']
if category_id:
context['current_category'] = Category.objects.get(id=category_id)
if self.has_search_terms():
context['search_query'] = self.request.GET['search']
context['search_terms'] = self.get_search_terms()
return context
def has_search_terms(self):
return ('search' in self.request.GET) \
and self.request.GET['search'].strip()
def get_search_terms(self):
return self.request.GET['search'].strip().split()
| from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from nextcloudappstore.core.models import App, Category
from django.http import Http404
from django.db.models import Q
class AppDetailView(DetailView):
model = App
template_name = 'app/detail.html'
slug_field = 'id'
slug_url_kwarg = 'id'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.all()
return context
class CategoryAppListView(ListView):
model = App
template_name = 'app/list.html'
def get_queryset(self):
category_id = self.kwargs['id']
queryset = super().get_queryset()
if category_id:
queryset = queryset.filter(categories__id=category_id)
if self.has_search_terms():
query = None
for term in self.get_search_terms():
q = Q(translations__name__contains=term) | \
Q(translations__description__contains=term)
if query is None:
query = q
else:
query = query | q
queryset = queryset.filter(query)
# Remove duplicates that for some reason sometimes occur
queryset = list(set(queryset))
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.all()
category_id = self.kwargs['id']
if category_id:
context['current_category'] = Category.objects.get(id=category_id)
if self.has_search_terms():
context['search_query'] = self.request.GET['search']
context['search_terms'] = self.get_search_terms()
return context
def has_search_terms(self):
return ('search' in self.request.GET) \
and self.request.GET['search'].strip()
def get_search_terms(self):
return self.request.GET['search'].strip().split()
| Python | 0.999992 |
22f9ff98e048f47493394570b519d179657d9427 | Add `--host/port` options | skylines/commands/tracking/generate_through_daemon.py | skylines/commands/tracking/generate_through_daemon.py | from __future__ import print_function
from flask_script import Command, Option
import sys
import socket
import struct
from skylines.model import User
from skylines.tracking.server import (
datetime,
FLAG_LOCATION,
FLAG_ALTITUDE,
TrackingFix,
MAGIC,
TYPE_FIX,
set_crc,
)
from math import sin
from random import randint
from time import sleep
class GenerateThroughDaemon(Command):
""" Generate fake live tracks for debugging on daemon """
option_list = (
Option("--host", type=str, default="127.0.0.1"),
Option("--port", type=int, default=5597),
Option("user_id", type=int, help="a user ID"),
)
def run(self, user_id, **kwargs):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
user = User.get(user_id)
if not user:
print('User with id "{}" not found.'.format(user_id))
sys.exit(1)
start_time = datetime.utcnow()
i = randint(0, 100)
_time = (
start_time.hour * 60 * 60 * 1000
+ start_time.minute * 60 * 1000
+ start_time.second * 1000
)
_longitude = randint(6500, 7500) / 1000.0
_latitude = randint(50500, 51500) / 1000.0
_altitude = 500
while True:
longitude = sin(i / 73.0) * 0.001 + _longitude
latitude = sin(i / 50.0) * 0.004 + _latitude
altitude = sin(i / 20.0) * 300 + _altitude
flags = FLAG_LOCATION | FLAG_ALTITUDE
fix = TrackingFix()
fix.pilot_id = user.id
fix.set_location(longitude, latitude)
fix.altitude = altitude
data = struct.pack(
"!IHHQIIiiIHHHhhH",
MAGIC,
0,
TYPE_FIX,
user.tracking_key,
flags,
_time,
int(latitude * 1000000),
int(longitude * 1000000),
0,
0,
0,
0,
int(altitude),
0,
0,
)
data = set_crc(data)
sock.sendto(data, (kwargs.get("host"), kwargs.get("port")))
print(".", end="")
sys.stdout.flush()
sleep(1)
i += 1
_time += 1000
| from __future__ import print_function
from flask_script import Command, Option
import sys
import socket
import struct
from skylines.model import User
from skylines.tracking.server import (
datetime,
FLAG_LOCATION,
FLAG_ALTITUDE,
TrackingFix,
MAGIC,
TYPE_FIX,
set_crc,
)
from math import sin
from random import randint
from time import sleep
class GenerateThroughDaemon(Command):
""" Generate fake live tracks for debugging on daemon """
UDP_IP = "127.0.0.1"
UDP_PORT = 5597
ADDRESS = (UDP_IP, UDP_PORT)
option_list = (Option("user_id", type=int, help="a user ID"),)
def run(self, user_id):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
user = User.get(user_id)
if not user:
print('User with id "{}" not found.'.format(user_id))
sys.exit(1)
start_time = datetime.utcnow()
i = randint(0, 100)
_time = (
start_time.hour * 60 * 60 * 1000
+ start_time.minute * 60 * 1000
+ start_time.second * 1000
)
_longitude = randint(6500, 7500) / 1000.0
_latitude = randint(50500, 51500) / 1000.0
_altitude = 500
while True:
longitude = sin(i / 73.0) * 0.001 + _longitude
latitude = sin(i / 50.0) * 0.004 + _latitude
altitude = sin(i / 20.0) * 300 + _altitude
flags = FLAG_LOCATION | FLAG_ALTITUDE
fix = TrackingFix()
fix.pilot_id = user.id
fix.set_location(longitude, latitude)
fix.altitude = altitude
data = struct.pack(
"!IHHQIIiiIHHHhhH",
MAGIC,
0,
TYPE_FIX,
user.tracking_key,
flags,
_time,
int(latitude * 1000000),
int(longitude * 1000000),
0,
0,
0,
0,
int(altitude),
0,
0,
)
data = set_crc(data)
sock.sendto(data, self.ADDRESS)
print(".", end="")
sys.stdout.flush()
sleep(1)
i += 1
_time += 1000
| Python | 0.000002 |
62f3a1ce0e2af511e897ac300e3ab32f4bf14463 | Fix docs | src/pybel/struct/filters/node_predicates/modifications.py | src/pybel/struct/filters/node_predicates/modifications.py | # -*- coding: utf-8 -*-
"""Predicates for checking nodes' variants."""
from functools import wraps
from typing import Tuple, Type, Union
from .utils import node_predicate
from ..typing import NodePredicate
from ....dsl import BaseEntity, CentralDogma, Fragment, GeneModification, Hgvs, ProteinModification, Variant
__all__ = [
'has_variant',
'has_protein_modification',
'has_gene_modification',
'has_fragment',
'has_hgvs',
]
@node_predicate
def has_variant(node: BaseEntity) -> bool:
"""Return true if the node has any variants."""
return isinstance(node, CentralDogma) and node.variants
def _variant_checker(variant_cls: Union[Type[Variant], Tuple[Type[Variant], ...]]) -> NodePredicate:
@node_predicate
@wraps(node_has_variant)
def _rv(node: BaseEntity):
return node_has_variant(node, variant_cls)
return _rv
def node_has_variant(node: BaseEntity, variant_cls) -> bool:
"""Return true if the node has at least one of the given variant."""
return isinstance(node, CentralDogma) and node.variants and any(
isinstance(variant, variant_cls)
for variant in node.variants
)
has_protein_modification = _variant_checker(ProteinModification)
has_gene_modification = _variant_checker(GeneModification)
has_hgvs = _variant_checker(Hgvs)
has_fragment = _variant_checker(Fragment)
| # -*- coding: utf-8 -*-
"""Predicates for checking nodes' variants."""
from typing import Tuple, Type, Union
from .utils import node_predicate
from ..typing import NodePredicate
from ....dsl import BaseEntity, CentralDogma, Fragment, GeneModification, Hgvs, ProteinModification, Variant
__all__ = [
'has_variant',
'has_protein_modification',
'has_gene_modification',
'has_fragment',
'has_hgvs',
]
@node_predicate
def has_variant(node: BaseEntity) -> bool:
"""Return true if the node has any variants."""
return isinstance(node, CentralDogma) and node.variants
def _variant_checker(variant_cls: Union[Type[Variant], Tuple[Type[Variant], ...]]) -> NodePredicate:
@node_predicate
def _node_has_variant(node: BaseEntity) -> bool:
"""Return true if the node has at least one of the given variant."""
return isinstance(node, CentralDogma) and node.variants and any(
isinstance(variant, variant_cls)
for variant in node.variants
)
return _node_has_variant
has_protein_modification = _variant_checker(ProteinModification)
has_gene_modification = _variant_checker(GeneModification)
has_hgvs = _variant_checker(Hgvs)
has_fragment = _variant_checker(Fragment)
| Python | 0.000003 |
65b658d9bb1b9220cfd15724692517c14f5e2cbc | Send more information | openprescribing/frontend/signals/handlers.py | openprescribing/frontend/signals/handlers.py | import logging
from allauth.account.signals import user_logged_in
from anymail.signals import tracking
from requests_futures.sessions import FuturesSession
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from common.utils import google_user_id
from frontend.models import Profile
logger = logging.getLogger(__name__)
@receiver(post_save, sender=User)
def handle_user_save(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(user_logged_in, sender=User)
def handle_user_logged_in(sender, request, user, **kwargs):
user.searchbookmark_set.update(approved=True)
user.orgbookmark_set.update(approved=True)
def send_ga_event(event):
user = User.objects.filter(email=event.recipient)
if user:
user = user[0]
session = FuturesSession()
payload = {
'v': 1,
'tid': settings.GOOGLE_TRACKING_ID,
'cid': google_user_id(user),
't': 'event',
'ec': 'email',
'ea': event.event_type,
'ua': event.user_agent,
'cm': 'email',
}
if event.esp_event:
payload['dt'] = event.esp_event['subject']
payload['cn'] = event.esp_event['campaign_name']
payload['cs'] = event.esp_event['campaign_source']
payload['dp'] = "/email/%s/%s/%s/%s" % (
event.esp_event['campaign_name'],
event.esp_event['campaign_source'],
event.esp_event['user_id'],
event.event_type
)
else:
logger.warn("No esp_event found for event: %s" % event.__dict__)
logger.info("Sending mail event data Analytics: %s" % payload)
session.post(
'https://www.google-analytics.com/collect', data=payload)
else:
logger.warn("Could not find receipient %s" % event.recipient)
@receiver(tracking)
def handle_anymail_webhook(sender, event, esp_name, **kwargs):
logger.debug("Received webhook from %s: %s" % (esp_name, event.__dict__))
send_ga_event(event)
| import logging
from allauth.account.signals import user_logged_in
from anymail.signals import tracking
from requests_futures.sessions import FuturesSession
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from common.utils import google_user_id
from frontend.models import Profile
logger = logging.getLogger(__name__)
@receiver(post_save, sender=User)
def handle_user_save(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(user_logged_in, sender=User)
def handle_user_logged_in(sender, request, user, **kwargs):
user.searchbookmark_set.update(approved=True)
user.orgbookmark_set.update(approved=True)
def send_ga_event(event):
user = User.objects.filter(email=event.recipient)
if user:
user = user[0]
session = FuturesSession()
payload = {
'v': 1,
'tid': settings.GOOGLE_TRACKING_ID,
'cid': google_user_id(user),
't': 'event',
'ec': 'email',
'ea': event.event_type,
'ua': event.user_agent,
'cm': 'email',
}
if event.esp_event:
payload['dt'] = event.esp_event['subject']
payload['cn'] = event.esp_event['campaign_name']
payload['cs'] = event.esp_event['campaign_source']
payload['dp'] = "/email/%s/%s/%s/%s" % (
event.esp_event['campaign_name'],
event.esp_event['campaign_source'],
event.esp_event['user_id'],
event.event_type
)
else:
logger.warn("No esp_event found for event: %s" % event.__dict__)
logger.info("Sending mail event data Analytics: %s" % payload)
session.post(
'https://www.google-analytics.com/collect', data=payload)
else:
logger.warn("Could not find receipient %s" % event.recipient)
@receiver(tracking)
def handle_anymail_webhook(sender, event, esp_name, **kwargs):
logger.debug("Received webhook from %s: %s" % (esp_name))
send_ga_event(event)
| Python | 0 |
09cb8a0fbb10f14d6622bbeed815e025e4eb1751 | Update newServer.py | Server/newServer.py | Server/newServer.py | __author__ = 'masudurrahman'
import sys
import os
from twisted.protocols import ftp
from twisted.protocols.ftp import FTPFactory, FTPAnonymousShell, FTPRealm, FTP, FTPShell, IFTPShell
from twisted.cred.portal import Portal
from twisted.cred import checkers
from twisted.cred.checkers import AllowAnonymousAccess, FilePasswordDB
from twisted.internet import reactor
from twisted.python import log
from twisted.internet.defer import succeed, failure
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
# def opsCall(obj):
# print "Processing", obj.fObj.name
# return "Completed"
# class MyFTPRealm(FTPRealm):
# def __init__(self, anonymousRoot):
# self.anonymousRoot = filepath.FilePath(anonymousRoot)
# def requestAvatar(self, avatarId, mind, *interfaces):
# for iface in interfaces:
# if iface is IFTPShell:
# if avatarId is checkers.ANONYMOUS:
# avatar = FTPAnonymousShell(self.anonymousRoot)
# else:
# avatar = FTPShell(filepath.FilePath("/home/") + avatarId)
# return (IFTPShell, avatar,
# getattr(avatar, 'logout', lambda: None))
# raise NotImplementedError("Only IFTPShell interface is supported by this realm")
if __name__ == "__main__":
# Try#1
# p = Portal(MyFTPRealm('./'),[AllowAnonymousAccess(), FilePasswordDB("pass.dat")])
# Try#2
# p = Portal(MyFTPRealm('/no_anon_access/', userHome="/tmp/", callback=opsCall),[FilePasswordDB("pass.dat", ":", 0, 0, True, None, False)])
# Try#3
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser("guest", "password")
realm = FTPRealm('./', userHome='/Users')
p = Portal(realm, [checker])
f = ftp.FTPFactory(p)
f.welcomeMessage = "CS3240 Team 4 Project"
log.startLogging(sys.stdout)
reactor.listenTCP(21, f)
reactor.run()
# PASSWORD = ''
# users = {
# os.environ['USER']: PASSWORD
# }
# p = Portal(FTPRealm('./', userHome='/Users'),
# ( AllowAnonymousAccess(),
# InMemoryDB(**users),)
# )
# f = FTPFactory(p)
# reactor.listenTCP(21, f)
# reactor.run()
| __author__ = 'masudurrahman'
import sys
import os
from twisted.protocols import ftp
from twisted.protocols.ftp import FTPFactory, FTPAnonymousShell, FTPRealm, FTP, FTPShell, IFTPShell
from twisted.cred.portal import Portal
from twisted.cred import checkers
from twisted.cred.checkers import AllowAnonymousAccess, FilePasswordDB
from twisted.internet import reactor
from twisted.python import log
from twisted.internet.defer import succeed, failure
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
# def opsCall(obj):
# print "Processing", obj.fObj.name
# return "Completed"
# class MyFTPRealm(FTPRealm):
# def __init__(self, anonymousRoot):
# self.anonymousRoot = filepath.FilePath(anonymousRoot)
# def requestAvatar(self, avatarId, mind, *interfaces):
# for iface in interfaces:
# if iface is IFTPShell:
# if avatarId is checkers.ANONYMOUS:
# avatar = FTPAnonymousShell(self.anonymousRoot)
# else:
# avatar = FTPShell(filepath.FilePath("/home/") + avatarId)
# return (IFTPShell, avatar,
# getattr(avatar, 'logout', lambda: None))
# raise NotImplementedError("Only IFTPShell interface is supported by this realm")
if __name__ == "__main__":
# Try#1
# p = Portal(MyFTPRealm('./'),[AllowAnonymousAccess(), FilePasswordDB("pass.dat")])
# Try#2
# p = Portal(MyFTPRealm('/no_anon_access/', userHome="/tmp/", callback=opsCall),[FilePasswordDB("pass.dat", ":", 0, 0, True, None, False)])
# Try#3
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
check.addUser("guest", "password")
realm = FTPRealm()
p = portal.Portal(realm, [checker])
f = ftp.FTPFactory(p)
f.welcomeMessage = "CS3240 Team 4 Project"
log.startLogging(sys.stdout)
reactor.listenTCP(21, f)
reactor.run()
# PASSWORD = ''
# users = {
# os.environ['USER']: PASSWORD
# }
# p = Portal(FTPRealm('./', userHome='/Users'),
# ( AllowAnonymousAccess(),
# InMemoryDB(**users),)
# )
# f = FTPFactory(p)
# reactor.listenTCP(21, f)
# reactor.run()
| Python | 0.000001 |
df5fc7af67aed3aa2d2aeea4cef03d8dd790f1a4 | Fix ios enable password regex in terminal plugin (#35741) | lib/ansible/plugins/terminal/ios.py | lib/ansible/plugins/terminal/ios.py | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n][\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"[\r\n%] Bad passwords"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
re.compile(br"[%\S] ?Informational: ?[\s]+", re.I)
]
def on_open_shell(self):
try:
for cmd in (b'terminal length 0', b'terminal width 512'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u'prompt'] = to_text(r"[\r\n]password: ?$", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
cmd[u'prompt_retry_check'] = True
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
prompt = self._get_prompt()
if prompt is None or not prompt.endswith(b'#'):
raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)
except AnsibleConnectionFailure as e:
prompt = self._get_prompt()
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b'(config' in prompt:
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n][\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"[\r\n%] Bad passwords"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
re.compile(br"[%\S] ?Informational: ?[\s]+", re.I)
]
def on_open_shell(self):
try:
for cmd in (b'terminal length 0', b'terminal width 512'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u'prompt'] = to_text(r"[\r\n]password: $", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
cmd[u'prompt_retry_check'] = True
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
prompt = self._get_prompt()
if prompt is None or not prompt.endswith(b'#'):
raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)
except AnsibleConnectionFailure as e:
prompt = self._get_prompt()
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b'(config' in prompt:
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| Python | 0 |
25e71a56d48e5bdc4d73522333196d69d735707a | Update the PCA10056 example to use new pin naming | ports/nrf/boards/pca10056/examples/buttons.py | ports/nrf/boards/pca10056/examples/buttons.py | import board
import digitalio
import gamepad
import time
pad = gamepad.GamePad(
digitalio.DigitalInOut(board.P0_11),
digitalio.DigitalInOut(board.P0_12),
digitalio.DigitalInOut(board.P0_24),
digitalio.DigitalInOut(board.P0_25),
)
prev_buttons = 0
while True:
buttons = pad.get_pressed()
if buttons != prev_buttons:
for i in range(0, 4):
bit = (1 << i)
if (buttons & bit) != (prev_buttons & bit):
print('Button %d %s' % (i + 1, 'pressed' if buttons & bit else 'released'))
prev_buttons = buttons
time.sleep(0.1)
| import board
import digitalio
import gamepad
import time
pad = gamepad.GamePad(
digitalio.DigitalInOut(board.PA11),
digitalio.DigitalInOut(board.PA12),
digitalio.DigitalInOut(board.PA24),
digitalio.DigitalInOut(board.PA25),
)
prev_buttons = 0
while True:
buttons = pad.get_pressed()
if buttons != prev_buttons:
for i in range(0, 4):
bit = (1 << i)
if (buttons & bit) != (prev_buttons & bit):
print('Button %d %s' % (i + 1, 'pressed' if buttons & bit else 'released'))
prev_buttons = buttons
time.sleep(0.1)
| Python | 0 |
3de29a3fdd17beece1fbe26c4f578cd854d16d0d | Fix bug introduced in update_from_old_problemformat.py | problemtools/update_from_old_problemformat.py | problemtools/update_from_old_problemformat.py | # -*- coding: utf-8 -*-
import argparse
import glob
import os.path
import yaml
def update(problemdir):
probyaml = os.path.join(problemdir, 'problem.yaml')
if not os.path.isfile(probyaml):
raise Exception('Could not find %s' % probyaml)
config = yaml.safe_load('%s' % open(probyaml, 'r').read())
stmts = glob.glob(os.path.join(problemdir, 'problem_statement/problem.tex'))
stmts.extend(glob.glob(os.path.join(problemdir, 'problem_statement/problem.[a-z][a-z].tex')))
yaml_changed = False
if 'name' in config:
print('Move problem name "%s" to these problem statement files: %s' % (config['name'], stmts))
for f in stmts:
stmt = open(f, 'r').read()
if stmt.find('\\problemname{') != -1:
print(' Statement %s already has a problemname, skipping' % f)
continue
newstmt = '\\problemname{%s}\n\n%s' % (config['name'], stmt)
open(f, 'w').write(newstmt)
del config['name']
yaml_changed = True
if 'validator' in config:
validator_flags = config['validator'].split()
validation = 'default'
if validator_flags[0] == 'custom':
validation = 'custom'
validator_flags = validator_flags[1:]
validator_flags = ' '.join(validator_flags)
print('Old validator option exists, moving to validation: %s, validator_flags: %s' % (validation, validator_flags))
config['validation'] = validation
if validator_flags != '':
config['validator_flags'] = validator_flags
del config['validator']
yaml_changed = True
if yaml_changed:
open(probyaml, 'w').write(yaml.dump(config, default_flow_style=False, allow_unicode=True))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('problemdir', nargs='+')
options = parser.parse_args()
for problemdir in options.problemdir:
try:
print('Updating %s' % problemdir)
update(problemdir)
except Exception as e:
print('Update FAILED: %s' % e)
| # -*- coding: utf-8 -*-
import argparse
import glob
import os.path
import yaml
def update(problemdir):
probyaml = os.path.join(problemdir, 'problem.yaml')
if not os.path.isfile(probyaml):
raise Exception('Could not find %s' % probyaml)
config = yaml.safe_load('%s' % open(probyaml, 'r').read())
stmts = glob.glob(os.path.join(problemdir, 'problem_statement/problem.tex'))
stmts.extend(glob.glob(os.path.join(problemdir, 'problem_statement/problem.[a-z][a-z].tex')))
yaml_changed = False
if 'name' in config:
print('Move problem name "%s" to these problem statement files: %s' % (config['name'], stmts))
for f in stmts:
stmt = open(f, 'r').read()
if stmt.find('\\problemname{') != -1:
print(' Statement %s already has a problemname, skipping' % f)
continue
newstmt = '\\problemname{%s}\n\n%s' % (config['name'], stmt)
open(f, 'w').write(newstmt)
del config['name']
yaml_changed = True
if 'validator' in config:
validator_flags = config['validator'].split()
validation = 'default'
if validator_flags[0] == 'custom':
validation = 'custom'
validator_flags = validator_flags[1:]
validator_flags = ' '.join(validator_flags)
print('Old validator option exists, moving to validation: %s, validator_flags: %s' % (validation, validator_flags))
config['validation'] = validation
if validator_flags != '':
config['validator_flags'] = validator_flags
del config['validator']
yaml_changed = True
if yaml_changed:
open(probyaml, 'w').write(yaml.dump(config, default_flow_style=False, allow_unicode=True))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('problemdir', nargs='+')
options = parser.parse_args()
for problemdir in options:
try:
print('Updating %s' % problemdir)
update(problemdir)
except Exception as e:
print('Update FAILED: %s' % e)
| Python | 0 |
a3bb1ff203789b6547e241f2ba0108e89bd1aefe | Remove mystery import | profile_collection/startup/80-areadetector.py | profile_collection/startup/80-areadetector.py | from ophyd.controls.area_detector import (AreaDetectorFileStoreHDF5,
AreaDetectorFileStoreTIFF,
AreaDetectorFileStoreTIFFSquashing)
# from shutter import sh1
shctl1 = EpicsSignal('XF:28IDC-ES:1{Det:PE1}cam1:ShutterMode', name='shctl1')
pe1 = AreaDetectorFileStoreTIFFSquashing(
'XF:28IDC-ES:1{Det:PE1}',
name='pe1',
stats=[],
ioc_file_path = 'G:/pe1_data',
file_path = '/home/xf28id1/pe1_data',
shutter=shctl1,
shutter_val=(1, 0)
)
# Dan and Sanjit commented this out in June.
#shctl2 = EpicsSignal('XF:28IDC-ES:1{Det:PE2}cam1:ShutterMode', name='shctl2')
#pe2 = AreaDetectorFileStoreTIFFSquashing(
# 'XF:28IDC-ES:1{Det:PE2}',
# name='pe2',
# stats=[],
# ioc_file_path = 'G:/pe2_data',
# file_path = '/home/xf28id1/pe2_data',
# shutter=shctl2,
# shutter_val=(1,0))
| from ophyd.controls.area_detector import (AreaDetectorFileStoreHDF5,
AreaDetectorFileStoreTIFF,
AreaDetectorFileStoreTIFFSquashing)
from shutter import sh1
shctl1 = EpicsSignal('XF:28IDC-ES:1{Det:PE1}cam1:ShutterMode', name='shctl1')
pe1 = AreaDetectorFileStoreTIFFSquashing(
'XF:28IDC-ES:1{Det:PE1}',
name='pe1',
stats=[],
ioc_file_path = 'G:/pe1_data',
file_path = '/home/xf28id1/pe1_data',
shutter=shctl1,
shutter_val=(1, 0)
)
# Dan and Sanjit commented this out in June.
#shctl2 = EpicsSignal('XF:28IDC-ES:1{Det:PE2}cam1:ShutterMode', name='shctl2')
#pe2 = AreaDetectorFileStoreTIFFSquashing(
# 'XF:28IDC-ES:1{Det:PE2}',
# name='pe2',
# stats=[],
# ioc_file_path = 'G:/pe2_data',
# file_path = '/home/xf28id1/pe2_data',
# shutter=shctl2,
# shutter_val=(1,0))
| Python | 0 |
37062a5695eea63726630e98019c85f7985306a2 | Use a clearer attribute name | dataobject.py | dataobject.py | import logging
import remoteobjects.fields
all_classes = {}
def find_by_name(name):
    """Return the registered DataObject subclass called *name*.

    *name* is the full dotted module-and-class path under which the class
    was recorded in `all_classes` when it was defined.  Raises KeyError
    for unknown names.
    """
    registered = all_classes[name]
    return registered
class DataObjectMetaclass(type):
    """Metaclass for DataObject classes.

    Collects all `remoteobjects.fields.Field` class attributes into a
    `fields` dict on the new class, and registers the class in the
    module-level `all_classes` registry under its dotted name so that
    forward references by name can be resolved later.
    """
    def __new__(cls, name, bases, attrs):
        """Create the class, harvesting Field attributes into `fields`."""
        fields = {}
        new_fields = {}
        # Inherit all the parent DataObject classes' fields.
        for base in bases:
            if isinstance(base, DataObjectMetaclass):
                fields.update(base.fields)
        # Move all the class's attributes that are Fields to the fields set.
        # (Python 2: attrs.items() returns a list, so deleting keys from
        # attrs during this loop is safe.)
        for attrname, field in attrs.items():
            if isinstance(field, remoteobjects.fields.Field):
                new_fields[attrname] = field
                del attrs[attrname]
            elif attrname in fields:
                # Throw out any parent fields that the subclass defined as
                # something other than a Field.
                del fields[attrname]
        fields.update(new_fields)
        attrs['fields'] = fields
        obj_cls = super(DataObjectMetaclass, cls).__new__(cls, name, bases, attrs)
        # Register the new class so Object fields can have forward-referenced it.
        all_classes['.'.join((obj_cls.__module__, name))] = obj_cls
        # Tell this class's fields what this class is, so they can find their
        # forward references later.
        for field in new_fields.values():
            field.of_cls = obj_cls
        return obj_cls
class DataObject(object):
    """An object that can be decoded from or encoded as a dictionary, suitable
    for serializing to or deserializing from JSON.
    DataObject subclasses should be declared with their different data
    attributes defined as instances of fields from the `remoteobjects.fields`
    module. For example:
    >>> from remoteobjects import DataObject, fields
    >>> class Asset(DataObject):
    ...     name = fields.Something()
    ...     updated = fields.Datetime()
    ...     author = fields.Object('Author')
    ...
    A DataObject's fields then provide the coding between live DataObject
    instances and dictionaries.
    """
    __metaclass__ = DataObjectMetaclass
    def __init__(self, **kwargs):
        """Accept arbitrary keyword arguments as instance attributes."""
        # `_id` starts out unassigned; kwargs are set directly on the
        # instance dict without going through the fields.
        self._id = None
        self.__dict__.update(kwargs)
    def to_dict(self):
        """Encodes the DataObject to a dictionary.
        Starts from `_originaldata` (the raw dict remembered by
        `update_from_dict()`) so unrecognized keys round-trip, then lets
        each declared field encode its current value on top.
        """
        try:
            # TODO: this shallow copy only prevents sticky modification of the
            # dict's contents, not the contents' contents.
            data = dict(self._originaldata)
        except AttributeError:
            # No dictionary has been decoded into this instance yet.
            data = {}
        for field_name, field in self.fields.iteritems():
            field.encode_into(self, data, field_name=field_name)
        return data
    @classmethod
    def from_dict(cls, data):
        """Decodes a dictionary into an instance of the DataObject class."""
        self = cls()
        self.update_from_dict(data)
        return self
    def update_from_dict(self, data):
        """Adds the content of a dictionary to this DataObject.
        Parameter `data` is the dictionary from which to update the object.
        Use this only when receiving newly updated or partial content for a
        DataObject; that is, when the data is from the outside data source and
        needs decoded through the object's fields. Data from "inside" should
        be added to an object manually by setting the object's attributes.
        Data that constitutes a new object should be turned into another
        object with `from_dict()`.
        """
        # Remember this extra data, so we can play it back later.
        if not hasattr(self, '_originaldata'):
            self._originaldata = {}
        self._originaldata.update(data)
        for field_name, field in self.fields.iteritems():
            field.decode_into(data, self, field_name=field_name)
| import logging
import remoteobjects.fields
all_classes = {}
def find_by_name(name):
"""Finds and returns the DataObject subclass with the given name.
Parameter `name` should be a full dotted module and class name.
"""
return all_classes[name]
class DataObjectMetaclass(type):
def __new__(cls, name, bases, attrs):
fields = {}
new_fields = {}
# Inherit all the parent DataObject classes' fields.
for base in bases:
if isinstance(base, DataObjectMetaclass):
fields.update(base.fields)
# Move all the class's attributes that are Fields to the fields set.
for attrname, field in attrs.items():
if isinstance(field, remoteobjects.fields.Field):
new_fields[attrname] = field
del attrs[attrname]
elif attrname in fields:
# Throw out any parent fields that the subclass defined as
# something other than a Field.
del fields[attrname]
fields.update(new_fields)
attrs['fields'] = fields
obj_cls = super(DataObjectMetaclass, cls).__new__(cls, name, bases, attrs)
# Register the new class so Object fields can have forward-referenced it.
all_classes['.'.join((obj_cls.__module__, name))] = obj_cls
# Tell this class's fields what this class is, so they can find their
# forward references later.
for field in new_fields.values():
field.of_cls = obj_cls
return obj_cls
class DataObject(object):
"""An object that can be decoded from or encoded as a dictionary, suitable
for serializing to or deserializing from JSON.
DataObject subclasses should be declared with their different data
attributes defined as instances of fields from the `remoteobjects.fields`
module. For example:
>>> from remoteobjects import DataObject, fields
>>> class Asset(DataObject):
... name = fields.Something()
... updated = fields.Datetime()
... author = fields.Object('Author')
...
A DataObject's fields then provide the coding between live DataObject
instances and dictionaries.
"""
__metaclass__ = DataObjectMetaclass
def __init__(self, **kwargs):
self._id = None
self.__dict__.update(kwargs)
def to_dict(self):
"""Encodes the DataObject to a dictionary."""
try:
# TODO: this shallow copy only prevents sticky modification of the
# dict's contents, not the contents' contents.
data = dict(self._dict)
except AttributeError:
data = {}
for field_name, field in self.fields.iteritems():
field.encode_into(self, data, field_name=field_name)
return data
@classmethod
def from_dict(cls, data):
"""Decodes a dictionary into an instance of the DataObject class."""
self = cls()
self.update_from_dict(data)
return self
def update_from_dict(self, data):
"""Adds the content of a dictionary to this DataObject.
Parameter `data` is the dictionary from which to update the object.
Use this only when receiving newly updated or partial content for a
DataObject; that is, when the data is from the outside data source and
needs decoded through the object's fields. Data from "inside" should
be added to an object manually by setting the object's attributes.
Data that constitutes a new object should be turned into another
object with `from_dict()`.
"""
# Remember this extra data, so we can play it back later.
if not hasattr(self, '_dict'):
self._dict = {}
self._dict.update(data)
for field_name, field in self.fields.iteritems():
field.decode_into(data, self, field_name=field_name)
| Python | 0.005027 |
9bd5b66a50def87de2b8a37ba452ee4efc8a17b7 | add docstring for update_average | web/aliendb/apps/analytics/helpers.py | web/aliendb/apps/analytics/helpers.py | def update_average(field, value, tracked) -> float:
"""Updates a previously calculated average with a new value.
Args:
field: the current average;
value: the new value to include in the average;
tracked: the number of elements used to form the _original_ average;
Returns:
float: the updated average
"""
return (value + field * tracked) / (1 + tracked)
| def update_average(field, value, tracked):
return (value + field * tracked) / (1 + tracked)
| Python | 0 |
4839c43db77a88a872db07ab99be0fdd29bb24fc | Remove bland from 'irrelevant' preferable tendency | LandPortalEntities/lpentities/indicator.py | LandPortalEntities/lpentities/indicator.py | '''
Created on 19/12/2013
@author: Nacho
'''
from lpentities.measurement_unit import MeasurementUnit
class Indicator(object):
    """A land-related indicator: localised names and descriptions, an owning
    dataset, a measurement unit, a topic and a preferable tendency.
    """
    # Simulated Enum Values for the preferable tendency
    INCREASE = "increase"
    DECREASE = "decrease"
    IRRELEVANT = "irrelevant"
    # Possible topics
    _topics_set = ['CLIMATE_CHANGE', 'GEOGRAPH_SOCIO', 'LAND_USE',
                   'LAND_GENDER', 'LAND_TENURE', 'FSECURITY_HUNGER',
                   'TEMP_TOPIC']
    def __init__(self, chain_for_id, int_for_id, name_en=None, name_es=None,
                 name_fr=None, description_en=None, description_es=None,
                 description_fr=None, dataset=None, measurement_unit=None,
                 topic=None, preferable_tendency=None):
        """Build an indicator; indicator_id is derived from chain_for_id
        and int_for_id (see _generate_id)."""
        self.name_en = name_en
        self.name_es = name_es
        self.name_fr = name_fr
        self.description_en = description_en
        self.description_es = description_es
        self.description_fr = description_fr
        self.dataset = dataset
        self._measurement_unit = measurement_unit
        self._topic = topic
        self._preferable_tendency = preferable_tendency
        self.indicator_id = self._generate_id(chain_for_id, int_for_id)
    @property
    def measurement_unit(self):
        """MeasurementUnit of the indicator"""
        return self._measurement_unit
    @measurement_unit.setter
    def measurement_unit(self, measurement_unit):
        # Only MeasurementUnit instances may be stored.
        if not isinstance(measurement_unit, MeasurementUnit):
            raise ValueError("Expected Measurement object in Indicator")
        self._measurement_unit = measurement_unit
    @property
    def topic(self):
        """Topic of the indicator"""
        return self._topic
    @topic.setter
    def topic(self, topic):
        # Case-insensitive membership check against the known topic list.
        if topic.upper() not in self._topics_set:
            raise ValueError("Provided topic not in the specified list")
        self._topic = topic
    @property
    def preferable_tendency(self):
        """Preferable tendency of the indicator"""
        return self._preferable_tendency
    @preferable_tendency.setter
    def preferable_tendency(self, preferable_tendency):
        if preferable_tendency not in (self.DECREASE, self.INCREASE,
                                       self.IRRELEVANT):
            raise ValueError("Provided tendency not in the specified list")
        self._preferable_tendency = preferable_tendency
    @staticmethod
    def _generate_id(chain_for_id, int_for_id):
        # "IND" + upper-cased chain + stringified sequence number.
        return "IND" + chain_for_id.upper() + str(int_for_id).upper()
| '''
Created on 19/12/2013
@author: Nacho
'''
from lpentities.measurement_unit import MeasurementUnit
class Indicator(object):
    """A land-related indicator: localised names and descriptions, an owning
    dataset, a measurement unit, a topic and a preferable tendency.
    """
    #Simulated Enum Values
    INCREASE = "increase"
    DECREASE = "decrease"
    # Bug fix: this constant used to be "irrelevant " (trailing space),
    # which broke equality comparisons against the plain word.
    IRRELEVANT = "irrelevant"
    #Possible topics
    _topics_set = ['CLIMATE_CHANGE', 'GEOGRAPH_SOCIO', 'LAND_USE', 'LAND_GENDER', 'LAND_TENURE', 'FSECURITY_HUNGER', 'TEMP_TOPIC']
    def __init__(self, chain_for_id, int_for_id, name_en=None, name_es=None,
                 name_fr=None, description_en=None, description_es=None,
                 description_fr=None, dataset=None, measurement_unit=None,
                 topic=None, preferable_tendency=None):
        """Build an indicator; indicator_id is derived from chain_for_id
        and int_for_id (see _generate_id)."""
        self.name_en = name_en
        self.name_es = name_es
        self.name_fr = name_fr
        self.description_en = description_en
        self.description_es = description_es
        self.description_fr = description_fr
        self.dataset = dataset
        self._measurement_unit = measurement_unit
        self._topic = topic
        self._preferable_tendency = preferable_tendency
        self.indicator_id = self._generate_id(chain_for_id, int_for_id)
    def __get_measurement_unit(self):
        return self._measurement_unit
    def __set_measurement_unit(self, measurement_unit):
        # Only MeasurementUnit instances may be stored.
        if isinstance(measurement_unit, MeasurementUnit):
            self._measurement_unit = measurement_unit
        else:
            raise ValueError("Expected Measurement object in Indicator")
    measurement_unit = property(fget=__get_measurement_unit, fset=__set_measurement_unit, doc="MeasurementUnit of the indicator")
    def __get_topic(self):
        return self._topic
    def __set_topic(self, topic):
        # Case-insensitive membership check against the known topic list.
        if topic.upper() in self._topics_set:
            self._topic = topic
        else:
            raise ValueError("Provided topic not in the specified list")
    topic = property(fget=__get_topic, fset=__set_topic, doc="Topic of the indicator")
    def __get_preferable_tendency(self):
        return self._preferable_tendency
    def __set_preferable_tendency(self, preferable_tendency):
        if preferable_tendency == self.DECREASE or preferable_tendency == self.INCREASE or preferable_tendency == self.IRRELEVANT:
            self._preferable_tendency = preferable_tendency
        else:
            raise ValueError("Provided tendency not in the specified list")
    preferable_tendency = property(fget=__get_preferable_tendency, fset=__set_preferable_tendency, doc="Preferable tendency of the indicator")
    @staticmethod
    def _generate_id(chain_for_id, int_for_id):
        """Return "IND" + upper-cased chain + stringified int_for_id."""
        return "IND" + chain_for_id.upper() + str(int_for_id).upper()
| Python | 0.000236 |
48cf73c12f1c586d5ce71fd872f9054b4209d13b | adds missing colon | GPIOTest.py | GPIOTest.py | import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering and silence channel-reuse warnings.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Pin 2 is an output; pins 4 and 17 are inputs with internal pull-ups,
# so they read high until the attached switch pulls them low.
GPIO.setup(2, GPIO.OUT)
GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# NOTE(review): the names and prints suggest pin 2 = status LED,
# pin 4 = auto switch, pin 17 = on switch -- confirm against the wiring.
status = not GPIO.input(2)
autoswitch=GPIO.input(4)
onswitch=GPIO.input(17)
# Drive the output pin with the inverted value just read (toggle).
GPIO.output(2, status)
if status:
    print('Auto status = Off')
else:
    print('Auto status = On')
if autoswitch:
    print('Auto LED = Off')
else:
    print('Auto LED = Blue')
if not onswitch:
    print ('Status LED = Green')
elif autoswitch:
    if status:
        print ('Status LED = Red')
    else:
        print ('Status LED = Green')
else:
    print ('Status LED = Red')
#GPIO.cleanup()
| import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering and silence channel-reuse warnings.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Pin 2 is an output; pins 4 and 17 are inputs with internal pull-ups.
GPIO.setup(2, GPIO.OUT)
GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# NOTE(review): names suggest pin 2 = status LED, pin 4 = auto switch,
# pin 17 = on switch -- confirm against the wiring.
status = not GPIO.input(2)
autoswitch = GPIO.input(4)
onswitch = GPIO.input(17)
# Drive the output pin with the inverted value just read (toggle).
GPIO.output(2, status)
if status:
    print('Auto status = Off')
else:
    print('Auto status = On')
if autoswitch:
    print('Auto LED = Off')
else:
    print('Auto LED = Blue')
# Bug fix: this 'if' was missing its trailing colon (SyntaxError).
if not onswitch:
    print ('Status LED = Green')
elif autoswitch:
    if status:
        print ('Status LED = Red')
    else:
        print ('Status LED = Green')
else:
    print ('Status LED = Red')
#GPIO.cleanup()
| Python | 0.998656 |
aaac2228119bf965183d30ebf9d4b8cb13699fd8 | fix tkinter for python 3 | GroupEng.py | GroupEng.py | #!/usr/bin/python
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# GroupEng is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GroupEng is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
"""
External GroupEng Application. Handles user invocation and marshalls things for
use by the rest of GroupEng
.. moduleauthor:: Thomas G. Dimiduk tgd8@cornell.edu
"""
import sys
import os.path
import os
from src import controller
if len(sys.argv) > 1:
    # Command-line mode: argv[1] is the GroupEng input file.
    try:
        debug = os.environ['DEBUG'].lower() == 'true'
    except KeyError:
        debug = False
    if debug:
        # Debug mode: let exceptions propagate with a full traceback.
        status, outdir = controller.run(sys.argv[1])
        if not status:
            print('Could not completely meet all rules')
    else:
        try:
            status, outdir = controller.run(sys.argv[1])
            if not status:
                print('Could not completely meet all rules')
        except Exception as e:
            print(e)
else:
    # import gui stuff only if we are going to use it.
    # Bug fix: the dialog/messagebox modules were imported with their
    # Python 2 names unconditionally, so the GUI path crashed on Python 3
    # even though tkinter itself was imported correctly.
    try:
        from tkinter import *
        from tkinter.filedialog import askopenfilename
        from tkinter.messagebox import showerror, showinfo
    except ImportError:
        from Tkinter import *
        from tkFileDialog import askopenfilename
        from tkMessageBox import showerror, showinfo
    path = askopenfilename()
    d, f = os.path.split(path)
    os.chdir(d)
    try:
        status, outdir = controller.run(f)
    except Exception as e:
        showerror('GroupEng Error', '{0}'.format(e))
    else:
        # Bug fix: the status check used to run even after an exception,
        # raising NameError because 'status' was never bound.
        if status:
            showinfo("GroupEng", "GroupEng Run Succesful\n Output in: {0}".format(outdir))
        else:
            showinfo("GroupEng", "GroupEng Ran Correctly but not all rules could be met\n"
                     "Output in: {0}".format(outdir))
| #!/usr/bin/python
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# GroupEng is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GroupEng is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
"""
External GroupEng Application. Handles user invocation and marshalls things for
use by the rest of GroupEng
.. moduleauthor:: Thomas G. Dimiduk tgd8@cornell.edu
"""
import sys
import os.path
import os
from src import controller
if len(sys.argv) > 1:
    # Command-line mode: argv[1] is the GroupEng input file.
    try:
        debug = os.environ['DEBUG'].lower() == 'true'
    except KeyError:
        debug = False
    if debug:
        # Debug mode: let exceptions propagate with a full traceback.
        status, outdir = controller.run(sys.argv[1])
        if not status:
            print('Could not completely meet all rules')
    else:
        try:
            status, outdir = controller.run(sys.argv[1])
            if not status:
                print('Could not completely meet all rules')
        except Exception as e:
            print(e)
else:
    # import gui stuff only if we are going to use it
    from Tkinter import *
    from tkFileDialog import askopenfilename
    from tkMessageBox import showerror, showinfo
    path = askopenfilename()
    d, f = os.path.split(path)
    os.chdir(d)
    try:
        status, outdir = controller.run(f)
    except Exception as e:
        showerror('GroupEng Error', '{0}'.format(e))
    else:
        # Bug fix: the status check used to run even after an exception,
        # raising NameError because 'status' was never bound.
        if status:
            showinfo("GroupEng", "GroupEng Run Succesful\n Output in: {0}".format(outdir))
        else:
            showinfo("GroupEng", "GroupEng Ran Correctly but not all rules could be met\n"
                     "Output in: {0}".format(outdir))
3e7d433c193bd2e35b2c760297d81973f56b3eec | Fix test cases | node/floor_divide.py | node/floor_divide.py | #!/usr/bin/env python
from nodes import Node
import math
class FloorDiv(Node):
    # Node framework metadata: the character that invokes this node and
    # its stack arity (2 values in, 1 out).
    char = "f"
    args = 2
    results = 1
    @Node.test_func([3,2], [1])
    @Node.test_func([6,-3], [-2])
    def func(self, a:Node.number,b:Node.number):
        """a/b. Rounds down, returns an int."""
        return a//b
    @Node.test_func(["test", "e"], [["t", "e", "st"]])
    def partition(self, string:str, sep:str):
        """Split the string at the first occurrence of sep,
        return a 3-list containing the part before the separator,
        the separator itself, and the part after the separator.
        If the separator is not found,
        return a 3-list containing the string itself,
        followed by two empty strings."""
        return [list(string.partition(sep))]
    @Node.test_func(["134", 1], [["134"]])
    @Node.test_func(["1234", 2], [["12", "34"]])
    @Node.test_func(["1234", 3], [["1", "2", "34"]])
    @Node.test_func([[4,8,15,16,23,42], 5], [[[4],[8],[15],[16],[23,42]]])
    @Node.test_func(["123456789", 5], [['1', '2', '3', '4', '56789']])
    @Node.test_func([[4,8,15,16,23,42], 7], [[[],[],[],[],[],[],[4,8,15,16,23,42]]])
    def chunk(self, inp:Node.indexable, num:int):
        """Return inp seperated into num groups"""
        rtn = []
        size = len(inp)//num
        try:
            # Collect num slices of length `size`; `i` deliberately leaks
            # out of the loop so the tail slice below starts after the
            # last full slice.
            for i in range(0, num*size, size):
                rtn.append(inp[i:i+size])
        except ValueError:
            # size == 0 makes range() raise (zero step): emit num empty
            # groups and reset i so the tail below is the whole input.
            for i in range(num): rtn.append([])
            i = 0
        if len(rtn) != num:
            rtn.append(inp[i+size:])
        else:
            # Append any leftover elements to the last group.
            rtn[-1] += inp[i+size:]
        return [rtn]
    @Node.test_func([[4, 4, 2, 2, 9, 9], [0, -2, 0, 7, 0]], [[[4],[4,2],[2,9,9]]])
    def split_at(self, inp:Node.sequence, splits:Node.sequence):
        """Split inp at truthy values in splits"""
        rtn = [[]]
        for i, do_split in zip(inp, splits+[0]):
            # A truthy marker opens a new group *before* placing i.
            if do_split: rtn.append([])
            rtn[-1].append(i)
return [rtn] | #!/usr/bin/env python
from nodes import Node
import math
class FloorDiv(Node):
    # Node framework metadata: the character that invokes this node and
    # its stack arity (2 values in, 1 out).
    char = "f"
    args = 2
    results = 1
    @Node.test_func([3,2], [1])
    @Node.test_func([6,-3], [-2])
    def func(self, a:Node.number,b:Node.number):
        """a/b. Rounds down, returns an int."""
        return a//b
    @Node.test_func(["test", "e"], [["t", "e", "st"]])
    def partition(self, string:str, sep:str):
        """Split the string at the first occurrence of sep,
        return a 3-list containing the part before the separator,
        the separator itself, and the part after the separator.
        If the separator is not found,
        return a 3-list containing the string itself,
        followed by two empty strings."""
        return [list(string.partition(sep))]
    @Node.test_func(["134", 1], [["134"]])
    @Node.test_func(["1234", 2], [["12", "34"]])
    @Node.test_func(["1234", 3], [["1", "2", "34"]])
    @Node.test_func([[4,8,15,16,23,42], 5], [[[4],[8],[15],[16],[23,42]]])
    @Node.test_func(["123456789", 5], [['1', '2', '3', '4', '56789']])
    @Node.test_func([[4,8,15,16,23,42], 7], [[[],[],[],[],[],[],[4,8,15,16,23,42]]])
    def chunk(self, inp:Node.indexable, num:int):
        """Return inp seperated into num groups"""
        rtn = []
        size = len(inp)//num
        try:
            for i in range(0, num*size, size):
                rtn.append(inp[i:i+size])
        except ValueError:
            # size == 0 makes range() raise (zero step): emit num empty
            # groups and reset i so the tail below is the whole input.
            for i in range(num): rtn.append([])
            i = 0
        if len(rtn) != num:
            rtn.append(inp[i+size:])
        else:
            rtn[-1] += inp[i+size:]
        return [rtn]
    # Bug fix (test case): a truthy split marker opens a new group *before*
    # the element is placed, so the expected grouping for this input is
    # [[4], [4, 2], [2, 9, 9]], not [[4, 4], [2, 2], [9, 9]].
    @Node.test_func([[4, 4, 2, 2, 9, 9], [0, -2, 0, 7, 0]], [[[4],[4,2],[2,9,9]]])
    def split_at(self, inp:Node.sequence, splits:Node.sequence):
        """Split inp at truthy values in splits"""
        rtn = [[]]
        for i, do_split in zip(inp, splits+[0]):
            if do_split: rtn.append([])
            rtn[-1].append(i)
return [rtn] | Python | 0.000241 |
a9c9cbac36568676be194024f6f660e4fc3f03b6 | Add old list to applist migration | src/yunohost/data_migrations/0010_migrate_to_apps_json.py | src/yunohost/data_migrations/0010_migrate_to_apps_json.py | import os
from moulinette.utils.log import getActionLogger
from yunohost.app import app_fetchlist, app_removelist, _read_appslist_list, APPSLISTS_JSON
from yunohost.tools import Migration
logger = getActionLogger('yunohost.migration')
BASE_CONF_PATH = '/home/yunohost.conf'
BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, 'backup')
APPSLISTS_BACKUP = os.path.join(BACKUP_CONF_DIR, "appslist_before_migration_to_unified_list.json")
class MyMigration(Migration):
    "Migrate from official.json to apps.json"
    def migrate(self):
        # Backup current app list json
        os.system("cp %s %s" % (APPSLISTS_JSON, APPSLISTS_BACKUP))
        # Remove all the deprecated lists
        lists_to_remove = [
            "http://app.yunohost.org/list.json", # Old list on old installs, alias to official.json
            "https://app.yunohost.org/official.json",
            "https://app.yunohost.org/community.json",
            "https://labriqueinter.net/apps/labriqueinternet.json"
        ]
        appslists = _read_appslist_list()
        for appslist, infos in appslists.items():
            # Match on the registered URL (not the list name) so renamed
            # lists pointing at deprecated URLs are removed as well.
            if infos["url"] in lists_to_remove:
                app_removelist(name=appslist)
        # Replace by apps.json list
        app_fetchlist(name="yunohost",
                      url="https://app.yunohost.org/apps.json")
    def backward(self):
        # Restore the pre-migration list only if the backup file exists.
        if os.path.exists(APPSLISTS_BACKUP):
            os.system("cp %s %s" % (APPSLISTS_BACKUP, APPSLISTS_JSON))
| import os
from moulinette.utils.log import getActionLogger
from yunohost.app import app_fetchlist, app_removelist, _read_appslist_list, APPSLISTS_JSON
from yunohost.tools import Migration
logger = getActionLogger('yunohost.migration')
BASE_CONF_PATH = '/home/yunohost.conf'
BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, 'backup')
APPSLISTS_BACKUP = os.path.join(BACKUP_CONF_DIR, "appslist_before_migration_to_unified_list.json")
class MyMigration(Migration):
    "Migrate from official.json to apps.json"
    def migrate(self):
        # Backup current app list json
        os.system("cp %s %s" % (APPSLISTS_JSON, APPSLISTS_BACKUP))
        # Remove all the deprecated lists
        lists_to_remove = [
            # Fix: also remove the legacy alias URL used by old installs
            # (an alias of official.json), which previously survived this
            # migration.
            "http://app.yunohost.org/list.json",
            "https://app.yunohost.org/official.json",
            "https://app.yunohost.org/community.json",
            "https://labriqueinter.net/apps/labriqueinternet.json"
        ]
        appslists = _read_appslist_list()
        for appslist, infos in appslists.items():
            # Match on the registered URL (not the list name) so renamed
            # lists pointing at deprecated URLs are removed as well.
            if infos["url"] in lists_to_remove:
                app_removelist(name=appslist)
        # Replace by apps.json list
        app_fetchlist(name="yunohost",
                      url="https://app.yunohost.org/apps.json")
    def backward(self):
        # Restore the pre-migration list only if the backup file exists.
        if os.path.exists(APPSLISTS_BACKUP):
            os.system("cp %s %s" % (APPSLISTS_BACKUP, APPSLISTS_JSON))
| Python | 0 |
9316ec9f2246ac14176d9bf9d27287dfccedb3f3 | Update to 0.3.0 | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/version.py | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/version.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "0.3.0"
| # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "0.2.0" | Python | 0 |
8c89a0d52c43f96d9673b8b84786a7185ddc3f6f | Bump WireCloud version | src/wirecloud/platform/__init__.py | src/wirecloud/platform/__init__.py | # -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
__version_info__ = (0, 7, 0)
__version__ = '.'.join(map(str, __version_info__)) + 'b2'
| # -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
__version_info__ = (0, 7, 0)
__version__ = '.'.join(map(str, __version_info__)) + 'b1'
| Python | 0 |
a4dcfff5214e3956f45bf0ef853dea871dbd8da9 | Fix for missing 'methods' section | delphi-395.py | delphi-395.py | import argparse
import os
import re
from uuid import uuid4
from jinja2 import Environment, PackageLoader
from yaml import load, Loader
def get_config():
    """Parse the command line: one or more YAML template files to expand."""
    arg_parser = argparse.ArgumentParser(
        description='Construct Delphi classes from a YAML template.')
    arg_parser.add_argument(
        'source_files', metavar='SOURCE', type=argparse.FileType('r'), nargs='+',
        help='A YAML template file from which Delphi source will be generated')
    return arg_parser.parse_args()
def split_on_space_underscore_and_upper(name):
    """Break *name* into words at spaces, underscores and capital letters."""
    spaced = re.sub(r"([A-Z])", r" \1", name)
    return spaced.replace('_', ' ').split()
def augment_name(target):
    """Derive camelCase, TitleCase and snake_case variants of target['name'].

    Stores them under 'name_camelcase', 'name_titlecase' and
    'name_snakecase'.  No-op when *target* has no 'name' key; assumes the
    name is non-empty.
    """
    if 'name' in target:
        words = split_on_space_underscore_and_upper(target['name'])
        target['name_camelcase'] = words[0] + ''.join(w.title() for w in words[1:])
        target['name_titlecase'] = ''.join(w.title() for w in words)
        target['name_snakecase'] = '_'.join(w.lower() for w in words)
def augment_names(data):
    """Attach derived name variants to the type itself and to each of its
    requirements and variables, where present."""
    type_info = data['type']
    if type_info.get('name') is not None:
        augment_name(type_info)
    for section in ('requirements', 'variables'):
        for entry in type_info.get(section) or []:
            augment_name(entry)
def augment_uuids(data):
    """Stamp data['type'] with a fresh, upper-cased UUID4 string."""
    fresh = uuid4()
    data['type']['uuid'] = str(fresh).upper()
def create_method_definition(method, class_title):
    """Split a Delphi method source into its declaration and a full body
    whose name is qualified with the owning class (T<class_title>.<name>).

    Returns a dict with 'definition' (the first line as written) and
    'body' (the same source with the qualified name).
    """
    declaration, remainder = method.split('\n', 1)
    match = re.match(r"(function\s+|procedure\s+)(.*)", declaration)
    qualified = "{0}T{1}.{2}\n{3}".format(
        match.group(1), class_title, match.group(2), remainder)
    return {'definition': declaration, 'body': qualified}
def augment_methods(data):
    """Replace each raw method source in data['type']['methods'] with the
    definition/body dict produced by create_method_definition().

    Leaves the data untouched when no 'methods' section is present; when
    the type has no usable 'name_titlecase', the methods list collapses
    to [].  (Previously the guard conditions were loop-invariant yet
    re-evaluated inside the comprehension for every element.)
    """
    type_info = data['type']
    if 'methods' not in type_info or type_info['methods'] is None:
        return
    title = type_info.get('name_titlecase')
    if title is None:
        type_info['methods'] = []
    else:
        type_info['methods'] = [create_method_definition(method, title)
                                for method in type_info['methods']]
def augment_data(data):
    """Run every augmentation pass (uuid, names, methods) on *data*,
    provided it actually describes a type."""
    if 'type' not in data:
        return
    augment_uuids(data)
    augment_names(data)
    augment_methods(data)
def expand_template(config, template, template_file_name, type_data):
    """Render *template* with *type_data* and write the result to the file
    named by %-interpolating *type_data* into *template_file_name*.

    *config* is accepted for signature parity with the other expand
    helpers but is not used here.
    """
    output_name = template_file_name % type_data
    with open(output_name, 'w') as dest_file:
        dest_file.write(template.render(type_data))
def expand_templates(config, env, source_file):
    """Parse one YAML source file, augment it, and expand every .pas
    template found in the templates/ directory against it.

    A fresh UUID is stamped before each template so every generated unit
    receives its own identifier.
    """
    data = load(source_file, Loader=Loader)
    augment_data(data)
    for name in os.listdir('templates'):
        if not name.endswith('.pas'):
            continue
        augment_uuids(data)
        expand_template(config, env.get_template(name), name, data['type'])
def main():
    """Entry point: expand templates for every YAML file on the command line."""
    config = get_config()
    env = Environment(lstrip_blocks=True, trim_blocks=True,
                      loader=PackageLoader('delphi-395', 'templates'))
    for source in config.source_files:
        expand_templates(config, env, source)
if __name__ == "__main__":
    main()
| import argparse
import os
import re
from uuid import uuid4
from jinja2 import Environment, PackageLoader
from yaml import load, Loader
def get_config():
parser = argparse.ArgumentParser(description='Construct Delphi classes from a YAML template.')
parser.add_argument('source_files', metavar='SOURCE', type=argparse.FileType('r'), nargs='+',
help='A YAML template file from which Delphi source will be generated')
return parser.parse_args()
def split_on_space_underscore_and_upper(name):
return ((re.sub(r"([A-Z])", r" \1", name)).replace('_', ' ')).split()
def augment_name(target):
if 'name' in target:
split_name = split_on_space_underscore_and_upper(target['name'])
target['name_camelcase'] = split_name[0] + ''.join(x.title() for x in split_name[1:])
target['name_titlecase'] = ''.join(x.title() for x in split_name)
target['name_snakecase'] = '_'.join(x.lower() for x in split_name)
def augment_names(data):
if 'name' in data['type'] and data['type']['name'] is not None:
augment_name(data['type'])
if 'requirements' in data['type'] and data['type']['requirements'] is not None:
for requirement in data['type']['requirements']:
augment_name(requirement)
if 'variables' in data['type'] and data['type']['variables'] is not None:
for variable in data['type']['variables']:
augment_name(variable)
def augment_uuids(data):
data['type']['uuid'] = str(uuid4()).upper()
def create_method_definition(method, class_title):
first, rest = method.split('\n', 1)
match = re.match(r"(function\s+|procedure\s+)(.*)", first)
return {
'definition': first,
'body': "{0}T{1}.{2}\n{3}".format(match.group(1), class_title, match.group(2), rest)
}
def augment_methods(data):
    """Replace each raw method source in data['type']['methods'] with the
    definition/body dict produced by create_method_definition().

    Bug fix: this used to index data['type']['methods'] unconditionally,
    so a template without a 'methods' section raised KeyError (and an
    explicit null raised TypeError).  Missing/null sections are now left
    untouched; a type without a usable 'name_titlecase' yields [].
    """
    type_info = data['type']
    if 'methods' not in type_info or type_info['methods'] is None:
        return
    title = type_info.get('name_titlecase')
    if title is None:
        type_info['methods'] = []
    else:
        type_info['methods'] = [create_method_definition(method, title)
                                for method in type_info['methods']]
def augment_data(data):
if 'type' in data:
augment_uuids(data)
augment_names(data)
augment_methods(data)
def expand_template(config, template, template_file_name, type_data):
with open(template_file_name % type_data, 'w') as dest_file:
dest_file.write(template.render(type_data))
def expand_templates(config, env, source_file):
    """Load one YAML source file and render every .pas template with it."""
    data = load(source_file, Loader=Loader)
    augment_data(data)
    # NOTE(review): reads the 'templates' directory relative to the current
    # working directory, while the Jinja loader uses the package path —
    # confirm these stay in sync.
    for template_file_name in os.listdir('templates'):
        if template_file_name[-4:] == '.pas':
            # Regenerate the UUID per template so each rendered unit
            # gets a distinct GUID.
            augment_uuids(data)
            expand_template(config, env.get_template(template_file_name), template_file_name, data['type'])
def main():
    """Entry point: render every template for each YAML source file."""
    config = get_config()
    # lstrip_blocks/trim_blocks keep Jinja control tags from leaving stray
    # whitespace in the generated Delphi source.
    env = Environment(lstrip_blocks=True, trim_blocks=True, loader=PackageLoader('delphi-395', 'templates'))
    for source_file in config.source_files:
        expand_templates(config, env, source_file)

if __name__ == "__main__":
    main()
| Python | 0.001131 |
36bfa8f556941848eb1a809d48aae1aa43f23c3f | Add option to choose if we keep the <none> images | di-cleaner.py | di-cleaner.py | #!/usr/bin/env python
import argparse
import atexit
import logging
import sys
from pprint import pformat
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
HELP_DOCKER_BASE_URL = ('Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
HELP_DOCKER_API_VERSION = ('The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
HELP_DOCKER_HTTP_TIMEOUT = ('The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
DEFAULT_IMAGES_TO_KEEP = 2
HELP_IMAGES_TO_KEEP = ('How many docker images to keep. '
'Defaults to %d images') % DEFAULT_IMAGES_TO_KEEP
HELP_KEEP_NONE_IMAGES = 'Keep <none> images'
def _exit():
    # atexit hook: flush and close all logging handlers before the
    # interpreter shuts down.
    logging.shutdown()
def debug_var(name, var):
    """Log a variable's name and pretty-printed value at DEBUG level."""
    # Pass the values as lazy %-style logging arguments instead of
    # pre-interpolating: the message string is only built if a handler
    # actually emits the record. (pformat itself still runs eagerly.)
    logging.debug('Var %s has: %s', name, pformat(var))
def setup_parser(parser):
    """Register all di-cleaner CLI options on *parser* and return it."""
    parser.add_argument('--debug', help='debug mode', action='store_true')
    parser.add_argument('--base-url', help=HELP_DOCKER_BASE_URL, default=DEFAULT_DOCKER_BASE_URL)
    parser.add_argument('--api-version', help=HELP_DOCKER_API_VERSION, default=DEFAULT_DOCKER_API_VERSION)
    # Numeric options are validated later by validate_args (must be >= 0).
    parser.add_argument('--http-timeout', help=HELP_DOCKER_HTTP_TIMEOUT, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int)
    parser.add_argument('--images-to-keep', help=HELP_IMAGES_TO_KEEP, default=DEFAULT_IMAGES_TO_KEEP, type=int)
    parser.add_argument('--keep-none-images', help=HELP_KEEP_NONE_IMAGES, action='store_true')
    return parser
def validate_args(args):
    """Validate parsed CLI args; print all problems to stderr and exit(1)
    if any value is invalid.

    (Previously a negative --http-timeout printed its error but fell
    through without exiting — only the images-to-keep branch exited.)
    """
    errors = False
    if args.http_timeout < 0:
        sys.stderr.write('HTTP timeout should be 0 or bigger\n')
        errors = True
    if args.images_to_keep < 0:
        sys.stderr.write('Images to keep should be 0 or bigger\n')
        errors = True
    if errors:
        sys.exit(1)
def main():
    """Parse CLI args, optionally enable debug logging, and validate them."""
    # Make sure logging is flushed even when validate_args calls sys.exit().
    atexit.register(func=_exit)
    parser = setup_parser(argparse.ArgumentParser(description='Clean old docker images'))
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
        debug_var(name='args', var=args)
    validate_args(args)

if __name__ == '__main__':
    main()
| #!/usr/bin/env python
import argparse
import atexit
import logging
import sys
from pprint import pformat
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
HELP_DOCKER_BASE_URL = ('Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
HELP_DOCKER_API_VERSION = ('The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
HELP_DOCKER_HTTP_TIMEOUT = ('The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
DEFAULT_IMAGES_TO_KEEP = 2
HELP_IMAGES_TO_KEEP = ('How many docker images to keep. '
'Defaults to %d images') % DEFAULT_IMAGES_TO_KEEP
def _exit():
logging.shutdown()
def debug_var(name, var):
logging.debug('Var %s has: %s' % (name, pformat(var)))
def setup_parser(parser):
parser.add_argument('--debug', help='debug mode', action='store_true')
parser.add_argument('--base-url', help=HELP_DOCKER_BASE_URL, default=DEFAULT_DOCKER_BASE_URL)
parser.add_argument('--api-version', help=HELP_DOCKER_API_VERSION, default=DEFAULT_DOCKER_API_VERSION)
parser.add_argument('--http-timeout', help=HELP_DOCKER_HTTP_TIMEOUT, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int)
parser.add_argument('--images-to-keep', help=HELP_IMAGES_TO_KEEP, default=DEFAULT_IMAGES_TO_KEEP, type=int)
return parser
def validate_args(args):
if args.http_timeout < 0:
sys.stderr.write('HTTP timeout should be 0 or bigger\n')
if args.images_to_keep < 0:
sys.stderr.write('Images to keep should be 0 or bigger\n')
sys.exit(1)
def main():
atexit.register(func=_exit)
parser = setup_parser(argparse.ArgumentParser(description='Clean old docker images'))
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
debug_var(name='args', var=args)
validate_args(args)
if __name__ == '__main__':
main()
| Python | 0 |
edf099ca644aae12daef65ff65744d99fcd3a634 | Remove function we won't actually use. | st2common/st2common/util/compat.py | st2common/st2common/util/compat.py | # -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
__all__ = [
'to_unicode',
'to_ascii',
]
def to_unicode(value):
    """
    Return *value* as a unicode (text) string.

    :param value: Value to convert.
    :type value: ``str`` or ``unicode``

    :rtype: ``unicode``
    :raises ValueError: if the value is not a string type at all.
    """
    if not isinstance(value, six.string_types):
        raise ValueError('Value "%s" must be a string.' % (value))
    return value if isinstance(value, six.text_type) else six.u(value)
def to_ascii(value):
    """
    Decode the provided bytes as ASCII, silently dropping any bytes that
    have no ASCII representation.
    """
    return value.decode('ascii', 'ignore')
| # -*- coding: utf-8 -*-
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import six
__all__ = [
'to_unicode',
'to_ascii',
'add_st2actions_pythonrunner_to_sys_path'
]
def to_unicode(value):
"""
Ensure that the provided text value is represented as unicode.
:param value: Value to convert.
:type value: ``str`` or ``unicode``
:rtype: ``unicode``
"""
if not isinstance(value, six.string_types):
raise ValueError('Value "%s" must be a string.' % (value))
if not isinstance(value, six.text_type):
value = six.u(value)
return value
def to_ascii(value):
"""
Function which encodes the provided bytes / string to ASCII encoding ignoring any errors
which could come up when trying to encode a non-ascii value.
"""
return value.decode('ascii', errors='ignore')
def add_st2actions_pythonrunner_to_sys_path():
"""
Function which adds "st2common.runners.pythonrunner" to sys.path and redirects it to
"st2common.runners.base_action".
First path was deprecated a long time ago, but some modules still rely on on it. This
is to be used in places where "st2common" is used as a standalone package without access to
st2actions (e.g. serverless).
"""
import st2common.runners.base_action
sys.modules['st2actions'] = {}
sys.modules['st2actions.runners'] = {}
sys.modules['st2actions.runners.pythonrunner'] = st2common.runners.base_action
return sys.modules
| Python | 0 |
a187bd1f89d40d4274f884bba567a2f6be160dcd | Remove unintended changes from reverthousekeeping command | cla_backend/apps/cla_butler/management/commands/reverthousekeeping.py | cla_backend/apps/cla_butler/management/commands/reverthousekeeping.py | # coding=utf-8
import os
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.core.management.base import BaseCommand
from cla_butler.qs_to_file import QuerysetToFile
from cla_eventlog.models import Log
from cla_provider.models import Feedback
from complaints.models import Complaint
from diagnosis.models import DiagnosisTraversal
from legalaid.models import (
Case,
EligibilityCheck,
CaseNotesHistory,
Person,
Income,
Savings,
Deductions,
PersonalDetails,
ThirdPartyDetails,
AdaptationDetails,
CaseKnowledgebaseAssignment,
EODDetails,
EODDetailsCategory,
Property,
)
from timer.models import Timer
MODELS = [
Deductions,
Income,
Savings,
Person,
AdaptationDetails,
PersonalDetails,
ThirdPartyDetails,
EligibilityCheck,
Property,
DiagnosisTraversal,
Case,
EODDetails,
EODDetailsCategory,
Complaint,
CaseKnowledgebaseAssignment,
Timer,
Feedback,
CaseNotesHistory,
Log,
LogEntry,
]
class Command(BaseCommand):
    """Re-load data previously deleted by the housekeeping command.

    Reads serialized querysets from <settings.TEMP_DIR>/<directory> and
    loads them back model by model; MODELS is ordered so referenced rows
    are restored before the rows that point at them.
    """
    help = "Attempts to re-load data that was deleted in the housekeeping"

    def add_arguments(self, parser):
        # Directory (relative to settings.TEMP_DIR) holding the dump files.
        parser.add_argument("directory", nargs=1)

    def handle(self, *args, **options):
        # NOTE(review): with add_arguments() defined, newer Django versions
        # deliver the positional as options["directory"] (a 1-element list)
        # rather than in *args — confirm args[0] is populated on the Django
        # version in use here.
        path = os.path.join(settings.TEMP_DIR, args[0])
        filewriter = QuerysetToFile(path)
        for model in MODELS:
            self.stdout.write(model.__name__)
            filewriter.load(model)
| # coding=utf-8
import os
import logging
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.core.management.base import BaseCommand
from cla_butler.qs_to_file import QuerysetToFile
from cla_eventlog.models import Log
from cla_provider.models import Feedback
from complaints.models import Complaint
from diagnosis.models import DiagnosisTraversal
from legalaid.models import (
Case,
EligibilityCheck,
CaseNotesHistory,
Person,
Income,
Savings,
Deductions,
PersonalDetails,
ThirdPartyDetails,
AdaptationDetails,
CaseKnowledgebaseAssignment,
EODDetails,
EODDetailsCategory,
Property,
)
from timer.models import Timer
MODELS = [
Deductions,
Income,
Savings,
Person,
AdaptationDetails,
PersonalDetails,
ThirdPartyDetails,
EligibilityCheck,
Property,
DiagnosisTraversal,
Case,
EODDetails,
EODDetailsCategory,
Complaint,
CaseKnowledgebaseAssignment,
Timer,
Feedback,
CaseNotesHistory,
Log,
LogEntry,
]
logger = logging.getLogger("django")
class Command(BaseCommand):
help = "Attempts to re-load data that was deleted in the housekeeping"
def add_arguments(self, parser):
parser.add_argument("directory", nargs=1)
def handle(self, *args, **options):
logger.info("Running monitor_multiple_outcome_codes cron job")
path = os.path.join(settings.TEMP_DIR, args[0])
filewriter = QuerysetToFile(path)
for model in MODELS:
self.stdout.write(model.__name__)
filewriter.load(model)
| Python | 0 |
5ffef1beb126fed15851ddc30ea9fca7edbca017 | Remove debug code | app/soc/modules/gsoc/views/student_forms.py | app/soc/modules/gsoc/views/student_forms.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GSoC student forms.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from django.forms import fields
from django.core.urlresolvers import reverse
from django.conf.urls.defaults import url
from django.utils import simplejson
from soc.logic import cleaning
from soc.logic import dicts
from soc.views import forms
from soc.models.user import User
from soc.modules.gsoc.models.organization import GSoCOrganization
from soc.modules.gsoc.models.profile import GSoCProfile
from soc.modules.gsoc.models.profile import GSoCStudentInfo
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.base_templates import LoggedInMsg
from soc.modules.gsoc.views.helper import url_patterns
class TaxForm(forms.ModelForm):
    """Django form for the student tax form.

    Wraps GSoCStudentInfo's tax_form field; the uploaded blob is taken
    from the request's blobstore file uploads rather than from the
    submitted form data.
    """

    class Meta:
        model = GSoCStudentInfo
        css_prefix = 'student_form'
        fields = ['tax_form']
        widgets = {}

    tax_form = fields.FileField(label='Upload new tax form', required=False)

    def __init__(self, data, *args, **kwargs):
        super(TaxForm, self).__init__(*args, **kwargs)
        # `data` is the request data object, kept so clean_tax_form can
        # reach the blobstore uploads attached to the request.
        self.data = data

    def clean_tax_form(self):
        # Blobstore uploads arrive on the request, not in cleaned_data;
        # use the first upload if one was provided, else None.
        uploads = self.data.request.file_uploads
        return uploads[0] if uploads else None
class TaxFormPage(RequestHandler):
    """View for uploading a student's tax form."""

    def djangoURLPatterns(self):
        return [
            url(r'^gsoc/student_forms/tax/%s$' % url_patterns.PROGRAM,
                self, name='gsoc_tax_forms'),
        ]

    def checkAccess(self):
        self.check.isProfileActive()

    def templatePath(self):
        return 'v2/modules/gsoc/student_forms/tax.html'

    def context(self):
        tax_form = TaxForm(self.data, self.data.POST or None,
                           instance=self.data.student_info)
        return {
            'page_name': 'Tax form',
            'forms': [tax_form],
            'error': bool(tax_form.errors),
        }

    def validate(self):
        """Save the uploaded tax form; return False on validation failure."""
        tax_form = TaxForm(self.data, self.data.POST,
                           instance=self.data.student_info)
        if not tax_form.is_valid():
            return False
        # NOTE(review): success path implicitly returns None (falsy), not
        # True — confirm callers only use `validated` for display, not as
        # a success flag.
        tax_form.save()

    def json(self):
        # Hand the client a fresh blobstore upload URL targeting this view.
        url = self.redirect.program().urlOf('gsoc_tax_forms')
        upload_url = blobstore.create_upload_url(url)
        self.response.write(upload_url)

    def post(self):
        validated = self.validate()
        self.redirect.program().to('gsoc_tax_forms', validated=validated)
| #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GSoC student forms.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from django.forms import fields
from django.core.urlresolvers import reverse
from django.conf.urls.defaults import url
from django.utils import simplejson
from soc.logic import cleaning
from soc.logic import dicts
from soc.views import forms
from soc.models.user import User
from soc.modules.gsoc.models.organization import GSoCOrganization
from soc.modules.gsoc.models.profile import GSoCProfile
from soc.modules.gsoc.models.profile import GSoCStudentInfo
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.base_templates import LoggedInMsg
from soc.modules.gsoc.views.helper import url_patterns
class TaxForm(forms.ModelForm):
"""Django form for the student tax form.
"""
class Meta:
model = GSoCStudentInfo
css_prefix = 'student_form'
fields = ['tax_form']
widgets = {}
tax_form = fields.FileField(label='Upload new tax form', required=False)
def __init__(self, data, *args, **kwargs):
super(TaxForm, self).__init__(*args, **kwargs)
self.data = data
def clean_tax_form(self):
uploads = self.data.request.file_uploads
return uploads[0] if uploads else None
class TaxFormPage(RequestHandler):
"""View for the participant profile.
"""
def djangoURLPatterns(self):
return [
url(r'^gsoc/student_forms/tax/%s$' % url_patterns.PROGRAM,
self, name='gsoc_tax_forms'),
]
def checkAccess(self):
self.check.isProfileActive()
def templatePath(self):
return 'v2/modules/gsoc/student_forms/tax.html'
def context(self):
tax_form = TaxForm(self.data, self.data.POST or None,
instance=self.data.student_info)
return {
'page_name': 'Tax form',
'forms': [tax_form],
'error': bool(tax_form.errors),
}
def validate(self):
tax_form = TaxForm(self.data, self.data.POST,
instance=self.data.student_info)
if not tax_form.is_valid():
import logging
logging.warning("Sad")
return False
tax_form.save()
def json(self):
url = self.redirect.program().urlOf('gsoc_tax_forms')
upload_url = blobstore.create_upload_url(url)
self.response.write(upload_url)
def post(self):
validated = self.validate()
self.redirect.program().to('gsoc_tax_forms', validated=validated)
| Python | 0.000299 |
123401cb6ed88b77d9a584eea8f2de75e518e5da | remove try except when hintsvm is not installed | libact/query_strategies/__init__.py | libact/query_strategies/__init__.py | """
Concrete query strategy classes.
"""
import logging
logger = logging.getLogger(__name__)
from .active_learning_by_learning import ActiveLearningByLearning
from .hintsvm import HintSVM
from .uncertainty_sampling import UncertaintySampling
from .query_by_committee import QueryByCommittee
from .quire import QUIRE
from .random_sampling import RandomSampling
from .variance_reduction import VarianceReduction
| """
Concrete query strategy classes.
"""
import logging
logger = logging.getLogger(__name__)
from .active_learning_by_learning import ActiveLearningByLearning
try:
from .hintsvm import HintSVM
except ImportError:
logger.warn('HintSVM library not found, not importing.')
from .uncertainty_sampling import UncertaintySampling
from .query_by_committee import QueryByCommittee
from .quire import QUIRE
from .random_sampling import RandomSampling
from .variance_reduction import VarianceReduction
| Python | 0 |
0e56ed6234e1f28b0aac2e22063bb39faab1d54c | use '!XyZZy!' as value to be sustituted in metric name | librato_python_web/tools/compose.py | librato_python_web/tools/compose.py | # Copyright (c) 2015. Librato, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Librato, Inc. nor the names of project contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LIBRATO, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Helper methods to model Librato composite query dsl
METRIC_PREFIX = "!XyZZy!"
DUMMY_PREFIX = "DUMMY-PREFIX"
DEFAULT_PERIOD = 60
def s_(metric, source="{}-*".format(DUMMY_PREFIX), period=DEFAULT_PERIOD, function="mean"):
    """Compose a Librato series query, s(), for *metric*, prefixing the
    metric name with the shared substitution placeholder."""
    template = 's("{}.{}", "{}", {{period: "{}", function: "{}"}})'
    return template.format(METRIC_PREFIX, metric, source, period, function)
def timeshift_(shift, series):
    """Compose a Librato timeshift() of *series*, shifted by *shift*."""
    return 'timeshift("%s", %s)' % (shift, series)
def sum_(*args):
    """Compose a Librato sum() over the given series expressions."""
    return 'sum([%s])' % ', '.join(args)
def subtract_(series1, series2):
    """Compose a Librato subtract() of *series2* from *series1*."""
    return 'subtract([%s, %s])' % (series1, series2)
def multiply_(*args):
    """Compose a Librato multiply() over the given series expressions."""
    return 'multiply([%s])' % ', '.join(args)
def divide_(series1, series2):
    """Compose a Librato divide() of *series1* by *series2*."""
    return 'divide([%s, %s])' % (series1, series2)
def scale_(series, factor):
    """Compose a Librato scale() of *series* by *factor*."""
    return 'scale(%s, {factor: "%s"})' % (series, factor)
def derive_(series, detect_reset="true"):
    """Compose a Librato derive() of *series*; detect_reset controls
    counter-reset handling in the composite."""
    return 'derive(%s, {detect_reset: "%s"})' % (series, detect_reset)
| # Copyright (c) 2015. Librato, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Librato, Inc. nor the names of project contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LIBRATO, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Helper methods to model Librato composite query dsl
METRIC_PREFIX = "XyZZy"
DUMMY_PREFIX = "DUMMY-PREFIX"
DEFAULT_PERIOD = 60
def s_(metric, source="{}-*".format(DUMMY_PREFIX), period=DEFAULT_PERIOD, function="mean"):
return 's("{}.{}", "{}", {{period: "{}", function: "{}"}})'.format(METRIC_PREFIX, metric, source, period, function)
def timeshift_(shift, series):
return 'timeshift("{}", {})'.format(shift, series)
def sum_(*args):
return 'sum([{}])'.format(', '.join(args))
def subtract_(series1, series2):
return 'subtract([{}, {}])'.format(series1, series2)
def multiply_(*args):
return 'multiply([{}])'.format(', '.join(args))
def divide_(series1, series2):
return 'divide([{}, {}])'.format(series1, series2)
def scale_(series, factor):
return 'scale({}, {{factor: "{}"}})'.format(series, factor)
def derive_(series, detect_reset="true"):
return 'derive({}, {{detect_reset: "{}"}})'.format(series, detect_reset)
| Python | 0.000231 |
a31a8aa7d5ef0fb742f909c09c340c3f54104833 | clean up comments | linkedin/spiders/linkedin_spider.py | linkedin/spiders/linkedin_spider.py | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from linkedin.items import LinkedinItem
# compile to C using Cython if processing speed becomes a constraint
class LinkedinSpider(CrawlSpider):
    """
    Define the crawler's start urls, set its follow rules, parse HTML
    and assign values to an item. Processing occurs in ../pipelines.py
    """
    name = "linkedin"
    allowed_domains = ["linkedin.com"]

    # TODO: uncomment following lines for full spidering
    '''
    centilist_one = (i for i in xrange(1,100))
    centilist_two = (i for i in xrange(1,100))
    centilist_three = (i for i in xrange(1,100))
    start_urls = ["http://www.linkedin.com/directory/people-%s-%d-%d-%d"
                  % (alphanum, num_one, num_two, num_three)
                  for alphanum in "abcdefghijklmnopqrstuvwxyz"
                  for num_one in centilist_one
                  for num_two in centilist_two
                  for num_three in centilist_three
                  ]
    '''

    # temporary start url, remove for production
    start_urls = ["http://www.linkedin.com/directory/people-a-23-23-2"]

    # TODO: allow /in/name urls too? (LinkedIn custom URLs)
    # Only follow public-profile (/pub/...) links.
    rules = (Rule(SgmlLinkExtractor(allow=('\/pub\/.+', ))
             , callback='parse_item'),
             )

    def parse_item(self, response):
        """Parse either a single public profile page or a disambiguation
        page listing several profiles with the same name.

        A profile page has the name in the #name span; a disambiguation
        page instead has a #result-set list, whose profile links are
        recursively re-queued through this same callback.
        """
        if response:
            hxs = HtmlXPathSelector(response)
            item = LinkedinItem()
            # is this the best way to check that I'm scraping the right page?
            item['full_name'] = hxs.select('//*[@id="name"]/span/span/text()').extract()
            if not item['full_name']:
                # recursively parse list of duplicate profiles
                # NOTE: Results page only displays 25 of possibly many more names;
                # LinkedIn requests authentication to see the rest. Need to resolve
                # Fake account and log-in?
                # TODO: add error checking here to ensure I'm getting the right links
                # and links from "next>>" pages
                multi_profile_urls = hxs.select('//*[@id="result-set"]/li/h2/strong/ \
                    a/@href').extract()
                for profile_url in multi_profile_urls:
                    yield Request(profile_url, callback=self.parse_item)
            else:
                # add meta fields (date crawled/updated, etc)
                # full_name extraction yields a list; indices 0/1 are the
                # first/last name text nodes — TODO confirm against live markup.
                item['first_name'] = item['full_name'][0]
                item['last_name'] = item['full_name'][1]
                item['full_name'] = hxs.select('//*[@id="name"]/span/span/text()').extract()
                item['headline_title'] = hxs.select('//*[@id="member-1"]/p/text() \
                    ').extract()
                item['locality'] = hxs.select('//*[@id="headline"]/dd[1]/span/text() \
                    ').extract()
                item['industry'] = hxs.select('//*[@id="headline"]/dd[2]/text() \
                    ').extract()
                item['current_roles'] = hxs.select('//*[@id="overview"]/dd[1]/ul/li/ \
                    text()').extract()
                # TODO: dynamically check for header of field, assign to object
                # via variable
                if hxs.select('//*[@id="overview"]/dt[2]/text()\
                    ').extract() == [u' \n        Education\n      ']:
                    item['education_institutions'] = hxs.select('//*[@id="overview"]/\
                        dd[2]/ul/li/text()').extract()
                # for debugging
                print item
| from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from linkedin.items import LinkedinItem
# compile to C using Cython if processing speed becomes a constraint
class LinkedinSpider(CrawlSpider):
"""
Define the crawler's start urls, set its follow rules, parse HTML
and assign values to an item. Processing occurs in ../pipelines.py
"""
name = "linkedin"
allowed_domains = ["linkedin.com"]
# TODO: uncomment following lines for full spidering
'''
centilist_one = (i for i in xrange(1,100))
centilist_two = (i for i in xrange(1,100))
centilist_three = (i for i in xrange(1,100))
start_urls = ["http://www.linkedin.com/directory/people-%s-%d-%d-%d"
% (alphanum, num_one, num_two, num_three)
for alphanum in "abcdefghijklmnopqrstuvwxyz"
for num_one in centilist_one
for num_two in centilist_two
for num_three in centilist_three
]
'''
# temporary start url, remove for production
start_urls = ["http://www.linkedin.com/directory/people-a-23-23-2"]
# TODO: allow /in/name urls too? (LinkedIn custom URLs)
rules = (Rule(SgmlLinkExtractor(allow=('\/pub\/.+', ))
, callback='parse_item'),
)
def parse_item(self, response):
if response:
hxs = HtmlXPathSelector(response)
item = LinkedinItem()
# TODO: update this xpath to include class id
# is this the best way to check that I'm scraping the right page
item['full_name'] = hxs.select('//span/span/text()').extract()
if not item['full_name']:
# recursively parse list of duplicate profiles
# NOTE: Results page only displays 25 of possibly many more names;
# LinkedIn requests authentication to see the rest. Need to resolve
# TODO: add error checking here to ensure I'm getting the right links
multi_profile_urls = hxs.select('//*[@id="result-set"]/li/h2/strong/ \
a/@href').extract()
for profile_url in multi_profile_urls:
yield Request(profile_url, callback=self.parse_item)
else:
# handle cleaning in pipeline
item['first_name'] = item['full_name'][0]
item['last_name'] = item['full_name'][2]
item['full_name'] = hxs.select('//span/span/text()').extract()
item['headline_title'] = hxs.select('//*[@id="member-1"]/p/text() \
').extract()
item['locality'] = hxs.select('//*[@id="headline"]/dd[1]/span/text() \
').extract()
item['industry'] = hxs.select('//*[@id="headline"]/dd[2]/text() \
').extract()
item['current_roles'] = hxs.select('//*[@id="overview"]/dd[1]/ul/li/ \
text()').extract()
# TODO: dynamically check for header of field, assign to object
# via variable
if hxs.select('//*[@id="overview"]/dt[2]/text()\
').extract() == [u' \n Education\n ']:
item['education_institutions'] = hxs.select('//*[@id="overview"]/\
dd[2]/ul/li/text()').extract()
# for debugging
print item
| Python | 0 |
957e1c2ec602d4ec6aa990cdce4196083f0e5a2d | Fix buggy import setup for embedded bot tests. | zerver/tests/test_embedded_bot_system.py | zerver/tests/test_embedded_bot_system.py | # -*- coding: utf-8 -*-
from unittest import mock
from typing import Any, Dict, Tuple, Text, Optional
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import UserProfile, Recipient, get_display_recipient
class TestEmbeddedBotMessaging(ZulipTestCase):
    """Happy-path message handling for embedded bots, using the built-in
    "helloworld" service (which replies "beep boop" to any trigger)."""

    def setUp(self):
        # type: () -> None
        self.user_profile = self.example_user("othello")
        self.bot_profile = self.create_test_bot('embedded-bot@zulip.testserver', self.user_profile, 'Embedded bot',
                                                'embedded', UserProfile.EMBEDDED_BOT, service_name='helloworld')

    def test_pm_to_embedded_bot(self):
        # type: () -> None
        """A PM to the bot produces a bot PM reply back to the sender."""
        self.send_personal_message(self.user_profile.email, self.bot_profile.email,
                                   content="help")
        last_message = self.get_last_message()
        self.assertEqual(last_message.content, "beep boop")
        self.assertEqual(last_message.sender_id, self.bot_profile.id)
        display_recipient = get_display_recipient(last_message.recipient)
        # The next two lines error on mypy because the display_recipient is of type Union[Text, List[Dict[str, Any]]].
        # In this case, we know that display_recipient will be of type List[Dict[str, Any]].
        # Otherwise this test will error, which is wanted behavior anyway.
        self.assert_length(display_recipient, 1)  # type: ignore
        self.assertEqual(display_recipient[0]['email'], self.user_profile.email)  # type: ignore

    def test_stream_message_to_embedded_bot(self):
        # type: () -> None
        """Mentioning the bot in a stream gets a reply on the same
        stream and topic."""
        self.send_stream_message(self.user_profile.email, "Denmark",
                                 content="@**{}** foo".format(self.bot_profile.full_name),
                                 topic_name="bar")
        last_message = self.get_last_message()
        self.assertEqual(last_message.content, "beep boop")
        self.assertEqual(last_message.sender_id, self.bot_profile.id)
        self.assertEqual(last_message.subject, "bar")
        display_recipient = get_display_recipient(last_message.recipient)
        self.assertEqual(display_recipient, "Denmark")

    def test_stream_message_not_to_embedded_bot(self):
        # type: () -> None
        """A stream message that does not mention the bot gets no reply."""
        self.send_stream_message(self.user_profile.email, "Denmark",
                                 content="foo", topic_name="bar")
        last_message = self.get_last_message()
        self.assertEqual(last_message.content, "foo")
class TestEmbeddedBotFailures(ZulipTestCase):
    """Failure modes of embedded-bot message handling."""

    @mock.patch("logging.error")
    def test_invalid_embedded_bot_service(self, logging_error_mock):
        # type: (mock.Mock) -> None
        # A bot registered with a service name that has no matching embedded
        # bot implementation must not produce a reply: the triggering
        # message stays the newest message. logging.error is mocked to keep
        # the expected error report out of test output.
        user_profile = self.example_user("othello")
        bot_profile = self.create_test_bot('embedded-bot@zulip.testserver', user_profile, 'Embedded bot',
                                           'embedded', UserProfile.EMBEDDED_BOT, service_name='nonexistent_service')
        mention_bot_message = "@**{}** foo".format(bot_profile.full_name)
        self.send_stream_message(user_profile.email, "Denmark",
                                 content=mention_bot_message,
                                 topic_name="bar")
        last_message = self.get_last_message()
        self.assertEqual(last_message.content, mention_bot_message)
| # -*- coding: utf-8 -*-
from unittest.mock import patch
from typing import Any, Dict, Tuple, Text, Optional
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import UserProfile, Recipient, get_display_recipient
class TestEmbeddedBotMessaging(ZulipTestCase):
def setUp(self):
# type: () -> None
self.user_profile = self.example_user("othello")
self.bot_profile = self.create_test_bot('embedded-bot@zulip.testserver', self.user_profile, 'Embedded bot',
'embedded', UserProfile.EMBEDDED_BOT, service_name='helloworld')
def test_pm_to_embedded_bot(self):
# type: () -> None
self.send_personal_message(self.user_profile.email, self.bot_profile.email,
content="help")
last_message = self.get_last_message()
self.assertEqual(last_message.content, "beep boop")
self.assertEqual(last_message.sender_id, self.bot_profile.id)
display_recipient = get_display_recipient(last_message.recipient)
# The next two lines error on mypy because the display_recipient is of type Union[Text, List[Dict[str, Any]]].
# In this case, we know that display_recipient will be of type List[Dict[str, Any]].
# Otherwise this test will error, which is wanted behavior anyway.
self.assert_length(display_recipient, 1) # type: ignore
self.assertEqual(display_recipient[0]['email'], self.user_profile.email) # type: ignore
def test_stream_message_to_embedded_bot(self):
# type: () -> None
self.send_stream_message(self.user_profile.email, "Denmark",
content="@**{}** foo".format(self.bot_profile.full_name),
topic_name="bar")
last_message = self.get_last_message()
self.assertEqual(last_message.content, "beep boop")
self.assertEqual(last_message.sender_id, self.bot_profile.id)
self.assertEqual(last_message.subject, "bar")
display_recipient = get_display_recipient(last_message.recipient)
self.assertEqual(display_recipient, "Denmark")
def test_stream_message_not_to_embedded_bot(self):
# type: () -> None
self.send_stream_message(self.user_profile.email, "Denmark",
content="foo", topic_name="bar")
last_message = self.get_last_message()
self.assertEqual(last_message.content, "foo")
class TestEmbeddedBotFailures(ZulipTestCase):
@patch("logging.error")
def test_invalid_embedded_bot_service(self, logging_error_mock):
# type: (mock.Mock) -> None
user_profile = self.example_user("othello")
bot_profile = self.create_test_bot('embedded-bot@zulip.testserver', user_profile, 'Embedded bot',
'embedded', UserProfile.EMBEDDED_BOT, service_name='nonexistent_service')
mention_bot_message = "@**{}** foo".format(bot_profile.full_name)
self.send_stream_message(user_profile.email, "Denmark",
content=mention_bot_message,
topic_name="bar")
last_message = self.get_last_message()
self.assertEqual(last_message.content, mention_bot_message)
| Python | 0 |
f379d8ce256159a4fc7ce58abf87c609a4a0c3ab | rename present() _present(), indicating private | AlphaTwirl/EventReader/ProgressMonitor.py | AlphaTwirl/EventReader/ProgressMonitor.py | # Tai Sakuma <sakuma@fnal.gov>
import multiprocessing
import time
from ProgressReport import ProgressReport
##____________________________________________________________________________||
class ProgressReporter(object):
def __init__(self, queue, pernevents = 1000):
self.queue = queue
self.pernevents = pernevents
self.lastReportTime = time.time()
def report(self, event, component):
if not self.needToReport(event, component): return
done = event.iEvent + 1
report = ProgressReport(name = component.name, done = done, total = event.nEvents)
self.queue.put(report)
self.lastReportTime = time.time()
def needToReport(self, event, component):
iEvent = event.iEvent + 1 # add 1 because event.iEvent starts from 0
if time.time() - self.lastReportTime > 0.02: return True
if iEvent % self.pernevents == 0: return True
if iEvent == event.nEvents: return True
return False
##____________________________________________________________________________||
class Queue(object):
def __init__(self, presentation):
self.presentation = presentation
def put(self, report):
self.presentation.present(report)
##____________________________________________________________________________||
class ProgressMonitor(object):
def __init__(self, presentation):
self.queue = Queue(presentation = presentation)
def monitor(self): pass
def createReporter(self):
reporter = ProgressReporter(self.queue)
return reporter
##____________________________________________________________________________||
class MPProgressMonitor(object):
def __init__(self, presentation):
self.queue = multiprocessing.Queue()
self._presentation = presentation
self.lastTime = time.time()
def monitor(self):
if time.time() - self.lastTime < 0.1: return
self.lastTime = time.time()
self._present()
def last(self):
self._present()
def _present(self):
while not self.queue.empty():
report = self.queue.get()
self._presentation.present(report)
def createReporter(self):
return ProgressReporter(self.queue)
##____________________________________________________________________________||
| # Tai Sakuma <sakuma@fnal.gov>
import multiprocessing
import time
from ProgressReport import ProgressReport
##____________________________________________________________________________||
class ProgressReporter(object):
def __init__(self, queue, pernevents = 1000):
self.queue = queue
self.pernevents = pernevents
self.lastReportTime = time.time()
def report(self, event, component):
if not self.needToReport(event, component): return
done = event.iEvent + 1
report = ProgressReport(name = component.name, done = done, total = event.nEvents)
self.queue.put(report)
self.lastReportTime = time.time()
def needToReport(self, event, component):
iEvent = event.iEvent + 1 # add 1 because event.iEvent starts from 0
if time.time() - self.lastReportTime > 0.02: return True
if iEvent % self.pernevents == 0: return True
if iEvent == event.nEvents: return True
return False
##____________________________________________________________________________||
class Queue(object):
def __init__(self, presentation):
self.presentation = presentation
def put(self, report):
self.presentation.present(report)
##____________________________________________________________________________||
class ProgressMonitor(object):
def __init__(self, presentation):
self.queue = Queue(presentation = presentation)
def monitor(self): pass
def createReporter(self):
reporter = ProgressReporter(self.queue)
return reporter
##____________________________________________________________________________||
class MPProgressMonitor(object):
def __init__(self, presentation):
self.queue = multiprocessing.Queue()
self._presentation = presentation
self.lastTime = time.time()
def monitor(self):
if time.time() - self.lastTime < 0.1: return
self.lastTime = time.time()
self.present()
def last(self):
self.present()
def present(self):
while not self.queue.empty():
report = self.queue.get()
self._presentation.present(report)
def createReporter(self):
return ProgressReporter(self.queue)
##____________________________________________________________________________||
| Python | 0.003887 |
68fe7ecadeda267b5645fd804bb7bbf29afa3667 | add docstring | corehq/apps/cleanup/management/commands/delete_es_docs_in_domain.py | corehq/apps/cleanup/management/commands/delete_es_docs_in_domain.py | from django.core.management import BaseCommand, CommandError
from corehq.apps.domain.models import Domain
from corehq.apps.es import AppES, CaseES, CaseSearchES, FormES, GroupES, UserES
from corehq.apps.es.registry import registry_entry
from corehq.apps.es.transient_util import doc_adapter_from_info
class Command(BaseCommand):
"""
Intended for use in the event that a domain has been deleted, but ES docs have not been fully cleaned up
"""
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
domain_obj = Domain.get_by_name(domain)
if domain_obj and not domain_obj.doc_type.endswith('-Deleted'):
raise CommandError(
f"{domain} has not been deleted. This command is intended for use on deleted domains only."
)
for hqESQuery in [AppES, CaseES, CaseSearchES, FormES, GroupES, UserES]:
doc_ids = hqESQuery().domain(domain).source(['_id']).run().hits
doc_ids = [doc['_id'] for doc in doc_ids]
if not doc_ids:
continue
adapter = doc_adapter_from_info(registry_entry(hqESQuery.index))
adapter.bulk_delete(doc_ids)
| from django.core.management import BaseCommand, CommandError
from corehq.apps.domain.models import Domain
from corehq.apps.es import AppES, CaseES, CaseSearchES, FormES, GroupES, UserES
from corehq.apps.es.registry import registry_entry
from corehq.apps.es.transient_util import doc_adapter_from_info
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
domain_obj = Domain.get_by_name(domain)
if domain_obj and not domain_obj.doc_type.endswith('-Deleted'):
raise CommandError(
f"{domain} has not been deleted. This command is intended for use on deleted domains only."
)
for hqESQuery in [AppES, CaseES, CaseSearchES, FormES, GroupES, UserES]:
doc_ids = hqESQuery().domain(domain).source(['_id']).run().hits
doc_ids = [doc['_id'] for doc in doc_ids]
if not doc_ids:
continue
adapter = doc_adapter_from_info(registry_entry(hqESQuery.index))
adapter.bulk_delete(doc_ids)
| Python | 0.000005 |
bff3a087ec70ab07fe163394826a41c33f6bc38f | Add extra version of py-jinja2 (#14989) | var/spack/repos/builtin/packages/py-jinja2/package.py | var/spack/repos/builtin/packages/py-jinja2/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJinja2(PythonPackage):
"""Jinja2 is a template engine written in pure Python. It provides
a Django inspired non-XML syntax but supports inline expressions
and an optional sandboxed environment."""
homepage = "https://palletsprojects.com/p/jinja/"
url = "https://pypi.io/packages/source/J/Jinja2/Jinja2-2.10.3.tar.gz"
import_modules = ['jinja2']
version('2.10.3', sha256='9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de')
version('2.10.1', sha256='065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013')
version('2.10', sha256='f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4')
version('2.9.6', sha256='ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff')
version('2.8', sha256='bc1ff2ff88dbfacefde4ddde471d1417d3b304e8df103a7a9437d47269201bf4')
version('2.7.3', sha256='2e24ac5d004db5714976a04ac0e80c6df6e47e98c354cb2c0d82f8879d4f8fdb')
version('2.7.2', sha256='310a35fbccac3af13ebf927297f871ac656b9da1d248b1fe6765affa71b53235')
version('2.7.1', sha256='5cc0a087a81dca1c08368482fb7a92fe2bdd8cfbb22bc0fccfe6c85affb04c8b')
version('2.7', sha256='474f1518d189ae7e318b139fecc1d30b943f124448cfa0f09582ca23e069fa4d')
depends_on('py-setuptools', type='build')
depends_on('py-markupsafe@0.23:', type=('build', 'run'))
depends_on('py-babel@0.8:', type=('build', 'run')) # optional, required for i18n
| # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJinja2(PythonPackage):
"""Jinja2 is a template engine written in pure Python. It provides
a Django inspired non-XML syntax but supports inline expressions
and an optional sandboxed environment."""
homepage = "https://palletsprojects.com/p/jinja/"
url = "https://pypi.io/packages/source/J/Jinja2/Jinja2-2.10.3.tar.gz"
import_modules = ['jinja2']
version('2.10.3', sha256='9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de')
version('2.10', sha256='f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4')
version('2.9.6', sha256='ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff')
version('2.8', sha256='bc1ff2ff88dbfacefde4ddde471d1417d3b304e8df103a7a9437d47269201bf4')
version('2.7.3', sha256='2e24ac5d004db5714976a04ac0e80c6df6e47e98c354cb2c0d82f8879d4f8fdb')
version('2.7.2', sha256='310a35fbccac3af13ebf927297f871ac656b9da1d248b1fe6765affa71b53235')
version('2.7.1', sha256='5cc0a087a81dca1c08368482fb7a92fe2bdd8cfbb22bc0fccfe6c85affb04c8b')
version('2.7', sha256='474f1518d189ae7e318b139fecc1d30b943f124448cfa0f09582ca23e069fa4d')
depends_on('py-setuptools', type='build')
depends_on('py-markupsafe@0.23:', type=('build', 'run'))
depends_on('py-babel@0.8:', type=('build', 'run')) # optional, required for i18n
| Python | 0 |
2b8716f5a1f0e1f147b6bbda3e45e4abec59811d | fix TB in indexing debug toolbar | abilian/services/indexing/debug_toolbar.py | abilian/services/indexing/debug_toolbar.py | # coding=utf-8
"""
"""
from __future__ import absolute_import
from flask import current_app
from flask_debugtoolbar.panels import DebugPanel
from abilian.core.util import fqcn
from abilian.i18n import _
from abilian.web.action import actions
class IndexedTermsDebugPanel(DebugPanel):
"""
A panel to display term values found in index for "current" object
FIXME: this notion of "current" object should formalized in
abilian.app.Application
"""
name = 'IndexedTerms'
@property
def current_obj(self):
return actions.context.get('object')
@property
def has_content(self):
obj = self.current_obj
return (obj is not None
and hasattr(obj, 'object_type')
and hasattr(obj, 'id')
and obj.id is not None)
def nav_title(self):
return _('Indexed Terms')
def nav_subtitle(self):
"""Subtitle showing until title in toolbar"""
obj = self.current_obj
if not obj:
return _(u'No current object')
try:
return u'{}(id={})'.format(obj.__class__.__name__, obj.id)
except:
return u''
def title(self):
return _('Indexed Terms')
def url(self):
return ''
def content(self):
obj = self.current_obj
svc = current_app.services['indexing']
index = svc.app_state.indexes['default']
schema = index.schema
context = self.context.copy()
context['schema'] = schema
context['sorted_fields'] = sorted(schema.names())
adapter = svc.adapted.get(fqcn(obj.__class__))
if adapter and adapter.indexable:
doc = context['current_document'] = svc.get_document(obj, adapter)
indexed = {}
for name, field in schema.items():
value = doc.get(name)
indexed[name] = None
if value and field.analyzer and field.format:
indexed[name] = list(field.process_text(value))
context['current_indexed'] = indexed
context['current_keys'] = sorted(set(doc) | set(indexed))
with index.searcher() as search:
document = search.document(object_key=obj.object_key)
sorted_keys = sorted(document) if document is not None else None
context.update({
'document': document,
'sorted_keys': sorted_keys,
})
jinja_env = current_app.jinja_env
jinja_env.filters.update(self.jinja_env.filters)
template = jinja_env.get_or_select_template(
'debug_panels/indexing_panel.html'
)
return template.render(context)
| # coding=utf-8
"""
"""
from __future__ import absolute_import
from flask import current_app
from flask_debugtoolbar.panels import DebugPanel
from abilian.core.util import fqcn
from abilian.i18n import _
from abilian.web.action import actions
class IndexedTermsDebugPanel(DebugPanel):
"""
A panel to display term values found in index for "current" object
FIXME: this notion of "current" object should formalized in
abilian.app.Application
"""
name = 'IndexedTerms'
@property
def current_obj(self):
return actions.context.get('object')
@property
def has_content(self):
obj = self.current_obj
return (obj is not None
and hasattr(obj, 'object_type')
and hasattr(obj, 'id')
and obj.id is not None)
def nav_title(self):
return _('Indexed Terms')
def nav_subtitle(self):
"""Subtitle showing until title in toolbar"""
obj = self.current_obj
if not obj:
return _(u'No current object')
try:
return u'{}(id={})'.format(obj.__class__.__name__, obj.id)
except:
return u''
def title(self):
return _('Indexed Terms')
def url(self):
return ''
def content(self):
obj = self.current_obj
svc = current_app.services['indexing']
index = svc.app_state.indexes['default']
schema = index.schema
context = self.context.copy()
context['schema'] = schema
context['sorted_fields'] = sorted(schema.names())
adapter = svc.adapted.get(fqcn(obj.__class__))
if adapter and adapter.indexable:
doc = context['current_document'] = svc.get_document(obj, adapter)
indexed = {}
for name, field in schema.items():
value = doc.get(name)
indexed[name] = None
if value and field.format:
indexed[name] = list(field.process_text(value))
context['current_indexed'] = indexed
context['current_keys'] = sorted(set(doc) | set(indexed))
with index.searcher() as search:
document = search.document(object_key=obj.object_key)
sorted_keys = sorted(document) if document is not None else None
context.update({
'document': document,
'sorted_keys': sorted_keys,
})
jinja_env = current_app.jinja_env
jinja_env.filters.update(self.jinja_env.filters)
template = jinja_env.get_or_select_template(
'debug_panels/indexing_panel.html'
)
return template.render(context)
| Python | 0 |
0c9f2f51778b26bb126eccfbef0b098da3db2877 | normalize version numbers | asynchronous_batch_mailings/__openerp__.py | asynchronous_batch_mailings/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of asynchronous_batch_mailings, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# asynchronous_batch_mailings is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# asynchronous_batch_mailings is distributed in the hope
# that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with asynchronous_batch_mailings.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Asynchronous Batch Mailings',
'version': '8.0.1.0.0',
'author': 'ACSONE SA/NV',
'maintainer': 'ACSONE SA/NV',
'website': 'http://www.acsone.eu',
'category': 'Marketing',
'depends': [
'mail',
'connector',
],
'description': """
Asynchronous Batch Mailings
===========================
This module allows to send emails by an asynchronous way.
Moreover it provides a way to split huge mailing.
Two parameters are available:
* the mailing size from which the mailing must become asynchronous
* the batch size
""",
'images': [
],
'data': [
'data/ir_config_parameter_data.xml',
],
'qweb': [
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of asynchronous_batch_mailings, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# asynchronous_batch_mailings is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# asynchronous_batch_mailings is distributed in the hope
# that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with asynchronous_batch_mailings.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Asynchronous Batch Mailings',
'version': '1.0',
'author': 'ACSONE SA/NV',
'maintainer': 'ACSONE SA/NV',
'website': 'http://www.acsone.eu',
'category': 'Marketing',
'depends': [
'mail',
'connector',
],
'description': """
Asynchronous Batch Mailings
===========================
This module allows to send emails by an asynchronous way.
Moreover it provides a way to split huge mailing.
Two parameters are available:
* the mailing size from which the mailing must become asynchronous
* the batch size
""",
'images': [
],
'data': [
'data/ir_config_parameter_data.xml',
],
'qweb': [
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
}
| Python | 0.000673 |
dafc54e782c5ee9bda3cf1817df92ae16ed26979 | fix website url in manifest | attachment_base_synchronize/__openerp__.py | attachment_base_synchronize/__openerp__.py | # coding: utf-8
# @ 2015 Florian DA COSTA @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Attachment Base Synchronize',
'version': '9.0.1.0.0',
'author': 'Akretion,Odoo Community Association (OCA)',
'website': 'http://www.akretion.com/',
'license': 'AGPL-3',
'category': 'Generic Modules',
'depends': [
'base',
'mail',
],
'data': [
'views/attachment_view.xml',
'security/ir.model.access.csv',
'data/cron.xml',
],
'demo': [
'demo/attachment_metadata_demo.xml'
],
'installable': True,
'application': False,
'images': [],
}
| # coding: utf-8
# @ 2015 Florian DA COSTA @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Attachment Base Synchronize',
'version': '9.0.1.0.0',
'author': 'Akretion,Odoo Community Association (OCA)',
'website': 'www.akretion.com',
'license': 'AGPL-3',
'category': 'Generic Modules',
'depends': [
'base',
'mail',
],
'data': [
'views/attachment_view.xml',
'security/ir.model.access.csv',
'data/cron.xml',
],
'demo': [
'demo/attachment_metadata_demo.xml'
],
'installable': True,
'application': False,
'images': [],
}
| Python | 0.000001 |
a007f80dc2182787eca521c84f37aeedc307645a | Remove base64 padding | encrypted_id/__init__.py | encrypted_id/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
from Crypto.Cipher import AES
import base64
import binascii
import struct
from django.conf import settings
from django.db.models import Model
from django.http import Http404
from django.shortcuts import get_object_or_404 as go4
__version__ = "0.1.2"
__license__ = "BSD"
__author__ = "Amit Upadhyay"
__email__ = "upadhyay@gmail.com"
__url__ = "http://amitu.com/encrypted-id/"
__source__ = "https://github.com/amitu/django-encrypted-id"
__docformat__ = "html"
def encode(the_id):
assert 0 <= the_id < 2 ** 64
crc = binascii.crc32(bytes(the_id)) & 0xffffffff
message = struct.pack(b"<IQxxxx", crc, the_id)
assert len(message) == 16
cypher = AES.new(
settings.SECRET_KEY[:24], AES.MODE_CBC,
settings.SECRET_KEY[-16:]
)
return base64.urlsafe_b64encode(cypher.encrypt(message)).replace(b"=", b"")
def decode(e):
if isinstance(e, basestring):
e = bytes(e.encode("ascii"))
try:
padding = (3 - len(e) % 3) * b"="
e = base64.urlsafe_b64decode(e + padding)
except (TypeError, AttributeError):
raise ValueError("Failed to decrypt, invalid input.")
for skey in getattr(settings, "SECRET_KEYS", [settings.SECRET_KEY]):
cypher = AES.new(skey[:24], AES.MODE_CBC, skey[-16:])
msg = cypher.decrypt(e)
crc, the_id = struct.unpack("<IQxxxx", msg)
if crc != binascii.crc32(bytes(the_id)) & 0xffffffff:
continue
return the_id
raise ValueError("Failed to decrypt, CRC never matched.")
def get_object_or_404(m, ekey, *arg, **kw):
try:
pk = decode(ekey)
except ValueError:
raise Http404
return go4(m, id=pk, *arg, **kw)
def ekey(instance):
assert isinstance(instance, Model)
return encode(instance.id)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
from Crypto.Cipher import AES
import base64
import binascii
import struct
from django.conf import settings
from django.db.models import Model
from django.http import Http404
from django.shortcuts import get_object_or_404 as go4
__version__ = "0.1.2"
__license__ = "BSD"
__author__ = "Amit Upadhyay"
__email__ = "upadhyay@gmail.com"
__url__ = "http://amitu.com/encrypted-id/"
__source__ = "https://github.com/amitu/django-encrypted-id"
__docformat__ = "html"
def encode(the_id):
assert 0 <= the_id < 2 ** 64
crc = binascii.crc32(bytes(the_id)) & 0xffffffff
message = struct.pack(b"<IQxxxx", crc, the_id)
assert len(message) == 16
cypher = AES.new(
settings.SECRET_KEY[:24], AES.MODE_CBC,
settings.SECRET_KEY[-16:]
)
return base64.urlsafe_b64encode(cypher.encrypt(message)).replace(b"=", b".")
def decode(e):
if isinstance(e, basestring):
e = bytes(e.encode("ascii"))
try:
e = base64.urlsafe_b64decode(e.replace(b".", b"="))
except (TypeError, AttributeError):
raise ValueError("Failed to decrypt, invalid input.")
for skey in getattr(settings, "SECRET_KEYS", [settings.SECRET_KEY]):
cypher = AES.new(skey[:24], AES.MODE_CBC, skey[-16:])
msg = cypher.decrypt(e)
crc, the_id = struct.unpack("<IQxxxx", msg)
if crc != binascii.crc32(bytes(the_id)) & 0xffffffff:
continue
return the_id
raise ValueError("Failed to decrypt, CRC never matched.")
def get_object_or_404(m, ekey, *arg, **kw):
try:
pk = decode(ekey)
except ValueError:
raise Http404
return go4(m, id=pk, *arg, **kw)
def ekey(instance):
assert isinstance(instance, Model)
return encode(instance.id)
| Python | 0.000001 |
9be80df72954c05193fc6ded0998b28de182a699 | Add _validate method to Client.auth interface. | objectrocket/auth.py | objectrocket/auth.py | """Authentication operations."""
import logging
import requests
from objectrocket import bases
from objectrocket import errors
logger = logging.getLogger(__name__)
class Auth(bases.BaseAuthLayer):
"""Authentication operations.
:param objectrocket.client.Client base_client: An objectrocket.client.Client instance.
"""
def __init__(self, base_client):
self.__username = None
self.__password = None
self.__token = None
super(Auth, self).__init__(base_client=base_client)
#####################
# Public interface. #
#####################
def authenticate(self, username, password):
"""Authenticate against the ObjectRocket API.
:param str username: The username to perform basic authentication against the API with.
:param str password: The password to perform basic authentication against the API with.
:returns: A token used for authentication against token protected resources.
:rtype: str
"""
# Update the username and password bound to this instance for re-authentication needs.
self._username = username
self._password = password
# Attempt to authenticate.
resp = requests.get(
self._url,
auth=(username, password),
**self._default_request_kwargs
)
# Attempt to extract authentication data.
try:
if resp.status_code == 200:
json_data = resp.json()
token = json_data['data']['token']
elif resp.status_code == 401:
raise errors.AuthFailure(resp.json().get('message', 'Authentication Failure.'))
else:
raise errors.AuthFailure(
"Unknown exception while authenticating: '{}'".format(resp.text)
)
except errors.AuthFailure:
raise
except Exception as ex:
logging.exception(ex)
raise errors.AuthFailure('{}: {}'.format(ex.__class__.__name__, ex))
# Update the token bound to this instance for use by other client operations layers.
self._token = token
logger.info('New API token received: "{}".'.format(token))
return token
######################
# Private interface. #
######################
@property
def _default_request_kwargs(self):
"""The default request keyword arguments to be passed to the requests library."""
return super(Auth, self)._default_request_kwargs
@property
def _password(self):
"""The password currently being used for authentication."""
return self.__password
@_password.setter
def _password(self, new_password):
"""Update the password to be used for authentication."""
self.__password = new_password
def _refresh(self):
"""Refresh the API token using the currently bound credentials.
This is simply a convenience method to be invoked automatically if authentication fails
during normal client use.
"""
# Request and set a new API token.
new_token = self.authenticate(self._username, self._password)
self._token = new_token
logger.info('New API token received: "{}".'.format(new_token))
return self._token
@property
def _token(self):
"""The API token this instance is currently using."""
return self.__token
@_token.setter
def _token(self, new_token):
"""Update the API token which this instance is to use."""
self.__token = new_token
return self.__token
@property
def _url(self):
"""The base URL for authentication operations."""
return self._client._url + 'tokens/'
@property
def _username(self):
"""The username currently being used for authentication."""
return self.__username
@_username.setter
def _username(self, new_username):
"""Update the username to be used for authentication."""
self.__username = new_username
def _verify(self, token):
"""Verify that the given token is valid.
:param str token: The API token to verify.
:returns: The token's corresponding user model as a dict, or None if invalid.
:rtype: dict
"""
# Attempt to authenticate.
url = '{}{}'.format(self._url, 'verify')
resp = requests.post(
url,
json={'token': token},
**self._default_request_kwargs
)
if resp.status_code == 200:
return resp.json().get('data', None)
return None
| """Authentication operations."""
import logging
import requests
from objectrocket import bases
from objectrocket import errors
logger = logging.getLogger(__name__)
class Auth(bases.BaseAuthLayer):
"""Authentication operations.
:param objectrocket.client.Client base_client: An objectrocket.client.Client instance.
"""
def __init__(self, base_client):
self.__username = None
self.__password = None
self.__token = None
super(Auth, self).__init__(base_client=base_client)
#####################
# Public interface. #
#####################
def authenticate(self, username, password):
"""Authenticate against the ObjectRocket API.
:param str username: The username to perform basic authentication against the API with.
:param str password: The password to perform basic authentication against the API with.
:returns: A token used for authentication against token protected resources.
:rtype: str
"""
# Update the username and password bound to this instance for re-authentication needs.
self._username = username
self._password = password
# Attempt to authenticate.
resp = requests.get(
self._url,
auth=(username, password),
**self._default_request_kwargs
)
# Attempt to extract authentication data.
try:
if resp.status_code == 200:
json_data = resp.json()
token = json_data['data']['token']
elif resp.status_code == 401:
raise errors.AuthFailure(resp.json().get('message', 'Authentication Failure.'))
else:
raise errors.AuthFailure(
"Unknown exception while authenticating: '{}'".format(resp.text)
)
except errors.AuthFailure:
raise
except Exception as ex:
logging.exception(ex)
raise errors.AuthFailure('{}: {}'.format(ex.__class__.__name__, ex))
# Update the token bound to this instance for use by other client operations layers.
self._token = token
logger.info('New API token received: "{}".'.format(token))
return token
######################
# Private interface. #
######################
@property
def _default_request_kwargs(self):
"""The default request keyword arguments to be passed to the requests library."""
return super(Auth, self)._default_request_kwargs
@property
def _password(self):
"""The password currently being used for authentication."""
return self.__password
@_password.setter
def _password(self, new_password):
"""Update the password to be used for authentication."""
self.__password = new_password
def _refresh(self):
"""Refresh the API token using the currently bound credentials.
This is simply a convenience method to be invoked automatically if authentication fails
during normal client use.
"""
# Request and set a new API token.
new_token = self.authenticate(self._username, self._password)
self._token = new_token
logger.info('New API token received: "{}".'.format(new_token))
return self._token
    @property
    def _token(self):
        """The API token this instance is currently using."""
        return self.__token
    @_token.setter
    def _token(self, new_token):
        """Update the API token which this instance is to use."""
        # NOTE(review): Python discards the return value of a property setter,
        # so the trailing ``return`` below has no effect.
        self.__token = new_token
        return self.__token
    @property
    def _url(self):
        """The base URL for authentication operations.

        Built from the owning client's base URL plus the ``tokens/`` resource.
        """
        return self._client._url + 'tokens/'
    @property
    def _username(self):
        """The username currently being used for authentication."""
        # Stored name-mangled (self.__username), mirroring the password handling.
        return self.__username
    @_username.setter
    def _username(self, new_username):
        """Update the username to be used for authentication."""
        self.__username = new_username
| Python | 0 |
c42ffe540b30da5dbf29d557a01503ecad246afb | Fix changelog for newer OSes | etc/scripts/changelog.py | etc/scripts/changelog.py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Compiler Explorer Authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import subprocess
# Maps HTML-special characters to their named entities.  The archived copy of
# this file had the entity values decoded back to the raw characters (which is
# not even valid Python for the double-quote entry); the mapping below restores
# the intended character -> entity table.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
    ">": "&gt;",
    "<": "&lt;",
}
# One changelog row; format slots are: repo base URL, commit hash (twice used
# as link target and text), escaped commit message.
commit_template = '''    <div class="row commit-entry">
        <div class="col-sm-12">
            <a href="{}commit/{}" rel="noreferrer noopener" target="_blank">{}</a>
        </div>
    </div>
'''


def html_escape(text):
    """Return *text* with HTML-special characters replaced by named entities."""
    return "".join(html_escape_table.get(c, c) for c in text)
def format_commit(url, commit):
    """Return one HTML changelog row for *commit*, or '' if it cannot be parsed.

    *commit* is one ``git log --oneline`` line: "<hash> <description>".
    """
    # Input format is "<hash> <description>", so split only on the first space and escape the commit message
    grouped_commit = commit.split(' ', 1)
    # NOTE(review): this print looks like leftover debug output -- consider
    # removing it so the script runs quietly.
    print(grouped_commit)
    try:
        return commit_template.format(url, grouped_commit[0], html_escape(grouped_commit[1]))
    except Exception as e:
        # A commit line with no description (nothing after the hash) lands here
        # via IndexError on grouped_commit[1]; the row is simply skipped.
        print('There was an error in changelog.py: {}'.format(e))
        return ''
def get_commits(repo):
    """Write static/changelog.html listing recent PR-referencing commits.

    *repo* is the base repository URL that commit hashes are linked under.
    """
    # --grep=(#[0-9]*) keeps only commits whose message references a PR/issue
    # number; check_output() returns bytes, hence the explicit decode.
    coms = subprocess.check_output(['git', 'log', '--date=local', '--after="3 months ago"', '--grep=(#[0-9]*)', '--oneline']).decode('utf-8')
    with open('static/changelog.html', 'w') as f:
        f.write('<div class="commits-list">\n')
        for commit in coms.splitlines():
            f.write(format_commit(repo, commit))
        f.write('</div>\n')
if __name__ == '__main__':
get_commits('https://github.com/compiler-explorer/compiler-explorer/')
| # -*- coding: utf-8 -*-
# Copyright (c) 2020, Compiler Explorer Authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import subprocess
# Maps HTML-special characters to their named entities.  The archived copy of
# this file had the entity values decoded back to the raw characters (an
# invalid dict literal for the double-quote entry); the mapping below restores
# the intended character -> entity table.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
    ">": "&gt;",
    "<": "&lt;",
}
# One changelog row; format slots: repo base URL, commit hash, escaped message.
commit_template = '    <div class="row commit-entry">\n' \
                  '        <div class="col-sm-12">\n' \
                  '            <a href="{}commit/{}" rel="noreferrer noopener" target="_blank">{}</a>\n' \
                  '        </div>\n' \
                  '    </div>\n'


def html_escape(text):
    """Return *text* with HTML-special characters replaced by named entities."""
    return "".join(html_escape_table.get(c, c) for c in text)
def format_commit(url, commit):
    """Return one HTML changelog row for *commit*, or '' if it cannot be parsed.

    *commit* is one ``git log --oneline`` line: "<hash> <description>".
    """
    # Input format is "<hash> <description>", so split only on the first space and escape the commit message
    grouped_commit = commit.split(' ', 1)
    # NOTE(review): this print looks like leftover debug output.
    print(grouped_commit)
    try:
        return commit_template.format(url, grouped_commit[0], html_escape(grouped_commit[1]))
    except Exception as e:
        # A commit line with no description lands here via IndexError on
        # grouped_commit[1]; the row is simply skipped.
        print('There was an error in changelog.py: {}'.format(e))
        return ''
def get_commits(repo):
    """Write static/changelog.html listing recent PR-referencing commits.

    *repo* is the base repository URL that commit hashes are linked under.
    """
    # check_output() returns *bytes*; decode so that the downstream
    # commit.split(' ', 1) in format_commit() receives str.  Without the
    # decode, Python 3 raises TypeError (bytes split by a str separator),
    # format_commit() swallows it, and the changelog comes out empty.
    coms = subprocess.check_output(['git', 'log', '--date=local', '--after="3 months ago"', '--grep=(#[0-9]*)', '--oneline']).decode('utf-8')
    with open('static/changelog.html', 'w') as f:
        f.write('<div class="commits-list">\n')
        for commit in coms.splitlines():
            f.write(format_commit(repo, commit))
        f.write('</div>\n')
if __name__ == '__main__':
get_commits('https://github.com/compiler-explorer/compiler-explorer/')
| Python | 0.000001 |
391d69f4ce485ff02a3844b4cf5a54f23125c477 | test presab | partBreaker.py | partBreaker.py | #!/usr/bin/env python
import argparse
import Get_fasta_from_Ref as GFR
import re
from sys import argv
import os
# Presence/absence table filled in by Subsetfromto(): sequence ID -> list with
# one 0/1 flag per call (0 = slice is all gap/missing characters, 1 = data).
presab = {}


def Subsetfromto(FastaDict, outFile, start, end):
    """Write a subset multifasta file, bounded at sequence indices *start* and
    *end*, from sequences stored in a dictionary, and record presence/absence.

    Every sequence's slice is written to *outFile*; the module-level ``presab``
    dict gets a 0 appended for IDs whose slice consists only of '-'/'?'
    characters, and a 1 otherwise.
    """
    with open(outFile, 'w') as out:
        # Iterating the dict directly yields its keys on both Python 2 and 3
        # (the original used the Python-2-only iterkeys()).
        for seqID in FastaDict:
            presab[seqID] = []
            seq = FastaDict[seqID][start:end]
            out.write(">%s\n%s\n" % (seqID, seq))
            # Original code was ``set(seq) in set('-', '?')``, which raises
            # TypeError (set() takes a single iterable) and then appended to
            # the undefined name ``seqId``.  The intent: absent (0) when the
            # slice is made up solely of gap/missing characters.
            if set(seq) <= {'-', '?'}:
                presab[seqID].append(0)
            else:
                presab[seqID].append(1)
def main(matrix, partfile, outdir):
    """Split the supermatrix *matrix* into per-partition fasta files in *outdir*.

    Each *partfile* line looks like "name,start-end"; ``name`` becomes the
    output file name.  ``start`` is shifted by -1, i.e. the file coordinates
    are treated as 1-based while Python slicing is 0-based/end-exclusive.
    """
    Smatrix=GFR.Fasta_to_Dict(matrix)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    else:
        print 'The output dir already exist!'
    with open(partfile, 'r') as P:
        # NOTE(review): blank or malformed lines in the partition file would
        # raise here (IndexError/ValueError) -- confirm inputs are clean.
        for pline in P:
            outN=pline.split(',')[0]
            outf="%s/%s" %(outdir,outN)
            start=int(pline.split(',')[1].split('-')[0]) -1
            end=int(pline.split(',')[1].split('-')[1])
            Subsetfromto(Smatrix, outf, start, end)
    # Dump the presence/absence table collected by Subsetfromto.
    print presab
if __name__ == "__main__":
    # Typos fixed in user-facing help text ("fisrt" -> "first",
    # "alignemnets" -> "alignments"); dead commented-out '-c' option removed.
    parser = argparse.ArgumentParser(description='This is a simple script for breaking supermatrices in individual MSA based on a partition file. The required partition file is a two column comma separated value text file where the first column indicates the name of partition, recycled to be used as the name of the output file, and the second column is an interval of the positions in the supermatrix, separated only by "-". This script deals only with consecutive data blocks. Codon partitioning is not implemented... yet.')
    parser.add_argument('-in', dest = 'matrix', type = str, help = 'Input alignments in fasta format')
    parser.add_argument('-p', dest = 'partitions', type =str, help = 'Input partition definition file: a comma separated text file with two columns, ')
    parser.add_argument('-o', dest = 'outdir', help='Specify directory where to write partitions')
    args = parser.parse_args()
    main(args.matrix, args.partitions, args.outdir)
| #!/usr/bin/env python
import argparse
import Get_fasta_from_Ref as GFR
import re
from sys import argv
import os
def Subsetfromto(FastaDict, outFile, start,end):
    """Write a subset multifasta file, bounded at sequence indices *start* and
    *end*, from sequences stored in a dictionary (ID -> sequence string).
    """
    with open(outFile, 'w') as out:
        # iterkeys() is Python-2 only; this file uses print statements, so it
        # targets Python 2.
        for seqID in FastaDict.iterkeys():
            seq=FastaDict[seqID][start:end]
            out.write(">%s\n%s\n" %(seqID,seq))
def main(matrix, partfile, outdir):
    """Split the supermatrix *matrix* into per-partition fasta files in *outdir*.

    Each *partfile* line looks like "name,start-end"; ``name`` becomes the
    output file name.  ``start`` is shifted by -1, i.e. the file coordinates
    are treated as 1-based while Python slicing is 0-based/end-exclusive.
    """
    Smatrix=GFR.Fasta_to_Dict(matrix)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    else:
        print 'The output dir already exist!'
    with open(partfile, 'r') as P:
        # NOTE(review): blank or malformed lines in the partition file would
        # raise here (IndexError/ValueError) -- confirm inputs are clean.
        for pline in P:
            outN=pline.split(',')[0]
            outf="%s/%s" %(outdir,outN)
            start=int(pline.split(',')[1].split('-')[0]) -1
            end=int(pline.split(',')[1].split('-')[1])
            Subsetfromto(Smatrix, outf, start, end)
if __name__ == "__main__":
    # Typos fixed in user-facing help text ("fisrt" -> "first",
    # "alignemnets" -> "alignments", "partiotion" -> "partition" -- the last
    # matching the corrected wording used by the newer copy of this script);
    # dead commented-out '-c' option removed.
    parser = argparse.ArgumentParser(description='This is a simple script for breaking supermatrices in individual MSA based on a partition file. The required partition file is a two column comma separated value text file where the first column indicates the name of partition, recycled to be used as the name of the output file, and the second column is an interval of the positions in the supermatrix, separated only by "-". This script deals only with consecutive data blocks. Codon partitioning is not implemented... yet.')
    parser.add_argument('-in', dest = 'matrix', type = str, help = 'Input alignments in fasta format')
    parser.add_argument('-p', dest = 'partitions', type =str, help = 'Input partition definition file: a comma separated text file with two columns, ')
    parser.add_argument('-o', dest = 'outdir', help='Specify directory where to write partitions')
    args = parser.parse_args()
    main(args.matrix, args.partitions, args.outdir)
| Python | 0.000001 |
7399dfa45c9b5a563798f504e9eb4054faf2aa30 | print a more meaningful description of EventAct | open_municipio/events/models.py | open_municipio/events/models.py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from open_municipio.acts.models import Act
from open_municipio.events.managers import EventManager
from open_municipio.people.models import Institution
from datetime import datetime, date
class Event(models.Model):
    """
    This class allows OpenMunicipio site to keep track of upcoming
    events.

    Fields:

    * A datefield, no time is needed
    * A foreign key to the ``Institution`` that will "host" the event;
      eg: council or city government
    * A foreign key to the involved ``Act``
    * A textfield for some description

    Since we will always be interested in future events (with regard
    to current date), a custom model manager is provided that allows
    ``Event.future.all()``.
    """
    date = models.DateField(_("Event date"), help_text=_("The day when the event is going to be held"))
    event_time = models.TimeField(_("Event time"), blank=True, null=True, help_text=_("The time of the event"))
    institution = models.ForeignKey(Institution, verbose_name=_("Institution"), help_text=_("The institution that's going to meet during the event"))
    # Acts are attached through the ``EventAct`` intermediate model, which also
    # records the discussion order.
    acts = models.ManyToManyField(Act, verbose_name=_("Acts"), blank=True, null=True, help_text=_("Acts the discussion is linked to, if any"),through="EventAct")
    title = models.CharField(_("Title"), max_length=128, blank=True, null=True, help_text=_("A short title for this event"))
    description = models.TextField(_("Description"), blank=True, null=True, help_text=_("A description, containing the list of things that will be discussed during this event"))
    address = models.CharField(_("Address"), max_length=128, blank=True, null=True, help_text=_("The physical address where the meeting is going to be held") )
    # The default manager
    objects = models.Manager()
    # Future events will be retrieved using ``Event.future.all()``
    future = EventManager()
    class Meta:
        verbose_name = _('event')
        verbose_name_plural = _('events')
    def __unicode__(self):
        uc = u'%s %s - %s' % (self.date, self.event_time, self.title)
        return uc
    @property
    def is_past_due(self):
        """True once the event's day is strictly earlier than today."""
        # ``date`` here is ``datetime.date`` from the module-level import;
        # ``self.date`` is the model field above.
        if date.today() > self.date:
            return True
        return False
class EventAct(models.Model):
    """
    Intermediate model linking an ``Act`` to the ``Event`` where it is
    discussed, recording the position (``order``) of the act within the event.
    """
    act = models.ForeignKey(Act)
    event = models.ForeignKey(Event)
    order = models.IntegerField(blank=False,null=False)
    class Meta:
        ordering = ('order',)
        # the constraint below would be helpful, but it make the interface validation
        # hard to manage -FS
        # unique_together = ('order','event'),('act','event')
    def __unicode__(self):
        return "%s (%s)" % (self.act.title, self.event.date)
| from django.db import models
from django.utils.translation import ugettext_lazy as _
from open_municipio.acts.models import Act
from open_municipio.events.managers import EventManager
from open_municipio.people.models import Institution
from datetime import datetime, date
class Event(models.Model):
    """
    This class allows OpenMunicipio site to keep track of upcoming
    events.

    Fields:

    * A datefield, no time is needed
    * A foreign key to the ``Institution`` that will "host" the event;
      eg: council or city government
    * A foreign key to the involved ``Act``
    * A textfield for some description

    Since we will always be interested in future events (with regard
    to current date), a custom model manager is provided that allows
    ``Event.future.all()``.
    """
    date = models.DateField(_("Event date"), help_text=_("The day when the event is going to be held"))
    event_time = models.TimeField(_("Event time"), blank=True, null=True, help_text=_("The time of the event"))
    institution = models.ForeignKey(Institution, verbose_name=_("Institution"), help_text=_("The institution that's going to meet during the event"))
    # Acts are attached through the ``EventAct`` intermediate model, which also
    # records the discussion order.
    acts = models.ManyToManyField(Act, verbose_name=_("Acts"), blank=True, null=True, help_text=_("Acts the discussion is linked to, if any"),through="EventAct")
    title = models.CharField(_("Title"), max_length=128, blank=True, null=True, help_text=_("A short title for this event"))
    description = models.TextField(_("Description"), blank=True, null=True, help_text=_("A description, containing the list of things that will be discussed during this event"))
    address = models.CharField(_("Address"), max_length=128, blank=True, null=True, help_text=_("The physical address where the meeting is going to be held") )
    # The default manager
    objects = models.Manager()
    # Future events will be retrieved using ``Event.future.all()``
    future = EventManager()
    class Meta:
        verbose_name = _('event')
        verbose_name_plural = _('events')
    def __unicode__(self):
        uc = u'%s %s - %s' % (self.date, self.event_time, self.title)
        return uc
    @property
    def is_past_due(self):
        """True once the event's day is strictly earlier than today."""
        # ``date`` here is ``datetime.date`` from the module-level import;
        # ``self.date`` is the model field above.
        if date.today() > self.date:
            return True
        return False
class EventAct(models.Model):
    """
    Intermediate model linking an ``Act`` to the ``Event`` where it is
    discussed, recording the position (``order``) of the act within the event.
    """
    act = models.ForeignKey(Act)
    event = models.ForeignKey(Event)
    order = models.IntegerField(blank=False,null=False)
    class Meta:
        ordering = ('order',)
        # the constraint below would be helpful, but it make the interface validation
        # hard to manage -FS
        # unique_together = ('order','event'),('act','event')
| Python | 0.999999 |
a9373c3e4c65160bc04e56edbc356e086d2dae71 | Tweak division display | opencivicdata/admin/division.py | opencivicdata/admin/division.py | from django.contrib import admin
from opencivicdata.models import division as models
@admin.register(models.Division)
class DivisionAdmin(admin.ModelAdmin):
    """Admin for Division: list and search by display name and id."""
    # ``search_fields`` deliberately reuses the same tuple as ``list_display``.
    list_display = ('display_name', 'id')
    search_fields = list_display
| from django.contrib import admin
from opencivicdata.models import division as models
@admin.register(models.Division)
class DivisionAdmin(admin.ModelAdmin):
    """Default admin for Division; registration only, no customisation."""
    pass
| Python | 0 |
21c2daf95e7352932346dec2c570cfefce867ed1 | Add some convenience properties to Node | odlclient/v2/node.py | odlclient/v2/node.py | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import time
from odlclient.openstack.common.apiclient import base
from odlclient.v2.base import Manager
class Node(base.Resource):
    """A switch/node resource as returned by the switch-manager REST API."""
    @property
    def id(self):
        """The node's identifier, taken from the nested ``node`` mapping."""
        return self.node['id']
    @property
    def type(self):
        """The node's type, taken from the nested ``node`` mapping."""
        return self.node['type']
    @property
    def description(self):
        """The node's description property, or ``None``.

        The API encodes a missing description as the literal string 'None',
        which is translated back to a real ``None`` here.
        """
        data = self._info['properties']['description']['value']
        return None if data == 'None' else data
    @property
    def connected_since(self):
        """The node's connection timestamp as a naive ``datetime``.

        The raw value is divided by 1000 before conversion, so it is presumably
        milliseconds since the epoch -- TODO confirm against the controller
        API.  NOTE(review): the gmtime()/mktime() round-trip mixes UTC and
        local-time conversions; verify the resulting wall-clock time is the
        intended one.
        """
        data = self._info['properties']['timeStamp']['value']
        return datetime.fromtimestamp(time.mktime(time.gmtime(data / 1000)))
class NodeConnector(base.Resource):
    """A node-connector resource; attributes are populated from the API payload by ``base.Resource``."""
    pass
class NodeManager(Manager):
    """REST operations for switch-manager nodes and their connectors."""
    base = 'controller/nb/v2/switchmanager'
    has_container = True
    resource_class = Node
    def list(self, container=None):
        """Return all known nodes with their properties."""
        url = self._url('nodes', container=container)
        return self._list(url, response_key='nodeProperties')
    def save(self, container=None):
        """POST to the nodes resource -- presumably persists the current node
        configuration on the controller; confirm against the ODL API."""
        url = self._url('nodes', container=container)
        self._post(url)
    def list_connectors(self, node_type, node_id, container=None):
        """Return the connectors attached to the given node."""
        url = self._url('node', node_type, node_id, container=container)
        return self._list(url, response_key='nodeConnectorProperties',
                          obj_class=NodeConnector)
    def create_property(self, node_type, node_id, name, value, container=None):
        """Set property *name* = *value* on the given node."""
        url = self._url(
            'node', node_type, node_id, 'property', name, value,
            container=container)
        self._put(url, value)
    def delete_property(self, node_type, node_id, name, value, container=None):
        """Remove property *name* from the given node."""
        url = self._url(
            'node', node_type, node_id, 'property', name, value,
            container=container)
        self._delete(url)
    def create_connector_property(self, node_type, node_id, connector_type,
                                  connector_name, name, value, container=None):
        """Set a property on a node connector.

        NOTE(review): ``connector_type`` is accepted but never used when
        building the URL -- confirm whether it belongs in the path.
        """
        url = self._url('nodeconnector', node_type, node_id, connector_name,
                        'property', name, value, container=container)
        self._put(url, value)
    def delete_connector_property(self, node_type, node_id, connector_type,
                                  connector_name, name, value, container=None):
        """Remove a property from a node connector.

        NOTE(review): unlike delete_property(), this passes ``value`` to
        _delete(), and ``connector_type`` is unused -- verify both against the
        base Manager signature and the ODL API.
        """
        url = self._url('nodeconnector', node_type, node_id, connector_name,
                        'property', name, value, container=container)
        self._delete(url, value)
| # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from odlclient.openstack.common.apiclient import base
from odlclient.v2.base import Manager
class Node(base.Resource):
    """A switch/node resource; ``id`` and ``type`` come from the nested ``node`` mapping."""
    @property
    def id(self):
        """The node's identifier."""
        return self.node['id']
    @property
    def type(self):
        """The node's type."""
        return self.node['type']
class NodeConnector(base.Resource):
    """A node-connector resource; attributes are populated from the API payload by ``base.Resource``."""
    pass
class NodeManager(Manager):
    """REST operations for switch-manager nodes and their connectors."""
    base = 'controller/nb/v2/switchmanager'
    has_container = True
    resource_class = Node
    def list(self, container=None):
        """Return all known nodes with their properties."""
        url = self._url('nodes', container=container)
        return self._list(url, response_key='nodeProperties')
    def save(self, container=None):
        """POST to the nodes resource -- presumably persists the current node
        configuration on the controller; confirm against the ODL API."""
        url = self._url('nodes', container=container)
        self._post(url)
    def list_connectors(self, node_type, node_id, container=None):
        """Return the connectors attached to the given node."""
        url = self._url('node', node_type, node_id, container=container)
        return self._list(url, response_key='nodeConnectorProperties',
                          obj_class=NodeConnector)
    def create_property(self, node_type, node_id, name, value, container=None):
        """Set property *name* = *value* on the given node."""
        url = self._url(
            'node', node_type, node_id, 'property', name, value,
            container=container)
        self._put(url, value)
    def delete_property(self, node_type, node_id, name, value, container=None):
        """Remove property *name* from the given node."""
        url = self._url(
            'node', node_type, node_id, 'property', name, value,
            container=container)
        self._delete(url)
    def create_connector_property(self, node_type, node_id, connector_type,
                                  connector_name, name, value, container=None):
        """Set a property on a node connector.

        NOTE(review): ``connector_type`` is accepted but never used when
        building the URL -- confirm whether it belongs in the path.
        """
        url = self._url('nodeconnector', node_type, node_id, connector_name,
                        'property', name, value, container=container)
        self._put(url, value)
    def delete_connector_property(self, node_type, node_id, connector_type,
                                  connector_name, name, value, container=None):
        """Remove a property from a node connector.

        NOTE(review): unlike delete_property(), this passes ``value`` to
        _delete(), and ``connector_type`` is unused -- verify both against the
        base Manager signature and the ODL API.
        """
        url = self._url('nodeconnector', node_type, node_id, connector_name,
                        'property', name, value, container=container)
        self._delete(url, value)
| Python | 0 |
b6f698f5fd6faf90b36bbb560ba4df13192cff42 | Update _settings.py | templates/root/appfiles/_settings.py | templates/root/appfiles/_settings.py | """
Django settings for template project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<%= secret_key %>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'<%= appName %>',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = '<%= projectName %>.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '<%= projectName %>.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| """
Django settings for template project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<%= secret_key %>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'<%= appName %>',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = '<%= projectName %>.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '<%= projectName %>.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| Python | 0 |
281eda574c6ed3d0d9b333b67f53a13ea3c17398 | Remove `tfds.core.builder_from_directory` alias | tensorflow_datasets/core/__init__.py | tensorflow_datasets/core/__init__.py | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API to define datasets."""
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
from tensorflow_datasets.core import tf_compat
tf_compat.ensure_tf_install()
# pylint:disable=g-import-not-at-top
from tensorflow_datasets.core import community # pylint: disable=g-bad-import-order
from tensorflow_datasets.core.dataset_builder import BeamBasedBuilder
from tensorflow_datasets.core.dataset_builder import BuilderConfig
from tensorflow_datasets.core.dataset_builder import DatasetBuilder
from tensorflow_datasets.core.dataset_builder import GeneratorBasedBuilder
from tensorflow_datasets.core.dataset_info import BeamMetadataDict
from tensorflow_datasets.core.dataset_info import DatasetInfo
from tensorflow_datasets.core.dataset_info import Metadata
from tensorflow_datasets.core.dataset_info import MetadataDict
from tensorflow_datasets.core.file_adapters import FileFormat
from tensorflow_datasets.core.lazy_imports_lib import lazy_imports
from tensorflow_datasets.core.naming import ShardedFileTemplate
from tensorflow_datasets.core.registered import DatasetNotFoundError
from tensorflow_datasets.core.split_builder import SplitGeneratorLegacy as SplitGenerator
from tensorflow_datasets.core.splits import ReadInstruction
from tensorflow_datasets.core.splits import Split
from tensorflow_datasets.core.splits import SplitDict
from tensorflow_datasets.core.splits import SplitInfo
from tensorflow_datasets.core.splits import SubSplitInfo
from tensorflow_datasets.core.utils import Experiment
from tensorflow_datasets.core.utils import gcs_path
from tensorflow_datasets.core.utils import tfds_path
from tensorflow_datasets.core.utils import Version
from tensorflow_datasets.core.utils.benchmark import BenchmarkResult
from tensorflow_datasets.core.utils.file_utils import add_data_dir
from tensorflow_datasets.core.utils.generic_path import as_path
from tensorflow_datasets.core.utils.type_utils import PathLike
from tensorflow_datasets.core.utils.type_utils import ReadOnlyPath
from tensorflow_datasets.core.utils.type_utils import ReadWritePath
def benchmark(*args, **kwargs):
    """Removed alias; callers must switch to `tfds.benchmark`.

    Always raises, regardless of arguments, so stale call sites fail fast.
    """
    message = "`tfds.core.benchmark` has been renamed to `tfds.benchmark`"
    raise DeprecationWarning(message)
__all__ = [
"add_data_dir",
"as_path",
"BenchmarkResult",
"BeamBasedBuilder",
"BeamMetadataDict",
"BuilderConfig",
"DatasetBuilder",
"DatasetInfo",
"DatasetNotFoundError",
"Experiment",
"FileFormat",
"GeneratorBasedBuilder",
"gcs_path",
"lazy_imports",
"Metadata",
"MetadataDict",
"PathLike",
"ReadInstruction",
"ReadOnlyPath",
"ReadWritePath",
"ShardedFileTemplate",
"SplitDict",
"SplitGenerator",
"SplitInfo",
"tfds_path",
"Version",
]
| # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API to define datasets."""
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
from tensorflow_datasets.core import tf_compat
tf_compat.ensure_tf_install()
# pylint:disable=g-import-not-at-top
from tensorflow_datasets.core import community # pylint: disable=g-bad-import-order
from tensorflow_datasets.core.dataset_builder import BeamBasedBuilder
from tensorflow_datasets.core.dataset_builder import BuilderConfig
from tensorflow_datasets.core.dataset_builder import DatasetBuilder
from tensorflow_datasets.core.dataset_builder import GeneratorBasedBuilder
from tensorflow_datasets.core.dataset_info import BeamMetadataDict
from tensorflow_datasets.core.dataset_info import DatasetInfo
from tensorflow_datasets.core.dataset_info import Metadata
from tensorflow_datasets.core.dataset_info import MetadataDict
from tensorflow_datasets.core.file_adapters import FileFormat
from tensorflow_datasets.core.lazy_imports_lib import lazy_imports
from tensorflow_datasets.core.naming import ShardedFileTemplate
from tensorflow_datasets.core.read_only_builder import builder_from_directory
from tensorflow_datasets.core.registered import DatasetNotFoundError
from tensorflow_datasets.core.split_builder import SplitGeneratorLegacy as SplitGenerator
from tensorflow_datasets.core.splits import ReadInstruction
from tensorflow_datasets.core.splits import Split
from tensorflow_datasets.core.splits import SplitDict
from tensorflow_datasets.core.splits import SplitInfo
from tensorflow_datasets.core.splits import SubSplitInfo
from tensorflow_datasets.core.utils import Experiment
from tensorflow_datasets.core.utils import gcs_path
from tensorflow_datasets.core.utils import tfds_path
from tensorflow_datasets.core.utils import Version
from tensorflow_datasets.core.utils.benchmark import BenchmarkResult
from tensorflow_datasets.core.utils.file_utils import add_data_dir
from tensorflow_datasets.core.utils.generic_path import as_path
from tensorflow_datasets.core.utils.type_utils import PathLike
from tensorflow_datasets.core.utils.type_utils import ReadOnlyPath
from tensorflow_datasets.core.utils.type_utils import ReadWritePath
def benchmark(*args, **kwargs):
raise DeprecationWarning(
"`tfds.core.benchmark` has been renamed to `tfds.benchmark`")
__all__ = [
"add_data_dir",
"as_path",
"BenchmarkResult",
"BeamBasedBuilder",
"BeamMetadataDict",
"BuilderConfig",
"builder_from_directory",
"DatasetBuilder",
"DatasetInfo",
"DatasetNotFoundError",
"Experiment",
"FileFormat",
"GeneratorBasedBuilder",
"gcs_path",
"lazy_imports",
"Metadata",
"MetadataDict",
"PathLike",
"ReadInstruction",
"ReadOnlyPath",
"ReadWritePath",
"ShardedFileTemplate",
"SplitDict",
"SplitGenerator",
"SplitInfo",
"tfds_path",
"Version",
]
| Python | 0 |
a39cbaf22401c466f02e5b12e3ebdd46fa8eef0c | Fix issue refs in test_numpy_piecewise_regression | sympy/printing/tests/test_numpy.py | sympy/printing/tests/test_numpy.py | from sympy import Piecewise
from sympy.abc import x
from sympy.printing.lambdarepr import NumPyPrinter
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See sympy/sympy#9747 and sympy/sympy#9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([x < 0,True], [1,0], default=nan)'
| from sympy import Piecewise
from sympy.abc import x
from sympy.printing.lambdarepr import NumPyPrinter
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([x < 0,True], [1,0], default=nan)'
| Python | 0 |
1addeefdf51713d562788018ebfb6549b215f55b | Fix C typo error in a test | test/option/tree-lib.py | test/option/tree-lib.py | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Make sure that --tree=derived output with a library dependency shows
the dependency on the library. (On earlier versions of the Microsoft
toolchain this wouldn't show up unless the library already existed
on disk.)
Issue 1363: http://scons.tigris.org/issues/show_bug.cgi?id=1363
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(LIBPREFIX='',
LIBSUFFIX='.lib',
OBJSUFFIX='.obj',
EXESUFFIX='.exe')
env.AppendENVPath('PATH', '.')
l = env.Library( 'util.lib', 'util.c' )
p = env.Program( 'test_tree_lib.exe', 'main.c', LIBS=l )
env.Command( 'foo.h', p, '$SOURCE > $TARGET')
""")
test.write('main.c', """\
#include <stdlib.h>
#include <stdio.h>
int
main(int argc, char **argv)
{
printf("#define FOO_H \\"foo.h\\"\\n");
return (0);
}
""")
test.write('util.c', """\
void
util(void)
{
;
}
""")
expect = """
+-test_tree_lib.exe
+-main.obj
+-util.lib
+-util.obj
"""
test.run(arguments = '--tree=derived foo.h')
test.must_contain_all_lines(test.stdout(), [expect])
test.up_to_date(arguments = 'foo.h')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Make sure that --tree=derived output with a library dependency shows
the dependency on the library. (On earlier versions of the Microsoft
toolchain this wouldn't show up unless the library already existed
on disk.)
Issue 1363: http://scons.tigris.org/issues/show_bug.cgi?id=1363
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(LIBPREFIX='',
LIBSUFFIX='.lib',
OBJSUFFIX='.obj',
EXESUFFIX='.exe')
env.AppendENVPath('PATH', '.')
l = env.Library( 'util.lib', 'util.c' )
p = env.Program( 'test_tree_lib.exe', 'main.c', LIBS=l )
env.Command( 'foo.h', p, '$SOURCE > $TARGET')
""")
test.write('main.c', """\
#include <stdlib.h>
#include <stdio.h>
int
main(int argc, char *argv)
{
printf("#define FOO_H \\"foo.h\\"\\n");
return (0);
}
""")
test.write('util.c', """\
void
util(void)
{
;
}
""")
expect = """
+-test_tree_lib.exe
+-main.obj
+-util.lib
+-util.obj
"""
test.run(arguments = '--tree=derived foo.h')
test.must_contain_all_lines(test.stdout(), [expect])
test.up_to_date(arguments = 'foo.h')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python | 0.000144 |
25dc03c3db7e224463f11e513f94fb9cb15ed250 | Fix check_service_client_function doc typo | tempest/tests/lib/services/base.py | tempest/tests/lib/services/base.py | # Copyright 2015 Deutsche Telekom AG. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_serialization import jsonutils as json
from tempest.tests import base
from tempest.tests.lib import fake_http
class BaseServiceTest(base.TestCase):
def create_response(self, body, to_utf=False, status=200, headers=None):
json_body = {}
if body:
json_body = json.dumps(body)
if to_utf:
json_body = json_body.encode('utf-8')
resp = fake_http.fake_http_response(headers, status=status), json_body
return resp
def check_service_client_function(self, function, function2mock,
body, to_utf=False, status=200,
headers=None, mock_args=None,
**kwargs):
"""Mock a service client function for unit testing.
:param function: The service client function to call.
:param function2mock: The REST call to mock inside the service client
function.
:param body: Expected response body returned by the service client
function.
:param to_utf: Whether to use UTF-8 encoding for response.
:param status: Expected response status returned by the service client
function.
:param headers: Expected headers returned by the service client
function.
:param mock_args: List/dict/value of expected args/kwargs called by
function2mock. For example:
* If mock_args=['foo'] then ``assert_called_once_with('foo')``
is called.
* If mock_args={'foo': 'bar'} then
``assert_called_once_with(foo='bar')`` is called.
* If mock_args='foo' then ``assert_called_once_with('foo')``
is called.
:param kwargs: kwargs that are passed to function.
"""
mocked_response = self.create_response(body, to_utf, status, headers)
fixture = self.useFixture(fixtures.MockPatch(
function2mock, return_value=mocked_response))
if kwargs:
resp = function(**kwargs)
else:
resp = function()
self.assertEqual(body, resp)
if isinstance(mock_args, list):
fixture.mock.assert_called_once_with(*mock_args)
elif isinstance(mock_args, dict):
fixture.mock.assert_called_once_with(**mock_args)
elif mock_args is not None:
fixture.mock.assert_called_once_with(mock_args)
| # Copyright 2015 Deutsche Telekom AG. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_serialization import jsonutils as json
from tempest.tests import base
from tempest.tests.lib import fake_http
class BaseServiceTest(base.TestCase):
def create_response(self, body, to_utf=False, status=200, headers=None):
json_body = {}
if body:
json_body = json.dumps(body)
if to_utf:
json_body = json_body.encode('utf-8')
resp = fake_http.fake_http_response(headers, status=status), json_body
return resp
def check_service_client_function(self, function, function2mock,
body, to_utf=False, status=200,
headers=None, mock_args=None,
**kwargs):
"""Mock a service client function for unit testing.
:param function: The service client function to call.
:param function2mock: The REST call to mock inside the service client
function.
:param body: Expected response body returned by the service client
function.
:param to_utf: Whether to use UTF-8 encoding for request.
:param status: Expected response status returned by the service client
function.
:param headers: Expected headers returned by the service client
function.
:param mock_args: List/dict/value of expected args/kwargs called by
function2mock. For example:
* If mock_args=['foo'] then ``assert_called_once_with('foo')``
is called.
* If mock_args={'foo': 'bar'} then
``assert_called_once_with(foo='bar')`` is called.
* If mock_args='foo' then ``assert_called_once_with('foo')``
is called.
:param kwargs: kwargs that are passed to function.
"""
mocked_response = self.create_response(body, to_utf, status, headers)
fixture = self.useFixture(fixtures.MockPatch(
function2mock, return_value=mocked_response))
if kwargs:
resp = function(**kwargs)
else:
resp = function()
self.assertEqual(body, resp)
if isinstance(mock_args, list):
fixture.mock.assert_called_once_with(*mock_args)
elif isinstance(mock_args, dict):
fixture.mock.assert_called_once_with(**mock_args)
elif mock_args is not None:
fixture.mock.assert_called_once_with(mock_args)
| Python | 0.000001 |
df9ff4f13fc7da111bc11cf5f390efe94352b6e6 | Fix Setting class | src/wikicurses/__init__.py | src/wikicurses/__init__.py | import os
import json
import pkgutil
from enum import Enum
_data = pkgutil.get_data('wikicurses', 'interwiki.list').decode()
wikis = dict([i.split('|')[0:2] for i in _data.splitlines() if i[0]!='#'])
default_configdir = os.environ['HOME'] + '/.config'
configpath = os.environ.get('XDG_CONFIG_HOME', default_configdir) + '/wikicurses'
class Settings:
def __init__(self, name):
self.file = configpath + '/' + name
def __iter__(self):
if not os.path.exists(self.file):
yield from ()
return
with open(self.file) as file:
yield from json.load(file)
def _save(self, bookmarks):
if not os.path.exists(configpath):
os.mkdir(configpath)
with open(self.file, 'w') as file:
json.dump(bookmarks, file)
def add(self, bmark):
bookmarks = set(self)
bookmarks.add(bmark)
self._save(list(bookmarks))
def discard(self, bmark):
bookmarks = set(self)
bookmarks.discard(bmark)
self._save(list(bookmarks))
bmarks = Settings('bookmarks')
class BitEnum(int, Enum):
def __new__(cls, *args):
value = 1 << len(cls.__members__)
return int.__new__(cls, value)
formats = BitEnum("formats", "i b blockquote")
| import os
import json
import pkgutil
from enum import Enum
_data = pkgutil.get_data('wikicurses', 'interwiki.list').decode()
wikis = dict([i.split('|')[0:2] for i in _data.splitlines() if i[0]!='#'])
default_configdir = os.environ['HOME'] + '/.config'
configpath = os.environ.get('XDG_CONFIG_HOME', default_configdir) + '/wikicurses'
class Settings:
def __init__(self, name):
self.file = configpath + '/' + name
def __iter__(self):
if not os.path.exists(self.file):
yield from ()
with open(self.file) as file:
yield from json.load(file)
def _save(self, bookmarks):
if not os.path.exists(configpath):
os.mkdir(configpath)
with open(self.file, 'w') as file:
json.dump(bookmarks, file)
def add(self, bmark):
bookmarks = set(self)
bookmarks.add(bmark)
self._save(list(bookmarks))
def discard(self, bmark):
bookmarks = set(self)
bookmarks.discard(bmark)
self._save(list(bookmarks))
bmarks = Settings('bookmarks')
class BitEnum(int, Enum):
def __new__(cls, *args):
value = 1 << len(cls.__members__)
return int.__new__(cls, value)
formats = BitEnum("formats", "i b blockquote")
| Python | 0.000001 |
afedc41fd4e573f4db38f2fde38b2286d623b4c4 | Remove obsolete property | src/zeit/campus/article.py | src/zeit/campus/article.py | import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink', 'related')
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
| import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
| Python | 0.000216 |
e60a05886c52574227b1a73fe02575ede81ffa5e | mark out-of-date tests with a @skip | staff/tests/tests_views.py | staff/tests/tests_views.py | from unittest import skip
from django.core import mail
from django.test import Client, TestCase
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.http import urlencode
class StaffAddView(TestCase):
fixtures = ['ophasebase.json', 'staff.json', 'students.json']
def test_redirect(self):
"""Test for Redirect to SSO Login page"""
c = Client()
suffix = urlencode({"next": reverse('staff:registration')})
redirect_url = "{}?{}".format(reverse('pyTUID:login'), suffix)
response = c.get(reverse('staff:registration'))
self.assertRedirects(response, redirect_url, target_status_code=302)
@skip
def test_send_email(self):
"""Sending an email after successfull register"""
# TODO Use fake SSO in test
c = Client()
register_view = reverse('staff:registration')
self.assertEqual(len(mail.outbox), 0)
testdata = {'prename': 'Leah',
'name': 'Bayer',
'email': 'leah.bayer@example.com',
'phone': '016031368212',
'matriculated_since': 'today',
'degree_course': 'Bachelor',
'experience_ophase': 'Nothing until now',
'is_helper': True,
'helper_jobs': 1,}
# sending a incomplet form should not send a email
response = c.post(register_view, testdata, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'why_participate', _('This field is required.'))
self.assertEqual(len(mail.outbox), 0)
# a complete form should send one email
testdata['why_participate'] = 'You need testdata'
response = c.post(register_view, testdata, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain, [(reverse('staff:registration_success'), 302)])
self.assertEqual(len(mail.outbox), 1)
smail = mail.outbox[0]
self.assertEqual(len(smail.to), 1)
self.assertEqual(smail.to[0], 'Leah Bayer <leah.bayer@example.com>') | from django.core import mail
from django.test import Client, TestCase
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.http import urlencode
class StaffAddView(TestCase):
fixtures = ['ophasebase.json', 'staff.json', 'students.json']
def test_redirect(self):
"""Test for Redirect to SSO Login page"""
c = Client()
suffix = urlencode({"next": reverse('staff:registration')})
redirect_url = "{}?{}".format(reverse('pyTUID:login'), suffix)
response = c.get(reverse('staff:registration'))
self.assertRedirects(response, redirect_url, target_status_code=302)
def test_send_email(self):
"""Sending an email after successfull register"""
pass
# TODO Use fake SSO in test
"""
c = Client()
register_view = reverse('staff:registration')
self.assertEqual(len(mail.outbox), 0)
testdata = {'prename': 'Leah',
'name': 'Bayer',
'email': 'leah.bayer@example.com',
'phone': '016031368212',
'matriculated_since': 'today',
'degree_course': 'Bachelor',
'experience_ophase': 'Nothing until now',
'is_helper': True,
'helper_jobs': 1,}
# sending a incomplet form should not send a email
response = c.post(register_view, testdata, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'why_participate', _('This field is required.'))
self.assertEqual(len(mail.outbox), 0)
# a complete form should send one email
testdata['why_participate'] = 'You need testdata'
response = c.post(register_view, testdata, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain, [(reverse('staff:registration_success'), 302)])
self.assertEqual(len(mail.outbox), 1)
smail = mail.outbox[0]
self.assertEqual(len(smail.to), 1)
self.assertEqual(smail.to[0], 'Leah Bayer <leah.bayer@example.com>')
"""
| Python | 0.000004 |
42ec06aa5e2034266f817dc6465cd8bf4fea6ead | fix migration | corehq/apps/linked_domain/migrations/0005_migrate_linked_app_toggle.py | corehq/apps/linked_domain/migrations/0005_migrate_linked_app_toggle.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-01 15:00
from __future__ import unicode_literals
from couchdbkit import ResourceNotFound
from django.db import migrations
from corehq.toggles import LINKED_DOMAINS
from toggle.models import Toggle
def _migrate_linked_apps_toggle(apps, schema_editor):
try:
linked_apps_toggle = Toggle.get('linked_apps')
except ResourceNotFound:
pass
else:
try:
Toggle.get(LINKED_DOMAINS.slug)
except ResourceNotFound:
linked_domains_toggle = Toggle(
slug=LINKED_DOMAINS.slug, enabled_users=linked_apps_toggle.enabled_users
)
linked_domains_toggle.save()
def noop(*args, **kwargs):
pass
class Migration(migrations.Migration):
initial = True
dependencies = [
('linked_domain', '0004_domainlinkhistory'),
]
operations = [
migrations.RunPython(_migrate_linked_apps_toggle, noop)
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-01 15:00
from __future__ import unicode_literals
from couchdbkit import ResourceNotFound
from django.db import migrations
from corehq.toggles import LINKED_DOMAINS
from toggle.models import Toggle
def _migrate_linked_apps_toggle(apps, schema_editor):
try:
linked_apps_toggle = Toggle.get('linked_apps')
except ResourceNotFound:
pass
else:
linked_domains_toggle = Toggle(
slug=LINKED_DOMAINS.slug, enabled_users=linked_apps_toggle.enabled_users
)
linked_domains_toggle.save()
def noop(*args, **kwargs):
pass
class Migration(migrations.Migration):
initial = True
dependencies = [
('linked_domain', '0004_domainlinkhistory'),
]
operations = [
migrations.RunPython(_migrate_linked_apps_toggle, noop)
]
| Python | 0.000001 |
d3d25e127592356d6b678dc8d013f83f53803f67 | update mordred.tests to check hidden modules | mordred/tests/__main__.py | mordred/tests/__main__.py | import os
import nose
def main():
base = os.path.dirname(os.path.dirname(__file__))
hidden = [
os.path.join(base, n)
for n in os.listdir(base)
if n[:1] == "_" and os.path.splitext(n)[1] == ".py"
]
tests = [base, os.path.join(base, "_base")] + hidden
os.environ["NOSE_WITH_DOCTEST"] = "1"
nose.main(
defaultTest=",".join(tests),
)
if __name__ == "__main__":
main()
| import os
import nose
def main():
base = os.path.dirname(os.path.dirname(__file__))
tests = [base, os.path.join(base, "_base")]
os.environ["NOSE_WITH_DOCTEST"] = "1"
nose.main(
defaultTest=",".join(tests),
)
if __name__ == "__main__":
main()
| Python | 0 |
7f9ca64313fff0716143cf7d56075a565f35d60f | add docstring describing public API (#2140) | tensorboard/plugins/hparams/api.py | tensorboard/plugins/hparams/api.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental public APIs for the HParams plugin.
This module supports a spectrum of use cases, depending on how much
structure you want. In the simplest case, you can simply collect your
hparams into a dict, and use a Keras callback to record them:
>>> from tensorboard.plugins.hparams import api as hp
>>> hparams = {
... "optimizer": "adam",
... "fc_dropout": 0.2,
... "neurons": 128,
... # ...
... }
>>>
>>> model = model_fn(hparams)
>>> callbacks = [
>>> tf.keras.callbacks.TensorBoard(logdir),
>>> hp.KerasCallback(logdir, hparams),
>>> ]
>>> model.fit(..., callbacks=callbacks)
The Keras callback requires that TensorFlow eager execution be enabled.
If not using Keras, use the `hparams` function to write the values
directly:
>>> # In eager mode:
>>> with tf.create_file_writer(logdir).as_default():
... hp.hparams(hparams)
>>>
>>> # In legacy graph mode:
>>> with tf.compat.v2.create_file_writer(logdir).as_default() as w:
... sess.run(w.init())
... sess.run(hp.hparams(hparams))
... sess.run(w.flush())
To control how hyperparameters and metrics appear in the TensorBoard UI,
you can define `HParam` and `Metric` objects and collect them in an
`Experiment`:
>>> HP_OPTIMIZER = hp.HParam("optimizer")
>>> HP_FC_DROPOUT = hp.HParam(
... "fc_dropout",
... display_name="f.c. dropout",
... description="Dropout rate for fully connected subnet.",
... )
>>> HP_NEURONS = hp.HParam("neurons", description="Neurons per dense layer")
>>>
>>> experiment = hp.Experiment(
... hparams=[
... HP_OPTIMIZER,
... HP_FC_DROPOUT,
... HP_NEURONS,
... ],
... metrics=[
... hp.Metric("xent", group="validation", display_name="cross-entropy"),
... hp.Metric("f1", group="validation", display_name="F₁ score"),
... hp.Metric("loss", group="train", display_name="training loss"),
... ],
... )
>>> with tf.summary.create_file_writer(base_logdir).as_default():
... hp.hparams_config(experiment) # write experiment summary
You can continue to pass a string-keyed dict to the Keras callback or
the `hparams` function, or you can use `HParam` objects as the keys. The
latter approach enables better static analysis: your favorite Python
linter can tell you if you misspell a hyperparameter name, your IDE can
help you find all the places where a hyperparameter is used, etc:
>>> hparams = {
... HP_OPTIMIZER: "adam",
... HP_FC_DROPOUT: 0.2,
... HP_NEURONS: 128,
... # ...
... }
>>>
>>> model = model_fn(hparams)
>>> callbacks = [
>>> tf.keras.callbacks.TensorBoard(logdir),
>>> hp.KerasCallback(logdir, hparams),
>>> ]
Finally, you can choose to annotate your hparam definitions with domain
information:
>>> HP_OPTIMIZER = hp.HParam("optimizer", hp.Discrete(["adam", "sgd"]))
>>> HP_FC_DROPOUT = hp.HParam("fc_dropout", hp.RealInterval(0.1, 0.4))
>>> HP_NEURONS = hp.HParam("neurons", hp.IntInterval(64, 256))
The TensorBoard HParams plugin does not provide tuners, but you can
integrate these domains into your preferred tuning framework if you so
desire. The domains will also be reflected in the TensorBoard UI.
See the `Experiment`, `HParam`, `Metric`, and `KerasCallback` classes
for API specifications. Consult the `hparams_demo.py` script in the
TensorBoard repository for an end-to-end MNIST example.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorboard.plugins.hparams import keras
from tensorboard.plugins.hparams import summary_v2
Discrete = summary_v2.Discrete
Domain = summary_v2.Domain
HParam = summary_v2.HParam
IntInterval = summary_v2.IntInterval
Metric = summary_v2.Metric
RealInterval = summary_v2.RealInterval
hparams = summary_v2.hparams
hparams_pb = summary_v2.hparams_pb
hparams_config = summary_v2.hparams_config
hparams_config_pb = summary_v2.hparams_config_pb
KerasCallback = keras.Callback
del absolute_import
del division
del keras
del print_function
del summary_v2
| # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental public APIs for the HParams plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorboard.plugins.hparams import keras
from tensorboard.plugins.hparams import summary_v2
Discrete = summary_v2.Discrete
Domain = summary_v2.Domain
HParam = summary_v2.HParam
IntInterval = summary_v2.IntInterval
Metric = summary_v2.Metric
RealInterval = summary_v2.RealInterval
hparams = summary_v2.hparams
hparams_pb = summary_v2.hparams_pb
hparams_config = summary_v2.hparams_config
hparams_config_pb = summary_v2.hparams_config_pb
KerasCallback = keras.Callback
del absolute_import
del division
del keras
del print_function
del summary_v2
| Python | 0 |
cff5052f94738942bfd18d745660a975a170ca4b | Fix build on non-git source | build-tools/code_generator/utils/common.py | build-tools/code_generator/utils/common.py | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import io
import os
import re
import six
import subprocess
def which(name):
exec_suffix = '.exe' if os.name is 'nt' else ''
for p in os.environ['PATH'].split(os.pathsep):
if os.name is 'nt':
p = p.replace('"', '')
f = os.path.join(p, name + exec_suffix)
if os.path.isfile(f):
return f
return None
def check_update(filename, generated, force=False):
original = ''
if os.path.exists(filename):
with io.open(filename, 'rt', encoding='utf_8_sig') as f:
original = six.text_type(f.read())
s = difflib.SequenceMatcher(None, original, generated)
if(force or not os.path.exists(filename)) and s.ratio() < 1.0:
with open(filename, 'wb') as f:
print('Updating {}.'.format(filename))
write_content = generated.encode('utf_8')
write_content = write_content.replace(b'\r\n', b'\n')
write_content = write_content.replace(b'\r', b'\n')
f.write(write_content)
def get_version(dir):
os.chdir(dir)
version = default_version = '0.9.7'
if os.path.exists('.git'):
try:
nearest_tag = re.sub(r'^v', '', subprocess.check_output(['git', 'describe', '--abbrev=0', '--tags']).strip().decode('utf-8'))
nearest_tag = nearest_tag.replace('/', '_').lower()
version = nearest_tag
vv = subprocess.check_output(['git', 'describe', '--tags']).strip().decode('utf-8').split('-')
if len(vv) > 1:
cid = vv.pop()
version = '-'.join(vv) + '+' + cid
version = version.replace('/', '_').lower()
except:
nearest_tag = default_version
version = default_version
return version.replace('/', '_').lower(), nearest_tag.replace('/', '_').lower()
| # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import io
import os
import re
import six
import subprocess
def which(name):
exec_suffix = '.exe' if os.name is 'nt' else ''
for p in os.environ['PATH'].split(os.pathsep):
if os.name is 'nt':
p = p.replace('"', '')
f = os.path.join(p, name + exec_suffix)
if os.path.isfile(f):
return f
return None
def check_update(filename, generated, force=False):
original = ''
if os.path.exists(filename):
with io.open(filename, 'rt', encoding='utf_8_sig') as f:
original = six.text_type(f.read())
s = difflib.SequenceMatcher(None, original, generated)
if(force or not os.path.exists(filename)) and s.ratio() < 1.0:
with open(filename, 'wb') as f:
print('Updating {}.'.format(filename))
write_content = generated.encode('utf_8')
write_content = write_content.replace(b'\r\n', b'\n')
write_content = write_content.replace(b'\r', b'\n')
f.write(write_content)
def get_version(dir):
os.chdir(dir)
default_version = '0.9.7'
if os.path.exists('.git'):
try:
nearest_tag = re.sub(r'^v', '', subprocess.check_output(['git', 'describe', '--abbrev=0', '--tags']).strip().decode('utf-8'))
nearest_tag = nearest_tag.replace('/', '_').lower()
version = nearest_tag
vv = subprocess.check_output(['git', 'describe', '--tags']).strip().decode('utf-8').split('-')
if len(vv) > 1:
cid = vv.pop()
version = '-'.join(vv) + '+' + cid
version = version.replace('/', '_').lower()
except:
nearest_tag = default_version
version = default_version
return version.replace('/', '_').lower(), nearest_tag.replace('/', '_').lower()
| Python | 0 |
8b0d9378a1e48c010fb028395811d3f3720af3e9 | Translate language names from settings.LANGUAGES. | multilingual/languages.py | multilingual/languages.py | """
Django-multilingual: language-related settings and functions.
"""
# Note: this file did become a mess and will have to be refactored
# after the configuration changes get in place.
#retrieve language settings from settings.py
from django.conf import settings
LANGUAGES = settings.LANGUAGES
from django.utils.translation import ugettext_lazy as _
from multilingual.exceptions import LanguageDoesNotExist
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
thread_locals = local()
def get_language_count():
return len(LANGUAGES)
def get_language_code(language_id):
return LANGUAGES[(int(language_id or get_default_language())) - 1][0]
def get_language_name(language_id):
return _(LANGUAGES[(int(language_id or get_default_language())) - 1][1])
def get_language_id_list():
return range(1, get_language_count() + 1)
def get_language_code_list():
return [lang[0] for lang in LANGUAGES]
def get_language_choices():
return [(language_id, get_language_code(language_id))
for language_id in get_language_id_list()]
def get_language_id_from_id_or_code(language_id_or_code, use_default=True):
if language_id_or_code is None:
if use_default:
return get_default_language()
else:
return None
if isinstance(language_id_or_code, int):
return language_id_or_code
i = 0
for (code, desc) in LANGUAGES:
i += 1
if code == language_id_or_code:
return i
raise LanguageDoesNotExist()
def get_language_idx(language_id_or_code):
# to do: optimize
language_id = get_language_id_from_id_or_code(language_id_or_code)
return get_language_id_list().index(language_id)
def set_default_language(language_id_or_code):
"""
Set the default language for the whole translation mechanism.
Accepts language codes or IDs.
"""
language_id = get_language_id_from_id_or_code(language_id_or_code)
thread_locals.DEFAULT_LANGUAGE = language_id
def get_default_language():
"""
Return the language ID set by set_default_language.
"""
return getattr(thread_locals, 'DEFAULT_LANGUAGE',
settings.DEFAULT_LANGUAGE)
def get_default_language_code():
"""
Return the language code of language ID set by set_default_language.
"""
language_id = get_language_id_from_id_or_code(get_default_language())
return get_language_code(language_id)
def _to_db_identifier(name):
"""
Convert name to something that is usable as a field name or table
alias in SQL.
For the time being assume that the only possible problem with name
is the presence of dashes.
"""
return name.replace('-', '_')
def get_translation_table_alias(translation_table_name, language_id):
"""
Return an alias for the translation table for a given language_id.
Used in SQL queries.
"""
return (translation_table_name
+ '_'
+ _to_db_identifier(get_language_code(language_id)))
def get_translated_field_alias(field_name, language_id=None):
"""
Return an alias for field_name field for a given language_id.
Used in SQL queries.
"""
return ('_trans_'
+ field_name
+ '_' + _to_db_identifier(get_language_code(language_id)))
| """
Django-multilingual: language-related settings and functions.
"""
# Note: this file did become a mess and will have to be refactored
# after the configuration changes get in place.
#retrieve language settings from settings.py
from django.conf import settings
LANGUAGES = settings.LANGUAGES
from multilingual.exceptions import LanguageDoesNotExist
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
thread_locals = local()
def get_language_count():
return len(LANGUAGES)
def get_language_code(language_id):
return LANGUAGES[(int(language_id or get_default_language())) - 1][0]
def get_language_name(language_id):
return LANGUAGES[(int(language_id or get_default_language())) - 1][1]
def get_language_id_list():
return range(1, get_language_count() + 1)
def get_language_code_list():
return [lang[0] for lang in LANGUAGES]
def get_language_choices():
return [(language_id, get_language_code(language_id))
for language_id in get_language_id_list()]
def get_language_id_from_id_or_code(language_id_or_code, use_default=True):
if language_id_or_code is None:
if use_default:
return get_default_language()
else:
return None
if isinstance(language_id_or_code, int):
return language_id_or_code
i = 0
for (code, desc) in LANGUAGES:
i += 1
if code == language_id_or_code:
return i
raise LanguageDoesNotExist()
def get_language_idx(language_id_or_code):
# to do: optimize
language_id = get_language_id_from_id_or_code(language_id_or_code)
return get_language_id_list().index(language_id)
def set_default_language(language_id_or_code):
"""
Set the default language for the whole translation mechanism.
Accepts language codes or IDs.
"""
language_id = get_language_id_from_id_or_code(language_id_or_code)
thread_locals.DEFAULT_LANGUAGE = language_id
def get_default_language():
"""
Return the language ID set by set_default_language.
"""
return getattr(thread_locals, 'DEFAULT_LANGUAGE',
settings.DEFAULT_LANGUAGE)
def get_default_language_code():
"""
Return the language code of language ID set by set_default_language.
"""
language_id = get_language_id_from_id_or_code(get_default_language())
return get_language_code(language_id)
def _to_db_identifier(name):
"""
Convert name to something that is usable as a field name or table
alias in SQL.
For the time being assume that the only possible problem with name
is the presence of dashes.
"""
return name.replace('-', '_')
def get_translation_table_alias(translation_table_name, language_id):
"""
Return an alias for the translation table for a given language_id.
Used in SQL queries.
"""
return (translation_table_name
+ '_'
+ _to_db_identifier(get_language_code(language_id)))
def get_translated_field_alias(field_name, language_id=None):
"""
Return an alias for field_name field for a given language_id.
Used in SQL queries.
"""
return ('_trans_'
+ field_name
+ '_' + _to_db_identifier(get_language_code(language_id)))
| Python | 0.999999 |
a68bb0d268861d30c26647523991ed215853cdfe | add Reeve post | ca_ab_grande_prairie_county_no_1/people.py | ca_ab_grande_prairie_county_no_1/people.py | from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
import re
COUNCIL_PAGE = 'http://www.countygp.ab.ca/EN/main/government/council.html'
REEVE_URL = 'http://www.countygp.ab.ca/EN/main/government/council/reeve-message.html'
class GrandePrairieCountyNo1PersonScraper(Scraper):
    # @todo The Reeve is also a Councillor.

    def get_people(self):
        """Yield one Legislator per division; flag the Reeve's membership."""
        # The Reeve's name only appears on a separate page; fetch it first
        # so the matching councillor can be tagged below.
        reeve_page = lxmlize(REEVE_URL)
        reeve_name = reeve_page.xpath('string(//b)').split(',')[0]

        page = lxmlize(COUNCIL_PAGE)
        for cell in page.xpath('//table[@class="table-plain"]/tbody/tr/td[2]'):
            heading = cell.xpath('./h2')[0].text_content()
            full_name = heading.split('Division')[0].strip()
            division = re.findall(r'(Division [0-9])', heading)[0]

            person = Legislator(name=full_name, post_id=division,
                                role='Councillor')
            if full_name == reeve_name:
                person.add_committee_membership('Grande Prairie County No. 1',
                                                role='Reeve')
            person.add_source(COUNCIL_PAGE)
            person.image = cell.xpath('./preceding-sibling::td//img/@src')[0]

            address = cell.xpath('./p[1]')[0].text_content()
            email = cell.xpath('.//a[contains(@href, "mailto:")]')[0].text_content()
            person.add_contact('address', address, 'legislature')
            person.add_contact('email', email, None)

            # The second paragraph holds "Label: 780.xxx.xxxx" pairs; after
            # splitting on ':' each number's label sits in the preceding
            # chunk, so walk the chunks pairwise.
            chunks = cell.xpath('./p[2]')[0].text_content().replace(
                'Email: ', '').replace(email, '').split(':')
            for label_chunk, number_chunk in zip(chunks, chunks[1:]):
                contact_type = re.findall(r'[A-Za-z]+', label_chunk)[0]
                number = re.findall(
                    r'[0-9]{3}.[0-9]{3}.[0-9]{4}',
                    number_chunk)[0].replace('.', '-')
                if contact_type == 'Fax':
                    person.add_contact('fax', number, 'legislature')
                elif contact_type == 'Cell':
                    person.add_contact('cell', number, 'legislature')
                elif contact_type == 'Hm':
                    person.add_contact('voice', number, 'residence')
                else:
                    raise Exception('Unrecognized contact type %s' % contact_type)
            yield person
| from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
import re
COUNCIL_PAGE = 'http://www.countygp.ab.ca/EN/main/government/council.html'
class GrandePrairieCountyNo1PersonScraper(Scraper):
    # @todo The Reeve is also a Councillor.

    def get_people(self):
        """Yield a Legislator for each councillor listed on COUNCIL_PAGE.

        Scrapes name, division, portrait, address, e-mail and phone
        numbers from the council table.
        """
        page = lxmlize(COUNCIL_PAGE)
        councillors = page.xpath('//table[@class="table-plain"]/tbody/tr/td[2]')
        for councillor in councillors:
            # BUG FIX: the heading reads "<name> Division N"; splitting on
            # 'Division' leaves trailing whitespace on the name, so strip it.
            name = councillor.xpath('./h2')[0].text_content().split(
                'Division')[0].strip()
            district = re.findall(
                r'(Division [0-9])',
                councillor.xpath('./h2')[0].text_content())[0]
            p = Legislator(name=name, post_id=district, role='Councillor')
            p.add_source(COUNCIL_PAGE)
            image = councillor.xpath('./preceding-sibling::td//img/@src')[0]
            p.image = image
            address = councillor.xpath('./p[1]')[0].text_content()
            email = councillor.xpath('.//a[contains(@href, "mailto:")]')[0].text_content()
            p.add_contact('address', address, 'legislature')
            p.add_contact('email', email, None)
            # "Label: number" pairs; after splitting on ':' the label for a
            # number lives in the chunk preceding it.
            numbers = councillor.xpath('./p[2]')[0].text_content().replace(
                'Email: ', '').replace(email, '').split(':')
            for index, number in enumerate(numbers):
                if index == 0:
                    continue
                contact_type = re.findall(r'[A-Za-z]+', numbers[index - 1])[0]
                number = re.findall(
                    r'[0-9]{3}.[0-9]{3}.[0-9]{4}', number)[0].replace('.', '-')
                if contact_type == 'Fax':
                    p.add_contact('fax', number, 'legislature')
                elif contact_type == 'Cell':
                    p.add_contact('cell', number, 'legislature')
                elif contact_type == 'Hm':
                    p.add_contact('voice', number, 'residence')
                else:
                    raise Exception('Unrecognized contact type %s' % contact_type)
            yield p
| Python | 0.000001 |
883ef42a4a02a98bbbec7a2a3c20938853805fb0 | Fix update_host API response schema | tempest/lib/api_schema/response/compute/v2_1/hosts.py | tempest/lib/api_schema/response/compute/v2_1/hosts.py | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON schemas describing Nova "os-hosts" API responses, used to validate
# replies from the compute v2.1 endpoint.
import copy

list_hosts = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'hosts': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'host_name': {'type': 'string'},
                        'service': {'type': 'string'},
                        'zone': {'type': 'string'}
                    },
                    'additionalProperties': False,
                    'required': ['host_name', 'service', 'zone']
                }
            }
        },
        'additionalProperties': False,
        'required': ['hosts']
    }
}

get_host_detail = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'host': {
                'type': 'array',
                # BUG FIX: this key was spelled 'item', which is not a JSON
                # Schema keyword, so the per-element schema was silently
                # ignored during validation. 'items' is the correct keyword.
                'items': {
                    'type': 'object',
                    'properties': {
                        'resource': {
                            'type': 'object',
                            'properties': {
                                'cpu': {'type': 'integer'},
                                'disk_gb': {'type': 'integer'},
                                'host': {'type': 'string'},
                                'memory_mb': {'type': 'integer'},
                                'project': {'type': 'string'}
                            },
                            'additionalProperties': False,
                            'required': ['cpu', 'disk_gb', 'host',
                                         'memory_mb', 'project']
                        }
                    },
                    'additionalProperties': False,
                    'required': ['resource']
                }
            }
        },
        'additionalProperties': False,
        'required': ['host']
    }
}

startup_host = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'power_action': {'enum': ['startup']}
        },
        'additionalProperties': False,
        'required': ['host', 'power_action']
    }
}

# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
shutdown_host = copy.deepcopy(startup_host)
shutdown_host['response_body']['properties']['power_action'] = {
    'enum': ['shutdown']
}

# The 'power_action' attribute of 'reboot_host' API is 'reboot'
reboot_host = copy.deepcopy(startup_host)
reboot_host['response_body']['properties']['power_action'] = {
    'enum': ['reboot']
}

update_host = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'maintenance_mode': {'enum': ['on_maintenance',
                                          'off_maintenance']},
            'status': {'enum': ['enabled', 'disabled']}
        },
        'additionalProperties': False,
        # The response echoes back only the attribute(s) that were updated,
        # so require 'host' plus at least one of the other two.
        'anyOf': [
            {'required': ['host', 'status']},
            {'required': ['host', 'maintenance_mode']}
        ]
    }
}
| # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON schemas describing Nova "os-hosts" API responses, used to validate
# replies from the compute v2.1 endpoint.
import copy

list_hosts = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'hosts': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'host_name': {'type': 'string'},
                        'service': {'type': 'string'},
                        'zone': {'type': 'string'}
                    },
                    'additionalProperties': False,
                    'required': ['host_name', 'service', 'zone']
                }
            }
        },
        'additionalProperties': False,
        'required': ['hosts']
    }
}

get_host_detail = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'host': {
                'type': 'array',
                # BUG FIX: this key was spelled 'item', which is not a JSON
                # Schema keyword, so the per-element schema was silently
                # ignored during validation. 'items' is the correct keyword.
                'items': {
                    'type': 'object',
                    'properties': {
                        'resource': {
                            'type': 'object',
                            'properties': {
                                'cpu': {'type': 'integer'},
                                'disk_gb': {'type': 'integer'},
                                'host': {'type': 'string'},
                                'memory_mb': {'type': 'integer'},
                                'project': {'type': 'string'}
                            },
                            'additionalProperties': False,
                            'required': ['cpu', 'disk_gb', 'host',
                                         'memory_mb', 'project']
                        }
                    },
                    'additionalProperties': False,
                    'required': ['resource']
                }
            }
        },
        'additionalProperties': False,
        'required': ['host']
    }
}

startup_host = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'power_action': {'enum': ['startup']}
        },
        'additionalProperties': False,
        'required': ['host', 'power_action']
    }
}

# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
shutdown_host = copy.deepcopy(startup_host)
shutdown_host['response_body']['properties']['power_action'] = {
    'enum': ['shutdown']
}

# The 'power_action' attribute of 'reboot_host' API is 'reboot'
reboot_host = copy.deepcopy(startup_host)
reboot_host['response_body']['properties']['power_action'] = {
    'enum': ['reboot']
}

update_host = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'maintenance_mode': {'enum': ['on_maintenance',
                                          'off_maintenance']},
            'status': {'enum': ['enabled', 'disabled']}
        },
        'additionalProperties': False,
        # BUG FIX: the schema previously required 'host', 'maintenance_mode'
        # AND 'status' together, but the os-hosts update API echoes back only
        # the attribute(s) that were actually updated. Require 'host' plus at
        # least one of the other two instead.
        'anyOf': [
            {'required': ['host', 'status']},
            {'required': ['host', 'maintenance_mode']}
        ]
    }
}
| Python | 0.000269 |
1e28b43c8a02bd3e506fbd33012b4dcd7b193433 | Fix string-bools not translated | compose-v2/galaxy-configurator/customize.py | compose-v2/galaxy-configurator/customize.py | import os
def j2_environment_params():
    """Extra parameters for the Jinja2 Environment.

    Registers AnsibleCoreFiltersExtension so filters known from Ansible,
    such as `to_nice_yaml`, are available in templates.
    """
    return {
        'extensions': ('jinja2_ansible_filters.AnsibleCoreFiltersExtension',),
    }
def alter_context(context):
    """Build the template context from the environment plus *context*.

    Variables whose names carry a known prefix (e.g. GALAXY_CONFIG_*)
    are additionally grouped into one nested dict per prefix (e.g. under
    'galaxy'). Values from an optional input file (*context*) override
    the environment. String booleans ("true"/"false", any case) are
    converted to real booleans first, so the grouped dicts receive
    booleans as well.

    TODO: Unit test
    """
    new_context = dict(os.environ)
    translations = {
        "GALAXY_CONFIG_": "galaxy",
        "GALAXY_UWSGI_CONFIG_": "galaxy_uwsgi",
        "GALAXY_JOB_METRICS_": "galaxy_job_metrics",
        "NGINX_CONFIG_": "nginx",
        "SLURM_CONFIG_": "slurm",
        "HTCONDOR_GALAXY_": "htcondor_galaxy",
        "HTCONDOR_MASTER_": "htcondor_master",
        "HTCONDOR_EXECUTOR_": "htcondor_executor",
        "PULSAR_CONFIG_": "pulsar"
    }
    # Add values from possible input file if existent
    if context is not None and len(context) > 0:
        new_context.update(context)
    # Translate string-boolean to Python boolean
    for key, value in new_context.items():
        if not isinstance(value, str):
            continue
        if value.lower() == "true":
            new_context[key] = True
        elif value.lower() == "false":
            new_context[key] = False
    for group in translations.values():
        if group not in new_context:
            new_context[group] = {}
    for key, value in new_context.items():
        for prefix, group in translations.items():
            if key.startswith(prefix):
                # Strip the prefix; all-uppercase remainders are lowercased
                # (to cope with different formattings: compare Slurm with
                # Galaxy).
                short_key = key[len(prefix):]
                if short_key.isupper():
                    short_key = short_key.lower()
                new_context[group][short_key] = value
                # BUG FIX: previously the (already truncated) key kept being
                # matched against the remaining prefixes, which could file a
                # value under a second, wrong group. One variable belongs to
                # exactly one group.
                break
    context = new_context
    # Set HOST_EXPORT_DIR depending on EXPORT_DIR being absolute or relative
    if "HOST_EXPORT_DIR" not in context and "EXPORT_DIR" in context \
            and "HOST_PWD" in context:
        if context["EXPORT_DIR"].startswith("./"):
            context["HOST_EXPORT_DIR"] = context["HOST_PWD"] \
                + context["EXPORT_DIR"][1:]
        else:
            context["HOST_EXPORT_DIR"] = context["EXPORT_DIR"]
    return context
| import os
def j2_environment_params():
    """Return extra keyword parameters for the Jinja2 Environment.

    Adds AnsibleCoreFiltersExtension so that filters known from Ansible
    (for example `to_nice_yaml`) can be used inside templates.
    """
    extensions = ('jinja2_ansible_filters.AnsibleCoreFiltersExtension',)
    return dict(extensions=extensions)
def alter_context(context):
    """Build the template context from the environment plus *context*.

    Variables whose names carry a known prefix (e.g. GALAXY_CONFIG_*)
    are additionally grouped into one nested dict per prefix (e.g. under
    'galaxy'). Values from an optional input file (*context*) override
    the environment.

    TODO: Unit test
    """
    new_context = dict(os.environ)
    translations = {
        "GALAXY_CONFIG_": "galaxy",
        "GALAXY_UWSGI_CONFIG_": "galaxy_uwsgi",
        "GALAXY_JOB_METRICS_": "galaxy_job_metrics",
        "NGINX_CONFIG_": "nginx",
        "SLURM_CONFIG_": "slurm",
        "HTCONDOR_GALAXY_": "htcondor_galaxy",
        "HTCONDOR_MASTER_": "htcondor_master",
        "HTCONDOR_EXECUTOR_": "htcondor_executor",
        "PULSAR_CONFIG_": "pulsar"
    }
    # Add values from possible input file if existent
    if context is not None and len(context) > 0:
        new_context.update(context)
    # Translate string-boolean to Python boolean
    for key, value in new_context.items():
        if not isinstance(value, str):
            continue
        if value.lower() == "true":
            new_context[key] = True
        elif value.lower() == "false":
            new_context[key] = False
    for group in translations.values():
        if group not in new_context:
            new_context[group] = {}
    # BUG FIX: this loop used to read os.environ directly, which (a) ignored
    # input-file values despite the docstring promising they override the
    # environment, and (b) re-grouped the raw string values, undoing the
    # boolean translation above. Iterate the merged context instead.
    for key, value in new_context.items():
        for prefix, group in translations.items():
            if key.startswith(prefix):
                # Strip the prefix; all-uppercase remainders are lowercased
                # (to cope with different formattings: compare Slurm with
                # Galaxy).
                short_key = key[len(prefix):]
                if short_key.isupper():
                    short_key = short_key.lower()
                new_context[group][short_key] = value
                # A variable belongs to exactly one group.
                break
    context = new_context
    # Set HOST_EXPORT_DIR depending on EXPORT_DIR being absolute or relative
    if "HOST_EXPORT_DIR" not in context and "EXPORT_DIR" in context \
            and "HOST_PWD" in context:
        if context["EXPORT_DIR"].startswith("./"):
            context["HOST_EXPORT_DIR"] = context["HOST_PWD"] \
                + context["EXPORT_DIR"][1:]
        else:
            context["HOST_EXPORT_DIR"] = context["EXPORT_DIR"]
    return context
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.