commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
0f4e977f18dc1e3b9bbe2f25c3c326ac769fecbd | order size to have thumbnail in first | insight/api/async.py | insight/api/async.py | # -*- coding: utf-8 -*-
"""Async API view"""
from flask import abort, request
from redis import StrictRedis
import json
from insight.api.config import INSIGHT_ENGINES
try:
import settings
except ImportError:
settings = None
REDIS_QUEUE_KEY = getattr(settings, 'REDIS_QUEUE_KEY', 'insight')
REDIS_HOST = getattr(settings, 'REDIS_HOST', 'localhost')
REDIS_PORT = getattr(settings, 'REDIS_PORT', 6379)
REDIS_DB = getattr(settings, 'REDIS_PORT', 0)
redis = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
def view(engine):
"""Get job parameters and add it to a redis queue"""
params = {'url': request.args.get('url', None),
'engine': engine,
'callback': request.args.get('callback', None)}
# Get URL
if params['url']:
if params['url'].startswith('/'):
params['url'] = '%s%s' % (request.host_url, url[1:])
else:
abort(404)
# Get Engine
if engine not in INSIGHT_ENGINES:
abort(400, '%s engine is not installed on this server' % engine)
# Process sizes
widths = [int(x) for x in request.args.getlist('width')]
heights = [int(y) for y in request.args.getlist('height')]
nb_width = len(widths)
nb_height = len(heights)
if nb_width == 0 and nb_height == 0:
abort(400, u'You must set either width or height')
if nb_width == 0:
widths = heights
nb_width = nb_height
if nb_height == 0:
heights = widths
nb_height = nb_width
if nb_width == nb_height:
sizes = zip(widths, heights)
elif nb_width == 1:
if nb_height > 1:
sizes = zip(widths*nb_height, heights)
else:
sizes = zip(widths, heights)
elif nb_height == 1:
if nb_width > 1:
sizes = zip(widths, heights*nb_width)
else:
sizes = zip(widths, heights)
else:
abort(400, u'Number of widths and heights should be the same')
# Max number of pages to compile
try:
params['max_previews'] = int(request.args.get('pages', 20))
except:
params['max_previews'] = 20
params['sizes'] = sorted(sizes)
message = json.dumps(params)
redis.rpush(REDIS_QUEUE_KEY, message)
return "Job added to queue"
| # -*- coding: utf-8 -*-
"""Async API view"""
from flask import abort, request
from redis import StrictRedis
import json
from insight.api.config import INSIGHT_ENGINES
try:
import settings
except ImportError:
settings = None
REDIS_QUEUE_KEY = getattr(settings, 'REDIS_QUEUE_KEY', 'insight')
REDIS_HOST = getattr(settings, 'REDIS_HOST', 'localhost')
REDIS_PORT = getattr(settings, 'REDIS_PORT', 6379)
REDIS_DB = getattr(settings, 'REDIS_PORT', 0)
redis = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
def view(engine):
"""Get job parameters and add it to a redis queue"""
params = {'url': request.args.get('url', None),
'engine': engine,
'callback': request.args.get('callback', None)}
# Get URL
if params['url']:
if params['url'].startswith('/'):
params['url'] = '%s%s' % (request.host_url, url[1:])
else:
abort(404)
# Get Engine
if engine not in INSIGHT_ENGINES:
abort(400, '%s engine is not installed on this server' % engine)
# Process sizes
widths = [int(x) for x in request.args.getlist('width')]
heights = [int(y) for y in request.args.getlist('height')]
nb_width = len(widths)
nb_height = len(heights)
if nb_width == 0 and nb_height == 0:
abort(400, u'You must set either width or height')
if nb_width == 0:
widths = heights
nb_width = nb_height
if nb_height == 0:
heights = widths
nb_height = nb_width
if nb_width == nb_height:
sizes = zip(widths, heights)
elif nb_width == 1:
if nb_height > 1:
sizes = zip(widths*nb_height, heights)
else:
sizes = zip(widths, heights)
elif nb_height == 1:
if nb_width > 1:
sizes = zip(widths, heights*nb_width)
else:
sizes = zip(widths, heights)
else:
abort(400, u'Number of widths and heights should be the same')
# Max number of pages to compile
try:
params['max_previews'] = int(request.args.get('pages', 20))
except:
params['max_previews'] = 20
params['sizes'] = sizes
message = json.dumps(params)
redis.rpush(REDIS_QUEUE_KEY, message)
return "Job added to queue"
| Python | 0 |
ea545f205fd495f996b90910857af6e87da14272 | update adapter to use new scheme | courses/adapter.py | courses/adapter.py | from courses.models import Semester, Department, Course
from ccxp.fetch import Browser
def get_browser(browser=None):
if browser is None:
browser = Browser()
print(browser.get_captcha_url())
browser.set_captcha(input('Input captcha from above url: '))
def update_departments(browser=None):
if browser is None:
browser = Browser()
new = update = 0
for department in browser.get_departments():
if Department.objects.filter(abbr=department['abbr']).exists():
dbdep = Department.objects.get(abbr=department['abbr'])
dbdep.name_zh = department['name_zh']
dbdep.name_en = department['name_en']
update += 1
else:
Department.objects.create(**department)
new += 1
print(new, 'departments created,', update, 'updated.')
def update_semesters(browser=None):
if browser is None:
browser = Browser()
new = update = 0
for semester in browser.get_semesters():
if Semester.objects.filter(value=semester['value']).exists():
dbsem = Semester.objects.get(value=semester.pop('value'))
for key, value in semester.items():
setattr(dbsem, key, value)
update += 1
else:
Semester.objects.create(**semester)
new += 1
print(new, 'semesters created,', update, 'updated.')
def update_semester(browser=None, semester_code=None):
browser = get_browser(browser)
update_departments(browser)
update_semesters(browser)
if semester_code is not None:
browser.set_semester(semester_code)
browser_semester = browser.get_current_semester()
print(browser_semester)
semester = Semester.objects.get(value=semester_code)
departments = dict()
courses = dict()
for department in Department.objects.all():
cbd = browser.get_courses_by_department(department.abbr)
departments[department.abbr] = [c['no'] for c in cbd]
courses.update((c['no'], c) for c in cbd)
print(
'Collecting courses from',
format(department.abbr, '4'),
'...',
len(courses),
end='\r')
print()
semester_entry = semester.semesterentry_set.create()
try:
for n, course in enumerate(courses.values()):
semester_entry.course_set.create(**course)
print('Updating courses', '...', n, end='\r')
print()
for n, (department, course_nos) in enumerate(departments.items()):
courses = semester_entry.course_set.filter(no__in=course_nos)
ThroughModel = Course.departments.through
ThroughModel.objects.bulk_create(
ThroughModel(
department=Department.objects.get(abbr=department),
course=course,
)
for course in courses
)
print('Updating department data', '...', n, end='\r')
print()
semester_entry.ready = True
semester_entry.save()
except:
semester_entry.delete()
raise
else:
semester.semesterentry_set.filter(
semester=semester).exclude(
pk=semester_entry.pk).delete()
return browser
| from courses.models import Semester, Department, Course
from ccxp.fetch import Browser
def update_departments(browser=None):
if browser is None:
browser = Browser()
new = update = 0
for department in Browser().get_departments():
if Department.objects.filter(abbr=department['abbr']).exists():
dbdep = Department.objects.get(abbr=department['abbr'])
dbdep.name_zh = department['name_zh']
dbdep.name_en = department['name_en']
update += 1
else:
Department.objects.create(**department)
new += 1
print(new, 'departments created,', update, 'updated.')
return browser
def update_semester(browser=None, semester_code=None):
if browser is None:
browser = Browser()
print(browser.get_captcha_url())
browser.set_captcha(input('Input captcha from above url: '))
if semester_code is not None:
browser.set_semester(semester_code)
browser_semester = browser.get_current_semester()
print(browser_semester)
departments = dict()
courses = dict()
for department in Department.objects.all():
cbd = browser.get_courses_by_department(department.abbr)
departments[department.abbr] = [c['no'] for c in cbd]
courses.update((c['no'], c) for c in cbd)
print(
'Collecting courses from',
format(department.abbr, '4'),
'...',
len(courses),
end='\r')
print()
semester = Semester.objects.create(**browser_semester)
try:
for n, course in enumerate(courses.values()):
semester.course_set.create(**course)
print('Updating courses', '...', n, end='\r')
print()
for n, (department, course_nos) in enumerate(departments.items()):
courses = semester.course_set.filter(no__in=course_nos)
ThroughModel = Course.departments.through
ThroughModel.objects.bulk_create(
ThroughModel(
department=Department.objects.get(abbr=department),
course=course,
)
for course in courses
)
print('Updating department data', '...', n, end='\r')
print()
semester.ready = True
semester.save()
except:
semester.delete()
raise
else:
Semester.objects.filter(
value=semester.value).exclude(
pk=semester.pk).delete()
return browser
| Python | 0 |
0116f38160c03939306470127f0489c98aeee954 | Update nanomsg build file | shipyard/shipyard/nanomsg/build.py | shipyard/shipyard/nanomsg/build.py | """Build nanomsg from source."""
from foreman import define_parameter, define_rule, decorate_rule
from shipyard import (
ensure_directory,
git_clone,
run_commands,
install_packages,
copy_libraries,
)
(define_parameter('deps')
.with_doc("""Build-time Debian packages.""")
.with_type(list)
.with_parse(lambda pkgs: pkgs.split(','))
.with_default([
'build-essential',
'cmake',
])
)
(define_parameter('repo')
.with_doc("""Location of source repo.""")
.with_type(str)
.with_default('https://github.com/nanomsg/nanomsg.git')
)
(define_parameter('version')
.with_doc("""Version to build.""")
.with_type(str)
.with_default('1.0.0')
)
@decorate_rule('//base:build')
def build(parameters):
"""Build nanomsg from source."""
install_packages(parameters['deps'])
build_src = parameters['//base:build_src'] / 'nanomsg'
git_clone(parameters['repo'], build_src, parameters['version'])
build_dir = build_src / 'build'
if not ensure_directory(build_dir):
# Don't run `ctest .` at the moment.
run_commands(path=build_dir, commands_str='''
cmake ..
cmake --build .
sudo cmake --build . --target install
sudo ldconfig
''')
(define_rule('tapeout')
.with_doc("""Copy build artifacts.""")
.with_build(
lambda ps: copy_libraries(ps, '/usr/local/lib', ['libnanomsg']))
.depend('build')
.reverse_depend('//base:tapeout')
)
| """Build nanomsg from source."""
from foreman import define_parameter, define_rule, decorate_rule
from shipyard import (
ensure_directory,
git_clone,
run_commands,
install_packages,
copy_libraries,
)
(define_parameter('deps')
.with_doc("""Build-time Debian packages.""")
.with_type(list)
.with_parse(lambda pkgs: pkgs.split(','))
.with_default([
'build-essential',
'cmake',
])
)
(define_parameter('repo')
.with_doc("""Location of source repo.""")
.with_type(str)
.with_default('https://github.com/nanomsg/nanomsg.git')
)
(define_parameter('version')
.with_doc("""Version to build.""")
.with_type(str)
.with_default('1.0.0')
)
@decorate_rule('//base:build')
def build(parameters):
"""Build nanomsg from source."""
install_packages(parameters['deps'])
build_src = parameters['//base:build_src'] / 'nanomsg'
git_clone(parameters['repo'], build_src, parameters['version'])
build_dir = build_src / 'build'
if not ensure_directory(build_dir):
# Don't run `ctest .` at the moment.
run_commands(path=build_dir, commands_str='''
cmake ..
cmake --build .
sudo make install
''')
(define_rule('tapeout')
.with_doc("""Copy build artifacts.""")
.with_build(
lambda ps: copy_libraries(ps, '/usr/local/lib', ['libnanomsg']))
.depend('build')
.reverse_depend('//base:tapeout')
)
| Python | 0 |
fe3798cf932880b2eac14e86d2652d08fdcbd093 | Make method static to make it easier to move later. | src/tdl/client.py | src/tdl/client.py | __author__ = 'tdpreece'
__author__ = 'tdpreece'
import logging
import time
import json
from collections import OrderedDict
import stomp
logger = logging.getLogger('tdl.client')
logger.addHandler(logging.NullHandler())
class Client(object):
def __init__(self, hostname, port, username):
self.hostname = hostname
self.port = port
def go_live_with(self, implementation_map):
hosts = [(self.hostname, self.port)]
try:
conn = stomp.Connection(host_and_ports=hosts)
conn.start()
listener = MyListener(conn, implementation_map)
conn.connect(wait=True)
remote_broker = RemoteBroker(conn)
remote_broker.subscribe(listener)
time.sleep(1)
conn.disconnect()
except Exception as e:
logger.exception('Problem communicating with the broker.')
def trial_run_with(self, implementation_map):
hosts = [(self.hostname, self.port)]
conn = stomp.Connection(host_and_ports=hosts)
conn.start()
conn.connect(wait=True)
listener = PeekListener(conn, implementation_map)
remote_broker = RemoteBroker(conn)
remote_broker.subscribe(listener)
time.sleep(1)
conn.disconnect()
class Listener(stomp.ConnectionListener):
def __init__(self, conn, implementation_map):
self.conn = conn
self.remote_broker = RemoteBroker(self.conn)
self.implementation_map = implementation_map
def on_message(self, headers, message):
self.process_next_message_from(self.implementation_map, self.remote_broker, headers, message)
@staticmethod
def respond_to(implementation_map, message):
decoded_message = json.loads(message)
method = decoded_message['method']
params = decoded_message['params']
id = decoded_message['id']
implementation = implementation_map[method]
try:
result = implementation(params)
except Exception as e:
logger.info('The user implementation has thrown an exception: {}'.format(e.message))
result = None
params_str = ", ".join([str(p) for p in params])
print('id = {id}, req = {method}({params}), resp = {result}'.format(id=id, method=method, params=params_str,
result=result))
if result is not None:
response = OrderedDict([
('result', result),
('error', None),
('id', id),
])
return response
class MyListener(Listener):
@staticmethod
def process_next_message_from(implementation_map, remote_broker, headers, message):
response = Listener.respond_to(implementation_map, message)
if response is not None:
remote_broker.acknowledge(headers)
remote_broker.publish(response)
class PeekListener(Listener):
@staticmethod
def process_next_message_from(implementation_map, remote_broker, headers, message):
Listener.respond_to(implementation_map, message)
class RemoteBroker(object):
def __init__(self, conn):
self.conn = conn
def acknowledge(self, headers):
self.conn.ack(headers['message-id'], headers['subscription'])
def publish(self, response):
self.conn.send(
body=json.dumps(response, separators=(',', ':')),
destination='test.resp'
)
def subscribe(self, listener):
self.conn.set_listener('listener', listener)
self.conn.subscribe(destination='test.req', id=1, ack='client-individual')
| __author__ = 'tdpreece'
__author__ = 'tdpreece'
import logging
import time
import json
from collections import OrderedDict
import stomp
logger = logging.getLogger('tdl.client')
logger.addHandler(logging.NullHandler())
class Client(object):
def __init__(self, hostname, port, username):
self.hostname = hostname
self.port = port
def go_live_with(self, implementation_map):
hosts = [(self.hostname, self.port)]
try:
conn = stomp.Connection(host_and_ports=hosts)
conn.start()
listener = MyListener(conn, implementation_map)
conn.connect(wait=True)
remote_broker = RemoteBroker(conn)
remote_broker.subscribe(listener)
time.sleep(1)
conn.disconnect()
except Exception as e:
logger.exception('Problem communicating with the broker.')
def trial_run_with(self, implementation_map):
hosts = [(self.hostname, self.port)]
conn = stomp.Connection(host_and_ports=hosts)
conn.start()
conn.connect(wait=True)
listener = PeekListener(conn, implementation_map)
remote_broker = RemoteBroker(conn)
remote_broker.subscribe(listener)
time.sleep(1)
conn.disconnect()
class Listener(stomp.ConnectionListener):
def __init__(self, conn, implementation_map):
self.conn = conn
self.remote_broker = RemoteBroker(self.conn)
self.implementation_map = implementation_map
def on_message(self, headers, message):
self.process_next_message_from(headers, message)
@staticmethod
def respond_to(implementation_map, message):
decoded_message = json.loads(message)
method = decoded_message['method']
params = decoded_message['params']
id = decoded_message['id']
implementation = implementation_map[method]
try:
result = implementation(params)
except Exception as e:
logger.info('The user implementation has thrown an exception: {}'.format(e.message))
result = None
params_str = ", ".join([str(p) for p in params])
print('id = {id}, req = {method}({params}), resp = {result}'.format(id=id, method=method, params=params_str,
result=result))
if result is not None:
response = OrderedDict([
('result', result),
('error', None),
('id', id),
])
return response
class MyListener(Listener):
def process_next_message_from(self, headers, message):
response = self.respond_to(self.implementation_map, message)
if response is not None:
self.remote_broker.acknowledge(headers)
self.remote_broker.publish(response)
class PeekListener(Listener):
def process_next_message_from(self, headers, message):
self.respond_to(self.implementation_map, message)
class RemoteBroker(object):
def __init__(self, conn):
self.conn = conn
def acknowledge(self, headers):
self.conn.ack(headers['message-id'], headers['subscription'])
def publish(self, response):
self.conn.send(
body=json.dumps(response, separators=(',', ':')),
destination='test.resp'
)
def subscribe(self, listener):
self.conn.set_listener('listener', listener)
self.conn.subscribe(destination='test.req', id=1, ack='client-individual')
| Python | 0 |
22f3b74fec790847c3e353aad84b51252637a90f | Revert "oe.path.relative: switch to a different appraoch" | lib/oe/path.py | lib/oe/path.py | def join(*paths):
"""Like os.path.join but doesn't treat absolute RHS specially"""
import os.path
return os.path.normpath("/".join(paths))
def relative(src, dest):
""" Return a relative path from src to dest.
>>> relative("/usr/bin", "/tmp/foo/bar")
../../tmp/foo/bar
>>> relative("/usr/bin", "/usr/lib")
../lib
>>> relative("/tmp", "/tmp/foo/bar")
foo/bar
"""
import os.path
if hasattr(os.path, "relpath"):
return os.path.relpath(dest, src)
else:
destlist = os.path.normpath(dest).split(os.path.sep)
srclist = os.path.normpath(src).split(os.path.sep)
# Find common section of the path
common = os.path.commonprefix([destlist, srclist])
commonlen = len(common)
# Climb back to the point where they differentiate
relpath = [ pardir ] * (len(srclist) - commonlen)
if commonlen < len(destlist):
# Add remaining portion
relpath += destlist[commonlen:]
return sep.join(relpath)
def format_display(path, metadata):
""" Prepare a path for display to the user. """
rel = relative(metadata.getVar("TOPDIR", 1), path)
if len(rel) > len(path):
return path
else:
return rel
| def join(*paths):
"""Like os.path.join but doesn't treat absolute RHS specially"""
from os import sep
from os.path import normpath
return normpath(sep.join(paths))
def relative(src, dest=None):
""" Return a relative path from src to dest(default=cwd).
>>> relative("/usr/bin", "/tmp/foo/bar")
../../tmp/foo/bar
>>> relative("/usr/bin", "/usr/lib")
../lib
>>> relative("/tmp", "/tmp/foo/bar")
foo/bar
"""
if dest is None:
dest = getcwd()
if hasattr(os.path, "relpath"):
return os.path.relpath(dest, src)
else:
from os import getcwd, sep
from os.path import abspath, normpath
srclist = abspath(src).split(sep)
destlist = abspath(dest).split(sep)
loc = [spath == dpath for spath, dpath in zip(srclist, destlist)].index(False)
rellist = ([ ".." ] * (len(srclist) - loc)) + destlist[loc:]
return sep.join(rellist)
def format_display(path, metadata):
""" Prepare a path for display to the user. """
rel = relative(metadata.getVar("TOPDIR", 1), path)
if len(rel) > len(path):
return path
else:
return rel
| Python | 0 |
0f004830bd220ad8da1d4b151897630431d2f195 | tweak scoring functions, always | cryptools/crack.py | cryptools/crack.py | # -*- coding: utf-8 -*-
import math
import string
from stringutils import convert, freq
def brute_xor(cyphertext, st_freqs):
"""Bruteforce a given single-character XOR-encrypted cyphertext.
Statistical information is used to choose which character is the most
likely key.
:param cyphertext: the cyphertext to crack
:param st_freqs: a Counter of standard frequencies in the target language
:return: ``(key, message, score)``
"""
# standard frequency counts
st_keys = st_freqs.keys()
st_len = len(st_keys)
# store a map of each candidate and a simple frequency score
topchoice = None
lowdist = float('inf')
key = None
# bruteforce for each character
for test in (string.letters + string.digits):
dec = convert.xor(test, cyphertext)
cand_freqs = freq.get_freqs(freq.char_count(dec.lower()))
cand_keys = cand_freqs.keys()
score = 0.0
for c in cand_freqs:
# scoring
try:
st_in = st_keys.index(c)
except ValueError:
st_in = st_len
# find better scoring functions
score += abs(cand_keys.index(c) - st_in)
score += abs(st_freqs[c] - cand_freqs[c]) * 100
if lowdist > score:
lowdist = score
topchoice = dec
key = test
return key, topchoice, lowdist
| # -*- coding: utf-8 -*-
import string
from stringutils import convert, freq
def brute_xor(cyphertext, st_freqs):
"""Bruteforce a given single-character XOR-encrypted cyphertext.
Statistical information is used to choose which character is the most
likely key.
:param cyphertext: the cyphertext to crack
:param st_freqs: a Counter of standard frequencies in the target language
:return: ``(key, message, distance)``
"""
# standard frequency counts
st_keys = st_freqs.keys()
st_len = len(st_keys)
# store a map of each candidate and a simple frequency score
topchoice = None
lowdist = float('inf')
key = None
# bruteforce for each character
for test in (string.letters + string.digits):
dec = convert.xor(test, cyphertext)
cand_freqs = freq.get_freqs(freq.char_count(dec.lower()))
cand_keys = cand_freqs.keys()
distance = 0.0
for c in cand_freqs:
# use two classifiers, based on pos'n in std freq list & freq dist
try:
st_in = st_keys.index(c)
except ValueError:
st_in = st_len
distance += abs(cand_keys.index(c) - st_in)
distance += abs(st_freqs[c] - cand_freqs[c]) * 100
if lowdist > distance:
lowdist = distance
topchoice = dec
key = test
return key, topchoice, lowdist
| Python | 0 |
d48099080cedc81e70f79cbf45514cd77c5329eb | fix recorder bug | uliweb/contrib/recorder/middle_recorder.py | uliweb/contrib/recorder/middle_recorder.py | from uliweb import Middleware
from uliweb.utils.common import request_url
class RecorderrMiddle(Middleware):
ORDER = 600
def process_response(self, request, response):
from uliweb import settings, functions, json_dumps
import base64
#if not debug status it'll quit
if not settings.get_var('GLOBAL/DEBUG'):
return response
S = functions.get_model('uliwebrecorderstatus')
s = S.all().one()
if not s or s.status == 'E':
return response
if settings.get_var('ULIWEBRECORDER/response_text'):
text = response.data
else:
text = ''
#test if post_data need to convert base64
if not request.content_type:
post_data_is_text = True
else:
post_data_is_text = self.test_text(request.content_type)
if not post_data_is_text:
post_data = base64.encodestring(request.data)
else:
post_data = json_dumps(request.POST.to_dict())
#test if response.data need to convert base64
response_data_is_text = self.test_text(response.content_type)
if not response_data_is_text:
response_data = base64.encodestring(text)
else:
response_data = text
R = functions.get_model('uliwebrecorder')
if request.user:
user_id = request.user.id
else:
user_id = None
recorder = R(method=request.method,
url=request_url(request),
post_data_is_text=post_data_is_text,
post_data=post_data, user=user_id,
response_data=response_data,
response_data_is_text=response_data_is_text,
status_code=response.status_code,
)
recorder.save()
return response
def test_text(self, content_type):
from uliweb.utils.common import match
from uliweb import settings
m = content_type.split(';', 1)[0]
r = match(m, settings.get_var('ULIWEBRECORDER/text_content_types'))
return r | from uliweb import Middleware
from uliweb.utils.common import request_url
class RecorderrMiddle(Middleware):
ORDER = 600
def process_response(self, request, response):
from uliweb import settings, functions, json_dumps
import base64
#if not debug status it'll quit
if not settings.get_var('GLOBAL/DEBUG'):
return response
S = functions.get_model('uliwebrecorderstatus')
s = S.all().one()
if s and s.status == 'E':
return response
if settings.get_var('ULIWEBRECORDER/response_text'):
text = response.data
else:
text = ''
#test if post_data need to convert base64
if not request.content_type:
post_data_is_text = True
else:
post_data_is_text = self.test_text(request.content_type)
if not post_data_is_text:
post_data = base64.encodestring(request.data)
else:
post_data = json_dumps(request.POST.to_dict())
#test if response.data need to convert base64
response_data_is_text = self.test_text(response.content_type)
if not response_data_is_text:
response_data = base64.encodestring(text)
else:
response_data = text
R = functions.get_model('uliwebrecorder')
if request.user:
user_id = request.user.id
else:
user_id = None
recorder = R(method=request.method,
url=request_url(request),
post_data_is_text=post_data_is_text,
post_data=post_data, user=user_id,
response_data=response_data,
response_data_is_text=response_data_is_text,
status_code=response.status_code,
)
recorder.save()
return response
def test_text(self, content_type):
from uliweb.utils.common import match
from uliweb import settings
m = content_type.split(';', 1)[0]
r = match(m, settings.get_var('ULIWEBRECORDER/text_content_types'))
return r | Python | 0.000001 |
c6a9fcfe817128d3e7b0f52625bcd2e6c1c92f76 | fix #4491: auth1 test needs sapi for login (#4492) | tests/auth1_test.py | tests/auth1_test.py | # -*- coding: utf-8 -*-
u"""Test sirepo.auth
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
from pykern import pkcollections
from sirepo import srunit
@srunit.wrap_in_request(sim_types='myapp', want_user=False)
def test_login():
from pykern import pkunit, pkcompat
from pykern.pkunit import pkeq, pkok, pkre, pkfail, pkexcept
from sirepo import auth
import flask
import sirepo.api
import sirepo.auth.guest
import sirepo.cookie
import sirepo.http_request
import sirepo.uri_router
import sirepo.util
r = sirepo.uri_router.call_api('authState')
pkre('LoggedIn": false.*Registration": false', pkcompat.from_bytes(r.data))
auth.process_request()
with pkunit.pkexcept('SRException.*routeName=login'):
auth.logged_in_user()
with pkexcept('SRException.*routeName=login'):
auth.require_user()
sirepo.cookie.set_sentinel()
# copying examples for new user takes time
try:
# TODO(rorour): get sapi from current request
r = auth.login(sirepo.auth.guest, sim_type='myapp', sapi=sirepo.api.Base())
pkfail('expecting sirepo.util.Response')
except sirepo.util.Response as e:
r = e.sr_args.response
pkre(r'LoggedIn":\s*true.*Registration":\s*false', pkcompat.from_bytes(r.data))
u = auth.logged_in_user()
pkok(u, 'user should exist')
# guests do not require completeRegistration
auth.require_user()
| # -*- coding: utf-8 -*-
u"""Test sirepo.auth
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
from pykern import pkcollections
from sirepo import srunit
@srunit.wrap_in_request(sim_types='myapp', want_user=False)
def test_login():
from pykern import pkunit, pkcompat
from pykern.pkunit import pkeq, pkok, pkre, pkfail, pkexcept
from sirepo import auth
import flask
import sirepo.auth.guest
import sirepo.cookie
import sirepo.http_request
import sirepo.uri_router
import sirepo.util
r = sirepo.uri_router.call_api('authState')
pkre('LoggedIn": false.*Registration": false', pkcompat.from_bytes(r.data))
auth.process_request()
with pkunit.pkexcept('SRException.*routeName=login'):
auth.logged_in_user()
with pkexcept('SRException.*routeName=login'):
auth.require_user()
sirepo.cookie.set_sentinel()
# copying examples for new user takes time
try:
r = auth.login(sirepo.auth.guest, sim_type='myapp')
pkfail('expecting sirepo.util.Response')
except sirepo.util.Response as e:
r = e.sr_args.response
pkre(r'LoggedIn":\s*true.*Registration":\s*false', pkcompat.from_bytes(r.data))
u = auth.logged_in_user()
pkok(u, 'user should exist')
# guests do not require completeRegistration
auth.require_user()
| Python | 0 |
646db72eca34f6006d189f0a143d0c00388d1955 | Update viehicle.py | sketches/ev_steering_1/viehicle.py | sketches/ev_steering_1/viehicle.py | class Viehicle():
def __init__(self, x, y):
self.acceleration = PVector(0, 0)
self.velocity = PVector(0, 0)
self.location = PVector(x, y)
self.r = 8.0
self.maxspeed = 5
self.maxforce = 0.1
self.d = 25
def update(self):
self.velocity.add(self.acceleration)
self.velocity.limit(self.maxspeed)
self.location.add(self.velocity)
self.acceleration.mult(0)
def applyForce(self, force):
self.acceleration.add(force)
def seek(self, target):
desired = PVector.sub(target, self.location)
# Check Boundaries
if self.location.x < self.d:
desired = PVector(self.maxspeed, self.velocity.y)
elif self.location.x > width - self.d:
desired = PVector(-self.maxspeed, self.velocity.y)
if self.location.y < self.d:
desired = PVector(self.velocity.x, self.maxspeed)
elif self.location.y > height - self.d:
desired = PVector(self.velocity.x, -self.maxspeed)
desired.normalize()
desired.mult(self.maxspeed)
steer = PVector.sub(desired, self.velocity)
steer.limit(self.maxforce)
self.applyForce(steer)
def display(self):
theta = self.velocity.heading() + PI/2
fill(color(98, 199, 119))
stroke(1)
strokeWeight(1)
with pushMatrix():
translate(self.location.x, self.location.y)
rotate(theta)
with beginShape():
vertex(0, -self.r*2)
vertex(-self.r, self.r*2)
vertex(self.r, self.r*2)
| class Viehicle():
def __init__(self, x, y):
self.acceleration = PVector(0, 0)
self.velocity = PVector(0, 0)
self.location = PVector(x, y)
self.r = 8.0
self.maxspeed = 5
self.maxforce = 0.1
self.d = 25
def update(self):
self.velocity.add(self.acceleration)
self.velocity.limit(self.maxspeed)
self.location.add(self.velocity)
self.acceleration.mult(0)
def applyForce(self, force):
self.acceleration.add(force)
def seek(self, target):
desired = PVector.sub(target, self.location)
# Check Boundaries
if self.location.x < self.d:
desired = PVector(self.maxspeed, self.velocity.y)
elif self.location.x > width - self.d:
desired = PVector(-self.maxspeed, self.velocity.y)
if self.location.y < self.d:
desired = PVector(self.velocity.x, self.maxspeed)
elif self.location.y > height - self.d:
desired = PVector(self.velocity.x, -self.maxspeed)
desired.normalize()
desired.mult(self.maxspeed)
steer = PVector.sub(desired, self.velocity)
steer.limit(self.maxforce)
self.applyForce(steer)
def display(self):
theta = self.velocity.heading() + PI/2
fill(color(98, 199, 119))
stroke(1)
strokeWeight(1)
with pushMatrix():
translate(self.location.x, self.location.y)
rotate(theta)
with beginShape():
vertex(0, -self.r*2)
vertex(-self.r, self.r*2)
vertex(self.r, self.r*2)
| Python | 0 |
b58c8b4f9d049207b7e7e0e4de7058959df90b70 | Use sendgrid's Subject type when sending email. (#1033) | src/appengine/libs/mail.py | src/appengine/libs/mail.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for sending mail."""
from builtins import str
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import From
from sendgrid.helpers.mail import HtmlContent
from sendgrid.helpers.mail import Mail
from sendgrid.helpers.mail import Subject
from sendgrid.helpers.mail import To
from config import db_config
from metrics import logs
def send(to_email, subject, html_content):
"""Send email."""
sendgrid_api_key = db_config.get_value('sendgrid_api_key')
if not sendgrid_api_key:
logs.log_warn('Skipping email as SendGrid API key is not set in config.')
return
from_email = db_config.get_value('sendgrid_sender')
if not from_email:
logs.log_warn('Skipping email as SendGrid sender is not set in config.')
return
message = Mail(
from_email=From(str(from_email)),
to_emails=To(str(to_email)),
subject=Subject(subject),
html_content=HtmlContent(str(html_content)))
try:
sg = SendGridAPIClient(sendgrid_api_key)
response = sg.send(message)
logs.log(
'Sent email to %s.' % to_email,
status_code=response.status_code,
body=response.body,
headers=response.headers)
except Exception:
logs.log_error('Failed to send email to %s.' % to_email)
| # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for sending mail."""
from builtins import str
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import From
from sendgrid.helpers.mail import HtmlContent
from sendgrid.helpers.mail import Mail
from sendgrid.helpers.mail import To
from config import db_config
from metrics import logs
def send(to_email, subject, html_content):
"""Send email."""
sendgrid_api_key = db_config.get_value('sendgrid_api_key')
if not sendgrid_api_key:
logs.log_warn('Skipping email as SendGrid API key is not set in config.')
return
from_email = db_config.get_value('sendgrid_sender')
if not from_email:
logs.log_warn('Skipping email as SendGrid sender is not set in config.')
return
message = Mail(
from_email=From(str(from_email)),
to_emails=To(str(to_email)),
subject=subject,
html_content=HtmlContent(str(html_content)))
try:
sg = SendGridAPIClient(sendgrid_api_key)
response = sg.send(message)
logs.log(
'Sent email to %s.' % to_email,
status_code=response.status_code,
body=response.body,
headers=response.headers)
except Exception:
logs.log_error('Failed to send email to %s.' % to_email)
| Python | 0 |
425ae0042b050773e7c55f3cdc34ca3a68069238 | use test app | sacrud/pyramid_ext/tests/__init__.py | sacrud/pyramid_ext/tests/__init__.py | # -*- coding: utf-8 -*-
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine, orm
import unittest
from sacrud.tests.test_models import User, Profile, PHOTO_PATH, Base
from sacrud.action import get_relations, delete_fileobj, read, update, delete
from sacrud.action import get_pk, index, create
from pyramid.testing import DummyRequest
from StringIO import StringIO
import glob
import os
from zope.sqlalchemy import ZopeTransactionExtension
import transaction
from pyramid import testing
from pyramid.config import Configurator
from webtest.app import TestApp
from pyramid.url import route_url
class MockCGIFieldStorage(object):
pass
class SacrudTests(unittest.TestCase):
def setUp(self):
request = testing.DummyRequest()
config = testing.setUp(request=request)
config.registry.settings['sqlalchemy.url'] = "sqlite:///:memory:"
config.include('sacrud.pyramid_ext')
settings = config.registry.settings
settings['sacrud_models'] = (User, Profile)
config.scan()
engine = create_engine('sqlite:///:memory:')
DBSession = orm.scoped_session(
orm.sessionmaker(extension=ZopeTransactionExtension()))
DBSession.remove()
DBSession.configure(bind=engine)
session = DBSession
self.session = session
# To create tables, you typically do:
#User.metadata.create_all(engine)
User.metadata.create_all(engine)
Profile.metadata.create_all(engine)
self.app = config.make_wsgi_app()
self.testapp = TestApp(self.app)
def tearDown(self):
def clear_files():
for filename in glob.glob("%s/*.html" % (PHOTO_PATH, )):
os.remove(os.path.join(PHOTO_PATH, filename))
clear_files()
self.session.remove()
testing.tearDown()
def add_user(self):
user = User(u'Vasya', u'Pupkin', u"123")
self.session.add(user)
transaction.commit()
user = self.session.query(User).get(1)
return user
def test_home_view(self):
self.add_user()
request = testing.DummyRequest()
name = route_url('sa_home', request)
response = self.testapp.get(name)
self.failUnlessEqual(response.status, '200 OK')
self.failUnlessEqual("Tables" in response, True)
self.failUnlessEqual("user" in response, True)
self.failUnlessEqual("profile" in response, True)
def test_list_view(self):
request = testing.DummyRequest()
name = route_url('sa_list', request, table="user")
response = self.testapp.get(name)
self.failUnlessEqual(response.status, '200 OK')
def test_add_view(self):
pass
def test_update_view(self):
pass
def test_delete_view(self):
pass | # -*- coding: utf-8 -*-
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine, orm
import unittest
from sacrud.tests.test_models import User, Profile, PHOTO_PATH, Base
from sacrud.action import get_relations, delete_fileobj, read, update, delete
from sacrud.action import get_pk, index, create
from pyramid.testing import DummyRequest
from StringIO import StringIO
import glob
import os
from zope.sqlalchemy import ZopeTransactionExtension
import transaction
from pyramid import testing
from pyramid.config import Configurator
from webtest.app import TestApp
from pyramid.url import route_url
class MockCGIFieldStorage(object):
pass
class SacrudTests(unittest.TestCase):
def setUp(self):
request = testing.DummyRequest()
config = testing.setUp(request=request)
config.registry.settings['sqlalchemy.url'] = "sqlite:///:memory:"
config.include('sacrud.pyramid_ext')
settings = config.registry.settings
settings['sacrud_models'] = (User, Profile)
config.scan()
engine = create_engine('sqlite:///:memory:')
DBSession = orm.scoped_session(
orm.sessionmaker(extension=ZopeTransactionExtension()))
DBSession.remove()
DBSession.configure(bind=engine)
session = DBSession
self.session = session
# To create tables, you typically do:
#User.metadata.create_all(engine)
User.metadata.create_all(engine)
Profile.metadata.create_all(engine)
self.app = config.make_wsgi_app()
self.testapp = TestApp(self.app)
def tearDown(self):
def clear_files():
for filename in glob.glob("%s/*.html" % (PHOTO_PATH, )):
os.remove(os.path.join(PHOTO_PATH, filename))
clear_files()
self.session.remove()
testing.tearDown()
def add_user(self):
user = User(u'Vasya', u'Pupkin', u"123")
self.session.add(user)
transaction.commit()
user = self.session.query(User).get(1)
return user
def test_home_view(self):
self.add_user()
request = testing.DummyRequest()
name = route_url('sa_home', request)
response = self.testapp.get(name)
self.failUnlessEqual(response.status, '200 OK')
self.failUnlessEqual("Tables" in response, True)
self.failUnlessEqual("user" in response, True)
self.failUnlessEqual("profile" in response, True)
def test_list_view(self):
request = testing.DummyRequest()
name = route_url('sa_list', request, table="user")
response = self.app.get(name)
self.failUnlessEqual(response.status, '200 OK')
def test_add_view(self):
pass
def test_update_view(self):
pass
def test_delete_view(self):
pass | Python | 0.000001 |
b393b432c4f24906e1919999402ed56bde49086e | Fix test case - found another trunk tunnel on layer 0. | integration-test/546-road-sort-keys-tunnel.py | integration-test/546-road-sort-keys-tunnel.py | # tunnels at level = 0
#https://www.openstreetmap.org/way/167952621
assert_has_feature(
16, 10475, 25324, "roads",
{"kind": "highway", "kind_detail": "motorway", "id": 167952621,
"name": "Presidio Pkwy.", "is_tunnel": True, "sort_rank": 333})
# http://www.openstreetmap.org/way/259492789
assert_has_feature(
16, 19266, 24635, "roads",
{"kind": "major_road", "kind_detail": "trunk", "id": 259492789,
"name": "McCarter Hwy.", "is_tunnel": True, "sort_rank": 331})
# http://www.openstreetmap.org/way/277441866
assert_has_feature(
16, 17563, 25792, "roads",
{"kind": "major_road", "kind_detail": "trunk", "id": 277441866,
"name": "Gatlinburg Spur Road (north)", "is_tunnel": True, "sort_rank": 331})
#https://www.openstreetmap.org/way/117837633
assert_has_feature(
16, 16808, 24434, "roads",
{"kind": "major_road", "kind_detail": "primary", "id": 117837633,
"name": "Dixie Hwy.", "is_tunnel": True, "sort_rank": 330})
#https://www.openstreetmap.org/way/57782075
assert_has_feature(
16, 16812, 24391, "roads",
{"kind": "major_road", "kind_detail": "secondary", "id": 57782075,
"name": "S Halsted St.", "is_tunnel": True, "sort_rank": 329})
#https://www.openstreetmap.org/way/57708079
assert_has_feature(
16, 16813, 24386, "roads",
{"kind": "major_road", "kind_detail": "tertiary", "id": 57708079,
"name": "W 74th St.", "is_tunnel": True, "sort_rank": 327})
#https://www.openstreetmap.org/way/56393654
assert_has_feature(
16, 16808, 24362, "roads",
{"kind": "minor_road", "kind_detail": "residential", "id": 56393654,
"name": "S Paulina St.", "is_tunnel": True, "sort_rank": 310})
#https://www.openstreetmap.org/way/190835369
assert_has_feature(
16, 16814, 24363, "roads",
{"kind": "minor_road", "kind_detail": "service", "id": 190835369,
"name": "S Wong Pkwy.", "is_tunnel": True, "sort_rank": 308})
| # tunnels at level = 0
#https://www.openstreetmap.org/way/167952621
assert_has_feature(
16, 10475, 25324, "roads",
{"kind": "highway", "kind_detail": "motorway", "id": 167952621,
"name": "Presidio Pkwy.", "is_tunnel": True, "sort_rank": 333})
# http://www.openstreetmap.org/way/259492762
assert_has_feature(
16, 19267, 24634, "roads",
{"kind": "major_road", "kind_detail": "trunk", "id": 259492762,
"name": "Raymond Blvd.", "is_tunnel": True, "sort_rank": 331})
# http://www.openstreetmap.org/way/277441866
assert_has_feature(
16, 17563, 25792, "roads",
{"kind": "major_road", "kind_detail": "trunk", "id": 277441866,
"name": "Gatlinburg Spur Road (north)", "is_tunnel": True, "sort_rank": 331})
#https://www.openstreetmap.org/way/117837633
assert_has_feature(
16, 16808, 24434, "roads",
{"kind": "major_road", "kind_detail": "primary", "id": 117837633,
"name": "Dixie Hwy.", "is_tunnel": True, "sort_rank": 330})
#https://www.openstreetmap.org/way/57782075
assert_has_feature(
16, 16812, 24391, "roads",
{"kind": "major_road", "kind_detail": "secondary", "id": 57782075,
"name": "S Halsted St.", "is_tunnel": True, "sort_rank": 329})
#https://www.openstreetmap.org/way/57708079
assert_has_feature(
16, 16813, 24386, "roads",
{"kind": "major_road", "kind_detail": "tertiary", "id": 57708079,
"name": "W 74th St.", "is_tunnel": True, "sort_rank": 327})
#https://www.openstreetmap.org/way/56393654
assert_has_feature(
16, 16808, 24362, "roads",
{"kind": "minor_road", "kind_detail": "residential", "id": 56393654,
"name": "S Paulina St.", "is_tunnel": True, "sort_rank": 310})
#https://www.openstreetmap.org/way/190835369
assert_has_feature(
16, 16814, 24363, "roads",
{"kind": "minor_road", "kind_detail": "service", "id": 190835369,
"name": "S Wong Pkwy.", "is_tunnel": True, "sort_rank": 308})
| Python | 0 |
c3b92c1de1c8a2b9e0b3e585277186d5e453a06e | Copy the namespace of the root as well, otherwise it gets added to the string elements themselves and this gets messy and ugly | java/graveyard/support/scripts/copy-string.py | java/graveyard/support/scripts/copy-string.py | #!/usr/bin/env python
import os
import os.path
import sys
import lxml.etree
source_path = os.path.expanduser('~/workspace/git/android/packages/apps/Mms')
#source_path = os.path.expanduser('~/workspace/git/android/platform/packages/apps/Mms')
#source_path = os.path.expanduser('~/workspace/git/android/frameworks/base/core/res')
dest_path = os.path.expanduser('~/workspace/git/android-sms-merge/android_sms_merge')
def main():
if len(sys.argv) < 2:
sys.exit('Error: STRING is required')
string_to_copy = sys.argv[1]
source_res_path = os.path.join(source_path, 'res')
dest_res_path = os.path.join(dest_path, 'res')
# This allows lxml to output much nicer looking output
parser = lxml.etree.XMLParser(remove_blank_text=True)
for values_folder in os.listdir(source_res_path):
source_values_path = os.path.join(source_res_path, values_folder)
if (os.path.isdir(source_values_path)
and values_folder.startswith('values')):
source_strings_path = os.path.join(source_values_path, 'strings.xml')
if (os.path.isfile(source_strings_path)):
source_root = lxml.etree.parse(source_strings_path, parser)
for source_element in source_root.iter('string'):
if source_element.get('name') == string_to_copy:
dest_values_path = os.path.join(dest_res_path, values_folder)
# Create the destination values folder if necessary
if not os.path.exists(dest_values_path):
os.mkdir(dest_values_path)
dest_strings_path = os.path.join(dest_values_path, 'strings.xml')
if not os.path.exists(dest_strings_path):
root = lxml.etree.Element(
source_root.getroot().tag,
nsmap=source_root.getroot().nsmap)
root.append(source_element)
dest_root = lxml.etree.ElementTree(root)
else:
dest_root = lxml.etree.parse(dest_strings_path, parser)
# Iterate over the elements in the destination file
it = dest_root.iter('string')
while True:
try:
dest_element = it.next()
# Don't insert duplicate elements
if dest_element.attrib == source_element.attrib:
break
# Insert the new string alphabetically
if string_to_copy < dest_element.get('name'):
dest_element.addprevious(source_element)
# Don't process any more destination elements
break
except StopIteration:
# If we made it this far, add it to the end
dest_element.addnext(source_element)
break
# Write the updated XML file
dest_root.write(
dest_strings_path,
encoding='utf-8',
pretty_print=True,
xml_declaration=True,
)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import os
import os.path
import sys
import lxml.etree
source_path = os.path.expanduser('~/workspace/git/android/packages/apps/Mms')
#source_path = os.path.expanduser('~/workspace/git/android/platform/packages/apps/Mms')
#source_path = os.path.expanduser('~/workspace/git/android/frameworks/base/core/res')
dest_path = os.path.expanduser('~/workspace/git/android-sms-merge/android_sms_merge')
def main():
if len(sys.argv) < 2:
sys.exit('Error: STRING is required')
string_to_copy = sys.argv[1]
source_res_path = os.path.join(source_path, 'res')
dest_res_path = os.path.join(dest_path, 'res')
# This allows lxml to output much nicer looking output
parser = lxml.etree.XMLParser(remove_blank_text=True)
for values_folder in os.listdir(source_res_path):
source_values_path = os.path.join(source_res_path, values_folder)
if (os.path.isdir(source_values_path)
and values_folder.startswith('values')):
source_strings_path = os.path.join(source_values_path, 'strings.xml')
if (os.path.isfile(source_strings_path)):
source_root = lxml.etree.parse(source_strings_path, parser)
for source_element in source_root.iter('string'):
if source_element.get('name') == string_to_copy:
dest_values_path = os.path.join(dest_res_path, values_folder)
# Create the destination values folder if necessary
if not os.path.exists(dest_values_path):
os.mkdir(dest_values_path)
dest_strings_path = os.path.join(dest_values_path, 'strings.xml')
if not os.path.exists(dest_strings_path):
root = lxml.etree.Element('resources')
root.append(source_element)
dest_root = lxml.etree.ElementTree(root)
else:
dest_root = lxml.etree.parse(dest_strings_path, parser)
# Iterate over the elements in the destination file
it = dest_root.iter('string')
while True:
try:
dest_element = it.next()
# Don't insert duplicate elements
if dest_element.attrib == source_element.attrib:
break
# Insert the new string alphabetically
if string_to_copy < dest_element.get('name'):
dest_element.addprevious(source_element)
# Don't process any more destination elements
break
except StopIteration:
# If we made it this far, add it to the end
dest_element.addnext(source_element)
break
# Write the updated XML file
dest_root.write(
dest_strings_path,
encoding='utf-8',
pretty_print=True,
xml_declaration=True,
)
if __name__ == '__main__':
main()
| Python | 0 |
90c42beafe4dc5168224fd96cf7891695c7cf346 | fix save default values | ini_tools/ini_file.py | ini_tools/ini_file.py | import os
from config_parser import WZConfigParser
from profile_loader import Profile, get_profiles_name_list
from generate_ini_header import get_header
class WZException(Exception):
pass
class IniFile(dict):
profiles = get_profiles_name_list()
def get_profile_for_ini(self):
name = os.path.basename(self.path)[:-4]
if name in self.profiles:
return Profile(name)
# hack for research
elif name[:-5] in self.profiles:
return Profile(name[:-5])
else:
raise WZException("Can't find profile for %s" % self.path)
def __init__(self, path, data_dict=None):
self.path = path
self.name = os.path.basename(path)[:-4]
self.profile = self.get_profile_for_ini()
if data_dict:
self.update(data_dict)
else:
config = WZConfigParser()
config.load(path)
for section_name in config.sections():
self[section_name] = dict(config.items(section_name))
def save(self, filename=None):
if filename is None:
filename = self.path
text_list = [get_header(self.profile)]
for section_name, section_items in self.items():
section_list = ['', '[%s]' % section_name]
for item in sorted(section_items.items(), key=lambda x: self.profile.field_order.index(x[0])):
prepared_value = self.prepare_value(item)
if prepared_value:
section_list.append(prepared_value)
text_list.extend(section_list)
with open(filename, 'w') as fd:
fd.write('\n'.join(text_list))
def prepare_value(self, item):
key, val = item
field = self.profile[key]
if str(field.get('default')) == str(val):
return None
if field['type'] == 'pie':
return "%s = %s" % (key, val.lower())
return "%s = %s" % item
#if __name__ == '__main__':
# ini_file = IniFile("G:/warzone2100/data/base/stats/propulsion.ini")
# with open('tmp.ini', 'w') as fd:
# ini_file.save(fd)
@classmethod
def from_dict(cls, data_dict, dest_file):
return IniFile() | import os
from config_parser import WZConfigParser
from profile_loader import Profile, get_profiles_name_list
from generate_ini_header import get_header
class WZException(Exception):
pass
class IniFile(dict):
profiles = get_profiles_name_list()
def get_profile_for_ini(self):
name = os.path.basename(self.path)[:-4]
if name in self.profiles:
return Profile(name)
# hack for research
elif name[:-5] in self.profiles:
return Profile(name[:-5])
else:
raise WZException("Can't find profile for %s" % self.path)
def __init__(self, path, data_dict=None):
self.path = path
self.name = os.path.basename(path)[:-4]
self.profile = self.get_profile_for_ini()
if data_dict:
self.update(data_dict)
else:
config = WZConfigParser()
config.load(path)
for section_name in config.sections():
self[section_name] = dict(config.items(section_name))
def save(self, filename=None):
if filename is None:
filename = self.path
text_list = [get_header(self.profile)]
for section_name, section_items in self.items():
section_list = ['', '[%s]' % section_name]
for item in sorted(section_items.items(), key=lambda x: self.profile.field_order.index(x[0])):
prepared_value = self.prepare_value(item)
if prepared_value:
section_list.append(prepared_value)
text_list.extend(section_list)
with open(filename, 'w') as fd:
fd.write('\n'.join(text_list))
def prepare_value(self, item):
key, val = item
field = self.profile[key]
if str(field.get('default')) == val:
return None
if field['type'] == 'pie':
return "%s = %s" % (key, val.lower())
return "%s = %s" % item
#if __name__ == '__main__':
# ini_file = IniFile("G:/warzone2100/data/base/stats/propulsion.ini")
# with open('tmp.ini', 'w') as fd:
# ini_file.save(fd)
@classmethod
def from_dict(cls, data_dict, dest_file):
return IniFile() | Python | 0.000001 |
8f898be3d642bb4690e19b7e91ba087fba68dac0 | Fix bug that conditions are ignored other than last [ignore_properties add-on] | jumeaux/addons/judgement/ignore_properties.py | jumeaux/addons/judgement/ignore_properties.py | # -*- coding:utf-8 -*-
"""For example of config
judgement:
- name: jumeaux.addons.judgement.ignore_properties
config:
ignores:
- title: reason
image: https://......png
link: https://......
conditions:
- path: '/route'
changed:
- root['items'][0]
- root['unit']
- path: '/repositories'
added:
- root['items'][\d+]
removed:
- root['items']
"""
import logging
import re
from fn import _
from owlmixin import OwlMixin
from owlmixin.owlcollections import TList
from typing import Optional, List
from jumeaux.addons.judgement import JudgementExecutor
from jumeaux.models import JudgementAddOnPayload, DiffKeys
logger = logging.getLogger(__name__)
class Condition(OwlMixin):
path: Optional[str]
added: TList[str]
removed: TList[str]
changed: TList[str]
def __init__(self, path: Optional[str]=None, added: Optional[List[str]]=None, removed: Optional[List[str]]=None, changed: Optional[List[str]]=None):
self.path = path
self.added = TList(added) if added is not None else TList()
self.removed = TList(removed) if removed is not None else TList()
self.changed = TList(changed) if changed is not None else TList()
class Ignore(OwlMixin):
title: Optional[str]
conditions: TList[Condition]
image: Optional[str]
link: Optional[str]
def __init__(self, title: str, conditions: TList[Condition], image: Optional[str]=None, link: Optional[str]=None):
self.title = title
self.conditions = Condition.from_dicts(conditions)
self.image = image
self.link = link
class Config(OwlMixin):
ignores: TList[Ignore]
def __init__(self, ignores):
self.ignores = Ignore.from_dicts(ignores)
class Executor(JudgementExecutor):
config: Config
def __init__(self, config: dict):
self.config = Config.from_dict(config or {})
def exec(self, payload: JudgementAddOnPayload):
if payload.regard_as_same or payload.diff_keys is None:
return payload
def filter_diff_keys(diff_keys: DiffKeys, condition: Condition) -> DiffKeys:
if condition.path and not re.search(condition.path, payload.path):
return diff_keys
return DiffKeys.from_dict({
"added": diff_keys.added.reject(
lambda dk: condition.added.any(lambda ig: re.search(ig, dk))
),
"removed": diff_keys.removed.reject(
lambda dk: condition.removed.any(lambda ig: re.search(ig, dk))
),
"changed": diff_keys.changed.reject(
lambda dk: condition.changed.any(lambda ig: re.search(ig, dk))
)
})
filtered_diff_keys = self.config.ignores.flat_map(_.conditions).reduce(filter_diff_keys, payload.diff_keys)
return JudgementAddOnPayload.from_dict({
"path": payload.path,
"qs": payload.qs,
"headers": payload.headers,
"res_one": payload.res_one,
"res_other": payload.res_other,
"diff_keys": payload.diff_keys.to_dict(),
"regard_as_same": not (filtered_diff_keys.added or filtered_diff_keys.removed or filtered_diff_keys.changed)
})
| # -*- coding:utf-8 -*-
"""For example of config
judgement:
- name: jumeaux.addons.judgement.ignore_properties
config:
ignores:
- title: reason
image: https://......png
link: https://......
conditions:
- path: '/route'
changed:
- root['items'][0]
- root['unit']
- path: '/repositories'
added:
- root['items'][\d+]
removed:
- root['items']
"""
import logging
import re
from fn import _
from owlmixin import OwlMixin
from owlmixin.owlcollections import TList
from typing import Optional, List
from jumeaux.addons.judgement import JudgementExecutor
from jumeaux.models import JudgementAddOnPayload, DiffKeys
logger = logging.getLogger(__name__)
class Condition(OwlMixin):
path: Optional[str]
added: TList[str]
removed: TList[str]
changed: TList[str]
def __init__(self, path: Optional[str]=None, added: Optional[List[str]]=None, removed: Optional[List[str]]=None, changed: Optional[List[str]]=None):
self.path = path
self.added = TList(added) if added is not None else TList()
self.removed = TList(removed) if removed is not None else TList()
self.changed = TList(changed) if changed is not None else TList()
class Ignore(OwlMixin):
title: Optional[str]
conditions: TList[Condition]
image: Optional[str]
link: Optional[str]
def __init__(self, title: str, conditions: TList[Condition], image: Optional[str]=None, link: Optional[str]=None):
self.title = title
self.conditions = Condition.from_dicts(conditions)
self.image = image
self.link = link
class Config(OwlMixin):
ignores: TList[Ignore]
def __init__(self, ignores):
self.ignores = Ignore.from_dicts(ignores)
class Executor(JudgementExecutor):
config: Config
def __init__(self, config: dict):
self.config = Config.from_dict(config or {})
def exec(self, payload: JudgementAddOnPayload):
if payload.regard_as_same or payload.diff_keys is None:
return payload
def filter_diff_keys(diff_keys: DiffKeys, condition: Condition) -> DiffKeys:
if condition.path and not re.search(condition.path, payload.path):
return diff_keys
return DiffKeys.from_dict({
"added": payload.diff_keys.added.reject(
lambda dk: condition.added.any(lambda ig: re.search(ig, dk))
),
"removed": payload.diff_keys.removed.reject(
lambda dk: condition.removed.any(lambda ig: re.search(ig, dk))
),
"changed": payload.diff_keys.changed.reject(
lambda dk: condition.changed.any(lambda ig: re.search(ig, dk))
)
})
filtered_diff_keys = self.config.ignores.flat_map(_.conditions).reduce(filter_diff_keys, payload.diff_keys)
return JudgementAddOnPayload.from_dict({
"path": payload.path,
"qs": payload.qs,
"headers": payload.headers,
"res_one": payload.res_one,
"res_other": payload.res_other,
"diff_keys": payload.diff_keys.to_dict(),
"regard_as_same": not (filtered_diff_keys.added or filtered_diff_keys.removed or filtered_diff_keys.changed)
})
| Python | 0 |
59bdc15846158db9123a764f87cdb0dd1a959a22 | remove print statements from unit test | test_qudt4dt.py | test_qudt4dt.py | __author__ = 'adam'
#import urllib
#import time
#from subprocess import Popen
#import shlex
#import os
import fusekiutils
import qudt4dt
import unittest
class TestQudt(unittest.TestCase):
def setUp(self,result = None):
self.barb = qudt4dt.Barbara("http://localhost:3030")
def test_get_unit_class(self):
self.assertEqual(self.barb.get_unit_class(u'http://qudt.org/vocab/unit#DegreeCelsius'),
[u'http://qudt.org/schema/qudt#TemperatureUnit'])
def test_get_units_in_class(self):
result = self.barb.get_units_in_class(u'http://qudt.org/schema/qudt#TemperatureUnit')
units = [u'http://qudt.org/vocab/unit#DegreeFahrenheit',
u'http://qudt.org/vocab/unit#Kelvin',
u'http://qudt.org/vocab/unit#PlanckTemperature',
u'http://qudt.org/vocab/unit#DegreeCentigrade',
u'http://qudt.org/vocab/unit#DegreeCelsius',
u'http://qudt.org/vocab/unit#DegreeRankine']
self.assertItemsEqual(result, units)
def test_get_units_in_same_class(self):
result = self.barb.get_units_in_same_class(u'http://qudt.org/vocab/unit#DegreeCelsius')
units = [u'http://qudt.org/vocab/unit#DegreeFahrenheit',
u'http://qudt.org/vocab/unit#Kelvin',
u'http://qudt.org/vocab/unit#PlanckTemperature',
u'http://qudt.org/vocab/unit#DegreeCentigrade',
u'http://qudt.org/vocab/unit#DegreeCelsius',
u'http://qudt.org/vocab/unit#DegreeRankine']
self.assertItemsEqual(result, units)
def test_convert_value(self):
convert_value = self.barb.convert_value
degreeCelsius = u'http://qudt.org/vocab/unit#DegreeCelsius'
degreeFahrenheit = u'http://qudt.org/vocab/unit#DegreeFahrenheit'
inch = u'http://qudt.org/vocab/unit#Inc'
temperatureUnit = u'http://qudt.org/schema/qudt#TemperatureUnit'
self.assertAlmostEqual(convert_value(degreeCelsius,degreeFahrenheit,100),212.003333333)
self.assertRaises(ValueError,convert_value,degreeFahrenheit,inch,300)
self.assertRaises(ValueError,convert_value,temperatureUnit,degreeFahrenheit,300)
def main():
try:
print "launching fuseki..."
fuseki = fusekiutils.LaunchFuseki()
unittest.main()
print ""
finally:
print "Terminating fuseki..."
fuseki.terminate()
if __name__ == '__main__':
main()
| __author__ = 'adam'
#import urllib
#import time
#from subprocess import Popen
#import shlex
#import os
import fusekiutils
import qudt4dt
import unittest
class TestQudt(unittest.TestCase):
def setUp(self,result = None):
self.barb = qudt4dt.Barbara("http://localhost:3030")
def test_get_unit_class(self):
self.assertEqual(self.barb.get_unit_class(u'http://qudt.org/vocab/unit#DegreeCelsius'),
[u'http://qudt.org/schema/qudt#TemperatureUnit'])
def test_get_units_in_class(self):
result = self.barb.get_units_in_class(u'http://qudt.org/schema/qudt#TemperatureUnit')
units = [u'http://qudt.org/vocab/unit#DegreeFahrenheit',
u'http://qudt.org/vocab/unit#Kelvin',
u'http://qudt.org/vocab/unit#PlanckTemperature',
u'http://qudt.org/vocab/unit#DegreeCentigrade',
u'http://qudt.org/vocab/unit#DegreeCelsius',
u'http://qudt.org/vocab/unit#DegreeRankine']
print result
self.assertItemsEqual(result, units)
def test_get_units_in_same_class(self):
result = self.barb.get_units_in_same_class(u'http://qudt.org/vocab/unit#DegreeCelsius')
units = [u'http://qudt.org/vocab/unit#DegreeFahrenheit',
u'http://qudt.org/vocab/unit#Kelvin',
u'http://qudt.org/vocab/unit#PlanckTemperature',
u'http://qudt.org/vocab/unit#DegreeCentigrade',
u'http://qudt.org/vocab/unit#DegreeCelsius',
u'http://qudt.org/vocab/unit#DegreeRankine']
print result
self.assertItemsEqual(result, units)
def test_convert_value(self):
convert_value = self.barb.convert_value
degreeCelsius = u'http://qudt.org/vocab/unit#DegreeCelsius'
degreeFahrenheit = u'http://qudt.org/vocab/unit#DegreeFahrenheit'
inch = u'http://qudt.org/vocab/unit#Inc'
temperatureUnit = u'http://qudt.org/schema/qudt#TemperatureUnit'
self.assertAlmostEqual(convert_value(degreeCelsius,degreeFahrenheit,100),212.003333333)
self.assertRaises(ValueError,convert_value,degreeFahrenheit,inch,300)
self.assertRaises(ValueError,convert_value,temperatureUnit,degreeFahrenheit,300)
def main():
try:
print "launching fuseki..."
fuseki = fusekiutils.LaunchFuseki()
unittest.main()
print ""
finally:
print "Terminating fuseki..."
fuseki.terminate()
if __name__ == '__main__':
main()
| Python | 0.000018 |
3640cb895bb93d144a615d4b745af135016d67af | order imports | src/plone.server/plone/server/__init__.py | src/plone.server/plone/server/__init__.py | # -*- encoding: utf-8 -*-
# load the patch before anything else.
from plone.server import patch # noqa
from plone.server import interfaces
from plone.server import languages
# load defined migrations
from plone.server.migrate import migrations # noqa
from zope.i18nmessageid import MessageFactory
import collections
import logging
# create logging
logger = logging.getLogger('plone.server')
_ = MessageFactory('plone')
app_settings = {
"databases": [],
"address": 8080,
"static": [],
"utilities": [],
"root_user": {
"password": ""
},
"auth_extractors": [
"plone.server.auth.extractors.BearerAuthPolicy",
"plone.server.auth.extractors.BasicAuthPolicy",
"plone.server.auth.extractors.WSTokenAuthPolicy",
],
"auth_user_identifiers": [],
"auth_token_validators": [
"plone.server.auth.validators.SaltedHashPasswordValidator",
"plone.server.auth.validators.JWTValidator"
],
"default_layers": [
interfaces.IDefaultLayer
],
"http_methods": {
"PUT": interfaces.IPUT,
"POST": interfaces.IPOST,
"PATCH": interfaces.IPATCH,
"DELETE": interfaces.IDELETE,
"GET": interfaces.IGET,
"OPTIONS": interfaces.IOPTIONS,
"HEAD": interfaces.IHEAD,
"CONNECT": interfaces.ICONNECT
},
"renderers": collections.OrderedDict({
"application/json": interfaces.IRendererFormatJson,
"text/html": interfaces.IRendererFormatHtml,
"*/*": interfaces.IRendererFormatRaw
}),
"languages": {
"en": languages.IEN,
"en-us": languages.IENUS,
"ca": languages.ICA
},
"default_permission": 'zope.Public',
"available_addons": {},
"api_definition": {},
"cors": {
"allow_origin": ["http://localhost:8080"],
"allow_methods": ["GET", "POST", "DELETE", "HEAD", "PATCH", "OPTIONS"],
"allow_headers": ["*"],
"expose_headers": ["*"],
"allow_credentials": True,
"max_age": 3660
},
"jwt": {
"secret": "foobar",
"algorithm": "HS256"
}
}
SCHEMA_CACHE = {}
PERMISSIONS_CACHE = {}
FACTORY_CACHE = {}
BEHAVIOR_CACHE = {}
| # -*- encoding: utf-8 -*-
# create logging
import logging
logger = logging.getLogger('plone.server')
from zope.i18nmessageid import MessageFactory # noqa
_ = MessageFactory('plone')
# load the patch before anything else.
from plone.server import patch # noqa
# load defined migrations
from plone.server.migrate import migrations # noqa
from plone.server import interfaces
from plone.server import languages
import collections
app_settings = {
"databases": [],
"address": 8080,
"static": [],
"utilities": [],
"root_user": {
"password": ""
},
"auth_extractors": [
"plone.server.auth.extractors.BearerAuthPolicy",
"plone.server.auth.extractors.BasicAuthPolicy",
"plone.server.auth.extractors.WSTokenAuthPolicy",
],
"auth_user_identifiers": [],
"auth_token_validators": [
"plone.server.auth.validators.SaltedHashPasswordValidator",
"plone.server.auth.validators.JWTValidator"
],
"default_layers": [
interfaces.IDefaultLayer
],
"http_methods": {
"PUT": interfaces.IPUT,
"POST": interfaces.IPOST,
"PATCH": interfaces.IPATCH,
"DELETE": interfaces.IDELETE,
"GET": interfaces.IGET,
"OPTIONS": interfaces.IOPTIONS,
"HEAD": interfaces.IHEAD,
"CONNECT": interfaces.ICONNECT
},
"renderers": collections.OrderedDict({
"application/json": interfaces.IRendererFormatJson,
"text/html": interfaces.IRendererFormatHtml,
"*/*": interfaces.IRendererFormatRaw
}),
"languages": {
"en": languages.IEN,
"en-us": languages.IENUS,
"ca": languages.ICA
},
"default_permission": 'zope.Public',
"available_addons": {},
"api_definition": {},
"cors": {
"allow_origin": ["http://localhost:8080"],
"allow_methods": ["GET", "POST", "DELETE", "HEAD", "PATCH", "OPTIONS"],
"allow_headers": ["*"],
"expose_headers": ["*"],
"allow_credentials": True,
"max_age": 3660
},
"jwt": {
"secret": "foobar",
"algorithm": "HS256"
}
}
SCHEMA_CACHE = {}
PERMISSIONS_CACHE = {}
FACTORY_CACHE = {}
BEHAVIOR_CACHE = {}
| Python | 0.000002 |
17c2d6baadfa91985ed8f3d32754ee7d30ba87d9 | Use "1" as ui3 cookie value to not confuse IA analytics. | internetarchive/search.py | internetarchive/search.py | import requests.sessions
from . import session
# Search class
# ________________________________________________________________________________________
class Search(object):
    """This class represents an archive.org item search. You can use
    this class to search for archive.org items using the advanced
    search engine.

    Usage::

        >>> import internetarchive.search
        >>> search = internetarchive.search.Search('(uploader:jake@archive.org)')
        >>> for result in search:
        ...     print(result['identifier'])
    """

    # init()
    # ____________________________________________________________________________________
    def __init__(self, query, fields=['identifier'], params={}, config=None, v2=False):
        # NOTE(review): mutable default arguments (fields/params) are shared
        # across calls; safe only while they are never mutated — they are
        # copied/read-only below, but worth confirming this stays true.
        self.session = session.ArchiveSession(config)
        self.http_session = requests.sessions.Session()
        self.url = 'http://archive.org/advancedsearch.php'
        default_params = dict(
            q=query,
            rows=100,
        )
        if v2:
            # Use "1" as value to not confuse IA analytics.
            self.session.cookies['ui3'] = '1'
            self.http_session.cookies = self.session.cookies
        # Caller-supplied params override the defaults.
        self.params = default_params.copy()
        self.params.update(params)
        if not self.params.get('output'):
            self.params['output'] = 'json'
        # Request each field via Solr-style fl[N] query parameters.
        for k, v in enumerate(fields):
            key = 'fl[{0}]'.format(k)
            self.params[key] = v
        # Issue one preliminary request to learn the total hit count.
        self._search_info = self._get_search_info()
        self.num_found = self._search_info['response']['numFound']
        self.query = self._search_info['responseHeader']['params']['q']

    # __repr__()
    # ____________________________________________________________________________________
    def __repr__(self):
        return ('Search(query={query!r}, '
                'num_found={num_found!r})'.format(**self.__dict__))

    # _get_search_info()
    # ____________________________________________________________________________________
    def _get_search_info(self):
        """Fetch search metadata (hit count, echoed query) without the docs."""
        info_params = self.params.copy()
        info_params['rows'] = 0
        # NOTE(review): info_params is prepared but self.params is sent —
        # presumably info_params was intended here; confirm before changing.
        r = self.http_session.get(self.url, params=self.params)
        results = r.json()
        del results['response']['docs']
        return results

    # __iter__()
    # ____________________________________________________________________________________
    def __iter__(self):
        """Generator for iterating over search results"""
        # NOTE(review): relies on Python 2 integer division; on Python 3 this
        # would yield a float and range() would raise — confirm target version.
        total_pages = ((self.num_found / self.params['rows']) + 2)
        for page in range(1, total_pages):
            self.params['page'] = page
            r = self.http_session.get(self.url, params=self.params)
            results = r.json()
            for doc in results['response']['docs']:
                yield doc
| import requests.sessions
from . import session
# Search class
# ________________________________________________________________________________________
class Search(object):
"""This class represents an archive.org item search. You can use
this class to search for archive.org items using the advanced
search engine.
Usage::
>>> import internetarchive.search
>>> search = internetarchive.search.Search('(uploader:jake@archive.org)')
>>> for result in search:
... print(result['identifier'])
"""
# init()
# ____________________________________________________________________________________
def __init__(self, query, fields=['identifier'], params={}, config=None, v2=False):
self.session = session.ArchiveSession(config)
self.http_session = requests.sessions.Session()
self.url = 'http://archive.org/advancedsearch.php'
default_params = dict(
q=query,
rows=100,
)
if v2:
self.session.cookies['ui3'] = 'ia-wrapper'
self.http_session.cookies = self.session.cookies
self.params = default_params.copy()
self.params.update(params)
if not self.params.get('output'):
self.params['output'] = 'json'
for k, v in enumerate(fields):
key = 'fl[{0}]'.format(k)
self.params[key] = v
self._search_info = self._get_search_info()
self.num_found = self._search_info['response']['numFound']
self.query = self._search_info['responseHeader']['params']['q']
# __repr__()
# ____________________________________________________________________________________
def __repr__(self):
return ('Search(query={query!r}, '
'num_found={num_found!r})'.format(**self.__dict__))
# _get_search_info()
# ____________________________________________________________________________________
def _get_search_info(self):
info_params = self.params.copy()
info_params['rows'] = 0
r = self.http_session.get(self.url, params=self.params)
results = r.json()
del results['response']['docs']
return results
# __iter__()
# ____________________________________________________________________________________
def __iter__(self):
"""Generator for iterating over search results"""
total_pages = ((self.num_found / self.params['rows']) + 2)
for page in range(1, total_pages):
self.params['page'] = page
r = self.http_session.get(self.url, params=self.params)
results = r.json()
for doc in results['response']['docs']:
yield doc
| Python | 0 |
e0b298f1df9a2d4e8868d6f055a27b5fb0bb8296 | Add helper method to model | links/maker/models.py | links/maker/models.py | import uuid
from datetime import datetime
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from django.utils import timezone
from maker.managers import (MakerManager,
PasswordResetTokenManager,
EmailChangeTokenManager)
def make_token():
    """Generate a fresh random token: a UUID4 in canonical string form."""
    token = uuid.uuid4()
    return '{0}'.format(token)
class Maker(PermissionsMixin, AbstractBaseUser):
    """Custom Django user model; ``identifier`` (an email) is the login name."""

    REGULAR = 'RG'
    SIGNUP_TYPES = (
        (REGULAR, 'Regular'),
    )
    # identifier doubles as the username (see USERNAME_FIELD below) and is
    # kept in sync with email by change_email().
    identifier = models.CharField(max_length=200, unique=True)
    email = models.EmailField()
    is_admin = models.BooleanField(default=False)
    joined = models.DateTimeField(auto_now_add=True)
    # Set once the address has been confirmed via an EmailVerificationToken.
    verified = models.BooleanField(default=False)
    photo_url = models.URLField(blank=True)
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    bio = models.TextField(blank=True)
    signup_type = models.CharField(max_length=2, choices=SIGNUP_TYPES,
                                   default=REGULAR)

    USERNAME_FIELD = 'identifier'
    REQUIRED_FIELDS = ['first_name', 'last_name']

    objects = MakerManager()

    def __unicode__(self):
        return self.identifier

    @property
    def is_staff(self):
        # Django admin checks is_staff; mirror our is_admin flag.
        return self.is_admin

    def get_short_name(self):
        return self.first_name

    def get_full_name(self):
        return "{0} {1}".format(self.first_name, self.last_name)

    def change_email(self, new_email):
        """Update the email address, keeping the login identifier in sync."""
        self.email = new_email
        self.identifier = new_email
        self.save()

    def verify_email(self):
        """Mark this account's email address as confirmed and persist it."""
        self.verified = True
        self.save()
class EmailVerificationToken(models.Model):
    """One-time token mailed to a Maker to confirm their email address."""

    maker = models.ForeignKey('Maker')
    # Random UUID4 string generated per row by make_token.
    token = models.CharField(max_length=50, default=make_token)
    date = models.DateTimeField(auto_now_add=True)

    class Meta:
        verbose_name = 'Email Verification Token'
        verbose_name_plural = 'Email Verification Tokens'

    def __unicode__(self):
        return self.maker.identifier
class PasswordResetToken(models.Model):
    """One-time token mailed to a Maker to authorize a password reset."""

    maker = models.ForeignKey('Maker')
    # Random UUID4 string generated per row by make_token.
    token = models.CharField(max_length=50, default=make_token)
    date = models.DateTimeField(auto_now_add=True)

    objects = PasswordResetTokenManager()

    def __unicode__(self):
        return self.maker.identifier

    @property
    def is_valid(self):
        # NOTE(review): creation date is always in the past, so this is True
        # for every saved token and it never expires — confirm whether the
        # intent was an age bound (e.g. created within the last N hours).
        return self.date < timezone.now()
class EmailChangeToken(models.Model):
    """One-time token confirming a pending change to ``new_email``."""

    maker = models.ForeignKey('Maker')
    # Address the account will switch to once the token is redeemed.
    new_email = models.EmailField()
    # Random UUID4 string generated per row by make_token.
    token = models.CharField(max_length=50, default=make_token)
    date = models.DateTimeField(auto_now_add=True)

    objects = EmailChangeTokenManager()

    def __unicode__(self):
        return self.maker.identifier

    @property
    def is_valid(self):
        # NOTE(review): always True for saved tokens (see PasswordResetToken);
        # confirm whether an expiry window was intended.
        return self.date < timezone.now()
| import uuid
from datetime import datetime
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from django.utils import timezone
from maker.managers import (MakerManager,
PasswordResetTokenManager,
EmailChangeTokenManager)
def make_token():
return str(uuid.uuid4())
class Maker(PermissionsMixin, AbstractBaseUser):
REGULAR = 'RG'
SIGNUP_TYPES = (
(REGULAR, 'Regular'),
)
identifier = models.CharField(max_length=200, unique=True)
email = models.EmailField()
is_admin = models.BooleanField(default=False)
joined = models.DateTimeField(auto_now_add=True)
verified = models.BooleanField(default=False)
photo_url = models.URLField(blank=True)
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
bio = models.TextField(blank=True)
signup_type = models.CharField(max_length=2, choices=SIGNUP_TYPES,
default=REGULAR)
USERNAME_FIELD = 'identifier'
REQUIRED_FIELDS = ['first_name', 'last_name']
objects = MakerManager()
def __unicode__(self):
return self.identifier
@property
def is_staff(self):
return self.is_admin
def get_short_name(self):
return self.first_name
def get_full_name(self):
return "{0} {1}".format(self.first_name, self.last_name)
def change_email(self, new_email):
self.email = new_email
self.identifier = new_email
self.save()
class EmailVerificationToken(models.Model):
maker = models.ForeignKey('Maker')
token = models.CharField(max_length=50, default=make_token)
date = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = 'Email Verification Token'
verbose_name_plural = 'Email Verification Tokens'
def __unicode__(self):
return self.maker.identifier
class PasswordResetToken(models.Model):
maker = models.ForeignKey('Maker')
token = models.CharField(max_length=50, default=make_token)
date = models.DateTimeField(auto_now_add=True)
objects = PasswordResetTokenManager()
def __unicode__(self):
return self.maker.identifier
@property
def is_valid(self):
return self.date < timezone.now()
class EmailChangeToken(models.Model):
maker = models.ForeignKey('Maker')
new_email = models.EmailField()
token = models.CharField(max_length=50, default=make_token)
date = models.DateTimeField(auto_now_add=True)
objects = EmailChangeTokenManager()
def __unicode__(self):
return self.maker.identifier
@property
def is_valid(self):
return self.date < timezone.now()
| Python | 0.000001 |
63cdfe0de155ed32af0332310340b4d57dcef145 | bump version for release | stdeb/__init__.py | stdeb/__init__.py | # setuptools is required for distutils.commands plugin we use
import logging
# setuptools import is required so its distutils.commands plugin machinery
# is registered as a side effect of importing this package.
import setuptools

# Package version string (also consumed by setup.py).
__version__ = '0.4.3'

# Package-level logger that echoes INFO-and-above messages to stderr,
# formatted as the bare message text.
log = logging.getLogger('stdeb')
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
| # setuptools is required for distutils.commands plugin we use
import logging
import setuptools
__version__ = '0.4.2.git'
log = logging.getLogger('stdeb')
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
| Python | 0 |
a88a01f9e6ba01be7d68719f493405ea584b1566 | Fix merge fallout | lib/aquilon/worker/commands/search_machine.py | lib/aquilon/worker/commands/search_machine.py | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq search machine`."""
from sqlalchemy.orm import aliased, subqueryload, joinedload, lazyload
from aquilon.aqdb.model import (Machine, Cpu, Cluster, ClusterResource, Share,
VirtualNasDisk, Disk, MetaCluster, DnsRecord)
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.dbwrappers.hardware_entity import (
search_hardware_entity_query)
from aquilon.worker.formats.list import StringAttributeList
class CommandSearchMachine(BrokerCommand):
    """Implements ``aq search machine``: builds a SQLAlchemy query over
    Machine from the supplied filters and returns matching rows."""

    required_parameters = []

    def render(self, session, hostname, machine, cpuname, cpuvendor, cpuspeed,
               cpucount, memory, cluster, share, fullinfo, style, **arguments):
        # For raw, non-verbose output only the label column is needed, which
        # keeps the query cheap; otherwise fetch full Machine objects.
        if fullinfo or style != 'raw':
            q = search_hardware_entity_query(session, Machine, **arguments)
        else:
            q = search_hardware_entity_query(session, Machine.label, **arguments)
        if machine:
            q = q.filter_by(label=machine)
        if hostname:
            # Resolve the FQDN first so a bad hostname fails fast (compel=True).
            dns_rec = DnsRecord.get_unique(session, fqdn=hostname, compel=True)
            q = q.filter(Machine.primary_name_id == dns_rec.id)
        if cpuname or cpuvendor or cpuspeed is not None:
            subq = Cpu.get_matching_query(session, name=cpuname,
                                          vendor=cpuvendor, speed=cpuspeed,
                                          compel=True)
            q = q.filter(Machine.cpu_id.in_(subq))
        if cpucount is not None:
            q = q.filter_by(cpu_quantity=cpucount)
        if memory is not None:
            q = q.filter_by(memory=memory)
        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            # A metacluster match means "any member cluster of the metacluster".
            if isinstance(dbcluster, MetaCluster):
                q = q.join('vm_container', ClusterResource, Cluster)
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.join('vm_container', ClusterResource)
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if share:
            #v2
            v2shares = session.query(Share.id).filter_by(name=share).all()
            if v2shares:
                NasAlias = aliased(VirtualNasDisk)
                q = q.join('disks', (NasAlias, NasAlias.id == Disk.id))
                q = q.filter(
                    NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
                q = q.reset_joinpoint()
        if fullinfo:
            # Eager-load the relationships the full formatter touches, to
            # avoid N+1 queries when rendering each machine.
            q = q.options(joinedload('location'),
                          subqueryload('interfaces'),
                          lazyload('interfaces.hardware_entity'),
                          joinedload('interfaces.assignments'),
                          joinedload('interfaces.assignments.dns_records'),
                          joinedload('chassis_slot'),
                          subqueryload('chassis_slot.chassis'),
                          subqueryload('disks'),
                          subqueryload('host'),
                          lazyload('host.hardware_entity'),
                          subqueryload('host.services_used'),
                          subqueryload('host._cluster'),
                          lazyload('host._cluster.host'))
            return q.all()
        # Raw mode: return just the labels, one per line.
        return StringAttributeList(q.all(), "label")
| # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq search machine`."""
from sqlalchemy.orm import aliased, subqueryload, joinedload, lazyload
from aquilon.aqdb.model import (Machine, Cpu, Cluster, ClusterResource, Share,
VirtualNasDisk, Disk, MetaCluster, DnsRecord)
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.dbwrappers.hardware_entity import (
search_hardware_entity_query)
from aquilon.worker.formats.list import StringAttributeList
class CommandSearchMachine(BrokerCommand):
required_parameters = []
def render(self, session, hostname, machine, cpuname, cpuvendor, cpuspeed,
cpucount, memory, cluster, share, fullinfo, style, **arguments):
if fullinfo or style != 'raw':
q = search_hardware_entity_query(session, Machine, **arguments)
else:
q = search_hardware_entity_query(session, Machine.label, **arguments)
if machine:
q = q.filter_by(label=machine)
if hostname:
dns_rec = DnsRecord.get_unique(session, fqdn=hostname, compel=True)
q = q.filter(Machine.primary_name_id == dns_rec.id)
if cpuname or cpuvendor or cpuspeed is not None:
subq = Cpu.get_matching_query(session, name=cpuname,
vendor=cpuvendor, speed=cpuspeed,
compel=True)
q = q.filter(Machine.cpu_id.in_(subq))
if cpucount is not None:
q = q.filter_by(cpu_quantity=cpucount)
if memory is not None:
q = q.filter_by(memory=memory)
if cluster:
dbcluster = Cluster.get_unique(session, cluster, compel=True)
if isinstance(dbcluster, MetaCluster):
q = q.join('vm_container', ClusterResource, Cluster)
q = q.filter_by(metacluster=dbcluster)
else:
q = q.join('vm_container', ClusterResource)
q = q.filter_by(cluster=dbcluster)
q = q.reset_joinpoint()
if share:
#v2
v2shares = session.query(Share.id).filter_by(name=share).all()
if v2shares:
NasAlias = aliased(VirtualNasDisk)
q = q.join('disks', (NasAlias, NasAlias.id == Disk.id))
q = q.filter(
NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
q = q.reset_joinpoint()
if fullinfo:
q = q.options(joinedload('location'),
subqueryload('interfaces'),
lazyload('interfaces.hardware_entity'),
joinedload('interfaces.assignments'),
joinedload('interfaces.assignments.dns_records'),
joinedload('chassis_slot'),
subqueryload('chassis_slot.chassis'),
subqueryload('disks'),
subqueryload('host'),
lazyload('host.machine'),
subqueryload('host.services_used'),
subqueryload('host._cluster'),
lazyload('host._cluster.host'))
return q.all()
return StringAttributeList(q.all(), "label")
| Python | 0.000329 |
cbe58b74f6d5fe5c96b197ced9c2269cf8886d24 | make boolean functions in utils return real booleans | livesettings/utils.py | livesettings/utils.py | import sys
import types
import os
def can_loop_over(maybe):
    """Return True when ``maybe`` is iterable (list-like), else False."""
    try:
        iter(maybe)
        return True
    except TypeError:
        return False
def is_list_or_tuple(maybe):
    """Return True when *maybe* is a ``list`` or ``tuple`` instance.

    Fix: uses the builtin types directly instead of ``types.TupleType`` /
    ``types.ListType``, which are mere aliases on Python 2 and were removed
    in Python 3 — the original raised AttributeError there.
    """
    return isinstance(maybe, (tuple, list))
def is_scalar(maybe):
    """Return True for strings, ints and other non-iterable scalar values."""
    if is_string_like(maybe):
        return True
    return not can_loop_over(maybe)
def is_string_like(maybe):
    """Return True when ``maybe`` acts like a string (supports ``+ ""``)."""
    try:
        maybe + ""
        return True
    except TypeError:
        return False
def flatten_list(sequence, scalarp=is_scalar, result=None):
    """Recursively flatten *sequence*, appending scalar items to *result*.

    Items for which *scalarp* is true are appended as-is; anything else is
    flattened recursively into the same accumulator.

    Fix: the original built ``result`` but never returned it, so calling
    ``flatten_list(seq)`` without passing an accumulator silently discarded
    all work.  Returning the accumulator is backward-compatible: callers
    that relied on in-place mutation (e.g. get_flat_list) are unaffected.
    """
    if result is None:
        result = []
    for item in sequence:
        if scalarp(item):
            result.append(item)
        else:
            flatten_list(item, scalarp, result)
    return result
def load_module(module):
    """Import (if not already loaded) and return the named python module."""
    if module not in sys.modules:
        __import__(module)
    return sys.modules[module]
def get_flat_list(sequence):
    """Flatten a (possibly nested) sequence and return the flat list."""
    collected = []
    flatten_list(sequence, result=collected)
    return collected
def url_join(*args):
    """Join any arbitrary strings into a forward-slash delimited string.

    Do not strip leading / from first element, nor trailing / from last
    element.  This function can take lists as arguments, flattening them
    appropriately.

    example:
        url_join('one','two',['three','four'],'five') => 'one/two/three/four/five'

    Fix: uses ``functools.reduce`` (available since Python 2.6 and identical
    to the builtin there) instead of the bare ``reduce`` builtin, which no
    longer exists on Python 3.
    """
    from functools import reduce  # builtin reduce was removed in Python 3

    if len(args) == 0:
        return ""
    args = get_flat_list(args)
    if len(args) == 1:
        return str(args[0])
    # Normalize backslashes before joining so the final replace is a no-op
    # on POSIX and fixes Windows separators.
    args = [str(arg).replace("\\", "/") for arg in args]
    work = [args[0]]
    for arg in args[1:]:
        if arg.startswith("/"):
            # Drop a single leading slash so the segment joins relatively.
            work.append(arg[1:])
        else:
            work.append(arg)
    joined = reduce(os.path.join, work)
    return joined.replace("\\", "/")
| import sys
import types
import os
def can_loop_over(maybe):
"""Test value to see if it is list like"""
try:
iter(maybe)
except:
return 0
else:
return 1
def is_list_or_tuple(maybe):
return isinstance(maybe, (types.TupleType, types.ListType))
def is_scalar(maybe):
"""Test to see value is a string, an int, or some other scalar type"""
return is_string_like(maybe) or not can_loop_over(maybe)
def is_string_like(maybe):
"""Test value to see if it acts like a string"""
try:
maybe+""
except TypeError:
return 0
else:
return 1
def flatten_list(sequence, scalarp=is_scalar, result=None):
"""flatten out a list by putting sublist entries in the main list"""
if result is None:
result = []
for item in sequence:
if scalarp(item):
result.append(item)
else:
flatten_list(item, scalarp, result)
def load_module(module):
"""Load a named python module."""
try:
module = sys.modules[module]
except KeyError:
__import__(module)
module = sys.modules[module]
return module
def get_flat_list(sequence):
"""flatten out a list and return the flat list"""
flat = []
flatten_list(sequence, result=flat)
return flat
def url_join(*args):
"""Join any arbitrary strings into a forward-slash delimited string.
Do not strip leading / from first element, nor trailing / from last element.
This function can take lists as arguments, flattening them appropriately.
example:
url_join('one','two',['three','four'],'five') => 'one/two/three/four/five'
"""
if len(args) == 0:
return ""
args = get_flat_list(args)
if len(args) == 1:
return str(args[0])
else:
args = [str(arg).replace("\\", "/") for arg in args]
work = [args[0]]
for arg in args[1:]:
if arg.startswith("/"):
work.append(arg[1:])
else:
work.append(arg)
joined = reduce(os.path.join, work)
return joined.replace("\\", "/")
| Python | 0.999134 |
2bf6b59a129a9d93328c3478e57a27f35bdf2e6a | Trim the hardcoded list of keywords | screencasts/hello-weave/highlight.py | screencasts/hello-weave/highlight.py | #!/usr/bin/env python3
import json
prompt = 'ilya@weave-01:~$ '
highlight = [
('weave-01', 'red'),
('weave-02', 'red'),
('docker', 'red'),
('run', 'red'),
('--name', 'red'),
('hello', 'red'),
('netcat', 'red'),
('-lk', 'red'),
('1234', 'red'),
('Hello, Weave!\r\n', 'red'),
]
highlight_tokens = [t[0] for t in highlight]
colours = {
'red': ('\033[91m', '\033[00m'),
}
for f in ['rec-weave-01.json', 'rec-weave-02.json']:
with open(f) as json_data:
tokens = []
d = json.load(json_data)
json_data.close()
commands = d['stdout']
word = ''
word_start = 0
for i,x in enumerate(commands):
curr = x[1]
if curr == prompt: continue
elif curr != '\r\n' and curr != ' ' and len(curr) == 1:
if word_start == 0:
word_start = i
word = curr
else:
word += curr
elif (curr == '\r\n' or curr == ' ') and word_start != 0:
tokens.append((word, word_start, True))
word_start = 0
elif curr != '\r\n' and len(curr) > 1:
tokens.append((curr, i, False))
offset = 0
for x in tokens:
if x[0] in highlight_tokens:
commands.insert(x[1] + offset, [0, colours['red'][0]])
offset += 1
l = len(x[0]) if x[2] else 1
commands.insert(x[1] + l + offset, [0, colours['red'][1]])
offset += 1
d['commands'] = commands
with open('fancy-' + f, 'w') as json_output:
json_output.write(json.dumps(d))
json_output.close()
| #!/usr/bin/env python3
import json
prompt = 'ilya@weave-01:~$ '
highlight = [
('weave-01', 'red'),
('weave-02', 'red'),
('docker', 'red'),
('run', 'red'),
('--name', 'red'),
('hello', 'red'),
('netcat', 'red'),
('-lk', 'red'),
('1234', 'red'),
('sudo curl -s -L git.io/weave -o /usr/local/bin/weave', 'red'),
('b4e40e4b4665a1ffa23f90eb3ab57c83ef243e64151bedc1501235df6e532e09\r\n', 'red'),
('Hello, Weave!\r\n', 'red'),
]
highlight_tokens = [t[0] for t in highlight]
colours = {
'red': ('\033[91m', '\033[00m'),
}
for f in ['rec-weave-01.json', 'rec-weave-02.json']:
with open(f) as json_data:
tokens = []
d = json.load(json_data)
json_data.close()
commands = d['stdout']
word = ''
word_start = 0
for i,x in enumerate(commands):
curr = x[1]
if curr == prompt: continue
elif curr != '\r\n' and curr != ' ' and len(curr) == 1:
if word_start == 0:
word_start = i
word = curr
else:
word += curr
elif (curr == '\r\n' or curr == ' ') and word_start != 0:
tokens.append((word, word_start, True))
word_start = 0
elif curr != '\r\n' and len(curr) > 1:
tokens.append((curr, i, False))
offset = 0
for x in tokens:
if x[0] in highlight_tokens:
commands.insert(x[1] + offset, [0, colours['red'][0]])
offset += 1
l = len(x[0]) if x[2] else 1
commands.insert(x[1] + l + offset, [0, colours['red'][1]])
offset += 1
d['commands'] = commands
with open('fancy-' + f, 'w') as json_output:
json_output.write(json.dumps(d))
json_output.close()
| Python | 0.999999 |
d387a1976e902bbf7fa6d960bee5d16db7aacbb0 | Fix indentation. Comment out superfluous code | tools/_build.py | tools/_build.py | """
The cython function was adapted from scikits-image (http://scikits-image.org/)
"""
import sys
import os
import shutil
import subprocess
import platform
from distutils.dist import Distribution
from distutils.command.config import config as distutils_config
from distutils import log
import optparse # deprecated in 2.7 for argparse
dummy_c_text = r'''
/* This file is generated from statsmodels/tools/_build.py to */
void do_nothing(void);
int main(void) {
do_nothing();
return 0;
}
'''
def has_c_compiler():
c = distutils_config(Distribution())
if platform.system() == "Windows": # HACK
# this doesn't matter because mingw won't be given at install step
# check if mingw was given in compiler options
#parser = optparse.OptionParser()
#parser.add_option('-c', '--compiler', dest='compiler')
#options, args = parser.parse_args()
#if options.compiler and 'mingw' in options.compiler:
# return True
# if not, then check to see if compiler is set in disutils.cfg
try: # Josef's code to check the distutils.cfg file
c.distribution.parse_config_files(c.distribution.find_config_files())
# this will raise a key error if there's not one
c.distribution.command_options['build']['compiler'][1]
return True
except:
pass
# just see if there's a system compiler
try:
success = c.try_compile(dummy_c_text)
return True
except:
log.info("No C compiler detected. Not installing Cython version "
"of files.")
return False
def cython(pyx_files, working_path=''):
"""Use Cython to convert the given files to C.
Parameters
----------
pyx_files : list of str
The input .pyx files.
"""
# Do not build cython files if target is clean
if len(sys.argv) >= 2 and sys.argv[1] == 'clean':
return
try:
import Cython
except ImportError:
# If cython is not found, we do nothing -- the build will make use of
# the distributed .c files
print("Cython not found; falling back to pre-built %s" \
% " ".join([f.replace('.pyx', '.c') for f in pyx_files]))
else:
for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:
#TODO: replace this with already written hash_funcs once merged
# if the .pyx file stayed the same, we don't need to recompile
#if not _changed(pyxfile):
# continue
c_file = pyxfile[:-4] + '.c'
# run cython compiler
cmd = 'cython -o %s %s' % (c_file, pyxfile)
print(cmd)
if platform.system() == 'Windows':
status = subprocess.call(
[sys.executable,
os.path.join(os.path.dirname(sys.executable),
'Scripts', 'cython.py'),
'-o', c_file, pyxfile],
shell=True)
else:
status = subprocess.call(['cython', '-o', c_file, pyxfile])
| """
The cython function was adapted from scikits-image (http://scikits-image.org/)
"""
import sys
import os
import shutil
import subprocess
import platform
from distutils.dist import Distribution
from distutils.command.config import config as distutils_config
from distutils import log
import optparse # deprecated in 2.7 for argparse
dummy_c_text = r'''
/* This file is generated from statsmodels/tools/_build.py to */
void do_nothing(void);
int main(void) {
do_nothing();
return 0;
}
'''
def has_c_compiler():
c = distutils_config(Distribution())
if platform.system() == "Windows": # HACK
# check if mingw was given in compiler options
parser = optparse.OptionParser()
parser.add_option('-c', '--compiler', dest='compiler')
options, args = parser.parse_args()
if options.compiler and 'mingw' in options.compiler:
return True
# if not, then check to see if compiler is set in disutils.cfg
try: # Josef's code to check the distutils.cfg file
c.distribution.parse_config_files(c.distribution.find_config_files())
# this will raise a key error if there's not one
c.distribution.command_options['build']['compiler'][1]
return True
except:
pass
# just see if there's a system compiler
try:
success = c.try_compile(dummy_c_text)
return True
except:
log.info("No C compiler detected. Not installing Cython version "
"of files.")
return False
def cython(pyx_files, working_path=''):
"""Use Cython to convert the given files to C.
Parameters
----------
pyx_files : list of str
The input .pyx files.
"""
# Do not build cython files if target is clean
if len(sys.argv) >= 2 and sys.argv[1] == 'clean':
return
try:
import Cython
except ImportError:
# If cython is not found, we do nothing -- the build will make use of
# the distributed .c files
print("Cython not found; falling back to pre-built %s" \
% " ".join([f.replace('.pyx', '.c') for f in pyx_files]))
else:
for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:
#TODO: replace this with already written hash_funcs once merged
# if the .pyx file stayed the same, we don't need to recompile
#if not _changed(pyxfile):
# continue
c_file = pyxfile[:-4] + '.c'
# run cython compiler
cmd = 'cython -o %s %s' % (c_file, pyxfile)
print(cmd)
if platform.system() == 'Windows':
status = subprocess.call(
[sys.executable,
os.path.join(os.path.dirname(sys.executable),
'Scripts', 'cython.py'),
'-o', c_file, pyxfile],
shell=True)
else:
status = subprocess.call(['cython', '-o', c_file, pyxfile])
| Python | 0.000001 |
a91ac10af21cf644bfc45ef729e465726491db7b | Enable android_test and friends as waf commands. | tools/flambe.py | tools/flambe.py | #!/usr/bin/env python
from waflib import *
from waflib.TaskGen import *
import os
# Waf hates absolute paths for some reason
FLAMBE_ROOT = os.path.dirname(__file__) + "/.."
def options(ctx):
    """Waf options hook: register the --debug command-line flag."""
    ctx.add_option("--debug", action="store_true", default=False, help="Build a development version")
def configure(ctx):
    """Waf configure hook: load the haxe tool and persist the debug flag."""
    ctx.load("haxe", tooldir=FLAMBE_ROOT+"/tools")
    # Stash the flag in the environment so build steps can read it later.
    ctx.env.debug = ctx.options.debug
@feature("flambe")
def apply_flambe(ctx):
flags = ["-main", ctx.main]
hasBootstrap = ctx.path.find_dir("res/bootstrap")
if ctx.env.debug:
flags += "-debug --no-opt --no-inline".split()
else:
#flags += "--dead-code-elimination --no-traces".split()
flags += "--no-traces".split()
ctx.bld(features="haxe", classpath=["src", FLAMBE_ROOT+"/src"],
flags=flags,
swflib="bootstrap.swf" if hasBootstrap else None,
target="app.swf")
ctx.bld(features="haxe", classpath=["src", FLAMBE_ROOT+"/src"],
flags=flags + "-D amity --macro flambe.macro.AmityJSGenerator.use()".split(),
target="app.js")
res = ctx.path.find_dir("res")
if res is not None:
# Create asset swfs from the directories in /res
ctx.bld(features="haxe", classpath=FLAMBE_ROOT+"/tools",
flags="-main AssetPackager",
libs="format",
target="packager.n")
# -interp because neko JIT is unstable...
ctx.bld(rule="neko -interp ${SRC} " + res.abspath() + " .",
source="packager.n", target= "bootstrap.swf" if hasBootstrap else None, always=True)
def android_test(ctx):
os.system("adb push res /sdcard/amity-dev")
os.system("adb push build/app.js /sdcard/amity-dev")
os.system("adb shell am start -a android.intent.action.MAIN " +
"-c android.intent.category.HOME")
os.system("adb shell am start -a android.intent.action.MAIN " +
"-n com.threerings.amity/.AmityActivity")
Context.g_module.__dict__["android_test"] = android_test
def android_log(ctx):
os.system("adb logcat -v tag amity:V SDL:V *:W")
Context.g_module.__dict__["android_log"] = android_log
def flash_test(ctx):
os.system("flashplayer build/app.swf")
Context.g_module.__dict__["flash_test"] = flash_test
| #!/usr/bin/env python
from waflib import *
from waflib.TaskGen import *
import os
# Waf hates absolute paths for some reason
FLAMBE_ROOT = os.path.dirname(__file__) + "/.."
def options(ctx):
ctx.add_option("--debug", action="store_true", default=False, help="Build a development version")
def configure(ctx):
ctx.load("haxe", tooldir=FLAMBE_ROOT+"/tools")
ctx.env.debug = ctx.options.debug
@feature("flambe")
def apply_flambe(ctx):
flags = ["-main", ctx.main]
hasBootstrap = ctx.path.find_dir("res/bootstrap")
if ctx.env.debug:
flags += "-debug --no-opt --no-inline".split()
else:
#flags += "--dead-code-elimination --no-traces".split()
flags += "--no-traces".split()
ctx.bld(features="haxe", classpath=["src", FLAMBE_ROOT+"/src"],
flags=flags,
swflib="bootstrap.swf" if hasBootstrap else None,
target="app.swf")
ctx.bld(features="haxe", classpath=["src", FLAMBE_ROOT+"/src"],
flags=flags + "-D amity --macro flambe.macro.AmityJSGenerator.use()".split(),
target="app.js")
res = ctx.path.find_dir("res")
if res is not None:
# Create asset swfs from the directories in /res
ctx.bld(features="haxe", classpath=FLAMBE_ROOT+"/tools",
flags="-main AssetPackager",
libs="format",
target="packager.n")
# -interp because neko JIT is unstable...
ctx.bld(rule="neko -interp ${SRC} " + res.abspath() + " .",
source="packager.n", target= "bootstrap.swf" if hasBootstrap else None, always=True)
# TODO: How can we expose these handy commands to the main wscript?
def android_test(ctx):
os.system("adb push res /sdcard/amity-dev")
os.system("adb push build/app.js /sdcard/amity-dev")
os.system("adb shell am start -a android.intent.action.MAIN " +
"-c android.intent.category.HOME")
os.system("adb shell am start -a android.intent.action.MAIN " +
"-n com.threerings.amity/.AmityActivity")
def flash_test(ctx):
os.system("flashplayer build/app.swf")
def android_log(ctx):
os.system("adb logcat -v tag amity:V SDL:V *:W")
| Python | 0 |
5e57234ec619d0de930333a8dde3004d1dc575d6 | Support automatically stashing local modifications during repo-rebase. | subcmds/rebase.py | subcmds/rebase.py | #
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from git_command import GitCommand
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
from error import GitError
class Rebase(Command):
common = True
helpSummary = "Rebase local branches on upstream branch"
helpUsage = """
%prog {[<project>...] | -i <project>...}
"""
helpDescription = """
'%prog' uses git rebase to move local changes in the current topic branch to
the HEAD of the upstream history, useful when you have made commits in a topic
branch but need to incorporate new upstream changes "underneath" them.
"""
def _Options(self, p):
p.add_option('-i', '--interactive',
dest="interactive", action="store_true",
help="interactive rebase (single project only)")
p.add_option('-f', '--force-rebase',
dest='force_rebase', action='store_true',
help='Pass --force-rebase to git rebase')
p.add_option('--no-ff',
dest='no_ff', action='store_true',
help='Pass --no-ff to git rebase')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='Pass --quiet to git rebase')
p.add_option('--autosquash',
dest='autosquash', action='store_true',
help='Pass --autosquash to git rebase')
p.add_option('--whitespace',
dest='whitespace', action='store', metavar='WS',
help='Pass --whitespace to git rebase')
p.add_option('--auto-stash',
dest='auto_stash', action='store_true',
help='Stash local modifications before starting')
def Execute(self, opt, args):
all = self.GetProjects(args)
one_project = len(all) == 1
if opt.interactive and not one_project:
print >>sys.stderr, 'error: interactive rebase not supported with multiple projects'
return -1
for project in all:
cb = project.CurrentBranch
if not cb:
if one_project:
print >>sys.stderr, "error: project %s has a detatched HEAD" % project.relpath
return -1
# ignore branches with detatched HEADs
continue
upbranch = project.GetBranch(cb)
if not upbranch.LocalMerge:
if one_project:
print >>sys.stderr, "error: project %s does not track any remote branches" % project.relpath
return -1
# ignore branches without remotes
continue
args = ["rebase"]
if opt.whitespace:
args.append('--whitespace=%s' % opt.whitespace)
if opt.quiet:
args.append('--quiet')
if opt.force_rebase:
args.append('--force-rebase')
if opt.no_ff:
args.append('--no-ff')
if opt.autosquash:
args.append('--autosquash')
if opt.interactive:
args.append("-i")
args.append(upbranch.LocalMerge)
print >>sys.stderr, '# %s: rebasing %s -> %s' % \
(project.relpath, cb, upbranch.LocalMerge)
needs_stash = False
if opt.auto_stash:
stash_args = ["update-index", "--refresh", "-q"]
if GitCommand(project, stash_args).Wait() != 0:
needs_stash = True
# Dirty index, requires stash...
stash_args = ["stash"]
if GitCommand(project, stash_args).Wait() != 0:
return -1
if GitCommand(project, args).Wait() != 0:
return -1
if needs_stash:
stash_args.append('pop')
stash_args.append('--quiet')
if GitCommand(project, stash_args).Wait() != 0:
return -1
| #
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from git_command import GitCommand
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
from error import GitError
class Rebase(Command):
common = True
helpSummary = "Rebase local branches on upstream branch"
helpUsage = """
%prog {[<project>...] | -i <project>...}
"""
helpDescription = """
'%prog' uses git rebase to move local changes in the current topic branch to
the HEAD of the upstream history, useful when you have made commits in a topic
branch but need to incorporate new upstream changes "underneath" them.
"""
def _Options(self, p):
p.add_option('-i', '--interactive',
dest="interactive", action="store_true",
help="interactive rebase (single project only)")
p.add_option('-f', '--force-rebase',
dest='force_rebase', action='store_true',
help='Pass --force-rebase to git rebase')
p.add_option('--no-ff',
dest='no_ff', action='store_true',
help='Pass --no-ff to git rebase')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='Pass --quiet to git rebase')
p.add_option('--autosquash',
dest='autosquash', action='store_true',
help='Pass --autosquash to git rebase')
p.add_option('--whitespace',
dest='whitespace', action='store', metavar='WS',
help='Pass --whitespace to git rebase')
def Execute(self, opt, args):
all = self.GetProjects(args)
one_project = len(all) == 1
if opt.interactive and not one_project:
print >>sys.stderr, 'error: interactive rebase not supported with multiple projects'
return -1
for project in all:
cb = project.CurrentBranch
if not cb:
if one_project:
print >>sys.stderr, "error: project %s has a detatched HEAD" % project.relpath
return -1
# ignore branches with detatched HEADs
continue
upbranch = project.GetBranch(cb)
if not upbranch.LocalMerge:
if one_project:
print >>sys.stderr, "error: project %s does not track any remote branches" % project.relpath
return -1
# ignore branches without remotes
continue
args = ["rebase"]
if opt.whitespace:
args.append('--whitespace=%s' % opt.whitespace)
if opt.quiet:
args.append('--quiet')
if opt.force_rebase:
args.append('--force-rebase')
if opt.no_ff:
args.append('--no-ff')
if opt.autosquash:
args.append('--autosquash')
if opt.interactive:
args.append("-i")
args.append(upbranch.LocalMerge)
print >>sys.stderr, '# %s: rebasing %s -> %s' % \
(project.relpath, cb, upbranch.LocalMerge)
if GitCommand(project, args).Wait() != 0:
return -1
| Python | 0.000007 |
d42b47f971675af4b12f59089326276b3b8ff9f4 | Bump version to 0.14.0 | syntex/pkgmeta.py | syntex/pkgmeta.py | # -------------------------------------------------------------------------
# Package meta data.
# -------------------------------------------------------------------------
# Package version number.
__version__ = "0.14.0"
| # -------------------------------------------------------------------------
# Package meta data.
# -------------------------------------------------------------------------
# Package version number.
__version__ = "0.13.4"
| Python | 0 |
64d83d2f9c0d955b9d6ef721c0d953158ebfb72c | Add API to manually set the path of an item. + Automatic creation of files when getPath() is called. | jasy/item/Abstract.py | jasy/item/Abstract.py | #
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import os
from jasy import UserError
import jasy.core.File as File
class AbstractItem:
id = None
project = None
kind = "jasy.Item"
mtime = None
__path = None
__cache = None
__text = None
__textFilter = None
@classmethod
def fromPath(cls, project, relpath, package=None):
"Initialize MyData from a dict's items"
item = cls(project)
item.setId(item.generateId(relpath, package))
return item
def __init__(self, project, id=None, package=None):
self.project = project
if id:
self.setId(id)
def generateId(self, relpath, package):
return "%s/%s" % (package,relpath)
def attach(self, path):
self.__path = path
entry = None
try:
if type(path) is list:
mtime = 0
for entry in path:
entryTime = os.stat(entry).st_mtime
if entryTime > mtime:
mtime = entryTime
self.mtime = mtime
else:
entry = path
self.mtime = os.stat(entry).st_mtime
except OSError as oserr:
raise UserError("Invalid item path: %s" % entry)
return self
def getId(self):
"""Returns a unique identify of the class. Typically as it is stored inside the project."""
return self.id
def setId(self, id):
self.id = id
return self
def getProject(self):
"""Returns the project which the class belongs to"""
return self.project
def getPath(self):
"""Returns the exact position of the class file in the file system."""
# Automatically write file (from eventually processed text content) when it does not exist
if self.__text is not None and not File.exists(self.__path):
File.write(self.__path, self.getText())
return self.__path
def setPath(self, path):
"""Sets the path for the item"""
self.__path = path
def getModificationTime(self):
"""Returns last modification time of the class"""
return self.mtime
def setText(self, text):
"""Stores text from custom reader"""
self.__text = text
def saveText(self, text, path, encoding="utf-8"):
"""
Saves the given text under the given path and stores both for future access
This is mainly useful for "virtual" files which are not edited by the developer
but which are created dynamically during runtime.
"""
self.__text = text
self.__path = path
if not File.exists(path) or File.read(path) != text:
File.write(path, text)
self.mtime = os.stat(path).st_mtime
def getText(self, encoding="utf-8"):
"""
Reads the file (as UTF-8) and returns the text
"""
if self.__text is not None:
if self.__textFilter is not None:
return self.__textFilter(self.__text, self)
else:
return self.__text
if self.__path is None:
return None
if type(self.__path) == list:
text = "".join([open(filename, mode="r", encoding=encoding).read() for filename in self.__path])
else:
text = open(self.__path, mode="r", encoding=encoding).read()
if self.__textFilter is not None:
return self.__textFilter(text, self)
else:
return text
def setTextFilter(self, filterCallback):
"""
Sets text filter callback that is called on getText().
With this callback e.g. transformations from CoffeeScript to JavaScript are possible.
The callback gets two parameter (text, ItemClass)
"""
self.__textFilter = filterCallback
def getChecksum(self, mode="rb"):
"""
Returns the SHA1 checksum of the item
"""
return File.sha1(open(self.getPath(), mode))
# Map Python built-ins
__repr__ = getId
__str__ = getId
| #
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import os
from jasy import UserError
import jasy.core.File as File
class AbstractItem:
id = None
project = None
kind = "jasy.Item"
mtime = None
__path = None
__cache = None
__text = None
__textFilter = None
@classmethod
def fromPath(cls, project, relpath, package=None):
"Initialize MyData from a dict's items"
item = cls(project)
item.setId(item.generateId(relpath, package))
return item
def __init__(self, project, id=None, package=None):
self.project = project
if id:
self.setId(id)
def generateId(self, relpath, package):
return "%s/%s" % (package,relpath)
def attach(self, path):
self.__path = path
entry = None
try:
if type(path) is list:
mtime = 0
for entry in path:
entryTime = os.stat(entry).st_mtime
if entryTime > mtime:
mtime = entryTime
self.mtime = mtime
else:
entry = path
self.mtime = os.stat(entry).st_mtime
except OSError as oserr:
raise UserError("Invalid item path: %s" % entry)
return self
def getId(self):
"""Returns a unique identify of the class. Typically as it is stored inside the project."""
return self.id
def setId(self, id):
self.id = id
return self
def getProject(self):
"""Returns the project which the class belongs to"""
return self.project
def getPath(self):
"""Returns the exact position of the class file in the file system."""
return self.__path
def getModificationTime(self):
"""Returns last modification time of the class"""
return self.mtime
def setText(self, text):
"""Stores text from custom reader"""
self.__text = text
def saveText(self, text, path, encoding="utf-8"):
"""
Saves the given text under the given path and stores both for future access
This is mainly useful for "virtual" files which are not edited by the developer
but which are created dynamically during runtime.
"""
self.__text = text
self.__path = path
if not File.exists(path) or File.read(path) != text:
File.write(path, text)
self.mtime = os.stat(path).st_mtime
def getText(self, encoding="utf-8"):
"""
Reads the file (as UTF-8) and returns the text
"""
if self.__text is not None:
if self.__textFilter is not None:
return self.__textFilter(self.__text, self)
else:
return self.__text
if self.__path is None:
return None
if type(self.__path) == list:
text = "".join([open(filename, mode="r", encoding=encoding).read() for filename in self.__path])
else:
text = open(self.__path, mode="r", encoding=encoding).read()
if self.__textFilter is not None:
return self.__textFilter(text, self)
else:
return text
def setTextFilter(self, filterCallback):
"""
Sets text filter callback that is called on getText().
With this callback e.g. transformations from CoffeeScript to JavaScript are possible.
The callback gets two parameter (text, ItemClass)
"""
self.__textFilter = filterCallback
def getChecksum(self, mode="rb"):
"""
Returns the SHA1 checksum of the item
"""
return File.sha1(open(self.getPath(), mode))
# Map Python built-ins
__repr__ = getId
__str__ = getId
| Python | 0 |
a8679b6ac5392b80cd56fa2d67fd3bf3fb6f488f | Add distance handling to base class | turbustat/statistics/base_statistic.py | turbustat/statistics/base_statistic.py |
from astropy.io import fits
import astropy.units as u
import numpy as np
from ..io import input_data
class BaseStatisticMixIn(object):
"""
Common properties to all statistics
"""
# Disable this flag when a statistic does not need a header
need_header_flag = True
# Disable this when the data property will not be used.
no_data_flag = False
@property
def header(self):
return self._header
@header.setter
def header(self, input_hdr):
if not self.need_header_flag:
input_hdr = None
elif not isinstance(input_hdr, fits.header.Header):
raise TypeError("The header must be a"
" astropy.io.fits.header.Header.")
self._header = input_hdr
@property
def data(self):
return self._data
@data.setter
def data(self, values):
if self.no_data_flag:
values = None
elif not isinstance(values, np.ndarray):
raise TypeError("Data is not a numpy array.")
self._data = values
def input_data_header(self, data, header):
'''
Check if the header is given separately from the data type.
'''
if header is not None:
self.data = input_data(data, no_header=True)
self.header = header
else:
self.data, self.header = input_data(data)
@property
def angular_equiv(self):
return [(u.pix, u.deg, lambda x: x * float(self.ang_size.value),
lambda x: x / float(self.ang_size.value))]
@property
def ang_size(self):
return np.abs(self.header["CDELT2"]) * u.deg
def to_pixel(self, value):
'''
Convert from angular to pixel scale.
'''
if not isinstance(value, u.Quantity):
raise TypeError("value must be an astropy Quantity object.")
return value.to(u.pix, equivalencies=self.angular_equiv)
@property
def distance(self):
return self._distance
@distance.setter
def distance(self, value):
'''
Value must be a quantity with a valid distance unit. Will keep the
units given.
'''
if not isinstance(value, u.Quantity):
raise TypeError("Value for distance must an astropy Quantity.")
if not value.unit.is_equivalent(u.pc):
raise u.UnitConversionError("Given unit ({}) is not a valid unit"
" of distance.")
if not value.isscalar:
raise TypeError("Distance must be a scalar quantity.")
self._distance = value
@property
def distance_size(self):
return (self.ang_size *
self.distance).to(self.distance.unit,
equivalencies=u.dimensionless_angles())
@property
def distance_equiv(self):
return [(u.pix, self.distance.unit,
lambda x: x * float(self.distance_size.value),
lambda x: x / float(self.distance_size.value))]
|
from astropy.io import fits
import astropy.units as u
import numpy as np
from ..io import input_data
class BaseStatisticMixIn(object):
"""
Common properties to all statistics
"""
# Disable this flag when a statistic does not need a header
need_header_flag = True
# Disable this when the data property will not be used.
no_data_flag = False
@property
def header(self):
return self._header
@header.setter
def header(self, input_hdr):
if not self.need_header_flag:
input_hdr = None
elif not isinstance(input_hdr, fits.header.Header):
raise TypeError("The header must be a"
" astropy.io.fits.header.Header.")
self._header = input_hdr
@property
def data(self):
return self._data
@data.setter
def data(self, values):
if self.no_data_flag:
values = None
elif not isinstance(values, np.ndarray):
raise TypeError("Data is not a numpy array.")
self._data = values
def input_data_header(self, data, header):
'''
Check if the header is given separately from the data type.
'''
if header is not None:
self.data = input_data(data, no_header=True)
self.header = header
else:
self.data, self.header = input_data(data)
@property
def angular_equiv(self):
return [(u.pix, u.deg, lambda x: x * float(self.ang_size.value),
lambda x: x / float(self.ang_size.value))]
@property
def ang_size(self):
return np.abs(self.header["CDELT2"]) * u.deg
def to_pixel(self, value):
'''
Convert from angular to pixel scale.
'''
if not isinstance(value, u.Quantity):
raise TypeError("value must be an astropy Quantity object.")
return value.to(u.pix, equivalencies=self.angular_equiv)
| Python | 0 |
dadd800384358356542ccc49bbdad1ae54006cfc | Fix test_Bucket.BucketDataTests to test `needed` attribute. | lib/bridgedb/test/test_Bucket.py | lib/bridgedb/test/test_Bucket.py | # -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :copyright: (c) 2007-2014, The Tor Project, Inc.
# (c) 2007-2014, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.Bucket` module.
These tests are meant to ensure that the :mod:`bridgedb.Bucket` module is
functioning as expected.
"""
from __future__ import print_function
from io import StringIO
import sure
from sure import this
from sure import the
from sure import expect
from bridgedb import Bucket
from twisted.trial import unittest
class BucketDataTest(unittest.TestCase):
"""Tests for :class:`bridgedb.Bucket.BucketData`."""
def test_alloc_some_of_the_bridges(self):
"""Set the needed number of bridges"""
needed = 10
distname = "test-distributor"
bucket = Bucket.BucketData(distname, alloc)
this(bucket.name).should.be.equal(distname)
this(bucket.needed).should.be.equal(needed)
def test_alloc_all_the_bridges(self):
"""Set the needed number of bridges to the default"""
needed = '*'
distname = "test-distributor"
bucket = Bucket.BucketData(distname, alloc)
this(bucket.name).should.be.equal(distname)
this(bucket.needed).should.be.equal(needed)
class BucketManagerTest(unittest.TestCase):
"""Tests for :class:`bridgedb.Bucket.BucketManager`."""
TEST_CONFIG_FILE = StringIO(unicode("""\
FILE_BUCKETS = { 'test1': 7, 'test2': 11 }
COLLECT_TIMESTAMPS = False
COUNTRY_BLOCK_FILE = []"""))
def setUp(self):
configuration = {}
TEST_CONFIG_FILE.seek(0)
compiled = compile(TEST_CONFIG_FILE.read(), '<string>', 'exec')
exec compiled in configuration
self.config = persistent.Conf(**configuration)
self.state = persistent.State(**config.__dict__)
self.bucket = Bucket.BucketManager(self.config)
| # -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :copyright: (c) 2007-2014, The Tor Project, Inc.
# (c) 2007-2014, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.Bucket` module.
These tests are meant to ensure that the :mod:`bridgedb.Bucket` module is
functioning as expected.
"""
from __future__ import print_function
from io import StringIO
import sure
from sure import this
from sure import the
from sure import expect
from bridgedb import Bucket
from twisted.trial import unittest
class BucketDataTest(unittest.TestCase):
"""Tests for :class:`bridgedb.Bucket.BucketData`."""
def test_alloc_some_of_the_bridges(self):
"""Set the needed number of bridges"""
alloc = 10
distname = "test-distributor"
bucket = Bucket.BucketData(distname, alloc)
this(distname).should.be.equal(bucket.name)
this(alloc).should.be.equal(bucket.needed)
def test_alloc_all_the_bridges(self):
"""Set the needed number of bridges to the default"""
alloc = '*'
distname = "test-distributor"
bucket = Bucket.BucketData(distname, alloc)
this(distname).should.be.equal(bucket.name)
this(alloc).should.be.equal(1000000)
class BucketManagerTest(unittest.TestCase):
"""Tests for :class:`bridgedb.Bucket.BucketManager`."""
TEST_CONFIG_FILE = StringIO(unicode("""\
FILE_BUCKETS = { 'test1': 7, 'test2': 11 }
COLLECT_TIMESTAMPS = False
COUNTRY_BLOCK_FILE = []"""))
def setUp(self):
configuration = {}
TEST_CONFIG_FILE.seek(0)
compiled = compile(TEST_CONFIG_FILE.read(), '<string>', 'exec')
exec compiled in configuration
self.config = persistent.Conf(**configuration)
self.state = persistent.State(**config.__dict__)
self.bucket = Bucket.BucketManager(self.config)
| Python | 0 |
eb856e854c3b6f94f49db6de41c3a5af758494b3 | Change in forbidden_view_config in Pyramid 1.5a3 | usingnamespace/views/authentication.py | usingnamespace/views/authentication.py | import logging
log = logging.getLogger(__name__)
from pyramid.view import (
view_config,
view_defaults,
forbidden_view_config,
)
from pyramid.security import (
remember,
forget,
authenticated_userid
)
from pyramid.httpexceptions import (
HTTPForbidden,
HTTPSeeOther,
)
from deform import ValidationFailure
from ..forms.user import (
LoginForm,
)
@view_defaults(context='..traversal.ManagementRoot', route_name='management')
class Authentication(object):
"""Authentication provides views for things related to authentication"""
def __init__(self, context, request):
"""Initialises the view class
:context: The traversal context
:request: The current request
"""
self.context = context
self.request = request
@view_config(
name='auth',
renderer='management/authenticate.mako',
)
def authenticate(self):
if authenticated_userid(self.request) is not None:
return HTTPSeeOther(location=self.request.route_url('management',
traverse=self.request.session.get('next', '')))
(schema, f) = LoginForm.create_form(request=self.request,
action=self.request.current_route_url())
return {
'form': f.render(),
}
@view_config(
name='auth',
renderer='management/authenticate.mako',
request_method='POST',
)
def authenticate_submit(self):
controls = self.request.POST.items()
(schema, f) = LoginForm.create_form(request=self.request,
action=self.request.current_route_url())
try:
appstruct = f.validate(controls)
headers = remember(self.request, appstruct['email'])
log.debug("Sending user to: {}".format(self.request.session.get('next', None)))
return HTTPSeeOther(location=self.request.route_url(
'management', traverse=self.request.session.get('next', '')),
headers = headers)
except ValidationFailure as e:
if e.field['csrf_token'].error is not None:
e.field.error = e.field['csrf_token'].error
e.field['csrf_token'].cstruct = self.request.session.get_csrf_token()
return {
'form': e.render(),
}
@view_config(
name='deauth',
)
def deauth(self):
headers = forget(self.request)
return HTTPSeeOther(location=self.request.route_url('management',
traverse=''), headers = headers)
@view_config(
context=HTTPForbidden,
containment='..traversal.ManagementRoot',
renderer='string',
)
def forbidden(self):
# Check to see if a user is already logged in...
if authenticated_userid(self.request):
request.response.status_int = 403
return {}
if self.request.path != '/':
self.request.session['next'] = self.request.path
return HTTPSeeOther(location=self.request.route_url(
'management', traverse='auth'))
| import logging
log = logging.getLogger(__name__)
from pyramid.view import (
view_config,
view_defaults,
forbidden_view_config,
)
from pyramid.security import (
remember,
forget,
authenticated_userid
)
from pyramid.httpexceptions import HTTPSeeOther
from deform import ValidationFailure
from ..forms.user import (
LoginForm,
)
@view_defaults(context='..traversal.ManagementRoot', route_name='management')
class Authentication(object):
"""Authentication provides views for things related to authentication"""
def __init__(self, context, request):
"""Initialises the view class
:context: The traversal context
:request: The current request
"""
self.context = context
self.request = request
@view_config(
name='auth',
renderer='management/authenticate.mako',
)
def authenticate(self):
if authenticated_userid(self.request) is not None:
return HTTPSeeOther(location=self.request.route_url('management',
traverse=self.request.session.get('next', '')))
(schema, f) = LoginForm.create_form(request=self.request,
action=self.request.current_route_url())
return {
'form': f.render(),
}
@view_config(
name='auth',
renderer='management/authenticate.mako',
request_method='POST',
)
def authenticate_submit(self):
controls = self.request.POST.items()
(schema, f) = LoginForm.create_form(request=self.request,
action=self.request.current_route_url())
try:
appstruct = f.validate(controls)
headers = remember(self.request, appstruct['email'])
log.debug("Sending user to: {}".format(self.request.session.get('next', None)))
return HTTPSeeOther(location=self.request.route_url(
'management', traverse=self.request.session.get('next', '')),
headers = headers)
except ValidationFailure as e:
if e.field['csrf_token'].error is not None:
e.field.error = e.field['csrf_token'].error
e.field['csrf_token'].cstruct = self.request.session.get_csrf_token()
return {
'form': e.render(),
}
@view_config(
name='deauth',
)
def deauth(self):
headers = forget(self.request)
return HTTPSeeOther(location=self.request.route_url('management',
traverse=''), headers = headers)
@forbidden_view_config(
containment='..traversal.ManagementRoot',
renderer='string',
)
def forbidden(self):
# Check to see if a user is already logged in...
if authenticated_userid(self.request):
request.response.status_int = 403
return {}
if self.request.path != '/':
self.request.session['next'] = self.request.path
return HTTPSeeOther(location=self.request.route_url(
'management', traverse='auth'))
| Python | 0 |
c86c80854ac5ea60f43619610a21bfba9b1094f2 | add ratio | example/simple_male_female_ratio.py | example/simple_male_female_ratio.py | import pydcard
def main():
male = 0
female = 0
for page_num in range(1, 41):
print ('Sending request to page %d' % page_num)
page = pydcard.get_all_page(page_num)
for post_thread in range(0, len(page)):
if page[post_thread].get('member').get('gender') == 'M':
male = male + 1
elif page[post_thread].get('member').get('gender') == 'F':
female = female + 1
else:
print ('Unknown gender')
print (page[post_thread].get('member').get('gender'))
print ('Female posts: %d, Male posts: %d' % (female, male))
print ('Female to Male ratio: %f' % (female/male))
if __name__ == '__main__':
main()
| import pydcard
def main():
male = 0
female = 0
for page_num in range(1, 41):
print ('Sending request to page %d' % page_num)
page = pydcard.getAllPage(page_num)
for post_thread in range(0, len(page)):
if page[post_thread].get('member').get('gender') == 'M':
male = male + 1
elif page[post_thread].get('member').get('gender') == 'F':
female = female + 1
else:
print ('Unknown gender')
print (page[post_thread].get('member').get('gender'))
print ('Female posts: %d, Male posts: %d' % (female, male))
if __name__ == '__main__':
main()
| Python | 0.000001 |
eb41e61e80cfc29957edfa30221cbbca3d8e7958 | Update variance_reduction.py | libact/query_strategies/variance_reduction.py | libact/query_strategies/variance_reduction.py | """Variance Reduction"""
import copy
from multiprocessing import Pool
import numpy as np
from libact.base.interfaces import QueryStrategy
from libact.base.dataset import Dataset
import libact.models
from libact.query_strategies._variance_reduction import estVar
class VarianceReduction(QueryStrategy):
"""Variance Reduction
This class implements Variance Reduction active learning algorithm [1]_.
Parameters
----------
model: {libact.model.LogisticRegression instance, 'LogisticRegression'}
The model used for variance reduction to evaluate the variance.
Only Logistic regression are supported now.
sigma: float, >0, optional (default=100.0)
1/sigma is added to the diagonal of the Fisher information matrix as a
regularization term.
optimality : {'trace', 'determinant', 'eigenvalue'}, optional (default='trace')
The type of optimal design. The options are the trace, determinant, or
maximum eigenvalue of the inverse Fisher information matrix.
Only 'trace' are supported now.
Attributes
----------
References
----------
.. [1] Schein, Andrew I., and Lyle H. Ungar. "Active learning for logistic
regression: an evaluation." Machine Learning 68.3 (2007): 235-265.
.. [2] Settles, Burr. "Active learning literature survey." University of
Wisconsin, Madison 52.55-66 (2010): 11.
"""
def __init__(self, *args, **kwargs):
super(VarianceReduction, self).__init__(*args, **kwargs)
model = kwargs.pop('model', None)
if type(model) is str:
self.model = getattr(libact.models, model)()
else:
self.model = model
self.optimality = kwargs.pop('optimality', 'trace')
self.sigma = kwargs.pop('sigma', 1.0)
def Phi(self, PI, X, epi, ex, label_count, feature_count):
ret = estVar(self.sigma, PI, X, epi, ex)
return ret
def E(self, args):
X, y, qx, clf, label_count = args
sigmoid = lambda x: 1 / (1 + np.exp(-x))
query_point = sigmoid(clf.predict_real([qx]))
feature_count = len(X[0])
ret = 0.0
for i in range(label_count):
clf = copy.copy(self.model)
clf.train(Dataset(np.vstack((X, [qx])), np.append(y, i)))
PI = sigmoid(clf.predict_real(np.vstack((X, [qx]))))
ret += query_point[-1][i] * self.Phi(PI[:-1], X, PI[-1], qx,
label_count, feature_count)
return ret
def make_query(self, n_jobs=1):
"""
Calculate which point to query.
Parameters
----------
n_jobs : int, optional (default=1)
The number of jobs to run in parallel.
Returns
-------
ask_id : int
The entry id of the sample wants to query.
"""
labeled_entries = self.dataset.get_labeled_entries()
Xlabeled, y = zip(*labeled_entries)
Xlabeled = np.array(Xlabeled)
y = list(y)
unlabeled_entries = self.dataset.get_unlabeled_entries()
unlabeled_entry_ids, X_pool = zip(*unlabeled_entries)
label_count = self.dataset.get_num_of_labels()
clf = copy.copy(self.model)
clf.train(Dataset(Xlabeled, y))
p = Pool(n_jobs)
errors = p.map(self.E, [(Xlabeled, y, x, clf, label_count) for x in
X_pool])
p.terminate()
return unlabeled_entry_ids[errors.index(min(errors))]
| """Variance Reduction"""
import copy
from multiprocessing import Pool
import numpy as np
from libact.base.interfaces import QueryStrategy
from libact.base.dataset import Dataset
import libact.models
from libact.query_strategies._variance_reduction import estVar
class VarianceReduction(QueryStrategy):
"""Variance Reduction
This class implements Variance Reduction active learning algorithm [1]_.
Parameters
----------
model: {libact.model.LogisticRegression instance, 'LogisticRegression'}
The model used for variance reduction to evaluate the variance.
Only Logistic regression are supported now.
sigma: float, >0, optional (default=100.0)
1/sigma is added to the diagonal of the Fisher information matrix as
regularization term.
optimality : {'trace', 'determinant', 'eigenvalue'}, optional (default='trace')
The type of optimal design. The options are the trace, determinant, or
maximum eigenvalue of the inverse Fisher information matrix.
Only 'trace' are supported now.
Attributes
----------
References
----------
.. [1] Schein, Andrew I., and Lyle H. Ungar. "Active learning for logistic
regression: an evaluation." Machine Learning 68.3 (2007): 235-265.
.. [2] Settles, Burr. "Active learning literature survey." University of
Wisconsin, Madison 52.55-66 (2010): 11.
"""
def __init__(self, *args, **kwargs):
super(VarianceReduction, self).__init__(*args, **kwargs)
model = kwargs.pop('model', None)
if type(model) is str:
self.model = getattr(libact.models, model)()
else:
self.model = model
self.optimality = kwargs.pop('optimality', 'trace')
self.sigma = kwargs.pop('sigma', 1.0)
def Phi(self, PI, X, epi, ex, label_count, feature_count):
ret = estVar(self.sigma, PI, X, epi, ex)
return ret
def E(self, args):
X, y, qx, clf, label_count = args
sigmoid = lambda x: 1 / (1 + np.exp(-x))
query_point = sigmoid(clf.predict_real([qx]))
feature_count = len(X[0])
ret = 0.0
for i in range(label_count):
clf = copy.copy(self.model)
clf.train(Dataset(np.vstack((X, [qx])), np.append(y, i)))
PI = sigmoid(clf.predict_real(np.vstack((X, [qx]))))
ret += query_point[-1][i] * self.Phi(PI[:-1], X, PI[-1], qx,
label_count, feature_count)
return ret
def make_query(self, n_jobs=1):
"""
Calculate which point to query.
Parameters
----------
n_jobs : int, optional (default=1)
The number of jobs to run in parallel.
Returns
-------
ask_id : int
The entry id of the sample wants to query.
"""
labeled_entries = self.dataset.get_labeled_entries()
Xlabeled, y = zip(*labeled_entries)
Xlabeled = np.array(Xlabeled)
y = list(y)
unlabeled_entries = self.dataset.get_unlabeled_entries()
unlabeled_entry_ids, X_pool = zip(*unlabeled_entries)
label_count = self.dataset.get_num_of_labels()
clf = copy.copy(self.model)
clf.train(Dataset(Xlabeled, y))
p = Pool(n_jobs)
errors = p.map(self.E, [(Xlabeled, y, x, clf, label_count) for x in
X_pool])
p.terminate()
return unlabeled_entry_ids[errors.index(min(errors))]
| Python | 0.000001 |
832525402091562950b1d14ccca40a68be5f306d | test that large big decimal roundtrips | tests/regression.py | tests/regression.py | ## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
# This test suite verifies that issues corrected remain corrected.
import unittest
from transit.reader import Reader
from transit.writer import Writer
from transit.transit_types import Symbol, frozendict, true, false
from decimal import Decimal
from StringIO import StringIO
class RegressionBaseTest(unittest.TestCase):
pass
def regression(name, value):
class RegressionTest(RegressionBaseTest):
def test_roundtrip(self):
in_data = value
io = StringIO()
w = Writer(io, "json")
w.write(in_data)
r = Reader("json")
out_data = r.read(StringIO(io.getvalue()))
self.assertEqual(in_data, out_data)
globals()["test_" + name + "_json"] = RegressionTest
regression("cache_consistency", ({"Problem?":true},
Symbol("Here"),
Symbol("Here")))
regression("one_pair_frozendict", frozendict({"a":1}))
regression("json_int_max", (2**53+100, 2**63+100))
regression("newline_in_string", "a\nb")
regression("big_decimal", Decimal("190234710272.2394720347203642836434"))
class BooleanTest(unittest.TestCase):
"""Even though we're roundtripping transit_types.true and
transit_types.false now, make sure we can still write Python bools.
Additionally, make sure we can still do basic logical evaluation on transit
Boolean values.
"""
def test_write_bool(self):
for protocol in ("json", "json-verbose", "msgpack"):
io = StringIO()
w = Writer(io, protocol)
w.write((True, False))
r = Reader(protocol)
io.seek(0)
out_data = r.read(io)
assert out_data[0] == true
assert out_data[1] == false
def test_basic_eval(self):
assert true
assert not false
def test_or(self):
assert true or false
assert not (false or false)
assert true or true
def test_and(self):
assert not (true and false)
assert true and true
assert not (false and false)
if __name__ == '__main__':
unittest.main()
| ## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
# This test suite verifies that issues corrected remain corrected.
import unittest
from transit.reader import Reader
from transit.writer import Writer
from transit.transit_types import Symbol, frozendict, true, false
from StringIO import StringIO
class RegressionBaseTest(unittest.TestCase):
pass
def regression(name, value):
class RegressionTest(RegressionBaseTest):
def test_roundtrip(self):
in_data = value
io = StringIO()
w = Writer(io, "json")
w.write(in_data)
r = Reader("json")
out_data = r.read(StringIO(io.getvalue()))
self.assertEqual(in_data, out_data)
globals()["test_" + name + "_json"] = RegressionTest
regression("cache_consistency", ({"Problem?":true},
Symbol("Here"),
Symbol("Here")))
regression("one_pair_frozendict", frozendict({"a":1}))
regression("json_int_max", (2**53+100, 2**63+100))
regression("newline_in_string", "a\nb")
class BooleanTest(unittest.TestCase):
"""Even though we're roundtripping transit_types.true and
transit_types.false now, make sure we can still write Python bools.
Additionally, make sure we can still do basic logical evaluation on transit
Boolean values.
"""
def test_write_bool(self):
for protocol in ("json", "json-verbose", "msgpack"):
io = StringIO()
w = Writer(io, protocol)
w.write((True, False))
r = Reader(protocol)
io.seek(0)
out_data = r.read(io)
assert out_data[0] == true
assert out_data[1] == false
def test_basic_eval(self):
assert true
assert not false
def test_or(self):
assert true or false
assert not (false or false)
assert true or true
def test_and(self):
assert not (true and false)
assert true and true
assert not (false and false)
if __name__ == '__main__':
unittest.main()
| Python | 0.998839 |
b3f33521bc7f837a7e4f055758cd035339446a98 | Fix an undefined variable in DB code | utils/database.py | utils/database.py | import json
import copy
from zirc.wrappers import connection_wrapper
class Database(dict):
"""Holds a dict that contains all the information
about the users and their last seen actions in a channel"""
def __init__(self, bot):
with open("userdb.json") as f:
super(Database, self).__init__(json.load(f))
class x(object):
def __init__(self, bot):
self.send = bot.send
self._config = bot.config
self.irc = connection_wrapper(x(bot))
def change_attr(self, name, attr, value, channel=None):
if channel is not None:
self[channel][name][attr] = value
for i in self:
try:
if attr == "host":
nick_ident = self[i][name]["hostmask"].split("@")[0]
self[i][name]["hostmask"] = nick_ident + '@' + value
self[i][name][attr] = value
elif attr == "ident":
self[i][name]["hostmask"] = name + '!' + value + '@' + self[i][name]["host"]
else:
self[i][name][attr] = value
except KeyError:
pass
def remove_entry(self, event, nick):
try:
del self[event.target][nick]
except KeyError:
for i in self[event.target].values():
if i['host'] == event.source.host:
del self[event.target][i['hostmask'].split("!")[0]]
break
def add_entry(self, channel, nick, hostmask, account):
temp = {
'hostmask': hostmask,
'host': hostmask.split("@")[1],
'account': account,
'seen': None
}
if nick in self[channel]:
del temp['seen']
self[channel][nick].update(temp)
else:
self[channel][nick] = temp
def get_user_host(self, channel, nick):
try:
host = "*!*@" + self[channel][nick]['host']
except KeyError:
self.irc.send("WHO {0} nuhs%nhuac".format(channel))
host = "*!*@" + self[channel][nick]['host']
return host
def flush(self):
with open('userdb.json', 'w') as f:
# Use dict(self) to onyly get the actual dict object
# Use copy.deepcopy() to avoid having errors due to the DB being updated while we flush it
json.dump(copy.deepcopy(dict(self)), f, indent=2, separators=(',', ': '))
f.write("\n")
| import json
import copy
from zirc.wrappers import connection_wrapper
class Database(dict):
"""Holds a dict that contains all the information
about the users and their last seen actions in a channel"""
def __init__(self, bot):
with open("userdb.json") as f:
super(Database, self).__init__(json.load(f))
class x(object):
def __init__(self, bot):
self.send = bot.send
self._config = bot.config
self.irc = connection_wrapper(x(bot))
def change_attr(self, name, attr, value, channel=None):
if channel is not None:
self[channel][name][attr] = value
for i in self:
try:
if attr == "host":
nick_ident = self[i][name]["hostmask"].split("@")[0]
self[i][name]["hostmask"] = nick_indent + '@' + value
self[i][name][attr] = value
elif attr == "ident":
self[i][name]["hostmask"] = name + '!' + value + '@' + self[i][name]["host"]
else:
self[i][name][attr] = value
except KeyError:
pass
def remove_entry(self, event, nick):
try:
del self[event.target][nick]
except KeyError:
for i in self[event.target].values():
if i['host'] == event.source.host:
del self[event.target][i['hostmask'].split("!")[0]]
break
def add_entry(self, channel, nick, hostmask, account):
temp = {
'hostmask': hostmask,
'host': hostmask.split("@")[1],
'account': account,
'seen': None
}
if nick in self[channel]:
del temp['seen']
self[channel][nick].update(temp)
else:
self[channel][nick] = temp
def get_user_host(self, channel, nick):
try:
host = "*!*@" + self[channel][nick]['host']
except KeyError:
self.irc.send("WHO {0} nuhs%nhuac".format(channel))
host = "*!*@" + self[channel][nick]['host']
return host
def flush(self):
with open('userdb.json', 'w') as f:
# Use dict(self) to onyly get the actual dict object
# Use copy.deepcopy() to avoid having errors due to the DB being updated while we flush it
json.dump(copy.deepcopy(dict(self)), f, indent=2, separators=(',', ': '))
f.write("\n")
| Python | 0.018335 |
8d438da54a15fa213c5b57899505e040a42548bf | Fix init for tests | linked_list.py | linked_list.py | from __future__ import unicode_literals
class Node(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
def __repr__(self):
# Just display value
return "{val}".format(val=self.val)
class LinkedList(object):
"""Class for a singly-linked list."""
def __init__(self, iterable=()):
self.header = None
self.length = 0
for val in reversed(iterable):
self.insert(val)
def __repr__(self):
"""Print LinkedList as Tuple literal."""
end_flag = False
vals = [] # Can't use list!
current_node = self.header
while not end_flag:
vals.append(current_node.val)
if current_node.next:
current_node = current_node.next
else:
end_flag = True
break
vals = tuple(vals) # No tuples, even for formatting.
return str(vals)
def insert(self, val):
"""Insert val at head of LinkedList."""
self.header = Node(val, self.header)
self.length += 1
return None
def pop(self):
"""Pop the first val off the head and return it."""
to_return = self.header # Use tuple reassignment
self.header = to_return.next
to_return.next = None
self.length -= 1
return to_return
def size(self):
"""Return current length of LinkedList."""
return self.length
def search(self, val):
"""Return the node containing val if present, else None"""
node, left = self._find(val)
return node
def remove(self, val): # Check Spec: Pass node vs val
"""Remove given node from list, return None"""
node_to_remove, left_neighbor = self._find(val)
if self.header == node_to_remove:
self.pop()
else:
left_neighbor.next = node_to_remove.next
node_to_remove.next = None
return None
def display(self):
"""Print LinkedList as Tuple literal"""
return self.__repr__()
def _find(self, val): # Check with spec re: this.
"""Return a Node and left-neighboor by val."""
val_present = False
node_inspected = self.header
left_node = None
while not val_present:
# Interrogate each Node
if node_inspected.val == val:
val_present = True
break
else:
# Keeping track of node to left; incrementing node
left_node = node_inspected # use tuple assignment
node_inspected = node_inspected.next
return node_inspected, left_node
| from __future__ import unicode_literals
class LinkedList(object):
"""Class for a singly-linked list."""
def __init__(self, iterable=()):
self.length = 0
for val in iterable:
self.insert(val)
def __repr__(self):
"""Print LinkedList as Tuple literal."""
end_flag = False
vals = [] # Can't use list!
current_node = self.header
while not end_flag:
vals.append(current_node.val)
if current_node.next:
current_node = current_node.next
else:
end_flag = True
break
vals = tuple(vals) # No tuples, even for formatting.
return str(vals)
def insert(self, val):
"""Insert val at head of LinkedList."""
self.header = Node(val, self.header)
self.length += 1
return None
def pop(self):
"""Pop the first val off the head and return it."""
to_return = self.header # Use tuple reassignment
self.header = to_return.next
to_return.next = None
self.length -= 1
return to_return
def size(self):
"""Return current length of LinkedList."""
return self.length
def search(self, val):
"""Return the node containing val if present, else None"""
node, left = self._find(val)
return node
def remove(self, val): # Check Spec: Pass node vs val
"""Remove given node from list, return None"""
node_to_remove, left_neighbor = self._find(val)
if self.header == node_to_remove:
self.pop()
else:
left_neighbor.next = node_to_remove.next
node_to_remove.next = None
return None
def display(self):
"""Print LinkedList as Tuple literal"""
return self.__repr__()
def _find(self, val): # Check with spec re: this.
"""Return a Node and left-neighboor by val."""
val_present = False
node_inspected = self.header
left_node = None
while not val_present:
# Interrogate each Node
if node_inspected.val == val:
val_present = True
break
else:
# Keeping track of node to left; incrementing node
left_node = node_inspected # use tuple assignment
node_inspected = node_inspected.next
return node_inspected, left_node
class Node(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
def __repr__(self):
# Just display value
return "{val}".format(val=self.val)
| Python | 0.000007 |
1bb4059a783fdbc8f397b596d5d5d5ed6d97a7b4 | use radiasoft/beamsim-jupyter image | srv/salt/jupyterhub/jupyterhub_config.py | srv/salt/jupyterhub/jupyterhub_config.py | c.Authenticator.admin_users = {'{{ pillar.jupyterhub.admin_user }}',}
c.JupyterHub.confirm_no_ssl = True
c.JupyterHub.ip = '0.0.0.0'
import base64
c.JupyterHub.cookie_secret = base64.b64decode('{{ pillar.jupyterhub.cookie_secret }}')
c.JupyterHub.proxy_auth_token = '{{ pillar.jupyterhub.proxy_auth_token }}'
# Allow both local and GitHub users; Useful for bootstrap
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = 'https://jupyter.radiasoft.org/hub/oauth_callback'
c.GitHubOAuthenticator.client_id = '{{ pillar.jupyterhub.github_client_id }}'
c.GitHubOAuthenticator.client_secret = '{{ pillar.jupyterhub.github_client_secret }}'
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.DockerSpawner.use_internal_ip = True
from IPython.utils.localinterfaces import public_ips
c.JupyterHub.hub_ip = public_ips()[0]
c.DockerSpawner.container_image = 'radiasoft/beamsim-jupyter'
| c.Authenticator.admin_users = {'{{ pillar.jupyterhub.admin_user }}',}
c.JupyterHub.confirm_no_ssl = True
c.JupyterHub.ip = '0.0.0.0'
import base64
c.JupyterHub.cookie_secret = base64.b64decode('{{ pillar.jupyterhub.cookie_secret }}')
c.JupyterHub.proxy_auth_token = '{{ pillar.jupyterhub.proxy_auth_token }}'
# Allow both local and GitHub users; Useful for bootstrap
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = 'https://jupyter.radiasoft.org/hub/oauth_callback'
c.GitHubOAuthenticator.client_id = '{{ pillar.jupyterhub.github_client_id }}'
c.GitHubOAuthenticator.client_secret = '{{ pillar.jupyterhub.github_client_secret }}'
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.DockerSpawner.use_internal_ip = True
from IPython.utils.localinterfaces import public_ips
c.JupyterHub.hub_ip = public_ips()[0]
# jupyter_client.localinterfaces
#container_image = radiasoft/jupyterhub-singleuser
| Python | 0 |
16fc80f36fa0bade1f4e5e7bef5595b3617a42bc | fix bartlett to pass participant not participant uuid | examples/bartlett1932/experiment.py | examples/bartlett1932/experiment.py | """Bartlett's trasmission chain experiment from Remembering (1932)."""
from wallace.networks import Chain
from wallace.nodes import Source, ReplicatorAgent
from wallace import processes
from wallace.experiments import Experiment
import random
class Bartlett1932(Experiment):
"""Defines the experiment."""
def __init__(self, session):
"""Set up the initial networks."""
super(Bartlett1932, self).__init__(session)
self.practice_repeats = 0
self.experiment_repeats = 1
self.agent = ReplicatorAgent
self.network = lambda: Chain(max_size=3)
if not self.networks():
self.setup()
self.save()
def setup(self):
super(Bartlett1932, self).setup()
# Setup for first time experiment is accessed
for net in self.networks():
if not net.nodes(type=Source):
source = WarOfTheGhostsSource(network=net)
net.add_source(source)
def create_agent_trigger(self, agent, network):
"""When an agent is created, add it to the network and take a step."""
network.add_agent(agent)
processes.random_walk(network)
def recruit(self):
"""Recruit participants to the experiment as needed."""
if self.networks(full=False):
self.recruiter().recruit_participants(n=1)
else:
self.recruiter().close_recruitment()
def bonus(self, participant=None):
"""Compute the bonus for the given participant.
This is called automatically when a participant finishes,
it is called immediately prior to the participant_submission_trigger
"""
return 1
class WarOfTheGhostsSource(Source):
"""Transmit a story from Bartlett (1932)."""
__mapper_args__ = {"polymorphic_identity": "war_of_the_ghosts_source"}
def _contents(self):
"""Read the markdown source of the story from a file."""
stories = [
"ghosts.md",
"cricket.md",
"moochi.md",
"outwit.md",
"raid.md",
"species.md",
"tennis.md",
"vagabond.md"
]
story = random.choice(stories)
with open("static/stimuli/{}".format(story), "r") as f:
return f.read()
| """Bartlett's trasmission chain experiment from Remembering (1932)."""
from wallace.networks import Chain
from wallace.nodes import Source, ReplicatorAgent
from wallace import processes
from wallace.experiments import Experiment
import random
class Bartlett1932(Experiment):
"""Defines the experiment."""
def __init__(self, session):
"""Set up the initial networks."""
super(Bartlett1932, self).__init__(session)
self.practice_repeats = 0
self.experiment_repeats = 1
self.agent = ReplicatorAgent
self.network = lambda: Chain(max_size=3)
if not self.networks():
self.setup()
self.save()
def setup(self):
super(Bartlett1932, self).setup()
# Setup for first time experiment is accessed
for net in self.networks():
if not net.nodes(type=Source):
source = WarOfTheGhostsSource(network=net)
net.add_source(source)
def create_agent_trigger(self, agent, network):
"""When an agent is created, add it to the network and take a step."""
network.add_agent(agent)
processes.random_walk(network)
def recruit(self):
"""Recruit participants to the experiment as needed."""
if self.networks(full=False):
self.recruiter().recruit_participants(n=1)
else:
self.recruiter().close_recruitment()
def bonus(self, participant_uuid=None):
"""Compute the bonus for the given participant.
This is called automatically when a participant finishes,
it is called immediately prior to the participant_submission_trigger
"""
return 1
class WarOfTheGhostsSource(Source):
"""Transmit a story from Bartlett (1932)."""
__mapper_args__ = {"polymorphic_identity": "war_of_the_ghosts_source"}
def _contents(self):
"""Read the markdown source of the story from a file."""
stories = [
"ghosts.md",
"cricket.md",
"moochi.md",
"outwit.md",
"raid.md",
"species.md",
"tennis.md",
"vagabond.md"
]
story = random.choice(stories)
with open("static/stimuli/{}".format(story), "r") as f:
return f.read()
| Python | 0 |
6073610cb08e03e142b80dc7b1196ce359a1f55a | fix pylint import error | selfdrive/debug/toyota_eps_factor.py | selfdrive/debug/toyota_eps_factor.py | #!/usr/bin/env python3
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model # pylint: disable=import-error
from tools.lib.route import Route
from tools.lib.logreader import MultiLogIterator
MIN_SAMPLES = 30*100
def to_signed(n, bits):
if n >= (1 << max((bits - 1), 0)):
n = n - (1 << max(bits, 0))
return n
def get_eps_factor(lr, plot=False):
engaged = False
torque_cmd, eps_torque = None, None
cmds, eps = [], []
for msg in lr:
if msg.which() != 'can':
continue
for m in msg.can:
if m.address == 0x2e4 and m.src == 128:
engaged = bool(m.dat[0] & 1)
torque_cmd = to_signed((m.dat[1] << 8) | m.dat[2], 16)
elif m.address == 0x260 and m.src == 0:
eps_torque = to_signed((m.dat[5] << 8) | m.dat[6], 16)
if engaged and torque_cmd is not None and eps_torque is not None:
cmds.append(torque_cmd)
eps.append(eps_torque)
else:
if len(cmds) > MIN_SAMPLES:
break
cmds, eps = [], []
if len(cmds) < MIN_SAMPLES:
raise Exception("too few samples found in route")
lm = linear_model.LinearRegression(fit_intercept=False)
lm.fit(np.array(cmds).reshape(-1, 1), eps)
scale_factor = 1./lm.coef_[0]
if plot:
plt.plot(np.array(eps)*scale_factor)
plt.plot(cmds)
plt.show()
return scale_factor
if __name__ == "__main__":
r = Route(sys.argv[1])
lr = MultiLogIterator(r.log_paths(), wraparound=False)
n = get_eps_factor(lr, plot="--plot" in sys.argv)
print("EPS torque factor: ", n)
| #!/usr/bin/env python3
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from tools.lib.route import Route
from tools.lib.logreader import MultiLogIterator
MIN_SAMPLES = 30*100
def to_signed(n, bits):
if n >= (1 << max((bits - 1), 0)):
n = n - (1 << max(bits, 0))
return n
def get_eps_factor(lr, plot=False):
engaged = False
torque_cmd, eps_torque = None, None
cmds, eps = [], []
for msg in lr:
if msg.which() != 'can':
continue
for m in msg.can:
if m.address == 0x2e4 and m.src == 128:
engaged = bool(m.dat[0] & 1)
torque_cmd = to_signed((m.dat[1] << 8) | m.dat[2], 16)
elif m.address == 0x260 and m.src == 0:
eps_torque = to_signed((m.dat[5] << 8) | m.dat[6], 16)
if engaged and torque_cmd is not None and eps_torque is not None:
cmds.append(torque_cmd)
eps.append(eps_torque)
else:
if len(cmds) > MIN_SAMPLES:
break
cmds, eps = [], []
if len(cmds) < MIN_SAMPLES:
raise Exception("too few samples found in route")
lm = linear_model.LinearRegression(fit_intercept=False)
lm.fit(np.array(cmds).reshape(-1, 1), eps)
scale_factor = 1./lm.coef_[0]
if plot:
plt.plot(np.array(eps)*scale_factor)
plt.plot(cmds)
plt.show()
return scale_factor
if __name__ == "__main__":
r = Route(sys.argv[1])
lr = MultiLogIterator(r.log_paths(), wraparound=False)
n = get_eps_factor(lr, plot="--plot" in sys.argv)
print("EPS torque factor: ", n)
| Python | 0.000001 |
a6f95b71030026693683588287f8c54bbd7e3ee8 | use persistor to upload trained model to s3 | src/trainers/spacy_sklearn_trainer.py | src/trainers/spacy_sklearn_trainer.py | import spacy
import os, datetime, json
import cloudpickle
from rasa_nlu import util
from rasa_nlu.featurizers.spacy_featurizer import SpacyFeaturizer
from rasa_nlu.classifiers.sklearn_intent_classifier import SklearnIntentClassifier
from rasa_nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
class SpacySklearnTrainer(object):
def __init__(self):
self.name="spacy_sklearn"
self.training_data = None
self.nlp = spacy.load('en', tagger=False, parser=False, entity=False)
self.featurizer = SpacyFeaturizer(self.nlp)
self.intent_classifier = SklearnIntentClassifier()
self.entity_extractor = SpacyEntityExtractor()
def train(self,data):
self.training_data = data
self.entity_extractor.train(self.nlp,data.entity_examples)
self.train_intent_classifier(data.intent_examples)
def train_intent_classifier(self,intent_examples):
labels = [e["intent"] for e in intent_examples]
sents = [e["text"] for e in intent_examples]
y = self.intent_classifier.transform_labels(labels)
X = self.featurizer.create_bow_vecs(sents)
self.intent_classifier.train(X,y)
def persist(self,path,persistor=None):
tstamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
dirname = os.path.join(path,"model_"+tstamp)
os.mkdir(dirname)
data_file = os.path.join(dirname,"training_data.json")
classifier_file = os.path.join(dirname,"intent_classifier.pkl")
ner_dir = os.path.join(dirname,'ner')
os.mkdir(ner_dir)
entity_extractor_config_file = os.path.join(ner_dir,"config.json")
entity_extractor_file = os.path.join(ner_dir,"model")
metadata = {
"trained_at":tstamp,
"training_data":data_file,
"backend":self.name,
"intent_classifier":classifier_file,
"entity_extractor": ner_dir
}
with open(os.path.join(dirname,'metadata.json'),'w') as f:
f.write(json.dumps(metadata,indent=4))
with open(data_file,'w') as f:
f.write(self.training_data.as_json(indent=2))
with open(classifier_file,'w') as f:
cloudpickle.dump(self.intent_classifier,f)
with open(entity_extractor_config_file,'w') as f:
json.dump(self.entity_extractor.ner.cfg, f)
self.entity_extractor.ner.model.dump(entity_extractor_file)
if (persistor is not None):
persistor.send_tar_to_s3(dirname)
| import spacy
import os, datetime, json
import cloudpickle
import util
from rasa_nlu.featurizers.spacy_featurizer import SpacyFeaturizer
from rasa_nlu.classifiers.sklearn_intent_classifier import SklearnIntentClassifier
from rasa_nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
class SpacySklearnTrainer(object):
def __init__(self):
self.name="spacy_sklearn"
self.training_data = None
self.nlp = spacy.load('en', tagger=False, parser=False, entity=False)
self.featurizer = SpacyFeaturizer(self.nlp)
self.intent_classifier = SklearnIntentClassifier()
self.entity_extractor = SpacyEntityExtractor()
def train(self,data):
self.training_data = data
self.entity_extractor.train(self.nlp,data.entity_examples)
self.train_intent_classifier(data.intent_examples)
def train_intent_classifier(self,intent_examples):
labels = [e["intent"] for e in intent_examples]
sents = [e["text"] for e in intent_examples]
y = self.intent_classifier.transform_labels(labels)
X = self.featurizer.create_bow_vecs(sents)
self.intent_classifier.train(X,y)
def persist(self,path):
tstamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
dirname = os.path.join(path,"model_"+tstamp)
os.mkdir(dirname)
data_file = os.path.join(dirname,"training_data.json")
classifier_file = os.path.join(dirname,"intent_classifier.pkl")
ner_dir = os.path.join(dirname,'ner')
os.mkdir(ner_dir)
entity_extractor_config_file = os.path.join(ner_dir,"config.json")
entity_extractor_file = os.path.join(ner_dir,"model")
metadata = {
"trained_at":tstamp,
"training_data":data_file,
"backend":self.name,
"intent_classifier":classifier_file,
"entity_extractor": ner_dir
}
with open(os.path.join(dirname,'metadata.json'),'w') as f:
f.write(json.dumps(metadata,indent=4))
with open(data_file,'w') as f:
f.write(self.training_data.as_json(indent=2))
with open(classifier_file,'w') as f:
cloudpickle.dump(self.intent_classifier,f)
with open(entity_extractor_config_file,'w') as f:
json.dump(self.entity_extractor.ner.cfg, f)
self.entity_extractor.ner.model.dump(entity_extractor_file)
util.sync_to_s3(dirname,'us-east-1','rasa_nlu')
| Python | 0 |
60ebebb4cc167a010904763c5a4ffed6347c029e | Fix license tab. | lms/djangoapps/labster_course_license/tabs.py | lms/djangoapps/labster_course_license/tabs.py | """
Registers the Labster Course License for the edX platform.
"""
from django.conf import settings
from django.utils.translation import ugettext_noop
from xmodule.tabs import CourseTab
from student.roles import CourseCcxCoachRole
from courseware.access import has_access
class LicenseCourseTab(CourseTab):
"""
The representation of the LTI Passport course tab
"""
type = "course_license"
title = ugettext_noop("License")
view_name = "labster_license_handler"
is_dynamic = True
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if CCX has been enabled and the specified user is a coach
"""
if not settings.FEATURES.get('CUSTOM_COURSES_EDX', False) or not course.enable_ccx:
# If ccx is not enable do not show License tab.
return False
if has_access(user, 'staff', course) or has_access(user, 'instructor', course):
# if user is staff or instructor then he can always see License tab.
return True
role = CourseCcxCoachRole(course.id)
return role.has_user(user)
| """
Registers the Labster Course License for the edX platform.
"""
from django.conf import settings
from django.utils.translation import ugettext_noop
from xmodule.tabs import CourseTab
from student.roles import CourseCcxCoachRole
class LicenseCourseTab(CourseTab):
"""
The representation of the LTI Passport course tab
"""
type = "course_license"
title = ugettext_noop("License")
view_name = "labster_license_handler"
is_dynamic = True
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if CCX has been enabled and the specified user is a coach
"""
if not user:
return True
if not settings.FEATURES.get('CUSTOM_COURSES_EDX', False) or not course.enable_ccx:
# If ccx is not enable do not show License tab.
return False
if has_access(user, 'staff', course) or has_access(user, 'instructor', course):
# if user is staff or instructor then he can always see License tab.
return True
role = CourseCcxCoachRole(course.id)
return role.has_user(user)
| Python | 0 |
d8e872c3d2aa141c29d993c08c207c1b7994b055 | Add missing filter decorators | sequere/templatetags/sequere_tags.py | sequere/templatetags/sequere_tags.py | from django import template
from sequere.registry import registry
from sequere.models import (get_followers_count, get_followings_count)
register = template.Library()
@register.filter
def identifier(instance, arg=None):
return registry.get_identifier(instance)
@register.filter
def followers_count(instance, identifier=None):
return get_followers_count(instance, identifier)
@register.filter
def followings_count(instance, identifier=None):
return get_followings_count(instance, identifier)
| from django import template
from sequere.registry import registry
from sequere.models import (get_followers_count, get_followings_count)
register = template.Library()
def identifier(instance, arg=None):
return registry.get_identifier(instance)
def followers_count(instance, identifier=None):
return get_followers_count(instance, identifier)
def followings_count(instance, identifier=None):
return get_followings_count(instance, identifier)
| Python | 0.000001 |
90103ce492a77070a0d6e30c5247b334c803b5e7 | check access and execute as superuser | mail_move_message/mail_move_message_models.py | mail_move_message/mail_move_message_models.py | from openerp import api, models, fields, SUPERUSER_ID
from openerp.tools.translate import _
class wizard(models.TransientModel):
_name = 'mail_move_message.wizard'
message_id = fields.Many2one('mail.message', string='Message')
message_body = fields.Html(related='message_id.body', string='Message to move', readonly=True)
parent_id = fields.Many2one('mail.message', string='Search by name')
model_id = fields.Many2one('ir.model', string='Record type')
res_id = fields.Integer('Record ID')
record_url = fields.Char('Link to record', readonly=True)
@api.onchange('parent_id')
def on_change_parent_id(self):
if self.parent_id and self.parent_id.model:
self.model_id = self.env['ir.model'].search([('model', '=', self.parent_id.model)])[0]
self.res_id = self.parent_id.res_id
else:
self.model_id = None
self.res_id = None
@api.onchange('model_id', 'res_id')
def on_change_res(self):
if not ( self.model_id and self.res_id ):
self.record_url = ''
return
self.record_url = '/web#id=%s&model=%s' % (self.res_id, self.model_id.model)
@api.one
def check_access(self):
cr = self._cr
uid = self.env.user.id
operation = 'write'
context = self._context
if not ( self.model_id and self.res_id ):
return True
model_obj = self.pool[self.model_id.model]
mids = model_obj.exists(cr, uid, [self.res_id])
if hasattr(model_obj, 'check_mail_message_access'):
model_obj.check_mail_message_access(cr, uid, mids, operation, context=context)
else:
self.pool['mail.thread'].check_mail_message_access(cr, uid, mids, operation, model_obj=model_obj, context=context)
@api.multi
def move(self):
for r in self:
r.check_access()
if r.parent_id:
if not (r.parent_id.model == r.model_id.model and
r.parent_id.res_id == r.res_id):
r.parent_id = None
r.message_id.sudo().write({'parent_id': r.parent_id.id, 'res_id': r.res_id, 'model': r.model_id.model})
if not ( r.model_id and r.res_id ):
obj = self.pool.get('ir.model.data').get_object_reference(self._cr, SUPERUSER_ID, 'mail', 'mail_archivesfeeds')[1]
return {
'type' : 'ir.actions.client',
'name' : 'Archive',
'tag' : 'reload',
'params' : {'menu_id': obj},
}
return {
'name': _('Record'),
'view_type': 'form',
'view_mode': 'form',
'res_model': r.model_id.model,
'res_id': r.res_id,
'views': [(False, 'form')],
'type': 'ir.actions.act_window',
}
class mail_message(models.Model):
_inherit = 'mail.message'
def name_get(self, cr, uid, ids, context=None):
if not (context or {}).get('extended_name'):
return super(mail_message, self).name_get(cr, uid, ids, context=context)
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['record_name','model', 'res_id'], context=context)
res = []
for record in reads:
name = record['record_name']
extended_name = ' [%s] ID %s' % (record.get('model', 'UNDEF'), record.get('res_id', 'UNDEF'))
res.append((record['id'], name + extended_name))
return res
| from openerp import api, models, fields, SUPERUSER_ID
from openerp.tools.translate import _
class wizard(models.TransientModel):
_name = 'mail_move_message.wizard'
message_id = fields.Many2one('mail.message', string='Message')
message_body = fields.Html(related='message_id.body', string='Message to move', readonly=True)
parent_id = fields.Many2one('mail.message', string='Search by name')
model_id = fields.Many2one('ir.model', string='Record type')
res_id = fields.Integer('Record ID')
record_url = fields.Char('Link to record', readonly=True)
@api.onchange('parent_id')
def on_change_parent_id(self):
if self.parent_id and self.parent_id.model:
self.model_id = self.env['ir.model'].search([('model', '=', self.parent_id.model)])[0]
self.res_id = self.parent_id.res_id
else:
self.model_id = None
self.res_id = None
@api.onchange('model_id', 'res_id')
def on_change_res(self):
if not ( self.model_id and self.res_id ):
self.record_url = ''
return
self.record_url = '/web#id=%s&model=%s' % (self.res_id, self.model_id.model)
@api.multi
def move(self):
for r in self:
if r.parent_id:
if not (r.parent_id.model == r.model_id.model and
r.parent_id.res_id == r.res_id):
r.parent_id = None
r.message_id.write({'parent_id': r.parent_id.id, 'res_id': r.res_id, 'model': r.model_id.model})
if not ( r.model_id and r.res_id ):
obj = self.pool.get('ir.model.data').get_object_reference(self._cr, SUPERUSER_ID, 'mail', 'mail_archivesfeeds')[1]
return {
'type' : 'ir.actions.client',
'name' : 'Archive',
'tag' : 'reload',
'params' : {'menu_id': obj},
}
return {
'name': _('Record'),
'view_type': 'form',
'view_mode': 'form',
'res_model': r.model_id.model,
'res_id': r.res_id,
'views': [(False, 'form')],
'type': 'ir.actions.act_window',
}
class mail_message(models.Model):
_inherit = 'mail.message'
def name_get(self, cr, uid, ids, context=None):
if not (context or {}).get('extended_name'):
return super(mail_message, self).name_get(cr, uid, ids, context=context)
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['record_name','model', 'res_id'], context=context)
res = []
for record in reads:
name = record['record_name']
extended_name = ' [%s] ID %s' % (record.get('model', 'UNDEF'), record.get('res_id', 'UNDEF'))
res.append((record['id'], name + extended_name))
return res
| Python | 0 |
99c3eba0d6384cd42c90ef347823e6d66659d6e3 | Fix typo in division operator | viper/interpreter/prelude/operators.py | viper/interpreter/prelude/operators.py | from ..value import ForeignCloVal
def plus(a: int, b: int) -> int:
return a + b
def minus(a: int, b: int) -> int:
return a - b
def times(a: int, b: int) -> int:
return a * b
def divide(a: int, b: int) -> float:
return a / b
env = {
'+': ForeignCloVal(plus, {}),
'-': ForeignCloVal(minus, {}),
'*': ForeignCloVal(times, {}),
'/': ForeignCloVal(divide, {}),
} | from ..value import ForeignCloVal
def plus(a: int, b: int) -> int:
return a + b
def minus(a: int, b: int) -> int:
return a - b
def times(a: int, b: int) -> int:
return a * b
def divide(a: int, b: int) -> float:
return a / b
env = {
'+': ForeignCloVal(plus, {}),
'-': ForeignCloVal(minus, {}),
'*': ForeignCloVal(times, {}),
'//': ForeignCloVal(divide, {}),
} | Python | 0.014756 |
340e872114363ddc041b2c5cdcc5769c9b793efe | Add test_select_with_seed_too_small_raise_Exception | tests/test_bingo.py | tests/test_bingo.py | """Unit tests for cat2cohort."""
import unittest
from bingo import bingo
class TestBingoGenerator(unittest.TestCase):
"""Test methods from bingo."""
def test_bingo_generator_has_default_size(self):
bingo_generator = bingo.BingoGenerator()
expected = pow(bingo.DEFAULT_SIZE, 2)
self.assertEquals(bingo_generator.size, expected)
def test_bingo_generator_has_given_size(self):
bingo_generator = bingo.BingoGenerator(4)
self.assertEquals(bingo_generator.size, 16)
def test_select_words_should_have_the_right_size(self):
test_size = 2
bingo_generator = bingo.BingoGenerator(size=test_size)
seed_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
bingo_generator.words = seed_list
selection = bingo_generator.select_words()
self.assertEquals(len(selection), pow(test_size, 2))
def test_select_words_should_return_words_from_the_seed_list(self):
test_size = 2
bingo_generator = bingo.BingoGenerator(size=test_size)
seed_list = set(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])
bingo_generator.words = seed_list
selection = set(bingo_generator.select_words())
self.assertTrue(seed_list.difference(selection))
def test_select_with_seed_too_small_raise_Exception(self):
with self.assertRaises(ValueError):
test_size = 2
bingo_generator = bingo.BingoGenerator(size=test_size)
seed_list = ['a', 'b', 'c']
bingo_generator.words = seed_list
bingo_generator.select_words()
| """Unit tests for cat2cohort."""
import unittest
from bingo import bingo
class TestBingoGenerator(unittest.TestCase):
"""Test methods from bingo."""
def test_bingo_generator_has_default_size(self):
bingo_generator = bingo.BingoGenerator()
expected = pow(bingo.DEFAULT_SIZE, 2)
self.assertEquals(bingo_generator.size, expected)
def test_bingo_generator_has_given_size(self):
bingo_generator = bingo.BingoGenerator(4)
self.assertEquals(bingo_generator.size, 16)
def test_select_words_should_have_the_right_size(self):
test_size = 2
bingo_generator = bingo.BingoGenerator(size=test_size)
seed_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
bingo_generator.words = seed_list
selection = bingo_generator.select_words()
self.assertEquals(len(selection), pow(test_size, 2))
def test_select_words_should_return_words_from_the_seed_list(self):
test_size = 2
bingo_generator = bingo.BingoGenerator(size=test_size)
seed_list = set(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])
bingo_generator.words = seed_list
selection = set(bingo_generator.select_words())
self.assertTrue(seed_list.difference(selection))
| Python | 0.000008 |
bfb3b4825a41380b3cd299fc899bcf473323b68a | switch to BioPython to store FASTA | lib/msa_muscle/msa_muscleImpl.py | lib/msa_muscle/msa_muscleImpl.py | #BEGIN_HEADER
from biokbase.workspace.client import Workspace as workspaceService
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import generic_protein
import os
#END_HEADER
class msa_muscle:
'''
Module Name:
msa_muscle
Module Description:
A KBase module: msa_muscle
This sample module contains one small method - count_contigs.
'''
######## WARNING FOR GEVENT USERS #######
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
#########################################
#BEGIN_CLASS_HEADER
workspaceURL = None
fileFastaName = '/tmp/muscle/in.fasta'
fileOutputName = '/tmp/muscle/out.fasta'
def buildGenome2Features(self, ws, workspace_name, featureset_id):
genome2Features = {}
featureSet = ws.get_objects([{'ref':workspace_name+'/'+featureset_id}])[0]['data']
features = featureSet['elements']
for fId in features:
genomeRef = features[fId][0]
if genomeRef not in genome2Features:
genome2Features[genomeRef] = []
genome2Features[genomeRef].append(fId)
return genome2Features
def exportFasta(self, ws, workspace_name, featureset_id):
# Build genome2Features hash
genome2Features = self.buildGenome2Features(ws, workspace_name, featureset_id)
# Process each genome one by one
records = []
for genomeRef in genome2Features:
genome = ws.get_objects([{'ref':genomeRef}])[0]['data']
featureIds = genome2Features[genomeRef]
for feature in genome['features']:
for fId in featureIds:
if fId == feature['id']:
record = SeqRecord(Seq(feature['protein_translation']), id=fId, description=genomeRef)
records.append(record)
SeqIO.write(records, self.fileFastaName, "fasta")
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.workspaceURL = config['workspace-url']
#END_CONSTRUCTOR
pass
def build_msa(self, ctx, workspace_name, featureset_id, msa_id):
# ctx is the context object
# return variables are: returnVal
#BEGIN build_msa
returnVal = ''
# create workspace client
token = ctx['token']
ws = workspaceService(self.workspaceURL, token=token)
self.exportFasta(ws, workspace_name, featureset_id)
os.system('/kb/runtime/bin/muscle ' + ' -in ' + fileFastaName + ' -out ' + fileOutputName)
with open(fileOutputName, 'r') as fr:
returnVal = fr.read()
#END build_msa
# At some point might do deeper type checking...
if not isinstance(returnVal, basestring):
raise ValueError('Method build_msa return value ' +
'returnVal is not type basestring as required.')
# return the results
return [returnVal]
| #BEGIN_HEADER
from biokbase.workspace.client import Workspace as workspaceService
import os
#END_HEADER
class msa_muscle:
'''
Module Name:
msa_muscle
Module Description:
A KBase module: msa_muscle
This sample module contains one small method - count_contigs.
'''
######## WARNING FOR GEVENT USERS #######
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
#########################################
#BEGIN_CLASS_HEADER
workspaceURL = None
def testRun(self, ctx, workspace_name):
returnVal = ''
fastaFileIn = '/tmp/muscle/in.fasta'
fastaFileOut = '/tmp/muscle/out.fasta'
ff = open( fastaFileIn,'w')
ff.write('>1\nMTTPVDAPKWPRQIPYIIASEACERFSFYG\n')
ff.write('>2\nMTTPVDAPAAAAAKWPRQIPYIIASEACERFSFYG\n')
ff.write('>3\nMTTPVDAPKWPRQIPYIQQQQQQQIASEACERFSFYG\n')
ff.close()
os.system('/kb/runtime/bin/muscle ' + ' -in ' + fastaFileIn + ' -out ' + fastaFileOut)
with open(fastaFileOut, 'r') as fr:
returnVal = fr.read()
return returnVal
def buildGenome2Features(self, ws, workspace_name, featureset_id):
genome2Features = {}
featureSet = ws.get_objects([{'ref':workspace_name+'/'+featureset_id}])[0]['data']
features = featureSet['elements']
for fId in features:
genomeRef = features[fId][0]
if genomeRef not in genome2Features:
genome2Features[genomeRef] = []
genome2Features[genomeRef].append(fId)
return genome2Features
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.workspaceURL = config['workspace-url']
#END_CONSTRUCTOR
pass
def build_msa(self, ctx, workspace_name, featureset_id, msa_id):
# ctx is the context object
# return variables are: returnVal
#BEGIN build_msa
returnVal = ''
fileFastaName = '/tmp/muscle/in.fasta'
fileOutputName = '/tmp/muscle/out.fasta'
# create workspace client
token = ctx['token']
ws = workspaceService(self.workspaceURL, token=token)
# Build genome2Features hash
genome2Features = self.buildGenome2Features(ws, workspace_name, featureset_id)
# Process each genome one by one
with open(fileFastaName, 'w') as ff:
for genomeRef in genome2Features:
genome = ws.get_objects([{'ref':genomeRef}])[0]['data']
featureIds = genome2Features[genomeRef]
for feature in genome['features']:
for fId in featureIds:
if fId == feature['id']:
ff.write('>' + fId + '\n' + feature['protein_translation'] + '\n')
os.system('/kb/runtime/bin/muscle ' + ' -in ' + fileFastaName + ' -out ' + fileOutputName)
with open(fileOutputName, 'r') as fr:
returnVal = fr.read()
# statinfo = os.stat('/kb/runtime/muscle/muscle3.8.31_i86linux64')
# returnVal = str(statinfo.st_size)
#END build_msa
# At some point might do deeper type checking...
if not isinstance(returnVal, basestring):
raise ValueError('Method build_msa return value ' +
'returnVal is not type basestring as required.')
# return the results
return [returnVal]
| Python | 0.000001 |
36f2376a2f23b295bba8cc2af16577efd3fe03ff | Add a couple of snippets. | utils/snippets.py | utils/snippets.py | #!/usr/bin/env python
# A hacky script to do dynamic snippets.
import sys
import os
import datetime
snippet_map = {
'date': datetime.datetime.now().strftime('%b %d %G %I:%M%p '),
'time': datetime.datetime.now().strftime('%I:%M%p '),
'best': 'Best,\nSameer',
'cheers': 'Cheers,\nSameer',
'thanks': 'Thanks,\nSameer',
}
keys = '\n'.join(snippet_map.keys())
result = os.popen('printf "%s" | rofi -dmenu ' % keys)
selected_key = result.read().strip()
os.system('sleep 0.1; xdotool type --clearmodifiers "$(printf "%s")"' % str(
snippet_map[selected_key]))
| #!/usr/bin/env python
# A hacky script to do dynamic snippets.
import sys
import os
import datetime
snippet_map = {
'date' : datetime.datetime.now().strftime('%b %d %G %I:%M%p '),
'time' : datetime.datetime.now().strftime('%I:%M%p '),
'sign' : 'Best,\nSameer',
}
keys = '\n'.join(snippet_map.keys())
result = os.popen('printf "%s" | rofi -dmenu ' % keys)
selected_key = result.read().strip()
os.system('sleep 0.1; xdotool type --clearmodifiers "$(printf "%s")"' % str(snippet_map[selected_key]))
| Python | 0.000002 |
7d2c4140a74fa052eda6a6a19593321056c9eb80 | convert prints to logging in jsonparser | src/unix/plugins/jsonparser/jsonparser.py | src/unix/plugins/jsonparser/jsonparser.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
JSON agent command parser main code module
"""
import nova_agent
import logging
import anyjson
class CommandNotFoundError(Exception):
def __init__(self, cmd):
self.cmd = cmd
def __str__(self):
return "No such agent command '%s'" % self.cmd
class command_metaclass(type):
def __init__(cls, cls_name, bases, attrs):
if not hasattr(cls, '_cmd_classes'):
cls._cmd_classes = []
cls._cmd_instances = []
cls._cmds = {}
else:
cls._cmd_classes.append(cls)
class command(object):
"""
The class that all command classes should inherit from
"""
# Set the metaclass
__metaclass__ = command_metaclass
@classmethod
def _get_commands(self, inst):
cmds = {}
for objname in dir(inst):
obj = getattr(inst, objname)
if getattr(obj, '_is_cmd', False):
try:
cmds[obj._cmd_name] = obj
except AttributeError:
# skip it if there's no _cmd_name
pass
return cmds
@classmethod
def create_instances(self, *args, **kwargs):
for cls in self._cmd_classes:
inst = cls(*args, **kwargs)
self._cmd_instances.append(inst)
self._cmds.update(self._get_commands(inst))
@classmethod
def command_names(self):
return [x for x in self._cmds]
@classmethod
def run_command(self, cmd_name, arg):
try:
result = self._cmds[cmd_name](arg)
except KeyError:
raise CommandNotFoundError(cmd_name)
return result
def command_add(cmd_name):
"""
Decorator for command classes to use to add commands
"""
def wrap(f):
f._is_cmd = True
f._cmd_name = cmd_name
return f
return wrap
class command_parser(nova_agent.plugin):
"""
JSON command parser plugin for nova-agent
"""
type = "parser"
def __init__(self, *args, **kwargs):
super(command_parser, self).__init__(*args, **kwargs)
__import__("plugins.jsonparser.commands")
command.create_instances()
def encode_result(self, result):
our_format = {"returncode": str(result[0]),
"message": result[1]}
return {"data": anyjson.serialize(our_format)}
def parse_request(self, request):
try:
request = anyjson.deserialize(request['data'])
except Exception, e:
logging.error("Request dictionary contains no 'data' key")% \
return None
try:
cmd_name = request['name']
except KeyError:
logging.error("Request is missing 'name' key")% \
return None
try:
cmd_string = request['value']
except KeyError:
cmd_string = ''
logging.info("Received command '%s' with argument: '%s'" % \
(cmd_name, cmd_string))
try:
result = command.run_command(cmd_name, cmd_string)
except CommandNotFoundError, e:
logging.warn(str(e))
return self.encode_result((404, str(e)))
logging.info("'%s' completed with code '%s', message '%s'" % \
(cmd_name, result[0], result[1]))
return self.encode_result(result)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
JSON agent command parser main code module
"""
import nova_agent
import logging
import anyjson
class CommandNotFoundError(Exception):
def __init__(self, cmd):
self.cmd = cmd
def __str__(self):
return "No such agent command '%s'" % self.cmd
class command_metaclass(type):
def __init__(cls, cls_name, bases, attrs):
if not hasattr(cls, '_cmd_classes'):
cls._cmd_classes = []
cls._cmd_instances = []
cls._cmds = {}
else:
cls._cmd_classes.append(cls)
class command(object):
"""
The class that all command classes should inherit from
"""
# Set the metaclass
__metaclass__ = command_metaclass
@classmethod
def _get_commands(self, inst):
cmds = {}
for objname in dir(inst):
obj = getattr(inst, objname)
if getattr(obj, '_is_cmd', False):
try:
cmds[obj._cmd_name] = obj
except AttributeError:
# skip it if there's no _cmd_name
pass
return cmds
@classmethod
def create_instances(self, *args, **kwargs):
for cls in self._cmd_classes:
inst = cls(*args, **kwargs)
self._cmd_instances.append(inst)
self._cmds.update(self._get_commands(inst))
@classmethod
def command_names(self):
return [x for x in self._cmds]
@classmethod
def run_command(self, cmd_name, arg):
try:
result = self._cmds[cmd_name](arg)
except KeyError:
raise CommandNotFoundError(cmd_name)
return result
def command_add(cmd_name):
"""
Decorator for command classes to use to add commands
"""
def wrap(f):
f._is_cmd = True
f._cmd_name = cmd_name
return f
return wrap
class command_parser(nova_agent.plugin):
"""
JSON command parser plugin for nova-agent
"""
type = "parser"
def __init__(self, *args, **kwargs):
super(command_parser, self).__init__(*args, **kwargs)
__import__("plugins.jsonparser.commands")
command.create_instances()
def encode_result(self, result):
our_format = {"returncode": str(result[0]),
"message": result[1]}
return {"data": anyjson.serialize(our_format)}
def parse_request(self, request):
try:
request = anyjson.deserialize(request['data'])
except Exception, e:
# log it
print "Missing data"
print e
return None
try:
cmd_name = request['name']
except KeyError:
print "Missing command name"
return None
try:
cmd_string = request['value']
except KeyError:
cmd_string = ''
logging.info("Received command '%s' with argument: '%s'" % \
(cmd_name, cmd_string))
try:
result = command.run_command(cmd_name, cmd_string)
except CommandNotFoundError, e:
logging.warn(str(e))
return self.encode_result((404, str(e)))
logging.info("'%s' completed with code '%s', message '%s'" % \
(cmd_name, result[0], result[1]))
return self.encode_result(result)
| Python | 0.002846 |
6dcde2c4931b0b8945e235005c28f7eb344cbebc | build LOAFER_ROUTE based on envvars | loafer/conf.py | loafer/conf.py | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from prettyconf import config
class Settings(object):
# Logging
LOAFER_LOGLEVEL = config('LOAFER_LOGLEVEL', default='WARNING')
LOAFER_LOG_FORMAT = config('LOAFER_LOG_FORMAT',
default='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Max concurrent jobs (asyncio)
LOAFER_MAX_JOBS = config('LOAFER_MAX_JOBS', default=10)
# Default value are determined from the number of machine cores
LOAFER_MAX_THREAD_POOL = config('LOAFER_MAX_THREAD_POOL', default=None)
# Translator
LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS = 'loafer.message_translator.StringMessageTranslator'
# Routes
LOAFER_ROUTES = [
{'name': config('LOAFER_DEFAULT_ROUTE_NAME', default='default'),
'source': config('LOAFER_DEFAULT_ROUTE_SOURCE'),
'handler': config('LOAFER_DEFAULT_ROUTE_HANDLER',
default='loafer.example.jobs.async_example_job'),
'message_translator': LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS},
]
# Consumer
# Currently, only AWS is supported, references:
# http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html
# http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html
# By default, SQS does not set long-polling (WaitTimeSeconds) and the MaxNumberOfMessages is 1
# TODO: tweak default values for acceptable performance
LOAFER_DEFAULT_CONSUMER_CLASS = 'loafer.aws.consumer.Consumer'
LOAFER_DEFAULT_CONSUMER_OPTIONS = {'WaitTimeSeconds': 5, # from 1-20
'MaxNumberOfMessages': 5} # from 1-10
# Setting LOAFER_CONSUMERS is only needed when there's more than one consumer.
# Otherwise, all routes will use the LOAFER_DEFAULT_CONSUMER_CLASS
# and LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS automatically.
# This is an example configuration that will be available in the future:
LOAFER_CONSUMERS = [
{'route_source': {'consumer_class': LOAFER_DEFAULT_CONSUMER_CLASS,
'consumer_options': LOAFER_DEFAULT_CONSUMER_OPTIONS}},
]
def __init__(self, **defaults):
if defaults:
safe_defaults = {k: v for k, v in defaults.items()
if k.isupper() and k.startswith('LOAFER_')}
self.__dict__.update(safe_defaults)
settings = Settings()
| # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from prettyconf import config
class Settings(object):
# Logging
LOAFER_LOGLEVEL = config('LOAFER_LOGLEVEL', default='WARNING')
LOAFER_LOG_FORMAT = config('LOAFER_LOG_FORMAT',
default='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Max concurrent jobs (asyncio)
LOAFER_MAX_JOBS = config('LOAFER_MAX_JOBS', default=10)
# Default value are determined from the number of machine cores
LOAFER_MAX_THREAD_POOL = config('LOAFER_MAX_THREAD_POOL', default=None)
# Translator
LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS = 'loafer.message_translator.StringMessageTranslator'
# Routes
LOAFER_ROUTES = [
{'name': 'example_route',
'source': 'route_source',
'handler': 'loafer.example.jobs.async_example_job',
'message_translator': LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS},
]
# Consumer
# Currently, only AWS is supported, references:
# http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html
# http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html
# By default, SQS does not set long-polling (WaitTimeSeconds) and the MaxNumberOfMessages is 1
# TODO: tweak default values for acceptable performance
LOAFER_DEFAULT_CONSUMER_CLASS = 'loafer.aws.consumer.Consumer'
LOAFER_DEFAULT_CONSUMER_OPTIONS = {'WaitTimeSeconds': 5, # from 1-20
'MaxNumberOfMessages': 5} # from 1-10
# Setting LOAFER_CONSUMERS is only needed when there's more than one consumer.
# Otherwise, all routes will use the LOAFER_DEFAULT_CONSUMER_CLASS
# and LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS automatically.
# This is an example configuration and will not match anything (probably).
LOAFER_CONSUMERS = [
{'route_source': {'consumer_class': LOAFER_DEFAULT_CONSUMER_CLASS,
'consumer_options': LOAFER_DEFAULT_CONSUMER_OPTIONS}},
]
def __init__(self, **defaults):
if defaults:
safe_defaults = {k: v for k, v in defaults.items()
if k.isupper() and k.startswith('LOAFER_')}
self.__dict__.update(safe_defaults)
settings = Settings()
| Python | 0 |
8887ac66a221b443215e7ab57a2f21b1521b167b | move docs to readme | utils/workflow.py | utils/workflow.py | from __future__ import print_function
import os
import subprocess
import sys
from datetime import datetime
from string import Template
from lektor.utils import slugify
HERE = os.path.dirname(__file__)
PROJECT_PATH = os.path.join(HERE, '..')
DRAFTS_PATH = os.path.join(PROJECT_PATH, 'drafts')
CONTENT_PATH = os.path.join(PROJECT_PATH, 'content')
def draft():
title = sys.argv[1]
with open(os.path.join(HERE, 'article-blueprint.md')) as f:
content = f.read()
rep = dict(title=title)
content = Template(content).safe_substitute(rep)
dst = os.path.join(DRAFTS_PATH, '%s.md' % slugify(title))
assert not os.path.exists(dst), dst
with open(dst, 'w') as f:
f.write(content)
def publish():
srcPath = sys.argv[1]
with open(srcPath) as f:
content = f.read()
rep = dict(date=datetime.now().strftime('%Y-%m-%d'))
content = Template(content).safe_substitute(rep)
slug = os.path.splitext(os.path.basename(srcPath))[0]
containerPath = os.path.join(CONTENT_PATH, slug)
assert not os.path.exists(containerPath), containerPath
os.mkdir(containerPath)
dst = os.path.join(containerPath, 'contents.lr')
with open(dst, 'w') as f:
f.write(content)
os.remove(srcPath)
def deploy():
if len(sys.argv) > 2 and sys.argv[2] == 'clean':
print(subprocess.check_output(['lektor', 'clean', '--yes']))
else:
print(subprocess.check_output(['lektor', 'build']))
print(subprocess.check_output(['lektor', 'deploy']))
| """
Helpers for my evolving workflow.
draft [art] "My super article"
creates a prepared md file with all the necessary settings to work on.
publish drafts/my-super-article.md
will make the necessary adjustments and publish it in the contents.
deploy [clean]
will create a [clean] build and push it online.
"""
from __future__ import print_function
import os
import subprocess
import sys
from datetime import datetime
from string import Template
from lektor.utils import slugify
HERE = os.path.dirname(__file__)
PROJECT_PATH = os.path.join(HERE, '..')
DRAFTS_PATH = os.path.join(PROJECT_PATH, 'drafts')
CONTENT_PATH = os.path.join(PROJECT_PATH, 'content')
def draft():
title = sys.argv[1]
with open(os.path.join(HERE, 'article-blueprint.md')) as f:
content = f.read()
rep = dict(title=title)
content = Template(content).safe_substitute(rep)
dst = os.path.join(DRAFTS_PATH, '%s.md' % slugify(title))
assert not os.path.exists(dst), dst
with open(dst, 'w') as f:
f.write(content)
def publish():
srcPath = sys.argv[1]
with open(srcPath) as f:
content = f.read()
rep = dict(date=datetime.now().strftime('%Y-%m-%d'))
content = Template(content).safe_substitute(rep)
slug = os.path.splitext(os.path.basename(srcPath))[0]
containerPath = os.path.join(CONTENT_PATH, slug)
assert not os.path.exists(containerPath), containerPath
os.mkdir(containerPath)
dst = os.path.join(containerPath, 'contents.lr')
with open(dst, 'w') as f:
f.write(content)
os.remove(srcPath)
def deploy():
if len(sys.argv) > 2 and sys.argv[2] == 'clean':
print(subprocess.check_output(['lektor', 'clean', '--yes']))
else:
print(subprocess.check_output(['lektor', 'build']))
print(subprocess.check_output(['lektor', 'deploy']))
| Python | 0 |
377f2120b3474d131b02dab90b6e51c35deb0c74 | Add comments | mathphys/constants.py | mathphys/constants.py | """Constants module."""
import math as _math
from . import base_units as _u
# Temporary auxiliary derived units composed from the SI base units in base_units.
_volt = (_u.kilogram * _u.meter**2) / (_u.ampere * _u.second**2)
# NOTE(review): an SI volt is kg*m^2/(A*s^3); the exponent of second above
# looks off by one -- confirm against how base_units defines these factors.
_coulomb = _u.second * _u.ampere
_joule = _u.kilogram * _u.meter**2 / _u.second**2
_pascal = _u.kilogram / (_u.meter * _u.second**2)
# physical constants
# ==================
# --- exact by definition --
# Values fixed exactly by the 2019 SI redefinition.
light_speed = 299792458 * (_u.meter / _u.second)
gas_constant = 8.314462618 * (_joule / _u.mole / _u.kelvin)
boltzmann_constant = 1.380649e-23 * (_joule / _u.kelvin)
avogadro_constant = 6.02214076e23 * (1 / _u.mole)
elementary_charge = 1.602176634e-19 * (_coulomb)
reduced_planck_constant = 1.054571817e-34 * (_joule * _u.second)
# --- measured ---
# 2021-04-15 - https://physics.nist.gov/cgi-bin/cuu/Value?me|search_for=electron+mass
electron_mass = 9.1093837015e-31 * (_u.kilogram)
# 2021-04-15 - https://physics.nist.gov/cgi-bin/cuu/Value?mu0|search_for=vacuum+permeability
vacuum_permeability = 1.25663706212e-6 * \
    (_volt * _u.second / _u.ampere / _u.meter)
# --- derived ---
# [kg*m^2/s^2] - electron rest energy, m_e * c^2
electron_rest_energy = electron_mass * _math.pow(light_speed, 2)
# Vacuum permittivity, 1/(mu0 * c^2).
# NOTE(review): the name is a typo for 'permittivity'; kept unchanged since
# downstream code may import it under this spelling.
vacuum_permitticity = 1.0/(vacuum_permeability * _math.pow(light_speed, 2))
# [T·m^2/(A·s)] - vacuum impedance, mu0 * c
vacuum_impedance = vacuum_permeability * light_speed
# [m] - classical electron radius, e^2 / (4*pi*eps0*m_e*c^2)
electron_radius = _math.pow(elementary_charge, 2) / \
    (4*_math.pi*vacuum_permitticity*electron_rest_energy)
# Conversion factor from joule to electron-volt.
_joule_2_eV = _joule / elementary_charge
# [m]/[GeV]^3 - synchrotron radiation constant (C_gamma)
rad_cgamma = 4*_math.pi*electron_radius / \
    _math.pow(electron_rest_energy/elementary_charge/1.0e9, 3) / 3
# [m] - quantum excitation constant (C_q)
Cq = (55.0/(32*_math.sqrt(3.0))) * (reduced_planck_constant) * \
    light_speed / electron_rest_energy
# [m^2/(s·GeV^3)] - damping constant (C_alpha)
Ca = electron_radius*light_speed / \
    (3*_math.pow(electron_rest_energy*_joule_2_eV/1.0e9, 3))
| """Constants module."""
import math as _math
from . import base_units as _u
# temporary auxiliary derived units
_volt = (_u.kilogram * _u.meter**2) / (_u.ampere * _u.second**2)
_coulomb = _u.second * _u.ampere
_joule = _u.kilogram * _u.meter**2 / _u.second**2
_pascal = _u.kilogram / (_u.meter * _u.second**2)
# physical constants
# ==================
# --- exact --
light_speed = 299792458 * (_u.meter / _u.second)
gas_constant = 8.314462618 * (_joule / _u.mole / _u.kelvin)
boltzmann_constant = 1.380649e-23 * (_joule / _u.kelvin)
avogadro_constant = 6.02214076e23 * (1 / _u.mole)
elementary_charge = 1.602176634e-19 * (_coulomb)
reduced_planck_constant = 1.054571817e-34 * (_joule * _u.second)
# --- measured ---
# 2021-04-15 - https://physics.nist.gov/cgi-bin/cuu/Value?me|search_for=electron+mass
electron_mass = 9.1093837015e-31 * (_u.kilogram)
# 2021-04-15 - https://physics.nist.gov/cgi-bin/cuu/Value?mu0|search_for=vacuum+permeability
vacuum_permeability = 1.25663706212e-6 * \
(_volt * _u.second / _u.ampere / _u.meter)
# --- derived ---
# [Kg̣*m^2/s^2] - derived
electron_rest_energy = electron_mass * _math.pow(light_speed, 2)
# [V·s/(A.m)] - derived
vacuum_permitticity = 1.0/(vacuum_permeability * _math.pow(light_speed, 2))
# [T·m^2/(A·s)] - derived
vacuum_impedance = vacuum_permeability * light_speed
# [m] - derived
electron_radius = _math.pow(elementary_charge, 2) / \
(4*_math.pi*vacuum_permitticity*electron_rest_energy)
_joule_2_eV = _joule / elementary_charge
# [m]/[GeV]^3 - derived
rad_cgamma = 4*_math.pi*electron_radius / \
_math.pow(electron_rest_energy/elementary_charge/1.0e9, 3) / 3
# [m] - derived
Cq = (55.0/(32*_math.sqrt(3.0))) * (reduced_planck_constant) * \
light_speed / electron_rest_energy
# [m^2/(s·GeV^3)] - derived
Ca = electron_radius*light_speed / \
(3*_math.pow(electron_rest_energy*_joule_2_eV/1.0e9, 3))
| Python | 0 |
e732615e2e8586cc3f6a31614372ef16bae26a36 | update tests for prices.py | tests/test_price.py | tests/test_price.py | from bitshares import BitShares
from bitshares.instance import set_shared_bitshares_instance
from bitshares.amount import Amount
from bitshares.price import Price
from bitshares.asset import Asset
import unittest
class Testcases(unittest.TestCase):
    """Tests for ``bitshares.price.Price`` construction and arithmetic.

    NOTE(review): the constructor opens a live websocket connection to a
    public BitShares node, so these tests require network access.
    """
    def __init__(self, *args, **kwargs):
        super(Testcases, self).__init__(*args, **kwargs)
        # Every Price/Asset/Amount lookup below resolves via this shared instance.
        bitshares = BitShares(
            "wss://node.bitshares.eu"
        )
        set_shared_bitshares_instance(bitshares)
    def test_init(self):
        # Each supported constructor signature must be accepted without raising.
        Price("0.315 USD/BTS")
        Price(1.0, "USD/GOLD")
        Price(0.315, base="USD", quote="BTS")
        Price(0.315, base=Asset("USD"), quote=Asset("BTS"))
        Price({
            "base": {"amount": 1, "asset_id": "1.3.0"},
            "quote": {"amount": 10, "asset_id": "1.3.106"}})
        Price({
            "receives": {"amount": 1, "asset_id": "1.3.0"},
            "pays": {"amount": 10, "asset_id": "1.3.106"},
        }, base_asset=Asset("1.3.0"))
        Price(quote="10 GOLD", base="1 USD")
        Price("10 GOLD", "1 USD")
        Price(Amount("10 GOLD"), Amount("1 USD"))
    def test_multiplication(self):
        p1 = Price(10.0, "USD/GOLD")
        p2 = Price(5.0, "USD/EUR")
        p3 = p1 * p2
        p4 = p3.as_base("GOLD")
        self.assertEqual(p4["quote"]["symbol"], "EUR")
        self.assertEqual(p4["base"]["symbol"], "GOLD")
        # 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR
        self.assertEqual(float(p4), 0.5)
        # In-place multiplication must agree with the binary operator.
        p5 = p1
        p5 *= p2
        p4 = p5.as_base("GOLD")
        self.assertEqual(p4["quote"]["symbol"], "EUR")
        self.assertEqual(p4["base"]["symbol"], "GOLD")
        # 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR
        self.assertEqual(float(p4), 0.5)
    def test_div(self):
        p1 = Price(10.0, "USD/GOLD")
        p2 = Price(5.0, "USD/EUR")
        # 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD
        p3 = p1 / p2
        p4 = p3.as_base("EUR")
        self.assertEqual(p4["base"]["symbol"], "EUR")
        self.assertEqual(p4["quote"]["symbol"], "GOLD")
        # Expressed with EUR as base, the price reads 2 EUR/GOLD.
        self.assertEqual(float(p4), 2)
    def test_div2(self):
        p1 = Price(10.0, "USD/GOLD")
        p2 = Price(5.0, "USD/GOLD")
        # Same market on both sides: the result is a plain ratio, not a Price.
        p3 = p1 / p2
        self.assertTrue(isinstance(p3, (float, int)))
        self.assertEqual(float(p3), 2.0)
| from bitshares import BitShares
from bitshares.instance import set_shared_bitshares_instance
from bitshares.amount import Amount
from bitshares.price import Price
from bitshares.asset import Asset
import unittest
class Testcases(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(Testcases, self).__init__(*args, **kwargs)
bitshares = BitShares(
"wss://node.bitshares.eu"
)
set_shared_bitshares_instance(bitshares)
def test_init(self):
# self.assertEqual(1, 1)
Price("0.315 USD/BTS")
Price(1.0, "USD/GOLD")
Price(0.315, base="USD", quote="BTS")
Price(0.315, base=Asset("USD"), quote=Asset("BTS"))
Price({
"base": {"amount": 1, "asset_id": "1.3.0"},
"quote": {"amount": 10, "asset_id": "1.3.106"}})
Price({
"receives": {"amount": 1, "asset_id": "1.3.0"},
"pays": {"amount": 10, "asset_id": "1.3.106"},
}, base_asset=Asset("1.3.0"))
Price(quote="10 GOLD", base="1 USD")
Price("10 GOLD", "1 USD")
Price(Amount("10 GOLD"), Amount("1 USD"))
def test_multiplication(self):
p1 = Price(10.0, "USD/GOLD")
p2 = Price(5.0, "USD/EUR")
p3 = p1 * p2
p4 = p3.as_base("GOLD")
self.assertEqual(p4["quote"]["symbol"], "EUR")
self.assertEqual(p4["base"]["symbol"], "GOLD")
# 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR
self.assertEqual(float(p4), 0.5)
# Inline multiplication
p5 = p1
p5 *= p2
p4 = p5.as_base("GOLD")
self.assertEqual(p4["quote"]["symbol"], "EUR")
self.assertEqual(p4["base"]["symbol"], "GOLD")
# 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR
self.assertEqual(float(p4), 0.5)
def test_div(self):
p1 = Price(10.0, "USD/GOLD")
p2 = Price(5.0, "USD/EUR")
# 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD
p3 = p1 / p2
p4 = p3.as_base("EUR")
self.assertEqual(p4["base"]["symbol"], "EUR")
self.assertEqual(p4["quote"]["symbol"], "GOLD")
# 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR
self.assertEqual(float(p4), 2)
| Python | 0 |
a26f04bddcdb92af050c2d8237ccb6c2ef1406e5 | Fix identation | jst/common/context.py | jst/common/context.py | '''
Created on Jan 18, 2015
@author: rz
'''
import configparser
import os
from os.path import expanduser
def load():
    """Assemble the jst context from the user-level config plus the
    ``jstcontext.properties`` file in the current working directory.

    Raises FileNotFoundError when either properties file is missing.
    """
    cwd = os.getcwd()
    global_cfg_file = expanduser("~") + '/.jst/jst.properties'
    ctx_file = cwd + '/jstcontext.properties'
    for required_file in (global_cfg_file, ctx_file):
        if not os.path.isfile(required_file):
            raise FileNotFoundError(required_file)
    global_cfg = configparser.ConfigParser()
    global_cfg.read(global_cfg_file)
    ctx = configparser.ConfigParser()
    ctx.read(ctx_file)
    # Derived settings: branch URLs carry the configured SVN user.
    user = global_cfg['src']['user']
    ctx['src']['url_ce'] = 'svn+ssh://%s@%s/%s' % (
        user, global_cfg['src']['url_ce'], ctx['src']['branch_ce'])
    ctx['src']['url_pro'] = 'svn+ssh://%s@%s/%s' % (
        user, global_cfg['src']['url_pro'], ctx['src']['branch_pro'])
    ctx['src']['working_copy_ce'] = cwd + '/ce'
    ctx['src']['working_copy_pro'] = cwd + '/pro'
    ctx['tc']['distribution'] = global_cfg['tc']['distribution']
    ctx['tc']['home'] = cwd + '/tc'
    return ctx
def show(ctx):
    """Print the effective context, one ``section.key = value`` line each."""
    print('src.url_ce = ' + ctx['src']['url_ce'])
    print('src.url_pro = ' + ctx['src']['url_pro'])
    print('src.working_copy_ce = ' + ctx['src']['working_copy_ce'])
    print('src.working_copy_pro = ' + ctx['src']['working_copy_pro'])
    print('tc.home = ' + ctx['tc']['home'])
    print('tc.distribution = ' + ctx['tc']['distribution'])
    print('tc.catalina_opts = ' + ctx['tc']['catalina_opts'])
print('tc.java_opts = ' + ctx['tc']['java_opts']) | '''
Created on Jan 18, 2015
@author: rz
'''
import configparser
import os
from os.path import expanduser
def load():
global_cfg_file = expanduser("~") + '/.jst/jst.properties'
if (not os.path.isfile(global_cfg_file)):
raise FileNotFoundError(global_cfg_file)
cwd = os.getcwd()
ctx_file = cwd + '/jstcontext.properties'
if (not os.path.isfile(ctx_file)):
raise FileNotFoundError(ctx_file)
global_cfg = configparser.ConfigParser()
global_cfg.read(global_cfg_file)
ctx = configparser.ConfigParser()
ctx.read(ctx_file)
ctx['src']['url_ce'] = 'svn+ssh://' + global_cfg['src']['user'] + '@' + global_cfg['src']['url_ce'] + '/' + ctx['src']['branch_ce']
ctx['src']['url_pro'] = 'svn+ssh://' + global_cfg['src']['user'] + '@' + global_cfg['src']['url_pro'] + '/' + ctx['src']['branch_pro']
ctx['src']['working_copy_ce'] = cwd + '/ce'
ctx['src']['working_copy_pro'] = cwd + '/pro'
ctx['tc']['distribution'] = global_cfg['tc']['distribution']
ctx['tc']['home'] = cwd + '/tc'
return ctx
def show(ctx):
print('src.url_ce = ' + ctx['src']['url_ce'])
print('src.url_pro = ' + ctx['src']['url_pro'])
print('src.working_copy_ce = ' + ctx['src']['working_copy_ce'])
print('src.working_copy_pro = ' + ctx['src']['working_copy_pro'])
print('tc.home = ' + ctx['tc']['home'])
print('tc.distribution = ' + ctx['tc']['distribution'])
print('tc.catalina_opts = ' + ctx['tc']['catalina_opts'])
print('tc.java_opts = ' + ctx['tc']['java_opts']) | Python | 0.001406 |
99bc38b7d33eef76fd99d7ce362b00080edf5067 | Change dependencies | stock_shipment_management/__openerp__.py | stock_shipment_management/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{"name": "Transportation Plan",
 "version": "0.1",
 "author": "Camptocamp",
 "category": "Transportation",
 "license": 'AGPL-3',
 'complexity': "normal",
 "images" : [],
 "website": "http://www.camptocamp.com",
 # Addons that must be installed before this one.
 "depends" : ["delivery",
              "stock_route_transit",
              ],
 "demo": [],
 # XML/CSV files loaded on module install or update.
 # NOTE(review): the 'tranport_*' filenames look misspelled -- confirm they
 # match the actual files shipped under data/.
 "data": ["data/tranport_plan_sequence.xml",
          "data/tranport_mode_data.xml",
          "view/transport_plan.xml",
          "view/transport_mode.xml",
          "security/ir.model.access.csv",
          ],
 "auto_install": False,
 "test": [],
 'installable': True,
 }
| # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more description.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{"name": "Transportation Plan",
"version": "0.1",
"author": "Camptocamp",
"category": "Transportation",
"license": 'AGPL-3',
'complexity': "normal",
"images" : [],
"website": "http://www.camptocamp.com",
"depends" : ["sale",
"purchase",
"stock",
],
"demo": [],
"data": ["data/tranport_plan_sequence.xml",
"data/tranport_mode_data.xml",
"view/transport_plan.xml",
"view/transport_mode.xml",
"security/ir.model.access.csv",
],
"auto_install": False,
"test": [],
'installable': True,
}
| Python | 0.000001 |
aad19b0373f2b331ffbada431385173d2bf3e43e | Update cronjob.py | k8s/models/cronjob.py | k8s/models/cronjob.py | #!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
from .common import ObjectMeta, ObjectReference, Time, ListMeta
from .job import JobTemplateSpec
from ..base import Model
from ..fields import Field, ListField
class CronJobSpec(Model):
    """``spec`` schema of a Kubernetes batch/v1beta1 CronJob.

    Field names mirror the k8s API fields one-to-one.
    """
    concurrencyPolicy = Field(six.text_type)
    failedJobsHistoryLimit = Field(int)
    jobTemplate = Field(JobTemplateSpec)
    schedule = Field(six.text_type)
    startingDeadlineSeconds = Field(int)
    successfulJobsHistoryLimit = Field(int)
    suspend = Field(bool)
class CronJobStatus(Model):
    """``status`` schema of a Kubernetes batch/v1beta1 CronJob."""
    active = ListField(ObjectReference)
    lastScheduleTime = Field(Time)
class CronJob(Model):
    """Kubernetes batch/v1beta1 CronJob resource (metadata, spec, status)."""
    class Meta:
        # Endpoints are pinned to the batch/v1beta1 API group/version.
        list_url = "/apis/batch/v1beta1/cronjobs"
        url_template = "/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}"
    metadata = Field(ObjectMeta)
    spec = Field(CronJobSpec)
    status = Field(CronJobStatus)
| #!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
from .common import ObjectMeta, ObjectReference, Time, ListMeta
from .job import JobTemplateSpec
from ..base import Model
from ..fields import Field, ListField
class CronJobSpec(Model):
concurrencyPolicy = Field(six.text_type)
failedJobsHistoryLimit = Field(int)
jobTemplate = Field(JobTemplateSpec)
schedule = Field(six.text_type)
startingDeadlineSeconds = Field(int)
successfulJobsHistoryLimit = Field(int)
suspend = Field(bool)
class CronJobStatus(Model):
active = ListField(ObjectReference)
lastScheduleTime = Field(Time)
class CronJob(Model):
class Meta:
list_url = "/apis/batch/v1beta1/cronjobs"
url_template = "/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}"
metadata = Field(ObjectMeta)
spec = Field(CronJobSpec)
status = Field(CronJobStatus)
class CronJobList(Model):
apiVersion = Field(six.text_type)
items = ListField(CronJob)
kind = Field(six.text_type)
metadata = Field(ListMeta)
| Python | 0.000004 |
e7dca1dae8300dd702ecfc36110518b16c9c5231 | change directory back to previous location (prevents following tests from pointing into the forest) | tests/testhelper.py | tests/testhelper.py | from contextlib import contextmanager
import tempfile
import os
import shutil
from configuration import Builder
from gitFunctions import Initializer
import configuration
@contextmanager
def mkchdir(subfolder, folderprefix="rtc2test_case"):
    """Create a temp directory, chdir into it, and yield its path.

    On exit the previous working directory is restored and the temp
    directory is removed best-effort (on Windows a process holding a
    handle, e.g. git, can keep the folder alive in %TEMP%).
    """
    original_cwd = os.getcwd()
    workdir = tempfile.mkdtemp(prefix=folderprefix + subfolder)
    os.chdir(workdir)
    try:
        yield workdir
    finally:
        os.chdir(original_cwd)
        shutil.rmtree(workdir, ignore_errors=True)
@contextmanager
def createrepo(reponame="test.git", folderprefix="rtc2test_case"):
    """Create a throw-away git repo in a temp directory and chdir into it.

    Side effect: rebinds the module-global ``configuration.config`` to a
    fresh Builder config pointing at the temp directory; after exit that
    global still references the (now deleted) directory.
    """
    repodir = tempfile.mkdtemp(prefix=folderprefix)
    configuration.config = Builder().setworkdirectory(repodir).setgitreponame(reponame).build()
    initializer = Initializer()
    previousdir = os.getcwd()
    os.chdir(repodir)
    initializer.initalize()  # NOTE(review): 'initalize' is the project's (misspelled) API name
    try:
        yield
    finally:
        # Restore the cwd first so later tests never run inside a deleted dir.
        os.chdir(previousdir)
        shutil.rmtree(repodir, ignore_errors=True)  # on windows folder remains in temp, git process locks it
| from contextlib import contextmanager
import tempfile
import os
import shutil
from configuration import Builder
from gitFunctions import Initializer
import configuration
@contextmanager
def mkchdir(subfolder, folderprefix="rtc2test_case"):
tempfolder = tempfile.mkdtemp(prefix=folderprefix + subfolder)
os.chdir(tempfolder)
try:
yield tempfolder
finally:
shutil.rmtree(tempfolder, ignore_errors=True) # on windows folder remains in temp, git process locks it
@contextmanager
def createrepo(reponame="test.git", folderprefix="rtc2test_case"):
repodir = tempfile.mkdtemp(prefix=folderprefix)
configuration.config = Builder().setworkdirectory(repodir).setgitreponame(reponame).build()
initializer = Initializer()
os.chdir(repodir)
initializer.initalize()
try:
yield
finally:
shutil.rmtree(repodir, ignore_errors=True) # on windows folder remains in temp, git process locks it
| Python | 0 |
5a8199744bf658d491721b16fea7639303e47d3f | Edit view pre-populates with data from user object | july/people/views.py | july/people/views.py | from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.template.context import RequestContext
#from google.appengine.ext import db
from july.people.models import Commit
from gae_django.auth.models import User
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
def user_profile(request, username):
    """Render the public profile page for *username*.

    Raises Http404 when no user with that username exists.
    """
    user = User.all().filter("username", username).get()
    if user is None:
        raise Http404("User not found")
    # Query the *profiled* user's commits, not the viewer's: the previous
    # request.user.key() showed the logged-in user's commits on every
    # profile page (and broke for anonymous visitors).
    commits = Commit.all().ancestor(user.key())
    return render_to_response('people/profile.html',
        {"commits": commits},
        RequestContext(request))
@login_required
def edit_profile(request, username, template_name='people/edit.html'):
    """Let the authenticated user edit their own profile.

    On a valid POST the submitted fields are copied onto the user entity,
    saved, and the browser is redirected to the member profile page;
    otherwise the (possibly bound) form is re-rendered.
    """
    from forms import EditUserForm
    user = request.user
    # Guard before the user object is dereferenced; previously this check
    # sat after the form had already been bound and saved, making it dead.
    if user is None:
        raise Http404("User not found")
    form = EditUserForm(request.POST or None, user=user)
    if form.is_valid():
        for key in form.cleaned_data:
            setattr(user, key, form.cleaned_data.get(key))
        user.put()
        return HttpResponseRedirect(
            reverse('member-profile', kwargs={'username': user.username})
        )
    return render_to_response(template_name,
        {'form': form},
        RequestContext(request))
| from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.template.context import RequestContext
#from google.appengine.ext import db
from july.people.models import Commit
from gae_django.auth.models import User
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
def user_profile(request, username):
user = User.all().filter("username", username).get()
if user == None:
raise Http404("User not found")
commits = Commit.all().ancestor(request.user.key())
return render_to_response('people/profile.html',
{"commits":commits},
RequestContext(request))
@login_required
def edit_profile(request, username, template_name='people/edit.html'):
from forms import EditUserForm
user = request.user
#CONSIDER FILES with no POST? Can that happen?
form = EditUserForm(request.POST or None, request.FILES or None)
if form.is_valid():
for key in form.cleaned_data:
setattr(user,key,form.cleaned_data.get(key))
user.put()
return HttpResponseRedirect(
reverse('member-profile', kwargs={'username': request.user.username})
)
if user == None:
raise Http404("User not found")
return render_to_response(template_name,
{'form':form,},
RequestContext(request))
| Python | 0 |
8e9edf002368df0cd4bfa33975271b75af191ef0 | fix cache expiring | ujt/dash_app.py | ujt/dash_app.py | """ Configuration for Dash app.
Exposes app and cache to enable other files (namely callbacks) to register callbacks and update cache.
App is actually started by ujt.py
"""
import dash
import dash_bootstrap_components as dbc
import dash_cytoscape as cyto
from flask_caching import Cache
# Initialize Dash app and Flask-Caching.
cyto.load_extra_layouts()  # enable the extra Cytoscape layouts for the app
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
cache = Cache()
cache.init_app(
    app.server,
    config={
        "CACHE_TYPE": "filesystem",
        "CACHE_DIR": "cache_dir",
        # 0 disables the default timeout, so cached entries never expire.
        "CACHE_DEFAULT_TIMEOUT": 0,
        # NOTE(review): for the filesystem backend CACHE_THRESHOLD caps the
        # number of stored items -- confirm 0 means "unbounded" here.
        "CACHE_THRESHOLD": 0,
    },
)
| """ Configuration for Dash app.
Exposes app and cache to enable other files (namely callbacks) to register callbacks and update cache.
App is actually started by ujt.py
"""
import dash
import dash_bootstrap_components as dbc
import dash_cytoscape as cyto
from flask_caching import Cache
# Initialize Dash app and Flask-Cache
cyto.load_extra_layouts()
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
cache = Cache()
cache.init_app(
app.server,
config={"CACHE_TYPE": "filesystem", "CACHE_DIR": "cache_dir"},
)
| Python | 0 |
a8e43dcdbdd00de9d4336385b3f3def1ae5c2515 | Update UserX, with back compatibility | main/modelx.py | main/modelx.py | # -*- coding: utf-8 -*-
import hashlib
class BaseX(object):
    """Mixin providing shared datastore query helpers for the models."""
    @classmethod
    def retrieve_one_by(cls, name, value):
        """Return the first entity whose property *name* equals *value*, or None."""
        matches = cls.query(getattr(cls, name) == value).fetch(1)
        return matches[0] if matches else None
class ConfigX(object):
    """Mixin exposing access to the singleton configuration entity."""
    @classmethod
    def get_master_db(cls):
        # get_or_insert is idempotent: the 'master' row is created on first use.
        return cls.get_or_insert('master')
class UserX(object):
    """Mixin adding Gravatar avatar helpers to the user model."""
    def avatar_url_size(self, size=None):
        """Return the Gravatar URL for this user, optionally sized.

        The hash is derived from the email (falling back to the name);
        *size* in pixels is appended as ``&s=<n>`` when given and positive.
        """
        return '//gravatar.com/avatar/%(hash)s?d=identicon&r=x%(size)s' % {
            'hash': hashlib.md5((self.email or self.name).encode('utf-8')).hexdigest().lower(),
            # 'size and' guards the comparison: with the old bare 'size > 0'
            # the default size=None raised TypeError on Python 3 (None > 0),
            # which broke the avatar_url property below.
            'size': '&s=%d' % size if size and size > 0 else '',
        }
    avatar_url = property(avatar_url_size)
| # -*- coding: utf-8 -*-
import hashlib
class BaseX(object):
@classmethod
def retrieve_one_by(cls, name, value):
cls_db_list = cls.query(getattr(cls, name) == value).fetch(1)
if cls_db_list:
return cls_db_list[0]
return None
class ConfigX(object):
@classmethod
def get_master_db(cls):
return cls.get_or_insert('master')
class UserX(object):
def avatar_url(self, size=None):
return '//gravatar.com/avatar/%(hash)s?d=identicon&r=x%(size)s' % {
'hash': hashlib.md5((self.email or self.name).encode('utf-8')).hexdigest().lower(),
'size': '&s=%d' % size if size > 0 else '',
}
| Python | 0 |
6ba3fc5c9fade3609695aa7f5b0498b77a8c18fa | revert to 0.2.7 tag | keras_cv/__init__.py | keras_cv/__init__.py | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import layers
from keras_cv import metrics
from keras_cv import utils
from keras_cv import version_check
from keras_cv.core import ConstantFactorSampler
from keras_cv.core import FactorSampler
from keras_cv.core import NormalFactorSampler
from keras_cv.core import UniformFactorSampler
version_check.check_tf_version()  # validate the installed TensorFlow version before exposing the API
__version__ = "0.2.7"  # pinned release version (this change reverts to the 0.2.7 tag)
| # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import layers
from keras_cv import metrics
from keras_cv import utils
from keras_cv import version_check
from keras_cv.core import ConstantFactorSampler
from keras_cv.core import FactorSampler
from keras_cv.core import NormalFactorSampler
from keras_cv.core import UniformFactorSampler
version_check.check_tf_version()
__version__ = "0.2.8dev"
| Python | 0.000001 |
97523ce0b98d97a4ce6d9d99d5807e1f32b21077 | correcting a typo | constructiveness_toxicity_crowdsource/common/crowd_data_aggregator.py | constructiveness_toxicity_crowdsource/common/crowd_data_aggregator.py | import pandas as pd
import numpy as np
import math
from crowd_data_aggregation_functions import *
class CrowdsourceAggregator:
    '''
    Aggregator for crowdsourced data for constructiveness and toxicity.

    Rows are one-per-worker-per-unit; aggregation collapses them to one
    row per crowd unit (comment), with a per-column-class rule:
    averaged answers, first-value metadata, sorted lists for nominal
    answers and concatenated free text.
    '''
    def __init__(self, input_csv):
        """Load the raw CrowdFlower/Figure-Eight export into a DataFrame."""
        self.df = pd.read_csv(input_csv)
    def get_gold_questions(self):
        """Return only the gold (test) question rows."""
        return self.df.query('_golden == True')
    def get_non_gold_questions(self):
        """Return only the real (non-gold) question rows."""
        return self.df.query('_golden == False')
    def get_nannotators(self):
        """Return the number of distinct workers on non-gold questions."""
        return len(self.get_non_gold_questions()['_worker_id'].unique())
    def aggregate_annotations(self, df, attribs):
        """Aggregate per-worker rows of *df* into one row per unit.

        :param df: DataFrame of annotation rows (typically a subset of self.df)
        :param attribs: dict with keys 'unit_id_col', 'meta_cols',
            'avg_cols', 'nominal_cols', 'text_cols', each a list of columns
        :returns: DataFrame indexed by the unit id, averaged answer columns
            rounded to 2 decimals
        """
        unit_id_col = attribs['unit_id_col']
        meta_cols = attribs['meta_cols']
        avg_cols = attribs['avg_cols']
        nominal_cols = attribs['nominal_cols']
        text_cols = attribs['text_cols']
        # Map categorical answers onto [0, 1] so they can be averaged
        # ('noopinion' counts as 0.5; it used to be NaN).
        # BUG FIX: read the answers from *df* (the frame being aggregated),
        # not self.df -- mixing the two only worked when df happened to
        # share self.df's index.
        attrs = df[avg_cols].replace(
            ['yes', 'no', 'partially', 'not_sure', 'noopinion'],
            [1, 0, 0.5, 0.5, 0.5])
        other_cols = unit_id_col + meta_cols + nominal_cols + text_cols
        df = df[other_cols].join(attrs)
        # One aggregation rule per column class.
        avg_dict = {k: 'mean' for k in avg_cols}
        meta_dict = {k: 'first' for k in meta_cols}
        nominal_dict = {k: list_and_sort for k in nominal_cols}
        text_dict = {k: concatenate for k in text_cols}
        agg_dict = {**avg_dict, **meta_dict, **nominal_dict, **text_dict}
        # Collapse all workers' rows for each unit (comment).
        aggregated_df = df.groupby(unit_id_col).agg(agg_dict)
        for col in avg_cols:
            aggregated_df[col] = aggregated_df[col].apply(pd.to_numeric)
            aggregated_df[col] = aggregated_df[col].apply(lambda x: round(x, 2))
        return aggregated_df
    def write_csv(self, dframe, cols, csv_path):
        """Write the selected *cols* of *dframe* to *csv_path* without the index."""
        dframe.to_csv(csv_path, columns=cols, index=False)
        print('CSV written: ', csv_path)
if __name__=='__main__':
    # Ad-hoc smoke run; expects the exported CrowdFlower batch CSV to exist
    # at this relative path.
    ca = CrowdsourceAggregator('../CF_output/constructiveness/batch8/batch8_f1285429.csv')
    print('The number of annotators: ', ca.get_nannotators())
| import pandas as pd
import numpy as np
import math
from crowd_data_aggregation_functions import *
class CrowdsourceAggregator:
'''
Aggregator for crowdsourced data for constructiveness and toxicity
'''
def __init__(self, input_csv):
self.df = pd.read_csv(input_csv)
def get_gold_questions(self):
return self.df.query('_golden == True')
def get_non_gold_questions(self):
return self.df.query('_golden == False')
def get_nannotators(self):
return len(self.non_gold_df['_worker_id'].unique())
def aggregate_annotations(self, df, attribs):
# Relevant columns
unit_id_col = attribs['unit_id_col']
meta_cols = attribs['meta_cols']
avg_cols = attribs['avg_cols']
nominal_cols = attribs['nominal_cols']
text_cols = attribs['text_cols']
# Replace text values with numerical values in the dataframe
#attrs = self.df[avg_cols].replace(['yes', 'no', 'partially', 'not_sure', 'noopinion'], [1, 0, 0.5, 0.5, np.nan])
attrs = self.df[avg_cols].replace(['yes', 'no', 'partially', 'not_sure', 'noopinion'], [1, 0, 0.5, 0.5, 0.5])
other_cols = unit_id_col + meta_cols + nominal_cols + text_cols
df = df[other_cols].join(attrs)
# aggregation method for each class of attributes
avg_dict = {k: 'mean' for k in avg_cols}
meta_dict = {k: 'first' for k in meta_cols}
nominal_dict = {k: list_and_sort for k in nominal_cols}
text_dict = {k: concatenate for k in text_cols}
agg_dict = {**avg_dict, **meta_dict, **nominal_dict, **text_dict}
# Aggregate the results for all workers on a particular comment
aggregated_df = df.groupby(unit_id_col).agg(agg_dict)
for col in avg_cols:
aggregated_df[col] = aggregated_df[col].apply(pd.to_numeric)
aggregated_df[col] = aggregated_df[col].apply(lambda x: round(x,2))
return aggregated_df
def write_csv(self, dframe, cols, csv_path):
dframe.to_csv(csv_path, columns = cols, index = False)
print('CSV written: ', csv_path)
if __name__=='__main__':
pass
| Python | 0.999886 |
55a3b3a845014d0e4c4c4d057bbe088d7791d43d | Prepare for v1.10.0 | src/pyckson/__init__.py | src/pyckson/__init__.py | from pyckson.decorators import *
from pyckson.json import *
from pyckson.parser import parse
from pyckson.parsers.base import Parser
from pyckson.serializer import serialize
from pyckson.serializers.base import Serializer
from pyckson.dates.helpers import configure_date_formatter, configure_explicit_nulls
from pyckson.defaults import set_defaults
__version__ = '1.10.0'
| from pyckson.decorators import *
from pyckson.json import *
from pyckson.parser import parse
from pyckson.parsers.base import Parser
from pyckson.serializer import serialize
from pyckson.serializers.base import Serializer
from pyckson.dates.helpers import configure_date_formatter, configure_explicit_nulls
from pyckson.defaults import set_defaults
__version__ = '1.9.0'
| Python | 0.000001 |
7fcb80a43d39473001610015e92973d95c0b0267 | Fix not track percent so it is not track percent | mica/web/star_hist.py | mica/web/star_hist.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.table import Table
from Chandra.Time import DateTime
from mica.stats import acq_stats, guide_stats
def get_acq_data(agasc_id):
    """
    Fetch acquisition history from mica acq stats for an agasc id

    :param agasc_id: AGASC id
    :returns: list of dicts of acquisitions, sorted by guide_start
    """
    stats = acq_stats.get_star_stats(agasc_id)
    if not len(stats):
        return []
    stats = Table(stats)
    stats.sort('guide_start')
    # Columns copied through under their mica acq-stats names.
    passthrough = ['type', 'obsid', 'obi', 'slot', 'mag', 'mag_obs', 'star_tracked']
    acq_table = []
    for row in stats:
        rec = {col: row[col] for col in passthrough}
        # Columns renamed for the kadi web-app templates.
        rec['date'] = row['guide_start']
        rec['acq_dy'] = row['cdy']
        rec['acq_dz'] = row['cdz']
        rec['id'] = row['acqid']
        acq_table.append(rec)
    return acq_table
def get_gui_data(agasc_id):
    """
    Fetch guide/track history for an agasc id.

    :param agasc_id: AGASC id
    :returns: list of dicts of uses as guide stars, sorted by kalman start
    """
    gui = guide_stats.get_star_stats(agasc_id)
    if len(gui) == 0:
        return []
    gui = Table(gui)
    gui.sort('kalman_datestart')
    # Flatten rows into plain dicts for the kadi web app templates.
    gui_table = []
    for row in gui:
        srec = {col: row[col] for col in ('type', 'obsid', 'obi', 'slot')}
        # Renamed / derived columns.
        srec['date'] = row['kalman_datestart']
        srec['mag'] = row['mag_aca']
        srec['mag_obs'] = row['aoacmag_mean']
        # f_track is the tracked fraction, so NOT-tracked percent is its complement.
        srec['perc_not_track'] = (1 - row['f_track']) * 100.0
        gui_table.append(srec)
    return gui_table
def get_star_stats(agasc_id, start=None, stop=None):
    """
    Fetch acq and gui history of a star.

    :param agasc_id: AGASC id
    :param start: start of optional time filter (>=) (Chandra.Time compatible)
    :param stop: stop time of optional time filter (<) (Chandra.time compatible)
    :returns: 2 lists, first of acq attempts, second of guide attempts
    """
    def in_window(rec):
        # Half-open window [start, stop); comparisons use DateTime .date form,
        # same as the per-list filters this replaces.
        if start is not None and rec['date'] < DateTime(start).date:
            return False
        if stop is not None and rec['date'] >= DateTime(stop).date:
            return False
        return True

    acq_table = [rec for rec in get_acq_data(agasc_id) if in_window(rec)]
    gui_table = [rec for rec in get_gui_data(agasc_id) if in_window(rec)]
    return acq_table, gui_table
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.table import Table
from Chandra.Time import DateTime
from mica.stats import acq_stats, guide_stats
def get_acq_data(agasc_id):
    """
    Fetch acquisition history from mica acq stats for an agasc id
    :param agasc_id: AGASC id
    :returns: list of dicts of acquisitions
    """
    acq = acq_stats.get_star_stats(agasc_id)
    # make list of dicts for use in light templates in kadi web app
    if not len(acq):
        # No acquisitions for this star: return an empty list, not an empty Table.
        return []
    acq = Table(acq)
    # Sort chronologically so templates render in time order.
    acq.sort('guide_start')
    acq_table = []
    for s in acq:
        srec = {}
        # Use these columns as they are named from the mica acq stats table
        for col in ['type', 'obsid', 'obi', 'slot', 'mag', 'mag_obs', 'star_tracked']:
            srec[col] = s[col]
        # rename these columns in the dictionary
        srec['date'] = s['guide_start']
        srec['acq_dy'] = s['cdy']
        srec['acq_dz'] = s['cdz']
        srec['id'] = s['acqid']
        acq_table.append(srec)
    return acq_table
def get_gui_data(agasc_id):
    """
    Fetch guide/track history for an agasc id
    :param agasc_id: AGASC id
    :returns: list of dicts of uses as guide stars
    """
    gui = guide_stats.get_star_stats(agasc_id)
    if not len(gui):
        return []
    gui = Table(gui)
    # Sort chronologically so templates render in time order.
    gui.sort('kalman_datestart')
    # make list of dicts for use in light templates in kadi web app
    gui_table = []
    for s in gui:
        srec = {}
        # Use these columns as they are named from the mica guide stats table
        for col in ['type', 'obsid', 'obi', 'slot']:
            srec[col] = s[col]
        # rename these columns in the dictionary
        srec['date'] = s['kalman_datestart']
        srec['mag'] = s['mag_aca']
        srec['mag_obs'] = s['aoacmag_mean']
        # BUG FIX: f_track is the fraction of time the star WAS tracked, so
        # the not-tracked percentage is (1 - f_track) * 100, not f_track * 100.
        srec['perc_not_track'] = (1 - s['f_track']) * 100.0
        gui_table.append(srec)
    return gui_table
def get_star_stats(agasc_id, start=None, stop=None):
    """
    Fetch acq and gui history of a star
    :param agasc_id: AGASC id
    :param start: start of optional time filter (>=) (Chandra.Time compatible)
    :param stop: stop time of optional time filter (<) (Chandra.time compatible)
    :returns: 2 lists, first of acq attempts, second of guide attempts
    """
    acq_table = get_acq_data(agasc_id)
    gui_table = get_gui_data(agasc_id)
    # Keep only records in the half-open window [start, stop); comparisons
    # are done against DateTime(...).date values.
    if start is not None:
        acq_table = [s for s in acq_table if s['date'] >= DateTime(start).date]
        gui_table = [s for s in gui_table if s['date'] >= DateTime(start).date]
    if stop is not None:
        acq_table = [s for s in acq_table if s['date'] < DateTime(stop).date]
        gui_table = [s for s in gui_table if s['date'] < DateTime(stop).date]
    return acq_table, gui_table
| Python | 0.998902 |
6d63ab2ef50512a794948c86cf1ce834b59acd90 | Add str method for map area | maps/models.py | maps/models.py | import json
from django.conf import settings
# from django.contrib.postgres.fields import JSONField
from django.db import models
JSONTextField = models.TextField
# See
# https://developers.google.com/maps/documentation/javascript/reference?hl=en#LatLngBoundsLiteral
class LatLngBounds(models.Model):
    """Rectangular lat/lng bounding box (see Google LatLngBoundsLiteral above)."""
    east = models.FloatField()
    north = models.FloatField()
    south = models.FloatField()
    west = models.FloatField()
class MapArea(models.Model):
    """A map region: display bounds plus a georeferenced contour-map raster."""
    title = models.CharField(max_length=100, blank=True)
    display_area = models.ForeignKey(LatLngBounds, related_name='+')
    # This data should be obviously moved to a storage suitable
    # for blobs. Keeping in here to ease deployment. Migration is
    # an exercise for the happy future developer.
    # The data stored is image in PNG or JPEG format.
    contour_map_image = models.BinaryField()
    # Which part of the Earth the rectangular stored
    # in contour_map_image represents.
    contour_map_reference = models.ForeignKey(LatLngBounds, related_name='+')
    def __str__(self):
        # Fall back to a generic pk-based name when the title is blank.
        return self.title or 'Map area #{}'.format(self.id)
class Question(models.Model):
    """A question over a map area with per-type JSON payloads."""
    map_area = models.ForeignKey(MapArea)
    max_duration = models.DurationField()
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)
    # See "JSON Objects per Question Type" for more details.
    type = models.TextField()
    statement_data = JSONTextField()
    reference_data = JSONTextField()
class QuestionSet(models.Model):
    """An ordered collection of questions, stored as a JSON list of ids."""
    title = models.CharField(max_length=100)
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)
    max_duration = models.DurationField()
    question_ids = JSONTextField(default=json.dumps(None))
    def get_questions(self):
        """Resolve question_ids to Question instances, preserving stored order."""
        ids = json.loads(self.question_ids)
        return [Question.objects.get(id=qid) for qid in ids]
class AnswerSet(models.Model):
    """One student's attempt at a question set (end_time null while open)."""
    student = models.ForeignKey(settings.AUTH_USER_MODEL, db_index=True)
    question_set = models.ForeignKey(QuestionSet, null=True)
    start_time = models.DateTimeField()
    end_time = models.DateTimeField(null=True)
class Answer(models.Model):
    """A single submitted answer with its (recomputable) scoring data."""
    answer_set = models.ForeignKey(AnswerSet, db_index=True)
    question_set = models.ForeignKey(QuestionSet, null=True)
    question = models.ForeignKey(Question, null=True)
    answer_data = JSONTextField(default=json.dumps(None))
    scoring_data = JSONTextField(default=json.dumps(None)) # May be recalculated
    duration = models.DurationField()
    submission_time = models.DateTimeField()
| import json
from django.conf import settings
# from django.contrib.postgres.fields import JSONField
from django.db import models
JSONTextField = models.TextField
# See
# https://developers.google.com/maps/documentation/javascript/reference?hl=en#LatLngBoundsLiteral
class LatLngBounds(models.Model):
    """Rectangular lat/lng bounding box (see Google LatLngBoundsLiteral above)."""
    east = models.FloatField()
    north = models.FloatField()
    south = models.FloatField()
    west = models.FloatField()
class MapArea(models.Model):
    """A map region: display bounds plus a georeferenced contour-map raster."""
    title = models.CharField(max_length=100, blank=True)
    display_area = models.ForeignKey(LatLngBounds, related_name='+')
    # This data should be obviously moved to a storage suitable
    # for blobs. Keeping in here to ease deployment. Migration is
    # an exercise for the happy future developer.
    # The data stored is image in PNG or JPEG format.
    contour_map_image = models.BinaryField()
    # Which part of the Earth the rectangular stored
    # in contour_map_image represents.
    contour_map_reference = models.ForeignKey(LatLngBounds, related_name='+')

    def __str__(self):
        # Added: a readable name for admin/listings; falls back to the pk
        # because the title may be blank.
        return self.title or 'Map area #{}'.format(self.id)
class Question(models.Model):
    """A question over a map area with per-type JSON payloads."""
    map_area = models.ForeignKey(MapArea)
    max_duration = models.DurationField()
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)
    # See "JSON Objects per Question Type" for more details.
    type = models.TextField()
    statement_data = JSONTextField()
    reference_data = JSONTextField()
class QuestionSet(models.Model):
    """An ordered collection of questions, stored as a JSON list of ids."""
    title = models.CharField(max_length=100)
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)
    max_duration = models.DurationField()
    question_ids = JSONTextField(default=json.dumps(None))
    def get_questions(self):
        """Resolve question_ids to Question instances, preserving stored order."""
        questions = []
        for question_id in json.loads(self.question_ids):
            questions.append(Question.objects.get(id=question_id))
        return questions
class AnswerSet(models.Model):
    """One student's attempt at a question set (end_time null while open)."""
    student = models.ForeignKey(settings.AUTH_USER_MODEL, db_index=True)
    question_set = models.ForeignKey(QuestionSet, null=True)
    start_time = models.DateTimeField()
    end_time = models.DateTimeField(null=True)
class Answer(models.Model):
    """A single submitted answer with its (recomputable) scoring data."""
    answer_set = models.ForeignKey(AnswerSet, db_index=True)
    question_set = models.ForeignKey(QuestionSet, null=True)
    question = models.ForeignKey(Question, null=True)
    answer_data = JSONTextField(default=json.dumps(None))
    scoring_data = JSONTextField(default=json.dumps(None)) # May be recalculated
    duration = models.DurationField()
    submission_time = models.DateTimeField()
| Python | 0 |
f8fde8fd984242f75e36644d2e54c1d306c1b785 | Remove --population=default | keysmith/__main__.py | keysmith/__main__.py | """Keysmith Default Interface"""
import argparse
import math
import string
import pkg_resources
import keysmith
def cli(parser=None):
    """Parse CLI arguments and options.

    :param parser: optional pre-built argparse.ArgumentParser to extend;
        a new one is created when None.
    :returns: the configured parser (arguments are NOT parsed here)
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog=keysmith.CONSOLE_SCRIPT)
    parser.add_argument(
        '-d', '--delimiter',
        help='a delimiter for the samples (teeth) in the key',
        default=' ',
    )
    parser.add_argument(
        '-n', '--nsamples',
        help='the number of random samples to take',
        type=int,
        default=3,
        # Stored as args.nteeth to match keysmith.key(nteeth=...).
        dest='nteeth',
    )
    parser.add_argument(
        '-p', '--population',
        help='alphanumeric, printable, or a path',
        # Default population is the word list bundled with the package.
        default=pkg_resources.resource_filename('keysmith', 'words.txt'),
    )
    parser.add_argument(
        '--stats',
        help='statistics for the key',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s {0}'.format(keysmith.__version__),
    )
    return parser
def main(args=None):
    """Execute CLI commands: generate a key and optionally print stats.

    :param args: parsed argparse namespace; parsed from sys.argv when None.
    """
    if args is None:
        args = cli().parse_args()
    # Named populations map to character sets; any other value is treated as
    # a file path whose lines form the population.
    words = {
        'alphanumeric': string.ascii_letters + string.digits,
        'printable': string.printable,
    }.get(args.population)
    if words is None:
        with open(args.population, 'r') as f:
            words = f.read().splitlines()
    key = keysmith.key(
        seq=words,
        nteeth=args.nteeth,
        delimiter=args.delimiter,
    )
    print(key)
    if args.stats:
        print('=' * len(key))
        print('characters = {characters}'.format(characters=len(key)))
        print(' samples = {nteeth}'.format(nteeth=args.nteeth))
        print('population = {pop}'.format(pop=len(words)))
        # '<' when the delimiter is empty — presumably because adjacent
        # samples can merge, making the stated entropy an upper bound; confirm.
        print(' entropy {sign} {bits}b'.format(
            sign='<' if len(args.delimiter) < 1 else '~',
            bits=round(math.log(len(words), 2) * args.nteeth, 2),
        ))
if __name__ == '__main__':
main()
| """Keysmith Default Interface"""
import argparse
import math
import string
import pkg_resources
import keysmith
def cli(parser=None):
    """Parse CLI arguments and options.

    :param parser: optional pre-built argparse.ArgumentParser to extend;
        a new one is created when None.
    :returns: the configured parser (arguments are NOT parsed here)
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog=keysmith.CONSOLE_SCRIPT)
    parser.add_argument(
        '-d', '--delimiter',
        help='a delimiter for the samples (teeth) in the key',
        default=' ',
    )
    parser.add_argument(
        '-n', '--nsamples',
        help='the number of random samples to take',
        type=int,
        default=3,
        # Stored as args.nteeth to match keysmith.key(nteeth=...).
        dest='nteeth',
    )
    parser.add_argument(
        '-p', '--population',
        help='alphanumeric, default, printable, or a path',
        # 'default' is resolved to the bundled word list in main().
        default='default',
    )
    parser.add_argument(
        '--stats',
        help='statistics for the key',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s {0}'.format(keysmith.__version__),
    )
    return parser
def main(args=None):
    """Execute CLI commands: generate a key and optionally print stats.

    :param args: parsed argparse namespace; parsed from sys.argv when None.
    """
    if args is None:
        args = cli().parse_args()
    # Named populations map to character sets; anything else is a path
    # (after the special 'default' value is resolved below).
    words = {
        'alphanumeric': string.ascii_letters + string.digits,
        'printable': string.printable,
    }.get(args.population)
    if words is None:
        if args.population == 'default':
            # 'default' resolves to the word list bundled with the package.
            args.population = pkg_resources.resource_filename('keysmith', 'words.txt')
        with open(args.population, 'r') as f:
            words = f.read().splitlines()
    key = keysmith.key(
        seq=words,
        nteeth=args.nteeth,
        delimiter=args.delimiter,
    )
    print(key)
    if args.stats:
        print('=' * len(key))
        print('characters = {characters}'.format(characters=len(key)))
        print(' samples = {nteeth}'.format(nteeth=args.nteeth))
        print('population = {pop}'.format(pop=len(words)))
        # '<' when the delimiter is empty — presumably because adjacent
        # samples can merge, making the stated entropy an upper bound; confirm.
        print(' entropy {sign} {bits}b'.format(
            sign='<' if len(args.delimiter) < 1 else '~',
            bits=round(math.log(len(words), 2) * args.nteeth, 2),
        ))
if __name__ == '__main__':
main()
| Python | 0.000009 |
646548dff38ea476a35462cf51ba028e3275748a | Fix some undefined reference and attribute errors in the deallocate simprocedure | simuvex/procedures/cgc/deallocate.py | simuvex/procedures/cgc/deallocate.py | import simuvex
import logging
l = logging.getLogger("simuvex.procedures.cgc.deallocate")
class deallocate(simuvex.SimProcedure):
    """Symbolic SimProcedure for the CGC deallocate() syscall."""
    #pylint:disable=arguments-differ
    def run(self, addr, length): #pylint:disable=unused-argument
        """Build a symbolic return value: EINVAL on bad args, else 0."""
        # return code (see deallocate() docs)
        r = self.state.se.ite_cases((
            (addr % 0x1000 != 0, self.state.cgc.EINVAL),
            (length == 0, self.state.cgc.EINVAL),
            (self.state.cgc.addr_invalid(addr), self.state.cgc.EINVAL),
            (self.state.cgc.addr_invalid(addr + length), self.state.cgc.EINVAL),
        ), self.state.se.BVV(0, self.state.arch.bits))
        # Page-aligned length; currently unused while unmap_region is disabled.
        aligned_length = ((length + 0xfff) / 0x1000) * 0x1000
        # TODO: not sure if this is valuable until we actually model CGC
        # allocations accurately
        # self.state.memory.unmap_region(addr, aligned_length)
        return r
| import simuvex
class deallocate(simuvex.SimProcedure):
    """Symbolic SimProcedure for the CGC deallocate() syscall."""
    #pylint:disable=arguments-differ
    def run(self, addr, length): #pylint:disable=unused-argument
        """Build a symbolic return value: EINVAL on bad args, else 0."""
        # return code (see deallocate() docs)
        r = self.state.se.ite_cases((
            (addr % 0x1000 != 0, self.state.cgc.EINVAL),
            (length == 0, self.state.cgc.EINVAL),
            (self.state.cgc.addr_invalid(addr), self.state.cgc.EINVAL),
            (self.state.cgc.addr_invalid(addr + length), self.state.cgc.EINVAL),
        ), self.state.se.BVV(0, self.state.arch.bits))
        return r
| Python | 0.000001 |
70c520d3ff882b499febfe021d02108f79171773 | Fix ST2(python26) compatibility. | OmniMarkupLib/Renderers/MarkdownRenderer.py | OmniMarkupLib/Renderers/MarkdownRenderer.py | from .base_renderer import *
import re
import markdown
@renderer
class MarkdownRenderer(MarkupRenderer):
    """Renders Markdown (and Literate CoffeeScript) sources to HTML5."""
    FILENAME_PATTERN_RE = re.compile(r'\.(md|mkdn?|mdwn|mdown|markdown|litcoffee)$')
    # Matches a leading YAML front matter block (--- ... ---) so it can be
    # stripped before rendering.
    YAML_FRONTMATTER_RE = re.compile(r'\A---\s*\n.*?\n?^---\s*$\n?', re.DOTALL | re.MULTILINE)
    def load_settings(self, renderer_options, global_setting):
        """Build the markdown extension list from user options and globals."""
        super(MarkdownRenderer, self).load_settings(renderer_options, global_setting)
        if 'extensions' in renderer_options:
            extensions = renderer_options['extensions']
        else:
            # Fallback to the default GFM style
            extensions = ['tables', 'strikeout', 'fenced_code', 'codehilite']
        # set() call instead of a set literal — presumably for the
        # ST2/Python 2.6 compatibility named in this change; confirm.
        extensions = set(extensions)
        if global_setting.mathjax_enabled:
            if 'mathjax' not in extensions:
                extensions.add('mathjax')
        # Map the legacy 'smartypants' name to markdown's 'smarty' extension.
        if 'smartypants' in extensions:
            extensions.remove('smartypants')
            extensions.add('smarty')
        self.extensions = list(extensions)
    @classmethod
    def is_enabled(cls, filename, syntax):
        """Enable for markdown syntax buffers or matching file extensions."""
        if syntax == "text.html.markdown":
            return True
        return cls.FILENAME_PATTERN_RE.search(filename) is not None
    def render(self, text, **kwargs):
        """Render text (minus any YAML front matter) to HTML5."""
        text = self.YAML_FRONTMATTER_RE.sub('', text)
        return markdown.markdown(text, output_format='html5',
                                 extensions=self.extensions)
| from .base_renderer import *
import re
import markdown
@renderer
class MarkdownRenderer(MarkupRenderer):
    """Renders Markdown (and Literate CoffeeScript) sources to HTML5."""
    FILENAME_PATTERN_RE = re.compile(r'\.(md|mkdn?|mdwn|mdown|markdown|litcoffee)$')
    # Matches a leading YAML front matter block (--- ... ---) so it can be
    # stripped before rendering.
    YAML_FRONTMATTER_RE = re.compile(r'\A---\s*\n.*?\n?^---\s*$\n?', re.DOTALL | re.MULTILINE)
    def load_settings(self, renderer_options, global_setting):
        """Build the markdown extension list from user options and globals."""
        super(MarkdownRenderer, self).load_settings(renderer_options, global_setting)
        if 'extensions' in renderer_options:
            extensions = set(renderer_options['extensions'])
        else:
            # Fallback to the default GFM style
            extensions = {'tables', 'strikeout', 'fenced_code', 'codehilite'}
        if global_setting.mathjax_enabled:
            if 'mathjax' not in extensions:
                extensions.add('mathjax')
        # Map the legacy 'smartypants' name to markdown's 'smarty' extension.
        if 'smartypants' in extensions:
            extensions.remove('smartypants')
            extensions.add('smarty')
        self.extensions = list(extensions)
    @classmethod
    def is_enabled(cls, filename, syntax):
        """Enable for markdown syntax buffers or matching file extensions."""
        if syntax == "text.html.markdown":
            return True
        return cls.FILENAME_PATTERN_RE.search(filename) is not None
    def render(self, text, **kwargs):
        """Render text (minus any YAML front matter) to HTML5."""
        text = self.YAML_FRONTMATTER_RE.sub('', text)
        return markdown.markdown(text, output_format='html5',
                                 extensions=self.extensions)
| Python | 0.000001 |
b7523a8bbac9fdce7d97afda32b9a7982f00a6d0 | Update Exp 7_2 | examples/sparkfun_redbot/sparkfun_experiments/Exp7_2_DriveDistance.py | examples/sparkfun_redbot/sparkfun_experiments/Exp7_2_DriveDistance.py | """
Exp7_2_DriveDistance -- RedBot Experiment 7.2
In an earlier experiment, we used a combination of speed and time to
drive a certain distance. Using the encoders, we can me much more accurate.
In this example, we will show you how to setup your robot to drive a certain
distance regardless of the motorPower.
This sketch was written by SparkFun Electronics, with lots of help from
the Arduino community. This code is completely free for any use.
8 Oct 2013 M. Hord
Revised, 31 Oct 2014 B. Huang
"""
from pymata_aio.pymata3 import PyMata3
from pymata_aio.constants import Constants
from library.redbot import RedBotMotors,RedBotEncoder
import math
COM_PORT = None # Use automatic com port detection (the default)
#COM_PORT = "COM10" # Manually specify the com port (optional)
board = PyMata3(com_port=COM_PORT)
motors = RedBotMotors(board)
encoders = RedBotEncoder(board)
BUTTON_PIN = 12
COUNT_PER_REV = 192 # 4 pairs of N-S x 48:1 gearbox = 192 ticks per wheel rev
WHEEL_DIAM = 2.56 # diam = 65mm / 25.4 mm/in
WHEEL_CIRC = math.pi * WHEEL_DIAM
print(WHEEL_CIRC)
ENCODER_PIN_LEFT = 16
ENCODER_PIN_RIGHT = 10
def setup():
    """Configure the pushbutton pin as an input with its pull-up enabled."""
    board.set_pin_mode(BUTTON_PIN, Constants.INPUT)
    board.digital_write(BUTTON_PIN, 1) # writing pin high sets the pull-up resistor
def loop():
    """Poll the button (active-low); on a press, drive 12 inches."""
    # wait for a button press to start driving.
    if board.digital_read(BUTTON_PIN) == 0:
        driveDistance(12, 150) # drive 12 inches at motor_power = 150
def driveDistance(distance, motor_power):
    """Drive forward a set distance using wheel-encoder feedback.

    :param distance: distance to travel, in inches
    :param motor_power: motor power level to drive at; the distance covered
        is controlled by the encoders, not the power
    """
    left_count = 0
    right_count = 0
    num_rev = distance / WHEEL_CIRC
    # Hoisted loop invariant: total encoder ticks needed for the distance
    # (was recomputed twice per iteration, in the condition and the print).
    target_count = num_rev * COUNT_PER_REV
    # debug
    print("drive_distance() {} inches at {} power for {:.2f} revolutions".format(distance, motor_power, num_rev))
    encoders.clear_enc()  # clear the encoder count
    motors.drive(motor_power)
    # Poll both encoders until the right wheel reaches the target tick count.
    while right_count < target_count:
        left_count = encoders.get_ticks(ENCODER_PIN_LEFT)
        right_count = encoders.get_ticks(ENCODER_PIN_RIGHT)
        print("{} {} stop once over {:.0f} ticks".format(left_count, right_count, target_count))
        board.sleep(0.1)
    motors.brake()
if __name__ == "__main__":
    setup()
    # Main loop: poll the button forever, yielding briefly between polls.
    while True:
        loop()
        board.sleep(.01)
        # print("Encoder Read: {}".format(board.encoder_read(encoder_pin_right)))
| """
Exp7_2_DriveDistance -- RedBot Experiment 7.2
In an earlier experiment, we used a combination of speed and time to
drive a certain distance. Using the encoders, we can me much more accurate.
In this example, we will show you how to setup your robot to drive a certain
distance regardless of the motorPower.
This sketch was written by SparkFun Electronics, with lots of help from
the Arduino community. This code is completely free for any use.
8 Oct 2013 M. Hord
Revised, 31 Oct 2014 B. Huang
"""
from pymata_aio.pymata3 import PyMata3
from pymata_aio.constants import Constants
from library.redbot import RedBotMotors,RedBotEncoder
import math
# This line "includes" the RedBot library into your sketch.
# Provides special objects, methods, and functions for the RedBot.
board = PyMata3()
encoders = RedBotEncoder(board)
motors = RedBotMotors(board)
encoder_pin_left = 16
encoder_pin_right = 10
BUTTON_PIN = 12
counts_per_rev = 192 # 4 pairs of N-S x 48:1 gearbox = 192 ticks per wheel rev
wheel_diam = 2.56 # diam = 65mm / 25.4 mm/in
wheel_circ = math.pi * wheel_diam
# variables used to store the left and right encoder counts.
left_count = 0
right_count = 0
def setup():
    """Configure the pushbutton pin as an input with its pull-up enabled."""
    board.set_pin_mode(BUTTON_PIN, Constants.INPUT)
    board.digital_write(BUTTON_PIN, 1) # writing pin high sets the pull-up resistor
def loop():
    """Poll the button (active-low); on a confirmed press, drive 12 inches."""
    # wait for a button press to start driving.
    if board.digital_read(BUTTON_PIN) == 0:
        # Re-check after 50 ms — presumably a debounce; confirm intent.
        board.sleep(0.05)
        if board.digital_read(BUTTON_PIN) == 0:
            driveDistance(12, 150) # drive 12 inches at motor_power = 150
def driveDistance(distance, motor_power):
    """Drive forward a set distance using wheel-encoder feedback.

    Updates the module-level left_count / right_count globals while polling.

    :param distance: distance to travel, in inches
    :param motor_power: motor power level to drive at
    """
    global left_count
    global right_count
    left_count = 0
    right_count = 0
    numRev = float(distance/wheel_circ)
    # debug
    print("drive_distance() {} inches at {} power".format(distance,motor_power))
    print(numRev)
    encoders.clear_enc()
    motors.drive(motor_power)
    # TODO: Find the 'proper' way to access these variables
    # (removed unused local 'iteration')
    # Poll the encoders until the right wheel reaches the target tick count.
    while right_count < numRev*counts_per_rev:
        left_count = encoders.get_ticks(encoder_pin_left)
        right_count = encoders.get_ticks(encoder_pin_right)
        print("{} {}".format(left_count,right_count)) # stores the encoder count to a variable
        # print(numRev*counts_per_rev)
        board.sleep(0.01)
    # if either left or right motor are more than 5 revolutions, stop
    motors.brake()
if __name__ == "__main__":
    setup()
    # Main loop: poll the button forever, yielding briefly between polls.
    while True:
        loop()
        board.sleep(.01)
        # print("Encoder Read: {}".format(board.encoder_read(encoder_pin_right)))
| Python | 0 |
b58eaf077ff748c3604aa7520a956b03cdce6995 | Add libevent_home command line parameter | site_scons/community/command_line.py | site_scons/community/command_line.py | from SCons.Script import *
## Command Line Variables
#
# Setup all of the command line variables across all of the products and
# platforms. NOTE: if a path is configurable and will be created in the
# build process then the validation MUST be PathAccept
def get_command_line_opts( host, products, VERSIONS ):
    """Build the SCons Variables object for all products and platforms.

    :param host: dict describing the build host; host['os'] selects per-OS options
    :param products: iterable of buildable product names
    :param VERSIONS: version info dict (uses VERSIONS['mama']['releaseString'])
    :returns: populated SCons Variables instance
    """
    opts = Variables('omama.conf')
    opts.format = '\n%s: %s\n Default: %s [ %s ]\n'
    opts.AddVariables(
        # Must be #install by default, otherwise when it comes to cleaning the
        # install folder, can remove whole tree
        PathVariable('prefix', 'Installation prefix', '#openmama_install_%s' % (VERSIONS['mama']['releaseString']),
            PathVariable.PathAccept),
        PathVariable('blddir', 'Object directory', '#objdir',
            PathVariable.PathAccept),
        PathVariable('java_home', 'JAVA Home folder', os.environ.get('JAVA_HOME',None) , PathVariable.PathAccept),
        PathVariable('logfile', 'Output Log File', 'scons.log', PathVariable.PathAccept),
        BoolVariable('verbose','Whether to print verbose output',True),
        BoolVariable('package','Whether to tar up the installation directory',False),
        BoolVariable('with_docs','Build with documentation',False),
        BoolVariable('with_unittest','Build with gunit tests',False),
        BoolVariable('with_testtools','Build with test tools',False),
        # BUG FIX: help text was a copy-paste of with_testtools' description.
        BoolVariable('with_examples','Build with examples',True),
        BoolVariable('entitled','Whether the build is entitled or unentitled',False),
        PathVariable('gtest_home','Path to Google Test home',None, PathVariable.PathIsDir),
        ListVariable('middleware','Middleware(s) to be compiled in', 'avis', names = ['avis', 'qpid'] ),
    )
    if host['os'] == 'Windows':
        opts.AddVariables(
            ListVariable( 'buildtype', 'Windows Build type e.g dynamic', 'all', names = ['dynamic','dynamic-debug','static','static-debug'] ),
            PathVariable('avis_home', 'Path to Avis',
                'c:\\avis', PathVariable.PathAccept),
            PathVariable('qpid_home', 'Path to QPID Proton Libraries',
                'c:\\proton', PathVariable.PathAccept),
            EnumVariable('vsver','Visual Studio Version to use', '10.0',
                allowed_values=('8.0','9.0','10.0')),
            EnumVariable('product', 'Product to be built', 'mamda',
                allowed_values=( products )),
            EnumVariable('dotnet_version', 'Dotnet Version used to determine framework directory', '2.0',
                allowed_values=('1.0','2.0', '4.0')),
            PathVariable('dotnet_framework', 'Path to desired dotnet framework', None,
                PathVariable.PathIsDir),
            PathVariable('libevent_home', 'Path to libevent Libraries',
                'c:\\libevent', PathVariable.PathAccept),
        )
    if host['os'] == 'Linux':
        opts.AddVariables(
            PathVariable('avis_home','Path to Avis', '/usr/local/', PathVariable.PathIsDir),
            PathVariable('qpid_home','Path to QPID Proton Libraries',
                '/usr/local/', PathVariable.PathIsDir),
            PathVariable('cache_dir','Path to object cache', None, PathVariable.PathIsDir),
            EnumVariable('product', 'Product to be built', 'mamda',
                #mamda all is a windows only build
                allowed_values=( [ x for x in products if x != "mamdaall" ] )),
        )
    return opts
| from SCons.Script import *
## Command Line Variables
#
# Setup all of the command line variables across all of the products and
# platforms. NOTE: if a path is configurable and will be created in the
# build process then the validation MUST be PathAccept
def get_command_line_opts( host, products, VERSIONS ):
    """Build the SCons Variables object for all products and platforms.

    :param host: dict describing the build host; host['os'] selects per-OS options
    :param products: iterable of buildable product names
    :param VERSIONS: version info dict (uses VERSIONS['mama']['releaseString'])
    :returns: populated SCons Variables instance
    """
    opts = Variables('omama.conf')
    opts.format = '\n%s: %s\n Default: %s [ %s ]\n'
    opts.AddVariables(
        # Must be #install by default, otherwise when it comes to cleaning the
        # install folder, can remove whole tree
        PathVariable('prefix', 'Installation prefix', '#openmama_install_%s' % (VERSIONS['mama']['releaseString']),
            PathVariable.PathAccept),
        PathVariable('blddir', 'Object directory', '#objdir',
            PathVariable.PathAccept),
        PathVariable('java_home', 'JAVA Home folder', os.environ.get('JAVA_HOME',None) , PathVariable.PathAccept),
        PathVariable('logfile', 'Output Log File', 'scons.log', PathVariable.PathAccept),
        BoolVariable('verbose','Whether to print verbose output',True),
        BoolVariable('package','Whether to tar up the installation directory',False),
        BoolVariable('with_docs','Build with documentation',False),
        BoolVariable('with_unittest','Build with gunit tests',False),
        BoolVariable('with_testtools','Build with test tools',False),
        # BUG FIX: help text was a copy-paste of with_testtools' description.
        BoolVariable('with_examples','Build with examples',True),
        BoolVariable('entitled','Whether the build is entitled or unentitled',False),
        PathVariable('gtest_home','Path to Google Test home',None, PathVariable.PathIsDir),
        ListVariable('middleware','Middleware(s) to be compiled in', 'avis', names = ['avis', 'qpid'] ),
    )
    if host['os'] == 'Windows':
        opts.AddVariables(
            ListVariable( 'buildtype', 'Windows Build type e.g dynamic', 'all', names = ['dynamic','dynamic-debug','static','static-debug'] ),
            PathVariable('avis_home', 'Path to Avis',
                'c:\\avis', PathVariable.PathAccept),
            PathVariable('qpid_home', 'Path to QPID Proton Libraries',
                'c:\\proton', PathVariable.PathAccept),
            EnumVariable('vsver','Visual Studio Version to use', '10.0',
                allowed_values=('8.0','9.0','10.0')),
            EnumVariable('product', 'Product to be built', 'mamda',
                allowed_values=( products )),
            EnumVariable('dotnet_version', 'Dotnet Version used to determine framework directory', '2.0',
                allowed_values=('1.0','2.0', '4.0')),
            PathVariable('dotnet_framework', 'Path to desired dotnet framework', None,
                PathVariable.PathIsDir),
        )
    if host['os'] == 'Linux':
        opts.AddVariables(
            PathVariable('avis_home','Path to Avis', '/usr/local/', PathVariable.PathIsDir),
            PathVariable('qpid_home','Path to QPID Proton Libraries',
                '/usr/local/', PathVariable.PathIsDir),
            PathVariable('cache_dir','Path to object cache', None, PathVariable.PathIsDir),
            EnumVariable('product', 'Product to be built', 'mamda',
                #mamda all is a windows only build
                allowed_values=( [ x for x in products if x != "mamdaall" ] )),
        )
    return opts
| Python | 0.000001 |
168c80e3bf024f74fbb49184ceffbc2a09abe6c1 | Allow empty labels | kk/models/hearing.py | kk/models/hearing.py |
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from .base import ModifiableModel
class Label(ModifiableModel):
    """A free-form tag that can be attached to hearings."""
    label = models.CharField(verbose_name=_('Label'), default='', max_length=200)
    def __str__(self):
        return self.label
class Hearing(ModifiableModel):
    """A public hearing with commenting policy, location info and labels."""
    COMMENT_OPTION_DISALLOW = '1'
    COMMENT_OPTION_REGISTERED = '2'
    COMMENT_OPTION_ANONYMOUS = '3'
    COMMENT_OPTION = (
        (COMMENT_OPTION_DISALLOW, 'Disallow'),
        (COMMENT_OPTION_REGISTERED, 'Registered'),
        (COMMENT_OPTION_ANONYMOUS, 'Anonymous')
    )
    close_at = models.DateTimeField(verbose_name=_('Closing time'), default=timezone.now)
    n_comments = models.IntegerField(verbose_name=_('Number of comments'), blank=True, default=0)
    closed = models.BooleanField(verbose_name=_('Whether hearing is closed'), default=False)
    heading = models.TextField(verbose_name=_('Heading'), blank=True, default='')
    abstract = models.TextField(verbose_name=_('Abstract'), blank=True, default='')
    # NOTE(review): 'heading' is assigned a second time here (verbose_name
    # 'Content'), shadowing the 'Heading' field above — likely meant to be
    # named 'content'; confirm before migrating.
    heading = models.TextField(verbose_name=_('Content'), blank=True, default='')
    borough = models.CharField(verbose_name=_('Borough to which hearing concerns'), blank=True, default='', max_length=200)
    comment_option = models.CharField(verbose_name=_('Commenting option'), max_length=1, choices=COMMENT_OPTION, default='1')
    servicemap_url = models.CharField(verbose_name=_('Servicemap url'), default='', max_length=255, blank=True)
    latitude = models.CharField(verbose_name=_('Latitude'), max_length=20, default='', blank=True)
    longitude = models.CharField(verbose_name=_('Longitude'), max_length=20, default='', blank=True)
    # blank=True lets a hearing be saved with no labels.
    labels = models.ManyToManyField(Label, blank=True)
|
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from .base import ModifiableModel
class Label(ModifiableModel):
    """A free-form tag that can be attached to hearings."""
    label = models.CharField(verbose_name=_('Label'), default='', max_length=200)
    def __str__(self):
        return self.label
class Hearing(ModifiableModel):
    """A public hearing with commenting policy, location info and labels."""
    COMMENT_OPTION_DISALLOW = '1'
    COMMENT_OPTION_REGISTERED = '2'
    COMMENT_OPTION_ANONYMOUS = '3'
    COMMENT_OPTION = (
        (COMMENT_OPTION_DISALLOW, 'Disallow'),
        (COMMENT_OPTION_REGISTERED, 'Registered'),
        (COMMENT_OPTION_ANONYMOUS, 'Anonymous')
    )
    close_at = models.DateTimeField(verbose_name=_('Closing time'), default=timezone.now)
    n_comments = models.IntegerField(verbose_name=_('Number of comments'), blank=True, default=0)
    closed = models.BooleanField(verbose_name=_('Whether hearing is closed'), default=False)
    heading = models.TextField(verbose_name=_('Heading'), blank=True, default='')
    abstract = models.TextField(verbose_name=_('Abstract'), blank=True, default='')
    # NOTE(review): 'heading' is assigned a second time here (verbose_name
    # 'Content'), shadowing the 'Heading' field above — likely meant to be
    # named 'content'; confirm before migrating.
    heading = models.TextField(verbose_name=_('Content'), blank=True, default='')
    borough = models.CharField(verbose_name=_('Borough to which hearing concerns'), blank=True, default='', max_length=200)
    comment_option = models.CharField(verbose_name=_('Commenting option'), max_length=1, choices=COMMENT_OPTION, default='1')
    servicemap_url = models.CharField(verbose_name=_('Servicemap url'), default='', max_length=255, blank=True)
    latitude = models.CharField(verbose_name=_('Latitude'), max_length=20, default='', blank=True)
    longitude = models.CharField(verbose_name=_('Longitude'), max_length=20, default='', blank=True)
    # FIX: blank=True so a hearing can be saved with no labels attached.
    labels = models.ManyToManyField(Label, blank=True)
| Python | 0.998839 |
d58b82997d9e5d616da2f517c19c5191c43cd823 | make membship optional, on which we revert to matching_dissim; speed improvement | kmodes/util/dissim.py | kmodes/util/dissim.py | """
Dissimilarity measures for clustering
"""
import numpy as np
def matching_dissim(a, b, **_):
    """Simple matching dissimilarity function.

    Counts, per row of ``a``, the number of attribute positions where
    ``a`` and ``b`` disagree (element-wise, with numpy broadcasting).
    """
    mismatches = np.not_equal(a, b)
    return mismatches.sum(axis=1)
def euclidean_dissim(a, b, **_):
    """Euclidean distance dissimilarity function.

    Returns the squared Euclidean distance of each row of ``a`` against
    ``b``; raises ValueError when either input contains NaN.
    """
    if np.isnan(a).any() or np.isnan(b).any():
        raise ValueError("Missing values detected in numerical columns.")
    diff = a - b
    return (diff * diff).sum(axis=1)
def ng_dissim(a, b, X, membship=None):
    """Ng et al.'s dissimilarity measure, as presented in
    Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the
    Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE
    Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3,
    January, 2007

    Note that membship must be a rectangular array such that
    len(membship) = len(a) (one row per centroid) and
    len(membship[i]) = X.shape[0] (one column per data point, since each
    membership row is used to select rows of X).

    In case of missing membship, this function reverts back to
    matching dissimilarity.
    """
    # Without membership, revert to matching dissimilarity
    if membship is None:
        return matching_dissim(a, b)

    def calc_cjr(b, X, memj, idr):
        """Num objects w/ category value x_{i,r} for rth attr in jth cluster"""
        xcids = np.where(memj == 1)
        return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0))

    def calc_dissim(b, X, memj, idr):
        # Size of jth cluster (guard against empty clusters).
        cj = float(np.sum(memj))
        return (1.0 - (calc_cjr(b, X, memj, idr) / cj)) if cj != 0.0 else 0.0

    # BUG FIX: raise when EITHER dimension mismatches (was `and`, which only
    # raised when both were wrong), and compare membship columns against
    # X.shape[0] — calc_cjr indexes ROWS of X with a membership row, so a
    # membership row must have one entry per data point, as the error
    # message below already stated.
    if len(membship) != a.shape[0] or len(membship[0]) != X.shape[0]:
        raise ValueError("'membship' must be a rectangular array where "
                         "the number of rows in 'membship' equals the "
                         "number of rows in 'a' and the number of "
                         "columns in 'membship' equals the number of rows in 'X'.")

    return np.array([np.array([calc_dissim(b, X, membship[idj], idr)
                               if b[idr] == t else 1.0
                               for idr, t in enumerate(val_a)]).sum(0)
                     for idj, val_a in enumerate(a)])
| """
Dissimilarity measures for clustering
"""
import numpy as np
def matching_dissim(a, b, **_):
"""Simple matching dissimilarity function"""
return np.sum(a != b, axis=1)
def euclidean_dissim(a, b, **_):
"""Euclidean distance dissimilarity function"""
if np.isnan(a).any() or np.isnan(b).any():
raise ValueError("Missing values detected in numerical columns.")
return np.sum((a - b) ** 2, axis=1)
def ng_dissim(a, b, X, membship):
"""Ng et al.'s dissimilarity measure, as presented in
Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the
Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE
Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3,
January, 2007
Note that membship must be a rectangular array such that the
len(membship) = len(a) and len(membship[i]) = X.shape[1]
"""
def calcCJR(b, X, memj, idr):
"""Num objects w/ category value x_{i,r} for rth attr in jth cluster"""
xcids = np.where(np.in1d(memj.ravel(), [1]).reshape(memj.shape))
return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0))
def calc_dissim(b, X, memj, idr):
# Size of jth cluster
CJ = float(np.sum(memj))
return (1.0 - (calcCJR(b, X, memj, idr) / CJ)) if CJ != 0.0 else 0.0
if len(membship) != a.shape[0] and len(membship[0]) != X.shape[1]:
raise ValueError("'membship' must be a rectangular array where "
"the number of rows in 'membship' equals the "
"number of rows in 'a' and the number of "
"columns in 'membship' equals the number of rows in 'X'.")
return np.array([np.array([calc_dissim(b, X, membship[idj], idr)
if b[idr] == t else 1.0
for idr, t in enumerate(val_a)]).sum(0)
for idj, val_a in enumerate(a)])
| Python | 0 |
1b972c4ab088fd6566dd144992167f4a4ae62356 | rebuild LevelRenderData after saving changed_geometries | src/c3nav/mapdata/models/update.py | src/c3nav/mapdata/models/update.py | from contextlib import contextmanager
from django.conf import settings
from django.core.cache import cache
from django.db import models, transaction
from django.utils.http import int_to_base36
from django.utils.timezone import make_naive
from django.utils.translation import ugettext_lazy as _
from c3nav.mapdata.tasks import delete_old_cached_tiles
class MapUpdate(models.Model):
"""
A map update. created whenever mapdata is changed.
"""
datetime = models.DateTimeField(auto_now_add=True, db_index=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.PROTECT)
type = models.CharField(max_length=32)
class Meta:
verbose_name = _('Map update')
verbose_name_plural = _('Map updates')
default_related_name = 'mapupdates'
get_latest_by = 'datetime'
@classmethod
def last_update(cls):
last_update = cache.get('mapdata:last_update', None)
if last_update is not None:
return last_update
with cls.lock():
last_update = cls.objects.latest()
cache.set('mapdata:last_update', (last_update.pk, last_update.datetime), 900)
return last_update.pk, last_update.datetime
@property
def cache_key(self):
return int_to_base36(self.pk)+'_'+int_to_base36(int(make_naive(self.datetime).timestamp()))
@classmethod
def current_cache_key(cls):
pk, dt = cls.last_update()
return int_to_base36(pk)+'_'+int_to_base36(int(make_naive(dt).timestamp()))
@classmethod
@contextmanager
def lock(cls):
with transaction.atomic():
yield cls.objects.select_for_update().earliest()
def save(self, **kwargs):
if self.pk is not None:
raise TypeError
old_cache_key = MapUpdate.current_cache_key()
from c3nav.mapdata.models import AltitudeArea
AltitudeArea.recalculate()
super().save(**kwargs)
from c3nav.mapdata.cache import changed_geometries
changed_geometries.save(old_cache_key, self.cache_key)
from c3nav.mapdata.render.base import LevelRenderData
LevelRenderData.rebuild()
cache.set('mapdata:last_update', (self.pk, self.datetime), 900)
delete_old_cached_tiles.apply_async(countdown=5)
| from contextlib import contextmanager
from django.conf import settings
from django.core.cache import cache
from django.db import models, transaction
from django.utils.http import int_to_base36
from django.utils.timezone import make_naive
from django.utils.translation import ugettext_lazy as _
from c3nav.mapdata.tasks import delete_old_cached_tiles
class MapUpdate(models.Model):
"""
A map update. created whenever mapdata is changed.
"""
datetime = models.DateTimeField(auto_now_add=True, db_index=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.PROTECT)
type = models.CharField(max_length=32)
class Meta:
verbose_name = _('Map update')
verbose_name_plural = _('Map updates')
default_related_name = 'mapupdates'
get_latest_by = 'datetime'
@classmethod
def last_update(cls):
last_update = cache.get('mapdata:last_update', None)
if last_update is not None:
return last_update
with cls.lock():
last_update = cls.objects.latest()
cache.set('mapdata:last_update', (last_update.pk, last_update.datetime), 900)
return last_update.pk, last_update.datetime
@property
def cache_key(self):
return int_to_base36(self.pk)+'_'+int_to_base36(int(make_naive(self.datetime).timestamp()))
@classmethod
def current_cache_key(cls):
pk, dt = cls.last_update()
return int_to_base36(pk)+'_'+int_to_base36(int(make_naive(dt).timestamp()))
@classmethod
@contextmanager
def lock(cls):
with transaction.atomic():
yield cls.objects.select_for_update().earliest()
def save(self, **kwargs):
if self.pk is not None:
raise TypeError
old_cache_key = MapUpdate.current_cache_key()
from c3nav.mapdata.models import AltitudeArea
AltitudeArea.recalculate()
from c3nav.mapdata.render.base import LevelRenderData
LevelRenderData.rebuild()
super().save(**kwargs)
from c3nav.mapdata.cache import changed_geometries
changed_geometries.save(old_cache_key, self.cache_key)
cache.set('mapdata:last_update', (self.pk, self.datetime), 900)
delete_old_cached_tiles.apply_async(countdown=5)
| Python | 0 |
dc300cf24651036e93e94f8c40f00f1126da4a85 | fix 404 when user want to verify payments | lms/djangoapps/commerce/views.py | lms/djangoapps/commerce/views.py | """ Commerce views. """
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from edxmako.shortcuts import render_to_response
from microsite_configuration import microsite
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from shoppingcart.processors.CyberSource2 import is_user_payment_error
from django.utils.translation import ugettext as _
log = logging.getLogger(__name__)
@csrf_exempt
def checkout_cancel(_request):
""" Checkout/payment cancellation view. """
context = {'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)}
return render_to_response("commerce/checkout_cancel.html", context)
@csrf_exempt
def checkout_error(_request):
""" Checkout/payment error view. """
context = {'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)}
return render_to_response("commerce/checkout_error.html", context)
@csrf_exempt
@login_required
def checkout_receipt(request):
""" Receipt view. """
page_title = _('Receipt')
is_payment_complete = True
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
payment_support_link = '<a href=\"mailto:{email}\">{email}</a>'.format(email=payment_support_email)
is_cybersource = all(k in request.POST for k in ('signed_field_names', 'decision', 'reason_code'))
if is_cybersource and request.POST['decision'] != 'ACCEPT':
# Cybersource may redirect users to this view if it couldn't recover
# from an error while capturing payment info.
is_payment_complete = False
page_title = _('Payment Failed')
reason_code = request.POST['reason_code']
# if the problem was with the info submitted by the user, we present more detailed messages.
if is_user_payment_error(reason_code):
error_summary = _("There was a problem with this transaction. You have not been charged.")
error_text = _(
"Make sure your information is correct, or try again with a different card or another form of payment."
)
else:
error_summary = _("A system error occurred while processing your payment. You have not been charged.")
error_text = _("Please wait a few minutes and then try again.")
for_help_text = _("For help, contact {payment_support_link}.").format(payment_support_link=payment_support_link)
else:
# if anything goes wrong rendering the receipt, it indicates a problem fetching order data.
error_summary = _("An error occurred while creating your receipt.")
error_text = None # nothing particularly helpful to say if this happens.
for_help_text = _(
"If your course does not appear on your dashboard, contact {payment_support_link}."
).format(payment_support_link=payment_support_link)
context = {
'page_title': page_title,
'is_payment_complete': is_payment_complete,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
'verified': SoftwareSecurePhotoVerification.verification_valid_or_pending(request.user).exists(),
'error_summary': error_summary,
'error_text': error_text,
'for_help_text': for_help_text,
'payment_support_email': payment_support_email,
'username': request.user.username,
'nav_hidden': True,
'is_request_in_themed_site': True,
'subscription_course_key': settings.SUBSCRIPTION_COURSE_KEY
}
return render_to_response('commerce/checkout_receipt.html', context)
| """ Commerce views. """
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from edxmako.shortcuts import render_to_response
from microsite_configuration import microsite
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from shoppingcart.processors.CyberSource2 import is_user_payment_error
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.theming.helpers import is_request_in_themed_site
log = logging.getLogger(__name__)
@csrf_exempt
def checkout_cancel(_request):
""" Checkout/payment cancellation view. """
context = {'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)}
return render_to_response("commerce/checkout_cancel.html", context)
@csrf_exempt
def checkout_error(_request):
""" Checkout/payment error view. """
context = {'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)}
return render_to_response("commerce/checkout_error.html", context)
@csrf_exempt
@login_required
def checkout_receipt(request):
""" Receipt view. """
page_title = _('Receipt')
is_payment_complete = True
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
payment_support_link = '<a href=\"mailto:{email}\">{email}</a>'.format(email=payment_support_email)
is_cybersource = all(k in request.POST for k in ('signed_field_names', 'decision', 'reason_code'))
if is_cybersource and request.POST['decision'] != 'ACCEPT':
# Cybersource may redirect users to this view if it couldn't recover
# from an error while capturing payment info.
is_payment_complete = False
page_title = _('Payment Failed')
reason_code = request.POST['reason_code']
# if the problem was with the info submitted by the user, we present more detailed messages.
if is_user_payment_error(reason_code):
error_summary = _("There was a problem with this transaction. You have not been charged.")
error_text = _(
"Make sure your information is correct, or try again with a different card or another form of payment."
)
else:
error_summary = _("A system error occurred while processing your payment. You have not been charged.")
error_text = _("Please wait a few minutes and then try again.")
for_help_text = _("For help, contact {payment_support_link}.").format(payment_support_link=payment_support_link)
else:
# if anything goes wrong rendering the receipt, it indicates a problem fetching order data.
error_summary = _("An error occurred while creating your receipt.")
error_text = None # nothing particularly helpful to say if this happens.
for_help_text = _(
"If your course does not appear on your dashboard, contact {payment_support_link}."
).format(payment_support_link=payment_support_link)
context = {
'page_title': page_title,
'is_payment_complete': is_payment_complete,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
'verified': SoftwareSecurePhotoVerification.verification_valid_or_pending(request.user).exists(),
'error_summary': error_summary,
'error_text': error_text,
'for_help_text': for_help_text,
'payment_support_email': payment_support_email,
'username': request.user.username,
'nav_hidden': True,
'is_request_in_themed_site': is_request_in_themed_site(),
'subscription_course_key': settings.SUBSCRIPTION_COURSE_KEY
}
return render_to_response('commerce/checkout_receipt.html', context)
| Python | 0 |
73b67a30495e7a6d638421ba8b9544a5e2dc4185 | Fix task full resource | zou/app/resources/project/task_full.py | zou/app/resources/project/task_full.py | from flask import abort
from flask_login import login_required
from zou.app.models.task import Task
from zou.app.models.project import Project
from zou.app.models.person import Person
from zou.app.models.entity import Entity
from zou.app.models.entity_type import EntityType
from zou.app.models.task_status import TaskStatus
from zou.app.models.task_type import TaskType
from zou.app.resources.data.base import BaseModelResource
from zou.app.project import task_info
from zou.app.project.exception import TaskNotFoundException
class TaskFullResource(BaseModelResource):
def __init__(self):
BaseModelResource.__init__(self, Task)
@login_required
def get(self, instance_id):
try:
task = task_info.get_task(instance_id)
except TaskNotFoundException:
abort(404)
result = task.serialize()
task_type = TaskType.get(task.task_type_id)
result["task_type"] = task_type.serialize()
assigner = Person.get(task.assigner_id)
result["assigner"] = assigner.serialize()
project = Project.get(task.project_id)
result["project"] = project.serialize()
task_status = TaskStatus.get(task.task_status_id)
result["task_status"] = task_status.serialize()
entity = Entity.get(task.entity_id)
result["entity"] = entity.serialize()
if entity.parent_id is not None:
parent = Entity.get(entity.parent_id)
result["entity_parent"] = parent.serialize()
entity_type = EntityType.get(entity.entity_type_id)
result["entity_type"] = entity_type.serialize()
assignees = []
for assignee in task.assignees:
assignees.append(assignee.serialize())
result["persons"] = assignees
return result, 200
| from flask import abort
from flask_login import login_required
from zou.app.models.task import Task
from zou.app.models.project import Project
from zou.app.models.person import Person
from zou.app.models.entity import Entity
from zou.app.models.entity_type import EntityType
from zou.app.models.task_status import TaskStatus
from zou.app.models.task_type import TaskType
from zou.app.resources.data.base import BaseModelResource
from zou.app.project import task_info
from zou.app.project.exception import TaskNotFoundException
class TaskFullResource(BaseModelResource):
def __init__(self):
BaseModelResource.__init__(self, Task)
@login_required
def get(self, instance_id):
try:
task = task_info.get_task(instance_id)
except TaskNotFoundException:
abort(404)
result = task.serialize()
task_type = TaskType.get(task.task_type_id)
result["task_type"] = task_type.serialize()
assigner = Person.get(task.assigner_id)
result["assigner"] = assigner.serialize()
project = Project.get(task.project_id)
result["project"] = project.serialize()
task_status = TaskStatus.get(task.task_status_id)
result["task_status"] = task_status.serialize()
entity = Entity.get(task.entity_id)
result["entity"] = entity.serialize()
parent = Entity.get(entity.parent_id)
if parent is not None:
result["entity_parent"] = parent.serialize()
entity_type = EntityType.get(entity.entity_type_id)
result["entity_type"] = entity_type.serialize()
assignees = []
for assignee in task.assignees:
assignees.append(assignee.serialize())
result["persons"] = assignees
return result, 200
| Python | 0.000029 |
d4563fe6991ee644350528a469884f697f02308d | Add production of very high S/N model images | models/make_images.py | models/make_images.py | #!/usr/bin/env python
from glob import glob
import pyfits
import sys, os
import numpy
shape = (100,100)
bands = ['u', 'g', 'r', 'i', 'z', 'Y', 'J', 'H', 'K']
zp = numpy.array([16.75,15.957,15.0,14.563,14.259,14.162,13.955,13.636,13.525])
def make_images(model='A', noiselevel=5,
bandsel=['u', 'g', 'r', 'i', 'z', 'Y', 'J', 'H', 'K']):
noisebands = 10**(-0.4*(zp-15.0)) * noiselevel/2.0
noise = []
for n in noisebands:
noise.append(numpy.random.normal(0.0, n, shape))
gals = glob('model%s.galfit'%model)
for g in gals:
os.system('nice galfit %s > %s.out'%(g,g))
imgname = g.replace('.galfit', '')
img = pyfits.open(imgname+'.fits')
for j, b in enumerate(bands):
if b in bandsel:
ext = img['MODEL_'+b]
print g, b, j, ext.name, noisebands[j]
ext.data += noise[j]
pyfits.writeto(imgname+'_%i%s_n%i.fits'%(j+1, b, noiselevel), ext.data, clobber=True)
if __name__ =='__main__':
make_images('A', 0.01)
make_images('A', 5)
make_images('A', 50, ['H'])
make_images('B', 0.01)
make_images('B', 5)
make_images('B', 50, ['H'])
for x in 'abcdefghi':
make_images('C'+x, 0, ['r'])
make_images('C'+x, 5, ['r'])
make_images('D', 0.01)
make_images('D', 5)
make_images('E', 0.01)
make_images('E', 5)
| #!/usr/bin/env python
from glob import glob
import pyfits
import sys, os
import numpy
shape = (100,100)
bands = ['u', 'g', 'r', 'i', 'z', 'Y', 'J', 'H', 'K']
zp = numpy.array([16.75,15.957,15.0,14.563,14.259,14.162,13.955,13.636,13.525])
def make_images(model='A', noiselevel=5,
bandsel=['u', 'g', 'r', 'i', 'z', 'Y', 'J', 'H', 'K']):
noisebands = 10**(-0.4*(zp-15.0)) * noiselevel/2.0
noise = []
for n in noisebands:
noise.append(numpy.random.normal(0.0, n, shape))
gals = glob('model%s.galfit'%model)
for g in gals:
os.system('nice galfit %s > %s.out'%(g,g))
imgname = g.replace('.galfit', '')
img = pyfits.open(imgname+'.fits')
for j, b in enumerate(bands):
if b in bandsel:
ext = img['MODEL_'+b]
print g, b, j, ext.name, noisebands[j]
ext.data += noise[j]
pyfits.writeto(imgname+'_%i%s_n%i.fits'%(j+1, b, noiselevel), ext.data, clobber=True)
if __name__ =='__main__':
make_images('A', 5)
make_images('A', 50, ['H'])
make_images('B', 5)
make_images('B', 50, ['H'])
for x in 'abcdefghi':
make_images('C'+x, 5, ['r'])
make_images('D', 5)
make_images('E', 5)
| Python | 0 |
2417f7e3c445c7f369c9eb8cb48c83ebb4c2e43d | Change blueprints to be more container-like. | kyokai/blueprints.py | kyokai/blueprints.py | """
Kyōkai blueprints are simply groups of routes.
They're a simpler way of grouping your routes together instead of having to import your app object manually all of
the time.
"""
from kyokai.route import Route
class Blueprint(object):
"""
A Blueprint is a container for routes.
"""
def __init__(self, name: str, parent: 'Blueprint',
url_prefix: str=""):
self._prefix = url_prefix
self._name = name
self.routes = []
self.errhandlers = {}
self._parent = parent
@property
def parent(self) -> 'Blueprint':
"""
Returns the parent Blueprint of the currentl Blueprint.
"""
return self._parent
def route(self, regex, methods: list = None, hard_match: bool = False):
"""
Create an incoming route for a function.
Parameters:
regex:
The regular expression to match the path to.
In standard Python `re` forme.
Group matches are automatically extracted from the regex, and passed as arguments.
methods:
The list of allowed methods, e.g ["GET", "POST"].
You can check the method with `request.method`.
hard_match:
Should we match based on equality, rather than regex?
This prevents index or lower level paths from matching 404s at higher levels.
"""
if not methods:
methods = ["GET"]
# Override hard match if it's a `/` route.
if regex == "/":
hard_match = True
regex = self._prefix + regex
r = Route(self, regex, methods, hard_match)
self.routes.append(r)
return r
def errorhandler(self, code: int):
"""
Create an error handler for the specified code.
This will wrap the function in a Route.
"""
r = Route(self, "", [])
self.errhandlers[code] = r
return r
| """
Kyōkai are simply groups of routes.
They're a simpler way of grouping your routes together instead of having to import your app object manually all of
the time.
"""
from kyokai.route import Route
class Blueprint(object):
"""
A Blueprint contains one public method: `bp.route`. It acts exactly the same as a normal route method.
If you set a `url_prefix` in the constructor, this prefix will be added onto your routes.
"""
def __init__(self, name: str, url_prefix: str=""):
self._prefix = url_prefix
self._name = name
self.routes = []
self.errhandlers = {}
def _bp_get_errhandler(self, code: int):
return self.errhandlers.get(code)
def route(self, regex, methods: list = None, hard_match: bool = False):
"""
Create an incoming route for a function.
Parameters:
regex:
The regular expression to match the path to.
In standard Python `re` forme.
Group matches are automatically extracted from the regex, and passed as arguments.
methods:
The list of allowed methods, e.g ["GET", "POST"].
You can check the method with `request.method`.
hard_match:
Should we match based on equality, rather than regex?
This prevents index or lower level paths from matching 404s at higher levels.
"""
if not methods:
methods = ["GET"]
# Override hard match if it's a `/` route.
if regex == "/":
hard_match = True
regex = self._prefix + regex
r = Route(regex, methods, hard_match)
r.set_errorhandler_factory(self._bp_get_errhandler)
self.routes.append(r)
return r
def errorhandler(self, code: int):
"""
Create an error handler for the specified code.
This will wrap the function in a Route.
"""
r = Route("", [])
self.errhandlers[code] = r
return r
def _init_bp(self):
return self.routes | Python | 0 |
a43ada7785db136f3a5d7d96c6b64b0a686d052e | fix total_force missing | labs/lab2/analyze.py | labs/lab2/analyze.py | #!/usr/bin/env python
import re
import sys
import csv
import argparse
# This defines the patterns for extracting relevant data from the output
# files.
patterns = {
"energy": re.compile("total energy\s+=\s+([\d\.\-]+)\sRy"),
"ecut": re.compile("kinetic\-energy cutoff\s+=\s+([\d\.\-]+)\s+Ry"),
"alat": re.compile("celldm\(1\)=\s+([\d\.]+)\s"),
"nkpts": re.compile("number of k points=\s+([\d]+)"),
"total_force": re.compile("Total force =\s+([\d\.]+)")
}
def get_results(filename):
data = {}
with open(filename) as f:
for l in f:
for k, p in patterns.items():
m = p.search(l)
if m:
data[k] = float(m.group(1))
continue
return data
def analyze(filenames):
fieldnames = ['filename', 'ecut', 'nkpts', 'alat', 'energy','total_force']
with open('results.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for f in filenames:
r = get_results(f)
r["filename"] = f
writer.writerow(r)
print("Results written to results.csv!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='''Tool for analysis of PWSCF calculations.''')
parser.add_argument(
'filenames', metavar='filenames', type=str, nargs="+",
help='Files to process. You may use wildcards, e.g., "python analyze.py *.out".')
args = parser.parse_args()
analyze(args.filenames)
| #!/usr/bin/env python
import re
import sys
import csv
import argparse
# This defines the patterns for extracting relevant data from the output
# files.
patterns = {
"energy": re.compile("total energy\s+=\s+([\d\.\-]+)\sRy"),
"ecut": re.compile("kinetic\-energy cutoff\s+=\s+([\d\.\-]+)\s+Ry"),
"alat": re.compile("celldm\(1\)=\s+([\d\.]+)\s"),
"nkpts": re.compile("number of k points=\s+([\d]+)"),
"total_force": re.compile("Total force =\s+([\d\.]+)")
}
def get_results(filename):
data = {}
with open(filename) as f:
for l in f:
for k, p in patterns.items():
m = p.search(l)
if m:
data[k] = float(m.group(1))
continue
return data
def analyze(filenames):
fieldnames = ['filename', 'ecut', 'nkpts', 'alat', 'energy']
with open('results.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for f in filenames:
r = get_results(f)
r["filename"] = f
writer.writerow(r)
print("Results written to results.csv!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='''Tool for analysis of PWSCF calculations.''')
parser.add_argument(
'filenames', metavar='filenames', type=str, nargs="+",
help='Files to process. You may use wildcards, e.g., "python analyze.py *.out".')
args = parser.parse_args()
analyze(args.filenames)
| Python | 0.000053 |
0ce8050b797b3e2c2a9b0e74cbc67fd8e31736b3 | Remove working distros to focus on non-working ones | fog-aws-testing/scripts/settings.py | fog-aws-testing/scripts/settings.py | # The list of OSs.
#OSs = ["debian9","centos7","rhel7","fedora29","arch","ubuntu18_04"]
OSs = ["rhel7","fedora29","arch","ubuntu18_04"]
#dnsAddresses = ["debian9.fogtesting.cloud","centos7.fogtesting.cloud","rhel7.fogtesting.cloud","fedora29.fogtesting.cloud","arch.fogtesting.cloud","ubuntu18_04.fogtesting.cloud"]
dnsAddresses = ["rhel7.fogtesting.cloud","fedora29.fogtesting.cloud","arch.fogtesting.cloud","ubuntu18_04.fogtesting.cloud"]
# The list of branches to process.
branches = ["master"]
branches = ["master","dev-branch"]
# The region we operate in, dictated by terraform.
theRegion = "us-east-2"
# The availibility zone, which we use just one zone.
zone = theRegion + 'a'
# For when we need to wait for something to get done while in a loop, wait this long.
wait = 1
scriptDir = "/home/admin/fog-community-scripts/fog-aws-testing/scripts"
webdir = '/tmp/webdir'
statusDir = '/tmp/statuses'
indexHtml = 'index.html'
green = "green.png"
orange = "orange.png"
red = "red.png"
s3bucket = "fogtesting2.theworkmans.us"
http = "http://"
port = ""
netdir = ""
remoteResult = "/root/result"
ssh = "/usr/bin/ssh"
scp = "/usr/bin/scp"
timeout = "/usr/bin/timeout"
s3cmd = "/usr/bin/s3cmd"
ssh_keyscan = "/usr/bin/ssh-keyscan"
sshTimeout = "15"
fogTimeout= "15m" #Time to wait for FOG installation to complete. Must end with a unit of time. s for seconds, m for minutes.
sshTime="15s" #Time to wait for small SSH commands to complete. Must end with a unit of time. s for seconds, m for minutes.
codes = {
"-1":{
"reason":"Installer did not complete within alotted time.",
"status":orange
},
"0":{
"reason":"Success.",
"status":green
},
"1":{
"reason":"Failed to call script properly.",
"status":orange
},
"2":{
"reason":"Failed to reset git.",
"status":orange
},
"3":{
"reason":"Failed to pull git.",
"status":orange
},
"4":{
"reason":"Failed to checkout git.",
"status":orange
},
"5":{
"reason":"Failed to change directory.",
"status":orange
},
"6":{
"reason":"Installation failed.",
"status":red
}
}
| # The list of OSs.
OSs = ["debian9","centos7","rhel7","fedora29","arch","ubuntu18_04"]
dnsAddresses = ["debian9.fogtesting.cloud","centos7.fogtesting.cloud","rhel7.fogtesting.cloud","fedora29.fogtesting.cloud","arch.fogtesting.cloud","ubuntu18_04.fogtesting.cloud"]
# The list of branches to process.
branches = ["master","dev-branch"]
# The region we operate in, dictated by terraform.
theRegion = "us-east-2"
# The availibility zone, which we use just one zone.
zone = theRegion + 'a'
# For when we need to wait for something to get done while in a loop, wait this long.
wait = 1
scriptDir = "/home/admin/fog-community-scripts/fog-aws-testing/scripts"
webdir = '/tmp/webdir'
statusDir = '/tmp/statuses'
indexHtml = 'index.html'
green = "green.png"
orange = "orange.png"
red = "red.png"
s3bucket = "fogtesting2.theworkmans.us"
http = "http://"
port = ""
netdir = ""
remoteResult = "/root/result"
ssh = "/usr/bin/ssh"
scp = "/usr/bin/scp"
timeout = "/usr/bin/timeout"
s3cmd = "/usr/bin/s3cmd"
ssh_keyscan = "/usr/bin/ssh-keyscan"
sshTimeout = "15"
fogTimeout= "15m" #Time to wait for FOG installation to complete. Must end with a unit of time. s for seconds, m for minutes.
sshTime="15s" #Time to wait for small SSH commands to complete. Must end with a unit of time. s for seconds, m for minutes.
codes = {
"-1":{
"reason":"Installer did not complete within alotted time.",
"status":orange
},
"0":{
"reason":"Success.",
"status":green
},
"1":{
"reason":"Failed to call script properly.",
"status":orange
},
"2":{
"reason":"Failed to reset git.",
"status":orange
},
"3":{
"reason":"Failed to pull git.",
"status":orange
},
"4":{
"reason":"Failed to checkout git.",
"status":orange
},
"5":{
"reason":"Failed to change directory.",
"status":orange
},
"6":{
"reason":"Installation failed.",
"status":red
}
}
| Python | 0 |
b72ab35056ca6ec1e48db963d61c31d89ec80161 | fix on winsock2 | autoconf/winsock2.py | autoconf/winsock2.py | from _external import *
winsock2 = LibWithHeaderChecker( 'ws2_32',
['winsock2.h'],
'c',
name='winsock2' )
| from _external import *
winsock2 = LibWithHeaderChecker( 'winsock2',
['winsock2.h'],
'c',
name='ws2_32' )
| Python | 0 |
0b499f01d517775fb03294c1c785318ca6224874 | Bump to v0.0.5 | backache/__init__.py | backache/__init__.py | from . core import *
from . antioxidant import celerize # flake8: noqa
from . errors import *
__version__ = (0, 0, 5)
| from . core import *
from . antioxidant import celerize # flake8: noqa
from . errors import *
__version__ = (0, 0, 4)
| Python | 0.000001 |
da26428a6f7adf58e7cfed8ece61fc42ed76345e | Remove commented out code. pep8/pyflakes | src/recore/amqp.py | src/recore/amqp.py | # -*- coding: utf-8 -*-
# Copyright © 2014 SEE AUTHORS FILE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import json
import pika
import recore.fsm
MQ_CONF = {}
connection = None
out = logging.getLogger('recore.amqp')
def init_amqp(mq):
"""Open a channel to our AMQP server"""
import recore.amqp
recore.amqp.MQ_CONF = mq
creds = pika.credentials.PlainCredentials(mq['NAME'], mq['PASSWORD'])
params = pika.ConnectionParameters(
host=str(mq['SERVER']),
credentials=creds)
connect_string = "amqp://%s:******@%s:%s/%s" % (
mq['NAME'], mq['SERVER'], mq['PORT'], mq['EXCHANGE'])
out.debug('Attemtping to open channel with connect string: %s' % connect_string)
recore.amqp.connection = pika.SelectConnection(parameters=params,
on_open_callback=on_open)
return recore.amqp.connection
def on_open(connection):
"""
Call back when a connection is opened.
"""
out.debug("Opened AMQP connection")
connection.channel(on_channel_open)
def on_channel_open(channel):
"""
Call back when a channel is opened.
"""
out.debug("MQ channel opened. Declaring exchange ...")
channel.exchange_declare(exchange=MQ_CONF['EXCHANGE'],
durable=True,
exchange_type='topic')
consumer_tag = channel.basic_consume(
receive,
queue=MQ_CONF['QUEUE'])
return consumer_tag
def receive(ch, method, properties, body):
"""
Callback for watching the FSM queue
"""
out = logging.getLogger('recore')
notify = logging.getLogger('recore.stdout')
msg = json.loads(body)
topic = method.routing_key
out.debug("Message: %s" % msg)
ch.basic_ack(delivery_tag=method.delivery_tag)
if topic == 'job.create':
id = None
try:
# We need to get the name of the temporary
# queue to respond back on.
notify.info("new job create for: %s" % msg['project'])
out.info(
"New job requested, starting release "
"process for %s ..." % msg["project"])
notify.debug("Job message: %s" % msg)
reply_to = properties.reply_to
id = recore.job.create.release(
ch, msg['project'], reply_to, msg['dynamic'])
except KeyError, ke:
notify.info("Missing an expected key in message: %s" % ke)
out.error("Missing an expected key in message: %s" % ke)
return
if id:
# Skip this try/except until we work all the bugs out of the FSM
# try:
runner = recore.fsm.FSM(id)
runner.start()
while runner.isAlive():
runner.join(0.3)
# except Exception, e:
# notify.error(str(e))
else:
out.warn("Unknown routing key %s. Doing nothing ...")
notify.info("IDK what this is: %s" % topic)
notify.info("end receive() routine")
out.debug("end receive() routine")
| # -*- coding: utf-8 -*-
# Copyright © 2014 SEE AUTHORS FILE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import json
import pika
import recore.fsm
import threading
MQ_CONF = {}
connection = None
out = logging.getLogger('recore.amqp')
def init_amqp(mq):
"""Open a channel to our AMQP server"""
import recore.amqp
recore.amqp.MQ_CONF = mq
creds = pika.credentials.PlainCredentials(mq['NAME'], mq['PASSWORD'])
params = pika.ConnectionParameters(
host=str(mq['SERVER']),
credentials=creds)
out.debug('Attemtping to open channel...')
connect_string = "amqp://%s:******@%s:%s/%s" % (
mq['NAME'], mq['SERVER'], mq['PORT'], mq['EXCHANGE'])
recore.amqp.connection = pika.SelectConnection(parameters=params,
on_open_callback=on_open)
return recore.amqp.connection
def on_open(connection):
"""
Call back when a connection is opened.
"""
out.debug("Opened AMQP connection")
connection.channel(on_channel_open)
def on_channel_open(channel):
"""
Call back when a channel is opened.
"""
out.debug("MQ channel opened. Declaring exchange ...")
channel.exchange_declare(exchange=MQ_CONF['EXCHANGE'],
durable=True,
exchange_type='topic')
consumer_tag = channel.basic_consume(
receive,
queue=MQ_CONF['QUEUE'])
# def watch_the_queue(channel, connection, queue_name):
# """Begin consuming messages `queue_name` on the bus. Set our default
# callback handler
# """
# channel.basic_consume(receive,
# queue=queue_name)
# try:
# notify = logging.getLogger('recore.stdout')
# notify.info('FSM online and listening for messages')
# out = logging.getLogger('recore')
# out.debug('Consuming messages from queue: %s' % queue_name)
# except KeyboardInterrupt:
# channel.close()
# connection.close()
# pass
def receive(ch, method, properties, body):
"""
Callback for watching the FSM queue
"""
out = logging.getLogger('recore')
notify = logging.getLogger('recore.stdout')
msg = json.loads(body)
topic = method.routing_key
out.debug("Message: %s" % msg)
ch.basic_ack(delivery_tag=method.delivery_tag)
if topic == 'job.create':
id = None
try:
# We need to get the name of the temporary
# queue to respond back on.
notify.info("new job create for: %s" % msg['project'])
out.info(
"New job requested, starting release "
"process for %s ..." % msg["project"])
notify.debug("Job message: %s" % msg)
reply_to = properties.reply_to
id = recore.job.create.release(
ch, msg['project'], reply_to, msg['dynamic'])
except KeyError, ke:
notify.info("Missing an expected key in message: %s" % ke)
out.error("Missing an expected key in message: %s" % ke)
return
if id:
# Skip this try/except until we work all the bugs out of the FSM
# try:
runner = recore.fsm.FSM(id)
runner.start()
while runner.isAlive():
runner.join(0.3)
# except Exception, e:
# notify.error(str(e))
else:
out.warn("Unknown routing key %s. Doing nothing ...")
notify.info("IDK what this is: %s" % topic)
notify.info("end receive() routine")
out.debug("end receive() routine")
| Python | 0.000071 |
ce2cf07d9fa9dc3bdd229b1cbb56745784e3049d | Fix stray char. | law/sandbox/docker.py | law/sandbox/docker.py | # -*- coding: utf-8 -*-
"""
Docker sandbox implementation.
"""
__all__ = ["DockerSandbox"]
from law.sandbox.base import Sandbox
class DockerSandbox(Sandbox):
sandbox_type = "docker"
@property
def image(self):
return self.name
def cmd(self, task, task_cmd):
# get args for the docker command as configured in the task
docker_args = getattr(task, "docker_args", ["--rm"])
if isinstance(docker_args, (list, tuple)):
docker_args = " ".join(str(arg) for arg in docker_args)
cmd = "docker run {docker_args} {image} \"{task_cmd}\""
cmd = cmd.format(docker_args=docker_args, image=self.image, task_cmd=task_cmd)
return cmd
| # -*- coding: utf-8 -*-
"""
Docker sandbox implementation.
"""
__all__ = ["DockerSandbox"]
from law.sandbox.base import Sandbox
class DockerSandbox(Sandbox):
sandbox_type = "docker"
@property
def image(self):
return self.name
def cmd(self, task, task_cmd):
# get args for the docker command as configured in the task
docker_args = getattr(task, "docker_args", ["--rm"]):
if isinstance(docker_args, (list, tuple)):
docker_args = " ".join(str(arg) for arg in docker_args)
cmd = "docker run {docker_args} {image} \"{task_cmd}\""
cmd = cmd.format(docker_args=docker_args, image=self.image, task_cmd=task_cmd)
return cmd
| Python | 0 |
699085edd1db5aa7a827a16ffffcbcc9a69cbf52 | Add forgotten imports for bucketlist endpoints | app/endpoints.py | app/endpoints.py | from flask import request, Blueprint
from flask_restful import Api
from controllers.accounts_manager import LoginResource, RegisterResource
from controllers.bucketlist import BucketListsResource, BucketListResource
from controllers.bucketlist_items import BucketListItems
bucketlist_blueprint = Blueprint('bucket_list', __name__)
api = Api(bucketlist_blueprint)
# login routes
api.add_resource(RegisterResource, '/auth/register')
api.add_resource(LoginResource, '/auth/login')
# bucketlist routes
api.add_resource(BucketListsResource, '/bucketlists')
api.add_resource(BucketListResource, '/bucketlists/<int:id>')
# bucketlist items routes
api.add_resource(BucketListItems,
'/bucketlists/<int:bucketlist_id>/items',
'/bucketlists/<int:bucketlist_id>/items/<int:item_id>')
| from flask import request, Blueprint
from flask_restful import Api
from controllers.accounts_manager import LoginResource, RegisterResource
from controllers.bucketlist import GetAllBucketLists, GetBucketList
from controllers.bucketlist_items import BucketListItems
bucketlist_blueprint = Blueprint('bucket_list', __name__)
api = Api(bucketlist_blueprint)
# login routes
api.add_resource(RegisterResource, '/auth/register')
api.add_resource(LoginResource, '/auth/login')
# bucketlist routes
api.add_resource(BucketListsResource, '/bucketlists')
api.add_resource(BucketListResource, '/bucketlists/<int:id>')
# bucketlist items routes
api.add_resource(BucketListItems,
'/bucketlists/<int:bucketlist_id>/items',
'/bucketlists/<int:bucketlist_id>/items/<int:item_id>')
| Python | 0 |
6908060af5b872e54d42f63e580591931b7ff230 | Check empty string | museum_site/scroll.py | museum_site/scroll.py | from django.db import models
class Scroll(models.Model):
# Constants
SCROLL_TOP = """```
╞╤═════════════════════════════════════════════╤╡
│ Scroll ### │
╞═════════════════════════════════════════════╡
│ • • • • • • • • •│"""
SCROLL_BOTTOM = """\n │ • • • • • • • • •│
╞╧═════════════════════════════════════════════╧╡```"""
# Fields
identifier = models.IntegerField()
content = models.TextField(
default="",
help_text="Lines starting with @ will be skipped. Initial whitespace is trimmed by DB, so an extra @ line is a fix."
)
source = models.CharField(max_length=160)
published = models.BooleanField(default=False)
suggestion = models.CharField(max_length=500, blank=True, default="")
class Meta:
ordering = ["-id"]
def __str__(self):
return "Scroll #{} ID:{} Pub:{}".format(self.identifier, self.id, self.published)
def lines(self):
return self.content.split("\n")
def render_for_discord(self):
lines = self.lines()
output = self.SCROLL_TOP.replace("###", ("000"+str(self.identifier))[-3:])
for line in lines:
line = line.replace("\r", "")
line = line.replace("\n", "")
if line and line[0] == "@":
continue
output += "\n │ " + (line + " " * 42)[:42] + " │ "
output += self.SCROLL_BOTTOM
return output
| from django.db import models
class Scroll(models.Model):
# Constants
SCROLL_TOP = """```
╞╤═════════════════════════════════════════════╤╡
│ Scroll ### │
╞═════════════════════════════════════════════╡
│ • • • • • • • • •│"""
SCROLL_BOTTOM = """\n │ • • • • • • • • •│
╞╧═════════════════════════════════════════════╧╡```"""
# Fields
identifier = models.IntegerField()
content = models.TextField(
default="",
help_text="Lines starting with @ will be skipped. Initial whitespace is trimmed by DB, so an extra @ line is a fix."
)
source = models.CharField(max_length=160)
published = models.BooleanField(default=False)
suggestion = models.CharField(max_length=500, blank=True, default="")
class Meta:
ordering = ["-id"]
def __str__(self):
return "Scroll #{} ID:{} Pub:{}".format(self.identifier, self.id, self.published)
def lines(self):
return self.content.split("\n")
def render_for_discord(self):
lines = self.lines()
output = self.SCROLL_TOP.replace("###", ("000"+str(self.identifier))[-3:])
for line in lines:
line = line.replace("\r", "")
line = line.replace("\n", "")
if line[0] == "@":
continue
output += "\n │ " + (line + " " * 42)[:42] + " │ "
output += self.SCROLL_BOTTOM
return output
| Python | 0.026724 |
43a348865dcc21e9d88ebf05fd794fed2b7b350c | Update suite | mx.irbuilder/suite.py | mx.irbuilder/suite.py | suite = {
"mxversion" : "5.70.2",
"name" : "java-llvm-ir-builder",
"versionConflictResolution" : "latest",
"imports" : {
"suites" : [
{
"name" : "sulong",
"version" : "f25a652b20e9c2c7d99fbd3844b64a44da5547a6",
"urls" : [
{
"url" : "https://github.com/pointhi/sulong",
"kind" : "git"
},
]
},
],
},
"javac.lint.overrides" : "none",
"projects" : {
"at.pointhi.irbuilder.irwriter" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"sulong:SULONG",
],
"checkstyle" : "at.pointhi.irbuilder.irwriter",
"javaCompliance" : "1.8",
"license" : "BSD-new",
},
"at.pointhi.irbuilder.test": {
"subDir": "projects",
"sourceDirs": ["src"],
"dependencies": [
"at.pointhi.irbuilder.irwriter",
"sulong:SULONG",
"sulong:SULONG_TEST",
"mx:JUNIT",
],
"checkstyle": "at.pointhi.irbuilder.irwriter",
"javaCompliance": "1.8",
"license": "BSD-new",
},
},
"distributions" : {
"IRWRITER" : {
"path" : "build/irwriter.jar",
"subDir" : "graal",
"sourcesPath" : "build/irbuilder.src.zip",
"mainClass" : "at.pointhi.irbuilder.irwriter.SourceParser",
"dependencies" : [
"at.pointhi.irbuilder.irwriter"
],
"distDependencies" : [
"sulong:SULONG",
]
},
"IRWRITER_TEST" : {
"path" : "build/irwriter_test.jar",
"subDir" : "graal",
"sourcesPath" : "build/irwriter_test.src.zip",
"dependencies" : [
"at.pointhi.irbuilder.test"
],
"exclude" : [
"mx:JUNIT"
],
"distDependencies" : [
"IRWRITER",
"sulong:SULONG",
"sulong:SULONG_TEST",
]
},
}
}
| suite = {
"mxversion" : "5.70.2",
"name" : "java-llvm-ir-builder",
"versionConflictResolution" : "latest",
"imports" : {
"suites" : [
{
"name" : "sulong",
"version" : "38a5bad302f48d676f15a0b3fd9b02f6f3a8abdd",
"urls" : [
{
"url" : "https://github.com/pointhi/sulong",
"kind" : "git"
},
]
},
],
},
"javac.lint.overrides" : "none",
"projects" : {
"at.pointhi.irbuilder.irwriter" : {
"subDir" : "projects",
"sourceDirs" : ["src"],
"dependencies" : [
"sulong:SULONG",
],
"checkstyle" : "at.pointhi.irbuilder.irwriter",
"javaCompliance" : "1.8",
"license" : "BSD-new",
},
"at.pointhi.irbuilder.test": {
"subDir": "projects",
"sourceDirs": ["src"],
"dependencies": [
"at.pointhi.irbuilder.irwriter",
"sulong:SULONG",
"sulong:SULONG_TEST",
"mx:JUNIT",
],
"checkstyle": "at.pointhi.irbuilder.irwriter",
"javaCompliance": "1.8",
"license": "BSD-new",
},
},
"distributions" : {
"IRWRITER" : {
"path" : "build/irwriter.jar",
"subDir" : "graal",
"sourcesPath" : "build/irbuilder.src.zip",
"mainClass" : "at.pointhi.irbuilder.irwriter.SourceParser",
"dependencies" : [
"at.pointhi.irbuilder.irwriter"
],
"distDependencies" : [
"sulong:SULONG",
]
},
"IRWRITER_TEST" : {
"path" : "build/irwriter_test.jar",
"subDir" : "graal",
"sourcesPath" : "build/irwriter_test.src.zip",
"dependencies" : [
"at.pointhi.irbuilder.test"
],
"exclude" : [
"mx:JUNIT"
],
"distDependencies" : [
"IRWRITER",
"sulong:SULONG",
"sulong:SULONG_TEST",
]
},
}
}
| Python | 0.000001 |
e00b7c612f34c938a3d42dada006874ffea021c8 | complete localizer | app/localizer.py | app/localizer.py | # -*- coding: utf-8 -*-
"""
localizer
localize bounding boxes and pad rest of image with zeros (255, 255, 255)
"""
import os
import cv2
import numpy as np
import multiprocessing as mp
from app.pipeline import generate_data_skeleton
from app.cv.serializer import deserialize_json
from app.settings import BOUNDINGBOX, IMAGE_PATH
class Localizer(object):
def __init__(self, path_to_image):
# cv2 loads image in BGR channel order
self.path = path_to_image
self.image = cv2.imread(path_to_image, -1)
self.fname = os.path.split(path_to_image)[1]
try:
self.bboxes = \
deserialize_json(BOUNDINGBOX)[self.fname]['annotations']
except IndexError:
self.bboxes = None
self.output_image = None
@property
def coordinates_factory(self):
"""yield bounding boxes"""
for bbox in self.bboxes:
x = int(bbox['x'])
y = int(bbox['y'])
height = int(bbox['height'])
width = int(bbox['width'])
yield x, x + width, y, y + height
def declutter(self):
filter_layer = np.zeros(shape=self.image.shape)
# highlight image with (1, 1, 1) on background of zeros
if self.bboxes:
for x, x_end, y, y_end in self.coordinates_factory:
filter_layer[y: y_end, x: x_end, :] = (1., 1., 1.)
# elementwise multiplication of filter layer and original image
self.output_image = cv2.convertScaleAbs(self.image * filter_layer)
elif not self.bboxes:
self.output_image = self.image
return self
def show(self):
cv2.imshow("output", self.output_image)
cv2.waitKey(0)
def write(self):
print('writing {}'.format(self.path))
cv2.imwrite(self.path, self.output_image)
def localize(path_to_image):
Localizer(path_to_image).declutter().write()
paths_to_images = generate_data_skeleton(IMAGE_PATH)[0]
with mp.Pool(10) as p:
p.map(localize, paths_to_images)
| # -*- coding: utf-8 -*-
"""
localizer
localize bounding boxes and pad rest of image with zeros (255, 255, 255)
"""
import os
import cv2
import numpy as np
from app.cv.serializer import deserialize_json
from app.settings import CV_SAMPLE_PATH, BOUNDINGBOX
test_image = CV_SAMPLE_PATH + 'pos/img_00003.jpg'
class Localizer(object):
def __init__(self, path_to_image):
self.image = cv2.imread(path_to_image, -1)
self.fname = os.path.split(path_to_image)[1]
self.bboxes = \
deserialize_json(BOUNDINGBOX)[self.fname]['annotations']
@property
def factory(self):
"""yield bounding boxes"""
for bbox in self.bboxes:
x = int(bbox['x'])
y = int(bbox['y'])
height = int(bbox['height'])
width = int(bbox['width'])
yield x, x + width, y, y + height
def new_image(self):
background = np.zeros(shape=self.image.shape)
# highlight image with (1, 1, 1) on background of zeros
for x, x_end, y, y_end in self.factory:
background[x: x_end, y: y_end] = [1, 1, 1]
# mirrir original image's bounding boxes into new
self.output_image = np.mutiply(self.image, background)
def show(self):
cv2.imshow("Display window", self.output_image)
cv2.waitKey(0)
# # image read as it is in as BGR
# image = cv2.imread(test_image, -1)
# b = image[2: 10, 3: 11, :]
# print(b)
# c = np.zeros(shape=(8, 8, 3))
# c[3, 3] = (1, 1, 1)
# d = np.multiply(b, c)
# print(d)
| Python | 0.000001 |
73b9246164994049d291d5b482d4dbf2ca41a124 | Rename master branch to main | tests/app/test_accessibility_statement.py | tests/app/test_accessibility_statement.py | import re
import subprocess
from datetime import datetime
def test_last_review_date():
statement_file_path = "app/templates/views/accessibility_statement.html"
# test local changes against main for a full diff of what will be merged
statement_diff = subprocess.run(
[f"git diff --exit-code origin/main -- {statement_file_path}"], stdout=subprocess.PIPE, shell=True
)
# if statement has changed, test the review date was part of those changes
if statement_diff.returncode == 1:
raw_diff = statement_diff.stdout.decode("utf-8")
today = datetime.now().strftime("%d %B %Y")
with open(statement_file_path, "r") as statement_file:
current_review_date = re.search(
(r'"Last updated": "(\d{1,2} [A-Z]{1}[a-z]+ \d{4})"'), statement_file.read()
).group(1)
# guard against changes that don't need to update the review date
if current_review_date != today:
assert '"Last updated": "' in raw_diff
| import re
import subprocess
from datetime import datetime
def test_last_review_date():
statement_file_path = "app/templates/views/accessibility_statement.html"
# test local changes against master for a full diff of what will be merged
statement_diff = subprocess.run(
[f"git diff --exit-code origin/master -- {statement_file_path}"], stdout=subprocess.PIPE, shell=True
)
# if statement has changed, test the review date was part of those changes
if statement_diff.returncode == 1:
raw_diff = statement_diff.stdout.decode("utf-8")
today = datetime.now().strftime("%d %B %Y")
with open(statement_file_path, "r") as statement_file:
current_review_date = re.search(
(r'"Last updated": "(\d{1,2} [A-Z]{1}[a-z]+ \d{4})"'), statement_file.read()
).group(1)
# guard against changes that don't need to update the review date
if current_review_date != today:
assert '"Last updated": "' in raw_diff
| Python | 0.999013 |
5f5bdcf5c6b6fb70dc94945d463c5200a46699d6 | revert unfinished task test | tests/integration/unfinished_task_test.py | tests/integration/unfinished_task_test.py | # stdlib
import time
# third party
import pytest
# syft absolute
import syft as sy
from syft.core.node.common.action.save_object_action import SaveObjectAction
from syft.core.store.storeable_object import StorableObject
@pytest.mark.general
def test_unfinished_task(get_clients) -> None:
print("running test_unfinished_task")
client = get_clients(1)[0]
list_pointer = sy.lib.python.List().send(client)
int_pointer = sy.lib.python.Int(1).send(client)
time.sleep(5)
int_obj = int_pointer.get()
list_pointer.append(int_pointer)
storeable_object = StorableObject(id=int_pointer.id_at_location, data=int_obj)
save_object_action = SaveObjectAction(obj=storeable_object, address=client.address)
client.send_immediate_msg_without_reply(msg=save_object_action)
time.sleep(5)
assert list_pointer.get() == [1]
| # third party
import pytest
# syft absolute
import syft as sy
from syft.core.node.common.action.save_object_action import SaveObjectAction
from syft.core.store.storeable_object import StorableObject
@pytest.mark.general
def test_unfinished_task(get_clients) -> None:
print("running test_unfinished_task")
client = get_clients(1)[0]
list_pointer = sy.lib.python.List().send(client)
int_pointer = sy.lib.python.Int(1).send(client)
int_pointer.block_with_timeout(secs=10)
int_obj = int_pointer.get()
list_pointer.append(int_pointer)
storeable_object = StorableObject(id=int_pointer.id_at_location, data=int_obj)
save_object_action = SaveObjectAction(obj=storeable_object, address=client.address)
client.send_immediate_msg_without_reply(msg=save_object_action)
list_pointer.block_with_timeout(secs=10)
assert list_pointer.get() == [1]
| Python | 0.000167 |
da3a4e8036a5933a9ce00f42795c8ca398925c38 | Update geogig_init_repo.py | lib/rogue/geogig_init_repo.py | lib/rogue/geogig_init_repo.py | from base64 import b64encode
from optparse import make_option
import json
import urllib
import urllib2
import argparse
import time
import os
import subprocess
#==#
import _geogig_init_repo
#==#
parser = argparse.ArgumentParser(description='Initialize GeoGig repository and optionally add to GeoServer instance. If you want to add the GeoGig repo include the optional parameters.')
parser.add_argument("--path", help="The location in the filesystem of the Geogig repository.")
parser.add_argument("--name", help="The name of the GeoGig repo and data store in GeoServer.")
parser.add_argument('-gs', '--geoserver', help="The url of the GeoServer servicing the GeoGig repository.")
parser.add_argument('-ws', '--workspace', help="The GeoServer workspace to use for the data store.")
#parser.add_argument("--path", help="The location in the filesystem of the Geogig repository.")
parser.add_argument("--username", help="The username to use for basic auth requests.")
parser.add_argument("--password", help="The password to use for basic auth requests.")
parser.add_argument('--verbose', '-v', default=0, action='count', help="Print out intermediate status messages.")
parser.add_argument("--publish_datastore", default=0, action='count', help="Publish datastore in GeoServer for GeoGig repository")
parser.add_argument('--publish_layers', default=0, action='count', help="Publish layers from GeoGig data store")
args = parser.parse_args()
#==#
_geogig_init_repo.run(args)
| from base64 import b64encode
from optparse import make_option
import json
import urllib
import urllib2
import argparse
import time
import os
import subprocess
#==#
import _geogig_init_repo
#==#
parser = argparse.ArgumentParser(description='Initialize GeoGig repository and optionally add to GeoServer instance. If you want to add the GeoGig repo include the optional parameters.')
parser.add_argument("--path", help="The location in the filesystem of the Geogig repository.")
parser.add_argument("--name", help="The name of the GeoGig repo and data store in GeoServer.")
parser.add_argument("--geoserver", help="The url of the GeoServer servicing the GeoGig repository.")
parser.add_argument("--workspace", help="The GeoServer workspace to use for the data store.")
#parser.add_argument("--path", help="The location in the filesystem of the Geogig repository.")
parser.add_argument("--username", help="The username to use for basic auth requests.")
parser.add_argument("--password", help="The password to use for basic auth requests.")
parser.add_argument('--verbose', '-v', default=0, action='count', help="Print out intermediate status messages.")
parser.add_argument("--publish_datastore", default=0, action='count', help="Publish datastore in GeoServer for GeoGig repository")
parser.add_argument('--publish_layers', default=0, action='count', help="Publish layers from GeoGig data store")
args = parser.parse_args()
#==#
_geogig_init_repo.run(args)
| Python | 0.000001 |
64cbe20e2a415d4ee294862acc02a6a7682d7af3 | Isolate memcache test from network | tests/nydus/db/backends/memcache/tests.py | tests/nydus/db/backends/memcache/tests.py | from __future__ import absolute_import
from tests import BaseTest
from nydus.db import create_cluster
from nydus.db.base import BaseCluster
from nydus.db.backends.memcache import Memcache
import mock
import pylibmc
class MemcacheTest(BaseTest):
def setUp(self):
self.memcache = Memcache(num=0)
def test_provides_retryable_exceptions(self):
self.assertEquals(Memcache.retryable_exceptions, frozenset([pylibmc.Error]))
def test_provides_identifier(self):
self.assertEquals(self.memcache.identifier, str(self.memcache.identifier))
@mock.patch('pylibmc.Client')
def test_client_instantiates_with_kwargs(self, Client):
client = Memcache(num=0)
client.connect()
self.assertEquals(Client.call_count, 1)
Client.assert_any_call(['localhost:11211'], binary=True, behaviors=None)
@mock.patch('pylibmc.Client.get')
def test_with_cluster(self, get):
p = BaseCluster(hosts={0: self.memcache})
result = p.get('MemcacheTest_with_cluster')
get.assert_called_once_with('MemcacheTest_with_cluster')
self.assertEquals(result, get.return_value)
@mock.patch('pylibmc.Client')
def test_map_does_pipeline(self, Client):
cluster = create_cluster({
'engine': 'nydus.db.backends.memcache.Memcache',
'router': 'nydus.db.routers.RoundRobinRouter',
'hosts': {
0: {'binary': True},
1: {'binary': True},
2: {'binary': True},
3: {'binary': True},
}
})
with cluster.map() as conn:
conn.set('a', 1)
conn.set('b', 2)
conn.set('c', 3)
conn.set('d', 4)
conn.set('e', 5)
conn.set('f', 6)
conn.set('g', 7)
self.assertEqual(Client().set.call_count, 7)
self.assertEqual(Client.call_count, 5)
self.assertEqual(len(conn.get_results()), 7)
@mock.patch('pylibmc.Client')
def test_pipeline_get_multi(self, Client):
cluster = create_cluster({
'engine': 'nydus.db.backends.memcache.Memcache',
'router': 'nydus.db.routers.RoundRobinRouter',
'hosts': {
0: {'binary': True},
1: {'binary': True},
}
})
keys = ['a', 'b', 'c', 'd', 'e', 'f']
with cluster.map() as conn:
for key in keys:
conn.get(key)
self.assertEqual(len(conn.get_results()), len(keys))
self.assertEqual(Client().get.call_count, 0)
# Note: This is two because it should execute the command once for each
# of the two servers.
self.assertEqual(Client().get_multi.call_count, 2)
| from __future__ import absolute_import
from tests import BaseTest
from nydus.db import create_cluster
from nydus.db.base import BaseCluster
from nydus.db.backends.memcache import Memcache
import mock
import pylibmc
class MemcacheTest(BaseTest):
def setUp(self):
self.memcache = Memcache(num=0)
def test_provides_retryable_exceptions(self):
self.assertEquals(Memcache.retryable_exceptions, frozenset([pylibmc.Error]))
def test_provides_identifier(self):
self.assertEquals(self.memcache.identifier, str(self.memcache.identifier))
@mock.patch('pylibmc.Client')
def test_client_instantiates_with_kwargs(self, Client):
client = Memcache(num=0)
client.connect()
self.assertEquals(Client.call_count, 1)
Client.assert_any_call(['localhost:11211'], binary=True, behaviors=None)
def test_with_cluster(self):
p = BaseCluster(hosts={0: self.memcache})
self.assertEquals(p.get('MemcacheTest_with_cluster'), None)
@mock.patch('pylibmc.Client')
def test_map_does_pipeline(self, Client):
cluster = create_cluster({
'engine': 'nydus.db.backends.memcache.Memcache',
'router': 'nydus.db.routers.RoundRobinRouter',
'hosts': {
0: {'binary': True},
1: {'binary': True},
2: {'binary': True},
3: {'binary': True},
}
})
with cluster.map() as conn:
conn.set('a', 1)
conn.set('b', 2)
conn.set('c', 3)
conn.set('d', 4)
conn.set('e', 5)
conn.set('f', 6)
conn.set('g', 7)
self.assertEqual(Client().set.call_count, 7)
self.assertEqual(Client.call_count, 5)
self.assertEqual(len(conn.get_results()), 7)
@mock.patch('pylibmc.Client')
def test_pipeline_get_multi(self, Client):
cluster = create_cluster({
'engine': 'nydus.db.backends.memcache.Memcache',
'router': 'nydus.db.routers.RoundRobinRouter',
'hosts': {
0: {'binary': True},
1: {'binary': True},
}
})
keys = ['a', 'b', 'c', 'd', 'e', 'f']
with cluster.map() as conn:
for key in keys:
conn.get(key)
self.assertEqual(len(conn.get_results()), len(keys))
self.assertEqual(Client().get.call_count, 0)
# Note: This is two because it should execute the command once for each
# of the two servers.
self.assertEqual(Client().get_multi.call_count, 2)
| Python | 0.000001 |
a19a52a42486eaa8e849d2f0a175f9a76497029d | bump version number | intercom/__init__.py | intercom/__init__.py | __version__ = "0.0.7"
| __version__ = "0.0.6" | Python | 0.000004 |
4324418262824f59e9b38dc01673f694d434f7d4 | add check | lesscpy/plib/call.py | lesscpy/plib/call.py | """
"""
import re
from urllib.parse import quote as urlquote
from .node import Node
import lesscpy.lessc.utility as utility
import lesscpy.lessc.color as Color
class Call(Node):
def parse(self, scope):
if not self.parsed:
name = ''.join(self.tokens.pop(0))
parsed = self.process(self.tokens, scope)
if name == '%(':
name = 'sformat'
elif name == '~':
name = 'e'
color = Color.Color()
args = [t for t in parsed
if type(t) is not str or t not in '(),']
if hasattr(self, name):
try:
return getattr(self, name)(*args)
except ValueError:
pass
if hasattr(color, name):
try:
return getattr(color, name)(*args)
except ValueError:
pass
self.parsed = name + ''.join([p for p in parsed])
return self.parsed
def e(self, string):
""" Less Escape.
@param string: value
@return string
"""
return utility.destring(string.strip('~'))
def sformat(self, *args):
""" String format
@param list: values
@return string
"""
format = args[0]
items = []
m = re.findall('(%[asdA])', format)
i = 1
for n in m:
v = {
'%d' : int,
'%A' : urlquote,
'%s' : utility.destring,
}.get(n, str)(args[i])
items.append(v)
i += 1
format = format.replace('%A', '%s')
return format % tuple(items)
def increment(self, v):
""" Increment function
@param Mixed: value
@return: incremented value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(n+1, u)
def decrement(self, v):
""" Decrement function
@param Mixed: value
@return: incremented value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(n-1, u)
def add(self, *args):
""" Add integers
@param list: values
@return: int
"""
return sum([int(v) for v in args])
def round(self, v):
""" Round number
@param Mixed: value
@return: rounded value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(round(float(n)), u)
| """
"""
import re
from urllib.parse import quote as urlquote
from .node import Node
import lesscpy.lessc.utility as utility
import lesscpy.lessc.color as Color
class Call(Node):
def parse(self, scope):
name = ''.join(self.tokens.pop(0))
parsed = self.process(self.tokens, scope)
if name == '%(':
name = 'sformat'
elif name == '~':
name = 'e'
color = Color.Color()
args = [t for t in parsed
if type(t) is not str or t not in '(),']
if hasattr(self, name):
try:
return getattr(self, name)(*args)
except ValueError:
pass
if hasattr(color, name):
try:
return getattr(color, name)(*args)
except ValueError:
pass
return name + ''.join([p for p in parsed])
def e(self, string):
""" Less Escape.
@param string: value
@return string
"""
return utility.destring(string.strip('~'))
def sformat(self, *args):
""" String format
@param list: values
@return string
"""
format = args[0]
items = []
m = re.findall('(%[asdA])', format)
i = 1
for n in m:
v = {
'%d' : int,
'%A' : urlquote,
'%s' : utility.destring,
}.get(n, str)(args[i])
items.append(v)
i += 1
format = format.replace('%A', '%s')
return format % tuple(items)
def increment(self, v):
""" Increment function
@param Mixed: value
@return: incremented value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(n+1, u)
def decrement(self, v):
""" Decrement function
@param Mixed: value
@return: incremented value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(n-1, u)
def add(self, *args):
""" Add integers
@param list: values
@return: int
"""
return sum([int(v) for v in args])
def round(self, v):
""" Round number
@param Mixed: value
@return: rounded value
"""
n, u = utility.analyze_number(v)
return utility.with_unit(round(float(n)), u)
| Python | 0 |
b4abd0045178f3368fb1ddc0ba5b96094c933c22 | Verify domain exists before scanning | musubi/scan.py | musubi/scan.py |
"""
Scan multiple DNSBLs for an IP address or domain.
Copyright (c) 2012, Rob Cakebread
All rights reserved.
If you give the domain, musubi will try to find all your IP addresses
for each mail server by querying MX DNS records and then doing a lookup
for the IPs. If your mail server uses round-robin DNS, this of course
won't find all the IPs. You must find out the IP CIDR range and then
give that, e.g.
musubi scan 192.0.64.0/24
"""
import logging
import dns
from cliff.lister import Lister
from .dnsbl import Base
from IPy import IP
import requests
from .netdns import get_mx_hosts, ips_from_domains, get_txt, build_query, \
net_calc, verify_domain
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
DNSBL_LIST = 'http://musubi.cakebread.info/dnsbl.txt'
# Try to get list of working DNSBLs checked hourly, experimental.
# TODO Add options to use local list, pipe in, etc.
req = requests.get(DNSBL_LIST)
if req.status_code == 200:
BASE_DNSBLS = req.text.split()
else:
from .dnsbllist import BASE_DNSBLS
class Scan(Lister):
"""Scan multiple DNSBLs by IP or domain"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Scan, self).get_parser(prog_name)
parser.add_argument('scan', nargs='?', default=None)
return parser
def dnsbl_check(self, ip):
backend = Base(ip=ip, dnsbls=BASE_DNSBLS)
return backend.check()
def dnsbl_scanner(self, rdata, ip):
for dnsbl, blacklisted in self.dnsbl_check(ip):
# Scan.log.debug('Testing: %s' % dnsbl)
if blacklisted:
Scan.log.debug('blacklisted: %s' % dnsbl)
try:
query = build_query(ip, dnsbl)
txt = get_txt(query)[0]
except dns.resolver.NoAnswer:
Scan.log.debug("No TXT record for %s" % query)
rdata.append(
(ip,
dnsbl,
blacklisted,
txt,)
)
return rdata
def take_action(self, parsed_args):
"""This could be a lot prettier if I used these as arguments
instead of trying to detect input type --IP --domain --range
It's just easier to use without them, hmm.
"""
arg = parsed_args.scan
rdata = []
if "/" in arg:
# CIDR notation
ips = net_calc(arg)
else:
try:
# Throw exception if it's not an IP and then try domain name
ip = IP(arg)
ips = [ip]
except ValueError:
if verify_domain(arg):
hosts = get_mx_hosts(arg)
ips = ips_from_domains(hosts)
else:
raise RuntimeError('Can not lookup domain: %s' % arg)
for ip in ips:
ip = str(ip)
rdata = self.dnsbl_scanner(rdata, ip)
if not len(rdata):
# TODO: Check cliff docs for better way to exit if no results!
rdata.append((("", "", "", "")))
#raise RuntimeError('%s is not listed on any DNSBLs monitored by Musubi.' % arg)
Scan.log.debug(rdata)
return (('IP', 'DNSBL Host', 'Response Code', 'DNS TXT Record'), rdata)
|
"""
Scan multiple DNSBLs for IP addresss or domain.
Copyright (c) 2012, Rob Cakebread
All rights reserved.
If you give the domain, musubi will try to find all your IP addresses
for each mail server by querying MX DNS records and then doing a lookup
for the IPs. If your mail server uses round-robin DNS, this of course
won't find all the IPs. You must find out the IP CIDR range and then
give that, e.g.
musubi scan 192.0.64.0/24
"""
import logging
import dns
from cliff.lister import Lister
from .dnsbl import Base
from IPy import IP
import requests
from .netdns import get_mx_hosts, ips_from_domains, get_txt, build_query, \
net_calc
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
DNSBL_LIST = 'http://musubi.cakebread.info/dnsbl.txt'
# Try to get list of working DNSBLs checked hourly, experimental.
# TODO Add options to use local list, pipe in, etc.
req = requests.get(DNSBL_LIST)
if req.status_code == 200:
BASE_DNSBLS = req.text.split()
else:
from .dnsbllist import BASE_DNSBLS
class Scan(Lister):
"""Scan multiple DNSBLs by IP or domain"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Scan, self).get_parser(prog_name)
parser.add_argument('scan', nargs='?', default=None)
return parser
def dnsbl_check(self, ip):
backend = Base(ip=ip, dnsbls=BASE_DNSBLS)
return backend.check()
def dnsbl_scanner(self, rdata, ip):
for dnsbl, blacklisted in self.dnsbl_check(ip):
# Scan.log.debug('Testing: %s' % dnsbl)
if blacklisted:
Scan.log.debug('blacklisted: %s' % dnsbl)
try:
query = build_query(ip, dnsbl)
txt = get_txt(query)[0]
except dns.resolver.NoAnswer:
Scan.log.debug("No TXT record for %s" % query)
rdata.append(
(ip,
dnsbl,
blacklisted,
txt,)
)
return rdata
def take_action(self, parsed_args):
"""This could be a lot prettier if I used these as arguments
instead of trying to detect input type --IP --domain --range
It's just easier to use without them, hmm.
"""
arg = parsed_args.scan
rdata = []
if "/" in arg:
# CIDR notation
ips = net_calc(arg)
else:
try:
# Throw exception if it's not an IP and then try domain name
ip = IP(arg)
ips = [ip]
except ValueError:
hosts = get_mx_hosts(arg)
ips = ips_from_domains(hosts)
for ip in ips:
ip = str(ip)
rdata = self.dnsbl_scanner(rdata, ip)
if not len(rdata):
# TODO: Check cliff docs for better way to exit if no results!
rdata.append((("", "", "", "")))
Scan.log.debug(rdata)
return (('IP', 'DNSBL Host', 'Response Code', 'DNS TXT Record'), rdata)
| Python | 0 |
d23a53f5c97a3939952ecb8f39d24603fe0d4bab | bump `datadog-checks-base` version (#9718) | mysql/setup.py | mysql/setup.py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "mysql", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=20.2.0'
setup(
name='datadog-mysql',
version=ABOUT['__version__'],
description='The MySQL check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent mysql check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.mysql'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
| # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "mysql", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=20.1.0'
setup(
name='datadog-mysql',
version=ABOUT['__version__'],
description='The MySQL check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent mysql check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.mysql'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
| Python | 0 |
1ecf42f474b17e01de12d235a29b08e7f18d0726 | bump version to v1.10.3 | ndd/package.py | ndd/package.py | # -*- coding: utf-8 -*-
"""Template package file"""
__title__ = 'ndd'
__version__ = '1.10.3'
__author__ = 'Simone Marsili'
__summary__ = ''
__url__ = 'https://github.com/simomarsili/ndd'
__email__ = 'simo.marsili@gmail.com'
__license__ = 'BSD 3-Clause'
__copyright__ = 'Copyright (c) 2020, Simone Marsili'
__classifiers__ = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
| # -*- coding: utf-8 -*-
"""Template package file"""
__title__ = 'ndd'
__version__ = '1.10.2'
__author__ = 'Simone Marsili'
__summary__ = ''
__url__ = 'https://github.com/simomarsili/ndd'
__email__ = 'simo.marsili@gmail.com'
__license__ = 'BSD 3-Clause'
__copyright__ = 'Copyright (c) 2020, Simone Marsili'
__classifiers__ = [
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
| Python | 0 |
527ccd5790aa08d33387b43fd25beb2ed20335c7 | remove defaults, use self.asserts | tensorflow/python/ops/script_ops_test.py | tensorflow/python/ops/script_ops_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for script operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import script_ops
from tensorflow.python.ops.script_ops import numpy_function
from tensorflow.python.platform import test
class NumpyFunctionTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_numpy_arguments(self):
def plus(a, b):
return a + b
actual_result = script_ops.numpy_function(plus, [1, 2], dtypes.int32)
expect_result = constant_op.constant(3, dtypes.int32)
self.assertAllEqual(actual_result, expect_result)
def test_stateless_flag(self):
call_count = 0
def plus(a, b):
global call_count
call_count += 1
return a + b
@def_function.function
def tensor_plus_stateful(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=True)
@def_function.function
def tensor_plus_stateless(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=False)
@def_function.function(autograph=False)
def tensor_double_plus_stateless(a, b):
sum1 = tensor_plus_stateless(a, b)
sum2 = tensor_plus_stateless(a, b)
return sum1 + sum2
# different argument
tensor_double_plus_stateless(
constant_op.constant(1),
constant_op.constant(2),
)
self.assertEqual(call_count, 1) # +1 as only the first one was executed
@def_function.function(autograph=False)
def tensor_double_plus_stateful(a, b):
sum1 = tensor_plus_stateful(a, b)
sum2 = tensor_plus_stateful(a, b)
return sum1 + sum2
tensor_double_plus_stateful(
constant_op.constant(3),
constant_op.constant(4),
)
self.assertEqual(call_count, 3) # +2 as it is stateful, both were executed
if __name__ == "__main__":
test.main()
| # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for script operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import script_ops
from tensorflow.python.ops.script_ops import numpy_function
from tensorflow.python.platform import test
class NumpyFunctionTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_numpy_arguments(self):
def plus(a, b):
return a + b
actual_result = script_ops.numpy_function(plus, [1, 2], dtypes.int32)
expect_result = constant_op.constant(3, dtypes.int32)
self.assertAllEqual(actual_result, expect_result)
def test_stateless_flag(self):
call_count = 0
def plus(a, b):
global call_count
call_count += 1
return a + b
@def_function.function
def tensor_plus_stateful(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=True)
@def_function.function
def tensor_plus_stateless(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=False)
@def_function.function(autograph=False)
def tensor_double_plus_stateless(a, b):
sum1 = tensor_plus_stateless(a, b)
sum2 = tensor_plus_stateless(a, b)
return sum1 + sum2
# different argument
tensor_double_plus_stateless(
constant_op.constant(1, dtype=dtypes.int32),
constant_op.constant(2, dtype=dtypes.int32),
)
assert call_count == 1 # +1 as only the first one was executed
@def_function.function(autograph=False)
def tensor_double_plus_stateful(a, b):
sum1 = tensor_plus_stateful(a, b)
sum2 = tensor_plus_stateful(a, b)
return sum1 + sum2
tensor_double_plus_stateful(
constant_op.constant(3, dtype=dtypes.int32),
constant_op.constant(4, dtype=dtypes.int32),
)
assert call_count == 3 # +2 as it is stateful, both were executed
if __name__ == "__main__":
test.main()
| Python | 0.000028 |
10f0807b9ab85bfa6f6bbb4ed533e1a8af642571 | fix bug in raw service | lib/svtplay_dl/service/raw.py | lib/svtplay_dl/service/raw.py | from __future__ import absolute_import
import os
import re
from svtplay_dl.service import Service
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.dash import dashparse
class Raw(Service):
def get(self):
if self.exclude():
return
extention = False
filename = os.path.basename(self.url[:self.url.rfind("/")])
if self.options.output and os.path.isdir(self.options.output):
self.options.output = os.path.join(os.path.dirname(self.options.output), filename)
extention = True
elif self.options.output is None:
self.options.output = filename
extention = True
streams = []
if re.search(".f4m", self.url):
if extention:
self.options.output = "{0}.flv".format(self.options.output)
streams.append(hdsparse(self.options, self.http.request("get", self.url, params={"hdcore": "3.7.0"}), self.url))
if re.search(".m3u8", self.url):
streams.append(hlsparse(self.options, self.http.request("get", self.url), self.url))
if re.search(".mpd", self.url):
streams.append(dashparse(self.options, self.http.request("get", self.url), self.url))
for stream in streams:
if stream:
for n in list(stream.keys()):
yield stream[n]
| from __future__ import absolute_import
import os
import re
from svtplay_dl.service import Service
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.dash import dashparse
class Raw(Service):
def get(self):
if self.exclude():
return
extention = False
filename = os.path.basename(self.url[:self.url.rfind("/")])
if self.options.output and os.path.isdir(self.options.output):
self.options.output = os.path.join(os.path.dirname(self.options.output), filename)
extention = True
elif self.options.output is None:
self.options.output = filename
extention = True
streams = []
if re.search(".f4m", self.url):
if extention:
self.options.output = "{0}.flv".format(self.options.output)
streams.append(hdsparse(self.options, self.http.request("get", self.url, params={"hdcore": "3.7.0"}), self.url))
if re.search(".m3u8", self.url):
streams.append(hlsparse(self.options, self.http.request("get", self.url), self.url))
if re.search(".mpd", self.url):
streams.append(dashparse(self.options, self.http.request("get", self.url), self.url))
for stream in streams:
for n in list(stream.keys()):
yield stream[n]
| Python | 0 |
905c6d82c0b568788cd755cb5a98b0e24550f9a5 | test .to() method on particle collection | streams/nbody/tests/test_particles.py | streams/nbody/tests/test_particles.py | # coding: utf-8
""" """
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
import numpy as np
import pytest
from ...misc.units import UnitSystem
from ..particles import *
usys = UnitSystem(u.kpc, u.Myr, u.M_sun)
def test_particlecollection_init():
# Init with individual arrays of ndim=1
r = np.random.random(3)*u.kpc
v = np.random.random(3)*u.km/u.s
m = np.random.random()*u.M_sun
with pytest.raises(ValueError):
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.kpc/u.Myr
m = np.random.random(10)*u.M_sun
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
assert np.all(pc.r.value == r.value)
assert np.all(pc.v.value == v.value)
assert np.all(pc.m.value == m.value)
assert np.all(pc._r == r.value)
assert np.all(pc._v == v.value)
assert np.all(pc._m == m.value)
def test_acceleration():
r = np.array([[1.,0.],
[0, 1.],
[-1., 0.],
[0., -1.]])*u.kpc
v = np.zeros_like(r.value)*u.km/u.s
m = np.random.random()*u.M_sun
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
pc.acceleration_at(np.array([0.,0.])*u.kpc, m=1.*u.M_sun)
a = pc.acceleration_at(np.array([[0.5,0.5], [0.0,0.0], [-0.5, -0.5]])*u.kpc,
m=[1.,1.,1.]*u.M_sun)
def test_merge():
# test merging two particle collections
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.km/u.s
m = np.random.random(10)*u.M_sun
pc1 = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.km/u.s
m = np.random.random(10)*u.M_sun
pc2 = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
pc_merged = pc1.merge(pc2)
assert pc_merged._r.shape == (20,3)
def test_to():
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.kpc/u.Myr
m = np.random.random(10)*u.M_sun
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
usys2 = UnitSystem(u.km, u.s, u.kg)
pc2 = pc.to(usys2)
assert np.all(pc2._r == r.to(u.km).value)
assert np.all(pc2._v == v.to(u.km/u.s).value)
assert np.all(pc2._m == m.to(u.kg).value) | # coding: utf-8
""" """
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
import numpy as np
import pytest
from ...misc.units import UnitSystem
from ..particles import *
usys = UnitSystem(u.kpc, u.Myr, u.M_sun)
def test_particlecollection_init():
# Init with individual arrays of ndim=1
r = np.random.random(3)*u.kpc
v = np.random.random(3)*u.km/u.s
m = np.random.random()*u.M_sun
with pytest.raises(ValueError):
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.kpc/u.Myr
m = np.random.random(10)*u.M_sun
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
assert np.all(pc.r.value == r.value)
assert np.all(pc.v.value == v.value)
assert np.all(pc.m.value == m.value)
assert np.all(pc._r == r.value)
assert np.all(pc._v == v.value)
assert np.all(pc._m == m.value)
def test_acceleration():
r = np.array([[1.,0.],
[0, 1.],
[-1., 0.],
[0., -1.]])*u.kpc
v = np.zeros_like(r.value)*u.km/u.s
m = np.random.random()*u.M_sun
pc = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
pc.acceleration_at(np.array([0.,0.])*u.kpc, m=1.*u.M_sun)
a = pc.acceleration_at(np.array([[0.5,0.5], [0.0,0.0], [-0.5, -0.5]])*u.kpc,
m=[1.,1.,1.]*u.M_sun)
def test_merge():
# test merging two particle collections
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.km/u.s
m = np.random.random(10)*u.M_sun
pc1 = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
r = np.random.random(size=(10,3))*u.kpc
v = np.random.random(size=(10,3))*u.km/u.s
m = np.random.random(10)*u.M_sun
pc2 = ParticleCollection(r=r, v=v, m=m, unit_system=usys)
pc_merged = pc1.merge(pc2)
assert pc_merged._r.shape == (20,3) | Python | 0 |
6a2aa6051c7922d1b2b37824d92634a4880e9ff2 | Correct semantic version format. | tensorflow_probability/python/version.py | tensorflow_probability/python/version.py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define TensorFlow Probability version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '5'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_VERSION_SUFFIX = 'dev'
# Example, '0.4.0-dev'
__version__ = '.'.join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
if _VERSION_SUFFIX:
__version__ = '{}-{}'.format(__version__, _VERSION_SUFFIX)
| # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Define TensorFlow Probability version information."""
# We follow Semantic Versioning (https://semver.org/)
_MAJOR_VERSION = '0'
_MINOR_VERSION = '5'
_PATCH_VERSION = '0'
# When building releases, we can update this value on the release branch to
# reflect the current release candidate ('rc0', 'rc1') or, finally, the official
# stable release (indicated by `_VERSION_SUFFIX = ''`). Outside the context of a
# release branch, the current version is by default assumed to be a
# 'development' version, labeled 'dev'.
_VERSION_SUFFIX = 'dev'
# Example, '0.4.0.dev'
__version__ = '.'.join(s for s in [
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
_VERSION_SUFFIX,
] if s) # Prevent trailing dot when version suffix is empty.
| Python | 0.00053 |
82665b999fb07e3ebc41de8132ba9d22dc04140c | Change version number back to 0.8.0.dev | neo/version.py | neo/version.py | # -*- coding: utf-8 -*-
version = '0.8.0.dev'
| # -*- coding: utf-8 -*-
version = '0.7.1'
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.