commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
3b77fbb82d2ba098f00f7221070f9610d0d90809 | add unknown person | game.py | game.py | import random
from adventurelib import Item, Bag, when, start
import rooms
import characters
from sys import exit
people = '123456'
rooms = 'abcdef'
# murder configuration
# who was where
# who is the murderer
# current configuration
# who was where
# player location
murder_config_people = list(people)
random.shuffle(murder_config_people)
murder_location = random.choice(rooms)
murderer = people[rooms.find(murder_location)]
current_config_people = list(people)
random.shuffle(current_config_people)
current_location = random.choice(rooms)
@when('where am i')
def my_room():
print("I am in: ", current_location)
@when('go to ROOM')
@when('go to the ROOM')
def to_room(room):
if room in rooms:
print("I am now in %s" % room)
global current_location
current_location = room
else:
print("I can't find the %s" % room)
@when('it was M')
def accuse(m):
if m == murderer:
print ("Yes, %s is the murderer!" % m)
exit
else:
if m in people:
print ("%s said: 'How could you!'" % m)
else:
print ("No one has ever heard of '%s'!" % m)
start()
| import random
from adventurelib import Item, Bag, when, start
import rooms
import characters
from sys import exit
people = '123456'
rooms = 'abcdef'
# murder configuration
# who was where
# who is the murderer
# current configuration
# who was where
# player location
murder_config_people = list(people)
random.shuffle(murder_config_people)
murder_location = random.choice(rooms)
murderer = people[rooms.find(murder_location)]
current_config_people = list(people)
random.shuffle(current_config_people)
current_location = random.choice(rooms)
@when('where am i')
def my_room():
print("I am in: " , current_location)
@when('go to ROOM')
@when('go to the ROOM')
def to_room(room):
if room in rooms:
print("I am now in %s" % room)
global current_location
current_location = room
else:
print("I can't find the %s" % room)
@when('it was M')
def accuse(m):
if m == murderer:
print ("Yes, %s is the murderer!" % m)
exit
else:
print ("%s said: 'How could you!'" % m)
start() | Python | 0.999999 |
55ff20aa2d2504fb85fa2f63cc9b52934245b849 | make the subscription also work for new minions, fixes #8 | saltobserver/redis_stream.py | saltobserver/redis_stream.py | from saltobserver import app, redis_pool
import gevent
from redis import Redis
from distutils.version import StrictVersion
import json
import time
class RedisStream(object):
def __init__(self):
self.redis = Redis(connection_pool=redis_pool)
actual_version = StrictVersion(self.redis.info()['redis_version'])
minimum_version = StrictVersion("2.8.0")
if actual_version < minimum_version:
raise NotImplementedError
self.redis.config_set('notify-keyspace-events', 'Ks')
self.pubsub = self.redis.pubsub()
self.pubsub.psubscribe("__keyspace@0__:*:*.*")
# TODO: make redis db number (this ^) configurable
self.clients = list()
def _generator(self):
for message in self.pubsub.listen():
if message['type'] == 'pmessage':
app.logger.debug("Message received from Redis, building data packet.")
minion_id = message['channel'].split(':')[1]
function = message['channel'].split(':')[2]
jid = self.redis.lindex('{0}:{1}'.format(minion_id, function), 0)
success = True if json.loads(self.redis.get('{0}:{1}'.format(minion_id, jid))).get('retcode') == 0 else False
try:
timestamp = time.strptime(jid, "%Y%m%d%H%M%S%f")
except ValueError:
continue # do not pass info with faked jid's
yield dict(minion_id=minion_id, function=function, jid=jid, success=success, time=time.strftime('%Y-%m-%d, at %H:%M:%S', timestamp))
def register(self, client, function):
self.clients.append((client, function))
app.logger.debug("Client %s (function %s) registered." % (client, function))
def send_or_discard_connection(self, client_tupl, data):
client, function = client_tupl
try:
client.send(json.dumps(data))
app.logger.debug("Data for jid %s sent to %s (function %s)" % (data['jid'], client, function))
except Exception as e: # TODO: this is either a ValueError from json, or some other exception from gevents websocket stuff
self.clients.remove(client_tupl)
app.logger.debug("%s (function %s) removed with reason: %s" % (client, function, e))
def run(self):
for data in self._generator():
sent = 0
for client, function in self.clients:
if data['function'] == function:
gevent.spawn(self.send_or_discard_connection, (client, function), data)
sent = sent + 1
app.logger.debug("Attempted to send data packet sent to %s of %s clients." % (sent, len(self.clients)))
def start(self):
gevent.spawn(self.run)
| from saltobserver import app, redis_pool
import gevent
from redis import Redis
from distutils.version import StrictVersion
import json
import time
class RedisStream(object):
def __init__(self):
self.redis = Redis(connection_pool=redis_pool)
actual_version = StrictVersion(self.redis.info()['redis_version'])
minimum_version = StrictVersion("2.8.0")
if actual_version < minimum_version:
raise NotImplementedError
self.redis.config_set('notify-keyspace-events', 'Ks')
self.pubsub = self.redis.pubsub()
# TODO: update subscription on newcomer minions
self.pubsub.psubscribe(["__keyspace@0__:{0}:*.*".format(minion) for minion in self.redis.smembers('minions')])
self.clients = list()
def _generator(self):
for message in self.pubsub.listen():
if message['type'] == 'pmessage':
app.logger.debug("Message received from Redis, building data packet.")
minion_id = message['channel'].split(':')[1]
function = message['channel'].split(':')[2]
jid = self.redis.lindex('{0}:{1}'.format(minion_id, function), 0)
success = True if json.loads(self.redis.get('{0}:{1}'.format(minion_id, jid))).get('retcode') == 0 else False
try:
timestamp = time.strptime(jid, "%Y%m%d%H%M%S%f")
except ValueError:
continue # do not pass info with faked jid's
yield dict(minion_id=minion_id, function=function, jid=jid, success=success, time=time.strftime('%Y-%m-%d, at %H:%M:%S', timestamp))
def register(self, client, function):
self.clients.append((client, function))
app.logger.debug("Client %s (function %s) registered." % (client, function))
def send_or_discard_connection(self, client_tupl, data):
client, function = client_tupl
try:
client.send(json.dumps(data))
app.logger.debug("Data for jid %s sent to %s (function %s)" % (data['jid'], client, function))
except Exception as e: # TODO: this is either a ValueError from json, or some other exception from gevents websocket stuff
self.clients.remove(client_tupl)
app.logger.debug("%s (function %s) removed with reason: %s" % (client, function, e))
def run(self):
for data in self._generator():
sent = 0
for client, function in self.clients:
if data['function'] == function:
gevent.spawn(self.send_or_discard_connection, (client, function), data)
sent = sent + 1
app.logger.debug("Attempted to send data packet sent to %s of %s clients." % (sent, len(self.clients)))
def start(self):
gevent.spawn(self.run)
| Python | 0 |
144a35d639ccd3a60f100793df00fd62aa81766b | document no trust algo | game.py | game.py | """
Play with trust:
for player in game:
if current player:
send move
else:
listen for move
receive move
decide winner
Play trusting no one:
Swap hashes:
for player in game:
if current player:
send hasher(move + salt)
else:
listen for hash
receive hash
Swap salts:
for player in game:
if current player:
send move + salt
else:
listen for move + salt
receive move + salt
verify hasher(move + salt) == hash
decide winner
"""
| """
For player in game:
if current player:
send move
else:
listen for move
receive move
decide winner
"""
| Python | 0 |
866e0ec72163debd9f46b1ecb8e4d07b040694b4 | Fix absolute import | sand/cytoscape/themes/ops.py | sand/cytoscape/themes/ops.py | from . import colors as c
from . import label_positions as p
settings = {
# node style
'NODE_TRANSPARENCY': 255,
'NODE_SIZE': 25,
'NODE_BORDER_WIDTH': 4,
'NODE_BORDER_PAINT': c.BRIGHT_GREEN,
'NODE_FILL_COLOR': c.DARK_GREEN,
'NODE_SELECTED_PAINT': c.BRIGHT_YELLOW,
# node label style
'NODE_LABEL_COLOR': c.BRIGHT_GRAY,
'NODE_LABEL_FONT_SIZE': 16,
'NODE_LABEL_POSITION': p.LOWER_RIGHT,
# edge style
'EDGE_TRANSPARENCY': 255,
'EDGE_WIDTH': 2.5,
'EDGE_LINE_TYPE': 'SOLID',
'EDGE_STROKE_SELECTED_PAINT': c.BRIGHT_YELLOW,
'EDGE_STROKE_UNSELECTED_PAINT': c.BRIGHT_GRAY,
'EDGE_TARGET_ARROW_UNSELECTED_PAINT': c.BRIGHT_GRAY,
'EDGE_TARGET_ARROW_SHAPE': 'DELTA',
# network style
'NETWORK_BACKGROUND_PAINT': c.DARK_GRAY
}
| import sand.cytoscape.themes.colors as c
import sand.cytoscape.themes.label_positions as p
settings = {
# node style
'NODE_TRANSPARENCY': 255,
'NODE_SIZE': 25,
'NODE_BORDER_WIDTH': 4,
'NODE_BORDER_PAINT': c.BRIGHT_GREEN,
'NODE_FILL_COLOR': c.DARK_GREEN,
'NODE_SELECTED_PAINT': c.BRIGHT_YELLOW,
# node label style
'NODE_LABEL_COLOR': c.BRIGHT_GRAY,
'NODE_LABEL_FONT_SIZE': 16,
'NODE_LABEL_POSITION': p.LOWER_RIGHT,
# edge style
'EDGE_TRANSPARENCY': 255,
'EDGE_WIDTH': 2.5,
'EDGE_LINE_TYPE': 'SOLID',
'EDGE_STROKE_SELECTED_PAINT': c.BRIGHT_YELLOW,
'EDGE_STROKE_UNSELECTED_PAINT': c.BRIGHT_GRAY,
'EDGE_TARGET_ARROW_UNSELECTED_PAINT': c.BRIGHT_GRAY,
'EDGE_TARGET_ARROW_SHAPE': 'DELTA',
# network style
'NETWORK_BACKGROUND_PAINT': c.DARK_GRAY
}
| Python | 0.000173 |
75635315598ccbcad887bf77f7cdc99772157033 | Add construct_data function to construct data for the API | gist.py | gist.py | import os
import sys
from parser import parser
args = parser.parse_args()
def process_files(args):
"""
:param args:
The arguments parsed by argparse
:returns:
A dict containing file_names as keys and a
dict containing a key `content` as the value
Example return:
{
"file_name": {
"content": {
# file contents
}
}
}
"""
files = [os.path.abspath(file) for file in args.files]
file_contents = {}
for file in files:
try:
f = open(file)
file_contents[os.path.split(file)[1]] = f.read()
f.close()
except FileNotFoundError:
print('File "{}"\n\tdoes not exist'.format(file))
should_create = input('Create the gist without this file [Y/n]: ') or 'Y'
if not should_create == 'Y':
sys.exit("gist: exiting ...")
return file_contents
def create_gist(data):
"""
:param data:
The JSON data to be posted to the API
:returns:
request object of the POST request made to create the gist
"""
end_point = 'https://api.github.com/gists'
rq = requests.post(end_point, json=data)
return rq
def construct_data(args):
"""
:param args:
The arguments parsed by argparse
:returns:
`data` dict to be passed to crete the POST request
"""
data = {
"public": args.secret,
"description": args.description,
"files": process_files(args)
}
return data
| import os
import sys
from parser import parser
args = parser.parse_args()
def process_files(args):
"""
:param args:
The arguments parsed by argparse
:returns:
A dict containing file_names as keys and a
dict containing a key `content` as the value
Example return:
{
"file_name": {
"content": {
# file contents
}
}
}
"""
files = [os.path.abspath(file) for file in args.files]
file_contents = {}
for file in files:
try:
f = open(file)
file_contents[os.path.split(file)[1]] = f.read()
f.close()
except FileNotFoundError:
print('File "{}"\n\tdoes not exist'.format(file))
should_create = input('Create the gist without this file [Y/n]: ') or 'Y'
if not should_create == 'Y':
sys.exit("gist: exiting ...")
return file_contents
def create_gist(data):
"""
:param data:
The JSON data to be posted to the API
:returns:
request object of the POST request made to create the gist
"""
end_point = 'https://api.github.com/gists'
rq = requests.post(end_point, json=data)
return rq
| Python | 0 |
8db806d30d7591828528ac937e8f3b334e957ed3 | remove shim should by symmetric to add_shim | _distutils_hack/__init__.py | _distutils_hack/__init__.py | import sys
import os
import re
import importlib
import warnings
is_pypy = '__pypy__' in sys.builtin_module_names
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return which == 'local'
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
warnings.warn(
"Distutils was imported before Setuptools. This usage is discouraged "
"and may exhibit undesirable behaviors or errors. Please use "
"Setuptools' objects directly or at least import Setuptools first.")
def clear_distutils():
if 'distutils' not in sys.modules:
return
warnings.warn("Setuptools is replacing distutils.")
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
def ensure_local_distutils():
clear_distutils()
distutils = importlib.import_module('setuptools._distutils')
distutils.__name__ = 'distutils'
sys.modules['distutils'] = distutils
# sanity check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
def do_override():
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
warn_distutils_present()
if enabled():
ensure_local_distutils()
class DistutilsMetaFinder:
def find_spec(self, fullname, path, target=None):
if path is not None or fullname != "distutils":
return None
return self.get_distutils_spec()
def get_distutils_spec(self):
import importlib.util
class DistutilsLoader(importlib.util.abc.Loader):
def create_module(self, spec):
return importlib.import_module('._distutils', 'setuptools')
def exec_module(self, module):
pass
return importlib.util.spec_from_loader('distutils', DistutilsLoader())
DISTUTILS_FINDER = DistutilsMetaFinder()
def add_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
def remove_shim():
try:
sys.meta_path.remove(DISTUTILS_FINDER)
except ValueError:
pass
| import sys
import os
import re
import importlib
import warnings
is_pypy = '__pypy__' in sys.builtin_module_names
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return which == 'local'
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
warnings.warn(
"Distutils was imported before Setuptools. This usage is discouraged "
"and may exhibit undesirable behaviors or errors. Please use "
"Setuptools' objects directly or at least import Setuptools first.")
def clear_distutils():
if 'distutils' not in sys.modules:
return
warnings.warn("Setuptools is replacing distutils.")
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
def ensure_local_distutils():
clear_distutils()
distutils = importlib.import_module('setuptools._distutils')
distutils.__name__ = 'distutils'
sys.modules['distutils'] = distutils
# sanity check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
def do_override():
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
warn_distutils_present()
if enabled():
ensure_local_distutils()
class DistutilsMetaFinder:
def find_spec(self, fullname, path, target=None):
if path is not None or fullname != "distutils":
return None
return self.get_distutils_spec()
def get_distutils_spec(self):
import importlib.util
class DistutilsLoader(importlib.util.abc.Loader):
def create_module(self, spec):
return importlib.import_module('._distutils', 'setuptools')
def exec_module(self, module):
pass
return importlib.util.spec_from_loader('distutils', DistutilsLoader())
DISTUTILS_FINDER = DistutilsMetaFinder()
def add_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
def remove_shim():
try:
sys.path.remove(DISTUTILS_FINDER)
except ValueError:
pass
| Python | 0.000001 |
37ab58016e69993b5ab1d63c99d9afcf54bd95af | Implement more TGT Neutral Epics | fireplace/cards/tgt/neutral_epic.py | fireplace/cards/tgt/neutral_epic.py | from ..utils import *
##
# Minions
# Twilight Guardian
class AT_017:
play = HOLDING_DRAGON & Buff(SELF, "AT_017e")
# Sideshow Spelleater
class AT_098:
play = Summon(CONTROLLER, Copy(ENEMY_HERO_POWER))
# Kodorider
class AT_099:
inspire = Summon(CONTROLLER, "AT_099t")
# Master of Ceremonies
class AT_117:
play = Find(FRIENDLY_MINIONS + SPELLPOWER) & Buff(SELF, "AT_117e")
# Frost Giant
class AT_120:
cost = lambda self, i: i - self.controller.times_hero_power_used_this_game
# Crowd Favorite
class AT_121:
events = Play(CONTROLLER, BATTLECRY).on(Buff(SELF, "AT_121e"))
| from ..utils import *
##
# Minions
# Kodorider
class AT_099:
inspire = Summon(CONTROLLER, "AT_099t")
| Python | 0.000006 |
dfe1213ba9de5e5e5aaf9690a2cf5e3b295869fa | Remove Python 3 incompatible print statement | examples/graph/degree_sequence.py | examples/graph/degree_sequence.py | #!/usr/bin/env python
"""
Random graph from given degree sequence.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__date__ = "$Date: 2004-11-03 08:11:09 -0700 (Wed, 03 Nov 2004) $"
__credits__ = """"""
__revision__ = "$Revision: 503 $"
# Copyright (C) 2004 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from networkx import *
z=[5,3,3,3,3,2,2,2,1,1,1]
print(is_valid_degree_sequence(z))
print("Configuration model")
G=configuration_model(z) # configuration model
degree_sequence=list(degree(G).values()) # degree sequence
print("Degree sequence %s" % degree_sequence)
print("Degree histogram")
hist={}
for d in degree_sequence:
if d in hist:
hist[d]+=1
else:
hist[d]=1
print("degree #nodes")
for d in hist:
print('%d %d' % (d,hist[d]))
| #!/usr/bin/env python
"""
Random graph from given degree sequence.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__date__ = "$Date: 2004-11-03 08:11:09 -0700 (Wed, 03 Nov 2004) $"
__credits__ = """"""
__revision__ = "$Revision: 503 $"
# Copyright (C) 2004 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from networkx import *
z=[5,3,3,3,3,2,2,2,1,1,1]
print is_valid_degree_sequence(z)
print("Configuration model")
G=configuration_model(z) # configuration model
degree_sequence=list(degree(G).values()) # degree sequence
print("Degree sequence %s" % degree_sequence)
print("Degree histogram")
hist={}
for d in degree_sequence:
if d in hist:
hist[d]+=1
else:
hist[d]=1
print("degree #nodes")
for d in hist:
print('%d %d' % (d,hist[d]))
| Python | 0.998568 |
fcb3d026faf4648bbacc73f84e0e6dd6a25eeb6d | delete plotting function | code/lamost/li_giants/residuals.py | code/lamost/li_giants/residuals.py | """ Calculate residuals """
import numpy as np
import matplotlib.pyplot as plt
from math import log10, floor
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
plt.rc('text', usetex=True)
from matplotlib.ticker import MaxNLocator
import sys
sys.path.insert(0, '/home/annaho/TheCannon')
from TheCannon import model
from TheCannon import dataset
def get_residuals(ds, m):
""" Using the dataset and model object, calculate the residuals and return
Parameters
----------
ds: dataset object
m: model object
Return
------
residuals: array of residuals, spec minus model spec
"""
m.infer_spectra(ds)
resid = ds.test_flux - m.model_spectra
return resid
def load_model():
""" Load the model
Parameters
----------
direc: directory with all of the model files
Returns
-------
m: model object
"""
direc = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
m = model.CannonModel(2)
m.coeffs = np.load(direc + "/coeffs.npz")['arr_0']
m.scatters = np.load(direc + "/scatters.npz")['arr_0']
m.chisqs = np.load(direc + "/chisqs.npz")['arr_0']
m.pivots = np.load(direc + "/pivots.npz")['arr_0']
return m
def load_dataset(date):
""" Load the dataset for a single date
Parameters
----------
date: the date (string) for which to load the data & dataset
Returns
-------
ds: the dataset object
"""
DATA_DIR = "/home/annaho/TheCannon/data/lamost"
WL_DIR = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
wl = np.load(WL_DIR + "/wl_cols.npz")['arr_0']
ds = dataset.Dataset(wl, [], [], [], [], [], [], [])
test_label = np.load("%s/%s_all_cannon_labels.npz" %(DATA_DIR,date))['arr_0']
ds.test_label_vals = test_label
ds.test_flux = np.load("%s/%s_test_flux.npz" %(DATA_DIR,date))['arr_0']
ds.test_ivar = np.load("%s/%s_test_ivar.npz" %(DATA_DIR,date))['arr_0']
return ds
def run_all_data():
""" Load the data that we're using to search for Li-rich giants.
Store it in dataset and model objects. """
DATA_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels"
dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
dates = np.array(dates)
dates = np.delete(dates, np.where(dates=='.directory')[0][0])
dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0])
dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0])
for date in dates:
print ("loading data for %s" %date)
load_date(date)
if __name__=="__main__":
# load a spectrum
ds = load_dataset("20121006")
m = load_model()
resid = get_residuals(ds, m)
| """ Calculate residuals """
import numpy as np
import matplotlib.pyplot as plt
from math import log10, floor
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
plt.rc('text', usetex=True)
from matplotlib.ticker import MaxNLocator
import sys
sys.path.insert(0, '/home/annaho/TheCannon')
from TheCannon import model
from TheCannon import dataset
def get_residuals(ds, m):
""" Using the dataset and model object, calculate the residuals and return
Parameters
----------
ds: dataset object
m: model object
Return
------
residuals: array of residuals, spec minus model spec
"""
m.infer_spectra(ds)
resid = ds.test_flux - m.model_spectra
return resid
def load_model():
""" Load the model
Parameters
----------
direc: directory with all of the model files
Returns
-------
m: model object
"""
direc = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
m = model.CannonModel(2)
m.coeffs = np.load(direc + "/coeffs.npz")['arr_0']
m.scatters = np.load(direc + "/scatters.npz")['arr_0']
m.chisqs = np.load(direc + "/chisqs.npz")['arr_0']
m.pivots = np.load(direc + "/pivots.npz")['arr_0']
return m
def load_dataset(date):
""" Load the dataset for a single date
Parameters
----------
date: the date (string) for which to load the data & dataset
Returns
-------
ds: the dataset object
"""
DATA_DIR = "/home/annaho/TheCannon/data/lamost"
WL_DIR = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
wl = np.load(WL_DIR + "/wl_cols.npz")['arr_0']
ds = dataset.Dataset(wl, [], [], [], [], [], [], [])
test_label = np.load("%s/%s_all_cannon_labels.npz" %(DATA_DIR,date))['arr_0']
ds.test_label_vals = test_label
ds.test_flux = np.load("%s/%s_test_flux.npz" %(DATA_DIR,date))['arr_0']
ds.test_ivar = np.load("%s/%s_test_ivar.npz" %(DATA_DIR,date))['arr_0']
return ds
def plot_fit():
plt.plot(wl, resid[ii])
plt.xlim(6400,7000)
plt.ylim(-0.1,0.1)
plt.axvline(x=6707.8, c='r', linestyle='--', linewidth=2)
plt.axvline(x=6103, c='r', linestyle='--', linewidth=2)
plt.show()
plt.savefig("resid_%s.png" %ii)
plt.close()
def run_all_data():
""" Load the data that we're using to search for Li-rich giants.
Store it in dataset and model objects. """
DATA_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels"
dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
dates = np.array(dates)
dates = np.delete(dates, np.where(dates=='.directory')[0][0])
dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0])
dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0])
for date in dates:
print ("loading data for %s" %date)
load_date(date)
if __name__=="__main__":
# load a spectrum
ds = load_dataset("20121006")
#m = load_model()
#print(ds.test_flux.shape)
#print(m.coeffs.shape)
#resid = get_residuals(ds, m)
| Python | 0.000001 |
72e30b3b881418d40dd0446842176fc5c4468802 | Add name url converter | flask_roots/routing.py | flask_roots/routing.py | from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
def strip_accents(s):
s = unicode(s)
return ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
def _urlify_name(name):
"""Converts a name or title into something we can put into a URI.
This is designed to only be for one way usage (ie. we can't use the
urlified names to figure out what photo or photoset we are talking about).
"""
return re.sub(r'\W+', '-', name).strip('-') or 'Untitled'
def urlify_name(name):
return _urlify_name(strip_accents(name).encode('ascii', 'ignore'))
class NameConverter(BaseConverter):
def to_python(self, value):
return value
def to_url(self, value):
if not isinstance(value, str) and hasattr(value, 'name'):
value = value.name
return urlify_name(str(value)).lower()
def setup_routing(app):
app.url_map.converters['re'] = RegexConverter
app.url_map.converters['name'] = NameConverter
| from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
def setup_routing(app):
app.url_map.converters['re'] = RegexConverter
| Python | 0.000001 |
42463351a598d45f2738c894e00d0eceec308f9c | Add docstring | aegea/billing.py | aegea/billing.py | """
View detailed billing reports.
Detailed billing reports can be configured at https://console.aws.amazon.com/billing/home#/preferences.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, json, zipfile, csv, io
from io import BytesIO, TextIOWrapper
from datetime import datetime
import boto3, requests
from botocore.exceptions import ClientError
from . import register_parser
from .util.printing import format_table, page_output, get_field, get_cell, tabulate
from .util.aws import ARN
def filter_line_items(args):
def filter_fn(item):
if args.min_cost and float(item["Cost"]) < args.min_cost:
return False
return True
return filter_fn
def billing(args):
s3 = boto3.resource("s3")
iam = boto3.resource("iam")
account_id = ARN(iam.CurrentUser().user.arn).account_id
args.detailed_billing_reports_bucket = args.detailed_billing_reports_bucket.format(account_id=account_id)
now = datetime.utcnow()
report = "{account_id}-aws-billing-detailed-line-items-with-resources-and-tags-{year}-{month}.csv.zip"
report = report.format(account_id=account_id, year=args.year or now.year, month="%02d" % (args.month or now.month))
try:
billing_object = s3.Bucket(args.detailed_billing_reports_bucket).Object(report)
billing_object_body = billing_object.get()["Body"]
except ClientError as e:
console_url = "https://console.aws.amazon.com/billing/home#/preferences"
msg = "Can't get detailed billing report {} from bucket {} in account {}: {}. Go to {} to set up detailed billing."
sys.exit(msg.format(report, args.detailed_billing_reports_bucket, account_id, e, console_url))
zbuf = BytesIO(billing_object_body.read())
with zipfile.ZipFile(zbuf) as zfile:
with TextIOWrapper(zfile.open(report.rstrip(".zip"))) as fh:
reader = csv.DictReader(fh)
page_output(tabulate(filter(filter_line_items(args), reader), args))
parser = register_parser(billing, help='List contents of AWS detailed billing reports', description=__doc__)
parser.add_argument("--columns", nargs="+")
#parser.add_argument("--sort-by")
parser.add_argument("--year", type=int, help="Year to get billing reports for. Defaults to current year")
parser.add_argument("--month", type=int, help="Month (numeral) to get billing reports for. Defaults to current month")
parser.add_argument("--detailed-billing-reports-bucket", help="Name of S3 bucket to retrieve detailed billing reports from")
parser.add_argument("--min-cost", type=float, help="Omit billing line items below this cost")
| """
View detailed billing reports.
Detailed billing reports can be configured at https://console.aws.amazon.com/billing/home#/preferences.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, json, zipfile, csv, io
from io import BytesIO, TextIOWrapper
from datetime import datetime
import boto3, requests
from botocore.exceptions import ClientError
from . import register_parser
from .util.printing import format_table, page_output, get_field, get_cell, tabulate
from .util.aws import ARN
def filter_line_items(args):
def filter_fn(item):
if args.min_cost and float(item["Cost"]) < args.min_cost:
return False
return True
return filter_fn
def billing(args):
s3 = boto3.resource("s3")
iam = boto3.resource("iam")
account_id = ARN(iam.CurrentUser().user.arn).account_id
args.detailed_billing_reports_bucket = args.detailed_billing_reports_bucket.format(account_id=account_id)
now = datetime.utcnow()
report = "{account_id}-aws-billing-detailed-line-items-with-resources-and-tags-{year}-{month}.csv.zip"
report = report.format(account_id=account_id, year=args.year or now.year, month="%02d" % (args.month or now.month))
try:
billing_object = s3.Bucket(args.detailed_billing_reports_bucket).Object(report)
billing_object_body = billing_object.get()["Body"]
except ClientError as e:
console_url = "https://console.aws.amazon.com/billing/home#/preferences"
msg = "Can't get detailed billing report {} from bucket {} in account {}: {}. Go to {} to set up detailed billing."
sys.exit(msg.format(report, args.detailed_billing_reports_bucket, account_id, e, console_url))
zbuf = BytesIO(billing_object_body.read())
with zipfile.ZipFile(zbuf) as zfile:
with TextIOWrapper(zfile.open(report.rstrip(".zip"))) as fh:
reader = csv.DictReader(fh)
page_output(tabulate(filter(filter_line_items(args), reader), args))
parser = register_parser(billing, help='List contents of AWS detailed billing reports')
parser.add_argument("--columns", nargs="+")
#parser.add_argument("--sort-by")
parser.add_argument("--year", type=int, help="Year to get billing reports for. Defaults to current year")
parser.add_argument("--month", type=int, help="Month (numeral) to get billing reports for. Defaults to current month")
parser.add_argument("--detailed-billing-reports-bucket", help="Name of S3 bucket to retrieve detailed billing reports from")
parser.add_argument("--min-cost", type=float, help="Omit billing line items below this cost")
| Python | 0.000005 |
19025b97d38706eda4f425667b69f7803a39ca35 | add tinyint as a bool type | flask_admin/contrib/sqla/filters.py | flask_admin/contrib/sqla/filters.py | import warnings
from flask.ext.admin.babel import lazy_gettext
from flask.ext.admin.model import filters
from flask.ext.admin.contrib.sqla import tools
class BaseSQLAFilter(filters.BaseFilter):
"""
Base SQLAlchemy filter.
"""
def __init__(self, column, name, options=None, data_type=None):
"""
Constructor.
:param column:
Model field
:param name:
Display name
:param options:
Fixed set of options
:param data_type:
Client data type
"""
super(BaseSQLAFilter, self).__init__(name, options, data_type)
self.column = column
# Common filters
class FilterEqual(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column == value)
def operation(self):
return lazy_gettext('equals')
class FilterNotEqual(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column != value)
def operation(self):
return lazy_gettext('not equal')
class FilterLike(BaseSQLAFilter):
def apply(self, query, value):
stmt = tools.parse_like_term(value)
return query.filter(self.column.ilike(stmt))
def operation(self):
return lazy_gettext('contains')
class FilterNotLike(BaseSQLAFilter):
def apply(self, query, value):
stmt = tools.parse_like_term(value)
return query.filter(~self.column.ilike(stmt))
def operation(self):
return lazy_gettext('not contains')
class FilterGreater(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column > value)
def operation(self):
return lazy_gettext('greater than')
class FilterSmaller(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column < value)
def operation(self):
return lazy_gettext('smaller than')
# Customized type filters
class BooleanEqualFilter(FilterEqual, filters.BaseBooleanFilter):
pass
class BooleanNotEqualFilter(FilterNotEqual, filters.BaseBooleanFilter):
pass
# Base SQLA filter field converter
class FilterConverter(filters.BaseFilterConverter):
strings = (FilterEqual, FilterNotEqual, FilterLike, FilterNotLike)
numeric = (FilterEqual, FilterNotEqual, FilterGreater, FilterSmaller)
bool = (BooleanEqualFilter, BooleanNotEqualFilter)
enum = (FilterEqual, FilterNotEqual)
def convert(self, type_name, column, name, **kwargs):
if type_name.lower() in self.converters:
return self.converters[type_name.lower()](column, name, **kwargs)
return None
@filters.convert('string', 'unicode', 'text', 'unicodetext', 'varchar')
def conv_string(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.strings]
@filters.convert('boolean', 'tinyint')
def conv_bool(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.bool]
@filters.convert('integer', 'smallinteger', 'numeric', 'float', 'biginteger')
def conv_int(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.numeric]
@filters.convert('date')
def conv_date(self, column, name, **kwargs):
return [f(column, name, data_type='datepicker', **kwargs) for f in self.numeric]
@filters.convert('datetime')
def conv_datetime(self, column, name, **kwargs):
return [f(column, name, data_type='datetimepicker', **kwargs) for f in self.numeric]
@filters.convert('enum')
def conv_enum(self, column, name, options=None, **kwargs):
if not options:
options = [
(v, v)
for v in column.type.enums
]
return [f(column, name, options, **kwargs) for f in self.enum]
| import warnings
from flask.ext.admin.babel import lazy_gettext
from flask.ext.admin.model import filters
from flask.ext.admin.contrib.sqla import tools
class BaseSQLAFilter(filters.BaseFilter):
"""
Base SQLAlchemy filter.
"""
def __init__(self, column, name, options=None, data_type=None):
"""
Constructor.
:param column:
Model field
:param name:
Display name
:param options:
Fixed set of options
:param data_type:
Client data type
"""
super(BaseSQLAFilter, self).__init__(name, options, data_type)
self.column = column
# Common filters
class FilterEqual(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column == value)
def operation(self):
return lazy_gettext('equals')
class FilterNotEqual(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column != value)
def operation(self):
return lazy_gettext('not equal')
class FilterLike(BaseSQLAFilter):
def apply(self, query, value):
stmt = tools.parse_like_term(value)
return query.filter(self.column.ilike(stmt))
def operation(self):
return lazy_gettext('contains')
class FilterNotLike(BaseSQLAFilter):
def apply(self, query, value):
stmt = tools.parse_like_term(value)
return query.filter(~self.column.ilike(stmt))
def operation(self):
return lazy_gettext('not contains')
class FilterGreater(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column > value)
def operation(self):
return lazy_gettext('greater than')
class FilterSmaller(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column < value)
def operation(self):
return lazy_gettext('smaller than')
# Customized type filters
class BooleanEqualFilter(FilterEqual, filters.BaseBooleanFilter):
pass
class BooleanNotEqualFilter(FilterNotEqual, filters.BaseBooleanFilter):
pass
# Base SQLA filter field converter
class FilterConverter(filters.BaseFilterConverter):
strings = (FilterEqual, FilterNotEqual, FilterLike, FilterNotLike)
numeric = (FilterEqual, FilterNotEqual, FilterGreater, FilterSmaller)
bool = (BooleanEqualFilter, BooleanNotEqualFilter)
enum = (FilterEqual, FilterNotEqual)
def convert(self, type_name, column, name, **kwargs):
if type_name.lower() in self.converters:
return self.converters[type_name.lower()](column, name, **kwargs)
return None
@filters.convert('string', 'unicode', 'text', 'unicodetext', 'varchar')
def conv_string(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.strings]
@filters.convert('boolean')
def conv_bool(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.bool]
@filters.convert('integer', 'smallinteger', 'numeric', 'float', 'biginteger')
def conv_int(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.numeric]
@filters.convert('date')
def conv_date(self, column, name, **kwargs):
return [f(column, name, data_type='datepicker', **kwargs) for f in self.numeric]
@filters.convert('datetime')
def conv_datetime(self, column, name, **kwargs):
return [f(column, name, data_type='datetimepicker', **kwargs) for f in self.numeric]
@filters.convert('enum')
def conv_enum(self, column, name, options=None, **kwargs):
if not options:
options = [
(v, v)
for v in column.type.enums
]
return [f(column, name, options, **kwargs) for f in self.enum]
| Python | 0.000001 |
803fead9cbfa9d2a950e9fa16f42e905f6a942d7 | add module imports | flocker/ca/__init__.py | flocker/ca/__init__.py | # Copyright ClusterHQ Inc. See LICENSE file for details.
"""
A minimal certificate authority.
"""
__all__ = [
"RootCredential", "ControlCredential", "NodeCredential", "UserCredential",
"ComparableKeyPair", "PathError", "CertificateAlreadyExistsError",
"KeyAlreadyExistsError", "EXPIRY_20_YEARS",
"AUTHORITY_CERTIFICATE_FILENAME", "AUTHORITY_KEY_FILENAME",
"CONTROL_CERTIFICATE_FILENAME", "CONTROL_KEY_FILENAME"
]
from ._ca import (
RootCredential, ControlCredential, NodeCredential, UserCredential,
ComparableKeyPair, PathError, CertificateAlreadyExistsError,
KeyAlreadyExistsError, EXPIRY_20_YEARS,
AUTHORITY_CERTIFICATE_FILENAME, AUTHORITY_KEY_FILENAME,
CONTROL_CERTIFICATE_FILENAME, CONTROL_KEY_FILENAME
)
| # Copyright ClusterHQ Inc. See LICENSE file for details.
"""
A minimal certificate authority.
"""
__all__ = [
"RootCredential", "ControlCredential", "NodeCredential",
"ComparableKeyPair", "PathError", "CertificateAlreadyExistsError",
"KeyAlreadyExistsError", "EXPIRY_20_YEARS",
"AUTHORITY_CERTIFICATE_FILENAME", "AUTHORITY_KEY_FILENAME",
"CONTROL_CERTIFICATE_FILENAME", "CONTROL_KEY_FILENAME"
]
from ._ca import (
RootCredential, ControlCredential, NodeCredential,
ComparableKeyPair, PathError, CertificateAlreadyExistsError,
KeyAlreadyExistsError, EXPIRY_20_YEARS,
AUTHORITY_CERTIFICATE_FILENAME, AUTHORITY_KEY_FILENAME,
CONTROL_CERTIFICATE_FILENAME, CONTROL_KEY_FILENAME
)
| Python | 0.000001 |
b45db0476212891dd23934d775bc3082cbcaabdf | Fix KLD | ws/CSUIBotClass2014/MCL/kldmcl.py | ws/CSUIBotClass2014/MCL/kldmcl.py | # @obj: implement the standard MCL alg.; table 8.2 on the book Prob. Robotics by S. Thrun
# @author: vektor dewanto
import numpy as np
import CSUIBotClass2014.action_model.model_uas as act_model
import CSUIBotClass2014.perception_model.beam_range_finder_model as obs_model
from scipy import stats
import math
def normalize_weight(X):
# Normalize all weights, so that they sum up to one
total_w = sum([xw[1] for xw in X])
X = [(xw[0], xw[1]/total_w) for xw in X]
return X
def resample(X_bar):
'''
draw i with probability proportional to w_t^i
'''
X_bar = normalize_weight(X_bar)
X = []
while len(X) < len(X_bar):
candidate_idx = np.random.random_integers(low=0, high= len(X_bar)-1)
candidate_w = X_bar[candidate_idx][1]
sampled = np.random.binomial(n=1, p=candidate_w)# a Bernoulli dist.
if sampled==1:
return X_bar[candidate_idx]
return X
def run(X_past, u, z, m):
'''
\param X: is a list of tuples (x, w)
\param u: the control/action
\param z: the observation
\param m: the given map
'''
epsilon = 0.05
delta = 0.01
Xt = []
b = [[0]*20]*20
M = 0
Mx = 0
Mxmin = 20
k = 0
n_particle = len(X_past)# fixed #particle for ever :(
while True:
xt1 = resample(X_past)
print "menunggu pagi"
print len(X_past)
print xt1
xmt = act_model.sample_motion_model(u, xt1[0], m)
w = 1-obs_model.beam_range_finder_model(z, xmt, m)
Xt.append((xmt, w))
idx = int(math.floor(xmt['x']))
idy = int(math.floor(xmt['y']))
if(b[idy][idx]==0):
k += 1
b[idy][idx] = 1
if(k>1):
var1 = 2.0/(9*(k-1))
Mx = ((k-1)/2.0*epsilon*
(1 - var1 + math.sqrt(var1)*stats.norm.ppf(1-delta))**3)
M+=1
if not ((M<Mx) or (M<Mxmin)):
return Xt
return Xt
| # @obj: implement the standard MCL alg.; table 8.2 on the book Prob. Robotics by S. Thrun
# @author: vektor dewanto
import numpy as np
import CSUIBotClass2014.action_model.model_uas as act_model
import CSUIBotClass2014.perception_model.beam_range_finder_model as obs_model
def normalize_weight(X):
# Normalize all weights, so that they sum up to one
total_w = sum([xw[1] for xw in X])
X = [(xw[0], xw[1]/total_w) for xw in X]
return X
def resample(X_bar):
'''
draw i with probability proportional to w_t^i
'''
X_bar = normalize_weight(X_bar)
X = []
while len(X) < len(X_bar):
candidate_idx = np.random.random_integers(low=0, high= len(X_bar)-1)
candidate_w = X_bar[candidate_idx][1]
sampled = np.random.binomial(n=1, p=candidate_w)# a Bernoulli dist.
if sampled==1:
X.append(X_bar[candidate_idx])
return X
def run(X_past, u, z, m):
'''
\param X: is a list of tuples (x, w)
\param u: the control/action
\param z: the observation
\param m: the given map
'''
X_bar = []
X = []
n_particle = len(X_past)# fixed #particle for ever :(
for i in range(n_particle):
x = act_model.sample_motion_model(u, X_past[i][0], m)
w = 1-obs_model.beam_range_finder_model(z, x, m)
X_bar.append((x, w))
X = resample(X_bar)
return X
| Python | 0.000003 |
bf476a199492c7966b6a3886da284867622a8b04 | Update populate_vm_metrics.py | perfmetrics/scripts/populate_vm_metrics.py | perfmetrics/scripts/populate_vm_metrics.py | """Executes vm_metrics.py by passing appropriate arguments.
To run the script:
>> python3 populate_vm_metrics.py <start_time> <end_time>
"""
import socket
import sys
import time
import os
from vm_metrics import vm_metrics
INSTANCE = socket.gethostname()
metric_data_name = ['start_time_sec', 'cpu_utilization_peak','cpu_utilization_mean',
'network_bandwidth_peak', 'network_bandwidth_mean', 'gcs/ops_latency',
'gcs/read_bytes_count', 'gcs/ops_error_count']
if __name__ == '__main__':
argv = sys.argv
if len(argv) != 3:
raise TypeError('Incorrect number of arguments.\n'
'Usage: '
'python3 populate_vm_metrics.py <start_time> <end_time>')
print('Waiting for 250 seconds for metrics to be updated on VM...')
# It takes up to 240 seconds for sampled data to be visible on the VM metrics graph
# So, waiting for 250 seconds to ensure the returned metrics are not empty
time.sleep(250)
vm_metrics_obj = vm_metrics.VmMetrics()
start_time_sec = int(argv[1])
end_time_sec = int(argv[2])
period = end_time_sec - start_time_sec
print(f'Getting VM metrics for ML model')
vm_metrics_obj.fetch_metrics_and_write_to_google_sheet(start_time_sec, end_time_sec, INSTANCE, period, 'read', 'ml_metrics')
| """Executes vm_metrics.py by passing appropriate arguments.
To run the script:
>> python3 populate_vm_metrics.py <start_time> <end_time>
"""
import socket
import sys
import time
import os
from vm_metrics import vm_metrics
INSTANCE = socket.gethostname()
metric_data_name = ['start_time_sec', 'cpu_utilization_peak','cpu_utilization_mean',
'network_bandwidth_peak', 'network_bandwidth_mean', 'gcs/ops_latency',
'gcs/read_bytes_count', 'gcs/ops_error_count']
if __name__ == '__main__':
argv = sys.argv
if len(argv) != 3:
raise TypeError('Incorrect number of arguments.\n'
'Usage: '
'python3 populate_vm_metrics.py <start_time> <end_time>')
print('Waiting for 250 seconds for metrics to be updated on VM...')
# It takes up to 240 seconds for sampled data to be visible on the VM metrics graph
# So, waiting for 250 seconds to ensure the returned metrics are not empty
time.sleep(250)
vm_metrics_obj = vm_metrics.VmMetrics()
start_time_sec = int(argv[1])
end_time_sec = int(argv[2])
period = end_time_sec - start_time_sec
print(f'Getting VM metrics for ML model')
vm_metrics_obj.fetch_metrics_and_write_to_google_sheet(start_time_sec, end_time_sec, INSTANCE, period, 'read', 'ml_metrics!')
| Python | 0.000004 |
853c6ec8d1c4f518e28f9f14547e2d8999c17ad9 | Update models.py | flask_appbuilder/security/models.py | flask_appbuilder/security/models.py | from sqlalchemy import Table, Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship, column_property
from flask.ext.appbuilder import Base
class Permission(Base):
__tablename__ = 'ab_permission'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique = True, nullable=False)
def __repr__(self):
return self.name
class ViewMenu(Base):
__tablename__ = 'ab_view_menu'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique = True, nullable=False)
def __eq__(self, other):
return (isinstance(other, self.__class__)) and (self.name == other.name)
def __neq__(self, other):
return self.name != other.name
def __repr__(self):
return self.name
class PermissionView(Base):
__tablename__ = 'ab_permission_view'
id = Column(Integer, primary_key=True)
permission_id = Column(Integer, ForeignKey('ab_permission.id'))
permission = relationship("Permission", backref='permission')
view_menu_id = Column(Integer, ForeignKey('ab_view_menu.id'))
view_menu = relationship("ViewMenu")
def __repr__(self):
return str(self.permission).replace('_',' ') + ' on ' + str(self.view_menu)
assoc_permissionview_role = Table('ab_permission_view_role', Base.metadata,
Column('id', Integer, primary_key=True),
Column('permission_view_id', Integer, ForeignKey('ab_permission_view.id')),
Column('role_id', Integer, ForeignKey('ab_role.id'))
)
class Role(Base):
__tablename__ = 'ab_role'
id = Column(Integer, primary_key=True)
name = Column(String(64), unique = True, nullable=False)
permissions = relationship('PermissionView', secondary = assoc_permissionview_role, backref='role')
def __repr__(self):
return self.name
class User(Base):
__tablename__ = 'ab_user'
id = Column(Integer, primary_key=True)
first_name = Column(String(64), nullable = False)
last_name = Column(String(64), nullable = False)
full_name = column_property(first_name + " " + last_name)
username = Column(String(32), unique=True, nullable = False)
password = Column(String(32))
active = Column(Boolean)
email = Column(String(64))
role_id = Column(Integer, ForeignKey('ab_role.id'))
role = relationship("Role")
@staticmethod
def make_unique_nickname(nickname):
if User.query.filter_by(nickname = nickname).first() == None:
return nickname
version = 2
while True:
new_nickname = nickname + str(version)
if User.query.filter_by(nickname = new_nickname).first() == None:
break
version += 1
return new_nickname
def check_password(self, password):
return check_password_hash(self.password, password)
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def get_full_name(self):
return self.first_name + " " + self.last_name
def __repr__(self):
return (self.get_full_name())
| from sqlalchemy import Table, Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship, column_property
from flask.ext.appbuilder import Base
class Permission(Base):
__tablename__ = 'ab_permission'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique = True, nullable=False)
def __repr__(self):
return self.name
class ViewMenu(Base):
__tablename__ = 'ab_view_menu'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique = True, nullable=False)
def __eq__(self, other):
return (isinstance(other, self.__class__)) and (self.name == other.name)
def __neq__(self, other):
return self.name != other.name
def __repr__(self):
return self.name
class PermissionView(Base):
__tablename__ = 'ab_permission_view'
id = Column(Integer, primary_key=True)
permission_id = Column(Integer, ForeignKey('ab_permission.id'))
permission = relationship("Permission", backref='permissionview')
view_menu_id = Column(Integer, ForeignKey('ab_view_menu.id'))
view_menu = relationship("ViewMenu")
def __repr__(self):
return str(self.permission).replace('_',' ') + ' on ' + str(self.view_menu)
assoc_permissionview_role = Table('ab_permission_view_role', Base.metadata,
Column('id', Integer, primary_key=True),
Column('permission_view_id', Integer, ForeignKey('ab_permission_view.id')),
Column('role_id', Integer, ForeignKey('ab_role.id'))
)
class Role(Base):
__tablename__ = 'ab_role'
id = Column(Integer, primary_key=True)
name = Column(String(64), unique = True, nullable=False)
permissions = relationship('PermissionView', secondary = assoc_permissionview_role, backref='role')
def __repr__(self):
return self.name
class User(Base):
__tablename__ = 'ab_user'
id = Column(Integer, primary_key=True)
first_name = Column(String(64), nullable = False)
last_name = Column(String(64), nullable = False)
full_name = column_property(first_name + " " + last_name)
username = Column(String(32), unique=True, nullable = False)
password = Column(String(32))
active = Column(Boolean)
email = Column(String(64))
role_id = Column(Integer, ForeignKey('ab_role.id'))
role = relationship("Role")
@staticmethod
def make_unique_nickname(nickname):
if User.query.filter_by(nickname = nickname).first() == None:
return nickname
version = 2
while True:
new_nickname = nickname + str(version)
if User.query.filter_by(nickname = new_nickname).first() == None:
break
version += 1
return new_nickname
def check_password(self, password):
return check_password_hash(self.password, password)
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def get_full_name(self):
return self.first_name + " " + self.last_name
def __repr__(self):
return (self.get_full_name())
| Python | 0 |
c18972be7609b3de061ec41977ad73efccd5213c | Fix HTTP Basic authentication decorator | agir/lib/http.py | agir/lib/http.py | import base64
from functools import wraps
from hashlib import sha1
from django.http import HttpResponse
from django.utils.crypto import constant_time_compare
EMPTY_HASH = sha1().digest()
class HttpResponseUnauthorized(HttpResponse):
status_code = 401
def __init__(self, content=b'', realm="api", *args, **kwargs):
super().__init__(content, *args, **kwargs)
self['WWW-Authenticate'] = f'Basic realm="{realm}", charset="UTF-8"'
def check_basic_auth(request, identities):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) != 2 or auth[0].lower() != 'basic':
return HttpResponseUnauthorized()
try:
user, password = base64.b64decode(auth[1]).split(b':')
except:
return HttpResponseUnauthorized()
h = sha1()
h.update(password)
digest = h.digest()
user_exists = user in identities
identical_password = constant_time_compare(digest, identities.get(user, EMPTY_HASH))
if not user_exists or not identical_password:
return HttpResponseUnauthorized()
return None
def with_http_basic_auth(identities):
hashed_identities = {}
for user, password in identities.items():
h = sha1()
h.update(password.encode('utf8'))
hashed_identities[user.encode('utf8')] = h.digest()
def decorator(view):
if isinstance(view, type):
wrapped_dispatch = type.dispatch
@wraps(wrapped_dispatch)
def wrapper(self, request, *args, **kwargs):
return check_basic_auth(request, hashed_identities) or wrapped_dispatch(self, request, *args, **kwargs)
view.dispatch = wrapper
return view
@wraps(view)
def wrapper(request, *args, **kwargs):
return check_basic_auth(request, hashed_identities) or view(request, *args, **kwargs)
return wrapper
return decorator
| import base64
from functools import wraps
from hashlib import sha1
from django.http import HttpResponse
from django.utils.crypto import constant_time_compare
EMPTY_HASH = sha1().digest()
class HttpResponseUnauthorized(HttpResponse):
status_code = 401
def __init__(self, content=b'', realm="api", *args, **kwargs):
super().__init__(content, *args, **kwargs)
self['WWW-Authenticate'] = f'Basic realm="{realm}"'
def check_basic_auth(request, identities):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) != 2 or auth[0].lower() != 'basic':
return HttpResponseUnauthorized()
try:
user, password = base64.b64decode(auth[1]).decode().split(':')
except:
return HttpResponseUnauthorized()
h = sha1()
h.update(password)
digest = h.digest()
user_exists = user in identities
identical_password = constant_time_compare(digest, identities.get(user, EMPTY_HASH))
if not user_exists or not identical_password:
return HttpResponseUnauthorized()
def with_http_basic_auth(identities):
hashed_identities = {}
for user, password in identities.items():
h = sha1()
h.update(password.encode('utf8'))
hashed_identities[user] = h.digest()
def decorator(view):
if isinstance(view, type):
wrapped_dispatch = type.dispatch
@wraps(wrapped_dispatch)
def wrapper(self, request, *args, **kwargs):
check_basic_auth(request, hashed_identities)
return wrapped_dispatch(self, request, *args, **kwargs)
view.dispatch = wrapper
return view
@wraps(view)
def wrapper(request, *args, **kwargs):
check_basic_auth(request, hashed_identities)
return view(request, *args, **kwargs)
return wrapper
return decorator
| Python | 0.000091 |
70de505674e5675d969a84339b6bb59431333ed3 | Revise comments and add space lines, & revise main() | lc0234_palindrome_linked_list.py | lc0234_palindrome_linked_list.py | """Leetcode 234. Palindrome Linked List
Easy
URL: https://leetcode.com/problems/palindrome-linked-list/
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val):
self.val = val
self.next = None
class SolutionStack(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
Time complexity: O(n).
Space complexity: O(n).
"""
# Use stack to collect values.
stack = []
current = head
while current:
stack.append(current.val)
current = current.next
for i in range(len(stack) // 2):
if stack[i] != stack[len(stack) - 1 - i]:
return False
return True
class Solution2(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
Time complexity: O(n).
Space complexity: O(1).
"""
# Find the middle node: slow + fast.
slow = fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# Reverse the 2nd half of linked list using slow.
reverse = None
while slow:
nxt = slow.next
slow.next = reverse
reverse = slow
slow = nxt
# Traverse the 1st half and reversed 2nd half at the same time
# and compare their val.
while reverse:
if reverse.val != head.val:
return False
reverse = reverse.next
head = head.next
return True
def main():
import time
# 1->2->2->1: Yes.
head.ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(2)
head.next.next.next = ListNode(1)
start_time = time.time()
print 'Naive: {}'.format(SolutionStack().isPalindrome(head))
print 'Time: {}'.format(time.time() - start_time)
start_time = time.time()
print 'Optimized: {}'.format(Solution2().isPalindrome(head))
print 'Time: {}'.format(time.time() - start_time)
# 1->2->3->1: No.
head.ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(1)
start_time = time.time()
print 'Naive: {}'.format(SolutionStack().isPalindrome(head))
print 'Time: {}'.format(time.time() - start_time)
start_time = time.time()
print 'Optimized: {}'.format(Solution2().isPalindrome(head))
print 'Time: {}'.format(time.time() - start_time)
if __name__ == '__main__':
main()
| """Leetcode 234. Palindrome Linked List
Easy
URL: https://leetcode.com/problems/palindrome-linked-list/
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val):
self.val = val
self.next = None
class LinkedList(object):
def __init__(self):
self.head = None
def append(self, val):
if not self.head:
self.head = ListNode(val)
return None
current = self.head
while current.next:
current = current.next
current.next = ListNode(val)
class Solution1(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
Time complexity: O(n).
Space complexity: O(n).
"""
stack = []
current = head
while current:
stack.append(current.val)
current = current.next
for i in range(len(stack) // 2):
if stack[i] != stack[len(stack) - 1 - i]:
return False
return True
class Solution2(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
Time complexity: O(n).
Space complexity: O(1).
"""
# Find the middle node: slow
slow = fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# Reverse the 2nd half of linked list using slow.
reverse = None
while slow:
nxt = slow.next
slow.next = reverse
reverse = slow
slow = nxt
# Traverse the 1st half and reversed 2nd half at the same time
# and compare their val.
while reverse:
if reverse.val != head.val:
return False
reverse = reverse.next
head = head.next
return True
def main():
import time
# 1->2->2->1: Yes.
a_list = LinkedList()
a_list.append(1)
a_list.append(2)
a_list.append(2)
a_list.append(1)
print a_list.head.val
print a_list.head.next.val
print a_list.head.next.next.val
print a_list.head.next.next.next.val
start_time = time.time()
print 'Naive: {}'.format(Solution1().isPalindrome(a_list.head))
print 'Time: {}'.format(time.time() - start_time)
start_time = time.time()
print 'Optimized: {}'.format(Solution2().isPalindrome(a_list.head))
print 'Time: {}'.format(time.time() - start_time)
# 1->2->3->1: No.
a_list = LinkedList()
a_list.append(1)
a_list.append(2)
a_list.append(3)
a_list.append(1)
print a_list.head.val
print a_list.head.next.val
print a_list.head.next.next.val
print a_list.head.next.next.next.val
start_time = time.time()
print 'Naive: {}'.format(Solution1().isPalindrome(a_list.head))
print 'Time: {}'.format(time.time() - start_time)
start_time = time.time()
print 'Optimized: {}'.format(Solution2().isPalindrome(a_list.head))
print 'Time: {}'.format(time.time() - start_time)
if __name__ == '__main__':
main()
| Python | 0 |
3e8d113a6fa32c7c9163d3334e484993c29080ba | remove split test | vlermv/test/test_s3.py | vlermv/test/test_s3.py | import json
import pytest
from .._s3 import S3Vlermv
class FakeBucket:
def __init__(self, name, **db):
self.db = db
self.name = name
def list(self):
for key in self.db:
yield self.new_key(key)
def new_key(self, key):
return FakeKey(self.db, key)
def get_key(self, key):
if key in self.db:
return FakeKey(self.db, key)
def delete_key(self, key):
del(self.db[key])
class FakeKey:
def __init__(self, db, key):
self.db = db
self.key = key
def get_contents_as_string(self):
return self.db[self.key]
def get_contents_to_filename(self, filename):
with open(filename, 'wb') as fp:
fp.write(self.db[self.key])
def set_contents_from_string(self, payload, **kwargs):
self.db[self.key] = payload
def set_contents_from_filename(self, filename, **kwargs):
with open(filename, 'rb') as fp:
self.db[self.key] = fp.read()
CONTRACT = {
'bids': [],
'contract': 'http://search.worldbank.org/wcontractawards/procdetails/OP00032101',
'method.selection': 'QCBS ? Quality andCost-Based Selection',
'price': 'INR 1,96,53,750',
'project': None
}
PAYLOAD = json.dumps(CONTRACT).encode('utf-8')
def test_read():
d = S3Vlermv('contracts', serializer = json,
bucket = FakeBucket('aoeu', OP00032101 = PAYLOAD))
assert d['OP00032101'] == CONTRACT
def test_write():
fakebucket = FakeBucket('aoeu')
d = S3Vlermv('contracts', bucket = fakebucket, serializer = json)
assert fakebucket.db == {}
d['OP00032101'] = CONTRACT
assert fakebucket.db == {'OP00032101': PAYLOAD}
def test_delete():
fakebucket = FakeBucket('aoeu')
d = S3Vlermv('contracts', bucket = fakebucket, serializer = json)
d['OP00032101'] = CONTRACT
del(d['OP00032101'])
assert len(fakebucket.db) == 0
| import json
import pytest
from .._s3 import S3Vlermv, split
class FakeBucket:
def __init__(self, name, **db):
self.db = db
self.name = name
def list(self):
for key in self.db:
yield self.new_key(key)
def new_key(self, key):
return FakeKey(self.db, key)
def get_key(self, key):
if key in self.db:
return FakeKey(self.db, key)
def delete_key(self, key):
del(self.db[key])
class FakeKey:
def __init__(self, db, key):
self.db = db
self.key = key
def get_contents_as_string(self):
return self.db[self.key]
def get_contents_to_filename(self, filename):
with open(filename, 'wb') as fp:
fp.write(self.db[self.key])
def set_contents_from_string(self, payload, **kwargs):
self.db[self.key] = payload
def set_contents_from_filename(self, filename, **kwargs):
with open(filename, 'rb') as fp:
self.db[self.key] = fp.read()
CONTRACT = {
'bids': [],
'contract': 'http://search.worldbank.org/wcontractawards/procdetails/OP00032101',
'method.selection': 'QCBS ? Quality andCost-Based Selection',
'price': 'INR 1,96,53,750',
'project': None
}
PAYLOAD = json.dumps(CONTRACT).encode('utf-8')
def test_read():
d = S3Vlermv('contracts', serializer = json,
bucket = FakeBucket('aoeu', OP00032101 = PAYLOAD))
assert d['OP00032101'] == CONTRACT
def test_write():
fakebucket = FakeBucket('aoeu')
d = S3Vlermv('contracts', bucket = fakebucket, serializer = json)
assert fakebucket.db == {}
d['OP00032101'] = CONTRACT
assert fakebucket.db == {'OP00032101': PAYLOAD}
def test_split():
assert split('a/bb/cc') == ('a', 'bb', 'cc')
assert split('one') == ('one',)
def test_delete():
fakebucket = FakeBucket('aoeu')
d = S3Vlermv('contracts', bucket = fakebucket, serializer = json)
d['OP00032101'] = CONTRACT
del(d['OP00032101'])
assert len(fakebucket.db) == 0
| Python | 0.000006 |
d4ffe068638aa1394c1a34eaa43859edb47c0473 | Update hodograph_inset example for plot the colormap by height. | examples/plots/Hodograph_Inset.py | examples/plots/Hodograph_Inset.py | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Hodograph Inset
===============
Layout a Skew-T plot with a hodograph inset into the plot.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, Hodograph, SkewT
from metpy.units import units
###########################################
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
###########################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
hght = df['height'].values * units.hPa
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
###########################################
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
# Grid for plots
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Good bounds for aspect ratio
skew.ax.set_xlim(-50, 60)
# Create a hodograph
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, hght)
# Show the plot
plt.show()
| # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Hodograph Inset
===============
Layout a Skew-T plot with a hodograph inset into the plot.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, Hodograph, SkewT
from metpy.units import units
###########################################
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
###########################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
###########################################
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
# Grid for plots
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Good bounds for aspect ratio
skew.ax.set_xlim(-50, 60)
# Create a hodograph
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, np.hypot(u, v))
# Show the plot
plt.show()
| Python | 0 |
130234f0f62c04b3cc0a4b20f0de789959abf4c9 | Change default zoom to 16 and make it overridable | molly/maps/__init__.py | molly/maps/__init__.py | from molly.maps.osm.utils import fit_to_map
class Map:
"""
An object which represents a Map. This should be added to a context and then
passed to @C{render_map} in your template to get the appropriate HTML
"""
def __init__(self, centre_point, points, min_points, zoom, width, height):
"""
@param centre_point: A tuple of longitude, latitude and colour
corresponding to the "centre" of the map. This is
NOT necessarily the central latitude/longitude of
the generated image, but simply a special marker
which is indicated with a star.
@type centre_point: (float, float, str) or None
@param points: An (ordered) list of points to be plotted on the map.
These are indicated on the map with numbered markers.
This list consists of tuples of longitude, latitude and a
string indicating the colours of the markers to be
rendered.
@type points: [(float, float, str)]
@param min_points: The minimum number of points to be displayed on the
resulting map
@type min_points: int
@param zoom: A bound on the maximum zoom level to be rendered. If this
zoom level is too small to fit @C{min_points} points on it,
then the map will be zoomed out further to fit in. If this
is None, then this is equivalent to the smallest zoom
level.
@type zoom: int
@param width: The width of the generated map image, in pixels
@type width: int
@param height: The height of the generated map image, in pixels
@type height: int
"""
self.centre_point = centre_point
self.min_points = min_points
self.width = width
self.height = height
self.static_map_hash, (self.points, self.zoom) = fit_to_map(
centre_point = centre_point,
points = points,
min_points = min_points,
zoom = zoom,
width = width,
height = height,
)
def map_from_point(point, width, height, colour='green', zoom=16):
"""
A shortcut which renders a simple map containing only one point rendered as
a star
"""
return Map((point[0], point[1], colour), [], 1, zoom, width, height) | from molly.maps.osm.utils import fit_to_map
class Map:
"""
An object which represents a Map. This should be added to a context and then
passed to @C{render_map} in your template to get the appropriate HTML
"""
def __init__(self, centre_point, points, min_points, zoom, width, height):
"""
@param centre_point: A tuple of longitude, latitude and colour
corresponding to the "centre" of the map. This is
NOT necessarily the central latitude/longitude of
the generated image, but simply a special marker
which is indicated with a star.
@type centre_point: (float, float, str) or None
@param points: An (ordered) list of points to be plotted on the map.
These are indicated on the map with numbered markers.
This list consists of tuples of longitude, latitude and a
string indicating the colours of the markers to be
rendered.
@type points: [(float, float, str)]
@param min_points: The minimum number of points to be displayed on the
resulting map
@type min_points: int
@param zoom: A bound on the maximum zoom level to be rendered. If this
zoom level is too small to fit @C{min_points} points on it,
then the map will be zoomed out further to fit in. If this
is None, then this is equivalent to the smallest zoom
level.
@type zoom: int
@param width: The width of the generated map image, in pixels
@type width: int
@param height: The height of the generated map image, in pixels
@type height: int
"""
self.centre_point = centre_point
self.min_points = min_points
self.width = width
self.height = height
self.static_map_hash, (self.points, self.zoom) = fit_to_map(
centre_point = centre_point,
points = points,
min_points = min_points,
zoom = zoom,
width = width,
height = height,
)
def map_from_point(point, width, height, colour='green'):
"""
A shortcut which renders a simple map containing only one point rendered as
a star
"""
return Map((point[0], point[1], colour), [], 1, 18, width, height) | Python | 0 |
3b0865bbfcee18afb842cc9f50f8c83c0d70f221 | Add the other v ;-). | sphinx/fabfile.py | sphinx/fabfile.py | from fabric.api import run, env, roles
from fabric.contrib.files import exists
from fabric.contrib.project import rsync_project
import sys
sys.path.append("source")
import conf
env.roledefs = {
'web': ['bokeh.pydata.org']
}
env.user = "bokeh"
@roles('web')
def deploy(v=None):
if v is None:
v = conf.version
elif v == "latest":
raise RuntimeError("You can not pass 'latest' as fab argument. Use "
"fab latest:x.x.x instead.")
# make a backup of the old directory
run("rm -rf /www/bokeh/en/%s.bak" % v)
run("mkdir -p /www/bokeh/en/%s" % v)
run("cp -ar /www/bokeh/en/%s /www/bokeh/en/%s.bak" % (v, v))
rsync_project(
local_dir="_build/html/",
remote_dir="/www/bokeh/en/%s" % v,
delete=True
)
# set permissions
run("chmod -R g+w /www/bokeh/en/%s" % v)
@roles('web')
def latest(v=None):
if v is None:
raise RuntimeError("You need to specify a version number: fab latest:x.x.x")
if exists("/www/bokeh/en/%s" % v):
# switch the current symlink to new docs
run("rm /www/bokeh/en/latest")
run("ln -s /www/bokeh/en/%s /www/bokeh/en/latest" % v)
else:
raise RuntimeError("We did not detect a %s docs version, please use "
"fab deploy:%s first." % (v, v))
| from fabric.api import run, env, roles
from fabric.contrib.files import exists
from fabric.contrib.project import rsync_project
import sys
sys.path.append("source")
import conf
env.roledefs = {
'web': ['bokeh.pydata.org']
}
env.user = "bokeh"
@roles('web')
def deploy(v=None):
if v is None:
v = conf.version
elif v == "latest":
raise RuntimeError("You can not pass 'latest' as fab argument. Use "
"fab latest:x.x.x instead.")
# make a backup of the old directory
run("rm -rf /www/bokeh/en/%s.bak" % v)
run("mkdir -p /www/bokeh/en/%s" % v)
run("cp -ar /www/bokeh/en/%s /www/bokeh/en/%s.bak" % (v, v))
rsync_project(
local_dir="_build/html/",
remote_dir="/www/bokeh/en/%s" % v,
delete=True
)
# set permissions
run("chmod -R g+w /www/bokeh/en/%s" % v)
@roles('web')
def latest(v=None):
if v is None:
raise RuntimeError("You need to specify a version number: fab latest:x.x.x")
if exists("/www/bokeh/en/%s" % v):
# switch the current symlink to new docs
run("rm /www/bokeh/en/latest")
run("ln -s /www/bokeh/en/%s /www/bokeh/en/latest" % v)
else:
raise RuntimeError("We did not detect a %s docs version, please use "
"fab deploy:%s first." % v)
| Python | 0.000005 |
b3ddba27c92f36ee9534903b43ff632daa148585 | Fix public body search index by indexing jurisdiction name | froide/publicbody/search_indexes.py | froide/publicbody/search_indexes.py | from django.conf import settings
from haystack import indexes
from haystack import site
from publicbody.models import PublicBody
from helper.searchindex import QueuedRealTimeSearchIndex
PUBLIC_BODY_BOOSTS = getattr(settings, "FROIDE_PUBLIC_BODY_BOOSTS", {})
class PublicBodyIndex(QueuedRealTimeSearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
name = indexes.CharField(model_attr='name', boost=1.5)
jurisdiction = indexes.CharField(model_attr='jurisdiction__name', default='')
topic_auto = indexes.EdgeNgramField(model_attr='topic_name')
topic_slug = indexes.CharField(model_attr='topic__slug')
name_auto = indexes.EdgeNgramField(model_attr='name')
url = indexes.CharField(model_attr='get_absolute_url')
def index_queryset(self):
"""Used when the entire index for model is updated."""
return PublicBody.objects.get_for_search_index()
def prepare(self, obj):
data = super(PublicBodyIndex, self).prepare(obj)
if obj.classification in PUBLIC_BODY_BOOSTS:
data['boost'] = PUBLIC_BODY_BOOSTS[obj.classification]
print "Boosting %s at %f" % (obj, data['boost'])
return data
site.register(PublicBody, PublicBodyIndex)
| from django.conf import settings
from haystack import indexes
from haystack import site
from publicbody.models import PublicBody
from helper.searchindex import QueuedRealTimeSearchIndex
PUBLIC_BODY_BOOSTS = getattr(settings, "FROIDE_PUBLIC_BODY_BOOSTS", {})
class PublicBodyIndex(QueuedRealTimeSearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
name = indexes.CharField(model_attr='name', boost=1.5)
jurisdiction = indexes.CharField(model_attr='jurisdiction', default='')
topic_auto = indexes.EdgeNgramField(model_attr='topic_name')
topic_slug = indexes.CharField(model_attr='topic__slug')
name_auto = indexes.EdgeNgramField(model_attr='name')
url = indexes.CharField(model_attr='get_absolute_url')
def index_queryset(self):
"""Used when the entire index for model is updated."""
return PublicBody.objects.get_for_search_index()
def prepare(self, obj):
data = super(PublicBodyIndex, self).prepare(obj)
if obj.classification in PUBLIC_BODY_BOOSTS:
data['boost'] = PUBLIC_BODY_BOOSTS[obj.classification]
print "Boosting %s at %f" % (obj, data['boost'])
return data
site.register(PublicBody, PublicBodyIndex)
| Python | 0.000723 |
8191d25e732b16a0121bd64320348108b9259892 | Add SecurityQuestionModelAdmin | molo/profiles/admin.py | molo/profiles/admin.py | import csv
from daterange_filter.filter import DateRangeFilter
from django.contrib import admin
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.sites import NotRegistered
from molo.profiles.admin_views import FrontendUsersAdminView
from molo.profiles.models import SecurityQuestion
from wagtailmodeladmin.options import ModelAdmin as WagtailModelAdmin
try:
admin.site.unregister(User)
except NotRegistered:
pass
def download_as_csv(ProfileUserAdmin, request, queryset):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;filename=export.csv'
writer = csv.writer(response)
user_model_fields = UserAdmin.list_display + ('date_joined', )
profile_fields = ('alias', 'mobile_number')
field_names = user_model_fields + profile_fields
writer.writerow(field_names)
for obj in queryset:
if obj.profile.alias:
obj.profile.alias = obj.profile.alias.encode('utf-8')
obj.username = obj.username.encode('utf-8')
obj.date_joined = obj.date_joined.strftime("%Y-%m-%d %H:%M")
writer.writerow(
[getattr(obj, field) for field in user_model_fields] +
[getattr(obj.profile, field) for field in profile_fields])
return response
download_as_csv.short_description = "Download selected as csv"
@admin.register(User)
class ProfileUserAdmin(UserAdmin):
list_display = UserAdmin.list_display + (
'date_joined', '_alias', '_mobile_number', '_date_of_birth')
list_filter = UserAdmin.list_filter + ('date_joined', )
actions = [download_as_csv]
def _alias(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.alias:
return obj.profile.alias
return ''
def _mobile_number(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.mobile_number:
return obj.profile.mobile_number
return ''
def _date_of_birth(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.date_of_birth:
return obj.profile.date_of_birth
return ''
# Below here is for Wagtail Admin
class FrontendUsersDateRangeFilter(DateRangeFilter):
template = 'admin/frontend_users_date_range_filter.html'
class FrontendUsersModelAdmin(WagtailModelAdmin, ProfileUserAdmin):
model = User
menu_label = 'End Users'
menu_icon = 'user'
menu_order = 600
index_view_class = FrontendUsersAdminView
add_to_settings_menu = True
list_display = ('username', '_alias', '_mobile_number', '_date_of_birth',
'email', 'date_joined', 'is_active')
list_filter = (('date_joined', FrontendUsersDateRangeFilter), 'is_active')
search_fields = ('username',)
def get_queryset(self, request):
queryset = User.objects.filter(is_staff=False)
return queryset
class SecurityQuestionModelAdmin(WagtailModelAdmin):
model = SecurityQuestion
menu_label = "Security Questions"
add_to_settings_menu = True
search_fields = ("questions",)
| import csv
from daterange_filter.filter import DateRangeFilter
from django.contrib import admin
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.sites import NotRegistered
from molo.profiles.admin_views import FrontendUsersAdminView
from wagtailmodeladmin.options import ModelAdmin as WagtailModelAdmin
try:
admin.site.unregister(User)
except NotRegistered:
pass
def download_as_csv(ProfileUserAdmin, request, queryset):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;filename=export.csv'
writer = csv.writer(response)
user_model_fields = UserAdmin.list_display + ('date_joined', )
profile_fields = ('alias', 'mobile_number')
field_names = user_model_fields + profile_fields
writer.writerow(field_names)
for obj in queryset:
if obj.profile.alias:
obj.profile.alias = obj.profile.alias.encode('utf-8')
obj.username = obj.username.encode('utf-8')
obj.date_joined = obj.date_joined.strftime("%Y-%m-%d %H:%M")
writer.writerow(
[getattr(obj, field) for field in user_model_fields] +
[getattr(obj.profile, field) for field in profile_fields])
return response
download_as_csv.short_description = "Download selected as csv"
@admin.register(User)
class ProfileUserAdmin(UserAdmin):
list_display = UserAdmin.list_display + (
'date_joined', '_alias', '_mobile_number', '_date_of_birth')
list_filter = UserAdmin.list_filter + ('date_joined', )
actions = [download_as_csv]
def _alias(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.alias:
return obj.profile.alias
return ''
def _mobile_number(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.mobile_number:
return obj.profile.mobile_number
return ''
def _date_of_birth(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.date_of_birth:
return obj.profile.date_of_birth
return ''
# Below here is for Wagtail Admin
class FrontendUsersDateRangeFilter(DateRangeFilter):
template = 'admin/frontend_users_date_range_filter.html'
class FrontendUsersModelAdmin(WagtailModelAdmin, ProfileUserAdmin):
model = User
menu_label = 'End Users'
menu_icon = 'user'
menu_order = 600
index_view_class = FrontendUsersAdminView
add_to_settings_menu = True
list_display = ('username', '_alias', '_mobile_number', '_date_of_birth',
'email', 'date_joined', 'is_active')
list_filter = (('date_joined', FrontendUsersDateRangeFilter), 'is_active')
search_fields = ('username',)
def get_queryset(self, request):
queryset = User.objects.filter(is_staff=False)
return queryset
| Python | 0 |
6499aecb18104114d47707ba4c1080bb817f7ccc | Update loadlogs.py | logger/loadlogs.py | logger/loadlogs.py | #!/usr/bin/env python
from tools import *
from ratchet import *
from logaccess_config import *
# Retrieving from CouchDB a Title dictionary as: dict['bjmbr']=XXXX-XXXX
acrondict = getTitles()
proc_coll = get_proc_collection()
allowed_issns = []
for key, issn in acrondict.items():
allowed_issns.append(issn)
if acrondict:
for logdir in get_logdirs():
print "listing log files at: " + logdir
for logfile in get_files_in_logdir(logdir):
if log_was_processed(proc_coll, logfile):
continue
else:
print "processing: {0}".format(logfile)
reg_logfile(proc_coll, logfile)
rq = RatchetQueue(limit=5)
for line in get_file_lines(logfile):
parsed_line = parse_apache_line(line, acrondict)
if parsed_line:
if parsed_line['access_type'] == "PDF":
pdfid = parsed_line['pdf_path']
issn = parsed_line['pdf_issn']
rq.register_download_access(pdfid, issn, parsed_line['iso_date'])
if parsed_line['access_type'] == "HTML":
if is_allowed_query(parsed_line['query_string'], allowed_issns):
script = parsed_line['query_string']['script'][0]
pid = parsed_line['query_string']['pid'][0].upper().replace('S', '')
if script == "sci_serial":
rq.register_journal_access(pid, parsed_line['iso_date'])
elif script == "sci_abstract":
rq.register_abstract_access(pid, parsed_line['iso_date'])
elif script == "sci_issuetoc":
rq.register_toc_access(pid, parsed_line['iso_date'])
elif script == "sci_arttext":
rq.register_article_access(pid, parsed_line['iso_date'])
elif script == "sci_pdf":
rq.register_pdf_access(pid, parsed_line['iso_date'])
elif script == "sci_home":
rq.register_home_access(pid, parsed_line['iso_date'])
elif script == "sci_issues":
rq.register_issues_access(pid, parsed_line['iso_date'])
elif script == "sci_alphabetic":
rq.register_alpha_access(pid, parsed_line['iso_date'])
rq.send()
else:
print "Connection to CouchDB Fail"
| #!/usr/bin/env python
from tools import *
from ratchet import *
from logaccess_config import *
# Retrieving from CouchDB a Title dictionary as: dict['bjmbr']=XXXX-XXXX
acrondict = getTitles()
proc_coll = get_proc_collection()
allowed_issns = []
for key, issn in acrondict.items():
allowed_issns.append(issn)
if acrondict:
for logdir in get_logdirs():
print "listing log files at: " + logdir
for logfile in get_files_in_logdir(logdir):
if log_was_processed(proc_coll, logfile):
continue
else:
print "processing: {0}".format(logfile)
reg_logfile(proc_coll, logfile)
rq = RatchetQueue(limit=100)
for line in get_file_lines(logfile):
parsed_line = parse_apache_line(line, acrondict)
if parsed_line:
if parsed_line['access_type'] == "PDF":
pdfid = parsed_line['pdf_path']
issn = parsed_line['pdf_issn']
rq.register_download_access(pdfid, issn, parsed_line['iso_date'])
if parsed_line['access_type'] == "HTML":
if is_allowed_query(parsed_line['query_string'], allowed_issns):
script = parsed_line['query_string']['script'][0]
pid = parsed_line['query_string']['pid'][0].upper().replace('S', '')
if script == "sci_serial":
rq.register_journal_access(pid, parsed_line['iso_date'])
elif script == "sci_abstract":
rq.register_abstract_access(pid, parsed_line['iso_date'])
elif script == "sci_issuetoc":
rq.register_toc_access(pid, parsed_line['iso_date'])
elif script == "sci_arttext":
rq.register_article_access(pid, parsed_line['iso_date'])
elif script == "sci_pdf":
rq.register_pdf_access(pid, parsed_line['iso_date'])
elif script == "sci_home":
rq.register_home_access(pid, parsed_line['iso_date'])
elif script == "sci_issues":
rq.register_issues_access(pid, parsed_line['iso_date'])
elif script == "sci_alphabetic":
rq.register_alpha_access(pid, parsed_line['iso_date'])
rq.send()
else:
print "Connection to CouchDB Fail"
| Python | 0.000001 |
3da17a2f61daecc34772ead7e6caffa9da49bf48 | Add default values and shebang | 06-setPositionFromArgs.py | 06-setPositionFromArgs.py | #!/usr/bin/env python
# We have to import the minecraft api module to do anything in the minecraft world
from mcpi.minecraft import *
import sys
# this means that the file can be imported without executing anything in this code block
if __name__ == "__main__":
"""
First thing you do is create a connection to minecraft
This is like dialling a phone.
It sets up a communication line between your script and the minecraft world
"""
# Create a connection to Minecraft
# Any communication with the world must use this object
mc = Minecraft.create()
# Get the current tile/block that the player is located at in the world
playerPosition = mc.player.getTilePos()
# create the output message as a string
message = " you are at (" +str(playerPosition.x)+","+str(playerPosition.y)+","+str(playerPosition.z)+")"
# print to the python interpreter standard output (terminal or IDLE probably)
print(message)
# send message to the minecraft chat
mc.postToChat(message)
#Set Default values
newXposn = 0
newZposn = 0
numOfArgs = len(sys.argv)
if numOfArgs == 3:
newXposn = int(sys.argv[1])
newZposn = int(sys.argv[2])
else:
print("incorrect number of arguments")
sys.exit()
newYposn = mc.getHeight(newXposn, newZposn)
mc.player.setTilePos(newXposn, newYposn, newZposn)
# Get the current tile/block that the player is located at in the world
playerPosition = mc.player.getTilePos()
message = " you are now at (" +str(playerPosition.x)+","+str(playerPosition.y)+","+str(playerPosition.z)+")"
print(message)
mc.postToChat(message)
| # We have to import the minecraft api module to do anything in the minecraft world
from mcpi.minecraft import *
import sys
# this means that the file can be imported without executing anything in this code block
if __name__ == "__main__":
"""
First thing you do is create a connection to minecraft
This is like dialling a phone.
It sets up a communication line between your script and the minecraft world
"""
# Create a connection to Minecraft
# Any communication with the world must use this object
mc = Minecraft.create()
# Get the current tile/block that the player is located at in the world
playerPosition = mc.player.getTilePos()
# create the output message as a string
message = " you are at (" +str(playerPosition.x)+","+str(playerPosition.y)+","+str(playerPosition.z)+")"
# print to the python interpreter standard output (terminal or IDLE probably)
print(message)
# send message to the minecraft chat
mc.postToChat(message)
numOfArgs = len(sys.argv)
if numOfArgs == 3:
newXposn = int(sys.argv[1])
newZposn = int(sys.argv[2])
else:
print("incorrect number of arguments")
sys.exit()
newYposn = mc.getHeight(newXposn, newZposn)
mc.player.setTilePos(newXposn, newYposn, newZposn)
# Get the current tile/block that the player is located at in the world
playerPosition = mc.player.getTilePos()
message = " you are now at (" +str(playerPosition.x)+","+str(playerPosition.y)+","+str(playerPosition.z)+")"
print(message)
mc.postToChat(message)
| Python | 0 |
de381a56e87a21da1e82146da01bb546c5094ec4 | Print the traceback as well for debugging purposes. | scripts/asgard-deploy.py | scripts/asgard-deploy.py | #!/usr/bin/env python
import sys
import logging
import traceback
import click
from os import path
# Add top-level module path to sys.path before importing tubular code.
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from tubular import asgard
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
@click.command()
@click.option('--ami_id', envvar='AMI_ID', help='The ami-id to deploy', required=True)
def deploy(ami_id):
try:
asgard.deploy(ami_id)
except Exception, e:
traceback.print_exc()
click.secho("Error Deploying AMI: {0}.\nMessage: {1}".format(ami_id, e.message), fg='red')
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
deploy()
| #!/usr/bin/env python
import sys
import logging
import click
from os import path
# Add top-level module path to sys.path before importing tubular code.
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from tubular import asgard
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
@click.command()
@click.option('--ami_id', envvar='AMI_ID', help='The ami-id to deploy', required=True)
def deploy(ami_id):
try:
asgard.deploy(ami_id)
except Exception, e:
click.secho("Error Deploying AMI: {0}.\nMessage: {1}".format(ami_id, e.message), fg='red')
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
deploy()
| Python | 0 |
0a81356e0f8011f0764a8c28719d1371e5860656 | Make sure create_privatekml mgmt command produces unique names less than 100 chars; fail gracefully if not | lingcod/layers/management/commands/create_privatekml.py | lingcod/layers/management/commands/create_privatekml.py | from django.core.management.base import BaseCommand, AppCommand
from django.conf import settings
from optparse import make_option
import os
import glob
from lingcod.layers.models import PrivateKml
from django.contrib.auth.models import User, Group
class Command(BaseCommand):
help = "Populates the PrivateKml table from the PRIVATE_KML_ROOT contents .. a good starting point"
args = '[optional group name to share all KMLs with]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, groupname=None, *args, **options):
for pkml in PrivateKml.objects.all():
pkml.delete()
if groupname:
g = Group.objects.get(name=groupname)
if not os.path.exists(settings.PRIVATE_KML_ROOT):
raise Exception("Please create or set up a PRIVATE_KML_ROOT directory (currently set to %s" %
settings.PRIVATE_KML_ROOT)
for d in os.listdir(settings.PRIVATE_KML_ROOT):
path = os.path.join(settings.PRIVATE_KML_ROOT,d)
kmls = glob.glob(os.path.join(path,'*.km*'))
if len(kmls) == 0:
print "No KML/KMZ found in %s" % path
continue
for kml in kmls:
basename = os.path.basename(kml).split('.')[0]
privatekml_name = d+'_'+basename
try:
pkml = PrivateKml.objects.create(name=privatekml_name[:99],base_kml=kml)
if groupname:
pkml.sharing_groups.add(g)
print "Created %s from %s" % (pkml,kml)
except:
print "couldn't create privatekml from %s" % s
| from django.core.management.base import BaseCommand, AppCommand
from django.conf import settings
from optparse import make_option
import os
import glob
from lingcod.layers.models import PrivateKml
from django.contrib.auth.models import User, Group
class Command(BaseCommand):
help = "Populates the PrivateKml table from the PRIVATE_KML_ROOT contents .. a good starting point"
args = '[optional group name to share all KMLs with]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, groupname=None, *args, **options):
for pkml in PrivateKml.objects.all():
pkml.delete()
if groupname:
g = Group.objects.get(name=groupname)
if not os.path.exists(settings.PRIVATE_KML_ROOT):
raise Exception("Please create or set up a PRIVATE_KML_ROOT directory (currently set to %s" %
settings.PRIVATE_KML_ROOT)
for d in os.listdir(settings.PRIVATE_KML_ROOT):
path = os.path.join(settings.PRIVATE_KML_ROOT,d)
kmls = glob.glob(os.path.join(path,'*.km*'))
if len(kmls) == 0:
print "No KML/KMZ found in %s" % path
continue
for kml in kmls:
basename = os.path.basename(kml).split('.')[0]
pkml = PrivateKml.objects.create(name=d+"_"+basename,base_kml=kml)
if groupname:
pkml.sharing_groups.add(g)
print "Created %s from %s" % (pkml,kml)
| Python | 0 |
9a1921fb27b7073d9c79f6727766eb516478f403 | Bump version 0.6.0 (git sync solution) | cmscloud_client/__init__.py | cmscloud_client/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.6.0'
| # -*- coding: utf-8 -*-
__version__ = '0.5.4'
| Python | 0 |
9926cbb1919b96999d479f5a8d67e17ce71a1091 | Improve the get_nick a tiny amount | motobot/irc_message.py | motobot/irc_message.py | class IRCMessage:
""" Class to store and parse an IRC Message. """
def __init__(self, msg):
""" Parse a raw IRC message to IRCMessage. """
self.sender = None
self.nick = None
self.command = None
self.params = []
self.__parse_msg(msg)
def __parse_msg(self, msg):
if msg[0] == ':':
self.sender, msg = msg[1:].split(' ', 1)
self.nick = get_nick(self.sender)
if ' :' in msg:
msg, trailing = msg.split(' :', 1)
self.params = msg.split(' ')
self.params.append(trailing)
else:
self.params = msg.split(' ')
self.command = self.params.pop(0)
def __repr__(self):
""" Print the IRCMessage all nice 'n' pretty. """
return "Sender: {};\nCommand: {};\nParams: {};\n".format(
self.sender, self.command, self.params)
def action(message):
""" Make the message an action. """
return '\u0001ACTION {}\u0001'.format(message)
def get_nick(host):
""" Get the user's nick from a host. """
return host.split('!', 1)[0]
| class IRCMessage:
""" Class to store and parse an IRC Message. """
def __init__(self, msg):
""" Parse a raw IRC message to IRCMessage. """
self.sender = None
self.nick = None
self.command = None
self.params = []
self.__parse_msg(msg)
def __parse_msg(self, msg):
if msg[0] == ':':
self.sender, msg = msg[1:].split(' ', 1)
self.nick = get_nick(self.sender)
if ' :' in msg:
msg, trailing = msg.split(' :', 1)
self.params = msg.split(' ')
self.params.append(trailing)
else:
self.params = msg.split(' ')
self.command = self.params.pop(0)
def __repr__(self):
""" Print the IRCMessage all nice 'n' pretty. """
return "Sender: {};\nCommand: {};\nParams: {};\n".format(
self.sender, self.command, self.params)
def action(message):
""" Make the message an action. """
return '\u0001ACTION {}\u0001'.format(message)
def get_nick(host):
""" Get the user's nick from a host. """
return host.split('!')[0]
| Python | 0.000021 |
119ce47d9e876c345c2bc44751ccf04f0b226259 | Remove lie_system package dependency | components/lie_structures/setup.py | components/lie_structures/setup.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# package: lie_docking
# file: setup.py
#
# Part of ‘lie_docking’, a package providing molecular docking functionality
# for the LIEStudio package.
#
# Copyright © 2016 Marc van Dijk, VU University Amsterdam, the Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
distribution_name = 'lie_structures'
setup(
name=distribution_name,
version=0.1,
description='LIEStudio structure database module',
author='Marc van Dijk, VU University, Amsterdam, The Netherlands',
author_email='m4.van.dijk@vu.nl',
url='https://github.com/NLeSC/LIEStudio',
license='Apache Software License 2.0',
keywords='LIEStudio structures database',
platforms=['Any'],
packages=find_packages(),
package_data={'': ['*.json']},
py_modules=[distribution_name],
install_requires=['openbabel'],
include_package_data=True,
zip_safe=True,
entry_points={
'autobahn.twisted.wamplet': [
'wamp_services = lie_structures.wamp_services:make'
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Chemistry',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
],
)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# package: lie_docking
# file: setup.py
#
# Part of ‘lie_docking’, a package providing molecular docking functionality
# for the LIEStudio package.
#
# Copyright © 2016 Marc van Dijk, VU University Amsterdam, the Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
distribution_name = 'lie_structures'
setup(
name=distribution_name,
version=0.1,
description='LIEStudio structure database module',
author='Marc van Dijk, VU University, Amsterdam, The Netherlands',
author_email='m4.van.dijk@vu.nl',
url='https://github.com/NLeSC/LIEStudio',
license='Apache Software License 2.0',
keywords='LIEStudio structures database',
platforms=['Any'],
packages=find_packages(),
package_data={'': ['*.json']},
py_modules=[distribution_name],
install_requires=['lie_system', 'openbabel'],
include_package_data=True,
zip_safe=True,
entry_points={
'autobahn.twisted.wamplet': [
'wamp_services = lie_structures.wamp_services:make'
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Chemistry',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
],
)
| Python | 0 |
eb48fba5b3334437a752681df200c2bbefb0bc18 | change font to be purple | NEMbox/osdlyrics.py | NEMbox/osdlyrics.py | from PyQt4 import QtGui, QtCore, QtDBus
import sys
import os
from multiprocessing import Process
class Lyrics(QtGui.QWidget):
def __init__(self):
super(Lyrics, self).__init__()
self.initUI()
def initUI(self):
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.resize(900, 150)
self.text = u"OSD Lyrics for Musicbox"
self.setWindowTitle("Lyrics")
self.show()
@QtCore.pyqtSlot(str)
def refresh_lyrics(self, text):
self.text = text
self.repaint()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
self.drawText(event, qp)
qp.end()
def drawText(self, event, qp):
qp.setPen(QtGui.QColor(128, 0, 128))
qp.setFont(QtGui.QFont('Decorative', 16))
qp.drawText(event.rect(), QtCore.Qt.AlignCenter, self.text)
def show_lyrics():
app = QtGui.QApplication(sys.argv)
# lyrics_receiver = LyricsReceiver()
lyrics = Lyrics()
QtDBus.QDBusConnection.sessionBus().registerService('org.musicbox.Bus')
QtDBus.QDBusConnection.sessionBus().registerObject('/', lyrics, QtDBus.QDBusConnection.ExportAllSlots)
sys.exit(app.exec_())
def show_lyrics_new_process():
p = Process(target=show_lyrics)
p.start()
# p.join()
| from PyQt4 import QtGui, QtCore, QtDBus
import sys
import os
from multiprocessing import Process
class Lyrics(QtGui.QWidget):
def __init__(self):
super(Lyrics, self).__init__()
self.initUI()
def initUI(self):
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.resize(900, 150)
self.text = u"OSD Lyrics for Musicbox"
self.setWindowTitle("Lyrics")
self.show()
@QtCore.pyqtSlot(str)
def refresh_lyrics(self, text):
self.text = text
self.repaint()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
self.drawText(event, qp)
qp.end()
def drawText(self, event, qp):
qp.setPen(QtGui.QColor(0, 0, 0))
qp.setFont(QtGui.QFont('Decorative', 16))
qp.drawText(event.rect(), QtCore.Qt.AlignCenter, self.text)
def show_lyrics():
app = QtGui.QApplication(sys.argv)
# lyrics_receiver = LyricsReceiver()
lyrics = Lyrics()
QtDBus.QDBusConnection.sessionBus().registerService('org.musicbox.Bus')
QtDBus.QDBusConnection.sessionBus().registerObject('/', lyrics, QtDBus.QDBusConnection.ExportAllSlots)
sys.exit(app.exec_())
def show_lyrics_new_process():
p = Process(target=show_lyrics)
p.start()
# p.join()
| Python | 0.999989 |
c61d4c6df77fe505074c81eebaec938c6716d9ab | Create columns before querying them. | sqlaload/query.py | sqlaload/query.py | import logging
from itertools import count
from sqlalchemy.sql import expression, and_
from sqlaload.schema import _ensure_columns
log = logging.getLogger(__name__)
def resultiter(rp):
""" SQLAlchemy ResultProxies are not iterable to get a
list of dictionaries. This is to wrap them. """
keys = rp.keys()
while True:
row = rp.fetchone()
if row is None:
break
yield dict(zip(keys, row))
def find_one(engine, table, **kw):
res = list(find(engine, table, _limit=1, **kw))
if not len(res):
return None
return res[0]
def find(engine, table, _limit=None, _step=5000, _offset=0,
order_by=None, **kw):
_ensure_columns(engine, table, kw)
if order_by is None:
order_by = [table.c.id.asc()]
qargs = []
try:
for col, val in kw.items():
qargs.append(table.c[col]==val)
except KeyError:
return
for i in count():
qoffset = _offset + (_step * i)
qlimit = _step
if _limit is not None:
qlimit = min(_limit-(_step*i), _step)
if qlimit <= 0:
break
q = table.select(whereclause=and_(*qargs), limit=qlimit,
offset=qoffset, order_by=order_by)
rows = list(resultiter(engine.execute(q)))
if not len(rows):
return
for row in rows:
yield row
def distinct(engine, table, *columns):
columns = [table.c[c] for c in columns]
q = expression.select(columns, distinct=True)
return list(resultiter(engine.execute(q)))
def all(engine, table):
return find(engine, table)
| import logging
from itertools import count
from sqlalchemy.sql import expression, and_
log = logging.getLogger(__name__)
def resultiter(rp):
""" SQLAlchemy ResultProxies are not iterable to get a
list of dictionaries. This is to wrap them. """
keys = rp.keys()
while True:
row = rp.fetchone()
if row is None:
break
yield dict(zip(keys, row))
def find_one(engine, table, **kw):
res = list(find(engine, table, _limit=1, **kw))
if not len(res):
return None
return res[0]
def find(engine, table, _limit=None, _step=5000, _offset=0,
order_by=None, **kw):
if order_by is None:
order_by = [table.c.id.asc()]
qargs = []
try:
for col, val in kw.items():
qargs.append(table.c[col]==val)
except KeyError:
return
for i in count():
qoffset = _offset + (_step * i)
qlimit = _step
if _limit is not None:
qlimit = min(_limit-(_step*i), _step)
if qlimit <= 0:
break
q = table.select(whereclause=and_(*qargs), limit=qlimit,
offset=qoffset, order_by=order_by)
rows = list(resultiter(engine.execute(q)))
if not len(rows):
return
for row in rows:
yield row
def distinct(engine, table, *columns):
columns = [table.c[c] for c in columns]
q = expression.select(columns, distinct=True)
return list(resultiter(engine.execute(q)))
def all(engine, table):
return find(engine, table)
| Python | 0 |
0c4e6ff26d716bf20a1a7c36a4e3e363a1101c2a | add forced/default to plexpy.library.stream | Contents/Libraries/Shared/plex/objects/library/stream.py | Contents/Libraries/Shared/plex/objects/library/stream.py | from plex.objects.core.base import Descriptor, Property
class Stream(Descriptor):
id = Property(type=int)
index = Property(type=int)
stream_type = Property('streamType', type=int)
selected = Property(type=bool)
forced = Property(type=bool)
default = Property(type=bool)
title = Property
duration = Property(type=int)
codec = Property
codec_id = Property('codecID')
bit_depth = Property('bitDepth', type=int)
chroma_subsampling = Property('chromaSubsampling')
color_space = Property('colorSpace')
width = Property(type=int)
height = Property(type=int)
bitrate = Property(type=int)
bitrate_mode = Property('bitrateMode')
channels = Property(type=int)
sampling_rate = Property('samplingRate', type=int)
frame_rate = Property('frameRate')
profile = Property
scan_type = Property('scanType')
language = Property('language')
language_code = Property('languageCode')
bvop = Property(type=int)
gmc = Property(type=int)
level = Property(type=int)
qpel = Property(type=int)
@classmethod
def from_node(cls, client, node):
items = []
for genre in cls.helpers.findall(node, 'Stream'):
_, obj = Stream.construct(client, genre, child=True)
items.append(obj)
return [], items
| from plex.objects.core.base import Descriptor, Property
class Stream(Descriptor):
id = Property(type=int)
index = Property(type=int)
stream_type = Property('streamType', type=int)
selected = Property(type=bool)
title = Property
duration = Property(type=int)
codec = Property
codec_id = Property('codecID')
bit_depth = Property('bitDepth', type=int)
chroma_subsampling = Property('chromaSubsampling')
color_space = Property('colorSpace')
width = Property(type=int)
height = Property(type=int)
bitrate = Property(type=int)
bitrate_mode = Property('bitrateMode')
channels = Property(type=int)
sampling_rate = Property('samplingRate', type=int)
frame_rate = Property('frameRate')
profile = Property
scan_type = Property('scanType')
language = Property('language')
language_code = Property('languageCode')
bvop = Property(type=int)
gmc = Property(type=int)
level = Property(type=int)
qpel = Property(type=int)
@classmethod
def from_node(cls, client, node):
items = []
for genre in cls.helpers.findall(node, 'Stream'):
_, obj = Stream.construct(client, genre, child=True)
items.append(obj)
return [], items
| Python | 0 |
e92a612ba231eebb8dbe7ac42d24ac002a89fbe1 | add docstring | frappe/utils/logger.py | frappe/utils/logger.py | # imports - compatibility imports
from __future__ import unicode_literals
# imports - standard imports
import logging
import os
from logging.handlers import RotatingFileHandler
# imports - third party imports
from six import text_type
# imports - module imports
import frappe
default_log_level = logging.DEBUG
site = getattr(frappe.local, 'site', None)
def get_logger(module, with_more_info=False, _site=None):
"""Application Logger for your given module
Args:
module (str): Name of your logger and consequently your log file.
with_more_info (bool, optional): Will log the form dict using the SiteContextFilter. Defaults to False.
_site (str, optional): If set, validates the current site context with the passed value. The `frappe.web` logger uses this to determine that the application is logging information related to the logger called. Defaults to None.
Returns:
<class 'logging.Logger'>: Returns a Python logger object with Site and Bench level logging capabilities.
"""
global site
def allow_site():
allow = False
if site: allow = True
if _site: allow = site == _site
return allow
if module in frappe.loggers:
return frappe.loggers[module]
if not module:
module = "frappe"
with_more_info = True
logfile = module + '.log'
site = getattr(frappe.local, 'site', None)
LOG_FILENAME = os.path.join('..', 'logs', logfile)
logger = logging.getLogger(module)
logger.setLevel(frappe.log_level or default_log_level)
logger.propagate = False
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=100_000, backupCount=20)
logger.addHandler(handler)
if allow_site():
SITELOG_FILENAME = os.path.join(site, 'logs', logfile)
site_handler = RotatingFileHandler(SITELOG_FILENAME, maxBytes=100_000, backupCount=20)
site_handler.setFormatter(formatter)
logger.addHandler(site_handler)
if with_more_info:
handler.addFilter(SiteContextFilter())
handler.setFormatter(formatter)
frappe.loggers[module] = logger
return logger
class SiteContextFilter(logging.Filter):
"""This is a filter which injects request information (if available) into the log."""
def filter(self, record):
if "Form Dict" not in text_type(record.msg):
record.msg = text_type(record.msg) + "\nSite: {0}\nForm Dict: {1}".format(site, getattr(frappe.local, 'form_dict', None))
return True
def set_log_level(level):
'''Use this method to set log level to something other than the default DEBUG'''
frappe.log_level = getattr(logging, (level or '').upper(), None) or default_log_level
frappe.loggers = {}
| # imports - compatibility imports
from __future__ import unicode_literals
# imports - standard imports
import logging
import os
from logging.handlers import RotatingFileHandler
# imports - third party imports
from six import text_type
# imports - module imports
import frappe
default_log_level = logging.DEBUG
site = getattr(frappe.local, 'site', None)
def get_logger(module, with_more_info=False, _site=None):
global site
if module in frappe.loggers:
return frappe.loggers[module]
if not module:
module = "frappe"
with_more_info = True
logfile = module + '.log'
site = getattr(frappe.local, 'site', None)
LOG_FILENAME = os.path.join('..', 'logs', logfile)
logger = logging.getLogger(module)
logger.setLevel(frappe.log_level or default_log_level)
logger.propagate = False
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=100_000, backupCount=20)
logger.addHandler(handler)
#
if site == _site:
SITELOG_FILENAME = os.path.join(site, 'logs', logfile)
site_handler = RotatingFileHandler(SITELOG_FILENAME, maxBytes=100_000, backupCount=20)
site_handler.setFormatter(formatter)
logger.addHandler(site_handler)
if with_more_info:
handler.addFilter(SiteContextFilter())
handler.setFormatter(formatter)
frappe.loggers[module] = logger
return logger
class SiteContextFilter(logging.Filter):
"""This is a filter which injects request information (if available) into the log."""
def filter(self, record):
if "Form Dict" not in text_type(record.msg):
record.msg = text_type(record.msg) + "\nSite: {0}\nForm Dict: {1}".format(site, getattr(frappe.local, 'form_dict', None))
return True
def set_log_level(level):
'''Use this method to set log level to something other than the default DEBUG'''
frappe.log_level = getattr(logging, (level or '').upper(), None) or default_log_level
frappe.loggers = {}
| Python | 0.000005 |
24fbe55a3517e50f4d158bbb7b8857f8f10dc148 | Use argparse to parse julia-py arguments | src/julia/julia_py.py | src/julia/julia_py.py | """
Launch Julia through PyJulia.
"""
from __future__ import print_function, absolute_import
import argparse
import os
import sys
from .api import LibJulia
from .core import enable_debug
from .tools import julia_py_executable
def julia_py(julia, pyjulia_debug, jl_args):
if pyjulia_debug:
enable_debug()
os.environ["_PYJULIA_JULIA_PY"] = julia_py_executable()
os.environ["_PYJULIA_PATCH_JL"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "patch.jl"
)
api = LibJulia.load(julia=julia)
api.init_julia(jl_args)
code = 1
if api.jl_eval_string(b"""Base.include(Main, ENV["_PYJULIA_PATCH_JL"])"""):
if api.jl_eval_string(b"Base.invokelatest(Base._start)"):
code = 0
api.jl_atexit_hook(code)
sys.exit(code)
class CustomFormatter(
argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter
):
pass
def parse_args(args, **kwargs):
options = dict(
prog="julia-py",
usage="%(prog)s [--julia JULIA] [--pyjulia-debug] [<julia arguments>...]",
formatter_class=CustomFormatter,
description=__doc__,
)
options.update(kwargs)
parser = argparse.ArgumentParser(**options)
parser.add_argument(
"--julia",
default="julia",
help="""
Julia `executable` used by PyJulia.
""",
)
parser.add_argument(
"--pyjulia-debug",
action="store_true",
help="""
Print PyJulia's debugging messages to standard error.
""",
)
ns, jl_args = parser.parse_known_args(args)
ns.jl_args = jl_args
return ns
def main(args=None, **kwargs):
julia_py(**vars(parse_args(args, **kwargs)))
if __name__ == "__main__":
main()
| from __future__ import print_function, absolute_import
from argparse import Namespace
import os
import sys
from .api import LibJulia
from .tools import julia_py_executable
def parse_args(args):
ns = Namespace(julia="julia")
jl_args = list(args)
if len(jl_args) >= 2 and jl_args[0] == "--julia":
ns.julia = jl_args[1]
jl_args = jl_args[2:]
elif len(jl_args) >= 1 and jl_args[0].startswith("--julia="):
ns.julia = jl_args[0][len("--julia=") :]
jl_args = jl_args[1:]
return ns, jl_args
def main(args=None):
if args is None:
args = sys.argv[1:]
ns, jl_args = parse_args(args)
os.environ["_PYJULIA_JULIA_PY"] = julia_py_executable()
os.environ["_PYJULIA_PATCH_JL"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "patch.jl"
)
api = LibJulia.load(julia=ns.julia)
api.init_julia(jl_args)
code = 1
if api.jl_eval_string(b"""Base.include(Main, ENV["_PYJULIA_PATCH_JL"])"""):
if api.jl_eval_string(b"Base.invokelatest(Base._start)"):
code = 0
api.jl_atexit_hook(code)
sys.exit(code)
if __name__ == "__main__":
main()
| Python | 0.000004 |
59e7fc5c924ebf8af66e0aeef990da55e84d3f9e | update to 3.30.1 | packages/dependencies/sqlite3.py | packages/dependencies/sqlite3.py | {
'repo_type' : 'archive',
'custom_cflag' : '-O2', # make sure we build it without -ffast-math
'download_locations' : [
{ 'url' : 'https://www.sqlite.org/2019/sqlite-autoconf-3300100.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : '8c5a50db089bd2a1b08dbc5b00d2027602ca7ff238ba7658fabca454d4298e60' }, ], },
{ 'url' : 'https://fossies.org/linux/misc/sqlite-autoconf-3300100.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : '8c5a50db089bd2a1b08dbc5b00d2027602ca7ff238ba7658fabca454d4298e60' }, ], },
],
'cflag_addition' : '-fexceptions -DSQLITE_ENABLE_COLUMN_METADATA=1 -DSQLITE_USE_MALLOC_H=1 -DSQLITE_USE_MSIZE=1 -DSQLITE_DISABLE_DIRSYNC=1 -DSQLITE_ENABLE_RTREE=1 -fno-strict-aliasing',
'configure_options': '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static --enable-threadsafe --disable-editline --enable-readline --enable-json1 --enable-fts5 --enable-session',
'depends_on': (
'zlib',
),
'update_check' : { 'url' : 'https://www.sqlite.org/index.html', 'type' : 'httpregex', 'regex' : r'<a href="releaselog/.*\.html">Version (?P<version_num>[\d.]+)<\/a>' },
'_info' : { 'version' : '3.30.1', 'fancy_name' : 'libsqlite3' },
} | {
'repo_type' : 'archive',
'custom_cflag' : '-O2', # make sure we build it without -ffast-math
'download_locations' : [
{ 'url' : 'https://www.sqlite.org/2019/sqlite-autoconf-3300000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : 'e0a8cf4c7a87455e55e10413d16f358ca121ccec687fe1301eac95e2d340fc58' }, ], },
{ 'url' : 'https://fossies.org/linux/misc/sqlite-autoconf-3300000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : 'e0a8cf4c7a87455e55e10413d16f358ca121ccec687fe1301eac95e2d340fc58' }, ], },
],
'cflag_addition' : '-fexceptions -DSQLITE_ENABLE_COLUMN_METADATA=1 -DSQLITE_USE_MALLOC_H=1 -DSQLITE_USE_MSIZE=1 -DSQLITE_DISABLE_DIRSYNC=1 -DSQLITE_ENABLE_RTREE=1 -fno-strict-aliasing',
'configure_options': '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static --enable-threadsafe --disable-editline --enable-readline --enable-json1 --enable-fts5 --enable-session',
'depends_on': (
'zlib',
),
'update_check' : { 'url' : 'https://www.sqlite.org/index.html', 'type' : 'httpregex', 'regex' : r'<a href="releaselog/.*\.html">Version (?P<version_num>[\d.]+)<\/a>' },
'_info' : { 'version' : '3.30.0', 'fancy_name' : 'libsqlite3' },
} | Python | 0.000001 |
6ad4796030aab2f6dbf8389b4030007d0fcf8761 | Update to test for mount setup | panoptes/test/mount/test_ioptron.py | panoptes/test/mount/test_ioptron.py | from nose.tools import raises
import panoptes
from panoptes.mount.ioptron import Mount
class TestIOptron():
@raises(AssertionError)
def test_no_config_no_commands(self):
""" Mount needs a config """
mount = Mount()
@raises(AssertionError)
def test_config_bad_commands(self):
""" Passes in a default config but blank commands, which should error """
mount = Mount(config={'mount': { 'model': 'ioptron', 'port':'/dev/ttyUSB0' } }, commands={'foo': 'bar'})
def test_config_auto_commands(self):
""" Passes in config like above, but no commands, so they should read from defaults """
mount = Mount(config={'mount': { 'model': 'ioptron', 'port':'/dev/ttyUSB0' } }) | from nose.tools import raises
import panoptes
from panoptes.mount.ioptron import Mount
class TestIOptron():
@raises(AssertionError)
def test_no_config_no_commands(self):
""" Mount needs a config """
mount = Mount()
@raises(AssertionError)
def test_config_no_commands(self):
""" """
mount = Mount(config={'mount': { 'model': 'ioptron', 'port':'/dev/ttyUSB0' } }, commands=dict()) | Python | 0 |
6b3e44b5e3ba66b870a584544a15a17036cf043a | fix syntax error | fruitScope/plotjson.py | fruitScope/plotjson.py | import json
import Gnuplot, Gnuplot.PlotItems, Gnuplot.funcutils
import argparse
import time, os, sys
import tempfile
import math
def check_dir(directory):
if not os.path.exists(directory):
print "Directory {} does not exist...creating...".format(directory)
os.makedirs(directory)
def main():
parser = argparse.ArgumentParser(description = "Plots and saves json objects in folder parseddata, uses a config file for plot setting inside cfg/.plotjson for gnuplot settings")
parser.add_argument('f', metavar = 'f', help="Filename of json file to be plotted.")
parser.add_argument('title', metavar = 't', help = "Title to be included in plot.")
parser.add_argument('-e', '--errorbars', help = "Use this flag to NOT plot with error bars, in case the plot is too messy.", action = "store_true")
args = parser.parse_args()
p = plotJson(args.errorbars)
p.load(args.f, args.title)
def hms(x):
m, s = divmod(x, 60)
h, m = divmod(m, 60)
return "%dh:%02dm:%02ds" % (h, m, s)
class plotJson():
def __init__(self, eb):
self.eb = eb
self.cfg = {}
try:
with open('cfg/.plotjson', 'rb+') as f:
x = f.read()
x = x.split('\n')
for i in x:
if len(i) > 0:
i = i.rstrip()
i = i.split('=')
self.cfg[i[0]] = i[1]
except:
print "No config file found...using default settings";
self.cfg = {
"format": "epscairo",
"xlabel": "t@level (ns)",
"ylabel": "Two-photon coincidence events"
}
def getx(self,hist, desc):
h_offset = float(desc['horiz_offset']) * 10 ** 9
h_binsize = float(desc['horiz_interval']) * 10 ** 9
s = []
for i in xrange(len(hist)):
s.append([(i * h_binsize) + h_offset, hist[i]])
return s
def load(self, path, title):
#fpath = 'parseddata/' + path
fpath = path
with open(fpath, 'rb+') as datafile:
data = json.load(datafile)
if not isinstance(data['hist'][0], list):
data['hist'] = self.getx(data['hist'], data['desc'])
duration = int(float(data['desc']['acq_duration']))
duration = hms(duration)
rawf = open(fpath + ".dat", 'wb+')
for i in xrange(len(data['hist'])):
_x = data['hist'][i][0]
_y = data['hist'][i][1]
_yerror = round(math.sqrt(_y),1)
rawf.write("{}\t{}\n".format(_x, _y))
rawf.close()
self.initPlot()
self.g('set title "{} {}, acquisition duration {}"'.format(path,title,duration))
self.g('set output "{}.eps"'.format(fpath))
if not self.eb:
self.g('f(x) = mean_y')
self.g('fit f(x) "{}" u 1:2 via mean_y'.format(fpath))
self.g('plot "{}" u 1:2:(sqrt(mean_y)) with yerrorbars pt 7 ps 0.2 '.format(fpath))
else:
self.g('plot "{}" u 1:2 w p pt 7 ps 0.2'.format(fpath))
def initPlot(self):
self.g = Gnuplot.Gnuplot()
self.g('set term {} transparent truecolor size 10,7.5'.format(self.cfg['format']))
self.g('set xlabel "{}"'.format(self.cfg['xlabel']))
self.g('set ylabel {}'.format(self.cfg['ylabel']))
main()
| import json
import Gnuplot, Gnuplot.PlotItems, Gnuplot.funcutils
import argparse
import time, os, sys
import tempfile
import math
def check_dir(directory):
if not os.path.exists(directory):
print "Directory {} does not exist...creating...".format(directory)
os.makedirs(directory)
def main():
parser = argparse.ArgumentParser(description = "Plots and saves json objects in folder parseddata, uses a config file for plot setting inside cfg/.plotjson for gnuplot settings")
parser.add_argument('f', metavar = 'f', help="Filename of json file to be plotted.")
parser.add_argument('title', metavar = 't', help = "Title to be included in plot.")
parser.add_argument('-e', '--errorbars', help = "Use this flag to NOT plot with error bars, in case the plot is too messy.", action = "store_true")
args = parser.parse_args()
p = plotJson(args.errorbars)
p.load(args.f, args.title)
def hms(x):
m, s = divmod(x, 60)
h, m = divmod(m, 60)
return "%dh:%02dm:%02ds" % (h, m, s)
class plotJson():
def __init__(self, eb):
self.eb = eb
self.cfg = {}
try:
with open('cfg/.plotjson', 'rb+') as f:
x = f.read()
x = x.split('\n')
for i in x:
if len(i) > 0:
i = i.rstrip()
i = i.split('=')
self.cfg[i[0]] = i[1]
except:
print "No config file found...using default settings";
self.cfg =
{
"format": "epscairo",
"xlabel": "t@level (ns)",
"ylabel": "Two-photon coincidence events"
}
def getx(self,hist, desc):
h_offset = float(desc['horiz_offset']) * 10 ** 9
h_binsize = float(desc['horiz_interval']) * 10 ** 9
s = []
for i in xrange(len(hist)):
s.append([(i * h_binsize) + h_offset, hist[i]])
return s
def load(self, path, title):
#fpath = 'parseddata/' + path
fpath = path
with open(fpath, 'rb+') as datafile:
data = json.load(datafile)
if not isinstance(data['hist'][0], list):
data['hist'] = self.getx(data['hist'], data['desc'])
duration = int(float(data['desc']['acq_duration']))
duration = hms(duration)
rawf = open(fpath + ".dat", 'wb+')
for i in xrange(len(data['hist'])):
_x = data['hist'][i][0]
_y = data['hist'][i][1]
_yerror = round(math.sqrt(_y),1)
rawf.write("{}\t{}\n".format(_x, _y))
rawf.close()
self.initPlot()
self.g('set title "{} {}, acquisition duration {}"'.format(path,title,duration))
self.g('set output "{}.eps"'.format(fpath))
if not self.eb:
self.g('f(x) = mean_y')
self.g('fit f(x) "{}" u 1:2 via mean_y'.format(fpath))
self.g('plot "{}" u 1:2:(sqrt(mean_y)) with yerrorbars pt 7 ps 0.2 '.format(fpath))
else:
self.g('plot "{}" u 1:2 w p pt 7 ps 0.2'.format(fpath))
def initPlot(self):
self.g = Gnuplot.Gnuplot()
self.g('set term {} transparent truecolor size 10,7.5'.format(self.cfg['format']))
self.g('set xlabel "{}"'.format(self.cfg['xlabel']))
self.g('set ylabel {}'.format(self.cfg['ylabel']))
main()
| Python | 0.000003 |
c884eae90e41577670b8bd194cc55b31e49f3f61 | fix data provider ref | src/py/crankshaft/crankshaft/clustering/kmeans.py | src/py/crankshaft/crankshaft/clustering/kmeans.py | from sklearn.cluster import KMeans
import numpy as np
from crankshaft.analysis_data_provider import AnalysisDataProvider
class Kmeans:
def __init__(self, data_provider=None):
if data_provider is None:
self.data_provider = AnalysisDataProvider()
else:
self.data_provider = data_provider
def spatial(self, query, no_clusters, no_init=20):
"""
find centers based on clusters of latitude/longitude pairs
query: SQL query that has a WGS84 geometry (the_geom)
"""
params = {"subquery": query,
"geom_col": "the_geom",
"id_col": "cartodb_id"}
data = self.data_provider.get_spatial_kmeans(params)
# Unpack query response
xs = data[0]['xs']
ys = data[0]['ys']
ids = data[0]['ids']
km = KMeans(n_clusters=no_clusters, n_init=no_init)
labels = km.fit_predict(zip(xs, ys))
return zip(ids, labels)
def nonspatial(self, subquery, colnames, num_clusters=5,
id_col='cartodb_id', standarize=True):
"""
query (string): A SQL query to retrieve the data required to do the
k-means clustering analysis, like so:
SELECT * FROM iris_flower_data
colnames (list): a list of the column names which contain the data
of interest, like so: ["sepal_width",
"petal_width",
"sepal_length",
"petal_length"]
num_clusters (int): number of clusters (greater than zero)
id_col (string): name of the input id_column
"""
import json
from sklearn import metrics
out_id_colname = 'rowids'
# TODO: need a random seed?
params = {"cols": colnames,
"subquery": subquery,
"id_col": id_col}
data = self.data_provider.get_nonspatial_kmeans(params, standarize)
# fill array with values for k-means clustering
if standarize:
cluster_columns = _scale_data(
_extract_columns(data, colnames))
else:
cluster_columns = _extract_columns(data, colnames)
print str(cluster_columns)
# TODO: decide on optimal parameters for most cases
# Are there ways of deciding parameters based on inputs?
kmeans = KMeans(n_clusters=num_clusters,
random_state=0).fit(cluster_columns)
centers = [json.dumps(dict(zip(colnames, c)))
for c in kmeans.cluster_centers_[kmeans.labels_]]
silhouettes = metrics.silhouette_samples(cluster_columns,
kmeans.labels_,
metric='sqeuclidean')
return zip(kmeans.labels_,
centers,
silhouettes,
data[0][out_id_colname])
# -- Preprocessing steps
def _extract_columns(data, colnames):
"""
Extract the features from the query and pack them into a NumPy array
data (list of dicts): result of the kmeans request
id_col_name (string): name of column which has the row id (not a
feature of the analysis)
"""
return np.array([data[0]['arr_{}'.format(c)] for c in colnames],
dtype=float).T
def _scale_data(features):
"""
Scale all input columns to center on 0 with a standard devation of 1
features (numpy matrix): features of dimension (n_features, n_samples)
"""
from sklearn.preprocessing import StandardScaler
return StandardScaler().fit_transform(features)
| from sklearn.cluster import KMeans
import numpy as np
from crankshaft.analysis_data_provider import AnalysisDataProvider
class Kmeans:
def __init__(self, data_provider=None):
if data_provider is None:
self.data_provider = AnalysisDataProvider()
else:
self.data_provider = data_provider
def spatial(self, query, no_clusters, no_init=20):
"""
find centers based on clusters of latitude/longitude pairs
query: SQL query that has a WGS84 geometry (the_geom)
"""
params = {"subquery": query,
"geom_col": "the_geom",
"id_col": "cartodb_id"}
data = self.data_provider.get_spatial_kmeans(params)
# Unpack query response
xs = data[0]['xs']
ys = data[0]['ys']
ids = data[0]['ids']
km = KMeans(n_clusters=no_clusters, n_init=no_init)
labels = km.fit_predict(zip(xs, ys))
return zip(ids, labels)
def nonspatial(self, subquery, colnames, num_clusters=5,
id_col='cartodb_id', standarize=True):
"""
query (string): A SQL query to retrieve the data required to do the
k-means clustering analysis, like so:
SELECT * FROM iris_flower_data
colnames (list): a list of the column names which contain the data
of interest, like so: ["sepal_width",
"petal_width",
"sepal_length",
"petal_length"]
num_clusters (int): number of clusters (greater than zero)
id_col (string): name of the input id_column
"""
import json
from sklearn import metrics
out_id_colname = 'rowids'
# TODO: need a random seed?
params = {"cols": colnames,
"subquery": subquery,
"id_col": id_col}
data = self.query_runner.get_nonspatial_kmeans(params, standarize)
# fill array with values for k-means clustering
if standarize:
cluster_columns = _scale_data(
_extract_columns(data, colnames))
else:
cluster_columns = _extract_columns(data, colnames)
print str(cluster_columns)
# TODO: decide on optimal parameters for most cases
# Are there ways of deciding parameters based on inputs?
kmeans = KMeans(n_clusters=num_clusters,
random_state=0).fit(cluster_columns)
centers = [json.dumps(dict(zip(colnames, c)))
for c in kmeans.cluster_centers_[kmeans.labels_]]
silhouettes = metrics.silhouette_samples(cluster_columns,
kmeans.labels_,
metric='sqeuclidean')
return zip(kmeans.labels_,
centers,
silhouettes,
data[0][out_id_colname])
# -- Preprocessing steps
def _extract_columns(data, colnames):
"""
Extract the features from the query and pack them into a NumPy array
data (list of dicts): result of the kmeans request
id_col_name (string): name of column which has the row id (not a
feature of the analysis)
"""
return np.array([data[0]['arr_{}'.format(c)] for c in colnames],
dtype=float).T
def _scale_data(features):
"""
Scale all input columns to center on 0 with a standard devation of 1
features (numpy matrix): features of dimension (n_features, n_samples)
"""
from sklearn.preprocessing import StandardScaler
return StandardScaler().fit_transform(features)
| Python | 0 |
4f0e0d4d92301dea408925d99001913e76a15ee1 | Update filterscan.py | lib/filterscan.py | lib/filterscan.py | try:
import os
import subprocess
from lib.core.core import Core
from lib.filter.filter import Filter
except ImportError, err:
from lib.core.core import Core
Core.print_error(err)
class FilterScan(Filter):
def __init__(self, args):
self.__args = args
Filter.__init__(self, [self.__args.pcap], self.__args, "filter")
def __run_cmd(self, cmd, file_name, result_set, logger):
output_file = "{0}{1}_{2}.txt".format(self._output_dir, file_name, os.path.basename(self.__args.pcap))
result_file = open(output_file, "w")
logger._logging("Filter: {0} parsing".format(file_name))
proc = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,)
if isinstance(result_set, (list, tuple)):
[ result_set.append(line) for line in iter(proc.stdout.readline, '') if line not in result_set ]
else:
for line in iter(proc.stdout.readline, ''):
try:
result_set[line.rstrip()] += 1
except:
result_set[line.rstrip()] = 1
if isinstance(result_set, (list, tuple)):
if len(result_set) > 10:
result_file.write("".join(result_set[1:10]))
else:
result_file.write("".join(result_set))
else:
for counter, value in enumerate(sorted(result_set, key=result_set.get, reverse=True)):
if counter == 10:
break
else:
result_file.write("{0} {1}\n".format(result_set[value], value))
def _run(self, logger):
logger._logging("START: Filter pcap file")
for file_name, tshark_cmd in self._filter_commands.iteritems():
result_set = {} if file_name.startswith("top10") else []
self.__run_cmd(tshark_cmd, file_name, result_set, logger)
logger._logging("STOP: Filter pcap file")
logger._logging("Finished Filtering. Results saved in {0} folder".format(self._output_dir))
|
try:
import subprocess
from lib.core.core import Core
from lib.filter.filter import Filter
except ImportError, err:
from lib.core.core import Core
Core.print_error(err)
class FilterScan(Filter):
def __init__(self, args):
Filter.__init__(self, [args.pcap], args, "filter")
print self._output_dir
def __run_cmd(self, cmd, file_name, result_set):
output_file = "{0}{1}.txt".format(self._output_dir, file_name)
result_file = open(output_file, "w")
proc = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,)
if isinstance(result_set, (list, tuple)):
for line in iter(proc.stdout.readline, ''):
if line not in result_set:
result_set.append(line)
else:
for line in iter(proc.stdout.readline, ''):
try:
result_set[line.rstrip()] += 1
except:
result_set[line.rstrip()] = 1
if isinstance(result_set, (list, tuple)):
if len(result_set) > 10:
result_file.write("".join(result_set[1:10]))
else:
result_file.write("".join(result_set))
else:
for counter, value in enumerate(sorted(result_set, key=result_set.get, reverse=True)):
if counter == 10:
break
else:
print result_set[value], value
def _run(self, logger):
for file_name, tshark_cmd in self._filter_commands.iteritems():
result_set = None
if file_name.startswith("top10"):
result_set = {}
else:
result_set = []
self.__run_cmd(tshark_cmd,file_name, result_set)
| Python | 0 |
bf9c799d1fb13098bd4bce65d44f86bb352b834a | Comment out an extensive validation | main.py | main.py | #!/usr/bin/python3
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Andrian Nord
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import ljd.rawdump.parser
import ljd.pseudoasm.writer
import ljd.ast.builder
import ljd.ast.validator
import ljd.ast.locals
import ljd.ast.slotworks
import ljd.ast.unwarper
import ljd.ast.mutator
import ljd.lua.writer
def dump(name, obj, level=0):
indent = level * '\t'
if name is not None:
prefix = indent + name + " = "
else:
prefix = indent
if isinstance(obj, (int, float, str)):
print(prefix + str(obj))
elif isinstance(obj, list):
print (prefix + "[")
for value in obj:
dump(None, value, level + 1)
print (indent + "]")
elif isinstance(obj, dict):
print (prefix + "{")
for key, value in obj.items():
dump(key, value, level + 1)
print (indent + "}")
else:
print (prefix + obj.__class__.__name__)
for key in dir(obj):
if key.startswith("__"):
continue
val = getattr(obj, key)
dump(key, val, level + 1)
def main():
file_in = sys.argv[1]
header, prototype = ljd.rawdump.parser.parse(file_in)
if not prototype:
return 1
# TODO: args
# ljd.pseudoasm.writer.write(sys.stdout, header, prototype)
ast = ljd.ast.builder.build(prototype)
assert ast is not None
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.mutator.pre_pass(ast)
# ljd.ast.validator.validate(ast, warped=True)
ljd.ast.locals.mark_locals(ast)
# ljd.ast.validator.validate(ast, warped=True)
ljd.ast.slotworks.eliminate_temporary(ast)
# ljd.ast.validator.validate(ast, warped=True)
if True:
ljd.ast.unwarper.unwarp(ast)
# ljd.ast.validator.validate(ast, warped=False)
ljd.ast.locals.mark_local_definitions(ast)
# ljd.ast.validator.validate(ast, warped=False)
ljd.ast.mutator.primary_pass(ast)
ljd.ast.validator.validate(ast, warped=False)
ljd.lua.writer.write(sys.stdout, ast)
return 0
if __name__ == "__main__":
retval = main()
sys.exit(retval)
# vim: ts=8 noexpandtab nosmarttab softtabstop=8 shiftwidth=8
| #!/usr/bin/python3
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Andrian Nord
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import ljd.rawdump.parser
import ljd.pseudoasm.writer
import ljd.ast.builder
import ljd.ast.validator
import ljd.ast.locals
import ljd.ast.slotworks
import ljd.ast.unwarper
import ljd.ast.mutator
import ljd.lua.writer
def dump(name, obj, level=0):
indent = level * '\t'
if name is not None:
prefix = indent + name + " = "
else:
prefix = indent
if isinstance(obj, (int, float, str)):
print(prefix + str(obj))
elif isinstance(obj, list):
print (prefix + "[")
for value in obj:
dump(None, value, level + 1)
print (indent + "]")
elif isinstance(obj, dict):
print (prefix + "{")
for key, value in obj.items():
dump(key, value, level + 1)
print (indent + "}")
else:
print (prefix + obj.__class__.__name__)
for key in dir(obj):
if key.startswith("__"):
continue
val = getattr(obj, key)
dump(key, val, level + 1)
def main():
file_in = sys.argv[1]
header, prototype = ljd.rawdump.parser.parse(file_in)
if not prototype:
return 1
# TODO: args
# ljd.pseudoasm.writer.write(sys.stdout, header, prototype)
ast = ljd.ast.builder.build(prototype)
assert ast is not None
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.mutator.pre_pass(ast)
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.locals.mark_locals(ast)
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.slotworks.eliminate_temporary(ast)
ljd.ast.validator.validate(ast, warped=True)
if True:
ljd.ast.unwarper.primary_pass(ast)
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.locals.mark_local_definitions(ast)
ljd.ast.validator.validate(ast, warped=False)
ljd.ast.mutator.primary_pass(ast)
ljd.ast.validator.validate(ast, warped=False)
ljd.lua.writer.write(sys.stdout, ast)
return 0
if __name__ == "__main__":
retval = main()
sys.exit(retval)
# vim: ts=8 noexpandtab nosmarttab softtabstop=8 shiftwidth=8
| Python | 0 |
3b6cc83cfea47550619d8a1d966131a1cc90f1c9 | clean up processes/threads | lib/ipf/engine.py | lib/ipf/engine.py |
###############################################################################
# Copyright 2012 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import copy
import json
import logging
import logging.config
import os
import sys
import time
import traceback
from ipf.error import WorkflowError
from ipf.home import IPF_HOME
from ipf.step import Step
from ipf.workflow import Workflow
#######################################################################################################################
logging.config.fileConfig(os.path.join(IPF_HOME,"etc","logging.conf"))
logger = logging.getLogger(__name__)
#######################################################################################################################
class WorkflowEngine(object):
def __init__(self):
pass
def run(self, workflow_file_name):
workflow = Workflow()
if os.path.isfile(workflow_file_name):
workflow.read(workflow_file_name)
else:
file_name = os.path.join(IPF_HOME,"etc","workflow",workflow_file_name)
if os.path.isfile(file_name):
workflow.read(file_name)
else:
raise WorkflowError("cannot open workflow file %s as a path or relative to %s/etc/workflow" % \
(workflow_file_name,IPF_HOME))
self._setDependencies(workflow)
logger.debug(workflow)
logger.info("starting workflow %s",workflow.name)
for step in workflow.steps:
step.start()
start_time = time.time()
steps_with_inputs = filter(self._sendNoMoreInputs,workflow.steps)
while self._anyAlive(workflow.steps):
if workflow.timeout is not None and time.time() - start_time > workflow.timeout:
logger.warn("time out, terminating workflow")
for step in workflow.steps:
if step.is_alive():
step.terminate()
break
time.sleep(0.1)
steps_with_inputs = filter(self._sendNoMoreInputs,steps_with_inputs)
for step in workflow.steps:
step.join()
if reduce(lambda b1,b2: b1 and b2, map(lambda step: step.exitcode == 0, workflow.steps)):
logger.info("workflow succeeded")
else:
logger.error("workflow failed")
for step in workflow.steps:
if step.exitcode == 0:
logger.info(" %10s succeeded (%s)",step.id,step.__class__.__name__)
else:
logger.error(" %10s failed (%s)",step.id,step.__class__.__name__)
def _anyAlive(self, steps):
return reduce(lambda b1,b2: b1 or b2, map(lambda step: step.is_alive(), steps), False)
def _sendNoMoreInputs(self, step):
if self._anyAlive(step.depends_on):
return True
logger.debug("no more inputs to step %s",step.id)
step.input_queue.put(None) # send None to indicate no more inputs
step.input_queue.close() # close the queue to stop the background thread
return False
def _setDependencies(self, workflow):
for step in workflow.steps:
step.depends_on = [] # [step, ...]
for step in workflow.steps:
for type in step.outputs:
for dstep in step.outputs[type]:
dstep.depends_on.append(step)
#######################################################################################################################
|
###############################################################################
# Copyright 2012 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import copy
import json
import logging
import logging.config
import os
import sys
import time
import traceback
from ipf.error import WorkflowError
from ipf.home import IPF_HOME
from ipf.step import Step
from ipf.workflow import Workflow
#######################################################################################################################
logging.config.fileConfig(os.path.join(IPF_HOME,"etc","logging.conf"))
logger = logging.getLogger(__name__)
#######################################################################################################################
class WorkflowEngine(object):
def __init__(self):
pass
def run(self, workflow_file_name):
workflow = Workflow()
if os.path.isfile(workflow_file_name):
workflow.read(workflow_file_name)
else:
file_name = os.path.join(IPF_HOME,"etc","workflow",workflow_file_name)
if os.path.isfile(file_name):
workflow.read(file_name)
else:
raise WorkflowError("cannot open workflow file %s as a path or relative to %s/etc/workflow" % \
(workflow_file_name,IPF_HOME))
self._setDependencies(workflow)
logger.debug(workflow)
logger.info("starting workflow %s",workflow.name)
for step in workflow.steps:
step.start()
start_time = time.time()
steps_with_inputs = filter(self._sendNoMoreInputs,workflow.steps)
while self._anyAlive(workflow.steps):
if workflow.timeout is not None and time.time() - start_time > workflow.timeout:
logger.warn("time out, terminating workflow")
for step in workflow.steps:
if step.is_alive():
step.terminate()
break
time.sleep(0.1)
steps_with_inputs = filter(self._sendNoMoreInputs,steps_with_inputs)
# wait again, in case we terminated
while self._anyAlive(workflow.steps):
time.sleep(0.1)
if reduce(lambda b1,b2: b1 and b2, map(lambda step: step.exitcode == 0, workflow.steps)):
logger.info("workflow succeeded")
else:
logger.error("workflow failed")
for step in workflow.steps:
if step.exitcode == 0:
logger.info(" %10s succeeded (%s)",step.id,step.__class__.__name__)
else:
logger.error(" %10s failed (%s)",step.id,step.__class__.__name__)
def _anyAlive(self, steps):
return reduce(lambda b1,b2: b1 or b2, map(lambda step: step.is_alive(), steps), False)
def _sendNoMoreInputs(self, step):
if self._anyAlive(step.depends_on):
return True
logger.debug("no more inputs to step %s",step.id)
step.input_queue.put(None)
return False
def _setDependencies(self, workflow):
for step in workflow.steps:
step.depends_on = [] # [step, ...]
for step in workflow.steps:
for type in step.outputs:
for dstep in step.outputs[type]:
dstep.depends_on.append(step)
#######################################################################################################################
| Python | 0.000001 |
c52d056091acf49624450cc2d1e01cbf0900a08f | Add a profiling option | main.py | main.py | #!/usr/bin/env python
import sys
from PyQt4.QtGui import QApplication as QApp
from gui.EditorWindow import MainWindow
def main():
import grammar.grammars
grammar.grammars.compileGrammars()
app = QApp(sys.argv)
ex = MainWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
profile = False
if profile:
import cProfile
cProfile.run('main()')
else:
main()
| #!/usr/bin/env python
import sys
from PyQt4.QtGui import QApplication as QApp
from gui.EditorWindow import MainWindow
def main():
import grammar.grammars
grammar.grammars.compileGrammars()
app = QApp(sys.argv)
ex = MainWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| Python | 0.00003 |
d951b11e9991c021e631299f0e22da8eb4c7d850 | comment out post-checkout undo demonstration | main.py | main.py | # this is the main file that get called
import os
import sys
import gitTA as git
import colorama
from colorama import Fore, Back # add color output to terminal: we want anything printed to be VERY visible to user
colorama.init() # called so that windows colors work
'''
modify this file! When git runs certain commands, it will run THIS main.py
which will trigger the functions you've decorated here with gitta.listen('event-name')
your methods can listen for the following events:
pre-push, pre-commit, # pre-x methods can be aborted by raising an exception
post-commit, post-checkout, post-merge
'''
# pre-* events can be aborted by raising an exception ???
@git.listen('pre-push')
def prepush(*args, **kwargs):
print(Fore.GREEN) # set so that ALL next prints will be green
print(args, kwargs)
@git.listen('pre-commit')
def precommit(*args, **kwargs):
print(Fore.GREEN)
print(args, kwargs)
@git.listen('post-commit')
def postcommit(*args, **kwargs):
print(Fore.GREEN)
print(args, kwargs)
@git.listen('post-checkout')
def postcheckout(*args, **kwargs):
print(Fore.GREEN) # set so that ALL next prints will be green
print(args, kwargs)
# branches = git.Branch()
# branches.undo_checkout(*args, **kwargs)
@git.listen('post-merge')
def postmerge(*args, **kwargs):
print(args, kwargs)
if __name__ == '__main__':
git.trigger(45, event='post-checkout') # example of what might get passed to postcheckout
# the garbled message that appears before (45, ) is the Fore.GREEN. On normal terminals this garbled output will NOT appear
# ['.gitta/py/main.py', 'pre-push', 'origin', 'https://github.com/lancekindle/test.git']
# ['.gitta/py/main.py', 'pre-commit']
# ['.gitta/py/main.py', 'post-commit']
| # this is the main file that get called
import os
import sys
import gitTA as git
import colorama
from colorama import Fore, Back # add color output to terminal: we want anything printed to be VERY visible to user
colorama.init() # called so that windows colors work
'''
modify this file! When git runs certain commands, it will run THIS main.py
which will trigger the functions you've decorated here with gitta.listen('event-name')
your methods can listen for the following events:
pre-push, pre-commit, # pre-x methods can be aborted by raising an exception
post-commit, post-checkout, post-merge
'''
# pre-* events can be aborted by raising an exception ???
@git.listen('pre-push')
def prepush(*args, **kwargs):
print(Fore.GREEN) # set so that ALL next prints will be green
print(args, kwargs)
@git.listen('pre-commit')
def precommit(*args, **kwargs):
print(Fore.GREEN)
print(args, kwargs)
@git.listen('post-commit')
def postcommit(*args, **kwargs):
print(Fore.GREEN)
print(args, kwargs)
@git.listen('post-checkout')
def postcheckout(*args, **kwargs):
print(Fore.GREEN) # set so that ALL next prints will be green
print(args, kwargs)
branches = git.Branch()
branches.undo_checkout(*args, **kwargs)
@git.listen('post-merge')
def postmerge(*args, **kwargs):
print(args, kwargs)
if __name__ == '__main__':
git.trigger(45, event='post-checkout') # example of what might get passed to postcheckout
# the garbled message that appears before (45, ) is the Fore.GREEN. On normal terminals this garbled output will NOT appear
# ['.gitta/py/main.py', 'pre-push', 'origin', 'https://github.com/lancekindle/test.git']
# ['.gitta/py/main.py', 'pre-commit']
# ['.gitta/py/main.py', 'post-commit']
| Python | 0 |
ea2b1dfd5d27f1b4a537b85e823bb7ba047887f1 | Wrap everything in a main function | main.py | main.py | import argparse
from os import exit
from encrypt import encrypt
from decrypt import decrypt
def main():
parser = argparse.ArgumentParser(prog='vic')
subparsers = parser.add_subparsers(help='sub-command help')
# Encryption subparser
parser_encrypt = subparsers.add_parser('encrypt', description='VIC cipher encrypter')
parser_encrypt.set_defaults(func=encrypt)
parser_encrypt.add_argument('-m', '--message',
dest='plaintext',
action='store',
required=True,
help='Message to be encrypted')
parser_encrypt.add_argument('-c', '--checkerboard-key',
dest='checkerboard_key',
action='store',
required=True,
help='Key for the straddling checkerboard')
parser_encrypt.add_argument('-p', '--passphrase',
dest='passphrase',
action='store',
required=True,
help='Passphrase used to derive keys')
parser_encrypt.add_argument('-M', '--message-id',
dest='message_id',
action='store',
required=True,
help='Unique and random message ID')
parser_encrypt.add_argument('-d', '--date',
dest='date',
action='store',
required=True,
help='Date used to derive keys and to insert the message ID group')
parser_encrypt.add_argument('-i', '--personal-id',
dest='personal_id',
action='store',
required=True,
help='Personal ID for generating transposition tables')
# Decryption subparser
parser_decrypt = subparsers.add_parser('decrypt', description='VIC cipher decrypter')
parser_decrypt.set_defaults(func=decrypt)
parser_decrypt.add_argument('-m', '--message',
dest='ciphertext',
action='store',
required=True,
help='Ciphertext to decrypt')
parser_decrypt.add_argument('-c', '--checkerboard-key',
dest='checkerboard_key',
action='store',
required=True,
help='Key for the straddling checkerboard')
parser_decrypt.add_argument('-p', '--passphrase',
dest='passphrase',
action='store',
required=True,
help='Passphrase used to derive keys')
parser_decrypt.add_argument('-d', '--date',
dest='date',
action='store',
required=True,
help='Date used to derive keys and to insert the message ID group')
parser_decrypt.add_argument('-i', '--personal-id',
dest='personal_id',
action='store',
required=True,
help='Personal ID for generating transposition tables')
args = parser.parse_args()
exit(args.func(args))
if __name__ == '__main__':
main()
| import argparse
from encrypt import encrypt
from decrypt import decrypt
parser = argparse.ArgumentParser(prog='vic')
subparsers = parser.add_subparsers(help='sub-command help')
# Encryption subparser
parser_encrypt = subparsers.add_parser('encrypt', description='VIC cipher encrypter')
parser_encrypt.set_defaults(func=encrypt)
parser_encrypt.add_argument('-m', '--message',
dest='plaintext',
action='store',
required=True,
help='Message to be encrypted')
parser_encrypt.add_argument('-c', '--checkerboard-key',
dest='checkerboard_key',
action='store',
required=True,
help='Key for the straddling checkerboard')
parser_encrypt.add_argument('-p', '--passphrase',
dest='passphrase',
action='store',
required=True,
help='Passphrase used to derive keys')
parser_encrypt.add_argument('-M', '--message-id',
dest='message_id',
action='store',
required=True,
help='Unique and random message ID')
parser_encrypt.add_argument('-d', '--date',
dest='date',
action='store',
required=True,
help='Date used to derive keys and to insert the message ID group')
parser_encrypt.add_argument('-i', '--personal-id',
dest='personal_id',
action='store',
required=True,
help='Personal ID for generating transposition tables')
# Decryption subparser
parser_decrypt = subparsers.add_parser('decrypt', description='VIC cipher decrypter')
parser_decrypt.set_defaults(func=decrypt)
parser_decrypt.add_argument('-m', '--message',
dest='ciphertext',
action='store',
required=True,
help='Ciphertext to decrypt')
parser_decrypt.add_argument('-c', '--checkerboard-key',
dest='checkerboard_key',
action='store',
required=True,
help='Key for the straddling checkerboard')
parser_decrypt.add_argument('-p', '--passphrase',
dest='passphrase',
action='store',
required=True,
help='Passphrase used to derive keys')
parser_decrypt.add_argument('-d', '--date',
dest='date',
action='store',
required=True,
help='Date used to derive keys and to insert the message ID group')
parser_decrypt.add_argument('-i', '--personal-id',
dest='personal_id',
action='store',
required=True,
help='Personal ID for generating transposition tables')
args = parser.parse_args()
args.func(args)
| Python | 0.9996 |
1624504bd966eaf47698938e387a58dd14738a76 | add warnings about deprecation of compiler specific template tags | static_precompiler/templatetags/compile_static.py | static_precompiler/templatetags/compile_static.py | import six
import warnings
from django.template import Library
from django.templatetags.static import static
from static_precompiler.settings import PREPEND_STATIC_URL, USE_CACHE, CACHE_TIMEOUT
from static_precompiler.utils import compile_static, get_compiler_by_name, get_cache_key, get_hexdigest, get_cache
from static_precompiler.templatetags.base import container_tag
register = Library()
@register.simple_tag(name="compile")
def compile_tag(source_path, compiler=None):
if compiler:
compiled = compiler.compile(source_path)
else:
compiled = compile_static(source_path)
if PREPEND_STATIC_URL:
compiled = static(compiled)
return compiled
@container_tag(register)
def inlinecompile(nodelist, context, compiler):
source = nodelist.render(context)
if isinstance(compiler, six.string_types):
compiler = get_compiler_by_name(compiler)
if USE_CACHE:
cache_key = get_cache_key("{0}.{1}".format(
compiler.__class__.__name__,
get_hexdigest(source)
))
cache = get_cache()
cached = cache.get(cache_key, None)
if cached is not None:
return cached
output = compiler.compile_source(source)
cache.set(cache_key, output, CACHE_TIMEOUT)
return output
return compiler.compile_source(source)
def _warn(old, new):
warnings.warn(
"{%% %s %%} tag has been deprecated, use {%% %s %%} "
"from `compile_static` template tag library instead." % (old, new),
UserWarning,
)
def register_compiler_tags(register, compiler):
@register.simple_tag(name=compiler.name)
def tag(source_path):
_warn(compiler.name, 'compile')
return compile_tag(source_path, compiler)
@container_tag(register, name="inline" + compiler.name)
def inline_tag(nodelist, context):
_warn('inline%s' % compiler.name, 'inlinecompile "%s"' % compiler.name)
return inlinecompile(nodelist, context, compiler)
| import six
from django.template import Library
from django.templatetags.static import static
from static_precompiler.settings import PREPEND_STATIC_URL, USE_CACHE, CACHE_TIMEOUT
from static_precompiler.utils import compile_static, get_compiler_by_name, get_cache_key, get_hexdigest, get_cache
from static_precompiler.templatetags.base import container_tag
register = Library()
@register.simple_tag(name="compile")
def compile_tag(source_path, compiler=None):
if compiler:
compiled = compiler.compile(source_path)
else:
compiled = compile_static(source_path)
if PREPEND_STATIC_URL:
compiled = static(compiled)
return compiled
@container_tag(register)
def inlinecompile(nodelist, context, compiler):
source = nodelist.render(context)
if isinstance(compiler, six.string_types):
compiler = get_compiler_by_name(compiler)
if USE_CACHE:
cache_key = get_cache_key("{0}.{1}".format(
compiler.__class__.__name__,
get_hexdigest(source)
))
cache = get_cache()
cached = cache.get(cache_key, None)
if cached is not None:
return cached
output = compiler.compile_source(source)
cache.set(cache_key, output, CACHE_TIMEOUT)
return output
return compiler.compile_source(source)
def register_compiler_tags(register, compiler):
@register.simple_tag(name=compiler.name)
def tag(source_path):
return compile_tag(source_path, compiler)
@container_tag(register, name="inline" + compiler.name)
def inline_tag(nodelist, context):
return inlinecompile(nodelist, context, compiler)
| Python | 0 |
437643d0f0680470b52ce893555df5dac17bdca1 | use selenium for loading js content | main.py | main.py | import time
from bs4 import BeautifulSoup
from selenium import webdriver
browser = webdriver.Firefox()
ffResults = browser.get("https://www.expedia.com/Flights-Search?trip=roundtrip&leg1=from:Hamburg,%20Germany%20(HAM-All%20Airports),to:Amman,%20Jordan%20(AMM-Queen%20Alia%20Intl.),departure:03/08/2017TANYT&leg2=from:Amman,%20Jordan%20(AMM-Queen%20Alia%20Intl.),to:Hamburg,%20Germany%20(HAM-All%20Airports),departure:03/24/2017TANYT&passengers=adults:2,children:0,seniors:0,infantinlap:Y&mode=search")
time.sleep(15)
full_content = browser.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
browser.quit()
soup = BeautifulSoup(full_content, "lxml" )
print(soup.find_all('span', class_='dollars'))
#for dollar in dollars_copy:
# print(dollar.text)
#print(dollars)
#print(result)
| import urllib.request
result=urllib.request.urlopen("https://www.expedia.de/Flights-Search?trip=roundtrip&leg1=from:Hamburg,%20Deutschland%20(HAM-Alle%20Flugh%C3%A4fen),to:Amman,%20Jordanien%20(AMM-Queen%20Alia%20Intl.),departure:08.03.2017TANYT&leg2=from:Amman,%20Jordanien%20(AMM-Queen%20Alia%20Intl.),to:Hamburg,%20Deutschland%20(HAM-Alle%20Flugh%C3%A4fen),departure:26.03.2017TANYT&passengers=children:0,adults:2,seniors:0,infantinlap:Y&mode=search").read()
print(result)
| Python | 0 |
1550660e39ded9cbcaf0ad429f01f2803f3c5256 | Add a register function prior to enacting reporting | main.py | main.py | #!/usr/bin/python
from hashlib import md5
import os
import sys
import json
import time
import sched
import socket
import psutil
from lib import cpu, memory, disks, network, system, transport
_cache = []
_cache_timer = 0
_cache_keeper = 0
_version = 1.0
def main(scheduler, config, sock, hostname, callers):
global _cache
global _cache_timer
global _cache_keeper
global _version
payload = {
"_id": {
"time": time.time(),
"id": config['identification']['id'],
"hostname": hostname,
"type": config['identification']['type']
},
"cpu": callers['cpu'].snapshot(),
"memory": callers['memory'].snapshot(),
"disks": callers['disks'].snapshot(),
"network": callers['network'].snapshot(),
"system": callers['system'].snapshot(),
"version": _version
}
payload['digest'] = md5(payload)
_cache.append(payload)
if _cache_keeper < _cache_timer:
_cache_keeper += config['interval']
else:
transport.Transport({ "payload": json.dumps(_cache) }, config, sock)
_cache_keeper = 0
_cache = []
# Schedule a new run at the specified interval
scheduler.enter(config['interval'], 1, main, (scheduler, config, sock, hostname, callers))
scheduler.run()
def register(config):
"""
Register this server/device with the mothership
"""
return True
if __name__ == '__main__':
try:
config = (json.loads(open(os.path.dirname(os.path.abspath(__file__)) + "/config.json").read()))['config']
config['identification']['type'] = config['identification'].get('type', 'false')
config['disable_cache'] = False
if config['cache'].get('enabled') is True:
_cache_timer = config['cache'].get('time_seconds_to_cache_between_sends', 60)
config['interval'] = config['cache'].get('interval_seconds_between_captures', 5)
# If the interval is higher, just exit
if config['interval'] > _cache_timer:
print >> sys.stderr, "Report interval is higher than cache timer."
sys.exit(1)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
scheduler = sched.scheduler(time.time, time.sleep)
hostname = config['identification'].get('hostname', socket.gethostname())
callers = {
"cpu": cpu.CPU(psutil),
"memory": memory.Memory(psutil),
"disks": disks.Disks(psutil),
"network": network.Network(psutil),
"system": system.System(psutil)
}
if register():
main(scheduler, config, sock, hostname, callers)
except KeyboardInterrupt:
print >> sys.stderr, '\nExiting by user request.\n'
sys.exit(0)
except Exception as e:
location = '\n' + type(e).__name__
print >> sys.stderr, location, '=>', str(e)
sys.exit(1)
| #!/usr/bin/python
import os
import sys
import json
import time
import sched
import socket
import psutil
from lib import cpu, memory, disks, network, system, transport
_cache = []
_cache_timer = 0
_cache_keeper = 0
def main(scheduler, config, sock, hostname, callers):
global _cache
global _cache_timer
global _cache_keeper
payload = {
"_id": {
"time": time.time(),
"id": config['identification']['id'],
"hostname": hostname,
"type": config['identification']['type']
},
"cpu": callers['cpu'].snapshot(),
"memory": callers['memory'].snapshot(),
"disks": callers['disks'].snapshot(),
"network": callers['network'].snapshot(),
"system": callers['system'].snapshot()
}
_cache.append(payload)
if _cache_keeper < _cache_timer:
_cache_keeper += config['interval']
else:
transport.Transport({ "payload": json.dumps(_cache) }, config, sock)
_cache_keeper = 0
_cache = []
# Schedule a new run at the specified interval
scheduler.enter(config['interval'], 1, main, (scheduler, config, sock, hostname, callers))
scheduler.run()
if __name__ == '__main__':
try:
config = (json.loads(open(os.path.dirname(os.path.abspath(__file__)) + "/config.json").read()))['config']
config['identification']['type'] = config['identification'].get('type', 'false')
config['disable_cache'] = False
if config['cache'].get('enabled') is True:
_cache_timer = config['cache'].get('time_seconds_to_cache_between_sends', 60)
config['interval'] = config['cache'].get('interval_seconds_between_captures', 5)
# If the interval is higher, just exit
if config['interval'] > _cache_timer:
print >> sys.stderr, "Report interval is higher than cache timer."
sys.exit(1)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
scheduler = sched.scheduler(time.time, time.sleep)
hostname = config['identification'].get('hostname', socket.gethostname())
callers = {
"cpu": cpu.CPU(psutil),
"memory": memory.Memory(psutil),
"disks": disks.Disks(psutil),
"network": network.Network(psutil),
"system": system.System(psutil)
}
main(scheduler, config, sock, hostname, callers)
except KeyboardInterrupt:
print >> sys.stderr, '\nExiting by user request.\n'
sys.exit(0)
except Exception as e:
location = '\n' + type(e).__name__
print >> sys.stderr, location, '=>', str(e)
sys.exit(1)
| Python | 0 |
1cab65aba369263904607738cd69b2ad7d6a8e63 | change web framework from wsgi to cgi | main.py | main.py | #!/usr/bin/env python
# coding=utf-8
from datetime import date
import time
from webapp.web import Application, BaseHandler
URLS = (
("/", "Index"),
("/hello/(.*)", "Hello"),
)
class Index(BaseHandler):
def get(self):
header = "Content-type:text/html\r\n\r\n"
# self.write(header+"Welcome~")
body = self.wrap_html('static/index.html')
self.write(header)
self.write(body)
class Hello(BaseHandler):
def get(self, name):
params = {'name': name, 'date': date.today(), 'time': time.time()}
header = "Content-type:text/html\r\n\r\n"
body = self.wrap_html('static/hello.html', params)
self.write(header)
self.write(body)
if __name__ == '__main__':
app = Application(globals(), URLS)
app.run()
| #!/usr/bin/env python
# coding=utf-8
from datetime import date
import time
from webapp.web import Application, BaseHandler
URLS = (
("/", "Index"),
("/hello/(.*)", "Hello"),
)
class Index(BaseHandler):
def get(self):
header = "Content-type:text/html\r\n\r\n"
# self.write(header+"Welcome~")
body = self.wrap_html('static/index.html')
self.write(header)
self.write(body)
class Hello(BaseHandler):
def get(self, name):
params = {'name': name, 'date': date.today(), 'time': time.time()}
header = "Content-type:text/html\r\n\r\n"
body = self.wrap_html('static/hello.html', params)
self.write(header)
self.write(body)
if __name__ == '__main__':
app = Application(globals(), URLS)
app.run()
| Python | 0.000016 |
24d4fee92c1c2ff4bac1fe09d9b436748234a48c | Add argument for execution of defective server. | main.py | main.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""" Main script. Executes the XML Server implementation with an HTTP
connection and default parameters.
"""
import sys
import argparse
from server import xml_server, defective_servers
from connection import http_connection
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=8080,
help="server's HTTP port")
parser.add_argument('--sensordata', type=str,
default='server/sensor_data.csv', help="sensor data file")
parser.add_argument('--randomloss', action='store_true')
if __name__ == '__main__':
args = parser.parse_args()
if args.randomloss:
server = defective_servers.RandomLossXMLServer(args.sensordata)
else:
server = xml_server.XMLServer(args.sensordata)
connection = http_connection.HttpConnection(server, port=args.port)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""" Main script. Executes the XML Server implementation with an HTTP
connection and default parameters.
"""
import sys
import argparse
from server import xml_server
from connection import http_connection
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=8080,
help="server's HTTP port")
parser.add_argument('--sensordata', type=str,
default='server/sensor_data.csv', help="sensor data file")
if __name__ == '__main__':
args = parser.parse_args()
server = xml_server.XMLServer(args.sensordata)
connection = http_connection.HttpConnection(server, port=args.port)
| Python | 0 |
52c2205804d8dc38447bca1ccbf5599e00cd1d7b | Rename user_id config key to admin_user_id | main.py | main.py | #!/usr/bin/env python3
import requests
CONFIG_DIR = "config"
class Bot:
def __init__(self):
self.config = Config(CONFIG_DIR)
self.api = TelegramBotApi(self.config.get_auth_token())
def run(self):
self.api.send_message(self.config.get_admin_user_id(), "test")
class TelegramBotApi:
def __init__(self, auth_token):
self.base_url = "https://api.telegram.org/bot" + auth_token + "/"
def send_message(self, chat_id, text):
self.__send_request("sendMessage", chat_id=chat_id, text=text)
def __send_request(self, command, **params):
requests.get(self.base_url + command, params=params)
class Config:
def __init__(self, config_dir):
self.config_dir = config_dir + "/"
def get_auth_token(self):
return self.__get_config_value("auth_token")
def get_admin_user_id(self):
return self.__get_config_value("admin_user_id")
def __get_config_value(self, config_key):
return open(self.config_dir + config_key).read().strip()
if __name__ == "__main__":
Bot().run()
| #!/usr/bin/env python3
import requests
CONFIG_DIR = "config"
class Bot:
def __init__(self):
self.config = Config(CONFIG_DIR)
self.api = TelegramBotApi(self.config.get_auth_token())
def run(self):
self.api.send_message(self.config.get_user_id(), "test")
class TelegramBotApi:
def __init__(self, auth_token):
self.base_url = "https://api.telegram.org/bot" + auth_token + "/"
def send_message(self, chat_id, text):
self.__send_request("sendMessage", chat_id=chat_id, text=text)
def __send_request(self, command, **params):
requests.get(self.base_url + command, params=params)
class Config:
def __init__(self, config_dir):
self.config_dir = config_dir + "/"
def get_auth_token(self):
return self.__get_config_value("auth_token")
def get_user_id(self):
return self.__get_config_value("user_id")
def __get_config_value(self, config_key):
return open(self.config_dir + config_key).read().strip()
if __name__ == "__main__":
Bot().run()
| Python | 0.005562 |
8cbe375b478764f05e67b3d5600ca51bbd5b5c48 | enable 'inline_defnode_calls' optimisation for benchmarks (even though they don't benefit currently) | Demos/benchmarks/setup.py | Demos/benchmarks/setup.py | from distutils.core import setup
from Cython.Build import cythonize
directives = {
'optimize.inline_defnode_calls': True
}
setup(
name = 'benchmarks',
ext_modules = cythonize("*.py", language_level=3, annotate=True,
compiler_directives=directives),
)
| from distutils.core import setup
from Cython.Build import cythonize
setup(
name = 'benchmarks',
ext_modules = cythonize("*.py", language_level=3, annotate=True),
)
| Python | 0 |
0389759b9b300c5a0cc807e9d6d154e757abecad | make sentry optional | main.py | main.py | import logging
from time import mktime
import feedparser
import sys
import yaml
from raven import Client
from wallabag_api.wallabag import Wallabag
import github_stars
import golem_top
logger = logging.getLogger()
logger.handlers = []
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.WARNING)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.FileHandler('debug.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
with open("config.yaml", 'r') as stream:
try:
config = yaml.load(stream)
except (yaml.YAMLError, FileNotFoundError) as exception:
logger.error(exception)
config = None
exit(1)
with open("sites.yaml", 'r') as stream:
try:
sites = yaml.load(stream)
except (yaml.YAMLError, FileNotFoundError) as exception:
logger.error(exception)
sites = None
exit(1)
if "sentry_url" in config:
client = Client(
dsn=config["sentry_url"],
processors=(
'raven.processors.SanitizePasswordsProcessor',
)
)
token = Wallabag.get_token(**config["wallabag"])
wall = Wallabag(host=config["wallabag"]["host"], client_secret=config["wallabag"]["client_secret"],
client_id=config["wallabag"]["client_id"], token=token)
sites = github_stars.get_starred_repos(config["github_username"], sites)
for sitetitle, site in sites.items():
logger.info(sitetitle + ": Downloading feed")
# r = requests.get(site["url"])
logger.info(sitetitle + ": Parsing feed")
f = feedparser.parse(site["url"])
logger.debug(sitetitle + ": finished parsing")
# feedtitle = f["feed"]["title"]
if "latest_article" in site:
for article in f.entries:
if article.title == site["latest_article"]:
logger.debug("already added: " + article.title)
break
logger.info(article.title + ": article found")
taglist = [sitetitle]
if site["tags"]:
taglist.extend(site["tags"])
tags = ",".join(taglist)
if "published_parsed" in article:
published = mktime(article.published_parsed)
elif "updated_parsed" in article:
published = mktime(article.updated_parsed)
else:
published = None
logger.info(article.title + ": add to wallabag")
if "github" in site and site["github"]:
title = sitetitle + ": " + article.title
else:
title = article.title
# wall.post_entries(url=article.link, title=title, tags=tags)
else:
logger.debug(sitetitle + ": no latest_article")
if f.entries:
sites[sitetitle]["latest_article"] = f.entries[0].title
# articles = golem_top.get_top_articles()
# params = {
# 'access_token': wall.token,
# "urls[]": articles
# }
# response = wall.query("/api/entries/exists.{format}".format(format=wall.format), "get", **params)
# for url, old in response.items():
# if not old:
# wall.post_entries(url=url, tags="golem,it", title=None)
# print(response)
with open("sites.yaml", 'w') as stream:
yaml.dump(sites, stream, default_flow_style=False)
| import logging
from time import mktime
import feedparser
import sys
import yaml
from raven import Client
from wallabag_api.wallabag import Wallabag
import github_stars
import golem_top
logger = logging.getLogger()
logger.handlers = []
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.WARNING)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.FileHandler('debug.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
with open("config.yaml", 'r') as stream:
try:
config = yaml.load(stream)
except (yaml.YAMLError, FileNotFoundError) as exception:
logger.error(exception)
config = None
exit(1)
with open("sites.yaml", 'r') as stream:
try:
sites = yaml.load(stream)
except (yaml.YAMLError, FileNotFoundError) as exception:
logger.error(exception)
sites = None
exit(1)
client = Client(
dsn=config["sentry_url"],
processors=(
'raven.processors.SanitizePasswordsProcessor',
)
)
token = Wallabag.get_token(**config["wallabag"])
wall = Wallabag(host=config["wallabag"]["host"], client_secret=config["wallabag"]["client_secret"],
client_id=config["wallabag"]["client_id"], token=token)
sites = github_stars.get_starred_repos(config["github_username"], sites)
for sitetitle, site in sites.items():
logger.info(sitetitle + ": Downloading feed")
# r = requests.get(site["url"])
logger.info(sitetitle + ": Parsing feed")
f = feedparser.parse(site["url"])
logger.debug(sitetitle + ": finished parsing")
# feedtitle = f["feed"]["title"]
if "latest_article" in site:
for article in f.entries:
if article.title == site["latest_article"]:
logger.debug("already added: " + article.title)
break
logger.info(article.title + ": article found")
taglist = [sitetitle]
if site["tags"]:
taglist.extend(site["tags"])
tags = ",".join(taglist)
if "published_parsed" in article:
published = mktime(article.published_parsed)
elif "updated_parsed" in article:
published = mktime(article.updated_parsed)
else:
published = None
logger.info(article.title + ": add to wallabag")
if "github" in site and site["github"]:
title = sitetitle + ": " + article.title
else:
title = article.title
# wall.post_entries(url=article.link, title=title, tags=tags)
else:
logger.debug(sitetitle + ": no latest_article")
if f.entries:
sites[sitetitle]["latest_article"] = f.entries[0].title
# articles = golem_top.get_top_articles()
# params = {
# 'access_token': wall.token,
# "urls[]": articles
# }
# response = wall.query("/api/entries/exists.{format}".format(format=wall.format), "get", **params)
# for url, old in response.items():
# if not old:
# wall.post_entries(url=url, tags="golem,it", title=None)
# print(response)
with open("sites.yaml", 'w') as stream:
yaml.dump(sites, stream, default_flow_style=False)
| Python | 0.000001 |
25d67637fafb04bae67033a4deef4bc71fd91ef2 | Fix elision of needed path joins. | main.py | main.py | from markdown import Markdown
import sys
import codecs
import os
import errno
def ensure_output_exists(dir):
if not os.path.isdir(dir):
try:
print("mkdir", dir)
os.makedirs(dir)
except OSError as e:
raise SnabbptException("Unable to create output directory") from e
class SnabbptException(Exception):
pass
class HTMLTemplate:
def __init__(self, html):
self.html = html
def from_file(filename):
with codecs.open(filename, mode="r", encoding="utf-8") as file:
return HTMLTemplate(file.read())
def render(self, file, outfile):
with codecs.open(outfile, mode="w", encoding="utf-8", errors="xmlcharrefreplace") as out:
out.write(self.html.replace("{{PAGE-TITLE}}", file.title).replace("{{PAGE-CONTENT}}", file.html))
class File:
def __init__(self, filename):
self.filename = filename
with codecs.open(self.filename, mode="r", encoding="utf-8") as input_file:
text = input_file.read()
self.md = Markdown(extensions=["markdown.extensions.meta"])
self.html = self.md.convert(text)
self.title = self.md.Meta["title"][-1]
self.template = self.md.Meta["template"][-1]
self.output_path = "{0}/{1}.html".format(os.path.dirname(self.filename), self.title)
def __str__(self):
return self.filename
class Renderer:
def __init__(self, outDir):
self.templates = {}
self.outDir = outDir
ensure_output_exists(outDir)
def renderDir(self, path):
if not os.path.isdir(path):
raise SnabbptException("{0} is not a directory".format(path))
for file in self.get_files(path):
self.renderFile(os.path.join(path, file))
def get_files(self, path):
files = []
for file in os.listdir(path):
if file.startswith('.'):
continue
if os.path.isdir(file) and file != self.outDir:
files.extend(list(map(lambda x: os.path.normpath(os.path.join(path, file, x)), self.get_files(os.path.join(path, file)))))
elif file.endswith(".md"):
files.append(file)
return files
def renderFile(self, filename):
file = File(filename)
if file.template not in self.templates:
ext = file.template.split('.')[-1].upper()
if ext not in fileTypes:
raise SnabbptException("Invalid template type: {0}".format(ext))
self.templates[file.template] = fileTypes[ext].from_file(file.template)
input_file = file.filename
output_file = os.path.join(self.outDir, file.output_path)
try:
if os.stat(input_file).st_mtime < os.stat(output_file).st_mtime:
print(file, "is up to date")
return
except:
pass
ensure_output_exists(os.path.dirname(output_file))
print("{0} -> {1}".format(input_file, output_file))
self.templates[file.template].render(file, output_file)
fileTypes = {
"HTML": HTMLTemplate,
}
if __name__ == "__main__":
try:
Renderer(str(sys.argv[2])).renderDir(str(sys.argv[1]))
except SnabbptException as e:
if e.__cause__:
print("{0} ({1})".format(e, e.__cause__))
else:
print(e)
| from markdown import Markdown
import sys
import codecs
import os
import errno
def ensure_output_exists(dir):
if not os.path.isdir(dir):
try:
print("mkdir", dir)
os.makedirs(dir)
except OSError as e:
raise SnabbptException("Unable to create output directory") from e
class SnabbptException(Exception):
pass
class HTMLTemplate:
def __init__(self, html):
self.html = html
def from_file(filename):
with codecs.open(filename, mode="r", encoding="utf-8") as file:
return HTMLTemplate(file.read())
def render(self, file, outfile):
with codecs.open(outfile, mode="w", encoding="utf-8", errors="xmlcharrefreplace") as out:
out.write(self.html.replace("{{PAGE-TITLE}}", file.title).replace("{{PAGE-CONTENT}}", file.html))
class File:
def __init__(self, filename):
self.filename = filename
with codecs.open(self.filename, mode="r", encoding="utf-8") as input_file:
text = input_file.read()
self.md = Markdown(extensions=["markdown.extensions.meta"])
self.html = self.md.convert(text)
self.title = self.md.Meta["title"][-1]
self.template = self.md.Meta["template"][-1]
self.output_path = "{0}/{1}.html".format(os.path.dirname(self.filename), self.title)
def __str__(self):
return self.filename
class Renderer:
def __init__(self, outDir):
self.templates = {}
self.outDir = outDir
ensure_output_exists(outDir)
def renderDir(self, path):
if not os.path.isdir(path):
raise SnabbptException("{0} is not a directory".format(path))
for file in self.get_files(path):
self.renderFile(file)
def get_files(self, path):
files = []
for file in os.listdir(path):
if file.startswith('.'):
continue
if os.path.isdir(file) and file != self.outDir:
files.extend(list(map(lambda x: os.path.normpath(os.path.join(path, file, x)), self.get_files(file))))
elif file.endswith(".md"):
files.append(file)
return files
def renderFile(self, filename):
file = File(filename)
if file.template not in self.templates:
ext = file.template.split('.')[-1].upper()
if ext not in fileTypes:
raise SnabbptException("Invalid template type: {0}".format(ext))
self.templates[file.template] = fileTypes[ext].from_file(file.template)
input_file = file.filename
output_file = os.path.join(self.outDir, file.output_path)
try:
if os.stat(input_file).st_mtime < os.stat(output_file).st_mtime:
print(file, "is up to date")
return
except:
pass
ensure_output_exists(os.path.dirname(output_file))
print("{0} -> {1}".format(input_file, output_file))
self.templates[file.template].render(file, output_file)
fileTypes = {
"HTML": HTMLTemplate,
}
if __name__ == "__main__":
try:
Renderer(str(sys.argv[2])).renderDir(str(sys.argv[1]))
except SnabbptException as e:
if e.__cause__:
print("{0} ({1})".format(e, e.__cause__))
else:
print(e)
| Python | 0 |
18d59a1d23cc9021fa388028ab723822e031dc07 | Add health check | main.py | main.py | # Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
# law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import json
from google.appengine.ext import vendor
vendor.add('lib')
from flask import Flask
app = Flask(__name__)
from api_key import key
@app.route('/_ah/health')
def health_check():
return 'ok', 200
@app.route('/get_author/<title>')
def get_author(title):
host = 'https://www.googleapis.com/books/v1/volume?q={}&key={}&country=US'.format(title, key)
request = urllib2.Request(host)
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError, error:
contents = error.read()
print ('Received error from Books API {}'.format(contents))
return str(contents)
html = response.read()
author = json.loads(html)['items'][0]['volumeInfo']['authors'][0]
return author
if __name__ == '__main__':
app.run(debug=True)
| # Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
# law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import json
from google.appengine.ext import vendor
vendor.add('lib')
from flask import Flask
app = Flask(__name__)
from api_key import key
@app.route('/get_author/<title>')
def get_author(title):
host = 'https://www.googleapis.com/books/v1/volume?q={}&key={}&country=US'.format(title, key)
request = urllib2.Request(host)
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError, error:
contents = error.read()
print ('Received error from Books API {}'.format(contents))
return str(contents)
html = response.read()
author = json.loads(html)['items'][0]['volumeInfo']['authors'][0]
return author
if __name__ == '__main__':
app.run(debug=True)
| Python | 0.000001 |
788f11632ce085d82be6d90665b9b277f7a60148 | Refactor Task function to properly switch if it is a TaskTemplate, and if there is a CloudHarenssTask use CloudHarnessWorkflow. | gbdxtools/interface.py | gbdxtools/interface.py | """
Main Interface to GBDX API.
Contact: kostas.stamatiou@digitalglobe.com
"""
from __future__ import absolute_import
from builtins import object
from future import standard_library
import json
import os
import logging
from gbdx_auth import gbdx_auth
from gbdxtools.s3 import S3
from gbdxtools.ordering import Ordering
from gbdxtools.workflow import Workflow
from gbdxtools.catalog import Catalog
from gbdxtools.idaho import Idaho
import gbdxtools.simpleworkflows
from gbdxtools.cloudharness import CloudHarnessTask, CloudHarnessWorkflow
from gbdx_task_template import TaskTemplate
class Interface(object):
gbdx_connection = None
def __init__(self, **kwargs):
if (kwargs.get('username') and kwargs.get('password') and
kwargs.get('client_id') and kwargs.get('client_secret')):
self.gbdx_connection = gbdx_auth.session_from_kwargs(**kwargs)
elif kwargs.get('gbdx_connection'):
# Pass in a custom gbdx connection object, for testing purposes
self.gbdx_connection = kwargs.get('gbdx_connection')
else:
# This will throw an exception if your .ini file is not set properly
self.gbdx_connection = gbdx_auth.get_session()
# create a logger
# for now, just log to the console. We'll replace all the 'print' statements
# with at least logger.info or logger.debug statements
# later, we can log to a service, file, or some other aggregator
self.logger = logging.getLogger('gbdxtools')
self.logger.setLevel(logging.ERROR)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
self.logger.addHandler(console_handler)
self.logger.info('Logger initialized')
# create and store an instance of the GBDX s3 client
self.s3 = S3(self)
# create and store an instance of the GBDX Ordering Client
self.ordering = Ordering(self)
# create and store an instance of the GBDX Catalog Client
self.catalog = Catalog(self)
# create and store an instance of the GBDX Workflow Client
self.workflow = Workflow(self)
# create and store an instance of the Idaho Client
self.idaho = Idaho(self)
def Task(self, __task_name=None, cloudharness=None, **kwargs):
# Check if __task_name has been passed as a CloudHarnessTask object,
# or the keyword cloudharness has been provied.
task_is_subclass = False
if not isinstance(__task_name, str) and __task_name is not None:
task_is_subclass = issubclass(__task_name, TaskTemplate)
if task_is_subclass or __task_name is None and cloudharness is not None:
# Create a cloudharness gbdxtools task object
return CloudHarnessTask(
self,
__task_name if task_is_subclass else cloudharness,
**kwargs
)
else:
# Create a standard gbdxtools task object.
return gbdxtools.simpleworkflows.Task(self, __task_name, **kwargs)
def Workflow(self, tasks, **kwargs):
# Check if any of the tasks are CloudHarnessTasks
if len([task for task in tasks if isinstance(task, CloudHarnessTask)]) > 0:
return CloudHarnessWorkflow(self, tasks, **kwargs)
return gbdxtools.simpleworkflows.Workflow(self, tasks, **kwargs)
| """
Main Interface to GBDX API.
Contact: kostas.stamatiou@digitalglobe.com
"""
from __future__ import absolute_import
from builtins import object
from future import standard_library
import json
import os
import logging
from gbdx_auth import gbdx_auth
from gbdxtools.s3 import S3
from gbdxtools.ordering import Ordering
from gbdxtools.workflow import Workflow
from gbdxtools.catalog import Catalog
from gbdxtools.idaho import Idaho
import gbdxtools.simpleworkflows
from gbdxtools.cloudharness_task import CloudHarnessTask, TaskCreationError
class Interface(object):
gbdx_connection = None
def __init__(self, **kwargs):
if (kwargs.get('username') and kwargs.get('password') and
kwargs.get('client_id') and kwargs.get('client_secret')):
self.gbdx_connection = gbdx_auth.session_from_kwargs(**kwargs)
elif kwargs.get('gbdx_connection'):
# Pass in a custom gbdx connection object, for testing purposes
self.gbdx_connection = kwargs.get('gbdx_connection')
else:
# This will throw an exception if your .ini file is not set properly
self.gbdx_connection = gbdx_auth.get_session()
# create a logger
# for now, just log to the console. We'll replace all the 'print' statements
# with at least logger.info or logger.debug statements
# later, we can log to a service, file, or some other aggregator
self.logger = logging.getLogger('gbdxtools')
self.logger.setLevel(logging.ERROR)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
self.logger.addHandler(console_handler)
self.logger.info('Logger initialized')
# create and store an instance of the GBDX s3 client
self.s3 = S3(self)
# create and store an instance of the GBDX Ordering Client
self.ordering = Ordering(self)
# create and store an instance of the GBDX Catalog Client
self.catalog = Catalog(self)
# create and store an instance of the GBDX Workflow Client
self.workflow = Workflow(self)
# create and store an instance of the Idaho Client
self.idaho = Idaho(self)
def Task(self, __task_name=None, cloudharness_obj=None, **kwargs):
if __task_name is None:
# Create a cloudharness gbdxtools task object
return CloudHarnessTask(self, __task_name, **kwargs)
else:
# Create a standard gbdxtools task object.
return gbdxtools.simpleworkflows.Task(self, __task_name, **kwargs)
def Workflow(self, tasks, **kwargs):
return gbdxtools.simpleworkflows.Workflow(self, tasks, **kwargs)
| Python | 0 |
6ddc63dcb1005ccf6d09f2577faf99566bafced7 | fix Log.add_group() use in live_plot.py example | examples/miscellaneous/live_plot.py | examples/miscellaneous/live_plot.py | from __future__ import print_function
from __future__ import absolute_import
import os
import sys
sys.path.append( '.' )
import numpy as nm
from sfepy.base.base import output, pause
from sfepy.base.log import Log
def main():
cwd = os.path.split(os.path.join(os.getcwd(), __file__))[0]
log = Log((['sin(x)', 'cos(x)'], ['exp(x)']),
yscales=['linear', 'log'],
xlabels=['angle', None], ylabels=[None, 'a function'],
log_filename=os.path.join(cwd, 'live_plot.log'))
log2 = Log([['x^3']],
yscales=['linear'],
xlabels=['x'], ylabels=['a cubic function'],
aggregate=50, sleep=0.5,
log_filename=os.path.join(cwd, 'live_plot2.log'))
added = 0
for x in nm.linspace(0, 4.0 * nm.pi, 200):
output('x: ', x)
if x < (2.0 * nm.pi):
log(nm.sin(x), nm.cos(x), nm.exp(x), x = [x, None])
else:
if added:
log(nm.sin(x), nm.cos(x), nm.exp(x), x**2,
x=[x, None, x])
else:
log.plot_vlines(color='r', linewidth=2)
log.add_group(['x^2'], yscale='linear', xlabel='new x',
ylabel='square', formats=['%+g'])
added += 1
if (added == 20) or (added == 50):
log.plot_vlines([2], color='g', linewidth=2)
log2(x*x*x, x=[x])
print(log)
print(log2)
pause()
log(finished=True)
log2(finished=True)
if __name__ == '__main__':
main()
| from __future__ import print_function
from __future__ import absolute_import
import os
import sys
sys.path.append( '.' )
import numpy as nm
from sfepy.base.base import output, pause
from sfepy.base.log import Log
def main():
cwd = os.path.split(os.path.join(os.getcwd(), __file__))[0]
log = Log((['sin(x)', 'cos(x)'], ['exp(x)']),
yscales=['linear', 'log'],
xlabels=['angle', None], ylabels=[None, 'a function'],
log_filename=os.path.join(cwd, 'live_plot.log'))
log2 = Log([['x^3']],
yscales=['linear'],
xlabels=['x'], ylabels=['a cubic function'],
aggregate=50, sleep=0.5,
log_filename=os.path.join(cwd, 'live_plot2.log'))
added = 0
for x in nm.linspace(0, 4.0 * nm.pi, 200):
output('x: ', x)
if x < (2.0 * nm.pi):
log(nm.sin(x), nm.cos(x), nm.exp(x), x = [x, None])
else:
if added:
log(nm.sin(x), nm.cos(x), nm.exp(x), x**2,
x=[x, None, x])
else:
log.plot_vlines(color='r', linewidth=2)
log.add_group(['x^2'], 'linear', 'new x', 'square',
formats=['%+g'])
added += 1
if (added == 20) or (added == 50):
log.plot_vlines([2], color='g', linewidth=2)
log2(x*x*x, x=[x])
print(log)
print(log2)
pause()
log(finished=True)
log2(finished=True)
if __name__ == '__main__':
main()
| Python | 0.000001 |
43a087c69eedd26d3bab699fca08b5a01a06a6a4 | Add test to check if InvalidFrequencyException is thrown | skrf/tests/test_frequency.py | skrf/tests/test_frequency.py | from skrf.frequency import InvalidFrequencyException
import unittest
import os
import numpy as npy
import skrf as rf
class FrequencyTestCase(unittest.TestCase):
'''
'''
def setUp(self):
'''
'''
self.test_dir = os.path.dirname(os.path.abspath(__file__))+'/'
def test_create_linear_sweep(self):
freq = rf.Frequency(1,10,10,'ghz')
self.assertTrue((freq.f == npy.linspace(1,10,10)*1e9).all())
self.assertTrue((freq.f_scaled ==npy.linspace(1,10,10)).all())
self.assertTrue((freq.sweep_type == 'lin'))
def test_create_log_sweep(self):
freq = rf.Frequency(1,10,10,'ghz', sweep_type='log')
#Check end points
self.assertTrue((freq.f[0] == 1e9))
self.assertTrue((freq.f[-1] == 10e9))
spacing = [freq.f[i+1]/freq.f[i] for i in range(len(freq.f)-1)]
#Check that frequency is increasing
self.assertTrue(all(s > 1 for s in spacing))
#Check that ratio of adjacent frequency points is identical
self.assertTrue(all(abs(spacing[i] - spacing[0]) < 1e-10 for i in range(len(spacing))))
self.assertTrue((freq.sweep_type == 'log'))
def test_create_rando_sweep(self):
f = npy.array([1,5,200])
freq = rf.Frequency.from_f(f,unit='khz')
self.assertTrue((freq.f ==f*1e3).all())
self.assertTrue((freq.f_scaled== f).all())
self.assertTrue((freq.sweep_type == 'unknown'))
with self.assertRaises(ValueError):
freq.npoints = 10
def test_rando_sweep_from_touchstone(self):
'''
this also tests the ability to read a touchstone file.
'''
rando_sweep_ntwk = rf.Network(os.path.join(self.test_dir, 'ntwk_arbitrary_frequency.s2p'))
self.assertTrue((rando_sweep_ntwk.f == \
npy.array([1,4,10,20])).all())
self.assertTrue((rando_sweep_ntwk.frequency.sweep_type == 'unknown'))
def test_slicer(self):
a = rf.Frequency.from_f([1,2,4,5,6])
b = a['2-5ghz']
tinyfloat = 1e-12
self.assertTrue((abs(b.f - [2e9,4e9,5e9]) < tinyfloat).all())
def test_frequency_check(self):
with self.assertRaises(InvalidFrequencyException):
a = rf.Frequency.from_f([2,1])
suite = unittest.TestLoader().loadTestsFromTestCase(FrequencyTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| import unittest
import os
import numpy as npy
import skrf as rf
class FrequencyTestCase(unittest.TestCase):
'''
'''
def setUp(self):
'''
'''
self.test_dir = os.path.dirname(os.path.abspath(__file__))+'/'
def test_create_linear_sweep(self):
freq = rf.Frequency(1,10,10,'ghz')
self.assertTrue((freq.f == npy.linspace(1,10,10)*1e9).all())
self.assertTrue((freq.f_scaled ==npy.linspace(1,10,10)).all())
self.assertTrue((freq.sweep_type == 'lin'))
def test_create_log_sweep(self):
freq = rf.Frequency(1,10,10,'ghz', sweep_type='log')
#Check end points
self.assertTrue((freq.f[0] == 1e9))
self.assertTrue((freq.f[-1] == 10e9))
spacing = [freq.f[i+1]/freq.f[i] for i in range(len(freq.f)-1)]
#Check that frequency is increasing
self.assertTrue(all(s > 1 for s in spacing))
#Check that ratio of adjacent frequency points is identical
self.assertTrue(all(abs(spacing[i] - spacing[0]) < 1e-10 for i in range(len(spacing))))
self.assertTrue((freq.sweep_type == 'log'))
def test_create_rando_sweep(self):
f = npy.array([1,5,200])
freq = rf.Frequency.from_f(f,unit='khz')
self.assertTrue((freq.f ==f*1e3).all())
self.assertTrue((freq.f_scaled== f).all())
self.assertTrue((freq.sweep_type == 'unknown'))
with self.assertRaises(ValueError):
freq.npoints = 10
def test_rando_sweep_from_touchstone(self):
'''
this also tests the ability to read a touchstone file.
'''
rando_sweep_ntwk = rf.Network(os.path.join(self.test_dir, 'ntwk_arbitrary_frequency.s2p'))
self.assertTrue((rando_sweep_ntwk.f == \
npy.array([1,4,10,20])).all())
self.assertTrue((rando_sweep_ntwk.frequency.sweep_type == 'unknown'))
def test_slicer(self):
a = rf.Frequency.from_f([1,2,4,5,6])
b = a['2-5ghz']
tinyfloat = 1e-12
self.assertTrue((abs(b.f - [2e9,4e9,5e9]) < tinyfloat).all())
suite = unittest.TestLoader().loadTestsFromTestCase(FrequencyTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| Python | 0 |
e7a01079e57acfa4486fc6cf786a1012da436d0f | Revise snapshot parsing to not expect multiple samples for contrast | solar_snapshot_name_parse.py | solar_snapshot_name_parse.py | #!/usr/bin/env python3
################################################################################
# Description:
# * Parses names of files in directory containing snapshots of solar
# suitcase displays, and formats them for pasting into timestamp column of
# solar energy log spreadsheet
# * Requires that Box WebDAV mount is active
# * Expects a configuration file in home directory named
# '.solar_snapshot_name_parse_cfg.json', in the following format:
# {
# "snapshot_dir": "/mnt/box_webdav/.../Solar charge logs"
# }
#
# Arguments:
# * --help (optional)
# Displays help message
#
# Examples:
# * ./solar_snapshot_name_parse.py
# * ./solar_snapshot_name_parse.py --help
#
# Limitations:
# * Tested on only Raspbian
# * Makes no attempt to verify that Box WebDAV mount is valid
################################################################################
# Modules
import sys
import os
import time
import argparse
import json
import re
# Constants
CFG_FILE_PATH = "~/.solar_snapshot_name_parse_cfg.json"
# Main function
def main(argv):
# Configure argument parser
desc_str = "Parses names of files in directory containing snapshots of "
desc_str += "solar suitcase displays, and formats them for pasting into "
desc_str += "timestamp column of solar energy log spreadsheet"
parser = argparse.ArgumentParser(description=desc_str)
# Print current time
print(time.strftime("%a %Y-%m-%d %I:%M:%S %p"))
print("")
# Parse arguments
print("Parsing arguments...")
args = parser.parse_args()
for (arg, val) in sorted(vars(args).items()):
print(" * {}: {}".format(arg, val))
print("")
# Parse configuration file
cfg_file_path = os.path.expanduser(CFG_FILE_PATH)
cfg_file_path = os.path.expandvars(cfg_file_path)
print("Parsing configuration file '{}'...".format(cfg_file_path))
cfg = json.load(open(cfg_file_path))
check_cfg_file(cfg) # Check that file contains all required information
print("")
# Retrieve names of files in snapshot directory
print("Retrieving names of files in '{}'...".format(cfg["snapshot_dir"]))
file_names = os.listdir(cfg["snapshot_dir"])
# Format file names and print results
print("Formatting file names and print results...")
count = fmt_print_file_names(sorted(file_names))
print("")
# Exit
print("Printed {} lines.".format(count))
print("Done.")
print("")
sys.exit(0) # Success
# Checks that configuration file contained all required information
def check_cfg_file(cfg):
# Snapshot directory
if ("snapshot_dir" in cfg):
msg = "Parsed snapshot directory name from configuration file: "
msg += "{}".format(cfg["snapshot_dir"])
print(msg)
else: # No snapshot directory parsed
msg = "Configuration file does not contain 'snapshot_dir' string."
raise Exception(msg)
# Formats file names and prints results
def fmt_print_file_names(file_names):
re_file_name = re.compile(r'^(\d{4}-\d{2}-\d{2})_(\d{2})(\d{2})\.jpg$')
num_printed = 0
for file_name in file_names:
m = re_file_name.match(file_name)
if m: # Regular expression match
m_date = m.group(1)
m_hour = m.group(2)
m_minute = m.group(3)
print("{} {}:{}".format(m_date, m_hour, m_minute))
num_printed += 1
return num_printed
# Execute 'main()' function
if (__name__ == "__main__"):
main(sys.argv)
| #!/usr/bin/env python3
################################################################################
# Description:
# * Parses names of files in directory containing snapshots of solar
# suitcase displays, and formats them for pasting into timestamp column of
# solar energy log spreadsheet
# * Requires that Box WebDAV mount is active
# * Expects a configuration file in home directory named
# '.solar_snapshot_name_parse_cfg.json', in the following format:
# {
# "snapshot_dir": "/mnt/box_webdav/.../Solar charge logs"
# }
#
# Arguments:
# * --help (optional)
# Displays help message
#
# Examples:
# * ./solar_snapshot_name_parse.py
# * ./solar_snapshot_name_parse.py --help
#
# Limitations:
# * Tested on only Raspbian
# * Makes no attempt to verify that Box WebDAV mount is valid
################################################################################
# Modules
import sys
import os
import time
import argparse
import json
import re
# Constants
CFG_FILE_PATH = "~/.solar_snapshot_name_parse_cfg.json"
# Main function
def main(argv):
# Configure argument parser
desc_str = "Parses names of files in directory containing snapshots of "
desc_str += "solar suitcase displays, and formats them for pasting into "
desc_str += "timestamp column of solar energy log spreadsheet"
parser = argparse.ArgumentParser(description=desc_str)
# Print current time
print(time.strftime("%a %Y-%m-%d %I:%M:%S %p"))
print("")
# Parse arguments
print("Parsing arguments...")
args = parser.parse_args()
for (arg, val) in sorted(vars(args).items()):
print(" * {}: {}".format(arg, val))
print("")
# Parse configuration file
cfg_file_path = os.path.expanduser(CFG_FILE_PATH)
cfg_file_path = os.path.expandvars(cfg_file_path)
print("Parsing configuration file '{}'...".format(cfg_file_path))
cfg = json.load(open(cfg_file_path))
check_cfg_file(cfg) # Check that file contains all required information
print("")
# Retrieve names of files in snapshot directory
print("Retrieving names of files in '{}'...".format(cfg["snapshot_dir"]))
file_names = os.listdir(cfg["snapshot_dir"])
# Format file names and print results
print("Formatting file names and print results...")
count = fmt_print_file_names(sorted(file_names))
print("")
# Exit
print("Printed {} lines.".format(count))
print("Done.")
print("")
sys.exit(0) # Success
# Checks that configuration file contained all required information
def check_cfg_file(cfg):
# Snapshot directory
if ("snapshot_dir" in cfg):
msg = "Parsed snapshot directory name from configuration file: "
msg += "{}".format(cfg["snapshot_dir"])
print(msg)
else: # No snapshot directory parsed
msg = "Configuration file does not contain 'snapshot_dir' string."
raise Exception(msg)
# Formats file names and prints results
def fmt_print_file_names(file_names):
re_file_name = re.compile(r'^(\d{4}-\d{2}-\d{2})_(\d{2})(\d{2})_c(\d{2})\.jpg$')
num_printed = 0
for file_name in file_names:
m = re_file_name.match(file_name)
if m: # Regular expression match
m_date = m.group(1)
m_hour = m.group(2)
m_minute = m.group(3)
m_contrast = m.group(4)
if (m_contrast == "00"): # Ignore duplicates
print("{} {}:{}".format(m_date, m_hour, m_minute))
num_printed += 1
return num_printed
# Execute 'main()' function
if (__name__ == "__main__"):
main(sys.argv)
| Python | 0 |
653376cf10edb42e6d5c429e61bc9ef23eb51234 | fix test for GenomicFilter | solvebio/test/test_filter.py | solvebio/test/test_filter.py | import unittest
import solvebio
from solvebio import Filter, GenomicFilter
class FilterTest(unittest.TestCase):
def test_filter_basic(self):
f = Filter()
self.assertEqual(repr(f), '<Filter []>', 'empty filter')
self.assertEqual(repr(~f), '<Filter []>', '"not" of empty filter')
# Because the order in listing keys is arbitrary, we only
# test with one entry.
f1 = Filter(price='Free')
self.assertEqual(repr(f1), "<Filter [('price', 'Free')]>")
self.assertEqual(repr(~~f1), "<Filter [('price', 'Free')]>",
'"not" of empty filter')
a = solvebio.query.Filter(chr1="3")
b = solvebio.query.Filter(chr2="4")
self.assertEqual(repr(a | b),
"<Filter [{'or': [('chr1', '3'), ('chr2', '4')]}]>",
'"or" filter')
f |= a
self.assertEqual(repr(f), "<Filter [('chr1', '3')]>",
"'or' with empty filter")
self.assertEqual(repr(a), "<Filter [('chr1', '3')]>",
"prior 'or' doesn't mung filter")
filters3 = Filter(omim_id=144650) | Filter(omim_id=144600) \
| Filter(omim_id=145300)
self.assertEqual(repr(filters3),
"<Filter [{'or': [('omim_id', 144650)," +
" ('omim_id', 144600), ('omim_id', 145300)]}]>")
def test_process_filters(self):
# FIXME: add more and put in a loop.
filters = [('omim_id', None)]
expect = filters
dataset_name = 'omim/0.0.1-1/omim'
x = solvebio.Query(dataset_name)
self.assertEqual(repr(x._process_filters(filters)), repr(expect))
class GenomicFilterTest(unittest.TestCase):
def test_single_position(self):
f = GenomicFilter('chr1', 100)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.start__lte', 100), ('genomic_coordinates.stop__gte', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
f = GenomicFilter('chr1', 100, exact=True)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.stop', 100), ('genomic_coordinates.start', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
def test_range(self):
f = GenomicFilter('chr1', 100, 200)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [{'or': [{'and': [('genomic_coordinates.start__lte', 100), ('genomic_coordinates.stop__gte', 200)]}, ('genomic_coordinates.start__range', [100, 200]), ('genomic_coordinates.stop__range', [100, 200])]}, ('genomic_coordinates.chromosome', '1')]}]>") # noqa
f = GenomicFilter('chr1', 100, 200, exact=True)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.stop', 200), ('genomic_coordinates.start', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
if __name__ == "__main__":
unittest.main()
| import unittest
import solvebio
from solvebio import Filter, GenomicFilter
class FilterTest(unittest.TestCase):
def test_filter_basic(self):
f = Filter()
self.assertEqual(repr(f), '<Filter []>', 'empty filter')
self.assertEqual(repr(~f), '<Filter []>', '"not" of empty filter')
# Because the order in listing keys is arbitrary, we only
# test with one entry.
f1 = Filter(price='Free')
self.assertEqual(repr(f1), "<Filter [('price', 'Free')]>")
self.assertEqual(repr(~~f1), "<Filter [('price', 'Free')]>",
'"not" of empty filter')
a = solvebio.query.Filter(chr1="3")
b = solvebio.query.Filter(chr2="4")
self.assertEqual(repr(a | b),
"<Filter [{'or': [('chr1', '3'), ('chr2', '4')]}]>",
'"or" filter')
f |= a
self.assertEqual(repr(f), "<Filter [('chr1', '3')]>",
"'or' with empty filter")
self.assertEqual(repr(a), "<Filter [('chr1', '3')]>",
"prior 'or' doesn't mung filter")
filters3 = Filter(omim_id=144650) | Filter(omim_id=144600) \
| Filter(omim_id=145300)
self.assertEqual(repr(filters3),
"<Filter [{'or': [('omim_id', 144650)," +
" ('omim_id', 144600), ('omim_id', 145300)]}]>")
def test_process_filters(self):
# FIXME: add more and put in a loop.
filters = [('omim_id', None)]
expect = filters
dataset_name = 'omim/0.0.1-1/omim'
x = solvebio.Query(dataset_name)
self.assertEqual(repr(x._process_filters(filters)), repr(expect))
class GenomicFilterTest(unittest.TestCase):
def test_single_position(self):
f = GenomicFilter('chr1', 100)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.start__lte', 100), ('genomic_coordinates.stop__gte', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
f = GenomicFilter('chr1', 100, exact=True)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.stop', 100), ('genomic_coordinates.start', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
def test_range(self):
f = GenomicFilter('chr1', 100, 200)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [{'or': [{'and': [('genomic_coordinates.start__lte', 100), ('genomic_coordinates.stop__gte', 200)]}, ('genomic_coordinates.start__range', [100, 201]), ('genomic_coordinates.stop__range', [100, 201])]}, ('genomic_coordinates.chromosome', '1')]}]>") # noqa
f = GenomicFilter('chr1', 100, 200, exact=True)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.stop', 200), ('genomic_coordinates.start', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
if __name__ == "__main__":
unittest.main()
| Python | 0 |
a4ea5f9a6b6de93188a590b918aa122e4fbe437b | Fix jsbox formset usage. | go/apps/jsbox/forms.py | go/apps/jsbox/forms.py | from django import forms
from django.forms.formsets import BaseFormSet, formset_factory
from go.base.widgets import CodeField, SourceUrlField
SOURCE_URL_HELP_TEXT = (
'HTTP Basic Authentication is supported. If using GitHub '
'please use '
'<a href="http://developer.github.com/v3/#authentication">'
'OAuth2 access tokens'
'</a>.')
class JsboxForm(forms.Form):
javascript = CodeField(required=False)
source_url = SourceUrlField(code_field='javascript',
help_text=SOURCE_URL_HELP_TEXT,
required=False)
@staticmethod
def initial_from_config(metadata):
return metadata
def to_config(self):
return {
'javascript': self.cleaned_data['javascript'],
'source_url': self.cleaned_data['source_url'],
}
class JsboxAppConfigForm(forms.Form):
key = forms.CharField()
value = CodeField(required=False)
source_url = SourceUrlField(code_field='value',
help_text=None,
required=False)
@staticmethod
def initial_from_config(metadata):
return metadata
def to_config(self):
return {
'key': self.cleaned_data['key'],
'value': self.cleaned_data['value'],
'source_url': self.cleaned_data['source_url'],
}
class BaseJsboxAppConfigFormset(BaseFormSet):
def to_config(self):
metadata = {}
for form in self.forms:
if not form.cleaned_data or form in self.deleted_forms:
continue
submeta = form.to_config()
metadata[submeta['key']] = submeta
del submeta['key']
return metadata
@classmethod
def initial_from_config(cls, metadata):
initials = []
for key in sorted(metadata):
submeta = metadata[key].copy()
submeta['key'] = key
if hasattr(cls.form, 'initial_from_config'):
submeta = getattr(cls.form, 'initial_from_config')(submeta)
initials.append(submeta)
return initials
JsboxAppConfigFormset = formset_factory(
JsboxAppConfigForm, can_delete=True, extra=1,
formset=BaseJsboxAppConfigFormset)
| from django import forms
from django.forms.formsets import BaseFormSet, DEFAULT_MAX_NUM
from go.base.widgets import CodeField, SourceUrlField
SOURCE_URL_HELP_TEXT = (
'HTTP Basic Authentication is supported. If using GitHub '
'please use '
'<a href="http://developer.github.com/v3/#authentication">'
'OAuth2 access tokens'
'</a>.')
class JsboxForm(forms.Form):
javascript = CodeField(required=False)
source_url = SourceUrlField(code_field='javascript',
help_text=SOURCE_URL_HELP_TEXT,
required=False)
@staticmethod
def initial_from_config(metadata):
return metadata
def to_config(self):
return {
'javascript': self.cleaned_data['javascript'],
'source_url': self.cleaned_data['source_url'],
}
class JsboxAppConfigForm(forms.Form):
key = forms.CharField()
value = CodeField(required=False)
source_url = SourceUrlField(code_field='value',
help_text=None,
required=False)
@staticmethod
def initial_from_config(metadata):
return metadata
def to_config(self):
return {
'key': self.cleaned_data['key'],
'value': self.cleaned_data['value'],
'source_url': self.cleaned_data['source_url'],
}
class JsboxAppConfigFormset(BaseFormSet):
form = JsboxAppConfigForm
absolute_max = DEFAULT_MAX_NUM
extra = 1
can_order = False
can_delete = True
max_num = None
def to_config(self):
metadata = {}
for form in self.forms:
if not form.cleaned_data or form in self.deleted_forms:
continue
submeta = form.to_config()
metadata[submeta['key']] = submeta
del submeta['key']
return metadata
@classmethod
def initial_from_config(cls, metadata):
initials = []
for key in sorted(metadata):
submeta = metadata[key].copy()
submeta['key'] = key
if hasattr(cls.form, 'initial_from_config'):
submeta = getattr(cls.form, 'initial_from_config')(submeta)
initials.append(submeta)
return initials
| Python | 0 |
884e17eb92e35ab5a9f4d6bc94f11f49977711a3 | Use render() so that we can pass in the request context and thus link to static files correctly (reviewed by @smn). | go/apps/jsbox/views.py | go/apps/jsbox/views.py | import requests
from urlparse import urlparse, urlunparse
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from go.conversation.base import ConversationViews
from go.apps.jsbox.forms import JsboxForm, JsboxAppConfigFormset
from go.apps.jsbox.log import LogManager
from go.base.utils import conversation_or_404
class JsboxConversationViews(ConversationViews):
conversation_type = u'jsbox'
conversation_display_name = u'Javascript App'
conversation_initiator = None
edit_conversation_forms = (
('jsbox', JsboxForm),
('jsbox_app_config', JsboxAppConfigFormset),
)
@login_required
@csrf_exempt
def cross_domain_xhr(request):
url = request.POST.get('url', None)
parse_result = urlparse(url)
if parse_result.username:
auth = (parse_result.username, parse_result.password)
url = urlunparse((parse_result.scheme,
('%s:%s' % (parse_result.hostname, parse_result.port)
if parse_result.port
else parse_result.hostname),
parse_result.path,
parse_result.params,
parse_result.query,
parse_result.fragment))
else:
auth = None
url = url
r = requests.get(url, auth=auth)
return HttpResponse(r.text, status=r.status_code)
@login_required
def jsbox_logs(request, conversation_key):
campaign_key = request.user_api.user_account_key
conversation = conversation_or_404(request.user_api, conversation_key)
log_manager = LogManager(request.user_api.api.redis)
logs = log_manager.get_logs(campaign_key, conversation_key)
logs = list(reversed(logs))
return render(request, "jsbox/jsbox_logs.html", {
"conversation": conversation,
"logs": logs,
})
| import requests
from urlparse import urlparse, urlunparse
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from go.conversation.base import ConversationViews
from go.apps.jsbox.forms import JsboxForm, JsboxAppConfigFormset
from go.apps.jsbox.log import LogManager
from go.base.utils import conversation_or_404
class JsboxConversationViews(ConversationViews):
conversation_type = u'jsbox'
conversation_display_name = u'Javascript App'
conversation_initiator = None
edit_conversation_forms = (
('jsbox', JsboxForm),
('jsbox_app_config', JsboxAppConfigFormset),
)
@login_required
@csrf_exempt
def cross_domain_xhr(request):
url = request.POST.get('url', None)
parse_result = urlparse(url)
if parse_result.username:
auth = (parse_result.username, parse_result.password)
url = urlunparse((parse_result.scheme,
('%s:%s' % (parse_result.hostname, parse_result.port)
if parse_result.port
else parse_result.hostname),
parse_result.path,
parse_result.params,
parse_result.query,
parse_result.fragment))
else:
auth = None
url = url
r = requests.get(url, auth=auth)
return HttpResponse(r.text, status=r.status_code)
@login_required
def jsbox_logs(request, conversation_key):
campaign_key = request.user_api.user_account_key
conversation = conversation_or_404(request.user_api, conversation_key)
log_manager = LogManager(request.user_api.api.redis)
logs = log_manager.get_logs(campaign_key, conversation_key)
logs = list(reversed(logs))
return render_to_response("jsbox/jsbox_logs.html", {
"conversation": conversation,
"logs": logs,
})
| Python | 0 |
f10e01a180cca2185862c1f6cf926c2a197536ed | lower the name to be stripped | gozerlib/utils/name.py | gozerlib/utils/name.py | # gozerlib/utils/name.py
#
#
""" name related helper functions. """
## basic imports
import string
import os
## defines
allowednamechars = string.ascii_letters + string.digits + '!.@-+#'
## stripname function
def stripname(name, allowed=""):
""" strip all not allowed chars from name. """
name = name.lower()
res = ""
for c in name:
if ord(c) < 31: res += "-"
elif c in allowednamechars + allowed: res += c
else: res += "-"
res = res.replace(os.sep, '+')
res = res.replace("@", '+')
res = res.replace("#", '+')
return res
## testnam function
def testname(name):
""" test if name is correct. """
for c in name:
if c not in allowedchars or ord(c) < 31: return False
return True
| # gozerlib/utils/name.py
#
#
""" name related helper functions. """
## basic imports
import string
import os
## defines
allowednamechars = string.ascii_letters + string.digits + '!.@-+#'
## stripname function
def stripname(name, allowed=""):
""" strip all not allowed chars from name. """
res = ""
for c in name:
if ord(c) < 31: res += "-"
elif c in allowednamechars + allowed: res += c
else: res += "-"
res = res.replace(os.sep, '+')
res = res.replace("@", '+')
res = res.replace("#", '+')
return res
## testnam function
def testname(name):
""" test if name is correct. """
for c in name:
if c not in allowedchars or ord(c) < 31: return False
return True
| Python | 0.999995 |
debeefabeb64766b380af42458433a05c2a2f04a | Add F1 score to metrics. | non_semantic_speech_benchmark/eval_embedding/metrics.py | non_semantic_speech_benchmark/eval_embedding/metrics.py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Metrics for evaluation.
1) Equal Error Rate (EER) metric.
2) D-Prime.
3) AUC.
4) Balanced accuracy.
5) F1 score.
"""
import math
from typing import Any, Iterable, Tuple, Optional, Text
import numpy as np
import scipy.stats
from sklearn import metrics as skmetrics
def calculate_eer(labels, scores):
"""Returns the equal error rate for a binary classifier.
EER is defined as the point on the DET curve where the false positive and
false negative rates are equal.
Args:
labels: Ground truth labels for each data point.
scores: Regression scores for each data point. A score of 1 indicates a
classification of label 1.
Returns:
eer: The Equal Error Rate.
"""
fpr, fnr = calculate_det_curve(labels, scores)
min_diff_idx = np.argmin(np.abs(fpr - fnr))
return np.mean((fpr[min_diff_idx], fnr[min_diff_idx]))
def calculate_det_curve(labels,
scores):
"""Calculates the false positive and negative rate at each score.
The DET curve is related to the ROC curve, except it plots false positive rate
against false negative rate.
See https://en.wikipedia.org/wiki/Detection_error_tradeoff for a full
description of the DET curve.
Args:
labels: Ground truth labels for each data point.
scores: Regression scores for each data point. A score of 1 indicates a
classification of label 1. Should be in range (0, 1).
Returns:
fpr, fnr
All returned values are numpy arrays with the same length as scores.
fpr: False positive rate at a given threshold value.
fnr: False negative rate at a given threshold value.
"""
scores = np.asarray(scores, dtype=float)
labels = np.asarray(labels, dtype=float)
indices = np.argsort(scores)
labels = labels[indices]
fnr = np.cumsum(labels) / np.sum(labels)
fnr = np.insert(fnr, 0, 0)
negative_labels = 1 - labels
fpr = np.cumsum(negative_labels[::-1])[::-1]
fpr /= np.sum(negative_labels)
fpr = np.append(fpr, 0)
return fpr, fnr
def calculate_auc(labels,
predictions,
sample_weight = None,
multi_class = None):
return skmetrics.roc_auc_score(
labels, predictions, sample_weight=sample_weight, multi_class=multi_class)
def dprime_from_auc(auc):
"""Returns a d-prime measure corresponding to an ROC area under the curve.
D-prime denotes the sensitivity index:
https://en.wikipedia.org/wiki/Sensitivity_index
Args:
auc: (float) Area under an ROC curve.
Returns:
Float value representing the separation of score distributions
between negative and positive scores for a labeler (an algorithm or
group of readers who assign continuous suspicion scores to a series
of cases). The AUC is given by PHI(mu / sqrt(2)), where PHI is the
cumulative distribution function of the normal distribution.
"""
return math.sqrt(2) * scipy.stats.norm.ppf(auc)
def balanced_accuracy(labels,
predictions):
return skmetrics.balanced_accuracy_score(y_true=labels, y_pred=predictions)
def f1_score(labels,
predictions):
return skmetrics.f1_score(y_true=labels, y_pred=predictions,
average='weighted')
| # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Metrics for evaluation.
1) Equal Error Rate (EER) metric.
2) D-Prime.
3) AUC.
4) Balanced accuracy.
"""
import math
from typing import Any, Iterable, Tuple, Optional
import numpy as np
import scipy.stats
from sklearn import metrics as skmetrics
def calculate_eer(labels, scores):
"""Returns the equal error rate for a binary classifier.
EER is defined as the point on the DET curve where the false positive and
false negative rates are equal.
Args:
labels: Ground truth labels for each data point.
scores: Regression scores for each data point. A score of 1 indicates a
classification of label 1.
Returns:
eer: The Equal Error Rate.
"""
fpr, fnr = calculate_det_curve(labels, scores)
min_diff_idx = np.argmin(np.abs(fpr - fnr))
return np.mean((fpr[min_diff_idx], fnr[min_diff_idx]))
def calculate_det_curve(labels,
scores):
"""Calculates the false positive and negative rate at each score.
The DET curve is related to the ROC curve, except it plots false positive rate
against false negative rate.
See https://en.wikipedia.org/wiki/Detection_error_tradeoff for a full
description of the DET curve.
Args:
labels: Ground truth labels for each data point.
scores: Regression scores for each data point. A score of 1 indicates a
classification of label 1. Should be in range (0, 1).
Returns:
fpr, fnr
All returned values are numpy arrays with the same length as scores.
fpr: False positive rate at a given threshold value.
fnr: False negative rate at a given threshold value.
"""
scores = np.asarray(scores, dtype=float)
labels = np.asarray(labels, dtype=float)
indices = np.argsort(scores)
labels = labels[indices]
fnr = np.cumsum(labels) / np.sum(labels)
fnr = np.insert(fnr, 0, 0)
negative_labels = 1 - labels
fpr = np.cumsum(negative_labels[::-1])[::-1]
fpr /= np.sum(negative_labels)
fpr = np.append(fpr, 0)
return fpr, fnr
def calculate_auc(labels,
predictions,
sample_weight = None):
return skmetrics.roc_auc_score(
labels, predictions, sample_weight=sample_weight)
def dprime_from_auc(auc):
"""Returns a d-prime measure corresponding to an ROC area under the curve.
D-prime denotes the sensitivity index:
https://en.wikipedia.org/wiki/Sensitivity_index
Args:
auc: (float) Area under an ROC curve.
Returns:
Float value representing the separation of score distributions
between negative and positive scores for a labeler (an algorithm or
group of readers who assign continuous suspicion scores to a series
of cases). The AUC is given by PHI(mu / sqrt(2)), where PHI is the
cumulative distribution function of the normal distribution.
"""
return math.sqrt(2) * scipy.stats.norm.ppf(auc)
def balanced_accuracy(labels,
predictions):
return skmetrics.balanced_accuracy_score(y_true=labels, y_pred=predictions)
| Python | 0.999995 |
8d2167bc3bc37f68e225ddcd86bc4114d90be87e | Update version number | local_packages.py | local_packages.py | import sublime
from .event_handler import EventHandler
from .settings import Settings
package_control_installed = False
LOCAL_PACKAGES_VERSION = "0.1.2"
evaluating = False
retry_times = 3
def plugin_loaded():
Settings.reset()
Settings.startup()
EventHandler().register_handler(
evaluate_install,
EventHandler().ON_LOAD
)
print("[Local Packages] v%s" % (LOCAL_PACKAGES_VERSION))
check_package_control()
def check_package_control():
try:
__import__("Package Control").package_control
global package_control_installed
package_control_installed = True
except:
global retry_times
if retry_times > 0:
retry_times -= 1
sublime.set_timeout(check_package_control, 3000)
else:
sublime.error_message(
"Package Control is not found.\n\n" +
"Local Packages will now disabled"
)
return
evaluate_install()
def evaluate_install(view=None):
global evaluating
if evaluating:
return
print("[Local Packages] Evaluating missing packages")
from .package_evaluator import PackageEvaluatorThread
evaluating = True
PackageEvaluatorThread(
window=sublime.active_window(),
callback=on_installed
).start()
def on_installed(failed_packages=[]):
global evaluating
evaluating = False
if len(failed_packages) > 0:
msg = "Local Packages failed to install %s missing packages...\n" % (
len(failed_packages)
)
limit = 10
for package in failed_packages:
limit -= 1
if limit < 0:
break
msg += " - %s\n" % (package)
if limit < 0:
msg += "and more..."
sublime.error_message(msg)
else:
print("[Local Packages] Dependencies already installed")
| import sublime
from .event_handler import EventHandler
from .settings import Settings
package_control_installed = False
LOCAL_PACKAGES_VERSION = "0.1.1"
evaluating = False
retry_times = 3
def plugin_loaded():
Settings.reset()
Settings.startup()
EventHandler().register_handler(
evaluate_install,
EventHandler().ON_LOAD
)
print("[Local Packages] v%s" % (LOCAL_PACKAGES_VERSION))
check_package_control()
def check_package_control():
try:
__import__("Package Control").package_control
global package_control_installed
package_control_installed = True
except:
global retry_times
if retry_times > 0:
retry_times -= 1
sublime.set_timeout(check_package_control, 3000)
else:
sublime.error_message(
"Package Control is not found.\n\n" +
"Local Packages will now disabled"
)
return
evaluate_install()
def evaluate_install(view=None):
global evaluating
if evaluating:
return
print("[Local Packages] Evaluating missing packages")
from .package_evaluator import PackageEvaluatorThread
evaluating = True
PackageEvaluatorThread(
window=sublime.active_window(),
callback=on_installed
).start()
def on_installed(failed_packages=[]):
global evaluating
evaluating = False
if len(failed_packages) > 0:
msg = "Local Packages failed to install %s missing packages...\n" % (
len(failed_packages)
)
limit = 10
for package in failed_packages:
limit -= 1
if limit < 0:
break
msg += " - %s\n" % (package)
if limit < 0:
msg += "and more..."
sublime.error_message(msg)
else:
print("[Local Packages] Dependencies already installed")
| Python | 0.000002 |
b67642ce07631ffe621dc94207524c8049141987 | calculate vcirc | galpy/potential_src/plotRotcurve.py | galpy/potential_src/plotRotcurve.py | import numpy as nu
import galpy.util.bovy_plot as plot
def plotRotcurve(Pot,*args,**kwargs):
"""
NAME:
plotRotcurve
PURPOSE:
plot the rotation curve for this potential (in the z=0 plane for
non-spherical potentials)
INPUT:
Pot - Potential or list of Potential instances
Rrange -
grid - grid in R
savefilename - save to or restore from this savefile (pickle)
+bovy_plot.bovy_plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2010-07-10 - Written - Bovy (NYU)
"""
if kwargs.has_key('Rrange'):
Rrange= kwargs['Rrange']
kwargs.pop('Rrange')
else:
Rrange= [0.01,5.]
if kwargs.has_key('grid'):
grid= kwargs['grid']
kwargs.pop('grid')
else:
grid= 1001
if kwargs.has_key('savefilename'):
savefilename= kwargs['savefilename']
kwargs.pop('savefilename')
else:
savefilename= None
if not savefilename == None and os.path.exists(savefilename):
print "Restoring savefile "+savefilename+" ..."
savefile= open(savefilename,'rb')
rotcurve= pickle.load(savefile)
Rs= pickle.load(savefile)
savefile.close()
else:
Rs= nu.linspace(Rrange[0],Rrange[1],grid)
rotcurve= calcRotcurve(Pot,Rs)
if not savefilename == None:
print "Writing savefile "+savefilename+" ..."
savefile= open(savefilename,'wb')
pickle.dump(rotcurve,savefile)
pickle.dump(Rs,savefile)
savefile.close()
if not kwargs.has_key('xlabel'):
kwargs['xlabel']= r"$R/R_0$"
if not kwargs.has_key('ylabel'):
kwargs['ylabel']= r"$v_c(R)/v_c(R_0)$"
kwargs['xrange']= Rrange
return plot.bovy_plot(Rs,rotcurve,*args,
**kwargs)
def calcRotcurve(Pot,Rs):
"""
NAME:
calcRotcurve
PURPOSE:
calculate the rotation curve for this potential (in the z=0 plane for
non-spherical potentials)
INPUT:
Pot - Potential or list of Potential instances
Rs - (array of) radius(i)
OUTPUT:
array of vc
HISTORY:
2011-04-13 - Written - Bovy (NYU)
"""
isList= isinstance(Pot,list)
isNonAxi= ((isList and Pot[0].isNonAxi) or (not isList and Pot.isNonAxi))
if isNonAxi:
raise AttributeError("Rotation curve plotting for non-axisymmetric potentials is not currently supported")
try:
grid= len(Rs)
except TypeError:
grid=1
Rs= nu.array([Rs])
rotcurve= nu.zeros(grid)
from planarPotential import evaluateplanarRforces
for ii in range(grid):
try:
rotcurve[ii]= nu.sqrt(Rs[ii]*-evaluateplanarRforces(Rs[ii],Pot))
except TypeError:
from planarPotential import RZToplanarPotential
Pot= RZToplanarPotential(Pot)
rotcurve[ii]= nu.sqrt(Rs[ii]*-evaluateplanarRforces(Rs[ii],Pot))
return rotcurve
def vcirc(Pot,R):
"""
NAME:
vcirc
PURPOSE:
calculate the circular velocity at R in potential Pot
INPUT:
Pot - Potential instance or list of such instances
R - Galactocentric radius
OUTPUT:
circular rotation velocity
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
return nu.sqrt(R*-evaluateplanarRforces(R,Pot))
| import numpy as nu
import galpy.util.bovy_plot as plot
def plotRotcurve(Pot,*args,**kwargs):
"""
NAME:
plotRotcurve
PURPOSE:
plot the rotation curve for this potential (in the z=0 plane for
non-spherical potentials)
INPUT:
Pot - Potential or list of Potential instances
Rrange -
grid - grid in R
savefilename - save to or restore from this savefile (pickle)
+bovy_plot.bovy_plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2010-07-10 - Written - Bovy (NYU)
"""
if kwargs.has_key('Rrange'):
Rrange= kwargs['Rrange']
kwargs.pop('Rrange')
else:
Rrange= [0.01,5.]
if kwargs.has_key('grid'):
grid= kwargs['grid']
kwargs.pop('grid')
else:
grid= 1001
if kwargs.has_key('savefilename'):
savefilename= kwargs['savefilename']
kwargs.pop('savefilename')
else:
savefilename= None
if not savefilename == None and os.path.exists(savefilename):
print "Restoring savefile "+savefilename+" ..."
savefile= open(savefilename,'rb')
rotcurve= pickle.load(savefile)
Rs= pickle.load(savefile)
savefile.close()
else:
Rs= nu.linspace(Rrange[0],Rrange[1],grid)
rotcurve= calcRotcurve(Pot,Rs)
if not savefilename == None:
print "Writing savefile "+savefilename+" ..."
savefile= open(savefilename,'wb')
pickle.dump(rotcurve,savefile)
pickle.dump(Rs,savefile)
savefile.close()
if not kwargs.has_key('xlabel'):
kwargs['xlabel']= r"$R/R_0$"
if not kwargs.has_key('ylabel'):
kwargs['ylabel']= r"$v_c(R)/v_c(R_0)$"
kwargs['xrange']= Rrange
return plot.bovy_plot(Rs,rotcurve,*args,
**kwargs)
def calcRotcurve(Pot,Rs):
"""
NAME:
calcRotcurve
PURPOSE:
calculate the rotation curve for this potential (in the z=0 plane for
non-spherical potentials)
INPUT:
Pot - Potential or list of Potential instances
Rs - (array of) radius(i)
OUTPUT:
array of vc
HISTORY:
2011-04-13 - Written - Bovy (NYU)
"""
isList= isinstance(Pot,list)
isNonAxi= ((isList and Pot[0].isNonAxi) or (not isList and Pot.isNonAxi))
if isNonAxi:
raise AttributeError("Rotation curve plotting for non-axisymmetric potentials is not currently supported")
try:
grid= len(Rs)
except TypeError:
grid=1
Rs= nu.array([Rs])
rotcurve= nu.zeros(grid)
from planarPotential import evaluateplanarRforces
for ii in range(grid):
try:
rotcurve[ii]= nu.sqrt(Rs[ii]*-evaluateplanarRforces(Rs[ii],Pot))
except TypeError:
from planarPotential import RZToplanarPotential
Pot= RZToplanarPotential(Pot)
rotcurve[ii]= nu.sqrt(Rs[ii]*-evaluateplanarRforces(Rs[ii],Pot))
return rotcurve
| Python | 0.998805 |
4ca8889396595f9da99becbb88fb7e38ab0ed560 | Raise exception if connection not succeed and customize error message | hunter/reviewsapi.py | hunter/reviewsapi.py | import requests
import os
from .endpoints import *
class UnauthorizedToken(Exception):
pass
class ReviewsAPI:
def __init__(self):
token = os.environ.get('UDACITY_AUTH_TOKEN')
self.headers = {'Authorization': token, 'Content-Length': '0'}
def certifications(self):
try:
raw_response = requests.get(CERTIFICATIONS_URL, headers=self.headers)
response = raw_response.json()
raw_response.raise_for_status()
certifications_list = [item['project_id'] for item in response if item['status'] == 'certified']
return certifications_list
except requests.exceptions.HTTPError:
raise UnauthorizedToken('Maybe it\'s time to change you token!')
def request_reviews(self, certifications_list):
projects = self.__projects(certifications_list)
return requests.post(SUBMISSION_REQUESTS, json=projects, headers=self.headers)
# TODO Add support to multi language
def __projects(self, certifications_list):
projects_list = []
for certification in certifications_list:
projects_list.append({'project_id': certification, 'language': 'pt-br'})
return {'projects': projects_list}
| import requests
import os
from .endpoints import *
class UnauthorizedToken(Exception):
pass
class ReviewsAPI:
def __init__(self):
token = os.environ.get('UDACITY_AUTH_TOKEN')
self.headers = {'Authorization': token, 'Content-Length': '0'}
def certifications(self):
try:
raw_response = requests.get(CERTIFICATIONS_URL, headers=self.headers)
response = raw_response.json()
certifications_list = [item['project_id'] for item in response if item['status'] == 'certified']
return certifications_list
except requests.exceptions.HTTPError:
raise UnauthorizedToken
def request_reviews(self, certifications_list):
projects = self.__projects(certifications_list)
return requests.post(SUBMISSION_REQUESTS, json=projects, headers=self.headers)
# TODO Add support to multi language
def __projects(self, certifications_list):
projects_list = []
for certification in certifications_list:
projects_list.append({'project_id': certification, 'language': 'pt-br'})
return {'projects': projects_list}
| Python | 0 |
2ba350d71e8a24471ea80fafa75803eb439c4ea6 | add require(internet) | i3pystatus/parcel.py | i3pystatus/parcel.py |
from urllib.request import urlopen
import webbrowser
import lxml.html
from lxml.cssselect import CSSSelector
from i3pystatus import IntervalModule
from i3pystatus.core.util import internet, require
class TrackerAPI:
def __init__(self, idcode):
pass
def status(self):
return {}
class DHL(TrackerAPI):
URL = "http://nolp.dhl.de/nextt-online-public/set_identcodes.do?lang=en&idc={idcode}"
def __init__(self, idcode):
self.idcode = idcode
self.url = self.URL.format(idcode=self.idcode)
error_selector = CSSSelector("#set_identcodes .error")
self.error = lambda page: len(error_selector(page)) >= 1
self.progress_selector = CSSSelector(
".greyprogressbar > span, .greenprogressbar > span")
self.last_status_selector = CSSSelector(".events .eventList tr")
self.intrarow_status_selector = CSSSelector("td.status div")
def status(self):
ret = {}
with urlopen(self.url) as page:
page = lxml.html.fromstring(page.read())
if self.error(page):
ret["progress"] = ret["status"] = "n/a"
else:
ret["progress"] = self.progress_selector(page)[0].text.strip()
last_row = self.last_status_selector(page)[-1]
ret["status"] = self.intrarow_status_selector(
last_row)[0].text.strip()
return ret
def get_url(self):
return self.url
class UPS(TrackerAPI):
URL = "http://wwwapps.ups.com/WebTracking/processRequest?HTMLVersion=5.0&Requester=NES&AgreeToTermsAndConditions=yes&loc=en_US&tracknum={idcode}"
def __init__(self, idcode):
self.idcode = idcode
self.url = self.URL.format(idcode=self.idcode)
error_selector = CSSSelector(".secBody .error")
self.error = lambda page: len(error_selector(page)) >= 1
self.status_selector = CSSSelector("#tt_spStatus")
self.progress_selector = CSSSelector(".pkgProgress div")
def status(self):
ret = {}
with urlopen(self.url) as page:
page = lxml.html.fromstring(page.read())
if self.error(page):
ret["progress"] = ret["status"] = "n/a"
else:
ret["status"] = self.status_selector(page)[0].text.strip()
progress_cls = int(
int(self.progress_selector(page)[0].get("class").strip("staus")) / 5 * 100)
ret["progress"] = progress_cls
return ret
def get_url(self):
return self.url
class ParcelTracker(IntervalModule):
interval = 20
settings = (
("instance", "Tracker instance"),
"format",
"name",
)
required = ("instance",)
format = "{name}:{progress}"
@require(internet)
def run(self):
fdict = {
"name": self.name,
}
fdict.update(self.instance.status())
self.output = {
"full_text": self.format.format(**fdict).strip(),
"instance": self.name,
}
def on_leftclick(self):
webbrowser.open_new_tab(self.instance.get_url())
|
from urllib.request import urlopen
import webbrowser
import lxml.html
from lxml.cssselect import CSSSelector
from i3pystatus import IntervalModule
class TrackerAPI:
def __init__(self, idcode):
pass
def status(self):
return {}
class DHL(TrackerAPI):
URL = "http://nolp.dhl.de/nextt-online-public/set_identcodes.do?lang=en&idc={idcode}"
def __init__(self, idcode):
self.idcode = idcode
self.url = self.URL.format(idcode=self.idcode)
error_selector = CSSSelector("#set_identcodes .error")
self.error = lambda page: len(error_selector(page)) >= 1
self.progress_selector = CSSSelector(
".greyprogressbar > span, .greenprogressbar > span")
self.last_status_selector = CSSSelector(".events .eventList tr")
self.intrarow_status_selector = CSSSelector("td.status div")
def status(self):
ret = {}
with urlopen(self.url) as page:
page = lxml.html.fromstring(page.read())
if self.error(page):
ret["progress"] = ret["status"] = "n/a"
else:
ret["progress"] = self.progress_selector(page)[0].text.strip()
last_row = self.last_status_selector(page)[-1]
ret["status"] = self.intrarow_status_selector(
last_row)[0].text.strip()
return ret
def get_url(self):
return self.url
class UPS(TrackerAPI):
URL = "http://wwwapps.ups.com/WebTracking/processRequest?HTMLVersion=5.0&Requester=NES&AgreeToTermsAndConditions=yes&loc=en_US&tracknum={idcode}"
def __init__(self, idcode):
self.idcode = idcode
self.url = self.URL.format(idcode=self.idcode)
error_selector = CSSSelector(".secBody .error")
self.error = lambda page: len(error_selector(page)) >= 1
self.status_selector = CSSSelector("#tt_spStatus")
self.progress_selector = CSSSelector(".pkgProgress div")
def status(self):
ret = {}
with urlopen(self.url) as page:
page = lxml.html.fromstring(page.read())
if self.error(page):
ret["progress"] = ret["status"] = "n/a"
else:
ret["status"] = self.status_selector(page)[0].text.strip()
progress_cls = int(
int(self.progress_selector(page)[0].get("class").strip("staus")) / 5 * 100)
ret["progress"] = progress_cls
return ret
def get_url(self):
return self.url
class ParcelTracker(IntervalModule):
interval = 20
settings = (
("instance", "Tracker instance"),
"format",
"name",
)
required = ("instance",)
format = "{name}:{progress}"
def run(self):
fdict = {
"name": self.name,
}
fdict.update(self.instance.status())
self.output = {
"full_text": self.format.format(**fdict).strip(),
"instance": self.name,
}
def on_leftclick(self):
webbrowser.open_new_tab(self.instance.get_url())
| Python | 0.000005 |
792e46bcd01d2718215a3cb324b8deca5e4e1a7e | bump 1.3.10 release (#160) | icontrol/__init__.py | icontrol/__init__.py | __version__ = "1.3.10"
| __version__ = "1.3.9"
| Python | 0 |
9d20717b39154252109153a6c5936922d28c6511 | mark unicode context values as safe | mailviews/utils.py | mailviews/utils.py | import textwrap
from collections import namedtuple
from django.utils.safestring import mark_safe
Docstring = namedtuple('Docstring', ('summary', 'body'))
def split_docstring(value):
"""
Splits the docstring of the given value into it's summary and body.
:returns: a 2-tuple of the format ``(summary, body)``
"""
docstring = getattr(value, '__doc__', '') or ''
docstring = textwrap.dedent(docstring)
if not docstring:
return None
pieces = docstring.strip().split('\n\n', 1)
try:
body = pieces[1]
except IndexError:
body = None
return Docstring(pieces[0], body)
def unimplemented(*args, **kwargs):
raise NotImplementedError
def unescape(context):
"""
Accepts a context object, returning a new context with autoescape off.
Useful for rendering plain-text templates without having to wrap the entire
template in an `{% autoescape off %}` tag.
"""
for key in context:
if type(context[key]) in [str, unicode]:
context[key] = mark_safe(context[key])
return context
| import textwrap
from collections import namedtuple
from django.utils.safestring import mark_safe
Docstring = namedtuple('Docstring', ('summary', 'body'))
def split_docstring(value):
"""
Splits the docstring of the given value into it's summary and body.
:returns: a 2-tuple of the format ``(summary, body)``
"""
docstring = getattr(value, '__doc__', '') or ''
docstring = textwrap.dedent(docstring)
if not docstring:
return None
pieces = docstring.strip().split('\n\n', 1)
try:
body = pieces[1]
except IndexError:
body = None
return Docstring(pieces[0], body)
def unimplemented(*args, **kwargs):
raise NotImplementedError
def unescape(context):
"""
Accepts a context object, returning a new context with autoescape off.
Useful for rendering plain-text templates without having to wrap the entire
template in an `{% autoescape off %}` tag.
"""
for key in context:
if type(context[key]) is str:
context[key] = mark_safe(context[key])
return context
| Python | 0.999996 |
bdca4889442e7d84f8c4e68ecdbee676d46ff264 | Fix data provider example file. | examples/test_with_data_provider.py | examples/test_with_data_provider.py | from pytf.dataprovider import DataProvider, call
@DataProvider(max_5=call(max=5), max_10=call(max=10), max_15=call(max=15))
class TestCase(object):
def __init__(self, max):
self.max = max
@DataProvider(n_3=call(n=3), n_7=call(n=7), n_12=call(n=12), n_20=call(n=20))
def test_test(self, n):
assert n < self.max
| from pytf.dataprovider import DataProvider
try:
from unittest.mock import call
except ImportError:
from mock import call
@DataProvider([call(max=5), call(max=10), call(max=15)])
class TestCase(object):
def __init__(self, max):
self.max = max
@DataProvider([call(n=3), call(n=7), call(n=12), call(n=20)])
def test_test(self, n):
assert n < self.max
| Python | 0 |
97e2e80b43ba3639e5af9deb6485c28da1a5e7af | change path | make_submission.py | make_submission.py | """
Ensemble by columnwise weighted sum.
The weights are determined by scipy.optimize.minimize using validation set predictions.
LB Private: 0.40076
LB Public: 0.39773
"""
import numpy as np
import pandas as pd
import sklearn.preprocessing as pp
path = './'
# Neural Networks
pred = [np.load(path + 'pred_TRI_kmax_' + str(k_max) + '.npy') for k_max in [4,5]]
pred.append(np.load(path + 'pred_Sparse_RI.npy'))
pred_NN = (pred[0] + pred[1] + pred[2]) / 3
# XGBoost
pred_XGB = (np.load(path + 'pred_RI.npy') + np.load(path + 'pred_CF.npy')) / 2
# Ensemble weights
w = np.array([1.,0.95657896,0.52392701,0.75156431,1.,0.77871818,0.81764163,0.9541003,0.82863579])
pr005 = pp.normalize(pred_NN * w + pred_XGB * (1 - w), norm = 'l1')
pred005 = pd.read_csv(path + 'sampleSubmission.csv', index_col = 0)
pred005.iloc[:,:] = pr005
pred005.to_csv(path + 'pred005.csv', float_format='%.8f')
| """
Ensemble by columnwise weighted sum.
The weights are determined by scipy.optimize.minimize using validation set predictions.
LB Private: 0.40076
LB Public: 0.39773
"""
import numpy as np
import pandas as pd
import sklearn.preprocessing as pp
path = '~/'
# Neural Networks
pred = [np.load(path + 'pred_TRI_kmax_' + str(k_max) + '.npy') for k_max in [4,5]]
pred.append(np.load(path + 'pred_Sparse_RI.npy'))
pred_NN = (pred[0] + pred[1] + pred[2]) / 3
# XGBoost
pred_XGB = (np.load(path + 'pred_RI.npy') + np.load(path + 'pred_CF.npy')) / 2
# Ensemble weights
w = np.array([1.,0.95657896,0.52392701,0.75156431,1.,0.77871818,0.81764163,0.9541003,0.82863579])
pr005 = pp.normalize(pred_NN * w + pred_XGB * (1 - w), norm = 'l1')
pred005 = pd.read_csv(path + 'sampleSubmission.csv', index_col = 0)
pred005.iloc[:,:] = pr005
pred005.to_csv(path + 'pred005.csv', float_format='%.8f')
| Python | 0.000001 |
542ddc0d0bd96c8ff8635f649344f468d7d497d0 | bump version to 0.2.3 | mallory/version.py | mallory/version.py | Version = "0.2.3"
| Version = "0.2.2"
| Python | 0.000001 |
ff9444ea838bb7ed3efae125d343cee2cec994a9 | Improve the level of comments in mysite/base/depends.py | mysite/base/depends.py | mysite/base/depends.py | # -*- coding: utf-8 -*-
# This file is part of OpenHatch.
# Copyright (C) 2011 Asheesh Laroia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
### This file exists to wrap some dependencies for other parts of the code.
###
### In general, core parts of the OpenHatch site are forbidden from importing
### some hard-to-install modules, like lxml. Those files import from here
### instead so that if the import fails, the site doesn't crash.
###
### This is so that new contributors can run the OpenHatch site without
### installing these hard-to-install dependencies.
# Used within this file
import os
import logging
# Wrap lxml and the modules that are part of it
try:
import lxml
import lxml.etree
import lxml.html
except:
class nothing(object):
pass
lxml = nothing()
lxml.etree = None
lxml.html = None
if lxml.html is None:
logging.warning("Some parts of the OpenHatch site may fail because the lxml"
" library is not installed. Look in README.mkd for"
" information about lxml.")
# Provide a helper to check if svnadmin is available. If not,
# we can skip running code (and tests) that require it.
def svnadmin_available():
# FIXME: This should move to a variable controlled
# by settings.py.
SVNADMIN_PATH = '/usr/bin/svnadmin'
return os.path.exists(SVNADMIN_PATH)
### Here we try to import "Image", from the Python Imaging Library.
### If we fail, Image is None.
Image = None
try:
import Image
except:
try:
from PIL import Image
except ImportError:
### Okay, for a good time, let's hack sys.modules.
### This permits Django to think ImageFields might
### possibly work.
import sys
sys.modules['Image'] = sys.modules['sys']
# Wrap launchpadbugs. We wrap it because it imports libxml2,
# which qualifies as hard-to-install.
try:
import launchpadbugs
import launchpadbugs.connector
import launchpadbugs.basebuglistfilter
import launchpadbugs.text_bug
import launchpadbugs.lphelper
except ImportError: # usually because python-libxml2 is missing
launchpadbugs = None
logging.warning("launchpadbugs did not import. Install python-libxml2.")
| import os
try:
import lxml
import lxml.etree
import lxml.html
except:
class nothing(object):
pass
lxml = nothing()
lxml.etree = None
lxml.html = None
import logging
if lxml.html is None:
logging.warning("Some parts of the OpenHatch site may fail because the lxml"
" library is not installed. Look in README.mkd for"
" information about lxml.")
def svnadmin_available():
# FIXME: This should move to a variable controlled
# by settings.py.
SVNADMIN_PATH = '/usr/bin/svnadmin'
return os.path.exists(SVNADMIN_PATH)
### Here we try to import "Image", from the Python Imaging Library.
### If we fail, Image is None.
Image = None
try:
import Image
except:
try:
from PIL import Image
except ImportError:
### Okay, for a good time, let's hack sys.modules.
### This permits Django to think ImageFields might
### possibly work.
import sys
sys.modules['Image'] = sys.modules['sys']
try:
import launchpadbugs
import launchpadbugs.connector
import launchpadbugs.basebuglistfilter
import launchpadbugs.text_bug
import launchpadbugs.lphelper
except ImportError: # usually because python2libxml2 is missing
launchpadbugs = None
logging.warning("launchpadbugs did not import. Install python-libxml2.")
| Python | 0.000015 |
bf8b29e7d05a7b476198109f1dccfd42da38f73b | Update pack.py: copy directory to destination instead of compressing | pack.py | pack.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' Generate static webpage files '
import os
import sys
import shutil
usage_prompt = 'Usage: python3 pack.py <destination_path> [-H <hostname>]'
protocal = "http"
hostname = ''
host_path = os.path.join('scripts', 'host.js')
site_dir = 'site'
if (len(sys.argv) < 2):
print(usage_prompt)
sys.exit(0)
else:
des_path = sys.argv[1] + site_dir
for i, arg in enumerate(sys.argv[2:]):
if (arg == '-H' and i + 3 < len(sys.argv)):
hostname = protocal + '://' + sys.argv[i + 3]
if hostname != '':
print("Hostname changed to '%s'" % hostname)
host_file = open(host_path, 'w')
host_file.write("var hostname = '%s'" % hostname)
host_file.close()
print("Gulp building...")
os.system("gulp clean --silent")
os.system("gulp build --silent")
print("Copying files to '%s'..." % des_path)
shutil.rmtree(des_path, ignore_errors=True)
shutil.copytree(site_dir, des_path)
print("Done.")
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' Generate static webpage files '
import os
import sys
usage_prompt = '''Usage:
python3 pack.py
python3 pack.py -H <hostname>
python3 pack.py { ? | -h | --help }'''
protocal = "http"
hostname = 'localhost'
filename_host = os.path.join('scripts', 'host.js')
dir_site = 'site'
filename_pkg = dir_site + '.tar.gz'
for i, arg in enumerate(sys.argv[1:]):
if (arg == '?' or arg == '-h' or arg == '--help'):
print(usage_prompt)
sys.exit(0)
elif (arg == '-H' and i + 2 < len(sys.argv)):
hostname = sys.argv[i + 2]
hostname = protocal + '://' + hostname
print("Hostname set to '%s'" % hostname)
host_file = open(filename_host, 'w')
host_file.write("var hostname = '%s'" % hostname)
host_file.close()
print("Gulp building...")
os.system("gulp clean --silent")
os.system("gulp build --silent")
print("Compressing...")
os.system("tar -zcf %s %s" % (filename_pkg, dir_site))
print("Files saved to '%s'" % filename_pkg)
| Python | 0 |
0db54aacbb1607e2d1d505bc57864dd421d90529 | fix indentation | adhocracy/model/userbadges.py | adhocracy/model/userbadges.py | from datetime import datetime
import logging
from sqlalchemy import Table, Column, Integer, ForeignKey, DateTime, Unicode
from adhocracy.model import meta
log = logging.getLogger(__name__)
badge_table = Table(
'badge', meta.data,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False),
Column('group_id', Integer, ForeignKey('group.id', ondelete="CASCADE")))
user_badges_table = Table(
'user_badges', meta.data,
Column('id', Integer, primary_key=True),
Column('badge_id', Integer, ForeignKey('badge.id'),
nullable=False),
Column('user_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False))
class Badge(object):
def __init__(self, title, color):
self.title = title
self.color = color
def __repr__(self):
return "<Badge(%s,%s)>" % (self.id,
self.title.encode('ascii', 'replace'))
def __unicode__(self):
return self.title
def count(self):
if self._count is None:
from badges import Badges
q = meta.Session.query(Badges)
q = q.filter(Badges.badge == self)
self._count = q.count()
return self._count
def __le__(self, other):
return self.title >= other.title
def __lt__(self, other):
return self.title > other.title
@classmethod
def by_id(cls, id, instance_filter=True, include_deleted=False):
try:
q = meta.Session.query(Badge)
q = q.filter(Badge.id == id)
return q.limit(1).first()
except Exception, e:
log.warn("by_id(%s): %s" % (id, e))
return None
@classmethod
def find(cls, title):
q = meta.Session.query(Badge).filter(Badge.title.like(title))
return q.first()
@classmethod
def all(cls):
q = meta.Session.query(Badge)
return q.all()
@classmethod
def create(cls, title, color):
badge = cls(title, color)
meta.Session.add(badge)
meta.Session.flush()
return badge
@classmethod
def find_or_create(cls, title):
badge = cls.find(title)
if badge is None:
badge = cls.create(title)
return badge
def to_dict(self):
return dict(id=self.id,
title=self.title,
color=self.color,
users=[user.name for user in self.users])
def _index_id(self):
return self.id
class UserBadge(object):
def __init__(self, user, badge, creator):
self.user = user
self.badge = badge
self.creator = creator
def __repr__(self):
badge = self.badge.name.encode('ascii', 'replace')
return "<userbadges(%s, badge %s/%s for user%s/%s)>" % (
self.id, self.user.id, self.user.name, self.badge.id, badge)
def delete(self):
meta.Session.delete(self)
meta.Session.flush()
@classmethod
def find(cls, id):
q = meta.Session.query(UserBadge)
q = q.filter(UserBadge.id == id)
return q.limit(1).first()
@classmethod
def create(cls, user, badge, creator):
assert isinstance(badge, Badge), (
"badge has to be an :class:`adhocracy.model.badge.Badge`")
userbadge = cls(user, badge, creator)
meta.Session.add(userbadge)
meta.Session.flush()
return userbadge
def _index_id(self):
return self.id
| from datetime import datetime
import logging
from sqlalchemy import Table, Column, Integer, ForeignKey, DateTime, Unicode
from adhocracy.model import meta
log = logging.getLogger(__name__)
badge_table = Table(
'badge', meta.data,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False),
Column('group_id', Integer, ForeignKey('group.id', ondelete="CASCADE")))
user_badges_table = Table('user_badges', meta.data,
Column('id', Integer, primary_key=True),
Column('badge_id', Integer, ForeignKey('badge.id'),
nullable=False),
Column('user_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False))
class Badge(object):
def __init__(self, title, color):
self.title = title
self.color = color
def __repr__(self):
return "<Badge(%s,%s)>" % (self.id,
self.title.encode('ascii', 'replace'))
def __unicode__(self):
return self.title
def count(self):
if self._count is None:
from badges import Badges
q = meta.Session.query(Badges)
q = q.filter(Badges.badge == self)
self._count = q.count()
return self._count
def __le__(self, other):
return self.title >= other.title
def __lt__(self, other):
return self.title > other.title
@classmethod
def by_id(cls, id, instance_filter=True, include_deleted=False):
try:
q = meta.Session.query(Badge)
q = q.filter(Badge.id == id)
return q.limit(1).first()
except Exception, e:
log.warn("by_id(%s): %s" % (id, e))
return None
@classmethod
def find(cls, title):
q = meta.Session.query(Badge).filter(Badge.title.like(title))
return q.first()
@classmethod
def all(cls):
q = meta.Session.query(Badge)
return q.all()
@classmethod
def create(cls, title, color):
badge = cls(title, color)
meta.Session.add(badge)
meta.Session.flush()
return badge
@classmethod
def find_or_create(cls, title):
badge = cls.find(title)
if badge is None:
badge = cls.create(title)
return badge
def to_dict(self):
return dict(id=self.id,
title=self.title,
color=self.color,
users=[user.name for user in self.users])
def _index_id(self):
return self.id
class UserBadge(object):
def __init__(self, user, badge, creator):
self.user = user
self.badge = badge
self.creator = creator
def __repr__(self):
badge = self.badge.name.encode('ascii', 'replace')
return "<userbadges(%s, badge %s/%s for user%s/%s)>" % (
self.id, self.user.id, self.user.name, self.badge.id, badge)
def delete(self):
meta.Session.delete(self)
meta.Session.flush()
@classmethod
def find(cls, id):
q = meta.Session.query(UserBadge)
q = q.filter(UserBadge.id == id)
return q.limit(1).first()
@classmethod
def create(cls, user, badge, creator):
assert isinstance(badge, Badge), (
"badge has to be an :class:`adhocracy.model.badge.Badge`")
userbadge = cls(user, badge, creator)
meta.Session.add(userbadge)
meta.Session.flush()
return userbadge
def _index_id(self):
return self.id
| Python | 0.000358 |
3027c1ece280bc665f03781203d6b37b1c1bd82c | fix parsing of HTML entities with HTMLParser | weboob/tools/parser.py | weboob/tools/parser.py | # -*- coding: utf-8 -*-
"""
Copyright(C) 2010 Romain Bignon
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
try:
# XXX Currently, elementtidy segfaults when there are no error, because of
# the behavior of libtidy.
# A patch has been sent to Debian:
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=576343
#
# As it is not integrated in Debian yet, and as this problem persists on other
# systems, using elementtidy is for now disabled.
raise ImportError
from elementtidy import TidyHTMLTreeBuilder
TidyHTMLTreeBuilder.ElementTree = ElementTree # force cElementTree if using it.
HTMLTreeBuilder = TidyHTMLTreeBuilder.TidyHTMLTreeBuilder
except ImportError:
from HTMLParser import HTMLParser
import htmlentitydefs
class HTMLTreeBuilder(HTMLParser):
def __init__(self, encoding=None):
HTMLParser.__init__(self)
self._target = ElementTree.TreeBuilder()
def doctype(self, name, pubid, system):
pass
def close(self):
tree = self._target.close()
return tree
def handle_starttag(self, tag, attrs):
self._target.start(tag, dict(attrs))
def handle_startendtag(self, tag, attrs):
self._target.start(tag, dict(attrs))
self._target.end(tag)
def handle_charref(self, name):
self._target.data(unichr(int(name)))
def handle_entityref(self, name):
self._target.data(unichr(htmlentitydefs.name2codepoint[name]))
def handle_data(self, data):
self._target.data(data)
def handle_endtag(self, tag):
try:
self._target.end(tag)
except:
pass
class StandardParser(object):
def parse(self, data, encoding=None):
parser = HTMLTreeBuilder(encoding)
tree = ElementTree.parse(data, parser)
for elem in tree.getiterator():
if elem.tag.startswith('{'):
elem.tag = elem.tag[elem.tag.find('}')+1:]
return tree
def tostring(element):
e = ElementTree.Element('body')
e.text = element.text
e.tail = element.tail
for sub in element.getchildren():
e.append(sub)
s = ElementTree.tostring(e, 'utf-8')
return unicode(s)
| # -*- coding: utf-8 -*-
"""
Copyright(C) 2010 Romain Bignon
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
try:
# XXX Currently, elementtidy segfaults when there are no error, because of
# the behavior of libtidy.
# A patch has been sent to Debian:
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=576343
#
# As it is not integrated in Debian yet, and as this problem persists on other
# systems, using elementtidy is for now disabled.
raise ImportError
from elementtidy import TidyHTMLTreeBuilder
TidyHTMLTreeBuilder.ElementTree = ElementTree # force cElementTree if using it.
HTMLTreeBuilder = TidyHTMLTreeBuilder.TidyHTMLTreeBuilder
except ImportError:
from HTMLParser import HTMLParser
class HTMLTreeBuilder(HTMLParser):
def __init__(self, encoding=None):
HTMLParser.__init__(self)
self._target = ElementTree.TreeBuilder()
def doctype(self, name, pubid, system):
pass
def close(self):
tree = self._target.close()
return tree
def handle_starttag(self, tag, attrs):
self._target.start(tag, dict(attrs))
def handle_startendtag(self, tag, attrs):
self._target.start(tag, dict(attrs))
self._target.end(tag)
def handle_data(self, data):
self._target.data(data)
def handle_endtag(self, tag):
try:
self._target.end(tag)
except:
pass
class StandardParser(object):
def parse(self, data, encoding=None):
parser = HTMLTreeBuilder(encoding)
tree = ElementTree.parse(data, parser)
for elem in tree.getiterator():
if elem.tag.startswith('{'):
elem.tag = elem.tag[elem.tag.find('}')+1:]
return tree
def tostring(element):
e = ElementTree.Element('body')
e.text = element.text
e.tail = element.tail
for sub in element.getchildren():
e.append(sub)
s = ElementTree.tostring(e, 'utf-8')
return unicode(s)
| Python | 0.000001 |
bc2e7d77eb4aaa6d0063951a98de78c462f261ae | Use timezone-aware datetime object | confirmation/models.py | confirmation/models.py | # -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $'
import os
import re
from hashlib import sha1
from django.db import models
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.conf import settings
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from confirmation.util import get_status_field
try:
import mailer
send_mail = mailer.send_mail
except ImportError:
# no mailer app present, stick with default
pass
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class ConfirmationManager(models.Manager):
def confirm(self, confirmation_key):
if SHA1_RE.search(confirmation_key):
try:
confirmation = self.get(confirmation_key=confirmation_key)
except self.model.DoesNotExist:
return False
obj = confirmation.content_object
status_field = get_status_field(obj._meta.app_label, obj._meta.module_name)
setattr(obj, status_field, getattr(settings, 'STATUS_ACTIVE', 1))
obj.save()
return obj
return False
def send_confirmation(self, obj, email_address):
confirmation_key = sha1(str(os.urandom(20)) + str(email_address)).hexdigest()
current_site = Site.objects.get_current()
activate_url = u'https://%s%s' % (current_site.domain,
reverse('confirmation.views.confirm', kwargs={'confirmation_key': confirmation_key}))
context = Context({
'activate_url': activate_url,
'current_site': current_site,
'confirmation_key': confirmation_key,
'target': obj,
'days': getattr(settings, 'EMAIL_CONFIRMATION_DAYS', 10),
})
templates = [
'confirmation/%s_confirmation_email_subject.txt' % obj._meta.module_name,
'confirmation/confirmation_email_subject.txt',
]
template = loader.select_template(templates)
subject = template.render(context).strip().replace(u'\n', u' ') # no newlines, please
templates = [
'confirmation/%s_confirmation_email_body.txt' % obj._meta.module_name,
'confirmation/confirmation_email_body.txt',
]
template = loader.select_template(templates)
body = template.render(context)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email_address])
return self.create(content_object=obj, date_sent=now(), confirmation_key=confirmation_key)
class Confirmation(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
date_sent = models.DateTimeField(_('sent'))
confirmation_key = models.CharField(_('activation key'), max_length=40)
objects = ConfirmationManager()
class Meta:
verbose_name = _('confirmation email')
verbose_name_plural = _('confirmation emails')
def __unicode__(self):
return _('confirmation email for %s') % self.content_object
| # -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $'
import os
import re
import datetime
from hashlib import sha1
from django.db import models
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.conf import settings
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from confirmation.util import get_status_field
try:
import mailer
send_mail = mailer.send_mail
except ImportError:
# no mailer app present, stick with default
pass
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class ConfirmationManager(models.Manager):
def confirm(self, confirmation_key):
if SHA1_RE.search(confirmation_key):
try:
confirmation = self.get(confirmation_key=confirmation_key)
except self.model.DoesNotExist:
return False
obj = confirmation.content_object
status_field = get_status_field(obj._meta.app_label, obj._meta.module_name)
setattr(obj, status_field, getattr(settings, 'STATUS_ACTIVE', 1))
obj.save()
return obj
return False
def send_confirmation(self, obj, email_address):
confirmation_key = sha1(str(os.urandom(20)) + str(email_address)).hexdigest()
current_site = Site.objects.get_current()
activate_url = u'https://%s%s' % (current_site.domain,
reverse('confirmation.views.confirm', kwargs={'confirmation_key': confirmation_key}))
context = Context({
'activate_url': activate_url,
'current_site': current_site,
'confirmation_key': confirmation_key,
'target': obj,
'days': getattr(settings, 'EMAIL_CONFIRMATION_DAYS', 10),
})
templates = [
'confirmation/%s_confirmation_email_subject.txt' % obj._meta.module_name,
'confirmation/confirmation_email_subject.txt',
]
template = loader.select_template(templates)
subject = template.render(context).strip().replace(u'\n', u' ') # no newlines, please
templates = [
'confirmation/%s_confirmation_email_body.txt' % obj._meta.module_name,
'confirmation/confirmation_email_body.txt',
]
template = loader.select_template(templates)
body = template.render(context)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email_address])
return self.create(content_object=obj, date_sent=datetime.datetime.now(), confirmation_key=confirmation_key)
class Confirmation(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
date_sent = models.DateTimeField(_('sent'))
confirmation_key = models.CharField(_('activation key'), max_length=40)
objects = ConfirmationManager()
class Meta:
verbose_name = _('confirmation email')
verbose_name_plural = _('confirmation emails')
def __unicode__(self):
return _('confirmation email for %s') % self.content_object
| Python | 0.000001 |
c159a61396cd0b2e9a26c5210212e2dd93849fd5 | Add functions | ImageEvolution.py | ImageEvolution.py | import os, sys
from PIL import Image, ImageDraw
from random import randint
inputPath= 'picture.jpg'
outputPath = 'altered.png'
def evolveImage():
"""Given an image, returns an altered version of the image"""
def readImage(path):
"""Returns a PIL image object given a path."""
return Image.open(path)
def saveImage(img):
"""Given a PIL image, saves it to disk."""
img.save(outputPath)
def initializePopulation(x,y):
"""Input: x,y dimensions
Output: An initial population.
Each individual is a list of four lists.
The first three lists are [x,y] coordinates.
The fourth list corresponds to a [R,G,B] color."""
def popToImage(x,y,population):
"""Input: x,y dimensions, the whole population
Output: a PIL image object created by overlaying
individuals triangles on top of each other and mixing the
RGB color values."""
def evaluation(population, img, imgAltered):
"""Input: the entire population, the original image, an altered image
Output: a list of length len(population), composed of
floats between 0 and 100, where 0 is the least fit,
and 100 is the most fit."""
def selection(population, fitness):
"""Input: the entire population, fitness output from the evaluation function
Output: a population list with length less than the input"""
def crossover(population):
"""Input: the entire population
Output: a population list with length longer than the input"""
def mutation(population):
"""Input: the entire population
Output: a population list with some random properties of
some of the individuals altered."""
saveImage(evolveImage())
"""
JACK
"""
def randomRBG():
"""returns a tuple with 3 random values"""
return (randint(0,255),randint(0,255),randint(0,255))
def randomTri(height,width):
"""
given height and width of image, generate a triangle with size relative to the dimensions
returns a list of tuples
"""
x_offset = randint(-width/5,width)
y_offset = randint(-height/5,height)
#offset of the triangle
factor = 2
#the maximum size of the triangle relative to dimensions. ie 1/factor
h = int(height / factor)
w = int(width / factor)
A = [randint(0, int(w/2)),randint(0,int(2*h/3))]
B = [randint(int(w/2), w),randint(0, int(2*h/3))]
C = [randint(0, w),randint(int(2*h/3), h)]
A[0] += x_offset
B[0] += x_offset
C[0] += x_offset
A[1] += y_offset
B[1] += y_offset
C[1] += y_offset
return [tuple(A),tuple(B),tuple(C)]
def inTriangle(pt_coord,tri_coord):
"""
pt_coord is a list(point) containing 2 coordinates
tri_coord is a list containing 3 lists(points)
returns True if point is in triangle, False otherwise
"""
x,y = pt_coord[0], pt_coord[1]
A = [tri_coord[0][0], tri_coord[0][1]]
B = [tri_coord[1][0], tri_coord[1][1]]
C = [tri_coord[2][0], tri_coord[2][1]]
x_low = min(A[0], B[0],C[0])
x_hi = max(A[0], B[0],C[0])
if x not in range(x_low, x_hi):
return False
y_low = min(A[1], B[1],C[1])
y_hi = max(A[1], B[1],C[1])
if y not in range(y_low, y_hi):
return False
lines = []
if x in range(min(A[0],B[0]), max(A[0],B[0])):
lines.append([A,B])
if x in range(min(C[0],B[0]), max(C[0],B[0])):
lines.append([B,C])
if x in range(min(A[0],C[0]), max(A[0],C[0])):
lines.append([C,A])
if y in range(findY(lines[0][0],lines[0][1],x), findY(lines[1][0],lines[1][1],x)):
return True
else:
return False
def findY(A,B,x):
"""
given a line formed by 2 points A and B
returns the value of y at x on that line
"""
m = (B[1]-A[1]) / (B[0]-A[0])
b = A[1] - m*A[0]
return m*x + b
| import os, sys
from PIL import Image
inputPath= 'picture.jpg'
outputPath = 'altered.png'
def evolveImage():
"""Given an image, returns an altered version of the image"""
def readImage(path):
"""Returns a PIL image object given a path."""
return Image.open(path)
def saveImage(img):
"""Given a PIL image, saves it to disk."""
img.save(outputPath)
def initializePopulation(x,y):
"""Input: x,y dimensions
Output: An initial population.
Each individual is a list of four lists.
The first three lists are [x,y] coordinates.
The fourth list corresponds to a [R,G,B] color."""
def popToImage(x,y,population):
"""Input: x,y dimensions, the whole population
Output: a PIL image object created by overlaying
individuals triangles on top of each other and mixing the
RGB color values."""
def evaluation(population, img, imgAltered):
"""Input: the entire population, the original image, an altered image
Output: a list of length len(population), composed of
floats between 0 and 100, where 0 is the least fit,
and 100 is the most fit."""
def selection(population, fitness):
"""Input: the entire population, fitness output from the evaluation function
Output: a population list with length less than the input"""
def crossover(population):
"""Input: the entire population
Output: a population list with length longer than the input"""
def mutation(population):
"""Input: the entire population
Output: a population list with some random properties of
some of the individuals altered."""
saveImage(evolveImage())
| Python | 0.007992 |
9fd89a23e55d9b0b393c3975758b9e2c16a3cda1 | Set MOOSE_DIR if the user doesn't have it | python/MooseDocs/common/moose_docs_file_tree.py | python/MooseDocs/common/moose_docs_file_tree.py | #pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import re
import MooseDocs
from moose_docs_import import moose_docs_import
from nodes import DirectoryNode, MarkdownFileIndexNode, MarkdownFilePageNode, CopyFileNode
def finder(node, name):
"""Helper for finding child by name"""
for child in node.children:
if child.name == name:
return child
return None
def tree_builder(files, root, base, node, directory):
"""
Helper for building markdown file tree.
Inputs:
files[set]:
"""
for item in os.listdir(directory):
# Complete path to the directory item (path or filename)
path = os.path.join(directory, item)
# Move along if path not in list of files
if path in files:
# Special case when the supplied node is the root, this maintains the root node
# and creates an index node from which everything will stem.
if item == 'index.md':
if node.parent is None:
node = MarkdownFileIndexNode('', base=base, root_directory=root, parent=node)
elif isinstance(node, DirectoryNode):
node = node.replace(MarkdownFileIndexNode(node.name, root_directory=root,
base=base))
# General markdown files
elif item.endswith('.md'):
MarkdownFilePageNode(item[:-3], root_directory=root, base=base, parent=node)
# Other files to copy
elif item.endswith(MooseDocs.common.EXTENSIONS):
CopyFileNode(item.lstrip('/'), root_directory=root, base=base, parent=node)
# Directories
elif os.path.isdir(path):
n = finder(node, item)
if n is None:
n = DirectoryNode(item, base=base, parent=node)
tree_builder(files, root, base, n, path)
def moose_docs_file_tree(config):
"""
Creates a unified markdown file tree from multiple locations.
Inputs:
config[dict]: Contains key value pairs, with each value containing another dict() with
key value pairs that are passed to moose_docs_import function.
"""
# Set the MOOSE_DIR if it does not exists so that the root_dir can always use it
if 'MOOSE_DIR' not in os.environ:
os.environ['MOOSE_DIR'] = MooseDocs.MOOSE_DIR
# Build the file tree
node = DirectoryNode('')
for value in config.itervalues():
value.setdefault('include', [])
value.setdefault('exclude', [])
value.setdefault('extensions', MooseDocs.common.EXTENSIONS)
value.setdefault('base', '')
value.setdefault('root_dir', MooseDocs.ROOT_DIR)
value['root_dir'] = re.sub(r'\$(\w+)', lambda m: os.getenv(m.group(1)), value['root_dir'])
if not os.path.isabs(value['root_dir']):
value['root_dir'] = os.path.join(MooseDocs.ROOT_DIR, value['root_dir'])
files = set(moose_docs_import(**value))
tree_builder(files,
value['root_dir'],
value['base'],
node,
os.path.join(value['root_dir'], value['base']))
# Remove un-used directories
for desc in node.descendants:
if isinstance(desc, DirectoryNode) and \
all(isinstance(x, DirectoryNode) for x in desc.descendants):
desc.parent = None
return node
| #pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import re
import MooseDocs
from moose_docs_import import moose_docs_import
from nodes import DirectoryNode, MarkdownFileIndexNode, MarkdownFilePageNode, CopyFileNode
def finder(node, name):
"""Helper for finding child by name"""
for child in node.children:
if child.name == name:
return child
return None
def tree_builder(files, root, base, node, directory):
"""
Helper for building markdown file tree.
Inputs:
files[set]:
"""
for item in os.listdir(directory):
# Complete path to the directory item (path or filename)
path = os.path.join(directory, item)
# Move along if path not in list of files
if path in files:
# Special case when the supplied node is the root, this maintains the root node
# and creates an index node from which everything will stem.
if item == 'index.md':
if node.parent is None:
node = MarkdownFileIndexNode('', base=base, root_directory=root, parent=node)
elif isinstance(node, DirectoryNode):
node = node.replace(MarkdownFileIndexNode(node.name, root_directory=root,
base=base))
# General markdown files
elif item.endswith('.md'):
MarkdownFilePageNode(item[:-3], root_directory=root, base=base, parent=node)
# Other files to copy
elif item.endswith(MooseDocs.common.EXTENSIONS):
CopyFileNode(item.lstrip('/'), root_directory=root, base=base, parent=node)
# Directories
elif os.path.isdir(path):
n = finder(node, item)
if n is None:
n = DirectoryNode(item, base=base, parent=node)
tree_builder(files, root, base, n, path)
def moose_docs_file_tree(config):
"""
Creates a unified markdown file tree from multiple locations.
Inputs:
config[dict]: Contains key value pairs, with each value containing another dict() with
key value pairs that are passed to moose_docs_import function.
"""
node = DirectoryNode('')
for value in config.itervalues():
value.setdefault('include', [])
value.setdefault('exclude', [])
value.setdefault('extensions', MooseDocs.common.EXTENSIONS)
value.setdefault('base', '')
value.setdefault('root_dir', MooseDocs.ROOT_DIR)
value['root_dir'] = re.sub(r'\$(\w+)', lambda m: os.getenv(m.group(1)), value['root_dir'])
if not os.path.isabs(value['root_dir']):
value['root_dir'] = os.path.join(MooseDocs.ROOT_DIR, value['root_dir'])
files = set(moose_docs_import(**value))
tree_builder(files,
value['root_dir'],
value['base'],
node,
os.path.join(value['root_dir'], value['base']))
# Remove un-used directories
for desc in node.descendants:
if isinstance(desc, DirectoryNode) and \
all(isinstance(x, DirectoryNode) for x in desc.descendants):
desc.parent = None
return node
| Python | 0 |
346e296872e1ca011eb5e469505de1c15c86732f | Clarify the comment about setting the PYTHON variable for the Doc Makefile. | Doc/tools/sphinx-build.py | Doc/tools/sphinx-build.py | # -*- coding: utf-8 -*-
"""
Sphinx - Python documentation toolchain
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by Georg Brandl.
:license: Python license.
"""
import sys
if __name__ == '__main__':
if sys.version_info[:3] < (2, 5, 0):
print >>sys.stderr, """\
Error: Sphinx needs to be executed with Python 2.5 or newer
(If you run this from the Makefile, you can set the PYTHON variable
to the path of an alternative interpreter executable, e.g.,
``make html PYTHON=python2.5``).
"""
sys.exit(1)
from sphinx import main
sys.exit(main(sys.argv))
| # -*- coding: utf-8 -*-
"""
Sphinx - Python documentation toolchain
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by Georg Brandl.
:license: Python license.
"""
import sys
if __name__ == '__main__':
if sys.version_info[:3] < (2, 5, 0):
print >>sys.stderr, """\
Error: Sphinx needs to be executed with Python 2.5 or newer.
(If you run this from the Makefile, you can set the PYTHON variable
to the path of an alternative interpreter executable.)
"""
sys.exit(1)
from sphinx import main
sys.exit(main(sys.argv))
| Python | 0 |
8070b119c11ad18e2c1979afef21503a255dd8d8 | Check the number of matches for each query | rockuefort.py | rockuefort.py | #!/usr/bin/python3
"""
Usage: rockuefort copy <file> <destination>
rockuefort symlink <file> <destination>
rockuefort list <file>
"""
from collections import OrderedDict
import subprocess
import sys
from docopt import docopt
def log(*args, **kwargs):
print("rockuefort:", *args, file=sys.stderr, **kwargs)
if __name__ == '__main__':
args = docopt(__doc__)
# Load and evaluate queries
files = OrderedDict()
queries = []
with open(args['<file>']) as f:
for line in f:
try:
c, query = line.strip().split(':', 1)
c = int(c)
except ValueError:
c = 1
query = line.strip()
queries.append((c, query))
for c, query in queries:
r = subprocess.check_output(['quodlibet', '--print-query', query])
matched_files = [mf.decode() for mf in r.splitlines() if mf]
nm = len(matched_files)
if nm != c:
log("Matched {} (expected {}): {}".format(nm, c, query))
for file in matched_files:
log(" match: {}".format(file))
for file in matched_files:
files.setdefault(file, []).append(query)
# Check for multiply-matched files
for file, queries in files.items():
if len(queries) > 1:
log("Matched by multiple: {}".format(file))
for q in queries:
log(" query: {}".format(q))
# Perform the requested action
if args['copy']:
log("Copying to {}".format(args['<destination>']))
...
elif args['symlink']:
log("Symlinking to {}".format(args['<destination>']))
...
else: # args['list']
for file in files:
print(file)
| #!/usr/bin/python3
"""
Usage: rockuefort copy <file> <destination>
rockuefort symlink <file> <destination>
rockuefort list <file>
"""
from collections import OrderedDict
import subprocess
import sys
from docopt import docopt
def log(*args, **kwargs):
print("rockuefort:", *args, file=sys.stderr, **kwargs)
if __name__ == '__main__':
args = docopt(__doc__)
# Load and evaluate queries
files = OrderedDict()
with open(args['<file>']) as f:
queries = [line.strip() for line in f]
for query in queries:
r = subprocess.check_output(['quodlibet', '--print-query', query])
matched_files = [mf.decode() for mf in r.splitlines() if mf]
for file in matched_files:
files.setdefault(file, []).append(query)
if not matched_files:
log("No match: {}".format(query))
# Check for multiply-matched files
for file, queries in files.items():
if len(queries) > 1:
log("Matched multiple: {}".format(file))
for q in queries:
log(" query: {}".format(q))
# Perform the requested action
if args['copy']:
log("Copying to {}".format(args['<destination>']))
...
elif args['symlink']:
log("Symlinking to {}".format(args['<destination>']))
...
else: # args['list']
for file in files:
print(file)
| Python | 0.00149 |
0b39cfbdbfa397be5e428425aedc9ebced62c6ec | Fix reversed lat/lon | projects/tpoafptarbmit/scrape.py | projects/tpoafptarbmit/scrape.py | #!/usr/bin/env python3
from urllib.parse import parse_qsl
import json
import os
import sys
import requests
from bs4 import BeautifulSoup
ROUTE_BASE_URL = 'http://www.thepassageride.com/Routes/'
def fetch_text(url):
r = requests.get(url)
if r.status_code != 200:
r.raise_for_status()
return r.text
def scrape_route_list(html):
print('Fetching route list...', end='')
routes = []
soup = BeautifulSoup(html, 'html.parser')
for link in soup.select('#wikitext a[href*="/Routes/"]'):
href = link.get('href')
routes.append({
'name': link.text,
'number': int(href.strip(ROUTE_BASE_URL)),
'url': href,
})
print('done (%d routes)' % len(routes))
return routes
def fetch_route_description(route_url):
print('\t%s' % route_url)
html = fetch_text(route_url)
soup = BeautifulSoup(html, 'html.parser')
description = [p.prettify() for p in soup.select('#wikitext p')]
map_url = soup.select_one('#wikitext a[href*="gmap-pedometer"]')
if map_url is not None:
map_url = map_url.get('href')
return {
'map_url': map_url,
'description': '\n'.join(description),
}
def fetch_route_map(map_url):
print('\t%s' % map_url, end='')
_, map_id = map_url.split('?r=')
path = '/getRoute.php' if int(map_id) <= 5_000_000 else '/gp/ajaxRoute/get'
r = requests.post('https://www.gmap-pedometer.com' + path, {'rId': map_id})
if r.status_code != 200:
r.raise_for_status()
data = parse_qsl(r.text)
polyline = [x[1] for x in data if x[0] == 'polyline'][0]
coords = []
points = polyline.split('a')
for i in range(0, len(points)-1, 2):
coords.append({
'lat': float(points[i+1]),
'lon': float(points[i]),
})
print(' ... done (%d coords)' % len(coords))
return coords
def route_to_geojson(route_meta, coords):
return {
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': [
[c['lat'], c['lon']]
for c in coords
]
},
'properties': route_meta
}
def main():
html = fetch_text(ROUTE_BASE_URL)
routes = []
for r in scrape_route_list(html):
print('#%d "%s"' % (r['number'], r['name']))
desc = fetch_route_description(r['url'])
if desc['map_url'] is not None:
coords = fetch_route_map(desc['map_url'])
else:
coords = []
geo = route_to_geojson({**r, **desc}, coords)
routes.append(geo)
collection = {
'type': 'FeatureCollection',
'features': routes
}
print('Dumping to file...')
with open('tpoafptarbmit.geojson', 'w') as fp:
json.dump(collection, fp, indent=4)
print('All done!')
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
from urllib.parse import parse_qsl
import json
import os
import sys
import requests
from bs4 import BeautifulSoup
ROUTE_BASE_URL = 'http://www.thepassageride.com/Routes/'
def fetch_text(url):
r = requests.get(url)
if r.status_code != 200:
r.raise_for_status()
return r.text
def scrape_route_list(html):
print('Fetching route list...', end='')
routes = []
soup = BeautifulSoup(html, 'html.parser')
for link in soup.select('#wikitext a[href*="/Routes/"]'):
href = link.get('href')
routes.append({
'name': link.text,
'number': int(href.strip(ROUTE_BASE_URL)),
'url': href,
})
print('done (%d routes)' % len(routes))
return routes
def fetch_route_description(route_url):
print('\t%s' % route_url)
html = fetch_text(route_url)
soup = BeautifulSoup(html, 'html.parser')
description = [p.prettify() for p in soup.select('#wikitext p')]
map_url = soup.select_one('#wikitext a[href*="gmap-pedometer"]')
if map_url is not None:
map_url = map_url.get('href')
return {
'map_url': map_url,
'description': '\n'.join(description),
}
def fetch_route_map(map_url):
print('\t%s' % map_url, end='')
_, map_id = map_url.split('?r=')
path = '/getRoute.php' if int(map_id) <= 5_000_000 else '/gp/ajaxRoute/get'
r = requests.post('https://www.gmap-pedometer.com' + path, {'rId': map_id})
if r.status_code != 200:
r.raise_for_status()
data = parse_qsl(r.text)
polyline = [x[1] for x in data if x[0] == 'polyline'][0]
coords = []
points = polyline.split('a')
for i in range(0, len(points)-1, 2):
coords.append({
'lat': float(points[i]),
'lon': float(points[i+1]),
})
print(' ... done (%d coords)' % len(coords))
return coords
def route_to_geojson(route_meta, coords):
return {
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': [
[c['lat'], c['lon']]
for c in coords
]
},
'properties': route_meta
}
def main():
html = fetch_text(ROUTE_BASE_URL)
routes = []
for r in scrape_route_list(html):
print('#%d "%s"' % (r['number'], r['name']))
desc = fetch_route_description(r['url'])
if desc['map_url'] is not None:
coords = fetch_route_map(desc['map_url'])
else:
coords = []
geo = route_to_geojson({**r, **desc}, coords)
routes.append(geo)
collection = {
'type': 'FeatureCollection',
'features': routes
}
print('Dumping to file...')
with open('tpoafptarbmit.geojson', 'w') as fp:
json.dump(collection, fp, indent=4)
print('All done!')
if __name__ == '__main__':
main()
| Python | 0.999871 |
02f59b60062004fc23dbfbfc6201b326b08513a8 | Add 404 exception | src/client/exceptions.py | src/client/exceptions.py | class HTTP4xx(Exception):
pass
class HTTP400(HTTP4xx):
pass
class HTTP404(HTTP4xx):
pass
class HTTP409(HTTP4xx):
pass
| class HTTP4xx(Exception):
pass
class HTTP400(HTTP4xx):
pass
class HTTP409(HTTP4xx):
pass
| Python | 0.000019 |
8d06ccd7aeefe5945bab44b01764bd62685a2e17 | Add missing member to API. | mindbender/api.py | mindbender/api.py | """Public API
Anything that is not defined here is **internal** and
unreliable for external use.
Motivation for api.py:
Storing the API in a module, as opposed to in __init__.py, enables
use of it internally.
For example, from `pipeline.py`:
>> from . import api
>> api.do_this()
The important bit is avoiding circular dependencies, where api.py
is calling upon a module which in turn calls upon api.py.
"""
import logging
from . import schema
from .pipeline import (
install,
uninstall,
ls,
search,
Loader,
discover_loaders,
register_root,
register_data,
register_host,
register_format,
register_silo,
register_family,
register_loaders_path,
register_plugins,
registered_host,
registered_families,
registered_loaders_paths,
registered_formats,
registered_data,
registered_root,
registered_silos,
deregister_plugins,
deregister_format,
deregister_family,
deregister_data,
deregister_loaders_path,
any_representation,
fixture,
)
from .lib import (
format_staging_dir,
format_shared_dir,
format_version,
time,
find_latest_version,
parse_version,
)
logging.basicConfig()
__all__ = [
"install",
"uninstall",
"schema",
"ls",
"search",
"Loader",
"discover_loaders",
"register_host",
"register_data",
"register_format",
"register_silo",
"register_family",
"register_loaders_path",
"register_plugins",
"register_root",
"registered_root",
"registered_silos",
"registered_loaders_paths",
"registered_host",
"registered_families",
"registered_formats",
"registered_data",
"deregister_plugins",
"deregister_format",
"deregister_family",
"deregister_data",
"deregister_loaders_path",
"format_staging_dir",
"format_shared_dir",
"format_version",
"find_latest_version",
"parse_version",
"time",
"any_representation",
"fixture",
]
| """Public API
Anything that is not defined here is **internal** and
unreliable for external use.
Motivation for api.py:
Storing the API in a module, as opposed to in __init__.py, enables
use of it internally.
For example, from `pipeline.py`:
>> from . import api
>> api.do_this()
The important bit is avoiding circular dependencies, where api.py
is calling upon a module which in turn calls upon api.py.
"""
import logging
from . import schema
from .pipeline import (
install,
uninstall,
ls,
search,
Loader,
discover_loaders,
register_root,
register_data,
register_host,
register_format,
register_silo,
register_family,
register_loaders_path,
register_plugins,
registered_host,
registered_families,
registered_loaders_paths,
registered_formats,
registered_data,
registered_root,
registered_silos,
deregister_plugins,
deregister_format,
deregister_family,
deregister_data,
deregister_loaders_path,
any_representation,
fixture,
)
from .lib import (
format_staging_dir,
format_shared_dir,
format_version,
time,
find_latest_version,
parse_version,
)
logging.basicConfig()
__all__ = [
"install",
"uninstall",
"schema",
"ls",
"search",
"Loader",
"discover_loaders",
"register_host",
"register_data",
"register_format",
"register_silo",
"register_family",
"register_loaders_path",
"register_plugins",
"register_root",
"registered_root",
"registered_silos",
"registered_loaders_paths",
"registered_host",
"registered_families",
"registered_formats",
"registered_data",
"deregister_plugins",
"deregister_family",
"deregister_data",
"deregister_loaders_path",
"format_staging_dir",
"format_shared_dir",
"format_version",
"find_latest_version",
"parse_version",
"time",
"any_representation",
"fixture",
]
| Python | 0 |
f68e8612f1e8198a4b300b67536d654e13809eb4 | Allow SHA256 hashes in URLs | plinth/modules/monkeysphere/urls.py | plinth/modules/monkeysphere/urls.py | #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
URLs for the monkeysphere module.
"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^sys/monkeysphere/$', views.index, name='index'),
url(r'^sys/monkeysphere/(?P<ssh_fingerprint>[0-9A-Za-z:+/]+)/import/$',
views.import_key, name='import'),
url(r'^sys/monkeysphere/(?P<fingerprint>[0-9A-Fa-f]+)/details/$',
views.details, name='details'),
url(r'^sys/monkeysphere/(?P<fingerprint>[0-9A-Fa-f]+)/publish/$',
views.publish, name='publish'),
url(r'^sys/monkeysphere/cancel/$', views.cancel, name='cancel'),
]
| #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
URLs for the monkeysphere module.
"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^sys/monkeysphere/$', views.index, name='index'),
url(r'^sys/monkeysphere/(?P<ssh_fingerprint>[0-9A-Fa-f:]+)/import/$',
views.import_key, name='import'),
url(r'^sys/monkeysphere/(?P<fingerprint>[0-9A-Fa-f]+)/details/$',
views.details, name='details'),
url(r'^sys/monkeysphere/(?P<fingerprint>[0-9A-Fa-f]+)/publish/$',
views.publish, name='publish'),
url(r'^sys/monkeysphere/cancel/$', views.cancel, name='cancel'),
]
| Python | 0.000004 |
547c9e36255870bcee8a800a3fa95c3806a95c2c | Update links when it starts getting redirected | newsApp/linkManager.py | newsApp/linkManager.py | import os
import time
from constants import *
from dbhelper import *
from dbItemManagerV2 import DbItemManagerV2
from link import Link
LINK_EXPIRY_TIME_IN_DAYS = 80
class LinkManager(DbItemManagerV2):
"""
Manage links stored on AWS dynamo db database.
Contains functions for CRUD operations on the links stored
Following environment variables need to be set -
'LINKTAGSTABLE_CONNECTIONSTRING' : connection string of link tags table.
"""
def __init__(self):
"""
Instantiates the linkManager.
"""
DbItemManagerV2.__init__(self,
os.environ['LINKTAGSTABLE_CONNECTIONSTRING'])
def get(self, linkId):
"""
Put a new link.
"""
dbItem = DbItemManagerV2.get(self, linkId);
link = Link(linkId, dbItem.tags)
#handle the case when link starts gettting redirected to new url
if link.id != linkId:
self.delete(linkId)
self.put(link)
return link
def getStaleLinks(self):
"""
Returns a list of linkIds of stale links.
"""
linkExpiryCutoff = int(time.time()) - LINK_EXPIRY_TIME_IN_DAYS*24*60*60;
scanResults = DbItemManagerV2.scan(self, pubtime__lte = linkExpiryCutoff)
return (result.id for result in scanResults)
def getUnprocessedLinks(self):
return DbItemManagerV2.query_2(
self,
isProcessed__eq = 'false',
index = 'isProcessed-itemId-index')
| import os
import time
from constants import *
from dbhelper import *
from dbItemManagerV2 import DbItemManagerV2
from link import Link
LINK_EXPIRY_TIME_IN_DAYS = 80
class LinkManager(DbItemManagerV2):
"""
Manage links stored on AWS dynamo db database.
Contains functions for CRUD operations on the links stored
Following environment variables need to be set -
'LINKTAGSTABLE_CONNECTIONSTRING' : connection string of link tags table.
"""
def __init__(self):
"""
Instantiates the linkManager.
"""
DbItemManagerV2.__init__(self,
os.environ['LINKTAGSTABLE_CONNECTIONSTRING'])
def get(self, linkId):
"""
Put a new link.
"""
dbItem = DbItemManagerV2.get(self, linkId);
return Link(linkId, dbItem.tags)
def getStaleLinks(self):
"""
Returns a list of linkIds of stale links.
"""
linkExpiryCutoff = int(time.time()) - LINK_EXPIRY_TIME_IN_DAYS*24*60*60;
scanResults = DbItemManagerV2.scan(self, pubtime__lte = linkExpiryCutoff)
return (result.id for result in scanResults)
def getUnprocessedLinks(self):
return DbItemManagerV2.query_2(
self,
isProcessed__eq = 'false',
index = 'isProcessed-itemId-index')
| Python | 0 |
366ecdd77520004c307cbbf127bb374ab546ce7e | Use windows API to change the AppID and use our icon. | run-quince.py | run-quince.py | #!/usr/bin/env python3
# coding: utf-8
# Raytheon BBN Technologies 2016
# Contributiors: Graham Rowlands
#
# This file runs the main loop
# Use PyQt5 by default
import os
os.environ["QT_API"] = 'pyqt5'
from qtpy.QtWidgets import QApplication
import sys
import argparse
import ctypes
from quince.view import *
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str, help='Measurement library filename')
args = parser.parse_args()
app = QApplication([])
# Setup icon
png_path = os.path.join(os.path.dirname(__file__), "assets/quince_icon.png")
app.setWindowIcon(QIcon(png_path))
# Convince windows that this is a separate application to get the task bar icon working
# https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105
if (os.name == 'nt'):
myappid = u'BBN.quince.gui.0001' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
window = NodeWindow()
window.load_yaml(args.filename)
app.aboutToQuit.connect(window.cleanup)
window.show()
sys.exit(app.exec_())
| #!/usr/bin/env python3
# coding: utf-8
# Raytheon BBN Technologies 2016
# Contributiors: Graham Rowlands
#
# This file runs the main loop
# Use PyQt5 by default
import os
os.environ["QT_API"] = 'pyqt5'
from qtpy.QtWidgets import QApplication
import sys
import argparse
from quince.view import *
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str, help='Measurement library filename')
args = parser.parse_args()
app = QApplication([])
# Setup icon
png_path = os.path.join(os.path.dirname(__file__), "assets/quince_icon.png")
app.setWindowIcon(QIcon(png_path))
window = NodeWindow()
window.load_yaml(args.filename)
app.aboutToQuit.connect(window.cleanup)
window.show()
sys.exit(app.exec_())
| Python | 0 |
976e9b622b66bee30d304a801cc39733cc3e8d58 | refactor to reduce size of __init__ | wikichatter/section.py | wikichatter/section.py | import mwparserfromhell as mwp
class Error(Exception):
pass
class TooManyHeadingsError(Error):
pass
EPI_LEVEL = 0
class Section(object):
def __init__(self, wikitext):
self._subsections = []
self.comments = []
wikicode = self._get_wikicode_from_input(wikitext)
self._load_section_info(wikicode)
def _get_wikicode_from_input(self, wikitext):
# wikitext can be either a wikicode object or a string
if type(wikitext) is not mwp.wikicode.Wikicode:
wikicode = mwp.parse(self.wikitext, skip_style_tags=True)
else:
wikicode = wikitext
return wikicode
def _load_section_info(self, wikicode):
wiki_headings = [h for h in wikicode.filter_headings()]
if len(wiki_headings) > 1:
raise TooManyHeadingsError()
if len(wiki_headings) == 0:
self.heading = None
self.level = EPI_LEVEL
else:
self.heading = str(wiki_headings[0].title)
self.level = wiki_headings[0].level
self.text = self._get_section_text_from_wikicode(wikicode)
def append_subsection(self, subsection):
self._subsections.append(subsection)
def extract_comments(self, extractor):
self.comments = extractor(self.text)
for s in self._subsections:
s.extract_comments(extractor)
def _get_section_text_from_wikicode(self, wikicode):
sections = wikicode.get_sections(include_headings=False)
return str(sections[-1])
@property
def subsections(self):
return list(self._subsections)
def __str__(self):
return "<{0}: {1}>".format(self.level, self.heading)
def __repr__(self):
return str(self)
def simplify(self):
basic = {}
basic["subsections"] = [s.simplify() for s in self._subsections]
basic["comments"] = [c.simplify() for c in self.comments]
if self.heading is not None:
basic["heading"] = self.heading
return basic
def generate_sections_from_raw_text(text):
flat_sections = _generate_flat_list_of_sections(text)
return _sort_into_hierarchy(flat_sections)
def _generate_flat_list_of_sections(text):
wikicode = mwp.parse(text, skip_style_tags=True)
mw_sections = wikicode.get_sections(include_lead=True, flat=True)
sections = [Section(s) for s in mw_sections if len(s.nodes) > 0]
return sections
def _sort_into_hierarchy(section_list):
top_level_sections = []
section_stack = []
for section in section_list:
while len(section_stack) > 0:
cur_sec = section_stack[-1]
if cur_sec.level < section.level and cur_sec.level is not EPI_LEVEL:
cur_sec.append_subsection(section)
section_stack.append(section)
break
section_stack.pop()
if len(section_stack) is 0:
top_level_sections.append(section)
section_stack.append(section)
return top_level_sections
| import mwparserfromhell as mwp
class Error(Exception):
pass
class TooManyHeadingsError(Error):
pass
EPI_LEVEL = 0
class Section(object):
def __init__(self, wikitext):
self._subsections = []
self.comments = []
# wikitext can be either a wikicode object or a string
if type(wikitext) is not mwp.wikicode.Wikicode:
wikicode = mwp.parse(self.wikitext, skip_style_tags=True)
else:
wikicode = wikitext
wiki_headings = [h for h in wikicode.filter_headings()]
if len(wiki_headings) > 1:
raise TooManyHeadingsError()
if len(wiki_headings) == 0:
self.heading = None
self.level = EPI_LEVEL
else:
self.heading = str(wiki_headings[0].title)
self.level = wiki_headings[0].level
self.text = self._get_section_text_from_wikicode(wikicode)
def append_subsection(self, subsection):
self._subsections.append(subsection)
def extract_comments(self, extractor):
self.comments = extractor(self.text)
for s in self._subsections:
s.extract_comments(extractor)
def _get_section_text_from_wikicode(self, wikicode):
sections = wikicode.get_sections(include_headings=False)
return str(sections[-1])
@property
def subsections(self):
return list(self._subsections)
def __str__(self):
return "<{0}: {1}>".format(self.level, self.heading)
def __repr__(self):
return str(self)
def simplify(self):
basic = {}
basic["subsections"] = [s.simplify() for s in self._subsections]
basic["comments"] = [c.simplify() for c in self.comments]
if self.heading is not None:
basic["heading"] = self.heading
return basic
def generate_sections_from_raw_text(text):
flat_sections = _generate_flat_list_of_sections(text)
return _sort_into_hierarchy(flat_sections)
def _generate_flat_list_of_sections(text):
wikicode = mwp.parse(text, skip_style_tags=True)
mw_sections = wikicode.get_sections(include_lead=True, flat=True)
sections = [Section(s) for s in mw_sections if len(s.nodes) > 0]
return sections
def _sort_into_hierarchy(section_list):
top_level_sections = []
section_stack = []
for section in section_list:
while len(section_stack) > 0:
cur_sec = section_stack[-1]
if cur_sec.level < section.level and cur_sec.level is not EPI_LEVEL:
cur_sec.append_subsection(section)
section_stack.append(section)
break
section_stack.pop()
if len(section_stack) is 0:
top_level_sections.append(section)
section_stack.append(section)
return top_level_sections
| Python | 0.000005 |
9e6b596aa856e1d50a9c2c2882289cf1a5d8c0c0 | Fix up plotting script | plot.py | plot.py | #!/usr/bin/env python
"""Processing routines for the waveFlapper case."""
import foampy
import numpy as np
import matplotlib.pyplot as plt
width_2d = 0.1
width_3d = 3.66
m_paddle = 1270.0 # Paddle mass in kg, from OMB manual
h_piston = 3.3147
I_paddle = 1/3*m_paddle*h_piston**2
def plot_force():
"""Plots the streamwise force on the paddle over time."""
pass
def plot_moment():
data = foampy.load_forces()
i = 10
t = data["time"][i:]
m = data.mz
m = m[i:] * width_3d / width_2d
period = 2.2
omega = 2 * np.pi / period
theta = 0.048 * np.sin(omega * t)
theta_doubledot = -0.048 * omega**2 * np.sin(omega * t)
m_inertial = I_paddle * theta_doubledot
m = m + m_inertial
plt.figure()
plt.plot(t, m)
plt.xlabel("Time (s)")
plt.ylabel("Flapper moment (Nm)")
print(
"Max moment from CFD (including inertia) = {:0.1f} Nm".format(m.max())
)
print("Theoretical max moment (including inertia) =", 5500*3.3, "Nm")
plt.show()
if __name__ == "__main__":
plot_moment()
| #!/usr/bin/env python
"""Processing routines for the waveFlapper case."""
import foampy
import numpy as np
import matplotlib.pyplot as plt
width_2d = 0.1
width_3d = 3.66
m_paddle = 1270.0 # Paddle mass in kg, from OMB manual
h_piston = 3.3147
I_paddle = 1/3*m_paddle*h_piston**2
def plot_force():
"""Plots the streamwise force on the paddle over time."""
def plot_moment():
data = foampy.load_forces_moments()
i = 10
t = data["time"][i:]
m = data["moment"]["pressure"]["z"] + data["moment"]["viscous"]["z"]
m = m[i:]*width_3d/width_2d
period = 2.2
omega = 2*np.pi/period
theta = 0.048*np.sin(omega*t)
theta_doubledot = -0.048*omega**2*np.sin(omega*t)
m_inertial = I_paddle*theta_doubledot
m = m + m_inertial
plt.figure()
plt.plot(t, m)
plt.xlabel("t (s)")
plt.ylabel("Flapper moment (Nm)")
print("Max moment from CFD (including inertia) = {:0.1f}".format(m.max()), "Nm")
print("Theoretical max moment (including inertia) =", 5500*3.3, "Nm")
plt.show()
if __name__ == "__main__":
plot_moment()
| Python | 0.000095 |
8d6fcc6d318423e87e9942c2551c0d9b3c282e25 | Allow TELEGRAM_TEMPLATE to be a string (#208) | plugins/telegram/alerta_telegram.py | plugins/telegram/alerta_telegram.py | import logging
import os
try:
from alerta.plugins import app # alerta >= 5.0
except ImportError:
from alerta.app import app # alerta < 5.0
from alerta.plugins import PluginBase
import telepot
from jinja2 import Template, UndefinedError
DEFAULT_TMPL = """
{% if customer %}Customer: `{{customer}}` {% endif %}
*[{{ status.capitalize() }}] {{ environment }} {{ severity.capitalize() }}*
{{ event | replace("_","\_") }} {{ resource.capitalize() }}
```
{{ text }}
```
"""
LOG = logging.getLogger('alerta.plugins.telegram')
TELEGRAM_TOKEN = app.config.get('TELEGRAM_TOKEN') \
or os.environ.get('TELEGRAM_TOKEN')
TELEGRAM_CHAT_ID = app.config.get('TELEGRAM_CHAT_ID') \
or os.environ.get('TELEGRAM_CHAT_ID')
TELEGRAM_WEBHOOK_URL = app.config.get('TELEGRAM_WEBHOOK_URL', None) \
or os.environ.get('TELEGRAM_WEBHOOK_URL')
TELEGRAM_TEMPLATE = app.config.get('TELEGRAM_TEMPLATE') \
or os.environ.get('TELEGRAM_TEMPLATE')
DASHBOARD_URL = app.config.get('DASHBOARD_URL', '') \
or os.environ.get('DASHBOARD_URL')
class TelegramBot(PluginBase):
def __init__(self, name=None):
self.bot = telepot.Bot(TELEGRAM_TOKEN)
LOG.debug('Telegram: %s', self.bot.getMe())
if TELEGRAM_WEBHOOK_URL and \
TELEGRAM_WEBHOOK_URL != self.bot.getWebhookInfo()['url']:
self.bot.setWebhook(TELEGRAM_WEBHOOK_URL)
LOG.debug('Telegram: %s', self.bot.getWebhookInfo())
super(TelegramBot, self).__init__(name)
if TELEGRAM_TEMPLATE:
if os.path.exists(TELEGRAM_TEMPLATE):
with open(TELEGRAM_TEMPLATE, 'r') as f:
self.template = Template(f.read())
else:
self.template = Template(TELEGRAM_TEMPLATE)
else:
self.template = Template(DEFAULT_TMPL)
def pre_receive(self, alert):
return alert
def post_receive(self, alert):
if alert.repeat:
return
try:
text = self.template.render(alert.__dict__)
except UndefinedError:
text = "Something bad has happened but also we " \
"can't handle your telegram template message."
LOG.debug('Telegram: message=%s', text)
if TELEGRAM_WEBHOOK_URL:
keyboard = {
'inline_keyboard': [
[
{'text': 'ack', 'callback_data': '/ack ' + alert.id},
{'text': 'close', 'callback_data': '/close ' + alert.id},
{'text': 'blackout',
'callback_data': '/blackout %s|%s|%s' % (alert.environment,
alert.resource,
alert.event)}
]
]
}
else:
keyboard = None
try:
response = self.bot.sendMessage(TELEGRAM_CHAT_ID,
text,
parse_mode='Markdown',
reply_markup=keyboard)
except telepot.exception.TelegramError as e:
raise RuntimeError("Telegram: ERROR - %s, description= %s, json=%s",
e.error_code,
e.description,
e.json)
except Exception as e:
raise RuntimeError("Telegram: ERROR - %s", e)
LOG.debug('Telegram: %s', response)
def status_change(self, alert, status, summary):
return
| import logging
import os
try:
from alerta.plugins import app # alerta >= 5.0
except ImportError:
from alerta.app import app # alerta < 5.0
from alerta.plugins import PluginBase
import telepot
from jinja2 import Template, UndefinedError
DEFAULT_TMPL = """
{% if customer %}Customer: `{{customer}}` {% endif %}
*[{{ status.capitalize() }}] {{ environment }} {{ severity.capitalize() }}*
{{ event | replace("_","\_") }} {{ resource.capitalize() }}
```
{{ text }}
```
"""
LOG = logging.getLogger('alerta.plugins.telegram')
TELEGRAM_TOKEN = app.config.get('TELEGRAM_TOKEN') \
or os.environ.get('TELEGRAM_TOKEN')
TELEGRAM_CHAT_ID = app.config.get('TELEGRAM_CHAT_ID') \
or os.environ.get('TELEGRAM_CHAT_ID')
TELEGRAM_WEBHOOK_URL = app.config.get('TELEGRAM_WEBHOOK_URL', None) \
or os.environ.get('TELEGRAM_WEBHOOK_URL')
TELEGRAM_TEMPLATE = app.config.get('TELEGRAM_TEMPLATE') \
or os.environ.get('TELEGRAM_TEMPLATE')
DASHBOARD_URL = app.config.get('DASHBOARD_URL', '') \
or os.environ.get('DASHBOARD_URL')
class TelegramBot(PluginBase):
def __init__(self, name=None):
self.bot = telepot.Bot(TELEGRAM_TOKEN)
LOG.debug('Telegram: %s', self.bot.getMe())
if TELEGRAM_WEBHOOK_URL and \
TELEGRAM_WEBHOOK_URL != self.bot.getWebhookInfo()['url']:
self.bot.setWebhook(TELEGRAM_WEBHOOK_URL)
LOG.debug('Telegram: %s', self.bot.getWebhookInfo())
super(TelegramBot, self).__init__(name)
if TELEGRAM_TEMPLATE and os.path.exists(TELEGRAM_TEMPLATE):
with open(TELEGRAM_TEMPLATE, 'r') as f:
self.template = Template(f.read())
else:
self.template = Template(DEFAULT_TMPL)
def pre_receive(self, alert):
return alert
def post_receive(self, alert):
if alert.repeat:
return
try:
text = self.template.render(alert.__dict__)
except UndefinedError:
text = "Something bad has happened but also we " \
"can't handle your telegram template message."
LOG.debug('Telegram: message=%s', text)
if TELEGRAM_WEBHOOK_URL:
keyboard = {
'inline_keyboard': [
[
{'text': 'ack', 'callback_data': '/ack ' + alert.id},
{'text': 'close', 'callback_data': '/close ' + alert.id},
{'text': 'blackout',
'callback_data': '/blackout %s|%s|%s' % (alert.environment,
alert.resource,
alert.event)}
]
]
}
else:
keyboard = None
try:
response = self.bot.sendMessage(TELEGRAM_CHAT_ID,
text,
parse_mode='Markdown',
reply_markup=keyboard)
except telepot.exception.TelegramError as e:
raise RuntimeError("Telegram: ERROR - %s, description= %s, json=%s",
e.error_code,
e.description,
e.json)
except Exception as e:
raise RuntimeError("Telegram: ERROR - %s", e)
LOG.debug('Telegram: %s', response)
def status_change(self, alert, status, summary):
return
| Python | 0.000002 |
5a6cdb9dc08924dc90a24271dc45f4412250b06a | bump version | src/experimentator/__version__.py | src/experimentator/__version__.py | __version__ = '0.2.1'
| __version__ = '0.2.0'
| Python | 0 |
101f8c44ec0b55111f93e7c2a0d8f1710405452f | FIX get_name funtion | addons/nautical_search_by_ni/res_partner.py | addons/nautical_search_by_ni/res_partner.py | # -*- coding: utf-8 -*-
import datetime
from lxml import etree
import math
import pytz
import re
import openerp
from openerp import SUPERUSER_ID
from openerp import pooler, tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.yaml_import import is_comment
class res_partner(osv.osv):
_inherit = "res.partner"
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
national_identity = ''
if record.national_identity:
national_identity = '[' + record.national_identity + ']'
name = "%s %s" % (name, national_identity)
if record.parent_id and not record.is_company:
name = "%s, %s" % (record.parent_id.name, name)
if context.get('show_address'):
name = name + "\n" + self._display_address(cr, uid, record, without_company=True, context=context)
name = name.replace('\n\n','\n')
name = name.replace('\n\n','\n')
if context.get('show_email') and record.email:
name = "%s <%s>" % (name, record.email)
res.append((record.id, name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
# search on the name of the contacts and of its company
search_name = name
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
query_args = {'name': search_name}
# TODO: simplify this in trunk with `display_name`, once it is stored
# Perf note: a CTE expression (WITH ...) seems to have an even higher cost
# than this query with duplicated CASE expressions. The bulk of
# the cost is the ORDER BY, and it is inevitable if we want
# relevant results for the next step, otherwise we'd return
# a random selection of `limit` results.
query = ('''SELECT partner.id FROM res_partner partner
LEFT JOIN res_partner company
ON partner.parent_id = company.id
WHERE partner.national_identity ''' + operator + ''' %(name)s OR
partner.email ''' + operator + ''' %(name)s OR
CASE
WHEN company.id IS NULL OR partner.is_company
THEN partner.name
ELSE company.name || ', ' || partner.name
END ''' + operator + ''' %(name)s
ORDER BY
CASE
WHEN company.id IS NULL OR partner.is_company
THEN partner.name
ELSE company.name || ', ' || partner.name
END''')
if limit:
query += ' limit %(limit)s'
query_args['limit'] = limit
cr.execute(query, query_args)
ids = map(lambda x: x[0], cr.fetchall())
ids = self.search(cr, uid, [('id', 'in', ids)] + args, limit=limit, context=context)
if ids:
return self.name_get(cr, uid, ids, context)
return super(res_partner,self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
| # -*- coding: utf-8 -*-
import datetime
from lxml import etree
import math
import pytz
import re
import openerp
from openerp import SUPERUSER_ID
from openerp import pooler, tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.yaml_import import is_comment
class res_partner(osv.osv):
_inherit = "res.partner"
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
national_identity = ''
if record.national_identity:
national_identity = '[' + record.national_identity + ']'
name = "%s %s" % (national_identity, name)
if record.parent_id and not record.is_company:
name = "%s, %s" % (record.parent_id.name, name)
if context.get('show_address'):
name = name + "\n" + self._display_address(cr, uid, record, without_company=True, context=context)
name = name.replace('\n\n','\n')
name = name.replace('\n\n','\n')
if context.get('show_email') and record.email:
name = "%s <%s>" % (name, record.email)
res.append((record.id, name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
# search on the name of the contacts and of its company
search_name = name
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
query_args = {'name': search_name}
# TODO: simplify this in trunk with `display_name`, once it is stored
# Perf note: a CTE expression (WITH ...) seems to have an even higher cost
# than this query with duplicated CASE expressions. The bulk of
# the cost is the ORDER BY, and it is inevitable if we want
# relevant results for the next step, otherwise we'd return
# a random selection of `limit` results.
query = ('''SELECT partner.id FROM res_partner partner
LEFT JOIN res_partner company
ON partner.parent_id = company.id
WHERE partner.national_identity ''' + operator + ''' %(name)s OR
partner.email ''' + operator + ''' %(name)s OR
CASE
WHEN company.id IS NULL OR partner.is_company
THEN partner.name
ELSE company.name || ', ' || partner.name
END ''' + operator + ''' %(name)s
ORDER BY
CASE
WHEN company.id IS NULL OR partner.is_company
THEN partner.name
ELSE company.name || ', ' || partner.name
END''')
if limit:
query += ' limit %(limit)s'
query_args['limit'] = limit
cr.execute(query, query_args)
ids = map(lambda x: x[0], cr.fetchall())
ids = self.search(cr, uid, [('id', 'in', ids)] + args, limit=limit, context=context)
if ids:
return self.name_get(cr, uid, ids, context)
return super(res_partner,self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
| Python | 0.000002 |
7b3f447e7fa83eed97b9d54fe79db01ea325f1d2 | Drop cargo-culted pin | application/setup.py | application/setup.py | from setuptools import setup
name = 'senic.nuimo_hub'
setup(
name=name,
version_format='{tag}.{commitcount}+{gitsha}',
url='https://github.com/getsenic/nuimo-hub-app',
author='Senic GmbH',
author_email='tom@senic.com',
description='...',
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
packages=[name],
namespace_packages=['senic'],
include_package_data=True,
package_dir={name: 'senic/nuimo_hub'},
package_data={
name: [
'.coveragerc',
'tests/*.py',
'tests/data/*.*',
'views/*.*',
],
},
zip_safe=False,
setup_requires=[
'setuptools-git >= 0',
'setuptools-git-version'
],
install_requires=[
'click',
'colander',
'cornice<2.0',
'pyramid',
'pyramid_tm',
'pytz',
'requests',
'senic.cryptoyaml',
'wifi',
],
extras_require={
'development': [
'devpi-client',
'docutils',
'flake8',
'jinja2',
'mock',
'pbr',
'pdbpp',
'pep8 < 1.6',
'py >= 1.4.17',
'pyflakes',
'pyquery',
'pyramid_debugtoolbar',
'pytest',
'pytest-cov',
'pytest-flakes',
'pytest-pep8',
'python-dateutil',
'repoze.sphinx.autointerface',
'setuptools-git',
'Sphinx',
'tox',
'waitress',
'webtest',
],
},
entry_points="""
[paste.app_factory]
main = senic.nuimo_hub:main
[console_scripts]
scan_wifi = senic.nuimo_hub.commands:scan_wifi
join_wifi = senic.nuimo_hub.commands:join_wifi
""",
)
| from setuptools import setup
name = 'senic.nuimo_hub'
setup(
name=name,
version_format='{tag}.{commitcount}+{gitsha}',
url='https://github.com/getsenic/nuimo-hub-app',
author='Senic GmbH',
author_email='tom@senic.com',
description='...',
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
packages=[name],
namespace_packages=['senic'],
include_package_data=True,
package_dir={name: 'senic/nuimo_hub'},
package_data={
name: [
'.coveragerc',
'tests/*.py',
'tests/data/*.*',
'views/*.*',
],
},
zip_safe=False,
setup_requires=[
'setuptools-git >= 0',
'setuptools-git-version'
],
install_requires=[
'click',
'colander',
'cornice<2.0',
'pyramid',
'pyramid_tm',
'pytz',
'requests',
'senic.cryptoyaml',
'wifi',
],
extras_require={
'development': [
'devpi-client',
'docutils',
'flake8',
'jinja2',
'mock',
'pbr',
'pdbpp',
'pep8 < 1.6',
'py >= 1.4.17',
'pyflakes < 1.4.0',
'pyquery',
'pyramid_debugtoolbar',
'pytest',
'pytest-cov',
'pytest-flakes',
'pytest-pep8',
'python-dateutil',
'repoze.sphinx.autointerface',
'setuptools-git',
'Sphinx',
'tox',
'waitress',
'webtest',
],
},
entry_points="""
[paste.app_factory]
main = senic.nuimo_hub:main
[console_scripts]
scan_wifi = senic.nuimo_hub.commands:scan_wifi
join_wifi = senic.nuimo_hub.commands:join_wifi
""",
)
| Python | 0 |
c880b8d7388cb700eee8184bda9f117d0a86887d | Update WinRMWebService. Implement __init__ and open_shell methods | winrm/winrm_service.py | winrm/winrm_service.py | from datetime import timedelta
import uuid
from http.transport import HttpPlaintext
from isodate.isoduration import duration_isoformat
import xmlwitch
import requests
import xml.etree.ElementTree as ET
class WinRMWebService(object):
"""
This is the main class that does the SOAP request/response logic. There are a few helper classes, but pretty
much everything comes through here first.
"""
DEFAULT_TIMEOUT = 'PT60S'
DEFAULT_MAX_ENV_SIZE = 153600
DEFAULT_LOCALE = 'en-US'
def __init__(self, endpoint, transport='kerberos', username=None, password=None, realm=None, service=None, keytab=None, ca_trust_path=None):
"""
@param string endpoint: the WinRM webservice endpoint
@param string transport: transport type, one of 'kerberos' (default), 'ssl', 'plaintext'
@param string username: username
@param string password: password
@param string realm: the Kerberos realm we are authenticating to
@param string service: the service name, default is HTTP
@param string keytab: the path to a keytab file if you are using one
@param string ca_trust_path: Certification Authority trust path
"""
self.endpoint = endpoint
self.timeout = WinRMWebService.DEFAULT_TIMEOUT
self.max_env_sz = WinRMWebService.DEFAULT_MAX_ENV_SIZE
self.locale = WinRMWebService.DEFAULT_LOCALE
self.transport = transport
self.username = username
self.password = password
self.service = service
self.keytab = keytab
self.ca_trust_path = ca_trust_path
def set_timeout(self, seconds):
"""
Operation timeout, see http://msdn.microsoft.com/en-us/library/ee916629(v=PROT.13).aspx
@param int seconds: the number of seconds to set the timeout to. It will be converted to an ISO8601 format.
"""
# in original library there is an alias - op_timeout method
return duration_isoformat(timedelta(seconds))
def open_shell(self, i_stream='stdin', o_stream='stdout stderr', working_directory=None, env_vars=None, noprofile=False, codepage=437, lifetime=None, idle_timeout=None):
"""
Create a Shell on the destination host
@param string i_stream: Which input stream to open. Leave this alone unless you know what you're doing (default: stdin)
@param string o_stream: Which output stream to open. Leave this alone unless you know what you're doing (default: stdout stderr)
@param string working_directory: the directory to create the shell in
@param dict env_vars: environment variables to set for the shell. Fir instance: {'PATH': '%PATH%;c:/Program Files (x86)/Git/bin/', 'CYGWIN': 'nontsec codepage:utf8'}
@returns The ShellId from the SOAP response. This is our open shell instance on the remote machine.
@rtype string
"""
# TODO implement self.merge_headers(header, resource_uri_cmd, action_create, h_opts)
xml = xmlwitch.Builder(version='1.0', encoding='utf-8')
with xml.env__Envelope(**self.namespaces):
with xml.env__Header:
xml.a__To(self.endpoint)
with xml.a__ReplyTo:
xml.a__Address('http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous', mustUnderstand='true')
xml.w__MaxEnvelopeSize('153600', mustUnderstand='true')
xml.a__MessageID('uuid:{0}'.format(uuid.uuid4()))
xml.w__Locale(None, xml__lang='en-US', mustUnderstand='false')
xml.p__DataLocale(None, xml__lang='en-US', mustUnderstand='false')
xml.w__OperationTimeout('PT60S')
xml.w__ResourceURI('http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd', mustUnderstand='true')
xml.a__Action('http://schemas.xmlsoap.org/ws/2004/09/transfer/Create', mustUnderstand='true')
with xml.w__OptionSet:
xml.w__Option(str(noprofile).upper(), Name='WINRS_NOPROFILE')
xml.w__Option(str(codepage), Name='WINRS_CODEPAGE')
with xml.env__Body:
with xml.rsp__Shell:
xml.rsp__InputStreams(i_stream)
xml.rsp__OutputStreams(o_stream)
if working_directory:
#TODO ensure that rsp:WorkingDirectory should be nested within rsp:Shell
xml.rsp_WorkingDirectory(working_directory)
# TODO: research Lifetime a bit more: http://msdn.microsoft.com/en-us/library/cc251546(v=PROT.13).aspx
#if lifetime:
# xml.rsp_Lifetime = iso8601_duration.sec_to_dur(lifetime)
# TODO: make it so the input is given in milliseconds and converted to xs:duration
if idle_timeout:
xml.rsp_IdleTimeOut = idle_timeout
if env_vars:
with xml.rsp_Environment:
for key, value in env_vars.items():
xml.rsp_Variable(value, Name=key)
response = self.send_message(str(xml))
root = ET.fromstring(response)
return root.find('.//*[@Name="ShellId"]').text
@property
def namespaces(self):
return {
'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xmlns:env': 'http://www.w3.org/2003/05/soap-envelope',
'xmlns:a': 'http://schemas.xmlsoap.org/ws/2004/08/addressing',
'xmlns:b': 'http://schemas.dmtf.org/wbem/wsman/1/cimbinding.xsd',
'xmlns:n': 'http://schemas.xmlsoap.org/ws/2004/09/enumeration',
'xmlns:x': 'http://schemas.xmlsoap.org/ws/2004/09/transfer',
'xmlns:w': 'http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd',
'xmlns:p': 'http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd',
'xmlns:rsp': 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell',
'xmlns:cfg': 'http://schemas.microsoft.com/wbem/wsman/1/config'
}
def send_request(self, message):
return requests.post(
self.endpoint,
data=message,
auth=(self.username, self.password))
def send_message(self, message):
response = self.send_request(message)
# TODO handle status codes other than HTTP OK 200
# TODO port error handling code
return response.text
def close_shell(self, shell_id):
"""
Close the shell
@param string shell_id: The shell id on the remote machine. See #open_shell
@returns This should have more error checking but it just returns true for now.
@rtype bool
"""
pass
endpoint = ''
transport = HttpPlaintext(endpoint) | from datetime import timedelta
from http.transport import HttpPlaintext
from isodate.isoduration import duration_isoformat
class WinRMWebService(object):
"""
This is the main class that does the SOAP request/response logic. There are a few helper classes, but pretty
much everything comes through here first.
"""
def set_timeout(self, seconds):
"""
Operation timeout, see http://msdn.microsoft.com/en-us/library/ee916629(v=PROT.13).aspx
@type seconds: number
@param seconds: the number of seconds to set the timeout to. It will be converted to an ISO8601 format.
"""
# in Ruby library there is an alias - op_timeout method
return duration_isoformat(timedelta(seconds))
endpoint = ''
transport = HttpPlaintext(endpoint) | Python | 0.000034 |
52dd018d08e00356218cb2789cee10976eff4359 | Disable automatic geocoding for addresses in Django admin | firecares/firecares_core/admin.py | firecares/firecares_core/admin.py | import autocomplete_light
from .models import Address, ContactRequest, AccountRequest, RegistrationWhitelist
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.gis import admin
from import_export.admin import ExportMixin
from firecares.firecares_core.models import UserProfile, PredeterminedUser, DepartmentAssociationRequest
User = get_user_model()
class LocalOpenLayersAdmin(admin.OSMGeoAdmin):
openlayers_url = settings.STATIC_URL + 'openlayers/OpenLayers.js'
class AddressAdmin(LocalOpenLayersAdmin):
list_display = ['__unicode__']
list_filter = ['state_province']
search_fields = ['address_line1', 'state_province', 'city']
class ContactRequestAdmin(ExportMixin, admin.ModelAdmin):
list_display = ['name', 'email', 'created_at']
search_fields = ['name', 'email']
class AccountRequestAdmin(ExportMixin, admin.ModelAdmin):
list_display = ['email', 'created_at']
search_fields = ['email']
form = autocomplete_light.modelform_factory(AccountRequest, fields='__all__')
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
form = autocomplete_light.modelform_factory(UserProfile, fields='__all__')
class UserAdmin(ExportMixin, BaseUserAdmin):
list_display = ['username', 'email', 'first_name', 'last_name', 'is_staff', 'date_joined']
inlines = [ProfileInline]
class DepartmentAssociationRequestAdmin(ExportMixin, admin.ModelAdmin):
model = DepartmentAssociationRequest
form = autocomplete_light.modelform_factory(DepartmentAssociationRequest, fields='__all__')
search_fields = ['user__username', 'user__email', 'approved_by__username', 'denied_by__username']
list_filter = ['approved_by', 'denied_by', 'approved_at', 'denied_at']
class RegistrationWhitelistAdmin(ExportMixin, admin.ModelAdmin):
model = RegistrationWhitelist
form = autocomplete_light.modelform_factory(RegistrationWhitelist, fields='__all__')
search_fields = ['email_or_domain', 'department__name', 'created_by__username']
list_filter = ['created_by', 'created_at', 'department__state']
class PredeterminedUserAdmin(ExportMixin, admin.ModelAdmin):
model = PredeterminedUser
form = autocomplete_light.modelform_factory(PredeterminedUser, fields='__all__')
search_fields = ['email', 'department__name']
admin.site.register(Address, AddressAdmin)
admin.site.register(ContactRequest, ContactRequestAdmin)
admin.site.register(AccountRequest, AccountRequestAdmin)
admin.site.register(RegistrationWhitelist, RegistrationWhitelistAdmin)
admin.site.register(PredeterminedUser, PredeterminedUserAdmin)
admin.site.register(DepartmentAssociationRequest, DepartmentAssociationRequestAdmin)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| import autocomplete_light
from .models import Address, ContactRequest, AccountRequest, RegistrationWhitelist
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.gis import admin
from import_export.admin import ExportMixin
from firecares.firecares_core.models import UserProfile, PredeterminedUser, DepartmentAssociationRequest
User = get_user_model()
class LocalOpenLayersAdmin(admin.OSMGeoAdmin):
openlayers_url = settings.STATIC_URL + 'openlayers/OpenLayers.js'
class AddressAdmin(LocalOpenLayersAdmin):
list_display = ['__unicode__']
list_filter = ['state_province']
search_fields = ['address_line1', 'state_province', 'city']
def save_model(self, request, obj, form, change):
if change:
obj.geocode()
super(AddressAdmin, self).save_model(request, obj, form, change)
class ContactRequestAdmin(ExportMixin, admin.ModelAdmin):
list_display = ['name', 'email', 'created_at']
search_fields = ['name', 'email']
class AccountRequestAdmin(ExportMixin, admin.ModelAdmin):
list_display = ['email', 'created_at']
search_fields = ['email']
form = autocomplete_light.modelform_factory(AccountRequest, fields='__all__')
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
form = autocomplete_light.modelform_factory(UserProfile, fields='__all__')
class UserAdmin(ExportMixin, BaseUserAdmin):
list_display = ['username', 'email', 'first_name', 'last_name', 'is_staff', 'date_joined']
inlines = [ProfileInline]
class DepartmentAssociationRequestAdmin(ExportMixin, admin.ModelAdmin):
model = DepartmentAssociationRequest
form = autocomplete_light.modelform_factory(DepartmentAssociationRequest, fields='__all__')
search_fields = ['user__username', 'user__email', 'approved_by__username', 'denied_by__username']
list_filter = ['approved_by', 'denied_by', 'approved_at', 'denied_at']
class RegistrationWhitelistAdmin(ExportMixin, admin.ModelAdmin):
model = RegistrationWhitelist
form = autocomplete_light.modelform_factory(RegistrationWhitelist, fields='__all__')
search_fields = ['email_or_domain', 'department__name', 'created_by__username']
list_filter = ['created_by', 'created_at', 'department__state']
class PredeterminedUserAdmin(ExportMixin, admin.ModelAdmin):
model = PredeterminedUser
form = autocomplete_light.modelform_factory(PredeterminedUser, fields='__all__')
search_fields = ['email', 'department__name']
admin.site.register(Address, AddressAdmin)
admin.site.register(ContactRequest, ContactRequestAdmin)
admin.site.register(AccountRequest, AccountRequestAdmin)
admin.site.register(RegistrationWhitelist, RegistrationWhitelistAdmin)
admin.site.register(PredeterminedUser, PredeterminedUserAdmin)
admin.site.register(DepartmentAssociationRequest, DepartmentAssociationRequestAdmin)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| Python | 0 |
7b3f239964c6663a9b655553202567fccead85c8 | Add 'me' to profile IdentifierError | mollie/api/resources/profiles.py | mollie/api/resources/profiles.py | from ..error import IdentifierError
from ..objects.profile import Profile
from .base import Base
class Profiles(Base):
RESOURCE_ID_PREFIX = 'pfl_'
def get_resource_object(self, result):
return Profile(result, self.client)
def get(self, profile_id, **params):
if not profile_id or \
(not profile_id.startswith(self.RESOURCE_ID_PREFIX)
and not profile_id == 'me'):
raise IdentifierError(
"Invalid profile ID: '{id}'. A profile ID should start with '{prefix}' "
"or it should be 'me'.".format(
id=profile_id,
prefix=self.RESOURCE_ID_PREFIX)
)
return super(Profiles, self).get(profile_id, **params)
| from ..error import IdentifierError
from ..objects.profile import Profile
from .base import Base
class Profiles(Base):
RESOURCE_ID_PREFIX = 'pfl_'
def get_resource_object(self, result):
return Profile(result, self.client)
def get(self, profile_id, **params):
if not profile_id or \
(not profile_id.startswith(self.RESOURCE_ID_PREFIX)
and not profile_id == 'me'):
raise IdentifierError(
"Invalid profile ID: '{id}'. A profile ID should start with '{prefix}'.".format(
id=profile_id, prefix=self.RESOURCE_ID_PREFIX)
)
return super(Profiles, self).get(profile_id, **params)
| Python | 0.000003 |
b000bef2ec323dc9b7862a828ab1fd2e9574f3b0 | allow networks to be read from Document objects as well as filenames | nineml/user/network.py | nineml/user/network.py | from itertools import chain
from .population import Population
from .projection import Projection
from .selection import Selection
from ..document import Document
from . import BaseULObject
from .component import write_reference, resolve_reference
from nineml.annotations import annotate_xml, read_annotations
from nineml.xmlns import E, NINEML
from nineml.utils import check_tag
import nineml
from nineml.exceptions import handle_xml_exceptions, NineMLRuntimeError
class Network(BaseULObject):
"""
Container for populations and projections between those populations.
**Arguments**:
*name*
a name for the network.
*populations*
a dict containing the populations contained in the network.
*projections*
a dict containing the projections contained in the network.
*selections*
a dict containing the selections contained in the network.
"""
element_name = "Network"
defining_attributes = ("populations", "projections", "selections")
children = ("populations", "projections", "selections")
def __init__(self, name="anonymous", populations={}, projections={},
selections={}):
# better would be *items, then sort by type, taking the name from the
# item
super(Network, self).__init__()
self.name = name
self.populations = populations
self.projections = projections
self.selections = selections
def add(self, *objs):
"""
Add one or more Population, Projection or Selection instances to the
network.
"""
for obj in objs:
if isinstance(obj, Population):
self.populations[obj.name] = obj
elif isinstance(obj, Projection):
self.projections[obj.name] = obj
elif isinstance(obj, Selection):
self.selections[obj.name] = obj
else:
raise Exception("Networks may only contain Populations, "
"Projections, or Selections")
def get_components(self):
components = []
for p in chain(self.populations.values(), self.projections.values()):
components.extend(p.get_components())
return components
@write_reference
@annotate_xml
def to_xml(self):
return E(self.element_name,
name=self.name,
*[p.to_xml() for p in chain(self.populations.values(),
self.selections.values(),
self.projections.values())])
@classmethod
@resolve_reference
@read_annotations
@handle_xml_exceptions
def from_xml(cls, element, document):
check_tag(element, cls)
populations = []
for pop_elem in element.findall(NINEML + 'PopulationItem'):
pop = Population.from_xml(pop_elem, document)
populations[pop.name] = pop
projections = []
for proj_elem in element.findall(NINEML + 'ProjectionItem'):
proj = Projection.from_xml(proj_elem, document)
projections[proj.name] = proj
selections = []
for sel_elem in element.findall(NINEML + 'Selection'):
sel = Selection.from_xml(sel_elem, document)
selections[sel.name] = sel
network = cls(name=element.attrib["name"], populations=populations,
projections=projections, selections=selections)
return network
def write(self, filename):
document = Document(*chain(
self.populations.itervalues(), self.projections.itervalues(),
self.selections.itervalues()))
document.write(filename)
@classmethod
def read(self, filename):
if isinstance(filename, basestring):
document = nineml.read(filename)
elif isinstance(filename, Document):
document = filename
else:
raise NineMLRuntimeError(
"Unrecognised argument type {}, can be either filename or "
"Document".format(filename))
return Network(
name='root',
populations=dict((p.name, p) for p in document.populations),
projections=dict((p.name, p) for p in document.projections),
selections=dict((s.name, s) for s in document.selections))
| from itertools import chain
from .population import Population
from .projection import Projection
from .selection import Selection
from ..document import Document
from . import BaseULObject
from .component import write_reference, resolve_reference
from nineml.annotations import annotate_xml, read_annotations
from nineml.xmlns import E, NINEML
from nineml.utils import check_tag
import nineml
from nineml.exceptions import handle_xml_exceptions
class Network(BaseULObject):
"""
Container for populations and projections between those populations.
**Arguments**:
*name*
a name for the network.
*populations*
a dict containing the populations contained in the network.
*projections*
a dict containing the projections contained in the network.
*selections*
a dict containing the selections contained in the network.
"""
element_name = "Network"
defining_attributes = ("populations", "projections", "selections")
children = ("populations", "projections", "selections")
def __init__(self, name="anonymous", populations={}, projections={},
selections={}):
# better would be *items, then sort by type, taking the name from the
# item
super(Network, self).__init__()
self.name = name
self.populations = populations
self.projections = projections
self.selections = selections
def add(self, *objs):
"""
Add one or more Population, Projection or Selection instances to the
network.
"""
for obj in objs:
if isinstance(obj, Population):
self.populations[obj.name] = obj
elif isinstance(obj, Projection):
self.projections[obj.name] = obj
elif isinstance(obj, Selection):
self.selections[obj.name] = obj
else:
raise Exception("Networks may only contain Populations, "
"Projections, or Selections")
def get_components(self):
components = []
for p in chain(self.populations.values(), self.projections.values()):
components.extend(p.get_components())
return components
@write_reference
@annotate_xml
def to_xml(self):
return E(self.element_name,
name=self.name,
*[p.to_xml() for p in chain(self.populations.values(),
self.selections.values(),
self.projections.values())])
@classmethod
@resolve_reference
@read_annotations
@handle_xml_exceptions
def from_xml(cls, element, document):
check_tag(element, cls)
populations = []
for pop_elem in element.findall(NINEML + 'PopulationItem'):
pop = Population.from_xml(pop_elem, document)
populations[pop.name] = pop
projections = []
for proj_elem in element.findall(NINEML + 'ProjectionItem'):
proj = Projection.from_xml(proj_elem, document)
projections[proj.name] = proj
selections = []
for sel_elem in element.findall(NINEML + 'Selection'):
sel = Selection.from_xml(sel_elem, document)
selections[sel.name] = sel
network = cls(name=element.attrib["name"], populations=populations,
projections=projections, selections=selections)
return network
def write(self, filename):
document = Document(*chain(
self.populations.itervalues(), self.projections.itervalues(),
self.selections.itervalues()))
document.write(filename)
@classmethod
def read(self, filename):
document = nineml.read(filename)
return Network(
name='root',
populations=dict((p.name, p) for p in document.populations),
projections=dict((p.name, p) for p in document.projections),
selections=dict((s.name, s) for s in document.selections))
| Python | 0 |
5efdd29804249b40c9b9e589cb00cf10c56decb0 | Add the standard imports | conveyor/tasks/bulk.py | conveyor/tasks/bulk.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
import logging
import time
from requests.exceptions import ConnectionError, HTTPError
from ..core import Conveyor
logger = logging.getLogger(__name__)
# We ignore the last component as we cannot properly handle it
def get_jobs(last=0):
current = time.mktime(datetime.datetime.utcnow().timetuple())
logger.info("Current time is '%s'", current)
app = Conveyor()
for package in set(app.processor.pypi.list_packages()):
yield package
def handle_job(name):
try:
tried = 0
delay = 1
while True:
try:
tried += 1
app = Conveyor()
app.processor.update(name)
break
except (ConnectionError, HTTPError):
# Attempt to process again if we have a connection error
if tried >= 10: # Try a max of 10 times
raise
else:
# Wait a moment
time.sleep(delay)
delay = delay * 2
except Exception as e:
logger.exception(str(e))
raise
| import datetime
import logging
import time
from requests.exceptions import ConnectionError, HTTPError
from ..core import Conveyor
logger = logging.getLogger(__name__)
# We ignore the last component as we cannot properly handle it
def get_jobs(last=0):
current = time.mktime(datetime.datetime.utcnow().timetuple())
logger.info("Current time is '%s'", current)
app = Conveyor()
for package in set(app.processor.pypi.list_packages()):
yield package
def handle_job(name):
try:
tried = 0
delay = 1
while True:
try:
tried += 1
app = Conveyor()
app.processor.update(name)
break
except (ConnectionError, HTTPError):
# Attempt to process again if we have a connection error
if tried >= 10: # Try a max of 10 times
raise
else:
# Wait a moment
time.sleep(delay)
delay = delay * 2
except Exception as e:
logger.exception(str(e))
raise
| Python | 0.000378 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.