text stringlengths 0 1.05M | meta dict |
|---|---|
# A simple tool to connect to the Ensembl server and retrieve sequences using
# the Ensembl REST API.
from __future__ import print_function
import json
import optparse
from itertools import islice
import requests
from six.moves.urllib.parse import urljoin
# Command-line interface: input ID list, database family, sequence type,
# and optional genomic flank expansion.
parser = optparse.OptionParser()
parser.add_option('-i', '--input', help='List of Ensembl IDs')
parser.add_option('-s', '--species', type='choice',
                  choices=['ensembl', 'ensemblgenomes'], default='ensembl',
                  help='Specify the genome databases for vertebrates and other eukaryotic species')
parser.add_option('-t', '--type', type='choice',
                  choices=['genomic', 'cds', 'cdna', 'protein'],
                  default='genomic', help='Type of sequence')
parser.add_option('--expand_3prime', type='int', default=0,
                  help='Expand the sequence downstream of the sequence by this many basepairs. Only available when using genomic sequence type')
parser.add_option('--expand_5prime', type='int', default=0,
                  help='Expand the sequence upstream of the sequence by this many basepairs. Only available when using genomic sequence type')
options, args = parser.parse_args()
if options.input is None:
    raise Exception('-i option must be specified')

# REST endpoint and request parameters shared by every chunk.
server = 'http://rest.%s.org' % options.species
ext = 'sequence/id'
headers = {'Content-Type': 'text/x-fasta', 'Accept': 'text/x-fasta'}
params = {k: getattr(options, k) for k in ['type', 'expand_3prime', 'expand_5prime']}

with open(options.input) as handle:
    # The Ensembl REST API caps POST sequence/id requests at 50 IDs,
    # so consume the input file 50 lines at a time.
    while True:
        ids = [line.strip() for line in islice(handle, 50)]
        if not ids:
            break
        response = requests.post(urljoin(server, ext), params=params,
                                 headers=headers,
                                 data=json.dumps({'ids': ids}))
        # raise_for_status() is a no-op on 2xx responses, so calling it
        # unconditionally is equivalent to guarding on ``not r.ok``.
        response.raise_for_status()
        print(response.text)
| {
"repo_name": "PerlaTroncosoRey/tgac-galaxytools",
"path": "tools/Ensembl-REST/get_sequences.py",
"copies": "2",
"size": "1979",
"license": "mit",
"hash": 3939747932937843700,
"line_mean": 40.2291666667,
"line_max": 144,
"alpha_frac": 0.6503284487,
"autogenerated": false,
"ratio": 3.7623574144486693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009530861442405671,
"num_lines": 48
} |
"""A simple TV show renamer, no TVDB required
Args:
file (str): Path to the file you want to rename
-s / --silent (bool): Program will run without any messages
(except unknown exceptions).
Attributes:
SHOWNAMES_DICT_FILEPATH (str): showname match dictionary JSON filepath
"""
import re
import argparse
import os
import json
# JSON file mapping normalised raw shownames to canonical display names;
# stored next to this script so it persists between runs.
SHOWNAMES_DICT_FILENAME = "shownames.json"
SHOWNAMES_DICT_FILEPATH = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), SHOWNAMES_DICT_FILENAME
)
# Enums: the episode-numbering styles recognised in filenames.
SHOWNAME_STYLE_SXXEXX = "showname_style_SxxExx"  # "s01e02" or "[1x02]" forms
SHOWNAME_STYLE_XXXX = "showname_style_xxxx"      # bare 3-4 digit "102"/"0102" forms
SHOWNAME_STYLE_DAILY = "showname_style_daily"    # "YYYY?MM?DD" date forms
# Regex patterns: 'styles' entries detect which numbering style is present;
# 'full_filenames' entries split a filename into (start, episode parts, end).
PATTERN_STRINGS = {
    'styles': {
        'seasonal_SE': r"(?:s|\[)(\d{1,2})(?:e|x)(\d{1,2})",
        'seasonal_4_digit': r".+?\D(\d{1,2})(\d{2})\D.+",
        'daily': r".+?\W(\d{4})\W(\d{2})\W(\d{2})\W.+"
    },
    'full_filenames': {
        'seasonal_SE': \
            r"^(.+)(?:s|\[)(\d{1,2})(?:e|x)(\d{1,2})(.+)$",
        'seasonal_4_digit': r"^(.+?)(\d{1,2})(\d{2})(.+)$",
        'daily': \
            r"^(.+)(\d{4})\W(\d{2})\W(\d{2})(.+)$"
    }
}
def fix_episode(episode, style_enum):
    """Processes episode section of filename

    Args:
        episode (tuple): episode numbering data; (season, episode) strings
            for the seasonal styles, (year, month, day) strings for daily
        style_enum (str): global enum string
            representing episode numbering style

    Returns:
        str: Processed episode (daily/seasonal as appropriate)

    Raises:
        ValueError: on a non-numeric episode component or unknown style
    """
    if style_enum in (SHOWNAME_STYLE_SXXEXX, SHOWNAME_STYLE_XXXX):
        season_num, episode_num = episode
        if not (season_num.isdigit() and episode_num.isdigit()):
            raise ValueError("non-numeric season/episode: {!r}".format(episode))
        # NOTE(review): only the season is zero-padded, matching the
        # original behaviour; the episode number is left as captured.
        return "[{}x{}]".format(season_num.zfill(2), episode_num)
    if style_enum == SHOWNAME_STYLE_DAILY:
        year, month, day = episode
        if not (year.isdigit() and month.isdigit() and day.isdigit()):
            raise ValueError("non-numeric date parts: {!r}".format(episode))
        return "[{}-{}-{}]".format(year, month.zfill(2), day.zfill(2))
    raise ValueError("unknown showname style: {!r}".format(style_enum))
def fix_title(filename_start, shownames_dict):
    """Processes starting section of filename to get showname

    Args:
        filename_start (str): starting section of filename
        shownames_dict (dict): Matches raw showname to real showname

    Returns:
        str: Processed showname
    """
    # Drop any leading bracketed tags such as "[group]" or "(source)".
    prefix_pattern = re.compile(
        r"^(?:[\[({].+?[\])}])*(.+)$",
        flags=re.IGNORECASE
    )
    stripped = prefix_pattern.search(filename_start).group(1)
    # Collapse to a bare lookup key: remove punctuation/whitespace, lowercase.
    raw_key = re.sub(r'\W+', '', stripped).lower()
    # Map the raw key to the canonical show name (KeyError propagates to
    # the caller, which treats it as "unknown show").
    return shownames_dict[raw_key]
def fix_extension(filename_end):
    """Processes ending section of filename to get extension

    Args:
        filename_end (str): ending section of filename

    Returns:
        str: file extension (three word-characters after the last dot)

    Raises:
        ValueError: if no ".xxx" extension is found
    """
    # Only 3-character extensions are recognised, matching the original.
    pattern = re.compile(
        r".*\.(\w{3})$",
        flags=re.IGNORECASE
    )
    match = pattern.search(filename_end)
    if match is None:  # was ``match == None``; identity test is the idiom
        raise ValueError
    return match.group(1)
def find_raw_showname_style(filename):
    """Detect the episode-numbering style used in a filename

    Args:
        filename (str): filename of show file

    Returns:
        str: style of showname (see enums above)

    Raises:
        ValueError: when no known style matches
    """
    # Order matters and mirrors the original checks: the explicit SxxExx
    # form wins, then dates, and the ambiguous bare-digit form is last.
    style_checks = (
        ('seasonal_SE', SHOWNAME_STYLE_SXXEXX),
        ('daily', SHOWNAME_STYLE_DAILY),
        ('seasonal_4_digit', SHOWNAME_STYLE_XXXX),
    )
    for pattern_key, style_enum in style_checks:
        pattern = re.compile(
            PATTERN_STRINGS['styles'][pattern_key],
            flags=re.IGNORECASE)
        if pattern.search(filename):
            return style_enum
    raise ValueError
def tvregex(filename, shownames_dict):
    """Main program flow

    Args:
        filename (str): Path to the file you want to rename
        shownames_dict (dict): Matches raw showname to real showname

    Returns:
        str: Renamed filename "Show Name - [episode].ext"

    Raises:
        ValueError: unknown numbering style / bad episode / bad extension
        KeyError: show name not present in shownames_dict
        AttributeError: full-filename pattern failed to match
    """
    # The three per-style branches of the original were identical except
    # for the pattern key and the number of captured episode parts, so a
    # single code path driven by this map replaces them.
    style_to_pattern_key = {
        SHOWNAME_STYLE_SXXEXX: 'seasonal_SE',
        SHOWNAME_STYLE_DAILY: 'daily',
        SHOWNAME_STYLE_XXXX: 'seasonal_4_digit',
    }
    raw_showname_style = find_raw_showname_style(filename)
    try:
        pattern_key = style_to_pattern_key[raw_showname_style]
    except KeyError:
        # Matches the original's final ``else: raise ValueError`` branch.
        raise ValueError
    pattern = re.compile(
        PATTERN_STRINGS['full_filenames'][pattern_key],
        flags=re.IGNORECASE)
    match = pattern.search(filename)
    groups = match.groups()
    # First group is the show name, last is the trailing text with the
    # extension; everything in between is the episode numbering data.
    start, end = groups[0], groups[-1]
    episode_data = tuple(groups[1:-1])
    showname = fix_title(start, shownames_dict)
    episode = fix_episode(episode_data, raw_showname_style)
    extension = fix_extension(end)
    return "{} - {}.{}".format(showname, episode, extension)
def attempt_rename(folder, old_filename, shownames_dict, silent):
    """Try to rename one episode file in *folder* to its canonical name.

    Args:
        folder (str): directory containing the file
        old_filename (str): current filename (basename only)
        shownames_dict (dict): raw showname -> canonical showname
        silent (bool): suppress messages and the interactive prompt

    On an unknown show name the user is prompted (unless silent) and the
    new match is persisted to the shownames JSON file; unparseable names
    are reported and skipped.
    """
    try:
        new_filename = tvregex(old_filename, shownames_dict)
        new_filepath = os.path.join(folder, new_filename)
        old_filepath = os.path.join(folder, old_filename)
        os.rename(old_filepath, new_filepath)
    except KeyError as ke:
        # fix_title raises KeyError carrying the unmatched raw showname.
        if not silent:
            raw_showname = ke.args[0]
            print(
                "The filename was processed to give {}".format(
                    raw_showname
                )
            )
            print("No show name match is known.")
            print("Type the show name that matches this")
            good_showname = input(
                "(or just press Enter if there's no match):"
            )
            if good_showname != "":
                shownames_dict[raw_showname] = good_showname
                with open(SHOWNAMES_DICT_FILEPATH, "w") as f:
                    json.dump(shownames_dict, f, indent=4)
                print("Thanks! Please run me again with this file!")
                # Can I just run main() again?
    except ValueError:
        # Unused ``as ve`` binding removed.
        if not silent:
            print("Cannot read episode number or date for this file")
    except AttributeError:
        # Raised when the filename regexes fail to match; ``as ae`` removed.
        if not silent:
            print("Cannot read file name as TV show")
def main():
    """Takes in args

    Passes to real main program flow in tvregex()
    Outputs to file system
    """
    # Load saved showname matches; fall back to the empty mapping on the
    # first run, before shownames.json exists (the original crashed here
    # with FileNotFoundError despite initialising the default).
    shownames_dict = {}
    try:
        with open(SHOWNAMES_DICT_FILEPATH) as f:
            shownames_dict = json.load(f)
    except FileNotFoundError:
        pass
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "file",
        help="Path to the file you want to rename",
        nargs="+"
    )
    parser.add_argument(
        "-s", "--silent",
        help="Program will run without any messages" +
        " (except unknown exceptions). " +
        "(If set, this won't ask for matches)",
        action="store_true"
    )
    program_args = parser.parse_args()
    silent = program_args.silent
    all_filepaths = program_args.file
    for filepath in all_filepaths:
        if os.path.isfile(filepath):
            folder = os.path.dirname(filepath)
            old_filename = os.path.basename(filepath)
            attempt_rename(
                folder, old_filename, shownames_dict, silent
            )
        elif os.path.isdir(filepath):
            # Rename every regular file directly inside the directory.
            # Loop variable renamed from ``file`` to avoid shadowing the
            # builtin.
            entries = [
                entry for entry in os.listdir(filepath)
                if os.path.isfile(os.path.join(filepath, entry))
            ]
            for entry in entries:
                attempt_rename(filepath, entry, shownames_dict, silent)
        else:
            if not silent:
                print("Nothing at file path")


if __name__ == '__main__':
    main()
| {
"repo_name": "ROldford/tvregex",
"path": "tvregex/tvregex.py",
"copies": "1",
"size": "10618",
"license": "mit",
"hash": -8033595166736801000,
"line_mean": 32.7079365079,
"line_max": 76,
"alpha_frac": 0.5936146167,
"autogenerated": false,
"ratio": 3.7559250088432967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9838441134576272,
"avg_score": 0.002219698193405002,
"num_lines": 315
} |
# A simple type validator to check types of bdecoded data that comes from
# an untrusted source (say, network).
#
# This source module is in public domain. You may do whatever you want with it.
#
# Originally written by Heikki Orsila <heikki.orsila@iki.fi> on 2009-09-12
from types import FunctionType
# BOOL*, INT*, STRING* and FLOAT* are kept for backward compatibility
# with the old interface. New code should use bool/int/str/float directly.
BOOL = bool
BOOL_KEY = bool
INT = int
INT_KEY = int
STRING = str
STRING_KEY = str
FLOAT = float
FLOAT_KEY = float
class ANY:
    """Format marker: matches any value."""
    pass
class ZERO_OR_MORE:
    """List-format marker: the next format item may repeat zero or more times."""
    pass
class ONE_OR_MORE:
    """List-format marker: the next format item must appear at least once."""
    pass
class OPTIONAL_KEY:
    """Dict-format wrapper: the wrapped key is validated only if present."""
    def __init__(self, key):
        self.key = key
class Invalid_Format_Object(Exception):
    """Raised when the format specification itself is malformed."""
    def __init__(self, reason=''):
        self.reason = reason
    def __str__(self):
        return self.reason
def validate_list(fmt, o):
    """Validate list *o* positionally against list format *fmt*.

    ZERO_OR_MORE / ONE_OR_MORE markers consume the following format item
    and match a run of input elements. Returns True/False; raises
    Invalid_Format_Object when a repeat marker has no following item.
    """
    if type(o) != list:
        return False
    # Work on copies: both lists are consumed destructively below.
    fmt = list(fmt)
    o = list(o)
    while len(fmt) > 0:
        fitem = fmt.pop(0)
        if fitem == ZERO_OR_MORE or fitem == ONE_OR_MORE:
            if len(fmt) == 0:
                # A repeat marker must be followed by the repeated type.
                raise Invalid_Format_Object()
            ftype = fmt.pop(0)
            if len(o) == 0:
                if len(fmt) > 0:
                    # NOTE(review): with format items still pending, an
                    # empty input skips the ONE_OR_MORE minimum here.
                    continue
                # Nothing left to process: only ZERO_OR_MORE accepts zero.
                return fitem == ZERO_OR_MORE
            while len(o) > 0:
                if not validate(ftype, o[0]):
                    if len(fmt) > 0:
                        # Run ended; try remaining format items against
                        # the rest of the input.
                        break
                    return False
                o.pop(0)
            continue
        if len(o) == 0:
            # Format expects another item but the input ran out.
            return False
        oitem = o.pop(0)
        if not validate(fitem, oitem):
            return False
    # All format consumed; the input must be fully consumed too.
    return len(o) == 0
def validate_dict(fmt, o):
    """Validate dict *o* against format dict *fmt*.

    Literal keys are required (unless wrapped in OPTIONAL_KEY); a key that
    is a *type* constrains every key of *o* to that type. Extra keys in
    *o* are allowed. Uses ``key in o`` instead of the Python-2-only
    ``o.has_key(key)`` — identical semantics, also valid on Python 3.
    """
    if type(o) != dict:
        return False
    for key in fmt.keys():
        # Literal key present and valid: this format entry is satisfied.
        if key in o and validate(fmt[key], o[key]):
            continue
        if type(key) == type:
            # str, int, ...: every key of o must be of this type and every
            # value must validate against the format for that key type.
            for okey in o.keys():
                if type(okey) != key:
                    # Associate int with long (Python 2 only)
                    if key != int or type(okey) != long:
                        return False
                if not validate(fmt[key], o[okey]):
                    return False
        elif isinstance(key, OPTIONAL_KEY):
            # OPTIONAL_KEY: validated only when the key is present.
            if key.key in o and not validate(fmt[key], o[key.key]):
                return False
        else:
            # Required literal key missing or its value failed validation.
            return False
    return True
def validate(fmt, o):
    """Dispatch validation of *o* against format specification *fmt*."""
    if fmt == ANY:
        return True
    spec_kind = type(fmt)
    # User-defined checker function?
    if spec_kind == FunctionType:
        return fmt(o)
    if spec_kind == list:
        return validate_list(fmt, o)
    if spec_kind == dict:
        return validate_dict(fmt, o)
    if spec_kind == type:
        # fmt is a type object: o must be of that type, except that int
        # also accepts long (Python 2).
        if fmt != type(o) and (fmt != int or type(o) != long):
            return False
    elif fmt != o:
        # fmt is a plain value, not a type: compare for equality.
        return False
    return True
def test_validate():
    """Self-test covering list repeats, dicts, checkers and value equality.

    Note: contains Python-2-only literals (0L) — runs under Python 2 only.
    """
    assert(validate([str, [ONE_OR_MORE, int], [ZERO_OR_MORE, int], {'a': int, 1: str}], ['fff', [0], [], {'a': 0, 1: 'foo'}]))
    assert(validate([str, [ONE_OR_MORE, int], [ZERO_OR_MORE, int], {'a': int, 1: str}], [1, [0], [], {'a': 0, 1: 'foo'}]) == False)
    assert(validate([str, [ONE_OR_MORE, int], [ZERO_OR_MORE, int], {'a': int, 1: str}], ['fff', [], [], {'a': 0, 1: 'foo'}]) == False)
    # Repeat markers interleaved with further format items.
    assert(validate([ONE_OR_MORE, int, ZERO_OR_MORE, str], [1, 1, 1]))
    assert(validate([ONE_OR_MORE, int, ZERO_OR_MORE, str], [1, 1, 1, 's']))
    assert(validate([ZERO_OR_MORE, int, ONE_OR_MORE, str], [1, 1, 1, 's']))
    assert(validate([ZERO_OR_MORE, int, ONE_OR_MORE, str], [1, 1, 1]) == False)
    assert(validate([ZERO_OR_MORE, int, ONE_OR_MORE, str], ['d']))
    assert(validate([ZERO_OR_MORE, int, ONE_OR_MORE, str], []) == False)
    # User-supplied checker functions.
    assert(validate(lambda x: x % 2 == 0, 0))
    assert(validate(lambda x: x % 2 == 0, 1) == False)
    # Type-typed dictionary keys constrain all keys/values.
    assert(validate({str: str}, {'a': 'b'}))
    assert(validate({str: str}, {1: 'b'}) == False)
    assert(validate({str: str}, {'a': 1}) == False)
    assert(validate({str: int}, {'a': 1}))
    assert(validate({int: str}, {1: 'a'}))
    assert(validate({int: str}, {1: 'a', 'b': 2}) == False)
    # Extra keys in dictionary are allowed
    assert(validate({'x': int}, {'x': 1, 'y': 1}))
    # Missing key fails
    assert(validate({'x': int}, {'y': 1}) == False)
    # OK
    assert(validate({'x': int, str: int}, {'x': 1, 'y': 1}))
    # Non-string key
    assert(validate({'x': int, str: int}, {'x': 1, 1: 1}) == False)
    # Missing key, but correct key type
    assert(validate({'x': int, str: int}, {'y': 1}) == False)
    # bool is its own type: 0 is not an acceptable False.
    assert(validate({'x': bool}, {'x': False}))
    assert(validate({'x': bool}, {'x': 0}) == False)
    # Test OPTIONAL_KEY
    assert(validate({OPTIONAL_KEY('x'): int}, {}))
    assert(validate({OPTIONAL_KEY('x'): int}, {'x': 1}))
    assert(validate({OPTIONAL_KEY('x'): int}, {'x': 'invalid'}) == False)
    # Test that int and long are equivalent
    assert(validate({'x': int}, {'x': 0L}))
    assert(validate({int: ANY}, {0L: 'x'}))
    # Typevalidator can be used to check that values are equal
    assert(validate([1, 2, 3, [True, 'a']], [1, 2, 3, [True, 'a']]))
    assert(validate('foo', 'bar') == False)
def benchmark():
    """Crude throughput check: validate one fixed request 100,000 times."""
    specification = {
        'uid': str,
        'ids': [ZERO_OR_MORE, int],
        'purposes': [ZERO_OR_MORE, str],
        'metas': [ZERO_OR_MORE, {}],
    }
    request = {
        'uid': '0123456789abcdef',
        'ids': [0, 1, 2, 3, 4],
        'purposes': ['a', 'b', 'c', 'd', 'e'],
        'metas': [{}, {}, {}, {}, {}],
    }
    # The if/assert split is deliberate: validate() still runs under -O,
    # where a plain assert expression would be stripped entirely.
    for _ in xrange(100000):
        if not validate(specification, request):
            assert(False)
if __name__ == '__main__':
    test_validate()
| {
"repo_name": "japeq/bencode-tools",
"path": "typevalidator.py",
"copies": "1",
"size": "6044",
"license": "bsd-3-clause",
"hash": 8296411709688626000,
"line_mean": 32.7653631285,
"line_max": 134,
"alpha_frac": 0.5177035076,
"autogenerated": false,
"ratio": 3.3484764542936287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4366179961893628,
"avg_score": null,
"num_lines": null
} |
# A Simple UDP class
import socket
class UDP(object):
    """simple UDP ping class"""
    handle = None      # Socket for send/recv
    port = 0           # UDP port we work on
    address = ''       # Own address
    broadcast = ''     # Broadcast address

    def __init__(self, port, address=None, broadcast=None):
        """Open a broadcast-capable UDP socket bound to *port*.

        When *address* is omitted, the last non-loopback address reported
        for this host is used; *broadcast* defaults to the limited
        broadcast address.
        """
        if address is None:
            candidates = socket.gethostbyname_ex(socket.gethostname())[-1]
            for candidate in candidates:
                if not candidate.startswith('127'):
                    address = candidate
        if broadcast is None:
            broadcast = '255.255.255.255'
        self.address = address
        self.broadcast = broadcast
        self.port = port
        # Datagram socket with OS permission to broadcast from it.
        self.handle = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                    socket.IPPROTO_UDP)
        self.handle.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # Bind to the port on all interfaces so we can receive pings.
        self.handle.bind(('', port))

    def send(self, buf):
        """Broadcast *buf* to the configured port."""
        self.handle.sendto(buf, 0, (self.broadcast, self.port))

    def recv(self, n):
        """Receive up to *n* bytes, reporting peers other than ourselves."""
        buf, addrinfo = self.handle.recvfrom(n)
        if addrinfo[0] != self.address:
            print("Found peer %s:%d" % addrinfo)
        return buf
| {
"repo_name": "soscpd/bee",
"path": "root/tests/zguide/examples/Python/udplib.py",
"copies": "1",
"size": "1329",
"license": "mit",
"hash": 329172411702204160,
"line_mean": 31.4146341463,
"line_max": 90,
"alpha_frac": 0.5914221219,
"autogenerated": false,
"ratio": 3.9790419161676644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070464038067665,
"avg_score": null,
"num_lines": null
} |
''' A simple UI that shows a valence arousal (VA) graph with 6 universal emotion touch points.
Will write values to NAO memory.
'''
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import Color, Ellipse, Line
from kivy.uix.button import Button
from kivy.uix.label import Label
from naoqi import ALProxy
# Hostname of the target NAO robot.
NAO_IP = 'mistcalf.local'
# Global variables for proxies.
memory = None  # ALMemory proxy; assigned by NAO_setup() before the UI runs
class emotionWidget(Widget):
    """Widget drawing the valence/arousal axes on an 800x600 canvas."""
    # NOTE(review): ``global`` at class scope is a no-op; kept as-is.
    global memory
    def __init__(self, **kwargs):
        super(emotionWidget, self).__init__(**kwargs)
        with self.canvas:
            # Vertical (arousal) and horizontal (valence) axis lines.
            Line(points=[400, 0, 400, 600], width = 1)
            Line(points=[0, 300, 800, 300], width = 1)
            # NOTE(review): this Label is created but never attached to the
            # widget tree — presumably meant as an axis caption; verify.
            Label(text = 'valence')
class emotionApp(App):
global memory
def build(self):
emotional_dictionary = {"happiness" : (1.00, 0.75),
"sadness" : (-0.75, -0.75),
"anger" : (-0.75, 0.75),
"fear" : (-1.00, 0.00),
"surprise" : (0.00, 0.50),
"disgust" : (-0.4, 0.25),
"thinking" : (0.25, 0.00)
}
parent = Widget()
grid = emotionWidget()
happy_btn = Button(text = 'happy', pos = (700, 500), size = (50, 50))
angry_btn = Button(text = 'angry', pos = (100, 500), size = (50, 50))
surprise_btn = Button(text = 'surprise', pos = (375, 400), size = (50, 50))
disgust_btn = Button(text = 'disgust', pos = (200, 350), size = (50, 50))
think_btn = Button(text = 'think', pos = (450, 275), size = (50, 50))
fear_btn = Button(text = 'fear', pos = (50, 275), size = (50, 50))
sad_btn = Button(text = 'sad', pos = (100, 100), size = (50, 50))
label_valence = Label(text = 'valence', pos = (700, 225))
label_arousal = Label(text = 'arousal', pos = (400, 0))
parent.add_widget(grid)
parent.add_widget(happy_btn)
parent.add_widget(angry_btn)
parent.add_widget(surprise_btn)
parent.add_widget(disgust_btn)
parent.add_widget(think_btn)
parent.add_widget(fear_btn)
parent.add_widget(sad_btn)
parent.add_widget(label_valence)
parent.add_widget(label_arousal)
def happy(obj):
key = "happiness"
valence = emotional_dictionary[key][0]
arousal = emotional_dictionary[key][1]
current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (key, "param2")]
memory.insertData("Emotion/Current", current_emotion)
print "emotion: ", memory.getData("Emotion/Current")
def angry(obj):
key = "anger"
valence = emotional_dictionary[key][0]
arousal = emotional_dictionary[key][1]
current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (key, "param2")]
memory.insertData("Emotion/Current", current_emotion)
print "emotion: ", memory.getData("Emotion/Current")
def surprise(obj):
key = "surprise"
valence = emotional_dictionary[key][0]
arousal = emotional_dictionary[key][1]
current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (key, "param2")]
memory.insertData("Emotion/Current", current_emotion)
print "emotion: ", memory.getData("Emotion/Current")
def disgust(obj):
key = "disgust"
valence = emotional_dictionary[key][0]
arousal = emotional_dictionary[key][1]
current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (key, "param2")]
memory.insertData("Emotion/Current", current_emotion)
print "emotion: ", memory.getData("Emotion/Current")
def think(obj):
key = "thinking"
valence = emotional_dictionary[key][0]
arousal = emotional_dictionary[key][1]
current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (key, "param2")]
memory.insertData("Emotion/Current", current_emotion)
print "emotion: ", memory.getData("Emotion/Current")
def fear(obj):
key = "fear"
valence = emotional_dictionary[key][0]
arousal = emotional_dictionary[key][1]
current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (key, "param2")]
memory.insertData("Emotion/Current", current_emotion)
print "emotion: ", memory.getData("Emotion/Current")
def sad(obj):
key = "sadness"
valence = emotional_dictionary[key][0]
arousal = emotional_dictionary[key][1]
current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (key, "param2")]
memory.insertData("Emotion/Current", current_emotion)
print "emotion: ", memory.getData("Emotion/Current")
happy_btn.bind(on_release=happy)
angry_btn.bind(on_release=angry)
surprise_btn.bind(on_release=surprise)
disgust_btn.bind(on_release=disgust)
think_btn.bind(on_release=think)
fear_btn.bind(on_release=fear)
sad_btn.bind(on_release=sad)
return parent
def NAO_setup():
    """Create the module-level ALMemory proxy for the robot at NAO_IP:9559.

    On failure the error is printed and ``memory`` stays None.
    """
    global memory
    try:
        memory = ALProxy("ALMemory", NAO_IP, 9559)
    except Exception, e:
        print 'Could not setup ALMemory, error: ', e
def main():
    """Connect to the robot, then run the Kivy UI."""
    NAO_setup()
    emotionApp().run()
if __name__ == "__main__":
    main()
| {
"repo_name": "davesnowdon/nao-emotional-framework",
"path": "python_prototypes/standalone_VA_based_UI.py",
"copies": "1",
"size": "5758",
"license": "apache-2.0",
"hash": -4573431949208360400,
"line_mean": 36.3896103896,
"line_max": 118,
"alpha_frac": 0.5620006947,
"autogenerated": false,
"ratio": 3.345729227193492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4407729921893492,
"avg_score": null,
"num_lines": null
} |
# A simple upload script to add pictures to a database.
# ARGV:
# [0]: The script name
# [1]: The packed ziparchive (named for it's directory: example.zip becomes example/)
# [2]: The gallery directory
# [3]: The name of the gallery
import os, zipfile, re
import MySQLdb as mdb
from PIL import Image
from fnmatch import fnmatch
from sys import argv
from time import time, sleep
from json import loads
class g_sql(object):
    """Thin MySQL helper for the gallery/picture tables."""
    def __init__(self, connFile = 'conn.json'):
        """Open a connection using details loaded from *connFile* (JSON)."""
        connDetails = self.getConnDetails(connFile)
        self.conn = mdb.connect(**connDetails)
        self.cursor = self.conn.cursor(mdb.cursors.DictCursor)
    def getConnDetails(self, connFile = 'conn.json'):
        """Load connection kwargs from JSON, dropping the unused 'dsn' key."""
        # with-statement closes the file even if loads() raises.
        with open(connFile) as fhConnFile:
            pjConnDetails = loads(fhConnFile.read())
        pjConnDetails.pop('dsn')
        return pjConnDetails
    def checkPicStatus(self, file_view):
        """Return True if a picture with this view filename exists."""
        # Query parameters must be a sequence: (file_view,) — the original
        # passed a bare string, relying on undocumented driver behaviour.
        stmn = self.cursor.execute(
            "SELECT p_id FROM pictures WHERE file_view = %s", (file_view,))
        if stmn:
            return True
        else:
            return False
    def checkGalStatus(self, g_dir):
        """Return the gallery row dict for *g_dir*, or False if absent."""
        stmn = self.cursor.execute(
            "SELECT g_id FROM galleries WHERE g_dir = %s", (g_dir,))
        if stmn:
            return self.cursor.fetchone()
        else:
            return False
    def makeGallery(self, g_name, g_dir, g_url):
        """Insert a gallery row; roll back on database errors."""
        sql = """
        INSERT INTO galleries (g_id, g_name, g_dir, g_url)
        VALUES (DEFAULT, %s, %s, %s);
        """
        try:
            stmn = self.cursor.execute(sql, (g_name, g_dir, g_url))
        except mdb.Error:
            # Narrowed from a bare except: only database errors trigger
            # the rollback; programming errors now surface.
            self.conn.rollback()
        else:
            self.conn.commit()
    def insertPicInGal(self, g_id, file_view, file_thumb):
        """Insert a picture row stamped with the current upload time."""
        sql = """
        INSERT INTO pictures (p_id, g_id, file_view, file_thumb, uploaded)
        VALUES (DEFAULT, %s, %s, %s, %s);
        """
        try:
            stmn = self.cursor.execute(sql, (g_id, file_view, file_thumb, int(time())))
        except mdb.Error:
            self.conn.rollback()
        else:
            self.conn.commit()
def extractall(packed, target):
    """Extract every member of zip archive *packed* flat into *target*.

    Directory components of member names are discarded, so all files land
    directly in *target*.

    Returns:
        bool: True on success, False on a bad/unreadable archive.
    """
    try:
        z = zipfile.ZipFile(packed)
    except zipfile.error:
        # Opening failed: the original reached its finally block here with
        # ``z`` unbound and raised NameError instead of returning False.
        return False
    try:
        for name in z.namelist():
            filename = os.path.basename(name)
            imgpath = os.path.join(target, filename)
            # Binary mode: text-mode "w" corrupts image bytes on Windows
            # and cannot write bytes at all on Python 3.
            fd = open(imgpath, "wb")
            try:
                fd.write(z.read(name))
            finally:
                fd.close()
        return True
    except zipfile.error:
        return False
    finally:
        z.close()
def resizeImage(imageFile, galPath):
    """Create a 180px-high thumbnail and rescale the original to 600px high.

    Both images keep their aspect ratio; the thumbnail is saved next to the
    original as ``<name>_thumb<ext>``.

    Args:
        imageFile: filename (relative to galPath) of the image to process
        galPath: gallery directory containing the image

    Returns:
        str: the thumbnail filename

    Raises:
        IOError: re-raised if PIL cannot resize or save
    """
    # Two independent Image objects: ``thumb`` is shrunk, ``full`` re-saved.
    thumb = Image.open(os.path.join(galPath, imageFile))
    full = Image.open(os.path.join(galPath, imageFile))
    (name, ext) = os.path.splitext(imageFile)
    thumbname = "%s_thumb%s" % (name, ext)
    thumbHeight = 180
    fullHeight = 600
    # Scale factors preserving aspect ratio at the two target heights.
    thumbHeightPercent = thumbHeight / float(thumb.size[1])
    fullHeightPercent = fullHeight / float(full.size[1])
    thumbWidth = int(thumbHeightPercent * thumb.size[0])
    fullWidth = int(fullHeightPercent * full.size[0])
    thumbSize = (thumbWidth, thumbHeight)
    fullSize = (fullWidth, fullHeight)
    try:
        thumb = thumb.resize(thumbSize, Image.ANTIALIAS)
        # NOTE(review): images are always re-saved as JPEG regardless of
        # the original extension — confirm that is intended.
        thumb.save(os.path.join(galPath, thumbname), "JPEG")
        full = full.resize(fullSize, Image.ANTIALIAS)
        full.save(os.path.join(galPath, imageFile), "JPEG")
        return thumbname
    except IOError:
        print "Cannot create thumbfile for %s" % imageFile
        raise
def main(argv):
    """Unpack a zip of pictures into the gallery tree and register it.

    Expected argv: [script, zip_path, gallery_root, gallery_name].
    """
    if len(argv) == 4:
        print "All arguements filled."
        # The zip's basename (minus extension) names the gallery directory.
        g_dir = os.path.basename(argv[1]).split('.')[0]
        print "g_dir reads: %s" % (g_dir)
        target = os.path.join(argv[2], g_dir)
        print "full target reads: %s" % (target)
        if not os.path.exists(target):
            os.makedirs(target)
        if extractall(argv[1], target):
            print "extraction succeeded."
            db = g_sql()
            if db:
                print "connected to database."
                g_id = db.checkGalStatus(g_dir)
                if not g_id:
                    # Build the URL slug: strip punctuation, lowercase,
                    # spaces to hyphens.
                    pattern = '[`~!@#$%^&*()\[\]{}:;\'"?/\\<>,.]'
                    print "the gallery isn't in the database, making it."
                    g_url = re.sub(pattern, '', argv[3].lower()).rstrip().replace(' ', '-')
                    print "args are: %s, %s, %s" % (argv[3], g_dir, g_url)
                    db.makeGallery(argv[3], g_dir, g_url)
                    g_id = db.checkGalStatus(g_dir)
                print "g_id: %s" % (g_id)
                files = os.listdir(target)
                print files
                for f in files:
                    # NOTE(review): in fnmatch, "[_thumb]" is a character
                    # class (any one of _,t,h,u,m,b), not the literal
                    # suffix "_thumb" — confirm the intended matching.
                    if not fnmatch(f, "*[_thumb].jpg") and not db.checkPicStatus(f):
                        print "%s is not a thumb already, making a thumb" % (f)
                        thumbname = resizeImage(f, target)
                        print "new thumbnail name: %s" % (thumbname)
                        print "inserting into kaitlyngarrett.pictures"
                        db.insertPicInGal(g_id['g_id'], f, thumbname)
                        sleep(1)
                    else:
                        print "Was image already a thumb? %s" % (fnmatch(f, "*[_thumb].jpg"))
                        print "Was image already in the gallery? %s" % (db.checkPicStatus(f))
        else:
            # Clean up the (empty) target directory on a bad archive.
            os.rmdir(target)
            print "Extraction failed."
            exit()
    else:
        print "upload expects three arguements, %d given." % (len(argv) - 1)
if __name__ == "__main__":
    main(argv)
| {
"repo_name": "justanr/PHP-Galleria",
"path": "upload.py",
"copies": "1",
"size": "5589",
"license": "mit",
"hash": 5857253033757712000,
"line_mean": 30.9371428571,
"line_max": 97,
"alpha_frac": 0.5428520308,
"autogenerated": false,
"ratio": 3.5644132653061225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4607265296106122,
"avg_score": null,
"num_lines": null
} |
"""A simple URL shortener using Werkzeug and redis."""
import os
import redis
from jinja2 import Environment
from jinja2 import FileSystemLoader
from werkzeug.exceptions import HTTPException
from werkzeug.exceptions import NotFound
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.routing import Map
from werkzeug.routing import Rule
from werkzeug.urls import url_parse
from werkzeug.utils import redirect
from werkzeug.wrappers import Request
from werkzeug.wrappers import Response
def base36_encode(number):
    """Return non-negative integer *number* encoded in lowercase base 36.

    Raises:
        ValueError: if *number* is negative.
    """
    # An assert would be stripped under ``python -O``; validate explicitly.
    if number < 0:
        raise ValueError("positive integer required")
    if number == 0:
        return "0"
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    base36 = []
    while number != 0:
        number, i = divmod(number, 36)
        base36.append(digits[i])
    return "".join(reversed(base36))
def is_valid_url(url):
    """Accept only absolute http(s) URLs."""
    scheme = url_parse(url).scheme
    return scheme in ("http", "https")
def get_hostname(url):
    """Extract the network location (host[:port]) from *url*."""
    parts = url_parse(url)
    return parts.netloc
class Shortly:
    """WSGI application: create short links, follow them, show click stats."""

    def __init__(self, config):
        # Redis holds all persistent state: targets, reverse map, counters.
        self.redis = redis.Redis(config["redis_host"], config["redis_port"])
        template_path = os.path.join(os.path.dirname(__file__), "templates")
        self.jinja_env = Environment(
            loader=FileSystemLoader(template_path), autoescape=True
        )
        self.jinja_env.filters["hostname"] = get_hostname
        # Routing table; endpoint names map to on_<endpoint> methods below.
        self.url_map = Map(
            [
                Rule("/", endpoint="new_url"),
                Rule("/<short_id>", endpoint="follow_short_link"),
                Rule("/<short_id>+", endpoint="short_link_details"),
            ]
        )

    def on_new_url(self, request):
        """Render the submission form; on valid POST, create the link."""
        error = None
        url = ""
        if request.method == "POST":
            url = request.form["url"]
            if not is_valid_url(url):
                error = "Please enter a valid URL"
            else:
                short_id = self.insert_url(url)
                return redirect(f"/{short_id}+")
        return self.render_template("new_url.html", error=error, url=url)

    def on_follow_short_link(self, request, short_id):
        """Redirect to the stored target, counting the click."""
        link_target = self.redis.get(f"url-target:{short_id}")
        if link_target is None:
            raise NotFound()
        self.redis.incr(f"click-count:{short_id}")
        return redirect(link_target)

    def on_short_link_details(self, request, short_id):
        """Show the target URL and click count for one short link."""
        link_target = self.redis.get(f"url-target:{short_id}")
        if link_target is None:
            raise NotFound()
        # A missing counter means the link has never been followed.
        click_count = int(self.redis.get(f"click-count:{short_id}") or 0)
        return self.render_template(
            "short_link_details.html",
            link_target=link_target,
            short_id=short_id,
            click_count=click_count,
        )

    def error_404(self):
        """Render the 404 page with the matching status code."""
        response = self.render_template("404.html")
        response.status_code = 404
        return response

    def insert_url(self, url):
        """Store *url* and return its short id, reusing an existing one."""
        short_id = self.redis.get(f"reverse-url:{url}")
        if short_id is not None:
            # NOTE(review): redis typically returns bytes here, while newly
            # created ids below are str — confirm callers handle both.
            return short_id
        # Allocate the next id atomically via INCR, then base36-encode it.
        url_num = self.redis.incr("last-url-id")
        short_id = base36_encode(url_num)
        self.redis.set(f"url-target:{short_id}", url)
        self.redis.set(f"reverse-url:{url}", short_id)
        return short_id

    def render_template(self, template_name, **context):
        """Render a Jinja template to an HTML Response."""
        t = self.jinja_env.get_template(template_name)
        return Response(t.render(context), mimetype="text/html")

    def dispatch_request(self, request):
        """Route the request to the matching on_<endpoint> handler."""
        adapter = self.url_map.bind_to_environ(request.environ)
        try:
            endpoint, values = adapter.match()
            return getattr(self, f"on_{endpoint}")(request, **values)
        except NotFound:
            return self.error_404()
        except HTTPException as e:
            # Other HTTP exceptions are themselves valid WSGI responses.
            return e

    def wsgi_app(self, environ, start_response):
        """Plain WSGI entry point (wrapped by middleware in create_app)."""
        request = Request(environ)
        response = self.dispatch_request(request)
        return response(environ, start_response)

    def __call__(self, environ, start_response):
        return self.wsgi_app(environ, start_response)
def create_app(redis_host="localhost", redis_port=6379, with_static=True):
    """Build a Shortly WSGI app, optionally serving /static files."""
    config = {"redis_host": redis_host, "redis_port": redis_port}
    app = Shortly(config)
    if with_static:
        static_dir = os.path.join(os.path.dirname(__file__), "static")
        app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {"/static": static_dir})
    return app
if __name__ == "__main__":
    # Development server only: enables the interactive debugger and reload.
    from werkzeug.serving import run_simple
    app = create_app()
    run_simple("127.0.0.1", 5000, app, use_debugger=True, use_reloader=True)
| {
"repo_name": "pallets/werkzeug",
"path": "examples/shortly/shortly.py",
"copies": "1",
"size": "4568",
"license": "bsd-3-clause",
"hash": -1792375020338852600,
"line_mean": 32.3430656934,
"line_max": 88,
"alpha_frac": 0.6149299475,
"autogenerated": false,
"ratio": 3.8290025146689017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4943932462168902,
"avg_score": null,
"num_lines": null
} |
"""A simple utility to convert csv files to bibtex."""
import re
import os
import csv
import sys
import argparse
import textwrap
import numpy as np
from nltk.corpus import stopwords
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
# Words excluded from the title portion of a generated bibtex key.
STOP_WORDS = stopwords.words('english')
# Effectively "no limit" for the --max-entries option.
MAX_ENTRIES = 9999999999
# Generic CSV column header -> bibtex field name mapping.
HEADERS = {
    'Abstract Note': 'abstract',
    'File Attachments': 'file',
    'Manual Tags': 'keywords',
    'Publication Title': 'journal',
    'Publication Year': 'year',
    'Publisher': 'publisher'}
# CSV "Item Type" -> bibtex entry type, plus per-type header remappings
# that take precedence over the generic HEADERS mapping above.
TYPES = {
    'journalArticle': {
        'type': 'article',
        'remap': {}},
    'newspaperArticle': {
        'type': 'article',
        'remap': {}},
    'magazineArticle': {
        'type': 'article',
        'remap': {}},
    'conferencePaper': {
        'type': 'inproceedings',
        'remap': {'Publication Title': 'booktitle'}},
    'book': {
        'type': 'book',
        'remap': {'Publication Title': 'booktitle'}},
    'bookSection': {
        'type': 'incollection',
        'remap': {'Publication Title': 'booktitle'}},
    'report': {
        'type': 'techreport',
        'remap': {'Publisher': 'institution'}},
    'thesis': {
        'type': 'phdthesis',
        'remap': {'Publisher': 'school'}},
    'webpage': {
        'type': 'unpublished',
        'remap': {}}}
def add_entry(row, entry_type, remap):
    """Convert one CSV row into a bibtexparser entry dict.

    `remap` holds per-type header overrides that win over the generic
    HEADERS mapping; any unknown header becomes a lowercased bibtex field.
    """
    entry = {'ID': entry_key(row), 'ENTRYTYPE': entry_type}
    skipped = ('Item Type', 'key', '')
    for header, value in row.items():
        if not value or header in skipped:
            continue
        if header in remap:
            field = remap[header]
        elif header in HEADERS:
            field = HEADERS[header]
        else:
            field = re.sub(r'\W', '', header).lower()
        entry[field] = value
    # Zotero separates authors with '; '; bibtex expects ' and '.
    entry['author'] = entry['author'].replace('; ', ' and ')
    if 'pages' in entry:
        # Normalize hyphens and en/em dashes to the bibtex page range '--'.
        pages = re.sub('-', '--', entry['pages'])
        entry['pages'] = re.sub(u'\u2013|\u2014', '--', pages)
    if 'file' in entry:
        files = entry_file(entry['file'], entry['ID'])
        if files:
            entry['file'] = files
        else:
            del entry['file']
    if 'keywords' in entry:
        entry['keywords'] = entry_keywords(entry['keywords'])
    return entry
def entry_key(row):
    """Build the bibtex citation key as ``author_titleword_year``.

    Falls back to 'anon' when the Author or Title column is empty (the
    original indexed ``split()[0]`` unconditionally, which raised
    IndexError on an empty Author) and to '????' for a missing year.
    """
    author_words = row['Author'].split()
    author = author_words[0].lower() if author_words else 'anon'
    # Strip trailing punctuation such as the comma after a surname.
    author = re.sub(r'\W+$', '', author)
    words = [w for w in row['Title'].lower().split() if w not in STOP_WORDS]
    title = words[0] if words else 'anon'
    year = row['Publication Year'].split()
    year = year[0].lower() if year else '????'
    return '_'.join([author, title, year])
def entry_keywords(value):
    """Build the bibtex keywords field from a ';'-separated tag string.

    Each tag is stripped of surrounding whitespace; tags are re-joined
    with ', ' as bibtex expects.
    """
    return ', '.join(word.strip() for word in value.split(';'))
def entry_file(value, key):
    """Build the bibtex file field from a ';'-separated attachment list.

    Database artifacts (``dn=``/``res=`` fragments and document summaries)
    are dropped; remaining paths get backslashes and colons escaped and
    are tagged with a mime type chosen by file extension.
    """
    tagged = []
    for raw in value.split(';'):
        path = raw.strip()
        if path.startswith(('dn=', 'res=')) or 'documentSummary' in path:
            continue
        ext = os.path.splitext(path.lower())[1]
        # Escape characters that are special in the bibtex file field.
        path = re.sub(r'\\', r'\\\\', path)
        path = re.sub(r':', r'\\:', path)
        if ext == '.pdf':
            tagged.append('{}_pdf:{}:application/pdf'.format(key, path))
        else:
            tagged.append('{}_html:{}:text/html'.format(key, path))
    return ';'.join(tagged)
def fix_columns_headers(old_row):
    """Undo R's mangling of CSV headers (dots substituted for spaces).

    Any header ending in 'key' (case-insensitive) collapses to 'key'.
    """
    fixed = {}
    for header, value in old_row.items():
        if re.match(r'(.*)?key$', header, re.IGNORECASE):
            header = 'key'
        fixed[header.replace('.', ' ').strip()] = value
    return fixed
def get_filter_rows(args):
    """Return ``{bibtex_key: 1}`` for every row of the optional filter CSV.

    Rows whose keys appear here are later excluded from the output.
    """
    if not args.filter_csv:
        return {}
    filter_rows = {}
    with open(args.filter_csv) as csv_file:
        for row in csv.DictReader(csv_file):
            fixed = fix_columns_headers(row)
            filter_rows[entry_key(fixed)] = 1
    return filter_rows
def parse_csv_file(args):
    """Parse a CSV file into bibtex format.

    Reads `args.csv_file`, converts each row to a bibtex entry, and writes
    one or more bibtex files (chunked when --max-entries is set).
    Exits the process on an unrecognized Item Type.
    """
    entries = []
    filter_rows = get_filter_rows(args)
    with open(args.csv_file) as csv_file:
        reader = csv.DictReader(csv_file)
        for i, row in enumerate(reader, 1):
            # --starting-row lets a partially-processed file be resumed.
            if i < args.starting_row:
                continue
            row = fix_columns_headers(row)
            key = entry_key(row)
            # Drop rows whose key also appears in the --filter-csv file.
            if filter_rows.get(key):
                continue
            if row['Item Type'] not in TYPES:
                print('ItemType not found: "{}"'.format(row['Item Type']))
                sys.exit()
            row_type = TYPES[row['Item Type']]
            entry = add_entry(row, row_type['type'], row_type['remap'])
            entries.append(entry)
    if args.randomize:
        entries = np.random.permutation(entries)  # pylint: disable=no-member
    # Write entries in chunks of --max-entries per file; with the default
    # limit this loop runs once and writes a single file.
    for i, beg in enumerate(range(0, len(entries), args.max_entries), 1):
        file_name = args.bibtex_file
        if args.max_entries != MAX_ENTRIES:
            # Multiple output files: insert the chunk number before the ext.
            root, ext = os.path.splitext(file_name)
            file_name = '{}{}{}'.format(root, i, ext)
        print(i, len(entries[beg:beg + args.max_entries]))
        bibtex_db = BibDatabase()
        bibtex_db.entries = entries[beg:beg + args.max_entries]
        writer = BibTexWriter()
        # Preserve the input (or shuffled) order in the output file.
        writer.order_entries_by = None
        with open(file_name, 'w') as bibtex_file:
            bibtex_file.write(writer.write(bibtex_db))
def parse_command_line():
    """Define the command-line interface and return the parsed arguments."""
    description = """A utility to convert CSV files to Bibtex format."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(description))
    required = arg_parser.add_argument_group('required arguments')
    required.add_argument('-c', '--csv-file', required=True, metavar='CSV',
                          help='Input CSV file containing the bibliographic '
                          'entries.')
    required.add_argument('-b', '--bibtex-file', required=True, metavar='BIBTEX',
                          help='Output Bibtex file.')
    required.add_argument('-f', '--filter-csv', metavar='FILTER',
                          help='Filter CSV rows by rows in this file.')
    required.add_argument('-m', '--max-entries', default=MAX_ENTRIES, type=int,
                          help='Maximum entries per bibtex output file.')
    required.add_argument('-r', '--randomize', action='store_true',
                          help='Shuffle the bibtext output entries?')
    required.add_argument('-s', '--starting-row', metavar='ROW', default=0,
                          type=int,
                          help='Output Bibtex file.')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Script entry point: parse the options, then run the conversion.
    ARGS = parse_command_line()
    parse_csv_file(ARGS)
| {
"repo_name": "joandamerow/lit-mining-occurrencedb",
"path": "util/csv_to_bibtex.py",
"copies": "1",
"size": "7434",
"license": "isc",
"hash": -399882508084543940,
"line_mean": 26.5333333333,
"line_max": 78,
"alpha_frac": 0.5608017218,
"autogenerated": false,
"ratio": 3.8941854374017812,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9954987159201781,
"avg_score": 0,
"num_lines": 270
} |
# A simple value function iteration example: the standard dynamic investment/consumption problem
# utility function is log() and production function is k^alpha
from __future__ import division #use Python 3 division operation (Python 2 does integer division by default)
import numpy as np
# Model parameters
A = 1  # productivity level in the production function A*k**alpha
alpha = 0.3  # capital share / curvature of the production function
beta = 0.6  # time discount factor
ab = alpha * beta
# Closed-form solution coefficients for A=1: v(k) = c0 + c1*log(k)
c0 = 1/(1-beta) * ( ab/(1-ab) * np.log(ab) + np.log(1-ab) )
c1 = alpha / (1-ab)
# Analytical solution
def v_star(k):
    """Exact value function for A=1: v*(k) = c0 + c1*log(k)."""
    return c0 + c1 * np.log(k)
# Grid (domain) on which the value function is defined
grid = np.array([0.04,0.08,0.12,0.16,0.2])
# Initialize v_0 = 0 (starting guess for the value function iteration)
v0 = np.zeros(len(grid))
# Evaluate the analytical v* on the grid for comparison
v = np.array([v_star(k) for k in grid])
def T_g(w):
    """
    Bellman operator.

    Input: a flat numpy array of the same length as `grid` (current value
    function guess).
    Returns: the updated value function and the associated policy function
    (the maximizing capital choice k' for each grid point k).
    """
    updated = np.zeros(len(w))
    policy = np.zeros(len(w))
    for i, k in enumerate(grid):
        # Objective values for every feasible k' (consumption must stay
        # strictly positive: k' < A*k**alpha).
        objective = []
        for j, kp in enumerate(grid):
            if kp < A * k ** alpha:
                objective.append(np.log(A * k ** alpha - kp) + beta * w[j])
        updated[i] = np.amax(objective)
        policy[i] = grid[np.argmax(objective)]
    return updated, policy
def iterate_fixed_point(F_h, w, tol=10e-4, max_it=100):
    """
    Computes a fixed point in function space.

    Inputs: Bellman Operator that returns a (value function, policy
    function) pair, and an initial function guess (flat numpy array).
    Returns: converged (or max iterate) (value function, policy function)
    pair.

    Fixes over the original: the Python-2-only print statements are now
    parenthesized single-expression calls (valid on Python 2 and 3); on
    convergence the *updated* value function is returned so it matches the
    policy from the same iterate; `error` is defined even when max_it <= 0.
    """
    Fw, h = F_h(w)  # initialize (F_h returns a value/policy pair)
    error = float('inf')  # defined even if the loop below never runs
    for k in range(max_it):
        Fw_new, h = F_h(Fw)
        error = np.max(np.abs(Fw_new - Fw))
        if error < tol:
            print("Complete: %d iterations" % (k + 1))
            # Return the newest iterate, consistent with the policy h.
            return Fw_new, h
        Fw = Fw_new
    print("Maximum number of iterations (%d) complete, max error = %f" % (max_it, error))
    return Fw, h
# Run the example: compare the analytical solution with the iterative
# approximation produced by the Bellman operator.
Tv_star, g_star = T_g(v)
v1, g1 = T_g(v0)
v_approx, g_approx = iterate_fixed_point(T_g, v0)
# Single-argument parenthesized prints are valid on both Python 2 and 3
# (the original used Python-2-only print statements).
print("Analitic solution v* = " + str(v))
print("T(v*) = " + str(Tv_star))
print("v1 = T(v0) = " + str(v1))
print("Fixed point approximation of v*: " + str(v_approx))
print("Fixed point approximation of policy function: " + str(g_approx))
| {
"repo_name": "tobanw/py-econ-examples",
"path": "value_function_iteration.py",
"copies": "1",
"size": "2628",
"license": "mit",
"hash": 2361679606427094000,
"line_mean": 35.5,
"line_max": 117,
"alpha_frac": 0.6544901065,
"autogenerated": false,
"ratio": 3.0665110851808635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4221001191680863,
"avg_score": null,
"num_lines": null
} |
# A simple vector class
def vec(*v):
    """Convenience factory: build a Vec from positional components."""
    # `apply()` was removed in Python 3; argument unpacking is the
    # equivalent form and also works on Python 2.
    return Vec(*v)
class Vec:
    """A simple list-backed vector supporting +, - and scalar *.

    Modernized from Python-2-only syntax: `<>` -> `!=`/isinstance,
    backtick-repr -> repr(). All replacements are valid on Python 2 too.
    """

    def __init__(self, *v):
        self.v = list(v)

    def fromlist(self, v):
        """Replace this vector's components with a copy of the list `v`.

        Raises TypeError (before mutating self) if `v` is not a list.
        """
        if not isinstance(v, list):
            raise TypeError
        self.v = v[:]
        return self

    def __repr__(self):
        return 'vec(' + repr(self.v)[1:-1] + ')'

    def __len__(self):
        return len(self.v)

    def __getitem__(self, i):
        return self.v[i]

    def __add__(a, b):
        # Element-wise addition
        return Vec().fromlist([a[i] + b[i] for i in range(len(a))])

    def __sub__(a, b):
        # Element-wise subtraction
        return Vec().fromlist([a[i] - b[i] for i in range(len(a))])

    def __mul__(self, scalar):
        # Multiply by scalar
        return Vec().fromlist([x * scalar for x in self.v])
def test():
    """Exercise the Vec operations and print the results."""
    a = vec(1, 2, 3)
    b = vec(3, 2, 1)
    # Parenthesized single-argument prints run on both Python 2 and 3
    # (the original used Python-2-only print statements).
    print(a)
    print(b)
    print(a + b)
    print(a * 3.0)

test()
| {
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"path": "Demo/classes/Vec.py",
"copies": "1",
"size": "1151",
"license": "mit",
"hash": -658614749522852000,
"line_mean": 16.984375,
"line_max": 44,
"alpha_frac": 0.4448305821,
"autogenerated": false,
"ratio": 3.2061281337047354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4150958715804735,
"avg_score": null,
"num_lines": null
} |
"""A simple view with a text box, to display log messages obtained from the stream handler"""
from julesTk import view
__author__ = "Joeri Jongbloets <joeri@jongbloets.net>"
class LogView(view.Frame):
    """A view showing log messages in a read-only, scrollable text box."""

    def __init__(self, parent):
        super(LogView, self).__init__(parent=parent)
        # Let the text widget absorb all extra space in the frame.
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        h_scroll = view.tk.Scrollbar(self, orient=view.tk.HORIZONTAL)
        h_scroll.grid(row=1, column=0, sticky="ew")
        v_scroll = view.tk.Scrollbar(self)
        v_scroll.grid(row=0, column=1, sticky="ns")
        self._text = view.tk.Text(
            self, wrap="none",
            xscrollcommand=h_scroll.set,
            yscrollcommand=v_scroll.set
        )
        # Keep the widget read-only; write()/clear() toggle the state
        # around each modification.
        self._text.config(state="disabled")
        self._text.bind("<1>", lambda event: self._text.focus_set())
        self._text.grid(row=0, column=0, sticky="nsew")
        h_scroll.config(command=self._text.xview)
        v_scroll.config(command=self._text.yview)

    @property
    def text(self):
        """The underlying Tk text widget"""
        return self._text

    def write(self, message):
        """Append a message to the console (file-like interface)"""
        widget = self._text
        widget.config(state="normal")
        widget.insert(view.tk.END, message)
        widget.config(state="disabled")

    def flush(self):
        # No-op: present so this view can stand in for a stream handler.
        pass

    def clear(self, event=None):
        """Remove all messages from the console"""
        widget = self._text
        widget.config(state="normal")
        widget.delete(1.0, view.tk.END)
        widget.config(state="disabled")
| {
"repo_name": "jjongbloets/julesTk",
"path": "julesTk/utils/console.py",
"copies": "1",
"size": "1612",
"license": "mit",
"hash": -5604416678394960000,
"line_mean": 28.8518518519,
"line_max": 93,
"alpha_frac": 0.6060794045,
"autogenerated": false,
"ratio": 3.5428571428571427,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46489365473571426,
"avg_score": null,
"num_lines": null
} |
""" A simple way of interacting to a ethereum node through JSON RPC commands. """
from __future__ import print_function
from builtins import map
from builtins import str
from builtins import object
import logging
import warnings
import json
import gevent
from ethereum.abi import ContractTranslator
from ethereum.tools.keys import privtoaddr
from ethereum.transactions import Transaction
from ethereum.utils import (
denoms,
int_to_big_endian,
big_endian_to_int,
normalize_address,
decode_hex,
encode_hex,
)
from ethereum.tools._solidity import solidity_unresolved_symbols, solidity_library_symbol, solidity_resolve_symbols
from tinyrpc.protocols.jsonrpc import JSONRPCErrorResponse, JSONRPCSuccessResponse
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport
from pyethapp.jsonrpc import address_encoder as _address_encoder
from pyethapp.jsonrpc import (
data_encoder, data_decoder, address_decoder, default_gasprice,
default_startgas, quantity_encoder, quantity_decoder,
)
# pylint: disable=invalid-name,too-many-arguments,too-few-public-methods
# The number of arguments and their names are determined by the JSON-RPC spec
# 20 zero bytes: the "empty"/null Ethereum address value.
z_address = b'\x00' * 20
# Module-level logger.
log = logging.getLogger(__name__)
def address_encoder(address):
    """Normalize *address* and hex encode it with the '0x' prefix."""
    return _address_encoder(normalize_address(address, allow_blank=True))
def block_tag_encoder(val):
    """Encode a block number or block tag for the JSON-RPC API.

    Accepts an integer block number, the tags 'latest'/'pending' (as str
    or bytes), or a falsy value (encoded as None).

    Fix: the original compared a *bytes* value against the str tuple
    ('latest', 'pending'), which is always False on Python 3, so every
    valid bytes tag tripped the assert.
    """
    if isinstance(val, int):
        return quantity_encoder(val)
    if val and isinstance(val, (bytes, str)):
        # Compare in bytes space so both str and bytes tags are accepted.
        tag = val.encode() if isinstance(val, str) else val
        assert tag in (b'latest', b'pending')
        return data_encoder(tag)
    assert not val
    return None
def topic_encoder(topic):
    """Hex encode an integer log topic as big-endian data."""
    assert isinstance(topic, int)
    raw = int_to_big_endian(topic)
    return data_encoder(raw)
def topic_decoder(topic):
    """Decode a hex-encoded log topic back into its integer value."""
    raw = data_decoder(topic)
    return big_endian_to_int(raw)
def deploy_dependencies_symbols(all_contract):
    """Map each contract name to the list of contracts it depends on.

    Dependencies are discovered through the unresolved library symbols
    embedded in each contract's hex bytecode.

    Raises:
        ValueError: if two contract names map to the same library symbol.
    """
    symbol_to_name = dict()
    for name in all_contract:
        symbol = solidity_library_symbol(name)
        if symbol in symbol_to_name:
            raise ValueError('Conflicting library names.')
        symbol_to_name[symbol] = name

    dependencies = {}
    for name, contract in list(all_contract.items()):
        unresolved = solidity_unresolved_symbols(contract['bin_hex'])
        dependencies[name] = [symbol_to_name[symbol] for symbol in unresolved]
    return dependencies
def dependencies_order_of_build(target_contract, dependencies_map):
    """Return an ordered list of contracts sufficient to deploy the target.

    Every (transitive) dependency appears before the contract that needs
    it, with `target_contract` itself last.

    Note:
        This function assumes that the `dependencies_map` is an acyclic
        graph.
    """
    if not dependencies_map:
        return [target_contract]
    if target_contract not in dependencies_map:
        raise ValueError('no dependencies defined for {}'.format(target_contract))

    order = [target_contract]
    pending = list(dependencies_map[target_contract])
    while pending:
        current = pending.pop(0)
        insert_at = len(order)
        for dependency in dependencies_map[current]:
            # The current contract must come before everything it needs.
            if dependency in order:
                insert_at = order.index(dependency)
            else:
                pending.append(dependency)
        order.insert(insert_at, current)
    order.reverse()
    return order
class JSONRPCClientReplyError(Exception):
    """Raised when the JSON-RPC server returns an error (or unknown) reply."""
    pass
class JSONRPCClient(object):
protocol = JSONRPCProtocol()
def __init__(self, host='127.0.0.1', port=4000, print_communication=True,
privkey=None, sender=None, use_ssl=False, transport=None):
"""
Args:
host (str): host address to connect to.
port (int): port number to connect to.
print_communication (bool): True to print the rpc communication.
privkey: specify privkey for local signing
sender (address): the sender address, computed from privkey if provided.
use_ssl (bool): Use https instead of http.
transport: Tiny rpc transport instance.
"""
if transport is None:
self.transport = HttpPostClientTransport('{}://{}:{}'.format(
'https' if use_ssl else 'http', host, port), headers={'content-type': 'application/json'})
else:
self.transport = transport
self.print_communication = print_communication
self.privkey = privkey
self._sender = sender
self.port = port
def __repr__(self):
return '<JSONRPCClient @%d>' % self.port
@property
def sender(self):
if self.privkey:
return privtoaddr(self.privkey)
if self._sender is None:
self._sender = self.coinbase
return self._sender
@property
def coinbase(self):
""" Return the client coinbase address. """
return address_decoder(self.call('eth_coinbase'))
def blocknumber(self):
""" Return the most recent block. """
return quantity_decoder(self.call('eth_blockNumber'))
def nonce(self, address):
if len(address) == 40:
address = decode_hex(address)
try:
res = self.call('eth_nonce', address_encoder(address), 'pending')
return quantity_decoder(res)
except JSONRPCClientReplyError as e:
if e.message == 'Method not found':
raise JSONRPCClientReplyError(
"'eth_nonce' is not supported by your endpoint (pyethapp only). "
"For transactions use server-side nonces: "
"('eth_sendTransaction' with 'nonce=None')")
raise e
def balance(self, account):
""" Return the balance of the account of given address. """
res = self.call('eth_getBalance', address_encoder(account), 'pending')
return quantity_decoder(res)
def gaslimit(self):
return quantity_decoder(self.call('eth_gasLimit'))
def lastgasprice(self):
return quantity_decoder(self.call('eth_lastGasPrice'))
def new_abi_contract(self, contract_interface, address):
warnings.warn('deprecated, use new_contract_proxy', DeprecationWarning)
return self.new_contract_proxy(contract_interface, address)
def new_contract_proxy(self, contract_interface, address):
""" Return a proxy for interacting with a smart contract.
Args:
contract_interface: The contract interface as defined by the json.
address: The contract's address.
"""
sender = self.sender or privtoaddr(self.privkey)
return ContractProxy(
sender,
contract_interface,
address,
self.eth_call,
self.send_transaction,
self.eth_estimateGas,
)
def deploy_solidity_contract(self, sender, contract_name, all_contracts, # pylint: disable=too-many-locals
libraries, constructor_parameters, timeout=None, gasprice=default_gasprice):
if contract_name not in all_contracts:
raise ValueError('Unkonwn contract {}'.format(contract_name))
libraries = dict(libraries)
contract = all_contracts[contract_name]
contract_interface = contract['abi']
symbols = solidity_unresolved_symbols(contract['bin_hex'])
if symbols:
available_symbols = list(map(solidity_library_symbol, list(all_contracts.keys()))) # pylint: disable=bad-builtin
unknown_symbols = set(symbols) - set(available_symbols)
if unknown_symbols:
msg = 'Cannot deploy contract, known symbols {}, unresolved symbols {}.'.format(
available_symbols,
unknown_symbols,
)
raise Exception(msg)
dependencies = deploy_dependencies_symbols(all_contracts)
deployment_order = dependencies_order_of_build(contract_name, dependencies)
deployment_order.pop() # remove `contract_name` from the list
log.debug('Deploing dependencies: {}'.format(str(deployment_order)))
for deploy_contract in deployment_order:
dependency_contract = all_contracts[deploy_contract]
hex_bytecode = solidity_resolve_symbols(dependency_contract['bin_hex'], libraries)
bytecode = decode_hex(hex_bytecode)
dependency_contract['bin_hex'] = hex_bytecode
dependency_contract['bin'] = bytecode
transaction_hash_hex = self.send_transaction(
sender,
to='',
data=bytecode,
gasprice=gasprice,
)
transaction_hash = decode_hex(transaction_hash_hex)
self.poll(transaction_hash, timeout=timeout)
receipt = self.eth_getTransactionReceipt(transaction_hash)
contract_address = receipt['contractAddress']
contract_address = contract_address[2:] # remove the hexadecimal prefix 0x from the address
libraries[deploy_contract] = contract_address
deployed_code = self.eth_getCode(decode_hex(contract_address))
if deployed_code == '0x':
raise RuntimeError("Contract address has no code, check gas usage.")
hex_bytecode = solidity_resolve_symbols(contract['bin_hex'], libraries)
bytecode = decode_hex(hex_bytecode)
contract['bin_hex'] = hex_bytecode
contract['bin'] = bytecode
if constructor_parameters:
translator = ContractTranslator(contract_interface)
parameters = translator.encode_constructor_arguments(constructor_parameters)
bytecode = contract['bin'] + parameters
else:
bytecode = contract['bin']
transaction_hash_hex = self.send_transaction(
sender,
to='',
data=bytecode,
gasprice=gasprice,
)
transaction_hash = decode_hex(transaction_hash_hex)
self.poll(transaction_hash, timeout=timeout)
receipt = self.eth_getTransactionReceipt(transaction_hash)
contract_address = receipt['contractAddress']
deployed_code = self.eth_getCode(decode_hex(contract_address[2:]))
if deployed_code == '0x':
raise RuntimeError("Deployment of {} failed. Contract address has no code, check gas usage.".format(
contract_name
))
return self.new_contract_proxy(
contract_interface,
contract_address,
)
def find_block(self, condition):
"""Query all blocks one by one and return the first one for which
`condition(block)` evaluates to `True`.
"""
i = 0
while True:
block = self.call('eth_getBlockByNumber', quantity_encoder(i), True)
if condition(block) or not block:
return block
i += 1
def new_filter(self, fromBlock=None, toBlock=None, address=None, topics=None):
""" Creates a filter object, based on filter options, to notify when
the state changes (logs). To check if the state has changed, call
eth_getFilterChanges.
"""
json_data = {
'fromBlock': block_tag_encoder(fromBlock or ''),
'toBlock': block_tag_encoder(toBlock or ''),
}
if address is not None:
json_data['address'] = address_encoder(address)
if topics is not None:
if not isinstance(topics, list):
raise ValueError('topics must be a list')
json_data['topics'] = [topic_encoder(topic) for topic in topics]
filter_id = self.call('eth_newFilter', json_data)
return quantity_decoder(filter_id)
def filter_changes(self, fid):
changes = self.call('eth_getFilterChanges', quantity_encoder(fid))
if not changes:
return None
elif isinstance(changes, bytes):
return data_decoder(changes)
else:
decoders = dict(blockHash=data_decoder,
transactionHash=data_decoder,
data=data_decoder,
address=address_decoder,
topics=lambda x: [topic_decoder(t) for t in x],
blockNumber=quantity_decoder,
logIndex=quantity_decoder,
transactionIndex=quantity_decoder)
return [{k: decoders[k](v) for k, v in list(c.items()) if v is not None} for c in changes]
def call(self, method, *args):
""" Do the request and returns the result.
Args:
method (str): The RPC method.
args: The encoded arguments expected by the method.
- Object arguments must be supplied as an dictionary.
- Quantity arguments must be hex encoded starting with '0x' and
without left zeros.
- Data arguments must be hex encoded starting with '0x'
"""
request = self.protocol.create_request(method, args)
reply = self.transport.send_message(request.serialize())
if self.print_communication:
print(json.dumps(json.loads(request.serialize()), indent=2))
print(reply)
jsonrpc_reply = self.protocol.parse_reply(reply)
if isinstance(jsonrpc_reply, JSONRPCSuccessResponse):
return jsonrpc_reply.result
elif isinstance(jsonrpc_reply, JSONRPCErrorResponse):
raise JSONRPCClientReplyError(jsonrpc_reply.error)
else:
raise JSONRPCClientReplyError('Unknown type of JSONRPC reply')
__call__ = call
def send_transaction(self, sender, to, value=0, data='', startgas=0,
gasprice=10 * denoms.szabo, nonce=None):
""" Helper to send signed messages.
This method will use the `privkey` provided in the constructor to
locally sign the transaction. This requires an extended server
implementation that accepts the variables v, r, and s.
"""
if not self.privkey and not sender:
raise ValueError('Either privkey or sender needs to be supplied.')
if self.privkey and not sender:
sender = privtoaddr(self.privkey)
if nonce is None:
nonce = self.nonce(sender)
elif self.privkey:
if sender != privtoaddr(self.privkey):
raise ValueError('sender for a different privkey.')
if nonce is None:
nonce = self.nonce(sender)
else:
if nonce is None:
nonce = 0
if not startgas:
startgas = self.gaslimit() - 1
tx = Transaction(nonce, gasprice, startgas, to=to, value=value, data=data)
if self.privkey:
# add the fields v, r and s
tx.sign(self.privkey)
tx_dict = tx.to_dict()
# Transaction.to_dict() encodes 'data', so we need to decode it here.
tx_dict['data'] = data_decoder(tx_dict['data'])
# rename the fields to match the eth_sendTransaction signature
tx_dict.pop('hash')
tx_dict['sender'] = sender
tx_dict['gasPrice'] = tx_dict.pop('gasprice')
tx_dict['gas'] = tx_dict.pop('startgas')
res = self.eth_sendTransaction(**tx_dict)
assert len(res) in (20, 32)
return encode_hex(res)
def eth_sendTransaction(self, nonce=None, sender='', to='', value=0, data='',
gasPrice=default_gasprice, gas=default_startgas,
v=None, r=None, s=None):
""" Creates new message call transaction or a contract creation, if the
data field contains code.
Note:
The support for local signing through the variables v,r,s is not
part of the standard spec, a extended server is required.
Args:
from (address): The 20 bytes address the transaction is send from.
to (address): DATA, 20 Bytes - (optional when creating new
contract) The address the transaction is directed to.
gas (int): Gas provided for the transaction execution. It will
return unused gas.
gasPrice (int): gasPrice used for each paid gas.
value (int): Value send with this transaction.
data (bin): The compiled code of a contract OR the hash of the
invoked method signature and encoded parameters.
nonce (int): This allows to overwrite your own pending transactions
that use the same nonce.
"""
if to == '' and data.isalnum():
warnings.warn(
'Verify that the data parameter is _not_ hex encoded, if this is the case '
'the data will be double encoded and result in unexpected '
'behavior.'
)
if to == '0' * 40:
warnings.warn('For contract creating the empty string must be used.')
json_data = {
'to': data_encoder(normalize_address(to, allow_blank=True)),
'value': quantity_encoder(value),
'gasPrice': quantity_encoder(gasPrice),
'gas': quantity_encoder(gas),
'data': data_encoder(data),
}
if not sender and not (v and r and s):
raise ValueError('Either sender or v, r, s needs to be informed.')
if sender is not None:
json_data['from'] = address_encoder(sender)
if v and r and s:
json_data['v'] = quantity_encoder(v)
json_data['r'] = quantity_encoder(r)
json_data['s'] = quantity_encoder(s)
if nonce is not None:
json_data['nonce'] = quantity_encoder(nonce)
res = self.call('eth_sendTransaction', json_data)
return data_decoder(res)
def _format_call(self, sender='', to='', value=0, data='',
startgas=default_startgas, gasprice=default_gasprice):
""" Helper to format the transaction data. """
json_data = dict()
if sender is not None:
json_data['from'] = address_encoder(sender)
if to is not None:
json_data['to'] = data_encoder(to)
if value is not None:
json_data['value'] = quantity_encoder(value)
if gasprice is not None:
json_data['gasPrice'] = quantity_encoder(gasprice)
if startgas is not None:
json_data['gas'] = quantity_encoder(startgas)
if data is not None:
json_data['data'] = data_encoder(data)
return json_data
def eth_call(self, sender='', to='', value=0, data='',
startgas=default_startgas, gasprice=default_gasprice,
block_number='latest'):
""" Executes a new message call immediately without creating a
transaction on the block chain.
Args:
from: The address the transaction is send from.
to: The address the transaction is directed to.
gas (int): Gas provided for the transaction execution. eth_call
consumes zero gas, but this parameter may be needed by some
executions.
gasPrice (int): gasPrice used for each paid gas.
value (int): Integer of the value send with this transaction.
data (bin): Hash of the method signature and encoded parameters.
For details see Ethereum Contract ABI.
block_number: Determines the state of ethereum used in the
call.
"""
json_data = self._format_call(
sender,
to,
value,
data,
startgas,
gasprice,
)
res = self.call('eth_call', json_data, block_number)
return data_decoder(res)
def eth_estimateGas(self, sender='', to='', value=0, data='',
startgas=default_startgas, gasprice=default_gasprice):
""" Makes a call or transaction, which won't be added to the blockchain
and returns the used gas, which can be used for estimating the used
gas.
Args:
from: The address the transaction is send from.
to: The address the transaction is directed to.
gas (int): Gas provided for the transaction execution. eth_call
consumes zero gas, but this parameter may be needed by some
executions.
gasPrice (int): gasPrice used for each paid gas.
value (int): Integer of the value send with this transaction.
data (bin): Hash of the method signature and encoded parameters.
For details see Ethereum Contract ABI.
block_number: Determines the state of ethereum used in the
call.
"""
json_data = self._format_call(
sender,
to,
value,
data,
startgas,
gasprice,
)
res = self.call('eth_estimateGas', json_data)
return quantity_decoder(res)
def eth_getTransactionReceipt(self, transaction_hash):
""" Returns the receipt of a transaction by transaction hash.
Args:
transaction_hash: Hash of a transaction.
Returns:
A dict representing the transaction receipt object, or null when no
receipt was found.
"""
if transaction_hash.startswith('0x'):
warnings.warn(
'transaction_hash seems to be already encoded, this will'
' result in unexpected behavior'
)
if len(transaction_hash) != 32:
raise ValueError(
'transaction_hash length must be 32 (it might be hex encode)'
)
transaction_hash = data_encoder(transaction_hash)
return self.call('eth_getTransactionReceipt', transaction_hash)
def eth_getCode(self, address, block='latest'):
""" Returns code at a given address.
Args:
address: An address.
block_number: Integer block number, or the string "latest",
"earliest" or "pending".
"""
if address.startswith('0x'):
warnings.warn(
'address seems to be already encoded, this will result '
'in unexpected behavior'
)
if len(address) != 20:
raise ValueError(
'address length must be 20 (it might be hex encode)'
)
return self.call(
'eth_getCode',
address_encoder(address),
block,
)
def eth_getTransactionByHash(self, transaction_hash):
""" Returns the information about a transaction requested by
transaction hash.
"""
if transaction_hash.startswith('0x'):
warnings.warn(
'transaction_hash seems to be already encoded, this will'
' result in unexpected behavior'
)
if len(transaction_hash) != 32:
raise ValueError(
'transaction_hash length must be 32 (it might be hex encode)'
)
transaction_hash = data_encoder(transaction_hash)
return self.call('eth_getTransactionByHash', transaction_hash)
    def poll(self, transaction_hash, confirmations=None, timeout=None):
        """ Wait until the `transaction_hash` is applied or rejected.
        If timeout is None, this could wait indefinitely!
        Args:
            transaction_hash (hash): Transaction hash that we are waiting for.
            confirmations (int): Number of block confirmations that we will
                wait for.
            timeout (float): Timeout in seconds, raise an Exception on
                timeout.
        """
        if transaction_hash.startswith('0x'):
            warnings.warn(
                'transaction_hash seems to be already encoded, this will'
                ' result in unexpected behavior'
            )
        if len(transaction_hash) != 32:
            raise ValueError(
                'transaction_hash length must be 32 (it might be hex encode)'
            )
        transaction_hash = data_encoder(transaction_hash)
        # gevent.Timeout fires asynchronously inside the try block below and
        # is converted into a plain Exception; it is always cancelled in the
        # finally clause so it cannot leak into the caller
        deadline = None
        if timeout:
            deadline = gevent.Timeout(timeout)
            deadline.start()
        try:
            # used to check if the transaction was removed, this could happen
            # if gas price is to low:
            #
            # > Transaction (acbca3d6) below gas price (tx=1 Wei ask=18
            # > Shannon). All sequential txs from this address(7d0eae79)
            # > will be ignored
            #
            last_result = None
            while True:
                # Could return None for a short period of time, until the
                # transaction is added to the pool
                transaction = self.call('eth_getTransactionByHash', transaction_hash)
                # if the transaction was added to the pool and then removed
                if transaction is None and last_result is not None:
                    raise Exception('invalid transaction, check gas price')
                # the transaction was added to the pool and mined
                if transaction and transaction['blockNumber'] is not None:
                    break
                last_result = transaction
                gevent.sleep(.5)
            if confirmations:
                # this will wait for both APPLIED and REVERTED transactions
                transaction_block = quantity_decoder(transaction['blockNumber'])
                confirmation_block = transaction_block + confirmations
                block_number = self.blocknumber()
                while block_number < confirmation_block:
                    gevent.sleep(.5)
                    block_number = self.blocknumber()
        except gevent.Timeout:
            raise Exception('timeout when polling for transaction')
        finally:
            if deadline:
                deadline.cancel()
class MethodProxy(object):
    """ Callable wrapper around a single contract function.

    Calling the instance dispatches to `call` for constant functions and to
    `transact` for state changing ones.
    """
    # keyword arguments accepted by the underlying call/transact functions
    valid_kargs = {'gasprice', 'startgas', 'value'}
    def __init__(self, sender, contract_address, function_name, translator,
                 call_function, transaction_function, estimate_function=None):
        self.sender = sender
        self.contract_address = contract_address
        self.function_name = function_name
        self.translator = translator
        self.call_function = call_function
        self.transaction_function = transaction_function
        self.estimate_function = estimate_function
    def transact(self, *args, **kargs):
        assert set(kargs.keys()).issubset(self.valid_kargs)
        call_data = self.translator.encode(self.function_name, args)
        return self.transaction_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=call_data,
            **kargs
        )
    def call(self, *args, **kargs):
        assert set(kargs.keys()).issubset(self.valid_kargs)
        call_data = self.translator.encode(self.function_name, args)
        outcome = self.call_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=call_data,
            **kargs
        )
        if not outcome:
            return outcome
        decoded = self.translator.decode(self.function_name, outcome)
        if len(decoded) == 1:
            return decoded[0]
        return decoded
    def estimate_gas(self, *args, **kargs):
        if not self.estimate_function:
            raise RuntimeError('estimate_function wasnt supplied.')
        assert set(kargs.keys()).issubset(self.valid_kargs)
        call_data = self.translator.encode(self.function_name, args)
        return self.estimate_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=call_data,
            **kargs
        )
    def __call__(self, *args, **kargs):
        is_constant = self.translator.function_data[self.function_name]['is_constant']
        if is_constant:
            return self.call(*args, **kargs)
        return self.transact(*args, **kargs)
class ContractProxy(object):
    """ Exposes a smart contract as a python object.

    Every function listed in the contract ABI becomes an attribute of the
    instance; invoking it encodes the arguments, performs the call or the
    transaction and decodes the result.
    """
    def __init__(self, sender, abi, address, call_func, transact_func, estimate_function=None):
        self.abi = abi
        self.address = normalize_address(address)
        self.translator = ContractTranslator(abi)
        normalized_sender = normalize_address(sender)
        for name in self.translator.function_data:
            proxy = MethodProxy(
                normalized_sender,
                self.address,
                name,
                self.translator,
                call_func,
                transact_func,
                estimate_function,
            )
            signature = self.translator.function_data[name]['signature']
            rendered_signature = ', '.join(
                '{type} {argument}'.format(type=type_, argument=argument)
                for type_, argument in signature
            )
            proxy.__doc__ = '{function_name}({function_signature})'.format(
                function_name=name,
                function_signature=rendered_signature,
            )
            setattr(self, name, proxy)
# backwards compatibility
ABIContract = ContractProxy
| {
"repo_name": "ethereum/pyethapp",
"path": "pyethapp/rpc_client.py",
"copies": "1",
"size": "30408",
"license": "mit",
"hash": 9204775271048691000,
"line_mean": 34.816254417,
"line_max": 125,
"alpha_frac": 0.59234412,
"autogenerated": false,
"ratio": 4.514251781472684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004417367501968077,
"num_lines": 849
} |
""" A simple way of interacting to a ethereum node through JSON RPC commands. """
import logging
import time
import warnings
import json
import gevent
from ethereum.abi import ContractTranslator
from ethereum.keys import privtoaddr
from ethereum.transactions import Transaction
from ethereum.utils import denoms, int_to_big_endian, big_endian_to_int, normalize_address
from ethereum._solidity import compile_file, solidity_unresolved_symbols, solidity_library_symbol, solidity_resolve_symbols
from tinyrpc.protocols.jsonrpc import JSONRPCErrorResponse, JSONRPCSuccessResponse
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport
from pyethapp.jsonrpc import address_encoder as _address_encoder
from pyethapp.jsonrpc import (
data_encoder, data_decoder, address_decoder, default_gasprice,
default_startgas, quantity_encoder, quantity_decoder,
)
# pylint: disable=invalid-name,too-many-arguments,too-few-public-methods
# The number of arguments an it's names are determined by the JSON-RPC spec
z_address = '\x00' * 20
log = logging.getLogger(__name__)
def address_encoder(address):
    """ Hex encode `address` (adding the '0x' prefix), normalizing it first. """
    return _address_encoder(normalize_address(address, allow_blank=True))
def block_tag_encoder(val):
    """ Encode a block number (int) or block tag string for the wire format.

    Falsy values encode to None; anything else must be an int or one of the
    'latest'/'pending' tags.
    """
    if isinstance(val, int):
        return quantity_encoder(val)
    if val and isinstance(val, bytes):
        assert val in ('latest', 'pending')
        return data_encoder(val)
    assert not val
def topic_encoder(topic):
    """ Encode an integer log topic as 0x-prefixed big endian hex. """
    assert isinstance(topic, (int, long))  # NOTE: `long` exists only on Python 2
    return data_encoder(int_to_big_endian(topic))
def topic_decoder(topic):
    """ Decode a 0x-prefixed hex log topic back into an integer. """
    return big_endian_to_int(data_decoder(topic))
def deploy_dependencies_symbols(all_contract):
    """ Map each contract name to the list of contracts its hex bytecode still
    needs linked in, based on the solidity library placeholder symbols.

    Raises:
        ValueError: If two contract names produce the same library symbol.
    """
    symbol_to_name = {}
    for name in all_contract:
        symbol = solidity_library_symbol(name)
        if symbol in symbol_to_name:
            raise ValueError('Conflicting library names.')
        symbol_to_name[symbol] = name
    dependencies = {}
    for name, contract in all_contract.items():
        unresolved = solidity_unresolved_symbols(contract['bin_hex'])
        dependencies[name] = [symbol_to_name[symbol] for symbol in unresolved]
    return dependencies
def dependencies_order_of_build(target_contract, dependencies_map):
    """ Return an ordered list of contracts that is sufficient to successfully
    deploy the target contract.

    Every (transitive) dependency appears before the contracts that depend on
    it, `target_contract` itself comes last, and each contract appears
    exactly once.

    Args:
        target_contract: Name of the contract to deploy.
        dependencies_map: Mapping from contract name to the list of contract
            names it directly depends on.

    Raises:
        ValueError: If `dependencies_map` is non-empty but contains no entry
            for `target_contract`.

    Note:
        This function assumes that the `dependencies_map` is an acyclic graph.
    """
    if len(dependencies_map) == 0:
        return [target_contract]
    if target_contract not in dependencies_map:
        raise ValueError('no dependencies defined for {}'.format(target_contract))
    # `order` holds dependents before their dependencies; it is reversed at
    # the end to obtain the deployment order.
    order = [target_contract]
    todo = list(dependencies_map[target_contract])
    while len(todo):
        target_contract = todo.pop(0)
        target_pos = len(order)
        for dependency in dependencies_map[target_contract]:
            # we need to add the current contract before all it's dependencies
            if dependency in order:
                target_pos = order.index(dependency)
            else:
                todo.append(dependency)
        order.insert(target_pos, target_contract)
    order.reverse()
    # Bug fix: a contract reachable through more than one dependency path was
    # previously listed (and therefore deployed) multiple times. Keep only
    # the first occurrence; it always precedes every contract depending on it.
    seen = set()
    unique_order = []
    for contract in order:
        if contract not in seen:
            seen.add(contract)
            unique_order.append(contract)
    return unique_order
class JSONRPCClientReplyError(Exception):
    """ Raised when the server returns a JSON-RPC error response, or a reply
    that cannot be recognized as success or error.
    """
    pass
class JSONRPCClient(object):
    """ Thin JSON-RPC client for an ethereum node listening on 127.0.0.1.

    All requests go through :meth:`call`; the ``eth_*`` helpers encode their
    arguments into the hex based wire format and decode the results.

    NOTE(review): this is Python 2 only code (`print` statements in
    :meth:`call`, ``str.decode('hex')`` usage elsewhere).
    """
    # the protocol object is stateless, a single instance is shared
    protocol = JSONRPCProtocol()
    def __init__(self, port=4000, print_communication=True, privkey=None, sender=None):
        """ Connect to the node at http://127.0.0.1:<port>.

        Args:
            port (int): Port of the node's JSON-RPC HTTP endpoint.
            print_communication (bool): Echo every request/reply to stdout.
            privkey: If given, transactions are signed locally with this key.
            sender: Default sender address; when neither `sender` nor
                `privkey` is given it lazily defaults to the node's coinbase.
        """
        self.transport = HttpPostClientTransport('http://127.0.0.1:{}'.format(port))
        self.print_communication = print_communication
        self.privkey = privkey
        self._sender = sender
        self.port = port
    def __repr__(self):
        return '<JSONRPCClient @%d>' % self.port
    @property
    def sender(self):
        # a local private key always determines the sender; otherwise use the
        # configured address, falling back (cached) to the node's coinbase
        if self.privkey:
            return privtoaddr(self.privkey)
        if self._sender is None:
            self._sender = self.coinbase
        return self._sender
    @property
    def coinbase(self):
        """ Return the client coinbase address. """
        return address_decoder(self.call('eth_coinbase'))
    def blocknumber(self):
        """ Return the most recent block. """
        return quantity_decoder(self.call('eth_blockNumber'))
    def nonce(self, address):
        """ Return the transaction count of `address`, including pending
        transactions.
        """
        # accept the 40-char hex form as well as the raw 20 byte address
        if len(address) == 40:
            address = address.decode('hex')
        res = self.call('eth_getTransactionCount', address_encoder(address), 'pending')
        return quantity_decoder(res)
    def balance(self, account):
        """ Return the balance of the account of given address. """
        res = self.call('eth_getBalance', address_encoder(account), 'pending')
        return quantity_decoder(res)
    def gaslimit(self):
        """ Return the node's 'eth_gasLimit' result as an int. """
        return quantity_decoder(self.call('eth_gasLimit'))
    def lastgasprice(self):
        """ Return the node's 'eth_lastGasPrice' result as an int. """
        return quantity_decoder(self.call('eth_lastGasPrice'))
    def new_abi_contract(self, contract_interface, address):
        # kept for callers of the old name only
        warnings.warn('deprecated, use new_contract_proxy', DeprecationWarning)
        return self.new_contract_proxy(contract_interface, address)
    def new_contract_proxy(self, contract_interface, address):
        """ Return a proxy for interacting with a smart contract.
        Args:
            contract_interface: The contract interface as defined by the json.
            address: The contract's address.
        """
        sender = self.sender or privtoaddr(self.privkey)
        return ContractProxy(
            sender,
            contract_interface,
            address,
            self.eth_call,
            self.send_transaction,
        )
    def deploy_solidity_contract(self, sender, contract_name, all_contracts,  # pylint: disable=too-many-locals
                                 libraries, contructor_paramenters, timeout=None):
        """ Deploy `contract_name` (first deploying and linking any library it
        depends on) and return a proxy for the deployed contract.

        Args:
            sender: Address the deployment transactions are sent from.
            contract_name: Key into `all_contracts` of the target contract.
            all_contracts: Compiler output mapping name to 'abi'/'bin_hex'.
            libraries: Mapping of already deployed library name to address.
            contructor_paramenters: Constructor arguments, or a falsy value.
            timeout (float): Per-transaction poll timeout in seconds.
        """
        if contract_name not in all_contracts:
            raise ValueError('Unkonwn contract {}'.format(contract_name))
        libraries = dict(libraries)
        contract = all_contracts[contract_name]
        contract_interface = contract['abi']
        symbols = solidity_unresolved_symbols(contract['bin_hex'])
        if symbols:
            available_symbols = map(solidity_library_symbol, all_contracts.keys())  # pylint: disable=bad-builtin
            unknown_symbols = set(symbols) - set(available_symbols)
            if unknown_symbols:
                msg = 'Cannot deploy contract, known symbols {}, unresolved symbols {}.'.format(
                    available_symbols,
                    unknown_symbols,
                )
                raise Exception(msg)
            dependencies = deploy_dependencies_symbols(all_contracts)
            deployment_order = dependencies_order_of_build(contract_name, dependencies)
            deployment_order.pop()  # remove `contract_name` from the list
            log.debug('Deploing dependencies: {}'.format(str(deployment_order)))
            # deploy each library and record its address, so that the
            # placeholders in the dependent bytecode can be resolved
            for deploy_contract in deployment_order:
                dependency_contract = all_contracts[deploy_contract]
                hex_bytecode = solidity_resolve_symbols(dependency_contract['bin_hex'], libraries)
                bytecode = hex_bytecode.decode('hex')
                dependency_contract['bin_hex'] = hex_bytecode
                dependency_contract['bin'] = bytecode
                transaction_hash = self.send_transaction(
                    sender,
                    to='',
                    data=bytecode,
                    gasprice=denoms.wei,
                )
                self.poll(transaction_hash.decode('hex'), timeout=timeout)
                receipt = self.call('eth_getTransactionReceipt', '0x' + transaction_hash)
                contract_address = receipt['contractAddress']
                libraries[deploy_contract] = contract_address[2:]  # remove the hexadecimal prefix 0x from the address
            hex_bytecode = solidity_resolve_symbols(contract['bin_hex'], libraries)
            bytecode = hex_bytecode.decode('hex')
            contract['bin_hex'] = hex_bytecode
            contract['bin'] = bytecode
        if contructor_paramenters:
            translator = ContractTranslator(contract_interface)
            parameters = translator.encode_constructor_arguments(contructor_paramenters)
            bytecode = contract['bin'] + parameters
        else:
            bytecode = contract['bin']
        transaction_hash = self.send_transaction(
            sender,
            to='',
            data=bytecode,
            gasprice=denoms.wei,
        )
        self.poll(transaction_hash.decode('hex'), timeout=timeout)
        receipt = self.call('eth_getTransactionReceipt', '0x' + transaction_hash)
        contract_address = receipt['contractAddress']
        return ContractProxy(
            sender,
            contract_interface,
            contract_address,
            self.eth_call,
            self.send_transaction,
        )
    def find_block(self, condition):
        """Query all blocks one by one and return the first one for which
        `condition(block)` evaluates to `True`.
        """
        i = 0
        while True:
            block = self.call('eth_getBlockByNumber', quantity_encoder(i), True)
            if condition(block) or not block:
                return block
            i += 1
    def new_filter(self, fromBlock=None, toBlock=None, address=None, topics=None):
        """ Creates a filter object, based on filter options, to notify when
        the state changes (logs). To check if the state has changed, call
        eth_getFilterChanges.
        """
        json_data = {
            'fromBlock': block_tag_encoder(fromBlock or ''),
            'toBlock': block_tag_encoder(toBlock or ''),
        }
        if address is not None:
            json_data['address'] = address_encoder(address)
        if topics is not None:
            if not isinstance(topics, list):
                raise ValueError('topics must be a list')
            json_data['topics'] = [topic_encoder(topic) for topic in topics]
        filter_id = self.call('eth_newFilter', json_data)
        return quantity_decoder(filter_id)
    def filter_changes(self, fid):
        """ Return the new events of filter `fid` since the last poll, with
        every known field decoded from the hex based wire format.
        """
        changes = self.call('eth_getFilterChanges', quantity_encoder(fid))
        if not changes:
            return None
        elif isinstance(changes, bytes):
            return data_decoder(changes)
        else:
            decoders = dict(blockHash=data_decoder,
                            transactionHash=data_decoder,
                            data=data_decoder,
                            address=address_decoder,
                            topics=lambda x: [topic_decoder(t) for t in x],
                            blockNumber=quantity_decoder,
                            logIndex=quantity_decoder,
                            transactionIndex=quantity_decoder)
            return [{k: decoders[k](v) for k, v in c.items() if v is not None} for c in changes]
    def call(self, method, *args):
        """ Do the request and returns the result.
        Args:
            method (str): The RPC method.
            args: The encoded arguments expected by the method.
                - Object arguments must be supplied as an dictionary.
                - Quantity arguments must be hex encoded starting with '0x' and
                  without left zeros.
                - Data arguments must be hex encoded starting with '0x'
        """
        request = self.protocol.create_request(method, args)
        reply = self.transport.send_message(request.serialize())
        if self.print_communication:
            # Python 2 print statements
            print json.dumps(json.loads(request.serialize()), indent=2)
            print reply
        jsonrpc_reply = self.protocol.parse_reply(reply)
        if isinstance(jsonrpc_reply, JSONRPCSuccessResponse):
            return jsonrpc_reply.result
        elif isinstance(jsonrpc_reply, JSONRPCErrorResponse):
            raise JSONRPCClientReplyError(jsonrpc_reply.error)
        else:
            raise JSONRPCClientReplyError('Unknown type of JSONRPC reply')
    __call__ = call
    def send_transaction(self, sender, to, value=0, data='', startgas=0,
                         gasprice=10 * denoms.szabo, nonce=None):
        """ Helper to send signed messages.
        This method will use the `privkey` provided in the constructor to
        locally sign the transaction. This requires an extended server
        implementation that accepts the variables v, r, and s.
        """
        if not self.privkey and not sender:
            raise ValueError('Either privkey or sender needs to be supplied.')
        if self.privkey and not sender:
            sender = privtoaddr(self.privkey)
            if nonce is None:
                nonce = self.nonce(sender)
        elif self.privkey:
            if sender != privtoaddr(self.privkey):
                raise ValueError('sender for a different privkey.')
            if nonce is None:
                nonce = self.nonce(sender)
        else:
            # no local key: the server assigns the nonce during signing
            if nonce is None:
                nonce = 0
        if not startgas:
            startgas = self.gaslimit() - 1
        tx = Transaction(nonce, gasprice, startgas, to=to, value=value, data=data)
        if self.privkey:
            # add the fields v, r and s
            tx.sign(self.privkey)
        tx_dict = tx.to_dict()
        # rename the fields to match the eth_sendTransaction signature
        tx_dict.pop('hash')
        tx_dict['sender'] = sender
        tx_dict['gasPrice'] = tx_dict.pop('gasprice')
        tx_dict['gas'] = tx_dict.pop('startgas')
        res = self.eth_sendTransaction(**tx_dict)
        assert len(res) in (20, 32)
        return res.encode('hex')
    def eth_sendTransaction(self, nonce=None, sender='', to='', value=0, data='',
                            gasPrice=default_gasprice, gas=default_startgas,
                            v=None, r=None, s=None):
        """ Creates new message call transaction or a contract creation, if the
        data field contains code.
        Note:
            The support for local signing through the variables v,r,s is not
            part of the standard spec, a extended server is required.
        Args:
            from (address): The 20 bytes address the transaction is send from.
            to (address): DATA, 20 Bytes - (optional when creating new
                contract) The address the transaction is directed to.
            gas (int): Gas provided for the transaction execution. It will
                return unused gas.
            gasPrice (int): gasPrice used for each paid gas.
            value (int): Value send with this transaction.
            data (bin): The compiled code of a contract OR the hash of the
                invoked method signature and encoded parameters.
            nonce (int): This allows to overwrite your own pending transactions
                that use the same nonce.
        """
        if to == '' and data.isalnum():
            warnings.warn(
                'Verify that the data parameter is _not_ hex encoded, if this is the case '
                'the data will be double encoded and result in unexpected '
                'behavior.'
            )
        if to == '0' * 40:
            warnings.warn('For contract creating the empty string must be used.')
        if not sender and not (v and r and s):
            raise ValueError('Either sender or v, r, s needs to be informed.')
        json_data = {
            'from': address_encoder(sender),
            'to': data_encoder(normalize_address(to, allow_blank=True)),
            'nonce': quantity_encoder(nonce),
            'value': quantity_encoder(value),
            'gasPrice': quantity_encoder(gasPrice),
            'gas': quantity_encoder(gas),
            'data': data_encoder(data),
        }
        if v and r and s:
            json_data['v'] = quantity_encoder(v)
            json_data['r'] = quantity_encoder(r)
            json_data['s'] = quantity_encoder(s)
        res = self.call('eth_sendTransaction', json_data)
        return data_decoder(res)
    def eth_call(self, sender='', to='', value=0, data='',
                 startgas=default_startgas, gasprice=default_gasprice,
                 block_number=None):
        """ Executes a new message call immediately without creating a
        transaction on the block chain.
        Args:
            from: The address the transaction is send from.
            to: The address the transaction is directed to.
            gas (int): Gas provided for the transaction execution. eth_call
                consumes zero gas, but this parameter may be needed by some
                executions.
            gasPrice (int): gasPrice used for each paid gas.
            value (int): Integer of the value send with this transaction.
            data (bin): Hash of the method signature and encoded parameters.
                For details see Ethereum Contract ABI.
            block_number: Determines the state of ethereum used in the
                call.
        """
        # only include the fields that were explicitly provided
        json_data = dict()
        if sender is not None:
            json_data['from'] = address_encoder(sender)
        if to is not None:
            json_data['to'] = data_encoder(to)
        if value is not None:
            json_data['value'] = quantity_encoder(value)
        if gasprice is not None:
            json_data['gasPrice'] = quantity_encoder(gasprice)
        if startgas is not None:
            json_data['gas'] = quantity_encoder(startgas)
        if data is not None:
            json_data['data'] = data_encoder(data)
        if block_number is not None:
            res = self.call('eth_call', json_data, block_number)
        else:
            res = self.call('eth_call', json_data)
        return data_decoder(res)
    def poll(self, transaction_hash, confirmations=None, timeout=None):
        """ Wait until the `transaction_hash` is applied or rejected.
        Args:
            transaction_hash (hash): Transaction hash that we are waiting for.
            confirmations (int): Number of block confirmations that we will
                wait for.
            timeout (float): Timeout in seconds, raise an Exception on
                timeout.
        """
        if transaction_hash.startswith('0x'):
            warnings.warn(
                'transaction_hash seems to be already encoded, this will result '
                'in unexpected behavior'
            )
        if len(transaction_hash) != 32:
            raise ValueError('transaction_hash length must be 32 (it might be hex encode)')
        deadline = None
        if timeout:
            deadline = time.time() + timeout
        transaction_hash = data_encoder(transaction_hash)
        # first wait for the transaction to leave the pending block
        pending_block = self.call('eth_getBlockByNumber', 'pending', True)
        while any(tx['hash'] == transaction_hash for tx in pending_block['transactions']):
            if deadline and time.time() > deadline:
                raise Exception('timeout')
            gevent.sleep(.5)
            pending_block = self.call('eth_getBlockByNumber', 'pending', True)
        transaction = self.call('eth_getTransactionByHash', transaction_hash)
        if transaction is None:
            # either wrong transaction hash or the transaction was invalid
            log.error('transaction {} not found.'.format(transaction_hash))
            return
        if confirmations is None:
            return
        # this will wait for both APPLIED and REVERTED transactions
        transaction_block = quantity_decoder(transaction['blockNumber'])
        confirmation_block = transaction_block + confirmations
        block_number = self.blocknumber()
        while confirmation_block > block_number:
            if deadline and time.time() > deadline:
                raise Exception('timeout')
            gevent.sleep(.5)
            block_number = self.blocknumber()
class MethodProxy(object):
    """ Callable wrapper around a single contract function.

    Calling the instance dispatches to `call` for constant functions and to
    `transact` for state changing ones.
    """
    # keyword arguments accepted by the underlying call/transact functions
    valid_kargs = {'gasprice', 'startgas', 'value'}
    def __init__(self, sender, contract_address, function_name, translator,
                 call_function, transaction_function):
        self.sender = sender
        self.contract_address = contract_address
        self.function_name = function_name
        self.translator = translator
        self.call_function = call_function
        self.transaction_function = transaction_function
    def transact(self, *args, **kargs):
        assert set(kargs.keys()).issubset(self.valid_kargs)
        call_data = self.translator.encode(self.function_name, args)
        return self.transaction_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=call_data,
            **kargs
        )
    def call(self, *args, **kargs):
        assert set(kargs.keys()).issubset(self.valid_kargs)
        call_data = self.translator.encode(self.function_name, args)
        outcome = self.call_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=call_data,
            **kargs
        )
        if not outcome:
            return outcome
        decoded = self.translator.decode(self.function_name, outcome)
        if len(decoded) == 1:
            return decoded[0]
        return decoded
    def __call__(self, *args, **kargs):
        is_constant = self.translator.function_data[self.function_name]['is_constant']
        if is_constant:
            return self.call(*args, **kargs)
        return self.transact(*args, **kargs)
class ContractProxy(object):
    """ Exposes a smart contract as a python object.

    Every function listed in the contract ABI becomes an attribute of the
    instance; invoking it encodes the arguments, performs the call or the
    transaction and decodes the result.
    """
    def __init__(self, sender, abi, address, call_func, transact_func):
        self.abi = abi
        self.address = normalize_address(address)
        self.translator = ContractTranslator(abi)
        normalized_sender = normalize_address(sender)
        for name in self.translator.function_data:
            proxy = MethodProxy(
                normalized_sender,
                self.address,
                name,
                self.translator,
                call_func,
                transact_func,
            )
            signature = self.translator.function_data[name]['signature']
            rendered_signature = ', '.join(
                '{type} {argument}'.format(type=type_, argument=argument)
                for type_, argument in signature
            )
            proxy.__doc__ = '{function_name}({function_signature})'.format(
                function_name=name,
                function_signature=rendered_signature,
            )
            setattr(self, name, proxy)
# backwards compatibility
ABIContract = ContractProxy
| {
"repo_name": "RomanZacharia/pyethapp",
"path": "pyethapp/rpc_client.py",
"copies": "1",
"size": "23308",
"license": "mit",
"hash": -8914590944920157000,
"line_mean": 34.9691358025,
"line_max": 123,
"alpha_frac": 0.6040844345,
"autogenerated": false,
"ratio": 4.416903543680121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006076039678957665,
"num_lines": 648
} |
""" A simple way of interacting to a ethereum node through JSON RPC commands. """
import logging
import warnings
import json
import gevent
from ethereum.abi import ContractTranslator
from ethereum.tools.keys import privtoaddr
from ethereum.transactions import Transaction
from ethereum.utils import denoms, int_to_big_endian, big_endian_to_int, normalize_address
from ethereum.tools._solidity import solidity_unresolved_symbols, solidity_library_symbol, solidity_resolve_symbols
from tinyrpc.protocols.jsonrpc import JSONRPCErrorResponse, JSONRPCSuccessResponse
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport
from pyethapp.jsonrpc import address_encoder as _address_encoder
from pyethapp.jsonrpc import (
data_encoder, data_decoder, address_decoder, default_gasprice,
default_startgas, quantity_encoder, quantity_decoder,
)
# pylint: disable=invalid-name,too-many-arguments,too-few-public-methods
# The number of arguments an it's names are determined by the JSON-RPC spec
z_address = '\x00' * 20
log = logging.getLogger(__name__)
def address_encoder(address):
    """ Hex encode `address` (adding the '0x' prefix), normalizing it first. """
    return _address_encoder(normalize_address(address, allow_blank=True))
def block_tag_encoder(val):
    """ Encode a block number (int) or block tag string for the wire format.

    Falsy values encode to None; anything else must be an int or one of the
    'latest'/'pending' tags.
    """
    if isinstance(val, int):
        return quantity_encoder(val)
    if val and isinstance(val, bytes):
        assert val in ('latest', 'pending')
        return data_encoder(val)
    assert not val
def topic_encoder(topic):
    """ Encode an integer log topic as 0x-prefixed big endian hex. """
    assert isinstance(topic, (int, long))  # NOTE: `long` exists only on Python 2
    return data_encoder(int_to_big_endian(topic))
def topic_decoder(topic):
    """ Decode a 0x-prefixed hex log topic back into an integer. """
    return big_endian_to_int(data_decoder(topic))
def deploy_dependencies_symbols(all_contract):
    """ Map each contract name to the list of contracts its hex bytecode still
    needs linked in, based on the solidity library placeholder symbols.

    Raises:
        ValueError: If two contract names produce the same library symbol.
    """
    symbol_to_name = {}
    for name in all_contract:
        symbol = solidity_library_symbol(name)
        if symbol in symbol_to_name:
            raise ValueError('Conflicting library names.')
        symbol_to_name[symbol] = name
    dependencies = {}
    for name, contract in all_contract.items():
        unresolved = solidity_unresolved_symbols(contract['bin_hex'])
        dependencies[name] = [symbol_to_name[symbol] for symbol in unresolved]
    return dependencies
def dependencies_order_of_build(target_contract, dependencies_map):
    """ Return an ordered list of contracts that is sufficient to successfully
    deploy the target contract.

    Every (transitive) dependency appears before the contracts that depend on
    it, `target_contract` itself comes last, and each contract appears
    exactly once.

    Args:
        target_contract: Name of the contract to deploy.
        dependencies_map: Mapping from contract name to the list of contract
            names it directly depends on.

    Raises:
        ValueError: If `dependencies_map` is non-empty but contains no entry
            for `target_contract`.

    Note:
        This function assumes that the `dependencies_map` is an acyclic graph.
    """
    if len(dependencies_map) == 0:
        return [target_contract]
    if target_contract not in dependencies_map:
        raise ValueError('no dependencies defined for {}'.format(target_contract))
    # `order` holds dependents before their dependencies; it is reversed at
    # the end to obtain the deployment order.
    order = [target_contract]
    todo = list(dependencies_map[target_contract])
    while len(todo):
        target_contract = todo.pop(0)
        target_pos = len(order)
        for dependency in dependencies_map[target_contract]:
            # we need to add the current contract before all it's dependencies
            if dependency in order:
                target_pos = order.index(dependency)
            else:
                todo.append(dependency)
        order.insert(target_pos, target_contract)
    order.reverse()
    # Bug fix: a contract reachable through more than one dependency path was
    # previously listed (and therefore deployed) multiple times. Keep only
    # the first occurrence; it always precedes every contract depending on it.
    seen = set()
    unique_order = []
    for contract in order:
        if contract not in seen:
            seen.add(contract)
            unique_order.append(contract)
    return unique_order
class JSONRPCClientReplyError(Exception):
    """ Raised when the server returns a JSON-RPC error response, or a reply
    that cannot be recognized as success or error.
    """
    pass
class JSONRPCClient(object):
protocol = JSONRPCProtocol()
def __init__(self, host='127.0.0.1', port=4000, print_communication=True,
privkey=None, sender=None, use_ssl=False, transport=None):
"""
Args:
host (str): host address to connect to.
port (int): port number to connect to.
print_communication (bool): True to print the rpc communication.
privkey: specify privkey for local signing
sender (address): the sender address, computed from privkey if provided.
use_ssl (bool): Use https instead of http.
transport: Tiny rpc transport instance.
"""
if transport is None:
self.transport = HttpPostClientTransport('{}://{}:{}'.format(
'https' if use_ssl else 'http', host, port), headers={'content-type': 'application/json'})
else:
self.transport = transport
self.print_communication = print_communication
self.privkey = privkey
self._sender = sender
self.port = port
def __repr__(self):
return '<JSONRPCClient @%d>' % self.port
@property
def sender(self):
if self.privkey:
return privtoaddr(self.privkey)
if self._sender is None:
self._sender = self.coinbase
return self._sender
@property
def coinbase(self):
""" Return the client coinbase address. """
return address_decoder(self.call('eth_coinbase'))
def blocknumber(self):
""" Return the most recent block. """
return quantity_decoder(self.call('eth_blockNumber'))
def nonce(self, address):
if len(address) == 40:
address = address.decode('hex')
try:
res = self.call('eth_nonce', address_encoder(address), 'pending')
return quantity_decoder(res)
except JSONRPCClientReplyError as e:
if e.message == 'Method not found':
raise JSONRPCClientReplyError(
"'eth_nonce' is not supported by your endpoint (pyethapp only). "
"For transactions use server-side nonces: "
"('eth_sendTransaction' with 'nonce=None')")
raise e
def balance(self, account):
""" Return the balance of the account of given address. """
res = self.call('eth_getBalance', address_encoder(account), 'pending')
return quantity_decoder(res)
def gaslimit(self):
return quantity_decoder(self.call('eth_gasLimit'))
def lastgasprice(self):
return quantity_decoder(self.call('eth_lastGasPrice'))
def new_abi_contract(self, contract_interface, address):
warnings.warn('deprecated, use new_contract_proxy', DeprecationWarning)
return self.new_contract_proxy(contract_interface, address)
def new_contract_proxy(self, contract_interface, address):
""" Return a proxy for interacting with a smart contract.
Args:
contract_interface: The contract interface as defined by the json.
address: The contract's address.
"""
sender = self.sender or privtoaddr(self.privkey)
return ContractProxy(
sender,
contract_interface,
address,
self.eth_call,
self.send_transaction,
self.eth_estimateGas,
)
def deploy_solidity_contract(self, sender, contract_name, all_contracts, # pylint: disable=too-many-locals
libraries, constructor_parameters, timeout=None, gasprice=default_gasprice):
if contract_name not in all_contracts:
raise ValueError('Unkonwn contract {}'.format(contract_name))
libraries = dict(libraries)
contract = all_contracts[contract_name]
contract_interface = contract['abi']
symbols = solidity_unresolved_symbols(contract['bin_hex'])
if symbols:
available_symbols = map(solidity_library_symbol, all_contracts.keys()) # pylint: disable=bad-builtin
unknown_symbols = set(symbols) - set(available_symbols)
if unknown_symbols:
msg = 'Cannot deploy contract, known symbols {}, unresolved symbols {}.'.format(
available_symbols,
unknown_symbols,
)
raise Exception(msg)
dependencies = deploy_dependencies_symbols(all_contracts)
deployment_order = dependencies_order_of_build(contract_name, dependencies)
deployment_order.pop() # remove `contract_name` from the list
log.debug('Deploing dependencies: {}'.format(str(deployment_order)))
for deploy_contract in deployment_order:
dependency_contract = all_contracts[deploy_contract]
hex_bytecode = solidity_resolve_symbols(dependency_contract['bin_hex'], libraries)
bytecode = hex_bytecode.decode('hex')
dependency_contract['bin_hex'] = hex_bytecode
dependency_contract['bin'] = bytecode
transaction_hash_hex = self.send_transaction(
sender,
to='',
data=bytecode,
gasprice=gasprice,
)
transaction_hash = transaction_hash_hex.decode('hex')
self.poll(transaction_hash, timeout=timeout)
receipt = self.eth_getTransactionReceipt(transaction_hash)
contract_address = receipt['contractAddress']
contract_address = contract_address[2:] # remove the hexadecimal prefix 0x from the address
libraries[deploy_contract] = contract_address
deployed_code = self.eth_getCode(contract_address.decode('hex'))
if deployed_code == '0x':
raise RuntimeError("Contract address has no code, check gas usage.")
hex_bytecode = solidity_resolve_symbols(contract['bin_hex'], libraries)
bytecode = hex_bytecode.decode('hex')
contract['bin_hex'] = hex_bytecode
contract['bin'] = bytecode
if constructor_parameters:
translator = ContractTranslator(contract_interface)
parameters = translator.encode_constructor_arguments(constructor_parameters)
bytecode = contract['bin'] + parameters
else:
bytecode = contract['bin']
transaction_hash_hex = self.send_transaction(
sender,
to='',
data=bytecode,
gasprice=gasprice,
)
transaction_hash = transaction_hash_hex.decode('hex')
self.poll(transaction_hash, timeout=timeout)
receipt = self.eth_getTransactionReceipt(transaction_hash)
contract_address = receipt['contractAddress']
deployed_code = self.eth_getCode(contract_address[2:].decode('hex'))
if deployed_code == '0x':
raise RuntimeError("Deployment of {} failed. Contract address has no code, check gas usage.".format(
contract_name
))
return self.new_contract_proxy(
contract_interface,
contract_address,
)
def find_block(self, condition):
"""Query all blocks one by one and return the first one for which
`condition(block)` evaluates to `True`.
"""
i = 0
while True:
block = self.call('eth_getBlockByNumber', quantity_encoder(i), True)
if condition(block) or not block:
return block
i += 1
def new_filter(self, fromBlock=None, toBlock=None, address=None, topics=None):
""" Creates a filter object, based on filter options, to notify when
the state changes (logs). To check if the state has changed, call
eth_getFilterChanges.
"""
json_data = {
'fromBlock': block_tag_encoder(fromBlock or ''),
'toBlock': block_tag_encoder(toBlock or ''),
}
if address is not None:
json_data['address'] = address_encoder(address)
if topics is not None:
if not isinstance(topics, list):
raise ValueError('topics must be a list')
json_data['topics'] = [topic_encoder(topic) for topic in topics]
filter_id = self.call('eth_newFilter', json_data)
return quantity_decoder(filter_id)
def filter_changes(self, fid):
changes = self.call('eth_getFilterChanges', quantity_encoder(fid))
if not changes:
return None
elif isinstance(changes, bytes):
return data_decoder(changes)
else:
decoders = dict(blockHash=data_decoder,
transactionHash=data_decoder,
data=data_decoder,
address=address_decoder,
topics=lambda x: [topic_decoder(t) for t in x],
blockNumber=quantity_decoder,
logIndex=quantity_decoder,
transactionIndex=quantity_decoder)
return [{k: decoders[k](v) for k, v in c.items() if v is not None} for c in changes]
def call(self, method, *args):
""" Do the request and returns the result.
Args:
method (str): The RPC method.
args: The encoded arguments expected by the method.
- Object arguments must be supplied as an dictionary.
- Quantity arguments must be hex encoded starting with '0x' and
without left zeros.
- Data arguments must be hex encoded starting with '0x'
"""
request = self.protocol.create_request(method, args)
reply = self.transport.send_message(request.serialize())
if self.print_communication:
print json.dumps(json.loads(request.serialize()), indent=2)
print reply
jsonrpc_reply = self.protocol.parse_reply(reply)
if isinstance(jsonrpc_reply, JSONRPCSuccessResponse):
return jsonrpc_reply.result
elif isinstance(jsonrpc_reply, JSONRPCErrorResponse):
raise JSONRPCClientReplyError(jsonrpc_reply.error)
else:
raise JSONRPCClientReplyError('Unknown type of JSONRPC reply')
__call__ = call
    def send_transaction(self, sender, to, value=0, data='', startgas=0,
                         gasprice=10 * denoms.szabo, nonce=None):
        """ Helper to send signed messages.

        This method will use the `privkey` provided in the constructor to
        locally sign the transaction. This requires an extended server
        implementation that accepts the variables v, r, and s.

        Args:
            sender: Address sending the transaction; may be empty when
                `self.privkey` is set (it is then derived from the key).
            to: Destination address ('' for contract creation).
            value (int): Amount transferred with the transaction.
            data: Raw binary payload (not hex encoded).
            startgas (int): Gas limit; falls back to the block gas limit - 1.
            gasprice (int): Price paid per unit of gas.
            nonce (int): Explicit nonce, overriding the automatic lookup.

        Returns:
            str: The hex-encoded result returned by eth_sendTransaction.
        """
        if not self.privkey and not sender:
            raise ValueError('Either privkey or sender needs to be supplied.')

        # Resolve sender/nonce: derive the sender from the private key when
        # absent, and reject a sender that does not match the key.
        if self.privkey and not sender:
            sender = privtoaddr(self.privkey)
            if nonce is None:
                nonce = self.nonce(sender)
        elif self.privkey:
            if sender != privtoaddr(self.privkey):
                raise ValueError('sender for a different privkey.')
            if nonce is None:
                nonce = self.nonce(sender)
        else:
            # Unsigned path: the server manages the nonce; 0 is a placeholder.
            if nonce is None:
                nonce = 0

        if not startgas:
            startgas = self.gaslimit() - 1

        tx = Transaction(nonce, gasprice, startgas, to=to, value=value, data=data)

        if self.privkey:
            # add the fields v, r and s
            tx.sign(self.privkey)

        tx_dict = tx.to_dict()

        # Transaction.to_dict() encodes 'data', so we need to decode it here.
        tx_dict['data'] = data_decoder(tx_dict['data'])

        # rename the fields to match the eth_sendTransaction signature
        tx_dict.pop('hash')
        tx_dict['sender'] = sender
        tx_dict['gasPrice'] = tx_dict.pop('gasprice')
        tx_dict['gas'] = tx_dict.pop('startgas')

        res = self.eth_sendTransaction(**tx_dict)
        # NOTE(review): 20 bytes looks like an address, 32 like a tx hash --
        # confirm which replies the server may legally return here.
        assert len(res) in (20, 32)
        return res.encode('hex')
def eth_sendTransaction(self, nonce=None, sender='', to='', value=0, data='',
gasPrice=default_gasprice, gas=default_startgas,
v=None, r=None, s=None):
""" Creates new message call transaction or a contract creation, if the
data field contains code.
Note:
The support for local signing through the variables v,r,s is not
part of the standard spec, a extended server is required.
Args:
from (address): The 20 bytes address the transaction is send from.
to (address): DATA, 20 Bytes - (optional when creating new
contract) The address the transaction is directed to.
gas (int): Gas provided for the transaction execution. It will
return unused gas.
gasPrice (int): gasPrice used for each paid gas.
value (int): Value send with this transaction.
data (bin): The compiled code of a contract OR the hash of the
invoked method signature and encoded parameters.
nonce (int): This allows to overwrite your own pending transactions
that use the same nonce.
"""
if to == '' and data.isalnum():
warnings.warn(
'Verify that the data parameter is _not_ hex encoded, if this is the case '
'the data will be double encoded and result in unexpected '
'behavior.'
)
if to == '0' * 40:
warnings.warn('For contract creating the empty string must be used.')
json_data = {
'to': data_encoder(normalize_address(to, allow_blank=True)),
'value': quantity_encoder(value),
'gasPrice': quantity_encoder(gasPrice),
'gas': quantity_encoder(gas),
'data': data_encoder(data),
}
if not sender and not (v and r and s):
raise ValueError('Either sender or v, r, s needs to be informed.')
if sender is not None:
json_data['from'] = address_encoder(sender)
if v and r and s:
json_data['v'] = quantity_encoder(v)
json_data['r'] = quantity_encoder(r)
json_data['s'] = quantity_encoder(s)
if nonce is not None:
json_data['nonce'] = quantity_encoder(nonce)
res = self.call('eth_sendTransaction', json_data)
return data_decoder(res)
def _format_call(self, sender='', to='', value=0, data='',
startgas=default_startgas, gasprice=default_gasprice):
""" Helper to format the transaction data. """
json_data = dict()
if sender is not None:
json_data['from'] = address_encoder(sender)
if to is not None:
json_data['to'] = data_encoder(to)
if value is not None:
json_data['value'] = quantity_encoder(value)
if gasprice is not None:
json_data['gasPrice'] = quantity_encoder(gasprice)
if startgas is not None:
json_data['gas'] = quantity_encoder(startgas)
if data is not None:
json_data['data'] = data_encoder(data)
return json_data
def eth_call(self, sender='', to='', value=0, data='',
startgas=default_startgas, gasprice=default_gasprice,
block_number='latest'):
""" Executes a new message call immediately without creating a
transaction on the block chain.
Args:
from: The address the transaction is send from.
to: The address the transaction is directed to.
gas (int): Gas provided for the transaction execution. eth_call
consumes zero gas, but this parameter may be needed by some
executions.
gasPrice (int): gasPrice used for each paid gas.
value (int): Integer of the value send with this transaction.
data (bin): Hash of the method signature and encoded parameters.
For details see Ethereum Contract ABI.
block_number: Determines the state of ethereum used in the
call.
"""
json_data = self._format_call(
sender,
to,
value,
data,
startgas,
gasprice,
)
res = self.call('eth_call', json_data, block_number)
return data_decoder(res)
def eth_estimateGas(self, sender='', to='', value=0, data='',
startgas=default_startgas, gasprice=default_gasprice):
""" Makes a call or transaction, which won't be added to the blockchain
and returns the used gas, which can be used for estimating the used
gas.
Args:
from: The address the transaction is send from.
to: The address the transaction is directed to.
gas (int): Gas provided for the transaction execution. eth_call
consumes zero gas, but this parameter may be needed by some
executions.
gasPrice (int): gasPrice used for each paid gas.
value (int): Integer of the value send with this transaction.
data (bin): Hash of the method signature and encoded parameters.
For details see Ethereum Contract ABI.
block_number: Determines the state of ethereum used in the
call.
"""
json_data = self._format_call(
sender,
to,
value,
data,
startgas,
gasprice,
)
res = self.call('eth_estimateGas', json_data)
return quantity_decoder(res)
def eth_getTransactionReceipt(self, transaction_hash):
""" Returns the receipt of a transaction by transaction hash.
Args:
transaction_hash: Hash of a transaction.
Returns:
A dict representing the transaction receipt object, or null when no
receipt was found.
"""
if transaction_hash.startswith('0x'):
warnings.warn(
'transaction_hash seems to be already encoded, this will'
' result in unexpected behavior'
)
if len(transaction_hash) != 32:
raise ValueError(
'transaction_hash length must be 32 (it might be hex encode)'
)
transaction_hash = data_encoder(transaction_hash)
return self.call('eth_getTransactionReceipt', transaction_hash)
def eth_getCode(self, address, block='latest'):
""" Returns code at a given address.
Args:
address: An address.
block_number: Integer block number, or the string "latest",
"earliest" or "pending".
"""
if address.startswith('0x'):
warnings.warn(
'address seems to be already encoded, this will result '
'in unexpected behavior'
)
if len(address) != 20:
raise ValueError(
'address length must be 20 (it might be hex encode)'
)
return self.call(
'eth_getCode',
address_encoder(address),
block,
)
def eth_getTransactionByHash(self, transaction_hash):
""" Returns the information about a transaction requested by
transaction hash.
"""
if transaction_hash.startswith('0x'):
warnings.warn(
'transaction_hash seems to be already encoded, this will'
' result in unexpected behavior'
)
if len(transaction_hash) != 32:
raise ValueError(
'transaction_hash length must be 32 (it might be hex encode)'
)
transaction_hash = data_encoder(transaction_hash)
return self.call('eth_getTransactionByHash', transaction_hash)
    def poll(self, transaction_hash, confirmations=None, timeout=None):
        """ Wait until the `transaction_hash` is applied or rejected.
        If timeout is None, this could wait indefinitely!

        Args:
            transaction_hash (hash): Transaction hash that we are waiting for.
            confirmations (int): Number of block confirmations that we will
                wait for.
            timeout (float): Timeout in seconds, raise an Exception on
                timeout.
        """
        if transaction_hash.startswith('0x'):
            warnings.warn(
                'transaction_hash seems to be already encoded, this will'
                ' result in unexpected behavior'
            )
        if len(transaction_hash) != 32:
            raise ValueError(
                'transaction_hash length must be 32 (it might be hex encode)'
            )
        transaction_hash = data_encoder(transaction_hash)
        # gevent.Timeout raises inside this greenlet once `timeout` elapses;
        # it is always cancelled in the finally block below.
        deadline = None
        if timeout:
            deadline = gevent.Timeout(timeout)
            deadline.start()
        try:
            # used to check if the transaction was removed, this could happen
            # if gas price is to low:
            #
            # > Transaction (acbca3d6) below gas price (tx=1 Wei ask=18
            # > Shannon). All sequential txs from this address(7d0eae79)
            # > will be ignored
            #
            last_result = None
            while True:
                # Could return None for a short period of time, until the
                # transaction is added to the pool
                transaction = self.call('eth_getTransactionByHash', transaction_hash)
                # if the transaction was added to the pool and then removed
                if transaction is None and last_result is not None:
                    raise Exception('invalid transaction, check gas price')
                # the transaction was added to the pool and mined
                if transaction and transaction['blockNumber'] is not None:
                    break
                last_result = transaction
                gevent.sleep(.5)
            if confirmations:
                # this will wait for both APPLIED and REVERTED transactions
                transaction_block = quantity_decoder(transaction['blockNumber'])
                confirmation_block = transaction_block + confirmations
                # Busy-wait (0.5s steps) until the chain advances past the
                # requested confirmation depth.
                block_number = self.blocknumber()
                while block_number < confirmation_block:
                    gevent.sleep(.5)
                    block_number = self.blocknumber()
        except gevent.Timeout:
            raise Exception('timeout when polling for transaction')
        finally:
            if deadline:
                deadline.cancel()
class MethodProxy(object):
    """ A callable interface that exposes a contract function.

    Calling the proxy dispatches to `call` for constant functions and to
    `transact` otherwise; `estimate_gas` asks the server for a gas estimate.
    """

    # Keyword arguments accepted by the underlying call/transact functions.
    valid_kargs = {'gasprice', 'startgas', 'value'}

    def __init__(self, sender, contract_address, function_name, translator,
                 call_function, transaction_function, estimate_function=None):
        self.sender = sender
        self.contract_address = contract_address
        self.function_name = function_name
        self.translator = translator
        self.call_function = call_function
        self.transaction_function = transaction_function
        self.estimate_function = estimate_function

    def transact(self, *args, **kargs):
        """Send a state-changing transaction invoking this function."""
        assert set(kargs.keys()).issubset(self.valid_kargs)
        encoded_args = self.translator.encode(self.function_name, args)
        return self.transaction_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=encoded_args,
            **kargs
        )

    def call(self, *args, **kargs):
        """Execute the function read-only and decode its return value."""
        assert set(kargs.keys()).issubset(self.valid_kargs)
        encoded_args = self.translator.encode(self.function_name, args)
        raw_result = self.call_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=encoded_args,
            **kargs
        )
        if not raw_result:
            return raw_result
        decoded = self.translator.decode(self.function_name, raw_result)
        # Unwrap single return values for convenience.
        if len(decoded) == 1:
            return decoded[0]
        return decoded

    def estimate_gas(self, *args, **kargs):
        """Estimate the gas needed to invoke this function."""
        if not self.estimate_function:
            raise RuntimeError('estimate_function wasnt supplied.')
        assert set(kargs.keys()).issubset(self.valid_kargs)
        encoded_args = self.translator.encode(self.function_name, args)
        return self.estimate_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=encoded_args,
            **kargs
        )

    def __call__(self, *args, **kargs):
        if self.translator.function_data[self.function_name]['is_constant']:
            return self.call(*args, **kargs)
        return self.transact(*args, **kargs)
class ContractProxy(object):
    """ Exposes a smart contract as a python object.

    Contract calls can be made directly in this object, all the functions will
    be exposed with the equivalent api and will perform the argument
    translation.
    """

    def __init__(self, sender, abi, address, call_func, transact_func, estimate_function=None):
        sender = normalize_address(sender)

        self.abi = abi
        self.address = address = normalize_address(address)
        self.translator = ContractTranslator(abi)

        # Attach one MethodProxy per ABI function so calls read naturally,
        # e.g. `contract.transfer(to, amount)`.
        for function_name in self.translator.function_data:
            proxy = MethodProxy(
                sender,
                address,
                function_name,
                self.translator,
                call_func,
                transact_func,
                estimate_function,
            )

            # Build a human-readable docstring such as 'f(uint256 amount)'.
            type_argument = self.translator.function_data[function_name]['signature']
            formatted_args = ', '.join(
                '{type} {argument}'.format(type=type_, argument=argument)
                for type_, argument in type_argument
            )
            proxy.__doc__ = '{function_name}({function_signature})'.format(
                function_name=function_name,
                function_signature=formatted_args,
            )

            setattr(self, function_name, proxy)
# backwards compatibility
ABIContract = ContractProxy  # legacy alias kept so existing imports keep working
| {
"repo_name": "changwu-tw/pyethapp",
"path": "pyethapp/rpc_client.py",
"copies": "1",
"size": "30236",
"license": "mit",
"hash": 7640459665317167000,
"line_mean": 35.0811455847,
"line_max": 115,
"alpha_frac": 0.5915465009,
"autogenerated": false,
"ratio": 4.522961854899028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00047709445500350553,
"num_lines": 838
} |
"""A simple webapp2 server."""
import os.path
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.runtime import DeadlineExceededError
import jinja2
import webapp2
import models
import geojson
import time
import logging
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
STATISTICS_KEY = 'statistics'
class MainPage(webapp2.RequestHandler):
    def get(self):
        """Render the landing page with login state and cached statistics."""
        user = users.get_current_user()
        if user:
            user_url = users.create_logout_url(self.request.uri)
            user_linktext = 'Logout ' + user.nickname()
        else:
            user_url = users.create_login_url(self.request.uri)
            user_linktext = 'Login to Make Changes'
        # could be none, if we don't have current statistics
        statistics = memcache.get(STATISTICS_KEY)
        if statistics:
            logging.debug('Statistics from cache.')
        else:
            # get the most recent statistics
            recent = models.Statistics.query().order(-models.Statistics.updateTime).fetch(1)
            if recent:
                logging.debug('Statistics from database.')
                statistics = recent[0]
            else:
                logging.debug('Statistics generated.')
                statistics = models.generateStatistics()
                statistics.put()
            memcache.set(STATISTICS_KEY, statistics, time=60*60*24)  # expire after 24 hours
        logging.debug(str(statistics))
        # TODO: for users who aren't logged in, consider caching the rendered page.
        template = JINJA_ENVIRONMENT.get_template('main.html')
        self.response.write(template.render({
            'user_url': user_url,
            'user_linktext': user_linktext,
            'statistics': statistics,
            'vars': models.JAVASCRIPT_VARS,
        }))
class ViewWay(webapp2.RequestHandler):
    def get(self):
        """Render the detail page for a single way section."""
        user = users.get_current_user()
        if user:
            user_url = users.create_logout_url(self.request.uri)
            user_linktext = 'Logout ' + user.nickname()
        else:
            user_url = users.create_login_url(self.request.uri)
            user_linktext = 'Login to Make Changes'
        way = models.way_key_db(self.request.get('sectionId', None)).get()
        template = JINJA_ENVIRONMENT.get_template('wayinfo.html')
        self.response.write(template.render({
            'user_url': user_url,
            'user_linktext': user_linktext,
            'vars': models.JAVASCRIPT_VARS,
            'way': way,
        }))
class Unauthorised(webapp2.RequestHandler):
    def get(self):
        """Render the 'not authorised' page."""
        user = users.get_current_user()
        if user:
            user_url = users.create_logout_url(self.request.uri)
            user_linktext = 'Logout ' + user.nickname()
        else:
            user_url = users.create_login_url(self.request.uri)
            user_linktext = 'Login to Make Changes'
        template = JINJA_ENVIRONMENT.get_template('unauthorised.html')
        self.response.write(template.render({
            'user_url': user_url,
            'user_linktext': user_linktext,
            'vars': models.JAVASCRIPT_VARS,
        }))
class UpdateWay(webapp2.RequestHandler):
    def post(self):
        """Redirect back to the main page (no update logic implemented here)."""
        self.redirect('/?' + urllib.urlencode({'focal_way': None}))
class GetWaysForZone(webapp2.RequestHandler):
    """ Returns a GeoJSON object with all the ways for this zone """

    def get(self):
        zone = self.request.get('zone', None)
        if not zone:
            logging.warning('Request to GetWaysForZone did not request a zone')
            self.response.write('No valid zone requested.')
            self.response.set_status(400)
            return
        cachekey = models.zone_key_cache(zone)
        # TODO: ndb already caches results, so is this redundant?
        json = memcache.get(cachekey)
        if json is None:
            entity = models.zone_key_db(zone).get()
            if not entity:
                logging.warning('Zone requested in GetWaysForZone did not exist: ' + str(zone))
                self.response.write('No valid zone requested.')
                self.response.set_status(400)
                return
            if entity.json:
                json = entity.json
                logging.debug('For zone ' + str(zone) + ' JSON from database')
            else:
                entity.generateJson(forceNew=True)
                json = entity.json
                logging.info('For zone ' + str(zone) + ' JSON generated')
                entity.put()
            # TODO: consider deflate compression for json, in memcache and on the wire
            memcache.set(cachekey, json)
        else:
            logging.debug('For zone ' + str(zone) + ' JSON from cache')
        # TODO: investigate use of 'cache-control' and 'etag' headers
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json)
class GenerateStatistics(webapp2.RequestHandler):
    def get(self):
        """Admin-only endpoint that forces regeneration of site statistics."""
        if not users.get_current_user() or not users.is_current_user_admin():
            self.redirect('/unauthorised')
            return
        logging.info('Forcing statistics generation')
        # generate statistics
        fresh_stats = models.generateStatistics()
        fresh_stats.put()
        memcache.set(STATISTICS_KEY, fresh_stats, time=60*60*24)  # expire after 24 hours
        self.redirect('/')
class GenerateJson(webapp2.RequestHandler):
    """Admin-only endpoint that regenerates (or clears) cached zone GeoJSON."""

    def get(self):
        # Only admins may regenerate the cache.
        if not users.get_current_user() or not users.is_current_user_admin():
            self.redirect('/unauthorised')
            return
        # Optional single zone id; when absent, every zone is processed.
        zoneid = self.request.get('zone', None)
        done = []
        # NOTE(review): request parameters arrive as strings, so any non-empty
        # value (including '0') enables clear-only mode -- confirm intended.
        clearonly = self.request.get('clearonly', 0)
        if clearonly:
            logging.info('Clearing JSON only')
        # generate json
        starttime = time.clock()
        if zoneid is None:
            logging.info('Forcing fresh json for all zones')
            zone_query = models.MapZone.query()
            for zonekey in zone_query:
                # Either wipe the stored JSON or rebuild it from scratch.
                if clearonly:
                    json = zonekey.json = None
                else:
                    zonekey.generateJson(forceNew=True)
                    json = zonekey.json
                zonekey.put()
                cachekey = models.zone_key_cache(zonekey.key.id())
                memcache.set(cachekey, json) # overwrite previous memcache values TODO: compression
                if clearonly:
                    done.append(zonekey.key.id())
                else:
                    done.append((zonekey.key.id(),len(json)))
        else:
            logging.info('Forcing fresh json for zone id: ' + repr(zoneid))
            # NOTE(review): .get() may return None for an unknown zone id,
            # which would raise AttributeError below -- confirm acceptable.
            zonekey = models.zone_key_db(zoneid).get()
            if clearonly:
                json = zonekey.json = None
            else:
                zonekey.generateJson(forceNew=True)
                json = zonekey.json
            zonekey.put()
            cachekey = models.zone_key_cache(zonekey.key.id())
            memcache.set(cachekey, json) # overwrite previous memcache values TODO: compression
            if clearonly:
                done.append(zonekey.key.id())
            else:
                done.append((zonekey.key.id(),len(json)))
        self.response.headers['Content-Type'] = 'text/plain'
        if clearonly:
            self.response.write('Successfully cleared json in ' + str(time.clock()-starttime) + ' seconds\n')
            self.response.write('Cleared JSON for zones: ' + str(done))
        else:
            self.response.write('Successfully generated json in ' + str(time.clock()-starttime) + ' seconds\n')
            self.response.write('Json file sizes by zone (zone, size)' + str(done))
application = webapp2.WSGIApplication([
('/', MainPage),
('/way', ViewWay),
('/unauthorised', Unauthorised),
('/update_way', UpdateWay),
('/get_ways_for_zone', GetWaysForZone),
('/generate_json', GenerateJson),
('/generate_stats', GenerateStatistics),
], debug=True) | {
"repo_name": "arobrien/brisbanebikewaychecker",
"path": "brisbanebikewaychecker.py",
"copies": "1",
"size": "9102",
"license": "mit",
"hash": -6031406756594941000,
"line_mean": 35.7056451613,
"line_max": 126,
"alpha_frac": 0.5598769501,
"autogenerated": false,
"ratio": 4.285310734463277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5345187684563277,
"avg_score": null,
"num_lines": null
} |
"""A simple webapp2 server."""
import sys
sys.path.insert(0, 'lib')
import cgi
import webapp2
import rdflib
from rdflib import Graph
import pyparsing
MAIN_PAGE_HTML = """\
<!doctype html>
<html>
<body>
<script src="js/jquery/jquery-1.11.0.min.js"></script>
<script src="js/arbor/arbor.js"></script>
<script>
(function($){
var Renderer = function(canvas){
var canvas = $(canvas).get(0)
var ctx = canvas.getContext("2d");
var particleSystem
var that = {
init:function(system){
//
// the particle system will call the init function once, right before the
// first frame is to be drawn. it's a good place to set up the canvas and
// to pass the canvas size to the particle system
//
// save a reference to the particle system for use in the .redraw() loop
particleSystem = system
// inform the system of the screen dimensions so it can map coords for us.
// if the canvas is ever resized, screenSize should be called again with
// the new dimensions
particleSystem.screenSize(canvas.width, canvas.height)
particleSystem.screenPadding(80) // leave an extra 80px of whitespace per side
// set up some event handlers to allow for node-dragging
that.initMouseHandling()
},
redraw:function(){
//
// redraw will be called repeatedly during the run whenever the node positions
// change. the new positions for the nodes can be accessed by looking at the
// .p attribute of a given node. however the p.x & p.y values are in the coordinates
// of the particle system rather than the screen. you can either map them to
// the screen yourself, or use the convenience iterators .eachNode (and .eachEdge)
// which allow you to step through the actual node objects but also pass an
// x,y point in the screen's coordinate system
//
ctx.fillStyle = "white";
ctx.fillRect(0,0, canvas.width, canvas.height);
particleSystem.eachEdge(function(edge, pt1, pt2){
// edge: {source:Node, target:Node, length:#, data:{}}
// pt1: {x:#, y:#} source position in screen coords
// pt2: {x:#, y:#} target position in screen coords
// draw a line from pt1 to pt2
ctx.strokeStyle = "rgba(0,0,0, .333)";
ctx.lineWidth = 1;
ctx.beginPath();
ctx.moveTo(pt1.x, pt1.y);
ctx.lineTo(pt2.x, pt2.y);
ctx.stroke();
ctx.fillStyle = "black";
ctx.font = 'italic 13px sans-serif';
ctx.fillText (edge.data.name, (pt1.x + pt2.x) / 2, (pt1.y + pt2.y) / 2);
})
particleSystem.eachNode(function(node, pt){
// node: {mass:#, p:{x,y}, name:"", data:{}};
// pt: {x:#, y:#} node position in screen coords;
// draw a rectangle centered at pt
var w = 10;
ctx.fillStyle = (node.data.alone) ? "orange" : "black";
ctx.fillRect(pt.x-w/2, pt.y-w/2, w,w);
ctx.fillStyle = "black";
ctx.font = 'italic 13px sans-serif';
ctx.fillText (node.name, pt.x+8, pt.y+8);
})
},
initMouseHandling:function(){
// no-nonsense drag and drop (thanks springy.js)
var dragged = null;
// set up a handler object that will initially listen for mousedowns then
// for moves and mouseups while dragging
var handler = {
clicked:function(e){
var pos = $(canvas).offset();
_mouseP = arbor.Point(e.pageX-pos.left, e.pageY-pos.top);
dragged = particleSystem.nearest(_mouseP);
if (dragged && dragged.node !== null){
// while we're dragging, don't let physics move the node
dragged.node.fixed = true;
}
$(canvas).bind('mousemove', handler.dragged);
$(window).bind('mouseup', handler.dropped);
return false;
},
dragged:function(e){
var pos = $(canvas).offset();
var s = arbor.Point(e.pageX-pos.left, e.pageY-pos.top);
if (dragged && dragged.node !== null){
var p = particleSystem.fromScreen(s);
dragged.node.p = p;
}
return false;
},
dropped:function(e){
if (dragged===null || dragged.node===undefined) return;
if (dragged.node !== null) dragged.node.fixed = false;
dragged.node.tempMass = 1000;
dragged = null;
$(canvas).unbind('mousemove', handler.dragged);
$(window).unbind('mouseup', handler.dropped);
_mouseP = null;
return false;
}
}
// start listening
$(canvas).mousedown(handler.clicked);
},
}
return that
}
$(document).ready(function(){
var sys = arbor.ParticleSystem(1000, 600, 0.5) // create the system with sensible repulsion/stiffness/friction
sys.parameters({gravity:true}) // use center-gravity to make the graph settle nicely (ymmv)
sys.renderer = Renderer("#viewport") // our newly created renderer will have its .init() method called shortly by sys...
/* add some nodes to the graph and watch it go...
sys.addNode('a', {mass: 3, link: '/dinges'})
sys.addEdge('a','b', 'van a naar b')
sys.addEdge('a','c', 'van a naar c')
sys.addEdge('a','d', 'van a naar d')
sys.addEdge('a','e', 'van a naar e')
sys.addNode('f', {alone:true, mass:.25})
*/
var theUI = {
nodes:{
"arbor.js":{color:"red", shape:"dot", alpha:1},
demos:{color:"purple", shape:"dot", alpha:1},
halfviz:{color:"green", alpha:0, link:'/halfviz'},
atlas:{color:"yellow", alpha:0, link:'/atlas'},
echolalia:{color:"blue", alpha:0, link:'/echolalia'},
docs:{color:"black", shape:"dot", alpha:1},
reference:{color:"blue", alpha:0, link:'#reference'},
introduction:{color:"red", alpha:0, link:'#introduction'},
code:{color:"black", shape:"dot", alpha:1},
github:{color:"grey", alpha:0, link:'https://github.com/samizdatco/arbor'},
".zip":{color:"red", alpha:0, link:'/js/dist/arbor-v0.92.zip'},
".tar.gz":{color:"green", alpha:0, link:'/js/dist/arbor-v0.92.tar.gz'}
},
edges:{
"arbor.js":{
demos:{length:.8},
docs:{length:.8},
code:{length:.8}
},
demos:{halfviz:{},
atlas:{},
echolalia:{}
},
docs:{reference:{},
introduction:{}
},
code:{".zip":{},
".tar.gz":{},
"github":{}
}
}
}
sys.graft(theUI)
})
})(this.jQuery)
function visualizeGraph()
{
$.ajax({
url:"/parse",
type:'POST',
data: 'content=' + $('#content').val(),
success: function(data){
alert(data);
}
});
}
</script>
<h1>The incredible chronological consistency checker!</h1>
<h2>RDF</h2>
<p>This simple application uses the <a href="http://www.w3.org/RDF/">Resource Description Framework (RDF)</a> and <a href="http://en.wikipedia.org/wiki/SPARQL">SPARQL</a> to detect inconsistencies in simple RDF facts commonly known as <a href="http://en.wikipedia.org/wiki/Resource_Description_Framework#Overview">triples</a>. The example below shows a circular argument of several triples, where the whole represents something like the <a href="http://en.wikipedia.org/wiki/Penrose_stairs">Penrose stairs</a> immortalized in <a href="http://www.mcescher.nl/galerij/erkenning-succes/klimmen-en-dalen/">Escher's famous never-ending staircase</a> - an impossible spatial configuration</p>
<p>Where detection using standard database and SQL of such an inconsistency would be a <a href="http://stackoverflow.com/questions/1757260/simplest-way-to-do-a-recursive-self-join-in-sql-server">complex operation</a>, with linked data this operation is very simple.</p>
<h2>SPARQL</h2>
<p>The SPARQL query looks a bit different from standard SQL, but is not hard to understand:</p>
PREFIX time:<http://www.w3.org/2006/time#> <br>
SELECT DISTINCT ?startcontext ?endcontext<br>
WHERE {<br>
?startcontext time:after+ ?endcontext .<br>
?endcontext time:after ?startcontext. <br>
} ORDER BY ?startcontext<br>
<p>The plus-sign is the only 'extra' operator needed for the recursivity in the query and indicates a <a href="http://www.w3.org/TR/sparql11-property-paths/">'property path'</a> operator, meaning that the query will try following the same predicate <i>time:after</i> once; twice; as many times as it can from fact to fact, before checking whether the 'endcontext' doubles back to the 'startcontext' to form a circular argument.</p>
<p>Try it yourself with the example <a href="http://www.google.nl/url?sa=t&rct=j&q=&esrc=s&source=web&cd=2&cad=rja&uact=8&ved=0CDgQFjAB&url=http%3A%2F%2Fen.wikipedia.org%2Fwiki%2FTurtle_(syntax)&ei=tcA5U-2xFoXt0gXKgoHwBg&usg=AFQjCNGNS_ZDcryLexf8rsfgQG-dYZbbpA&sig2=ycEST949IBN9JjGr-hs5fg&bvm=bv.63808443,d.d2k">rdf/turtle</a> snippet below...</p>
<div style="width:auto ;">
<div style="float:right; width:50%;"><canvas id="viewport" width="400" height="300"></canvas></div>
<div style="float:left; width:50%;"><form action="/sign" method="post">
<div><textarea name="content" id="content" rows="10" cols="60">
@prefix time: <http://www.w3.org/2006/time#> .
"a" time:after "b" .
"b" time:after "c" .
"c" time:after "d" .
"d" time:after "e" .
"e" time:after "a" .
</textarea></div>
<div><input type="submit" value="Parse rdf/turtle"></div>
<div><input type="button" value="Visualize" onclick="visualizeGraph();"></div>
</form></div>
</div>
</body>
</html>
"""
class MainPage(webapp2.RequestHandler):
    def get(self):
        """Serve the rdflib version banner followed by the static landing page."""
        self.response.headers['Content-Type'] = 'text/HTML'
        version_banner = '<p>Using <a href="http://www.rdflib.net">RDFlib</a> version ' + rdflib.__version__ + '</p>'
        self.response.write(version_banner)
        self.response.write(MAIN_PAGE_HTML)
class CheckConsistency(webapp2.RequestHandler):
    def post(self):
        """Parse the submitted rdf/turtle and report chronological cycles.

        Writes an HTML fragment: a parse-success banner, then either the
        detected circularity pairs or an all-clear message.
        """
        g = Graph()
        g.parse(data=self.request.get('content'), format='turtle')
        self.response.write('<h2>Successfully parsed!</h2>')
        # `time:after+` is a SPARQL property path: the predicate is followed
        # transitively, so any result row means the graph contains a cycle.
        qres = g.query(
            """
            PREFIX time:<http://www.w3.org/2006/time#>
            SELECT DISTINCT ?startcontext ?endcontext
            WHERE {
            ?startcontext time:after+ ?endcontext .
            ?endcontext time:after ?startcontext.
            } ORDER BY ?startcontext """)
        if len(qres) != 0:
            self.response.write('<p>A chronological circularity has been detected:</p>')
            for row in qres:
                self.response.write('<p> %s is younger than %s ' % row + '</p>')
        else:
            self.response.write('<p>No chronological circularity has been detected</p>')
class ParseInput(webapp2.RequestHandler):
    def post(self):
        """AJAX endpoint: parse rdf/turtle and report chronological cycles."""
        graph = Graph()
        graph.parse(data=self.request.get('content'), format='turtle')
        # `time:after+` follows the predicate transitively; any result row
        # means the submitted facts contain a circular chronology.
        qres = graph.query(
            """
            PREFIX time:<http://www.w3.org/2006/time#>
            SELECT DISTINCT ?startcontext ?endcontext
            WHERE {
            ?startcontext time:after+ ?endcontext .
            ?endcontext time:after ?startcontext.
            } ORDER BY ?startcontext """)
        if qres.__len__() != 0:
            self.response.write('<p>A chronological circularity has been detected:</p>')
            for row in qres:
                self.response.write('<p> %s is younger than %s ' % row + '</p>')
        else:
            self.response.write('<p>No chronological circularity has been detected</p>')
# URL routing table for the webapp2 WSGI application.
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/sign', CheckConsistency),
    ('/parse', ParseInput)
], debug=True) | {
"repo_name": "reinvantveer/semcontext",
"path": "helloworld.py",
"copies": "1",
"size": "11648",
"license": "mit",
"hash": -4159816468294894600,
"line_mean": 36.4488448845,
"line_max": 687,
"alpha_frac": 0.6265453297,
"autogenerated": false,
"ratio": 3.050013092432574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8857299710097941,
"avg_score": 0.06385174240692673,
"num_lines": 303
} |
"""A simple web crawler -- classes implementing crawling logic."""
import asyncio
import cgi
from http.client import BadStatusLine
import logging
import re
import time
import urllib.parse
logger = logging.getLogger(__name__)
def unescape(s):
    """The inverse of cgi.escape(): decode the four basic HTML entities.

    The entity literals were lost to HTML-decoding at some point,
    leaving no-op self-replacements; restored per the stated contract.
    """
    s = s.replace('&quot;', '"').replace('&gt;', '>').replace('&lt;', '<')
    return s.replace('&amp;', '&')  # Must be last so '&' is not re-decoded.
class ConnectionPool:
    """A connection pool keyed by (host, port, ssl).

    To open a connection, use get_connection(). To recycle it, call
    conn.close(recycle=True), which hands it back to the pool via
    recycle_connection().
    The pool is mostly just a mapping from (host, port, ssl) tuples to
    lists of Connections. The currently active connections are *not*
    in the data structure; get_connection() takes the connection out,
    and recycle_connection() puts it back in.
    There are limits to both the overall pool and the per-key pool.
    """
    def __init__(self, max_pool=10, max_tasks=5):
        self.max_pool = max_pool  # Overall limit on pooled connections.
        self.max_tasks = max_tasks  # Per-(host, port, ssl) limit.
        self.loop = asyncio.get_event_loop()
        self.connections = {}  # {(host, port, ssl): [Connection, ...], ...}
        self.queue = []  # All pooled Connections, oldest first.
    def close(self):
        """Close all connections available for reuse."""
        for conns in self.connections.values():
            for conn in conns:
                conn.close()
        self.connections.clear()
        self.queue.clear()
    @asyncio.coroutine
    def get_connection(self, host, port, ssl):
        """Create or reuse a connection.

        Resolves the host first so a pooled connection to any of its
        addresses can be reused; re-raises on resolution failure.
        """
        port = port or (443 if ssl else 80)
        try:
            ipaddrs = yield from self.loop.getaddrinfo(host, port)
        except Exception as exc:
            logger.error('Exception %r for (%r, %r)', exc, host, port)
            raise
        logger.warn('* %s resolves to %s',
                    host, ', '.join(ip[4][0] for ip in ipaddrs))
        # Look for a reusable connection under any resolved address.
        for _, _, _, _, (h, p, *_) in ipaddrs:
            key = h, p, ssl
            conn = None
            conns = self.connections.get(key)
            while conns:
                conn = conns.pop(0)
                self.queue.remove(conn)
                if not conns:
                    del self.connections[key]
                if conn.stale():
                    # Discard dead connections until a live one turns up.
                    logger.warn('closing stale connection %r', key)
                    conn.close()  # Just in case.
                else:
                    logger.warn('* Reusing pooled connection %r', key)
                    return conn
        # Create a new connection.
        conn = Connection(self, host, port, ssl)
        yield from conn.connect()
        logger.warn('* New connection %r', conn.key)
        return conn
    def recycle_connection(self, conn):
        """Make a connection available for reuse.
        This also prunes the pool if it exceeds the size limits;
        at most one victim is closed per call.
        """
        conns = self.connections.setdefault(conn.key, [])
        conns.append(conn)
        self.queue.append(conn)
        if len(conns) > self.max_tasks:
            victims = conns  # Prune one connection for this key.
        elif len(self.queue) > self.max_pool:
            victims = self.queue  # Prune one connection for any key.
        else:
            return
        for victim in victims:
            if victim.stale():  # Prefer pruning the oldest stale connection.
                logger.warn('closing stale connection %r', victim.key)
                break
        else:
            # No stale connection found: evict the oldest one instead.
            victim = victims[0]
            logger.warn('closing oldest connection %r', victim.key)
        conns = self.connections[victim.key]
        conns.remove(victim)
        if not conns:
            del self.connections[victim.key]
        self.queue.remove(victim)
        victim.close()
class Connection:
    """A single client connection that can be returned to its pool."""

    def __init__(self, pool, host, port, ssl):
        self.pool = pool
        self.host = host
        self.port = port
        self.ssl = ssl
        self.reader = None
        self.writer = None
        self.key = None

    def stale(self):
        """True when the connection was never opened or has hit EOF."""
        if self.reader is None:
            return True
        return self.reader.at_eof()

    @asyncio.coroutine
    def connect(self):
        """Open the transport and record the canonical (host, port, ssl) key."""
        reader, writer = yield from asyncio.open_connection(
            self.host, self.port, ssl=self.ssl)
        self.reader = reader
        self.writer = writer
        peername = writer.get_extra_info('peername')
        if peername:
            # Canonicalize to the peer's actual address.
            self.host, self.port = peername[:2]
        else:
            logger.warn('NO PEERNAME %r %r %r', self.host, self.port, self.ssl)
        self.key = self.host, self.port, self.ssl

    def close(self, recycle=False):
        """Recycle into the pool when requested and still usable, else close."""
        if recycle and not self.stale():
            self.pool.recycle_connection(self)
        else:
            self.writer.close()
        self.pool = self.reader = self.writer = None
@asyncio.coroutine
def make_request(url, pool, *, method='GET', headers=None, version='1.1'):
    """Start an HTTP request. Return a Connection.

    The caller must send the request body if desired, then call
    read_response(). The connection is closed (not leaked) if the
    initial header write fails.
    """
    parts = urllib.parse.urlparse(url)
    assert parts.scheme in ('http', 'https'), repr(url)
    ssl = parts.scheme == 'https'
    port = parts.port or (443 if ssl else 80)
    path = parts.path or '/'
    path = '%s?%s' % (path, parts.query) if parts.query else path
    logger.warn('* Connecting to %s:%s using %s for %s',
                parts.hostname, port, 'ssl' if ssl else 'tcp', url)
    conn = yield from pool.get_connection(parts.hostname, port, ssl)
    headers = dict(headers) if headers else {}  # Must use Cap-Words.
    headers.setdefault('User-Agent', 'asyncio-example-crawl/0.0')
    headers.setdefault('Host', parts.netloc)
    headers.setdefault('Accept', '*/*')
    lines = ['%s %s HTTP/%s' % (method, path, version)]
    lines.extend('%s: %s' % kv for kv in headers.items())
    for line in lines + ['']:
        logger.info('> %s', line)
    try:
        conn.writer.write('\r\n'.join(lines + ['', '']).encode('latin-1'))
    except Exception:
        # Implements the old TODO: don't leak the connection if the
        # transport write fails.
        conn.close()
        raise
    return conn  # Caller must send body if desired, then call read_response().
@asyncio.coroutine
def read_response(conn):
    """Read an HTTP response from a connection.

    Returns (http_version, status, reason, headers, output); *output*
    is a stream from which the body can be read, dispatched by
    Content-Length / chunked transfer encoding.
    """
    @asyncio.coroutine
    def getline():
        line = (yield from conn.reader.readline()).decode('latin-1').rstrip()
        logger.info('< %s', line)
        return line
    status_line = yield from getline()
    status_parts = status_line.split(None, 2)
    if len(status_parts) != 3 or not status_parts[1].isdigit():
        logger.error('bad status_line %r', status_line)
        raise BadStatusLine(status_line)
    http_version, status, reason = status_parts
    status = int(status)
    headers = {}
    while True:
        header_line = yield from getline()
        if not header_line:
            break
        key, value = header_line.split(':', 1)
        # TODO: Continuation lines; multiple header lines per key..
        headers[key.lower()] = value.lstrip()
    if 'content-length' in headers:
        nbytes = int(headers['content-length'])
        output = asyncio.StreamReader()
        # asyncio.async() was renamed ensure_future(); 'async' later
        # became a reserved word, making the old spelling a SyntaxError.
        asyncio.ensure_future(length_handler(nbytes, conn.reader, output))
    elif headers.get('transfer-encoding') == 'chunked':
        output = asyncio.StreamReader()
        asyncio.ensure_future(chunked_handler(conn.reader, output))
    else:
        output = conn.reader
    # http_version[5:] strips the 'HTTP/' prefix.
    return http_version[5:], status, reason, headers, output
@asyncio.coroutine
def length_handler(nbytes, input, output):
    """Async handler for reading a body given a Content-Length header."""
    remaining = nbytes
    while remaining > 0:
        # Read in bounded chunks so huge bodies don't buffer all at once.
        chunk = yield from input.read(min(remaining, 256*1024))
        if not chunk:
            logger.error('premature end for content-length')
            output.set_exception(EOFError())
            return
        output.feed_data(chunk)
        remaining -= len(chunk)
    output.feed_eof()
@asyncio.coroutine
def chunked_handler(input, output):
    """Async handler for reading a body using Transfer-Encoding: chunked.

    Feeds decoded chunk payloads into *output* (a StreamReader) and
    signals EOF after the terminating zero-size chunk.
    """
    logger.info('parsing chunked response')
    nblocks = 0
    nbytes = 0
    while True:
        size_header = yield from input.readline()
        if not size_header:
            logger.error('premature end of chunked response')
            output.set_exception(EOFError())
            return
        logger.debug('size_header = %r', size_header)
        # Chunk extensions (after ';') are ignored; the size is hex.
        parts = size_header.split(b';')
        size = int(parts[0], 16)
        nblocks += 1
        nbytes += size
        if size:
            logger.debug('reading chunk of %r bytes', size)
            block = yield from input.readexactly(size)
            assert len(block) == size, (len(block), size)
            output.feed_data(block)
        # Every chunk, including the final size-0 one, ends with CRLF.
        crlf = yield from input.readline()
        assert crlf == b'\r\n', repr(crlf)
        if not size:
            break
    logger.warn('chunked response had %r bytes in %r blocks', nbytes, nblocks)
    output.feed_eof()
class Fetcher:
    """Logic and state for one URL.
    When found in crawler.busy, this represents a URL to be fetched or
    in the process of being fetched; when found in crawler.done, this
    holds the results from fetching it.
    This is usually associated with a task. This references the
    crawler for the connection pool and to add more URLs to its todo
    list.
    Call fetch() to do the fetching; results are in instance variables.
    """
    def __init__(self, url, crawler, max_redirect=10, max_tries=4):
        self.url = url
        self.crawler = crawler
        # We don't loop resolving redirects here -- we just use this
        # to decide whether to add the redirect URL to crawler.todo.
        self.max_redirect = max_redirect
        # But we do loop to retry on errors a few times.
        self.max_tries = max_tries
        # Everything we collect from the response goes here.
        self.task = None  # asyncio.Task running fetch(), set by Crawler.
        self.exceptions = []  # One exception per failed try.
        self.tries = 0
        self.conn = None
        self.status = None  # int HTTP status, once a response is read.
        self.headers = None  # dict of lowercased header name -> value.
        self.body = None  # Raw response body bytes.
        self.next_url = None  # Absolute redirect target, if any.
        self.ctype = None  # Parsed content-type, e.g. 'text/html'.
        self.pdict = None  # Content-type parameters (e.g. charset).
        self.encoding = None
        self.urls = None  # All distinct href targets found in the body.
        self.new_urls = None  # Subset of urls actually added to the crawl.
    @asyncio.coroutine
    def fetch(self):
        """Attempt to fetch the contents of the URL.
        If successful, and the data is HTML, extract further links and
        add them to the crawler. Redirects are also added back there.
        """
        while self.tries < self.max_tries:
            self.tries += 1
            conn = None
            try:
                conn = yield from make_request(self.url, self.crawler.pool)
                _, status, _, headers, output = yield from read_response(conn)
                self.status, self.headers = status, headers
                self.body = yield from output.read()
                h_conn = headers.get('connection', '').lower()
                if h_conn != 'close':
                    # Keep-alive allowed: hand the connection back to the
                    # pool and forget it so finally doesn't close it.
                    conn.close(recycle=True)
                    conn = None
                if self.tries > 1:
                    logger.warn('try %r for %r success', self.tries, self.url)
                break
            except (BadStatusLine, OSError) as exc:
                self.exceptions.append(exc)
                logger.warn('try %r for %r raised %r',
                            self.tries, self.url, exc)
            finally:
                if conn is not None:
                    conn.close()
        else:
            # We never broke out of the while loop, i.e. all tries failed.
            logger.error('no success for %r in %r tries',
                         self.url, self.max_tries)
            return
        if status in (300, 301, 302, 303, 307) and headers.get('location'):
            next_url = headers['location']
            self.next_url = urllib.parse.urljoin(self.url, next_url)
            if self.max_redirect > 0:
                logger.warn('redirect to %r from %r', self.next_url, self.url)
                # The redirect target is crawled as its own URL with a
                # decremented redirect budget.
                self.crawler.add_url(self.next_url, self.max_redirect-1)
            else:
                logger.error('redirect limit reached for %r from %r',
                             self.next_url, self.url)
        else:
            if status == 200:
                self.ctype = headers.get('content-type')
                self.pdict = {}
                if self.ctype:
                    self.ctype, self.pdict = cgi.parse_header(self.ctype)
                self.encoding = self.pdict.get('charset', 'utf-8')
                if self.ctype == 'text/html':
                    body = self.body.decode(self.encoding, 'replace')
                    # Replace href with (?:href|src) to follow image links.
                    self.urls = set(re.findall(r'(?i)href=["\']?([^\s"\'<>]+)',
                                               body))
                    if self.urls:
                        logger.warn('got %r distinct urls from %r',
                                    len(self.urls), self.url)
                    self.new_urls = set()
                    for url in self.urls:
                        # Normalize: decode entities, resolve relative
                        # URLs, and drop the fragment.
                        url = unescape(url)
                        url = urllib.parse.urljoin(self.url, url)
                        url, frag = urllib.parse.urldefrag(url)
                        if self.crawler.add_url(url):
                            self.new_urls.add(url)
class Crawler:
    """Crawl a set of URLs.
    This manages three disjoint sets of URLs (todo, busy, done). The
    data structures actually store dicts -- the values in todo give
    the redirect limit, while the values in busy and done are Fetcher
    instances.
    """
    def __init__(self, roots,
                 exclude=None, strict=True,  # What to crawl.
                 max_redirect=10, max_tries=4,  # Per-url limits.
                 max_tasks=10, max_pool=10,  # Global limits.
                 ):
        self.roots = roots
        self.exclude = exclude  # Regex of URLs to skip, or None.
        self.strict = strict
        self.max_redirect = max_redirect
        self.max_tries = max_tries
        self.max_tasks = max_tasks
        self.max_pool = max_pool
        self.todo = {}  # {url: max_redirect, ...}
        self.busy = {}  # {url: Fetcher, ...}
        self.done = {}  # {url: Fetcher, ...}
        self.pool = ConnectionPool(max_pool, max_tasks)
        self.root_domains = set()
        for root in roots:
            parts = urllib.parse.urlparse(root)
            # NOTE(review): urllib.parse.splitport is a non-public,
            # deprecated helper -- confirm availability on newer Pythons.
            host, port = urllib.parse.splitport(parts.netloc)
            if not host:
                continue
            if re.match(r'\A[\d\.]*\Z', host):
                # Looks like an IP address: record it verbatim.
                self.root_domains.add(host)
            else:
                host = host.lower()
                # NOTE(review): both branches below are identical in this
                # simplified version; the strict flag only changes
                # behavior inside host_okay().
                if self.strict:
                    self.root_domains.add(host)
                else:
                    self.root_domains.add(host)
        for root in roots:
            self.add_url(root)
        self.governor = asyncio.Semaphore(max_tasks)  # Caps concurrent fetches.
        self.termination = asyncio.Condition()  # Signals fetch completion.
        self.t0 = time.time()
        self.t1 = None
    def close(self):
        """Close resources (currently only the pool)."""
        self.pool.close()
    def host_okay(self, host):
        """Check if a host should be crawled.
        A literal match (after lowercasing) is always good. For hosts
        that don't look like IP addresses, some approximate matches
        are okay depending on the strict flag.
        """
        host = host.lower()
        if host in self.root_domains:
            return True
        if re.match(r'\A[\d\.]*\Z', host):
            # Unknown IP addresses are never crawled.
            return False
        if self.strict:
            return self._host_okay_strictish(host)
        else:
            return self._host_okay_lenient(host)
    def _host_okay_strictish(self, host):
        """Check if a host should be crawled, strict-ish version.
        This checks for equality modulo an initial 'www.' component.
        """
        # Toggle the 'www.' prefix and re-check membership.
        host = host[4:] if host.startswith('www.') else 'www.' + host
        return host in self.root_domains
    def _host_okay_lenient(self, host):
        """Check if a host should be crawled, lenient version.
        NOTE(review): apparently intended to compare the last two
        components of the host, but this simplified version only
        checks exact membership -- same as the caller's earlier test.
        """
        return host in self.root_domains
    def add_url(self, url, max_redirect=None):
        """Add a URL to the todo list if not seen before.
        Returns True when the URL was added, False when it was
        filtered out or already known.
        """
        if self.exclude and re.search(self.exclude, url):
            return False
        parts = urllib.parse.urlparse(url)
        if parts.scheme not in ('http', 'https'):
            logger.info('skipping non-http scheme in %r', url)
            return False
        host, port = urllib.parse.splitport(parts.netloc)
        if not self.host_okay(host):
            logger.info('skipping non-root host in %r', url)
            return False
        if max_redirect is None:
            max_redirect = self.max_redirect
        if url in self.todo or url in self.busy or url in self.done:
            return False
        logger.warn('adding %r %r', url, max_redirect)
        self.todo[url] = max_redirect
        return True
    @asyncio.coroutine
    def crawl(self):
        """Run the crawler until all finished."""
        with (yield from self.termination):
            while self.todo or self.busy:
                if self.todo:
                    # popitem() hands out an arbitrary pending URL.
                    url, max_redirect = self.todo.popitem()
                    fetcher = Fetcher(url,
                                      crawler=self,
                                      max_redirect=max_redirect,
                                      max_tries=self.max_tries,
                                      )
                    self.busy[url] = fetcher
                    fetcher.task = asyncio.Task(self.fetch(fetcher))
                else:
                    # Nothing to start: wait for some fetch to finish.
                    yield from self.termination.wait()
        self.t1 = time.time()
    @asyncio.coroutine
    def fetch(self, fetcher):
        """Call the Fetcher's fetch(), with a limit on concurrency.
        Once this returns, move the fetcher from busy to done.
        """
        url = fetcher.url
        with (yield from self.governor):
            try:
                yield from fetcher.fetch()  # Fetcher gonna fetch.
            finally:
                # Force GC of the task, so the error is logged.
                fetcher.task = None
        with (yield from self.termination):
            self.done[url] = fetcher
            del self.busy[url]
            self.termination.notify()
| {
"repo_name": "mikar/60-days-of-python",
"path": "webcrawler/crawling.py",
"copies": "1",
"size": "18360",
"license": "mit",
"hash": -151917443345292000,
"line_mean": 35.6467065868,
"line_max": 79,
"alpha_frac": 0.5557734205,
"autogenerated": false,
"ratio": 4.084538375973303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5140311796473304,
"avg_score": null,
"num_lines": null
} |
# A simple web crawler - still under development
# 简单的网络爬虫 - 还在开发中
import urllib2 # for get_page(url)
def crawler(seed):
    """Crawl the web starting from *seed*, depth-first.

    Pops URLs off the work list, harvests their links, and records
    visited URLs in *crawled*.
    """
    to_crawl = [seed]
    crawled = []
    while to_crawl:
        # BUG FIX: the original read 'to_crawl.pop' (no call), which
        # assigned the bound method itself instead of a URL string.
        url = to_crawl.pop()
        to_crawl = get_links_in_a_page(url, to_crawl, crawled)
        crawled.append(url)
def get_links_in_a_page(url, to_crawl, crawled):
    """Append every not-yet-crawled '<a href="...">' target of *url*.

    Fixes over the original:
    - the crawled-check was inverted (it queued URLs that were
      ALREADY crawled and dropped new ones);
    - the closing-quote index was computed on a slice but applied to
      the full string, corrupting the extracted URL;
    - the slice kept the opening quote character;
    - only the region around the first anchor was ever scanned.
    """
    page = get_page(url)
    start_index = page.find('<a href=')
    while start_index > -1:
        quote_open = page.find('"', start_index)
        if quote_open == -1:
            break
        quote_close = page.find('"', quote_open + 1)
        if quote_close == -1:
            break
        new_url = page[quote_open + 1:quote_close]
        if not check_if_crawled(new_url, crawled) and new_url not in to_crawl:
            to_crawl.append(new_url)
        # Continue scanning after this anchor's closing quote.
        start_index = page.find('<a href=', quote_close + 1)
    return to_crawl
def get_page(url):
    """Download *url* and return the raw response body."""
    request = urllib2.Request(url)
    return urllib2.urlopen(request).read()
def check_if_crawled(new_url, crawled):
    """Return True if *new_url* is already in the *crawled* list."""
    # Membership test returns the bool directly; no if/else needed.
    return new_url in crawled
| {
"repo_name": "roy2020china/BingDemo",
"path": "10_web_crawler.py",
"copies": "1",
"size": "1119",
"license": "mit",
"hash": -5968316038344713000,
"line_mean": 23.8863636364,
"line_max": 62,
"alpha_frac": 0.5753424658,
"autogenerated": false,
"ratio": 2.9594594594594597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40348019252594597,
"avg_score": null,
"num_lines": null
} |
"""A simple web server for video annotation"""
# parsing args
import argparse
# encoding / decoding
import json
# time / logging
import time
#import logging
#import traceback
# flask
import flask
from flask_cors import CORS, cross_origin
import tornado.wsgi
import tornado.httpserver
# database
import sqlite3
# redirect stdout and stderr for logging
import sys
# sys.stdout = open('./web_app.log', 'a', 1)
# sys.stderr = open('./web_app.err', 'a', 1)
import random
import boto.mturk.connection
# Obtain the flask app object and enable CORS so the annotation
# frontend can call this API from another origin.
app = flask.Flask(__name__)  # pylint: disable=invalid-name
CORS(app)
# Maximum time (seconds) a task may stay locked before the lock expires.
MAX_DELAY = 120
# Maximum difference (seconds) between the correct start_time/end_time and a
# verification attempt's start_time/end_time for the attempt to count as correct.
TRIM_DIFFERENCE_MAX = 1.0
def dict_factory(cursor, row):
    """sqlite3 row factory: map each column name to its value in *row*."""
    return {column[0]: row[index]
            for index, column in enumerate(cursor.description)}
def print_log_info(str_info):
"""Helper function for logging info"""
prefix = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
print "{:s} {:s}".format(prefix, str_info)
def collect_db_stats():
    """Log counts of named / trimmed / flagged / locked clips."""
    ant_tasks = app.annotation_tasks
    db_cursor = ant_tasks.cursor()
    try:
        tallies = []
        # Run the four count queries in order: named, trimmed, locked, flagged.
        for stats_query in (
                '''SELECT count(*) FROM video_db WHERE named=1''',
                '''SELECT count(*) FROM video_db WHERE trimmed=1''',
                '''SELECT count(*) FROM video_db
                             WHERE trim_locked=1 OR name_locked=1''',
                '''SELECT count(*) FROM video_db
                             WHERE red_flag>=1'''):
            db_cursor.execute(stats_query)
            tallies.append(db_cursor.fetchone()['count(*)'])
        print_log_info("All Stats: Named {:d}, Trimmed {:d}, flagged {:d}, Locked {:d}".format(
            tallies[0], tallies[1], tallies[3], tallies[2]))
    except sqlite3.Error as e:
        print_log_info(str(e))
    return
def approve_assignments():
    """
    Periodic callback decides whether assignments pending approval
    can be automatically approved and then marks them accordingly.

    Each pending HIT's recorded verification attempts are compared
    against the trusted labels in video_db; if all match, the
    assignment is approved via the MTurk API, otherwise the HIT is
    routed to manual approval.
    """
    # TODO verify correct verification labels here
    # TODO make Mturk login details command line arguments
    sandbox_host = 'mechanicalturk.sandbox.amazonaws.com'
    real_host = 'mechanicalturk.amazonaws.com'
    host = (sandbox_host if app.sandbox else real_host)
    mturk = boto.mturk.connection.MTurkConnection(
        aws_access_key_id=app.aws_access_key_id,
        aws_secret_access_key=app.aws_secret_access_key,
        host=host,
        debug=1  # debug = 2 prints out all requests.
    )
    mturk_cur = app.mturk_db_connection.cursor()
    db_cursor = app.annotation_tasks.cursor()
    try:
        # TODO make pending approval a separate table if we think that would be time-efficient
        mturk_cur.execute("SELECT assignment_id, hit_id, task FROM hits WHERE status='pending_approval'")
    except sqlite3.Error as e:
        print_log_info(str(e))
        return
    query_result = mturk_cur.fetchall()
    # We need to loop through every assignment/hit set pending approval
    for result in query_result:
        assignment_id = str(result["assignment_id"])
        hit_id = str(result["hit_id"])
        task = str(result["task"])
        all_verifications_correct = True
        print assignment_id
        try:
            if task == "name":
                # Name task: verb and noun must match the trusted labels exactly.
                mturk_cur.execute("SELECT id, action_noun, action_verb FROM name_verification_attempts WHERE hit_id=?", (hit_id,))
                action_query_result = mturk_cur.fetchall()
                for attempt_action_set in action_query_result:
                    db_cursor.execute("SELECT action_noun, action_verb FROM video_db WHERE id=?",
                                      (attempt_action_set['id'],))
                    verified_action_set = db_cursor.fetchone()
                    if attempt_action_set['action_verb'] != verified_action_set['action_verb']:
                        print_log_info("Verification Attempt failed! Attempt had verb "
                                       + str(attempt_action_set['action_verb'])
                                       + " but the verified had verb "
                                       + str(verified_action_set['action_verb']))
                        all_verifications_correct = False
                        break
                    if attempt_action_set['action_noun'] != verified_action_set['action_noun']:
                        print_log_info("Verification Attempt failed! Attempt had noun "
                                       + str(attempt_action_set['action_noun'])
                                       + " but the verified had noun "
                                       + str(verified_action_set['action_noun']))
                        all_verifications_correct = False
                        break
            else:  # ie. elif task == "trim":
                print "trim thing"
                # Trim task: times only need to fall within
                # TRIM_DIFFERENCE_MAX seconds of the trusted values.
                mturk_cur.execute("SELECT id, start_time, end_time FROM trim_verification_attempts WHERE hit_id=?", (hit_id,))
                times_query_result = mturk_cur.fetchall()
                for attempt_times_set in times_query_result:
                    db_cursor.execute("SELECT start_time, end_time FROM video_db WHERE id=?",
                                      (attempt_times_set['id'],))
                    verified_times_set = db_cursor.fetchone()
                    if abs(attempt_times_set['start_time'] - verified_times_set['start_time']) > TRIM_DIFFERENCE_MAX:
                        print_log_info("Verification Attempt failed! Attempt had start time "
                                       + str(attempt_times_set['start_time'])
                                       + " but the verified had start time "
                                       + str(verified_times_set['start_time']))
                        all_verifications_correct = False
                        break
                    if abs(attempt_times_set['end_time'] - verified_times_set['end_time']) > TRIM_DIFFERENCE_MAX:
                        print_log_info("Verification Attempt failed! Attempt had end time "
                                       + str(attempt_times_set['end_time'])
                                       + " but the verified had end time "
                                       + str(verified_times_set['end_time']))
                        all_verifications_correct = False
                        break
        except sqlite3.Error as e:
            print_log_info(str(e))
            continue
        if all_verifications_correct:
            # TODO Find out if this needs to be a transaction
            print_log_info("Approving assignment " + assignment_id)
            try:
                response = mturk.approve_assignment(assignment_id)
            except boto.mturk.connection.MTurkRequestError as e:
                print_log_info("MTurk verification rejected. Typically, this means the client's completion "
                               + "has not propagated through Amazon's servers.")
                print_log_info(str(e))
                # NOTE(review): this reassignment does not affect the loop
                # (it iterates the original list) -- looks like leftover
                # debugging; confirm intent.
                query_result = mturk_cur.fetchone()
                continue
            print_log_info(assignment_id + " approved. Amazon response: " + str(response))
            try:
                mturk_cur.execute('''UPDATE hits SET status='approved' WHERE hit_id=?''', (hit_id,))
                app.mturk_db_connection.commit()
            except sqlite3.Error as e:
                print_log_info(str(e))
        else:
            try:
                mturk_cur.execute('''UPDATE hits SET status='pending_manual_approval' WHERE hit_id=?''', (hit_id,))
                app.mturk_db_connection.commit()
            except sqlite3.Error as e:
                print_log_info(str(e))
    return
def expire_locked_items():
    """
    Expires locked items based on their time stamps.

    A lock older than MAX_DELAY seconds on an unfinished task is
    released so another annotator can pick the task up.
    """
    ant_tasks = app.annotation_tasks
    db_cursor = ant_tasks.cursor()
    # The 'name' and 'trim' tasks use parallel column sets, so the two
    # previously copy-pasted loops are factored into one helper.
    _expire_stale_locks(ant_tasks, db_cursor,
                        'named', 'name_locked', 'name_lock_time', 'Name')
    _expire_stale_locks(ant_tasks, db_cursor,
                        'trimmed', 'trim_locked', 'trim_lock_time', 'Trim')
    return


def _expire_stale_locks(connection, db_cursor, done_col, locked_col,
                        lock_time_col, label):
    """Release locks older than MAX_DELAY for one task type.

    Column names come from the fixed calls above (never user input),
    so interpolating them into the SQL is safe.
    """
    db_cursor.execute('''SELECT * FROM video_db WHERE {}=1 AND {}=0'''
                      .format(locked_col, done_col))
    locked_items = db_cursor.fetchall()
    for item in locked_items:
        delay = time.time() - item[lock_time_col]
        if delay > MAX_DELAY:
            print_log_info("Expiring task {:d} ({:s})".format(item['id'], label))
            try:
                db_cursor.execute('''UPDATE video_db SET {}=0, {}=?
                                  WHERE id=?'''.format(locked_col, lock_time_col),
                                  (0.0, item['id']))
                connection.commit()
            except sqlite3.Error as e:
                print_log_info(str(e))
    return
def load_annotation_tasks(video_db):
    """Open the annotation task database and return the connection.

    The video_db table holds, per clip: id, url, the naming state
    (named, name_locked, name_lock_time, named_by_user, occluded),
    the trimming state (trimmed, trim_locked, trim_lock_time,
    trimmed_by_user), source info (video_src, src_start_time,
    src_end_time, pad_start_frame, pad_end_frame), the annotations
    (start_time, end_time, action_verb, action_noun) and red_flag.
    """
    connection = sqlite3.connect(video_db)
    # Return rows as dicts so handlers can address columns by name.
    connection.row_factory = dict_factory
    return connection
def decide_if_needs_verification(json_res, mturk_db_connection):
"""
Makes the decision as to whether this request is going to be a verification video or not.
Let
a = the verification videos left
b = total number of videos left
The chance of getting a verification videos is a/b
This gives a uniform distribution of chance of getting a verification video across all requests.
Called by get_task().
:param json_res: JSON given by frontend's submit button; must have hitId key
:type json_res: dict
:param mturk_db_connection: connection to database containing mturk-related data
:type mturk_db_connection: sqlite3.Connection
:return boolean representing whether verification video will be returned
"""
print json_res
mturk_cur = mturk_db_connection.cursor()
try:
mturk_cur.execute('''SELECT verifications_total, labels_total,
verifications_completed, labels_completed FROM hits
WHERE hit_id=?''', (json_res['hitId'],))
except sqlite3.Error as e:
print_log_info(str(e))
query_result = mturk_cur.fetchone()
print_log_info(json_res['hitId'])
verifications_total, labels_total, verifications_completed, labels_completed = \
query_result["verifications_total"], query_result["labels_total"], \
query_result["verifications_completed"], query_result["labels_completed"]
chance_of_verification_video = (float(max(verifications_total - verifications_completed, 0))
/ max(verifications_total + labels_total
- verifications_completed - labels_completed, 1))
return chance_of_verification_video > random.random()
def get_verification_task(annotation_tasks, annotation_type):
    """
    Wrapper for querying database for a random verification task.
    :param annotation_tasks: connection to the annotation database
    :type annotation_tasks: sqlite3.Connection
    :param annotation_type: client-defined string for the type of the annotations we're doing
    :type annotation_type: string
    :return dict from querying database (or None when the table is empty)
    """
    db_cursor = annotation_tasks.cursor()
    # 'name_preview' tasks draw from the same pool as 'name' tasks.
    if annotation_type in ('name', 'name_preview'):
        verification_table = 'named_verification_videos'
    else:
        verification_table = 'trimmed_verification_videos'
    try:
        # Table name comes from the fixed mapping above (never user input),
        # so formatting it into the SQL is safe. The trim branch previously
        # had no error handling at all; both paths now share one try/except.
        # from https://stackoverflow.com/questions/4114940/select-random-rows-in-sqlite
        db_cursor.execute('''SELECT * FROM video_db WHERE id IN
                             (SELECT id FROM {}
                              ORDER BY RANDOM() LIMIT 1)'''.format(verification_table))
    except sqlite3.Error as e:
        print_log_info(str(e))
    return db_cursor.fetchone()
def task_completed(json_res, mturk_db_connection):
    """
    Tells whether an mturk HIT has been completed, i.e. whether no
    verifications and no labels remain to be done for it.
    :param json_res: JSON given by frontend's submit button; must have hitId key
    :type json_res: dict
    :param mturk_db_connection: connection to database containing mturk-related data
    :type mturk_db_connection: sqlite3.Connection
    :return: boolean representing if task referred to in json_res' hitId has been completed
    """
    cursor = mturk_db_connection.cursor()
    try:
        cursor.execute('''SELECT verifications_total, labels_total,
                             verifications_completed, labels_completed FROM hits
                             WHERE hit_id=?''', (json_res['hitId'],))
    except sqlite3.Error as e:
        print_log_info(str(e))
    row = cursor.fetchone()
    verifications_left = row["verifications_total"] - row["verifications_completed"]
    labels_left = row["labels_total"] - row["labels_completed"]
    return verifications_left <= 0 and labels_left <= 0
def get_next_available_task(annotation_tasks, annotation_type):
    """
    Wrapper for querying database for a new labelling task.
    Called by get_task().
    :param annotation_tasks: connection to database containing mturk-related data
    :type annotation_tasks: sqlite3.Connection
    :param annotation_type: client-defined string for the type of the annotations we're doing
    :type annotation_type: string
    :return dict from querying database, or None when no task is available
    """
    # get db cursor
    db_cursor = annotation_tasks.cursor()
    # Get the next task
    if annotation_type == 'name':
        try:
            # Naming: any clip not yet named, not locked, and not reserved
            # as a verification clip.
            db_cursor.execute('''SELECT * FROM video_db WHERE named=0
                              AND name_locked=0
                              AND id not in
                              (SELECT id from named_verification_videos)
                              ''')  # LIMIT 1 maybe?
        except sqlite3.Error as e:
            print_log_info(str(e))
    else:  # So annotation_type == 'trim'
        try:
            # Trimming: only named, unflagged clips that are not yet
            # trimmed, not locked, and not reserved for verification.
            db_cursor.execute('''SELECT * FROM video_db WHERE named=1
                              AND red_flag=0
                              AND trimmed=0
                              AND trim_locked=0
                              AND id not in
                              (SELECT id from trimmed_verification_videos)
                              ''')  # LIMIT 1 maybe?
        except sqlite3.Error as e:
            print_log_info(str(e))
    item = db_cursor.fetchone()
    # No task available
    if item is None:
        return None
    # Otherwise return a task.
    else:
        task = item
        cur_time = time.time()
        # update the lock
        # Recording the lock time lets expire_locked_items() release
        # locks that are older than MAX_DELAY.
        if annotation_type == 'name':
            try:
                db_cursor.execute('''UPDATE video_db SET name_locked=1, name_lock_time=?
                                  WHERE id=?''', (cur_time, task['id']))
            except sqlite3.Error as e:
                print_log_info(str(e))
        else:  # So annotation_type == 'trim'
            try:
                db_cursor.execute('''UPDATE video_db SET trim_locked=1, trim_lock_time=?
                                  WHERE id=?''', (cur_time, task['id']))
            except sqlite3.Error as e:
                print_log_info(str(e))
        annotation_tasks.commit()
        return task
def update_task(mturk_db_connection, annotation_tasks, json_res, is_mturk):
    """
    Updates the data for a labelling task plus relevant mturk variables if it's an mturk task.
    :param mturk_db_connection: connection to database containing mturk-related data
    :type mturk_db_connection: sqlite3.Connection
    :param annotation_tasks: connection to database containing the annotation tasks
    :type annotation_tasks: sqlite3.Connection
    :param json_res: JSON given by frontend's submit button; must have hitId key
    :type json_res: dict
    :param is_mturk: indicates if the submission came from an MTurk iFrame
    :type is_mturk: bool
    :return: True on success, False if a database error occurred
    """
    # get db cursor
    db_cursor = annotation_tasks.cursor()
    mturk_cur = mturk_db_connection.cursor()
    # get annotation_type and video id
    ant_type = json_res['annotation_type']
    # Update naming task
    if ant_type == 'name':
        try:
            # Decide if video we are updating is a verification video
            db_cursor.execute('''SELECT * FROM named_verification_videos where id=?''',
                              (json_res['id'],))  # todo find out if is good query
            is_verification = not (db_cursor.fetchone() is None)
            # Apply new label if it isn't a verification video: verification
            # videos already have a known-good answer and must not be relabelled.
            if not is_verification:
                # NOTE(review): red_flag is multiplied by 1 here but by 2 in
                # the trim branch -- presumably encodes which stage flagged
                # the video; confirm against consumers of red_flag.
                update_item = (int(json_res['occluded']),
                               json_res['nouns'], json_res['verb'],
                               json_res['user_name'], int(json_res['red_flag'])*1,
                               int(json_res['id']))
                # Store the label and release the lock taken at hand-out time.
                db_cursor.execute('''UPDATE video_db
                                  SET named=1, name_locked=0, occluded=?,
                                  action_noun=?, action_verb=?, named_by_user=?, red_flag=?
                                  WHERE id=?''', update_item)
            # Update MTurk database to reflect this change
            if is_mturk and is_verification:
                # Count the verification towards the HIT's quota and log the
                # attempt so it can be checked against the known answer later.
                mturk_cur.execute('''UPDATE hits SET assignment_id=?, worker_id=?,
                                  verifications_completed = verifications_completed + 1
                                  WHERE hit_id=?''', (json_res['assignmentId'],
                                  json_res['workerId'], json_res['hitId']))
                mturk_cur.execute('''INSERT INTO name_verification_attempts(
                                  hit_id, assignment_id, worker_id,
                                  id, action_noun, action_verb)
                                  VALUES (?,?,?,?,?,?)''', (json_res['hitId'],
                                  json_res['assignmentId'], json_res['workerId'],
                                  json_res['id'], json_res['nouns'], json_res['verb']))
                mturk_db_connection.commit()
            elif is_mturk and not is_verification:
                # Regular MTurk label: count it towards the HIT's label quota.
                print(json_res['assignmentId'],
                      json_res['workerId'], json_res['hitId'])
                mturk_cur.execute('''UPDATE hits SET assignment_id=?, worker_id=?,
                                  labels_completed = labels_completed + 1
                                  WHERE hit_id=?''', (json_res['assignmentId'],
                                  json_res['workerId'], json_res['hitId']))
                mturk_db_connection.commit()
            annotation_tasks.commit()
        except sqlite3.Error as e:
            print_log_info(str(e))
            return False
    else:  # ie. it's a trimming task
        try:
            # Decide if video we are updating is a verification video
            db_cursor.execute('''SELECT * FROM trimmed_verification_videos where id=?''',
                              (json_res['id'],))  # todo find out if is good query
            is_verification = not (db_cursor.fetchone() is None)
            # Apply new label if it isn't a verification video
            if not is_verification:
                update_item = (float(json_res['start_time']),
                               float(json_res['end_time']),
                               json_res['user_name'], int(json_res['red_flag'])*2,
                               int(json_res['id']))
                # Store the trim points and release the lock.
                db_cursor.execute('''UPDATE video_db
                                  SET trimmed=1, trim_locked=0,
                                  start_time=?, end_time=?, trimmed_by_user=?, red_flag=?
                                  WHERE id=?''', update_item)
            # Update MTurk database to reflect this change
            if is_mturk and is_verification:
                mturk_cur.execute('''UPDATE hits SET assignment_id=?, worker_id=?,
                                  verifications_completed = verifications_completed + 1
                                  WHERE hit_id=?''', (json_res['assignmentId'],
                                  json_res['workerId'], json_res['hitId']))
                mturk_cur.execute('''INSERT INTO trim_verification_attempts(
                                  hit_id, assignment_id, worker_id,
                                  id, start_time, end_time)
                                  VALUES (?,?,?,?,?,?)''', (json_res['hitId'],
                                  json_res['assignmentId'], json_res['workerId'],
                                  json_res['id'], float(json_res['start_time']),
                                  float(json_res['end_time'])))
                mturk_db_connection.commit()
            elif is_mturk and not is_verification:
                print(json_res['assignmentId'],
                      json_res['workerId'], json_res['hitId'])
                mturk_cur.execute('''UPDATE hits SET assignment_id=?, worker_id=?,
                                  labels_completed = labels_completed + 1
                                  WHERE hit_id=?''', (json_res['assignmentId'],
                                  json_res['workerId'], json_res['hitId']))
                mturk_db_connection.commit()
            annotation_tasks.commit()
        except sqlite3.Error as e:
            print_log_info(str(e))
            return False
    # color print the red flag
    if json_res['red_flag']:
        print_log_info('\033[93m' + "Task ID ({:d}) Type ({:s}) has been RED_FLAGGED!".format(
            json_res['id'], ant_type) + '\033[0m')
    # return
    return True
@app.errorhandler(404)
def not_found(error):
    """Render any unknown route as a JSON error payload with HTTP 404."""
    payload = json.dumps({'error': str(error)})
    return flask.make_response(payload, 404)
@app.route('/get_task', methods=['POST'])
def get_task():
    """
    Get a task from the server
    A request is a json file with the following fields:
    - "annotation_type" which can have the values...
        - name
        - name_preview
        - trim
        - trim_preview
    - "user_name"
    If it is a request from an MTurk iFrame, it also has the following:
    - "workerId"
    - "hitId"
    Returns JSON: either the task description, or
    {"code": <negative int>, "error_msg": <string>} on failure.
    """
    # Dict holds the results to return to client
    ret = {}
    # Make sure the content type is json
    try:
        request_type = flask.request.headers.get('Content-Type')
        if request_type != 'application/json':
            raise ValueError('request type must be JSON')
        request_data = flask.request.get_data()
    except ValueError as err:
        ret['code'] = -1
        ret['error_msg'] = str(err)
        return json.dumps(ret)
    except:
        # catch-all so a malformed request can never take the server down
        ret['code'] = -2
        ret['error_msg'] = 'unknown parameter error'
        return json.dumps(ret)
    # Decode json from request data into a dict, and make sure all required data is present
    try:
        json_file = json.JSONDecoder().decode(request_data)
        # NOTE(review): "{:s}".format(dict) raises TypeError on Python 3, and
        # decode() expects str not bytes -- presumably this targets Python 2;
        # confirm the deployment interpreter.
        print_log_info("Task request: {:s}".format(json_file))
        # An MTurk request is identified by carrying all three MTurk IDs.
        is_mturk = "assignmentId" in json_file and "workerId" in json_file and \
                   "hitId" in json_file
        if 'annotation_type' not in json_file:
            raise ValueError('annotation_type missing in request')
        else:
            # more sanity check
            ant_type = json_file['annotation_type']
            if not ((ant_type == 'name') or (ant_type == 'trim')
                    or (ant_type == 'name_preview') or (ant_type == 'trim_preview')):
                raise ValueError('unknown annotation_type')
    except ValueError as err:
        ret['code'] = -3
        ret['error_msg'] = str(err)
        return json.dumps(ret)
    # Decide if we need a verification task: previews always get one; MTurk
    # labelling requests get one while their HIT still owes verifications.
    if ant_type == 'name_preview' or ant_type == 'trim_preview':
        needs_verification_task = True
    elif ((ant_type == 'name'
           or ant_type == 'trim')
          and is_mturk):
        needs_verification_task = \
            decide_if_needs_verification(json_file, app.mturk_db_connection)
    else:
        needs_verification_task = False
    # Get a verification task or next available task, and return to user
    try:
        if needs_verification_task:
            task = get_verification_task(app.annotation_tasks, ant_type)
        else:
            task = get_next_available_task(app.annotation_tasks, ant_type)
        if not task:
            raise ValueError('can not get a valid task. please re-try.')
        else:
            # NOTE(review): task must be JSON-serializable for the dumps below;
            # verify what get_verification_task/get_next_available_task return.
            ret = task
    except ValueError as err:
        ret['code'] = -1
        ret['error_msg'] = str(err)
        return json.dumps(ret)
    return json.dumps(ret)
@app.route('/return_task', methods=['POST'])
def return_task():
    """
    Processes the JSON sent from the client to submit a label
    JSON has the following fields:
    - id, which is the video ID
    - annotation_type, which can be "name" or "trim"
    - user_name
    If the request is coming from an mturk iFrame, it should have:
    - assignmentId
    - workerId
    - hitId
    If annotation_type is "name", it should have the following:
    - verb, a string representing the word selected from the dropdown menu
    - occluded, a boolean from the checkbox in the page
    - nouns, a string filled out by the user for the objects being handled
    If annotation_type is "trim", update_task() additionally reads
    start_time and end_time from the payload.
    Returns JSON with "code"/"error_msg" and, on success, "more_to_complete".
    """
    # Dict holds the results to return to client
    ret = {}
    try:
        # make sure the content type is json
        request_type = flask.request.headers.get('Content-Type')
        if request_type != 'application/json':
            raise ValueError('request type must be JSON')
        request_data = flask.request.get_data()
    except ValueError as err:
        ret['code'] = -1
        ret['error_msg'] = str(err)
        return json.dumps(ret)
    except:
        # catch-all so a malformed request can never take the server down
        ret['code'] = -2
        ret['error_msg'] = 'unknown parameter error'
        return json.dumps(ret)
    # decode json from request data into a dict
    try:
        json_file = json.JSONDecoder().decode(request_data)
        # NOTE(review): "{:s}".format(dict) raises TypeError on Python 3 --
        # see the matching note in get_task(); confirm interpreter version.
        print_log_info("Task returned: {:s}".format(json_file))
        if 'annotation_type' not in json_file:
            raise ValueError('annotation_type missing in request')
        if 'id' not in json_file:
            raise ValueError('id missing in request')
        else:
            # more sanity check
            ant_type = json_file['annotation_type']
            if not ((ant_type == 'name') or (ant_type == 'trim')):
                raise ValueError('unknown annotation_type')
    except ValueError as err:
        ret['code'] = -3
        ret['error_msg'] = str(err)
        return json.dumps(ret)
    # An MTurk submission carries all three MTurk identifiers.
    is_mturk = "assignmentId" in json_file and "workerId" in json_file and \
               "hitId" in json_file
    # Persist the submitted label (and MTurk bookkeeping if applicable)
    try:
        flag = update_task(app.mturk_db_connection, app.annotation_tasks, json_file, is_mturk)
        if not flag:
            raise ValueError('can not update the task. Please re-try.')
        else:
            ret['code'] = 0
            ret['error_msg'] = 'success'
    except ValueError as err:
        ret['code'] = -3
        ret['error_msg'] = str(err)
        return json.dumps(ret)
    # The worker has more to do unless this was an MTurk HIT whose label and
    # verification quotas are now fully met (see task_completed()).
    more_to_complete = not is_mturk or \
                       not task_completed(json_file, app.mturk_db_connection)
    if not more_to_complete:
        # HIT finished: flag the assignment so approve_assignments() pays it.
        try:
            mturk_db_connection = app.mturk_db_connection
            mturk_cur = mturk_db_connection.cursor()
            mturk_cur.execute('''UPDATE hits SET status='pending_approval' WHERE assignment_id=?''',
                              (json_file["assignmentId"],))
            mturk_db_connection.commit()
        except sqlite3.Error as err:
            ret['code'] = -3
            ret['error_msg'] = str(err)
            return json.dumps(ret)
    ret['more_to_complete'] = more_to_complete
    return json.dumps(ret)
@app.route('/hello')
def hello():
    """Trivial liveness endpoint used to verify the server is up."""
    greeting = 'hello world'
    return greeting
def parse_args():
    """Build the command-line interface and parse sys.argv into a Namespace."""
    ap = argparse.ArgumentParser(description='Setup a web server for video annotation')
    ap.add_argument('--port', dest='port', type=int, default=5050,
                    help='which port to serve content on')
    ap.add_argument('--video_db', dest='video_db', type=str, default='video_db.db',
                    help='SQLite3 database with normal videos')
    ap.add_argument('--mturk_db', dest='mturk_db', type=str, default='mturk_db.db',
                    help='SQLite3 database with logs for mturk')
    ap.add_argument('--sandbox', dest='sandbox', action='store_true', default=False,
                    help='If this is a sandbox HIT (otherwise is a real one)')
    ap.add_argument('--aws_key_id', dest='aws_access_key_id', type=str, default='',
                    help='AWS Access Key ID')
    ap.add_argument('--aws_key', dest='aws_secret_access_key', type=str, default='',
                    help='AWS Secret Access Key')
    ap.add_argument('--certfile', dest='certfile', type=str, default='',
                    help='SSL certfile location')
    ap.add_argument('--keyfile', dest='keyfile', type=str, default='',
                    help='SSL keyfile location')
    return ap.parse_args()
def start_from_terminal():
    """
    Entry point: parse CLI args, open the databases, and run the Tornado
    server (blocks forever in the IOLoop).
    """
    # parse params
    args = parse_args()
    # load annotation tasks
    app.annotation_tasks = load_annotation_tasks(args.video_db)
    app.mturk_db_connection = load_annotation_tasks(args.mturk_db)
    # Set global variables
    app.aws_access_key_id = args.aws_access_key_id
    app.aws_secret_access_key = args.aws_secret_access_key
    app.sandbox = args.sandbox
    # start server without cert if none provided; otherwise serve HTTPS
    if args.certfile == '' and args.keyfile == '':
        server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app))
    else:
        server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app), ssl_options={
            "certfile": args.certfile,
            "keyfile": args.keyfile,
        })
    server.bind(args.port)
    # setup exit function: close both DB connections cleanly on shutdown
    def save_db():
        app.annotation_tasks.close()
        app.mturk_db_connection.close()
    import atexit
    atexit.register(save_db)
    # set up one server process
    server.start(1)
    print_log_info("Tornado server starting on port {}".format(args.port))
    # show stats every time we launch the service
    collect_db_stats()
    approve_assignments()
    # Periodic maintenance: reclaim stale task locks and approve finished
    # assignments every 20 s; refresh DB stats hourly.
    tornado.ioloop.PeriodicCallback(expire_locked_items, 20*1000).start()
    tornado.ioloop.PeriodicCallback(collect_db_stats, 3600*1000).start()
    tornado.ioloop.PeriodicCallback(approve_assignments, 20*1000).start()
    tornado.ioloop.IOLoop.current().start()
if __name__ == '__main__':
start_from_terminal() | {
"repo_name": "happyharrycn/vatic_fpv",
"path": "simple_vatic/server/web_app.py",
"copies": "1",
"size": "32608",
"license": "mit",
"hash": 3825575773130755600,
"line_mean": 39.7103620474,
"line_max": 130,
"alpha_frac": 0.564892051,
"autogenerated": false,
"ratio": 4.161306789178152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002543369206185058,
"num_lines": 801
} |
"""A simple web server that accepts POSTS containing a list of feed urls,
and returns the titles of those feeds.
"""
import eventlet
feedparser = eventlet.import_patched('feedparser')
# the pool provides a safety limit on our concurrency
pool = eventlet.GreenPool()
def fetch_title(url):
    """Parse *url* as a feed and return its title ('' if it has none)."""
    parsed = feedparser.parse(url)
    return parsed.feed.get('title', '')
def app(environ, start_response):
    """WSGI app: accept POSTed feed URLs (one per line), respond with their
    titles, fetching all feeds concurrently."""
    if environ['REQUEST_METHOD'] != 'POST':
        start_response('403 Forbidden', [])
        return []
    # The GreenPile gathers the results of the concurrent fetches, with
    # overall concurrency bounded by the shared pool.
    pile = eventlet.GreenPile(pool)
    for raw_line in environ['wsgi.input'].readlines():
        feed_url = raw_line.strip()
        if feed_url:
            pile.spawn(fetch_title, feed_url)
    # Iterating the pile yields each result; joining blocks until all of the
    # spawned fetches have completed.
    body = '\n'.join(pile)
    start_response('200 OK', [('Content-type', 'text/plain')])
    return [body]
if __name__ == '__main__':
    from eventlet import wsgi
    # Serve the app on localhost:9010 until interrupted.
    wsgi.server(eventlet.listen(('localhost', 9010)), app)
| {
"repo_name": "cloudera/hue",
"path": "desktop/core/ext-py/eventlet-0.24.1/examples/feedscraper.py",
"copies": "10",
"size": "1125",
"license": "apache-2.0",
"hash": -5100363753745846000,
"line_mean": 29.4054054054,
"line_max": 77,
"alpha_frac": 0.656,
"autogenerated": false,
"ratio": 3.800675675675676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 37
} |
"""A simple web server that accepts POSTS containing a list of feed urls,
and returns the titles of those feeds.
"""
import evy
feedparser = evy.import_patched('feedparser')
# the pool provides a safety limit on our concurrency
pool = evy.GreenPool()
def fetch_title(url):
    """Parse *url* as a feed and return its title ('' if it has none)."""
    parsed = feedparser.parse(url)
    return parsed.feed.get('title', '')
def app(environ, start_response):
    """WSGI app: accept POSTed feed URLs (one per line), respond with their
    titles, fetching all feeds concurrently."""
    if environ['REQUEST_METHOD'] != 'POST':
        start_response('403 Forbidden', [])
        return []
    # The GreenPile gathers the results of the concurrent fetches, with
    # overall concurrency bounded by the shared pool.
    pile = evy.GreenPile(pool)
    for raw_line in environ['wsgi.input'].readlines():
        feed_url = raw_line.strip()
        if feed_url:
            pile.spawn(fetch_title, feed_url)
    # Iterating the pile yields each result; joining blocks until all of the
    # spawned fetches have completed.
    body = '\n'.join(pile)
    start_response('200 OK', [('Content-type', 'text/plain')])
    return [body]
if __name__ == '__main__':
    from evy import wsgi
    # Serve the app on localhost:9010 until interrupted.
    wsgi.server(evy.listen(('localhost', 9010)), app)
| {
"repo_name": "inercia/evy",
"path": "examples/feedscraper.py",
"copies": "1",
"size": "1102",
"license": "mit",
"hash": 4288047885381993500,
"line_mean": 28,
"line_max": 77,
"alpha_frac": 0.6424682396,
"autogenerated": false,
"ratio": 3.6490066225165565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9776273816023061,
"avg_score": 0.003040209218698921,
"num_lines": 38
} |
#a simple web server to replace the buggy one in python's standard library
import os, os.path, sys, time, socket, traceback, stat
from math import *
INDEXFILE = "/bluenoise6.html"  # document served for "/" requests
PORT = 8080  # TCP port the server listens on
# Filename-suffix -> MIME-type table for the files this server hands out.
# NOTE: the name shadows the stdlib `mimetypes` module (not imported here).
mimetypes = {
    ".js": "application/x-javascript",
    ".html": "text/html",
    ".json": "application/x-javascript",
    ".glsl": "text/plain",
    ".png": "image/png",
    ".jpg": "image/jpeg",
    ".obj": "text/plain"
}

def get_mime(path):
    """Return the MIME type for *path* by its suffix (case-insensitive).

    Unknown suffixes fall back to the generic binary type.
    """
    path = path.strip().lower()
    for suffix, mime in mimetypes.items():
        if path.endswith(suffix):
            return mime
    # The original returned the nonstandard "application/x-octet-stream";
    # the registered generic binary type (RFC 2046) has no "x-" prefix.
    return "application/octet-stream"
class SocketFile:
    """Non-blocking buffered file-like wrapper around a socket.

    write() only appends to an internal buffer and read() only consumes the
    internal receive buffer; __next__() must be called regularly to pump
    bytes between the buffers and the underlying socket.
    """

    def __init__(self, con):
        self.sock = con
        self.writebuf = b""  # bytes queued for sending
        self.readbuf = b""   # bytes received but not yet consumed
        con.setblocking(False)

    def __next__(self):
        """Pump the socket once: receive into readbuf and flush up to wsize
        bytes of writebuf.  Always returns None (never raises StopIteration);
        this is a pump step, not real iterator protocol.
        """
        bsize = 2048    # max bytes received per pump
        wsize = 1024*8  # max bytes offered to send() per pump
        try:
            # Original hard-coded recv(2048), ignoring bsize.
            self.readbuf += self.sock.recv(bsize)
        except BlockingIOError:
            pass  # no data pending
        try:
            pending = self.writebuf
            if len(pending) > wsize:
                pending = pending[:wsize]
            # BUG FIX: send() may transmit fewer bytes than offered; the
            # original discarded the whole chunk regardless, silently losing
            # any unsent tail.  Only drop what was actually sent.
            sent = self.sock.send(pending)
            self.writebuf = self.writebuf[sent:]
        except BlockingIOError:
            pass  # socket not writable right now

    def write(self, buf):
        """Queue *buf* to be sent on later pumps."""
        self.writebuf += buf

    def read(self, max=2048):
        """Consume and return up to *max* bytes already received."""
        buf = self.readbuf
        if len(buf) > max:
            buf = buf[:max]
            self.readbuf = self.readbuf[max:]
        else:
            self.readbuf = b""
        return buf
def Connection(con, addr, cls):
    """Generator coroutine serving one client socket.

    Each __next__() advances the connection one step: bytes are accumulated
    until a full HTTP header block (terminated by CRLFCRLF) has arrived, then
    the request line and headers are parsed and dispatched to an instance of
    *cls* via its do_<METHOD>() method.  Socket errors propagate out and the
    Server drops the connection.
    """
    con.setblocking(False)
    file = SocketFile(con)
    while 1:
        sbuf = b""
        yield
        # Pump the socket and accumulate until the header block is complete.
        while 1:
            file.__next__()
            buf = file.read()
            yield 1
            # NOTE(review): an empty read can also mean the peer closed the
            # socket; this loop then relies on a later socket error to end
            # the connection -- confirm there is no busy-wait on half-closed
            # peers.
            if (len(buf) == 0): continue;
            sbuf += buf
            if b"\r\n\r\n" in sbuf:
                break;
        # First CRLF-separated line is the request line, the rest are headers.
        lines = [l.strip() for l in sbuf.split(b"\r\n")]
        method = lines[0];
        headers = {}
        # Split "<METHOD> <path> HTTP/x.y" into its pieces.
        path = method[method.find(b" ")+1:method.find(b" HTTP")].strip()
        method = method[:method.find(b" ")]
        print(str(method + b" " + path, "latin-1"))
        # Parse "Key: value" header lines into a str -> str dict.
        for l in lines[1:]:
            key = l[:l.find(b":")].strip()
            val = l[l.find(b":")+1:].strip()
            #print(key, val)
            headers[str(key, "latin-1")] = str(val, "latin-1")
        # Build a fresh handler for this request and dispatch by HTTP method.
        h = cls()
        h.path = str(path, "latin-1")
        h.method = str(method, "latin-1")
        h.headers = headers
        h.rfile = file
        h.wfile = file
        getattr(h, "do_"+str(method, "latin-1").strip())()
        #print("\n")
        yield
class Server:
    """Single-threaded polling HTTP server.

    Accepts connections on a non-blocking listen socket and round-robins over
    the live Connection generators, pumping each a fixed number of steps per
    loop iteration.
    """
    def __init__(self, addr, cls):
        # cls is the handler class instantiated per request by Connection().
        self.connections = []
        self.addr = addr
        self.cls = cls
        self.socket = socket.socket()
    def start(self):
        """Bind, listen, and run the accept/pump loop forever."""
        self.socket.bind(self.addr)
        self.socket.listen(10)
        sock = self.socket
        sock.setblocking(False)
        while 1:
            dellist = []
            # Accept at most one new connection per loop iteration.
            try:
                ret = sock.accept()
                #print(ret[1])
                con = Connection(ret[0], ret[1], self.cls)
                self.connections.append(con)
            except BlockingIOError:
                pass
            # Pump each connection; exhausted or failing generators are
            # collected and dropped below.
            for con in self.connections:
                try:
                    for i in range(45):
                        con.__next__()
                except StopIteration:
                    print(" connection closed")
                    dellist.append(con)
                except:
                    # Deliberately broad: any per-connection failure is logged
                    # and that connection discarded, keeping the server alive.
                    traceback.print_exc()
                    dellist.append(con);
            for con in dellist:
                self.connections.remove(con)
            # Short sleep to avoid a hot spin loop (~420 iterations/second).
            time.sleep(1.0/420.0)
        pass
def bs(s):
    """Coerce *s* to bytes; non-bytes values are str()-ified, latin-1 encoded."""
    if isinstance(s, bytes):  # idiomatic type check (was: type(s) == bytes)
        return s
    return bytes(str(s), "latin-1")

class Handler:
    """Per-request HTTP handler.

    Connection() fills in path/method/headers/rfile/wfile and then invokes
    do_<METHOD>().
    """
    def __init__(self):
        self.path = ""          # request path, e.g. "/index.html"
        self.headers = {}       # parsed request headers (str -> str)
        self.wfile = None       # write side of the connection
        self.rfile = None       # read side of the connection
        self.send_headers = []  # response headers queued via add_header()
        self.body = b""         # response body set via set_body()
        self.code = 200
    def send_response(self, code):
        """Serialize the status line, headers, and body into wfile."""
        self.body = bs(self.body)
        buf = b"HTTP/1.1 " + bs(code) + b" None\r\n"
        had_content = False
        headers = [
            [b"Connection", b"keep-alive"]
        ] + self.send_headers
        for h in headers:
            if h[0] == b"Content-length":
                had_content = True
            buf += bs(h[0]) + b":" + b" " + bs(h[1]) + b"\r\n"
        if not had_content:
            # Content-length is required for keep-alive connections.
            buf += b"Content-length: " + bs(len(self.body)) + b"\r\n"
        buf += b"\r\n"
        buf += self.body
        self.wfile.write(buf)
    def add_header(self, key, val):
        """Queue a response header; both parts are coerced to bytes."""
        self.send_headers.append([bs(key), bs(val)])
    def set_body(self, body):
        """Set the response body (coerced to bytes at send time)."""
        self.body = body
    def send_error(self, error):
        """Send a plain-text error response with HTTP status *error*."""
        body = b"Error: " + bs(error)
        # BUG FIX: "MimeType" is not an HTTP header; browsers only honor
        # Content-Type, so the served type was previously never communicated.
        self.add_header("Content-Type", "text/plain")
        self.set_body(body)
        self.send_response(error)
    def do_GET(self):
        """Serve a file from the current working directory."""
        path = self.path.strip()
        base = os.getcwd()
        if path == "/" or path == "":
            path = INDEXFILE
        abspath = os.path.abspath(os.path.normpath(base + os.path.sep + path))
        # Refuse paths escaping the serving directory; requiring the trailing
        # separator also rejects sibling dirs that merely share the prefix
        # (e.g. "/srvX" when serving "/srv").
        if not abspath.startswith(base + os.path.sep):
            self.send_error(404)
            return
        if not os.path.exists(abspath):
            self.send_error(404)
            return
        st = os.stat(abspath)
        if stat.S_ISDIR(st.st_mode):
            self.send_error(405)
            return
        # Context manager guarantees the file is closed even if read() fails.
        with open(abspath, "rb") as f:
            buf = f.read()
        self.set_body(buf)
        self.add_header("Content-Type", get_mime(path))
        self.send_response(200)
# Instantiate and run the server on all interfaces; start() never returns.
server = Server(("", PORT), Handler)
print("serving at port", PORT)
server.start()
| {
"repo_name": "joeedh/BlueNoiseStippling",
"path": "serv.py",
"copies": "1",
"size": "5248",
"license": "mit",
"hash": -6771048206116360000,
"line_mean": 20.3333333333,
"line_max": 74,
"alpha_frac": 0.5322027439,
"autogenerated": false,
"ratio": 3.265712507778469,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8974656940680281,
"avg_score": 0.0646516621996376,
"num_lines": 246
} |
#a simple web server to replace the buggy one in python's standard library
import os, os.path, sys, time, socket, traceback, stat
from math import *
INDEXFILE = "/main.html"  # document served for "/" requests
PORT = 8079  # TCP port the server listens on
# Filename-suffix -> MIME-type table for the files this server hands out.
# NOTE: the name shadows the stdlib `mimetypes` module (not imported here).
mimetypes = {
    ".js": "application/x-javascript",
    ".html": "text/html",
    ".json": "application/x-javascript",
    ".glsl": "text/plain",
    ".png": "image/png",
    ".jpg": "image/jpeg",
    ".obj": "text/plain"
}

def get_mime(path):
    """Return the MIME type for *path* by its suffix (case-insensitive).

    Unknown suffixes fall back to the generic binary type.
    """
    path = path.strip().lower()
    for suffix, mime in mimetypes.items():
        if path.endswith(suffix):
            return mime
    # The original returned the nonstandard "application/x-octet-stream";
    # the registered generic binary type (RFC 2046) has no "x-" prefix.
    return "application/octet-stream"
class SocketFile:
    """Non-blocking buffered file-like wrapper around a socket.

    write() only appends to an internal buffer and read() only consumes the
    internal receive buffer; __next__() must be called regularly to pump
    bytes between the buffers and the underlying socket.
    """

    def __init__(self, con):
        self.sock = con
        self.writebuf = b""  # bytes queued for sending
        self.readbuf = b""   # bytes received but not yet consumed
        con.setblocking(False)

    def __next__(self):
        """Pump the socket once: receive into readbuf and flush up to wsize
        bytes of writebuf.  Always returns None (never raises StopIteration);
        this is a pump step, not real iterator protocol.
        """
        bsize = 2048    # max bytes received per pump
        wsize = 1024*8  # max bytes offered to send() per pump
        try:
            # Original hard-coded recv(2048), ignoring bsize.
            self.readbuf += self.sock.recv(bsize)
        except BlockingIOError:
            pass  # no data pending
        try:
            pending = self.writebuf
            if len(pending) > wsize:
                pending = pending[:wsize]
            # BUG FIX: send() may transmit fewer bytes than offered; the
            # original discarded the whole chunk regardless, silently losing
            # any unsent tail.  Only drop what was actually sent.
            sent = self.sock.send(pending)
            self.writebuf = self.writebuf[sent:]
        except BlockingIOError:
            pass  # socket not writable right now

    def write(self, buf):
        """Queue *buf* to be sent on later pumps."""
        self.writebuf += buf

    def read(self, max=2048):
        """Consume and return up to *max* bytes already received."""
        buf = self.readbuf
        if len(buf) > max:
            buf = buf[:max]
            self.readbuf = self.readbuf[max:]
        else:
            self.readbuf = b""
        return buf
def Connection(con, addr, cls):
    """Generator coroutine serving one client socket.

    Each __next__() advances the connection one step: bytes are accumulated
    until a full HTTP header block (terminated by CRLFCRLF) has arrived, then
    the request line and headers are parsed and dispatched to an instance of
    *cls* via its do_<METHOD>() method.  Socket errors propagate out and the
    Server drops the connection.
    """
    con.setblocking(False)
    file = SocketFile(con)
    while 1:
        sbuf = b""
        yield
        # Pump the socket and accumulate until the header block is complete.
        while 1:
            file.__next__()
            buf = file.read()
            yield 1
            # NOTE(review): an empty read can also mean the peer closed the
            # socket; this loop then relies on a later socket error to end
            # the connection -- confirm there is no busy-wait on half-closed
            # peers.
            if (len(buf) == 0): continue;
            sbuf += buf
            if b"\r\n\r\n" in sbuf:
                break;
        # First CRLF-separated line is the request line, the rest are headers.
        lines = [l.strip() for l in sbuf.split(b"\r\n")]
        method = lines[0];
        headers = {}
        # Split "<METHOD> <path> HTTP/x.y" into its pieces.
        path = method[method.find(b" ")+1:method.find(b" HTTP")].strip()
        method = method[:method.find(b" ")]
        print(str(method + b" " + path, "latin-1"))
        # Parse "Key: value" header lines into a str -> str dict.
        for l in lines[1:]:
            key = l[:l.find(b":")].strip()
            val = l[l.find(b":")+1:].strip()
            #print(key, val)
            headers[str(key, "latin-1")] = str(val, "latin-1")
        # Build a fresh handler for this request and dispatch by HTTP method.
        h = cls()
        h.path = str(path, "latin-1")
        h.method = str(method, "latin-1")
        h.headers = headers
        h.rfile = file
        h.wfile = file
        getattr(h, "do_"+str(method, "latin-1").strip())()
        #print("\n")
        yield
class Server:
    """Single-threaded polling HTTP server.

    Accepts connections on a non-blocking listen socket and round-robins over
    the live Connection generators, pumping each a fixed number of steps per
    loop iteration.
    """
    def __init__(self, addr, cls):
        # cls is the handler class instantiated per request by Connection().
        self.connections = []
        self.addr = addr
        self.cls = cls
        self.socket = socket.socket()
    def start(self):
        """Bind, listen, and run the accept/pump loop forever."""
        self.socket.bind(self.addr)
        self.socket.listen(10)
        sock = self.socket
        sock.setblocking(False)
        while 1:
            dellist = []
            # Accept at most one new connection per loop iteration.
            try:
                ret = sock.accept()
                #print(ret[1])
                con = Connection(ret[0], ret[1], self.cls)
                self.connections.append(con)
            except BlockingIOError:
                pass
            # Pump each connection; exhausted or failing generators are
            # collected and dropped below.
            for con in self.connections:
                try:
                    for i in range(5):
                        con.__next__()
                except StopIteration:
                    print(" connection closed")
                    dellist.append(con)
                except:
                    # Deliberately broad: any per-connection failure is logged
                    # and that connection discarded, keeping the server alive.
                    traceback.print_exc()
                    dellist.append(con);
            for con in dellist:
                self.connections.remove(con)
            # Short sleep to avoid a hot spin loop (~420 iterations/second).
            time.sleep(1.0/420.0)
        pass
def bs(s):
    """Coerce *s* to bytes; non-bytes values are str()-ified, latin-1 encoded."""
    if isinstance(s, bytes):  # idiomatic type check (was: type(s) == bytes)
        return s
    return bytes(str(s), "latin-1")

class Handler:
    """Per-request HTTP handler.

    Connection() fills in path/method/headers/rfile/wfile and then invokes
    do_<METHOD>().
    """
    def __init__(self):
        self.path = ""          # request path, e.g. "/index.html"
        self.headers = {}       # parsed request headers (str -> str)
        self.wfile = None       # write side of the connection
        self.rfile = None       # read side of the connection
        self.send_headers = []  # response headers queued via add_header()
        self.body = b""         # response body set via set_body()
        self.code = 200
    def send_response(self, code):
        """Serialize the status line, headers, and body into wfile."""
        self.body = bs(self.body)
        buf = b"HTTP/1.1 " + bs(code) + b" None\r\n"
        had_content = False
        headers = [
            [b"Connection", b"keep-alive"]
        ] + self.send_headers
        for h in headers:
            if h[0] == b"Content-length":
                had_content = True
            buf += bs(h[0]) + b":" + b" " + bs(h[1]) + b"\r\n"
        if not had_content:
            # Content-length is required for keep-alive connections.
            buf += b"Content-length: " + bs(len(self.body)) + b"\r\n"
        buf += b"\r\n"
        buf += self.body
        self.wfile.write(buf)
    def add_header(self, key, val):
        """Queue a response header; both parts are coerced to bytes."""
        self.send_headers.append([bs(key), bs(val)])
    def set_body(self, body):
        """Set the response body (coerced to bytes at send time)."""
        self.body = body
    def send_error(self, error):
        """Send a plain-text error response with HTTP status *error*."""
        body = b"Error: " + bs(error)
        # BUG FIX: "MimeType" is not an HTTP header; browsers only honor
        # Content-Type, so the served type was previously never communicated.
        self.add_header("Content-Type", "text/plain")
        self.set_body(body)
        self.send_response(error)
    def do_GET(self):
        """Serve a file from the current working directory."""
        path = self.path.strip()
        base = os.getcwd()
        if path == "/" or path == "":
            path = INDEXFILE
        abspath = os.path.abspath(os.path.normpath(base + os.path.sep + path))
        # Refuse paths escaping the serving directory; requiring the trailing
        # separator also rejects sibling dirs that merely share the prefix
        # (e.g. "/srvX" when serving "/srv").
        if not abspath.startswith(base + os.path.sep):
            self.send_error(404)
            return
        if not os.path.exists(abspath):
            self.send_error(404)
            return
        st = os.stat(abspath)
        if stat.S_ISDIR(st.st_mode):
            self.send_error(405)
            return
        # Context manager guarantees the file is closed even if read() fails.
        with open(abspath, "rb") as f:
            buf = f.read()
        self.set_body(buf)
        self.add_header("Content-Type", get_mime(path))
        self.send_response(200)
# Instantiate and run the server on all interfaces; start() never returns.
server = Server(("", PORT), Handler)
print("serving at port", PORT)
server.start()
| {
"repo_name": "joeedh/small-loader.es6",
"path": "serv.py",
"copies": "1",
"size": "5241",
"license": "mit",
"hash": 7091665429418229000,
"line_mean": 20.3048780488,
"line_max": 74,
"alpha_frac": 0.5315779431,
"autogenerated": false,
"ratio": 3.269494697442296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43010726405422955,
"avg_score": null,
"num_lines": null
} |
"""A simple web spider implementation."""
__all__ = [
'Document',
'Spider',
'Parser',
]
import functools
import logging
import typing
from garage.collections import NamedTuple
from garage.http import clients
from garage.threads import queues
from garage.threads import supervisors
from garage.threads import tasklets
from garage.threads import utils
LOG = logging.getLogger(__name__)
# Your parser may return a compatible class of this (duck typing).
class Document(NamedTuple):
    """A parsed web document: a stable identity plus its outgoing links."""

    # The unique identity of this document (multiple URIs may point to
    # the same document).
    identity: object

    # Outgoing links as (request-or-URI, estimate) pairs, where the estimate
    # is a guess at the number of further links reachable from that request
    # (estimates may be None).
    links: typing.Tuple[typing.Tuple[str, object], ...]
class Parser:
    """Application-specific business logics.

    Subclass and override these hooks; Spider calls them while crawling.
    Every on_*_error hook returns True to re-raise the exception or False
    to suppress it.
    """

    def is_outside(self, uri):
        """True if the URI is outside the boundary of this spider."""
        return False  # A boundary-less web.

    def parse(self, request, response):
        """Parse response and return a document object (None to skip it)."""
        raise NotImplementedError

    def on_request_error(self, request, error):
        """Callback on HTTP request error.

        Return False to suppress exception.
        """
        return True

    def on_parse_error(self, request, response, error):
        """Callback on error during parse().

        Return False to suppress exception.
        """
        return True

    def on_document(self, document):
        """Callback to further process the document."""

    def on_document_error(self, request, document, error):
        """Callback on error during on_document().

        Return False to suppress exception.
        """
        return True

    def on_estimate(self, estimate, document):
        """Callback to assess accuracy of the estimations."""

    def on_estimate_error(self, request, estimate, document, error):
        """Callback on error during on_estimate().

        Return False to suppress exception.
        """
        return True
class Spider:
    """Concurrent crawler: fetches pages, parses them with a Parser, and
    follows the returned links through a priority task queue."""

    def __init__(self, *,
                 parser,
                 num_spiders=1,
                 client=None):
        self._parser = parser
        self._client = client or clients.Client()
        self._task_queue = tasklets.TaskQueue(queues.PriorityQueue())
        # XXX: Use a cache for these two sets?
        self._uris = utils.AtomicSet()        # URIs already enqueued/crawled
        self._identities = utils.AtomicSet()  # document identities already seen
        self.num_spiders = num_spiders
        self.future = None

    def start(self):
        """Start crawling the web.

        We don't start crawling right after the spider is initialized
        due to a task queue's design limitation that you should not put
        new tasks into it after tasklets are started (the queue may have
        been closed already). I'm not saying you can't, but you might
        encounter an queues.Closed error.
        """
        supervisor = supervisors.supervisor(
            self.num_spiders,
            functools.partial(tasklets.tasklet, self._task_queue),
        )
        # Use this future to wait for completion of the crawling
        self.future = supervisor._get_future()

    def stop(self, graceful=True):
        """Close the task queue; not-yet-processed tasks are dropped."""
        items = self._task_queue.close(graceful)
        LOG.info('stop spider; drop %d tasks', len(items))

    def crawl(self, request, estimate=None):
        """Enqueue a request for later processing.

        Out-of-boundary requests and already-seen GET URIs are skipped.
        """
        if isinstance(request, str):
            request = clients.Request(method='GET', uri=request)
        if self._parser.is_outside(request.uri):
            LOG.debug('exclude URI to the outside: %s', request.uri)
            return
        if request.method == 'GET' and self._uris.check_and_add(request.uri):
            LOG.debug('exclude crawled URI: %s', request.uri)
            return
        try:
            LOG.debug('enqueue %r', request)
            self._task_queue.put(Task(self, request, estimate))
        except queues.Closed:
            LOG.error('task_queue is closed when adding %s', request.uri)

    # Called by Task.
    def process(self, request, estimate):
        """Fetch *request*, parse it, enqueue its links, and run callbacks."""
        LOG.debug('request %s %s', request.method, request.uri)
        try:
            response = self._client.send(request)
        except clients.HttpError as exc:
            LOG.warning('cannot request %s %s', request.method, request.uri)
            if self._parser.on_request_error(request, exc):
                raise
            return  # Cannot proceed; return now.
        try:
            document = self._parser.parse(request, response)
        except Exception as exc:
            LOG.exception('cannot parse %s %s', request.method, request.uri)
            if self._parser.on_parse_error(request, response, exc):
                raise
            return  # Cannot proceed; return now.
        if document is None:
            LOG.debug('cannot parse %s %s', request.method, request.uri)
            return
        if self._identities.check_and_add(document.identity):
            LOG.debug(
                'exclude URIs from crawled document: %s',
                document.identity,
            )
            return
        # BUG FIX: this loop previously rebound the *estimate* parameter
        # (``for req_from_doc, estimate in document.links``), so the
        # on_estimate() call below received the last link's estimate instead
        # of the estimate this task was scheduled with.
        for link_request, link_estimate in document.links:
            self.crawl(link_request, link_estimate)
        try:
            self._parser.on_document(document)
        except Exception as exc:
            LOG.exception(
                'cannot handle document %s %s', request.method, request.uri)
            if self._parser.on_document_error(request, document, exc):
                raise
        try:
            self._parser.on_estimate(estimate, document)
        except Exception as exc:
            LOG.exception(
                'cannot estimate document %s %s', request.method, request.uri)
            if self._parser.on_estimate_error(
                    request, estimate, document, exc):
                raise
class Task:
    """A prioritized unit of crawl work; ordered by priority for the queue."""

    def __init__(self, spider, request, estimate):
        # An unknown estimate sorts last; otherwise priority tracks it.
        self.priority = (utils.Priority.LOWEST if estimate is None
                         else utils.Priority(estimate))
        self.spider = spider
        self.request = request
        self.estimate = estimate

    def __lt__(self, other):
        """Order tasks by priority so the queue pops the best one first."""
        return self.priority < other.priority

    def __call__(self):
        """Execute the task: have the spider fetch and process the request."""
        self.spider.process(self.request, self.estimate)
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/spiders.py",
"copies": "1",
"size": "6380",
"license": "mit",
"hash": 1405430948359090200,
"line_mean": 30.4285714286,
"line_max": 78,
"alpha_frac": 0.6040752351,
"autogenerated": false,
"ratio": 4.36986301369863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 203
} |
"""A Simple Wildfire-inspired MDP model. Version 1.0"""
import random, math, numpy
def simulate(timesteps, policy=[0,0,0], random_seed=0, model_parameters={}, SILENT=False, PROBABILISTIC_CHOICES=True):
    """Simulate the simple wildfire-inspired MDP (SWIMM v1.0).

    The world is always in one of three states (post-suppression,
    post-mild-fire, post-severe-fire). Each timestep a uniformly
    distributed "event" is drawn; whether it becomes severe depends on a
    state-specific threshold. A logistic policy maps the event value to a
    probability of choosing suppression, and the reward is the constant
    reward minus suppression and burn costs.

    :param timesteps: number of simulation steps to run (coerced to int)
    :param policy: logistic coefficients [constant, slope, shift]
    :param random_seed: seed for the `random` module; also the pathway ID
    :param model_parameters: optional overrides for costs and thresholds
    :param SILENT: when True, skip the printed end-of-run report
    :param PROBABILISTIC_CHOICES: when True, sample the suppress choice
        from the logistic probability; otherwise threshold it at 0.5
    :returns: summary dict, including the per-timestep "States" list

    NOTE: `policy` and `model_parameters` use mutable defaults; they are
    only read here, so the shared-default pitfall does not bite.
    """
    random.seed(random_seed)
    #reward collected every timestep, before costs are subtracted
    constant_reward = 2
    if "Constant Reward" in model_parameters.keys(): constant_reward = model_parameters["Constant Reward"]
    #range of the randomly drawn, uniformly distributed "event"
    #this is the only so-called state "feature" in this MDP and
    #is comparable to a wildfire
    event_max = 1.0
    event_min = 0.0
    timesteps = int(timesteps)
    #cost of suppression in a mild event
    supp_cost_mild = 1
    if "Suppression Cost - Mild Event" in model_parameters.keys(): supp_cost_mild = model_parameters["Suppression Cost - Mild Event"]
    #cost of suppression in a severe event
    supp_cost_severe = 4
    if "Suppression Cost - Severe Event" in model_parameters.keys(): supp_cost_severe = model_parameters["Suppression Cost - Severe Event"]
    #cost of a severe fire on the next timestep
    burn_cost = 12
    if "Severe Burn Cost" in model_parameters.keys(): burn_cost = model_parameters["Severe Burn Cost"]
    #event-value thresholds at or above which an event is severe,
    #one per current state
    threshold_suppression = 0.8
    threshold_mild = 0.85
    threshold_severe = 0.95
    if "Threshold After Suppression" in model_parameters.keys(): threshold_suppression = model_parameters["Threshold After Suppression"]
    if "Threshold After Mild Event" in model_parameters.keys(): threshold_mild = model_parameters["Threshold After Mild Event"]
    if "Threshold After Severe Event" in model_parameters.keys(): threshold_severe = model_parameters["Threshold After Severe Event"]
    if "Probabilistic Choices" in model_parameters.keys():
        #the override arrives as the string "True"/"False", not a bool
        if model_parameters["Probabilistic Choices"] == "True":
            PROBABILISTIC_CHOICES = True
        else:
            PROBABILISTIC_CHOICES = False
    #setting 'enums'
    POST_SUPPRESSION = 0
    POST_MILD = 1
    POST_SEVERE = 2
    MILD=0
    SEVERE=1
    #starting simulations
    states = [None] * timesteps
    #start current condition randomly among the three states
    current_condition = random.randint(0,2)
    for i in range(timesteps):
        #event value is the single "feature" of events in this MDP
        ev = random.uniform(event_min, event_max)
        #severity is meant to be a hidden, "black box" variable inside the MDP
        # and not available to the logistic function as a parameter
        severity = MILD
        if current_condition == POST_SUPPRESSION:
            if ev >= threshold_suppression:
                severity = SEVERE
        elif current_condition == POST_MILD:
            if ev >= threshold_mild:
                severity = SEVERE
        elif current_condition == POST_SEVERE:
            #BUGFIX: this branch previously re-tested POST_SUPPRESSION,
            #making it unreachable, so threshold_severe was never applied
            #and a post-severe state could never yield a severe event.
            if ev >= threshold_severe:
                severity = SEVERE
        #logistic function for the policy choice
        #policy_crossproduct = policy[0] + policy[1]*ev
        #modified logistic policy function
        # CONSTANT COEFFICIENT SHIFT
        policy_crossproduct = policy[0] + ( policy[1] * (ev + policy[2]) )
        #clamp the exponent to keep math.exp from overflowing
        if policy_crossproduct > 100: policy_crossproduct = 100
        if policy_crossproduct < -100: policy_crossproduct = -100
        policy_value = 1.0 / (1.0 + math.exp(-1*(policy_crossproduct)))
        choice_roll = random.uniform(0,1)
        #assume let-burn
        choice = False
        choice_prob = 1.0 - policy_value
        #check for suppress, and update values if necessary
        if PROBABILISTIC_CHOICES:
            if choice_roll < policy_value:
                choice = True
                choice_prob = policy_value
        else:
            if policy_value >= 0.5:
                choice = True
                choice_prob = policy_value
        ### CALCULATE REWARD ###
        supp_cost = 0
        if choice:
            #suppression was chosen
            if severity == MILD:
                supp_cost = supp_cost_mild
            elif severity == SEVERE:
                supp_cost = supp_cost_severe
        #a severe fire on the PREVIOUS timestep is paid for on this one
        burn_penalty = 0
        if current_condition == POST_SEVERE:
            burn_penalty = burn_cost
        current_reward = constant_reward - supp_cost - burn_penalty
        states[i] = [ev, choice, choice_prob, policy_value, current_reward, i]
        ### TRANSITION ###
        if not choice:
            #no suppression
            if severity == SEVERE:
                current_condition = POST_SEVERE
            elif severity == MILD:
                current_condition = POST_MILD
        else:
            #suppression
            current_condition = POST_SUPPRESSION
    #finished simulations, report some values
    vals = []
    suppressions = 0.0
    joint_prob = 1.0
    prob_sum = 0.0
    for i in range(timesteps):
        if states[i][1]: suppressions += 1
        joint_prob *= states[i][2]
        prob_sum += states[i][2]
        vals.append(states[i][4])
    ave_prob = prob_sum / timesteps
    summary = {
        "Average State Value": round(numpy.mean(vals),1),
        "Total Pathway Value": round(numpy.sum(vals),0),
        "STD State Value": round(numpy.std(vals),1),
        "Suppressions": suppressions,
        "Suppression Rate": round((float(suppressions)/timesteps),2),
        "Joint Probability": joint_prob,
        "Average Probability": round(ave_prob, 3),
        "ID Number": random_seed,
        "Timesteps": timesteps,
        "Generation Policy": policy,
        "SWIMM Version": "1.0",
        "Threshold After Suppression": threshold_suppression,
        "Threshold After Mild": threshold_mild,
        "Threshold After Severe": threshold_severe,
        "Suppression Cost - Mild": supp_cost_mild,
        "Suppression Cost - Severe": supp_cost_severe,
        "Severe Burn Cost": burn_cost
    }
    if not SILENT:
        print("")
        print("Simulation Complete - Pathway " + str(random_seed))
        print("Average State Value: " + str(round(numpy.mean(vals),1)) + "   STD: " + str(round(numpy.std(vals),1)))
        print("Suppressions: " + str(suppressions))
        print("Suppression Rate: " + str(round((float(suppressions)/timesteps),2)))
        print("Joint Probability:" + str(joint_prob))
        print("Average Probability: " + str(round(ave_prob, 3)))
        print("")
    summary["States"] = states
    return summary
def simulate_all_policies(timesteps=10000, start_seed=0):
    """Run one pathway under each benchmark policy and print a comparison.

    The same seed is used for every run so all four policies face the
    identical event sequence.

    :param timesteps: steps per simulated pathway
    :param start_seed: random seed shared by all four runs
    :returns: None; the table is printed to stdout
    """
    # Benchmark policies: coin-toss, let-burn, suppress-all, and the
    # hand-tuned "known" policy (slope 20, shift -0.8).
    result_CT = simulate(timesteps, policy=[ 0, 0, 0.0], random_seed=start_seed, SILENT=True)
    result_LB = simulate(timesteps, policy=[-20, 0, 0.0], random_seed=start_seed, SILENT=True)
    result_SA = simulate(timesteps, policy=[ 20, 0, 0.0], random_seed=start_seed, SILENT=True)
    result_KNOWN = simulate(timesteps, policy=[ 0, 20,-0.8], random_seed=start_seed, SILENT=True)
    result_CT["Name"] = "Coin-Toss:     "
    result_SA["Name"] = "Suppress-All:  "
    result_LB["Name"] = "Let-burn:      "
    result_KNOWN["Name"] = "Known:         "
    results = [result_CT, result_SA, result_LB, result_KNOWN]
    print("Policy          Ave    STD    SupRate   AveProb   JointProb")
    # NOTE(review): the trailing commas after print(...) are a Python 2
    # print-statement idiom (suppress the newline); under Python 3 each
    # field prints on its own line instead — confirm intended interpreter.
    for r in results:
        print(r["Name"] + "   "),
        print(str(r["Average State Value"]) + "   "),
        print(str(r["STD State Value"]) + "   "),
        print(str(r["Suppression Rate"]) + "   "),
        print(str(r["Average Probability"]) + "   "),
        print(str(r["Joint Probability"]))
| {
"repo_name": "buckinha/gravity",
"path": "SWIMM.py",
"copies": "1",
"size": "7661",
"license": "mpl-2.0",
"hash": 243023378303836450,
"line_mean": 34.6325581395,
"line_max": 139,
"alpha_frac": 0.5914371492,
"autogenerated": false,
"ratio": 3.78320987654321,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9795888644711956,
"avg_score": 0.015751676206250623,
"num_lines": 215
} |
"""A Simple Wildfire-inspired MDP model. Version 1.1"""
import random, math, numpy
def simulate(timesteps, policy=[0,0,0], random_seed=0, model_parameters={}, SILENT=False, PROBABILISTIC_CHOICES=True):
    """Simulate the wildfire-inspired MDP, version 1.1.

    Unlike v1.0's three discrete states, this version tracks a continuous
    "condition" in [0, 1]; an event is severe when its value meets or
    exceeds the current condition. A logistic policy maps the event value
    to a suppression probability.

    :param timesteps: number of simulation steps (coerced to int)
    :param policy: logistic coefficients; passed through sanitize_policy(),
        so length-2 lists and the strings 'LB'/'SA'/'CT' are also accepted
    :param random_seed: seed for the `random` module; also the pathway ID
    :param model_parameters: optional overrides (costs, condition deltas,
        starting condition, probabilistic-choice flag)
    :param SILENT: when True, skip the printed end-of-run report
    :param PROBABILISTIC_CHOICES: when True, sample the suppress choice
        from the logistic probability; otherwise threshold it at 0.5
    :returns: summary dict, including the per-timestep "States" list

    NOTE: `policy` and `model_parameters` use mutable defaults; neither is
    mutated here.
    """
    random.seed(random_seed)
    #range of the randomly drawn, uniformly distributed "event" that corresponds to fire severity
    event_max = 1.0
    event_min = 0.0
    timesteps = int(timesteps)
    #sanitize policy
    policy = sanitize_policy(policy)
    #REWARD STRUCTURE
    constant_reward = 5
    if "Constant Reward" in model_parameters.keys(): constant_reward = model_parameters["Constant Reward"]
    #cost of suppression in a mild event
    supp_cost_mild = 15
    if "Suppression Cost - Mild Event" in model_parameters.keys(): supp_cost_mild = model_parameters["Suppression Cost - Mild Event"]
    #cost of suppression in a severe event
    supp_cost_severe = 30
    if "Suppression Cost - Severe Event" in model_parameters.keys(): supp_cost_severe = model_parameters["Suppression Cost - Severe Event"]
    #cost of a severe fire on the next timestep
    burn_cost = 50
    if "Severe Burn Cost" in model_parameters.keys(): burn_cost = model_parameters["Severe Burn Cost"]
    #per-outcome drift applied to the continuous condition each timestep
    condition_change_after_suppression = -0.01
    condition_change_after_mild = 0.02
    condition_change_after_severe = 0.00
    if "Condition Change After Suppression" in model_parameters.keys(): condition_change_after_suppression = model_parameters["Condition Change After Suppression"]
    if "Condition Change After Mild" in model_parameters.keys(): condition_change_after_mild = model_parameters["Condition Change After Mild"]
    if "Condition Change After Severe" in model_parameters.keys(): condition_change_after_severe = model_parameters["Condition Change After Severe"]
    if "Probabilistic Choices" in model_parameters.keys():
        #the override arrives as the string "True"/"False", not a bool
        if model_parameters["Probabilistic Choices"] == "True":
            PROBABILISTIC_CHOICES = True
        else:
            PROBABILISTIC_CHOICES = False
    #starting_condition = 0.8
    starting_condition = random.uniform(0.2,0.8)
    if "Starting Condition" in model_parameters.keys(): starting_condition = model_parameters["Starting Condition"]
    #setting 'enums'
    MILD=0
    SEVERE=1
    #starting simulations
    states = [None] * timesteps
    #initialize the continuous condition (randomized above unless overridden)
    current_condition = starting_condition
    for i in range(timesteps):
        #event value is the single "feature" of events in this MDP
        ev = random.uniform(event_min, event_max)
        #severity is meant to be a hidden, "black box" variable inside the MDP
        # and not available to the logistic function as a parameter
        severity = MILD
        if ev >= current_condition: severity = SEVERE
        #logistic function for the policy choice
        #policy_crossproduct = policy[0] + policy[1]*ev
        #modified logistic policy function
        # CONSTANT COEFFICIENT SHIFT
        policy_crossproduct = policy[0] + ( policy[1] * (ev + policy[2]) )
        #clamp the exponent to keep math.exp from overflowing
        if policy_crossproduct > 100: policy_crossproduct = 100
        if policy_crossproduct < -100: policy_crossproduct = -100
        policy_value = 1.0 / (1.0 + math.exp(-1*(policy_crossproduct)))
        choice_roll = random.uniform(0,1)
        #assume let-burn
        choice = False
        choice_prob = 1.0 - policy_value
        #check for suppress, and update values if necessary
        if PROBABILISTIC_CHOICES:
            if choice_roll < policy_value:
                choice = True
                choice_prob = policy_value
        else:
            if policy_value >= 0.5:
                choice = True
                choice_prob = policy_value
        ### CALCULATE REWARD ###
        supp_cost = 0
        burn_penalty = 0
        if choice:
            #suppression was chosen
            if severity == MILD:
                supp_cost = supp_cost_mild
            elif severity == SEVERE:
                supp_cost = supp_cost_severe
        else:
            #suppress was NOT chosen
            if severity == SEVERE:
                #set this timestep's burn penalty to the value given in the overall model parameter
                #this is modeling the timber values lost in a large fire.
                burn_penalty = burn_cost
        current_reward = constant_reward - supp_cost - burn_penalty
        states[i] = [current_condition, ev, choice, choice_prob, policy_value, current_reward, i]
        ### TRANSITION ###
        if not choice:
            #no suppression
            if severity == SEVERE:
                current_condition += condition_change_after_severe
            elif severity == MILD:
                current_condition += condition_change_after_mild
        else:
            #suppression
            current_condition += condition_change_after_suppression
        #Enforce max/min
        if current_condition > 1.0: current_condition = 1.0
        if current_condition < 0.0: current_condition = 0.0
    #finished simulations, report some values
    vals = []
    suppressions = 0.0
    joint_prob = 1.0
    prob_sum = 0.0
    for i in range(timesteps):
        if states[i][2]: suppressions += 1
        joint_prob *= states[i][3]
        prob_sum += states[i][3]
        vals.append(states[i][5])
    ave_prob = prob_sum / timesteps
    summary = {
        "Average State Value": round(numpy.mean(vals),1),
        "Total Pathway Value": round(numpy.sum(vals),0),
        "STD State Value": round(numpy.std(vals),1),
        "Suppressions": suppressions,
        "Suppression Rate": round((float(suppressions)/timesteps),2),
        "Joint Probability": joint_prob,
        "Average Probability": round(ave_prob, 3),
        "ID Number": random_seed,
        "Timesteps": timesteps,
        "Generation Policy": policy,
        "Version": "1.1",
        "Constant Reward": constant_reward,
        "Condition Change After Suppression": condition_change_after_suppression,
        "Condition Change After Mild": condition_change_after_mild,
        "Condition Change After Severe": condition_change_after_severe,
        "Suppression Cost - Mild": supp_cost_mild,
        "Suppression Cost - Severe": supp_cost_severe,
        "Severe Burn Cost": burn_cost,
        "Starting Condition": starting_condition
    }
    if not SILENT:
        print("")
        print("Simulation Complete - Pathway " + str(random_seed))
        print("Average State Value: " + str(round(numpy.mean(vals),1)) + "   STD: " + str(round(numpy.std(vals),1)))
        print("Suppressions: " + str(suppressions))
        print("Suppression Rate: " + str(round((float(suppressions)/timesteps),2)))
        print("Joint Probability:" + str(joint_prob))
        print("Average Probability: " + str(round(ave_prob, 3)))
        print("")
    summary["States"] = states
    return summary
def simulate_all_policies(timesteps=10000, start_seed=0):
    """Run one pathway under each benchmark policy and print a comparison.

    The same seed is used for every run so all four policies face the
    identical event sequence.

    :param timesteps: steps per simulated pathway
    :param start_seed: random seed shared by all four runs
    :returns: None; the table is printed to stdout
    """
    # Benchmark policies: coin-toss, let-burn, suppress-all, and the
    # hand-tuned "known" policy (slope 20, shift -0.8).
    result_CT = simulate(timesteps, policy=[ 0, 0, 0.0], random_seed=start_seed, SILENT=True)
    result_LB = simulate(timesteps, policy=[-20, 0, 0.0], random_seed=start_seed, SILENT=True)
    result_SA = simulate(timesteps, policy=[ 20, 0, 0.0], random_seed=start_seed, SILENT=True)
    result_KNOWN = simulate(timesteps, policy=[ 0, 20,-0.8], random_seed=start_seed, SILENT=True)
    result_CT["Name"] = "Coin-Toss:     "
    result_SA["Name"] = "Suppress-All:  "
    result_LB["Name"] = "Let-burn:      "
    result_KNOWN["Name"] = "Known:         "
    results = [result_CT, result_SA, result_LB, result_KNOWN]
    print("Policy          Ave    STD    SupRate   AveProb   JointProb")
    # NOTE(review): the trailing commas after print(...) are a Python 2
    # print-statement idiom (suppress the newline); under Python 3 each
    # field prints on its own line instead — confirm intended interpreter.
    for r in results:
        print(r["Name"] + "   "),
        print(str(r["Average State Value"]) + "   "),
        print(str(r["STD State Value"]) + "   "),
        print(str(r["Suppression Rate"]) + "   "),
        print(str(r["Average Probability"]) + "   "),
        print(str(r["Joint Probability"]))
def sanitize_policy(policy):
    """Normalize a policy argument into a length-3 coefficient list.

    Accepts a list of length 2 (a zero shift term is appended), any other
    list (returned as-is, assumed length 3), or one of the shorthand
    strings 'LB' (let-burn), 'SA' (suppress-all), 'CT' (coin-toss).
    Anything unrecognized yields an empty list.
    """
    if isinstance(policy, list):
        # length-2 policies get the shift coefficient appended;
        # everything else is assumed to already be length 3
        return policy + [0] if len(policy) == 2 else policy
    # not a list: resolve the shorthand string names
    for name, coefficients in (('LB', [-20, 0, 0]),
                               ('SA', [ 20, 0, 0]),
                               ('CT', [  0, 0, 0])):
        if policy == name:
            return coefficients
    return []
| {
"repo_name": "buckinha/gravity",
"path": "SWIMMv1_1.py",
"copies": "1",
"size": "8713",
"license": "mpl-2.0",
"hash": 2658447383516361000,
"line_mean": 35.9194915254,
"line_max": 163,
"alpha_frac": 0.5979570756,
"autogenerated": false,
"ratio": 3.8231680561649846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49211251317649846,
"avg_score": null,
"num_lines": null
} |
"""A simple wrapper for Asterisk call file actions."""
class Action(object):
    """A generic Asterisk action.

    Marker base class for call-file actions (see `Application` and
    `Context` below); it defines no behavior of its own.
    """
    pass
class Application(Action):
    """Stores and manipulates Asterisk applications and data."""

    def __init__(self, application, data):
        """Create a new `Application` object.

        :param str application: Asterisk application.
        :param str data: Asterisk application data.
        """
        self.application = application
        self.data = data

    def render(self):
        """Render this action as call file directives.

        :rtype: Tuple of strings.
        """
        directives = ['Application: ' + self.application]
        directives.append('Data: ' + self.data)
        return tuple(directives)
class Context(Action):
    """Stores and manipulates Asterisk contexts, extensions, and priorities."""

    def __init__(self, context, extension, priority):
        """Create a new `Context` object.

        :param str context: Asterisk context to run.
        :param str extension: Asterisk extension to run.
        :param str priority: Asterisk priority to run.
        """
        self.context = context
        self.extension = extension
        self.priority = priority

    def render(self):
        """Render this action as call file directives.

        :rtype: Tuple of strings.
        """
        directives = ('Context: ' + self.context,
                      'Extension: ' + self.extension,
                      'Priority: ' + self.priority)
        return directives
| {
"repo_name": "rdegges/pycall",
"path": "pycall/actions.py",
"copies": "1",
"size": "1417",
"license": "unlicense",
"hash": 7019433707068752000,
"line_mean": 27.9183673469,
"line_max": 79,
"alpha_frac": 0.6125617502,
"autogenerated": false,
"ratio": 4.4420062695924765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5554568019792476,
"avg_score": null,
"num_lines": null
} |
"""A simple wrapper for Asterisk call files."""
from __future__ import with_statement
from shutil import move
from time import mktime
from pwd import getpwnam
from tempfile import mkstemp
from os import chown, error, utime
import os
from path import Path
from .call import Call
from .actions import Action, Context
from .errors import InvalidTimeError, NoSpoolPermissionError, NoUserError, \
NoUserPermissionError, ValidationError
class CallFile(object):
    """Stores and manipulates Asterisk call files."""

    #: The default spooling directory (should be OK for most systems).
    DEFAULT_SPOOL_DIR = '/var/spool/asterisk/outgoing'

    def __init__(self, call, action, archive=None, filename=None, tempdir=None,
                 user=None, spool_dir=None):
        """Create a new `CallFile` object.

        :param obj call: A `pycall.Call` instance.
        :param obj action: Either a `pycall.actions.Application` instance
            or a `pycall.actions.Context` instance.
        :param bool archive: Should Asterisk archive the call file?
        :param str filename: Filename of the call file.
        :param str tempdir: Temporary directory to store the call file before
            spooling.
        :param str user: Username to spool the call file as.
        :param str spool_dir: Directory to spool the call file to.
        :rtype: `CallFile` object.
        """
        self.call = call
        self.action = action
        self.archive = archive
        self.user = user
        self.spool_dir = spool_dir or self.DEFAULT_SPOOL_DIR
        if filename and tempdir:
            # Caller supplied both parts of the temporary location.
            self.filename = Path(filename)
            self.tempdir = Path(tempdir)
        else:
            # Otherwise create a unique temp file; keep only its basename
            # and directory, and close the OS-level descriptor since the
            # file is rewritten later by `writefile`.
            tup = mkstemp(suffix='.call')
            f = Path(tup[1])
            self.filename = f.name
            self.tempdir = f.parent
            os.close(tup[0])

    def __str__(self):
        """Render this call file object for developers.

        :returns: String representation of this object.
        :rtype: String.
        """
        return 'CallFile-> archive: %s, user: %s, spool_dir: %s' % (
            self.archive, self.user, self.spool_dir)

    def is_valid(self):
        """Check to see if all attributes are valid.

        :returns: True if all attributes are valid, False otherwise.
        :rtype: Boolean.
        """
        if not isinstance(self.call, Call):
            return False
        # NOTE(review): Context subclasses Action in this package, so the
        # second isinstance check looks redundant — confirm before tidying.
        if not (isinstance(self.action, Action) or
                isinstance(self.action, Context)):
            return False
        if self.spool_dir and not Path(self.spool_dir).abspath().isdir():
            return False
        if not self.call.is_valid():
            return False
        return True

    def buildfile(self):
        """Build a call file in memory.

        :raises: `ValidationError` if this call file can not be validated.
        :returns: A list of call file directives as they will be written to the
            disk.
        :rtype: List of strings.
        """
        if not self.is_valid():
            raise ValidationError
        cf = []
        cf += self.call.render()
        cf += self.action.render()
        if self.archive:
            cf.append('Archive: yes')
        return cf

    @property
    def contents(self):
        """Get the contents of this call file.

        :returns: Call file contents.
        :rtype: String.
        """
        return '\n'.join(self.buildfile())

    def writefile(self):
        """Write a temporary call file to disk."""
        with open(Path(self.tempdir) / Path(self.filename), 'w') as f:
            f.write(self.contents)

    def spool(self, time=None):
        """Spool the call file with Asterisk.

        This will move the call file to the Asterisk spooling directory. If
        the `time` attribute is specified, then the call file will be spooled
        at the specified time instead of immediately.

        :param datetime time: The date and time to spool this call file (eg:
            Asterisk will run this call file at the specified time).
        """
        self.writefile()
        if self.user:
            try:
                # Look up the target user so the spooled file can be owned
                # by the account Asterisk runs under.
                pwd = getpwnam(self.user)
                uid = pwd[2]
                gid = pwd[3]
                try:
                    chown(Path(self.tempdir) / Path(self.filename), uid, gid)
                except error:
                    raise NoUserPermissionError
            except KeyError:
                raise NoUserError
        if time:
            try:
                # Asterisk delays a call file until its mtime, so set both
                # atime and mtime to the requested epoch seconds.
                time = mktime(time.timetuple())
                utime(Path(self.tempdir) / Path(self.filename), (time, time))
            except (error, AttributeError, OverflowError, ValueError):
                raise InvalidTimeError
        try:
            move(Path(self.tempdir) / Path(self.filename),
                 Path(self.spool_dir) / Path(self.filename))
        except IOError:
            raise NoSpoolPermissionError
| {
"repo_name": "rdegges/pycall",
"path": "pycall/callfile.py",
"copies": "1",
"size": "4959",
"license": "unlicense",
"hash": 8546422790357098000,
"line_mean": 30.5859872611,
"line_max": 79,
"alpha_frac": 0.5854002823,
"autogenerated": false,
"ratio": 4.256652360515021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5342052642815022,
"avg_score": null,
"num_lines": null
} |
"""A simple wrapper for Asterisk calls."""
class Call(object):
    """Stores and manipulates Asterisk calls."""

    def __init__(self, channel, callerid=None, variables=None, account=None,
                 wait_time=None, retry_time=None, max_retries=None):
        """Create a new `Call` object.

        :param str channel: The Asterisk channel to call. Should be in standard
            Asterisk format.
        :param str callerid: CallerID to use.
        :param dict variables: Variables to pass to Asterisk upon answer.
        :param str account: Account code to associate with this call.
        :param int wait_time: Amount of time to wait for answer (in seconds).
        :param int retry_time: Amount of time to wait (in seconds) between
            retry attempts.
        :param int max_retries: Maximum amount of retry attempts.
        """
        self.channel = channel
        self.callerid = callerid
        self.variables = variables
        self.account = account
        self.wait_time = wait_time
        self.retry_time = retry_time
        self.max_retries = max_retries

    def is_valid(self):
        """Check to see if the `Call` attributes are valid.

        :returns: True if all attributes are valid, False otherwise.
        :rtype: Boolean.
        """
        if self.variables and not isinstance(self.variables, dict):
            return False
        # Exact type checks (not isinstance) are deliberate here: they
        # reject bools and int subclasses exactly as the original
        # `type(x) != int` comparisons did; `is not` is the idiomatic
        # identity form for both None and type comparisons (PEP 8).
        if self.wait_time is not None and type(self.wait_time) is not int:
            return False
        if self.retry_time is not None and type(self.retry_time) is not int:
            return False
        if self.max_retries is not None and type(self.max_retries) is not int:
            return False
        return True

    def render(self):
        """Render this call as call file directives.

        :returns: A list of call file directives.
        :rtype: List of strings.
        """
        c = ['Channel: ' + self.channel]
        if self.callerid:
            c.append('Callerid: ' + self.callerid)
        if self.variables:
            for var, value in self.variables.items():
                c.append('Set: %s=%s' % (var, value))
        if self.account:
            c.append('Account: ' + self.account)
        # Explicit None checks so a legitimate value of 0 still renders.
        if self.wait_time is not None:
            c.append('WaitTime: %d' % self.wait_time)
        if self.retry_time is not None:
            c.append('RetryTime: %d' % self.retry_time)
        if self.max_retries is not None:
            c.append('Maxretries: %d' % self.max_retries)
        return c
| {
"repo_name": "rdegges/pycall",
"path": "pycall/call.py",
"copies": "1",
"size": "2476",
"license": "unlicense",
"hash": 9113417343231359000,
"line_mean": 35.9552238806,
"line_max": 79,
"alpha_frac": 0.5868336026,
"autogenerated": false,
"ratio": 4.006472491909385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5093306094509384,
"avg_score": null,
"num_lines": null
} |
"""A simple wrapper for `tvtk.Cutter`.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Property
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports.
from mayavi.core.component import Component
######################################################################
# `Cutter` class.
######################################################################
class Cutter(Component):
    """Mayavi component wrapping `tvtk.Cutter`.

    Exposes the cutter's `cut_function` as a trait property and refreshes
    the TVTK pipeline when upstream inputs change.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # The wrapped tvtk.Cutter instance (created eagerly via args=()).
    cutter = Instance(tvtk.Cutter, args=())

    # The cut function. This should be a delegate but due to a bug in
    # traits that does not work.
    cut_function = Property

    ########################################
    # View related traits.

    view = View(Group(Item(name='cutter',
                           style='custom',
                           resizable=True),
                      show_labels=False),
                resizable=True)

    ######################################################################
    # `Component` interface
    ######################################################################
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        # Nothing to do until there is at least one input with outputs.
        if (len(self.inputs) == 0) or (len(self.inputs[0].outputs) == 0):
            return
        c = self.cutter
        self.configure_connection(c, self.inputs[0])
        c.update()
        self.outputs = [c.output]

    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Assigning the trait fires the data_changed notification to
        # downstream listeners.
        self.data_changed = True

    ######################################################################
    # `Cutter` interface
    ######################################################################
    def _get_cut_function(self):
        """Property getter: read the cut function off the tvtk cutter."""
        return self.cutter.cut_function

    def _set_cut_function(self, val):
        """Property setter: forward to the tvtk cutter and fire a
        `cut_function` property-changed notification."""
        old = self.cutter.cut_function
        self.cutter.cut_function = val
        self.trait_property_changed('cut_function', old, val)
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/components/cutter.py",
"copies": "3",
"size": "2478",
"license": "bsd-3-clause",
"hash": -8944235589836962000,
"line_mean": 32.04,
"line_max": 74,
"alpha_frac": 0.4963680387,
"autogenerated": false,
"ratio": 4.631775700934579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6628143739634579,
"avg_score": null,
"num_lines": null
} |
'''A simple wrapper of logging that support color prompt on Linux'''
import sys
import logging
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
# Public API of this module; the callables are defined later in the file,
# the level constants are re-exported from `logging`.
__all__ = ['getlogger', 'stdoutlogger', 'filelogger', 'streamlogger',
           'ColorFormatter',
           'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
# Note: color escape codes only work on terminals that support them;
# do not use the color formatter with a file logger.
# Python 2/3 compatibility aliases: on Python 3 the text and byte string
# types are distinct; on Python 2 `str` doubles as the byte type.
if str is bytes:
    # Python 2
    bytes_type = str
    unicode_type = unicode
    basestring_type = basestring
else:
    # Python 3
    bytes_type = bytes
    unicode_type = str
    basestring_type = str

# Types that `to_unicode` passes through unchanged.
_TO_UNICODE_TYPES = (unicode_type, type(None))
def _stderr_supports_color():
    """Return True when stderr looks like a color-capable terminal.

    Windows is excluded outright; elsewhere the answer is whether stderr
    reports itself as a tty.
    """
    if sys.platform.startswith('win32'):
        return False
    isatty = getattr(sys.stderr, 'isatty', None)
    if isatty is not None and isatty():
        return True
    return False
def to_unicode(value):
    """Converts a string argument to a unicode string.

    If the argument is already a unicode string or None, it is returned
    unchanged. Otherwise it must be a byte string and is decoded as utf8.
    """
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    if isinstance(value, bytes_type):
        return value.decode("utf-8")
    raise TypeError(
        "Expected bytes, unicode, or None; got %r" % type(value)
    )
def _safe_unicode(s):
    """Like `to_unicode`, but never raises on bad bytes: fall back to repr."""
    try:
        result = to_unicode(s)
    except UnicodeDecodeError:
        result = repr(s)
    return result
class ColorFormatter(logging.Formatter):
    """logging.Formatter that colorizes the bracketed log prefix by level.

    Color is only applied when the formatter was constructed with
    color=True AND stderr supports color (see `_stderr_supports_color`).
    """

    # ANSI terminal escape sequences.
    RS = "\033[0m"     # reset
    HC = "\033[1m"     # hicolor
    UL = "\033[4m"     # underline
    INV = "\033[7m"    # inverse background and foreground
    FBLK = "\033[30m"  # foreground black
    FRED = "\033[31m"  # foreground red
    FGRN = "\033[32m"  # foreground green
    FYEL = "\033[33m"  # foreground yellow
    FBLE = "\033[34m"  # foreground blue
    FMAG = "\033[35m"  # foreground magenta
    FCYN = "\033[36m"  # foreground cyan
    FWHT = "\033[37m"  # foreground white
    BBLK = "\033[40m"  # background black
    BRED = "\033[41m"  # background red
    BGRN = "\033[42m"  # background green
    BYEL = "\033[43m"  # background yellow
    BBLE = "\033[44m"  # background blue
    BMAG = "\033[45m"  # background magenta
    BCYN = "\033[46m"  # background cyan
    BWHT = "\033[47m"  # background white

    # %-style record format; `all`, `color` and `end_color` are injected
    # per-record in `format` below.
    DEFAULT_FORMAT = (
        '%(all)s%(color)s'
        '[%(levelname)1.1s %(lineno)3d %(asctime)s %(module)s:%(funcName)s]'
        '%(end_color)s %(message)s')
    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
    # Level -> foreground color escape sequence.
    DEFAULT_COLORS = {
        logging.DEBUG: FGRN,
        logging.INFO: FBLE,
        logging.WARNING: FYEL,
        logging.ERROR: FRED,
        logging.CRITICAL: FRED,
    }

    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
                 datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
        """Create a formatter.

        :param color: request colored output; only honored when stderr
            supports color.
        :param fmt: %-style format string applied to the record dict.
        :param datefmt: strftime format for `asctime`.
        :param colors: mapping of log level -> escape sequence.
        """
        super(ColorFormatter, self).__init__(fmt, datefmt)
        self._color = (color and _stderr_supports_color())
        self._normal = self.RS if self._color else ''
        # With color disabled the mapping is empty, so `format` takes the
        # no-color branch for every level.
        self._colors = colors if self._color else {}
        self._fmt = fmt

    def format(self, record):
        """Format *record*, injecting color/emphasis fields first."""
        try:
            message = record.getMessage()
            assert isinstance(message, basestring_type)
            record.message = _safe_unicode(message)
        except Exception as e:
            # Never let a malformed log call raise; show what we got.
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
        record.asctime = self.formatTime(record, self.datefmt)
        # `all` carries extra emphasis; CRITICAL gets bold + underline.
        record.all = ''
        if record.levelno in self._colors:
            if record.levelno >= logging.CRITICAL:
                record.all = self.HC+self.UL
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ''
        formatted = self._fmt % record.__dict__
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # exc_text contains multiple lines. We need to _safe_unicode
            # each line separately so that non-utf8 bytes don't cause
            # all the newlines to turn into '\n'.
            lines = [formatted.rstrip()]
            lines.extend(_safe_unicode(ln)
                         for ln in record.exc_text.split('\n'))
            formatted = '\n'.join(lines)
        return formatted.replace("\n", "\n    ")
def _getlogger(hdlr, logger=None, level=None, color=True):
    """Attach *hdlr* (wearing a ColorFormatter) to *logger* and return it.

    *logger* may already be a Logger instance; otherwise it is treated as
    a name (or None) and resolved via logging.getLogger. *level*, when
    given, is applied to the logger.
    """
    target = logger if isinstance(logger, logging.Logger) else logging.getLogger(logger)
    hdlr.setFormatter(ColorFormatter(color=color))
    target.addHandler(hdlr)
    if level is not None:
        target.setLevel(level)
    return target
def stdoutlogger(logger=None, level=None, color=True):
    """Return *logger* wired to a stream handler on stdout."""
    return _getlogger(logging.StreamHandler(sys.stdout), logger, level, color)
def stderrlogger(logger=None, level=None, color=True):
    """Return *logger* wired to a stream handler on stderr."""
    return _getlogger(logging.StreamHandler(sys.stderr), logger, level, color)
def filelogger(file, logger=None, level=None):
    """Return *logger* wired to a FileHandler on *file*.

    Color is forced off because escape codes would pollute the log file.
    """
    return _getlogger(logging.FileHandler(file), logger, level, False)
def streamlogger(stream, logger=None, level=None, color=True):
    """Return *logger* wired to a StreamHandler on an arbitrary *stream*."""
    return _getlogger(logging.StreamHandler(stream), logger, level, color)
# Default logger factory: log to stdout.
getlogger = stdoutlogger

if __name__ == '__main__':
    # Demo: exercise every level with color (test1) and without (test2).
    logger = getlogger('test1', DEBUG)
    logger.debug('debug')
    logger.info('info')
    logger.warning('warning')
    logger.error('error')
    logger.critical('critical')
    logger = getlogger('test2', DEBUG, False)
    logger.debug('debug')
    logger.info('info')
    logger.warning('warning')
    logger.error('error')
    logger.critical('critical')
| {
"repo_name": "TylerTemp/docpie",
"path": "docpie/bashlog.py",
"copies": "1",
"size": "5834",
"license": "mit",
"hash": 5387916541138713000,
"line_mean": 30.5351351351,
"line_max": 76,
"alpha_frac": 0.6091875214,
"autogenerated": false,
"ratio": 3.6123839009287924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4721571422328792,
"avg_score": null,
"num_lines": null
} |
"""A simple wrapper to call sendmail-like binary."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_sendmail
#
# Public Classes:
# Sendmail
# .set_default_binary
# .set_default_params_from_type
# .send
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import phlsys_subprocess
class Sendmail():
    """Wraps a sendmail-compatible binary with configurable defaults."""

    # Class-wide defaults, adjustable via the set_default_* classmethods.
    _default_binary = 'sendmail'
    _default_params = ['-t']

    @classmethod
    def set_default_binary(cls, binary):
        """Set the binary that future instances use when none is supplied."""
        cls._default_binary = binary

    @classmethod
    def set_default_params_from_type(cls, sendmail_type):
        """Set the default parameter list for a known sendmail flavor.

        :raises TypeError: for anything other than 'sendmail'/'catchmail'.
        """
        if sendmail_type == "sendmail":
            params = ["-t"]
        elif sendmail_type == "catchmail":
            params = []
        else:
            raise TypeError(str(sendmail_type) + " is not a type of sendmail")
        cls._default_params = params

    def __init__(self, binary=None, params=None):
        """Simply copy the supplied parameters and store in the object.

        :binary: the binary to execute, 'sendmail' if None
            Note that other binaries that are sendmail-compatabile, like
            'catchmail' can be used here instead.
        """
        self._binary = self._default_binary if binary is None else binary
        self._params = self._default_params if params is None else params

    # def call(*args, stdin=None): <-- supported in Python 3
    def send(self, stdin):
        """Pipe *stdin* to the configured binary; return its stdout."""
        result = phlsys_subprocess.run(
            self._binary, *self._params, stdin=stdin)
        return result.stdout
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "cs-shadow/phabricator-tools",
"path": "py/phl/phlsys_sendmail.py",
"copies": "4",
"size": "2647",
"license": "apache-2.0",
"hash": -7504162105024252000,
"line_mean": 34.2933333333,
"line_max": 79,
"alpha_frac": 0.5527011711,
"autogenerated": false,
"ratio": 4.486440677966102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7039141849066102,
"avg_score": null,
"num_lines": null
} |
"""A simple WSGI application that just echoes the request body.
This is a good starting point for other WSGI applications. See the
source for what's implemented and why.
Configuration Options::
[wsgi_echo]
call = brim.wsgi_echo.WSGIEcho
# path = <path>
# The request path to match and serve; any other paths will be
# passed on to the next WSGI app in the chain. Default: /echo
# max_echo = <bytes>
# The maximum bytes to echo; any additional bytes will be ignored.
# Default: 65536
Stats Variables (where *n.* is the name of the app in the config):
============== ====== ================================================
Name Type Description
============== ====== ================================================
n.requests sum The number of requests received.
start_time worker Timestamp when the app was started. If the app
had to be restarted, this timestamp will be
updated with the new start time. This item is
available with all apps and set by the
controlling :py:class:`brim.server.Subserver`.
============== ====== ================================================
"""
"""Copyright and License.
Copyright 2012-2014 Gregory Holt
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class WSGIEcho(object):
    """WSGI app that reads the request body and sends it straight back.

    Only requests whose path exactly matches the configured one are
    served; every other request is delegated to the next WSGI app in
    the chain.

    :param name: The name of the app.
    :param parsed_conf: The conf result from :py:meth:`parse_conf`.
    :param next_app: The next WSGI app in the chain.
    """

    def __init__(self, name, parsed_conf, next_app):
        self.name = name  # used to namespace the requests counter
        self.next_app = next_app  # downstream WSGI app in the chain
        self.path = parsed_conf['path']  # URL path served by this app
        self.max_echo = parsed_conf['max_echo']  # byte cap on the echo

    def __call__(self, env, start_response):
        """Serve one WSGI request.

        Reads up to ``max_echo`` bytes of the request body and returns
        them as the response body; non-matching paths are passed on.

        :param env: The WSGI env as per the spec.
        :param start_response: The WSGI start_response as per the spec.
        :returns: Calls *start_response* and returns an iterable as per
            the WSGI spec.
        """
        if env['PATH_INFO'] != self.path:
            return self.next_app(env, start_response)
        env['brim.stats'].incr('%s.requests' % self.name)
        chunks = []
        received = 0
        while received < self.max_echo:
            try:
                piece = env['wsgi.input'].read(self.max_echo - received)
            except Exception:
                # Treat a failed read like end-of-input.
                piece = ''
            if not piece:
                break
            received += len(piece)
            chunks.append(piece)
        start_response('200 OK', [('Content-Length', str(received))])
        return chunks

    @classmethod
    def parse_conf(cls, name, conf):
        """Translate the overall server conf into this app's conf dict.

        See the module docs of :py:mod:`brim.wsgi_echo` for the
        available configuration options.

        :param name: The app's section name in the overall configuration.
        :param conf: The :py:class:`brim.conf.Conf` instance for the
            whole server.
        :returns: A dict suitable for passing as ``parsed_conf`` to the
            :py:class:`WSGIEcho` constructor.
        """
        parsed = {}
        parsed['path'] = conf.get(name, 'path', '/echo')
        parsed['max_echo'] = conf.get_int(name, 'max_echo', 65536)
        return parsed

    @classmethod
    def stats_conf(cls, name, parsed_conf):
        """Declare the stats variables this app wants established.

        The single ``sum``-typed counter is reported as the total of the
        per-worker request counts.

        :param name: The app's section name in the overall configuration.
        :param parsed_conf: The result from :py:meth:`parse_conf`.
        :returns: A list of (stat_name, stat_type) pairs.
        """
        stat_name = '%s.requests' % name
        return [(stat_name, 'sum')]
| {
"repo_name": "gholt/python-brim",
"path": "brim/wsgi_echo.py",
"copies": "1",
"size": "6078",
"license": "apache-2.0",
"hash": 1791162680371552300,
"line_mean": 38.9868421053,
"line_max": 72,
"alpha_frac": 0.5904902929,
"autogenerated": false,
"ratio": 4.502222222222223,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5592712515122222,
"avg_score": null,
"num_lines": null
} |
""" A simplified version mimicking the built-in property class.
See https://docs.python.org/2/howto/descriptor.html
"""
class Property(object):
    """Minimal reimplementation of the built-in ``property`` descriptor.

    Mirrors the pure-Python equivalent from the descriptor HowTo guide.
    """

    def __init__(self, fget, fset=None):
        self.fget = fget
        self.fset = fset
        # Expose the getter's name, like the real property does.
        self.__name__ = fget.__name__

    def __get__(self, obj, owner=None):
        if obj is None:
            # Accessed on the class rather than an instance: return the
            # descriptor object itself.
            return self
        getter = self.fget
        if getter is None:
            raise AttributeError('unreadable attribute')
        return getter(obj)

    def __set__(self, obj, value):
        writer = self.fset
        if writer is None:
            raise AttributeError('can not set attribute')
        writer(obj, value)

    def setter(self, fset):
        """Register *fset* as the writer and return the descriptor
        (enables the ``@name.setter`` decorator form)."""
        self.fset = fset
        return self
class Test(object):
    """Demo class exercising the custom ``Property`` descriptor."""

    def __init__(self, name):
        self.__name = name

    @Property
    def name(self):
        """Read the name, decorated to show the getter ran."""
        return '{} in Test'.format(self.__name)

    @name.setter
    def name(self, name):
        # `basestring` only exists on Python 2 and raised NameError on
        # Python 3; `str` keeps the type check working on both for the
        # common (byte-string on py2, text on py3) case.
        if not isinstance(name, str):
            raise TypeError('name must be string')
        self.__name = name
| {
"repo_name": "seanlin0800/python_snippets",
"path": "simple_property.py",
"copies": "1",
"size": "1043",
"license": "unlicense",
"hash": -4860573266512922000,
"line_mean": 22.7045454545,
"line_max": 63,
"alpha_frac": 0.573346117,
"autogenerated": false,
"ratio": 3.8773234200743496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4950669537074349,
"avg_score": null,
"num_lines": null
} |
'''A simplistic LL(1) parser for lambda-terms
@author: F. Peschanski
'''
# When run directly from the test directory (not installed), make the
# popparser library importable from the sibling src directory.
if __name__ == "__main__":
    import sys
    sys.path.append("../src")
from popparser import Grammar, parsers, tokens
from popparser.llparser import LLParsing
from popparser.tokenizer import Tokenizer
class Term:
    """Base class of all AST nodes; records the node's source span."""
    def __init__(self, start_pos, end_pos):
        # Span of this term in the parsed input.
        self.end_pos = end_pos
        self.start_pos = start_pos
class Var(Term):
    """AST node for a typed variable occurrence."""
    def __init__(self, var_name, var_type, start_pos, end_pos):
        Term.__init__(self, start_pos, end_pos)
        self.var_type = var_type
        self.var_name = var_name
    def __repr__(self):
        return "Var(%r, %r)" % (self.var_name, self.var_type)
class App(Term):
    """AST node for the application of one term (rator) to another (rand)."""
    def __init__(self, rator, rand, start_pos, end_pos):
        Term.__init__(self, start_pos, end_pos)
        self.rand = rand
        self.rator = rator
    def __repr__(self):
        return "App(%r, %r)" % (self.rator, self.rand)
class Lambda(Term):
    """AST node for an abstraction binding a typed variable over a body."""
    def __init__(self, bind_name, bind_type, body, start_pos, end_pos):
        Term.__init__(self, start_pos, end_pos)
        self.body = body
        self.bind_type = bind_type
        self.bind_name = bind_name
    def __repr__(self):
        return "Lambda(%r, %r, %r)" % (self.bind_name, self.bind_type,
                                       self.body)
class LambdaParser:
    """LL(1) parser for simply-typed lambda-terms, built on popparser.

    Grammar (see prepare_grammar):
        expr ::= ref | app | lam
        ref  ::= identifier ':' identifier      (variable : type)
        app  ::= '(' expr expr ')'
        lam  ::= lambda ref '.' expr
    """
    def __init__(self):
        self.grammar = Grammar()
        self.prepare_grammar(self.grammar)
    def prepare_tokenizer(self, tokenizer):
        """Install the lexical rules for lambda-term source text on
        *tokenizer*."""
        # reserved symbols
        tokenizer.add_rule(tokens.Char('dot', '.'))
        tokenizer.add_rule(tokens.Char('column', ':'))
        tokenizer.add_rule(tokens.Char('lparen', '('))
        tokenizer.add_rule(tokens.Char('rparen', ')'))
        tokenizer.add_rule(tokens.CharSet('space', ' ', '\t', '\r', '\n'))
        # lambdas: either the λ / backslash characters or the literal
        # "\lambda " spelling produce the same 'lambda' token
        tokenizer.add_rule(tokens.CharSet('lambda', 'λ', '\\'))
        tokenizer.add_rule(tokens.Literal('lambda', '\\lambda '))
        # identifiers (letters/underscore, then alphanumerics, then primes)
        tokenizer.add_rule(tokens.Regexp('identifier',
                                         "[a-zA-Z_][a-zA-Z_0-9]*'*"))
    def prepare_grammar(self, grammar):
        """Register all grammar rules on *grammar* and set its entry rule.

        Each construction rule gets an xform_content callback that turns
        the raw parse result into the matching AST node (Var/App/Lambda).
        """
        # punctuation
        grammar.register('column', parsers.Token('column'))
        grammar.register('dot', parsers.Token('dot'))
        grammar.register('lparen', parsers.Token('lparen'))
        grammar.register('rparen', parsers.Token('rparen'))
        grammar.register('spaces', parsers.Repeat(parsers.Token('space'),
                                                  minimum=1))
        # constructions
        grammar.register('lambda', parsers.Token('lambda'))
        grammar.register('identifier', parsers.Token('identifier'))
        # ref: "name : type" — a typed variable occurrence
        ref_parser = parsers.Tuple().element(grammar.ref('identifier'))\
                                    .skip(grammar.ref('spaces'))\
                                    .skip(grammar.ref('column'))\
                                    .element(grammar.ref('identifier'))
        def ref_xform(result):
            # Build a Var node from the two kept identifier tokens.
            var_ident = result.content[0]
            var_type = result.content[1]
            return Var(var_ident.content.value, var_type.content.value,
                       result.start_pos, result.end_pos)
        ref_parser.xform_content = ref_xform
        grammar.register('ref', ref_parser)
        # app: "( expr expr )" — application of rator to rand
        app_parser = parsers.Tuple().skip(grammar.ref('lparen'))\
                                    .skip(grammar.ref('spaces'))\
                                    .element(grammar.ref('expr'))\
                                    .skip(grammar.ref('spaces'))\
                                    .element(grammar.ref('expr'))\
                                    .skip(grammar.ref('spaces'))\
                                    .skip(grammar.ref('rparen'))
        def app_xform(result):
            # Build an App node from the two kept sub-expressions.
            rator = result.content[0]
            rand = result.content[1]
            return App(rator.content, rand.content,
                       result.start_pos, result.end_pos)
        app_parser.xform_content = app_xform
        grammar.register('app', app_parser)
        # lam: "lambda ref . expr" — abstraction over a typed binder
        lam_parser = parsers.Tuple().skip(grammar.ref('lambda'))\
                                    .element(grammar.ref('ref'))\
                                    .skip(grammar.ref('spaces'))\
                                    .skip(grammar.ref('dot'))\
                                    .skip(grammar.ref('spaces'))\
                                    .element(grammar.ref('expr'))
        def lam_xform(result):
            # The binder arrives as a Var node; unpack its name and type.
            var_name = result.content[0].content.var_name
            var_type = result.content[0].content.var_type
            return Lambda(var_name, var_type, result.content[1].content,
                          result.start_pos, result.end_pos)
        lam_parser.xform_content = lam_xform
        grammar.register('lam', lam_parser)
        # expr: the top-level choice between the three constructions
        expr_parser = parsers.Choice().either(grammar.ref('ref'))\
                                      .orelse(grammar.ref('app'))\
                                      .orelse(grammar.ref('lam'))
        grammar.register('expr', expr_parser)
        grammar.entry = grammar.ref('expr')
    def parse_from_string(self, string):
        """Tokenize and parse *string*, returning the parse result."""
        tokenizer = Tokenizer()
        self.prepare_tokenizer(tokenizer)
        parser = LLParsing(self.grammar)
        parser.tokenizer = tokenizer
        tokenizer.from_string(string)
        return parser.parse()
# entrypoint: parse a small example term and show the resulting AST
if __name__ == '__main__':
    source = '(λx:Bool. x:Bool y:Bool)'
    outcome = LambdaParser().parse_from_string(source)
    print("{0}\n==> {1}".format(source, outcome))
| {
"repo_name": "fredokun/popparser",
"path": "test/lambda_parser.py",
"copies": "1",
"size": "5718",
"license": "mit",
"hash": 9055428222434673000,
"line_mean": 34.725,
"line_max": 79,
"alpha_frac": 0.5225682295,
"autogenerated": false,
"ratio": 4.033874382498236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5056442611998235,
"avg_score": null,
"num_lines": null
} |
""" A simplistic pickle-and-file-based store for CRON urls, that failed to be executed """
import os.path
import datetime
from pickle import load, dump
from functools import partial, wraps
from twisted.web.client import Agent, WebClientContextFactory
from twisted.web.http_headers import Headers
from twisted.internet import reactor
from intranet3.log import DEBUG_LOG, EXCEPTION_LOG, INFO_LOG
from intranet3.helpers import dates_between
from intranet3.helpers import previous_day
from intranet3 import config
# Module-level loggers at the three verbosity levels used below.
LOG = INFO_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
class RequiredAction(object):
    """A named cron action paired with the URL builder used to retry it.

    @param name: unique ID of this action
    @param url_producer: function that produces an URL for this action
        from url_prefix and datetime.date
    """
    def __init__(self, name, url_producer):
        self.url_producer = url_producer
        self.name = name
class Repeater(object):
    """Retries cron URLs whose earlier executions failed.

    Pending (date, action) pairs come from a :py:class:`PickleStore`;
    each corresponding URL is fetched asynchronously with the shared
    twisted Agent and the store is updated with the outcome.
    """
    USER_AGENT = 'STXNext Intranet 2 Cron task'
    # Class-level (shared) HTTP client.
    contextFactory = WebClientContextFactory()
    client = Agent(reactor, contextFactory)

    def __init__(self, *actions):
        """
        @param actions: RequiredAction instances this repeater manages
        """
        self.headers = {
            'User-Agent': [self.USER_AGENT],
            'X-Intranet-Cron': [config['CRON_SECRET_KEY']]
        }
        self.actions = {}
        for action in actions:
            self.actions[action.name] = action.url_producer
        self.file_path = config['REPEATER_FILE']

    def pending(self):
        """Return a list of (url, update_callback) pairs awaiting retry."""
        result = []
        yesterday = previous_day(datetime.date.today())
        cron_url = config['CRON_URL']
        with PickleStore(self.file_path) as store:
            for date, action_name in store.get_pending(yesterday, self.actions.keys()):
                result.append(
                    ('%s%s' % (cron_url, self.actions[action_name](date)),
                     partial(self.update, date, action_name))
                )
        return result

    def update(self, date, action_name, done):
        """Persist whether the action for the given date succeeded."""
        with PickleStore(self.file_path) as store:
            store.update(date, action_name, done)

    def on_success(self, url, callback, resp):
        # A non-200 response still counts as a failure for the store.
        LOG(u"Repeater %s succeeded with status %s" % (url, resp.code))
        callback(resp.code == 200)

    def on_failure(self, url, callback, err):
        EXCEPTION(u"Repeater %s failed %s" % (url, err))
        callback(False)

    def __call__(self):
        """Run one retry round: request every pending URL asynchronously."""
        DEBUG(u"Repeater starting")
        i = 0
        try:
            for url, callback in self.pending():
                DEBUG(u"Will call action %s" % (url, ))
                deferred = self.client.request(
                    'GET',
                    url,
                    Headers(self.headers)
                )
                deferred.addCallbacks(
                    partial(self.on_success, url, callback),
                    partial(self.on_failure, url, callback)
                )
                i += 1
        except Exception:
            # Was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt (PEP 8 forbids it); narrowed to Exception.
            EXCEPTION(u"Repeater could not start")
        DEBUG(u"Repeater started %s jobs" % (i, ))
def online_action(func):
    """Guard a store method so it may only run inside the context manager.

    The ``_storage`` attribute only exists between ``__enter__`` and
    ``__exit__``, so its presence proves the store is "online".
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        assert hasattr(self, '_storage')
        return func(self, *args, **kwargs)
    return wrapper
class PickleStore(object):
    """Context-managed pickle file mapping action name -> date -> status.

    The pickled dict also holds a '__from__' key: the date the store was
    first created, used as the start of the backfill range in get_pending.
    """
    def __init__(self, file_path):
        self.file_path = file_path
    def __enter__(self):
        # Load the pickled state, or start a fresh store dated today.
        if not os.path.exists(self.file_path):
            self._storage = {}
            self._storage['__from__'] = datetime.date.today()
            DEBUG(u'Initialized empty pickle store instead of %s' % (self.file_path, ))
        else:
            DEBUG(u'Will load pickle store %s' % (self.file_path, ))
            with open(self.file_path, 'rb') as store_file:
                self._storage = load(store_file)
            DEBUG(u'Loaded pickle store %s' % (self.file_path, ))
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Always write the state back and drop it, even if dumping fails.
        try:
            DEBUG(u"Will dump pickle store %s" % (self.file_path, ))
            with open(self.file_path, 'wb') as store_file:
                dump(self._storage, store_file)
            DEBUG(u"Dumped pickle store %s" % (self.file_path, ))
        finally:
            del self._storage
    @online_action
    def get_pending(self, today, action_names):
        """
        returns a generator over tuples
        (date, action_name)
        of actions that failed to work before today
        """
        store = self._storage
        first_date = store['__from__']
        for date in dates_between(first_date, today):
            for action_name in action_names:
                if action_name not in store:
                    # First sighting of this action: backfill every day
                    # before today as done, so only new days get retried.
                    DEBUG(u"Creating filling for new action %s" % (action_name, ))
                    store[action_name] = {}
                    yesterday = today - datetime.timedelta(days=1)
                    for fill_date in dates_between(first_date, yesterday):
                        DEBUG(u"Creating fill for new action %s on date %s" % (action_name, fill_date))
                        store[action_name][fill_date] = {'done': True, 'created': datetime.datetime.now(), 'times': []}
                action_store = store[action_name]
                if date not in action_store:
                    # No record at all for this day: mark it failed and
                    # hand it out for retrying.
                    DEBUG(u"Creating a skipped action %s on %s" % (action_name, date))
                    action_store[date] = {'done': False, 'created': datetime.datetime.now(), 'times': []}
                    yield date, action_name
                else:
                    entry = action_store[date]
                    if entry['done']:
                        continue
                    else:
                        DEBUG(u'Found failed action %s on %s' % (action_name, date))
                        yield date, action_name
    @online_action
    def update(self, date, action_name, result):
        # Record the retry outcome for one (action, date) pair.
        DEBUG(u"Updating action %s on %s status to %s" % (action_name, date, result))
        store = self._storage
        entry = store[action_name][date]
        entry['done'] = result
entry['times'].append(datetime.datetime.now()) | {
"repo_name": "pytlakp/intranetref",
"path": "src/intranet3/cron/failsafe.py",
"copies": "1",
"size": "6233",
"license": "mit",
"hash": -1238911578105851000,
"line_mean": 37.245398773,
"line_max": 119,
"alpha_frac": 0.5482111343,
"autogenerated": false,
"ratio": 4.100657894736842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5148869029036842,
"avg_score": null,
"num_lines": null
} |
"""A simply script to scrape zmq.h for the zeromq version.
This is similar to the version.sh script in a zeromq source dir, but
it searches for an installed header, rather than in the current dir.
"""
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from __future__ import with_statement
import os
import sys
import re
import traceback
from warnings import warn
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
pjoin = os.path.join  # shorthand used throughout this module
# Regexes matching the #define lines in zmq.h that carry the version parts.
MAJOR_PAT='^#define +ZMQ_VERSION_MAJOR +[0-9]+$'
MINOR_PAT='^#define +ZMQ_VERSION_MINOR +[0-9]+$'
PATCH_PAT='^#define +ZMQ_VERSION_PATCH +[0-9]+$'
def include_dirs_from_path():
    """Derive include dirs from the bin dirs on the executable PATH."""
    # Strip a single trailing slash, then map each */bin entry to the
    # sibling */include directory.
    trimmed = [p[:-1] if p.endswith('/') else p
               for p in os.environ['PATH'].split(os.path.pathsep)]
    return [p[:-3] + 'include' for p in trimmed if p.endswith('bin')]
def default_include_dirs():
    """Fallback search path: /usr/local/include before /usr/include."""
    return ['/usr/local/include', '/usr/include']
def find_zmq_version():
    """Locate zmq.h and scrape the zeromq version from it.

    The include dirs listed in setup.cfg's [build_ext] section are
    searched first; when none are configured, the defaults from
    default_include_dirs() are used instead.

    Returns
    -------
    ((major, minor, patch), "/path/to/zmq.h")
    """
    search_dirs = []
    if os.path.exists('setup.cfg'):
        cfg = ConfigParser()
        cfg.read('setup.cfg')
        if 'build_ext' in cfg.sections():
            for option, value in cfg.items('build_ext'):
                if option == 'include_dirs':
                    search_dirs = value.split(os.path.pathsep)
    if not search_dirs:
        search_dirs = default_include_dirs()
    for directory in search_dirs:
        zmq_h = pjoin(directory, 'zmq.h')
        if not os.path.isfile(zmq_h):
            continue
        with open(zmq_h) as header:
            contents = header.read()
        # Pull the first integer off each matching #define line.
        numbers = []
        for pattern in (MAJOR_PAT, MINOR_PAT, PATCH_PAT):
            define = re.findall(pattern, contents, re.MULTILINE)[0]
            numbers.append(int(re.findall('[0-9]+', define)[0]))
        return (tuple(numbers), zmq_h)
    raise IOError("Couldn't find zmq.h")
def ver_str(version):
    """Render a version tuple as a dotted string."""
    return '.'.join(str(part) for part in version)
def check_zmq_version(min_version):
    """Warn (or exit) when the installed zmq.h is older than min_version."""
    required = ver_str(min_version)
    too_old = ("If you see 'undeclared identifier' errors, "
               "your ZeroMQ is likely too old.")
    requires = "This pyzmq requires zeromq >= %s" % required
    try:
        found_version, header = find_zmq_version()
        if found_version < min_version:
            # Building would fail anyway; stop with a clear explanation.
            print (requires)
            print ("but it appears you are building against %s" % header)
            print ("which has zeromq %s" % ver_str(found_version))
            sys.exit(1)
    except IOError:
        warn('\n'.join(["Couldn't find zmq.h to check for version compatibility.",
                        too_old,
                        requires]))
    except IndexError:
        # The version #defines were missing from zmq.h entirely.
        warn('\n'.join(["Couldn't find ZMQ_VERSION macros in zmq.h to check for version compatibility.",
                        "This probably means that you have ZeroMQ <= 2.0.9",
                        too_old,
                        requires]))
    except Exception:
        traceback.print_exc()
        warn('\n'.join(["Unexpected Error checking for zmq version.",
                        too_old,
                        requires]))
if __name__ == '__main__':
    # Report where zmq.h was found and which version it declares.
    version, header = find_zmq_version()
    print (header)
    print (ver_str(version))
| {
"repo_name": "Mustard-Systems-Ltd/pyzmq",
"path": "zmqversion.py",
"copies": "7",
"size": "3916",
"license": "bsd-3-clause",
"hash": 9041915508493684000,
"line_mean": 31.9075630252,
"line_max": 105,
"alpha_frac": 0.5990806946,
"autogenerated": false,
"ratio": 3.605893186003683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7704973880603683,
"avg_score": null,
"num_lines": null
} |
""" A simulation environment for simple blackjack (see blackjack.pdf)
The state is playerSum in {12-20}, dealerCard in {1-10}, and usableAce (boolean)
The actions are hit or stick, 1 or 0
"""
from pylab import *
# Shared game state, mutated in place by the functions below.
playerSum = 0  # player's hand total (12-20 during play, per module docs)
dealerCard = 0  # dealer's face-up card, 1-10 (ace = 1)
usableAce = False  # whether the player counts an ace as 11
def card():
    """Deal one card: 1-9 at face value, 10 for ten/jack/queen/king."""
    rank = randint(1, 14)  # uniform over ranks 1..13
    return rank if rank < 10 else 10
def encode():
    """Pack the (playerSum, dealerCard, usableAce) globals into a state id."""
    ace_offset = 90 if usableAce else 0
    return 1 + ace_offset + 9*(dealerCard-1) + (playerSum-12)
def decode(state):
    """Unpack a state id (from encode) into the shared globals.

    State 0 is the pre-deal state and leaves the globals untouched.
    """
    global playerSum, dealerCard, usableAce
    if state==0: return
    state = state - 1
    usableAce = state >= 90
    state = state % 90
    # Floor division keeps dealerCard an int on Python 3 as well; the old
    # plain `state / 9` would yield a float there (and hence float state
    # ids from encode). On Python 2 `//` is identical for ints.
    dealerCard = 1 + state // 9
    playerSum = (state % 9) + 12
def init():
    """Return the initial (pre-deal) state id."""
    return 0
def numActions(s):
    """Every state offers the same two actions (0 = stick, 1 = hit)."""
    return 2
def sample(s, a):
    """Sample one transition: take action a (0=stick, 1=hit) in state s.

    Returns (reward, nextState); nextState -1 marks a terminal
    transition, and state 0 (see init) triggers the opening deal.
    """
    global playerSum, dealerCard, usableAce
    decode(s)
    if s==0: return firstSample()
    if a==0: return dealerPlaySample() # sticking
    playerSum += card() # hitting
    if playerSum==21: return dealerPlaySample()
    if playerSum > 21:
        if usableAce:
            # Re-count the ace as 1 instead of 11 and keep playing.
            playerSum -= 10
            usableAce = False
            return 0, encode()
        else:
            return -1, -1  # bust: immediate loss
    return 0, encode()
def firstSample():
    # Deal the opening hands and settle naturals (two-card 21s) at once.
    global playerSum, dealerCard, usableAce
    """ deal first cards and check for naturals """
    playerCard1 = card()
    playerCard2 = card()
    playerSum = playerCard1 + playerCard2
    # An ace in the opening pair is counted as 11 (sum + 10).
    usableAce = playerCard1==1 or playerCard2==1
    if usableAce: playerSum += 10
    dealerCard = card()
    if playerSum==21: # player has natural
        dealerCard2 = card()
        dealerSum = dealerCard + dealerCard2
        # ace + ten-card sums to 11 here (aces stored as 1) => dealer 21
        if (dealerCard==1 or dealerCard2==1) and dealerSum==11: # dealer has a natural too
            return 0, -1
        else:
            return 1, -1
    # Totals below 12 can always hit without busting, so draw automatically.
    while playerSum < 12: playerSum += card()
    if playerSum==21: return dealerPlaySample()
    return 0, encode()
def dealerPlaySample():
    # Play out the dealer's hand after the player stands; the return is
    # always terminal: (reward, -1).
    dealerCard2 = card()
    dealerSum = dealerCard + dealerCard2
    usableAce = dealerCard==1 or dealerCard2==1 # now usableAce refers to the dealer
    if usableAce: dealerSum += 10
    if dealerSum==21: return -1, -1 # dealer has a natural
    # NOTE(review): the dealer stands at 16 here, not the usual 17 —
    # confirm this matches the simplified rules in blackjack.pdf.
    while dealerSum < 16:
        dealerSum += card()
        if dealerSum > 21:
            if usableAce:
                # Demote the dealer's ace from 11 to 1 and keep drawing.
                dealerSum -= 10
                usableAce = False
            else:
                return 1, -1  # dealer busts: player wins
    if dealerSum < playerSum: return 1, -1
    elif dealerSum > playerSum: return -1, -1
    else: return 0, -1
def printPolicy(policy):
    # Print the greedy action table ('S' = stick when policy(state)==0,
    # otherwise 'H' = hit), one table per usable-ace setting; columns are
    # the dealer card (1-10), rows the player sum (20 down to 12).
    global playerSum, dealerCard, usableAce
    for usableAce in [True, False]:
        print
        print "" if usableAce else " No", "Usable Ace:"
        for playerSum in range(20, 11, -1):
            for dealerCard in range(1,11):
                print "S" if policy(encode())==0 else "H",
            print playerSum
        # Footer row: the dealer-card column labels.
        for dealerCard in range(1,11): print dealerCard,
        print
| {
"repo_name": "smithe0/366Prog1",
"path": "blackjack.py",
"copies": "1",
"size": "2887",
"license": "apache-2.0",
"hash": 6590946340741328000,
"line_mean": 28.7628865979,
"line_max": 91,
"alpha_frac": 0.5992379633,
"autogenerated": false,
"ratio": 3.486714975845411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45859529391454107,
"avg_score": null,
"num_lines": null
} |
""" A simulation of diffusion """
import importlib
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import mesh.patch as patch
from simulation_null import NullSimulation, grid_setup, bc_setup
import multigrid.MG as MG
from util import msg
class Simulation(NullSimulation):
    """ A simulation of diffusion """

    def initialize(self):
        """
        Initialize the grid and variables for diffusion and set the initial
        conditions for the chosen problem.
        """
        # setup the grid
        my_grid = grid_setup(self.rp, ng=1)
        # for MG, we need to be a power of two
        if my_grid.nx != my_grid.ny:
            msg.fail("need nx = ny for diffusion problems")
        n = int(math.log(my_grid.nx)/math.log(2.0))
        if 2**n != my_grid.nx:
            msg.fail("grid needs to be a power of 2")
        # create the variables
        # first figure out the boundary conditions -- we allow periodic,
        # Dirichlet, and Neumann.
        bc, _, _ = bc_setup(self.rp)
        for bnd in [bc.xlb, bc.xrb, bc.ylb, bc.yrb]:
            if bnd not in ["periodic", "neumann", "dirichlet"]:
                msg.fail("invalid BC")
        my_data = patch.CellCenterData2d(my_grid)
        my_data.register_var("phi", bc)
        my_data.create()
        self.cc_data = my_data
        # now set the initial conditions for the problem
        problem = importlib.import_module("diffusion.problems.{}".format(self.problem_name))
        problem.init_data(self.cc_data, self.rp)

    def method_compute_timestep(self):
        """
        The diffusion timestep() function computes the timestep
        using the explicit timestep constraint as the starting point.
        We then multiply by the CFL number to get the timestep.
        Since we are doing an implicit discretization, we do not
        require CFL < 1.
        """
        cfl = self.rp.get_param("driver.cfl")
        k = self.rp.get_param("diffusion.k")
        # the timestep is min(dx**2/k, dy**2/k)
        xtmp = self.cc_data.grid.dx**2/k
        ytmp = self.cc_data.grid.dy**2/k
        self.dt = cfl*min(xtmp, ytmp)

    def evolve(self):
        """
        Diffusion through dt using C-N implicit solve with multigrid
        """
        self.cc_data.fill_BC_all()
        phi = self.cc_data.get_var("phi")
        myg = self.cc_data.grid
        # diffusion coefficient
        k = self.rp.get_param("diffusion.k")
        # setup the MG object -- we want to solve a Helmholtz equation
        # equation of the form:
        # (alpha - beta L) phi = f
        #
        # with alpha = 1
        # beta = (dt/2) k
        # f = phi + (dt/2) k L phi
        #
        # this is the form that arises with a Crank-Nicolson discretization
        # of the diffusion equation.
        mg = MG.CellCenterMG2d(myg.nx, myg.ny,
                               xmin=myg.xmin, xmax=myg.xmax,
                               ymin=myg.ymin, ymax=myg.ymax,
                               xl_BC_type=self.cc_data.BCs['phi'].xlb,
                               xr_BC_type=self.cc_data.BCs['phi'].xrb,
                               yl_BC_type=self.cc_data.BCs['phi'].ylb,
                               yr_BC_type=self.cc_data.BCs['phi'].yrb,
                               alpha=1.0, beta=0.5*self.dt*k,
                               verbose=0)
        # form the RHS: f = phi + (dt/2) k L phi (where L is the Laplacian,
        # discretized below with the standard 5-point stencil)
        f = mg.soln_grid.scratch_array()
        f.v()[:, :] = phi.v() + 0.5*self.dt*k * (
            (phi.ip(1) + phi.ip(-1) - 2.0*phi.v())/myg.dx**2 +
            (phi.jp(1) + phi.jp(-1) - 2.0*phi.v())/myg.dy**2)
        mg.init_RHS(f)
        # initial guess is zeros
        mg.init_zeros()
        # solve the MG problem for the updated phi
        mg.solve(rtol=1.e-10)
        # mg.smooth(mg.nlevels-1,100)
        # update the solution
        phi.v()[:, :] = mg.get_solution().v()
        # increment the time
        self.cc_data.t += self.dt
        self.n += 1

    def dovis(self):
        """
        Do runtime visualization.
        """
        plt.clf()
        phi = self.cc_data.get_var("phi")
        myg = self.cc_data.grid
        img = plt.imshow(np.transpose(phi.v()),
                         interpolation="nearest", origin="lower",
                         extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax],
                         cmap=self.cm)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("phi")
        cb = plt.colorbar(img)
        # empty format string: draw the colorbar without tick labels
        cb.formatter = matplotlib.ticker.FormatStrFormatter("")
        plt.figtext(0.05, 0.0125, "t = {:10.5f}".format(self.cc_data.t))
        plt.pause(0.001)
        plt.draw()
| {
"repo_name": "zingale/pyro2",
"path": "diffusion/simulation.py",
"copies": "1",
"size": "4732",
"license": "bsd-3-clause",
"hash": 6576336203106591000,
"line_mean": 29.7272727273,
"line_max": 92,
"alpha_frac": 0.5338123415,
"autogenerated": false,
"ratio": 3.428985507246377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4462797848746377,
"avg_score": null,
"num_lines": null
} |
""" A simulation of diffusion """
import importlib
import math
import numpy as np
import matplotlib.pyplot as plt
import mesh.patch as patch
from simulation_null import NullSimulation, grid_setup, bc_setup
import multigrid.MG as MG
from util import msg
class Simulation(NullSimulation):
    """ A simulation of diffusion """

    def initialize(self):
        """
        Initialize the grid and variables for diffusion and set the initial
        conditions for the chosen problem.
        """
        # setup the grid
        my_grid = grid_setup(self.rp, ng=1)
        # for MG, we need to be a power of two
        if my_grid.nx != my_grid.ny:
            msg.fail("need nx = ny for diffusion problems")
        n = int(math.log(my_grid.nx)/math.log(2.0))
        if 2**n != my_grid.nx:
            msg.fail("grid needs to be a power of 2")
        # create the variables
        # first figure out the boundary conditions -- we allow periodic,
        # Dirichlet, and Neumann.
        bc, _, _ = bc_setup(self.rp)
        for bnd in [bc.xlb, bc.xrb, bc.ylb, bc.yrb]:
            if bnd not in ["periodic", "neumann", "dirichlet"]:
                msg.fail("invalid BC")
        my_data = patch.CellCenterData2d(my_grid)
        my_data.register_var("phi", bc)
        my_data.create()
        self.cc_data = my_data
        # now set the initial conditions for the problem
        problem = importlib.import_module("diffusion.problems.{}".format(self.problem_name))
        problem.init_data(self.cc_data, self.rp)

    def method_compute_timestep(self):
        """
        The diffusion timestep() function computes the timestep
        using the explicit timestep constraint as the starting point.
        We then multiply by the CFL number to get the timestep.
        Since we are doing an implicit discretization, we do not
        require CFL < 1.
        """
        cfl = self.rp.get_param("driver.cfl")
        k = self.rp.get_param("diffusion.k")
        # the timestep is min(dx**2/k, dy**2/k)
        xtmp = self.cc_data.grid.dx**2/k
        ytmp = self.cc_data.grid.dy**2/k
        self.dt = cfl*min(xtmp, ytmp)

    def evolve(self):
        """
        Diffusion through dt using C-N implicit solve with multigrid
        """
        self.cc_data.fill_BC_all()
        phi = self.cc_data.get_var("phi")
        myg = self.cc_data.grid
        # diffusion coefficient
        k = self.rp.get_param("diffusion.k")
        # setup the MG object -- we want to solve a Helmholtz equation
        # equation of the form:
        # (alpha - beta L) phi = f
        #
        # with alpha = 1
        # beta = (dt/2) k
        # f = phi + (dt/2) k L phi
        #
        # this is the form that arises with a Crank-Nicolson discretization
        # of the diffusion equation.
        mg = MG.CellCenterMG2d(myg.nx, myg.ny,
                               xmin=myg.xmin, xmax=myg.xmax,
                               ymin=myg.ymin, ymax=myg.ymax,
                               xl_BC_type=self.cc_data.BCs['phi'].xlb,
                               xr_BC_type=self.cc_data.BCs['phi'].xrb,
                               yl_BC_type=self.cc_data.BCs['phi'].ylb,
                               yr_BC_type=self.cc_data.BCs['phi'].yrb,
                               alpha=1.0, beta=0.5*self.dt*k,
                               verbose=0)
        # form the RHS: f = phi + (dt/2) k L phi (where L is the Laplacian,
        # discretized below with the standard 5-point stencil)
        f = mg.soln_grid.scratch_array()
        f.v()[:, :] = phi.v() + 0.5*self.dt*k * (
            (phi.ip(1) + phi.ip(-1) - 2.0*phi.v())/myg.dx**2 +
            (phi.jp(1) + phi.jp(-1) - 2.0*phi.v())/myg.dy**2)
        mg.init_RHS(f)
        # initial guess is zeros
        mg.init_zeros()
        # solve the MG problem for the updated phi
        mg.solve(rtol=1.e-10)
        # mg.smooth(mg.nlevels-1,100)
        # update the solution
        phi.v()[:, :] = mg.get_solution().v()
        # increment the time
        self.cc_data.t += self.dt
        self.n += 1

    def dovis(self):
        """
        Do runtime visualization.
        """
        plt.clf()
        phi = self.cc_data.get_var("phi")
        myg = self.cc_data.grid
        plt.imshow(np.transpose(phi.v()),
                   interpolation="nearest", origin="lower",
                   extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax],
                   cmap=self.cm)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("phi")
        plt.colorbar()
        plt.figtext(0.05, 0.0125, "t = {:10.5f}".format(self.cc_data.t))
        plt.pause(0.001)
        plt.draw()
| {
"repo_name": "harpolea/pyro2",
"path": "diffusion/simulation.py",
"copies": "1",
"size": "4636",
"license": "bsd-3-clause",
"hash": -7553926476143549000,
"line_mean": 29.5,
"line_max": 92,
"alpha_frac": 0.5299827437,
"autogenerated": false,
"ratio": 3.413843888070692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9443472924922928,
"avg_score": 0.00007074136955291455,
"num_lines": 152
} |
"""A simulator for Zombie Apocalypse.
(see Munz et al. 2009)
http://mysite.science.uottawa.ca/rsmith43/Zombies.pdf
"""
import os
# Set this before import matplotlib
os.environ['MPLCONFIGDIR'] = '/tmp'
# This is needed before importing pyplot for rendering the image
import matplotlib
matplotlib.use('Agg')
import numpy
from matplotlib import pyplot
from scipy.integrate import odeint
# Defaults for the ZombieApocalypseSimulator constructor arguments.
DEFAULT_TIME_VECTOR = numpy.linspace(0, 5., 1000)
DEFAULT_INITIAL_POPULATION = 500.
DEFAULT_INITIAL_ZOMBIE_POPULATION = 0.
DEFAULT_INITIAL_DEATH_POPULATION = 0.
DEFAULT_BIRTH_RATE = 0. # birth rate (births/day)
DEFAULT_NATURAL_DEATH_PERCENT = 0.01 # natural death percent (per day)
DEFAULT_TRANSMISSION_PERCENT = 0.95 # transmission percent (per day)
DEFAULT_RESURRECT_PERCENT = 0.01 # resurrect percent (per day)
DEFAULT_DESTROY_PERCENT = 0.01 # destroy percent (per day)
# Two-line figure title; filled in by ZombieApocalypseSimulator.plot_title.
PLOT_TITLE_TEMPLATE = """\
init po: {}, init zombie: {}, init death: {}, {} daily birth
death pct. {}%, trans pct. {}%, resur pct. {}%, destroy pct. {}%."""
class ZombieApocalypseSimulator(object):
    """A simulator for Zombie Apocalypse (see Munz et al. 2009)."""

    def __init__(self,
                 time_vector=None,
                 initial_population=None,
                 initial_zombie_population=None,
                 initial_death_population=None,
                 birth_rate=None,
                 natural_death_percent=None,
                 transmission_percent=None,
                 resurrect_percent=None,
                 destroy_percent=None):
        """A constructor for the simulator.

        Every argument falls back to the matching module-level DEFAULT_*
        constant when omitted. The fallback is resolved at call time via
        None sentinels, so the shared DEFAULT_TIME_VECTOR numpy array is
        never captured as a def-time (mutable) default argument.

        Args:
            time_vector: A time span for the simulation.
            initial_population: Initial population of the human beings.
            initial_zombie_population: Initial population of the Zombies.
            initial_death_population: Initial number of the dead bodies.
            birth_rate: Number of births/day.
            natural_death_percent: Natural death percent per day.
            transmission_percent: Transmission percent per day
                (a human becomes a zombie).
            resurrect_percent: Resurrection percent per day
                (a dead body becomes a zombie).
            destroy_percent: Destroy percent per day
                (a zombie is completely destroyed).
        """
        if time_vector is None:
            time_vector = DEFAULT_TIME_VECTOR
        if initial_population is None:
            initial_population = DEFAULT_INITIAL_POPULATION
        if initial_zombie_population is None:
            initial_zombie_population = DEFAULT_INITIAL_ZOMBIE_POPULATION
        if initial_death_population is None:
            initial_death_population = DEFAULT_INITIAL_DEATH_POPULATION
        if birth_rate is None:
            birth_rate = DEFAULT_BIRTH_RATE
        if natural_death_percent is None:
            natural_death_percent = DEFAULT_NATURAL_DEATH_PERCENT
        if transmission_percent is None:
            transmission_percent = DEFAULT_TRANSMISSION_PERCENT
        if resurrect_percent is None:
            resurrect_percent = DEFAULT_RESURRECT_PERCENT
        if destroy_percent is None:
            destroy_percent = DEFAULT_DESTROY_PERCENT
        self.time_vector = time_vector
        self.initial_population = initial_population
        self.initial_zombie_population = initial_zombie_population
        self.initial_death_population = initial_death_population
        self.birth_rate = birth_rate
        # Convert the user-facing percentages to per-day fractions once.
        self.natural_death_rate = natural_death_percent / 100
        self.transmission_rate = transmission_percent / 100
        self.resurrect_rate = resurrect_percent / 100
        self.destroy_rate = destroy_percent / 100

    def _compute_populations(self, y, t):
        """Compute the population derivatives at the given time point.

        Args:
            y: A list containing the numbers of human beings, zombies,
                and dead bodies.
            t: A time point for the computation (required by odeint; the
                system itself is autonomous).

        Returns:
            A list [dS/dt, dZ/dt, dR/dt] per the Munz et al. 2009 model.
        """
        Si = y[0]
        Zi = y[1]
        Ri = y[2]
        # the model equations (see Munz et al. 2009)
        f0 = (self.birth_rate - self.transmission_rate * Si * Zi
              - self.natural_death_rate * Si)
        # BUG FIX: destroy_rate is already a per-day fraction (converted
        # from a percentage in __init__). The old `self.destroy_rate / 100`
        # divided by 100 a second time, understating zombie destruction —
        # note f2 below always used self.destroy_rate directly.
        f1 = (self.transmission_rate * Si * Zi
              + self.resurrect_rate * Ri
              - self.destroy_rate * Si * Zi)
        f2 = (self.natural_death_rate * Si + self.destroy_rate * Si * Zi
              - self.resurrect_rate * Ri)
        return [f0, f1, f2]

    def _solve(self):
        """Integrate the ODE system over self.time_vector; returns the
        odeint solution array (rows = time points, cols = S, Z, R)."""
        y0 = [self.initial_population, self.initial_zombie_population,
              self.initial_death_population]
        return odeint(self._compute_populations, y0, self.time_vector)

    @property
    def plot_title(self):
        """Creates a title string of the result image.

        Returns:
            A title string summarizing the simulation parameters.
        """
        return PLOT_TITLE_TEMPLATE.format(
            self.initial_population,
            self.initial_zombie_population,
            self.initial_death_population,
            self.birth_rate,
            self.natural_death_rate*100.0,
            self.transmission_rate*100.0,
            self.resurrect_rate*100.0,
            self.destroy_rate*100.0)

    def plot(self, file_object):
        """Render the living/zombie population curves as a PNG.

        Args:
            file_object: A file object for writing the binary data
                of the generated png file.
        """
        soln = self._solve()
        living = soln[:, 0]
        zombies = soln[:, 1]
        # (column 2 is the dead population; it is not plotted)
        pyplot.figure()
        pyplot.plot(self.time_vector, living, label='Living')
        pyplot.plot(self.time_vector, zombies, label='Zombies')
        pyplot.xlabel('Days from outbreak')
        pyplot.ylabel('Population')
        pyplot.title(self.plot_title)
        pyplot.legend(loc=0)
        pyplot.savefig(file_object, format='png')
| {
"repo_name": "GoogleCloudPlatform/appengine-scipy-zombie-apocalypse-python",
"path": "zombie_apocalypse.py",
"copies": "1",
"size": "5368",
"license": "apache-2.0",
"hash": 3910787837652989400,
"line_mean": 37.3428571429,
"line_max": 77,
"alpha_frac": 0.6177347243,
"autogenerated": false,
"ratio": 3.689347079037801,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9797758586871728,
"avg_score": 0.0018646432932147218,
"num_lines": 140
} |
"""A Simulator object holds together information about and functionality of
cellular automaton simulators.
"""
# This file is part of zasim. zasim is licensed under the BSD 3-clause license.
# See LICENSE.txt for details.
# Prefer the Qt bindings for QObject/Signal; fall back to the pure-python
# lightweight signal implementation (with a plain object base class) when
# the Qt bindings are not importable.
try:
    from .external.qt import QObject, Signal
except ImportError:
    from zasim.lightweight_signal import Signal
    QObject = object
    print "using lightweight signal"
class TargetProxy(object):
    """Forward a whitelisted set of attributes of a wrapped target object."""

    def __init__(self, target, attrs):
        """Remember the target object and the attribute names to expose."""
        self.target = target
        self.attrs = attrs

    def __getattr__(self, name):
        # Only whitelisted attribute names are forwarded to the target.
        if name not in self.attrs:
            raise AttributeError("%s not in target attrs" % name)
        return getattr(self.target, name)
class SimulatorInterface(QObject):
    """This class serves as the base for simulator objects.

    .. note::
        If you ever derive from this class and you get an error like
        "PySide.QtCore.Signal object has no attribute 'emit'", then you
        have most likely forgotten to call super's __init__."""

    shape = ()
    """The shape of the cellular space of this automaton.

    It's a tuple with the size of each dimension as entries.

    .. note ::
        This excludes any borders that have redundant cells in them."""

    step_number = 0
    """The number of the current step, beginning with the initial configuration
    being step 0."""

    available_history = 0
    """How many steps back the simulator can go."""

    prepared = True
    """Whether the simulator needs to run any kind of preparation before being
    able to perform steps"""

    palette_info = {}
    """Dictionary for different display options and their extra data.

    colors32: list or dictionary of 32bit padded RGB values, like QImage::Format_RGB32
    qcolors: list or dictionary of QColor values
    tiles: dictionary with 'images', an image map and 'rects', a dictionary of rects
    chars: list or dictionary of ascii/unicode values
    cboxes: list or dictionary of multiline ascii/unicode values
    hexcols: list or dictionary of colors usable in css-like color definitions.
    """

    #rect_updated = Signal(tuple)
    #"""Is emitted when only a rectangular shaped area of the conf has changed."""

    updated = Signal()
    """Is emitted when the conf has changed as result of a step."""

    changed = Signal()
    """Is emitted when the configuration has changed, but there was no step."""

    started = Signal()
    """Is emitted when continuous updating has been started."""

    stopped = Signal()
    """Is emitted when continuous updating has been stopped."""

    # BUG FIX: this signal was declared under the misspelled name
    # "shapshot_taken", which made snapshot() raise AttributeError when it
    # emitted self.snapshot_taken.
    snapshot_taken = Signal()
    """Is emitted when a snapshot has been taken."""

    snapshot_restored = Signal()
    """Is emitted when a snapshot is restored or a completely new configuration
    has been set."""

    target_attrs = []
    """The extra-attributes the target has to offer, such as histogram."""

    t = TargetProxy(object(), [])
    """A proxy object to access the target_attrs."""

    changeinfo = None
    """Information about the last configuration change.

    May be

    1. a rectangle (x, y, w, h) that has been changed
    2. None, if everything changed."""

    def get_config(self):
        """Returns a copy of the configuration space as a numpy array.

        Its shape matches up with :attr:`shape`, so it also does not
        include any borders."""

    def set_config(self, config):
        """Sets a new config for the simulator.

        Emits snapshot_restored"""

    def set_config_value(self, pos, value=None):
        """Set the config value at pos to value.

        If value is None, flip the value instead."""

    def step(self):
        """Step the simulator once."""
        self.updated.emit()
        self.step_number += 1

    def copy(self):
        """Duplicate the simulator."""

    def start(self):
        """Call this to notify views, that continuous updates have
        been started."""
        self.started.emit()

    def stop(self):
        """Call this to notify views, that continuous updates have
        been stopped."""
        self.stopped.emit()

    def snapshot(self):
        """Get a lightweight snapshot of this simulator, that can be restored
        again later on.

        .. note ::
            This emits the :attr:`snapshot_taken` signal."""
        self.snapshot_taken.emit()

    def restore(self, snapshot):
        """Restore the simulator to an earlier state.

        .. note ::
            This emits the :attr:`snapshot_restored` signal."""
        # BUG FIX: the two adjacent string literals used to concatenate to
        # "...not implementedfor..." - a space was missing.
        raise NotImplementedError("restoring of snapshots not implemented "
                "for %s" % (self.__class__))

    def reset(self, configurator=None):
        """Reset the simulator by using the same generator that was initially
        used, if it's still available, or set a new configurator for the future
        and reset the configuration with it once.

        See also `cagen.config`"""
        raise NotImplementedError("reset not implemented.")

    def limit_palette(self):
        """Restrict palette_info to the values the target can actually take."""
        result = {}
        for key, pal in self.palette_info.iteritems():
            new_dict = {k: pal[k] for k in self.t.possible_values}
            result[key] = new_dict
        self.palette_info = result
class CagenSimulator(SimulatorInterface):
    """This Simulator takes a `StepFunc` instance and packs it in an interface
    compatible with `SimulatorInterface`."""

    def __init__(self, step_func):
        """Wrap *step_func* and mirror its target's shape/attributes."""
        super(CagenSimulator, self).__init__()
        self._step_func = step_func
        self._target = step_func.target
        self._size = self._target.size
        # The neighbourhood's bounding box determines how many border
        # cells get stripped off in get_config().
        self._bbox = self._step_func.neigh.bounding_box()
        self.shape = self._size
        self.prepared = self._step_func.prepared
        self.t = TargetProxy(self._target, self._step_func.attrs + ["possible_values"])

    def get_config(self):
        """Return the config, sans borders."""
        if len(self.shape) == 1:
            ((l, r),) = self._bbox
            return self._target.cconf[abs(l):-abs(r)].copy()
        elif len(self.shape) == 2:
            (l, r), (u, d) = self._bbox
            return self._target.cconf[abs(u):-abs(d),abs(l):-abs(r)].copy()

    def set_config(self, config):
        """Install a new configuration; emits snapshot_restored."""
        self._step_func.set_config(config)
        self.snapshot_restored.emit()

    def set_config_value(self, pos, value=None):
        """Set (or, when value is None, flip) the cell at pos.

        Out-of-range positions are silently ignored."""
        try:
            self._step_func.set_config_value(pos, value)
        except IndexError:
            return
        self.changed.emit()

    def step(self):
        """Delegate the stepping to the :meth:`StepFunc.step` method, then
        emit :attr:`updated`."""
        self._step_func.step()
        # XXX what's the order? what happens if a slot called from here changes something?
        self.prepared = True
        self.step_number += 1
        self.updated.emit()

    def step_inline(self):
        """Step the simulator using the weave.inline version of the code."""
        self._step_func.step_inline()
        self.prepared = True
        self.step_number += 1
        self.updated.emit()

    def step_pure_py(self):
        """Step the simulator using the pure python code version."""
        # NOTE(review): unlike step()/step_inline(), this does not set
        # self.prepared - confirm whether that is intentional.
        self._step_func.step_pure_py()
        self.step_number += 1
        self.updated.emit()

    def reset(self, configurator=None):
        """Reset the simulator by using the same generator that was initially
        used, if it's still available, or set a new configurator for the future
        and reset the configuration with it once.

        Raises ValueError when no generator is available."""
        if configurator is not None:
            self._target._reset_generator = configurator
        if self._target._reset_generator:
            newconf = self._target._reset_generator.generate(size_hint=self._target._reset_size)
            self.set_config(newconf)
        else:
            raise ValueError("This simulator's target wasn't created with a generator as config value.")

    def __str__(self):
        try:
            return str(self._step_func)
        # BUG FIX: was a bare "except:", which would also swallow
        # KeyboardInterrupt and SystemExit.
        except Exception:
            return repr(self)
class ElementaryCagenSimulator(CagenSimulator):
    """A `CagenSimulator` specialisation for elementary step funcs that
    carry a rule number, enabling a few extra options."""

    rule_number = 0
    """The rule number of the target."""

    def __init__(self, step_func, rule_nr):
        # Delegate all wrapping work to the base class, then remember
        # the rule number for later use.
        CagenSimulator.__init__(self, step_func=step_func)
        self.rule_number = rule_nr
| {
"repo_name": "timo/zasim",
"path": "zasim/simulator.py",
"copies": "1",
"size": "8237",
"license": "bsd-3-clause",
"hash": 4566577815396524000,
"line_mean": 32.2137096774,
"line_max": 104,
"alpha_frac": 0.6266844725,
"autogenerated": false,
"ratio": 4.2415036045314105,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013305022820547401,
"num_lines": 248
} |
"""A single clock for all patchers."""
MIN_START_TIME = 86400.0 # Windows support unix times starting from 86,400.
class TimeMachineClock(object):
"""Unifing class for clock types."""
def __init__(self, start_time=MIN_START_TIME, clock_listeners=None):
"""Initialise a unified clock."""
if start_time < MIN_START_TIME:
raise ValueError('start_time cannot be lower than {} ({} given)'.
format(MIN_START_TIME, start_time))
self._time = start_time
self._clock_listeners = clock_listeners
if clock_listeners is None:
self._clock_listeners = []
@property
def time(self):
"""Get the clock time in seconds since the epoch."""
return self._time
@time.setter
def time(self, time):
"""Set the clock time.
time - is time in seconds since the epoch.
"""
self._time = float(time)
for listener in self._clock_listeners:
listener.set_time(self._time)
| {
"repo_name": "snudler6/time-travel",
"path": "src/time_travel/time_machine_clock.py",
"copies": "1",
"size": "1033",
"license": "mit",
"hash": 753145375360163800,
"line_mean": 27.6944444444,
"line_max": 77,
"alpha_frac": 0.5876089061,
"autogenerated": false,
"ratio": 4.165322580645161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.525293148674516,
"avg_score": null,
"num_lines": null
} |
"""A single common terminal for all websockets.
"""
import tornado.web
# This demo requires tornado_xstatic and XStatic-term.js
import tornado_xstatic
from terminado import TermSocket, SingleTermManager
from common_demo_stuff import run_and_show_browser, STATIC_DIR, TEMPLATE_DIR
class TerminalPageHandler(tornado.web.RequestHandler):
    """Serve the terminal page, pointing it at the shared websocket."""

    def get(self):
        settings = self.application.settings
        return self.render(
            "termpage.html",
            static=self.static_url,
            xstatic=settings['xstatic_url'],
            ws_url_path="/websocket",
        )
def main(argv):
    """Run a tornado app that serves one shared bash terminal on port 8765."""
    term_manager = SingleTermManager(shell_command=['bash'])
    routes = [
        (r"/websocket", TermSocket, {'term_manager': term_manager}),
        (r"/", TerminalPageHandler),
        (r"/xstatic/(.*)", tornado_xstatic.XStaticFileHandler,
         {'allowed_modules': ['termjs']}),
    ]
    application = tornado.web.Application(
        routes,
        static_path=STATIC_DIR,
        template_path=TEMPLATE_DIR,
        xstatic_url=tornado_xstatic.url_maker('/xstatic/'),
    )
    application.listen(8765, 'localhost')
    run_and_show_browser("http://localhost:8765/", term_manager)
if __name__ == '__main__':
main([]) | {
"repo_name": "akalipetis/terminado",
"path": "demos/single.py",
"copies": "4",
"size": "1267",
"license": "bsd-2-clause",
"hash": 2676608328521362400,
"line_mean": 38.625,
"line_max": 76,
"alpha_frac": 0.6069455406,
"autogenerated": false,
"ratio": 4.0094936708860756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6616439211486076,
"avg_score": null,
"num_lines": null
} |
"""A single layer of black phosphorus"""
from math import pi, sin, cos
import pybinding as pb
def monolayer_4band(num_hoppings=5):
    """Monolayer phosphorene lattice using the four-band model

    Parameters
    ----------
    num_hoppings : int
        Number of hopping terms to consider, from 2 (t1 and t2 only)
        up to 5 (t1 through t5).
    """
    # Lattice geometry. A/D sites sit at height h, B/C at height 0,
    # which models the two sub-layers of the puckered phosphorene sheet
    # -- TODO confirm against the model reference.
    a = 0.222  # nm
    ax = 0.438  # nm
    ay = 0.332  # nm
    theta = 96.79 * (pi / 180)
    phi = 103.69 * (pi / 180)
    lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])
    h = a * sin(phi - pi / 2)
    s = 0.5 * ax - a * cos(theta / 2)
    lat.add_sublattices(('A', [-s/2, -ay/2, h], 0),
                        ('B', [ s/2, -ay/2, 0], 0),
                        ('C', [-s/2 + ax/2, 0, 0], 0),
                        ('D', [ s/2 + ax/2, 0, h], 0))
    # Hopping energies of the four-band model; presumably in eV, as is
    # conventional for tight-binding parameters -- verify with the source.
    lat.register_hopping_energies({'t1': -1.22, 't2': 3.665, 't3': -0.205,
                                   't4': -0.105, 't5': -0.055})
    # t1 and t2 are mandatory; t5 is the furthest hopping term supported.
    if num_hoppings < 2:
        raise RuntimeError("t1 and t2 must be included")
    elif num_hoppings > 5:
        raise RuntimeError("t5 is the last one")
    # Hoppings are added cumulatively: each larger num_hoppings keeps all
    # shorter-range terms and adds the next one.
    if num_hoppings >= 2:
        lat.add_hoppings(([-1, 0], 'A', 'D', 't1'),
                         ([-1, -1], 'A', 'D', 't1'),
                         ([ 0, 0], 'B', 'C', 't1'),
                         ([ 0, -1], 'B', 'C', 't1'))
    lat.add_hoppings(([ 0, 0], 'A', 'B', 't2'),
                     ([ 0, 0], 'C', 'D', 't2'))
    if num_hoppings >= 3:
        lat.add_hoppings(([ 0, 0], 'A', 'D', 't3'),
                         ([ 0, -1], 'A', 'D', 't3'),
                         ([ 1, 1], 'C', 'B', 't3'),
                         ([ 1, 0], 'C', 'B', 't3'))
    if num_hoppings >= 4:
        lat.add_hoppings(([ 0, 0], 'A', 'C', 't4'),
                         ([ 0, -1], 'A', 'C', 't4'),
                         ([-1, 0], 'A', 'C', 't4'),
                         ([-1, -1], 'A', 'C', 't4'),
                         ([ 0, 0], 'B', 'D', 't4'),
                         ([ 0, -1], 'B', 'D', 't4'),
                         ([-1, 0], 'B', 'D', 't4'),
                         ([-1, -1], 'B', 'D', 't4'))
    if num_hoppings >= 5:
        lat.add_hoppings(([-1, 0], 'A', 'B', 't5'),
                         ([-1, 0], 'C', 'D', 't5'))
    lat.min_neighbors = 2
    return lat
| {
"repo_name": "MAndelkovic/pybinding",
"path": "pybinding/repository/phosphorene.py",
"copies": "2",
"size": "2298",
"license": "bsd-2-clause",
"hash": 3866831495226111500,
"line_mean": 35.4761904762,
"line_max": 74,
"alpha_frac": 0.3416013925,
"autogenerated": false,
"ratio": 2.8092909535452324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41508923460452324,
"avg_score": null,
"num_lines": null
} |
"""A single place for constructing and exposing the main parser
"""
import os
import sys
from typing import List, Tuple
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip._internal.commands import commands_dict, get_similar_commands
from pip._internal.exceptions import CommandError
from pip._internal.utils.misc import get_pip_version, get_prog
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser() -> ConfigOptionParser:
    """Build the top-level option parser for pip's CLI."""
    parser_kwargs = {
        "usage": "\n%prog <command> [options]",
        "add_help_option": False,
        "formatter": UpdatingDefaultsHelpFormatter(),
        "name": "global",
        "prog": get_prog(),
    }
    parser = ConfigOptionParser(**parser_kwargs)
    parser.disable_interspersed_args()
    parser.version = get_pip_version()

    # General options are valid before any subcommand.
    parser.add_option_group(
        cmdoptions.make_option_group(cmdoptions.general_group, parser)
    )

    # so the help formatter knows
    parser.main = True  # type: ignore

    # The description lists each available command with its summary.
    command_lines = [""]
    for name, command_info in commands_dict.items():
        command_lines.append(f"{name:27} {command_info.summary}")
    parser.description = "\n".join(command_lines)

    return parser
def parse_command(args: List[str]) -> Tuple[str, List[str]]:
    """Split *args* into the subcommand name and its remaining arguments."""
    parser = create_main_parser()

    # Interspersed args are disabled on the parser, so parse_args splits
    # the general options (before the subcommand) from everything else:
    #   args: ['--timeout=5', 'install', '--user', 'INITools']
    #   general_options: ['--timeout==5']
    #   args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # --version takes precedence over everything else.
    if general_options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()

    # A bare "pip" or a lone "pip help" just prints the global help.
    if not args_else or (args_else[0] == "help" and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    cmd_name = args_else[0]
    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)
        msg = [f'unknown command "{cmd_name}"']
        if guess:
            msg.append(f'maybe you meant "{guess}"')
        raise CommandError(" - ".join(msg))

    # Drop the first occurrence of the subcommand token; everything else
    # is handed to the subcommand.
    pos = args.index(cmd_name)
    cmd_args = args[:pos] + args[pos + 1:]
    return cmd_name, cmd_args
| {
"repo_name": "pypa/pip",
"path": "src/pip/_internal/cli/main_parser.py",
"copies": "4",
"size": "2614",
"license": "mit",
"hash": 1702276450586536200,
"line_mean": 29.0459770115,
"line_max": 86,
"alpha_frac": 0.6534047437,
"autogenerated": false,
"ratio": 3.7994186046511627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6452823348351163,
"avg_score": null,
"num_lines": null
} |
"""A single place for constructing and exposing the main parser
"""
import os
import sys
from pipenv.patched.notpip._internal.cli import cmdoptions
from pipenv.patched.notpip._internal.cli.parser import (
ConfigOptionParser,
UpdatingDefaultsHelpFormatter,
)
from pipenv.patched.notpip._internal.commands import commands_dict, get_similar_commands
from pipenv.patched.notpip._internal.exceptions import CommandError
from pipenv.patched.notpip._internal.utils.misc import get_pip_version, get_prog
from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Tuple, List
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser():
    # type: () -> ConfigOptionParser
    """Build and return the top-level option parser for pip's CLI."""
    parser = ConfigOptionParser(
        usage='\n%prog <command> [options]',
        add_help_option=False,
        formatter=UpdatingDefaultsHelpFormatter(),
        name='global',
        prog=get_prog(),
    )
    parser.disable_interspersed_args()
    parser.version = get_pip_version()

    # General options are valid before any subcommand.
    parser.add_option_group(
        cmdoptions.make_option_group(cmdoptions.general_group, parser))

    # so the help formatter knows
    parser.main = True  # type: ignore

    # One description line per available command, preceded by a blank line.
    lines = ['']
    for name, command_info in commands_dict.items():
        lines.append('%-27s %s' % (name, command_info.summary))
    parser.description = '\n'.join(lines)

    return parser
def parse_command(args):
    # type: (List[str]) -> Tuple[str, List[str]]
    """Split the raw argument list into (subcommand name, remaining args)."""
    parser = create_main_parser()

    # Interspersed args are disabled, so parsing stops at the first
    # non-option token, separating the general options from the
    # subcommand and its arguments, e.g.
    #   args: ['--timeout=5', 'install', '--user', 'INITools']
    #   general_options: ['--timeout==5']
    #   args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # --version short-circuits everything else.
    if general_options.version:
        sys.stdout.write(parser.version)  # type: ignore
        sys.stdout.write(os.linesep)
        sys.exit()

    # A bare "pip" or a lone "pip help" prints the global help text.
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    cmd_name = args_else[0]
    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)
        raise CommandError(' - '.join(msg))

    # Remove the first occurrence of the subcommand token; the rest is
    # handed to the subcommand.
    first = args.index(cmd_name)
    cmd_args = args[:first] + args[first + 1:]
    return cmd_name, cmd_args
| {
"repo_name": "kennethreitz/pipenv",
"path": "pipenv/patched/notpip/_internal/cli/main_parser.py",
"copies": "1",
"size": "2927",
"license": "mit",
"hash": 8470825658093950000,
"line_mean": 28.5656565657,
"line_max": 88,
"alpha_frac": 0.6508370345,
"autogenerated": false,
"ratio": 3.6957070707070705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9845353113587447,
"avg_score": 0.00023819832392449954,
"num_lines": 99
} |
"""A single place for constructing and exposing the main parser
"""
import os
import sys
from pip import __version__
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import (
ConfigOptionParser, UpdatingDefaultsHelpFormatter,
)
from pip._internal.commands import (
commands_dict, get_similar_commands, get_summaries,
)
from pip._internal.exceptions import CommandError
from pip._internal.utils.misc import get_prog
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Tuple, List
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser():
    # type: () -> ConfigOptionParser
    """Creates and returns the main parser for pip's CLI
    """
    parser_kw = {
        'usage': '\n%prog <command> [options]',
        'add_help_option': False,
        'formatter': UpdatingDefaultsHelpFormatter(),
        'name': 'global',
        'prog': get_prog(),
    }

    parser = ConfigOptionParser(**parser_kw)
    parser.disable_interspersed_args()

    pip_pkg_dir = os.path.abspath(os.path.join(
        os.path.dirname(__file__), "..", "..",
    ))
    # BUG FIX: sys.version[:3] truncates two-digit minor versions
    # ("3.10.1" -> "3.1"); build the X.Y string from sys.version_info
    # instead, matching the fix shipped in later pip releases.
    parser.version = 'pip %s from %s (python %s)' % (
        __version__, pip_pkg_dir, '.'.join(map(str, sys.version_info[:2])),
    )

    # add the general options
    gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
    parser.add_option_group(gen_opts)

    # so the help formatter knows
    parser.main = True  # type: ignore

    # create command listing for description
    command_summaries = get_summaries()
    description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
    parser.description = '\n'.join(description)

    return parser
def parse_command(args):
    # type: (List[str]) -> Tuple[str, List[str]]
    """Return the subcommand name plus all other arguments from *args*."""
    parser = create_main_parser()

    # parse_args stops at the first non-option token (interspersed args
    # are disabled), splitting the general options from the rest:
    #   args: ['--timeout=5', 'install', '--user', 'INITools']
    #   general_options: ['--timeout==5']
    #   args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # Handle --version before anything else.
    if general_options.version:
        sys.stdout.write(parser.version)  # type: ignore
        sys.stdout.write(os.linesep)
        sys.exit()

    # Bare "pip" or a lone "pip help" prints the help text.
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    cmd_name = args_else[0]
    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)
        raise CommandError(' - '.join(msg))

    # Everything except the first occurrence of the subcommand token is
    # passed through to the subcommand.
    idx = args.index(cmd_name)
    cmd_args = args[:idx] + args[idx + 1:]
    return cmd_name, cmd_args
| {
"repo_name": "huguesv/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_internal/cli/main_parser.py",
"copies": "5",
"size": "3002",
"license": "apache-2.0",
"hash": 5921931613611613000,
"line_mean": 27.8653846154,
"line_max": 77,
"alpha_frac": 0.6332445037,
"autogenerated": false,
"ratio": 3.656516443361754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 104
} |
"""A single place for constructing and exposing the main parser
"""
import os
import sys
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip._internal.commands import commands_dict, get_similar_commands
from pip._internal.exceptions import CommandError
from pip._internal.utils.misc import get_pip_version, get_prog
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Tuple
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser():
    # type: () -> ConfigOptionParser
    """Assemble the main option parser used by pip's command line."""
    parser = ConfigOptionParser(
        usage='\n%prog <command> [options]',
        add_help_option=False,
        formatter=UpdatingDefaultsHelpFormatter(),
        name='global',
        prog=get_prog(),
    )
    parser.disable_interspersed_args()
    parser.version = get_pip_version()

    # add the general options
    parser.add_option_group(
        cmdoptions.make_option_group(cmdoptions.general_group, parser))

    # so the help formatter knows
    parser.main = True  # type: ignore

    # Build the description: one line per available command.
    summaries = ['']
    for name, command_info in commands_dict.items():
        summaries.append(
            '{name:27} {command_info.summary}'.format(
                name=name, command_info=command_info))
    parser.description = '\n'.join(summaries)

    return parser
def parse_command(args):
    # type: (List[str]) -> Tuple[str, List[str]]
    """Split *args* into the subcommand name and the remaining arguments."""
    parser = create_main_parser()

    # With interspersed args disabled, parse_args separates the general
    # options (before the subcommand) from everything else:
    #   args: ['--timeout=5', 'install', '--user', 'INITools']
    #   general_options: ['--timeout==5']
    #   args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # --version wins over everything else.
    if general_options.version:
        sys.stdout.write(parser.version)  # type: ignore
        sys.stdout.write(os.linesep)
        sys.exit()

    # A bare "pip" or a lone "pip help" prints the global help text.
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    cmd_name = args_else[0]
    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "{}"'.format(cmd_name)]
        if guess:
            msg.append('maybe you meant "{}"'.format(guess))
        raise CommandError(' - '.join(msg))

    # Strip the first occurrence of the subcommand token from the args.
    pos = args.index(cmd_name)
    cmd_args = args[:pos] + args[pos + 1:]
    return cmd_name, cmd_args
| {
"repo_name": "jsirois/pex",
"path": "pex/vendor/_vendored/pip/pip/_internal/cli/main_parser.py",
"copies": "4",
"size": "2830",
"license": "apache-2.0",
"hash": 1399369011953255700,
"line_mean": 28.4791666667,
"line_max": 86,
"alpha_frac": 0.6462897527,
"autogenerated": false,
"ratio": 3.7783711615487316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00011973180076628352,
"num_lines": 96
} |
"""A single place for constructing and exposing the main parser
"""
import os
import sys
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import (
ConfigOptionParser,
UpdatingDefaultsHelpFormatter,
)
from pip._internal.commands import commands_dict, get_similar_commands
from pip._internal.exceptions import CommandError
from pip._internal.utils.misc import get_pip_version, get_prog
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Tuple, List
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser():
    # type: () -> ConfigOptionParser
    """Construct the global ConfigOptionParser for the pip CLI."""
    kwargs = {
        'usage': '\n%prog <command> [options]',
        'add_help_option': False,
        'formatter': UpdatingDefaultsHelpFormatter(),
        'name': 'global',
        'prog': get_prog(),
    }
    parser = ConfigOptionParser(**kwargs)
    parser.disable_interspersed_args()
    parser.version = get_pip_version()

    # add the general options
    general = cmdoptions.make_option_group(cmdoptions.general_group, parser)
    parser.add_option_group(general)

    # so the help formatter knows
    parser.main = True  # type: ignore

    # One description line per command, preceded by an empty line.
    parser.description = '\n'.join([''] + [
        '{name:27} {command_info.summary}'.format(
            name=name, command_info=command_info)
        for name, command_info in commands_dict.items()
    ])
    return parser
def parse_command(args):
    # type: (List[str]) -> Tuple[str, List[str]]
    """Return (subcommand name, remaining args) parsed from *args*."""
    parser = create_main_parser()

    # parse_args stops at the first positional token because interspersed
    # args are disabled; this yields the general options and the rest:
    #   args: ['--timeout=5', 'install', '--user', 'INITools']
    #   general_options: ['--timeout==5']
    #   args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # Handle --version first.
    if general_options.version:
        sys.stdout.write(parser.version)  # type: ignore
        sys.stdout.write(os.linesep)
        sys.exit()

    # "pip" alone or a lone "pip help" prints the help text.
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    cmd_name = args_else[0]
    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "{}"'.format(cmd_name)]
        if guess:
            msg.append('maybe you meant "{}"'.format(guess))
        raise CommandError(' - '.join(msg))

    # Pass everything except the first subcommand token through.
    where = args.index(cmd_name)
    cmd_args = args[:where] + args[where + 1:]
    return cmd_name, cmd_args
| {
"repo_name": "mdworks2016/work_development",
"path": "Python/20_Third_Certification/venv/lib/python3.7/site-packages/pip/_internal/cli/main_parser.py",
"copies": "13",
"size": "2843",
"license": "apache-2.0",
"hash": 3087813262610980000,
"line_mean": 27.7171717172,
"line_max": 77,
"alpha_frac": 0.6433345058,
"autogenerated": false,
"ratio": 3.7655629139072846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 99
} |
"""A single place for constructing and exposing the main parser
"""
import os
import sys
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import (
ConfigOptionParser, UpdatingDefaultsHelpFormatter,
)
from pip._internal.commands import (
commands_dict, get_similar_commands, get_summaries,
)
from pip._internal.exceptions import CommandError
from pip._internal.utils.misc import get_pip_version, get_prog
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Tuple, List
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser():
    # type: () -> ConfigOptionParser
    """Create pip's top-level option parser."""
    parser = ConfigOptionParser(
        usage='\n%prog <command> [options]',
        add_help_option=False,
        formatter=UpdatingDefaultsHelpFormatter(),
        name='global',
        prog=get_prog(),
    )
    parser.disable_interspersed_args()
    parser.version = get_pip_version()

    # add the general options
    parser.add_option_group(
        cmdoptions.make_option_group(cmdoptions.general_group, parser))

    # so the help formatter knows
    parser.main = True  # type: ignore

    # Build the description from the (name, summary) pairs.
    lines = ['']
    for cmd, summary in get_summaries():
        lines.append('%-27s %s' % (cmd, summary))
    parser.description = '\n'.join(lines)

    return parser
def parse_command(args):
    # type: (List[str]) -> Tuple[str, List[str]]
    """Separate the subcommand name from the other command-line arguments."""
    parser = create_main_parser()

    # The parser has interspersed args disabled, so this call splits the
    # general options from the subcommand and its arguments:
    #   args: ['--timeout=5', 'install', '--user', 'INITools']
    #   general_options: ['--timeout==5']
    #   args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # --version beats everything else.
    if general_options.version:
        sys.stdout.write(parser.version)  # type: ignore
        sys.stdout.write(os.linesep)
        sys.exit()

    # A bare "pip" or a lone "pip help" prints the help text.
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    cmd_name = args_else[0]
    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)
        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)
        raise CommandError(' - '.join(msg))

    # Remove the first occurrence of the subcommand token from the args.
    at = args.index(cmd_name)
    cmd_args = args[:at] + args[at + 1:]
    return cmd_name, cmd_args
| {
"repo_name": "perlygatekeeper/glowing-robot",
"path": "Little_Alchemy_2/Scraper_python/env/lib/python3.7/site-packages/pip/_internal/cli/main_parser.py",
"copies": "9",
"size": "2817",
"license": "artistic-2.0",
"hash": -7539006061369997000,
"line_mean": 27.7448979592,
"line_max": 77,
"alpha_frac": 0.6443024494,
"autogenerated": false,
"ratio": 3.706578947368421,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8850881396768422,
"avg_score": null,
"num_lines": null
} |
"""A single place for FHIR concepts."""
from collections import namedtuple
from rdr_service.census_regions import census_regions
Concept = namedtuple("Concept", ["system", "code"])
SYSTEM_CONSENT_FORM = "http://terminology.pmi-ops.org/CodeSystem/consent-form"
ENROLLMENT_CONSENT_FORM = Concept(SYSTEM_CONSENT_FORM, "enrollment")
ELECTRONIC_HEALTH_RECORDS_CONSENT_FORM = Concept(SYSTEM_CONSENT_FORM, "electronic-health-records")
SYSTEM_PPI_MODULE = "http://terminology.pmi-ops.org/CodeSystem/ppi-module"
OVERALL_HEALTH_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "overall-health")
PERSONAL_HABITS_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "personal-habits")
SOCIODEMOGRAPHICS_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "sociodemographics")
HEALTHCARE_ACCESS_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "healthcare-access")
MEDICAL_HISTORY_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "medical-history")
MEDICATIONS_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "medications")
FAMILY_HEALTH_PPI_MODULE = Concept(SYSTEM_PPI_MODULE, "family-health")
SYSTEM_PHYSICAL_MEASUREMENTS = "http://terminology.pmi-ops.org/CodeSystem/document-type"
PHYSICAL_MEASUREMENTS_CONCEPT_CODE_PREFIX = "intake-exam-v"
SYSTEM_LOINC = "http://loinc.org"
SYSTEM_FHIR_NULL = "http://hl7.org/fhir/v3/NullFlavor"
SYSTEM_PMI_BASE = "http://terminology.pmi-ops.org/CodeSystem/"
SYSTEM_UNIT_OF_MEASURE = "http://unitsofmeasure.org"
ASKED_BUT_NO_ANSWER = Concept(SYSTEM_FHIR_NULL, "ASKU")
PREFER_NOT_TO_SAY = Concept(SYSTEM_FHIR_NULL, "ASKU")
# Used in the questionnaire response.
ETHNICITY = Concept(SYSTEM_LOINC, "69490-1")
HISPANIC = Concept("http://hl7.org/fhir/v3/Ethnicity", "2135-2")
NON_HISPANIC = Concept("http://hl7.org/fhir/v3/Ethnicity", "2186-5")
RACE = Concept(SYSTEM_LOINC, "72826-1")
SYSTEM_RACE = "http://hl7.org/fhir/v3/Race"
AMERICAN_INDIAN_OR_ALASKA_NATIVE = Concept(SYSTEM_RACE, "1002-5")
BLACK_OR_AFRICAN_AMERICAN = Concept(SYSTEM_RACE, "2054-5")
ASIAN = Concept(SYSTEM_RACE, "2028-9")
NATIVE_HAWAIIAN_OR_OTHER_PACIFIC_ISLANDER = Concept(SYSTEM_RACE, "2076-8")
WHITE = Concept(SYSTEM_RACE, "2106-3")
OTHER_RACE = Concept(SYSTEM_RACE, "2131-1")
GENDER_IDENTITY = Concept(SYSTEM_LOINC, "76691-5")
SYSTEM_GENDER_IDENTITY = SYSTEM_PMI_BASE + "gender-identity"
FEMALE = Concept(SYSTEM_GENDER_IDENTITY, "female")
FEMALE_TO_MALE_TRANSGENDER = Concept(SYSTEM_GENDER_IDENTITY, "female-to-male-transgender")
MALE = Concept(SYSTEM_GENDER_IDENTITY, "male")
MALE_TO_FEMALE_TRANSGENDER = Concept(SYSTEM_GENDER_IDENTITY, "male-to-female-transgender")
INTERSEX = Concept(SYSTEM_GENDER_IDENTITY, "intersex")
OTHER_GENDER = Concept(SYSTEM_GENDER_IDENTITY, "other")
SYSTEM_PPI_QUESTION = SYSTEM_PMI_BASE + "ppi-question"
MEMBERSHIP_TIER = Concept(SYSTEM_PMI_BASE, "membership-tier")
SYSTEM_MEMBERSHIP_TIER = SYSTEM_PMI_BASE + "membership-tier"
REGISTERED = Concept(SYSTEM_MEMBERSHIP_TIER, "registered")
VOLUNTEER = Concept(SYSTEM_MEMBERSHIP_TIER, "volunteer")
FULL_PARTICIPANT = Concept(SYSTEM_MEMBERSHIP_TIER, "full-participant")
ENROLLEE = Concept(SYSTEM_MEMBERSHIP_TIER, "enrollee")
DATE_OF_BIRTH = Concept(SYSTEM_PPI_QUESTION, "date-of-birth")
FIRST_NAME = Concept(SYSTEM_PPI_QUESTION, "first-name")
MIDDLE_NAME = Concept(SYSTEM_PPI_QUESTION, "middle-name")
LAST_NAME = Concept(SYSTEM_PPI_QUESTION, "last-name")
STATE_OF_RESIDENCE = Concept(SYSTEM_LOINC, "46499-0")
SYSTEM_STATE = SYSTEM_PMI_BASE + "us-state"
STATE_LIST = [Concept(SYSTEM_STATE, s) for s in list(census_regions.keys())]
STATES_BY_ABBREV = {c.code: c for c in STATE_LIST}
# Used in physical measurements.
SYSTOLIC_BP = Concept(SYSTEM_LOINC, "8480-6")
DIASTOLIC_BP = Concept(SYSTEM_LOINC, "8462-4")
HEART_RATE = Concept(SYSTEM_LOINC, "8867-4")
WEIGHT = Concept(SYSTEM_LOINC, "29463-7")
BMI = Concept(SYSTEM_LOINC, "39156-5")
HIP_CIRCUMFERENCE = Concept(SYSTEM_LOINC, "62409-8")
WAIST_CIRCUMFERENCE = Concept(SYSTEM_LOINC, "56086-2")
# UNITS
UNIT_MM_HG = Concept(SYSTEM_UNIT_OF_MEASURE, "mm[Hg]")
UNIT_KG = Concept(SYSTEM_UNIT_OF_MEASURE, "kg")
UNIT_CM = Concept(SYSTEM_UNIT_OF_MEASURE, "cm")
UNIT_PER_MIN = Concept(SYSTEM_UNIT_OF_MEASURE, "/min")
UNIT_KG_M2 = Concept(SYSTEM_UNIT_OF_MEASURE, "kg/m2")
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/concepts.py",
"copies": "1",
"size": "4142",
"license": "bsd-3-clause",
"hash": -5132223090346454000,
"line_mean": 41.2653061224,
"line_max": 98,
"alpha_frac": 0.7508450024,
"autogenerated": false,
"ratio": 2.4906794948887554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8739314548965531,
"avg_score": 0.000441989664645019,
"num_lines": 98
} |
"""A single process, persistent multi-producer, multi-consumer queue."""
import os
import pickle
import sys
import tempfile
if sys.version_info < (3, 0):
from Queue import Queue as SyncQ
else:
from queue import Queue as SyncQ
def _truncate(fn, length):
    """Truncate the file at path *fn* to *length* bytes in place.

    Used to discard trailing garbage from a head chunk after an
    interrupted write.
    """
    fd = os.open(fn, os.O_RDWR)
    try:
        os.ftruncate(fd, length)
    finally:
        # Always release the descriptor, even if ftruncate fails, so a
        # failed truncation does not leak an open fd.
        os.close(fd)
class Queue(SyncQ):
    """A persistent multi-producer, multi-consumer queue.

    Items are pickled into numbered chunk files ('q00000', 'q00001', ...)
    under ``path``; an 'info' file stores the metadata: 'head' and 'tail'
    positions as [chunk number, item count, byte offset], plus 'size' and
    'chunksize'. Thread synchronisation is inherited from the standard
    Queue base class (SyncQ).
    """

    def __init__(self, path, maxsize=0, chunksize=100, tempdir=None):
        """Create a persistent queue object on a given path.

        The argument path indicates a directory where enqueued data should be
        persisted. If the directory doesn't exist, one will be created. If
        maxsize is <= 0, the queue size is infinite. The optional argument
        chunksize indicates how many entries should exist in each chunk file
        on disk.

        The tempdir parameter indicates where temporary files should be
        stored. The tempdir has to be located on the same disk as the
        enqueued data in order to obtain atomic operations.
        """
        self.path = path
        self.chunksize = chunksize
        self.tempdir = tempdir
        if self.tempdir:
            # The atomic os.rename() in _saveinfo() only works within a
            # single filesystem, so the devices must match.
            if os.stat(self.path).st_dev != os.stat(self.tempdir).st_dev:
                raise ValueError("tempdir has to be located "
                                 "on same path filesystem")
        # SyncQ.__init__ calls self._init(), which creates the directory.
        SyncQ.__init__(self, maxsize)
        self.info = self._loadinfo()
        # Truncate the head chunk in case it contains garbage from a
        # previous interrupted write (anything past the recorded offset).
        hnum, hcnt, hoffset = self.info['head']
        headfn = self._qfile(hnum)
        if os.path.exists(headfn):
            if hoffset < os.path.getsize(headfn):
                _truncate(headfn, hoffset)
        # Keep the head chunk open for appending new items.
        self.headf = self._openchunk(hnum, 'ab+')
        # Keep the tail chunk open for reading, positioned at the next item.
        tnum, _, toffset = self.info['tail']
        self.tailf = self._openchunk(tnum)
        self.tailf.seek(toffset)
        # update unfinished tasks with the current number of enqueued tasks
        self.unfinished_tasks = self.info['size']
        # Flag used to batch info-file updates (see _get/task_done).
        self.update_info = True

    def _init(self, maxsize):
        # Hook invoked by SyncQ.__init__: ensure the storage directory
        # exists before any chunk/info file is touched.
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def _qsize(self, len=len):
        # Size is tracked in the persisted metadata, not by counting items.
        return self.info['size']

    def _put(self, item):
        # Append the pickled item to the head chunk (lock held by SyncQ).
        pickle.dump(item, self.headf)
        self.headf.flush()
        hnum, hpos, _ = self.info['head']
        hpos += 1
        if hpos == self.info['chunksize']:
            # Head chunk is full: roll over to a fresh chunk file.
            hpos = 0
            hnum += 1
            self.headf.close()
            self.headf = self._openchunk(hnum, 'ab+')
        self.info['size'] += 1
        self.info['head'] = [hnum, hpos, self.headf.tell()]
        self._saveinfo()

    def _get(self):
        # Pop one pickled item from the tail chunk (lock held by SyncQ).
        tnum, tcnt, toffset = self.info['tail']
        hnum, hcnt, _ = self.info['head']
        if [tnum, tcnt] >= [hnum, hcnt]:
            # Tail caught up with head: the queue is empty.
            return None
        data = pickle.load(self.tailf)
        toffset = self.tailf.tell()
        tcnt += 1
        if tcnt == self.info['chunksize'] and tnum <= hnum:
            # Tail chunk fully consumed: advance to the next chunk file.
            tcnt = toffset = 0
            tnum += 1
            self.tailf.close()
            self.tailf = self._openchunk(tnum)
        self.info['size'] -= 1
        self.info['tail'] = [tnum, tcnt, toffset]
        # Defer persisting the metadata until task_done() to batch updates.
        self.update_info = True
        return data

    def task_done(self):
        SyncQ.task_done(self)
        # Persist the tail progress recorded by _get(), at most once per
        # batch of gets.
        if self.update_info:
            self._saveinfo()
            self.update_info = False

    def _openchunk(self, number, mode='rb'):
        # Open chunk file `number` in the requested mode.
        return open(self._qfile(number), mode)

    def _loadinfo(self):
        # Load persisted queue metadata, or build a fresh record for a
        # brand-new (empty) queue.
        infopath = self._infopath()
        if os.path.exists(infopath):
            with open(infopath, 'rb') as f:
                info = pickle.load(f)
        else:
            info = {
                'chunksize': self.chunksize,
                'size': 0,
                'tail': [0, 0, 0],
                'head': [0, 0, 0],
            }
        return info

    def _gettempfile(self):
        # Temp file for the metadata swap; must live on the same
        # filesystem as `path` for the rename to be atomic.
        if self.tempdir:
            return tempfile.mkstemp(dir=self.tempdir)
        else:
            return tempfile.mkstemp()

    def _saveinfo(self):
        # Write metadata to a temp file, then atomically swap it in.
        tmpfd, tmpfn = self._gettempfile()
        os.write(tmpfd, pickle.dumps(self.info))
        os.close(tmpfd)
        # POSIX requires that 'rename' is an atomic operation
        os.rename(tmpfn, self._infopath())
        self._clear_old_file()

    def _clear_old_file(self):
        # Delete chunk files that the tail has fully consumed (everything
        # strictly before the current tail chunk).
        tnum, _, _ = self.info['tail']
        while tnum >= 1:
            tnum -= 1
            path = self._qfile(tnum)
            if os.path.exists(path):
                os.remove(path)
            else:
                break

    def _qfile(self, number):
        # Path of chunk file N, e.g. '<path>/q00042'.
        return os.path.join(self.path, 'q%05d' % number)

    def _infopath(self):
        return os.path.join(self.path, 'info')
| {
"repo_name": "balena/python-pqueue",
"path": "pqueue/pqueue.py",
"copies": "1",
"size": "4811",
"license": "bsd-3-clause",
"hash": 5164981360515170000,
"line_mean": 30.8609271523,
"line_max": 82,
"alpha_frac": 0.554562461,
"autogenerated": false,
"ratio": 3.7733333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9827496847552835,
"avg_score": 0.00007978935609989628,
"num_lines": 151
} |
# A single sample test taken from:
# https://github.com/EsriOceans/btm/tree/master/tests
import arcpy
import os
import sys
import unittest
try:
from tempdir import TempDir
from scripts import bpi
from scripts import utils
except:
print("Example only, see full testing suite for operational code.")
sys.exit()
class TestBpi(unittest.TestCase):
    """Tests for the BPI (bathymetric position index) script."""

    def testBpiImport(self):
        """Is there a 'main' method in the bpi script?"""
        self.assertTrue('main' in vars(bpi))

    def testBpiRun(self):
        """Execute the BPI script with a raster and verify the results."""
        # set up a 'context', or a temporary environment. Here,
        # we use this to create a temporary directory that will automatically
        # be destroyed when the test is finished.
        with TempDir() as d:
            # set up our output raster and input
            input_raster = os.path.join('data', 'input_raster.tif')
            # Fix: `raster_fn` was referenced below but never defined
            # (NameError); define the file name once and build the full
            # output path from it.
            raster_fn = 'test_run_bpi.tif'
            # what's the full path of our expected output BPI raster?
            output_raster = os.path.join(d, raster_fn)
            # run the BPI calculation on this raster
            bpi.main(bathy=input_raster, inner_radius=10,
                     outer_radius=30, out_raster=output_raster)
            # our first test -- make sure the raster output exists
            # at the expected location
            self.assertTrue(raster_fn in os.listdir(d))
            # test -- does our raster match the mean
            # and standard deviation we expect?
            # NOTE: AlmostEqual used here because these are floating point values
            self.assertAlmostEqual(
                utils.raster_properties(output_raster, "MEAN"), 0.295664335664)
            # Fix: was `bpi_raster`, which is undefined (NameError); the
            # intended raster is the one just produced.
            self.assertAlmostEqual(
                utils.raster_properties(output_raster, "STD"), 1.65611606614)
| {
"repo_name": "scw/scipy-devsummit-2015-talk",
"path": "examples/nose-test.py",
"copies": "1",
"size": "1861",
"license": "apache-2.0",
"hash": -2379910719131580400,
"line_mean": 37.7708333333,
"line_max": 81,
"alpha_frac": 0.6217087587,
"autogenerated": false,
"ratio": 4.210407239819005,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5332115998519005,
"avg_score": null,
"num_lines": null
} |
# A singleton to handle flags, which can be imported more easily
# as it will not suffer the problems with circular references that
# the CommandLine singleton suffers from.
# xia2#42: this is due for retirement & working into the Phil structure
import os
from xia2.Wrappers.XDS.XDS import xds_read_xparm
class _Flags:
    """A singleton to manage boolean flags.

    Holds XDS-specific experiment geometry read from XPARM files, plus
    the process starting directory. Access via the module-level ``Flags``
    instance.
    """

    def __init__(self):
        # XDS specific things - to help with handling tricky data sets
        self._xparm = None
        self._xparm_beam_vector = None
        self._xparm_rotation_axis = None
        self._xparm_origin = None
        self._xparm_a = None
        self._xparm_b = None
        self._xparm_c = None
        # Fix: these two were only assigned in set_xparm()/set_xparm_ub(),
        # so their getters raised AttributeError when called beforehand.
        self._xparm_distance = None
        self._xparm_ub = None
        # starting directory (to allow setting working directory && relative
        # paths on input)
        self._starting_directory = os.getcwd()

    def get_starting_directory(self):
        """Return the directory xia2 was started in."""
        return self._starting_directory

    def set_xparm(self, xparm):
        """Record the XPARM file and cache the geometry parsed from it."""
        self._xparm = xparm
        xparm_info = xds_read_xparm(xparm)
        self._xparm_origin = xparm_info["ox"], xparm_info["oy"]
        self._xparm_beam_vector = tuple(xparm_info["beam"])
        self._xparm_rotation_axis = tuple(xparm_info["axis"])
        self._xparm_distance = xparm_info["distance"]

    def get_xparm(self):
        return self._xparm

    def get_xparm_origin(self):
        return self._xparm_origin

    def get_xparm_rotation_axis(self):
        return self._xparm_rotation_axis

    def get_xparm_beam_vector(self):
        return self._xparm_beam_vector

    def get_xparm_distance(self):
        return self._xparm_distance

    def set_xparm_ub(self, xparm):
        """Record an XPARM file and cache the unit-cell vectors (a, b, c)
        read from its last nine numbers."""
        self._xparm_ub = xparm
        with open(xparm) as fh:
            tokens = list(map(float, fh.read().split()))
        self._xparm_a = tokens[-9:-6]
        self._xparm_b = tokens[-6:-3]
        self._xparm_c = tokens[-3:]

    def get_xparm_a(self):
        return self._xparm_a

    def get_xparm_b(self):
        return self._xparm_b

    def get_xparm_c(self):
        return self._xparm_c


Flags = _Flags()
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Handlers/Flags.py",
"copies": "1",
"size": "2082",
"license": "bsd-3-clause",
"hash": -3996701249702875000,
"line_mean": 25.3544303797,
"line_max": 76,
"alpha_frac": 0.6176753122,
"autogenerated": false,
"ratio": 3.47,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.95876753122,
"avg_score": 0,
"num_lines": 79
} |
""" A singleton to process Python objects and spit out xml """
from lxml import etree
def xmlify(object_):
    """Serialize a Python object as pretty-printed XML text.

    The output is entirely dependent on the data structure of the input,
    which makes this function useful for xmlifying a variety of python
    objects: plain objects, strings, tuples, dictionaries of basic types,
    plus lists of basic types or any of the above. To customise the xml
    output format, drop a different XMLify-er into
    reporters/api_/resources.py.
    """
    return etree.tostring(_xmlify(object_), pretty_print=True)
def _xmlify(object_):
    """ root is a special case just because there can be only one root

    String attributes of *object_* become XML attributes on the root,
    list attributes become direct children, and dict attributes become a
    nested element of (name, value) children.

    NOTE(review): relies on the Python 2 builtin `basestring`; this will
    NameError on Python 3.
    """
    root = etree.Element( _sanitize_tag(str(type(object_))) )
    if hasattr(object_,"__dict__"):
        for i in object_.__dict__:
            i_val = getattr(object_,i)
            if isinstance(i_val, basestring):
                # set strings to be attributes
                root.set(_sanitize_tag(i),_sanitize_text(i_val) )
            elif isinstance( i_val, list):
                # for lists in root, we don't need to create child elements
                # (because we already have 'root')
                for val in i_val:
                    _inner_xmlify(val, root)
            elif isinstance( i_val, dict):
                # i = string name of field
                # i_val = actual field value
                children = etree.Element( i )
                for key in i_val.keys():
                    child = etree.Element( _sanitize_tag(i) , name=_sanitize_text(key), value=_sanitize_text(i_val[key]) )
                    children.append(child)
                root.append(children)
    return root
def _inner_xmlify(object_, parent, name=None):
    """ creates children xml elements and automatically adds them to parent

    Dispatches on the type of *object_*:
      * tuple -> a <value index=...> element whose text is the second item
      * list  -> a container element; items are recursed into
      * other -> an element named after the object's type, with string
                 attributes set as XML attributes and dicts nested

    NOTE(review): the dict-to-children branch is duplicated three times in
    this module (here twice and once in _xmlify) — candidate for a shared
    helper.
    NOTE(review): Python 2 only (`basestring`/`unicode`).
    """
    if name is None:
        # oddly enough 'unicode' is not as universal as 'str'
        name = _sanitize_tag(str(type(object_)))
    # end-condition: when we receive a tuple, list, or custom python object
    if isinstance( object_, tuple):
        # the first element of tuple is attribute, the second is text
        element = etree.Element( 'value', index=_sanitize_text(unicode(object_[0])) )
        element.text = unicode(object_[1])
        parent.append(element)
    elif isinstance( object_, list):
        element = etree.Element( name )
        if hasattr(object_,"__dict__"):
            for i in object_.__dict__:
                i_val = getattr(object_,i)
                if isinstance(i_val, basestring):
                    # set strings to be attributes
                    element.set(_sanitize_tag(i),_sanitize_text(i_val) )
                elif isinstance( i_val, dict):
                    # i = string name of field
                    # i_val = actual field value
                    children = etree.Element( i )
                    for key in i_val.keys():
                        child = etree.Element( _sanitize_tag(i) , name=_sanitize_text(key), value=_sanitize_text(i_val[key]) )
                        children.append(child)
                    element.append(children)
        for val in object_:
            _inner_xmlify(val, element)
        parent.append(element)
    else:
        # child is a python object
        element = etree.Element( name )
        if hasattr(object_,"__dict__"):
            for i in object_.__dict__:
                i_val = getattr(object_,i)
                if isinstance(i_val, basestring):
                    # set strings to be attributes
                    element.set(_sanitize_tag(i),_sanitize_text(i_val) )
                elif isinstance( i_val, list):
                    _inner_xmlify(i_val, element, i)
                elif isinstance( i_val, dict):
                    # i = string name of field
                    # i_val = actual field value
                    children = etree.Element( i )
                    for key in i_val.keys():
                        child = etree.Element( _sanitize_tag(i) , name=_sanitize_text(key), value=_sanitize_text(i_val[key]) )
                        children.append(child)
                    element.append(children)
                else:
                    # set custom data structures to child elements
                    # NOTE(review): this recurses on the attribute NAME `i`,
                    # not its value `i_val`, so nested object values appear
                    # to be dropped — looks like a bug; confirm intent
                    # before changing, as output format would change.
                    _inner_xmlify(i, element)
        parent.append(element)
def _sanitize_tag(string):
    """Sanitise *string* into a usable XML tag name.

    Applies the same cleanup as _sanitize_text(), then converts '/', ' '
    and '-' to underscores and lowercases the result.
    """
    cleaned = _sanitize_text(string)
    for ch in ("/", " ", "-"):
        cleaned = cleaned.replace(ch, "_")
    return cleaned.lower()
def _sanitize_text(string):
    """Sanitise *string* into text safe for XML attribute/tag use.

    Replaces '<', '>', "'" and '.' with underscores, drops colons,
    strips surrounding underscores, and keeps only the final
    underscore-separated component.
    """
    text = unicode(string)
    for ch in ("<", ">", "'"):
        text = text.replace(ch, "_")
    text = text.replace(":", "").replace(".", "_")
    trimmed = text.strip('_')
    return trimmed.rsplit('_', 1)[-1]
| {
"repo_name": "commtrack/commtrack-old-to-del",
"path": "apps/transformers/xml.py",
"copies": "4",
"size": "4960",
"license": "bsd-3-clause",
"hash": 630833529011874800,
"line_mean": 44.9259259259,
"line_max": 126,
"alpha_frac": 0.5558467742,
"autogenerated": false,
"ratio": 4.279551337359793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014300906202351566,
"num_lines": 108
} |
""" A singleton to process Python objects and spit out xml """
from lxml import etree
def xmlify(object_):
    """Serialize a Python object as pretty-printed XML text.

    The output is entirely dependent on the data structure of the input,
    which makes this function useful for xmlifying a variety of python
    objects: plain objects, strings, tuples, dictionaries of basic types,
    plus lists of basic types or any of the above. To customise the xml
    output format, drop a different XMLify-er into
    reporters/api_/resources.py.
    """
    return etree.tostring(_xmlify(object_), pretty_print=True)
def _xmlify(object_):
    """ root is a special case just because there can be only one root

    String attributes of *object_* become XML attributes on the root,
    list attributes become direct children, and dict attributes become a
    nested element of (name, value) children.

    NOTE(review): relies on the Python 2 builtin `basestring`; this will
    NameError on Python 3.
    """
    root = etree.Element( _sanitize_tag(str(type(object_))) )
    if hasattr(object_,"__dict__"):
        for i in object_.__dict__:
            i_val = getattr(object_,i)
            if isinstance(i_val, basestring):
                # set strings to be attributes
                root.set(_sanitize_tag(i),_sanitize_text(i_val) )
            elif isinstance( i_val, list):
                # for lists in root, we don't need to create child elements
                # (because we already have 'root')
                for val in i_val:
                    _inner_xmlify(val, root)
            elif isinstance( i_val, dict):
                # i = string name of field
                # i_val = actual field value
                children = etree.Element( i )
                for key in i_val.keys():
                    child = etree.Element( _sanitize_tag(i) , name=_sanitize_text(key), value=_sanitize_text(i_val[key]) )
                    children.append(child)
                root.append(children)
    return root
def _inner_xmlify(object_, parent, name=None):
    """ creates children xml elements and automatically adds them to parent

    Dispatches on the type of *object_*:
      * tuple -> a <value index=...> element whose text is the second item
      * list  -> a container element; items are recursed into
      * other -> an element named after the object's type, with string
                 attributes set as XML attributes and dicts nested

    NOTE(review): the dict-to-children branch is duplicated three times in
    this module (here twice and once in _xmlify) — candidate for a shared
    helper.
    NOTE(review): Python 2 only (`basestring`/`unicode`).
    """
    if name is None:
        # oddly enough 'unicode' is not as universal as 'str'
        name = _sanitize_tag(str(type(object_)))
    # end-condition: when we receive a tuple, list, or custom python object
    if isinstance( object_, tuple):
        # the first element of tuple is attribute, the second is text
        element = etree.Element( 'value', index=_sanitize_text(unicode(object_[0])) )
        element.text = unicode(object_[1])
        parent.append(element)
    elif isinstance( object_, list):
        element = etree.Element( name )
        if hasattr(object_,"__dict__"):
            for i in object_.__dict__:
                i_val = getattr(object_,i)
                if isinstance(i_val, basestring):
                    # set strings to be attributes
                    element.set(_sanitize_tag(i),_sanitize_text(i_val) )
                elif isinstance( i_val, dict):
                    # i = string name of field
                    # i_val = actual field value
                    children = etree.Element( i )
                    for key in i_val.keys():
                        child = etree.Element( _sanitize_tag(i) , name=_sanitize_text(key), value=_sanitize_text(i_val[key]) )
                        children.append(child)
                    element.append(children)
        for val in object_:
            _inner_xmlify(val, element)
        parent.append(element)
    else:
        # child is a python object
        element = etree.Element( name )
        if hasattr(object_,"__dict__"):
            for i in object_.__dict__:
                i_val = getattr(object_,i)
                if isinstance(i_val, basestring):
                    # set strings to be attributes
                    element.set(_sanitize_tag(i),_sanitize_text(i_val) )
                elif isinstance( i_val, list):
                    _inner_xmlify(i_val, element, i)
                elif isinstance( i_val, dict):
                    # i = string name of field
                    # i_val = actual field value
                    children = etree.Element( i )
                    for key in i_val.keys():
                        child = etree.Element( _sanitize_tag(i) , name=_sanitize_text(key), value=_sanitize_text(i_val[key]) )
                        children.append(child)
                    element.append(children)
                else:
                    # set custom data structures to child elements
                    # NOTE(review): this recurses on the attribute NAME `i`,
                    # not its value `i_val`, so nested object values appear
                    # to be dropped — looks like a bug; confirm intent
                    # before changing, as output format would change.
                    _inner_xmlify(i, element)
        parent.append(element)
def _sanitize_tag(string):
    """Sanitise *string* into a usable XML tag name.

    Applies the same cleanup as _sanitize_text(), then converts '/', ' '
    and '-' to underscores and lowercases the result.
    """
    cleaned = _sanitize_text(string)
    for ch in ("/", " ", "-"):
        cleaned = cleaned.replace(ch, "_")
    return cleaned.lower()
def _sanitize_text(string):
    """Sanitise *string* into text safe for XML attribute/tag use.

    Replaces '<', '>', "'" and '.' with underscores, drops colons,
    strips surrounding underscores, and keeps only the final
    underscore-separated component.
    """
    text = unicode(string)
    for ch in ("<", ">", "'"):
        text = text.replace(ch, "_")
    text = text.replace(":", "").replace(".", "_")
    trimmed = text.strip('_')
    return trimmed.rsplit('_', 1)[-1]
| {
"repo_name": "commtrack/temp-aquatest",
"path": "apps/transformers/xml.py",
"copies": "1",
"size": "5068",
"license": "bsd-3-clause",
"hash": -1301307385871422700,
"line_mean": 44.9259259259,
"line_max": 126,
"alpha_frac": 0.5440015785,
"autogenerated": false,
"ratio": 4.36896551724138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013453990610448765,
"num_lines": 108
} |
""" A singly-linked-list implementation.
"""
class Node(object):
    """One element of a singly linked list.

    Holds a payload (``data``) and a reference to the following node
    (``next_node``), which is ``None`` at the end of a list.

    Based initially on: http://stackoverflow.com/a/283630/577190
    """

    def __init__(self, data=None, next_node=None):
        self.next_node = next_node
        self.data = data
class SinglyLinkedList(object):
    """A singly linked list of node objects.

    Nodes are any objects exposing ``data`` and ``next_node`` attributes
    (e.g. Node in this module); the list stores a reference to the first
    node only.
    """

    def __init__(self, first_node=None):
        self.first_node = first_node

    def __str__(self):
        """Comma-separated string of each node's data, in list order."""
        data = []
        n = self.first_node
        while n:
            data.append(n.data)
            n = n.next_node
        return ', '.join(map(str, data))

    def populate(self, nodes):
        """Populate with nodes, preserving the order of the sequence."""
        for (i, node) in enumerate(nodes):
            if i == 0:
                self.insert_beginning(node)
            else:
                self.insert_after(nodes[i - 1], node)

    @staticmethod
    def insert_after(node, new_node):
        """Insert a node after a given node.

        Points the new_node's 'next_node' to the node following the given
        node, then points the given node's 'next_node' to the new node.
        """
        new_node.next_node = node.next_node
        node.next_node = new_node

    def insert_beginning(self, new_node):
        """Insert a new node at the beginning.

        Point the new_node's 'next_node' to the current root node, update
        the root node to be the new_node.
        """
        new_node.next_node = self.first_node
        self.first_node = new_node

    @staticmethod
    def remove_after(node):
        """Remove the node after a node.

        Sets the 'next_node' of the node to be the 'next_node' of the next
        node, assuming the next node is not None.
        """
        if node.next_node:
            node.next_node = node.next_node.next_node

    def remove_beginning(self):
        """Remove the first node.

        Sets the first_node to be the current first_node's next_node.
        """
        self.first_node = self.first_node.next_node

    def append(self, new_node):
        """Append a node to the end of the linked list.

        Traverses to the last node and links the new node after it; an
        empty list simply gets the node as its first node.
        """
        n = self.first_node
        if n is None:
            self.insert_beginning(new_node)
            return
        while n.next_node:
            n = n.next_node
        self.insert_after(n, new_node)

    def delete(self, node):
        """Delete a node; a node that is not in the list is ignored.

        Fix: deleting the first node used to scan for a predecessor (which
        it doesn't have) and silently do nothing; it is now handled
        explicitly via remove_beginning().
        """
        if self.first_node is None:
            return
        if self.first_node is node:
            # The first node has no predecessor; drop it directly.
            self.remove_beginning()
            return
        n = self.first_node
        while n.next_node is not node:
            n = n.next_node
            # Reached the end without finding the node: ignore.
            if n is None:
                return
        self.remove_after(n)

    def exists(self, node):
        """Check (by identity) whether *node* is in the list.

        NOTE(review): exists(None) returns True, because the scan walks
        off the end and stops when n becomes None (== node). Kept for
        backward compatibility.
        """
        n = self.first_node
        while n is not node:
            n = n.next_node
            # Identify end of list
            if n is None:
                return False
        return True
| {
"repo_name": "thisismyrobot/dsa",
"path": "src/singly_linked_list.py",
"copies": "1",
"size": "3302",
"license": "unlicense",
"hash": 903107472618014800,
"line_mean": 25.7478991597,
"line_max": 78,
"alpha_frac": 0.5175651121,
"autogenerated": false,
"ratio": 4.137844611528822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155409723628821,
"avg_score": null,
"num_lines": null
} |
"""A singly-linked list"""
class SLNode(object):
    """A single node in the list.

    Attributes:
        value: the payload stored at this node
        next: the following node in the list
    """

    def __init__(self, value, next_node):
        self.next = next_node
        self.value = value

    def __repr__(self):
        return 'SLNode({})'.format(self.value)
class SList(object):
    """Singly-linked list class.

    Implemented as a cycle of SLNode objects, with a single sentinel SLNode at
    the end, pointing to the beginning.

    Args:
        values (optional): an iterable of values to initially populate the
            list

    Attributes:
        sentinel: the SLNode at the end of the list, pointing to the start
    """

    def __init__(self, values=None):
        self.sentinel = SLNode(None, None)
        self.sentinel.next = self.sentinel
        self._tail = None
        self._len = 0
        if values is not None:
            for value in values:
                # Fix: append() already increments _len (via insert());
                # the previous extra `self._len += 1` here double-counted
                # every initial value, so len(SList([1, 2])) was 4.
                self.append(value)

    def __bool__(self):
        return self.sentinel.next is not self.sentinel

    @property
    def head(self):
        """The first SLNode in the list, or None if it is empty."""
        if not self:
            return None
        else:
            return self.sentinel.next

    @property
    def tail(self):
        """The last SLNode in the list, or None if it is empty."""
        if not self:
            return None
        else:
            return self._tail

    def insert(self, value, prev):
        """Insert `value` in a new SLNode immediately following `prev`.

        Args:
            value: the value to insert
            prev: the SLNode that should precede the newly created node

        Returns:
            the new SLNode containing `value`
        """
        node = SLNode(value, prev.next)
        if prev.next is self.sentinel:
            # Inserting at the end: the new node becomes the tail.
            self._tail = node
        prev.next = node
        self._len += 1
        return node

    def push(self, value):
        """Push `value` to the front of the SList in a new SLNode.

        Args:
            value: the value to push onto the front of the list

        Returns:
            the new SLNode containing `value`
        """
        return self.insert(value, self.sentinel)

    def append(self, value):
        """Append `value` to the end of the SList in a new SLNode.

        Args:
            value: the value to append

        Returns:
            the new SLNode containing `value`
        """
        tail = self.tail if self else self.sentinel
        return self.insert(value, tail)

    def clear(self):
        """Remove all nodes from the list.

        The nodes themselves will be left in an inconsistent state (i.e.,
        pointers will not be set to None).
        """
        self._len = 0
        self.sentinel.next = self.sentinel
        # Drop the stale tail reference so removed nodes can be collected.
        self._tail = None

    def extend(self, other):
        """Append every value of SList `other` to the end of `self`.

        This removes all the nodes from `other`."""
        if not other:
            return
        self._len += len(other)
        tail = self._tail if self else self.sentinel
        # Splice other's chain in after our tail, then close the cycle.
        tail.next = other.head
        self._tail = other.tail
        self._tail.next = self.sentinel
        other.sentinel.next = other.sentinel
        other.clear()

    def remove_next(self, node):
        """Remove `node.next` from the linked list.

        Raises:
            ValueError: if `node` is the last node (its successor is the
                sentinel, which cannot be removed).
        """
        if node.next is self.sentinel:
            raise ValueError("node out of bounds")
        self._len -= 1
        removed_node = node.next
        node.next = removed_node.next
        if node.next is self.sentinel:
            # We removed the old tail; `node` is the new tail.
            self._tail = node
        removed_node.next = None

    def __len__(self):
        """Get the length of the list.

        This requires O(1) time, but correctness requires that the linked list
        is only manipulated via the methods of this class (and any subclasses)

        Returns:
            int: the length of the list.
        """
        return self._len

    def __iter__(self):
        def slist_iter():
            """Generator yielding the list one node at a time."""
            node = self.sentinel
            while node.next != self.sentinel:
                node = node.next
                yield node
        return slist_iter()

    def __repr__(self):
        # Fix: values need not be strings and str.join requires str, so
        # convert each value explicitly (SList([1]) used to raise TypeError).
        inner = ', '.join(str(node.value) for node in self)
        return 'SList({})'.format(inner)
| {
"repo_name": "johnwilmes/py-data-structures",
"path": "py_data_structures/slist.py",
"copies": "1",
"size": "4424",
"license": "mit",
"hash": 5246400173591173000,
"line_mean": 27.358974359,
"line_max": 78,
"alpha_frac": 0.5592224231,
"autogenerated": false,
"ratio": 4.27852998065764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005966255966255966,
"num_lines": 156
} |
# A singular message for an error. An error is made up of multiple error messages
# A error message defines the formatting of an error.
class _ErrorMessage:
    """One formatted part of a CompilerError.

    Carries a message string plus (optionally) the tokens and source
    stream needed to render highlighted source-code extracts.
    """

    def __init__(self, message:str = None,
                 content:str = None,
                 object = None,
                 tokens:[] = None,
                 source = None):
        # Grab message, tokens and source from object if empty
        if object is not None:
            if message is None:
                message = "`{}`".format(object)
            if tokens is None:
                tokens = object.tokens
            if source is None:
                source = object.source
        # Put quoted message if content
        if content is not None:
            message = "`{}`".format(content)
        self.message = message
        self.tokens = tokens
        self.source = source

    # Return a 3-tuple of the line number, start and end position of the line
    # at position in source
    def _getLine(self, source:str, position:int):
        # 1-based line number: count newlines before `position`.
        number = source[:position].count("\n") + 1
        # Offset of the start of the containing line.
        start = position - len(source[:position].rpartition("\n")[2])
        # One past the end of the line (includes the trailing newline).
        end = position + len(source[position:].partition("\n")[0]) + 1
        return number, start, end

    def format(self):
        """Return (message, appendix), where appendix is the highlighted
        source extract text, or None when no source/tokens are attached."""
        if self.source is None or self.tokens is None:
            return self.message, None
        # Read in entire source (could be optimised)
        self.source.seek(0)
        source = self.source.read()
        # Map line number -> (set of highlighted offsets, line start, line end).
        lines = {}
        for token in self.tokens:
            number, start, end = self._getLine(source, token.start)
            line = lines.setdefault(number, (set(), start, end))
            line[0].update(set(range(token.start, token.end)))
        appendix = []
        # Render each line in order: "N| <source>" followed by a gutter-aligned
        # row of carets under the highlighted characters.
        for number, (highlights, start, end) in sorted(lines.items(), key=lambda a: a[0]):
            number_str = str(number)
            appendix.append("{}| {}{}| {}".format(
                number_str,
                source[start:end],
                " " * len(number_str),
                "".join(("^" if i in highlights else " ") for i in range(start, end))
            ))
        return self.message, "\n".join(appendix)
# Generic CompilerError
class CompilerError(Exception):
    """An error composed of one or more _ErrorMessage parts.

    A compiler error takes a single error message and a list of tokens.
    When displayed, the error will contain the specified message along
    with nicely formatted source code extracts, highlighting the
    specified tokens. Further messages and notes may be chained on with
    add()/addNote(); call format() to bake everything into self.args.
    """

    def __init__(self, **kwargs):
        Exception.__init__(self, "")
        self.messages = []
        self.notes = []
        self.add(**kwargs)

    def add(self, **kwargs):
        """Append another message; returns self for chaining."""
        self.messages.append(_ErrorMessage(**kwargs))
        return self

    def addNote(self, **kwargs):
        """Append a supplementary note; returns self for chaining."""
        self.notes.append(_ErrorMessage(**kwargs))
        return self

    def _format(self, entries):
        # Collect the message texts and source extracts separately, then
        # join texts with spaces and extracts with newlines.
        texts = []
        extracts = []
        for entry in entries:
            text, extract = entry.format()
            if text:
                texts.append(text)
            if extract:
                extracts.append(extract)
        return " ".join(texts) + "\n" + "\n".join(extracts)

    def format(self):
        """Render all messages and notes into the exception's args."""
        self.args = (self._format(self.messages) + self._format(self.notes),)
# Concrete error categories raised by the compiler. Note that several of
# these shadow Python builtins (SyntaxError, TypeError, ValueError,
# ImportError) within this module — refer to the builtins qualified if
# both are needed.
class SyntaxError(CompilerError):
    pass
class SemanticError(CompilerError):
    pass
class TypeError(SemanticError):
    pass
class DependencyError(TypeError):
    pass
class ValueError(SemanticError):
    pass
class AmbiguityError(SemanticError):
    pass
class MissingReferenceError(SemanticError):
    pass
class ImportError(SemanticError):
    pass
# NOTE(review): the following two derive from Exception directly, not
# CompilerError, so a generic `except CompilerError` handler will not
# catch them — presumably intentional (they are not source-code errors).
class ExecutionError(Exception):
    pass
class InternalError(Exception):
    pass
from . import lekvar
| {
"repo_name": "CameronLonsdale/jam",
"path": "compiler/errors.py",
"copies": "2",
"size": "3775",
"license": "mit",
"hash": -7392158869071803000,
"line_mean": 29.4435483871,
"line_max": 90,
"alpha_frac": 0.5846357616,
"autogenerated": false,
"ratio": 4.241573033707865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006462034509037617,
"num_lines": 124
} |
"""A sink using the PhilDB timeseries database."""
from phildb.create import create
from phildb.exceptions import AlreadyExistsError, DuplicateError
from phildb.database import PhilDB
from sensor_feed.sink import BufferedSink
class PhilDBSink(BufferedSink):
    """
    A buffered sink using the PhilDB timeseries database.
    """

    def __init__(self, dbfile, *args, **kwargs):
        """Open the PhilDB database at *dbfile*, creating it if needed.

        Remaining positional/keyword arguments are passed through to
        BufferedSink. A 'SENSOR' source record is registered up front.
        """
        super(PhilDBSink, self).__init__(*args, **kwargs)
        try:
            create(dbfile)
        except AlreadyExistsError:
            pass # Database already exists, so no creation required.
        self.db = PhilDB(dbfile)
        # Fallback frequency used for single-point writes, where the
        # frequency cannot be inferred from a one-element index.
        self.last_known_freq = None
        try:
            self.db.add_source('SENSOR', 'Data from hardware sensor')
        except DuplicateError:
            pass # DuplicateError means the source already existed

    def write_buffer(self, param_name, series):
        """Write buffer of data to database.

        Registers the measurand/timeseries/instance on first use (duplicate
        registrations are ignored), then writes the series.

        Args:
            param_name: measurand and timeseries identifier.
            series: assumed to be a pandas Series with a datetime-like
                index (``series.index.inferred_freq`` is read) — confirm
                against BufferedSink callers.

        Raises:
            ValueError: if the sampling frequency cannot be determined.
        """
        if series is None or len(series) == 0:
            return
        try:
            self.db.add_measurand(param_name, param_name, param_name)
        except DuplicateError:
            pass # DuplicateError means the measurand already existed
        try:
            self.db.add_timeseries(param_name)
        except DuplicateError:
            pass # DuplicateError means the timeseries already existed
        freq = series.index.inferred_freq
        # need to handle special case where only one value being written:
        # unable to calculate the frequency from a single point, so we use
        # the last known value which in general should always be the same.
        if len(series) == 1:
            freq = self.last_known_freq
        elif freq is not None:
            self.last_known_freq = freq
        if freq is None:
            raise ValueError('Unable to determine sensor frequency')
        try:
            self.db.add_timeseries_instance(param_name, freq, 'None',
                                            measurand=param_name,
                                            source='SENSOR')
        except DuplicateError:
            pass # DuplicateError - the timeseries instance already existed
        self.db.write(param_name, freq, series, measurand=param_name, source='SENSOR')
| {
"repo_name": "dmkent/sensor-feed",
"path": "sensor_feed/sink_phildb.py",
"copies": "1",
"size": "2221",
"license": "mit",
"hash": -498506583046116860,
"line_mean": 34.8225806452,
"line_max": 86,
"alpha_frac": 0.6118865376,
"autogenerated": false,
"ratio": 4.514227642276423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001329685414449837,
"num_lines": 62
} |
"""askcoding URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
# Third Party
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
# AskCoding
from askcoding.users.views import user_profile_update
urlpatterns = [
    # Static landing page.
    url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
    # Admin site is mounted at /manager/ rather than the default /admin/.
    url(r'^manager/', admin.site.urls),
    # django-allauth endpoints (login, signup, password reset, ...).
    url(r'^accounts/', include('allauth.urls')),
    # NOTE(review): this pattern is reached only because URL resolution
    # falls through the allauth include when no sub-pattern matches the
    # bare /accounts/ path — confirm allauth defines no '' route.
    url(r'^accounts/$', TemplateView.as_view(template_name="profile/view.html"), name='account_profile'),
    url(r'^accounts/update/$', user_profile_update, name='account_profile_update'),
]
| {
"repo_name": "akarambir/askcoding",
"path": "askcoding/urls.py",
"copies": "1",
"size": "1220",
"license": "mit",
"hash": 8707821029722068000,
"line_mean": 39.6666666667,
"line_max": 105,
"alpha_frac": 0.7081967213,
"autogenerated": false,
"ratio": 3.5057471264367814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9710387393708628,
"avg_score": 0.0007112908056304282,
"num_lines": 30
} |
"""a skeleton class for archive records"""
from __future__ import print_function
from gzip import GzipFile
import re
from hanzo.warctools.stream import open_record_stream
strip = re.compile(br'[^\w\t \|\\\/]')
def add_headers(**kwargs):
    """Class decorator that attaches header-name constants to a class.

    Every keyword becomes a class attribute (name -> header bytes), and the
    attribute names are recorded in ``cls._HEADERS``.
    """
    def decorate(cls):
        for name, header in kwargs.items():
            setattr(cls, name, header)
        cls._HEADERS = list(kwargs.keys())
        return cls
    return decorate
class ArchiveParser(object):
    """Interface stub: concrete parsers provide ``parse`` and ``trim``."""
    pass
@add_headers(DATE=b'Date',
             CONTENT_TYPE=b'Type',
             CONTENT_LENGTH=b'Length',
             TYPE=b'Type',
             URL=b'Url')
class ArchiveRecord(object):
    """An archive record has some headers, maybe some content and
    a list of errors encountered. record.headers is a list of tuples (name,
    value). errors is a list, and content is a tuple of (type, data)."""
    #pylint: disable-msg=e1101

    def __init__(self, headers=None, content=None, errors=None):
        self.headers = headers if headers else []
        self.errors = errors if errors else []
        self._content = content
        # Bug fix: content_file was never initialised, so reading it on a
        # freshly constructed record (e.g. in write_to) raised
        # AttributeError. The setter also resets _content_file_valid.
        self.content_file = None

    HEADERS = staticmethod(add_headers)

    @property
    def date(self):
        """Value of the Date header (bytes), or None."""
        return self.get_header(self.DATE)

    def error(self, *args):
        """Record a parse error; *args* are kept as a tuple."""
        self.errors.append(args)

    @property
    def type(self):
        """Value of the record Type header (bytes), or None."""
        return self.get_header(self.TYPE)

    # Bug fix: a second, simpler ``content_type`` property used to be
    # defined here; it was dead code, silently shadowed by the full
    # definition further down, and has been removed.

    @property
    def content_file(self):
        """
        File handle for streaming the payload.

        If the record has been read from a RecordStream, content_file wraps the
        same underlying file handle as the RecordStream itself. This has
        important implications. Results are undefined if you try to read from
        content_file after reading the next record from RecordStream; and
        closing content_file will close the RecordStream, and vice versa.
        But if you avoid these caveats, content_file takes care to bound itself
        within the content-length specified in the warc record, so that reading
        to the end of content_file will bring you only to the end of the
        record's payload.

        When creating a record for writing and supplying content_file, the
        record can only be written once, since writing the record entails
        reading content_file and advancing the file position. Subsequent
        attempts to write using content_file will throw an exception.
        """
        return self._content_file

    @content_file.setter
    def content_file(self, fh):
        self._content_file = fh
        # A handle may be consumed exactly once; the flag is cleared after
        # the record has been written out.
        self._content_file_valid = fh is not None

    @property
    def content(self):
        """A tuple (content_type, content). When first referenced, content[0]
        is populated from the Content-Type header, and content[1] by reading
        self.content_file."""
        if self._content is None:
            content_type = self.get_header(self.CONTENT_TYPE)
            try:
                content = self.content_file.read()
                self._content = (content_type, content)
            finally:
                # Drop the handle even if the read failed; it is single-use.
                self.content_file = None
        return self._content

    @property
    def content_type(self):
        """If self.content tuple was supplied, or has already been snarfed, or
        we don't have a Content-Type header, return self.content[0]. Otherwise,
        return the value of the Content-Type header."""
        if self._content is None:
            content_type = self.get_header(self.CONTENT_TYPE)
            if content_type is not None:
                return content_type
        return self.content[0]

    @property
    def content_length(self):
        """If self.content tuple was supplied, or has already been snarfed, or
        we don't have a Content-Length header, return len(self.content[1]).
        Otherwise, return the value of the Content-Length header."""
        if self._content is None:
            content_length = self.get_header(self.CONTENT_LENGTH)
            if content_length is not None:
                return int(content_length)
        return len(self.content[1])

    @property
    def url(self):
        """Value of the Url header (bytes), or None."""
        return self.get_header(self.URL)

    def get_header(self, name):
        """Returns value of first header found matching name, case
        insensitively."""
        for k, v in self.headers:
            if name.lower() == k.lower():
                return v

    def set_header(self, name, value):
        """Replace every header named *name* (case sensitive) with one entry."""
        self.headers = [(k, v) for (k, v) in self.headers if k != name]
        self.headers.append((name, value))

    def dump(self, content=True):
        """Print a human-readable summary of the record to stdout."""
        print('Headers:')
        for (h, v) in self.headers:
            print('\t%s:%s' % (h.decode('latin1'), v.decode('latin1')))
        if content and self.content:
            print('Content Headers:')
            content_type, content_body = self.content
            print('\t' + self.CONTENT_TYPE.decode('latin1'), ':', content_type.decode('latin1'))
            print('\t' + self.CONTENT_LENGTH.decode('latin1'), ':', len(content_body))
            print('Content:')
            ln = min(1024, len(content_body))
            # Escape non-printable bytes so the preview is terminal-safe.
            abbr_strp_content = strip.sub(lambda x: ('\\x%00X' % ord(x.group())).encode('ascii'), content_body[:ln])
            print('\t' + abbr_strp_content.decode('ascii'))
            print('\t...')
            print()
        else:
            print('Content: none')
            print()
            print()
        if self.errors:
            print('Errors:')
            for e in self.errors:
                # Bug fix: each entry is a tuple (appended by error(*args)),
                # so the old "'\t' + e" concatenation raised TypeError.
                print('\t', e)

    def write_to(self, out, newline=b'\x0D\x0A', gzip=False):
        """Serialize the record to *out*, optionally gzip-compressed.

        Raises if a supplied content_file has already been consumed by a
        previous write.
        """
        if self.content_file is not None:
            if not self._content_file_valid:
                raise Exception('cannot write record because content_file has already been used')
        if gzip:
            # GzipFile needs mode='ab' when the target lacks a usable mode
            # attribute (e.g. an in-memory buffer).
            if hasattr(out, 'mode'):
                out = GzipFile(fileobj=out)
            else:
                out = GzipFile(fileobj=out, mode='ab')
        self._write_to(out, newline)
        if gzip:
            out.flush()
            out.close()
        if self.content_file is not None:
            # Mark the single-use handle as spent.
            self._content_file_valid = False

    def _write_to(self, out, newline):
        # Subclasses must override with the concrete (W)ARC serialization.
        raise AssertionError('this is bad')

    ### class methods for parsing
    @classmethod
    def open_archive(cls, filename=None, file_handle=None,
                     mode="rb", gzip="auto", offset=None, length=None):
        """Generically open an archive - magic autodetect"""
        if cls is ArchiveRecord:
            cls = None  # means guess
        return open_record_stream(cls, filename, file_handle, mode, gzip, offset, length)

    @classmethod
    def make_parser(cls):
        """Reads a (w)arc record from the stream, returns a tuple (record,
        errors). Either records is null or errors is null. Any
        record-specific errors are contained in the record - errors is only
        used when *nothing* could be parsed"""
        # Fix: the first parameter of a classmethod is conventionally ``cls``.
        raise Exception()
| {
"repo_name": "internetarchive/warctools",
"path": "hanzo/warctools/record.py",
"copies": "1",
"size": "7116",
"license": "mit",
"hash": 2851246191979667500,
"line_mean": 33.712195122,
"line_max": 116,
"alpha_frac": 0.6001967397,
"autogenerated": false,
"ratio": 4.142025611175786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5242222350875786,
"avg_score": null,
"num_lines": null
} |
"""a skeleton class for archive records"""
from gzip import GzipFile
import re
import urlparse
from hanzo.warctools.stream import open_record_stream
strip = re.compile(r'[^\w\t \|\\\/]')
content_content_type = re.compile("Content-Type: (.*?)\r")
def add_headers(**kwargs):
    """a useful helper for defining header names in record formats"""
    def _add_headers(cls):
        # Attach each keyword as a class attribute and record the names
        # in cls._HEADERS (Python 2: iteritems/keys).
        for k, v in kwargs.iteritems():
            setattr(cls, k, v)
        cls._HEADERS = kwargs.keys()
        return cls
    return _add_headers
class ArchiveParser(object):
    """Interface stub: concrete parsers provide ``parse`` and ``trim``."""
    pass
@add_headers(DATE='Date',
             CONTENT_TYPE='Type',
             CONTENT_LENGTH='Length',
             TYPE='Type',
             URL='Url')
class ArchiveRecord(object):
    """An archive record has some headers, maybe some content and
    a list of errors encountered. record.headers is a list of tuples (name,
    value). errors is a list, and content is a tuple of (type, data)"""
    #pylint: disable-msg=e1101
    # Python 2 variant of this class: content is eagerly held as a tuple
    # rather than streamed from a file handle.
    def __init__(self, headers=None, content=None, errors=None):
        self.headers = headers if headers else []
        self.content = content if content else (None, "")
        self.errors = errors if errors else []
    HEADERS = staticmethod(add_headers)
    @property
    def date(self):
        # Value of the Date header, or None.
        return self.get_header(self.DATE)
    def error(self, *args):
        # Record a parse error; args are kept as a tuple.
        self.errors.append(args)
    @property
    def type(self):
        # Value of the record Type header, or None.
        return self.get_header(self.TYPE)
    @property
    def content_type(self):
        # The record-level content type (first element of the content tuple).
        return self.content[0]
    @property
    def content_content_type(self):
        # Content-Type parsed out of the payload's own HTTP headers using the
        # module-level ``content_content_type`` regex; None when absent.
        matches = content_content_type.search(self.content[1])
        if matches == None:
            return None
        return matches.group(1)
    @property
    def content_length(self):
        # Length of the payload actually held, not the header value.
        return len(self.content[1])
    @property
    def url(self):
        # Value of the Url header, or None.
        return self.get_header(self.URL)
    @property
    def domain_name(self):
        # Network location (host[:port]) extracted from the Url header.
        url = self.get_header(self.URL)
        return urlparse.urlparse(url).netloc
    def is_response(self):
        # True when this record wraps an HTTP response message.
        return self.content_type == 'application/http; msgtype=response'
    def get_header(self, name):
        # First header value with an exactly matching (case-sensitive) name.
        for k, v in self.headers:
            if name == k:
                return v
    def set_header(self, name, value):
        # Replace every header named *name* with a single entry.
        self.headers = [(k, v) for (k, v) in self.headers if k != name]
        self.headers.append((name, value))
    def dump(self, content=True):
        # Print a human-readable summary of the record to stdout.
        print 'Headers:'
        for (h, v) in self.headers:
            print '\t%s:%s' % (h, v)
        if content and self.content:
            print 'Content Headers:'
            content_type, content_body = self.content
            print '\t', self.CONTENT_TYPE, ':', content_type
            print '\t', self.CONTENT_LENGTH, ':', len(content_body)
            print 'Content:'
            # Preview at most 1 KiB, escaping non-printable characters.
            ln = min(1024, len(content_body))
            print '\t', strip.sub(lambda x: '\\x%00X' % ord(x.group()),
                                  content_body[:ln])
            print '\t...'
            print
        else:
            print 'Content: none'
            print
            print
        if self.errors:
            print 'Errors:'
            for e in self.errors:
                print '\t', e
    def write_to(self, out, newline='\x0D\x0A', gzip=False):
        # Serialize the record to *out*, optionally gzip-compressed.
        if gzip:
            out = GzipFile(fileobj=out)
        self._write_to(out, newline)
        if gzip:
            out.flush()
            out.close()
    def _write_to(self, out, newline):
        # Subclasses must override with the concrete (W)ARC serialization.
        raise AssertionError('this is bad')
    ### class methods for parsing
    @classmethod
    def open_archive(cls, filename=None, file_handle=None,
                     mode="rb+", gzip="auto", offset=None, length=None):
        """Generically open an archive - magic autodetect"""
        if cls is ArchiveRecord:
            cls = None  # means guess
        return open_record_stream(cls, filename, file_handle, mode, gzip, offset, length)
    @classmethod
    def make_parser(self):
        """Reads a (w)arc record from the stream, returns a tuple (record,
        errors). Either records is null or errors is null. Any
        record-specific errors are contained in the record - errors is only
        used when *nothing* could be parsed"""
        raise StandardError()
| {
"repo_name": "martinsbalodis/warc-tools",
"path": "hanzo/warctools/record.py",
"copies": "1",
"size": "4320",
"license": "mit",
"hash": -6760290832787171000,
"line_mean": 28.7931034483,
"line_max": 89,
"alpha_frac": 0.5763888889,
"autogenerated": false,
"ratio": 3.985239852398524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5061628741298524,
"avg_score": null,
"num_lines": null
} |
"""A skeleton file for a class dealing with different spectral models"""
import numpy as np
import pyspeckit
from subcube import SubCube
class MultiCube:
    """A collection of Specfit objects mapped to SubCubes by the mapper
    method, with (planned) means to fit multiple guess grids for different
    models and decide between the results of those fits."""

    def __init__(self, *args):
        """
        Input parameters: see ~pyspeckit.Cube

        Bug fix: the original assigned ``self.supercube`` but then read
        ``self.SuperCube`` (capital S) everywhere, so construction raised
        AttributeError. All references now use ``self.supercube``.
        """
        # parent cube, used for attribute propagation
        self.supercube = pyspeckit.Cube(*args)

        # making a bunch of references to make our life easier
        self.cube = self.supercube.cube
        self.xarr = self.supercube.xarr
        self.header = self.supercube.header

        # FIXME: rewrite mapplot to include the mapper/judge methods!
        #        doesn't work in its current implementation, will need
        #        to rewire different params and errors, for variable
        #        number of parameters across different models
        self.multiplot = self.supercube.mapplot

        # MultiCube's own instances:
        self.multigrid = {}
        self.tesseract = {}

    def __repr__(self):
        return ('Parent: MultiCube with TODO models\n'
                'Child: %s' % self.supercube.__repr__())

    def spawn(self, model, guesses=None):
        """
        Add a new model and a SubCube for it through Cube()
        The idea is to pass a reference to large data instances
        of the parent cube to avoid excessive memory usage.

        Not implemented yet.
        """
        self.tesseract[model] = SubCube()
        raise NotImplementedError

    def mapper(self, model):
        """
        Returns a list of SubCubes for a given model?

        Bug fix: ``self`` was missing from the original signature.
        """
        raise NotImplementedError

    def judge_multimodel(self, subcubes, model, method):
        """
        Decide which model to use.

        First milestone: have one component added only
                         when residual has SNR>3
        Actual goal: proper model selection via DIC.

        Bug fix: ``self`` was missing from the original signature.
        """
        raise NotImplementedError

    def multifit(self, multigrid=None):
        """
        Fit the optimized guesses. This is effectively a wrapper for the
        SubCube.fiteach() method: juggling all the defined SubCubes lets
        MultiCube only call the judge function.
        """
        raise NotImplementedError
| {
"repo_name": "vlas-sokolov/multicube",
"path": "multicube/multicube.py",
"copies": "1",
"size": "2587",
"license": "mit",
"hash": -6150599167763892000,
"line_mean": 32.5974025974,
"line_max": 72,
"alpha_frac": 0.6238886741,
"autogenerated": false,
"ratio": 4.491319444444445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5615208118544445,
"avg_score": null,
"num_lines": null
} |
#Ask for country ID from user then show all regions that belong to that country
# then prompt region name and display more info about it.
#Python 2.7, change raw_input to input for 3.x version
from erepapi import api, CountryRegions

api.public_key="your public key goes here"
api.private_key="your private key goes here"

while True:
    entered_id=raw_input("Enter Country id:\n>>>")
    try:
        # CountryRegions raises ValueError for an unknown/invalid id.
        regions_object=CountryRegions(entered_id)
        print("All regions:")
        for region in regions_object.regions:
            # Python 2: print(...) with two args prints a tuple, matching
            # the sample output below.
            print("\t",region.name)
        info_region_name=raw_input("Enter region name to find out more about it:\n>>>")
        region_object=regions_object.get(info_region_name,None)
        if region_object is None:
            # NOTE(review): the message says "exiting" but the outer while
            # loop simply prompts again — confirm intent.
            print("That region is not part of this country... exiting")
        else:
            print("Name:{}\nID:{}\nOwner ID:{}\nOriginal owner ID:{}\nURL:{}".format(region_object.name,
                                                                                     region_object.id,
                                                                                     region_object.owner_id,
                                                                                     region_object.original_owner_id,
                                                                                     region_object.url))
    except ValueError:
        print("Invalid country id")
#Enter Country id:
#>>>65
#All regions:
#('\t', u'Kosovo')
#('\t', u'Vojvodina')
#('\t', u'Belgrade')
#('\t', u'Sumadija')
#('\t', u'Eastern Serbia')
#('\t', u'Western Serbia')
#('\t', u'Raska')
#('\t', u'Southern Serbia')
#Enter region name to find out more about it:
#>>>Belgrade
#Name:Belgrade
#ID:635
#Owner ID:65
#Original owner ID:65
#URL:http://www.erepublik.com/en/main/region/Belgrade | {
"repo_name": "Nikola-K/erepAPI",
"path": "docs/examples/c_regions_example.py",
"copies": "1",
"size": "1828",
"license": "mit",
"hash": -4824405579554876000,
"line_mean": 34.1730769231,
"line_max": 117,
"alpha_frac": 0.5393873085,
"autogenerated": false,
"ratio": 3.79253112033195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.483191842883195,
"avg_score": null,
"num_lines": null
} |
#Ask for country ID, name or just print list of all countries
#Python 2.7, change raw_input to input for 3.x version
from erepapi import api, Country

api.public_key="your public key goes here"
api.private_key="your private key goes here"

country=Country()

def print_country(c):
    """Print the loaded country's attributes in the script's report format.

    The original script duplicated this 10-line print block verbatim in
    both the lookup-by-name and lookup-by-id branches; it is factored out
    here with byte-identical output.
    """
    print("Country:{}\nID:{}\nInitials:{}\nColor:{}\n"
          "Continent ID:{}\nContinent Name:{}\n"
          "Capital region ID:{}\nCapital region name:{}".format(
          c.name, c.id, c.initials,
          c.color, c.continent_id,
          c.continent_name, c.capital_region_id,
          c.capital_region_name
          ))

while True:
    user_choice=int(raw_input("Enter option.\n1. List all countries\n"
                              "2. Print country info from name\n"
                              "3. Print country info from ID\n>>>"))
    if user_choice==1:
        for item in country.all_countries:
            print(item)
    elif user_choice==2:
        country_name=raw_input("Enter country name:\n>>>")
        country.by_name(country_name)
        # by_name leaves country.name as None when the lookup fails.
        if country.name is None:
            print("No country found by that name")
        else:
            print_country(country)
    elif user_choice==3:
        country_id=raw_input("Enter country ID:\n>>>")
        country.by_id(country_id)
        if country.name is None:
            print("No country found by that ID")
        else:
            print_country(country)
    else:
        print("Invalid input, try again")
# Output
#Enter option.
#1. List all countries
#2. Print country info from name
#3. Print country info from ID
#>>>1
#Albania
#Argentina
#Australia
#Austria
#[...]
#Enter option.
#1. List all countries
#2. Print country info from name
#3. Print country info from ID
#>>>2
#Enter country name:
#>>>Serbia
#Country:Serbia
#ID:65
#Initials:CS
#Color:FFB47F
#Continent ID:1
#Continent Name:None
#Capital region ID:743
#Capital region name:Kosovo
#Enter option.
#1. List all countries
#2. Print country info from name
#3. Print country info from ID
#>>>3
#Enter country ID:
#>>>65
#Country:Serbia
#ID:65
#Initials:CS
#Color:FFB47F
#Continent ID:1
#Continent Name:None
#Capital region ID:743
#Capital region name:Kosovo
#Enter option.
#1. List all countries
#2. Print country info from name
#3. Print country info from ID
#>>> | {
"repo_name": "Nikola-K/erepAPI",
"path": "docs/examples/country_example.py",
"copies": "1",
"size": "2738",
"license": "mit",
"hash": 4589752255759257600,
"line_mean": 26.1188118812,
"line_max": 72,
"alpha_frac": 0.6150474799,
"autogenerated": false,
"ratio": 3.4834605597964376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45985080396964373,
"avg_score": null,
"num_lines": null
} |
# ask for int, report running total / version 1
num = 0
total = 0
while num != -1:
    total = total + num
    print("total so far = " + str(total))
    num = int(input("next int: "))

# ask for int, report running total / version 2
total = 0
while True:
    num = int(input("next int: "))
    if num == -1:
        break
    total += num
    print("total so far = " + str(total))

# check if number is prime
num = int(input("int: "))
total = 0
for x in range(2, num):
    if num % x == 0:
        print(str(num) + " is NOT prime")
        break  # we don't need to continue checking
else:
    # for/else: runs only when the loop finished without hitting break,
    # i.e. no divisor was found.
    print(str(num) + " is PRIME")

# check multiple numbers
while True:
    num = int(input("int: "))
    if num == -1:
        break
    if num < 3:
        print("int must be greater than 2")
        continue
    is_prime = True
    for i in range(2, num):
        if num % i == 0:
            is_prime = False
            break
    if is_prime:
        print(str(num) + " is PRIME")
    else:
        print(str(num) + " is NOT prime")

# print out primes up to 100
for i in range(3, 101):
    is_prime = True
    for j in range(2, i-1):
        if i % j == 0:
            is_prime = False
            break
    if is_prime:
        print(str(i) + " is PRIME")
    else:
        print(str(i) + " is NOT prime")

# print multiplication table
for i in range(1, 11):
    for j in range(1, 11):
        # %3d keeps the columns aligned for two-digit products.
        print("%3d" % (i * j), end=' ')
    print()
print()
| {
"repo_name": "sgolitsynskiy/sergey.cs.uni.edu",
"path": "www/courses/cs1510/fall2017/sessions/092117.py",
"copies": "1",
"size": "1456",
"license": "mit",
"hash": -2457461500574343000,
"line_mean": 20.7313432836,
"line_max": 50,
"alpha_frac": 0.5178571429,
"autogenerated": false,
"ratio": 3.2355555555555555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42534126984555554,
"avg_score": null,
"num_lines": null
} |
# Asks for price, if it isn't a number asks again
while True:
    try:
        cost = float(raw_input("What is the cost of your meal? $"))
        break
    except ValueError:
        print("Please enter a monetary value.")

# Asks for tax and then figures out said tax, if it isn't a number asks again
while True:
    try:
        tax = float(raw_input("What is the tax rate in your state? If it is a whole number such as '15' please enter it as 15.0. "))
        # Convert percent to a fraction.
        tax = tax/100
        break
    except ValueError:
        print("A tax has to be a number...")

# Asks for tip and figures out said tip, if it isn't a number asks again
while True:
    try:
        tip = float(raw_input("What would you like to tip? If it is a whole number such as '15' please enter it as 15.0. "))
        # Convert percent to a fraction.
        tip = tip/100
        break
    except ValueError:
        print("This is the third step in paying your bill yet you still fuck it up. Enter a number.")

# Sets cost and total to the appropriate values.
# Note: the tip is computed on the tax-inclusive cost.
cost = cost + cost*tax
total = cost + cost*tip
# Prints out the total using the % operator to fill in the place holder
print "Your total is $%s. Thank you for eating at Kahlin's House, please pay Tor on the way out." % (total)
"repo_name": "TheDeepEnd/python-snippets",
"path": "kahlin_tip_calculator.py",
"copies": "1",
"size": "1243",
"license": "mit",
"hash": -2592000232586785000,
"line_mean": 35.5882352941,
"line_max": 132,
"alpha_frac": 0.6403861625,
"autogenerated": false,
"ratio": 3.6666666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48070528291666664,
"avg_score": null,
"num_lines": null
} |
# Compare two fixed files line by line and report differing lines.
fname1 = "output.txt"
fname2 = "naive_output.txt"

# Open file for reading in text mode (default mode)
f1 = open(fname1)
f2 = open(fname2)

# Print confirmation
print("-----------------------------------")
print("Comparing files ", " > " + fname1, " < " +fname2, sep='\n')
print("-----------------------------------")

# Read the first line from the files
f1_line = f1.readline()
f2_line = f2.readline()

# Initialize counter for line number
line_no = 1

# Loop if either file1 or file2 has not reached EOF
# (readline() returns '' only at EOF; blank lines still contain '\n').
while f1_line != '' or f2_line != '':
    # Strip the trailing whitespace (including the newline) before comparing
    f1_line = f1_line.rstrip()
    f2_line = f2_line.rstrip()
    # Compare the lines from both file
    if f1_line != f2_line:
        # If a line does not exist on file2 then mark the output with + sign
        if f2_line == '' and f1_line != '':
            print(">+", "Line-%d" % line_no, f1_line)
        # otherwise output the line on file1 and mark it with > sign
        elif f1_line != '':
            print(">", "Line-%d" % line_no, f1_line)
        # If a line does not exist on file1 then mark the output with + sign
        if f1_line == '' and f2_line != '':
            print("<+", "Line-%d" % line_no, f2_line)
        # otherwise output the line on file2 and mark it with < sign
        elif f2_line != '':
            print("<", "Line-%d" % line_no, f2_line)
        # Print a blank line
        print()
    #Read the next line from the file
    f1_line = f1.readline()
    f2_line = f2.readline()
    #Increment line counter
    line_no += 1

# Close the files
f1.close()
f2.close()
| {
"repo_name": "saisankargochhayat/algo_quest",
"path": "Rishav/workstation/comparer.py",
"copies": "2",
"size": "1647",
"license": "apache-2.0",
"hash": -4258296064730263600,
"line_mean": 27.3965517241,
"line_max": 76,
"alpha_frac": 0.5634486946,
"autogenerated": false,
"ratio": 3.334008097165992,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4897456791765992,
"avg_score": null,
"num_lines": null
} |
### Ask the user to input a number
# The original script copy-pasted the identical play-again prompt loop into
# all eight result branches; it is factored into ask_replay() below with
# byte-identical prompts and messages.

def ask_replay():
    """Prompt until the answer is recognized.

    Returns True to play another round; terminates the program via exit()
    on a negative answer; re-prompts on anything unrecognized.
    """
    while True:
        answer = input("Would you like to play again? [Yes/No]")
        if (answer == "yes") or (answer == "y") or (answer == "Y") or (answer == "Yes"):
            print("Program will now restart in 3... 2... 1...")
            return True
        elif (answer == "no") or (answer == "n") or (answer == "N") or (answer == "No"):
            print("This program will now be terminated.")
            exit()
        else:
            print("Can't recognize input. I will ask again.")

playing = True
while playing:
    number1 = int(input("Please enter the first number: "))
    number2 = int(input("Please enter the second number: "))
    number3 = int(input("Please enter the third number: "))
    # Branch order and messages are unchanged from the original.
    if (number1 > number2) and (number1 > number3):
        print("The highest number is", number1)
    elif (number2 > number1) and (number2 > number3):
        print("The highest number is", number2)
    elif (number3 > number1) and (number3 > number2):
        print("The highest number is", number3)
    elif (number1 > number2) and (number1 == number3):
        print("Both the first and third number - which is", number1, "- are the highest.")
    elif (number2 > number1) and (number2 == number3):
        print("Both the second and third number - which is", number2, "- are the highest.")
    elif (number2 > number3) and (number2 == number1):
        print("Both the first and second number - which is", number2, "- are the highest.")
    # Inform the user that they entered the same number three times in a row.
    elif (number1 == number2) and (number1 == number3):
        print("Woah there, you typed the same number three times in a row - you sure 'bout that?")
    else:
        print("Can't compute.")
    playing = ask_replay()
| {
"repo_name": "dracaether/python",
"path": "highestnumber_loop.py",
"copies": "1",
"size": "7768",
"license": "mit",
"hash": -2728737577738959000,
"line_mean": 40.9891891892,
"line_max": 118,
"alpha_frac": 0.4997425335,
"autogenerated": false,
"ratio": 4.396151669496321,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5395894202996321,
"avg_score": null,
"num_lines": null
} |
### Ask the user to input a number
# define the first user-defined function. In this case, this is the main()
def main():
    """Prompt for three integers, announce which is highest, then offer a replay."""
    first = int(input("Please enter the first number: "))
    second = int(input("Please enter the second number: "))
    third = int(input("Please enter the third number: "))
    # Check strictly-highest cases first, then two-way ties, then the
    # three-way tie; order matters because the conditions overlap.
    if first > second and first > third:
        print("The highest number is", first)
    elif second > first and second > third:
        print("The highest number is", second)
    elif third > first and third > second:
        print("The highest number is", third)
    elif first > second and first == third:
        print("Both the first and third number - which is", first, "- are the highest.")
    elif second > first and second == third:
        print("Both the second and third number - which is", second, "- are the highest.")
    elif second > third and second == first:
        print("Both the first and second number - which is", second, "- are the highest.")
    elif first == second and first == third:
        print("Woah there, you typed the same number three times in a row - you sure 'bout that?")
    else:
        print("Can't compute.")
    # Every branch in the original ended with the same replay prompt, so
    # it is issued once here after the decision tree.
    try_again()
# then define the second function
def try_again():
    """Ask whether to play again; restart main() on yes, exit on no.

    Improvements over the original:
    - Accepts the same spellings as the loop-based variant of this game
      ("Yes"/"Y"/"No"/"N" and any other casing) by normalising the input,
      while still accepting the original "yes"/"y"/"no"/"n".
    - Re-prompts in a loop instead of recursing, so repeated bad input
      cannot exhaust the call stack.
    """
    while True:
        answer = input("Would you like to play again? ").strip().lower()
        if answer in ("yes", "y"):
            print("Program will now restart in 3... 2... 1...")
            main()
            return
        elif answer in ("no", "n"):
            print("This program will now be terminated.")
            exit()
        else:
            print("Can't recognize input. I will ask again.")
# Entry point: call the first function defined above to start the program.
main()
| {
"repo_name": "dracaether/python",
"path": "highestnumber_function.py",
"copies": "1",
"size": "2083",
"license": "mit",
"hash": -693856223951467600,
"line_mean": 35.5438596491,
"line_max": 118,
"alpha_frac": 0.6293807009,
"autogenerated": false,
"ratio": 3.8431734317343174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49725541326343176,
"avg_score": null,
"num_lines": null
} |
"""ask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from qa import views
# URL routes for the Q&A app. All patterns are anchored with '^' so that
# matching starts at the beginning of the path.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.index, name='index'),
    url(r'^login$', views.login_view, name='login'),
    url(r'^logout$', views.logout_view, name='logout'),
    url(r'^signup$', views.signup_view, name='signup'),
    # question_id is captured as one or more digits and passed to the view.
    url(r'^question/(?P<question_id>\d+)', views.detail, name='detail'),
    # Bug fix: these two patterns lacked the leading '^' anchor, so e.g.
    # '/task' or '/most-popular' would also have matched them.
    url(r'^ask$', views.ask, name='ask'),
    url(r'^popular$', views.popular, name='popular'),
]
| {
"repo_name": "etanever/nginx-django-bootstrap",
"path": "ask/ask/urls.py",
"copies": "1",
"size": "1179",
"license": "mit",
"hash": -1508746747125765400,
"line_mean": 37.0322580645,
"line_max": 79,
"alpha_frac": 0.6624257846,
"autogenerated": false,
"ratio": 3.358974358974359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9465912290419848,
"avg_score": 0.011097570630902241,
"num_lines": 31
} |
"""ask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from qa import views
# URL routes for the Q&A app.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    # Both the root URL and /new/ render the "newest questions" view.
    url(r'^$', views.new, name='index'),
    url(r'^login/', views.login_view, name='login_view'),
    url(r'^signup/', views.signup, name='signup'),
    # id is captured as one or more digits and passed to the view.
    url(r'^question/(?P<id>\d+)/$', views.question, name='question'),
    url(r'^ask/', views.new_question, name='new_question'),
    # NOTE(review): three patterns share name='index', so reverse('index')
    # / {% url 'index' %} cannot distinguish them. Consider unique names
    # ('popular', 'new') once template usages are audited.
    url(r'^popular/', views.popular, name='index'),
    url(r'^new/', views.new, name='index'),
    url(r'^time/$', views.time),
]
| {
"repo_name": "takzhanov/stepic-web-tech",
"path": "ask/ask/urls.py",
"copies": "1",
"size": "1184",
"license": "mit",
"hash": -5164911643556594000,
"line_mean": 37.1935483871,
"line_max": 77,
"alpha_frac": 0.660472973,
"autogenerated": false,
"ratio": 3.316526610644258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9476999583644258,
"avg_score": 0,
"num_lines": 31
} |
"""Ask WolframAlpha questions."""
from bs4 import BeautifulSoup
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.plugin import PluginSetupError
from plumeria.util import http
from plumeria.util.format import escape_markdown
from plumeria.util.ratelimit import rate_limit
# Wolfram|Alpha application ID read from the [wolfram] section of the bot
# configuration. Empty by default so setup() can fail fast with a helpful
# message when no key has been configured.
api_key = config.create("wolfram", "key",
                        fallback="",
                        comment="An API key from http://products.wolframalpha.com/api/")
@commands.create("wolfram", category="Search")
@rate_limit()
async def wolfram(message):
    """
    Looks up information about a topic with Wolfram Alpha.
    Example::
        /wolfram 2+50
        /wolfram Integrate[4x^2,x]
        /wolfram pi
    Response::
        Decimal approximation: 3.14159265358...
        Property: pi is a transcendental number
        Continued fraction: [3; 7, 15, 1, 292, [...]
    """
    q = message.content.strip()
    if not q:
        raise CommandError("Search term required!")
    r = await http.get("http://api.wolframalpha.com/v2/query", params=[
        ('input', q),
        ('appid', api_key()),
    ])
    doc = BeautifulSoup(r.text(), features="lxml")
    # Only top-level <pod> elements of <queryresult> carry result sections.
    pods = doc.queryresult.find_all("pod", recursive=False)
    if not pods:
        raise CommandError("no results found")
    lines = []
    for pod in pods:
        # Skip the pod that merely echoes the user's query back.
        if pod.get("id", "") != "Input":
            for node in pod.find_all("plaintext"):
                line = (' '.join(node.stripped_strings)).strip()
                if line:
                    lines.append("**{}:** {}".format(pod['title'], escape_markdown(line)))
    if lines:
        # Cap the reply at four sections to keep the chat message short.
        return "\n".join(lines[:4])
    return "no data"
def setup():
    """Register the config entry and the /wolfram command for this plugin."""
    config.add(api_key)
    # Fail plugin load early when the key is missing rather than erroring
    # on every /wolfram invocation.
    if not api_key():
        raise PluginSetupError("This plugin requires an API key from http://products.wolframalpha.com/api. "
                               "Registration is free.")
    commands.add(wolfram)
| {
"repo_name": "sk89q/Plumeria",
"path": "orchard/wolfram.py",
"copies": "1",
"size": "2030",
"license": "mit",
"hash": -1745920846239642000,
"line_mean": 28.8529411765,
"line_max": 108,
"alpha_frac": 0.5862068966,
"autogenerated": false,
"ratio": 3.8666666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49528735632666665,
"avg_score": null,
"num_lines": null
} |
"""A skyrimsque dungeon"""
# Global progress flags for the current play-through; all are reset by
# set_defaults() before the game starts. money_looted is tri-state:
# False = coffer untouched, True = emptied it, None = took a single coin.
minions_killed, boss_killed, money_looted, stuff_taken, potion_taken =\
    False, False, False, False, False
door_locked = True
stairs_found = False
# Placeholder only - set_defaults() assigns the real starting health (50)
# before main_room() is entered.
health = 0
facing_towards = 'east'
# Maps relative directions (from the player's point of view) to compass
# points; rotated in place by set_orientation() whenever the player turns.
orientation = {
    'ahead': 'east',
    'left': 'north',
    'right': 'south',
    'back': 'west'
}
# All narration for the game, keyed by room/function name. Each value is a
# tuple of strings that the room functions print by index, so ORDER WITHIN
# EACH TUPLE IS PART OF THE GAME LOGIC - do not reorder entries.
sentences = {
    # Generic prompts shared by every room.
    'question': (
        'Where do you go?',
        'What do you do?',
        'How much do you take from it?'
    ),
    'main_room': (
        'You find yourself at the entrance of an ancient room. You remember you had decided to '
        'enter it out of curiosity.',
        'You are blocked by a wall. The wall looks impenetrable.',
        'You feel accomplished.',
        'You won your adversary and you did good.',
        'You persevered in the face of stronger enemy today. A day shall come for you to return and'
        ' take on what you left behind.',
        'You did not expect this. You prefer more leisurely activities. You stepped out.',
        'You march onwards.'
    ),
    'tunnel': (
        'You find yourself in a tunnel. You see it ends in a big room full of darkness.',
        'It looked too daunting. You decide to play safe.',
        'Undaunted, you march onwards.'
    ),
    'big_room': (
        'You are in a big room. Darkness hampers your vision.',
        'You can make out a door in front of you, and a ledge above it.',
        'You know where the stairs are.',
        'You wonder if there are any stairs to the ledge.',
        'You see stairs going up ahead to the ledge. Hmm...',
        'The door is locked from the other side.',
        'The door is unlocked.',
        'You step backwards.'
    ),
    'stairs': (
        'You take the stairs.',
    ),
    'money_room': (
        'You are in a small room. You find an unlocked coffer.',
        'You have already taken care of it. There was nothing else to do so you decide to step '
        'back.',
        'Gold shines from inside.',
        'You could not control your greed. You feel heavy but carry on.',
        'You take a coin for study. You plan to come back.',
        'You do not know who it belongs to. You decide against taking from it.'
    ),
    'ledge': (
        'You are now on the ledge. You see a room {0} and a door {1}.',
        'The door leads through a tunnel and opens to a room to the right.',
        'There is only empty space. You step back.',
        'You step over a metallic object. It shines sharply and feels pointy.',
        'You pick the pointy metallic object. It is an old sword. You keep it.',
        'It looked too dangerous to touch. You step back.'
    ),
    'potions_room': (
        'You are in a small room. You find a stove with boiling hot tea over it. A cup is nearby.',
        "You know something about what's boiling, and that feels enough. You couldn't possibly try "
        'drinking it again. You step back.',
        'You could only take a sip before its repulsive taste told you it is not tea.',
        'You decide not to taste unknown drink. You grow suspicious of recent activity.',
        'Quickly, you get out of that room.'
    ),
    'minions_room': (
        'You are inside a well-lit room.',
        'You see the carcasses of 2 people.',
        'But it is not empty like others. There are 2 people inside.',
        'Both of them look at you with fear and anger.',
        'You feel un-welcomed in your gut.',
        'They do not know you carry a coin of theirs, yet.',
        'You feel the heaviness of gold. It slows you down.',
        'You start moving fast... very fast... so fast you surprise yourself.',
        'You remember your fencing lessons. The sword you had picked moves in your hand without any'
        ' effort. Before long you emerge a survivor.',
        'Your bare hands are an even match for 2 of them. Some bruises are nothing new for you.',
        'You check the pockets of the dead, looking for an id. A piece of paper comes in your hand '
        'on which "WANTED" is printed.',
        'Your legs disobey you for a moment.',
        'You fail to make the first move. But there is no other option left for you. The fight goes'
        ' on longer than you expected. You survive.',
        'Their eyes are fixated on the coin shining through your pocket. They move fast. You never '
        'had a chance against the two of them.',
        'Against your better decisions, you decide to talk to strangers.\n\tSurprisingly, they '
        'appear friendly.',
        'They see gold in your pockets. Sadly, you have no option but to fight.',
        'The coin you carry is never revealed in front of them.',
        'They tell you they have a leader. They ask you to meet him.',
        'You tell them you feel tired. You say your goodbyes.',
        'They happily take you ahead.',
        'You carefully step back.',
        'They probably did not notice you.'
    ),
    'boss_room': (
        'You are inside a room. There is door at one end, almost hidden.',
        "You see the giant's dead body. You remember the risky encounter with caution.",
        'A giant sits in front of it. He immediately takes a disliking of you.',
        'The giant ignores you.',
        'You trace your way back.',
        'Not in mood of another encounter, you trace your way back.',
        'The door behind you is locked. You cannot go back.',
        'You are blocked by a wall. You could not possibly go that way.',
        "You step over the giant's body to reach towards the door.",
        'The key unlocks the door.',
        'You go ahead through the unlocked door.',
        'The giant is not too fond of the coin in your pocket. The attack is immediate.',
        'The rage of giant is 10 folded knowing the fate of his group.',
        'The fight is tough.',
        'You barely survive.',
        'A key falls from his pocket. You use it to unlock the door.',
        'The giant barely survives.',
        'Your charm wins over the giant. He hands over a key to you to unlock the door. You unlock '
        'the door with it.',
        'You go ahead through the unlocked door.'
    )
}
def set_defaults():
    """Reset every piece of global game state to its starting value."""
    global minions_killed, boss_killed, money_looted, stuff_taken, potion_taken, door_locked,\
        stairs_found, health, facing_towards, orientation
    # All progress flags start cleared.
    minions_killed = False
    boss_killed = False
    money_looted = False
    stuff_taken = False
    potion_taken = False
    door_locked = True
    stairs_found = False
    health = 50
    # The player begins at the entrance, looking east.
    facing_towards = 'east'
    orientation = dict(ahead='east', left='north', right='south', back='west')
def set_orientation(direction):
    """Turn the player: face orientation[direction] and re-label the compass.

    direction is one of 'ahead', 'left', 'right', 'back' (relative to the
    player's current facing). 'ahead' changes nothing but the facing.
    """
    global facing_towards
    facing_towards = orientation[direction]
    # For each turn, which OLD labels feed the NEW (ahead, left, back,
    # right) slots, in that order.
    rotations = {
        'left': ('left', 'back', 'right', 'ahead'),
        'right': ('right', 'ahead', 'left', 'back'),
        'back': ('back', 'right', 'ahead', 'left'),
    }
    if direction in rotations:
        old_ahead, old_left, old_back, old_right = rotations[direction]
        orientation['ahead'], orientation['left'], orientation['back'], orientation['right'] = \
            orientation[old_ahead], orientation[old_left], orientation[old_back], orientation[old_right]
def get_input(question, valid_values):
    # Prompt the player and return a validated answer (one of valid_values).
    # Also acts as the death check: reaching 0 health restarts the game.
    if health == 0:
        print 'You die.'
        print '.'
        print '.'
        print '.'
        print 'You are awaken from the slumber by the sudden turn of events. You realize you had ' \
              'just fallen asleep. Or did you?'
        set_defaults()
        # main_room() returns None on a normal exit, so this plays one
        # fresh game and then falls through to the prompt below.
        while main_room():
            pass
    print '\n' + question + '\n'
    inp = raw_input('> ')
    # The recursive call below already returns a validated value, so the
    # loop condition fails immediately after one successful retry.
    while inp not in valid_values:
        print 'Your valid moves are:\n\t', valid_values
        inp = get_input(question, valid_values)
    return inp
def main_room():
    # Entrance room. Returning from this function ends the game; calling
    # tunnel() continues it.
    global facing_towards
    print '\n\t' + sentences['main_room'][0]
    set_orientation(get_input(sentences['question'][0], ('left', 'right', 'back', 'ahead')))
    if facing_towards in ('north', 'south'):
        # Walls on both sides: re-enter the room.
        print '\n\t' + sentences['main_room'][1]
        main_room()
    elif facing_towards == 'west':
        # West is the exit; the ending shown depends on what was collected.
        if money_looted and stuff_taken and potion_taken:
            print '\n\t' + sentences['main_room'][2]
            if boss_killed:
                print '\n\t' + sentences['main_room'][3]
                return
            elif health != 50:
                print '\n\t' + sentences['main_room'][4]
                return
            else:
                # NOTE(review): this "stepped out" ending does not return,
                # so control falls through to tunnel() below - confirm
                # whether that is intended.
                print '\n\t' + sentences['main_room'][5]
        else:
            print '\n\t' + sentences['main_room'][6]
    tunnel()
def tunnel():
global facing_towards
print '\n\t' + sentences['tunnel'][0]
set_orientation(get_input(sentences['question'][0], ('back', 'ahead')))
if facing_towards == 'west':
print '\n\t' + sentences['tunnel'][1]
main_room()
else:
print '\n\t' + sentences['tunnel'][2]
big_room()
def big_room():
    # Central hub: stairs to the north (once discovered), money room to the
    # south, boss door to the east (once unlocked), tunnel back west.
    global stairs_found, facing_towards
    print '\n\t' + sentences['big_room'][0]
    # The full description is only shown when entering while facing east
    # (i.e. arriving from the tunnel).
    if facing_towards == 'east':
        print '\n\t' + sentences['big_room'][1]
        if stairs_found:
            print '\n\t' + sentences['big_room'][2]
        else:
            print '\n\t' + sentences['big_room'][3]
    set_orientation(get_input(sentences['question'][0], ('left', 'right', 'back', 'ahead')))
    if facing_towards == 'north':
        if stairs_found:
            stairs(True)
        else:
            # First visit north only reveals the stairs; a second visit
            # is needed to climb them.
            print '\n\t' + sentences['big_room'][4]
            stairs_found = True
            big_room()
    elif facing_towards == 'south':
        money_room()
    elif facing_towards == 'east':
        if door_locked:
            print '\n\t' + sentences['big_room'][5]
            big_room()
        else:
            print '\n\t' + sentences['big_room'][6]
            boss_room()
    else:
        print '\n\t' + sentences['big_room'][7]
        tunnel()
def stairs(go_up):
print '\n\t' + sentences['stairs'][0]
if go_up:
set_orientation('right')
ledge()
else:
set_orientation('left')
big_room()
def money_room():
    # Coffer room. money_looted is tri-state here: False = untouched,
    # True = emptied, None = took a single coin.
    global money_looted
    print '\n\t' + sentences['money_room'][0]
    if money_looted or money_looted is None:
        # Already dealt with on a previous visit.
        print '\n\t' + sentences['money_room'][1]
        set_orientation('back')
    else:
        print '\n\t' + sentences['money_room'][2]
        inp = get_input(sentences['question'][2], ('all', 'coin', 'nothing'))
        if inp == 'all':
            print '\n\t' + sentences['money_room'][3]
            money_looted = True
            # Re-enter so the "already taken care of" branch turns the
            # player around.
            money_room()
        elif inp == 'coin':
            print '\n\t' + sentences['money_room'][4]
            money_looted = None
            money_room()
        else:
            print '\n\t' + sentences['money_room'][5]
            set_orientation('back')
    big_room()
def ledge():
    # Ledge above the big room: potions room to the (absolute) north, the
    # minions' door to the (absolute) east, stairs down to the west, empty
    # space / the sword behind.
    global stuff_taken, health, facing_towards
    # Build human-readable labels ("up ahead", "to the left", "behind")
    # for wherever north and east currently lie relative to the player.
    keys = {value: key for key, value in orientation.items() if value == 'north' or value == 'east'}
    for key, value in keys.items():
        if value == 'ahead':
            keys[key] = 'up ahead'
        elif value == 'left' or value == 'right':
            keys[key] = 'to the ' + value
        else:
            keys[key] = 'behind'
    print '\n\t' + sentences['ledge'][0].format(keys['north'], keys['east'])
    set_orientation(get_input(sentences['question'][0], ('left', 'right', 'back', 'ahead')))
    if facing_towards == 'east':
        print '\n\t' + sentences['ledge'][1]
        set_orientation('right')
        minions_room()
    elif facing_towards == 'north':
        potions_room()
    elif facing_towards == 'west':
        stairs(False)
    else:
        # South: empty space, or the sword encounter if not yet taken.
        if stuff_taken:
            print '\n\t' + sentences['ledge'][2]
        else:
            print '\n\t' + sentences['ledge'][3]
            # Stepping over the sharp object costs health either way.
            health -= 10
            inp = get_input(sentences['question'][1], ('pick', 'ignore'))
            if inp == 'pick':
                print '\n\t' + sentences['ledge'][4]
                stuff_taken = True
            else:
                print '\n\t' + sentences['ledge'][5]
        set_orientation('back')
        ledge()
def potions_room():
    # One-shot potion encounter; always returns the player to the ledge.
    global potion_taken
    print '\n\t' + sentences['potions_room'][0]
    if potion_taken:
        print '\n\t' + sentences['potions_room'][1]
    else:
        inp = get_input(sentences['question'][1], ('drink', 'leave'))
        if inp == 'drink':
            print '\n\t' + sentences['potions_room'][2]
            potion_taken = True
        else:
            print '\n\t' + sentences['potions_room'][3]
        print '\n\t' + sentences['potions_room'][4]
    set_orientation('back')
    ledge()
def minions_room():
    # Two-guard encounter. The outcome matrix depends on money_looted
    # (tri-state), potion_taken (speed boost) and stuff_taken (the sword);
    # branch order below encodes the game rules - do not reorder.
    global minions_killed, health, facing_towards
    if minions_killed:
        print '\n\t' + '\n\t'.join((sentences['minions_room'][0], sentences['minions_room'][1]))
    else:
        print '\n\t' + '\n\t'.join((sentences['minions_room'][0], sentences['minions_room'][2]))
    set_orientation(get_input(sentences['question'][0], ('back', 'ahead')))
    if facing_towards == 'south':
        if minions_killed:
            boss_room()
        else:
            print '\n\t' + sentences['minions_room'][3]
            # Carrying all the gold removes the 'talk' option.
            if money_looted:
                print '\n\t' + sentences['minions_room'][4]
                inp = get_input(sentences['question'][1], ('attack', 'leave'))
            else:
                if money_looted is None:
                    print '\n\t' + sentences['minions_room'][5]
                inp = get_input(sentences['question'][1], ('attack', 'leave', 'talk'))
            if inp == 'attack':
                if money_looted:
                    # The gold's weight costs health before the fight.
                    print '\n\t' + sentences['minions_room'][6]
                    health -= 10
                if potion_taken:
                    print '\n\t' + sentences['minions_room'][7]
                    if stuff_taken:
                        print '\n\t' + sentences['minions_room'][8]
                    else:
                        print '\n\t' + '\n\t'.join((sentences['minions_room'][9],
                                                    sentences['minions_room'][10]))
                        health -= 10
                    minions_killed = True
                    minions_room()
                else:
                    print '\n\t' + sentences['minions_room'][11]
                    health -= 10
                    if stuff_taken:
                        print '\n\t' + '\n\t'.join((sentences['minions_room'][12],
                                                    sentences['minions_room'][10]))
                        minions_killed = True
                        minions_room()
                    else:
                        # No potion, no sword: the fight is unwinnable.
                        print sentences['minions_room'][13]
                        health = 0
            elif inp == 'leave':
                set_orientation('right')
                ledge()
            else:
                # 'talk' branch.
                print '\n\t' + sentences['minions_room'][14]
                if money_looted:
                    # Visible gold turns the talk into a fight anyway.
                    print '\n\t' + '\n\t'.join((sentences['minions_room'][15],
                                                sentences['minions_room'][6]))
                    health -= 10
                    if potion_taken:
                        if stuff_taken:
                            print '\n\t' + sentences['minions_room'][8]
                        else:
                            print '\n\t' + '\n\t'.join((sentences['minions_room'][9],
                                                        sentences['minions_room'][10]))
                            health -= 10
                    else:
                        print '\n\t' + sentences['minions_room'][11]
                        health -= 10
                        if stuff_taken:
                            print '\n\t' + '\n\t'.join((sentences['minions_room'][12],
                                                        sentences['minions_room'][10]))
                            minions_killed = True
                        else:
                            print sentences['minions_room'][13]
                            health = 0
                else:
                    # Friendly talk: a single hidden coin goes unnoticed.
                    if money_looted is None:
                        print '\n\t' + sentences['minions_room'][16]
                    print '\n\t' + sentences['minions_room'][17]
                    set_orientation(get_input(sentences['question'][1], ('back', 'ahead')))
                    if facing_towards == 'north':
                        print '\n\t' + sentences['minions_room'][18]
                        set_orientation('left')
                        ledge()
                    else:
                        print '\n\t' + sentences['minions_room'][19]
                        boss_room()
    else:
        # Backing out of the room.
        print '\n\t' + sentences['minions_room'][20],
        if not minions_killed:
            print '\n\t' + sentences['minions_room'][21]
        set_orientation('left')
        ledge()
def boss_room():
    # Giant's room with the final (hidden) door. Reachable either through
    # the minions' room (north) or via the unlocked door from the big room.
    global boss_killed, health, door_locked, facing_towards
    print '\n\t' + sentences['boss_room'][0]
    if boss_killed:
        print '\n\t' + sentences['boss_room'][1]
    elif door_locked:
        print '\n\t' + sentences['boss_room'][2]
    else:
        # Door already unlocked from the other side: the giant ignores you.
        print '\n\t' + sentences['boss_room'][3]
    set_orientation(get_input(sentences['question'][1], ('back', 'ahead', 'left', 'right')))
    if facing_towards == 'north':
        # Back towards the minions' room - only passable once they are dealt with.
        if minions_killed:
            if boss_killed:
                print '\n\t' + sentences['boss_room'][4]
            else:
                print '\n\t' + sentences['boss_room'][5]
            minions_room()
        else:
            print '\n\t' + sentences['boss_room'][6]
            boss_room()
    elif facing_towards in ('east', 'south'):
        print '\n\t' + sentences['boss_room'][7]
        boss_room()
    else:
        # West: the hidden door towards the big room.
        if boss_killed:
            print '\n\t' + sentences['boss_room'][8]
            if door_locked:
                print '\n\t' + sentences['boss_room'][9]
                door_locked = False
            else:
                print '\n\t' + sentences['boss_room'][10]
            big_room()
        else:
            if money_looted or money_looted is None:
                # Any stolen gold provokes the giant; each aggravation
                # costs 10 health before the fight is resolved.
                print '\n\t' + sentences['boss_room'][11]
                health -= 10
                if minions_killed:
                    print '\n\t' + sentences['boss_room'][12]
                    health -= 10
                print '\n\t' + sentences['boss_room'][13],
                if health >= 10:
                    print '\n\t' + sentences['boss_room'][14]
                    if door_locked:
                        print '\n\t' + sentences['boss_room'][15]
                    boss_killed = True
                    door_locked = False
                else:
                    print '\n\t' + sentences['boss_room'][16]
                    health = 0
            elif door_locked:
                # No stolen gold: the giant can be charmed into unlocking
                # the door.
                print '\n\t' + sentences['boss_room'][17]
                door_locked = False
            else:
                print '\n\t' + sentences['boss_room'][18]
            big_room()
# Title banner, then start a fresh game at the entrance.
print '\n'
print ' +-+-+-+'
print ' T h e A d v e n t u r e r'
print ' +0+0+0+'
print '\n'
set_defaults()
main_room()
| {
"repo_name": "anupamsr/TheAdventurer",
"path": "ex36.py",
"copies": "1",
"size": "19388",
"license": "unlicense",
"hash": -3645461180374799400,
"line_mean": 39.0578512397,
"line_max": 100,
"alpha_frac": 0.5215081494,
"autogenerated": false,
"ratio": 3.8399683105565456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4861476459956546,
"avg_score": null,
"num_lines": null
} |
"""A Slack bot using the real-time messaging API."""
from itertools import count
import json
import logging
from random import randint
from textwrap import dedent
from aiohttp import MsgType, ws_connect
from aslack import __name__ as mod_name, __version__
from aslack.slack_api import SlackApiError, SlackBotApi
from aslack.utils import truncate
logger = logging.getLogger(__name__)
class SlackBot:
    """Base class Slack bot.
    Arguments:
        id_ (:py:class:`str`): The bot's Slack ID.
        user (:py:class:`str`): The bot's friendly name.
        api (:py:class:`SlackApi`): The Slack API wrapper.
    Attributes:
        address_as (:py:class:`str`): The text that appears at the
            start of messages addressed to this bot (e.g.
            ``'<@user>: '``).
        full_name (:py:class:`str`): The name of the bot as it
            appears in messages about the bot (e.g. ``'<@user>'``).
        socket (:py:class:`aiohttp.web.WebSocketResponse`): The web
            socket to respond on.
        API_AUTH_ENDPOINT (:py:class:`str`): Test endpoint for API
            authorisation.
        INSTRUCTIONS (:py:class:`str`): Message to give the user when
            they request instructions.
        MESSAGE_FILTERS (:py:class:`list`): Default filters for
            incoming messages
        RTM_HANDSHAKE (:py:class:`dict`): Expected handshake message
            from RTM API.
        RTM_START_ENDPOINT (:py:class:`str`): Start endpoint for
            real-time messaging.
        VERSION (:py:class:`str`): Version string to show to the user (if
            not overridden, will show the aSlack version).
    """
    API_AUTH_ENDPOINT = 'auth.test'
    INSTRUCTIONS = dedent("""
    These are the default instructions for an aSlack bot.
    Override these as appropriate for your specific needs.
    """)
    MESSAGE_FILTERS = []
    RTM_HANDSHAKE = {'type': 'hello'}
    RTM_START_ENDPOINT = 'rtm.start'
    VERSION = ' '.join((mod_name, __version__))
    def __init__(self, id_, user, api):
        self.id_ = id_
        self.user = user
        self.api = api
        self.full_name = '<@{}>'.format(id_)
        self.address_as = '{}: '.format(self.full_name)
        # Monotonically increasing outgoing-message IDs, starting from a
        # random offset.
        self._msg_ids = count(randint(1, 1000))
        self.socket = None
    async def join_rtm(self, filters=None):
        """Join the real-time messaging service.
        Arguments:
            filters (:py:class:`dict`, optional): Dictionary mapping
                message filters to the functions they should dispatch to.
                Use a :py:class:`collections.OrderedDict` if precedence is
                important; only one filter, the first match, will be
                applied to each message.
        """
        if filters is None:
            filters = [cls(self) for cls in self.MESSAGE_FILTERS]
        url = await self._get_socket_url()
        logger.debug('Connecting to %r', url)
        async with ws_connect(url) as socket:
            first_msg = await socket.receive()
            self._validate_first_message(first_msg)
            # Stored so _respond() can reach the socket while the loop runs.
            self.socket = socket
            async for message in socket:
                if message.tp == MsgType.text:
                    await self.handle_message(message, filters)
                elif message.tp in (MsgType.closed, MsgType.error):
                    # Clean shutdown: close the socket (if still open) and
                    # leave the receive loop.
                    if not socket.closed:
                        await socket.close()
                    self.socket = None
                    break
        logger.info('Left real-time messaging.')
    async def handle_message(self, message, filters):
        """Handle an incoming message appropriately.
        Arguments:
            message (:py:class:`aiohttp.websocket.Message`): The incoming
                message to handle.
            filters (:py:class:`list`): The filters to apply to incoming
                messages.
        """
        data = self._unpack_message(message)
        logger.debug(data)
        if data.get('type') == 'error':
            raise SlackApiError(
                data.get('error', {}).get('msg', str(data))
            )
        elif self.message_is_to_me(data):
            # Strip the leading '<@id>: ' so built-in commands can match.
            text = data['text'][len(self.address_as):].strip()
            if text == 'help':
                return self._respond(
                    channel=data['channel'],
                    text=self._instruction_list(filters),
                )
            elif text == 'version':
                return self._respond(
                    channel=data['channel'],
                    text=self.VERSION,
                )
        # All filters are tried; each matching filter may emit responses.
        for _filter in filters:
            if _filter.matches(data):
                logger.debug('Response triggered')
                async for response in _filter:
                    self._respond(channel=data['channel'], text=response)
    def message_mentions_me(self, data):
        """If you send a message that mentions me"""
        return (data.get('type') == 'message' and
                self.full_name in data.get('text', ''))
    def message_is_to_me(self, data):
        """If you send a message directly to me"""
        return (data.get('type') == 'message' and
                data.get('text', '').startswith(self.address_as))
    @classmethod
    async def from_api_token(cls, token=None, api_cls=SlackBotApi):
        """Create a new instance from the API token.
        Arguments:
            token (:py:class:`str`, optional): The bot's API token
                (defaults to ``None``, which means looking in the
                environment).
            api_cls (:py:class:`type`, optional): The class to create
                as the ``api`` argument for API access (defaults to
                :py:class:`aslack.slack_api.SlackBotApi`).
        Returns:
            :py:class:`SlackBot`: The new instance.
        """
        api = api_cls.from_env() if token is None else api_cls(api_token=token)
        data = await api.execute_method(cls.API_AUTH_ENDPOINT)
        return cls(data['user_id'], data['user'], api)
    def _format_message(self, channel, text):
        """Format an outgoing message for transmission.
        Note:
            Adds the message type (``'message'``) and incremental ID.
        Arguments:
            channel (:py:class:`str`): The channel to send to.
            text (:py:class:`str`): The message text to send.
        Returns:
            :py:class:`str`: The JSON string of the message.
        """
        payload = {'type': 'message', 'id': next(self._msg_ids)}
        payload.update(channel=channel, text=text)
        return json.dumps(payload)
    async def _get_socket_url(self):
        """Get the WebSocket URL for the RTM session.
        Warning:
            The URL expires if the session is not joined within 30
            seconds of the API call to the start endpoint.
        Returns:
            :py:class:`str`: The socket URL.
        """
        data = await self.api.execute_method(
            self.RTM_START_ENDPOINT,
            simple_latest=True,
            no_unreads=True,
        )
        return data['url']
    def _instruction_list(self, filters):
        """Generates the instructions for a bot and its filters.
        Note:
            The guidance for each filter is generated by combining the
            docstrings of the predicate filter and resulting dispatch
            function with a single space between. The class's
            :py:attr:`INSTRUCTIONS` and the default help command are
            added.
        Arguments:
            filters (:py:class:`list`): The filters to apply to incoming
                messages.
        Returns:
            :py:class:`str`: The bot's instructions.
        """
        # NOTE(review): the loop variable 'filter' below shadows the
        # builtin; consider renaming if this method is ever touched.
        return '\n\n'.join([
            self.INSTRUCTIONS.strip(),
            '*Supported methods:*',
            'If you send "@{}: help" to me I reply with these '
            'instructions.'.format(self.user),
            'If you send "@{}: version" to me I reply with my current '
            'version.'.format(self.user),
        ] + [filter.description() for filter in filters])
    def _respond(self, channel, text):
        """Respond to a message on the current socket.
        Args:
            channel (:py:class:`str`): The channel to send to.
            text (:py:class:`str`): The message text to send.
        """
        result = self._format_message(channel, text)
        # Defensive guard: _format_message currently always returns a str.
        if result is not None:
            logger.info(
                'Sending message: %r',
                truncate(result, max_len=50),
            )
            self.socket.send_str(result)
    @staticmethod
    def _unpack_message(msg):
        """Unpack the data from the message.
        Arguments:
            msg (:py:class:`aiohttp.websocket.Message`): The message to
                unpack.
        Returns:
            :py:class:`dict`: The loaded data.
        Raises:
            :py:class:`AttributeError`: If there is no data attribute.
            :py:class:`json.JSONDecodeError`: If the data isn't valid
                JSON.
        """
        return json.loads(msg.data)
    @classmethod
    def _validate_first_message(cls, msg):
        """Check the first message matches the expected handshake.
        Note:
            The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
        Arguments:
            msg (:py:class:`aiohttp.Message`): The message to validate.
        Raises:
            :py:class:`SlackApiError`: If the data doesn't match the
                expected handshake.
        """
        data = cls._unpack_message(msg)
        logger.debug(data)
        if data != cls.RTM_HANDSHAKE:
            raise SlackApiError('Unexpected response: {!r}'.format(data))
        logger.info('Joined real-time messaging.')
| {
"repo_name": "textbook/aslack",
"path": "aslack/slack_bot/bot.py",
"copies": "1",
"size": "9574",
"license": "isc",
"hash": -3928088515859787300,
"line_mean": 32.5929824561,
"line_max": 79,
"alpha_frac": 0.5715479423,
"autogenerated": false,
"ratio": 4.247559893522626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5319107835822626,
"avg_score": null,
"num_lines": null
} |
"""ASL-English Translation Trials
Usage:
asl_english_trials.py [options]
Options:
-h, --help show this help
-f, --ffmpeg EXE ffmpeg executable [default: c:/ffmpeg/bin/ffmpeg.exe]
"""
if __name__ == '__main__':
    # Experiment runner: validates args, builds all trial blocks, runs the
    # session, then converts the output data to CSV and cleans up.
    import os, errno, sys
    from docopt import docopt
    # Parse CLI options from the module docstring (docopt usage string).
    config = docopt(__doc__)
    # ffmpeg.exe has to exist
    if not os.path.isfile(config['--ffmpeg']):
        print config['--ffmpeg'], 'does not exist'
        exit()
    # don't load the dependencies if the args are wrong
    import expyriment as e
    import cv2
    from traceback import format_exc
    import asl_english_trials as protocols
    exp = e.design.Experiment(name="Protocol 3: Picture Naming")
    # Column names for the per-trial data rows written during the session.
    exp.data_variable_names = [
        'protocol',
        'list',
        'item',
        'condition',
        'intro time',
        'reaction time',
        'output'
    ]
    output_dir = 'data/media'
    # One protocol object per trial type; each contributes its own blocks.
    transrecog = protocols.TranslationRecognition()
    transaudio = protocols.TranslationProductionFromAudio(output_dir)
    transvideo = protocols.TranslationProductionFromVideo(output_dir)
    naming = protocols.PictureNaming(output_dir)
    namingaudio = protocols.PictureNamingWithAudio(output_dir)
    # NOTE(review): the capture device is opened and released immediately,
    # then the released handle is passed to present_callback below -
    # presumably a warm-up / availability check; confirm the callbacks
    # reopen it.
    device = cv2.VideoCapture(0)
    device.release()
    try:
        e.control.initialize(exp)
        for block in transrecog.blocks + transvideo.blocks + transaudio.blocks + naming.blocks + namingaudio.blocks:
            exp.add_block(block)
        for block in exp.blocks:
            for trial in block.trials:
                trial.preload_stimuli()
                trial.config = config
        e.control.start(exp)
        e.control.stop_audiosystem()
        for block in exp.blocks:
            for trial in block.trials:
                trial.present_callback(trial, exp, device)
    # NOTE(review): BaseException also swallows KeyboardInterrupt/SystemExit;
    # the traceback is printed and cleanup still runs via finally.
    except BaseException as ex:
        print format_exc()
    finally:
        e.control.end()
    # turn the output data file into a csv
    # (we have to keep the xpd file around because expyriment uses it to get
    # the next subject id)
    if os.path.isfile(exp.data.fullpath):
        data, headers, info, comments = e.misc.data_preprocessing.read_datafile(exp.data.fullpath)
        e.misc.data_preprocessing.write_csv_file(exp.data.fullpath+'.csv', data, headers)
    # clean up practice files
    try:
        os.remove('practice.mp3.ogg')
    except:
        pass
    try:
        os.remove('practice.avi.mpeg')
    except:
        pass
| {
"repo_name": "benjaminanible/dissertation",
"path": "asl_english_trials.py",
"copies": "1",
"size": "2474",
"license": "bsd-3-clause",
"hash": 2625619195597705000,
"line_mean": 27.7674418605,
"line_max": 116,
"alpha_frac": 0.6261115602,
"autogenerated": false,
"ratio": 3.853582554517134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9964099889820583,
"avg_score": 0.003118844979310095,
"num_lines": 86
} |
"""A slice factory that generates slices given an input stream."""
import random
from math import pi
from collections import OrderedDict
def get_slice(frame_rate, stream_size, ratio=1.0):
    """Return a random slice of ``int(frame_rate * ratio)`` frames that
    fits entirely inside a stream of ``stream_size`` frames.
    """
    length = int(frame_rate * ratio)
    begin = random.randint(0, stream_size - length - 1)
    return slice(begin, begin + length)
def get_angle():
    """Return a random angle in radians, uniform over [0, 2*pi]."""
    full_circle = 2 * pi
    return random.uniform(0, full_circle)
def get_item(items):
    """Return a random element of *items*.

    For a mapping, a random *value* is returned; for any other sequence a
    random element is returned.

    Bug fix: ``random.choice(items.items())`` only worked on Python 2,
    where ``dict.items()`` returned a list; on Python 3 ``dict_items`` is
    not indexable and ``random.choice`` raises ``TypeError``. Materialize
    the items first.
    """
    if isinstance(items, dict):
        # Pick a random (key, value) pair and keep the value.
        return random.choice(list(items.items()))[1]
    else:
        return random.choice(items)
def get_cuts(max=5):
    """Return a random number of cuts between 1 and *max* (inclusive).

    Bug fix: the upper bound was hard-coded to 5 (``randint(1, 5)``),
    silently ignoring the *max* parameter.
    """
    return random.randint(1, max)
def pick_channel():
    """Return a randomly chosen channel index: 0 or 1."""
    return random.randint(0, 1)
def generated(slices, *args, **kwargs):
    """Class decorator factory that attaches a ``generate`` classmethod.

    ``slices`` is the number of random slices passed positionally to the
    class constructor; any extra ``*args``/``**kwargs`` must be
    zero-parameter callables whose results are passed through as further
    constructor arguments.

    Bug fix: the inner ``wrapper`` did not return the class, so using this
    as an actual decorator (``@generated(...)``) rebound the class to
    ``None``. It now returns ``cls``.

    >>> class Foo:
    ...     def __init__(self, a, garbage):
    ...         self.a = a
    ...         self.garbage = garbage
    >>> Foo = generated(1, lambda: "test")(Foo)
    >>> g = Foo.generate(1, 100)
    >>> g.garbage
    'test'
    """
    def wrapper(cls):
        def generate(cls, frame_rate, stream_size, ratio=1.0):
            # Random slices first, then values from the generator callables.
            positional = [get_slice(frame_rate, stream_size, ratio)
                          for _ in range(slices)]
            positional += [make() for make in args]
            keyword = {name: make() for name, make in kwargs.items()}
            return cls(*positional, **keyword)
        cls.generate = classmethod(generate)
        return cls  # allow use as a real decorator
    return wrapper
class Population(object):
    """Build a weighted population that can
    return random items based on that probability.

    >>> p = Population(opts, Swap=0.5, Merge=0.5)
    >>> p._get(0.45)
    'Merge'
    >>> p._get(0.55)
    'Swap'
    >>> p._object("Swap") is Swap
    True
    >>> p._object("Invert") is Invert
    True
    >>> p2 = Population(opts)
    >>> p2._get(0.45)
    'Dup'
    >>> p2._get(0.24)
    'Invert'
    >>> p2._get(0.92)
    'Merge'
    >>> s = p2._object(p2._get(0.24)).generate(1, 100)
    >>> isinstance(s, Invert)
    True
    """

    def __init__(self, options, **kwargs):
        """*options* are the candidate classes; keyword arguments override
        the probability mass assigned to the class of that ``__name__``.
        """
        self._options = options
        self.overrides = kwargs
        self.prob_total = 1.0
        # Cumulative-probability thresholds -> option name, in insertion order.
        self.probs = OrderedDict()
        self.build_probs()

    def build_probs(self):
        """Fill ``self.probs`` with cumulative thresholds: overridden options
        first, then the remaining mass split evenly among the rest.

        Note: the original used ``map`` and then ``list.remove`` on its
        result, which only worked on Python 2; names are now materialized
        into a real list.
        """
        allocated = 0
        unused = [opt.__name__ for opt in self._options]
        prob = self.prob_total
        for name, weight in self.overrides.items():
            allocated += weight
            self.probs[allocated] = name
            unused.remove(name)
            if allocated >= prob:
                # All probability mass consumed by the overrides.
                return
        remaining = (prob - allocated) / float(len(unused))
        for name in unused:
            allocated += remaining
            self.probs[allocated] = name

    def _get(self, value):
        """Return the option name whose cumulative threshold first exceeds
        *value*; the last option catches any floating-point leftovers.
        """
        for threshold in self.probs.keys():
            if value < threshold:
                return self.probs[threshold]
        # ``.values()[-1]`` was Python-2-only; a view is not indexable.
        return list(self.probs.values())[-1]

    def _object(self, key):
        """Map an option *name* back to the option class itself."""
        names = [opt.__name__ for opt in self._options]
        return self._options[names.index(key)]

    def get(self):
        """Draw a random option class according to the built weights."""
        return self._object(self._get(random.random()))
if __name__ == "__main__":
    # Run this module's doctests (Population, generated).
    import doctest
    # NOTE(review): relative imports only resolve when this module is run as
    # part of its package (e.g. ``python -m mangler.randomizers``); running
    # the file directly raises ImportError -- confirm intended invocation.
    from .operations import __all__ as opts
    from .operations import Swap, Invert
    doctest.testmod()
| {
"repo_name": "RemyPorter/Mangler",
"path": "mangler/randomizers.py",
"copies": "1",
"size": "3311",
"license": "mit",
"hash": -6330077961818400000,
"line_mean": 26.8235294118,
"line_max": 82,
"alpha_frac": 0.5738447599,
"autogenerated": false,
"ratio": 3.630482456140351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4704327216040351,
"avg_score": null,
"num_lines": null
} |
"""A slide which can show widgets."""
from bisect import bisect
from typing import List, Optional
from kivy.graphics.vertex_instructions import Rectangle
from kivy.uix.screenmanager import Screen
from kivy.uix.stencilview import StencilView
from kivy.graphics import Color
from kivy.properties import ListProperty, AliasProperty
from mpfmc.uix.widget import Widget, create_widget_objects_from_config
MYPY = False
if MYPY: # pragma: no cover
from mpfmc.core.mc import MpfMc # pylint: disable-msg=cyclic-import,unused-import
from kivy.uix.widget import \
Widget as KivyWidget # pylint: disable-msg=cyclic-import,unused-import,ungrouped-imports
from mpfmc.uix.widget import WidgetContainer # pylint: disable-msg=cyclic-import,unused-import,ungrouped-imports
# pylint: disable-msg=too-many-instance-attributes
class Slide(Screen, StencilView):

    """A slide on a display.

    A slide is a Kivy ``Screen`` (managed by its display's slide manager)
    combined with a ``StencilView`` so children are clipped to its bounds.
    """

    # Class-wide counter used to give every slide a unique, monotonically
    # increasing creation id (also used to name anonymous slides).
    next_id = 0

    @classmethod
    def get_next_id(cls) -> int:
        """Return the next slide id."""
        Slide.next_id += 1
        return Slide.next_id

    # pylint: disable-msg=too-many-arguments
    def __init__(self, mc: "MpfMc", name: Optional[str], config: Optional[dict] = None,
                 target: str = 'default', key: Optional[str] = None,
                 priority: int = 0, play_kwargs: Optional[dict] = None) -> None:
        """Initialise slide.

        Args:
            mc: The main MpfMc instance.
            name: Slide name; if falsy an 'Anon_<id>' name is generated.
            config: Optional 'slides' config dict; validated here if None.
            target: Name of the display target this slide belongs to.
            key: Optional key attached to widgets created from the config.
            priority: Slide priority.
            play_kwargs: Optional overrides passed to widget creation.
        """
        # config is a dict. widgets will be in a key
        # assumes config, if present, is validated.
        self.creation_order = Slide.get_next_id()
        if not name:
            name = 'Anon_{}'.format(self.creation_order)
        self.mc = mc
        self.name = name
        self.priority = priority
        self.pending_widgets = set()
        self.key = key
        self.mc.track_leak_reference(self)
        if not config:
            config = self.mc.config_validator.validate_config('slides', dict())
        self.transition_out = config.get('transition_out', None)
        self.expire = config.get('expire', None)
        self.display = self.mc.targets[target]
        self.size_hint = (None, None)
        super().__init__()
        # Slides always render at the display's native size.
        self.size = self.display.native_size
        self.orig_w, self.orig_h = self.size
        self.z = 0
        if 'widgets' in config:  # don't want try, swallows too much
            widgets = create_widget_objects_from_config(
                mc=self.mc,
                config=config['widgets'], key=self.key,
                play_kwargs=play_kwargs)
            self.add_widgets(widgets)
        self.display.add_widget(self)
        self.mc.active_slides[name] = self
        self.mc.slides[name] = config
        self.background_color = config.get('background_color', [0.0, 0.0, 0.0, 1.0])
        # Skip the background rectangle entirely when fully transparent.
        if self.background_color != [0.0, 0.0, 0.0, 0.0]:
            with self.canvas.before:  # noqa
                Color(*self.background_color)
                Rectangle(size=self.size, pos=(0, 0))
        self.opacity = config.get('opacity', 1.0)
        self.mc.post_mc_native_event(
            'slide_{}_created'.format(self.name))

        """event: slide_(name)_created
        config_section: slides
        class_label: slide

        desc: A slide called (name) has just been created.

        This means that this slide now exists, but it's not necessarily the
        active (showing) slide, depending on the priorities of the other slides
        and/or what else is going on.

        This is useful for things like the widget_player where you want to
        target a widget for a specific slide, but you can only do so if
        that slide exists.

        Slide names do not take into account what display or slide frame
        they're playing on, so be sure to create machine-wide unique names
        when you're naming your slides.
        """

    def __repr__(self):
        return '<Slide name={}, priority={}, id={}>'.format(self.name,
                                                            self.priority,
                                                            self.creation_order)

    def add_widgets_from_library(self, name: str, key: Optional[str] = None,
                                 widget_settings: Optional[dict] = None,
                                 play_kwargs: Optional[dict] = None,
                                 **kwargs) -> List["Widget"]:
        """Add a widget to the slide by name from the library of pre-defined widgets.

        Args:
            name: The name of the widget to add.
            key: An optional key.
            widget_settings: An optional dictionary of widget settings to override those in
                the library.
            play_kwargs: An optional dictionary of play settings to override those in
                the library.
            **kwargs: Additional arguments (ignored).

        Returns:
            A list of widgets (MpfWidget objects) added to the slide.

        Raises:
            ValueError: If *name* is not in the widget library.
        """
        del kwargs
        if name not in self.mc.widgets:
            raise ValueError("Widget {} not found".format(name))
        widgets_added = create_widget_objects_from_config(config=self.mc.widgets[name],
                                                          mc=self.mc,
                                                          key=key,
                                                          widget_settings=widget_settings,
                                                          play_kwargs=play_kwargs)
        for widget in widgets_added:
            self.add_widget(widget)
        return widgets_added

    def add_widgets(self, widgets: List["Widget"]):
        """Adds a list of widgets to this slide."""
        for w in widgets:
            self.add_widget(w)

    # pylint: disable-msg=arguments-differ
    def add_widget(self, widget: "Widget", **kwargs) -> None:
        """Add a widget to this slide.

        Args:
            widget: An MPF-enhanced widget (which will include details like z
                order and removal keys.)

        Notes:
            Widgets are drawn in order from the end of the children list to the
            beginning, meaning the first item in the child list is draw last so
            it will appear on top of all other items.

            This method respects the z-order of the widget it's adding and inserts
            it into the proper position in the widget tree. Higher numbered z order
            values will be inserted after (so they draw on top) of existing ones.

            If the new widget has the same priority of existing widgets, the new
            one is inserted after the widgets of that priority, to maintain the
            drawing order of the configuration file.
        """
        del kwargs
        # NOTE(review): presumably guards against adding a display widget that
        # renders the same display this slide lives on (which would recurse)
        # -- confirm against Widget.get_display().
        if widget.get_display() == self.display:
            raise AssertionError("Cannot add widget {} to display {} because the widget uses the same display.".
                                 format(widget, self.display))
        if widget.z < 0:
            # Negative z widgets are parked on the slide manager's parent so
            # they persist across slide changes (see add_widget_to_parent_frame).
            self.add_widget_to_parent_frame(widget)
            return
        # Insert the widget in the proper position in the z-order
        super().add_widget(widget, bisect(self.children, widget))

    def remove_widgets_by_key(self, key: str) -> None:
        """Removes all widgets from this slide with the specified key value."""
        for widget in self.find_widgets_by_key(key):
            # MPF widgets know how to remove themselves; plain Kivy widgets
            # are detached directly.
            if isinstance(widget, Widget):
                widget.remove()
            else:
                self.remove_widget(widget)

    def find_widgets_by_key(self, key: str) -> List["Widget"]:
        """Return a list of widgets with the matching key value by searching
        the tree of children belonging to this slide."""
        return [w for child in self.children
                for w in child.walk(restrict=True, loopback=False) if hasattr(w, "key") and w.key == key]

    def add_widget_to_parent_frame(self, widget: "KivyWidget"):
        """Adds this widget to this slide's parent instead of to this slide.

        Args:
            widget:
                The widget object.

        Notes:
            Widgets added to the parent slide_frame stay active and visible even
            if the slide in the frame changes.
        """
        # TODO: Determine proper z-order for negative z-order values
        self.manager.container.add_widget(widget)

    def schedule_removal(self, secs: float) -> None:
        """Schedules the removal of this slide after the specified number of seconds elapse."""
        self.mc.clock.schedule_once(self.remove, secs)

    def remove(self, dt=None) -> None:
        """Removes the slide from the parent display.

        Args:
            dt: Clock delta (ignored); present so this can be a clock callback.
        """
        del dt
        try:
            self.manager.remove_slide(slide=self,
                                      transition_config=self.transition_out)
        except AttributeError:
            # looks like slide was already removed, but let's clean it up just
            # in case
            self.prepare_for_removal()
            self.mc.active_slides.pop(self.name, None)

    def prepare_for_removal(self) -> None:
        """Performs housekeeping chores just prior to a slide being removed."""
        # Cancel any pending expire-removal callback.
        self.mc.clock.unschedule(self.remove)
        for widget in self.children:
            if hasattr(widget, 'prepare_for_removal'):  # try swallows too much
                widget.prepare_for_removal()
        self.mc.post_mc_native_event('slide_{}_removed'.format(self.name))

        """event: slide_(name)_removed
        config_section: slides
        class_label: slide

        desc: A slide called (name) has just been removed.

        This event is posted whenever a slide is removed, regardless of
        whether or not that slide was active (showing).

        Note that even though this event is called "removed", it's actually
        posted as part of the removal process. (e.g. there are still some
        clean-up things that happen afterwards.)

        Slide names do not take into account what display or slide frame
        they're playing on, so be sure to create machine-wide unique names
        when you're naming your slides.
        """

    def on_pre_enter(self, *args):
        """Notify all child widgets that this slide is about to be shown."""
        del args
        for widget in self.children:
            widget.on_pre_show_slide()

    def on_enter(self, *args):
        """Notify all child widgets that this slide is now showing."""
        del args
        for widget in self.children:
            widget.on_show_slide()

    def on_pre_leave(self, *args):
        """Notify all child widgets that this slide is about to stop showing."""
        del args
        for widget in self.children:
            widget.on_pre_slide_leave()

    def on_leave(self, *args):
        """Notify all child widgets that this slide has stopped showing."""
        del args
        for widget in self.children:
            widget.on_slide_leave()

    def on_slide_play(self):
        """Notify all child widgets that this slide is being played."""
        for widget in self.children:
            widget.on_slide_play()

    #
    # Properties
    #

    background_color = ListProperty([0.0, 0.0, 0.0, 1.0])
    '''The background color of the slide, in the (r, g, b, a)
    format.

    :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [0, 0, 0, 1.0].
    '''

    def _get_parent_widgets(self) -> List["WidgetContainer"]:
        """Return the current list of widgets owned by the slide manager parent."""
        return [x for x in self.manager.parent.children if x != self.manager]

    parent_widgets = AliasProperty(_get_parent_widgets, None)
    '''List of all the :class:`MpfWidget` widgets that belong to the slide
    manager of this slide (read-only). You should not change this list
    manually. Use the :meth:`add_widget <mpfmc.uix.widget.MpfWidget.add_widget>`
    method instead.

    Use this property rather than the 'self.manager.parent.children' property in
    case the slide architecture changes in the future.
    '''

    def _get_widgets(self) -> List["WidgetContainer"]:
        """Returns the current list of widget children owned by this slide."""
        return self.children

    widgets = AliasProperty(_get_widgets, None, bind=('children', ))
    '''List of all the :class:`MpfWidget` children widgets of this slide (read-only).
    You should not change this list manually. Use the
    :meth:`add_widget <mpfmc.uix.widget.MpfWidget.add_widget>` method instead.

    Use this property rather than the 'children' property in case the slide
    architecture changes in the future.
    '''
| {
"repo_name": "missionpinball/mpf-mc",
"path": "mpfmc/uix/slide.py",
"copies": "1",
"size": "12290",
"license": "mit",
"hash": 8035044992925590000,
"line_mean": 37.1677018634,
"line_max": 119,
"alpha_frac": 0.5983726607,
"autogenerated": false,
"ratio": 4.280738418669453,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5379111079369453,
"avg_score": null,
"num_lines": null
} |
"""A slightly convoluted and stupid way of logging important information.
"""
import sys
import traceback
import os
from functools import wraps
from . import settings
class logger:
    """Singleton that mirrors log messages to stdout and a log file.

    Instances are file-like enough (``write``/``flush``) to be passed
    wherever a writable stream is expected.
    """

    __instance = None

    def __new__(cls):
        """Return the shared instance, creating it (and opening the log file
        under ``settings.DATA_PATH``) on first use.
        """
        if logger.__instance is None:
            logger.__instance = object.__new__(cls)
            # Instance attribute ``log`` (the file) intentionally shadows the
            # ``log`` staticmethod below on instances.
            logger.__instance.log = open(
                os.path.join(settings.DATA_PATH, settings.LOG_NAME), 'a'
            )
            logger.__instance.terminal = sys.stdout
        return logger.__instance

    def write(self, message):
        """Write *message* plus a trailing newline to both stdout and the log file."""
        self.terminal.write(message + '\n')
        self.log.write(message + '\n')

    def flush(self):
        """No-op flush so instances satisfy the writable-file protocol.

        Bug fix: the original definition omitted ``self``, so calling
        ``logger().flush()`` raised ``TypeError``.
        """
        pass

    @staticmethod
    def log(func):
        """Wrap *func* with entry/exit logging gated by ``settings.LOGGING``."""
        @wraps(func)
        def wrapped(*args, **kwargs):
            """Log our information based on the settings.LOGGING level."""
            try:
                if settings.LOGGING > 1:
                    logger().write(
                        f'Started executing function "{func.__name__}" in {__file__}...')
                func(*args, **kwargs)
                if settings.LOGGING > 1:
                    logger().write(
                        f'Leaving function "{func.__name__}" in {__file__}...')
            except BaseException as e:
                # Bug fix: log the failure *before* re-raising -- in the
                # original these lines followed the raise and were unreachable.
                logger().write(f'Something went wrong...\n')
                # Bug fix: print_exc formats the active exception; print_tb
                # (used originally) requires an explicit traceback argument
                # and raised TypeError when called without one.
                traceback.print_exc(file=logger())
                # Catch all errors and raise them 'from None' to keep
                # later exceptions from bubbling up.
                raise e from None
        return wrapped
| {
"repo_name": "SeedyROM/kalci",
"path": "kalci/logger.py",
"copies": "1",
"size": "1999",
"license": "mit",
"hash": -5287251369691053000,
"line_mean": 29.7538461538,
"line_max": 85,
"alpha_frac": 0.5502751376,
"autogenerated": false,
"ratio": 4.627314814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5677589952414815,
"avg_score": null,
"num_lines": null
} |
# A slightly earlier version of this module was submitted to the Python
# standard library SIG, but never accepted (or rejected):
#
# https://mail.python.org/pipermail/stdlib-sig/2015-July/000990.html
#
# Copyright (c) 2015 Daniel Lenski <dlenski@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from argparse import ArgumentTypeError as err
import os
class PathType(object):
    """Factory for ``argparse`` ``type=`` callables that validate path arguments."""

    def __init__(self, exists=True, type='file', dash_ok=True):
        '''exists:
                True: a path that does exist
                False: a path that does not exist, in a valid parent directory
                None: don't care
           type: file, dir, symlink, None, or a function returning True for valid paths
                None: don't care
           dash_ok: whether to allow "-" as stdin/stdout'''
        assert exists in (True, False, None)
        assert type in ('file', 'dir', 'symlink', None) or hasattr(type, '__call__')
        self._exists = exists
        self._type = type
        self._dash_ok = dash_ok

    def __call__(self, string):
        """Validate *string* and return it unchanged; raise ArgumentTypeError otherwise."""
        if string == '-':
            # the special argument "-" means sys.std{in,out}
            if self._type == 'dir':
                raise err('standard input/output (-) not allowed as directory path')
            elif self._type == 'symlink':
                raise err('standard input/output (-) not allowed as symlink path')
            elif not self._dash_ok:
                raise err('standard input/output (-) not allowed')
        else:
            e = os.path.exists(string)
            if self._exists is True:
                if not e:
                    raise err("path does not exist: '%s'" % string)
                if self._type is None:
                    pass
                elif self._type == 'file':
                    if not os.path.isfile(string):
                        raise err("path is not a file: '%s'" % string)
                elif self._type == 'symlink':
                    # Bug fix: os.path.symlink does not exist (os.symlink
                    # *creates* links); the "is a symlink" test is islink.
                    if not os.path.islink(string):
                        raise err("path is not a symlink: '%s'" % string)
                elif self._type == 'dir':
                    if not os.path.isdir(string):
                        raise err("path is not a directory: '%s'" % string)
                elif not self._type(string):
                    raise err("path not valid: '%s'" % string)
            else:
                if self._exists is False and e:
                    raise err("path exists: '%s'" % string)
                p = os.path.dirname(os.path.normpath(string)) or '.'
                if not os.path.isdir(p):
                    raise err("parent path is not a directory: '%s'" % p)
                elif not os.path.exists(p):
                    # NOTE: effectively unreachable -- isdir(p) implies exists(p);
                    # kept so the original error message is preserved.
                    raise err("parent directory does not exist: '%s'" % p)
        return string
| {
"repo_name": "dlenski/python-vipaccess",
"path": "vipaccess/patharg.py",
"copies": "1",
"size": "3808",
"license": "apache-2.0",
"hash": -7725460792836771000,
"line_mean": 44.3333333333,
"line_max": 87,
"alpha_frac": 0.5913865546,
"autogenerated": false,
"ratio": 4.427906976744186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004349078298412085,
"num_lines": 84
} |
"""A slightly smarter telnet client that, if available, supports readline."""
import functools
import telnetlib
from curses.ascii import NAK
from telnetlib import IAC, WILL, DO, WONT, DONT, SB, SE
try:
import readline
old_readline_callback=readline.get_completer()
except ImportError:
readline=None
def telnet_callback(sock, cmd, option):
    """Telnet option-negotiation callback (for
    ``Telnet.set_option_negotiation_callback``).

    Handles a custom readline option: installs/removes the local readline
    completer on DO/DONT, and processes subnegotiation (SB...SE) payloads
    carrying either a ``parse_and_bind`` string or completion results.

    NOTE(review): ``OPTION_READLINE``, ``PARSE_AND_BIND``,
    ``COMPLETION_MATCHES``, ``COMPLETION_MATCH_SEP``, ``result_ready``,
    ``last_data`` and ``tn`` are not defined in this chunk -- confirm they
    exist elsewhere in the module. Also note the module defines
    ``old_readline_callback`` at import time but this function uses
    ``old_completer`` -- presumably one of the two names is stale.
    """
    global tn, old_completer, completion_result
    if option == OPTION_READLINE:
        if cmd == DO:
            # Telnet: do not acknowledge a request for a state we are already in
            if readline is not None and readline.get_completer() is not completer:
                # Save the old completer so DONT can restore it later.
                old_completer = readline.get_completer()
                readline.set_completer(completer)
                sock.sendall(IAC+WILL+OPTION_READLINE)
            else:
                # No readline available (or already installed): refuse.
                sock.sendall(IAC+WONT+OPTION_READLINE)
        elif cmd == DONT:
            if readline is not None and readline.get_completer() is completer:
                readline.set_completer(old_completer)
            sock.sendall(IAC+WONT+OPTION_READLINE)
    elif cmd == SE:
        # End of subnegotiation: first byte of the buffered SB data selects
        # the sub-option, the rest is its payload.
        s = tn.read_sb_data()
        opt = s[:1]
        data = s[1:]
        if opt == PARSE_AND_BIND:
            if readline is not None:
                readline.parse_and_bind(data.decode('ascii'))
            else:
                sock.sendall(IAC+WONT+OPTION_READLINE)
        elif opt == COMPLETION_MATCHES:
            # Completion reply: publish the matches and wake the waiter.
            completion_result = [r.decode('ascii') for r in data.split(COMPLETION_MATCH_SEP)]
            result_ready.set()
        else:
            # Unknown sub-option: ask the peer to retransmit.
            sock.sendall(IAC+NAK)
    elif cmd == NAK:
        sock.sendall(IAC+SB+last_data+IAC+SE)  # resend the last SB ... SE sent
def completer(text, nth):
    """Readline completer: return the *nth* completion of *text* for the
    current input line (delegates to the cached server lookup)."""
    line = readline.get_line_buffer()
    matches = get_completions(text, line,
                              readline.get_begidx(), readline.get_endidx())
    return matches[nth]
@functools.lru_cache(200)
def get_completions(text, buffer, begidx, endidx):
    """Ask the server for completions of *text* and return them as a list.

    Fixes over the original:
    - ``global last_data`` so the NAK-retransmit path in ``telnet_callback``
      sees the payload (it was a dead local assignment).
    - the undefined name ``data`` is the payload that was just sent.
    - the function now blocks on ``result_ready`` and returns
      ``completion_result``; ``completer`` indexes the return value, so
      returning ``None`` (as the original implicitly did) could never work.

    NOTE(review): ``SEP``, ``COMPLETE``, ``tn``, ``result_ready`` and
    ``completion_result`` must be defined elsewhere in the module -- confirm.
    """
    global last_data
    payload = (COMPLETE
               + text.encode('ascii') + SEP
               + buffer.encode('ascii') + SEP
               + begidx.to_bytes(2, 'little')
               + endidx.to_bytes(2, 'little'))
    tn.get_socket().sendall(IAC+SB+payload+IAC+SE)
    last_data = payload  # kept for NAK retransmission in telnet_callback
    # Wait for telnet_callback to publish the COMPLETION_MATCHES reply.
    result_ready.wait()
    result_ready.clear()
    return completion_result
| {
"repo_name": "wallefan/cosmic-encounter",
"path": "client.py",
"copies": "1",
"size": "2153",
"license": "mit",
"hash": -5599920687380048000,
"line_mean": 36.7719298246,
"line_max": 121,
"alpha_frac": 0.6112401301,
"autogenerated": false,
"ratio": 3.618487394957983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4729727525057983,
"avg_score": null,
"num_lines": null
} |
'''A slugify function which handle unicode
.. autofunction:: slugify
'''
import re
from unicodedata import normalize
from html.entities import name2codepoint
try:
from unidecode import unidecode
except ImportError:
unidecode = None
from .pep import to_string
# character entity reference
CHAR_ENTITY_REXP = re.compile('&(%s);' % '|'.join(name2codepoint))
# decimal character reference
DECIMAL_REXP = re.compile('&#(\d+);')
# hexadecimal character reference
HEX_REXP = re.compile('&#x([\da-fA-F]+);')
REPLACE1_REXP = re.compile(r'[\']+')
REPLACE2_REXP = re.compile(r'[^-a-z0-9]+')
REMOVE_REXP = re.compile('-{2,}')
def slugify(value, separator='-', max_length=0, word_boundary=False,
entities=True, decimal=True, hexadecimal=True):
'''Normalizes string, removes non-alpha characters,
and converts spaces to ``separator`` character
'''
value = normalize('NFKD', to_string(value, 'utf-8', 'ignore'))
if unidecode:
value = unidecode(value)
# character entity reference
if entities:
value = CHAR_ENTITY_REXP.sub(
lambda m: chr(name2codepoint[m.group(1)]), value)
# decimal character reference
if decimal:
try:
value = DECIMAL_REXP.sub(lambda m: chr(int(m.group(1))), value)
except:
pass
# hexadecimal character reference
if hexadecimal:
try:
value = HEX_REXP.sub(lambda m: chr(int(m.group(1), 16)), value)
except:
pass
value = value.lower()
value = REPLACE1_REXP.sub('', value)
value = REPLACE2_REXP.sub('-', value)
# remove redundant -
value = REMOVE_REXP.sub('-', value).strip('-')
# smart truncate if requested
if max_length > 0:
value = smart_truncate(value, max_length, word_boundary, '-')
if separator != '-':
value = value.replace('-', separator)
return value
def smart_truncate(value, max_length=0, word_boundaries=False, separator=' '):
""" Truncate a string """
value = value.strip(separator)
if not max_length:
return value
if len(value) < max_length:
return value
if not word_boundaries:
return value[:max_length].strip(separator)
if separator not in value:
return value[:max_length]
truncated = ''
for word in value.split(separator):
if word:
next_len = len(truncated) + len(word) + len(separator)
if next_len <= max_length:
truncated += '{0}{1}'.format(word, separator)
if not truncated:
truncated = value[:max_length]
return truncated.strip(separator)
| {
"repo_name": "nooperpudd/pulsar",
"path": "pulsar/utils/slugify.py",
"copies": "6",
"size": "2642",
"license": "bsd-3-clause",
"hash": -6022611373443985000,
"line_mean": 24.6504854369,
"line_max": 78,
"alpha_frac": 0.6192278577,
"autogenerated": false,
"ratio": 3.7581792318634424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7377407089563441,
"avg_score": null,
"num_lines": null
} |
"""A small CLI application for tracking autoscaling of minions.
For a minion to be auto-accepted by a Salt master there are two separate events that needs to happen:
1. new-instance: The autoscaling group must have published that minion will
connect.
2. new-minion: The minion must have connected to the master.
When both requirements are satisfied (check), a minion can be accepted. This
script keeps a small sqlite database of the auto-scaling states minions are in.
TODO: Add a pruning command to eventually delete old data.
"""
import argparse
import collections
import contextlib
import datetime
import os
import sqlite3
import sys
import time
def pid_is_running(pid):
"""Check For the existence of a unix pid. """
try:
os.kill(pid, 0)
except OSError:
return False
else: return True
class InterprocessLock:
def __init__(self, path, timeout):
self._path = path
self._timeout = timeout # seconds
def _try_lock_once(self):
try:
f = os.open(self._path, os.O_CREAT | os.O_EXCL | os.O_WRONLY | os.O_TRUNC)
try:
os.write(f, "{0}".format(os.getpid()))
finally:
os.close(f)
return True
except OSError, e:
return False
def _lock_owner_alive(self):
try:
f = open(self._path)
with contextlib.closing(f):
return pid_is_running(int(f.read()))
except ValueError:
return False
except IOError:
return False
def _invalidate_lock(self):
os.unlink(self._path)
def _try_lock_with_retry(self):
for _ in range(self._timeout):
if self._try_lock_once():
return True
elif not self._lock_owner_alive():
self._invalidate_lock()
else:
time.sleep(1)
return False
def try_lock(self):
self._locked = self._try_lock_with_retry()
return self._locked
def close(self):
if self._locked:
self._invalidate_lock()
@contextlib.contextmanager
def sqlite_connection(path):
conn = sqlite3.connect(path)
yield conn
conn.close()
@contextlib.contextmanager
def sqlite_cursor(conn):
c = conn.cursor()
yield c
c.close()
Record = collections.namedtuple('Record', ('instanceid', 'instancetimestamp', 'miniontimestamp'))
def create_table_if_not_exist(conn):
with sqlite_cursor(conn) as c:
c.execute("CREATE TABLE IF NOT EXISTS instances (instanceid TEXT CONSTRAINT instanceid PRIMARY KEY ON CONFLICT REPLACE, instancetimestamp NUMERIC, miniontimestamp NUMERIC)")
def read_record(conn, instanceid):
with sqlite_cursor(conn) as c:
c.execute("SELECT instanceid, instancetimestamp, miniontimestamp FROM instances WHERE instanceid=?", (instanceid,))
for row in c:
return Record(*row)
else:
return None
def write_record(conn, record):
with sqlite_cursor(conn) as c:
c.execute("INSERT INTO instances (instanceid, instancetimestamp, miniontimestamp) VALUES(?, ?, ?)", record)
def new_instance(args):
lock = InterprocessLock("{0}.lock".format(args.database_file), args.lock_timeout)
if not lock.try_lock():
print "Could not take lock."
return 1
with contextlib.closing(lock), sqlite_connection(args.database_file) as conn:
create_table_if_not_exist(conn)
for instance in args.instances:
record = read_record(conn, instance)
if record is None:
record = Record(instanceid=instance, instancetimestamp=time.time(), miniontimestamp=None)
else:
record = record._replace(instancetimestamp=time.time())
write_record(conn, record)
conn.commit()
return 0
def new_minion(args):
lock = InterprocessLock("{0}.lock".format(args.database_file), args.lock_timeout)
if not lock.try_lock():
print "Could not take lock."
return 1
with contextlib.closing(lock), sqlite_connection(args.database_file) as conn:
create_table_if_not_exist(conn)
for minion in args.minions:
record = read_record(conn, minion)
if record is None:
record = Record(instanceid=minion, instancetimestamp=None, miniontimestamp=time.time())
else:
record = record._replace(miniontimestamp=time.time())
write_record(conn, record)
conn.commit()
return 0
def check(args):
lock = InterprocessLock("{0}.lock".format(args.database_file), args.lock_timeout)
if not lock.try_lock():
print "Could not take lock."
return 1
with contextlib.closing(lock), sqlite_connection(args.database_file) as conn:
create_table_if_not_exist(conn)
with sqlite_cursor(conn) as c:
c.execute("SELECT instanceid FROM instances"
" WHERE miniontimestamp IS NULL AND instancetimestamp IS NOT NULL")
for row in c:
print "Pending instance registered as minion, but not as EC2 instance: {0}".format(row[0])
c.execute("SELECT instanceid FROM instances"
" WHERE miniontimestamp IS NOT NULL"
" AND instancetimestamp IS NULL")
for row in c:
print "Pending instance registered as EC2 instance, but not as minion: {0}".format(row[0])
c.execute("SELECT COUNT(*) FROM instances "
"WHERE miniontimestamp IS NOT NULL"
" AND instancetimestamp IS NOT NULL AND instanceid=?",
(args.instance,))
found = c.fetchone()[0] > 0
if found:
print "The minion can be accepted."
return 0
else:
print "The minion is NOT ready for acceptance."
return 1
def purge(args):
lock = InterprocessLock("{0}.lock".format(args.database_file), args.lock_timeout)
if not lock.try_lock():
print "Could not take lock."
return 1
duration = datetime.timedelta(**{args.unit[0]: args.duration[0]}).total_seconds()
purge_older_than = time.time() - duration
with contextlib.closing(lock), sqlite_connection(args.database_file) as conn:
create_table_if_not_exist(conn)
with sqlite_cursor(conn) as c:
c.execute("DELETE FROM instances"
" WHERE (miniontimestamp < ? or miniontimestamp IS NULL)"
" AND (instancetimestamp < ? or instancetimestamp IS NULL)",
(purge_older_than, purge_older_than,))
print "Deleted", c.rowcount, "rows."
conn.commit()
def main(args):
parser = argparse.ArgumentParser(description='A small database of minions to be accepted.')
parser.add_argument('--database-file', default='autoscaling.db',
help='sqlite3 database where where data is stored.')
parser.add_argument('--lock-timeout', metavar='SECONDS', default=30, type=int,
help='number of seconds to wait for exclusive database lock')
subparsers = parser.add_subparsers(help='subcommand')
new_instance_parser = subparsers.add_parser('new-instance',
help='register a new instance started')
new_instance_parser.add_argument('instances', nargs='+', metavar='INSTANCE',
help='instance(s) to be added')
new_instance_parser.set_defaults(func=new_instance)
new_minion_parser = subparsers.add_parser('new-minion',
help='register a new minion registered')
new_minion_parser.add_argument('minions', nargs='+', metavar='MINION',
help='minion(s) to be added')
new_minion_parser.set_defaults(func=new_minion)
purge_parser = subparsers.add_parser('purge',
help='purge older records from the database')
purge_parser.add_argument('duration', nargs=1, metavar='DURATION', type=int,
help='time duration')
purge_parser.add_argument('unit', nargs=1, metavar='UNIT',
choices=('seconds', 'minutes','hours', 'days', 'weeks'), default='days',
help='minion(s) to be added')
purge_parser.set_defaults(func=purge)
check_parser = subparsers.add_parser('check', help=('check if instance is'
' registered both in EC2 and Salt. This is to avoid autoaccepting minions'
' not in autoscaling groups. Also logs pending minions. Returns 0 if'
' instance is not pending, 1 otherwise.'))
check_parser.add_argument('instance', metavar='INSTANCE',
help='minion(s) to be added')
check_parser.set_defaults(func=check)
args = parser.parse_args()
return args.func(args)
if __name__=='__main__':
sys.exit(main(sys.argv))
| {
"repo_name": "JensRantil/saltstack-autoscaling",
"path": "reactors/autoscaling/autoscale_registry.py",
"copies": "1",
"size": "8104",
"license": "mit",
"hash": 3799551181549672400,
"line_mean": 31.2868525896,
"line_max": 177,
"alpha_frac": 0.6786771964,
"autogenerated": false,
"ratio": 3.74664817383264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9830075522033141,
"avg_score": 0.01904996963989968,
"num_lines": 251
} |
"""A small curses-based adventure game.
By Tymewalk:
https://scratch.mit.edu/users/Tymewalk
https://github.com/Tymewalk
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py; used for resolving packaged files.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
#with open(path.join(here, 'README.md'), encoding='utf-8') as f:
# long_description = f.read()
setup(
    name='tymeventure',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.2',
    description='A curses-based adventure game',
    # NOTE(review): long_description duplicates description because the
    # README-reading block above is commented out.
    long_description='A curses-based adventure game',
    # The project's main homepage.
    url='https://github.com/Tymewalk/Tymeventure',
    # Author details
    author='Tymewalk',
    author_email='none',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Other Audience',
        'Topic :: Games/Entertainment',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    # What does your project relate to?
    keywords='game adventure',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    # py_modules=["my_module"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
    },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    # Installs a "tymeventure" console command that calls tymeventure.main().
    entry_points={
        'console_scripts': [
            'tymeventure=tymeventure:main',
        ],
    },
)
| {
"repo_name": "Tymewalk/Tymeventure",
"path": "setup.py",
"copies": "1",
"size": "3813",
"license": "mit",
"hash": -6507430046232397000,
"line_mean": 33.9816513761,
"line_max": 94,
"alpha_frac": 0.6648308419,
"autogenerated": false,
"ratio": 4.022151898734177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5186982740634177,
"avg_score": null,
"num_lines": null
} |
# A small DNS server that anwsers all queries with the same IP.
# From http://code.activestate.com/recipes/491264/
import socket
class DNSQuery:
    """Minimal parser for one incoming DNS query packet (RFC 1035 wire
    format), able to build a fixed-IP A-record answer.

    NOTE: Python 2 only — it treats the raw UDP payload as a byte string
    and builds the reply with string concatenation.
    """
    def __init__(self, data):
        # data: raw UDP payload of the query packet.
        self.data = data
        # Queried domain name, dot-terminated (e.g. "example.com.").
        self.dominio = ''

        tipo = (ord(data[2]) >> 3) & 15  # Opcode bits
        if tipo == 0:  # Standard query
            ini = 12  # name labels start right after the 12-byte header
            lon = ord(data[ini])
            # Walk the length-prefixed labels up to the 0-length terminator.
            while lon != 0:
                self.dominio += data[ini + 1:ini + lon + 1] + '.'
                ini += lon + 1
                lon = ord(data[ini])

    def respuesta(self, ip):
        # Build the answer ("respuesta") packet pointing the queried name at
        # the dotted-quad `ip`. Returns '' for non-standard queries.
        packet = ''
        if self.dominio:
            packet += self.data[:2] + "\x81\x80"  # original ID + response flags
            packet += self.data[4:6] + self.data[4:6] + '\x00\x00\x00\x00'  # Questions and Answers Counts
            packet += self.data[12:]  # Original Domain Name Question
            print "asked for " + self.data[12:]
            packet += '\xc0\x0c'  # Pointer to domain name
            packet += '\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04'  # Response type, ttl and resource data length -> 4 bytes
            packet += str.join('', map(lambda x: chr(int(x)), ip.split('.')))  # 4bytes of IP
        return packet
if __name__ == '__main__':
    # Answer every DNS query arriving on 127.0.0.17:53 with this address.
    ip = '192.168.1.1'
    print 'pyminifakeDNS:: dom.query. 60 IN A %s' % ip

    udps = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udps.bind(('127.0.0.17', 53))

    try:
        # Serve forever: parse each query and echo back the fixed answer.
        while 1:
            data, addr = udps.recvfrom(1024)
            print "got packet!"
            p = DNSQuery(data)
            udps.sendto(p.respuesta(ip), addr)
            print 'Respuesta: %s -> %s' % (p.dominio, ip)
    except KeyboardInterrupt:
        # Ctrl-C: close the socket cleanly before exiting.
        print 'Finalizando'
        udps.close()
| {
"repo_name": "arkanis/minidyndns",
"path": "references/fakedns.py",
"copies": "1",
"size": "1611",
"license": "mit",
"hash": 7110942148448881000,
"line_mean": 32.5625,
"line_max": 125,
"alpha_frac": 0.5567970205,
"autogenerated": false,
"ratio": 3.0569259962049338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8884948743414214,
"avg_score": 0.04575485465814386,
"num_lines": 48
} |
"""A small function for listing installed Python instances on a Windows machine as defined by PEP 514."""
import os, winreg, collections
# https://www.python.org/dev/peps/pep-0514/
def _enum_keys(key):
    """Yield the name of every subkey of an already-open registry key."""
    subkey_count, _value_count, _modified = winreg.QueryInfoKey(key)
    for index in range(subkey_count):
        yield winreg.EnumKey(key, index)
def _get_value(key, value_name):
    """Read a named registry value, returning None when it does not exist."""
    try:
        data, _value_type = winreg.QueryValueEx(key, value_name)
    except FileNotFoundError:
        return None
    return data
"""A simple data-only object that represents a Python installation on disk."""
PythonInstallation = collections.namedtuple("PythonInstallation",
"company name support_url version sys_version sys_arch exec_path")
def _create_python_installation(company, tag, tag_key):
    """Build a PythonInstallation from one company/tag registry entry,
    substituting PEP 514 defaults for any value the entry omits."""
    display_name = _get_value(tag_key, "DisplayName") or ("Python " + tag)
    support_url = _get_value(tag_key, "SupportUrl") or "http://www.python.org/"
    version = _get_value(tag_key, "Version") or tag[:3]
    sys_version = _get_value(tag_key, "SysVersion") or tag[:3]
    sys_arch = _get_value(tag_key, "SysArchitecture") or None
    exec_path = None
    try:
        with winreg.OpenKey(tag_key, "InstallPath") as install_key:
            # Prefer the explicit ExecutablePath; otherwise derive it from
            # the install directory stored in the key's default value.
            exec_path = (_get_value(install_key, "ExecutablePath")
                         or os.path.join(_get_value(install_key, None), "python.exe"))
    except FileNotFoundError:
        # No InstallPath subkey at all: leave exec_path as None.
        pass
    return PythonInstallation(company, display_name, support_url, version,
                              sys_version, sys_arch, exec_path)
def get_python_installations():
    """Return the set of Python installations registered on this machine.

    Searches the registry locations defined by PEP 514: the current user's
    hive plus both the 64-bit and 32-bit views of the machine hive.
    """
    found = set()
    search_keys = [
        (winreg.HKEY_CURRENT_USER, r"Software\Python", 0),
        (winreg.HKEY_LOCAL_MACHINE, r"Software\Python", winreg.KEY_WOW64_64KEY),
        (winreg.HKEY_LOCAL_MACHINE, r"Software\Python", winreg.KEY_WOW64_32KEY),
    ]
    for hive, subkey, view_flags in search_keys:
        # A missing key anywhere below simply means this hive/view has no
        # (more) registrations; move on to the next location.
        try:
            with winreg.OpenKeyEx(hive, subkey, access=winreg.KEY_READ | view_flags) as root_key:
                for company in _enum_keys(root_key):
                    with winreg.OpenKey(root_key, company) as company_key:
                        for tag in _enum_keys(company_key):
                            with winreg.OpenKey(company_key, tag) as tag_key:
                                found.add(_create_python_installation(company, tag, tag_key))
        except FileNotFoundError:
            continue
    return found
if __name__ == "__main__":
print("\n".join(repr(x) for x in get_python_installations())) | {
"repo_name": "SeanCline/PyExt",
"path": "test/scripts/python_installations.py",
"copies": "1",
"size": "2537",
"license": "mit",
"hash": 6675060341210110000,
"line_mean": 39.2857142857,
"line_max": 105,
"alpha_frac": 0.6306661411,
"autogenerated": false,
"ratio": 3.655619596541787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9728304602465397,
"avg_score": 0.01159622703527782,
"num_lines": 63
} |
# A small helper class to house functions needed by KeplerMapper.visualize
import numpy as np
from sklearn import preprocessing
import json
from collections import defaultdict
from ast import literal_eval
# Default colorscale: eleven evenly spaced (position, rgb-string) stops of
# the matplotlib "Viridis" palette, consumed by _map_val2color.
colorscale_default = [
    [0.0, "rgb(68, 1, 84)"],  # Viridis
    [0.1, "rgb(72, 35, 116)"],
    [0.2, "rgb(64, 67, 135)"],
    [0.3, "rgb(52, 94, 141)"],
    [0.4, "rgb(41, 120, 142)"],
    [0.5, "rgb(32, 144, 140)"],
    [0.6, "rgb(34, 167, 132)"],
    [0.7, "rgb(68, 190, 112)"],
    [0.8, "rgb(121, 209, 81)"],
    [0.9, "rgb(189, 222, 38)"],
    [1.0, "rgb(253, 231, 36)"],
]

# A blue-to-red hex palette.
# NOTE(review): "#00a4ff" appears twice in a row — confirm intentional.
palette = [
    "#0500ff",
    "#0300ff",
    "#0100ff",
    "#0002ff",
    "#0022ff",
    "#0044ff",
    "#0064ff",
    "#0084ff",
    "#00a4ff",
    "#00a4ff",
    "#00c4ff",
    "#00e4ff",
    "#00ffd0",
    "#00ff83",
    "#00ff36",
    "#17ff00",
    "#65ff00",
    "#b0ff00",
    "#fdff00",
    "#FFf000",
    "#FFdc00",
    "#FFc800",
    "#FFb400",
    "#FFa000",
    "#FF8c00",
    "#FF7800",
    "#FF6400",
    "#FF5000",
    "#FF3c00",
    "#FF2800",
    "#FF1400",
    "#FF0000",
]
def _colors_to_rgb(colorscale):
""" Ensure that the color scale is formatted in rgb strings.
If the colorscale is a hex string, then convert to rgb.
"""
if colorscale[0][1][0] == "#":
plotly_colors = np.array(colorscale)[:, 1].tolist()
for k, hexcode in enumerate(plotly_colors):
hexcode = hexcode.lstrip("#")
hex_len = len(hexcode)
step = hex_len // 3
colorscale[k][1] = "rgb" + str(
tuple(int(hexcode[j : j + step], 16) for j in range(0, hex_len, step))
)
return colorscale
def _to_html_format(st):
return st.replace("\n", "<br>")
def _map_val2color(val, vmin, vmax, colorscale=None):
""" Maps a value val in [vmin, vmax] to the corresponding color in
the colorscale
returns the rgb color code of that color
"""
colorscale = colorscale or colorscale_default
if vmin >= vmax:
raise ValueError("vmin should be < vmax")
scale = list(map(float, np.array(colorscale)[:, 0]))
colors = np.array(colorscale)[:, 1]
colors_01 = (
np.array(list(map(literal_eval, [color[3:] for color in colors]))) / 255.0
)
v = (val - vmin) / float((vmax - vmin)) # val is mapped to v in[0,1]
idx = 0
# sequential search for the two consecutive indices idx, idx+1 such that
# v belongs to the interval [scale[idx], scale[idx+1]
while v > scale[idx + 1]:
idx += 1
left_scale_val = scale[idx]
right_scale_val = scale[idx + 1]
vv = (v - left_scale_val) / (right_scale_val - left_scale_val)
# get the triplet of three values in [0,1] that represent the rgb color
# corresponding to val
val_color01 = colors_01[idx] + vv * (colors_01[idx + 1] - colors_01[idx])
val_color_0255 = list(map(np.uint8, 255 * val_color01))
return "rgb" + str(tuple(val_color_0255))
def init_color_function(graph, color_function=None):
    """Return one color value in [0, 1] per sample referenced by the graph.

    Without an explicit color_function, samples are colored by their row
    order in the data set. Values are min-max scaled and clamped to [0, 1].
    """
    # Number of samples = largest member index appearing in any node, + 1.
    n_samples = np.max([i for s in graph["nodes"].values() for i in s]) + 1
    if color_function is None:
        color_function = np.arange(n_samples)
    # sklearn 0.19 requires a 2-D array, hence the reshape to one column.
    column = color_function.reshape(-1, 1).astype(np.float64)
    scaled = preprocessing.MinMaxScaler().fit_transform(column).ravel()
    # Guard against floating point drift such as 1.0000...0002.
    scaled[scaled > 1] = 1
    scaled[scaled < 0] = 0
    return scaled
def format_meta(graph, custom_meta=None, color_function_name=None):
    """Build the summary dictionary shown in the visualization sidebar."""
    memberships = list(graph["nodes"].values())
    n_unique = len({member for cluster in memberships for member in cluster})

    if custom_meta is None:
        # Fall back to the graph's own metadata, HTML-escaping the two
        # free-text entries that may contain newlines.
        custom_meta = graph["meta_data"]
        if "clusterer" in custom_meta.keys():
            custom_meta["clusterer"] = _to_html_format(custom_meta["clusterer"])
        if "projection" in custom_meta.keys():
            custom_meta["projection"] = _to_html_format(custom_meta["projection"])
    if color_function_name is not None:
        custom_meta["color_function"] = color_function_name

    return {
        "custom_meta": custom_meta,
        "n_nodes": len(graph["nodes"]),
        "n_edges": sum(len(links) for links in graph["links"].values()),
        "n_total": sum(len(cluster) for cluster in memberships),
        "n_unique": n_unique,
    }
def format_mapper_data(
    graph, color_function, X, X_names, lens, lens_names, custom_tooltips, env, nbins=10
):
    """Convert a mapper graph into the node/link JSON structure consumed by
    the D3 visualization template.

    Fixes over the previous version: the locals for color/type/size are now
    actually used in the node dict (previously `t` was assigned but
    `_type_node()` was called a second time), the unused enumerate index in
    the link loop is gone, and a leftover pdb debug comment was removed.
    """
    json_dict = {"nodes": [], "links": []}
    # Map each string node id to its numeric position for the link list.
    node_id_to_num = {}
    for i, (node_id, member_ids) in enumerate(graph["nodes"].items()):
        node_id_to_num[node_id] = i
        tooltip = _format_tooltip(
            env,
            member_ids,
            custom_tooltips,
            X,
            X_names,
            lens,
            lens_names,
            color_function,
            node_id,
            nbins,
        )
        json_dict["nodes"].append(
            {
                "id": "",
                "name": node_id,
                "color": _color_function(member_ids, color_function),
                "type": _type_node(),
                "size": _size_node(member_ids),
                "tooltip": tooltip,
            }
        )
    for node_id, linked_node_ids in graph["links"].items():
        for linked_node_id in linked_node_ids:
            json_dict["links"].append(
                {
                    "source": node_id_to_num[node_id],
                    "target": node_id_to_num[linked_node_id],
                    "width": _size_link_width(graph, node_id, linked_node_id),
                }
            )
    return json_dict
def build_histogram(data, colorscale=None, nbins=10):
    """Bucket `data` (values assumed in [0, 1]) into nbins bars for the UI.

    Each bar dict carries its height as a percent of the tallest bucket,
    its share of all samples, and the colorscale color of the bucket's
    midpoint.
    """
    if colorscale is None:
        colorscale = colorscale_default
    # TODO: we should weave this method of handling colors into the normal build_histogram and combine both functions
    colorscale = _colors_to_rgb(colorscale)

    counts, edges = np.histogram(data, range=(0, 1), bins=nbins)
    midpoints = np.mean(np.array(list(zip(edges, edges[1:]))), axis=1)
    tallest = max(counts)
    total = sum(counts)

    bars = []
    for count, mid in zip(counts, midpoints):
        bars.append({
            "height": np.floor(((count / tallest) * 100) + 0.5),
            "perc": round((count / total) * 100.0, 1),
            "color": _map_val2color(mid, 0.0, 1.0, colorscale),
        })
    return bars
def graph_data_distribution(graph, color_function, colorscale, nbins=10):
    """Histogram of the per-node mean color value across the whole graph."""
    node_averages = [
        np.mean(color_function[member_ids])
        for member_ids in graph["nodes"].values()
    ]
    return build_histogram(node_averages, colorscale=colorscale, nbins=nbins)
def _format_cluster_statistics(member_ids, X, X_names):
    """Describe a cluster by the features most above/below the data mean.

    Returns {"above": [...], "below": [...], "size": len(member_ids)}, where
    each list holds up to five features with the cluster mean and its
    distance from the global mean in standard deviations. When X is None
    only the cluster size is filled in.
    """
    # TODO: Cache X_mean and X_std for all clusters.
    # TODO: replace long tuples with named tuples.
    # TODO: Name all the single letter variables.
    # TODO: remove duplication between above_stats and below_stats
    # TODO: Should we only show variables that are much above or below the mean?
    cluster_data = {"above": [], "below": [], "size": len(member_ids)}
    cluster_stats = ""
    if X is not None:
        # List vs. numpy handling: cast to numpy array
        if isinstance(X_names, list):
            X_names = np.array(X_names)
        # Defaults when providing no X_names
        if X_names.shape[0] == 0:
            X_names = np.array(["f_%s" % (i) for i in range(X.shape[1])])
        cluster_X_mean = np.mean(X[member_ids], axis=0)
        X_mean = np.mean(X, axis=0)
        X_std = np.std(X, axis=0)
        above_mean = cluster_X_mean > X_mean
        # Absolute distance of the cluster mean from the global mean, in
        # units of the global standard deviation.
        std_m = np.sqrt((cluster_X_mean - X_mean) ** 2) / X_std
        stat_zip = list(
            zip(
                std_m,
                X_names,
                np.mean(X, axis=0),
                cluster_X_mean,
                above_mean,
                np.std(X, axis=0),
            )
        )
        # Sorting tuples orders primarily by deviation, largest first.
        stats = sorted(stat_zip, reverse=True)
        above_stats = [a for a in stats if bool(a[4]) is True]
        below_stats = [a for a in stats if bool(a[4]) is False]
        # Report at most the five most-deviant features in each direction.
        if len(above_stats) > 0:
            for s, f, i, c, a, v in above_stats[:5]:
                cluster_data["above"].append(
                    {"feature": f, "mean": round(c, 3), "std": round(s, 1)}
                )
        if len(below_stats) > 0:
            for s, f, i, c, a, v in below_stats[:5]:
                cluster_data["below"].append(
                    {"feature": f, "mean": round(c, 3), "std": round(s, 1)}
                )
    return cluster_data
def _format_projection_statistics(member_ids, lens, lens_names):
projection_data = []
if lens is not None:
if isinstance(lens_names, list):
lens_names = np.array(lens_names)
# Create defaults when providing no lens_names
if lens_names.shape[0] == 0:
lens_names = np.array(["p_%s" % (i) for i in range(lens.shape[1])])
means_v = np.mean(lens[member_ids], axis=0)
maxs_v = np.max(lens[member_ids], axis=0)
mins_v = np.min(lens[member_ids], axis=0)
for name, mean_v, max_v, min_v in zip(lens_names, means_v, maxs_v, mins_v):
projection_data.append(
{
"name": name,
"mean": round(mean_v, 3),
"max": round(max_v, 3),
"min": round(min_v, 3),
}
)
return projection_data
def _tooltip_components(
    member_ids,
    X,
    X_names,
    lens,
    lens_names,
    color_function,
    node_ID,
    colorscale,
    nbins=10,
):
    """Gather the three data pieces a cluster tooltip displays: projection
    statistics, cluster feature statistics, and the member color histogram."""
    return (
        _format_projection_statistics(member_ids, lens, lens_names),
        _format_cluster_statistics(member_ids, X, X_names),
        build_histogram(color_function[member_ids], colorscale=colorscale, nbins=nbins),
    )
def _format_tooltip(
    env,
    member_ids,
    custom_tooltips,
    X,
    X_names,
    lens,
    lens_names,
    color_function,
    node_ID,
    nbins,
):
    """Render the HTML tooltip for one cluster node via the jinja template."""
    # TODO: Allow customization in the form of aggregate per node and per entry in node.
    # TODO: Allow users to turn off tooltip completely.
    if custom_tooltips is None:
        per_member_labels = member_ids
    else:
        per_member_labels = custom_tooltips[member_ids]
    # A plain list renders better than a numpy array in the template.
    per_member_labels = list(per_member_labels)

    colorscale = colorscale_default
    projection_stats, cluster_stats, histogram = _tooltip_components(
        member_ids,
        X,
        X_names,
        lens,
        lens_names,
        color_function,
        node_ID,
        colorscale,
        nbins,
    )
    return env.get_template("cluster_tooltip.html").render(
        projection_stats=projection_stats,
        cluster_stats=cluster_stats,
        custom_tooltips=per_member_labels,
        histogram=histogram,
        dist_label="Member",
        node_id=node_ID,
    )
def _color_function(member_ids, color_function):
return np.mean(color_function[member_ids])
def _size_node(member_ids):
return int(np.log(len(member_ids) + 1) + 1)
def _type_node():
return "circle"
def _size_link_width(graph, node_id, linked_node_id):
return 1
| {
"repo_name": "MLWave/kepler-mapper",
"path": "kmapper/visuals.py",
"copies": "1",
"size": "12091",
"license": "mit",
"hash": -5311303685623446000,
"line_mean": 28.134939759,
"line_max": 117,
"alpha_frac": 0.566123563,
"autogenerated": false,
"ratio": 3.3391328362330848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44052563992330845,
"avg_score": null,
"num_lines": null
} |
"""A small helper module to deal with sections in the control page.
"""
import os
import re
import wikiconnector
def linesFromBulletlist(t):
    """Assume t is a mediawiki bullet list produced by readlines,
    one item per line.
    Return a list of the items, without the bullet syntax.
    """
    # Keep only bulleted lines and strip the leading "*"/"#" markup
    # (possibly nested, with surrounding spaces).
    regexp = " *(\*|#)+ *"
    r = [re.sub(regexp, '', x, count=1)
         for x in t
         if re.match(regexp, x)]
    print 'lFB 1: ', t, '\n', r
    # get the content of the link: "[[Target]]" or "[[Target|Label]]" -> inner text
    match = [re.search('\[\[ *(.*?) *\]\]', x) for x in r]
    r = [(m.group(1) if m else x.strip())
         for (x, m)
         in zip(r, match)]
    # remove any possible readable name suffices ("Target|Label" -> "Target")
    match = [re.search('(.*?)\|(.*)', x) for x in r]
    r = [(m.group(1) if m else x.strip())
         for (x, m)
         in zip(r, match)]
    print 'lFB final: ', r
    return r
#------------------------------------------
# helper functions to deal with sections
def getSection(text, section):
    """Return the body of the L1 mediawiki heading titled `section`.

    The body runs from the heading up to the next '=' character; it is
    returned stripped, or as "" when the heading is absent.
    """
    found = re.search('= *' + section + ' *=([^=]*)', text, re.S)
    if not found:
        return ""
    return found.group(1).strip()
def getSectionLines(text, section):
    """Like getSection, but return the body as a list of lines ([] if absent)."""
    body = getSection(text, section)
    return body.split('\n') if body else []
def writeSectionContent(text, section,
                        filename):
    """Extract the content of `section` from `text`, write it to
    `filename`, and return the content."""
    content = getSection(text, section)
    with open(filename, 'w') as outfile:
        outfile.write(content)
    return content
def downloadSectionFiles(text, section, dirname, downloadFlag, embeddedElemetsFlag):
    """download all files mentioned
    in the given section as a bullet list.
    download them to dirname.
    return list of successfull file names
    """
    # Page names come from the section's bullet list, markup already stripped.
    filenames = linesFromBulletlist(
        getSectionLines(text, section))
    print "filenames: ", filenames
    r = []
    for f in filenames:
        tmp = f.strip()
        print "trying to download: ", tmp, '\n'
        if tmp:
            if downloadFlag:
                # Fetch the page via the wiki connector; any failure is
                # silently skipped so one bad page does not abort the batch.
                try:
                    wikiconnector.download(
                        target=tmp,
                        output=dirname,
                        embedded_elements=embeddedElemetsFlag)
                    r.append(f)
                except:
                    pass
            else:
                # no download, but does the file already exist locally?
                fname = os.path.join(dirname, tmp + '.md')
                print "tmp: >>>", tmp, "<<<", dirname, fname
                try:
                    fp = open(fname, 'r')
                    r.append(tmp)
                    fp.close()
                except:
                    pass
    print "reutrning: ", r
    return r
def getBullets(text, section):
    """Return the bullet items of the given section, markup stripped."""
    section_lines = getSectionLines(text, section)
    return linesFromBulletlist(section_lines)
def getProperties(text, section):
    """Parse a section's bullet list of "key: value" items.

    Items that do not split into exactly one key and one value are
    dropped; the rest are returned stripped, as (key, value) tuples.
    """
    properties = []
    for item in getBullets(text, section):
        parts = item.split(':')
        if len(parts) == 2:
            properties.append((parts[0].strip(), parts[1].strip()))
    return properties
| {
"repo_name": "hkarl/mw2pdf",
"path": "section.py",
"copies": "1",
"size": "3372",
"license": "bsd-3-clause",
"hash": 1623517709730858800,
"line_mean": 23.7941176471,
"line_max": 84,
"alpha_frac": 0.528173191,
"autogenerated": false,
"ratio": 3.943859649122807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49720328401228064,
"avg_score": null,
"num_lines": null
} |
"""a small IOC container"""
#TODO services (parallel for same runlevel)
import inspect
class Context(object):
    """A bare namespace onto which resources are attached by dotted path
    (e.g. key "db.pool" becomes context.db.pool)."""

    @classmethod
    def set_attribute(cls, obj, key, val):
        """Recursively create attributes on obj along the dotted `key`,
        storing `val` at the final path component."""
        head, sep, rest = key.partition('.')
        if not sep:
            # Plain key without dots: store directly.
            setattr(obj, key, val)
        elif hasattr(obj, head):
            # Intermediate namespace already exists: descend into it.
            cls.set_attribute(getattr(obj, head), rest, val)
        else:
            # Create the missing intermediate namespace, fill it, attach it.
            node = Context()
            cls.set_attribute(node, rest, val)
            setattr(obj, head, node)

context = Context()
class Container(object):
    """A very small IOC container: a dict of named resources plus
    reflection-driven statistics gathering and service startup."""

    def __init__(self):
        self.resources = {}

    def add(self, key, resource):
        """Register `resource` under `key`."""
        self.resources[key] = resource

    def configure(self, parameters, prefix = 'config'):
        """Add every key/value pair of `parameters` as a resource, with
        its key prefixed by `prefix` plus a dot. A falsy dict is a no-op."""
        if not parameters:
            return
        for key, val in parameters.items():
            self.resources[prefix + '.' + key] = val

    def finalize(self):
        """Expose the container and every registered resource on the
        module-level `context` object, keyed by their (dotted) names."""
        Context.set_attribute(context, 'container', self)
        for key in sorted(self.resources.keys()):
            Context.set_attribute(context, key, self.resources[key])

    def _find_members(self, filter):
        """Yield (resource name, resource, regex matches, bound method) for
        every method of every resource whose name matches `filter`."""
        import re
        for name, resource in self.resources.items():
            for member_name, member in inspect.getmembers(resource, inspect.ismethod):
                hits = re.findall(filter, member_name)
                if hits:
                    yield name, resource, hits, getattr(resource, member_name)

    def statistics(self):
        """Collect statistics from every resource defining __statistics__."""
        stats = {}
        for name, _resource, _hits, member in self._find_members('__statistics__'):
            stats[name] = member()
        return stats

    def start(self):
        """Start services by calling their __startXX__ methods; methods in
        the same runlevel are meant to start concurrently (TODO)."""
        for _name, _resource, hits, member in self._find_members(r'__start(\d\d)__'):
            try:
                level = int(hits[0])
            except:
                level = -1
            if level != -1:
                # Start the service.
                member()

container = Container()
| {
"repo_name": "toymachine/concurrence",
"path": "lib/concurrence/application.py",
"copies": "2",
"size": "2706",
"license": "bsd-3-clause",
"hash": -1658141946087668500,
"line_mean": 34.1428571429,
"line_max": 104,
"alpha_frac": 0.5450849963,
"autogenerated": false,
"ratio": 4.414355628058727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5959440624358727,
"avg_score": null,
"num_lines": null
} |
"""A small library for interacting with the pactl command line tool.
This library allows for a few simple interactions with linux pulseaudio, like
viewing active sinks (sound outputs, usually loosely correlating with speakers)
and their relative volumes, and setting volume absolutely or relatively.
Most functions take a sink argument which defaults to retrieving and using
the first 'RUNNING' sink found. This uses a full subprocess, so users doing
repeated calls are recommended to get a sink object (eg. through active_sink())
and pass that to successive calls.
The pactl tool will need to be installed for this library to work. It is commonly
installed by default when PulseAudio is installed, and on many major distros.
"""
import re
import subprocess
class Sink(object):
    """A thin wrapper around one PulseAudio sink's parsed properties."""

    # Property name -> (converter, regex) used while parsing `pactl list sinks`.
    LIST_PATTERNS = {
        'state': (str, 'State: (\w+)'),
        'volume': (int, 'Volume: 0: +(\d+)%'),
        'name': (str, 'Name: (.+)'),
    }

    __slots__ = tuple(LIST_PATTERNS)

    def __init__(self, **kwargs):
        for attr_name, value in kwargs.items():
            setattr(self, attr_name, value)

    def set_volume(self, value):
        """Set an absolute volume (0-100); raises ValueError outside range."""
        if value < 0 or value > 100:
            raise ValueError("Volume must be between 0 and 100")
        command = ['pactl', 'set-sink-volume', self.name, '{}%'.format(value)]
        return subprocess.check_call(command)

    def inc_volume(self, delta=1):
        """Adjust volume by delta (may be negative), clamped to [0, 100].

        Returns the new volume."""
        clamped = min(100, max(0, self.volume + delta))
        self.set_volume(clamped)
        return clamped
def list_sinks():
    """Iterate over the system's PulseAudio sinks as Sink objects."""
    raw = subprocess.check_output(['pactl', 'list', 'sinks']).decode()
    if not raw.strip():
        return
    props = {}
    # The very first line opens a sink section, so scanning starts at line 2;
    # each later "Sink #" line closes the previous section.
    for line in raw.split('\n')[1:]:
        if line.startswith('Sink #'):
            yield Sink(**props)
            props = {}
            continue
        # While it's close to YAML output, it isn't, and doesn't parse at
        # all — so scrape the few properties we care about with regexes.
        for prop_name, (convert, pattern) in Sink.LIST_PATTERNS.items():
            found = re.findall(pattern, line)
            if found:
                props[prop_name] = convert(found[0])
    yield Sink(**props)
def active_sink():
    """Return the first PulseAudio sink whose state is RUNNING, or None."""
    running = (sink for sink in list_sinks() if sink.state == 'RUNNING')
    return next(running, None)
| {
"repo_name": "bethebunny/powermate",
"path": "pactl.py",
"copies": "1",
"size": "2412",
"license": "apache-2.0",
"hash": 1586372680451768800,
"line_mean": 30.7368421053,
"line_max": 81,
"alpha_frac": 0.6608623549,
"autogenerated": false,
"ratio": 3.693721286370597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48545836412705967,
"avg_score": null,
"num_lines": null
} |
"""A small library for parsing HTML."""
# standard library
from html.parser import HTMLParser
class PageParser(HTMLParser):
    '''
    HTML parser that arranges a document into a nested dict tree. Each
    node is a plain dict with keys 'type' (tag name, or None for the
    synthetic root), 'attrs' (attribute dict), 'nodes' (children), and
    'data' (accumulated text). The superclass (a Python built-in) does
    all the hard lexing work.
    '''

    @staticmethod
    def parse(html):
        '''Parse an HTML string and return the synthetic root node.'''
        instance = PageParser()
        instance.feed(html)
        return instance.get_root_node()

    @staticmethod
    def banlist():
        '''Commonly unclosed tags; these are ignored entirely.'''
        return ('br', 'img', 'meta')

    @staticmethod
    def new_node(type):
        '''Return a fresh, empty node of the HTML tree.'''
        return {'type': type, 'attrs': {}, 'nodes': [], 'data': ''}

    @staticmethod
    def filter_all(node, filters):
        '''Apply a sequence of (type, index) filters, descending one level
        per filter.'''
        for spec in filters:
            node = PageParser.filter(node, *spec)
        return node

    @staticmethod
    def filter(node, type, index=0):
        '''Return the index-th direct child of the given type, or None.'''
        same_type = (child for child in node['nodes'] if child['type'] == type)
        for position, child in enumerate(same_type):
            if position == index:
                return child
        return None

    def __init__(self):
        HTMLParser.__init__(self)
        self.root = PageParser.new_node(None)
        self.stack = [self.root]
        self.indent = 0

    def get_root_node(self):
        '''After parsing, return the abstract root node (which contains the
        html node).'''
        return self.root

    def handle_starttag(self, tag, attrs):
        '''Inherited - push a new child node for each non-banned start tag.'''
        if tag in PageParser.banlist():
            return
        node = PageParser.new_node(tag)
        node['attrs'].update(attrs)
        self.stack[-1]['nodes'].append(node)
        self.stack.append(node)

    def handle_endtag(self, tag):
        '''Inherited - pop the current node for each non-banned end tag.'''
        if tag not in PageParser.banlist():
            self.stack.pop()

    def handle_data(self, data):
        '''Inherited - append raw text to the node currently being built.'''
        self.stack[-1]['data'] += data
| {
"repo_name": "undefx/delphi-epidata",
"path": "src/acquisition/twtr/pageparser.py",
"copies": "2",
"size": "2208",
"license": "mit",
"hash": -8097969477504852000,
"line_mean": 26.0253164557,
"line_max": 86,
"alpha_frac": 0.5987318841,
"autogenerated": false,
"ratio": 3.846689895470383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5445421779570383,
"avg_score": null,
"num_lines": null
} |
"""A small library for rich input.
Adds validation and casting to builtins.input."""
import builtins
from functools import wraps
class TooManyAttempts(Exception):
    """Raised when a limited function is called more often than allowed."""

def limiter(limit, error=TooManyAttempts):
    """A decorator that limits the amount of times a function can be called.

    The decorated function can be called `limit` times; every call after
    that raises `error` (TooManyAttempts by default).
    """
    def decorating_function(user_function):
        remaining = limit

        @wraps(user_function)
        def wrapper(*args, **kwargs):
            nonlocal remaining
            if not remaining:  # the final countdown!
                raise error
            remaining -= 1
            return user_function(*args, **kwargs)
        return wrapper
    return decorating_function
def input(prompt, validation=None, error=None, input=builtins.input):
    """Prompt until `validation` accepts (and converts) the entered value.

    `validation` both validates and casts the raw input; raising ValueError
    inside it marks the input as invalid, which triggers `error` and asks
    again.
    """
    if validation is None:
        validation = lambda v: True
    if error is None:
        error = lambda v: print("{} is not a valid input.".format(v))
    while True:
        raw = input(prompt)
        try:
            converted = validation(raw)
        except ValueError:
            error(raw)
        else:
            return converted
def limited_input(prompt, limit=3, **kwargs):
    """Prompt for a value, allowing at most `limit` attempts (3 by default)."""
    base_input = kwargs.get('input', builtins.input)
    kwargs['input'] = limiter(limit)(base_input)
    return input(prompt, **kwargs)
return input(prompt, **kwargs)
| {
"repo_name": "bmispelon/putnik",
"path": "putnik.py",
"copies": "1",
"size": "1787",
"license": "bsd-2-clause",
"hash": -4161650602700152000,
"line_mean": 28.7833333333,
"line_max": 79,
"alpha_frac": 0.6373810856,
"autogenerated": false,
"ratio": 4.5470737913486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.56844548769486,
"avg_score": null,
"num_lines": null
} |
""" A small library of time and data transformation functions.
Author: Alastair Hughes
"""
from constants import MAX_FRAMES_PER_DAY, MIN_FRAMES_PER_DAY
import math
# Transformation functions:
# These accept a map 'values' of the form values[row index][patch no], and
# returns another map 'values' suitably transformed.
# These are applied to the data as preprocessing. For instance,
# time_delta_value returns the delta between the current and previous value.
# Time delta uses the delta between a value and the previous day's result.
def time_delta_value(values):
    """Replace each value with its change since the previous day.

    `values` maps day index -> {patch: value}. A day without a predecessor
    gets a delta of 0 for every patch.
    """
    deltas = {}
    for day in values:
        deltas[day] = {}
        previous = values.get(day - 1)
        for patch, value in values[day].items():
            # No earlier day: delta against itself, i.e. zero.
            baseline = value if previous is None else previous[patch]
            deltas[day][patch] = value - baseline
    return deltas
# Time culm acculumates the value for a specific patch as time goes on.
def time_culm_value(values):
    """Accumulate each patch's value over time (running sum per patch).

    Days are processed in sorted order; a day without a predecessor starts
    its patches from zero.
    """
    totals = {}
    for day in sorted(values.keys()):
        earlier = totals.get(day - 1)
        totals[day] = {}
        for patch, value in values[day].items():
            prior = 0 if earlier is None else earlier[patch]
            totals[day][patch] = value + prior
    return totals
# Field delta uses the relative delta between a value and the maximum and
# minimums on one specific day.
def field_delta_value(values):
    """Rescale each day's values into [0, 1] relative to that day's min/max.

    A day where every patch holds the same value collapses to 0.
    """
    scaled = {}
    for day, patches in values.items():
        low = min(patches.values())
        high = max(patches.values())
        span = high - low
        scaled[day] = {}
        for patch, value in patches.items():
            # Zero span (all values equal) would divide by zero: use 0.
            scaled[day][patch] = (value - low) / span if span else 0
    return scaled
# Per field normalises the data relative to specific fields.
def per_field_value(values, fields):
    """ This normalises all patches relative to their field.

    'fields' is a map of field numbers to a list of patches in that field.
    Each patch value is scaled into [0, 1] using the minimum and maximum
    observed anywhere in its field (across all days); a constant field
    maps to 0.
    """
    normalised = {}
    for field in fields.values():
        # Extremes of this field across every day.
        samples = [values[day][patch] for day in values for patch in field]
        lowest = min(samples)
        span = max(samples) - lowest
        for day in values:
            row = normalised.setdefault(day, {})
            for patch in field:
                if span == 0:
                    # A constant field normalises to zero.
                    row[patch] = 0
                else:
                    row[patch] = float(values[day][patch] - lowest) / span
    return normalised
# Exponential scaling.
def exponential_value(values, v = math.e):
    """ Scale every patch value to v raised to that value (e by default). """
    return {day: {patch_no: v ** value for patch_no, value in patches.items()}
            for day, patches in values.items()}
# Logarithmic scaling.
def log_value(values, v = math.e):
    """ Scale every patch value to its logarithm in base v (e by default). """
    return {day: {patch_no: math.log(value, v)
                  for patch_no, value in patches.items()}
            for day, patches in values.items()}
# Filter by patch_no.
def patch_filter(values, patches):
    """ Keep only the given patch numbers, for every day.

    Raises KeyError if a requested patch is missing on some day.
    """
    return {day: {patch_no: row[patch_no] for patch_no in patches}
            for day, row in values.items()}
# Time mapping functions:
# Basic time map functions; these are functions that accept a list of Values
# and use that to generate a map from a frame to a particular index in the
# values.
def map_basic(values):
    """ Direct map from frames to dates.

    Every model must share the same set of dates, otherwise the frame
    indices would not line up across models.

    values: a list of Value objects, each exposing .model.dates.
    Returns a dict mapping each date to itself (one frame per date).
    Raises ValueError if values is empty or the models disagree on dates.
    """
    one_value = None
    for value in values:
        if one_value is None:  # was `== None`; identity test is the idiom
            one_value = value
        elif one_value.model.dates != value.model.dates:
            raise ValueError("All models must have the same set of dates!")
    if one_value is None:
        # Previously an empty list fell through to an AttributeError on
        # None.model; raise something meaningful instead.
        raise ValueError("At least one value is required!")
    return {i: i for i in one_value.model.dates}
def map_delta(values):
    """ Map from frames to dates, with the frame count per date largely
        changing with respect to the maximum delta that day.

        Each date receives between MIN_FRAMES_PER_DAY and
        MAX_FRAMES_PER_DAY frames, proportional to how large that day's
        biggest value change is relative to the other days.
    """
    # TODO: This is not at all smoothed!
    # Validate the values (map_basic raises if the models' dates differ).
    # map_basic returns a {date: date} dict, so iterating it yields dates.
    dates = map_basic(values)
    def max_day(v, date):
        """ Return the max value for a given date.
            If the given date does not exist, return the max delta for
            the date +1 in the future.
        """
        if date not in v.values:
            date += 1
        return max((abs(v.values[date][patch]) \
            for patch in v.values[date]))
    # Generate a map of maximum deltas per day.
    # NOTE: for the first date, max_day(v, date - 1) falls back to the
    # current date (via the `date += 1` above), so that day's delta is 0.
    max_deltas = {}
    for date in dates:
        max_deltas[date] = max((abs(max_day(v, date) - max_day(v, date - 1)) \
            for v in values))
    # Find the minimum and maximum deltas (positive values only!)
    max_delta = abs(max(max_deltas.values()))
    min_delta = abs(min(max_deltas.values()))
    frames = {}
    cur_frame = 0
    # We assume that dates can be sorted sensibly.
    # Iterate through the dates, adding them and increasing the number of
    # frames for days with large deltas.
    for date in sorted(dates.values()):
        try:
            # Scale this day's delta into [0, 1] relative to the extremes.
            relative_delta = (max_deltas[date]-min_delta) / (max_delta-min_delta)
        except ZeroDivisionError:
            # All days have the same delta; treat them all as minimal.
            relative_delta = 0
        frame_count = int((MAX_FRAMES_PER_DAY - MIN_FRAMES_PER_DAY) \
            * relative_delta + MIN_FRAMES_PER_DAY)
        # Emit `frame_count` consecutive frames that all show this date.
        for i in range(frame_count):
            frames[cur_frame] = date
            cur_frame += 1
    return frames
# Map from time warp type to the actual function.
# Keys are the names by which a warp can be selected; values are the
# frame-map generators defined above.
times = {'basic': map_basic,
    'delta': map_delta}
| {
"repo_name": "PlantandFoodResearch/IrrigationAnimation",
"path": "transforms.py",
"copies": "1",
"size": "6058",
"license": "mit",
"hash": -8409265461337735000,
"line_mean": 34.0173410405,
"line_max": 81,
"alpha_frac": 0.6041597887,
"autogenerated": false,
"ratio": 3.890815671162492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4994975459862492,
"avg_score": null,
"num_lines": null
} |
'''A small module for testing Serpent code, for use with py.test.'''
import enum
import operator
from types import MethodType
from warnings import simplefilter; simplefilter('ignore')
from ethereum import tester as t
from ethereum.utils import coerce_to_int
# The comparison kinds a contract test may request.
Assert = enum.Enum("Assert", "eq,ne,lt,gt,le,ge")

# Each Assert member's name matches an operator-module function, so the
# lookup table can be derived directly from the enum.
compare_ops = {member: getattr(operator, member.name) for member in Assert}
class ContractTestError(Exception):
    """Raised when a contract test receives invalid arguments or options."""
    pass
class Account(object):
    """A class for easily dealing with accounts in ethereum.tester."""
    def __init__(self, rawaddr, privkey):
        # NOTE(review): str.encode('hex') exists only in Python 2 -- this
        # module appears to target Python 2 (see also `long` used below
        # in ContractTest).
        self.hexaddr = rawaddr.encode('hex')
        # Both names are aliases for the same key object.
        self.privkey = self.private_key = privkey
        self.address = rawaddr
        # Integer form of the raw address, via ethereum.utils.coerce_to_int.
        self.as_int = coerce_to_int(rawaddr)
    def __str__(self):
        # Short human-readable form, e.g. "<account: 0xdeadbeef...>".
        return "<account: 0x{}>".format(self.hexaddr)
    def __repr__(self):
        return "Account(rawaddr={!r}, privkey={!r})".format(self.address, self.privkey)
# One Account wrapper per (address, private key) pair supplied by
# ethereum.tester.  NOTE(review): under Python 2 map() returns a list;
# under Python 3 this would be a one-shot iterator -- confirm the target.
ACCOUNTS = map(Account, t.accounts, t.keys)
def tester(func, name):
    """Wrap an AbiContract method in a py.test-style test function.

    func -- the contract method to call (an AbiContract 'kall' binding).
    name -- the name given to the generated test function.
    Returns a function that validates its inputs, calls *func*, and
    asserts the requested comparison against the result.
    """
    def test_func(args=(), kwds={}, asserts=Assert.eq, compare=None):
        """Test function {!r}.

        Keyword Arguments:
        args -- a tuple or list of arguments to pass to the function.
        kwds -- a dict of keywords to pass to the function.
        asserts -- a member of the Assert enum that specifies the comparison to make.
        compare -- a value to compare against the result of the function call.
        """
        # (The mutable default kwds={} is safe: it is only read, never mutated.)
        for arg in args:
            if not isinstance(arg, ContractTest.arg_types):
                err_msg = "Invalid argument type, must be int, long, str, list, or tuple: <arg: {}>; <type: {}>"
                raise ContractTestError(err_msg.format(arg, type(arg)))
        if not isinstance(kwds, dict):
            raise ContractTestError("'kwds' argument must be a dict.")
        if asserts not in Assert:
            raise ContractTestError("Invalid value for 'asserts' keyword: {}".format(asserts))
        if not isinstance(compare, ContractTest.allowed_types):
            err_msg = "Invalid type for 'compare', must be int, str, list, or tuple: {!r}"
            raise ContractTestError(err_msg.format(compare))
        result = func(*args, **kwds)
        comparison = compare_ops[asserts]
        assert comparison(result, compare)
    # Give the wrapper the contract method's name and a specialised docstring.
    # (Fix: the docstring previously documented an 'expects' keyword that
    # does not exist -- the parameter is named 'compare'.)
    test_func.__name__ = name
    test_func.__doc__ = test_func.__doc__.format(name)
    return test_func
class ContractTest(object):
    """A class for testing the results of Serpent contracts."""
    # TODO: replace pyethereum test state with geth or parity based testnet.
    # A single blockchain state shared by all ContractTests that opt in.
    global_state = t.state()
    # Types accepted for the 'compare' argument of generated tests.
    # NOTE(review): `long` exists only in Python 2.
    allowed_types = int, long, str, list, type(None)
    # Types accepted for positional arguments of generated tests.
    arg_types = int, long, str, list, tuple
    def __init__(self, code, global_state=False):
        """Compile and deploy *code*, then attach test wrappers.

        code -- Serpent source code for the contract under test.
        global_state -- if True, deploy into the shared class-level state;
            otherwise a fresh private state is created.
        """
        self.code = code
        if global_state:
            # Bug fix: previously self.state was never assigned when the
            # shared state was requested, so the attribute access below
            # raised AttributeError.
            self.state = ContractTest.global_state
        else:
            self.state = t.state()
        start_gas = self.state.block.gas_used
        self.contract = self.state.abi_contract(self.code)
        self.contractAccount = Account(self.contract.address, None)
        # Gas consumed by deploying the contract.
        self.gas_cost = self.state.block.gas_used - start_gas
        # Wrap every contract method (ABI 'kall' bindings) in a tester().
        for name, obj in vars(self.contract).items():
            if isinstance(obj, MethodType) and obj.__func__.__name__ == 'kall':
                setattr(self, name, tester(obj, name))
| {
"repo_name": "ChrisCalderon/SerpentTests",
"path": "serpent_tests/__init__.py",
"copies": "1",
"size": "3458",
"license": "mit",
"hash": -3407190268443109400,
"line_mean": 34.6494845361,
"line_max": 112,
"alpha_frac": 0.6353383459,
"autogenerated": false,
"ratio": 3.742424242424242,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9868396149248858,
"avg_score": 0.001873287815076753,
"num_lines": 97
} |
"""A small Python 3 module that keeps track of elapsed time
and its current state"""
import time
import math
__version__ = '0.1'
class Clocker(object):
    """The time watcher class that keeps track of elapsed time and state.

    A Clocker moves through the modes 'init' -> 'running' -> 'stopped'
    (and may be restarted from 'stopped'); reset() moves it to 'reset'
    and clears the counters.  Elapsed time is stored internally in
    floating point seconds.
    """
    def __init__(self):
        self.mode = 'init'   # current state of the clocker
        self.startTime = 0   # epoch seconds recorded by start()
        # self.elapsed will always be in floating point seconds
        self.elapsed = 0

    def start(self):
        """Begin (or restart) timing and return the start time.

        Returns None without changes when the clocker is already running
        or in any other mode where starting makes no sense (e.g. 'reset').
        Note: restarting after stop() does not accumulate the previously
        recorded elapsed time; timing begins afresh.
        """
        if self.mode in ('init', 'stopped'):
            self.startTime = time.time()
            self.mode = 'running'
            return self.startTime
        return None

    def stop(self):
        """Stop timing and return the elapsed time in seconds.

        Returns None (without changes) when the clocker is not running.
        """
        if self.mode != 'running':
            return None
        # Capture the elapsed time *before* leaving 'running' mode, since
        # __elapsed_time only recomputes while running.
        elapsed = self.__elapsed_time()
        self.mode = 'stopped'
        # Bug fix: previously nothing was returned despite the docstring.
        return elapsed

    def reset(self):
        """Change the running state to 'reset' and clear the counters.

        Only meaningful from 'running' or 'stopped'; otherwise nothing
        changes.  Always returns None.
        """
        if self.mode in ('running', 'stopped'):
            self.mode = 'reset'
            self.startTime = 0
            self.elapsed = 0
        return None

    def current_mode(self):
        """Return the current running mode of the clocker."""
        return self.mode

    def __elapsed_time(self):
        """Update (while running) and return the current elapsed time."""
        if self.mode == 'running':
            self.elapsed = time.time() - self.startTime
        # In any other state the stored value is already up to date.
        return self.elapsed

    def elapsed_seconds(self):
        """Return the elapsed time in seconds, rounded up to 3 decimals."""
        if self.mode in ('running', 'stopped'):
            return self._round_three_decimals(self.__elapsed_time())
        if self.mode == 'reset':
            # After a reset the counter is always 0; no rounding needed.
            return self.__elapsed_time()
        return None

    def elapsed_minutes(self):
        """Return the elapsed time in minutes, rounded up to 2 decimals."""
        if self.mode in ('running', 'stopped'):
            return self._round_two_decimals(self.__elapsed_time() / 60)
        if self.mode == 'reset':
            return self.__elapsed_time()
        return None

    def elapsed_hours(self):
        """Return the elapsed time in hours, rounded up to 2 decimals."""
        if self.mode in ('running', 'stopped'):
            return self._round_two_decimals(self.__elapsed_time() / 3600)
        if self.mode == 'reset':
            return self.__elapsed_time()
        return None

    def _round_two_decimals(self, float_number):
        """Round a floating point number UP to two decimal places."""
        return math.ceil(float_number * 100) / 100

    def _round_three_decimals(self, float_number):
        """Round a floating point number UP to three decimal places."""
        return math.ceil(float_number * 1000) / 1000
| {
"repo_name": "Fuchida/clocker",
"path": "src/clocker.py",
"copies": "1",
"size": "3608",
"license": "mit",
"hash": -1216824103490420000,
"line_mean": 30.8,
"line_max": 85,
"alpha_frac": 0.5579268293,
"autogenerated": false,
"ratio": 4.5786802030456855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001044932079414838,
"num_lines": 110
} |
# A small Python script to load the text files
# output by the Haskell code, and compute log(Z) and H,
# as well as create some plots.
# Imports
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as rng
import pandas as pd
# Logsumexp function
def logsumexp(x):
    """Numerically stable log(sum(exp(x))).

    The maximum is factored out before exponentiating so very large (or
    very negative) inputs do not overflow/underflow.
    """
    shift = np.max(x)
    return np.log(np.sum(np.exp(x - shift))) + shift
def loadtxt_rows(filename, rows, single_precision=False):
    """
    Load only certain rows of a comma-separated numeric file.

    filename -- path of the CSV file to read.
    rows -- a set (or other container) of 0-based data-row indices to keep.
        Lines whose first cell is exactly "#" are skipped and do not count
        as data rows.
    single_precision -- store values as float32 instead of float64.

    Returns a dict mapping each requested row index to a numpy array of
    that row's values, plus the key "ncol" giving the column count of the
    first data line (None if the file had no data).

    Reading stops at the first line whose column count differs from the
    first data line's (this is how a trailing header/footer is ignored).
    """
    results = {}
    ncol = None  # column count, fixed by the first non-comment line
    i = 0        # index of the current data row
    # Bug fixes vs the original: the file is now closed (with-block), and
    # EOF is detected explicitly -- previously an empty or single-column
    # file made the readline() loop spin forever.
    with open(filename, "r") as f:
        while True:
            line = f.readline()
            if not line:
                break  # end of file
            cells = line.split(",")
            # Quit when you see a different number of columns.
            if ncol is not None and len(cells) != ncol:
                break
            # Non-comment lines
            if cells[0] != "#":
                # If it's the first one, get the number of columns.
                if ncol is None:
                    ncol = len(cells)
                # Otherwise, include in results.
                if i in rows:
                    dtype = "float32" if single_precision else "float64"
                    results[i] = np.array([float(cell) for cell in cells],
                                          dtype=dtype)
                i += 1
    results["ncol"] = ncol
    return results
def postprocess(single_precision=False, temperature=1.0):
    """Compute posterior weights and samples from nested sampling output.

    Reads nested_sampling_info.csv (and selected rows of
    nested_sampling_parameters.csv), writes posterior_weights.txt and
    posterior_sample.csv, and displays diagnostic plots (plt.show() blocks).

    single_precision -- store parameter samples as float32 instead of float64.
    temperature -- divides ln(L) when forming posterior weights.
        NOTE(review): logZ below is always computed at temperature 1;
        confirm whether that is intended.
    """
    # Load the files
    #sample = pd.read_csv("nested_sampling_parameters.csv", header=None)
    sample_info = pd.read_csv("nested_sampling_info.csv")
    # In case one is shorter than the other, truncate.
    #L = min(sample.shape[0], sample_info.shape[0])
    #sample = sample.iloc[0:L, :]
    #sample_info = sample_info.iloc[0:L, :]
    # Normalise prior weights (work in log space throughout).
    logw = sample_info["ln_prior_weight"]
    logw = logw - logsumexp(logw)
    # Calculate logZ, posterior weights, and H
    logZ = logsumexp(logw + sample_info["ln_l"])
    logW = logw + sample_info["ln_l"]/temperature - logZ
    W = np.exp(logW - logsumexp(logW))
    # Save posterior weights
    # NOTE(review): posterior_weights.txt is written again further down
    # with the rescaled W -- the second write overwrites this one.
    np.savetxt("posterior_weights.txt", W)
    # Create posterior samples
    # Rescale so max(W) == 1, as required by the rejection loop below.
    W /= W.max()
    Wnormed = W/W.sum()
    # Effective sample size via the entropy of the normalised weights;
    # 1E-300 guards log(0).
    ESS = int(np.exp(-np.sum(Wnormed*np.log(Wnormed + 1E-300))))
    print("Effective sample size = {ESS}".format(ESS=ESS))
    # Make the standard NS plots
    plt.subplot(2,1,1)
    plt.plot(sample_info["ln_x"], sample_info["ln_l"], "-", markersize=3)
    plt.ylabel("$\\ln(L)$")
    plt.title("Likelihood curve")
    # Set y lower limit by excluding bottom 5%
    ln_l_sorted = np.sort(sample_info["ln_l"])
    lower_limit = ln_l_sorted[int(0.05*len(ln_l_sorted))]
    upper_limit = ln_l_sorted[-1]
    upper_limit += 0.05*(upper_limit - lower_limit)
    plt.ylim([lower_limit, upper_limit])
    plt.subplot(2,1,2)
    plt.plot(sample_info["ln_x"], W, "-", markersize=3)
    plt.xlabel("$\\ln(X)$")
    plt.ylabel("$W$")
    plt.title("Posterior weights")
    # Format and display figures
    plt.tight_layout(h_pad=1.0)
    plt.show()
    # Rejection-sample ESS row indices in proportion to the weights
    # (duplicates are possible).
    rows = np.empty(ESS, dtype="int64")
    for i in range(0, ESS):
        while True:
            which = np.random.randint(sample_info.shape[0])
            if np.random.rand() <= W[which]:
                break
        rows[i] = which
    # Load only the selected parameter rows to keep memory use down.
    sample = loadtxt_rows("nested_sampling_parameters.csv",
                          set(rows), single_precision)
    posterior_sample = None
    if single_precision:
        posterior_sample = np.empty((ESS, sample["ncol"]), dtype="float32")
    else:
        posterior_sample = np.empty((ESS, sample["ncol"]))
    for i in range(0, ESS):
        posterior_sample[i, :] = sample[rows[i]]
    np.savetxt("posterior_weights.txt", W)
    if single_precision:
        np.savetxt("posterior_sample.csv",
                   posterior_sample, delimiter=",", fmt="%.7e")
    else:
        np.savetxt("posterior_sample.csv", posterior_sample, delimiter=",")
# Run the postprocessing (with default settings) when invoked as a script.
if __name__ == "__main__":
    postprocess()
| {
"repo_name": "eggplantbren/NestedSampling.hs",
"path": "showresults.py",
"copies": "2",
"size": "4141",
"license": "mit",
"hash": 506116487474145200,
"line_mean": 27.3630136986,
"line_max": 78,
"alpha_frac": 0.5759478387,
"autogenerated": false,
"ratio": 3.4251447477253927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5001092586425393,
"avg_score": null,
"num_lines": null
} |
# a small script which shows some of the possibilities of the
# LKOpticalTrack filter
# LKOptical is a good low-resource algorithm for tracking points through a
# video stream
# NOTE(review): this is a Jython/Python 2 script (print statements) that
# runs inside MyRobotLab, which injects `Runtime` and the `msg_*` message
# objects at runtime.
from org.myrobotlab.opencv import OpenCVFilterLKOpticalTrack

# create services
opencv = Runtime.createAndStart("opencv","OpenCV")

# add listener so data comes back to python
opencv.addListener("publishOpenCVData", "python", "input")

# Reuse the LK filter if one is already attached, otherwise create and
# add a fresh one.
lkfilter = opencv.getFilter("LKOpticalTrack")
if (lkfilter == None):
    lkfilter = OpenCVFilterLKOpticalTrack()
    opencv.addFilter(lkfilter)

# other options
# if you want to get pixel values instead of floats
# floats are nice because the value doesnt change even if the
# resolution does
# lkfilter.useFloatValues=False # default is true
# lkfilter.needTrackingPoints=True #default is false

# Seed a tracking point; 0.5,0.5 is the centre when float coordinates
# (fractions of the frame size) are in use.
lkfilter.samplePoint(0.5,0.5)# programmatically sets a point

# a set of points can come back from LKOptical
# Callback invoked for each "publishOpenCVData" message registered above.
def input ():
    points = msg_opencv_publishOpenCVData.data[0].getPoints()
    if (not points == None):
        print points
        if (points.size() > 0):
            # Print the coordinates of the first tracked point.
            print points.get(0).x, points.get(0).y

# Start grabbing frames; the listener above fires for each one.
opencv.capture()
| {
"repo_name": "sujitbehera27/MyRoboticsProjects-Arduino",
"path": "src/resource/Python/examples/OpenCV.LKOpticalTrack.py",
"copies": "5",
"size": "1140",
"license": "apache-2.0",
"hash": -6641440500482467000,
"line_mean": 29.8108108108,
"line_max": 91,
"alpha_frac": 0.7552631579,
"autogenerated": false,
"ratio": 3.3827893175074184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6638052475407419,
"avg_score": null,
"num_lines": null
} |
# A small SOCP formulation of minimizing the inverse of the top-left entry
# of the positive 2x2 matrix [1,2; 3,4]. This example originally caused the
# 30th SOCP IPM iteration to fail within the symmetric tridiagonal eigensolver
# and was submitted by Steven Diamond. The original script was:
#
# from cvxpy import *
# x = Variable(2,2)
# expr = inv_pos(x)
# prob = Problem(Minimize(expr[0,0]), [x == [[1,2],[3,4]] ])
# which should have a solution of 1. The problem data used
# in the following script can be found by then running
#
# data = prob.get_problem_data(ELEMENTAL)
# print( data['A'] )
# print( data['G'] )
# print( data['b'] )
# print( data['c'] )
# print( data['h'] )
#
# which has four SOCs of size three.
# NOTE(review): Python 2 script (uses xrange) built against Elemental's
# Python interface.
import El

El.Initialize()

# Cone metadata for the 12 constraint rows: four second-order cones of
# size 3 (one per c in 0..3).  orders[i] is the size of the cone holding
# row i, firstInds[i] is the index of that cone's first row, and
# labels[i] is the cone's label.
orders = El.DistMultiVec(El.iTag)
firstInds = El.DistMultiVec(El.iTag)
labels = El.DistMultiVec(El.iTag)
orders.Resize(12,1)
firstInds.Resize(12,1)
labels.Resize(12,1)
for c in xrange(0,4):
    for i in xrange(0,3):
        orders.Set(3*c+i,0,3)
        firstInds.Set(3*c+i,0,3*c)
        labels.Set(3*c+i,0,c)
# Equality-constraint matrix A (4x8): an identity on x[0:4], pinning
# those entries to b.  Entries are queued locally, then processed.
A = El.DistSparseMatrix(El.dTag)
A.Resize(4,8)
A.Reserve(4)
A.QueueUpdate( 0, 0, 1 )
A.QueueUpdate( 1, 1, 1 )
A.QueueUpdate( 2, 2, 1 )
A.QueueUpdate( 3, 3, 1 )
A.ProcessLocalQueues()

# Cone-constraint matrix G (12x8) with 16 nonzeros; each group of three
# rows forms one of the second-order cones declared above.
G = El.DistSparseMatrix(El.dTag)
G.Resize(12,8)
G.Reserve(16)
G.QueueUpdate( 0, 0, -1 )
G.QueueUpdate( 1, 0, -1 )
G.QueueUpdate( 3, 1, -1 )
G.QueueUpdate( 4, 1, -1 )
G.QueueUpdate( 6, 2, -1 )
G.QueueUpdate( 7, 2, -1 )
G.QueueUpdate( 9, 3, -1 )
G.QueueUpdate( 10, 3, -1 )
G.QueueUpdate( 0, 4, -1 )
G.QueueUpdate( 1, 4, 1 )
G.QueueUpdate( 3, 5, -1 )
G.QueueUpdate( 4, 5, 1 )
G.QueueUpdate( 6, 6, -1 )
G.QueueUpdate( 7, 6, 1 )
G.QueueUpdate( 9, 7, -1 )
G.QueueUpdate( 10, 7, 1 )
G.ProcessLocalQueues()
# Right-hand side of the equality constraints: the entries of the
# matrix [1,2; 3,4] from the header comment.
b = El.DistMultiVec(El.dTag)
b.Resize(4,1)
b.Set(0,0, 1.)
b.Set(1,0, 2.)
b.Set(2,0, 3.)
b.Set(3,0, 4.)

# Objective vector: only x[4] (the auxiliary inverse variable) is
# penalised.
c = El.DistMultiVec(El.dTag)
c.Resize(8,1)
c.Set(0,0, 0)
c.Set(1,0, 0)
c.Set(2,0, 0)
c.Set(3,0, 0)
c.Set(4,0, 1)
c.Set(5,0, 0)
c.Set(6,0, 0)
c.Set(7,0, 0)

# Cone offset vector: the third row of each cone is fixed at 2.
h = El.DistMultiVec(El.dTag)
El.Zeros( h, 12, 1 )
h.Set(2,0,2.)
h.Set(5,0,2.)
h.Set(8,0,2.)
h.Set(11,0,2.)
# Toggle textual printing and graphical display of the problem data.
output = True
display = False
if output:
    El.Print( A, "A" )
    El.Print( G, "G" )
    El.Print( b, "b" )
    El.Print( c, "c" )
    El.Print( h, "h" )
if display:
    El.Display( A, "A" )
    El.Display( G, "G" )
    El.Display( b, "b" )
    El.Display( c, "c" )
    El.Display( h, "h" )
# Solution vectors: primal x, duals y and z, and the slack s.
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
s = El.DistMultiVec()
# Double-precision affine-form SOCP control structure, with progress
# printing, outer equilibration, and timing enabled on the Mehrotra IPM.
ctrl = El.SOCPAffineCtrl_d()
ctrl.mehrotraCtrl.qsdCtrl.progress = True
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.outerEquil = True
ctrl.mehrotraCtrl.time = True
El.SOCPAffine(A,G,b,c,h,orders,firstInds,labels,x,y,z,s,ctrl)

El.Finalize()
| {
"repo_name": "justusc/Elemental",
"path": "examples/interface/InvPos.py",
"copies": "1",
"size": "2739",
"license": "bsd-3-clause",
"hash": -2670130170129702400,
"line_mean": 21.825,
"line_max": 78,
"alpha_frac": 0.6370938299,
"autogenerated": false,
"ratio": 2.1772655007949124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33143593306949126,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.