AuthorizedException = (
BufferError,
ArithmeticError,
AssertionError,
AttributeError,
EnvironmentError,
EOFError,
LookupError,
MemoryError,
ReferenceError,
RuntimeError,
SystemError,
TypeError,
ValueError
)
|
{
"content_hash": "e623c5271fd39080a953114fae340619",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 25,
"avg_line_length": 20.733333333333334,
"alnum_prop": 0.5627009646302251,
"repo_name": "ainafp/nilearn",
"id": "ee45bba416bd22f2e0e5332db3823938709bed53",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nilearn/_utils/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
__version__ = '0.39'
__license__ = 'MIT'
import re
import os
import sys
import time
import logging
import marshal
import tempfile
import threading
from math import log
from hashlib import md5
from ._compat import *
from . import finalseg
if os.name == 'nt':
from shutil import move as _replace_file
else:
_replace_file = os.rename
_get_abs_path = lambda path: os.path.normpath(os.path.join(os.getcwd(), path))
DEFAULT_DICT = None
DEFAULT_DICT_NAME = "dict.txt"
log_console = logging.StreamHandler(sys.stderr)
default_logger = logging.getLogger(__name__)
default_logger.setLevel(logging.DEBUG)
default_logger.addHandler(log_console)
DICT_WRITING = {}
pool = None
re_userdict = re.compile('^(.+?)( [0-9]+)?( [a-z]+)?$', re.U)
re_eng = re.compile('[a-zA-Z0-9]', re.U)
# \u4E00-\u9FD5a-zA-Z0-9+#&\._ : All non-space characters. Will be handled with re_han
# \r\n|\s : whitespace characters. Will not be handled.
# re_han_default = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._%]+)", re.U)
# Adding "-" symbol in re_han_default
re_han_default = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._%\-]+)", re.U)
re_skip_default = re.compile("(\r\n|\s)", re.U)
re_han_cut_all = re.compile("([\u4E00-\u9FD5]+)", re.U)
re_skip_cut_all = re.compile("[^a-zA-Z0-9+#\n]", re.U)
def setLogLevel(log_level):
    default_logger.setLevel(log_level)
class Tokenizer(object):
def __init__(self, dictionary=DEFAULT_DICT):
self.lock = threading.RLock()
if dictionary == DEFAULT_DICT:
self.dictionary = dictionary
else:
self.dictionary = _get_abs_path(dictionary)
self.FREQ = {}
self.total = 0
self.user_word_tag_tab = {}
self.initialized = False
self.tmp_dir = None
self.cache_file = None
def __repr__(self):
return '<Tokenizer dictionary=%r>' % self.dictionary
def gen_pfdict(self, f):
lfreq = {}
ltotal = 0
f_name = resolve_filename(f)
for lineno, line in enumerate(f, 1):
try:
line = line.strip().decode('utf-8')
word, freq = line.split(' ')[:2]
freq = int(freq)
lfreq[word] = freq
ltotal += freq
for ch in xrange(len(word)):
wfrag = word[:ch + 1]
if wfrag not in lfreq:
lfreq[wfrag] = 0
except ValueError:
raise ValueError(
'invalid dictionary entry in %s at Line %s: %s' % (f_name, lineno, line))
f.close()
return lfreq, ltotal
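    # Illustrative dictionary entries in the format gen_pfdict expects
    # (word, frequency and an optional POS tag, separated by spaces):
    #   AT&T 3 nz
    #   云计算 5 n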
def initialize(self, dictionary=None):
if dictionary:
abs_path = _get_abs_path(dictionary)
if self.dictionary == abs_path and self.initialized:
return
else:
self.dictionary = abs_path
self.initialized = False
else:
abs_path = self.dictionary
with self.lock:
try:
with DICT_WRITING[abs_path]:
pass
except KeyError:
pass
if self.initialized:
return
default_logger.debug("Building prefix dict from %s ..." % (abs_path or 'the default dictionary'))
t1 = time.time()
if self.cache_file:
cache_file = self.cache_file
# default dictionary
elif abs_path == DEFAULT_DICT:
cache_file = "jieba.cache"
# custom dictionary
else:
cache_file = "jieba.u%s.cache" % md5(
abs_path.encode('utf-8', 'replace')).hexdigest()
cache_file = os.path.join(
self.tmp_dir or tempfile.gettempdir(), cache_file)
# prevent absolute path in self.cache_file
tmpdir = os.path.dirname(cache_file)
load_from_cache_fail = True
if os.path.isfile(cache_file) and (abs_path == DEFAULT_DICT or
os.path.getmtime(cache_file) > os.path.getmtime(abs_path)):
default_logger.debug(
"Loading model from cache %s" % cache_file)
try:
with open(cache_file, 'rb') as cf:
self.FREQ, self.total = marshal.load(cf)
load_from_cache_fail = False
except Exception:
load_from_cache_fail = True
if load_from_cache_fail:
wlock = DICT_WRITING.get(abs_path, threading.RLock())
DICT_WRITING[abs_path] = wlock
with wlock:
self.FREQ, self.total = self.gen_pfdict(self.get_dict_file())
default_logger.debug(
"Dumping model to file cache %s" % cache_file)
try:
# prevent moving across different filesystems
fd, fpath = tempfile.mkstemp(dir=tmpdir)
with os.fdopen(fd, 'wb') as temp_cache_file:
marshal.dump(
(self.FREQ, self.total), temp_cache_file)
_replace_file(fpath, cache_file)
except Exception:
default_logger.exception("Dump cache file failed.")
try:
del DICT_WRITING[abs_path]
except KeyError:
pass
self.initialized = True
default_logger.debug(
"Loading model cost %.3f seconds." % (time.time() - t1))
default_logger.debug("Prefix dict has been built successfully.")
def check_initialized(self):
if not self.initialized:
self.initialize()
def calc(self, sentence, DAG, route):
N = len(sentence)
route[N] = (0, 0)
logtotal = log(self.total)
for idx in xrange(N - 1, -1, -1):
route[idx] = max((log(self.FREQ.get(sentence[idx:x + 1]) or 1) -
logtotal + route[x + 1][0], x) for x in DAG[idx])
def get_DAG(self, sentence):
self.check_initialized()
DAG = {}
N = len(sentence)
for k in xrange(N):
tmplist = []
i = k
frag = sentence[k]
while i < N and frag in self.FREQ:
if self.FREQ[frag]:
tmplist.append(i)
i += 1
frag = sentence[k:i + 1]
if not tmplist:
tmplist.append(k)
DAG[k] = tmplist
return DAG
def __cut_all(self, sentence):
dag = self.get_DAG(sentence)
old_j = -1
for k, L in iteritems(dag):
if len(L) == 1 and k > old_j:
yield sentence[k:L[0] + 1]
old_j = L[0]
else:
for j in L:
if j > k:
yield sentence[k:j + 1]
old_j = j
def __cut_DAG_NO_HMM(self, sentence):
DAG = self.get_DAG(sentence)
route = {}
self.calc(sentence, DAG, route)
x = 0
N = len(sentence)
buf = ''
while x < N:
y = route[x][1] + 1
l_word = sentence[x:y]
if re_eng.match(l_word) and len(l_word) == 1:
buf += l_word
x = y
else:
if buf:
yield buf
buf = ''
yield l_word
x = y
if buf:
yield buf
buf = ''
def __cut_DAG(self, sentence):
DAG = self.get_DAG(sentence)
route = {}
self.calc(sentence, DAG, route)
x = 0
buf = ''
N = len(sentence)
while x < N:
y = route[x][1] + 1
l_word = sentence[x:y]
if y - x == 1:
buf += l_word
else:
if buf:
if len(buf) == 1:
yield buf
buf = ''
else:
if not self.FREQ.get(buf):
recognized = finalseg.cut(buf)
for t in recognized:
yield t
else:
for elem in buf:
yield elem
buf = ''
yield l_word
x = y
if buf:
if len(buf) == 1:
yield buf
elif not self.FREQ.get(buf):
recognized = finalseg.cut(buf)
for t in recognized:
yield t
else:
for elem in buf:
yield elem
def cut(self, sentence, cut_all=False, HMM=True):
'''
The main function that segments an entire sentence containing
Chinese characters into separate words.
Parameter:
- sentence: The str(unicode) to be segmented.
- cut_all: Model type. True for full pattern, False for accurate pattern.
- HMM: Whether to use the Hidden Markov Model.
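Example (illustrative; actual segmentation depends on the loaded dictionary):
    seg_list = jieba.cut("我来到北京清华大学", cut_all=False)
    print("/ ".join(seg_list))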
'''
sentence = strdecode(sentence)
if cut_all:
re_han = re_han_cut_all
re_skip = re_skip_cut_all
else:
re_han = re_han_default
re_skip = re_skip_default
if cut_all:
cut_block = self.__cut_all
elif HMM:
cut_block = self.__cut_DAG
else:
cut_block = self.__cut_DAG_NO_HMM
blocks = re_han.split(sentence)
for blk in blocks:
if not blk:
continue
if re_han.match(blk):
for word in cut_block(blk):
yield word
else:
tmp = re_skip.split(blk)
for x in tmp:
if re_skip.match(x):
yield x
elif not cut_all:
for xx in x:
yield xx
else:
yield x
def cut_for_search(self, sentence, HMM=True):
"""
Finer segmentation for search engines.
"""
words = self.cut(sentence, HMM=HMM)
for w in words:
if len(w) > 2:
for i in xrange(len(w) - 1):
gram2 = w[i:i + 2]
if self.FREQ.get(gram2):
yield gram2
if len(w) > 3:
for i in xrange(len(w) - 2):
gram3 = w[i:i + 3]
if self.FREQ.get(gram3):
yield gram3
yield w
def lcut(self, *args, **kwargs):
return list(self.cut(*args, **kwargs))
def lcut_for_search(self, *args, **kwargs):
return list(self.cut_for_search(*args, **kwargs))
_lcut = lcut
_lcut_for_search = lcut_for_search
def _lcut_no_hmm(self, sentence):
return self.lcut(sentence, False, False)
def _lcut_all(self, sentence):
return self.lcut(sentence, True)
def _lcut_for_search_no_hmm(self, sentence):
return self.lcut_for_search(sentence, False)
def get_dict_file(self):
if self.dictionary == DEFAULT_DICT:
return get_module_res(DEFAULT_DICT_NAME)
else:
return open(self.dictionary, 'rb')
def load_userdict(self, f):
'''
Load a personalized dict to improve detection rate.
Parameter:
- f : A plain text file that contains words and their occurrences.
Can be a file-like object, or the path of the dictionary file,
whose encoding must be utf-8.
Structure of dict file:
word1 freq1 word_type1
word2 freq2 word_type2
...
Word type may be ignored
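Example entries (illustrative):
    云计算 5
    创新办 3 i
    台中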
'''
self.check_initialized()
if isinstance(f, string_types):
f_name = f
f = open(f, 'rb')
else:
f_name = resolve_filename(f)
for lineno, ln in enumerate(f, 1):
line = ln.strip()
if not isinstance(line, text_type):
try:
line = line.decode('utf-8').lstrip('\ufeff')
except UnicodeDecodeError:
raise ValueError('dictionary file %s must be utf-8' % f_name)
if not line:
continue
# match won't be None because there's at least one character
word, freq, tag = re_userdict.match(line).groups()
if freq is not None:
freq = freq.strip()
if tag is not None:
tag = tag.strip()
self.add_word(word, freq, tag)
def add_word(self, word, freq=None, tag=None):
"""
Add a word to dictionary.
freq and tag can be omitted; freq defaults to a calculated value
that ensures the word can be cut out.
"""
self.check_initialized()
word = strdecode(word)
freq = int(freq) if freq is not None else self.suggest_freq(word, False)
self.FREQ[word] = freq
self.total += freq
if tag:
self.user_word_tag_tab[word] = tag
for ch in xrange(len(word)):
wfrag = word[:ch + 1]
if wfrag not in self.FREQ:
self.FREQ[wfrag] = 0
if freq == 0:
finalseg.add_force_split(word)
def del_word(self, word):
"""
Convenient function for deleting a word.
"""
self.add_word(word, 0)
def suggest_freq(self, segment, tune=False):
"""
Suggest word frequency to force the characters in a word to be
joined or split.
Parameter:
- segment : The segments that the word is expected to be cut into. If the
word should be treated as a whole, use a str.
- tune : If True, tune the word frequency.
Note that HMM may affect the final result. If the result doesn't change,
set HMM=False.
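Example (illustrative):
    # join "台中" as one word
    jieba.suggest_freq('台中', tune=True)
    # split "今天天气" into "今天" and "天气"
    jieba.suggest_freq(('今天', '天气'), tune=True)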
"""
self.check_initialized()
ftotal = float(self.total)
freq = 1
if isinstance(segment, string_types):
word = segment
for seg in self.cut(word, HMM=False):
freq *= self.FREQ.get(seg, 1) / ftotal
freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1))
else:
segment = tuple(map(strdecode, segment))
word = ''.join(segment)
for seg in segment:
freq *= self.FREQ.get(seg, 1) / ftotal
freq = min(int(freq * self.total), self.FREQ.get(word, 0))
if tune:
self.add_word(word, freq)
return freq
def tokenize(self, unicode_sentence, mode="default", HMM=True):
"""
Tokenize a sentence and yield tuples of (word, start, end).
Parameter:
- sentence: the str(unicode) to be segmented.
- mode: "default" or "search", "search" is for finer segmentation.
- HMM: whether to use the Hidden Markov Model.
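Example (illustrative):
    for tk in jieba.tokenize(u'永和服装饰品有限公司'):
        print("%s\t start: %d \t end: %d" % tk)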
"""
if not isinstance(unicode_sentence, text_type):
raise ValueError("jieba: the input parameter should be unicode.")
start = 0
if mode == 'default':
for w in self.cut(unicode_sentence, HMM=HMM):
width = len(w)
yield (w, start, start + width)
start += width
else:
for w in self.cut(unicode_sentence, HMM=HMM):
width = len(w)
if len(w) > 2:
for i in xrange(len(w) - 1):
gram2 = w[i:i + 2]
if self.FREQ.get(gram2):
yield (gram2, start + i, start + i + 2)
if len(w) > 3:
for i in xrange(len(w) - 2):
gram3 = w[i:i + 3]
if self.FREQ.get(gram3):
yield (gram3, start + i, start + i + 3)
yield (w, start, start + width)
start += width
def set_dictionary(self, dictionary_path):
with self.lock:
abs_path = _get_abs_path(dictionary_path)
if not os.path.isfile(abs_path):
raise Exception("jieba: file does not exist: " + abs_path)
self.dictionary = abs_path
self.initialized = False
# default Tokenizer instance
dt = Tokenizer()
# global functions
get_FREQ = lambda k, d=None: dt.FREQ.get(k, d)
add_word = dt.add_word
calc = dt.calc
cut = dt.cut
lcut = dt.lcut
cut_for_search = dt.cut_for_search
lcut_for_search = dt.lcut_for_search
del_word = dt.del_word
get_DAG = dt.get_DAG
get_dict_file = dt.get_dict_file
initialize = dt.initialize
load_userdict = dt.load_userdict
set_dictionary = dt.set_dictionary
suggest_freq = dt.suggest_freq
tokenize = dt.tokenize
user_word_tag_tab = dt.user_word_tag_tab
def _lcut_all(s):
return dt._lcut_all(s)
def _lcut(s):
return dt._lcut(s)
def _lcut_no_hmm(s):
return dt._lcut_no_hmm(s)
def _lcut_for_search(s):
return dt._lcut_for_search(s)
def _lcut_for_search_no_hmm(s):
return dt._lcut_for_search_no_hmm(s)
def _pcut(sentence, cut_all=False, HMM=True):
parts = strdecode(sentence).splitlines(True)
if cut_all:
result = pool.map(_lcut_all, parts)
elif HMM:
result = pool.map(_lcut, parts)
else:
result = pool.map(_lcut_no_hmm, parts)
for r in result:
for w in r:
yield w
def _pcut_for_search(sentence, HMM=True):
parts = strdecode(sentence).splitlines(True)
if HMM:
result = pool.map(_lcut_for_search, parts)
else:
result = pool.map(_lcut_for_search_no_hmm, parts)
for r in result:
for w in r:
yield w
def enable_parallel(processnum=None):
"""
Change the module's `cut` and `cut_for_search` functions to the
parallel version.
Note that this only works with the default Tokenizer `dt`; custom
Tokenizer instances are not supported.
"""
global pool, dt, cut, cut_for_search
from multiprocessing import cpu_count
if os.name == 'nt':
raise NotImplementedError(
"jieba: parallel mode only supports posix system")
else:
from multiprocessing import Pool
dt.check_initialized()
if processnum is None:
processnum = cpu_count()
pool = Pool(processnum)
cut = _pcut
cut_for_search = _pcut_for_search
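# Illustrative usage (POSIX only; swaps the module-level cut functions):
#   jieba.enable_parallel(4)   # run segmentation in 4 worker processes
#   jieba.disable_parallel()   # restore the serial implementations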
def disable_parallel():
global pool, dt, cut, cut_for_search
if pool:
pool.close()
pool = None
cut = dt.cut
cut_for_search = dt.cut_for_search
|
{
"content_hash": "91f83599b50145eb3e4445c4a4017385",
"timestamp": "",
"source": "github",
"line_count": 598,
"max_line_length": 109,
"avg_line_length": 31.777591973244146,
"alnum_prop": 0.502604851865495,
"repo_name": "gumblex/jieba",
"id": "60bbfd6ab34fbf5cad893570814a079d172d1c5d",
"size": "19003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jieba/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "6770809"
},
{
"name": "Python",
"bytes": "7320280"
}
],
"symlink_target": ""
}
|
import StringIO
from django.utils.html import escape
def __dump_value(t, value, tag, file):
if t == str or t == unicode:
file.write('<%s>%s</%s>' % (tag, escape(value), tag))
elif t == bool:
file.write('<%s>%s</%s>' % (tag, str(value).lower(), tag))
else:
file.write('<%s>%s</%s>' % (tag, str(value), tag))
def __dump_dict(dictionary, file):
"""Output a dict."""
for key, value in dictionary.items():
t = type(value)
if t == dict:
file.write('<div class="%s">' % key)
__dump_dict(value, file)
file.write('</div>')
elif t == tuple or t == list or t == set:
file.write('<ul class="%s">' % key)
__dump_array(value, file)
            file.write('</ul>')
else:
__dump_value(t, value, key, file)
def __dump_array(array, file):
"""Output an array."""
for value in array:
t = type(value)
if t == dict:
__dump_dict(value, file)
elif t == tuple or t == list or t == set:
file.write('<ul>')
__dump_array(value, file)
file.write('</ul>')
else:
__dump_value(t, value, 'li', file)
def dumps(data, file=None):
"""Similar to json.dumps, will return an html fragment string."""
if file:
output = file
else:
output = StringIO.StringIO()
__dump_array([data], output)
if not file:
return output.getvalue()
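# Illustrative behaviour (a single key avoids relying on dict ordering):
#   dumps({'tags': ['a', 'b']})
#   # -> '<ul class="tags"><li>a</li><li>b</li></ul>'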
|
{
"content_hash": "7d753f9bfe70a8a5166a8ee79fc0a0c4",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 69,
"avg_line_length": 28.169811320754718,
"alnum_prop": 0.4996651038178165,
"repo_name": "symmetricapi/django-symmetric",
"id": "e83a83a2176040075db5bd719de2e223f0f1c3bf",
"size": "1493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symmetric/html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "384"
},
{
"name": "Java",
"bytes": "8248"
},
{
"name": "Objective-C",
"bytes": "15314"
},
{
"name": "Python",
"bytes": "254281"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import
__all__ = ['Worker', 'Manager', 'make_log', 'Timeout']
__version__ = '0.1'
import os
import sys
import time
import signal
import logging
from .process import pid_exists, wait_pid
default_log = logging.getLogger(__name__)
is_exit = False
class TimeoutException(Exception):
pass
class Timeout():
"""Timeout class using ALARM signal."""
def __init__(self, sec):
self.sec = sec
def __enter__(self):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(self.sec)
def __exit__(self, *args):
signal.alarm(0) # disable alarm
def raise_timeout(self, *args):
raise TimeoutException("Timeout {}s".format(self.sec))
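# Illustrative usage (POSIX only, since SIGALRM is unavailable on Windows):
#   with Timeout(5):
#       do_something_slow()   # raises TimeoutException after 5 seconds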
def get_memory(pid):
    # return the memory usage in MB (psutil should be at least version 4.0)
from psutil import Process, __version__
# if __version__ < '4.0.0':
# raise Exception('psutil module should be 4.0.0 version at least.')
if pid_exists(pid):
process = Process(pid)
# mem = process.memory_full_info().uss / float(1024*1024)
mem = process.memory_info().rss / float(1024*1024)
return mem
return 0
FORMAT = "[%(levelname)s - %(asctime)s - %(filename)s:%(lineno)s] - %(message)s"
def make_log(log, log_filename, format=FORMAT, datafmt=None, max_bytes=1024*1024*50,
backup_count=5):
import logging.handlers
if isinstance(log, (str, unicode)):
log = logging.getLogger(log)
handler = logging.handlers.RotatingFileHandler(
log_filename, maxBytes=max_bytes, backupCount=backup_count)
fmt = logging.Formatter(format, datafmt)
handler.setFormatter(fmt)
log.addHandler(handler)
return log
class Worker(object):
_id = 1
def __init__(self, log=None,
max_requests=None,
soft_memory_limit=200,  # soft memory limit in MB
hard_memory_limit=300,  # hard memory limit in MB
timeout=None,
check_point=None,
name=None,
args=None, kwargs=None):
self.log = log or default_log
self.max_requests = max_requests or sys.maxint
self.soft_memory_limit = soft_memory_limit
self.hard_memory_limit = hard_memory_limit
self.timeout = timeout
self.args = args or ()
self.kwargs = kwargs or {}
self.is_exit = None
self.count = 0
self.check_point = check_point
self.name = "%s-%d" % ((name or 'Process'), self._id)
self.__class__._id += 1
def start(self):
self.pid = os.getpid()
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGUSR1, self.signal_handler_usr1)
signal.signal(signal.SIGUSR2, self.signal_handler_usr2)
self.init()
try:
self.loop()
self.on_finished()
except Exception as e:
self.log.exception(e)
self.on_exception(e)
self.after_run()
def init(self):
self.log.info('%s %d created' % (self.name, self.pid))
def _run(self):
if self.timeout:
with Timeout(self.timeout):
ret = self.run()
else:
ret = self.run()
return ret
def run(self):
self.log.info('%s %d running' % (self.name, self.pid))
time.sleep(1)
return True
def on_exception(self, e):
pass
def on_finished(self):
pass
def loop(self):
while (not self.max_requests or self.count <= self.max_requests) and \
        not self.is_exit:
try:
self._run()
except TimeoutException as e:
self.log.info('Time out')
except Exception as e:
self.log.exception(e)
self.is_exit = True
finally:
# !important
# count should be calculated by the child class
# self.count += 1
if self.check_point:
time.sleep(self.check_point)
def after_run(self):
if self.is_exit == 'signal':
self.log.info('%s %d cancelled by signal.' % (self.name, self.pid))
elif self.is_exit == 'timeout':
self.log.info("%s %d cancelled by reaching timeout %ds" %
(self.name, self.pid, self.timeout))
elif self.is_exit == 'quit':
self.log.info("%s %d quit!" % (self.name, self.pid))
elif self.max_requests and self.count > self.max_requests:
    self.log.info('%s %d cancelled by reaching max requests count [%d]' % (
        self.name, self.pid, self.max_requests))
else:
    self.log.info('%s %d cancelled by an exception' % (
        self.name, self.pid))
def signal_handler(self, signum, frame):
    self.is_exit = 'signal'
    self.log.info("%s %d received a signal %d" % (self.name, self.pid, signum))
    os._exit(0)
def signal_handler_usr1(self, signum, frame):
    """hard memory limit"""
    self.is_exit = 'signal'
    self.log.info("%s %d received a signal %d" % (self.name, self.pid, signum))
    os._exit(0)
def signal_handler_usr2(self, signum, frame):
    """soft memory limit"""
    self.is_exit = 'signal'
    self.log.info("%s %d received a signal %d" % (self.name, self.pid, signum))
    os._exit(0)
def reached_soft_memory_limit(self, mem):
if self.soft_memory_limit and mem >= self.soft_memory_limit:
return True
else:
return False
def reached_hard_memory_limit(self, mem):
if self.hard_memory_limit and mem >= self.hard_memory_limit:
return True
else:
return False
class Manager(object):
def __init__(self, workers, log=None, check_point=10,
title='Workers Daemon', wait_time=3, daemon=False):
"""
:param workers: a list of workers
:param log: log object
:param check_point: time interval to check sub process status
:return:
"""
if not workers:
    (log or default_log).info('No workers need to run.')
    sys.exit(0)
self.log = log or logging.getLogger(__name__)
self.workers = workers
#reset log
for w in self.workers:
w.log = self.log
self.is_exit = False
self.check_point = check_point
self.title = title
self.wait_time = wait_time
self.daemon = daemon
def init(self):
_len = len(self.title)
self.log.info('='*_len)
self.log.info('%s' % self.title)
self.log.info('='*_len)
self.log.info('Daemon process %d' % self.pid)
self.log.info('Check point %ds' % self.check_point)
def start(self):
self.pid = os.getpid()
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
self.init()
self.run()
self.after_run()
def join(self):
for child in self.pids.values():
if pid_exists(child):
wait_pid(child)
def run(self):
self.pids = pids = {}
while 1:
for i, worker in enumerate(self.workers):
pid = pids.get(i)
create = False
if not pid:
create = True
else:
#reap child process !important
try:
os.waitpid(pid, os.WNOHANG)
except Exception:
    pass
if not pid_exists(pid):
self.log.info('%s %d no longer exists.' % (worker.name, pid))
create = True
if create:
pid = os.fork()
#main
if pid:
pids[i] = pid
#child
else:
try:
worker.start()
except Exception as e:
self.log.exception(e)
sys.exit(0)
else:
try:
mem = get_memory(pid)
if worker.reached_hard_memory_limit(mem):
self.kill_child(pid, signal.SIGUSR1)
self.log.info('%s %d memory usage %dM reached hard memory limit %dM; it will be killed.' % (
    worker.name, pid, mem, worker.hard_memory_limit))
elif worker.reached_soft_memory_limit(mem):
self.kill_child(pid, signal.SIGUSR2)
self.log.info('%s %d memory usage %dM reached soft memory limit %dM; it will be cancelled.' % (
    worker.name, pid, mem, worker.soft_memory_limit))
except Exception as e:
self.log.info(e)
if not self.daemon:
return
time.sleep(self.check_point)
def after_run(self):
pass
def kill_child(self, pid, sig=signal.SIGTERM):
if pid_exists(pid):
os.kill(pid, sig)
wait_pid(pid, 3, lambda x:os.kill(x, signal.SIGKILL))
wait_pid(pid, 2)
# remove pid; iterate over a snapshot so the dict can be mutated safely
for index, p in list(self.pids.items()):
    if pid == p:
        self.pids.pop(index, None)
        break
def signal_handler(self, signum, frame):
self.log.info ("Process %d received a signal %d" % (self.pid, signum))
for child in self.pids.values():
self.kill_child(child)
sys.exit(0)
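# Illustrative usage (a minimal sketch using this module's classes):
#   workers = [Worker(max_requests=100, timeout=30) for _ in range(4)]
#   Manager(workers, log=make_log('daemon', 'daemon.log'), daemon=True).start()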
|
{
"content_hash": "1a0420ae83826e54bb3cce34322d8f7c",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 116,
"avg_line_length": 32.10032362459547,
"alnum_prop": 0.5221292468998892,
"repo_name": "limodou/uliweb",
"id": "552995dff194d85f3c7449bf1b25233286961d22",
"size": "10100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uliweb/utils/workers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "4614"
},
{
"name": "HTML",
"bytes": "53717"
},
{
"name": "JavaScript",
"bytes": "3450"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2185739"
},
{
"name": "Shell",
"bytes": "684"
},
{
"name": "Smarty",
"bytes": "698"
}
],
"symlink_target": ""
}
|
"""
Created on Jun 20, 2011
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
import pyocni.pyocni_tools.config as config
# getting the Logger
logger = config.logger
def update_kind_provider(old_provider, new_provider):
"""
Update only a part of the provider description
Args:
@param old_provider: The old provider description
@param new_provider: The new provider description
@return : Updated data and a boolean (False if all fields were updated, True if some fields could not be updated)
"""
# Try to get the keys from the occi description dictionary
oldData_keys = old_provider.keys()
newData_keys = new_provider.keys()
for key in newData_keys:
    if key in oldData_keys:
        old_provider[key] = new_provider[key]
    else:
        # Keep a record of the keys (= parts) that couldn't be updated
        logger.debug("update description : " + key + " could not be found")
        return None, True
return old_provider, False
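# Illustrative behaviour:
#   update_kind_provider({'a': 1, 'b': 2}, {'b': 3}) -> ({'a': 1, 'b': 3}, False)
#   update_kind_provider({'a': 1}, {'c': 3})         -> (None, True)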
|
{
"content_hash": "657cb15cf478a4388bb2b963c6309839",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 121,
"avg_line_length": 33.16216216216216,
"alnum_prop": 0.6821515892420538,
"repo_name": "jordan-developer/pyCMBS",
"id": "1299fa542564f0a0aa6f4ab347f3395d0569566c",
"size": "1825",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyocni/pyocni_tools/couchdbdoc_Joker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "356349"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
}
|
import pytest
import hashlib
from django.core import exceptions
from django.db.utils import IntegrityError
from share.models import RawData
@pytest.mark.django_db
class TestRawData:
def test_doesnt_mangle_data(self, share_source):
rd = RawData(source=share_source, app_label='foo', data=b'This is just some data')
rd.save()
assert RawData.objects.first().data == 'This is just some data'
def test_must_have_data(self, share_source):
rd = RawData(source=share_source, app_label='foo')
with pytest.raises(exceptions.ValidationError) as e:
rd.clean_fields()
rd.save()
assert 'This field cannot be blank.' == e.value.message_dict['data'][0]
def test_must_have_source(self):
rd = RawData(data='SomeData', app_label='foo')
with pytest.raises(IntegrityError) as e:
rd.save()
assert 'null value in column "source_id" violates not-null constraint' in e.value.args[0]
def test_store_data(self, share_source):
rd = RawData.objects.store_data('myid', b'mydatums', share_source, 'applabel')
assert rd.date_seen is not None
assert rd.date_harvested is not None
assert rd.data == b'mydatums'
assert rd.source == share_source
assert rd.app_label == 'applabel'
assert rd.provider_doc_id == 'myid'
assert rd.sha256 == hashlib.sha256(b'mydatums').hexdigest()
def test_store_data_dedups_simple(self, share_source):
rd1 = RawData.objects.store_data('myid', b'mydatums', share_source, 'applabel')
rd2 = RawData.objects.store_data('myid', b'mydatums', share_source, 'applabel')
assert rd1.pk == rd2.pk
assert rd1.date_seen < rd2.date_seen
assert rd1.date_harvested == rd2.date_harvested
def test_store_data_dedups_complex(self, share_source):
data = b'{"providerUpdatedDateTime":"2016-08-25T11:37:40Z","uris":{"canonicalUri":"https://provider.domain/files/7d2792031","providerUris":["https://provider.domain/files/7d2792031"]},"contributors":[{"name":"Person1","email":"one@provider.domain"},{"name":"Person2","email":"two@provider.domain"},{"name":"Person3","email":"three@provider.domain"},{"name":"Person4","email":"dxm6@psu.edu"}],"title":"ReducingMorbiditiesinNeonatesUndergoingMRIScannig"}'
rd1 = RawData.objects.store_data('myid', data, share_source, 'applabel')
rd2 = RawData.objects.store_data('myid', data, share_source, 'applabel')
assert rd1.pk == rd2.pk
assert rd1.date_seen < rd2.date_seen
assert rd1.date_harvested == rd2.date_harvested
|
{
"content_hash": "f702fcc5cb4bc1f0b3177b76abe0c1f9",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 461,
"avg_line_length": 41.77777777777778,
"alnum_prop": 0.6614741641337386,
"repo_name": "zamattiac/SHARE",
"id": "5e33c9947d940ae7ee9c7b122afd32810b25f37e",
"size": "2632",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/share/models/test_rawdata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3690"
},
{
"name": "HTML",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "1517988"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
}
|
from flask import Response
import time, datetime
import config
import json
def ApiResponse(data, mimetype=config.MIMETYPE, **kwargs):
return Response(
json.dumps(data) if isinstance(data, (dict, list)) else data,
mimetype=mimetype,
**kwargs
)
def to_timestamp(dt):
"""
Converts a datetime into a unix timestamp
"""
return time.mktime(dt.timetuple())
def str_equal(str1, str2):
"""
Timing-safe string comparison (note: the length check still returns early)
"""
if len(str1) != len(str2):
return False
are_equal = True
i = 0
while i < len(str1):
if str1[i] != str2[i] and are_equal:
are_equal = False
i += 1
return are_equal
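# Note: where available, the standard library's hmac.compare_digest
# (Python >= 2.7.7) is the preferred constant-time comparison:
#   import hmac
#   hmac.compare_digest(str1, str2)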
|
{
"content_hash": "888df123bee6be10453b995358e5f564",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 21.875,
"alnum_prop": 0.59,
"repo_name": "tiranacode/jap-jete-admin",
"id": "dda2e50c81b33298a14a3458f7ffdd687add16cd",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/api/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2909"
},
{
"name": "HTML",
"bytes": "1494"
},
{
"name": "JavaScript",
"bytes": "33315"
},
{
"name": "Python",
"bytes": "23488"
},
{
"name": "RAML",
"bytes": "1546"
}
],
"symlink_target": ""
}
|
"""
Conditions represent a build configuration under which to disable a test.
This file contains a canonical list of conditions and their properties, and code
for composing, parsing, and simplifying them. This is independent of their
representation within any particular test format.
"""
import collections
import functools
import itertools
import types
from typing import List, Optional, Union, Set, Tuple
import errors
class BaseCondition:
"""BaseCondition is a class for sentinel values ALWAYS and NEVER."""
# These represent a condition that's always true, and a condition that's never
# true.
ALWAYS = BaseCondition()
NEVER = BaseCondition()
@functools.total_ordering
class Terminal:
"""A boolean variable, the value of which depends on the configuration."""
def __init__(self, name: str, group: Optional[str]):
"""
Args:
name: The generic name for this terminal. Used to specify conditions on
the command line.
group: The group to which this condition belongs. Every Terminal with the
same group is mutually exclusive. For example, "os" - we can't be
compiling for Linux and Mac at the same time.
We also add fields for each test format, initialised to None. It's up to the
file defining the relevant code to fill these out.
"""
self.name: str = name
self.group: Optional[str] = group
self.gtest_info = None
self.expectations_info = None
def __str__(self):
return (f"Terminal('{self.name}')")
def __repr__(self):
return str(self)
def __lt__(self, other):
"""Define a consistent ordering for conditions written into test files"""
if not isinstance(other, Terminal):
return False
# Ungrouped terminals should compare greater than grouped ones, and compare
# by name with each other.
if (self.group is not None) == (other.group is not None):
return (self.group, self.name) < (other.group, other.name)
return self.group is not None
# TODO: We could probably just use object identity here (and for __hash__),
# since we only use a fixed set of Terminal objects.
def __eq__(self, other):
# Names are expected to be unique keys
return isinstance(other, Terminal) and self.name == other.name
def __hash__(self):
return hash(self.name)
# TODO: We should think about how to incorporate overlapping conditions. For
# instance, multiple versions of the same OS, where we might just specify "Mac"
# to refer to any version, or "Mac-10.15" for that specific version. Or "x86" to
# refer to the architecture as a whole, regardless of 32 vs 64-bit.
#
# We could handle this via expanding the higher-level one, for instance "Mac"
# being parsed directly into (or, ["Mac-10.15", "Mac-11", ...]). But we'd also
# need to handle this on the other end, to reduce it back down after
# simplifying.
TERMINALS = [
Terminal('android', group='os'),
Terminal('chromeos', group='os'),
Terminal('fuchsia', group='os'),
Terminal('ios', group='os'),
Terminal('linux', group='os'),
Terminal('mac', group='os'),
Terminal('win', group='os'),
Terminal('arm64', group='arch'),
Terminal('x86', group='arch'),
Terminal('x86-64', group='arch'),
Terminal('lacros', group='lacros/ash'),
Terminal('ash', group='lacros/ash'),
Terminal('asan', group=None),
Terminal('msan', group=None),
Terminal('tsan', group=None),
]
# Terminals should have unique names.
assert len({t.name for t in TERMINALS}) == len(TERMINALS)
# A condition can be one of three things:
# 1. A BaseCondition (ALWAYS or NEVER).
# 2. An operator, represented as a tuple with the operator name followed by its
# arguments.
# 3. A Terminal.
Condition = Union[BaseCondition, tuple, Terminal]
def get_term(name: str) -> Terminal:
"""Look up a Terminal by name."""
t = next((t for t in TERMINALS if t.name == name), None)
if t is not None:
return t
raise ValueError(f"Unknown condition '{name}'")
# TODO: We should check that the parsed condition makes sense with respect to
# condition groups. For instance, the condition 'linux & mac' can never be true.
def parse(condition_strs: List[str]) -> Condition:
"""Parse a list of condition strings, as passed on the command line.
Each element of condition_strs is a set of Terminal names joined with '&'s.
The list is implicitly 'or'ed together.
"""
# When no conditions are given, this is taken to mean "always".
if not condition_strs:
return ALWAYS
try:
return op_of('or', [
op_of('and', [get_term(x.strip()) for x in cond.split('&')])
for cond in condition_strs
])
except ValueError as e:
# Catching the exception raised by get_term.
valid_conds = '\n'.join(sorted(f'\t{term.name}' for term in TERMINALS))
raise errors.UserError(f"{e}\nValid conditions are:\n{valid_conds}")
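# Illustrative example:
#   parse(['linux & x86', 'mac']) produces
#   ('or', [('and', [Terminal('linux'), Terminal('x86')]), Terminal('mac')]),
#   i.e. (linux AND x86) OR mac.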
def op_of(op: str, args: List[Condition]) -> Condition:
"""Make an operator, simplifying the single-argument case."""
if len(args) == 1:
return args[0]
return (op, args)
def merge(existing_cond: Condition, new_cond: Condition) -> Condition:
"""Merge two conditions together.
Given an existing condition, parsed from a file, and a new condition to be
added, combine the two to produce a merged condition.
"""
# If currently ALWAYS, merging would only ever produce ALWAYS too. In this
# case the user likely wants to change the conditions or re-enable.
if existing_cond == ALWAYS:
return new_cond
# If currently NEVER, ignore the current value - NEVER or X = X
if existing_cond == NEVER:
return new_cond
# If new cond is ALWAYS, ignore the current value - X or ALWAYS = ALWAYS
if new_cond == ALWAYS:
return ALWAYS
# Similar to the first branch, if the user has specified NEVER then ignore the
# current value, as they're re-enabling it.
if new_cond == NEVER:
return NEVER
# Otherwise, take the union of the two conditions
cond = ('or', [existing_cond, new_cond])
return simplify(cond)
def generate_condition_groups(terms: List[Terminal]) -> List[Set[Terminal]]:
"""Partition a list of Terminals by their 'group' attribute."""
by_group = collections.defaultdict(set)
ungrouped = []
for term in terms:
if term.group is not None:
by_group[term.group].add(term)
else:
# Every Terminal without a 'group' attribute gets its own group, as
# they're not mutually exclusive with anything.
ungrouped.append({term})
groups = list(by_group.values())
groups += ungrouped
return groups
# Pre-compute condition groups for use when simplifying.
CONDITION_GROUPS = generate_condition_groups(TERMINALS)
def simplify(cond: Condition) -> Condition:
"""Given a Condition, produce an equivalent but simpler Condition.
This function uses the Quine-McCluskey algorithm. It's not implemented very
efficiently, but it works and is fast enough for now.
"""
if isinstance(cond, BaseCondition):
return cond
# Quine-McCluskey uses three values - true, false, and "don't care". The
# latter represents two things:
# * For values of input variables, the case where either true or false will
# suffice. This is used when combining conditions that differ only in that
# variable into one where that variable isn't specified.
# * For resulting values of the function, the case where we don't care what
# value the function produces. We use this for mutually exclusive
# conditions. For example, (linux & mac) is impossible, so we assign this a
# "don't care" value. This avoids producing a bunch of redundant stuff like
# (linux & ~mac).
DONT_CARE = 2
# First, compute the truth table of the function. We produce a set of
# "minterms", which are the combinations of input values for which the output
# is 1. We also produce a set of "don't care" values, which are the
# combinations of input values for which we don't care what the output is.
#
# Both of these are represented via tuples of {0, 1} values, where the value
# at index 'i' corresponds to variables[i].
# TODO: This could use a more efficient representation. Some packed integer
# using two bits per element or something.
variables = sorted(find_terminals(cond))
dont_cares = []
min_terms = []
for possible_input in itertools.product([0, 1], repeat=len(variables)):
# Generate every possible input, and evaluate the condition for that input.
# This is exponential in the number of variables, but in practice the number
# should be low (and is strictly bounded by len(TERMINALS)).
true_vars = {variables[i] for i, val in enumerate(possible_input) if val}
if any(len(group & true_vars) > 1 for group in CONDITION_GROUPS):
# Any combination which sets more than one variable from the same group to
# 1 is impossible, so we don't care about the output.
dont_cares.append(possible_input)
elif evaluate(cond, true_vars):
min_terms.append(possible_input)
# The meat of the algorithm. Try to combine minterms which differ by only a
# single variable.
# For example, (0, 1) and (0, 0) can be combined into (0, DONT_CARE), as the
# value of the second variable doesn't affect the output.
#
# We work in rounds, combining together all minterms from the previous round
# that can be. This may include combining the same minterm with multiple
# different minterms. Keep going until no more minterms can be combined.
#
# Any minterm which can't be combined with another is a "prime implicant",
# that is, it's a necessary part of the representation of the function. The
# union of all prime implicants specifies the function.
combined_some_minterms = True
prev_round = set(min_terms + dont_cares)
prime_implicants: List[Tuple] = []
while combined_some_minterms:
new_implicants = set()
used = set()
combined_some_minterms = False
# TODO: Rather than taking combinations of the entire set of minterms, we
# can instead group by the number of '1's. Then we only need to combine
# elements from adjacent groups.
for a, b in itertools.combinations(prev_round, 2):
diff_index = None
for i, (x, y) in enumerate(zip(a, b)):
if x != y:
if diff_index is not None:
# In this case there are at least two points of difference, so these
# two can't be combined.
break
diff_index = i
else:
if diff_index is not None:
# Replace the sole differing variable with DONT_CARE to produce the
# combined minterm. Flag both inputs as having been used, and
# therefore as not being prime implicants.
new_implicants.add(a[:diff_index] + (DONT_CARE, ) +
a[diff_index + 1:])
used |= {a, b}
combined_some_minterms = True
# Collect any minterms that weren't used in this round as prime implicants.
prime_implicants.extend(prev_round - used)
prev_round = new_implicants
# TODO: This isn't yet minimal - the set of prime implicants may have some
# redundancy which can be reduced further. For now we just accept that and
# use this set as-is. If we encounter any case for which we don't produce the
# minimal result, we'll need to implement something like Petrick's method.
# Finally, create our simplified condition using the computed set of prime
# implicants.
# TODO: Ordering. We should define some stable ordering to use. We probably
# want to group stuff based on CONDITION_GROUPS, so all the OS-related
# conditions are together, for instance. And then alphabetically within that.
or_args: List[Condition] = []
for pi in sorted(prime_implicants):
and_args: List[Condition] = []
for i, x in enumerate(pi):
if x == DONT_CARE:
continue
var = variables[i]
if x == 0:
and_args.append(('not', var))
else:
assert x == 1
and_args.append(var)
or_args.append(op_of('and', and_args))
return op_of('or', or_args)
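# Illustrative example: a condition equivalent to (linux AND asan) OR
# (linux AND NOT asan) simplifies to just the 'linux' Terminal, since the
# value of 'asan' does not affect the outcome.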
def find_terminals(cond: Condition) -> Set[Terminal]:
"""Find all leaf Terminal nodes of this Condition."""
if isinstance(cond, BaseCondition):
return set()
if isinstance(cond, Terminal):
return {cond}
assert isinstance(cond, tuple)
op, args = cond
if op == 'not':
return find_terminals(args)
return {var for arg in args for var in find_terminals(arg)}
def evaluate(cond: Condition, true_vars: Set[Terminal]) -> bool:
"""Evaluate a condition with a given set of true variables."""
if isinstance(cond, BaseCondition):
return cond is ALWAYS
if isinstance(cond, Terminal):
return cond in true_vars
# => must be a tuple
op, args = cond
if op == 'not':
return not evaluate(args, true_vars)
return {'or': any, 'and': all}[op](evaluate(arg, true_vars) for arg in args)
|
{
"content_hash": "f6d9949f292c037cb5b6c46356e002db",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 80,
"avg_line_length": 35.5828729281768,
"alnum_prop": 0.6855057837124446,
"repo_name": "scheib/chromium",
"id": "616dc19eb1cb51bd8342f85b262c27ff22f633b1",
"size": "13043",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tools/disable_tests/conditions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_noop as _
_('Page %(page)s of %(pages)s')
|
{
"content_hash": "dc855e2296aa798a1d7e01ce3fde0c82",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 55,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.7078651685393258,
"repo_name": "avastjohn/maventy_new",
"id": "f38f07121c77e1a27861ba550a4d0c89f085cf54",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search/translations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "14891"
},
{
"name": "JavaScript",
"bytes": "81658"
},
{
"name": "Python",
"bytes": "381211"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
}
|
"""
@package mi.dataset.parser
@file mi-dataset/mi/dataset/parser/utilities.py
@author Joe Padula, vipul lakhani
@brief Utilities that can be used by any parser
Release notes:
initial release
"""
from datetime import datetime
import time
import ntplib
import calendar
from mi.core.log import get_logger
__author__ = 'Joe Padula'
__license__ = 'Apache 2.0'
log = get_logger()
# Format of Zulu (UTC) timestamp in records
# Example: 2014-08-17T00:57:10.648Z
ZULU_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
# Format of DCL Controller Timestamp in records
# Example: 2014/08/17 00:57:10.648
DCL_CONTROLLER_TIMESTAMP_FORMAT = "%Y/%m/%d %H:%M:%S.%f"
def formatted_timestamp_utc_time(timestamp_str, format_str):
"""
Converts a formatted timestamp string to UTC time
NOTE: will not handle seconds >59 correctly due to limitation in
datetime module.
:param timestamp_str: a formatted timestamp string
:param format_str: format string used to decode the timestamp_str
:return: utc time value
"""
dt = datetime.strptime(timestamp_str, format_str)
return calendar.timegm(dt.timetuple()) + (dt.microsecond / 1000000.0)
def zulu_timestamp_to_utc_time(zulu_timestamp_str):
"""
Converts a zulu formatted timestamp string to UTC time.
:param zulu_timestamp_str: a zulu formatted timestamp string
:return: UTC time in seconds and microseconds precision
"""
return formatted_timestamp_utc_time(zulu_timestamp_str,
ZULU_TIMESTAMP_FORMAT)
def zulu_timestamp_to_ntp_time(zulu_timestamp_str):
"""
Converts a zulu formatted timestamp string to NTP time.
:param zulu_timestamp_str: a zulu formatted timestamp string
:return: NTP time in seconds and microseconds precision
"""
utc_time = zulu_timestamp_to_utc_time(zulu_timestamp_str)
return float(ntplib.system_to_ntp_time(utc_time))
def julian_time_to_ntp(julian_timestamp_str):
"""
Converts a julian formatted timestamp string to NTP time.
:param julian_timestamp_str: a julian formatted timestamp string (e.g. '200412', i.e. day 12 of 2004)
:return: NTP time in seconds
"""
timestamp = datetime.strptime(julian_timestamp_str, "%Y%j")
return (timestamp - datetime(1900, 1, 1)).total_seconds()
def time_1904_to_ntp(time_1904):
"""
:param time_1904: time in seconds since Jan 1, 1904 (e.g. 3601587612.0)
:return: NTP time (seconds since Jan 1, 1900)
"""
return time_1904 + (datetime(1904, 1, 1) - datetime(1900, 1, 1)).total_seconds()
def time_2000_to_ntp(time_2000):
"""
:param time_2000: a timestamp in epoch 2000
:return: timestamp in epoch 1900
This function calculates and returns a timestamp in epoch 1900
based on an integer timestamp in epoch 2000.
Parameter:
time_2000 - timestamp in number of seconds since Jan 1, 2000
Returns:
timestamp in number of seconds since Jan 1, 1900
"""
return time_2000 + zulu_timestamp_to_ntp_time("2000-01-01T00:00:00.00Z")
def dcl_time_to_utc(dcl_controller_timestamp_str):
"""
Converts a DCL controller timestamp string to UTC time.
:param dcl_controller_timestamp_str: a DCL controller timestamp string
:return: UTC time in seconds and microseconds precision
"""
no_frac_timestamp_str, frac_timestamp_str = dcl_controller_timestamp_str.split('.')
no_frac_format_str, frac_format_str = DCL_CONTROLLER_TIMESTAMP_FORMAT.split('.')
tt = time.strptime(no_frac_timestamp_str, no_frac_format_str)
frac_of_sec = float('.' + frac_timestamp_str)
return calendar.timegm(tt) + frac_of_sec
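# Illustrative example:
#   dcl_time_to_utc('2014/08/17 00:57:10.648') -> 1408237030.648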
def dcl_time_to_ntp(dcl_controller_timestamp_str):
"""
Converts a DCL controller timestamp string to NTP time.
:param dcl_controller_timestamp_str: a DCL controller timestamp string
:return: NTP time (float64) in seconds and microseconds precision
"""
utc_time = dcl_time_to_utc(dcl_controller_timestamp_str)
return float(ntplib.system_to_ntp_time(utc_time))
def timestamp_yyyymmddhhmmss_to_ntp(timestamp_str):
"""
Converts a timestamp string, in the YYYYMMDDHHMMSS format, to NTP time.
:param timestamp_str: a timestamp string in the format YYYYMMDDHHMMSS
:return: Time (float64) in seconds from epoch 01-01-1900.
"""
timestamp = datetime.strptime(timestamp_str, "%Y%m%d%H%M%S")
return (timestamp - datetime(1900, 1, 1)).total_seconds()
def timestamp_yyyy_mm_dd_hh_mm_ss_to_ntp(timestamp_str):
"""
Converts a timestamp string, in the YYYY/MM/DD HH:MM:SS format, to NTP time.
:param timestamp_str: a timestamp string in the format YYYY/MM/DD HH:MM:SS
:return: Time (float64) in seconds from epoch 01-01-1900.
"""
timestamp = datetime.strptime(timestamp_str, "%Y/%m/%d %H:%M:%S")
return (timestamp - datetime(1900, 1, 1)).total_seconds()
def timestamp_ddmmyyyyhhmmss_to_ntp(timestamp_str):
"""
Converts a timestamp string, in the DD Mon YYYY HH:MM:SS format, to NTP time.
:param timestamp_str: a timestamp string in the format DD Mon YYYY HH:MM:SS
:return: Time (float64) in seconds from epoch 01-01-1900.
"""
timestamp = datetime.strptime(timestamp_str, "%d %b %Y %H:%M:%S")
return (timestamp - datetime(1900, 1, 1)).total_seconds()
def timestamp_mmddyyhhmmss_to_ntp(timestamp_str):
"""
Converts a timestamp string, in the MMDDYYHHMMSS format, to NTP time.
:param timestamp_str: a timestamp string in the format MM/DD/YY HH:MM:SS
:return: Time (float64) in seconds from epoch 01-01-1900.
"""
timestamp = datetime.strptime(timestamp_str, "%m/%d/%y %H:%M:%S")
return (timestamp - datetime(1900, 1, 1)).total_seconds()
def timestamp_ddmmyyhhmmss_to_ntp(timestamp_str):
"""
Converts a timestamp string, in the DDMMYYHHMMSS format, to NTP time.
:param timestamp_str: a timestamp string in the format DD/MM/YY HH:MM:SS
:return: Time (float64) in seconds from epoch 01-01-1900.
"""
timestamp = datetime.strptime(timestamp_str, "%d/%m/%y %H:%M:%S")
return (timestamp - datetime(1900, 1, 1)).total_seconds()
def mac_timestamp_to_utc_timestamp(mac_timestamp):
"""
:param mac_timestamp: A mac based timestamp
:return: The mac timestamp converted to unix time
"""
unix_minus_mac_secs = (datetime(1970, 1, 1) - datetime(1904, 1, 1)).total_seconds()
secs_since_1970 = mac_timestamp - unix_minus_mac_secs
return secs_since_1970
def convert_to_signed_int_32_bit(hex_str):
"""
Utility function to convert a hex string into a 32 bit signed integer value
:param hex_str: hex String
:return: signed 32 bit integer
"""
val = int(hex_str, 16)
if val > 0x7FFFFFFF:
val = ((val+0x80000000) & 0xFFFFFFFF) - 0x80000000
return val
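# Illustrative examples:
#   convert_to_signed_int_32_bit('FFFFFFFF') -> -1
#   convert_to_signed_int_32_bit('7FFFFFFF') -> 2147483647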
def convert_to_signed_int_16_bit(hex_str):
"""
Utility function to convert a hex string into a 16 bit signed integer value
:param hex_str: hex String
:return: signed 16 bit integer
"""
val = int(hex_str, 16)
if val > 0x7FFF:
val = ((val+0x8000) & 0xFFFF) - 0x8000
return val
def convert_to_signed_int_8_bit(hex_str):
"""
Utility function to convert a hex string into an 8 bit signed integer value
:param hex_str: hex String
:return: signed 8 bit integer
"""
val = int(hex_str, 16)
if val > 0x7F:
val = ((val+0x80) & 0xFF) - 0x80
return val
def sum_hex_digits(ascii_hex_str):
"""
This method will take an ascii hex string and sum each of its bytes,
returning the result as hex.
:param ascii_hex_str: The ascii hex string to sum
:return: the sum as a hex string
"""
len_of_ascii_hex = len(ascii_hex_str)
if len_of_ascii_hex % 2 != 0:
raise ValueError("The ASCII Hex string is not divisible by 2.")
x = 0
# Iterate through each byte of ascii hex
for index in range(0, len_of_ascii_hex, 2):
# Convert each byte to an int and add it to the existing summation
x += int(ascii_hex_str[index:index+2], 16)
# Return the resultant summation as hex
return hex(x)
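# Illustrative example:
#   sum_hex_digits('0102ff') -> hex(0x01 + 0x02 + 0xff) -> '0x102'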
def particle_to_yml(particles, filename, mode='w+'):
"""
This function writes particles to a .yml file, creating a .yml fixture for testing.
'''
# open for write/append; to start from scratch, manually delete the file first
with open(filename, mode) as fid:
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for index in range(len(particles)):
particle_dict = particles[index].generate_dict()
fid.write(' - _index: %d\n' % (index+1))
fid.write(' particle_object: %s\n' % particles[index].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
if particle_dict.get('internal_timestamp') is not None:
fid.write(' internal_timestamp: %.7f\n' % particle_dict.get('internal_timestamp'))
if particle_dict.get('port_timestamp') is not None:
fid.write(' port_timestamp: %.7f\n' % particle_dict.get('port_timestamp'))
values_dict = {}
for value in particle_dict.get('values'):
values_dict[value.get('value_id')] = value.get('value')
for key in sorted(values_dict.iterkeys()):
value = values_dict[key]
if value is None:
fid.write(' %s: %s\n' % (key, 'Null'))
elif isinstance(value, float):
fid.write(' %s: %15.5f\n' % (key, value))
elif isinstance(value, str):
fid.write(" %s: '%s'\n" % (key, value))
else:
fid.write(' %s: %s\n' % (key, value))
|
{
"content_hash": "a4d4a1e16be4fc7931f4ae176ce2e1ea",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 101,
"avg_line_length": 33.407407407407405,
"alnum_prop": 0.6515823422697037,
"repo_name": "danmergens/mi-instrument",
"id": "e24477e16939085b8ddace2c408f62a53d67aa91",
"size": "9922",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mi/dataset/parser/utilities.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "10013408"
}
],
"symlink_target": ""
}
|
'''Views tests for the OSF.'''
from __future__ import absolute_import
import unittest
import json
import datetime as dt
import mock
import httplib as http
from nose.tools import * # noqa PEP8 asserts
from tests.test_features import requires_search
from modularodm import Q
from dateutil.parser import parse as parse_date
from framework import auth
from framework.exceptions import HTTPError
from framework.auth import User, Auth
from framework.auth.utils import impute_names_model
from website import mailchimp_utils
from website.views import _rescale_ratio
from website.util import permissions
from website.models import Node, Pointer, NodeLog
from website.project.model import ensure_schemas, has_anonymous_link
from website.project.views.contributor import (
send_claim_email,
deserialize_contributors,
send_claim_registered_email,
)
from website.profile.utils import add_contributor_json, serialize_unregistered
from website.profile.views import fmt_date_or_none
from website.util import api_url_for, web_url_for
from website import mails, settings
from website.util import rubeus
from website.project.views.node import _view_project, abbrev_authors, _should_show_wiki_widget
from website.project.views.comment import serialize_comment
from website.project.decorators import check_can_access
from website.addons.github.model import AddonGitHubOauthSettings
from tests.base import (
OsfTestCase,
fake,
capture_signals,
assert_is_redirect,
assert_datetime_equal,
)
from tests.factories import (
UserFactory, ApiKeyFactory, ProjectFactory, WatchConfigFactory,
NodeFactory, NodeLogFactory, AuthUserFactory, UnregUserFactory,
RegistrationFactory, CommentFactory, PrivateLinkFactory, UnconfirmedUserFactory, DashboardFactory, FolderFactory,
ProjectWithAddonFactory,
)
from website.settings import ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_ID
class TestViewingProjectWithPrivateLink(OsfTestCase):
def setUp(self):
super(TestViewingProjectWithPrivateLink, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory()
self.link.nodes.append(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_not_anonymous_for_public_project(self):
anonymous_link = PrivateLinkFactory(anonymous=True)
anonymous_link.nodes.append(self.project)
anonymous_link.save()
self.project.set_privacy('public')
self.project.save()
self.project.reload()
auth = Auth(user=self.user, private_key=anonymous_link.key)
assert_false(has_anonymous_link(self.project, auth))
def test_has_private_link_key(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_equal(res.status_code, 200)
def test_not_logged_in_no_key(self):
res = self.app.get(self.project_url, {'view_only': None})
assert_is_redirect(res)
res = res.follow(expect_errors=True)
assert_equal(res.status_code, 301)
assert_equal(
res.request.path,
'/login'
)
def test_logged_in_no_private_key(self):
res = self.app.get(self.project_url, {'view_only': None}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, http.FORBIDDEN)
def test_logged_in_has_key(self):
res = self.app.get(
self.project_url, {'view_only': self.link.key}, auth=self.user.auth)
assert_equal(res.status_code, 200)
@unittest.skip('Skipping for now until we find a way to mock/set the referrer')
def test_prepare_private_key(self):
res = self.app.get(self.project_url, {'key': self.link.key})
res = res.click('Registrations')
assert_is_redirect(res)
res = res.follow()
assert_equal(res.status_code, 200)
assert_equal(res.request.GET['key'], self.link.key)
def test_check_can_access_valid(self):
contributor = AuthUserFactory()
self.project.add_contributor(contributor, auth=Auth(self.project.creator))
self.project.save()
assert_true(check_can_access(self.project, contributor))
def test_check_user_access_invalid(self):
noncontrib = AuthUserFactory()
with assert_raises(HTTPError):
check_can_access(self.project, noncontrib)
def test_check_user_access_if_user_is_None(self):
assert_false(check_can_access(self.project, None))
class TestProjectViews(OsfTestCase):
def setUp(self):
super(TestProjectViews, self).setUp()
ensure_schemas()
self.user1 = AuthUserFactory()
self.user1.save()
self.consolidate_auth1 = Auth(user=self.user1)
self.auth = self.user1.auth
self.user2 = UserFactory()
# A project has 2 contributors
self.project = ProjectFactory(
title="Ham",
description='Honey-baked',
creator=self.user1
)
self.project.add_contributor(self.user2, auth=Auth(self.user1))
self.project.save()
def test_can_view_nested_project_as_admin(self):
self.parent_project = NodeFactory(
title='parent project',
category='project',
parent=self.project,
is_public=False
)
self.parent_project.save()
self.child_project = NodeFactory(
title='child project',
category='project',
parent=self.parent_project,
is_public=False
)
self.child_project.save()
url = self.child_project.web_url_for('view_project')
res = self.app.get(url, auth=self.auth)
assert_not_in('Private Project', res.body)
assert_in('parent project', res.body)
def test_edit_description(self):
url = "/api/v1/project/{0}/edit/".format(self.project._id)
self.app.post_json(url,
{"name": "description", "value": "Deep-fried"},
auth=self.auth)
self.project.reload()
assert_equal(self.project.description, "Deep-fried")
def test_project_api_url(self):
url = self.project.api_url
res = self.app.get(url, auth=self.auth)
data = res.json
assert_equal(data['node']['category'], 'Project')
assert_equal(data['node']['node_type'], 'project')
assert_equal(data['node']['title'], self.project.title)
assert_equal(data['node']['is_public'], self.project.is_public)
assert_equal(data['node']['is_registration'], False)
assert_equal(data['node']['id'], self.project._primary_key)
assert_equal(data['node']['watched_count'], 0)
assert_true(data['user']['is_contributor'])
assert_equal(data['node']['description'], self.project.description)
assert_equal(data['node']['url'], self.project.url)
assert_equal(data['node']['tags'], [t._primary_key for t in self.project.tags])
assert_in('forked_date', data['node'])
assert_in('watched_count', data['node'])
assert_in('registered_from_url', data['node'])
# TODO: Test "parent" and "user" output
def test_api_get_folder_pointers(self):
dashboard = DashboardFactory(creator=self.user1)
project_one = ProjectFactory(creator=self.user1)
project_two = ProjectFactory(creator=self.user1)
url = dashboard.api_url_for("get_folder_pointers")
dashboard.add_pointer(project_one, auth=self.consolidate_auth1)
dashboard.add_pointer(project_two, auth=self.consolidate_auth1)
res = self.app.get(url, auth=self.auth)
pointers = res.json
assert_in(project_one._id, pointers)
assert_in(project_two._id, pointers)
assert_equal(len(pointers), 2)
def test_api_get_folder_pointers_from_non_folder(self):
project_one = ProjectFactory(creator=self.user1)
project_two = ProjectFactory(creator=self.user1)
url = project_one.api_url_for("get_folder_pointers")
project_one.add_pointer(project_two, auth=self.consolidate_auth1)
res = self.app.get(url, auth=self.auth)
pointers = res.json
assert_equal(len(pointers), 0)
def test_new_user_gets_dashboard_on_dashboard_path(self):
my_user = AuthUserFactory()
dashboard = my_user.node__contributed.find(Q('is_dashboard', 'eq', True))
assert_equal(dashboard.count(), 0)
url = api_url_for('get_dashboard')
self.app.get(url, auth=my_user.auth)
my_user.reload()
dashboard = my_user.node__contributed.find(Q('is_dashboard', 'eq', True))
assert_equal(dashboard.count(), 1)
def test_add_contributor_post(self):
        # Two users are added as contributors via a POST request
project = ProjectFactory(creator=self.user1, is_public=True)
user2 = UserFactory()
user3 = UserFactory()
url = "/api/v1/project/{0}/contributors/".format(project._id)
dict2 = add_contributor_json(user2)
dict3 = add_contributor_json(user3)
dict2.update({
'permission': 'admin',
'visible': True,
})
dict3.update({
'permission': 'write',
'visible': False,
})
self.app.post_json(
url,
{
'users': [dict2, dict3],
'node_ids': [project._id],
},
content_type="application/json",
auth=self.auth,
).maybe_follow()
project.reload()
assert_in(user2._id, project.contributors)
# A log event was added
assert_equal(project.logs[-1].action, "contributor_added")
assert_equal(len(project.contributors), 3)
assert_in(user2._id, project.permissions)
assert_in(user3._id, project.permissions)
assert_equal(project.permissions[user2._id], ['read', 'write', 'admin'])
assert_equal(project.permissions[user3._id], ['read', 'write'])
def test_manage_permissions(self):
url = self.project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': self.project.creator._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user1._id, 'permission': 'read',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
assert_equal(self.project.get_permissions(self.user1), ['read'])
assert_equal(self.project.get_permissions(self.user2), ['read', 'write', 'admin'])
def test_manage_permissions_again(self):
url = self.project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': self.user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
self.app.post_json(
url,
{
'contributors': [
{'id': self.user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'read',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
assert_equal(self.project.get_permissions(self.user2), ['read'])
assert_equal(self.project.get_permissions(self.user1), ['read', 'write', 'admin'])
def test_contributor_manage_reorder(self):
        # Two users are added as contributors, then reordered via a POST request
project = ProjectFactory(creator=self.user1, is_public=True)
reg_user1, reg_user2 = UserFactory(), UserFactory()
project.add_contributors(
[
{'user': reg_user1, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
{'user': reg_user2, 'permissions': [
'read', 'write', 'admin'], 'visible': False},
]
)
# Add a non-registered user
unregistered_user = project.add_unregistered_contributor(
fullname=fake.name(), email=fake.email(),
auth=self.consolidate_auth1,
save=True,
)
url = project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': reg_user2._id, 'permission': 'admin',
'registered': True, 'visible': False},
{'id': project.creator._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': unregistered_user._id, 'permission': 'admin',
'registered': False, 'visible': True},
{'id': reg_user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
project.reload()
assert_equal(
# Note: Cast ForeignList to list for comparison
list(project.contributors),
[reg_user2, project.creator, unregistered_user, reg_user1]
)
assert_equal(
project.visible_contributors,
[project.creator, unregistered_user, reg_user1]
)
def test_project_remove_contributor(self):
url = "/api/v1/project/{0}/removecontributors/".format(self.project._id)
# User 1 removes user2
self.app.post(url, json.dumps({"id": self.user2._id}),
content_type="application/json",
auth=self.auth).maybe_follow()
self.project.reload()
assert_not_in(self.user2._id, self.project.contributors)
# A log event was added
assert_equal(self.project.logs[-1].action, "contributor_removed")
def test_get_contributors_abbrev(self):
# create a project with 3 registered contributors
project = ProjectFactory(creator=self.user1, is_public=True)
reg_user1, reg_user2 = UserFactory(), UserFactory()
project.add_contributors(
[
{'user': reg_user1, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
{'user': reg_user2, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
]
)
# add an unregistered contributor
project.add_unregistered_contributor(
fullname=fake.name(), email=fake.email(),
auth=self.consolidate_auth1,
save=True,
)
url = project.api_url_for('get_node_contributors_abbrev')
res = self.app.get(url, auth=self.auth)
assert_equal(len(project.contributors), 4)
assert_equal(len(res.json['contributors']), 3)
        assert_equal(res.json['others_count'], 1)
assert_equal(res.json['contributors'][0]['separator'], ',')
assert_equal(res.json['contributors'][1]['separator'], ',')
assert_equal(res.json['contributors'][2]['separator'], ' &')
def test_edit_node_title(self):
url = "/api/v1/project/{0}/edit/".format(self.project._id)
        # The title is changed through posting form data
self.app.post_json(url, {"name": "title", "value": "Bacon"},
auth=self.auth).maybe_follow()
self.project.reload()
# The title was changed
assert_equal(self.project.title, "Bacon")
# A log event was saved
assert_equal(self.project.logs[-1].action, "edit_title")
def test_make_public(self):
self.project.is_public = False
self.project.save()
url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
res = self.app.post_json(url, {}, auth=self.auth)
self.project.reload()
assert_true(self.project.is_public)
assert_equal(res.json['status'], 'success')
def test_make_private(self):
self.project.is_public = True
self.project.save()
url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
res = self.app.post_json(url, {}, auth=self.auth)
self.project.reload()
assert_false(self.project.is_public)
assert_equal(res.json['status'], 'success')
def test_cant_make_public_if_not_admin(self):
non_admin = AuthUserFactory()
self.project.add_contributor(non_admin, permissions=['read', 'write'])
self.project.is_public = False
self.project.save()
url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
res = self.app.post_json(
url, {}, auth=non_admin.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
assert_false(self.project.is_public)
def test_cant_make_private_if_not_admin(self):
non_admin = AuthUserFactory()
self.project.add_contributor(non_admin, permissions=['read', 'write'])
self.project.is_public = True
self.project.save()
url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
res = self.app.post_json(
url, {}, auth=non_admin.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
assert_true(self.project.is_public)
def test_add_tag(self):
url = "/api/v1/project/{0}/addtag/{tag}/".format(
self.project._primary_key,
tag="footag",
)
self.app.post_json(url, {}, auth=self.auth)
self.project.reload()
assert_in("footag", self.project.tags)
def test_remove_tag(self):
self.project.add_tag("footag", auth=self.consolidate_auth1, save=True)
assert_in("footag", self.project.tags)
url = "/api/v1/project/{0}/removetag/{tag}/".format(
self.project._primary_key,
tag="footag",
)
self.app.post_json(url, {}, auth=self.auth)
self.project.reload()
assert_not_in("footag", self.project.tags)
def test_register_template_page(self):
url = "/api/v1/project/{0}/register/Replication_Recipe_(Brandt_et_al.,_2013):_Post-Completion/".format(
self.project._primary_key)
self.app.post_json(url, {}, auth=self.auth)
self.project.reload()
# A registration was added to the project's registration list
assert_equal(len(self.project.node__registrations), 1)
# A log event was saved
assert_equal(self.project.logs[-1].action, "project_registered")
# Most recent node is a registration
reg = Node.load(self.project.node__registrations[-1])
assert_true(reg.is_registration)
def test_register_template_page_with_invalid_template_name(self):
url = self.project.web_url_for('node_register_template_page', template='invalid')
res = self.app.get(url, expect_errors=True, auth=self.auth)
assert_equal(res.status_code, 404)
assert_in('Template not found', res)
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1478
def test_registered_projects_contributions(self):
# register a project
self.project.register_node(None, Auth(user=self.project.creator), '', None)
# get the first registered project of a project
url = self.project.api_url_for('get_registrations')
res = self.app.get(url, auth=self.auth)
data = res.json
pid = data['nodes'][0]['id']
url2 = api_url_for('get_summary', pid=pid)
# count contributions
res2 = self.app.get(url2, {'rescale_ratio': data['rescale_ratio']}, auth=self.auth)
data = res2.json
assert_is_not_none(data['summary']['nlogs'])
def test_forks_contributions(self):
# fork a project
self.project.fork_node(Auth(user=self.project.creator))
# get the first forked project of a project
url = self.project.api_url_for('get_forks')
res = self.app.get(url, auth=self.auth)
data = res.json
pid = data['nodes'][0]['id']
url2 = api_url_for('get_summary', pid=pid)
# count contributions
res2 = self.app.get(url2, {'rescale_ratio': data['rescale_ratio']}, auth=self.auth)
data = res2.json
assert_is_not_none(data['summary']['nlogs'])
@mock.patch('framework.transactions.commands.begin')
@mock.patch('framework.transactions.commands.rollback')
@mock.patch('framework.transactions.commands.commit')
def test_get_logs(self, *mock_commands):
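        # Transaction commands are mocked so the test can assert that fetching
        # logs never begins, commits, or rolls back a transaction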
# Add some logs
for _ in range(5):
self.project.logs.append(
NodeLogFactory(
user=self.user1,
action='file_added',
params={'node': self.project._id}
)
)
self.project.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url, auth=self.auth)
for mock_command in mock_commands:
assert_false(mock_command.called)
self.project.reload()
data = res.json
assert_equal(len(data['logs']), len(self.project.logs))
assert_equal(data['total'], len(self.project.logs))
assert_equal(data['page'], 0)
assert_equal(data['pages'], 1)
most_recent = data['logs'][0]
assert_equal(most_recent['action'], 'file_added')
def test_get_logs_invalid_page_input(self):
url = self.project.api_url_for('get_logs')
invalid_input = 'invalid page'
res = self.app.get(
url, {'page': invalid_input}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(
res.json['message_long'],
'Invalid value for "page".'
)
def test_get_logs_with_count_param(self):
# Add some logs
for _ in range(5):
self.project.logs.append(
NodeLogFactory(
user=self.user1,
action='file_added',
params={'node': self.project._id}
)
)
self.project.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url, {'count': 3}, auth=self.auth)
assert_equal(len(res.json['logs']), 3)
# 1 project create log, 1 add contributor log, then 5 generated logs
assert_equal(res.json['total'], 5 + 2)
assert_equal(res.json['page'], 0)
assert_equal(res.json['pages'], 3)
def test_get_logs_defaults_to_ten(self):
# Add some logs
for _ in range(12):
self.project.logs.append(
NodeLogFactory(
user=self.user1,
action='file_added',
params={'node': self.project._id}
)
)
self.project.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url, auth=self.auth)
assert_equal(len(res.json['logs']), 10)
        # 1 project create log, 1 add contributor log, then 12 generated logs
assert_equal(res.json['total'], 12 + 2)
assert_equal(res.json['page'], 0)
assert_equal(res.json['pages'], 2)
def test_get_more_logs(self):
# Add some logs
for _ in range(12):
self.project.logs.append(
NodeLogFactory(
user=self.user1,
action="file_added",
params={"node": self.project._id}
)
)
self.project.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url, {"page": 1}, auth=self.auth)
assert_equal(len(res.json['logs']), 4)
        # 1 project create log, 1 add contributor log, then 12 generated logs
assert_equal(res.json['total'], 12 + 2)
assert_equal(res.json['page'], 1)
assert_equal(res.json['pages'], 2)
def test_logs_private(self):
"""Add logs to a public project, then to its private component. Get
the ten most recent logs; assert that ten logs are returned and that
all belong to the project and not its component.
"""
# Add some logs
for _ in range(15):
self.project.add_log(
auth=self.consolidate_auth1,
action='file_added',
params={'node': self.project._id}
)
self.project.is_public = True
self.project.save()
child = NodeFactory(parent=self.project)
for _ in range(5):
child.add_log(
auth=self.consolidate_auth1,
action='file_added',
params={'node': child._id}
)
url = self.project.api_url_for('get_logs')
res = self.app.get(url).maybe_follow()
assert_equal(len(res.json['logs']), 10)
# 1 project create log, 1 add contributor log, then 15 generated logs
assert_equal(res.json['total'], 15 + 2)
assert_equal(res.json['page'], 0)
assert_equal(res.json['pages'], 2)
assert_equal(
[self.project._id] * 10,
[
log['params']['node']
for log in res.json['logs']
]
)
def test_can_view_public_log_from_private_project(self):
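        # Fork a public project, then make the original private; the fork's
        # logs should remain visible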
project = ProjectFactory(is_public=True)
fork = project.fork_node(auth=self.consolidate_auth1)
url = fork.api_url_for('get_logs')
res = self.app.get(url, auth=self.auth)
assert_equal(
[each['action'] for each in res.json['logs']],
['node_forked', 'project_created'],
)
project.is_public = False
project.save()
res = self.app.get(url, auth=self.auth)
assert_equal(
[each['action'] for each in res.json['logs']],
['node_forked', 'project_created'],
)
def test_for_private_component_log(self):
for _ in range(5):
self.project.add_log(
auth=self.consolidate_auth1,
action='file_added',
params={'node': self.project._id}
)
self.project.is_public = True
self.project.save()
child = NodeFactory(parent=self.project)
child.is_public = False
child.set_title("foo", auth=self.consolidate_auth1)
child.set_title("bar", auth=self.consolidate_auth1)
child.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url).maybe_follow()
assert_equal(len(res.json['logs']), 7)
assert_not_in(
child._id,
[
log['params']['node']
for log in res.json['logs']
]
)
def test_remove_project(self):
url = self.project.api_url
res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
self.project.reload()
assert_equal(self.project.is_deleted, True)
assert_in('url', res.json)
assert_equal(res.json['url'], '/dashboard/')
def test_private_link_edit_name(self):
link = PrivateLinkFactory()
link.nodes.append(self.project)
link.save()
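        # Sanity check: the factory assigns "link" as the default name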
assert_equal(link.name, "link")
url = self.project.api_url + 'private_link/edit/'
self.app.put_json(
url,
{'pk': link._id, "value": "new name"},
auth=self.auth,
).maybe_follow()
self.project.reload()
link.reload()
assert_equal(link.name, "new name")
def test_remove_private_link(self):
link = PrivateLinkFactory()
link.nodes.append(self.project)
link.save()
url = self.project.api_url_for('remove_private_link')
self.app.delete_json(
url,
{'private_link_id': link._id},
auth=self.auth,
).maybe_follow()
self.project.reload()
link.reload()
assert_true(link.is_deleted)
def test_remove_component(self):
node = NodeFactory(parent=self.project, creator=self.user1)
url = node.api_url
res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
node.reload()
assert_equal(node.is_deleted, True)
assert_in('url', res.json)
assert_equal(res.json['url'], self.project.url)
def test_cant_remove_component_if_not_admin(self):
node = NodeFactory(parent=self.project, creator=self.user1)
non_admin = AuthUserFactory()
node.add_contributor(
non_admin,
permissions=['read', 'write'],
save=True,
)
url = node.api_url
res = self.app.delete_json(
url, {}, auth=non_admin.auth,
expect_errors=True,
).maybe_follow()
assert_equal(res.status_code, http.FORBIDDEN)
assert_false(node.is_deleted)
def test_watch_and_unwatch(self):
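        # Toggling the watch twice should return the watch count to zero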
url = self.project.api_url_for('togglewatch_post')
self.app.post_json(url, {}, auth=self.auth)
res = self.app.get(self.project.api_url, auth=self.auth)
assert_equal(res.json['node']['watched_count'], 1)
self.app.post_json(url, {}, auth=self.auth)
res = self.app.get(self.project.api_url, auth=self.auth)
assert_equal(res.json['node']['watched_count'], 0)
def test_view_project_returns_whether_to_show_wiki_widget(self):
user = AuthUserFactory()
project = ProjectFactory.build(creator=user, is_public=True)
project.add_contributor(user)
project.save()
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_equal(res.status_code, http.OK)
assert_in('show_wiki_widget', res.json['user'])
def test_fork_count_does_not_include_deleted_forks(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
auth = Auth(project.creator)
fork = project.fork_node(auth)
project.save()
fork.remove_node(auth)
fork.save()
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_in('fork_count', res.json['node'])
assert_equal(0, res.json['node']['fork_count'])
class TestEditableChildrenViews(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
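        # Build a four-level project chain with alternating privacy settings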
self.project = ProjectFactory(creator=self.user, is_public=False)
self.child = ProjectFactory(parent=self.project, creator=self.user, is_public=True)
self.grandchild = ProjectFactory(parent=self.child, creator=self.user, is_public=False)
self.great_grandchild = ProjectFactory(parent=self.grandchild, creator=self.user, is_public=True)
self.great_great_grandchild = ProjectFactory(parent=self.great_grandchild, creator=self.user, is_public=False)
url = self.project.api_url_for('get_editable_children')
self.project_results = self.app.get(url, auth=self.user.auth).json
def test_get_editable_children(self):
assert_equal(len(self.project_results['children']), 4)
assert_equal(self.project_results['node']['id'], self.project._id)
def test_editable_children_order(self):
assert_equal(self.project_results['children'][0]['id'], self.child._id)
assert_equal(self.project_results['children'][1]['id'], self.grandchild._id)
assert_equal(self.project_results['children'][2]['id'], self.great_grandchild._id)
assert_equal(self.project_results['children'][3]['id'], self.great_great_grandchild._id)
def test_editable_children_indents(self):
assert_equal(self.project_results['children'][0]['indent'], 0)
assert_equal(self.project_results['children'][1]['indent'], 1)
assert_equal(self.project_results['children'][2]['indent'], 2)
assert_equal(self.project_results['children'][3]['indent'], 3)
def test_editable_children_parents(self):
assert_equal(self.project_results['children'][0]['parent_id'], self.project._id)
assert_equal(self.project_results['children'][1]['parent_id'], self.child._id)
assert_equal(self.project_results['children'][2]['parent_id'], self.grandchild._id)
assert_equal(self.project_results['children'][3]['parent_id'], self.great_grandchild._id)
def test_editable_children_privacy(self):
assert_false(self.project_results['node']['is_public'])
assert_true(self.project_results['children'][0]['is_public'])
assert_false(self.project_results['children'][1]['is_public'])
assert_true(self.project_results['children'][2]['is_public'])
assert_false(self.project_results['children'][3]['is_public'])
def test_editable_children_titles(self):
assert_equal(self.project_results['node']['title'], self.project.title)
assert_equal(self.project_results['children'][0]['title'], self.child.title)
assert_equal(self.project_results['children'][1]['title'], self.grandchild.title)
assert_equal(self.project_results['children'][2]['title'], self.great_grandchild.title)
assert_equal(self.project_results['children'][3]['title'], self.great_great_grandchild.title)
class TestChildrenViews(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
def test_get_children(self):
project = ProjectFactory(creator=self.user)
child = NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
nodes = res.json['nodes']
assert_equal(len(nodes), 1)
assert_equal(nodes[0]['id'], child._primary_key)
def test_get_children_includes_pointers(self):
project = ProjectFactory(creator=self.user)
pointed = ProjectFactory()
project.add_pointer(pointed, Auth(self.user))
project.save()
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
nodes = res.json['nodes']
assert_equal(len(nodes), 1)
assert_equal(nodes[0]['title'], pointed.title)
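        # The child entry's id is the pointer's key, not the pointed-to project's key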
pointer = Pointer.find_one(Q('node', 'eq', pointed))
assert_equal(nodes[0]['id'], pointer._primary_key)
def test_get_children_filter_for_permissions(self):
# self.user has admin access to this project
project = ProjectFactory(creator=self.user)
        # self.user only has read access to this project, which project points to
read_only_pointed = ProjectFactory()
read_only_creator = read_only_pointed.creator
read_only_pointed.add_contributor(self.user, auth=Auth(read_only_creator), permissions=['read'])
read_only_pointed.save()
# self.user only has read access to this project, which is a subproject
# of project
read_only = ProjectFactory()
        read_only.add_contributor(self.user, auth=Auth(read_only.creator), permissions=['read'])
        read_only.save()
project.nodes.append(read_only)
# self.user adds a pointer to read_only
project.add_pointer(read_only_pointed, Auth(self.user))
project.save()
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['nodes']), 2)
url = project.api_url_for('get_children', permissions='write')
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['nodes']), 0)
def test_get_children_rescale_ratio(self):
project = ProjectFactory(creator=self.user)
child = NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
rescale_ratio = res.json['rescale_ratio']
assert_is_instance(rescale_ratio, float)
assert_equal(rescale_ratio, _rescale_ratio(Auth(self.user), [child]))
def test_get_children_render_nodes_receives_auth(self):
project = ProjectFactory(creator=self.user)
NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
perm = res.json['nodes'][0]['permissions']
assert_equal(perm, 'admin')
class TestUserProfile(OsfTestCase):
def setUp(self):
super(TestUserProfile, self).setUp()
self.user = AuthUserFactory()
def test_sanitization_of_edit_profile(self):
url = api_url_for('edit_profile', uid=self.user._id)
post_data = {'name': 'fullname', 'value': 'new<b> name</b> '}
        res = self.app.post(url, post_data, auth=self.user.auth)
        assert_equal('new name', res.json['name'])
def test_fmt_date_or_none(self):
with assert_raises(HTTPError) as cm:
            # enter a date before 1900
fmt_date_or_none(dt.datetime(1890, 10, 31, 18, 23, 29, 227))
# error should be raised because date is before 1900
assert_equal(cm.exception.code, http.BAD_REQUEST)
def test_unserialize_social(self):
url = api_url_for('unserialize_social')
payload = {
'personal': 'http://frozen.pizza.com/reviews',
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
self.app.put_json(
url,
payload,
auth=self.user.auth,
)
self.user.reload()
for key, value in payload.iteritems():
assert_equal(self.user.social[key], value)
assert_true(self.user.social['researcherId'] is None)
def test_unserialize_social_validation_failure(self):
url = api_url_for('unserialize_social')
# personal URL is invalid
payload = {
'personal': 'http://invalidurl',
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
res = self.app.put_json(
url,
payload,
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Invalid personal URL.')
def test_serialize_social_editable(self):
self.user.social['twitter'] = 'howtopizza'
self.user.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_true(res.json.get('github') is None)
assert_true(res.json['editable'])
def test_serialize_social_not_editable(self):
user2 = AuthUserFactory()
self.user.social['twitter'] = 'howtopizza'
self.user.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_true(res.json.get('github') is None)
assert_false(res.json['editable'])
def test_serialize_social_addons_editable(self):
self.user.add_addon('github')
user_github = self.user.get_addon('github')
oauth_settings = AddonGitHubOauthSettings()
oauth_settings.github_user_id = 'testuser'
oauth_settings.save()
user_github.oauth_settings = oauth_settings
user_github.save()
user_github.github_user_name = 'howtogithub'
oauth_settings.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(
res.json['addons']['github'],
'howtogithub'
)
def test_serialize_social_addons_not_editable(self):
user2 = AuthUserFactory()
self.user.add_addon('github')
user_github = self.user.get_addon('github')
oauth_settings = AddonGitHubOauthSettings()
oauth_settings.github_user_id = 'testuser'
oauth_settings.save()
user_github.oauth_settings = oauth_settings
user_github.save()
user_github.github_user_name = 'howtogithub'
oauth_settings.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_not_in('addons', res.json)
def test_unserialize_and_serialize_jobs(self):
jobs = [{
'institution': 'an institution',
'department': 'a department',
'title': 'a title',
'startMonth': 'January',
'startYear': '2001',
'endMonth': 'March',
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'title': None,
'startMonth': 'May',
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.jobs), 2)
url = api_url_for('serialize_jobs')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(jobs):
assert_equal(job, res.json['contents'][i])
def test_unserialize_and_serialize_schools(self):
schools = [{
'institution': 'an institution',
'department': 'a department',
'degree': 'a degree',
'startMonth': 1,
'startYear': '2001',
'endMonth': 5,
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'degree': None,
'startMonth': 5,
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.schools), 2)
url = api_url_for('serialize_schools')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(schools):
assert_equal(job, res.json['contents'][i])
def test_unserialize_jobs(self):
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# jobs field is updated
assert_equal(self.user.jobs, jobs)
def test_unserialize_names(self):
fake_fullname_w_spaces = ' {} '.format(fake.name())
names = {
'full': fake_fullname_w_spaces,
'given': 'Tea',
'middle': 'Gray',
'family': 'Pot',
'suffix': 'Ms.',
}
url = api_url_for('unserialize_names')
res = self.app.put_json(url, names, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# user is updated
assert_equal(self.user.fullname, fake_fullname_w_spaces.strip())
assert_equal(self.user.given_name, names['given'])
assert_equal(self.user.middle_names, names['middle'])
assert_equal(self.user.family_name, names['family'])
assert_equal(self.user.suffix, names['suffix'])
def test_unserialize_schools(self):
schools = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'degree': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# schools field is updated
assert_equal(self.user.schools, schools)
def test_unserialize_jobs_valid(self):
jobs_cached = self.user.jobs
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_get_current_user_gravatar_default_size(self):
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_gravatar = res.json['gravatar_url']
assert_true(current_user_gravatar is not None)
url = api_url_for('get_gravatar', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
my_user_gravatar = res.json['gravatar_url']
assert_equal(current_user_gravatar, my_user_gravatar)
def test_get_other_user_gravatar_default_size(self):
user2 = AuthUserFactory()
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_gravatar = res.json['gravatar_url']
url = api_url_for('get_gravatar', uid=user2._id)
res = self.app.get(url, auth=self.user.auth)
user2_gravatar = res.json['gravatar_url']
assert_true(user2_gravatar is not None)
assert_not_equal(current_user_gravatar, user2_gravatar)
def test_get_current_user_gravatar_specific_size(self):
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_default_gravatar = res.json['gravatar_url']
url = api_url_for('current_user_gravatar', size=11)
res = self.app.get(url, auth=self.user.auth)
current_user_small_gravatar = res.json['gravatar_url']
assert_true(current_user_small_gravatar is not None)
assert_not_equal(current_user_default_gravatar, current_user_small_gravatar)
def test_get_other_user_gravatar_specific_size(self):
user2 = AuthUserFactory()
url = api_url_for('get_gravatar', uid=user2._id)
res = self.app.get(url, auth=self.user.auth)
gravatar_default_size = res.json['gravatar_url']
url = api_url_for('get_gravatar', uid=user2._id, size=11)
res = self.app.get(url, auth=self.user.auth)
gravatar_small = res.json['gravatar_url']
assert_true(gravatar_small is not None)
assert_not_equal(gravatar_default_size, gravatar_small)
def test_update_user_timezone(self):
assert_equal(self.user.timezone, 'Etc/UTC')
payload = {'timezone': 'America/New_York', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.timezone, 'America/New_York')
def test_update_user_locale(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': 'de_DE', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'de_DE')
def test_update_user_locale_none(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': None, 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_update_user_locale_empty_string(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': '', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_cannot_update_user_without_user_id(self):
user1 = AuthUserFactory()
url = api_url_for('update_user')
header = {'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], '"id" is required')
class TestUserAccount(OsfTestCase):
def setUp(self):
super(TestUserAccount, self).setUp()
self.user = AuthUserFactory()
self.user.set_password('password')
self.user.save()
@mock.patch('website.profile.views.push_status_message')
def test_password_change_valid(self, mock_push_status_message):
old_password = 'password'
new_password = 'Pa$$w0rd'
confirm_password = new_password
url = web_url_for('user_account_password')
post_data = {
'old_password': old_password,
'new_password': new_password,
'confirm_password': confirm_password,
}
res = self.app.post(url, post_data, auth=self.user.auth)
        assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
        assert_equal(res.status_code, 200)
self.user.reload()
assert_true(self.user.check_password(new_password))
assert_true(mock_push_status_message.called)
assert_in('Password updated successfully', mock_push_status_message.mock_calls[0][1][0])
@mock.patch('website.profile.views.push_status_message')
def test_password_change_invalid(self, mock_push_status_message, old_password='', new_password='',
confirm_password='', error_message='Old password is invalid'):
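        # Doubles as a helper: the more specific invalid-password tests below
        # call it directly with their own inputs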
url = web_url_for('user_account_password')
post_data = {
'old_password': old_password,
'new_password': new_password,
'confirm_password': confirm_password,
}
res = self.app.post(url, post_data, auth=self.user.auth)
        assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
        assert_equal(res.status_code, 200)
self.user.reload()
assert_false(self.user.check_password(new_password))
assert_true(mock_push_status_message.called)
assert_in(error_message, mock_push_status_message.mock_calls[0][1][0])
def test_password_change_invalid_old_password(self):
self.test_password_change_invalid(
old_password='invalid old password',
new_password='new password',
confirm_password='new password',
error_message='Old password is invalid',
)
def test_password_change_invalid_confirm_password(self):
self.test_password_change_invalid(
old_password='password',
new_password='new password',
confirm_password='invalid confirm password',
error_message='Password does not match the confirmation',
)
def test_password_change_invalid_new_password_length(self):
self.test_password_change_invalid(
old_password='password',
new_password='12345',
confirm_password='12345',
error_message='Password should be at least six characters',
)
def test_password_change_invalid_blank_password(self, old_password='', new_password='', confirm_password=''):
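        # Helper for the blank-password tests below; delegates to
        # test_password_change_invalid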
self.test_password_change_invalid(
old_password=old_password,
new_password=new_password,
confirm_password=confirm_password,
error_message='Passwords cannot be blank',
)
def test_password_change_invalid_blank_new_password(self):
for password in ('', ' '):
self.test_password_change_invalid_blank_password('password', password, 'new password')
def test_password_change_invalid_blank_confirm_password(self):
for password in ('', ' '):
self.test_password_change_invalid_blank_password('password', 'new password', password)
class TestAddingContributorViews(OsfTestCase):
def setUp(self):
super(TestAddingContributorViews, self).setUp()
ensure_schemas()
self.creator = AuthUserFactory()
self.project = ProjectFactory(creator=self.creator)
# Authenticate all requests
self.app.authenticate(*self.creator.auth)
def test_serialize_unregistered_without_record(self):
name, email = fake.name(), fake.email()
res = serialize_unregistered(fullname=name, email=email)
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
assert_equal(res['id'], None)
assert_false(res['registered'])
assert_true(res['gravatar'])
assert_false(res['active'])
def test_deserialize_contributors(self):
contrib = UserFactory()
unreg = UnregUserFactory()
name, email = fake.name(), fake.email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [
add_contributor_json(contrib),
serialize_unregistered(fake.name(), unreg.username),
unreg_no_record
]
contrib_data[0]['permission'] = 'admin'
contrib_data[1]['permission'] = 'write'
contrib_data[2]['permission'] = 'read'
contrib_data[0]['visible'] = True
contrib_data[1]['visible'] = True
contrib_data[2]['visible'] = True
res = deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator))
assert_equal(len(res), len(contrib_data))
assert_true(res[0]['user'].is_registered)
assert_false(res[1]['user'].is_registered)
assert_true(res[1]['user']._id)
assert_false(res[2]['user'].is_registered)
assert_true(res[2]['user']._id)
def test_deserialize_contributors_sends_unreg_contributor_added_signal(self):
unreg = UnregUserFactory()
from website.project.model import unreg_contributor_added
serialized = [serialize_unregistered(fake.name(), unreg.username)]
serialized[0]['visible'] = True
with capture_signals() as mock_signals:
deserialize_contributors(self.project, serialized,
auth=Auth(self.creator))
assert_equal(mock_signals.signals_sent(), set([unreg_contributor_added]))
def test_serialize_unregistered_with_record(self):
name, email = fake.name(), fake.email()
user = self.project.add_unregistered_contributor(fullname=name,
email=email, auth=Auth(self.project.creator))
self.project.save()
res = serialize_unregistered(
fullname=name,
email=email
)
assert_false(res['active'])
assert_false(res['registered'])
assert_equal(res['id'], user._primary_key)
assert_true(res['gravatar_url'])
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
def test_add_contributor_with_unreg_contribs_and_reg_contribs(self):
n_contributors_pre = len(self.project.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.contributors),
n_contributors_pre + len(payload['users']))
new_unreg = auth.get_user(email=email)
assert_false(new_unreg.is_registered)
# unclaimed record was added
new_unreg.reload()
assert_in(self.project._primary_key, new_unreg.unclaimed_records)
rec = new_unreg.get_unclaimed_record(self.project._primary_key)
assert_equal(rec['name'], name)
assert_equal(rec['email'], email)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_add_contributors_post_only_sends_one_email_to_unreg_user(
self, mock_send_claim_email):
# Project has components
        comp1 = NodeFactory(creator=self.creator)
        comp2 = NodeFactory(creator=self.creator)
self.project.nodes.append(comp1)
self.project.nodes.append(comp2)
self.project.save()
# An unreg user is added to the project AND its components
        unreg_user = {  # dict because user has no previous unreg record
'id': None,
'registered': False,
'fullname': fake.name(),
'email': fake.email(),
'permission': 'admin',
'visible': True,
}
payload = {
'users': [unreg_user],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
        # send_claim_email (which finalizes the invitation) should only have been called once
assert_equal(mock_send_claim_email.call_count, 1)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_email_sent_when_unreg_user_is_added(self, send_mail):
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
payload = {
'users': [pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
assert_true(send_mail.called)
assert_true(send_mail.called_with(email=email))
def test_add_multiple_contributors_only_adds_one_log(self):
n_logs_pre = len(self.project.logs)
reg_user = UserFactory()
name = fake.name()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': fake.email(),
'permission': 'write',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.logs), n_logs_pre + 1)
def test_add_contribs_to_multiple_nodes(self):
child = NodeFactory(parent=self.project, creator=self.creator)
n_contributors_pre = len(child.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': [self.project._primary_key, child._primary_key]
}
url = "/api/v1/project/{0}/contributors/".format(self.project._id)
self.app.post_json(url, payload).maybe_follow()
child.reload()
assert_equal(len(child.contributors),
n_contributors_pre + len(payload['users']))
class TestUserInviteViews(OsfTestCase):
def setUp(self):
super(TestUserInviteViews, self).setUp()
ensure_schemas()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.invite_url = '/api/v1/project/{0}/invite_contributor/'.format(
self.project._primary_key)
def test_invite_contributor_post_if_not_in_db(self):
name, email = fake.name(), fake.email()
res = self.app.post_json(
self.invite_url,
{'fullname': name, 'email': email},
auth=self.user.auth,
)
contrib = res.json['contributor']
assert_true(contrib['id'] is None)
assert_equal(contrib['fullname'], name)
assert_equal(contrib['email'], email)
def test_invite_contributor_post_if_unreg_already_in_db(self):
        # An unreg user is added to a different project
name, email = fake.name(), fake.email()
project2 = ProjectFactory()
unreg_user = project2.add_unregistered_contributor(fullname=name, email=email,
auth=Auth(project2.creator))
project2.save()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': email}, auth=self.user.auth)
expected = add_contributor_json(unreg_user)
expected['fullname'] = name
expected['email'] = email
assert_equal(res.json['contributor'], expected)
    def test_invite_contributor_post_if_email_already_registered(self):
reg_user = UserFactory()
        # Tries to invite a user that is already registered
res = self.app.post_json(self.invite_url,
{'fullname': fake.name(), 'email': reg_user.username},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invite_contributor_post_if_user_is_already_contributor(self):
unreg_user = self.project.add_unregistered_contributor(
fullname=fake.name(), email=fake.email(),
auth=Auth(self.project.creator)
)
self.project.save()
# Tries to invite unreg user that is already a contributor
res = self.app.post_json(self.invite_url,
{'fullname': fake.name(), 'email': unreg_user.username},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invite_contributor_with_no_email(self):
name = fake.name()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': None}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
data = res.json
assert_equal(data['status'], 'success')
assert_equal(data['contributor']['fullname'], name)
assert_true(data['contributor']['email'] is None)
assert_false(data['contributor']['registered'])
def test_invite_contributor_requires_fullname(self):
res = self.app.post_json(self.invite_url,
{'email': 'brian@queen.com', 'fullname': ''}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_given_email(self, send_mail):
project = ProjectFactory()
given_email = fake.email()
unreg_user = project.add_unregistered_contributor(
fullname=fake.name(),
email=given_email,
auth=Auth(project.creator),
)
project.save()
send_claim_email(email=given_email, user=unreg_user, node=project)
assert_true(send_mail.called)
assert_true(send_mail.called_with(
to_addr=given_email,
mail=mails.INVITE
))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_referrer(self, send_mail):
project = ProjectFactory()
referrer = project.creator
given_email, real_email = fake.email(), fake.email()
        unreg_user = project.add_unregistered_contributor(
            fullname=fake.name(),
            email=given_email,
            auth=Auth(referrer),
        )
project.save()
send_claim_email(email=real_email, user=unreg_user, node=project)
assert_true(send_mail.called)
# email was sent to referrer
assert_true(send_mail.called_with(
to_addr=referrer.username,
mail=mails.FORWARD_INVITE
))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_before_throttle_expires(self, send_mail):
project = ProjectFactory()
given_email = fake.email()
unreg_user = project.add_unregistered_contributor(
fullname=fake.name(),
email=given_email,
auth=Auth(project.creator),
)
project.save()
send_claim_email(email=fake.email(), user=unreg_user, node=project)
# 2nd call raises error because throttle hasn't expired
with assert_raises(HTTPError):
send_claim_email(email=fake.email(), user=unreg_user, node=project)
send_mail.assert_not_called()
class TestClaimViews(OsfTestCase):
def setUp(self):
super(TestClaimViews, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
self.given_name = fake.name()
self.given_email = fake.email()
self.user = self.project.add_unregistered_contributor(
fullname=self.given_name,
email=self.given_email,
auth=Auth(user=self.referrer)
)
self.project.save()
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_with_registered_user_id(self, send_mail):
# registered user who is attempting to claim the unclaimed contributor
reg_user = UserFactory()
payload = {
# pk of unreg user record
'pk': self.user._primary_key,
'claimerId': reg_user._primary_key
}
url = '/api/v1/user/{uid}/{pid}/claim/email/'.format(
uid=self.user._primary_key,
pid=self.project._primary_key,
)
res = self.app.post_json(url, payload)
# mail was sent
assert_true(send_mail.called)
# ... to the correct address
assert_true(send_mail.called_with(to_addr=self.given_email))
# view returns the correct JSON
assert_equal(res.json, {
'status': 'success',
'email': reg_user.username,
'fullname': self.given_name,
})
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_registered_email(self, mock_send_mail):
reg_user = UserFactory()
send_claim_registered_email(
claimer=reg_user,
unreg_user=self.user,
node=self.project
)
mock_send_mail.assert_called()
assert_equal(mock_send_mail.call_count, 2)
first_call_args = mock_send_mail.call_args_list[0][0]
assert_equal(first_call_args[0], self.referrer.username)
second_call_args = mock_send_mail.call_args_list[1][0]
assert_equal(second_call_args[0], reg_user.username)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_registered_email_before_throttle_expires(self, mock_send_mail):
reg_user = UserFactory()
send_claim_registered_email(
claimer=reg_user,
unreg_user=self.user,
node=self.project,
)
# second call raises error because it was called before throttle period
with assert_raises(HTTPError):
send_claim_registered_email(
claimer=reg_user,
unreg_user=self.user,
node=self.project,
)
mock_send_mail.assert_not_called()
@mock.patch('website.project.views.contributor.send_claim_registered_email')
def test_claim_user_post_with_email_already_registered_sends_correct_email(
self, send_claim_registered_email):
reg_user = UserFactory()
payload = {
'value': reg_user.username,
'pk': self.user._primary_key
}
url = self.project.api_url_for('claim_user_post', uid=self.user._id)
self.app.post_json(url, payload)
assert_true(send_claim_registered_email.called)
def test_user_with_removed_unclaimed_url_claiming(self):
""" Tests that when an unclaimed user is removed from a project, the
unregistered user object does not retain the token.
"""
self.project.remove_contributor(self.user, Auth(user=self.referrer))
assert_not_in(
self.project._primary_key,
self.user.unclaimed_records.keys()
)
def test_user_with_claim_url_cannot_claim_twice(self):
""" Tests that when an unclaimed user is replaced on a project with a
claimed user, the unregistered user object does not retain the token.
"""
reg_user = AuthUserFactory()
self.project.replace_contributor(self.user, reg_user)
assert_not_in(
self.project._primary_key,
self.user.unclaimed_records.keys()
)
def test_claim_user_form_redirects_to_password_confirm_page_if_user_is_logged_in(self):
reg_user = AuthUserFactory()
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(url, auth=reg_user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=reg_user.auth)
token = self.user.get_unclaimed_record(self.project._primary_key)['token']
expected = self.project.web_url_for(
'claim_user_registered',
uid=self.user._id,
token=token,
)
assert_equal(res.request.path, expected)
def test_get_valid_form(self):
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(url).maybe_follow()
assert_equal(res.status_code, 200)
def test_invalid_claim_form_redirects_to_register_page(self):
uid = self.user._primary_key
pid = self.project._primary_key
url = '/user/{uid}/{pid}/claim/?token=badtoken'.format(**locals())
res = self.app.get(url, expect_errors=True).maybe_follow()
assert_equal(res.status_code, 200)
assert_equal(res.request.path, web_url_for('auth_login'))
def test_posting_to_claim_form_with_valid_data(self):
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.post(url, {
'username': self.user.username,
'password': 'killerqueen',
'password2': 'killerqueen'
}).maybe_follow()
assert_equal(res.status_code, 200)
self.user.reload()
assert_true(self.user.is_registered)
assert_true(self.user.is_active)
assert_not_in(self.project._primary_key, self.user.unclaimed_records)
def test_posting_to_claim_form_removes_all_unclaimed_data(self):
# user has multiple unclaimed records
p2 = ProjectFactory(creator=self.referrer)
self.user.add_unclaimed_record(node=p2, referrer=self.referrer,
given_name=fake.name())
self.user.save()
assert_true(len(self.user.unclaimed_records.keys()) > 1) # sanity check
url = self.user.get_claim_url(self.project._primary_key)
self.app.post(url, {
'username': self.given_email,
'password': 'bohemianrhap',
'password2': 'bohemianrhap'
})
self.user.reload()
assert_equal(self.user.unclaimed_records, {})
def test_posting_to_claim_form_sets_fullname_to_given_name(self):
# User is created with a full name
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
# User invited with a different name
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.project.creator),
)
self.project.save()
# Goes to claim url
claim_url = new_user.get_claim_url(self.project._id)
self.app.post(claim_url, {
'username': unreg.username,
'password': 'killerqueen', 'password2': 'killerqueen'
})
unreg.reload()
# Full name was set correctly
assert_equal(unreg.fullname, different_name)
# CSL names were set correctly
parsed_name = impute_names_model(different_name)
assert_equal(unreg.given_name, parsed_name['given_name'])
assert_equal(unreg.family_name, parsed_name['family_name'])
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_returns_fullname(self, send_mail):
url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
self.project._primary_key)
res = self.app.post_json(url,
{'value': self.given_email,
'pk': self.user._primary_key},
auth=self.referrer.auth)
assert_equal(res.json['fullname'], self.given_name)
assert_true(send_mail.called)
assert_true(send_mail.called_with(to_addr=self.given_email))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_if_email_is_different_from_given_email(self, send_mail):
email = fake.email() # email that is different from the one the referrer gave
url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
self.project._primary_key)
self.app.post_json(url,
{'value': email, 'pk': self.user._primary_key}
)
assert_true(send_mail.called)
assert_equal(send_mail.call_count, 2)
# `call` objects have no `called_with` method; unpack each recorded
# call and check which address it went to
_, invited_args, invited_kwargs = send_mail.mock_calls[0]
assert_in(email, list(invited_args) + list(invited_kwargs.values()))
_, referrer_args, referrer_kwargs = send_mail.mock_calls[1]
assert_in(self.given_email, list(referrer_args) + list(referrer_kwargs.values()))
def test_claim_url_with_bad_token_returns_400(self):
url = self.project.web_url_for(
'claim_user_registered',
uid=self.user._id,
token='badtoken',
)
res = self.app.get(url, auth=self.referrer.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_cannot_claim_user_with_user_who_is_already_contributor(self):
# user who is already a contributor to the project
contrib = AuthUserFactory.build()
contrib.set_password('underpressure')
contrib.save()
self.project.add_contributor(contrib, auth=Auth(self.project.creator))
self.project.save()
# Claiming user goes to claim url, but contrib is already logged in
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(
url,
auth=contrib.auth,
).follow(
auth=contrib.auth,
expect_errors=True,
)
# Response is a 400
assert_equal(res.status_code, 400)
class TestWatchViews(OsfTestCase):
def setUp(self):
super(TestWatchViews, self).setUp()
self.user = UserFactory.build()
api_key = ApiKeyFactory()
self.user.api_keys.append(api_key)
self.user.save()
self.consolidate_auth = Auth(user=self.user, api_key=api_key)
self.auth = ('test', self.user.api_keys[0]._id) # used for requests auth
# A public project
self.project = ProjectFactory(is_public=True)
self.project.save()
# Manually reset log date to 100 days ago so it won't show up in feed
self.project.logs[0].date = dt.datetime.utcnow() - dt.timedelta(days=100)
self.project.logs[0].save()
# A log added now
self.last_log = self.project.add_log(
NodeLog.TAG_ADDED,
params={'node': self.project._primary_key},
auth=self.consolidate_auth,
log_date=dt.datetime.utcnow(),
save=True,
)
# Clear watched list
self.user.watched = []
self.user.save()
def test_watching_a_project_appends_to_users_watched_list(self):
n_watched_then = len(self.user.watched)
url = '/api/v1/project/{0}/watch/'.format(self.project._id)
res = self.app.post_json(url,
params={"digest": True},
auth=self.auth)
assert_equal(res.json['watchCount'], 1)
self.user.reload()
n_watched_now = len(self.user.watched)
assert_equal(res.status_code, 200)
assert_equal(n_watched_now, n_watched_then + 1)
assert_true(self.user.watched[-1].digest)
def test_watching_project_twice_returns_400(self):
url = "/api/v1/project/{0}/watch/".format(self.project._id)
res = self.app.post_json(url,
params={},
auth=self.auth)
assert_equal(res.status_code, 200)
# User tries to watch a node she's already watching
res2 = self.app.post_json(url,
params={},
auth=self.auth,
expect_errors=True)
assert_equal(res2.status_code, http.BAD_REQUEST)
def test_unwatching_a_project_removes_from_watched_list(self):
# The user has already watched a project
watch_config = WatchConfigFactory(node=self.project)
self.user.watch(watch_config)
self.user.save()
n_watched_then = len(self.user.watched)
url = '/api/v1/project/{0}/unwatch/'.format(self.project._id)
res = self.app.post_json(url, {}, auth=self.auth)
self.user.reload()
n_watched_now = len(self.user.watched)
assert_equal(res.status_code, 200)
assert_equal(n_watched_now, n_watched_then - 1)
assert_false(self.user.is_watching(self.project))
def test_toggle_watch(self):
# The user is not watching project
assert_false(self.user.is_watching(self.project))
url = "/api/v1/project/{0}/togglewatch/".format(self.project._id)
res = self.app.post_json(url, {}, auth=self.auth)
# The response json has a watchcount and watched property
assert_equal(res.json['watchCount'], 1)
assert_true(res.json['watched'])
assert_equal(res.status_code, 200)
self.user.reload()
# The user is now watching the project
assert_true(self.user.is_watching(self.project))
def test_toggle_watch_node(self):
# The project has a public sub-node
node = NodeFactory(creator=self.user, parent=self.project, is_public=True)
url = "/api/v1/project/{}/node/{}/togglewatch/".format(self.project._id,
node._id)
res = self.app.post_json(url, {}, auth=self.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# The user is now watching the sub-node
assert_true(res.json['watched'])
assert_true(self.user.is_watching(node))
def test_get_watched_logs(self):
project = ProjectFactory()
# Add some logs
for _ in range(12):
project.logs.append(NodeLogFactory(user=self.user, action="file_added"))
project.save()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = api_url_for("watched_logs_get")
res = self.app.get(url, auth=self.auth)
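# 13 logs exist (1 project-create log + 12 generated); the first page returns 10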
assert_equal(len(res.json['logs']), 10)
# 1 project create log then 12 generated logs
assert_equal(res.json['total'], 12 + 1)
assert_equal(res.json['page'], 0)
assert_equal(res.json['pages'], 2)
assert_equal(res.json['logs'][0]['action'], 'file_added')
def test_get_more_watched_logs(self):
project = ProjectFactory()
# Add some logs
for _ in range(12):
project.logs.append(NodeLogFactory(user=self.user, action="file_added"))
project.save()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = api_url_for("watched_logs_get")
page = 1
res = self.app.get(url, {'page': page}, auth=self.auth)
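# page 1 returns the remaining 3 of the 13 logs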
assert_equal(len(res.json['logs']), 3)
# 1 project create log then 12 generated logs
assert_equal(res.json['total'], 12 + 1)
assert_equal(res.json['page'], page)
assert_equal(res.json['pages'], 2)
assert_equal(res.json['logs'][0]['action'], 'file_added')
def test_get_more_watched_logs_invalid_page(self):
project = ProjectFactory()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = api_url_for("watched_logs_get")
invalid_page = 'invalid page'
res = self.app.get(
url, {'page': invalid_page}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(
res.json['message_long'],
'Invalid value for "page".'
)
def test_get_more_watched_logs_invalid_size(self):
project = ProjectFactory()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = api_url_for("watched_logs_get")
invalid_size = 'invalid size'
res = self.app.get(
url, {'size': invalid_size}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(
res.json['message_long'],
'Invalid value for "size".'
)
class TestPointerViews(OsfTestCase):
def setUp(self):
super(TestPointerViews, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109
def test_get_pointed_excludes_folders(self):
pointer_project = ProjectFactory(is_public=True) # project that points to another project
pointed_project = ProjectFactory(creator=self.user) # project that other project points to
pointer_project.add_pointer(pointed_project, Auth(pointer_project.creator), save=True)
# Project is in a dashboard folder
folder = FolderFactory(creator=pointed_project.creator)
folder.add_pointer(pointed_project, Auth(pointed_project.creator), save=True)
url = pointed_project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# pointer_project's id is included in response, but folder's id is not
pointer_ids = [each['id'] for each in res.json['pointed']]
assert_in(pointer_project._id, pointer_ids)
assert_not_in(folder._id, pointer_ids)
def test_add_pointers(self):
url = self.project.api_url + 'pointer/'
node_ids = [
NodeFactory()._id
for _ in range(5)
]
self.app.post_json(
url,
{'nodeIds': node_ids},
auth=self.user.auth,
).maybe_follow()
self.project.reload()
assert_equal(
len(self.project.nodes),
5
)
def test_add_the_same_pointer_more_than_once(self):
url = self.project.api_url + 'pointer/'
double_node = NodeFactory()
self.app.post_json(
url,
{'nodeIds': [double_node._id]},
auth=self.user.auth,
)
res = self.app.post_json(
url,
{'nodeIds': [double_node._id]},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_add_pointers_not_logged_in(self):
url = self.project.api_url_for('add_pointers')
node_ids = [
NodeFactory()._id
for _ in range(5)
]
res = self.app.post_json(
url,
{'nodeIds': node_ids},
auth=None,
expect_errors=True
)
assert_equal(res.status_code, 401)
def test_add_pointers_public_non_contributor(self):
project2 = ProjectFactory()
project2.set_privacy('public')
project2.save()
url = self.project.api_url_for('add_pointers')
self.app.post_json(
url,
{'nodeIds': [project2._id]},
auth=self.user.auth,
).maybe_follow()
self.project.reload()
assert_equal(
len(self.project.nodes),
1
)
def test_add_pointers_contributor(self):
user2 = AuthUserFactory()
self.project.add_contributor(user2)
self.project.save()
url = self.project.api_url_for('add_pointers')
node_ids = [
NodeFactory()._id
for _ in range(5)
]
self.app.post_json(
url,
{'nodeIds': node_ids},
auth=user2.auth,
).maybe_follow()
self.project.reload()
assert_equal(
len(self.project.nodes),
5
)
def test_add_pointers_not_provided(self):
url = self.project.api_url + 'pointer/'
res = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_move_pointers(self):
project_two = ProjectFactory(creator=self.user)
url = api_url_for('move_pointers')
node = NodeFactory()
pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
assert_equal(len(self.project.nodes), 1)
assert_equal(len(project_two.nodes), 0)
user_auth = self.user.auth
move_request = {
'fromNodeId': self.project._id,
'toNodeId': project_two._id,
'pointerIds': [pointer.node._id],
}
self.app.post_json(
url,
move_request,
auth=user_auth,
).maybe_follow()
self.project.reload()
project_two.reload()
assert_equal(len(self.project.nodes), 0)
assert_equal(len(project_two.nodes), 1)
def test_remove_pointer(self):
url = self.project.api_url + 'pointer/'
node = NodeFactory()
pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
self.app.delete_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth,
)
self.project.reload()
assert_equal(
len(self.project.nodes),
0
)
def test_remove_pointer_not_provided(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_remove_pointer_not_found(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(
url,
{'pointerId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_remove_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/'
node = NodeFactory()
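# Build a pointer that was never attached to the project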
pointer = Pointer(node=node)
res = self.app.delete_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_fork_pointer(self):
url = self.project.api_url + 'pointer/fork/'
node = NodeFactory(creator=self.user)
pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
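# No explicit status assertion needed: webtest raises for 4xx/5xx
# responses, so a successful post is itself the assertion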
self.app.post_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth
)
def test_fork_pointer_not_provided(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(url, {}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_found(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(
url,
{'pointerId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/fork/'
node = NodeFactory()
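# A pointer that is not attached to the project, so forking it must fail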
pointer = Pointer(node=node)
res = self.app.post_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_before_fork_with_pointer(self):
"Assert that link warning appears in before fork callback."
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 1)
def test_before_register_with_pointer(self):
"Assert that link warning appears in before register callback."
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your registration' in prompt
]
assert_equal(len(prompts), 1)
def test_before_fork_no_pointer(self):
"Assert that link warning does not appear in before fork callback."
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 0)
def test_before_register_no_pointer(self):
"Assert that link warning does not appear in before register callback."
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your registration' in prompt
]
assert_equal(len(prompts), 0)
def test_get_pointed(self):
pointing_node = ProjectFactory(creator=self.user)
pointing_node.add_pointer(self.project, auth=Auth(self.user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], pointing_node.url)
assert_equal(pointed[0]['title'], pointing_node.title)
assert_equal(pointed[0]['authorShort'], abbrev_authors(pointing_node))
def test_get_pointed_private(self):
secret_user = UserFactory()
pointing_node = ProjectFactory(creator=secret_user)
pointing_node.add_pointer(self.project, auth=Auth(secret_user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], None)
assert_equal(pointed[0]['title'], 'Private Component')
assert_equal(pointed[0]['authorShort'], 'Private Author(s)')
class TestPublicViews(OsfTestCase):
def test_explore(self):
res = self.app.get("/explore/").maybe_follow()
assert_equal(res.status_code, 200)
def test_forgot_password_get(self):
res = self.app.get(web_url_for('forgot_password_get'))
assert_equal(res.status_code, 200)
assert_in('Forgot Password', res.body)
class TestAuthViews(OsfTestCase):
def setUp(self):
super(TestAuthViews, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
def test_merge_user(self):
dupe = UserFactory(
username="copy@cat.com",
emails=['copy@cat.com']
)
dupe.set_password("copycat")
dupe.save()
url = "/api/v1/user/merge/"
self.app.post_json(
url,
{
"merged_username": "copy@cat.com",
"merged_password": "copycat"
},
auth=self.auth,
)
self.user.reload()
dupe.reload()
assert_true(dupe.is_merged)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_sends_confirm_email(self, send_mail):
url = '/register/'
self.app.post(url, {
'register-fullname': 'Freddie Mercury',
'register-username': 'fred@queen.com',
'register-password': 'killerqueen',
'register-username2': 'fred@queen.com',
'register-password2': 'killerqueen',
})
assert_true(send_mail.called)
# Mock has no `called_with` method (the original check always passed);
# inspect the recorded call arguments instead
args, kwargs = send_mail.call_args
assert_in('fred@queen.com', list(args) + list(kwargs.values()))
def test_register_ok(self):
url = api_url_for('register_user')
name, email, password = fake.name(), fake.email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
user = User.find_one(Q('username', 'eq', email))
assert_equal(user.fullname, name)
def test_register_scrubs_username(self):
"""Usernames are scrubbed of malicious HTML when registering"""
url = api_url_for('register_user')
name = "<i>Eunice</i> O' \"Cornwallis\"<script type='text/javascript' src='http://www.cornify.com/js/cornify.js'></script><script type='text/javascript'>cornify_add()</script>"
email, password = fake.email(), 'underpressure'
res = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
expected_scrubbed_name = "Eunice O' \"Cornwallis\"cornify_add()"
user = User.find_one(Q('username', 'eq', email))
assert_equal(res.status_code, http.OK)
assert_equal(user.fullname, expected_scrubbed_name)
def test_register_email_mismatch(self):
url = api_url_for('register_user')
name, email, password = fake.name(), fake.email(), 'underpressure'
res = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email + 'lol',
'password': password,
},
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
users = User.find(Q('username', 'eq', email))
assert_equal(users.count(), 0)
def test_register_after_being_invited_as_unreg_contributor(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/861
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1021
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1026
# A user is invited as an unregistered contributor
project = ProjectFactory()
name, email = fake.name(), fake.email()
project.add_unregistered_contributor(fullname=name, email=email,
auth=Auth(project.creator))
project.save()
# The new, unregistered user
new_user = User.find_one(Q('username', 'eq', email))
# Instead of following the invitation link, they register at the regular
# registration page
# They use a different name when they register, but same email
real_name = fake.name()
password = 'myprecious'
url = api_url_for('register_user')
payload = {
'fullName': real_name,
'email1': email,
'email2': email,
'password': password,
}
# Send registration request
self.app.post_json(url, payload)
new_user.reload()
# New user confirms by following confirmation link
confirm_url = new_user.get_confirmation_url(email, external=False)
self.app.get(confirm_url)
new_user.reload()
# Password and fullname should be updated
assert_true(new_user.is_confirmed)
assert_true(new_user.check_password(password))
assert_equal(new_user.fullname, real_name)
def test_register_sends_user_registered_signal(self):
url = api_url_for('register_user')
name, email, password = fake.name(), fake.email(), 'underpressure'
with capture_signals() as mock_signals:
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered]))
def test_register_post_sends_user_registered_signal(self):
url = web_url_for('auth_register_post')
name, email, password = fake.name(), fake.email(), 'underpressure'
with capture_signals() as mock_signals:
self.app.post(url, {
'register-fullname': name,
'register-username': email,
'register-password': password,
'register-username2': email,
'register-password2': password
})
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered]))
def test_resend_confirmation_get(self):
res = self.app.get('/resend/')
assert_equal(res.status_code, 200)
def test_confirm_email_clears_unclaimed_records_and_revokes_token(self):
unclaimed_user = UnconfirmedUserFactory()
# unclaimed user has been invited to a project.
referrer = UserFactory()
project = ProjectFactory(creator=referrer)
unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
unclaimed_user.save()
# sanity check
assert_equal(len(unclaimed_user.email_verifications.keys()), 1)
# user goes to email confirmation link
token = unclaimed_user.get_confirmation_token(unclaimed_user.username)
url = web_url_for('confirm_email_get', uid=unclaimed_user._id, token=token)
res = self.app.get(url)
assert_equal(res.status_code, 302)
# unclaimed records and token are cleared
unclaimed_user.reload()
assert_equal(unclaimed_user.unclaimed_records, {})
assert_equal(len(unclaimed_user.email_verifications.keys()), 0)
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation_post_sends_confirm_email(self, send_mail):
# Make sure user has a confirmation token for their primary email
u = UnconfirmedUserFactory()
u.add_unconfirmed_email(u.username)
u.save()
self.app.post('/resend/', {'email': u.username})
assert_true(send_mail.called)
# Inspect the recorded call instead of the no-op `called_with` check
args, kwargs = send_mail.call_args
assert_in(u.username, list(args) + list(kwargs.values()))
# see: https://github.com/CenterForOpenScience/osf.io/issues/1492
@mock.patch('website.security.random_string')
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation_post_regenerates_token(self, send_mail, random_string):
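# The token expired a second ago, so resending must mint a new one;
# random_string is mocked so the regenerated token is deterministic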
expiration = dt.datetime.utcnow() - dt.timedelta(seconds=1)
random_string.return_value = '12345'
u = UnconfirmedUserFactory()
u.add_unconfirmed_email(u.username, expiration=expiration)
u.save()
self.app.post('/resend/', {'email': u.username})
confirm_url = u.get_confirmation_url(u.username, force=True)
assert_true(send_mail.called)
# Check the recorded call for both the address and the regenerated
# confirmation URL (the original `called_with` check always passed)
args, kwargs = send_mail.call_args
call_values = list(args) + list(kwargs.values())
assert_in(u.username, call_values)
assert_in(confirm_url, call_values)
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation_post_if_user_not_in_database(self, send_mail):
self.app.post('/resend/', {'email': 'norecord@norecord.no'})
assert_false(send_mail.called)
def test_confirmation_link_registers_user(self):
user = User.create_unconfirmed('brian@queen.com', 'bicycle123', 'Brian May')
assert_false(user.is_registered) # sanity check
user.save()
confirmation_url = user.get_confirmation_url('brian@queen.com', external=False)
res = self.app.get(confirmation_url)
assert_equal(res.status_code, 302, 'redirects to settings page')
res = res.follow()
user.reload()
assert_true(user.is_registered)
# TODO: Use mock add-on
class TestAddonUserViews(OsfTestCase):
def setUp(self):
super(TestAddonUserViews, self).setUp()
self.user = AuthUserFactory()
def test_choose_addons_add(self):
"""Add add-ons; assert that add-ons are attached to project.
"""
url = '/api/v1/settings/addons/'
self.app.post_json(
url,
{'github': True},
auth=self.user.auth,
).maybe_follow()
self.user.reload()
assert_true(self.user.get_addon('github'))
def test_choose_addons_remove(self):
# Add, then remove, add-ons; assert that the add-ons are no longer
# enabled for the user.
url = '/api/v1/settings/addons/'
self.app.post_json(
url,
{'github': True},
auth=self.user.auth,
).maybe_follow()
self.app.post_json(
url,
{'github': False},
auth=self.user.auth
).maybe_follow()
self.user.reload()
assert_false(self.user.get_addon('github'))
class TestConfigureMailingListViews(OsfTestCase):
@classmethod
def setUpClass(cls):
super(TestConfigureMailingListViews, cls).setUpClass()
cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
settings.ENABLE_EMAIL_SUBSCRIPTIONS = True
@unittest.skipIf(settings.USE_CELERY, 'Subscription must happen synchronously for this test')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_user_choose_mailing_lists_updates_user_dict(self, mock_get_mailchimp_api):
user = AuthUserFactory()
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
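# Resolve the list id through the mocked MailChimp client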
list_id = mailchimp_utils.get_list_id_from_name(list_name)
payload = {settings.MAILCHIMP_GENERAL_LIST: True}
url = api_url_for('user_choose_mailing_lists')
res = self.app.post_json(url, payload, auth=user.auth)
user.reload()
# check user.mailing_lists is updated
assert_true(user.mailing_lists[settings.MAILCHIMP_GENERAL_LIST])
assert_equal(
user.mailing_lists[settings.MAILCHIMP_GENERAL_LIST],
payload[settings.MAILCHIMP_GENERAL_LIST]
)
# check that user is subscribed
mock_client.lists.subscribe.assert_called_with(
id=list_id,
email={'email': user.username},
merge_vars={
'fname': user.given_name,
'lname': user.family_name,
},
double_optin=False,
update_existing=True,
)
def test_get_mailchimp_get_endpoint_returns_200(self):
url = api_url_for('mailchimp_get_endpoint')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_mailchimp_webhook_subscribe_action_updates_user(self, mock_get_mailchimp_api):
""" Test that 'subscribe' actions sent to the OSF via mailchimp
webhooks update the OSF database.
"""
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is not subscribed to a list
user = AuthUserFactory()
user.mailing_lists = {'OSF General': False}
user.save()
# user subscribes and webhook sends request to OSF
data = {'type': 'subscribe',
'data[list_id]': list_id,
'data[email]': user.username
}
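# the webhook endpoint authenticates via the shared secret key in the query string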
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type="application/x-www-form-urlencoded",
auth=user.auth)
# user field is updated on the OSF
user.reload()
assert_true(user.mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_mailchimp_webhook_profile_action_does_not_change_user(self, mock_get_mailchimp_api):
""" Test that 'profile' actions sent to the OSF via mailchimp
webhooks do not cause any database changes.
"""
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is subscribed to a list
user = AuthUserFactory()
user.mailing_lists = {'OSF General': True}
user.save()
# user hits subscribe again, which will update the user's existing info on mailchimp
# webhook sends request (when configured to update on changes made through the API)
data = {'type': 'profile',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type="application/x-www-form-urlencoded",
auth=user.auth)
# user field does not change
user.reload()
assert_true(user.mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_sync_data_from_mailchimp_unsubscribes_user(self, mock_get_mailchimp_api):
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is subscribed to a list
user = AuthUserFactory()
user.mailing_lists = {'OSF General': True}
user.save()
# user unsubscribes through mailchimp and webhook sends request
data = {'type': 'unsubscribe',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type="application/x-www-form-urlencoded",
auth=user.auth)
# user field is updated on the OSF
user.reload()
assert_false(user.mailing_lists[list_name])
def test_sync_data_from_mailchimp_fails_without_secret_key(self):
user = AuthUserFactory()
payload = {'values': {'type': 'unsubscribe',
'data': {'list_id': '12345',
'email': 'freddie@cos.io'}}}
url = api_url_for('sync_data_from_mailchimp')
res = self.app.post_json(url, payload, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, http.UNAUTHORIZED)
@classmethod
def tearDownClass(cls):
super(TestConfigureMailingListViews, cls).tearDownClass()
settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
# TODO: Move to OSF Storage
class TestFileViews(OsfTestCase):
def setUp(self):
super(TestFileViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory.build(creator=self.user, is_public=True)
self.project.add_contributor(self.user)
self.project.save()
def test_files_get(self):
url = self.project.api_url_for('collect_file_trees')
res = self.app.get(url, auth=self.user.auth)
expected = _view_project(self.project, auth=Auth(user=self.user))
assert_equal(res.status_code, http.OK)
assert_equal(res.json['node'], expected['node'])
assert_in('tree_js', res.json)
assert_in('tree_css', res.json)
def test_grid_data(self):
url = self.project.api_url_for('grid_data')
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(res.status_code, http.OK)
expected = rubeus.to_hgrid(self.project, auth=Auth(self.user))
data = res.json['data']
assert_equal(len(data), len(expected))
class TestComments(OsfTestCase):
def setUp(self):
super(TestComments, self).setUp()
self.project = ProjectFactory(is_public=True)
self.consolidated_auth = Auth(user=self.project.creator)
self.non_contributor = AuthUserFactory()
self.user = AuthUserFactory()
self.project.add_contributor(self.user)
self.project.save()
self.user.save()
def _configure_project(self, project, comment_level):
project.comment_level = comment_level
project.save()
def _add_comment(self, project, content=None, **kwargs):
content = content if content is not None else 'hammer to fall'
url = project.api_url + 'comment/'
return self.app.post_json(
url,
{
'content': content,
'isPublic': 'public',
},
**kwargs
)
def test_add_comment_public_contributor(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, auth=self.project.creator.auth,
)
self.project.reload()
res_comment = res.json['comment']
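# Compare the timestamps separately (with tolerance), then the
# remaining serialized fields exactly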
date_created = parse_date(str(res_comment.pop('dateCreated')))
date_modified = parse_date(str(res_comment.pop('dateModified')))
serialized_comment = serialize_comment(self.project.commented[0], self.consolidated_auth)
date_created2 = parse_date(serialized_comment.pop('dateCreated'))
date_modified2 = parse_date(serialized_comment.pop('dateModified'))
assert_datetime_equal(date_created, date_created2)
assert_datetime_equal(date_modified, date_modified2)
assert_equal(len(self.project.commented), 1)
assert_equal(res_comment, serialized_comment)
def test_add_comment_public_non_contributor(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, auth=self.non_contributor.auth,
)
self.project.reload()
res_comment = res.json['comment']
date_created = parse_date(res_comment.pop('dateCreated'))
date_modified = parse_date(res_comment.pop('dateModified'))
serialized_comment = serialize_comment(self.project.commented[0], Auth(user=self.non_contributor))
date_created2 = parse_date(serialized_comment.pop('dateCreated'))
date_modified2 = parse_date(serialized_comment.pop('dateModified'))
assert_datetime_equal(date_created, date_created2)
assert_datetime_equal(date_modified, date_modified2)
assert_equal(len(self.project.commented), 1)
assert_equal(res_comment, serialized_comment)
def test_add_comment_private_contributor(self):
self._configure_project(self.project, 'private')
res = self._add_comment(
self.project, auth=self.project.creator.auth,
)
self.project.reload()
res_comment = res.json['comment']
date_created = parse_date(str(res_comment.pop('dateCreated')))
date_modified = parse_date(str(res_comment.pop('dateModified')))
serialized_comment = serialize_comment(self.project.commented[0], self.consolidated_auth)
date_created2 = parse_date(serialized_comment.pop('dateCreated'))
date_modified2 = parse_date(serialized_comment.pop('dateModified'))
assert_datetime_equal(date_created, date_created2)
assert_datetime_equal(date_modified, date_modified2)
assert_equal(len(self.project.commented), 1)
assert_equal(res_comment, serialized_comment)
def test_add_comment_private_non_contributor(self):
self._configure_project(self.project, 'private')
res = self._add_comment(
self.project, auth=self.non_contributor.auth, expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_add_comment_logged_out(self):
self._configure_project(self.project, 'public')
res = self._add_comment(self.project)
assert_equal(res.status_code, 302)
assert_in('login', res.headers.get('location'))
def test_add_comment_off(self):
self._configure_project(self.project, None)
res = self._add_comment(
self.project, auth=self.project.creator.auth, expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_add_comment_empty(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, content='',
auth=self.project.creator.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_false(getattr(self.project, 'commented', []))
def test_add_comment_toolong(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, content='toolong' * 500,
auth=self.project.creator.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_false(getattr(self.project, 'commented', []))
def test_add_comment_whitespace(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, content=' ',
auth=self.project.creator.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_false(getattr(self.project, 'commented', []))
def test_edit_comment(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': 'edited',
'isPublic': 'private',
},
auth=self.project.creator.auth,
)
comment.reload()
assert_equal(res.json['content'], 'edited')
assert_equal(comment.content, 'edited')
def test_edit_comment_short(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project, content='short')
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': '',
'isPublic': 'private',
},
auth=self.project.creator.auth,
expect_errors=True,
)
comment.reload()
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(comment.content, 'short')
def test_edit_comment_toolong(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project, content='short')
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': 'toolong' * 500,
'isPublic': 'private',
},
auth=self.project.creator.auth,
expect_errors=True,
)
comment.reload()
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(comment.content, 'short')
def test_edit_comment_non_author(self):
"Contributors who are not the comment author cannot edit."
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
non_author = AuthUserFactory()
self.project.add_contributor(non_author, auth=self.consolidated_auth)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': 'edited',
'isPublic': 'private',
},
auth=non_author.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_edit_comment_non_contributor(self):
"Non-contributors who are not the comment author cannot edit."
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': 'edited',
'isPublic': 'private',
},
auth=self.non_contributor.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_delete_comment_author(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
self.app.delete_json(
url,
auth=self.project.creator.auth,
)
comment.reload()
assert_true(comment.is_deleted)
def test_delete_comment_non_author(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.delete_json(
url,
auth=self.non_contributor.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
comment.reload()
assert_false(comment.is_deleted)
def test_report_abuse(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
reporter = AuthUserFactory()
url = self.project.api_url + 'comment/{0}/report/'.format(comment._id)
self.app.post_json(
url,
{
'category': 'spam',
'text': 'ads',
},
auth=reporter.auth,
)
comment.reload()
assert_in(reporter._id, comment.reports)
assert_equal(
comment.reports[reporter._id],
{'category': 'spam', 'text': 'ads'}
)
def test_can_view_private_comments_if_contributor(self):
self._configure_project(self.project, 'public')
CommentFactory(node=self.project, user=self.project.creator, is_public=False)
url = self.project.api_url + 'comments/'
res = self.app.get(url, auth=self.project.creator.auth)
assert_equal(len(res.json['comments']), 1)
def test_view_comments_with_anonymous_link(self):
self.project.save()
self.project.set_privacy('private')
self.project.reload()
user = AuthUserFactory()
link = PrivateLinkFactory(anonymous=True)
link.nodes.append(self.project)
link.save()
CommentFactory(node=self.project, user=self.project.creator, is_public=False)
url = self.project.api_url + 'comments/'
res = self.app.get(url, {"view_only": link.key}, auth=user.auth)
comment = res.json['comments'][0]
author = comment['author']
assert_in('A user', author['name'])
assert_false(author['gravatarUrl'])
assert_false(author['url'])
assert_false(author['id'])
def test_discussion_recursive(self):
self._configure_project(self.project, 'public')
comment_l0 = CommentFactory(node=self.project)
user_l1 = UserFactory()
user_l2 = UserFactory()
comment_l1 = CommentFactory(node=self.project, target=comment_l0, user=user_l1)
CommentFactory(node=self.project, target=comment_l1, user=user_l2)
url = self.project.api_url + 'comments/discussion/'
res = self.app.get(url)
assert_equal(len(res.json['discussion']), 3)
def test_discussion_no_repeats(self):
self._configure_project(self.project, 'public')
comment_l0 = CommentFactory(node=self.project)
comment_l1 = CommentFactory(node=self.project, target=comment_l0)
CommentFactory(node=self.project, target=comment_l1)
url = self.project.api_url + 'comments/discussion/'
res = self.app.get(url)
assert_equal(len(res.json['discussion']), 1)
def test_discussion_sort(self):
self._configure_project(self.project, 'public')
user1 = UserFactory()
user2 = UserFactory()
CommentFactory(node=self.project)
for _ in range(3):
CommentFactory(node=self.project, user=user1)
for _ in range(2):
CommentFactory(node=self.project, user=user2)
url = self.project.api_url + 'comments/discussion/'
res = self.app.get(url)
assert_equal(len(res.json['discussion']), 3)
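# users are ordered by comment count: user1 (3), user2 (2), creator (1)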
observed = [user['id'] for user in res.json['discussion']]
expected = [user1._id, user2._id, self.project.creator._id]
assert_equal(observed, expected)
def test_view_comments_updates_user_comments_view_timestamp(self):
CommentFactory(node=self.project)
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, auth=self.user.auth)
self.user.reload()
user_timestamp = self.user.comments_viewed_timestamp[self.project._id]
view_timestamp = dt.datetime.utcnow()
assert_datetime_equal(user_timestamp, view_timestamp)
def test_confirm_non_contrib_viewers_dont_have_pid_in_comments_view_timestamp(self):
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, auth=self.user.auth)
self.non_contributor.reload()
assert_not_in(self.project._id, self.non_contributor.comments_viewed_timestamp)
def test_n_unread_comments_updates_when_comment_is_added(self):
self._add_comment(self.project, auth=self.project.creator.auth)
self.project.reload()
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json.get('nUnread'), 1)
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, auth=self.user.auth)
self.user.reload()
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json.get('nUnread'), 0)
def test_n_unread_comments_updates_when_comment_reply(self):
comment = CommentFactory(node=self.project, user=self.project.creator)
reply = CommentFactory(node=self.project, user=self.user, target=comment)
self.project.reload()
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.project.creator.auth)
assert_equal(res.json.get('nUnread'), 1)
def test_n_unread_comments_updates_when_comment_is_edited(self):
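# reuse the edit flow from test_edit_comment to produce an edited comment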
self.test_edit_comment()
self.project.reload()
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json.get('nUnread'), 1)
def test_n_unread_comments_is_zero_when_no_comments(self):
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.project.creator.auth)
assert_equal(res.json.get('nUnread'), 0)
class TestTagViews(OsfTestCase):
def setUp(self):
super(TestTagViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
@unittest.skip('Tags endpoint disabled for now.')
def test_tag_get_returns_200(self):
url = web_url_for('project_tag', tag='foo')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@requires_search
class TestSearchViews(OsfTestCase):
def setUp(self):
super(TestSearchViews, self).setUp()
import website.search.search as search
search.delete_all()
self.project = ProjectFactory(creator=UserFactory(fullname='Robbie Williams'))
self.contrib = UserFactory(fullname='Brian May')
for i in range(12):
UserFactory(fullname='Freddie Mercury{}'.format(i))
def tearDown(self):
super(TestSearchViews, self).tearDown()
import website.search.search as search
search.delete_all()
def test_search_contributor(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': self.contrib.fullname})
assert_equal(res.status_code, 200)
result = res.json['users']
assert_equal(len(result), 1)
brian = result[0]
assert_equal(brian['fullname'], self.contrib.fullname)
assert_in('gravatar_url', brian)
assert_equal(brian['registered'], self.contrib.is_registered)
assert_equal(brian['active'], self.contrib.is_active)
def test_search_pagination_default(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr'})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(pages, 3)
assert_equal(page, 0)
def test_search_pagination_default_page_1(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr', 'page': 1})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 1)
def test_search_pagination_default_page_2(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr', 'page': 2})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 2)
assert_equal(page, 2)
def test_search_pagination_smaller_pages(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr', 'size': 5})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 0)
assert_equal(pages, 3)
def test_search_pagination_smaller_pages_page_2(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, })
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 2)
assert_equal(page, 2)
assert_equal(pages, 3)
def test_search_projects(self):
url = '/search/'
res = self.app.get(url, {'q': self.project.title})
assert_equal(res.status_code, 200)
class TestODMTitleSearch(OsfTestCase):
""" Docs from original method:
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
def setUp(self):
super(TestODMTitleSearch, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, title="foo")
self.project_two = ProjectFactory(creator=self.user_two, title="bar")
self.public_project = ProjectFactory(creator=self.user_two, is_public=True, title="baz")
self.registration_project = RegistrationFactory(creator=self.user, title="qux")
self.folder = FolderFactory(creator=self.user, title="quux")
self.dashboard = DashboardFactory(creator=self.user, title="Dashboard")
self.url = api_url_for('search_projects_by_title')
def test_search_projects_by_title(self):
res = self.app.get(self.url, {'term': self.project.title}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 2)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
class TestReorderComponents(OsfTestCase):
def setUp(self):
super(TestReorderComponents, self).setUp()
self.creator = AuthUserFactory()
self.contrib = AuthUserFactory()
# Project is public
self.project = ProjectFactory.build(creator=self.creator, is_public=True)
self.project.add_contributor(self.contrib, auth=Auth(self.creator))
# one public subcomponent and one that only the creator can see
self.public_component = NodeFactory(creator=self.creator, is_public=True)
self.private_component = NodeFactory(creator=self.creator, is_public=False)
self.project.nodes.append(self.public_component)
self.project.nodes.append(self.private_component)
self.project.save()
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
def test_reorder_components_with_private_component(self):
# contrib tries to reorder components
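# entries in 'new_list' use the '<node id>:node' format the view expects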
payload = {
'new_list': [
'{0}:node'.format(self.private_component._primary_key),
'{0}:node'.format(self.public_component._primary_key),
]
}
url = self.project.api_url_for('project_reorder_components')
res = self.app.post_json(url, payload, auth=self.contrib.auth)
assert_equal(res.status_code, 200)
class TestDashboardViews(OsfTestCase):
def setUp(self):
super(TestDashboardViews, self).setUp()
self.creator = AuthUserFactory()
self.contrib = AuthUserFactory()
self.dashboard = DashboardFactory(creator=self.creator)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/571
def test_components_are_accessible_from_dashboard(self):
project = ProjectFactory(creator=self.creator, public=False)
component = NodeFactory(creator=self.creator, parent=project)
component.add_contributor(self.contrib, auth=Auth(self.creator))
component.save()
# Get the All My Projects smart folder from the dashboard
url = api_url_for('get_dashboard', nid=ALL_MY_PROJECTS_ID)
res = self.app.get(url, auth=self.contrib.auth)
assert_equal(len(res.json['data']), 1)
def test_get_dashboard_nodes(self):
project = ProjectFactory(creator=self.creator)
component = NodeFactory(creator=self.creator, parent=project)
url = api_url_for('get_dashboard_nodes')
res = self.app.get(url, auth=self.creator.auth)
assert_equal(res.status_code, 200)
nodes = res.json['nodes']
assert_equal(len(nodes), 2)
project_serialized = nodes[0]
assert_equal(project_serialized['id'], project._primary_key)
def test_get_dashboard_nodes_shows_components_if_user_is_not_contrib_on_project(self):
# User creates a project with a component
project = ProjectFactory(creator=self.creator)
component = NodeFactory(creator=self.creator, parent=project)
# User adds friend as a contributor to the component but not the
# project
friend = AuthUserFactory()
component.add_contributor(friend, auth=Auth(self.creator))
component.save()
# friend requests their dashboard nodes
url = api_url_for('get_dashboard_nodes')
res = self.app.get(url, auth=friend.auth)
nodes = res.json['nodes']
# Response includes component
assert_equal(len(nodes), 1)
assert_equal(nodes[0]['id'], component._primary_key)
# friend requests dashboard nodes, filtering against components
url = api_url_for('get_dashboard_nodes', no_components=True)
res = self.app.get(url, auth=friend.auth)
nodes = res.json['nodes']
assert_equal(len(nodes), 0)
def test_get_dashboard_nodes_admin_only(self):
friend = AuthUserFactory()
project = ProjectFactory(creator=self.creator)
# Friend is added as a contributor with read+write (not admin)
# permissions
perms = permissions.expand_permissions(permissions.WRITE)
project.add_contributor(friend, auth=Auth(self.creator), permissions=perms)
project.save()
url = api_url_for('get_dashboard_nodes')
res = self.app.get(url, auth=friend.auth)
assert_equal(res.json['nodes'][0]['id'], project._primary_key)
# Can filter project according to permission
url = api_url_for('get_dashboard_nodes', permissions='admin')
res = self.app.get(url, auth=friend.auth)
assert_equal(len(res.json['nodes']), 0)
def test_get_dashboard_nodes_invalid_permission(self):
url = api_url_for('get_dashboard_nodes', permissions='not-valid')
res = self.app.get(url, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_registered_components_are_accessible_from_dashboard(self):
project = ProjectFactory(creator=self.creator, public=False)
component = NodeFactory(creator=self.creator, parent=project)
component.add_contributor(self.contrib, auth=Auth(self.creator))
component.save()
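# Register the project so the component appears under registrations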
project.register_node(
None, Auth(self.creator), '', '',
)
# Get the All My Registrations smart folder from the dashboard
url = api_url_for('get_dashboard', nid=ALL_MY_REGISTRATIONS_ID)
res = self.app.get(url, auth=self.contrib.auth)
assert_equal(len(res.json['data']), 1)
def test_untouched_node_is_collapsed(self):
found_item = False
folder = FolderFactory(creator=self.creator, public=True)
self.dashboard.add_pointer(folder, auth=Auth(self.creator))
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'node_id'] == folder._id:
found_item = True
assert_false(dashboard_item[u'expand'], "Expand state was not set properly.")
assert_true(found_item, "Did not find the folder in the dashboard.")
def test_expand_node_sets_expand_to_true(self):
found_item = False
folder = FolderFactory(creator=self.creator, public=True)
self.dashboard.add_pointer(folder, auth=Auth(self.creator))
url = api_url_for('expand', pid=folder._id)
self.app.post(url, auth=self.creator.auth)
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'node_id'] == folder._id:
found_item = True
assert_true(dashboard_item[u'expand'], "Expand state was not set properly.")
assert_true(found_item, "Did not find the folder in the dashboard.")
def test_collapse_node_sets_expand_to_false(self):
found_item = False
folder = FolderFactory(creator=self.creator, public=True)
self.dashboard.add_pointer(folder, auth=Auth(self.creator))
# Expand the folder
url = api_url_for('expand', pid=folder._id)
self.app.post(url, auth=self.creator.auth)
# Serialize the dashboard and test
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'node_id'] == folder._id:
found_item = True
assert_true(dashboard_item[u'expand'], "Expand state was not set properly.")
assert_true(found_item, "Did not find the folder in the dashboard.")
# Collapse the folder
found_item = False
url = api_url_for('collapse', pid=folder._id)
self.app.post(url, auth=self.creator.auth)
# Serialize the dashboard and test
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'node_id'] == folder._id:
found_item = True
assert_false(dashboard_item[u'expand'], "Expand state was not set properly.")
assert_true(found_item, "Did not find the folder in the dashboard.")
def test_folder_new_post(self):
url = api_url_for('folder_new_post', nid=self.dashboard._id)
found_item = False
# Make the folder
title = 'New test folder'
payload = {'title': title, }
self.app.post_json(url, payload, auth=self.creator.auth)
# Serialize the dashboard and test
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'name'] == title:
found_item = True
assert_true(found_item, "Did not find the folder in the dashboard.")
class TestWikiWidgetViews(OsfTestCase):
def setUp(self):
super(TestWikiWidgetViews, self).setUp()
# project with no home wiki page
self.project = ProjectFactory()
self.read_only_contrib = AuthUserFactory()
self.project.add_contributor(self.read_only_contrib, permissions='read')
self.noncontributor = AuthUserFactory()
# project with no home wiki content
self.project2 = ProjectFactory(creator=self.project.creator)
self.project2.add_contributor(self.read_only_contrib, permissions='read')
self.project2.update_node_wiki(name='home', content='', auth=Auth(self.project.creator))
def test_show_wiki_for_contributors_when_no_wiki_or_content(self):
assert_true(_should_show_wiki_widget(self.project, self.project.creator))
assert_true(_should_show_wiki_widget(self.project2, self.project.creator))
def test_show_wiki_is_false_for_read_contributors_when_no_wiki_or_content(self):
assert_false(_should_show_wiki_widget(self.project, self.read_only_contrib))
assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib))
def test_show_wiki_is_false_for_noncontributors_when_no_wiki_or_content(self):
assert_false(_should_show_wiki_widget(self.project, self.noncontributor))
assert_false(_should_show_wiki_widget(self.project2, self.noncontributor))
class TestForkViews(OsfTestCase):
def setUp(self):
super(TestForkViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory.build(creator=self.user, is_public=True)
self.consolidated_auth = Auth(user=self.project.creator)
self.user.save()
self.project.save()
def test_fork_private_project_non_contributor(self):
self.project.set_privacy("private")
self.project.save()
url = self.project.api_url_for('node_fork_page')
non_contributor = AuthUserFactory()
res = self.app.post_json(url,
auth=non_contributor.auth,
expect_errors=True)
assert_equal(res.status_code, http.FORBIDDEN)
def test_fork_public_project_non_contributor(self):
url = self.project.api_url_for('node_fork_page')
non_contributor = AuthUserFactory()
res = self.app.post_json(url, auth=non_contributor.auth)
assert_equal(res.status_code, 200)
def test_fork_project_contributor(self):
contributor = AuthUserFactory()
self.project.set_privacy("private")
self.project.add_contributor(contributor)
self.project.save()
url = self.project.api_url_for('node_fork_page')
res = self.app.post_json(url, auth=contributor.auth)
assert_equal(res.status_code, 200)
def test_registered_forks_dont_show_in_fork_list(self):
fork = self.project.fork_node(self.consolidated_auth)
RegistrationFactory(project=fork)
url = self.project.api_url_for('get_forks')
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['nodes']), 1)
assert_equal(res.json['nodes'][0]['id'], fork._id)
class TestProjectCreation(OsfTestCase):
def setUp(self):
super(TestProjectCreation, self).setUp()
self.creator = AuthUserFactory()
self.url = api_url_for('project_new_post')
def test_needs_title(self):
res = self.app.post_json(self.url, {}, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_create_component_strips_html(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
url = web_url_for('project_new_node', pid=project._id)
post_data = {'title': '<b>New <blink>Component</blink> Title</b>', 'category': ''}
res = self.app.post(url, post_data, auth=user.auth).follow()
project.reload()
child = project.nodes[0]
# HTML has been stripped
assert_equal(child.title, 'New Component Title')
def test_strip_html_from_title(self):
payload = {
'title': 'no html <b>here</b>'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
node = Node.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal('no html here', node.title)
def test_only_needs_title(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
def test_title_must_be_at_least_one_character(self):
payload = {
'title': ''
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_title_must_be_less_than_200(self):
payload = {
'title': ''.join([str(x) for x in xrange(0, 250)])
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_fails_to_create_project_with_whitespace_title(self):
payload = {
'title': ' '
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_creates_a_project(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = Node.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.title, 'Im a real title')
def test_description_works(self):
payload = {
'title': 'Im a real title',
'description': 'I describe things!'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = Node.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.description, 'I describe things!')
def test_can_template(self):
other_node = ProjectFactory(creator=self.creator)
payload = {
'title': 'Im a real title',
'template': other_node._id
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = Node.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.template_node, other_node)
def test_project_before_template_no_addons(self):
project = ProjectFactory()
res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
assert_equal(res.json['prompts'], [])
def test_project_before_template_with_addons(self):
project = ProjectWithAddonFactory(addon='github')
res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
assert_in('GitHub', res.json['prompts'])
def test_project_new_from_template_non_user(self):
project = ProjectFactory()
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=None)
assert_equal(res.status_code, 302)
res2 = res.follow(expect_errors=True)
assert_equal(res2.status_code, 301)
assert_equal(res2.request.path, '/login')
def test_project_new_from_template_public_non_contributor(self):
non_contributor = AuthUserFactory()
project = ProjectFactory(is_public=True)
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=non_contributor.auth)
assert_equal(res.status_code, 201)
def test_project_new_from_template_contributor(self):
contributor = AuthUserFactory()
project = ProjectFactory(is_public=False)
project.add_contributor(contributor)
project.save()
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=contributor.auth)
assert_equal(res.status_code, 201)
class TestUnconfirmedUserViews(OsfTestCase):
def test_can_view_profile(self):
user = UnconfirmedUserFactory()
url = web_url_for('profile_view_id', uid=user._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
class TestProfileNodeList(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.public = ProjectFactory(is_public=True)
self.public_component = NodeFactory(parent=self.public, is_public=True)
self.private = ProjectFactory(is_public=False)
self.deleted = ProjectFactory(is_public=True, is_deleted=True)
for node in (self.public, self.public_component, self.private, self.deleted):
node.add_contributor(self.user, auth=Auth(node.creator))
node.save()
def test_get_public_projects(self):
url = api_url_for('get_public_projects', uid=self.user._id)
res = self.app.get(url)
node_ids = [each['id'] for each in res.json['nodes']]
assert_in(self.public._id, node_ids)
assert_not_in(self.private._id, node_ids)
assert_not_in(self.deleted._id, node_ids)
assert_not_in(self.public_component._id, node_ids)
def test_get_public_components(self):
url = api_url_for('get_public_components', uid=self.user._id)
res = self.app.get(url)
node_ids = [each['id'] for each in res.json['nodes']]
assert_in(self.public_component._id, node_ids)
assert_not_in(self.public._id, node_ids)
assert_not_in(self.private._id, node_ids)
assert_not_in(self.deleted._id, node_ids)
class TestStaticFileViews(OsfTestCase):
def test_robots_dot_txt(self):
res = self.app.get('/robots.txt')
assert_equal(res.status_code, 200)
assert_in('User-agent', res)
assert_in('text/plain', res.headers['Content-Type'])
def test_favicon(self):
res = self.app.get('/favicon.ico')
assert_equal(res.status_code, 200)
assert_in('image/vnd.microsoft.icon', res.headers['Content-Type'])
def test_getting_started_page(self):
res = self.app.get('/getting-started/')
assert_equal(res.status_code, 200)
class TestUserConfirmSignal(OsfTestCase):
def test_confirm_user_signal_called_when_user_claims_account(self):
unclaimed_user = UnconfirmedUserFactory()
# unclaimed user has been invited to a project.
referrer = UserFactory()
project = ProjectFactory(creator=referrer)
unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
unclaimed_user.save()
token = unclaimed_user.get_unclaimed_record(project._primary_key)['token']
with capture_signals() as mock_signals:
url = web_url_for('claim_user_form', pid=project._id, uid=unclaimed_user._id, token=token)
payload = {'username': unclaimed_user.username,
'password': 'password',
'password2': 'password'}
res = self.app.post(url, payload)
assert_equal(res.status_code, 302)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
def test_confirm_user_signal_called_when_user_confirms_email(self):
unconfirmed_user = UnconfirmedUserFactory()
unconfirmed_user.save()
# user goes to email confirmation link
token = unconfirmed_user.get_confirmation_token(unconfirmed_user.username)
with capture_signals() as mock_signals:
url = web_url_for('confirm_email_get', uid=unconfirmed_user._id, token=token)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "bbf862d8b0506f26945ef09418990865",
"timestamp": "",
"source": "github",
"line_count": 4107,
"max_line_length": 184,
"avg_line_length": 39.16264913562211,
"alnum_prop": 0.5890164821158784,
"repo_name": "GaryKriebel/osf.io",
"id": "0909bb2589fcdfd44dcf30e7325f44ff33905abd",
"size": "160887",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/test_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "84831"
},
{
"name": "HTML",
"bytes": "16454"
},
{
"name": "JavaScript",
"bytes": "973620"
},
{
"name": "Mako",
"bytes": "470543"
},
{
"name": "Python",
"bytes": "2644957"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
}
|
import os
import getpass
import sys
import datetime
import time
import smtplib
import yaml
from optparse import OptionParser
from dateutil import parser
if __name__ == '__main__':
optparser = OptionParser()
optparser.add_option('-f', '--file', dest='file',
help='YAML file with cluster data',
default=None, type='string')
optparser.add_option('-d', '--date', dest='targetdate',
help='target date to operate on',
default=None, type='string')
(opts, args) = optparser.parse_args()
results = {}
clustersizes = {}
if opts.file is not None:
try:
now = datetime.date.today()
if opts.targetdate is not None:
now = parser.parse(opts.targetdate).date()
schedule = yaml.safe_load(open(opts.file, 'r'))
# not doing anything with the clusters section yet
for member in schedule.keys():
if member == "clusters":
for clustername in schedule[member]:
pass
if member == "hostnames":
for hostname in schedule[member]:
latestdate = parser.parse("1970-01-01").date()
if schedule.get(hostname, False):
if schedule[hostname].get("cluster",False):
for scheddate in schedule[hostname]["cluster"].keys():
if scheddate <= now:
if scheddate > latestdate:
latestdate = scheddate
if schedule[hostname]["cluster"].get(latestdate,False):
results[hostname] = schedule[hostname]["cluster"][latestdate]
clustersizes[schedule[hostname]["cluster"][latestdate]] = 0
except Exception, ex:
print "There was a problem with your file %s" % ex
raise SystemExit(4)
for hostname in results.keys():
clustersizes[results[hostname]] += 1
for cluster in sorted(clustersizes.keys()):
print " " + cluster + " " + str(clustersizes[cluster]) + " (" + schedule["clusters"][cluster] + ")"
|
{
"content_hash": "6cac5bd45e5d6445e12b0cdeaf0e2146",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 107,
"avg_line_length": 39,
"alnum_prop": 0.5154280747501087,
"repo_name": "sadsfae/openstack-lab-tools",
"id": "5eaf7079aea6810e10fb7f0fced729c96038b6d7",
"size": "2486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab-tools/lab-schedule-ical-driver.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4741"
},
{
"name": "Shell",
"bytes": "8906"
}
],
"symlink_target": ""
}
|
import requests
import json
from django.conf import settings
try:
import rados
import rbd
WITH_RADOS = True
except ImportError:
WITH_RADOS = False
class CephClient():
def __init__(self):
if WITH_RADOS:
try:
self._ceph_cluster = rados.Rados(
conffile=settings.CEPH_CONF,
conf={"keyring": settings.CEPH_KEYRING})
self._ceph_cluster.connect()
except Exception:
self._ceph_cluster = None
self._session = requests.Session()
def request(self, method, url, params=None):
try:
resp = self._session.request(method,
url,
params=params,
headers={'Accept': 'application/json',
'Content-Type':
'application/json'})
if not resp.ok:
raise Exception('connection error')
resp_json = json.loads(resp.text)
if resp_json['status'] != 'OK' and 'report' not in url:
raise Exception('command error')
return resp_json['output']
except Exception:
return {}
def send_command(self, method, cmd, params=None):
ceph_url = settings.API_URL
if not ceph_url.startswith("http://"):
ceph_url = "http://" + ceph_url
url = "%s:%d/api/v0.1/" % (ceph_url, settings.API_PORT)
url += '/'.join(cmd)
return self.request(method, url, params)
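# For illustration, assuming settings.API_URL = "ceph-api.example.com" and
# settings.API_PORT = 5000 (hypothetical values), a call such as
#     self.send_command('GET', ['osd', 'tree'])
# issues GET http://ceph-api.example.com:5000/api/v0.1/osd/tree and returns
# the parsed 'output' field of the JSON response, or {} on any error.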
def status(self):
return self.send_command('GET', ['status'])
def report(self):
return self.send_command('GET', ['report'])
def osd_tree(self):
osds = self.send_command('GET', ['osd', 'tree'])
if not osds:
return {}
nodes = osds['nodes']
osd_dic = dict()
is_root = dict()
for node in nodes:
osd_dic[node['id']] = node
node['cid'] = node['id']
is_root[node['id']] = True
del node['id']
for node in nodes:
if 'children' in node:
children = list()
for child in node['children']:
children.append(osd_dic[child])
del is_root[child]
node['children'] = children
osd_tree = {'name': 'ceph', 'cid': -32767, 'type_id': -1, 'children': []}
for id in is_root:
osd_tree['children'].append(osd_dic[id])
return osd_tree
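# A small sketch of the reshaping above, with made-up ids: the REST API
# returns a flat list such as
#     [{'id': -1, 'name': 'default', 'children': [0]},
#      {'id': 0, 'name': 'osd.0'}]
# and osd_tree() renames 'id' to 'cid' and nests everything under a
# synthetic 'ceph' root:
#     {'name': 'ceph', 'cid': -32767, 'type_id': -1,
#      'children': [{'cid': -1, 'name': 'default',
#                    'children': [{'cid': 0, 'name': 'osd.0'}]}]}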
def crush_rules(self):
rules = self.send_command('GET', ['osd', 'crush', 'rule', 'dump'])
for rule in rules:
step_item = []
for step in rule['steps']:
if step['op'] == 'take':
step_item.append("take %s" % step['item_name'])
elif step['op'] == 'chooseleaf_firstn':
step_item.append("chooseleaf_firstn %d %s" %
(step['num'], step['type']))
elif step['op'] == 'choose_firstn':
step_item.append("choose_firstn %d %s" %
(step['num'], step['type']))
else:
step_item.append("emit")
rule['steps'] = step_item
return rules
def create_crush_rule(self, name, root, type, mode):
result = self.send_command('PUT',
['osd', 'crush', 'rule', 'create-simple'],
params={'name': name,
'root': root,
'type': type,
'mode': mode})
return result
def delete_crush_rule(self, name):
result = self.send_command('PUT', ['osd', 'crush', 'rule', 'rm'],
params={'name': name})
return result
def df(self):
return self.send_command('GET', ['df'])
def pool_get(self, pool, var):
return self.send_command('GET', ['osd', 'pool', 'get'],
params={'var': var, 'pool': pool})
def create_pool(self, name, pg, type, crush_rule):
return self.send_command('PUT', ['osd', 'pool', 'create'],
params={'pool': name, 'pg_num': pg,
'pgp_num': pg, 'pool_type': type,
'erasure_code_profile': crush_rule})
def delete_pool(self, pool):
return self.send_command(
'PUT',
['osd', 'pool', 'delete'],
params={'pool': pool,
'pool2': pool,
'sure': '--yes-i-really-really-mean-it'})
def pool_df(self, name, stats):
return self.send_command(
'GET',
['osd', 'pool', 'df'],
params={'name': name, 'stats': ','.join(stats)})
def list_pools(self):
return self.send_command('GET', ['osd', 'lspools'])
def image_dist(self, pool, id):
resp = {'pg': {}, 'osd': {}}
if WITH_RADOS and self._ceph_cluster is not None:
with self._ceph_cluster.open_ioctx(pool) as ioctx:
with rbd.Image(ioctx, id) as image:
stat = image.stat()
prefix = stat['block_name_prefix']
num_objs = stat['num_objs']
for obj in range(num_objs):
cmd = ['{"prefix": "osd map", "object": "%s.%016x", \
"pool": "%s", "format": "json"}' %
(prefix, obj, pool)]
ret, outbuf, outs = self._ceph_cluster.mon_command(
cmd, '', 0)
dist = json.loads(outbuf)
pgid = dist['pgid']
p_osd = dist['acting_primary']
if pgid in resp['pg']:
resp['pg'][pgid] += 1
else:
resp['pg'][pgid] = 1
if p_osd in resp['osd']:
resp['osd'][p_osd] += 1
else:
resp['osd'][p_osd] = 1
return resp
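# image_dist() returns object counts per placement group and per acting
# primary OSD for one RBD image. A hypothetical result for a three-object
# image might look like:
#     {'pg': {'1.7f': 2, '1.3a': 1}, 'osd': {4: 2, 9: 1}}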
def osd_info(self, hostname, osdname):
return self.send_command('GET', ['osd', 'info'],
params={'id': osdname, 'host': hostname})
def osd_list(self):
osd_info = self.send_command('GET', ['osd', 'dump'])
osd_id = []
for val in osd_info['osds']:
osd_id.append(val["osd"])
return osd_id
def image_list(self, pool):
if WITH_RADOS and self._ceph_cluster is not None:
with self._ceph_cluster.open_ioctx(pool) as ioctx:
r = rbd.RBD()
return r.list(ioctx)
return list()
class GraphiteClient:
def request(self, url):
if settings.GRAPHITE_AUTH:
return requests.get(url, auth=(settings.GRAPHITE_USERNAME,
settings.GRAPHITE_PASSWORD),
verify=False)
else:
return requests.get(url)
def get_metrics(self, metrics, begin=None, end=None):
graphite_ep = settings.GRAPHITE_ENDPOINT
if not graphite_ep.endswith('/'):
graphite_ep += '/'
metrics = ['target='+x for x in metrics]
url = graphite_ep + "?" + "&".join(metrics)
if begin is not None:
url += "&from=" + begin
if end is not None:
url += "&end=" + end
url += "&format=json"
r = self.request(url)
if r.status_code == 200:
return r.json()
else:
return []
def pool_df(self, name, stats):
metric_prefix = settings.GRAPHITE_PREFIX + settings.GRAPHITE_SERVER +\
settings.GRAPHITE_SUFIX
metric_prefix += '.ceph-ceph-pool-' + name
stat_dict = {"op": "op_per_sec", "rd": "read_bytes_sec",
"wr": "write_bytes_sec", "objects": "objects",
"used": "bytes_used"}
metrics = []
for stat in stats:
metrics.append(metric_prefix + '.gauge-' + stat_dict[stat])
results = self.get_metrics(metrics, '-5h')
for metric in results:
metric['key'] = metric['target'].split('-')[-1:]
del metric['target']
metric['values'] = []
for val in metric['datapoints']:
metric['values'].append({'x': val[1], 'y': val[0]})
del metric['datapoints']
return results
def osd_info(self, hostname, osdid):
metric_prefix = settings.GRAPHITE_PREFIX + hostname + '*' +\
settings.GRAPHITE_SUFIX
metrics = ['.load.load.shortterm', '.load.load.midterm',
'.load.load.longterm', '.memory.memory-free',
'.memory.memory-used']
metrics = [metric_prefix + x for x in metrics]
metric_prefix = settings.GRAPHITE_PREFIX + settings.GRAPHITE_SERVER +\
settings.GRAPHITE_SUFIX
metrics.append(metric_prefix + '.ceph-ceph-osd-' + osdid +
'.gauge-kb_used')
metrics.append(metric_prefix + '.ceph-ceph-osd-' + osdid +
'.gauge-kb_total')
result = self.get_metrics(metrics, '-60s')
key_val = {}
for metric in result:
name = metric['target'].split('.')[-1]
key_val[name] = metric['datapoints'][-1][0]
return {"load": [key_val['shortterm'], key_val['midterm'],
key_val['longterm']],
"disk": {'used': key_val['gauge-kb_used'] * 1024,
'free': (key_val['gauge-kb_total'] -
key_val['gauge-kb_used'])*1024},
"memory": {'used': key_val['memory-used'],
'free': key_val['memory-free']}}
def osd_io(self):
prefix = '*.ceph-ceph-osd-*'
metrics = [prefix + x for x in ['.gauge-ops_read', '.gauge-ops_write']]
result = self.get_metrics(metrics, begin='-120s')
osds = dict()
for x in result:
cluster, osd, io = x['target'].split('.')
osd = osd.split('-')[-1]
if x['datapoints'][1][0] is not None:
value = x['datapoints'][1][0] - x['datapoints'][0][0]
else:
value = 0
if io.endswith('read'):
if osd in osds:
osds[osd]['r'] = value
else:
osds[osd] = {'r': value}
else:
if osd in osds:
osds[osd]['w'] = value
else:
osds[osd] = {'w': value}
return [{'id': key, 'r': val['r'], 'w': val['w']}
for key, val in osds.items()]
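# A minimal usage sketch, assuming Django settings carry the CEPH_*, API_*
# and GRAPHITE_* values referenced above (all deployment-specific):
#
#     client = CephClient()
#     health = client.status()      # cluster status via the REST API
#     pools = client.list_pools()   # output of 'ceph osd lspools'
#     graphite = GraphiteClient()
#     io = graphite.osd_io()        # [{'id': ..., 'r': ..., 'w': ...}, ...]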
|
{
"content_hash": "7570044488987567afea51e760ae6b6f",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 80,
"avg_line_length": 36.6875,
"alnum_prop": 0.44804088586030666,
"repo_name": "CiscoSystems/CephEWS",
"id": "bae4c8fbb22f09b84281143729dd6c7d5ea99375",
"size": "11729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/cephews/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3222"
},
{
"name": "HTML",
"bytes": "33814"
},
{
"name": "JavaScript",
"bytes": "23314"
},
{
"name": "Python",
"bytes": "23285"
}
],
"symlink_target": ""
}
|
"""Utilities related to FeatureColumn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import embedding_ops
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
column: the column name.
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
if args.hashed:
embeddings = contrib_variables.model_variable(
name='weights',
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
return embedding_ops.hashed_embedding_lookup_sparse(
embeddings, input_tensor, args.dimension,
combiner=args.combiner, name='lookup')
if args.shared_embedding_name is not None:
shared_embedding_collection_name = (
'SHARED_EMBEDDING_COLLECTION_' + args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError('Collection %s can only contain one '
'(partitioned) variable.'
% shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError('The embedding variable with name {} already '
'exists, but its shape does not match required '
'embedding shape here. Please make sure to use '
'different shared_embedding_name for different '
'shared embeddings.'.format(
args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name='weights',
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(embeddings, variables.Variable):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(
column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + '_weights')
def _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank,
default_name):
"""Implementation of `input_from(_sequence)_feature_columns`."""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(scope,
default_name=default_name,
values=columns_to_tensors.values()):
output_tensors = []
transformer = _Transformer(columns_to_tensors)
if weight_collections:
weight_collections = list(set(list(weight_collections) +
[ops.GraphKeys.VARIABLES]))
for column in sorted(set(feature_columns), key=lambda x: x.key):
with variable_scope.variable_scope(None,
default_name=column.name,
values=columns_to_tensors.values()):
transformed_tensor = transformer.transform(column)
try:
# pylint: disable=protected-access
arguments = column._deep_embedding_lookup_arguments(
transformed_tensor)
output_tensors.append(_embeddings_from_arguments(
column,
arguments,
weight_collections,
trainable,
output_rank=output_rank))
except NotImplementedError as ee:
try:
# pylint: disable=protected-access
output_tensors.append(column._to_dnn_input_layer(
transformed_tensor,
weight_collections,
trainable,
output_rank=output_rank))
except ValueError as e:
raise ValueError('Error creating input layer for column: {}.\n'
'{}, {}'.format(column.name, e, ee))
return array_ops.concat(output_rank - 1, output_tensors)
def input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layer style input layer builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
At the first layer of the model, this column oriented data should be converted
to a single tensor. Each feature column needs a different kind of operation
during this conversion. For example sparse features need a totally different
handling than continuous features.
An example usage of input_from_feature_columns is as follows:
# Building model for training
columns_to_tensor = tf.parse_example(...)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns)
second_layer = fully_connected(first_layer, ...)
...
where feature_columns can be defined as follows:
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns=[occupation_emb, age_buckets]
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived by FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
return _input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=2,
default_name='input_from_feature_columns')
@experimental
def sequence_input_from_feature_columns(columns_to_tensors,
feature_columns,
weight_collections=None,
trainable=True,
scope=None):
"""Builds inputs for sequence models from `FeatureColumn`s.
See documentation for `input_from_feature_columns`. The following types of
`FeatureColumn` are permitted in `feature_columns`: `_OneHotColumn`,
`_EmbeddingColumn`, `_RealValuedColumn`. In addition, columns in
`feature_columns` may not be constructed using any of the following:
`_HashedEmbeddingColumn`, `_BucketizedColumn`, `_CrossedColumn`.
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived by FeatureColumn.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A Tensor which can be consumed by hidden layers in the neural network.
Raises:
ValueError: if FeatureColumn cannot be consumed by a neural network.
"""
_check_supported_sequence_columns(feature_columns)
_check_forbidden_sequence_columns(feature_columns)
return _input_from_feature_columns(
columns_to_tensors,
feature_columns,
weight_collections,
trainable,
scope,
output_rank=3,
default_name='sequence_input_from_feature_columns')
def _create_embedding_lookup(column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates variables and returns predictions for linear weights in a model.
Args:
column: the column we're working on.
columns_to_tensors: a map from column name to tensors.
embedding_lookup_arguments: arguments for embedding lookup.
num_outputs: how many outputs.
trainable: whether the variable we create is trainable.
weight_collections: weights will be placed here.
Returns:
variables: the created embeddings.
predictions: the computed predictions.
"""
with variable_scope.variable_scope(
None, default_name=column.name, values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[embedding_lookup_arguments.vocab_size, num_outputs],
dtype=dtypes.float32,
initializer=embedding_lookup_arguments.initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(variable, variables.Variable):
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
embedding_lookup_arguments.input_tensor,
sparse_weights=embedding_lookup_arguments.weight_tensor,
combiner=embedding_lookup_arguments.combiner,
name=column.name + '_weights')
return variable, predictions
def _maybe_restore_from_checkpoint(checkpoint_path, variable):
if checkpoint_path is not None:
path, tensor_name = checkpoint_path
weights_to_restore = variable
if len(variable) == 1:
weights_to_restore = variable[0]
checkpoint_utils.init_from_checkpoint(path,
{tensor_name: weights_to_restore})
def _create_joint_embedding_lookup(columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections):
"""Creates an embedding lookup for all columns sharing a single weight."""
for arg in embedding_lookup_arguments:
assert arg.weight_tensor is None, (
'Joint sums for weighted sparse columns are not supported. '
'Please use weighted_sum_from_feature_columns instead.')
assert arg.combiner == 'sum', (
'Combiners other than sum are not supported for joint sums. '
'Please use weighted_sum_from_feature_columns instead.')
assert len(embedding_lookup_arguments) >= 1, (
'At least one column must be in the model.')
prev_size = 0
sparse_tensors = []
for a in embedding_lookup_arguments:
t = a.input_tensor
values = t.values + prev_size
prev_size += a.vocab_size
sparse_tensors.append(
sparse_tensor_py.SparseTensor(t.indices,
values,
t.shape))
sparse_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
with variable_scope.variable_scope(
None, default_name='linear_weights', values=columns_to_tensors.values()):
variable = contrib_variables.model_variable(
name='weights',
shape=[prev_size, num_outputs],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer,
trainable=trainable,
collections=weight_collections)
if isinstance(variable, variables.Variable):
variable = [variable]
else:
variable = variable._get_variable_list() # pylint: disable=protected-access
predictions = embedding_ops.safe_embedding_lookup_sparse(
variable,
sparse_tensor,
sparse_weights=None,
combiner='sum',
name='_weights')
return variable, predictions
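# The loop above shares one weight matrix across columns by offsetting each
# column's sparse ids past the vocabularies that precede it. A minimal,
# framework-free sketch of that id remapping (illustrative only; the helper
# below is not part of this module):
def _offset_ids_sketch(columns):
  """columns: list of (ids, vocab_size) pairs; returns ids in a joint space."""
  joint_ids = []
  prev_size = 0
  for ids, vocab_size in columns:
    joint_ids.extend(i + prev_size for i in ids)
    prev_size += vocab_size
  return joint_ids, prev_size
# _offset_ids_sketch([([0, 2], 3), ([1], 5)]) == ([0, 2, 4], 8): the second
# column's id 1 becomes 3 + 1 = 4, and the joint weight matrix has 8 rows.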
def joint_weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A restricted linear prediction builder based on FeatureColumns.
As long as all feature columns are unweighted sparse columns this computes the
prediction of a linear model which stores all weights in a single variable.
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying the number of outputs.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple of followings:
* A Tensor which represents predictions of a linear model.
* A list of Variables storing the weights.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='joint_weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
transformer = _Transformer(columns_to_tensors)
embedding_lookup_arguments = []
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments.append(
column._wide_embedding_lookup_arguments(transformed_tensor)) # pylint: disable=protected-access
except NotImplementedError:
raise NotImplementedError('Real-valued columns are not supported. '
'Use weighted_sum_from_feature_columns '
'instead, or bucketize these columns.')
variable, predictions_no_bias = _create_joint_embedding_lookup(
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, variable, bias
def weighted_sum_from_feature_columns(columns_to_tensors,
feature_columns,
num_outputs,
weight_collections=None,
trainable=True,
scope=None):
"""A tf.contrib.layer style linear prediction builder based on FeatureColumns.
Generally a single example in training data is described with feature columns.
This function generates weighted sum for each num_outputs. Weighted sum refers
to logits in classification problems. It refers to prediction itself for
linear regression problems.
An example usage of weighted_sum_from_feature_columns is as follows:
# Building model for training
columns_to_tensor = tf.parse_example(...)
logits = weighted_sum_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=feature_columns,
num_outputs=1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits, labels)
where feature_columns can be defined as follows:
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
occupation_x_age = crossed_column(columns=[occupation, age_buckets],
hash_bucket_size=10000)
feature_columns=[age_buckets, occupation, occupation_x_age]
Args:
columns_to_tensors: A mapping from feature column to tensors. 'string' key
means a base feature (not-transformed). It can have FeatureColumn as a
key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
feature_columns: A set containing all the feature columns. All items in the
set should be instances of classes derived from FeatureColumn.
num_outputs: An integer specifying the number of outputs.
weight_collections: List of graph collections to which weights are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
A tuple of followings:
* A Tensor which represents predictions of a linear model.
* A dictionary which maps feature_column to corresponding Variable.
* A Variable which is used for bias.
Raises:
ValueError: if FeatureColumn cannot be used for linear predictions.
"""
check_feature_columns(feature_columns)
with variable_scope.variable_scope(
scope,
default_name='weighted_sum_from_feature_columns',
values=columns_to_tensors.values()):
output_tensors = []
column_to_variable = dict()
transformer = _Transformer(columns_to_tensors)
# pylint: disable=protected-access
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformed_tensor = transformer.transform(column)
try:
embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
transformed_tensor)
variable, predictions = _create_embedding_lookup(
column,
columns_to_tensors,
embedding_lookup_arguments,
num_outputs,
trainable,
weight_collections)
except NotImplementedError:
with variable_scope.variable_scope(
None,
default_name=column.name,
values=columns_to_tensors.values()):
tensor = column._to_dense_tensor(transformed_tensor)
tensor = fc._reshape_real_valued_tensor(tensor, 2, column.name)
variable = [contrib_variables.model_variable(
name='weight',
shape=[tensor.get_shape()[1], num_outputs],
initializer=init_ops.zeros_initializer,
collections=weight_collections)]
predictions = math_ops.matmul(tensor, variable[0], name='matmul')
except ValueError as ee:
raise ValueError('Error creating weighted sum for column: {}.\n'
'{}'.format(column.name, ee))
output_tensors.append(predictions)
column_to_variable[column] = variable
_log_variable(variable)
_maybe_restore_from_checkpoint(column._checkpoint_path(), variable)
# pylint: enable=protected-access
predictions_no_bias = math_ops.add_n(output_tensors)
bias = contrib_variables.model_variable(
'bias_weight',
shape=[num_outputs],
initializer=init_ops.zeros_initializer,
collections=_add_variable_collection(weight_collections))
_log_variable(bias)
predictions = nn_ops.bias_add(predictions_no_bias, bias)
return predictions, column_to_variable, bias
def parse_feature_columns_from_examples(serialized,
feature_columns,
name=None,
example_names=None):
"""Parses tf.Examples to extract tensors for given feature_columns.
This is a wrapper of 'tf.parse_example'. A typical usage is as follows:
```python
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
# Where my_features are:
# Define features and transformations
country = sparse_column_with_keys(column_name="native_country",
keys=["US", "BRA", ...])
country_emb = embedding_column(sparse_id_column=country, dimension=3,
combiner="sum")
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
occupation_x_country = crossed_column(columns=[occupation, country],
hash_bucket_size=10000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
my_features = [occupation_emb, age_buckets, country_emb]
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensors = parsing_ops.parse_example(
serialized=serialized,
features=fc.create_feature_spec_for_parsing(feature_columns),
name=name,
example_names=example_names)
transformer = _Transformer(columns_to_tensors)
for column in sorted(set(feature_columns), key=lambda x: x.key):
transformer.transform(column)
return columns_to_tensors
def parse_feature_columns_from_sequence_examples(
serialized,
context_feature_columns,
sequence_feature_columns,
name=None,
example_name=None):
"""Parses tf.SequenceExamples to extract tensors for given `FeatureColumn`s.
Args:
serialized: A scalar (0-D Tensor) of type string, a single serialized
`SequenceExample` proto.
context_feature_columns: An iterable containing the feature columns for
context features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
sequence_feature_columns: An iterable containing the feature columns for
sequence features. All items should be instances of classes derived from
`_FeatureColumn`. Can be `None`.
name: A name for this operation (optional).
example_name: A scalar (0-D Tensor) of type string (optional), the names of
the serialized proto.
Returns:
A tuple consisting of:
context_features: a dict mapping `FeatureColumns` from
`context_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
sequence_features: a dict mapping `FeatureColumns` from
`sequence_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
"""
# Sequence example parsing requires a single (scalar) example.
try:
serialized = array_ops.reshape(serialized, [])
except ValueError as e:
raise ValueError(
'serialized must contain a single sequence example. Batching must be '
'done after parsing for sequence examples. Error: {}'.format(e))
if context_feature_columns is None:
context_feature_columns = []
if sequence_feature_columns is None:
sequence_feature_columns = []
check_feature_columns(context_feature_columns)
context_feature_spec = fc.create_feature_spec_for_parsing(
context_feature_columns)
check_feature_columns(sequence_feature_columns)
sequence_feature_spec = fc._create_sequence_feature_spec_for_parsing( # pylint: disable=protected-access
sequence_feature_columns, allow_missing_by_default=False)
return parsing_ops.parse_single_sequence_example(serialized,
context_feature_spec,
sequence_feature_spec,
example_name,
name)
def _log_variable(variable):
if isinstance(variable, list):
for var in variable:
if isinstance(var, variables.Variable):
logging.info('Created variable %s, with device=%s', var.name,
var.device)
elif isinstance(variable, variables.Variable):
logging.info('Created variable %s, with device=%s', variable.name,
variable.device)
def _infer_real_valued_column_for_tensor(name, tensor):
"""Creates a real_valued_column for given tensor and name."""
if isinstance(tensor, sparse_tensor_py.SparseTensor):
raise ValueError(
'SparseTensor is not supported for auto detection. Please define '
'corresponding FeatureColumn for tensor {} {}.', name, tensor)
if not (tensor.dtype.is_integer or tensor.dtype.is_floating):
raise ValueError(
'Non-integer or non-floating types are not supported for auto detection.'
' Please define corresponding FeatureColumn for tensor {} {}.', name,
tensor)
shape = tensor.get_shape().as_list()
dimension = 1
for i in range(1, len(shape)):
dimension *= shape[i]
return fc.real_valued_column(name, dimension=dimension, dtype=tensor.dtype)
def infer_real_valued_columns(features):
if not isinstance(features, dict):
return [_infer_real_valued_column_for_tensor('', features)]
feature_columns = []
for key, value in features.items():
feature_columns.append(_infer_real_valued_column_for_tensor(key, value))
return feature_columns
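# For illustration: a dense float tensor of shape [batch, 2, 3] yields a
# real_valued_column with dimension 2 * 3 = 6; the leading (batch) dimension
# is ignored by the loop in _infer_real_valued_column_for_tensor.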
def check_feature_columns(feature_columns):
"""Checks the validity of the set of FeatureColumns.
Args:
feature_columns: A set of instances or subclasses of FeatureColumn.
Raises:
ValueError: If there are duplicate feature column keys.
"""
seen_keys = set()
for f in feature_columns:
key = f.key
if key in seen_keys:
raise ValueError('Duplicate feature column key found for column: {}. '
'This usually means that the column is almost identical '
'to another column, and one must be discarded.'.format(
f.name))
seen_keys.add(key)
class _Transformer(object):
"""Handles all the transformations defined by FeatureColumn if needed.
FeatureColumn specifies how to digest an input column to the network. Some
feature columns require data transformations. This class handles those
transformations if they are not handled already.
Some features may be used in more than one places. For example one can use a
bucketized feature by itself and a cross with it. In that case Transformer
should create only one bucketization op instead of multiple ops for each
feature column. To handle re-use of transformed columns, Transformer keeps all
previously transformed columns.
An example usage of Transformer is as follows:
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
occupation_x_age = crossed_column(columns=[occupation, age_buckets],
hash_bucket_size=10000)
columns_to_tensor = tf.parse_example(...)
transformer = Transformer(columns_to_tensor)
occupation_x_age_tensor = transformer.transform(occupation_x_age)
occupation_tensor = transformer.transform(occupation)
age_buckets_tensor = transformer.transform(age_buckets)
"""
def __init__(self, columns_to_tensors):
"""Initializes transfomer.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have FeatureColumn as
a key too. That means that FeatureColumn is already transformed by input
pipeline. For example, `inflow` may have handled transformations.
Transformed features are inserted in columns_to_tensors.
"""
self._columns_to_tensors = columns_to_tensors
def transform(self, feature_column):
"""Returns a Tensor which represents given feature_column.
Args:
feature_column: An instance of FeatureColumn.
Returns:
A Tensor which represents given feature_column. It may create a new Tensor
or re-use an existing one.
Raises:
ValueError: if FeatureColumn cannot be handled by this Transformer.
"""
logging.debug('Transforming feature_column %s', feature_column)
if feature_column in self._columns_to_tensors:
# Feature_column is already transformed.
return self._columns_to_tensors[feature_column]
feature_column.insert_transformed_feature(self._columns_to_tensors)
if feature_column not in self._columns_to_tensors:
raise ValueError('Column {} is not supported.'.format(
feature_column.name))
return self._columns_to_tensors[feature_column]
def _add_variable_collection(weight_collections):
if weight_collections:
weight_collections = list(
set(list(weight_collections) + [ops.GraphKeys.VARIABLES]))
return weight_collections
# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access
_SUPPORTED_SEQUENCE_COLUMNS = (fc._OneHotColumn,
fc._EmbeddingColumn,
fc._RealValuedColumn)
_FORBIDDEN_SEQUENCE_COLUMNS = (fc._HashedEmbeddingColumn,
fc._BucketizedColumn,
fc._CrossedColumn)
def _check_supported_sequence_columns(feature_columns):
"""Asserts `feature_columns` are in `_SUPPORTED_SEQUENCE_COLUMNS`."""
for col in feature_columns:
if not isinstance(col, _SUPPORTED_SEQUENCE_COLUMNS):
raise ValueError(
'FeatureColumn type {} is not currently supported for sequence data.'.
format(type(col).__name__))
def _get_parent_columns(feature_column):
"""Returns the tuple of `FeatureColumn`s that `feature_column` depends on."""
if isinstance(feature_column, (fc._WeightedSparseColumn,
fc._OneHotColumn,
fc._EmbeddingColumn,)):
return (feature_column.sparse_id_column,)
if isinstance(feature_column, (fc._BucketizedColumn,)):
return (feature_column.source_column,)
if isinstance(feature_column, (fc._CrossedColumn)):
return tuple(feature_column.columns)
return tuple()
def _gather_feature_columns(feature_columns):
"""Returns a list of all ancestor `FeatureColumns` of `feature_columns`."""
gathered = list(feature_columns)
i = 0
while i < len(gathered):
for column in _get_parent_columns(gathered[i]):
if column not in gathered:
gathered.append(column)
i += 1
return gathered
def _check_forbidden_sequence_columns(feature_columns):
"""Recursively cecks `feature_columns` for `_FORBIDDEN_SEQUENCE_COLUMNS`."""
all_feature_columns = _gather_feature_columns(feature_columns)
for feature_column in all_feature_columns:
if isinstance(feature_column, _FORBIDDEN_SEQUENCE_COLUMNS):
raise ValueError(
'Column {} is of type {}, which is not currently supported for '
'sequences.'.format(feature_column.name,
type(feature_column).__name__))
|
{
"content_hash": "657c273ddab14edf037f85e0f425fc90",
"timestamp": "",
"source": "github",
"line_count": 866,
"max_line_length": 109,
"avg_line_length": 41.495381062355655,
"alnum_prop": 0.6514540141922917,
"repo_name": "tongwang01/tensorflow",
"id": "16ecd92e6705afd360f18198e33abbd702569ed1",
"size": "36624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/layers/python/layers/feature_column_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "177722"
},
{
"name": "C++",
"bytes": "11252614"
},
{
"name": "CMake",
"bytes": "36462"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "HTML",
"bytes": "968188"
},
{
"name": "Java",
"bytes": "41615"
},
{
"name": "JavaScript",
"bytes": "10844"
},
{
"name": "Jupyter Notebook",
"bytes": "1974767"
},
{
"name": "Makefile",
"bytes": "21265"
},
{
"name": "Objective-C",
"bytes": "6942"
},
{
"name": "Objective-C++",
"bytes": "61636"
},
{
"name": "Protocol Buffer",
"bytes": "122032"
},
{
"name": "Python",
"bytes": "9724114"
},
{
"name": "Shell",
"bytes": "243989"
},
{
"name": "TypeScript",
"bytes": "429623"
}
],
"symlink_target": ""
}
|
from django.db import models
from professional.models import Professional
from titles.models import Adaptation
from character.models import Character
class Role(models.Model):
title = models.ForeignKey(Adaptation)
character = models.ForeignKey(Character, null=True)
ROLE_CHOICES = [
'Director',
'Writer',
'Producer',
'Music',
'Cinematography',
'Crew',
'Actor',
]
role = models.CharField(max_length=20,
                        choices=[(x, x) for x in ROLE_CHOICES],
                        default='Actor')
desc = models.TextField(blank=True)
url = models.URLField(blank=True)
imdb = models.URLField(blank=True)
#picture = models.ForeignKey(Picture, null=True)
|
{
"content_hash": "8ab2e8309a41eee712a8308156749d4a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 58,
"avg_line_length": 30.26923076923077,
"alnum_prop": 0.5984752223634053,
"repo_name": "vivyly/fancastic_17",
"id": "667c286578ebe338ac2b567acb88a1777f5f76b1",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fancastic_17/role/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1210"
},
{
"name": "JavaScript",
"bytes": "2385"
},
{
"name": "Python",
"bytes": "60130"
},
{
"name": "Shell",
"bytes": "5107"
}
],
"symlink_target": ""
}
|
from setuptools import setup, Extension
mdbsmodule = Extension(
'mdbs',
sources=['mdbsmodule.c'])
setup(
ext_modules=[mdbsmodule])
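# Typical invocation for a C extension like this (standard setuptools usage):
#     python setup.py build_ext --inplace
# which compiles mdbsmodule.c into an importable 'mdbs' extension module.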
|
{
"content_hash": "91795078958bb5f6f22bd0dc8c4d456a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 39,
"avg_line_length": 18.125,
"alnum_prop": 0.6896551724137931,
"repo_name": "cheeseywhiz/cheeseywhiz",
"id": "6b7566b32288c5c36172612e34911d57cb39d511",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Mandelbrot-Set/c/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21780"
},
{
"name": "CSS",
"bytes": "155"
},
{
"name": "HTML",
"bytes": "1377"
},
{
"name": "Haskell",
"bytes": "1568"
},
{
"name": "JavaScript",
"bytes": "97"
},
{
"name": "Makefile",
"bytes": "1497"
},
{
"name": "Python",
"bytes": "149292"
},
{
"name": "Roff",
"bytes": "121"
},
{
"name": "Shell",
"bytes": "119"
},
{
"name": "TeX",
"bytes": "54792"
}
],
"symlink_target": ""
}
|
import mock
import six
from sahara.plugins.cdh.v5_3_0 import edp_engine
from sahara.plugins.cdh.v5_3_0 import versionhandler
from sahara.tests.unit import base
class VersionHandlerTestCase(base.SaharaTestCase):
plugin_path = "sahara.plugins.cdh.v5_3_0."
cloudera_utils_path = plugin_path + "cloudera_utils.ClouderaUtilsV530."
plugin_utils_path = plugin_path + "plugin_utils.PluginUtilsV530."
def setUp(self):
super(VersionHandlerTestCase, self).setUp()
self.vh = versionhandler.VersionHandler()
def test_get_node_processes(self):
processes = self.vh.get_node_processes()
for k, v in six.iteritems(processes):
for p in v:
self.assertIsInstance(p, str)
@mock.patch("sahara.conductor.API.cluster_update")
@mock.patch("sahara.context.ctx")
@mock.patch(plugin_path + "deploy.configure_cluster")
@mock.patch(cloudera_utils_path + "get_cloudera_manager_info",
return_value={"fake_cm_info": "fake"})
def test_config_cluster(self, get_cm_info, configure_cluster,
ctx, cluster_update):
cluster = mock.Mock()
self.vh.configure_cluster(cluster)
configure_cluster.assert_called_once_with(cluster)
cluster_update.assert_called_once_with(
ctx(), cluster,
{'info': {"fake_cm_info": "fake"}})
@mock.patch(plugin_path + "deploy.start_cluster")
def test_start_cluster(self, start_cluster):
cluster = mock.Mock()
self.vh._set_cluster_info = mock.Mock()
self.vh.start_cluster(cluster)
start_cluster.assert_called_once_with(cluster)
self.vh._set_cluster_info.assert_called_once_with(cluster)
@mock.patch(plugin_path + "deploy.decommission_cluster")
def test_decommission_nodes(self, decommission_cluster):
cluster = mock.Mock()
instances = mock.Mock()
self.vh.decommission_nodes(cluster, instances)
decommission_cluster.assert_called_once_with(cluster,
instances)
@mock.patch(plugin_path + "deploy.scale_cluster")
def test_scale_cluster(self, scale_cluster):
cluster = mock.Mock()
instances = mock.Mock()
self.vh.scale_cluster(cluster, instances)
scale_cluster.assert_called_once_with(cluster, instances)
@mock.patch("sahara.conductor.API.cluster_update")
@mock.patch("sahara.context.ctx")
@mock.patch(cloudera_utils_path + "get_cloudera_manager_info",
return_value={})
@mock.patch(plugin_utils_path + "get_hue")
def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
ctx, cluster_update):
hue = mock.Mock()
hue.get_ip_or_dns_name.return_value = "1.2.3.4"
get_hue.return_value = hue
cluster = mock.Mock()
self.vh._set_cluster_info(cluster)
info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}
cluster_update.assert_called_once_with(ctx(), cluster, info)
@mock.patch("sahara.plugins.utils.get_instance")
@mock.patch("sahara.plugins.utils.get_config_value_or_default")
@mock.patch("sahara.service.edp.job_utils.get_plugin")
def test_get_edp_engine(self, get_plugin, get_config_value_or_default,
get_instance):
cluster = mock.Mock()
job_type = 'Java'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsInstance(ret, edp_engine.EdpOozieEngine)
job_type = 'Spark'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsInstance(ret, edp_engine.EdpSparkEngine)
job_type = 'unsupported'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsNone(ret)
def test_get_edp_job_types(self):
ret = self.vh.get_edp_job_types()
expect = edp_engine.EdpOozieEngine.get_supported_job_types() + \
edp_engine.EdpSparkEngine.get_supported_job_types()
self.assertEqual(expect, ret)
@mock.patch(plugin_path +
"edp_engine.EdpOozieEngine.get_possible_job_config",
return_value={'job_config': {}})
def test_edp_config_hints(self, get_possible_job_config):
job_type = mock.Mock()
ret = self.vh.get_edp_config_hints(job_type)
get_possible_job_config.assert_called_once_with(job_type)
self.assertEqual(ret, {'job_config': {}})
@mock.patch(plugin_path + "deploy.get_open_ports", return_value=[1234])
def test_get_open_ports(self, get_open_ports):
node_group = mock.Mock()
ret = self.vh.get_open_ports(node_group)
get_open_ports.assert_called_once_with(node_group)
self.assertEqual(ret, [1234])
@mock.patch(plugin_utils_path + "recommend_configs")
def test_recommend_configs(self, recommend_configs):
cluster = mock.Mock()
scaling = mock.Mock()
self.vh.get_plugin_configs = mock.Mock()
self.vh.recommend_configs(cluster, scaling)
recommend_configs.assert_called_once_with(cluster,
self.vh.get_plugin_configs(),
scaling)
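# --- Illustrative note (editor's addition, not part of the original suite) --
# Stacked mock.patch decorators, as used throughout the class above, are
# applied bottom-up: the innermost (lowest) decorator supplies the first
# positional mock. The patch targets below are hypothetical.
#
#   @mock.patch("pkg.outer")   # outermost -> last mock argument
#   @mock.patch("pkg.inner")   # innermost -> first mock argument
#   def test_demo(self, inner, outer):
#       ...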
|
{
"content_hash": "0246fb33d160049d453fef78e16dc43e",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 42.112,
"alnum_prop": 0.6221504559270516,
"repo_name": "shakamunyi/sahara",
"id": "61daf64543ce46ba03d9567d9c277d3c5c6d184f",
"size": "5852",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "36849"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "4947252"
},
{
"name": "Shell",
"bytes": "100611"
}
],
"symlink_target": ""
}
|
import logging
import os
import re
import shutil
from cloudException import CloudRuntimeException, CloudInternalException, formatExceptionInfo
from configFileOps import configFileOps
from networkConfig import networkConfig
from utilities import writeProgressBar, bash
class serviceCfgBase(object):
def __init__(self, syscfg):
self.status = None
self.serviceName = ""
self.cfoHandlers = []
self.syscfg = syscfg
self.netMgrRunning = False
def configration(self):
writeProgressBar("Configure " + self.serviceName + " ...", None)
result = False
try:
result = self.config()
if result is None:
result = False
self.status = result
writeProgressBar(None, result)
return result
except CloudRuntimeException, e:
self.status = result
writeProgressBar(None, result)
logging.debug(e.getDetails())
raise e
except CloudInternalException, e:
self.status = result
writeProgressBar(None, result)
raise e
except:
logging.debug(formatExceptionInfo())
            if self.syscfg.env.mode == "Server":
                raise CloudRuntimeException("Configuring %s failed, please check /var/log/cosmic/management/setupManagement.log for details" % self.serviceName)
            else:
                raise CloudRuntimeException("Configuring %s failed, please check /var/log/cosmic/agent/setup.log for details" % self.serviceName)
def backup(self):
if self.status is None:
return True
writeProgressBar("Restore " + self.serviceName + " ...", None)
result = False
try:
for cfo in self.cfoHandlers:
cfo.backup()
result = self.restore()
except (CloudRuntimeException, CloudInternalException), e:
logging.debug(e)
writeProgressBar(None, result)
def config(self):
return True
def restore(self):
return True
class networkConfigBase:
def __init__(self, syscfg):
self.netcfg = networkConfig()
self.serviceName = "Network"
self.brName = None
self.dev = None
self.syscfg = syscfg
def isPreConfiged(self):
preCfged = False
for br in self.syscfg.env.nics:
if not self.netcfg.isNetworkDev(br):
logging.debug("%s is not a network device, is it down?" % br)
return False
if self.syscfg.env.bridgeType == "openvswitch" and not self.netcfg.isOvsBridge(br):
raise CloudInternalException("%s is not an openvswitch bridge" % br)
if self.syscfg.env.bridgeType == "native" and not self.netcfg.isBridge(br) and not self.netcfg.isNetworkDev(br):
# traffic label doesn't have to be a bridge, we'll create bridges on it
raise CloudInternalException("%s is not a bridge and not a net device" % br)
preCfged = True
return preCfged
def cfgNetwork(self, dev=None, brName=None):
if dev is None:
device = self.netcfg.getDefaultNetwork()
else:
device = self.netcfg.getDevInfo(dev)
if device.type == "dev":
if brName is None:
brName = "cloudbr0"
self.writeToCfgFile(brName, device)
elif device.type == "brport":
brName = self.netcfg.getBridge(dev)
brDevice = self.netcfg.getDevInfo(brName)
self.writeToCfgFile(brDevice.name, device)
elif device.type == "bridge":
            # FIXME: assuming the outgoing physical device is on port 1
enslavedDev = self.netcfg.getEnslavedDev(device.name, 1)
if enslavedDev is None:
raise CloudInternalException("Failed to get enslaved devices on bridge:%s" % device.name)
brDevice = device
device = self.netcfg.getDevInfo(enslavedDev)
brName = brDevice.name
self.writeToCfgFile(brName, device)
self.brName = brName
self.dev = device.name
def writeToCfgFile(self):
pass
class networkConfigUbuntu(serviceCfgBase, networkConfigBase):
def __init__(self, syscfg):
super(networkConfigUbuntu, self).__init__(syscfg)
networkConfigBase.__init__(self, syscfg)
self.netCfgFile = "/etc/network/interfaces"
def getNetworkMethod(self, line):
if line.find("static") != -1:
return "static"
elif line.find("dhcp") != -1:
return "dhcp"
else:
logging.debug("Failed to find the network method from:%s" % line)
raise CloudInternalException("Failed to find the network method from /etc/network/interfaces")
def addBridge(self, br, dev):
bash("ifdown %s" % dev.name)
for line in file(self.netCfgFile).readlines():
match = re.match("^ *iface %s.*" % dev.name, line)
if match is not None:
dev.method = self.getNetworkMethod(match.group(0))
cfo = configFileOps(self.netCfgFile, self)
if self.syscfg.env.bridgeType == "openvswitch":
bridgeCfg = "\n".join(("",
"iface {device} inet manual",
" ovs_type OVSPort",
" ovs_bridge {bridge}",
"",
"auto {bridge}",
"allow-ovs {bridge}",
"iface {bridge} inet {device_method}",
" ovs_type OVSBridge",
" ovs_ports {device}",
"")).format(bridge=br, device=dev.name, device_method=dev.method)
cfo.replace_line("^ *auto %s.*" % dev.name,
"allow-{bridge} {device}".format(bridge=br, device=dev.name))
elif self.syscfg.env.bridgeType == "native":
bridgeCfg = "\niface %s inet manual\n \
auto %s\n \
iface %s inet %s\n \
bridge_ports %s\n" % (dev.name, br, br, dev.method, dev.name)
else:
raise CloudInternalException("Unknown network.bridge.type %s" % self.syscfg.env.bridgeType)
cfo.replace_line("^ *iface %s.*" % dev.name, bridgeCfg)
def addDev(self, br, dev):
logging.debug("Haven't implement yet")
def addBridgeAndDev(self, br, dev):
logging.debug("Haven't implement yet")
def writeToCfgFile(self, br, dev):
cfg = file(self.netCfgFile).read()
ifaceDev = re.search("^ *iface %s.*" % dev.name, cfg, re.MULTILINE)
ifaceBr = re.search("^ *iface %s.*" % br, cfg, re.MULTILINE)
if ifaceDev is not None and ifaceBr is not None:
logging.debug("%s:%s already configured" % (br, dev.name))
return True
elif ifaceDev is not None and ifaceBr is None:
# reconfig bridge
self.addBridge(br, dev)
elif ifaceDev is None and ifaceBr is not None:
# reconfig dev
raise CloudInternalException("Missing device configuration, Need to add your network configuration into /etc/network/interfaces at first")
else:
raise CloudInternalException("Missing bridge/device network configuration, need to add your network configuration into /etc/network/interfaces at first")
def config(self):
try:
if super(networkConfigUbuntu, self).isPreConfiged():
return True
self.netMgrRunning = self.syscfg.svo.isServiceRunning("network-manager")
super(networkConfigUbuntu, self).cfgNetwork()
if self.netMgrRunning:
self.syscfg.svo.stopService("network-manager")
self.syscfg.svo.disableService("network-manager")
ifup_op = bash("ifup %s" % self.brName)
if not ifup_op.isSuccess():
raise CloudInternalException("Can't start network:%s %s" % (self.brName, ifup_op.getErrMsg()))
self.syscfg.env.nics.append(self.brName)
self.syscfg.env.nics.append(self.brName)
self.syscfg.env.nics.append(self.brName)
return True
except:
raise
def restore(self):
try:
if self.netMgrRunning:
self.syscfg.svo.enableService("network-manager")
self.syscfg.svo.startService("network-manager")
bash("/etc/init.d/networking stop")
bash("/etc/init.d/networking start")
return True
except:
logging.debug(formatExceptionInfo())
return False
class networkConfigRedhat(serviceCfgBase, networkConfigBase):
def __init__(self, syscfg):
super(networkConfigRedhat, self).__init__(syscfg)
networkConfigBase.__init__(self, syscfg)
def writeToCfgFile(self, brName, dev):
self.devCfgFile = "/etc/sysconfig/network-scripts/ifcfg-%s" % dev.name
self.brCfgFile = "/etc/sysconfig/network-scripts/ifcfg-%s" % brName
isDevExist = os.path.exists(self.devCfgFile)
isBrExist = os.path.exists(self.brCfgFile)
if isDevExist and isBrExist:
logging.debug("%s:%s already configured" % (brName, dev.name))
return True
elif isDevExist and not isBrExist:
# reconfig bridge
self.addBridge(brName, dev)
elif not isDevExist and isBrExist:
# reconfig dev
raise CloudInternalException("Missing device configuration, Need to add your network configuration into /etc/sysconfig/network-scripts at first")
else:
raise CloudInternalException("Missing bridge/device network configuration, need to add your network configuration into /etc/sysconfig/network-scripts at first")
def addBridge(self, brName, dev):
bash("ifdown %s" % dev.name)
if not os.path.exists(self.brCfgFile):
shutil.copy(self.devCfgFile, self.brCfgFile)
# config device file at first: disable nm, set onboot=yes if not
cfo = configFileOps(self.devCfgFile, self)
cfo.addEntry("NM_CONTROLLED", "no")
cfo.addEntry("ONBOOT", "yes")
if self.syscfg.env.bridgeType == "openvswitch":
if cfo.getEntry("IPADDR"):
cfo.rmEntry("IPADDR", cfo.getEntry("IPADDR"))
cfo.addEntry("DEVICETYPE", "ovs")
cfo.addEntry("TYPE", "OVSPort")
cfo.addEntry("OVS_BRIDGE", brName)
elif self.syscfg.env.bridgeType == "native":
cfo.addEntry("BRIDGE", brName)
else:
raise CloudInternalException("Unknown network.bridge.type %s" % self.syscfg.env.bridgeType)
cfo.save()
cfo = configFileOps(self.brCfgFile, self)
cfo.addEntry("NM_CONTROLLED", "no")
cfo.addEntry("ONBOOT", "yes")
cfo.addEntry("DEVICE", brName)
if self.syscfg.env.bridgeType == "openvswitch":
if cfo.getEntry("HWADDR"):
cfo.rmEntry("HWADDR", cfo.getEntry("HWADDR"))
if cfo.getEntry("UUID"):
cfo.rmEntry("UUID", cfo.getEntry("UUID"))
cfo.addEntry("STP", "yes")
cfo.addEntry("DEVICETYPE", "ovs")
cfo.addEntry("TYPE", "OVSBridge")
elif self.syscfg.env.bridgeType == "native":
cfo.addEntry("TYPE", "Bridge")
else:
raise CloudInternalException("Unknown network.bridge.type %s" % self.syscfg.env.bridgeType)
cfo.save()
def config(self):
try:
if super(networkConfigRedhat, self).isPreConfiged():
return True
super(networkConfigRedhat, self).cfgNetwork()
self.netMgrRunning = self.syscfg.svo.isServiceRunning("NetworkManager")
if self.netMgrRunning:
self.syscfg.svo.stopService("NetworkManager")
self.syscfg.svo.disableService("NetworkManager")
cfo = configFileOps("/etc/sysconfig/network", self)
cfo.addEntry("NOZEROCONF", "yes")
cfo.save()
if not bash("service network restart").isSuccess():
raise CloudInternalException("Can't restart network")
self.syscfg.env.nics.append(self.brName)
self.syscfg.env.nics.append(self.brName)
self.syscfg.env.nics.append(self.brName)
return True
except:
raise
def restore(self):
try:
if self.netMgrRunning:
self.syscfg.svo.enableService("NetworkManager")
self.syscfg.svo.startService("NetworkManager")
bash("service network restart")
return True
except:
logging.debug(formatExceptionInfo())
return False
class cgroupConfig(serviceCfgBase):
def __init__(self, syscfg):
super(cgroupConfig, self).__init__(syscfg)
self.serviceName = "Cgroup"
def config(self):
try:
cfo = configFileOps("/etc/cgconfig.conf", self)
addConfig = "group virt {\n \
cpu {\n \
cpu.shares = 9216;\n \
}\n \
}\n"
cfo.add_lines(addConfig)
self.syscfg.svo.stopService("cgconfig", True)
self.syscfg.svo.enableService("cgconfig", forcestart=True)
cfo = configFileOps("/etc/cgrules.conf", self)
cfgline = "root:/usr/sbin/libvirtd cpu virt/\n"
cfo.add_lines(cfgline)
self.syscfg.svo.stopService("cgred", True)
if not self.syscfg.svo.enableService("cgred"):
return False
return True
except:
raise
def restore(self):
try:
self.syscfg.svo.stopService("cgconfig")
self.syscfg.svo.enableService("cgconfig", forcestart=True)
self.syscfg.svo.stopService("cgred")
self.syscfg.svo.enableService("cgred")
return True
except:
logging.debug(formatExceptionInfo())
return False
class nfsConfig(serviceCfgBase):
def __init__(self, syscfg):
super(nfsConfig, self).__init__(syscfg)
self.serviceName = "Nfs"
def config(self):
try:
if not os.path.exists("/etc/nfsmount.conf"):
return True
cfo = configFileOps("/etc/nfsmount.conf")
cfo.addEntry("Ac", "False")
cfo.addEntry("actimeo", "0")
cfo.save()
self.syscfg.svo.enableService("rpcbind")
self.syscfg.svo.stopService("rpcbind")
self.syscfg.svo.startService("rpcbind")
self.syscfg.svo.enableService("nfs")
self.syscfg.svo.stopService("nfs")
self.syscfg.svo.startService("nfs")
return True
except:
logging.debug(formatExceptionInfo())
return False
class securityPolicyConfigUbuntu(serviceCfgBase):
def __init__(self, syscfg):
super(securityPolicyConfigUbuntu, self).__init__(syscfg)
self.serviceName = "Apparmor"
def config(self):
try:
cmd = bash("service apparmor status")
if not cmd.isSuccess() or cmd.getStdout() == "":
self.spRunning = False
return True
if not bash("apparmor_status |grep libvirt").isSuccess():
return True
bash("ln -s /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable/")
bash("ln -s /etc/apparmor.d/usr.lib.libvirt.virt-aa-helper /etc/apparmor.d/disable/")
bash("apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd")
bash("apparmor_parser -R /etc/apparmor.d/usr.lib.libvirt.virt-aa-helper")
return True
except:
raise CloudRuntimeException("Failed to configure apparmor, please see the /var/log/cosmic/agent/setup.log for detail, \
or you can manually disable it before starting myCloud")
def restore(self):
try:
self.syscfg.svo.enableService("apparmor")
self.syscfg.svo.startService("apparmor")
return True
except:
logging.debug(formatExceptionInfo())
return False
class securityPolicyConfigRedhat(serviceCfgBase):
def __init__(self, syscfg):
super(securityPolicyConfigRedhat, self).__init__(syscfg)
self.serviceName = "SElinux"
def config(self):
selinuxEnabled = True
if not bash("selinuxenabled").isSuccess():
selinuxEnabled = False
if selinuxEnabled:
try:
bash("setenforce 0")
cfo = configFileOps("/etc/selinux/config", self)
cfo.replace_line("SELINUX=", "SELINUX=permissive")
return True
except:
raise CloudRuntimeException("Failed to configure selinux, please see the /var/log/cosmic/agent/setup.log for detail, \
or you can manually disable it before starting myCloud")
else:
return True
def restore(self):
try:
bash("setenforce 1")
return True
except:
logging.debug(formatExceptionInfo())
return False
class libvirtConfigRedhat(serviceCfgBase):
def __init__(self, syscfg):
super(libvirtConfigRedhat, self).__init__(syscfg)
self.serviceName = "Libvirt"
def config(self):
try:
cfo = configFileOps("/etc/libvirt/libvirtd.conf", self)
cfo.addEntry("listen_tcp", "1")
cfo.addEntry("tcp_port", "\"16509\"")
cfo.addEntry("auth_tcp", "\"none\"")
cfo.addEntry("listen_tls", "0")
cfo.save()
cfo = configFileOps("/etc/sysconfig/libvirtd", self)
cfo.addEntry("export CGROUP_DAEMON", "'cpu:/virt'")
cfo.addEntry("LIBVIRTD_ARGS", "-l")
cfo.save()
filename = "/etc/libvirt/qemu.conf"
cfo = configFileOps(filename, self)
cfo.addEntry("security_driver", "\"none\"")
cfo.addEntry("user", "\"root\"")
cfo.addEntry("group", "\"root\"")
cfo.addEntry("vnc_listen", "\"0.0.0.0\"")
cfo.save()
self.syscfg.svo.stopService("libvirtd")
if not self.syscfg.svo.startService("libvirtd"):
return False
return True
except:
raise
def restore(self):
pass
class libvirtConfigUbuntu(serviceCfgBase):
def __init__(self, syscfg):
super(libvirtConfigUbuntu, self).__init__(syscfg)
self.serviceName = "Libvirt"
def setupLiveMigration(self):
cfo = configFileOps("/etc/libvirt/libvirtd.conf", self)
cfo.addEntry("listen_tcp", "1")
cfo.addEntry("tcp_port", "\"16509\"");
cfo.addEntry("auth_tcp", "\"none\"");
cfo.addEntry("listen_tls", "0")
cfo.save()
if os.path.exists("/etc/init/libvirt-bin.conf"):
cfo = configFileOps("/etc/init/libvirt-bin.conf", self)
cfo.replace_line("exec /usr/sbin/libvirtd", "exec /usr/sbin/libvirtd -d -l")
else:
cfo = configFileOps("/etc/default/libvirt-bin", self)
cfo.replace_or_add_line("libvirtd_opts=", "libvirtd_opts='-l -d'")
def config(self):
try:
self.setupLiveMigration()
filename = "/etc/libvirt/qemu.conf"
cfo = configFileOps(filename, self)
cfo.addEntry("security_driver", "\"none\"")
cfo.addEntry("user", "\"root\"")
cfo.addEntry("group", "\"root\"")
cfo.save()
self.syscfg.svo.stopService("libvirt-bin")
self.syscfg.svo.enableService("libvirt-bin")
return True
except:
raise
def restore(self):
try:
self.syscfg.svo.stopService("libvirt-bin")
self.syscfg.svo.startService("libvirt-bin")
return True
except:
logging.debug(formatExceptionInfo())
return False
class firewallConfigUbuntu(serviceCfgBase):
def __init__(self, syscfg):
super(firewallConfigUbuntu, self).__init__(syscfg)
self.serviceName = "Firewall"
def config(self):
try:
ports = "22 1798 16509".split()
for p in ports:
bash("ufw allow %s" % p)
bash("ufw allow proto tcp from any to any port 5900:6100")
bash("ufw allow proto tcp from any to any port 49152:49216")
self.syscfg.svo.stopService("ufw")
self.syscfg.svo.startService("ufw")
return True
except:
raise
def restore(self):
return True
class firewallConfigBase(serviceCfgBase):
def __init__(self, syscfg):
super(firewallConfigBase, self).__init__(syscfg)
self.serviceName = "Firewall"
self.rules = []
def allowPort(self, port):
status = False
try:
status = bash("iptables-save|grep INPUT|grep -w %s" % port).isSuccess()
except:
pass
if not status:
redo = False
result = True
try:
result = bash("iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT" % port).isSuccess()
except:
redo = True
if not result or redo:
bash("sleep 30")
bash("iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT" % port)
def config(self):
try:
for port in self.ports:
self.allowPort(port)
for rule in self.rules:
bash("iptables " + rule)
bash("iptables-save > /etc/sysconfig/iptables")
self.syscfg.svo.stopService("iptables")
self.syscfg.svo.startService("iptables")
return True
except:
raise
def restore(self):
return True
class firewallConfigAgent(firewallConfigBase):
def __init__(self, syscfg):
super(firewallConfigAgent, self).__init__(syscfg)
self.ports = "22 16509 5900:6100 49152:49216".split()
if syscfg.env.distribution.getVersion() == "CentOS":
self.rules = ["-D FORWARD -j RH-Firewall-1-INPUT"]
else:
self.rules = ["-D FORWARD -j REJECT --reject-with icmp-host-prohibited"]
class cloudAgentConfig(serviceCfgBase):
def __init__(self, syscfg):
super(cloudAgentConfig, self).__init__(syscfg)
if syscfg.env.agentMode == "Agent":
self.serviceName = "cloudAgent"
elif syscfg.env.agentMode == "myCloud":
self.serviceName = "myCloud"
elif syscfg.env.agentMode == "Console":
self.serviceName = "Console Proxy"
def configMyCloud(self):
try:
cfo = configFileOps("/etc/cosmic/agent/agent.properties", self)
cfo.addEntry("host", self.syscfg.env.mgtSvr)
cfo.addEntry("zone", self.syscfg.env.zone)
cfo.addEntry("port", "443")
if cfo.getEntry("local.storage.uuid") == "":
cfo.addEntry("local.storage.uuid", str(bash("uuidgen").getStdout()))
cfo.addEntry("guid", str(self.syscfg.env.uuid))
cfo.addEntry("mount.path", "/mnt")
cfo.addEntry("resource", "com.cloud.storage.resource.LocalSecondaryStorageResource|com.cloud.agent.resource.computing.CloudZonesComputingResource")
cfo.save()
# self.syscfg.svo.stopService("cloud-agent")
# self.syscfg.svo.enableService("cloud-agent")
return True
except:
raise
def configAgent(self):
try:
cfo = configFileOps("/etc/cosmic/agent/agent.properties", self)
cfo.addEntry("host", self.syscfg.env.mgtSvr)
cfo.addEntry("zone", self.syscfg.env.zone)
cfo.addEntry("pod", self.syscfg.env.pod)
cfo.addEntry("cluster", self.syscfg.env.cluster)
cfo.addEntry("hypervisor.type", self.syscfg.env.hypervisor)
cfo.addEntry("port", "8250")
cfo.addEntry("guid", str(self.syscfg.env.uuid))
if cfo.getEntry("local.storage.uuid") == "":
cfo.addEntry("local.storage.uuid", str(bash("uuidgen").getStdout()))
if cfo.getEntry("resource") == "":
cfo.addEntry("resource", "com.cloud.hypervisor.kvm.resource.LibvirtComputingResource")
cfo.save()
self.syscfg.svo.stopService("cosmic-agent")
bash("sleep 30")
self.syscfg.svo.enableService("cosmic-agent")
return True
except:
raise
def configConsole(self):
try:
cfo = configFileOps("/etc/cosmic/agent/agent.properties", self)
cfo.addEntry("host", self.syscfg.env.mgtSvr)
cfo.addEntry("zone", self.syscfg.env.zone)
cfo.addEntry("pod", self.syscfg.env.pod)
cfo.addEntry("cluster", self.syscfg.env.cluster)
cfo.addEntry("port", "8250")
cfo.addEntry("guid", str(self.syscfg.env.uuid))
cfo.addEntry("resource", "com.cloud.agent.resource.computing.consoleProxyResource")
cfo.save()
self.syscfg.svo.stopService("cosmic-agent")
self.syscfg.svo.enableService("cosmic-agent")
return True
except:
raise
def config(self):
if self.syscfg.env.agentMode == "Agent":
return self.configAgent()
elif self.syscfg.env.agentMode == "myCloud":
return self.configMyCloud()
elif self.syscfg.env.agentMode == "console":
return self.configConsole()
def restore(self):
return True
class firewallConfigServer(firewallConfigBase):
def __init__(self, syscfg):
super(firewallConfigServer, self).__init__(syscfg)
# 9090 is used for cluster management server
if self.syscfg.env.svrMode == "myCloud":
self.ports = "443 8080 8250 8443 9090".split()
else:
self.ports = "8080 8250 9090".split()
class ubuntuFirewallConfigServer(firewallConfigServer):
def allowPort(self, port):
status = False
try:
status = bash("iptables-save|grep INPUT|grep -w %s" % port).isSuccess()
except:
pass
if not status:
bash("ufw allow %s/tcp" % port)
def config(self):
try:
for port in self.ports:
self.allowPort(port)
return True
except:
raise
|
{
"content_hash": "5d7c1e17e8bbadf37a72a5ef4d5d0e68",
"timestamp": "",
"source": "github",
"line_count": 741,
"max_line_length": 172,
"avg_line_length": 36.75978407557355,
"alnum_prop": 0.5652924116156981,
"repo_name": "remibergsma/cosmic",
"id": "7f33cc3c23e0e5949ff8db4cded9eb3921b963dd",
"size": "27239",
"binary": false,
"copies": "1",
"ref": "refs/heads/play/serviceofferings",
"path": "cosmic-core/scripts/src/main/resources/python/lib/cloudutils/serviceConfig.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1451"
},
{
"name": "CSS",
"bytes": "355524"
},
{
"name": "FreeMarker",
"bytes": "1832"
},
{
"name": "Groovy",
"bytes": "135777"
},
{
"name": "HTML",
"bytes": "142254"
},
{
"name": "Java",
"bytes": "19541615"
},
{
"name": "JavaScript",
"bytes": "4569018"
},
{
"name": "Python",
"bytes": "1940322"
},
{
"name": "Shell",
"bytes": "274412"
},
{
"name": "XSLT",
"bytes": "165385"
}
],
"symlink_target": ""
}
|
import sys
import os
import py
import re
import subprocess
import inspect
class Interpreters:
def __init__(self):
self.name2executable = {}
self.executable2info = {}
def get_executable(self, name):
""" return path object to the executable for the given
name (e.g. python2.6, python2.7, python etc.)
if name is already an existing path, return name.
If an interpreter cannot be found, return None.
"""
try:
return self.name2executable[name]
except KeyError:
self.name2executable[name] = e = find_executable(name)
return e
def get_info(self, name=None, executable=None):
if name is None and executable is None:
raise ValueError("need to specify name or executable")
if name:
if executable is not None:
raise ValueError("cannot specify both name, executable")
executable = self.get_executable(name)
if not executable:
return NoInterpreterInfo(name=name)
try:
return self.executable2info[executable]
except KeyError:
info = run_and_get_interpreter_info(name, executable)
self.executable2info[executable] = info
return info
def get_sitepackagesdir(self, info, envdir):
if not info.executable:
return ""
envdir = str(envdir)
try:
res = exec_on_interpreter(info.executable,
[inspect.getsource(sitepackagesdir),
"print (sitepackagesdir(%r))" % envdir])
except ExecFailed:
val = sys.exc_info()[1]
print ("execution failed: %s -- %s" %(val.out, val.err))
return ""
else:
return res["dir"]
def run_and_get_interpreter_info(name, executable):
assert executable
try:
result = exec_on_interpreter(executable,
[inspect.getsource(pyinfo), "print (pyinfo())"])
except ExecFailed:
val = sys.exc_info()[1]
return NoInterpreterInfo(name, executable=val.executable,
out=val.out, err=val.err)
else:
return InterpreterInfo(name, executable, **result)
def exec_on_interpreter(executable, source):
if isinstance(source, list):
source = "\n".join(source)
from subprocess import Popen, PIPE
args = [str(executable)]
popen = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
popen.stdin.write(source.encode("utf8"))
out, err = popen.communicate()
if popen.returncode:
raise ExecFailed(executable, source, out, err)
try:
result = eval(out.strip())
except Exception:
raise ExecFailed(executable, source, out,
"could not decode %r" % out)
return result
class ExecFailed(Exception):
def __init__(self, executable, source, out, err):
self.executable = executable
self.source = source
self.out = out
self.err = err
class InterpreterInfo:
runnable = True
def __init__(self, name, executable, version_info):
assert executable and version_info
self.name = name
self.executable = executable
self.version_info = version_info
def __str__(self):
return "<executable at %s, version_info %s>" % (
self.executable, self.version_info)
class NoInterpreterInfo:
runnable = False
def __init__(self, name, executable=None,
out=None, err="not found"):
self.name = name
self.executable = executable
self.version_info = None
self.out = out
self.err = err
def __str__(self):
if self.executable:
return "<executable at %s, not runnable>"
else:
return "<executable not found for: %s>" % self.name
if sys.platform != "win32":
def find_executable(name):
return py.path.local.sysfind(name)
else:
# Exceptions to the usual windows mapping
win32map = {
'python': sys.executable,
'jython': "c:\jython2.5.1\jython.bat",
}
def locate_via_py(v_maj, v_min):
ver = "-%s.%s" % (v_maj, v_min)
script = "import sys; print(sys.executable)"
py_exe = py.path.local.sysfind('py')
if py_exe:
try:
exe = py_exe.sysexec(ver, '-c', script).strip()
except py.process.cmdexec.Error:
exe = None
if exe:
exe = py.path.local(exe)
if exe.check():
return exe
def find_executable(name):
p = py.path.local.sysfind(name)
if p:
return p
actual = None
# Is this a standard PythonX.Y name?
m = re.match(r"python(\d)\.(\d)", name)
if m:
# The standard names are in predictable places.
actual = r"c:\python%s%s\python.exe" % m.groups()
if not actual:
actual = win32map.get(name, None)
if actual:
actual = py.path.local(actual)
if actual.check():
return actual
# The standard executables can be found as a last resort via the
# Python launcher py.exe
if m:
            return locate_via_py(*m.groups())
def pyinfo():
import sys
return dict(version_info=tuple(sys.version_info))
def sitepackagesdir(envdir):
from distutils.sysconfig import get_python_lib
return dict(dir=get_python_lib(prefix=envdir))
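# --- Illustrative usage sketch (editor's addition, not part of tox) ---------
# Exercises the public surface of this module; "python" is assumed to be on
# PATH, otherwise a NoInterpreterInfo is returned instead of an error.
def _demo():
    interpreters = Interpreters()
    info = interpreters.get_info(name="python")
    if info.runnable:
        print("found %s -> %s" % (info.name, info.version_info))
    else:
        print("no usable interpreter: %s" % info.err)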
|
{
"content_hash": "5cd92e5cd07d4d36aeddaf01916c48b6",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 72,
"avg_line_length": 32.22093023255814,
"alnum_prop": 0.5743413929989174,
"repo_name": "msabramo/tox",
"id": "e225fcc13515ea33e48c4e32f1d9b8fab26ba05b",
"size": "5542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tox/interpreters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6090"
},
{
"name": "Perl",
"bytes": "14751"
},
{
"name": "Python",
"bytes": "215609"
}
],
"symlink_target": ""
}
|
"""
Geometry factories based on the geo interface
"""
from .point import Point, asPoint
from .linestring import LineString, asLineString
from .polygon import Polygon, asPolygon
from .multipoint import MultiPoint, asMultiPoint
from .multilinestring import MultiLineString, asMultiLineString
from .multipolygon import MultiPolygon, MultiPolygonAdapter
from .collection import GeometryCollection
def box(minx, miny, maxx, maxy, ccw=True):
"""Returns a rectangular polygon with configurable normal vector"""
coords = [(maxx, miny), (maxx, maxy), (minx, maxy), (minx, miny)]
if not ccw:
coords = coords[::-1]
return Polygon(coords)
def shape(context):
"""Returns a new, independent geometry with coordinates *copied* from the
context.
"""
if hasattr(context, "__geo_interface__"):
ob = context.__geo_interface__
else:
ob = context
geom_type = ob.get("type").lower()
if geom_type == "point":
return Point(ob["coordinates"])
elif geom_type == "linestring":
return LineString(ob["coordinates"])
elif geom_type == "polygon":
return Polygon(ob["coordinates"][0], ob["coordinates"][1:])
elif geom_type == "multipoint":
return MultiPoint(ob["coordinates"])
elif geom_type == "multilinestring":
return MultiLineString(ob["coordinates"])
elif geom_type == "multipolygon":
return MultiPolygon(ob["coordinates"], context_type='geojson')
elif geom_type == "geometrycollection":
geoms = [shape(g) for g in ob.get("geometries", [])]
return GeometryCollection(geoms)
else:
raise ValueError("Unknown geometry type: %s" % geom_type)
def asShape(context):
"""Adapts the context to a geometry interface. The coordinates remain
stored in the context.
"""
if hasattr(context, "__geo_interface__"):
ob = context.__geo_interface__
else:
ob = context
try:
geom_type = ob.get("type").lower()
except AttributeError:
raise ValueError("Context does not provide geo interface")
if geom_type == "point":
return asPoint(ob["coordinates"])
elif geom_type == "linestring":
return asLineString(ob["coordinates"])
elif geom_type == "polygon":
return asPolygon(ob["coordinates"][0], ob["coordinates"][1:])
elif geom_type == "multipoint":
return asMultiPoint(ob["coordinates"])
elif geom_type == "multilinestring":
return asMultiLineString(ob["coordinates"])
elif geom_type == "multipolygon":
return MultiPolygonAdapter(ob["coordinates"], context_type='geojson')
elif geom_type == "geometrycollection":
geoms = [asShape(g) for g in ob.get("geometries", [])]
return GeometryCollection(geoms)
else:
raise ValueError("Unknown geometry type: %s" % geom_type)
def mapping(ob):
"""Returns a GeoJSON-like mapping"""
return ob.__geo_interface__
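# --- Illustrative round trip (editor's addition, not part of Shapely) -------
# shape() copies coordinates out of a GeoJSON-like mapping, and mapping() goes
# the other way through __geo_interface__. The coordinates are made up.
def _demo_roundtrip():
    pt = shape({"type": "Point", "coordinates": (2.0, 3.0)})
    assert isinstance(pt, Point)
    return mapping(pt)  # -> GeoJSON-like dict for the point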
|
{
"content_hash": "c67715285e2519740a5a595aa1ef6e6d",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 77,
"avg_line_length": 35.890243902439025,
"alnum_prop": 0.6540944614339109,
"repo_name": "l00py/KML_Lookup",
"id": "96f301c8c1cc2975668eb1598774dfb71d70386b",
"size": "2943",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "TA-KML_lookup/bin/shapely/geometry/geo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "359937"
},
{
"name": "Shell",
"bytes": "84"
}
],
"symlink_target": ""
}
|
"""
Request object
==============
This module implements a request object.
"""
__author__ = u"Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import itertools as _it
import re as _re
from wtf import httputil as _httputil
class CookieCodecFactoryInterface(object):
""" Interface for cookie codec factories """
def __init__(self, config, opts, args):
"""
Initialization
:Parameters:
- `config`: Configuration
- `opts`: Command line options
- `args`: Positional command line arguments
:Types:
- `config`: `wtf.config.Config`
- `opts`: ``optparse.OptionContainer``
- `args`: ``list``
"""
def __call__(self):
"""
Create the codec instance (doesn't have to be a new one)
:return: The codec instance
:rtype: `CookieCodecInterface`
"""
class BaseCookieCodec(object):
"""
Base class for some codecs
:CVariables:
- `_UNSAFE_SEARCH`: Unsafe char detection function
:Types:
- `_UNSAFE_SEARCH`: ``callable``
"""
__implements__ = [
CookieCodecFactoryInterface, _httputil.CookieCodecInterface
]
UNSAFE_SEARCH = _httputil.CookieMaker.UNSAFE_SEARCH
def __init__(self, config, opts, args):
""" Initialization """
pass
def __call__(self):
""" Determine codec instance """
return self
def quote(self, value):
"""
Quote a value if necessary
:Parameters:
- `value`: The value to inspect
:Types:
- `value`: ``str``
:return: The quoted value (or the original if no quoting is needed)
:rtype: ``str``
"""
if self.UNSAFE_SEARCH(value):
return '"%s"' % value.replace('"', '\\"')
return value
def unquote(self, value):
"""
Unquote a value if applicable
:Parameters:
- `value`: The value to inspect
:Types:
- `value`: ``str``
:return: The unquoted value (or the original if no unquoting is needed)
:rtype: ``str``
"""
if value.startswith('"') and value.endswith('"'):
return value[1:-1].replace('\\"', '"')
return value
def encode(self, value):
""" Encode the cookie value """
raise NotImplementedError()
def decode(self, value):
""" Decode the cookie value """
raise NotImplementedError()
class DefaultCookie(BaseCookieCodec):
"""
Standard python behaviour
:CVariables:
- `_TRANS`: Translation getter
- `_UNTRANS`: Untranslation substituter
:Types:
- `_TRANS`: ``callable``
- `_UNTRANS`: ``callable``
"""
_TRANS = dict([('\\', '\\\\')] + [(chr(_key), "\\%03o" % _key)
for _key in _it.chain(xrange(32), xrange(127, 256))
]).get
_UNTRANS = _re.compile(r'\\([0-3][0-7][0-7])').sub
del _key # pylint: disable = W0631
def encode(self, value):
""" Encode a cookie value """
if self.UNSAFE_SEARCH(value):
value = ''.join(map(self._TRANS, value, value))
return self.quote(value)
def decode(self, value):
""" Decode a cookie value """
return self._UNTRANS(self._untranssub, self.unquote(value))
@staticmethod
def _untranssub(match):
""" Translate octal string back to number to char """
return chr(int(match.group(1), 8))
class UnicodeCookie(BaseCookieCodec):
"""
Unicode cookies
The codecs takes and gives unicode, translates them using the
``unicode_escape`` codec.
"""
def encode(self, value):
""" Encode a cookie value """
return self.quote(unicode(value).encode('unicode_escape'))
def decode(self, value):
""" Decode a cookie value """
return self.unquote(value).decode('unicode_escape')
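# --- Illustrative sketch (editor's addition, not part of wtf) ---------------
# Round trip through DefaultCookie: unsafe characters are octal-escaped on
# encode and restored on decode. The codecs ignore config/opts/args, so None
# placeholders are enough for a demo.
def _demo_roundtrip():
    codec = DefaultCookie(None, None, None)()
    raw = 'a value;with\x01unsafe chars'
    encoded = codec.encode(raw)
    assert codec.decode(encoded) == raw
    return encoded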
|
{
"content_hash": "4e231e7f4ee341fa607489f8608d73b0",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 79,
"avg_line_length": 24.808917197452228,
"alnum_prop": 0.5609756097560976,
"repo_name": "wontfix-org/wtf",
"id": "c6fb98fc10e732f198e0829b7db5453e147dcb51",
"size": "4537",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wtf/app/cookie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "75998"
},
{
"name": "JavaScript",
"bytes": "63450"
},
{
"name": "Python",
"bytes": "667449"
}
],
"symlink_target": ""
}
|
import os
from subprocess import Popen, STDOUT, PIPE
pwd = os.path.dirname(__file__)
def setup():
# Change directory
os.putenv('PWD', pwd)
os.chdir(pwd)
def test_run():
# Run the simulation
proc = Popen([pwd + '/../../src/openmc'], stderr=STDOUT, stdout=PIPE)
returncode = proc.wait()
# Display stdout
print(proc.communicate()[0])
# Make sure simulation ran to completion
assert returncode == 0
def test_created_statepoint():
# Make sure that statepoint file was created
assert os.path.exists(pwd + '/statepoint.10.binary')
def teardown():
# Remove output files
output = [pwd + '/statepoint.10.binary']
for f in output:
if os.path.exists(f):
os.remove(f)
|
{
"content_hash": "b2322bb2cc78228bf57f8e3fbc973f44",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 23.967741935483872,
"alnum_prop": 0.6339165545087483,
"repo_name": "keadyk/openmc_mg_prepush",
"id": "d858aec2d9f3e6cad83ae6a26a7b842618ab984d",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_survival_biasing/test_survival_biasing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6343"
},
{
"name": "FORTRAN",
"bytes": "1307584"
},
{
"name": "Makefile",
"bytes": "10064"
},
{
"name": "Python",
"bytes": "315358"
},
{
"name": "Shell",
"bytes": "6046"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import os
import signal
from datetime import datetime, timedelta
from tempfile import NamedTemporaryFile, TemporaryDirectory
from time import sleep
from unittest import mock
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException, AirflowSkipException, AirflowTaskTimeout
from airflow.models.dag import DAG
from airflow.operators.bash import BashOperator
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
DEFAULT_DATE = datetime(2016, 1, 1, tzinfo=timezone.utc)
END_DATE = datetime(2016, 1, 2, tzinfo=timezone.utc)
INTERVAL = timedelta(hours=12)
class TestBashOperator:
@parameterized.expand(
[
(False, None, "MY_PATH_TO_AIRFLOW_HOME"),
(True, {"AIRFLOW_HOME": "OVERRIDDEN_AIRFLOW_HOME"}, "OVERRIDDEN_AIRFLOW_HOME"),
]
)
def test_echo_env_variables(self, append_env, user_defined_env, expected_airflow_home):
"""
Test that env variables are exported correctly to the task bash environment.
"""
utc_now = datetime.utcnow().replace(tzinfo=timezone.utc)
expected = (
f"{expected_airflow_home}\n"
"AWESOME_PYTHONPATH\n"
"bash_op_test\n"
"echo_env_vars\n"
f"{utc_now.isoformat()}\n"
f"manual__{utc_now.isoformat()}\n"
)
dag = DAG(
dag_id="bash_op_test",
default_args={"owner": "airflow", "retries": 100, "start_date": DEFAULT_DATE},
schedule="@daily",
dagrun_timeout=timedelta(minutes=60),
)
dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=utc_now,
start_date=utc_now,
state=State.RUNNING,
external_trigger=False,
)
with NamedTemporaryFile() as tmp_file:
task = BashOperator(
task_id="echo_env_vars",
dag=dag,
bash_command="echo $AIRFLOW_HOME>> {0};"
"echo $PYTHONPATH>> {0};"
"echo $AIRFLOW_CTX_DAG_ID >> {0};"
"echo $AIRFLOW_CTX_TASK_ID>> {0};"
"echo $AIRFLOW_CTX_EXECUTION_DATE>> {0};"
"echo $AIRFLOW_CTX_DAG_RUN_ID>> {0};".format(tmp_file.name),
append_env=append_env,
env=user_defined_env,
)
with mock.patch.dict(
"os.environ", {"AIRFLOW_HOME": "MY_PATH_TO_AIRFLOW_HOME", "PYTHONPATH": "AWESOME_PYTHONPATH"}
):
task.run(utc_now, utc_now, ignore_first_depends_on_past=True, ignore_ti_state=True)
with open(tmp_file.name) as file:
output = "".join(file.readlines())
assert expected == output
@parameterized.expand(
[
("test-val", "test-val"),
("test-val\ntest-val\n", ""),
("test-val\ntest-val", "test-val"),
("", ""),
]
)
def test_return_value(self, val, expected):
op = BashOperator(task_id="abc", bash_command=f'set -e; echo "{val}";')
line = op.execute({})
assert line == expected
def test_raise_exception_on_non_zero_exit_code(self):
bash_operator = BashOperator(bash_command="exit 42", task_id="test_return_value", dag=None)
with pytest.raises(
AirflowException, match="Bash command failed\\. The command returned a non-zero exit code 42\\."
):
bash_operator.execute(context={})
def test_task_retries(self):
bash_operator = BashOperator(
bash_command='echo "stdout"', task_id="test_task_retries", retries=2, dag=None
)
assert bash_operator.retries == 2
def test_default_retries(self):
bash_operator = BashOperator(bash_command='echo "stdout"', task_id="test_default_retries", dag=None)
assert bash_operator.retries == 0
def test_command_not_found(self):
with pytest.raises(
AirflowException, match="Bash command failed\\. The command returned a non-zero exit code 127\\."
):
BashOperator(task_id="abc", bash_command="set -e; something-that-isnt-on-path").execute({})
def test_unset_cwd(self):
val = "xxxx"
op = BashOperator(task_id="abc", bash_command=f'set -e; echo "{val}";')
line = op.execute({})
assert line == val
def test_cwd_does_not_exist(self):
test_cmd = 'set -e; echo "xxxx" |tee outputs.txt'
with TemporaryDirectory(prefix="test_command_with_cwd") as tmp_dir:
# Get a nonexistent temporary directory to do the test
pass
# There should be no exceptions when creating the operator even the `cwd` doesn't exist
bash_operator = BashOperator(task_id="abc", bash_command=test_cmd, cwd=tmp_dir)
with pytest.raises(AirflowException, match=f"Can not find the cwd: {tmp_dir}"):
bash_operator.execute({})
def test_cwd_is_file(self):
test_cmd = 'set -e; echo "xxxx" |tee outputs.txt'
with NamedTemporaryFile(suffix="var.env") as tmp_file:
# Test if the cwd is a file_path
with pytest.raises(AirflowException, match=f"The cwd {tmp_file.name} must be a directory"):
BashOperator(task_id="abc", bash_command=test_cmd, cwd=tmp_file.name).execute({})
def test_valid_cwd(self):
test_cmd = 'set -e; echo "xxxx" |tee outputs.txt'
with TemporaryDirectory(prefix="test_command_with_cwd") as test_cwd_folder:
# Test everything went alright
result = BashOperator(task_id="abc", bash_command=test_cmd, cwd=test_cwd_folder).execute({})
assert result == "xxxx"
with open(f"{test_cwd_folder}/outputs.txt") as tmp_file:
assert tmp_file.read().splitlines()[0] == "xxxx"
@parameterized.expand(
[
(None, 99, AirflowSkipException),
({"skip_exit_code": 100}, 100, AirflowSkipException),
({"skip_exit_code": 100}, 101, AirflowException),
({"skip_exit_code": None}, 99, AirflowException),
]
)
def test_skip(self, extra_kwargs, actual_exit_code, expected_exc):
kwargs = dict(task_id="abc", bash_command=f'set -e; echo "hello world"; exit {actual_exit_code};')
if extra_kwargs:
kwargs.update(**extra_kwargs)
with pytest.raises(expected_exc):
BashOperator(**kwargs).execute({})
def test_bash_operator_multi_byte_output(self):
op = BashOperator(
task_id="test_multi_byte_bash_operator",
bash_command="echo \u2600",
output_encoding="utf-8",
)
op.execute(context={})
def test_bash_operator_kill(self, dag_maker):
import psutil
sleep_time = "100%d" % os.getpid()
with dag_maker():
op = BashOperator(
task_id="test_bash_operator_kill",
execution_timeout=timedelta(microseconds=25),
bash_command=f"/bin/bash -c 'sleep {sleep_time}'",
)
with pytest.raises(AirflowTaskTimeout):
op.run()
sleep(2)
for proc in psutil.process_iter():
if proc.cmdline() == ["sleep", sleep_time]:
os.kill(proc.pid, signal.SIGTERM)
assert False, "BashOperator's subprocess still running after stopping on timeout!"
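# --- Illustrative sketch (editor's addition, not part of the Airflow suite) -
# Minimal standalone execution of a BashOperator outside a DAG run, mirroring
# test_return_value above: the last line of stdout becomes the return value.
def _demo_execute():
    op = BashOperator(task_id="demo", bash_command='echo "hello"')
    return op.execute({})  # -> "hello"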
|
{
"content_hash": "e10b36758f7d88d815a603ae16dbd7a5",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 109,
"avg_line_length": 38.95876288659794,
"alnum_prop": 0.5861338978565758,
"repo_name": "apache/airflow",
"id": "9efad3bab83c4cfd5b644e4dd2fea4643fcb4707",
"size": "8345",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/operators/test_bash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from ..data import DataBlock
from ..preprocess import PreProcess
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from random import sample
#####################################################################
##### TESTS FOR DATABLOCK
#####################################################################
def test_datablock(datablock):
assert datablock.train.shape == (150, 5)
assert datablock.test.shape == (150, 5)
assert datablock.predict.shape == (150, 5)
#####################################################################
##### TESTS FOR PREPROCESS
#####################################################################
def test_check_missing_no_missing(datablock):
pp = PreProcess(datablock)
result = pp.check_missing(printResult=False,returnResult=True)
for df,miss in result.items():
print(df,miss)
assert miss.sum()==0
def test_check_missing_missing_induced(datablock):
df = pd.DataFrame(datablock.train,copy=True)
pp = PreProcess(DataBlock(df,df,df,'target'))
num_miss=25
for data in pp.datablock.data_present().values():
data.iloc[sample(range(150),num_miss),0] = np.nan
result = pp.check_missing(printResult=False,returnResult=True)
for key,miss in result.items():
assert miss.sum()==num_miss
|
{
"content_hash": "8835d2a3f70fb7885f1fa27642aa4640",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 69,
"avg_line_length": 31.804878048780488,
"alnum_prop": 0.5820552147239264,
"repo_name": "aarshayj/easyML",
"id": "201c13266070803088b5eee33e10562fdb79a614",
"size": "1475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easyML/tests/test_preprocess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "Python",
"bytes": "112902"
},
{
"name": "Shell",
"bytes": "3120"
}
],
"symlink_target": ""
}
|
def install(job):
prefab = job.service.executor.prefab
data = job.service.model.data
# create a pool for the images and virtual disks
pool = prefab.virtualization.kvm.storage_pools.create(name=data.name)
data.path = pool.poolpath
job.service.model.actions['uninstall'].state = 'new'
job.service.saveAll()
def uninstall(job):
prefab = job.service.executor.prefab
data = job.service.model.data
# delete a pool
# destroy all volume in the pool before deleting the pool
pool = prefab.virtualization.kvm.storage_pools.get_by_name(name=data.name)
pool.delete()
data.path = ''
job.service.model.actions['install'].state = 'new'
job.service.saveAll()
|
{
"content_hash": "19ed457546f2b8b54bf50628eb1f0bf3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 29.708333333333332,
"alnum_prop": 0.6928471248246845,
"repo_name": "Jumpscale/ays9",
"id": "5cdead3565c4d3f36b9f2e7201ec4d8c6236811b",
"size": "713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templates/disk/storagepool.kvm/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235840"
},
{
"name": "Cap'n Proto",
"bytes": "20377"
},
{
"name": "HTML",
"bytes": "1974"
},
{
"name": "JavaScript",
"bytes": "4324209"
},
{
"name": "Python",
"bytes": "691623"
},
{
"name": "RAML",
"bytes": "3933753"
},
{
"name": "Shell",
"bytes": "3824"
}
],
"symlink_target": ""
}
|
"""This module contains Splittable DoFn logic that's common to all runners."""
import uuid
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.coders import typecoders
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import PTransformOverride
from apache_beam.runners.common import DoFnInvoker
from apache_beam.runners.common import DoFnSignature
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.ptransform import PTransform
class SplittableParDoOverride(PTransformOverride):
"""A transform override for ParDo transformss of SplittableDoFns.
Replaces the ParDo transform with a SplittableParDo transform that performs
SDF specific logic.
"""
def get_matcher(self):
def _matcher(applied_ptransform):
assert isinstance(applied_ptransform, AppliedPTransform)
transform = applied_ptransform.transform
if isinstance(transform, ParDo):
signature = DoFnSignature(transform.fn)
return signature.is_splittable_dofn()
return _matcher
def get_replacement_transform(self, ptransform):
assert isinstance(ptransform, ParDo)
do_fn = ptransform.fn
signature = DoFnSignature(do_fn)
if signature.is_splittable_dofn():
return SplittableParDo(ptransform)
else:
return ptransform
class SplittableParDo(PTransform):
"""A transform that processes a PCollection using a Splittable DoFn."""
def __init__(self, ptransform):
assert isinstance(ptransform, ParDo)
self._ptransform = ptransform
def expand(self, pcoll):
sdf = self._ptransform.fn
signature = DoFnSignature(sdf)
invoker = DoFnInvoker.create_invoker(signature, process_invocation=False)
element_coder = typecoders.registry.get_coder(pcoll.element_type)
restriction_coder = invoker.invoke_restriction_coder()
keyed_elements = (pcoll
| 'pair' >> ParDo(PairWithRestrictionFn(sdf))
| 'split' >> ParDo(SplitRestrictionFn(sdf))
| 'explode' >> ParDo(ExplodeWindowsFn())
| 'random' >> ParDo(RandomUniqueKeyFn()))
return keyed_elements | ProcessKeyedElements(
sdf, element_coder, restriction_coder,
pcoll.windowing, self._ptransform.args, self._ptransform.kwargs)
class ElementAndRestriction(object):
"""A holder for an element and a restriction."""
def __init__(self, element, restriction):
self.element = element
self.restriction = restriction
class PairWithRestrictionFn(beam.DoFn):
"""A transform that pairs each element with a restriction."""
def __init__(self, do_fn):
self._do_fn = do_fn
def start_bundle(self):
signature = DoFnSignature(self._do_fn)
self._invoker = DoFnInvoker.create_invoker(
signature, process_invocation=False)
def process(self, element, window=beam.DoFn.WindowParam, *args, **kwargs):
initial_restriction = self._invoker.invoke_initial_restriction(element)
yield ElementAndRestriction(element, initial_restriction)
class SplitRestrictionFn(beam.DoFn):
"""A transform that perform initial splitting of Splittable DoFn inputs."""
def __init__(self, do_fn):
self._do_fn = do_fn
def start_bundle(self):
signature = DoFnSignature(self._do_fn)
self._invoker = DoFnInvoker.create_invoker(
signature, process_invocation=False)
def process(self, element_and_restriction, *args, **kwargs):
element = element_and_restriction.element
restriction = element_and_restriction.restriction
restriction_parts = self._invoker.invoke_split(
element,
restriction)
for part in restriction_parts:
yield ElementAndRestriction(element, part)
class ExplodeWindowsFn(beam.DoFn):
"""A transform that forces the runner to explode windows.
  This is done to make sure that the Splittable DoFn processes an element for each of
the windows that element belongs to.
"""
def process(self, element, window=beam.DoFn.WindowParam, *args, **kwargs):
yield element
class RandomUniqueKeyFn(beam.DoFn):
"""A transform that assigns a unique key to each element."""
def process(self, element, window=beam.DoFn.WindowParam, *args, **kwargs):
# We ignore UUID collisions here since they are extremely rare.
yield (uuid.uuid4().bytes, element)
class ProcessKeyedElements(PTransform):
"""A primitive transform that performs SplittableDoFn magic.
Input to this transform should be a PCollection of keyed ElementAndRestriction
objects.
"""
def __init__(
self, sdf, element_coder, restriction_coder, windowing_strategy,
ptransform_args, ptransform_kwargs):
self.sdf = sdf
self.element_coder = element_coder
self.restriction_coder = restriction_coder
self.windowing_strategy = windowing_strategy
self.ptransform_args = ptransform_args
self.ptransform_kwargs = ptransform_kwargs
def expand(self, pcoll):
return pvalue.PCollection(pcoll.pipeline)
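# --- Illustrative sketch (editor's addition, not part of the Beam SDK) ------
# How the override above is meant to be consulted by a runner; this only
# exercises the matcher factory, no pipeline is constructed.
def _demo_matcher():
    override = SplittableParDoOverride()
    matcher = override.get_matcher()
    # matcher expects an AppliedPTransform and returns True only for ParDo
    # transforms whose DoFn signature is splittable.
    return callable(matcher)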
|
{
"content_hash": "5ca082dc9d4c328857c4a0fbdfa0c44d",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 80,
"avg_line_length": 32.980132450331126,
"alnum_prop": 0.7224899598393574,
"repo_name": "jbonofre/incubator-beam",
"id": "a7d80ac8b180cdb17bb6e42433dfe1385ca48e64",
"size": "5765",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/sdf_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "8027868"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "4126"
}
],
"symlink_target": ""
}
|
class StateException(BaseException):
def __init__(self, state):
self.state = state
def __str__(self):
return '{} {:x} exception'.format(
self.state.id, self.state.ip)
class EmulationFinished(StateException):
pass
class InstructionNotTranslated(StateException):
pass
class SyscallNotSupported(StateException):
pass
class TargetReached(StateException):
pass
########################################################################
# VULNERABILITIES #
########################################################################
########################################################################
# MEMORY ACCESS ISSUES #
########################################################################
class InvalidMemoryAccess(StateException):
def __init__(self, state, address):
StateException.__init__(self, state)
self.address = address
def __str__(self):
return '{} {:x} invalid memory access: {}'.format(
self.state.id, self.state.ip, self.address)
# reads
class InvalidRead(InvalidMemoryAccess):
def __init__(self, state, address):
InvalidMemoryAccess.__init__(self, state, address)
def __str__(self):
return '{} {:x} invalid read: {:x}'.format(
self.state.id, self.state.ip, self.address)
class UninitialisedRead(InvalidRead):
def __init__(self, state, address):
InvalidRead.__init__(self, state, address)
def __str__(self):
return '{} {:x} uninitialised read: {:x}'.format(
self.state.id, self.state.ip, self.address)
class UnmappedRead(InvalidRead):
def __init__(self, state, address):
InvalidRead.__init__(self, state, address)
def __str__(self):
return '{} {:x} unmapped read: {:x}'.format(
self.state.id, self.state.ip, self.address)
class UseAfterFree(InvalidRead):
def __init__(self, state, address):
InvalidRead.__init__(self, state, address)
def __str__(self):
return '{} {:x} use-after-free: {:x}'.format(
self.state.id, self.state.ip, self.address)
class ArbitraryRead(InvalidRead):
def __init__(self, state, address):
InvalidRead.__init__(self, state, address)
def __str__(self):
return '{} {:x} arbitrary read: {}'.format(
self.state.id, self.state.ip, self.address)
# writes
class InvalidWrite(InvalidMemoryAccess):
def __init__(self, state, address, value):
InvalidMemoryAccess.__init__(self, state, address)
self.value = value
def __str__(self):
return '{} {:x} invalid write: {:x} {}'.format(
self.state.id, self.state.ip, self.address, self.value)
class UnmappedWrite(InvalidWrite):
def __init__(self, state, address, value):
InvalidWrite.__init__(self, state, address, value)
def __str__(self):
return '{} {:x} unmapped write: {} {}'.format(
self.state.id, self.state.ip, self.address, self.value)
class ArbitraryWrite(InvalidWrite):
def __init__(self, state, address, value):
InvalidWrite.__init__(self, state, address, value)
def __str__(self):
return '{} {:x} arbitrary write: {} {}'.format(
self.state.id, self.state.ip, self.address, self.value)
# execute
class InvalidExecution(InvalidMemoryAccess):
def __init__(self, state, address):
InvalidMemoryAccess.__init__(self, state, address)
def __str__(self):
return '{} {:x} invalid execution: {}'.format(
self.state.id, self.state.ip, self.address)
class ArbitraryExecution(InvalidExecution):
def __init__(self, state, address):
InvalidExecution.__init__(self, state, address)
def __str__(self):
return '{} {:x} arbitrary execution: {}'.format(
self.state.id, self.state.ip, self.address)
class SymbolicExecution(InvalidExecution):
def __init__(self, state, address, byte):
InvalidExecution.__init__(self, state, address)
self.byte = byte
def __str__(self):
return '{} {:x} symbolic execution: {}'.format(
self.state.id, self.state.ip, self.byte.smt2())
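# --- Illustrative sketch (editor's addition, not part of concolica) ---------
# The exceptions above only need a state exposing `id` and `ip`; a stub is
# enough to show how the formatted messages come out.
class _StubState(object):
    id = 1
    ip = 0x400000

def _demo():
    try:
        raise UnmappedRead(_StubState(), 0xdeadbeef)
    except InvalidMemoryAccess as e:
        return str(e)  # '1 400000 unmapped read: deadbeef'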
|
{
"content_hash": "8571743258aedc20f65ad07272c32ecd",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 72,
"avg_line_length": 26.80246913580247,
"alnum_prop": 0.545601105481345,
"repo_name": "c01db33f/concolica",
"id": "4325cf04bff2abd3d34ccb322489f68a01963c88",
"size": "5219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vulnerabilities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "130307"
}
],
"symlink_target": ""
}
|
"""
Subclass of Simulation that contains additional methods to construct database.
"""
import sys
import logging
from reader import Simulation
class Constructor(Simulation):
def commit(self):
"""
commits database changes
"""
self.conn.commit()
def create_table(self, table, initial_columns):
"""
adds new table to database
"""
initial_columns_sql = ""
for i, column in enumerate(initial_columns):
if i > 0:
initial_columns_sql += ", "
initial_columns_sql += column[0] + " " + column[1]
query = "CREATE TABLE if not exists " + \
table + " (" + initial_columns_sql + ")"
self.cursor.execute(query)
# Database updating methods ###
def add_column(self, column, type, table="halos", default=""):
"""
adds new column after checking if it already exists
"""
query = 'ALTER TABLE ' + table + ' ADD COLUMN ' + column + ' ' + type
if default != "":
query += ' DEFAULT ' + str(default)
try:
self.cursor.execute(query)
except:
logging.warning(
"column " + column + " already exists in table " + table)
pass
def insert(self, table, newdata, all_cols=True, columns="", one_row=False):
many = True
if len(newdata) == 0:
print "Error. No data passed to insert."
return
elif len(newdata) == 1 and not one_row:
newdata = newdata[0]
holders = ','.join('?' * len(newdata))
many = False
elif one_row:
many = False
holders = ','.join('?' * len(newdata))
else:
holders = ','.join('?' * len(newdata[0]))
if all_cols:
query = 'INSERT INTO ' + table + ' VALUES (' + holders + ')'
else:
columns_sql = ",".join(columns)
query = ('INSERT INTO ' + table +
' (' + columns_sql + ') VALUES (' + holders + ')')
logging.debug(query)
logging.debug(newdata)
if not many:
self.cursor.execute(query, newdata)
else:
self.cursor.executemany(query, newdata)
    def update_rows(self, table, columns, data):
        """
        updates existing rows matched on aexp and id
        """
        if len(columns) != len(data[0]) - 2:
            logging.warning(
                "mismatched number of columns and number of columns to be "
                "updated: %d vs %d", len(columns), len(data[0]) - 2)
query = "update " + table + " set "
for i, column in enumerate(columns):
if i > 0:
query += ", "
query += column + "=?"
query += " where aexp=? and id=?"
self.cursor.executemany(query, data)
    def write_frame_to_database(self, data, table, if_exists="append"):
        data.to_sql(table, self.conn, if_exists=if_exists, index=False)
        if table == "halos":
self.conn.execute("CREATE INDEX IF NOT EXISTS "
"Halo_Index ON halos (aexp, id)")
def mark_leaf(self, aexp, id, table="mergertree"):
"""
labels specified halo as a leaf in the merger tree in the database
"""
query = "update %s set is_leaf=%d where child_aexp=%0.4f and child_id=%d" % (
table, 1, aexp, id)
self.cursor.execute(query)
def mark_main_line(self, aexp, id, parent_id="", table="mergertree"):
"""
labels specified halo as a main line progenitor in the merger tree in the database
"""
query = "update %s set is_main_line=%d where child_aexp=%0.4f and child_id=%d" % (
table, 1, aexp, id)
if parent_id:
query += " and parent_id=%d" % parent_id
self.cursor.execute(query)
query = "update halos set is_main_halo=%d where aexp=%0.4f and id=%d" % (
1, aexp, id)
self.cursor.execute(query)
def add_to_mergers(self, new_mergers, table="mergers"):
"""
Adds new data to mergers database table
"""
self.insert(table, new_mergers)
def restart_mergertree(self):
# get list of completed aexp
query = "SELECT DISTINCT child_aexp FROM mergertree"
rows = self.execute(query)
        # list comprehension avoids an IndexError when no rows come back
        already_done = [row[0] for row in rows]
        if len(already_done) < 1:
            logging.error("no records in mergertree! remove restart flag "
                          "in config and try again.")
            sys.exit(1)
# get ids from last completed epoch
query = "SELECT DISTINCT child_id FROM mergertree WHERE child_aexp = %f;" % already_done[
-1]
id_rows = self.execute(query)
        ids = [row[0] for row in id_rows]
        if len(ids) < 1:
            logging.error("no halos found at aexp %0.4f", already_done[-1])
            sys.exit(1)
return ids, already_done[-1]
# Database table schema ###
def create_mergertree_table(self):
"""
schema for mergertree table
"""
columns = (("parent_aexp", "REAL"),
("child_aexp", "REAL"),
("parent_id", "INTEGER"),
("child_id", "INTEGER"),
("num_shared_particles", "INTEGER"),
("particle_ratio", "REAL"),
("distance", "REAL"),
("z0_parent_id", "INTEGER"),
("is_leaf", "BOOLEAN"))
self.create_table("mergertree", columns)
def create_mergers_table(self):
"""
schema for mergers table
"""
columns = (("z0_parent_id", "INTEGER"),
("merger_aexp", "REAL"),
("main_line_id", "INTEGER"),
("merging_id", "INTEGER"),
("mass_ratio", "REAL"),
("impact_parameter", "REAL"),
("track_merging_aexp", "REAL"),
("num_shared_particles", "INTEGER"))
self.create_table("mergers", columns)
|
{
"content_hash": "62535189af3c0fa0617b86c06d73dd92",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 97,
"avg_line_length": 29.65024630541872,
"alnum_prop": 0.5103837846818409,
"repo_name": "cavestruz/L500analysis",
"id": "95037535b309dd879579897683463d594189a321",
"size": "6038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caps/io/constructor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "509320"
}
],
"symlink_target": ""
}
|
import logging
import psycopg2
import psycopg2.extensions
from google.cloud.sqlcommenter import add_sql_comment
from google.cloud.sqlcommenter.flask import get_flask_info
from google.cloud.sqlcommenter.opencensus import get_opencensus_values
from google.cloud.sqlcommenter.opentelemetry import get_opentelemetry_values
logger = logging.getLogger(__name__)
# This integration extends psycopg2.extensions.cursor
# by implementing a custom execute method.
#
# By default, it doesn't add trace_id and span_id to SQL comments
# due to their ephemeral nature. You can opt in by setting
# with_opencensus=True instead.
def CommenterCursorFactory(
with_framework=True, with_controller=True, with_route=True,
with_opencensus=False, with_opentelemetry=False, with_db_driver=False,
with_dbapi_threadsafety=False, with_dbapi_level=False,
with_libpq_version=False, with_driver_paramstyle=False):
attributes = {
'framework': with_framework,
'controller': with_controller,
'route': with_route,
'db_driver': with_db_driver,
'dbapi_threadsafety': with_dbapi_threadsafety,
'dbapi_level': with_dbapi_level,
'libpq_version': with_libpq_version,
'driver_paramstyle': with_driver_paramstyle,
}
class CommenterCursor(psycopg2.extensions.cursor):
def execute(self, sql, args=None):
data = dict(
# Psycopg2/framework information
db_driver='psycopg2:%s' % psycopg2.__version__,
dbapi_threadsafety=psycopg2.threadsafety,
dbapi_level=psycopg2.apilevel,
libpq_version=psycopg2.__libpq_version__,
driver_paramstyle=psycopg2.paramstyle,
)
# Because psycopg2 is a plain database connectivity module,
# folks using it in a web framework such as flask will
# use it in unison with flask but initialize the parts disjointly,
# unlike Django which uses ORMs directly as part of the framework.
data.update(get_flask_info())
# Filter down to just the requested attributes.
data = {k: v for k, v in data.items() if attributes.get(k)}
if with_opencensus and with_opentelemetry:
logger.warning(
"with_opencensus and with_opentelemetry were enabled. "
"Only use one to avoid unexpected behavior"
)
if with_opencensus:
data.update(get_opencensus_values())
if with_opentelemetry:
data.update(get_opentelemetry_values())
sql = add_sql_comment(sql, **data)
return psycopg2.extensions.cursor.execute(self, sql, args)
return CommenterCursor
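# Usage sketch, assuming a reachable PostgreSQL DSN; `cursor_factory` is
# standard psycopg2 API, so every statement executed through this cursor has
# the comment appended by add_sql_comment():
#
#     import psycopg2
#     factory = CommenterCursorFactory(with_db_driver=True)
#     conn = psycopg2.connect('dbname=app user=app', cursor_factory=factory)
#     with conn.cursor() as cursor:
#         cursor.execute('SELECT 1')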
|
{
"content_hash": "c60e95d893c401eafca9d3e26b8267d4",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 39.478873239436616,
"alnum_prop": 0.6518016410988227,
"repo_name": "google/sqlcommenter",
"id": "28c26453bc69749921bd2dd8754b35ed9824d559",
"size": "3399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sqlcommenter-python/google/cloud/sqlcommenter/psycopg2/extension.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Blade",
"bytes": "18386"
},
{
"name": "Dockerfile",
"bytes": "325"
},
{
"name": "Go",
"bytes": "32466"
},
{
"name": "Java",
"bytes": "54272"
},
{
"name": "JavaScript",
"bytes": "55473"
},
{
"name": "PHP",
"bytes": "145396"
},
{
"name": "Python",
"bytes": "72279"
},
{
"name": "Ruby",
"bytes": "61519"
},
{
"name": "Shell",
"bytes": "4110"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.files import File
from sfmmanager.models import *
from sfmmanager.storage_utils import ResourceData, UserData
from sfmmanager.tasks import *
import sfmmanager  # needed for mocking parts of it
from .base import *
import mock
import StringIO
import shutil
import os
import __builtin__
class MeshlabTestCase(CeleryTestCase):
SAMPLE_FILE = "testing/samples/ffmpeg/c4ea9969cd299e73eccc37e918b59fa4.mp4"
FILE_HASH = "c4ea9969cd299e73eccc37e918b59fa4"
def setUp(self):
super(MeshlabTestCase, self).setUp()
# prepare directory tree
self.path = os.path.join(self.udata.getStorageDir(), MeshlabTestCase.FILE_HASH)
self.data_path = self.path + "_data"
os.mkdir(self.udata.getStorageDir())
os.mkdir(self.data_path)
# create sample video
self.tempfile = File(open(os.path.join(settings.MEDIA_ROOT, MeshlabTestCase.SAMPLE_FILE)))
self.tempfile.name = "dummy.mp4"
self.video = Video()
self.video.data = self.tempfile
self.video.uid = self.user
self.video.vname = "dummy.mp4"
self.video.vhash = MeshlabTestCase.FILE_HASH
self.video.status = Video.STATUS_PENDING
self.video.save()
def tearDown(self):
# remove everything
Video.objects.filter(uid=self.user).delete()
shutil.rmtree(self.udata.getStorageDir())
super(MeshlabTestCase, self).tearDown()
@mock.patch("sfmmanager.storage_utils.ResourceData.getLogFile", mock.Mock())
@mock.patch("sfmmanager.storage_utils.ResourceData.getUniqueVsfmOutput", mock.Mock())
@mock.patch("subprocess.Popen")
def test_meshlab_call(self, mock_task):
mock_log = sfmmanager.storage_utils.ResourceData.getLogFile
mock_ply = sfmmanager.storage_utils.ResourceData.getUniqueVsfmOutput
mock_log.return_value = StringIO.StringIO()
mock_task.return_value.returncode = 0
mock_ply.return_value = os.path.join(self.data_path, "vsfm.0.ply")
result = processOutput(self.video.id)
self.assertEqual(result, self.video.id)
mock_task.assert_called_once_with(["meshlabserver", "-i", mock_ply.return_value, "-o",
os.path.join(self.data_path, "result.obj"), "-om", "vc", "vn"],
stdin=None,
stdout=mock_log.return_value,
stderr=mock_log.return_value)
mock_log.assert_called_once_with("meshlab")
self.assertEqual(Video.objects.get(pk=self.video.id).status, str(Video.STATUS_RECONSTRUCTED))
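# Note (illustrative): because subprocess.Popen and the ResourceData helpers
# are mocked out above, the test runs without meshlabserver installed; with a
# standard Django layout it could be invoked as, e.g.,
#
#     $ python manage.py test sfmmanager.tests.task_meshlab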
|
{
"content_hash": "c757b8abd50a345b46a6c60465c27db6",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 107,
"avg_line_length": 43.29032258064516,
"alnum_prop": 0.6523845007451565,
"repo_name": "qwattash/uclvr-backend",
"id": "196318125df7cbbf5479a0ee34687cf39d68c689",
"size": "2684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sfmmanager/tests/task_meshlab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90816"
}
],
"symlink_target": ""
}
|
import cirq
import qflexcirq.interface.data_storage_interface as tmpi
from qflexcirq.ordering import order_circuit_simulation as auto_order
class QFlexOrder():
def __init__(self,
qflex_order_strings=None,
cirq_circuit=None,
qubits=None):
if (qflex_order_strings is None) and (cirq_circuit is None):
# TODO: More serious checking
raise ValueError("No order specified to constructor!")
if (cirq_circuit is not None) and (not isinstance(
cirq_circuit, cirq.Circuit)):
raise ValueError("Order accepts only QFlexCircuits")
ord_list = qflex_order_strings
if cirq_circuit is not None:
if qubits is None:
raise ValueError("Qubits have to be specified!")
# The device has to be QFlex
# qubits = qflex_circuit.device.get_indexed_grid_qubits()
# List of ordering commands
print("Ordering is being computed from the provided circuit ...")
ord_list = auto_order.circuit_to_ordering(
cirq_circuit, qubit_names=sorted(qubits))
print("... Done!")
# Long string of ordering commands
_local_order_string = '\n'.join([x.strip() for x in ord_list])
        # Behind the scenes, this class creates a temporary file for each object
self.temp_file_if = tmpi.DataStorageInterface()
with open(self.temp_file_if.fullpath, "w") as f:
# I do have the file handle anyway...
print(_local_order_string, file=f)
@property
def order_data(self):
return self.temp_file_if.fullpath
@staticmethod
def from_existing_file(file_path):
with open(file_path, "r") as f:
lines = f.readlines()
return QFlexOrder(qflex_order_strings=lines)
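# Illustrative sketch (not from the original file): building an ordering
# straight from a small cirq circuit; the grid coordinates are arbitrary.
#
#     import cirq
#     qubits = [cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)]
#     circuit = cirq.Circuit([cirq.H(qubits[0]), cirq.CZ(*qubits)])
#     order = QFlexOrder(cirq_circuit=circuit, qubits=qubits)
#     print(order.order_data)  # path of the temporary ordering file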
|
{
"content_hash": "eef9865ef2ec77d49a780dd34d3b7a95",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.6032,
"repo_name": "ngnrsaa/qflex",
"id": "518aa4f21216e0d643bafae2b2b6915cf58ba095",
"size": "1875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qflexcirq/interface/qflex_order.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "390"
},
{
"name": "C++",
"bytes": "264489"
},
{
"name": "Dockerfile",
"bytes": "1347"
},
{
"name": "M4",
"bytes": "8320"
},
{
"name": "Makefile",
"bytes": "4027"
},
{
"name": "Python",
"bytes": "94500"
},
{
"name": "Shell",
"bytes": "13537"
}
],
"symlink_target": ""
}
|
import os.path
import shutil
import invoke
@invoke.task
def build():
# Build our CSS files
invoke.run("compass compile -c compass.rb --force")
# Clean existing directories
for directory in {"css", "fonts", "js"}:
shutil.rmtree(os.path.join("warehouse/static", directory))
# Run wake to generate our built files
invoke.run("wake")
@invoke.task
def watch():
try:
# Watch With Compass
invoke.run("compass watch -c compass.rb")
except KeyboardInterrupt:
pass
|
{
"content_hash": "dd0e4f8969f99fd6ad82efefed0bf4dc",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 20.23076923076923,
"alnum_prop": 0.6463878326996197,
"repo_name": "mattrobenolt/warehouse",
"id": "b5829d5013a7fb2116aba568393b9ba5910e8485",
"size": "1099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/static.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151221"
},
{
"name": "JavaScript",
"bytes": "58411"
},
{
"name": "Python",
"bytes": "293257"
},
{
"name": "Ruby",
"bytes": "339"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from abc import ABCMeta, abstractmethod
class Person:
def __init__(self, name, gender, marital_status):
self.name = name
self.gender = gender
self.marital_status = marital_status
def get_name(self):
return self.name
def get_gender(self):
return self.gender
def get_marital_status(self):
return self.marital_status
class Criteria:
__metaclass__ = ABCMeta
@abstractmethod
def meet_criteria(self, person_list):
pass
class CriteriaMale(Criteria):
def meet_criteria(self, person_list):
male_person_list = []
for person in person_list:
if person.get_gender() == "Male":
male_person_list.append(person)
return male_person_list
class CriteriaFemale(Criteria):
def meet_criteria(self, person_list):
female_person_list = []
for person in person_list:
if person.get_gender() == "Female":
female_person_list.append(person)
return female_person_list
class CriteriaSingle(Criteria):
def meet_criteria(self, person_list):
single_person_list = []
for person in person_list:
if person.get_marital_status() == "Single":
single_person_list.append(person)
return single_person_list
class CriteriaOr(Criteria):
def __init__(self, criteria, other_criteria):
self.criteria = criteria
self.other_criteria = other_criteria
def meet_criteria(self, person_list):
first_criteria_items = self.criteria.meet_criteria(person_list)
second_criteria_items = self.other_criteria.meet_criteria(person_list)
for person in second_criteria_items:
if person not in first_criteria_items:
first_criteria_items.append(person)
return first_criteria_items
class CriteriaAnd(Criteria):
def __init__(self, criteria, other_criteria):
self.criteria = criteria
self.other_criteria = other_criteria
def meet_criteria(self, person_list):
first_criteria_items = self.criteria.meet_criteria(person_list)
return self.other_criteria.meet_criteria(first_criteria_items)
def print_persons(persons):
    for person in persons:
        print("name:", person.get_name(),
              "gender:", person.get_gender(),
              "marital_status:", person.get_marital_status())
if __name__ == '__main__':
person_list = []
person_list.append(Person('A', 'Male', 'Single'))
person_list.append(Person('B', 'Male', 'Married'))
person_list.append(Person('C', 'Female', 'Single'))
person_list.append(Person('D', 'Female', 'Married'))
male = CriteriaMale()
female = CriteriaFemale()
single = CriteriaSingle()
single_male = CriteriaAnd(male, single)
single_or_female = CriteriaOr(female, single)
print "male"
print_persons(male.meet_criteria(person_list))
print "female"
print_persons(female.meet_criteria(person_list))
print "single"
print_persons(single.meet_criteria(person_list))
print "single_male"
print_persons(single.meet_criteria(person_list))
print "single or female"
print_persons(single_or_female.meet_criteria(person_list))
|
{
"content_hash": "b6048020aa205fbdb407b5949a9fe4c0",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 78,
"avg_line_length": 26.192,
"alnum_prop": 0.6362248014660965,
"repo_name": "kkaushik24/python-design-patterns",
"id": "d925a36dc256bfb9953c17c29d686ee863ff4a7f",
"size": "3274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "structural/filter_pattern.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17256"
}
],
"symlink_target": ""
}
|
"""Realize a Pi-shaped fuzzy set"""
__revision__ = "$Id: PiFunction.py,v 1.19 2010-03-28 18:44:46 rliebscher Exp $"
from fuzzy.set.Function import Function
from fuzzy.set.SFunction import SFunction
from fuzzy.set.ZFunction import ZFunction
class PiFunction(Function):
r"""
Realize a Pi-shaped fuzzy set::
_
/|\
/ | \
_/ | \_
| a |
| |
delta
See also U{http://pyfuzzy.sourceforge.net/demo/set/PiFunction.png}
@ivar a: center of set.
@type a: float
@ivar delta: absolute distance between x-values for minimum and maximum.
@type delta: float
"""
def __init__(self, a=0.0, delta=1.0):
"""Initialize a Pi-shaped fuzzy set.
@param a: center of set
@type a: float
@param delta: absolute distance between x-values for minimum and maximum
@type delta: float
"""
super(PiFunction, self).__init__()
self.a = a
self.delta = delta
self._sfunction = SFunction(a - delta/2., delta/2)
self._zfunction = ZFunction(a + delta/2., delta/2)
def __call__(self, x):
"""Return membership of x in this fuzzy set.
This method makes the set work like a function.
@param x: value for which the membership is to calculate
@type x: float
@return: membership
@rtype: float
"""
if x < self.a:
return self._sfunction(x)
else:
return self._zfunction(x)
def getCOG(self):
"""Return center of gravity."""
return self.a
def getValuesX(self):
"""Return sequence of x-values so we get a smooth function."""
for x in self._sfunction.getValuesX():
yield x
# first value is equal the last of the previous sequence
skippedFirst = False
for x in self._zfunction.getValuesX():
if not skippedFirst:
skippedFirst = True
else:
yield x
def __repr__(self):
"""Return representation of instance.
@return: representation of instance
@rtype: string
"""
return "%s.%s(a=%s, delta=%s)" % (self.__class__.__module__, self.__class__.__name__, self.a, self.delta)
|
{
"content_hash": "1be6404e53afdd3dcf4402899c957243",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 113,
"avg_line_length": 29.378048780487806,
"alnum_prop": 0.5325861353258613,
"repo_name": "avatar29A/pyfuzzy",
"id": "cb3e992adbad8e2235f66ae2528e238e2227160d",
"size": "3134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzy/set/PiFunction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GAP",
"bytes": "13981"
},
{
"name": "Python",
"bytes": "712985"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the compiler Context stack in spreadflow_core.script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import unittest
from spreadflow_core.script import Context, NoContextError
class ContextTestCase(unittest.TestCase):
"""
Unit tests for compiler Context.
"""
def test_push_pop(self):
"""
Covers Context.push(), Context.pop(), Context.top()
"""
ctx = Context(self)
Context.push(ctx)
self.assertIs(ctx, Context.top())
Context.pop(ctx)
def test_contextmanager(self):
"""
Covers context manager interface.
"""
with Context(self) as ctx:
self.assertIs(ctx, Context.top())
with Context(self) as ctx2:
self.assertIsNot(ctx2, ctx)
self.assertIs(ctx2, Context.top())
self.assertIs(ctx, Context.top())
def test_raises_no_context(self):
"""
Raises an error if the context stack is empty.
"""
self.assertRaises(NoContextError, Context.top)
def test_raises_unbalanced_context(self):
"""
Raises if context push and pop calls are not balanced.
"""
ctx1 = Context(self)
ctx2 = Context(self)
Context.push(ctx1)
Context.push(ctx2)
self.assertRaises(AssertionError, Context.pop, ctx1)
def test_raises_push_invalid(self):
"""
Raises if non-context are pushed.
"""
self.assertRaises(AssertionError, Context.push, self)
|
{
"content_hash": "0e2a4c1b5972e748825e7f693082eca9",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 66,
"avg_line_length": 26.40983606557377,
"alnum_prop": 0.6014897579143389,
"repo_name": "spreadflow/spreadflow-core",
"id": "3f32a4a70006a7a4c6d468fe7e15667e35374f8b",
"size": "1678",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "spreadflow_core/test/test_script_context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "142343"
}
],
"symlink_target": ""
}
|
"""Test for the retaliate strategy."""
import axelrod
from .test_player import TestPlayer
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestRetaliate(TestPlayer):
name = "Retaliate (0.1)"
player = axelrod.Retaliate
expected_classifier = {
'memory_depth': float('inf'), # Long memory
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating."""
self.first_play_test(C)
def test_effect_of_strategy(self):
"""If opponent has defected more than 10 percent of the time, defect."""
P1 = axelrod.Retaliate()
P2 = axelrod.Player()
self.responses_test([C] * 4, [C] * 4, [C])
self.responses_test([C, C, C, C, D], [C, C, C, D, C], [D])
self.responses_test([C] * 6, [C] * 5 + [D], [D])
class TestLimitedRetaliate(TestPlayer):
name = 'Limited Retaliate (0.1/20)'
player = axelrod.LimitedRetaliate
expected_classifier = {
'memory_depth': float('inf'), # Long memory
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating."""
self.first_play_test(C)
def test_effect_of_strategy(self):
P1 = axelrod.LimitedRetaliate()
P2 = axelrod.Player()
# If opponent has never defected, co-operate
self.responses_test([C] * 4, [C] * 4, [C])
self.assertFalse(P1.retaliating)
# If opponent has previously defected and won,
# defect and be not retaliating
self.responses_test([C, C, C, C, D], [C, C, C, D, C], [D])
self.assertFalse(P1.retaliating)
        # Case where the retaliation count has passed the limit: cooperate,
        # reset the retaliation count and stop retaliating
P1.history = [C, C, C, D, C]
P2.history = [D, D, D, C, D]
P1.retaliation_count = 1
P1.retaliation_limit = 0
self.assertEqual(P1.strategy(P2), C)
self.assertEqual(P1.retaliation_count, 0)
self.assertFalse(P1.retaliating)
# If opponent has previously defected and won, defect and
# be not retaliating
self.responses_test([C, C, C, C, D], [C, C, C, D, C], [D])
self.assertFalse(P1.retaliating)
# If opponent has just defected and won, defect and be not retaliating
self.responses_test([C, C, C, C, C, C], [C, C, C, C, C, D], [D])
self.assertFalse(P1.retaliating)
# If I've hit the limit for retaliation attempts, co-operate
P1.history = [C, C, C, C, D]
P2.history = [C, C, C, D, C]
P1.retaliation_count = 20
self.assertEqual(P1.strategy(P2), C)
self.assertFalse(P1.retaliating)
def test_reset(self):
P1 = axelrod.LimitedRetaliate()
P1.history = [C, C, C, C, D]
P1.retaliating = True
P1.retaliation_count = 4
P1.reset()
self.assertEqual(P1.history, [])
self.assertFalse(P1.retaliating)
self.assertEqual(P1.retaliation_count, 0)
|
{
"content_hash": "76f0feabf48c36aed48a8d78d0f2e3f4",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 80,
"avg_line_length": 32.91919191919192,
"alnum_prop": 0.585455661245781,
"repo_name": "ranjinidas/Axelrod",
"id": "4dcb033db0af59f4902caf1b5e2ff5c494cf1258",
"size": "3259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "axelrod/tests/unit/test_retaliate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "568469"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
}
|
import os
PROJECT_ROOT = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'example.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 't8_)kj3v!au0!_i56#gre**mkg0&z1df%3bw(#5^#^5e_64!$_'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
AUTHENTICATION_BACKENDS = (
"allauth.account.auth_backends.AuthenticationBackend",
)
ROOT_URLCONF = 'example.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"allauth.context_processors.allauth",
"allauth.account.context_processors.account",
)
TEMPLATE_DIRS = (
# allauth templates: you could copy this directory into your
# project and tweak it according to your needs
os.path.join(PROJECT_ROOT, 'templates', 'uniform', 'allauth'),
# example project specific templates
os.path.join(PROJECT_ROOT, 'templates', 'uniform', 'example')
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'emailconfirmation',
'uni_form',
'allauth',
'allauth.account',
# 'allauth.socialaccount',
# 'allauth.socialaccount.providers.twitter',
# 'allauth.socialaccount.providers.openid',
# 'allauth.socialaccount.providers.facebook',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
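# Usage note (assuming this file lives at example/settings.py on the Python
# path, matching ROOT_URLCONF above):
#
#     $ django-admin.py syncdb --settings=example.settings
#     $ django-admin.py runserver --settings=example.settings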
|
{
"content_hash": "00cbb1e23f0b4673fdd2596adbdee411",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 122,
"avg_line_length": 33.98837209302326,
"alnum_prop": 0.6886760177899418,
"repo_name": "uroslates/django-allauth",
"id": "a4ffd66bbb458417e41390f8545105e5db9253e0",
"size": "5885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/settings.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import argparse
import io
from operator import itemgetter
import os.path
import six
import socket
from string import Template
import sys
import lxml.etree
from requestbuilder import Arg
import requestbuilder.auth.aws
from requestbuilder.exceptions import ArgumentError, AuthError, ClientError
from requestbuilder.mixins import TabifyingMixin
from requestbuilder.request import AWSQueryRequest
from requestbuilder.service import BaseService
import requests.exceptions
from euca2ools.commands import Euca2ools
from euca2ools.exceptions import AWSError
from euca2ools.util import add_fake_region_name
class EC2(BaseService):
NAME = 'ec2'
DESCRIPTION = 'Elastic compute cloud service'
API_VERSION = '2014-06-15'
REGION_ENVVAR = 'AWS_DEFAULT_REGION'
URL_ENVVAR = 'EC2_URL'
ARGS = [Arg('-U', '--url', metavar='URL',
help='compute service endpoint URL')]
def configure(self):
requestbuilder.service.BaseService.configure(self)
add_fake_region_name(self)
def handle_http_error(self, response):
raise AWSError(response)
class EC2Request(AWSQueryRequest, TabifyingMixin):
SUITE = Euca2ools
SERVICE_CLASS = EC2
AUTH_CLASS = requestbuilder.auth.aws.HmacV4Auth
METHOD = 'POST'
def __init__(self, **kwargs):
AWSQueryRequest.__init__(self, **kwargs)
def print_resource_tag(self, resource_tag, resource_id):
resource_type = RESOURCE_TYPE_MAP.lookup(resource_id)
print self.tabify(['TAG', resource_type, resource_id,
resource_tag.get('key'), resource_tag.get('value')])
def print_reservation(self, reservation):
res_line = ['RESERVATION', reservation['reservationId'],
reservation.get('ownerId')]
# group.get('entry') is a workaround for a CLC bug
group_ids = [group.get('groupName') or group.get('groupId') or
group.get('entry') or ''
for group in reservation['groupSet']]
res_line.append(', '.join(group_ids))
print self.tabify(res_line)
for instance in sorted(reservation.get('instancesSet') or [],
key=itemgetter('launchTime')):
self.print_instance(instance)
def print_instance(self, instance):
instance_line = ['INSTANCE']
for key in ['instanceId', 'imageId', 'dnsName', 'privateDnsName']:
instance_line.append(instance.get(key))
instance_line.append(instance.get('instanceState', {}).get('name'))
instance_line.append(instance.get('keyName'))
instance_line.append(instance.get('amiLaunchIndex'))
instance_line.append(','.join([code['productCode'] for code in
instance.get('productCodes', [])]))
instance_line.append(instance.get('instanceType'))
instance_line.append(instance.get('launchTime'))
instance_line.append(instance.get('placement', {}).get(
'availabilityZone'))
instance_line.append(instance.get('kernelId'))
instance_line.append(instance.get('ramdiskId'))
instance_line.append(instance.get('platform'))
if instance.get('monitoring'):
instance_line.append('monitoring-' +
instance['monitoring'].get('state'))
else:
# noinspection PyTypeChecker
instance_line.append(None)
instance_line.append(instance.get('ipAddress'))
instance_line.append(instance.get('privateIpAddress'))
instance_line.append(instance.get('vpcId'))
instance_line.append(instance.get('subnetId'))
instance_line.append(instance.get('rootDeviceType'))
instance_line.append(instance.get('instanceLifecycle'))
instance_line.append(instance.get('showInstanceRequestId'))
# noinspection PyTypeChecker
instance_line.append(None) # Should be the license, but where is it?
instance_line.append(instance.get('placement', {}).get('groupName'))
instance_line.append(instance.get('virtualizationType'))
instance_line.append(instance.get('hypervisor'))
instance_line.append(instance.get('clientToken'))
instance_line.append(','.join([group['groupId'] for group in
instance.get('groupSet', [])]))
instance_line.append(instance.get('placement', {}).get('tenancy'))
instance_line.append(instance.get('ebsOptimized'))
instance_line.append(instance.get('iamInstanceProfile', {}).get('arn'))
instance_line.append(instance.get('architecture'))
print self.tabify(instance_line)
for blockdev in instance.get('blockDeviceMapping', []):
self.print_blockdevice(blockdev)
for nic in instance.get('networkInterfaceSet', []):
self.print_interface(nic)
for tag in instance.get('tagSet', []):
self.print_resource_tag(tag, instance.get('instanceId'))
def print_blockdevice(self, blockdev):
# Block devices belong to instances
print self.tabify(('BLOCKDEVICE', blockdev.get('deviceName'),
blockdev.get('ebs', {}).get('volumeId'),
blockdev.get('ebs', {}).get('attachTime'),
blockdev.get('ebs', {}).get('deleteOnTermination'),
blockdev.get('ebs', {}).get('volumeType'),
blockdev.get('ebs', {}).get('iops')))
def print_blockdevice_mapping(self, mapping):
# Block device mappings belong to images
if mapping.get('virtualName'):
print self.tabify(('BLOCKDEVICEMAPPING', 'EPHEMERAL',
mapping.get('deviceName'),
mapping.get('virtualName')))
else:
ebs = mapping.get('ebs') or {}
print self.tabify(('BLOCKDEVICEMAPPING', 'EBS',
mapping.get('deviceName'),
ebs.get('snapshotId'), ebs.get('volumeSize'),
ebs.get('deleteOnTermination'),
ebs.get('volumeType'), ebs.get('iops')))
def print_attachment(self, attachment):
print self.tabify(['ATTACHMENT', attachment.get('volumeId'),
attachment.get('instanceId'),
attachment.get('device'),
attachment.get('status'),
attachment.get('attachTime')])
def print_vpc(self, vpc):
print self.tabify(('VPC', vpc.get('vpcId'), vpc.get('state'),
vpc.get('cidrBlock'), vpc.get('dhcpOptionsId'),
vpc.get('instanceTenancy'), vpc.get('isDefault')))
for tag in vpc.get('tagSet') or []:
self.print_resource_tag(tag, vpc.get('vpcId'))
def print_internet_gateway(self, igw):
print self.tabify(('INTERNETGATEWAY', igw.get('internetGatewayId')))
for attachment in igw.get('attachmentSet') or []:
print self.tabify(('ATTACHMENT', attachment.get('vpcId'),
attachment.get('state')))
for tag in igw.get('tagSet') or []:
self.print_resource_tag(tag, igw.get('internetGatewayId'))
def print_peering_connection(self, pcx):
status = pcx.get('status') or {}
print self.tabify(('VPCPEERINGCONNECTION',
pcx.get('vpcPeeringConnectionId'),
pcx.get('expirationTime'),
'{0}: {1}'.format(status.get('code'),
status.get('message'))))
requester = pcx.get('requesterVpcInfo') or {}
print self.tabify(('REQUESTERVPCINFO', requester.get('vpcId'),
requester.get('cidrBlock'),
requester.get('ownerId')))
accepter = pcx.get('accepterVpcInfo') or {}
print self.tabify(('ACCEPTERVPCINFO', accepter.get('vpcId'),
accepter.get('cidrBlock'), accepter.get('ownerId')))
for tag in pcx.get('tagSet') or []:
self.print_resource_tag(tag, pcx.get('vpcPeeringConnectionId'))
def print_subnet(self, subnet):
print self.tabify(('SUBNET', subnet.get('subnetId'),
subnet.get('state'), subnet.get('vpcId'),
subnet.get('cidrBlock'),
subnet.get('availableIpAddressCount'),
subnet.get('availabilityZone'),
subnet.get('defaultForAz'),
subnet.get('mapPublicIpOnLaunch')))
for tag in subnet.get('tagSet') or []:
self.print_resource_tag(tag, subnet.get('subnetId'))
def print_network_acl(self, acl):
if acl.get('default').lower() == 'true':
default = 'default'
else:
default = ''
print self.tabify(('NETWORKACL', acl.get('networkAclId'),
acl.get('vpcId'), default))
for entry in acl.get('entrySet') or []:
if entry.get('egress').lower() == 'true':
direction = 'egress'
else:
direction = 'ingress'
protocol = entry.get('protocol')
port_map = {-1: 'all', 1: 'icmp', 6: 'tcp', 17: 'udp', 132: 'sctp'}
try:
protocol = port_map.get(int(protocol), int(protocol))
except ValueError:
pass
if 'icmpTypeCode' in entry:
from_port = entry.get('icmpTypeCode', {}).get('code')
to_port = entry.get('icmpTypeCode', {}).get('type')
else:
from_port = entry.get('portRange', {}).get('from')
to_port = entry.get('portRange', {}).get('to')
print self.tabify(('ENTRY', direction, entry.get('ruleNumber'),
entry.get('ruleAction'), entry.get('cidrBlock'),
protocol, from_port, to_port))
for assoc in acl.get('associationSet') or []:
print self.tabify(('ASSOCIATION',
assoc.get('networkAclAssociationId'),
assoc.get('subnetId')))
for tag in acl.get('tagSet') or []:
self.print_resource_tag(tag, acl.get('networkAclId'))
def print_route_table(self, table):
print self.tabify(('ROUTETABLE', table.get('routeTableId'),
table.get('vpcId')))
for route in table.get('routeSet') or []:
target = (route.get('gatewayId') or route.get('instanceId') or
route.get('networkInterfaceId') or
route.get('vpcPeeringConnectionId'))
print self.tabify((
'ROUTE', target, route.get('state'),
route.get('destinationCidrBlock'), route.get('origin')))
for vgw in table.get('propagatingVgwSet') or []:
print self.tabify(('PROPAGATINGVGW', vgw.get('gatewayID')))
for assoc in table.get('associationSet') or []:
if (assoc.get('main') or '').lower() == 'true':
main = 'main'
else:
main = ''
print self.tabify(('ASSOCIATION',
assoc.get('routeTableAssociationId'),
assoc.get('subnetId'), main))
for tag in table.get('tagSet') or []:
self.print_resource_tag(tag, table.get('routeTableId'))
def print_interface(self, nic):
nic_info = [nic.get(attr) for attr in (
'networkInterfaceId', 'subnetId', 'vpcId', 'ownerId', 'status',
'privateIpAddress', 'privateDnsName', 'sourceDestCheck')]
print self.tabify(['NETWORKINTERFACE'] + nic_info)
if nic.get('attachment'):
attachment_info = [nic['attachment'].get(attr) for attr in (
'attachmentID', 'deviceIndex', 'status', 'attachTime',
'deleteOnTermination')]
print self.tabify(['ATTACHMENT'] + attachment_info)
privaddresses = nic.get('privateIpAddressesSet', [])
if nic.get('association'):
association = nic['association']
# The EC2 tools apparently print private IP info in the
# association even though that info doesn't appear there
# in the response, so we have to look it up elsewhere.
for privaddress in privaddresses:
if (privaddress.get('association', {}).get('publicIp') ==
association.get('publicIp')):
# Found a match
break
else:
privaddress = None
            print self.tabify(('ASSOCIATION', association.get('publicIp'),
                               association.get('ipOwnerId'),
                               privaddress.get('privateIpAddress')
                               if privaddress else None))
for group in nic.get('groupSet', []):
print self.tabify(('GROUP', group.get('groupId'),
group.get('groupName')))
for privaddress in privaddresses:
if privaddress.get('primary').lower() == 'true':
primary = 'primary'
else:
primary = None
print self.tabify(('PRIVATEIPADDRESS',
privaddress.get('privateIpAddress'),
privaddress.get('privateDnsName'), primary))
def print_customer_gateway(self, cgw):
print self.tabify(('CUSTOMERGATEWAY', cgw.get('customerGatewayId'),
cgw.get('state'), cgw.get('type'),
cgw.get('ipAddress'), cgw.get('bgpAsn')))
for tag in cgw.get('tagSet', []):
self.print_resource_tag(tag, cgw.get('customerGatewayId'))
def print_vpn_gateway(self, vgw):
print self.tabify(('VPNGATEWAY', vgw.get('vpnGatewayId'),
vgw.get('state'), vgw.get('availabilityZone'),
vgw.get('type')))
        for attachment in vgw.get('attachments') or []:
print self.tabify(('VGWATTACHMENT', attachment.get('vpcId'),
attachment.get('state')))
for tag in vgw.get('tagSet', []):
self.print_resource_tag(tag, vgw.get('vpnGatewayId'))
def print_vpn_connection(self, vpn, show_conn_info=False,
stylesheet=None):
print self.tabify(('VPNCONNECTION', vpn.get('vpnConnectionId'),
vpn.get('type'), vpn.get('customerGatewayId'),
vpn.get('vpnGatewayId'), vpn.get('state')))
if show_conn_info and vpn.get('customerGatewayConfiguration'):
if stylesheet is None:
print vpn.get('customerGatewayConfiguration')
else:
if (stylesheet.startswith('http://') or
stylesheet.startswith('https://')):
self.log.info('fetching connection info stylesheet from %s',
stylesheet)
response = requests.get(stylesheet)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise ClientError('failed to fetch stylesheet: {0}'
.format(str(err)))
xslt_root = lxml.etree.XML(response.text)
else:
if stylesheet.startswith('file://'):
stylesheet = stylesheet[7:]
self.log.info('using connection info stylesheet %s',
stylesheet)
with open(stylesheet) as stylesheet_file:
xslt_root = lxml.etree.parse(stylesheet_file)
transform = lxml.etree.XSLT(xslt_root)
conn_info_root = lxml.etree.parse(io.BytesIO(
vpn.get('customerGatewayConfiguration')))
print transform(conn_info_root)
for tag in vpn.get('tagSet') or []:
self.print_resource_tag(tag, vpn.get('vpnConnectionId'))
def print_dhcp_options(self, dopt):
print self.tabify(('DHCPOPTIONS', dopt.get('dhcpOptionsId')))
for option in dopt.get('dhcpConfigurationSet') or {}:
values = [val_dict.get('value')
for val_dict in option.get('valueSet')]
print self.tabify(('OPTION', option.get('key'), ','.join(values)))
for tag in dopt.get('tagSet', []):
self.print_resource_tag(tag, dopt.get('dhcpOptionsId'))
def print_volume(self, volume):
vol_bits = ['VOLUME']
for attr in ('volumeId', 'size', 'snapshotId', 'availabilityZone',
'status', 'createTime'):
vol_bits.append(volume.get(attr))
vol_bits.append(volume.get('volumeType') or 'standard')
vol_bits.append(volume.get('iops'))
print self.tabify(vol_bits)
for attachment in volume.get('attachmentSet', []):
self.print_attachment(attachment)
for tag in volume.get('tagSet', []):
self.print_resource_tag(tag, volume.get('volumeId'))
def print_snapshot(self, snap):
print self.tabify(['SNAPSHOT', snap.get('snapshotId'),
snap.get('volumeId'), snap.get('status'),
snap.get('startTime'), snap.get('progress'),
snap.get('ownerId'), snap.get('volumeSize'),
snap.get('description')])
for tag in snap.get('tagSet', []):
self.print_resource_tag(tag, snap.get('snapshotId'))
def print_bundle_task(self, task):
bucket = task.get('storage', {}).get('S3', {}).get('bucket')
prefix = task.get('storage', {}).get('S3', {}).get('prefix')
if bucket and prefix:
manifest = '{0}/{1}.manifest.xml'.format(bucket, prefix)
else:
manifest = None
print self.tabify(['BUNDLE', task.get('bundleId'),
task.get('instanceId'), bucket, prefix,
task.get('startTime'), task.get('updateTime'),
task.get('state'), task.get('progress'), manifest])
def print_conversion_task(self, task):
task_bits = []
if task.get('importVolume'):
task_bits.extend(('TaskType', 'IMPORTVOLUME'))
if task.get('importInstance'):
task_bits.extend(('TaskType', 'IMPORTINSTANCE'))
if task.get('conversionTaskId'):
task_bits.append('TaskId')
task_bits.append(task.get('conversionTaskId'))
if task.get('expirationTime'):
task_bits.append('ExpirationTime')
task_bits.append(task['expirationTime'])
if task.get('state'):
task_bits.append('Status')
task_bits.append(task['state'])
if task.get('statusMessage'):
task_bits.append('StatusMessage')
task_bits.append(task['statusMessage'])
if task.get('importVolume'):
print self.tabify(task_bits)
self.__print_import_disk(task['importVolume'])
if task.get('importInstance'):
if task['importInstance'].get('instanceId'):
task_bits.extend(('InstanceID',
task['importInstance']['instanceId']))
print self.tabify(task_bits)
for volume in task['importInstance'].get('volumes') or []:
self.__print_import_disk(volume)
def __print_import_disk(self, container):
disk_bits = ['DISKIMAGE']
image = container.get('image') or {}
volume = container.get('volume') or {}
if image.get('format'):
disk_bits.extend(('DiskImageFormat', image['format']))
if image.get('size'):
disk_bits.extend(('DiskImageSize', image['size']))
if volume.get('id'):
disk_bits.extend(('VolumeId', volume['id']))
if volume.get('size'):
disk_bits.extend(('VolumeSize', volume['size']))
if container.get('availabilityZone'):
disk_bits.extend(('AvailabilityZone',
container['availabilityZone']))
if container.get('bytesConverted'):
disk_bits.extend(('ApproximateBytesConverted',
container['bytesConverted']))
if container.get('status'):
# This is the status of the volume for an ImportInstance operation
disk_bits.extend(('Status', container.get('status')))
if container.get('statusMessage'):
disk_bits.extend(('StatusMessage', container.get('statusMessage')))
        print self.tabify(disk_bits)
def process_port_cli_args(self):
"""
        Security group and network ACL rule commands need to be able
        to parse "-1:-1" before argparse can see it because of Python
bug 9334, which causes argparse to treat it as a nonexistent
option name and not an option value. This method wraps
process_cli_args in such a way that values beginning with "-1"
are preserved.
"""
saved_sys_argv = list(sys.argv)
def parse_neg_one_value(opt_name):
if opt_name in sys.argv:
index = sys.argv.index(opt_name)
if (index < len(sys.argv) - 1 and
sys.argv[index + 1].startswith('-1')):
opt_val = sys.argv[index + 1]
del sys.argv[index:index + 2]
return opt_val
icmp_type_code = (parse_neg_one_value('-t') or
parse_neg_one_value('--icmp-type-code'))
port_range = (parse_neg_one_value('-p') or
parse_neg_one_value('--port-range'))
EC2Request.process_cli_args(self)
if icmp_type_code:
self.args['icmp_type_code'] = icmp_type_code
if port_range:
self.args['port_range'] = port_range
sys.argv = saved_sys_argv
class _ResourceTypeMap(object):
_prefix_type_map = {
'eipalloc': 'allocation-id',
'bun': 'bundle', # technically a bundle *task*
'import': 'conversion-task', # this is a guess
'cgw': 'customer-gateway',
'dopt': 'dhcp-options',
'export': 'export-task', # this is a guess
'aki': 'image',
'ami': 'image',
'ari': 'image',
'eki': 'image',
'emi': 'image',
'eri': 'image',
'i': 'instance',
'igw': 'internet-gateway',
'acl': 'network-acl',
'eni': 'network-interface',
'xxx': 'reserved-instances', # reserved instance IDs are UUIDs
'rtb': 'route-table',
'sg': 'security-group',
'snap': 'snapshot',
'sir': 'spot-instances-request',
'subnet': 'subnet',
'vol': 'volume',
'vpc': 'vpc',
'pcx': 'vpc-peering-connection',
'vpn': 'vpn-connection',
'vgw': 'vpn-gateway'}
def lookup(self, item):
if not isinstance(item, basestring):
raise TypeError('argument type must be str')
for prefix in self._prefix_type_map:
if item.startswith(prefix + '-'):
return self._prefix_type_map[prefix]
def __iter__(self):
return iter(set(self._prefix_type_map.values()))
RESOURCE_TYPE_MAP = _ResourceTypeMap()
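# Quick illustration (not in the original source): lookup() maps an ID prefix
# to its resource type and returns None for unrecognised prefixes.
#
#     RESOURCE_TYPE_MAP.lookup('i-0123abcd')    # -> 'instance'
#     RESOURCE_TYPE_MAP.lookup('vol-deadbeef')  # -> 'volume'
#     RESOURCE_TYPE_MAP.lookup('foo-123')       # -> None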
def parse_ports(protocol, port_range=None, icmp_type_code=None):
# This function's error messages make assumptions about arguments'
# names, but currently all of its callers agree on them. If that
# changes then please fix this.
from_port = None
to_port = None
if str(protocol).lower() in ('icmp', '1'):
if port_range:
raise ArgumentError('argument -p/--port-range: not compatible '
'with protocol "{0}"'.format(protocol))
if not icmp_type_code:
icmp_type_code = '-1:-1'
types = icmp_type_code.split(':')
if len(types) == 2:
try:
from_port = int(types[0])
to_port = int(types[1])
except ValueError:
raise ArgumentError('argument -t/--icmp-type-code: value '
'must have format "1:2"')
else:
raise ArgumentError('argument -t/--icmp-type-code: value '
'must have format "1:2"')
if from_port < -1 or to_port < -1:
raise ArgumentError('argument -t/--icmp-type-code: ICMP type, '
'code must be at least -1')
elif str(protocol).lower() in ('tcp', '6', 'udp', '17'):
if icmp_type_code:
raise ArgumentError('argument -t/--icmp-type-code: not compatible '
'with protocol "{0}"'.format(protocol))
if not port_range:
raise ArgumentError('argument -p/--port-range is required '
'for protocol "{0}"'.format(protocol))
if ':' in port_range:
# Be extra helpful in the event of this common typo
raise ArgumentError('argument -p/--port-range: multi-port '
'range must be separated by "-", not ":"')
from_port, to_port = _parse_port_range(port_range, protocol)
if from_port < -1 or to_port < -1:
raise ArgumentError('argument -p/--port-range: port number(s) '
'must be at least -1')
if from_port == -1:
from_port = 1
if to_port == -1:
to_port = 65535
# We allow other protocols through without parsing port numbers at all.
return from_port, to_port
def _parse_port_range(port_range, protocol):
# Try for an integer
try:
return (int(port_range), int(port_range))
except ValueError:
pass
# Try for an integer range
if port_range.count('-') == 1:
ports = port_range.split('-')
try:
return (int(ports[0]), int(ports[1]))
except ValueError:
pass
# Try for a service name
if isinstance(protocol, six.string_types):
try:
# This is going to fail if protocol is a number.
port = socket.getservbyname(port_range, protocol)
return (port, port)
except socket.error:
pass
# That's all, folks!
raise ArgumentError("argument -p/--port-range: '{0}' is neither a port "
"number, range of port numbers, nor a recognized "
"service name".format(port_range))
|
{
"content_hash": "7d1d28475998cad657c740a02cdaa246",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 80,
"avg_line_length": 46.06825938566553,
"alnum_prop": 0.5431545414135428,
"repo_name": "jhajek/euca2ools",
"id": "c332d6e630760bfbd6ba3121de465140b93e3e5c",
"size": "28343",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "euca2ools/commands/ec2/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1230266"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
}
|
"""
The implementation of the NNLM model with only local context.
"""
from model_util import *
import io_read_ngram
import codecs
import theano
import theano.tensor as T
import numpy as np
class ModelNNLM(object):
def __init__(self, context_size, in_vocab_size, emb_dim, hidden_sizes, act_func, n_out, pretrain_file):
self.context_size = context_size
self.in_vocab_size = in_vocab_size
self.emb_dim = emb_dim
self.hidden_sizes = hidden_sizes
self.activation = get_activation_func(act_func)
self.n_out = n_out
self.pretrain_file = pretrain_file
def buildModel(self):
##############################
# MODEL ARCHITECTURE
# symbolic variables
self.x = T.matrix('x')
self.classifier = NNLM(self.x, self.activation, self.in_vocab_size, self.emb_dim, self.hidden_sizes, self.n_out, self.context_size, self.pretrain_file)
self._initSGD()
# CALCULATE GRAD USING SYMBOLIC FUNCTION
def _initSGD(self):
# symbolic variables
y = T.ivector('y')
lr = T.scalar('lr')
# set symbolic cost function
cost = self.classifier.nll(y)
# set symbolic param gradients and adjust learning rate accordingly
gparams = []
grad_norm = 0.0
for param in self.classifier.params:
gparam = T.grad(cost, param)
grad_norm += (gparam ** 2).sum()
# gradients
gparams.append(gparam)
grad_norm = T.sqrt(grad_norm)
        max_grad_norm = 5
        # T.switch keeps the clipping inside the symbolic graph; a plain
        # Python `if` cannot branch on a symbolic Theano comparison
        lr = T.switch(T.gt(grad_norm, max_grad_norm),
                      lr * max_grad_norm / grad_norm, lr)
# set symbolic update rules
updates = []
for param, gparam in zip(self.classifier.params, gparams):
updates.append((param, param - lr * gparam))
self.y = y
self.lr = lr
self.cost = cost
self.grad_norm = grad_norm
self.updates = updates
#################################################
# used in train_util ##### BEGIN ####
#################################################
def getTrainModel(self, data_x, data_y):
self.start_index = T.lscalar()
self.end_index = T.lscalar()
self.learning_rate = T.scalar()
# TRAIN_MODEL
self.train_outputs = [self.cost, self.grad_norm]
self.train_set_x, self.train_set_y = io_read_ngram.shared_dataset([data_x, data_y])
self.int_train_set_y = T.cast(self.train_set_y, 'int32')
self.train_model = theano.function(inputs=[self.start_index, self.end_index, self.learning_rate], outputs=self.train_outputs, updates=self.updates,
givens={
self.x: self.train_set_x[self.start_index:self.end_index],
self.y: self.int_train_set_y[self.start_index:self.end_index],
self.lr: self.learning_rate})
return self.train_model
# def getTrainTestModel(self):
def getValidationModel(self, valid_set_x, valid_set_y, batch_size):
self.num_valid_ngrams = valid_set_x.get_value(borrow=True).shape[0]
self.num_valid_batches = (self.num_valid_ngrams - 1) / batch_size + 1
self.valid_model = theano.function(inputs=[self.start_index, self.end_index], outputs=self.classifier.sum_ll(self.y),
givens={
self.x: valid_set_x[self.start_index:self.end_index],
self.y: valid_set_y[self.start_index:self.end_index],
self.lr: self.learning_rate})
return self.valid_model
def getTestModel(self, test_set_x, test_set_y, batch_size):
self.num_test_ngrams = test_set_x.get_value(borrow=True).shape[0]
self.num_test_batches = (self.num_test_ngrams - 1) / batch_size + 1
self.test_model = theano.function(inputs=[self.start_index, self.end_index], outputs=self.classifier.sum_ll(self.y),
givens={
self.x: test_set_x[self.start_index:self.end_index],
self.y: test_set_y[self.start_index:self.end_index],
self.lr: self.learning_rate})
return self.test_model
def updateTrainModelInput(self, data_x, data_y):
if len(data_y) == 0:
return False # EOF
self.train_set_x.set_value(
np.asarray(data_x, dtype=theano.config.floatX), borrow=True)
self.train_set_y.set_value(
np.asarray(data_y, dtype=theano.config.floatX), borrow=True)
self.int_train_set_y = T.cast(self.train_set_y, 'int32')
return True
# used in train_util: num_train_samples = model.getTrainSetXSize()
def getTrainSetXSize(self):
return self.train_set_x.get_value(borrow=True).shape[0]
# for old interface
def getModelSymbols(self):
return (self.classifier, self.x, self.y, self.lr, self.cost, self.grad_norm, self.updates)
#################################################
# used in train_util ##### END ####
#################################################
class NNLM(object):
"""
The complete NNLM model.
Member variables:
Scalar:
ngram_size, emb_dim, in_vocab_size, num_hidden_layers
mean_abs_log_norm, mean_square_log_norm
Class object:
linear_layer
hidden_layers
softmaxLayer
params
Func:
nll, sum_ll, ind_ll
"""
def __init__(self, input, activation, in_vocab_size, emb_dim, hidden_sizes, n_out, context_size, pretrain_file):
self.context_size = context_size
self.num_hidden_layers = len(hidden_sizes)
self.emb_dim = emb_dim
self.in_vocab_size = in_vocab_size
# linear layer
self.linearLayer = LinearLayer(input, in_vocab_size, emb_dim, pretrain_file)
# hidden layers
self.hidden_layers = []
hidden_in = emb_dim * context_size
hidden_params = []
prev_layer = self.linearLayer
for ii in xrange(self.num_hidden_layers):
hidden_out = hidden_sizes[ii]
hidden_layer = HiddenLayer(prev_layer.output, hidden_in, hidden_out, activation)
self.hidden_layers.append(hidden_layer)
hidden_params = hidden_params + hidden_layer.params
hidden_in = hidden_out
prev_layer = hidden_layer
# softmax layer
self.softmaxLayer = SoftmaxLayer(self.hidden_layers[len(hidden_sizes) - 1].output, hidden_out, n_out)
# nll
self.nll = self.softmaxLayer.nll
# sum_ll
self.sum_ll = self.softmaxLayer.sum_ll
# params
self.params = self.linearLayer.params + \
hidden_params + self.softmaxLayer.params
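# Hypothetical construction sketch (Theano-era Python 2 code; the sizes are
# made up and `act_func` must be a name model_util.get_activation_func knows):
#
#     model = ModelNNLM(context_size=4, in_vocab_size=10000, emb_dim=128,
#                       hidden_sizes=[512], act_func='tanh', n_out=10000,
#                       pretrain_file='')
#     model.buildModel()
#     train = model.getTrainModel(data_x, data_y)  # ngram matrices from io_read_ngram
#     cost, grad_norm = train(0, 128, 0.1)         # one minibatch at lr = 0.1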
|
{
"content_hash": "fbcfd1e11c69c360ec59c3043f0fc37d",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 159,
"avg_line_length": 36.92934782608695,
"alnum_prop": 0.5736571008094187,
"repo_name": "yuhaozhang/nnjm-global",
"id": "506c5f90cf299232faced882f0ecdaf228e1959d",
"size": "6818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/model_nnlm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135136"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
}
|
import os, random
import numpy as np
from PIL import Image, ImageDraw
from printing import print_error
def normalize(data, std):
'''
    Data patch is contrast normalized by this method. The second argument std
    should be the standard deviation of the patch dataset.
'''
m = np.mean(data)
data = (data - m) / std
return data
def get_image_files(path):
print('Retrieving {}'.format(path))
    included_extensions = ['jpg', 'png', 'tiff', 'tif']
    files = [fn for fn in os.listdir(path) if any(fn.endswith(ext) for ext in included_extensions)]
files.sort()
return files
def get_dataset(path):
content = os.listdir(path)
    if not all(x in ['train', 'valid', 'test'] for x in content):
        print_error('Dataset folder should contain only train, valid and test folders. Path probably not correct')
        raise Exception('Fix dataset_path in config')
content.sort()
return content
def from_arr_to_label(label, label_dim):
label_arr = label.reshape(label_dim, label_dim)
    label_arr = label_arr * 255
    # The alpha channel must match the label dimensions (was hardcoded to 16x16).
    label_arr = [label_arr, label_arr, label_arr, np.ones((label_dim, label_dim)) * 255]
label_arr = np.array(label_arr, dtype=np.uint8)
label_arr = np.rollaxis(label_arr, 2)
label_arr = np.rollaxis(label_arr, 2)
return Image.fromarray(label_arr)
def from_arr_to_data(data, data_dim):
data_arr = data.reshape(3, data_dim, data_dim)
data_arr = np.rollaxis(data_arr, 2)
data_arr = np.rollaxis(data_arr, 2)
data_arr = data_arr * 255
data_arr = np.array(data_arr, dtype=np.uint8)
return Image.fromarray(data_arr)
def from_rgb_to_arr(image):
arr = np.asarray(image, dtype='float32') / 255
arr = np.rollaxis(arr, 2, 0)
arr = arr.reshape(3 * arr.shape[1] * arr.shape[2])
return arr
def create_image_label(image, dim_data, dim_label):
    # TODO: Euclidean distance transform, ramp up to definite roads. Model label noise in labels?
    y_size = dim_label
    padding = (dim_data - y_size) // 2  # integer division so the slicing below gets ints
#label = np.array(image.getdata())
#label = np.asarray(image, dtype=theano.config.floatX)
label = np.asarray(image)
label = label[padding : padding+y_size, padding : padding+y_size ]
label = label.reshape(y_size*y_size)
label = label / 255.0
return label
def create_threshold_image(image, threshold):
    '''
    The threshold value defines the binary split. The resulting binary image
    contains only 0s and 1s, while the input image contains values between
    0 and 1.
    '''
binary_arr = np.ones(image.shape)
low_values_indices = image <= threshold # Where values are low
binary_arr[low_values_indices] = 0 # All low values set to 0
return binary_arr
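def _threshold_example():
    '''
    Editor-added illustration: binarize a random prediction map at 0.5.
    Pixels exactly at the threshold end up as 0 because of the <= comparison.
    '''
    prediction = np.random.rand(16, 16)
    return create_threshold_image(prediction, 0.5)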
def get_sum_road(altered_image, original_image):
    '''
    Returns the number of pixels that differ between the original image and
    the altered image.
    '''
arr = np.array(original_image)
arr2 = np.array(altered_image)
return np.sum(arr != arr2)
def get_road_position(image):
arr = np.array(image)
return np.where(arr == 255)
def add_artificial_road_noise(image, threshold):
    '''
    Adds artificial omission noise to a label image until the threshold is
    reached. The experimental random_noise branch instead adds unstructured
    noise to the label image; it is not realistic noise.
    :param image: label image, bw
    :param threshold: Percentage of roads to be removed
    :return:
    '''
label = image.copy()
random_noise = False
if random_noise:
nr_labels = image.size[0] * image.size[1]
pixels = label.load()
original_pixels = image.load()
else:
dr = ImageDraw.Draw(label)
nr_labels = np.sum(np.array(image) == 255)
shape_max = int(image.size[0] / 10)
shape_min = int(image.size[0]/20)
locations = get_road_position(label)
    # If there is no road class, there is nothing to remove.
if nr_labels == 0:
return label, 0
removed_threshold = np.clip(threshold, 0.0, 1.0)
p_roads_removed = 0.0
#t = 0
while p_roads_removed < removed_threshold:
if random_noise:
            # Randomly distributed noise
for i in range(1000):
y = random.randint(0, image.size[0]-1)
x = random.randint(0, image.size[1]-1)
pixels[x, y] = int(not bool(original_pixels[x, y])) * 255
else:
#Road pixels are removed systematically. Simulates omission noise
i = random.randint(0, locations[0].shape[0] -1 )
y = locations[0][i]
x = locations[1][i]
w = int(random.randint(shape_min, shape_max)/2)
h = int(random.randint(shape_min, shape_max)/2)
#if t%2 == 0:
cor = (x-w, y-h, x+w, y+h)
dr.ellipse(cor, fill="black")
#Failed attempt at registration noise
#else:
# m = int(random.randint(-10, 10))
# n = int(random.randint(-10, 10))
# cropped = label.crop((x,y, x+w,y+h))
# label.paste(cropped, (x-m, y-n, x+w-m, y+h-n) )
nr_of_changes = get_sum_road(label, image)
p_roads_removed = nr_of_changes/ float(nr_labels)
#t = t+1
return label, p_roads_removed
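def _noise_example():
    '''
    Editor-added illustration, not called by the pipeline: remove roughly 20%
    of road pixels from a synthetic grayscale label where roads are 255.
    '''
    label = Image.new('L', (64, 64), 0)
    ImageDraw.Draw(label).line((0, 32, 63, 32), fill=255, width=3)
    noisy, removed = add_artificial_road_noise(label, 0.2)
    return noisy, removed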
|
{
"content_hash": "c9882ce07960d69e41ede32c96374531",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 114,
"avg_line_length": 35.06711409395973,
"alnum_prop": 0.6120574162679426,
"repo_name": "olavvatne/CNN",
"id": "ca1c5e5c3d4f9a9440e0b19c3bcdfd217b1874d2",
"size": "5225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "augmenter/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88387"
},
{
"name": "Shell",
"bytes": "822"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys
import glob
myColors = ["b", "g", "r", "c", "m", "y", "k", "b", "k", "c"]
argv = sys.argv
if len(argv) < 2:
print ("direcrory name required")
exit()
name_dir = argv[1]
max_x = 0
max_y = 0
legends = []
files = glob.glob(name_dir+"/*")
i = 1
dic_df = {}
for name_file in files:
try:
print("parsing "+name_file)
fileIn=open(name_file,'r')
        # dataframe=pandas.read_csv(fileIn,header=None,names=['keys','throughput'])
        dataframe=pandas.read_csv(fileIn,header=None, sep='\t',names=['keys','throughput','xxxx'])
        if not isinstance(dataframe.iloc[0,0], (int, float, complex)): # first row must be a header (add long to the tuple on Python 2.x)
            dataframe.drop(0,axis=0,inplace=True)
dataframe=dataframe.applymap(lambda x: float(x)/1000000.0)
dic_df[name_file[len(name_dir):]]=dataframe
except:
print ("error parsing file", name_file)
raise
concatenated = pandas.concat(list(dic_df.values()), keys=dic_df.keys())
#max_x=concatenated.max()
max_x=concatenated.max()['keys']
max_y=concatenated.max()['throughput']+15
for name_file in files:
#plt.subplot(len(files),1,i)
i+=1
#reshape_factor=10
dataframe=dic_df[name_file[len(name_dir):]]
size=len(dataframe.index)
#new_size=size/reshape_factor*reshape_factor
#dataframe_averaged=dataframe.groupby('keys').mean()
p1=plt.plot(dataframe['keys'],dataframe['throughput'], myColors[0],linewidth=1,linestyle='-', marker='o',markersize=0,markeredgewidth=0)
x=(np.float_(dataframe['keys']))
y=(np.float_(dataframe['throughput']))
#print (y)
heatmap,xedges,yedges=np.histogram2d(y,x, bins=50)
#print(heatmap[::-1])
#im = plt.pcolor(heatmap,cmap=mpl.cm.Blues)
dataframe_averaged = dataframe.groupby('keys').aggregate([np.mean, np.std, np.max, np.min])
#print(dataframe_averaged['throughput'])
#p2=plt.plot(dataframe_averaged.index,dataframe_averaged['throughput','mean'], myColors[0],linewidth=1)
#p1=plt.plot(dataframe_averaged.index,dataframe_averaged['throughput','amax'], myColors[0],linewidth=1)
#p1=plt.plot(dataframe_averaged.index,dataframe_averaged['throughput','amin'], myColors[0],linewidth=1)
#p1=plt.plot(lstSizesErr,lstStdDeviation, myColors[0],linewidth=1)
#errs=plt.errorbar(dataframe_averaged.index, dataframe_averaged['throughput','mean'], yerr=dataframe_averaged['throughput','std'], fmt='o-')
# lstSizes=dataframe[dataframe.columns[0]].reshape(-1,reshape_factor).mean(axis=1)
# lstPerf=dataframe[dataframe.columns[1]].reshape(-1,reshape_factor).mean(axis=1)
# lstErr=dataframe[dataframe.columns[1]].reshape(-1,reshape_factor).std(axis=1)
legends.append(name_file[len(name_dir):])
myColors=myColors[1:]
plt.ylabel('Unique words (millions)', weight="bold")
plt.xlabel('Total words (millions)', weight="bold")
#plt.ylim([0,max_y+1])
#plt.xlim([0,max_x+1])
#plt.xticks(np.arange(0, max_x+1, 2.0))
plt.grid(True)
#plt.title(name_file[len(name_dir):])
plt.subplots_adjust(left=0.08, right=0.97, top=0.97, bottom=0.08)
#plt.savefig('temp.png', transparent=True)
plt.legend(legends,loc=8,ncol=2).draggable()
plt.savefig('unique_words.pdf',transparent=True)
plt.show()
quit()
|
{
"content_hash": "93efd61acabb74012468443fa894d504",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 142,
"avg_line_length": 36.22727272727273,
"alnum_prop": 0.7023212045169385,
"repo_name": "undertherain/vsmlib",
"id": "a9b74f43cad84b51256acbd1fc8b34b865c8f8ec",
"size": "3211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vsmlib/viz/plot_graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "123"
},
{
"name": "Python",
"bytes": "201669"
}
],
"symlink_target": ""
}
|
"""This file contains various utility classes used by GRR."""
import errno
import functools
import getpass
import io
import os
import pathlib
import pipes
import queue
import random
import re
import shutil
import socket
import struct
import tarfile
import tempfile
import threading
import time
from typing import Generic, Iterable, Optional, Text, TypeVar
import weakref
import zipfile
import zlib
import psutil
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
class Error(Exception):
pass
class ParsingError(Error):
pass
def Proxy(f):
"""A helper to create a proxy method in a class."""
def Wrapped(self, *args):
return getattr(self, f)(*args)
return Wrapped
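def _ProxyExample():
  """Illustrative usage added by the editor: alias one method to another.
  Proxy resolves the named attribute on self at call time, so a dunder can
  forward to a regular method defined on the same class.
  """
  class Example(object):
    def Render(self):
      return "rendered"
    __str__ = Proxy("Render")
  return str(Example())  # "rendered"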
class TempDirectory(object):
"""A self cleaning temporary directory.
Do not use this function for any client related temporary files! Use
the functionality provided by client_actions/tempfiles.py instead.
"""
def __enter__(self):
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name, True)
# This is a synchronize decorator.
def Synchronized(f):
"""Synchronization decorator."""
@functools.wraps(f)
def NewFunction(self, *args, **kw):
with self.lock:
return f(self, *args, **kw)
return NewFunction
class InterruptableThread(threading.Thread):
"""A class which exits once the main thread exits."""
def __init__(self,
target=None,
args=None,
kwargs=None,
sleep_time=10,
name: Optional[Text] = None,
**kw):
self.exit = False
self.last_run = 0
self.target = target
self.args = args or ()
self.kwargs = kwargs or {}
self.sleep_time = sleep_time
if name is None:
raise ValueError("Please name your threads.")
# TODO(hanuszczak): Incorrect type specification for the `name` param.
# pytype: disable=wrong-arg-count
super().__init__(name=name, **kw)
# pytype: enable=wrong-arg-count
# Do not hold up program exit
self.daemon = True
def Iterate(self):
"""This will be repeatedly called between sleeps."""
def Stop(self):
self.exit = True
def run(self):
# When the main thread exits, the time module might disappear and be already
# None. We take a local reference to the functions we need.
sleep = time.sleep
now = time.time
while not self.exit:
if self.target:
self.target(*self.args, **self.kwargs)
else:
self.Iterate()
# Implement interruptible sleep here.
self.last_run = now()
# Exit if the main thread disappears.
while (time and not self.exit and
now() < self.last_run + self.sleep_time):
sleep(1)
class Node(object):
"""An entry to a linked list."""
next = None
prev = None
data = None
def __init__(self, key, data):
self.data = data
self.key = key
def __str__(self):
return "Node %s: %s" % (self.key, self.data)
def __repr__(self):
return str(self)
# TODO(user):pytype: self.next and self.prev are assigned to self but then
# are used in AppendNode in a very different way. Should be redesigned.
# pytype: disable=attribute-error
class LinkedList(object):
"""A simple doubly linked list used for fast caches."""
def __init__(self):
# We are the head node.
self.next = self.prev = self
self.size = 0
def AppendNode(self, node):
self.size += 1
last_node = self.prev
last_node.next = node
node.prev = last_node
node.next = self
self.prev = node
def PopLeft(self):
"""Returns the head node and removes it from the list."""
if self.next is self:
raise IndexError("Pop from empty list.")
first_node = self.next
self.Unlink(first_node)
return first_node
def Pop(self):
"""Returns the tail node and removes it from the list."""
if self.prev is self:
raise IndexError("Pop from empty list.")
last_node = self.prev
self.Unlink(last_node)
return last_node
def Unlink(self, node):
"""Removes a given node from the list."""
self.size -= 1
node.prev.next = node.next
node.next.prev = node.prev
node.next = node.prev = None
def __iter__(self):
p = self.next
while p is not self:
yield p
p = p.next
def __len__(self):
return self.size
def __str__(self):
return "[" + ", ".join(map(str, self)) + "]"
def Print(self):
p = self.next
while p is not self:
print("%s: prev %r next %r\n" % (p.data, p.prev, p.next))
p = p.next
# pytype: enable=attribute-error
class FastStore(object):
"""This is a cache which expires objects in oldest first manner.
This implementation first appeared in PyFlag.
"""
def __init__(self, max_size=10):
"""Constructor.
Args:
max_size: The maximum number of objects held in cache.
"""
# This class implements a LRU cache which needs fast updates of the LRU
# order for random elements. This is usually implemented by using a
# dict for fast lookups and a linked list for quick deletions / insertions.
self._age = LinkedList()
self._hash = {}
self._limit = max_size
self.lock = threading.RLock()
def KillObject(self, obj):
"""Perform cleanup on objects when they expire.
Should be overridden by classes which need to perform special cleanup.
Args:
obj: The object which was stored in the cache and is now expired.
"""
@Synchronized
def __iter__(self):
return iter([(key, n.data) for key, n in self._hash.items()])
@Synchronized
def Expire(self):
"""Expires old cache entries."""
while len(self._age) > self._limit:
node = self._age.PopLeft()
self._hash.pop(node.key, None)
self.KillObject(node.data)
@Synchronized
def Put(self, key, obj):
"""Add the object to the cache."""
# Remove the old entry if it is there.
node = self._hash.pop(key, None)
if node:
self._age.Unlink(node)
# Make a new node and insert it.
node = Node(key=key, data=obj)
self._hash[key] = node
self._age.AppendNode(node)
self.Expire()
return key
@Synchronized
def ExpireObject(self, key):
"""Expire a specific object from cache."""
node = self._hash.pop(key, None)
if node:
self._age.Unlink(node)
self.KillObject(node.data)
return node.data
@Synchronized
def ExpireRegEx(self, regex):
"""Expire all the objects with the key matching the regex."""
reg = re.compile(regex)
for key in list(self._hash):
if reg.match(key):
self.ExpireObject(key)
@Synchronized
def ExpirePrefix(self, prefix):
"""Expire all the objects with the key having a given prefix."""
for key in list(self._hash):
if key.startswith(prefix):
self.ExpireObject(key)
@Synchronized
def Pop(self, key):
"""Remove the object from the cache completely."""
node = self._hash.get(key)
if node:
del self._hash[key]
self._age.Unlink(node)
return node.data
@Synchronized
def Get(self, key):
"""Fetch the object from cache.
Objects may be flushed from cache at any time. Callers must always
handle the possibility of KeyError raised here.
Args:
key: The key used to access the object.
Returns:
Cached object.
Raises:
KeyError: If the object is not present in the cache.
"""
if key not in self._hash:
raise KeyError(key)
node = self._hash[key]
self._age.Unlink(node)
self._age.AppendNode(node)
return node.data
@Synchronized
def __contains__(self, obj):
return obj in self._hash
@Synchronized
def __getitem__(self, key):
return self.Get(key)
@Synchronized
def Flush(self):
"""Flush all items from cache."""
while self._age:
node = self._age.PopLeft()
self.KillObject(node.data)
self._hash = dict()
def __len__(self):
return len(self._hash)
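def _FastStoreExample():
  """Illustrative usage added by the editor, not part of the original module.
  With max_size=2 the least recently used entry is expired once a third
  entry is inserted; Get() refreshes an entry's position in the LRU order.
  """
  cache = FastStore(max_size=2)
  cache.Put("a", 1)
  cache.Put("b", 2)
  cache.Get("a")  # Touch "a" so that "b" becomes the oldest entry.
  cache.Put("c", 3)  # Expires "b".
  return "b" in cache  # False.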
_T = TypeVar("_T")
class TimeBasedCacheEntry(Generic[_T]):
def __init__(self, timestamp: float, value: _T):
self.timestamp = timestamp
self.value: _T = value
class TimeBasedCache(FastStore):
"""A Cache which expires based on time."""
active_caches = None
house_keeper_thread = None
def __init__(self, max_size=10, max_age=600):
"""Constructor.
This cache will refresh the age of the cached object as long as they are
accessed within the allowed age. The age refers to the time since it was
last touched.
Args:
max_size: The maximum number of objects held in cache.
max_age: The maximum length of time an object is considered alive.
"""
super().__init__(max_size)
self.max_age = max_age
def HouseKeeper():
"""A housekeeper thread which expunges old objects."""
if not time:
# This might happen when the main thread exits, we don't want to raise.
return
now = time.time()
for cache in TimeBasedCache.active_caches:
# Only expunge while holding the lock on the data store.
with cache.lock:
# pylint: disable=protected-access
# We need to take a copy of the value list because we are changing
# this dict during the iteration.
for node in list(cache._hash.values()):
# Expire the object if it is too old.
if node.data.timestamp + cache.max_age < now:
cache.KillObject(node.data)
cache._age.Unlink(node)
cache._hash.pop(node.key, None)
# pylint: enable=protected-access
if not TimeBasedCache.house_keeper_thread:
TimeBasedCache.active_caches = weakref.WeakSet()
# This thread is designed to never finish.
TimeBasedCache.house_keeper_thread = InterruptableThread(
name="HouseKeeperThread", target=HouseKeeper)
TimeBasedCache.house_keeper_thread.start()
TimeBasedCache.active_caches.add(self)
@Synchronized
def Get(self, key):
now = time.time()
stored = super().Get(key)
if stored.timestamp + self.max_age < now:
raise KeyError("Expired")
# This updates the timestamp in place to keep the object alive
stored.timestamp = now
return stored.value
def Put(self, key, obj):
super().Put(key, TimeBasedCacheEntry(time.time(), obj))
class AgeBasedCache(TimeBasedCache):
"""A cache which holds objects for a maximum length of time.
This differs from the TimeBasedCache which keeps the objects alive as long as
they are accessed.
"""
@Synchronized
def Get(self, key):
now = time.time()
stored = FastStore.Get(self, key)
if stored.timestamp + self.max_age < now:
raise KeyError("Expired")
return stored.value
class Struct(object):
"""A baseclass for parsing binary Structs."""
# Derived classes must initialize this into an array of (format,
# name) tuples.
_fields = None
def __init__(self, data):
"""Parses ourselves from data."""
format_str = "".join([x[0] for x in self._fields])
self.size = struct.calcsize(format_str)
try:
parsed_data = struct.unpack(format_str, data[:self.size])
except struct.error:
raise ParsingError("Unable to parse")
for i in range(len(self._fields)):
setattr(self, self._fields[i][1], parsed_data[i])
def __repr__(self):
"""Produce useful text representation of the Struct."""
dat = []
for _, name in self._fields:
dat.append("%s=%s" % (name, getattr(self, name)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(dat))
@classmethod
def GetSize(cls):
"""Calculate the size of the struct."""
format_str = "".join([x[0] for x in cls._fields])
return struct.calcsize(format_str)
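def _StructExample():
  """Illustrative usage added by the editor: parse a hypothetical record.
  The field tuples below are made up for demonstration; native byte order
  is used because Struct simply joins the per-field format strings.
  """
  class Header(Struct):
    _fields = [("I", "magic"), ("H", "version")]
  data = struct.pack("IH", 0xCAFEBABE, 2)
  return Header(data)  # Header(magic=3405691582, version=2)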
def SmartUnicode(string):
"""Returns a unicode object.
This function will always return a unicode object. It should be used to
guarantee that something is always a unicode object.
Args:
string: The string to convert.
Returns:
a unicode object.
"""
if isinstance(string, Text):
return string
if isinstance(string, bytes):
return string.decode("utf-8", "ignore")
return str(string)
def Xor(bytestr, key):
"""Returns a `bytes` object where each byte has been xored with key."""
precondition.AssertType(bytestr, bytes)
return bytes([byte ^ key for byte in bytestr])
def FormatAsHexString(num, width=None, prefix="0x"):
"""Takes an int and returns the number formatted as a hex string."""
# Strip "0x".
hex_str = hex(num)[2:]
# Strip "L" for long values.
hex_str = hex_str.replace("L", "")
if width:
hex_str = hex_str.rjust(width, "0")
return "%s%s" % (prefix, hex_str)
def FormatAsTimestamp(timestamp: int) -> Text:
if not timestamp:
return "-"
return compatibility.FormatTime("%Y-%m-%d %H:%M:%S", time.gmtime(timestamp))
def NormalizePath(path: Text, sep: Text = "/") -> Text:
"""A sane implementation of os.path.normpath.
The standard implementation treats leading / and // as different leading to
incorrect normal forms.
  NOTE: It's ok to use a relative path here (without a leading /) but any /../
  will still be removed, anchoring the path at the top level (e.g.
  foo/../../../../bar => bar).
Args:
path: The path to normalize.
sep: Separator used.
Returns:
A normalized path. In this context normalized means that all input paths
that would result in the system opening the same physical file will produce
the same normalized path.
"""
precondition.AssertType(path, Text)
precondition.AssertType(sep, Text)
if not path:
return sep
path_list = path.split(sep)
# This is a relative path and the first element is . or ..
if path_list[0] in [".", "..", ""]:
path_list.pop(0)
  # `i` tracks the last examined position across passes of the loop below.
  i = 0
while True:
list_len = len(path_list)
# We begin at the last known good position so we never iterate over path
# elements which are already examined
for i in range(i, len(path_list)):
# Remove /./ form
if path_list[i] == "." or not path_list[i]:
path_list.pop(i)
break
# Remove /../ form
elif path_list[i] == "..":
path_list.pop(i)
# Anchor at the top level
if (i == 1 and path_list[0]) or i > 1:
i -= 1
path_list.pop(i)
break
# If we didn't alter the path so far we can quit
if len(path_list) == list_len:
return sep + sep.join(path_list)
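# Worked examples (added for illustration) of the normal forms produced by
# NormalizePath:
#
#   NormalizePath("//foo//bar/")   => "/foo/bar"
#   NormalizePath("./foo/./baz")   => "/foo/baz"
#   NormalizePath("foo/../../bar") => "/bar"  (anchored at the top level)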
# TODO(hanuszczak): The linter complains for a reason here, the signature of
# this function should be fixed as soon as possible.
def JoinPath(stem: Text = "", *parts: Text) -> Text: # pylint: disable=keyword-arg-before-vararg
"""A sane version of os.path.join.
The intention here is to append the stem to the path. The standard module
removes the path if the stem begins with a /.
Args:
stem: The stem to join to.
*parts: parts of the path to join. The first arg is always the root and
directory traversal is not allowed.
Returns:
a normalized path.
"""
precondition.AssertIterableType(parts, Text)
precondition.AssertType(stem, Text)
result = (stem + NormalizePath("/".join(parts))).replace("//", "/")
result = result.rstrip("/")
return result or "/"
def ShellQuote(value):
"""Escapes the string for the safe use inside shell command line."""
# TODO(user): replace pipes.quote with shlex.quote when time comes.
return pipes.quote(SmartUnicode(value))
def Join(*parts):
"""Join (AFF4) paths without normalizing.
A quick join method that can be used to express the precondition that
the parts are already normalized.
Args:
*parts: The parts to join
Returns:
The joined path.
"""
return "/".join(parts)
def GeneratePassphrase(length=20):
"""Create a 20 char passphrase with easily typeable chars."""
valid_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
valid_chars += "0123456789 ,-_&$#"
return "".join(random.choice(valid_chars) for i in range(length))
def PassphraseCallback(verify=False,
prompt1="Enter passphrase:",
prompt2="Verify passphrase:"):
"""A utility function to read a passphrase from stdin."""
while 1:
try:
p1 = getpass.getpass(prompt1)
if verify:
p2 = getpass.getpass(prompt2)
if p1 == p2:
break
else:
break
except KeyboardInterrupt:
return None
return p1.encode("utf-8")
def FormatNumberAsString(num):
"""Return a large number in human readable form."""
for suffix in ["b", "KB", "MB", "GB"]:
if num < 1024.0:
return "%3.2f%s" % (num, suffix)
num /= 1024.0
return "%3.1f%s" % (num, "TB")
class NotAValue(object):
pass
class HeartbeatQueue(queue.Queue):
"""A queue that periodically calls a provided callback while waiting."""
def __init__(self, callback=None, fast_poll_time=60, *args, **kw):
queue.Queue.__init__(self, *args, **kw)
self.callback = callback or (lambda: None)
self.last_item_time = time.time()
self.fast_poll_time = fast_poll_time
def get(self, poll_interval=5):
while True:
try:
# Using Queue.get() with a timeout is really expensive - Python uses
# busy waiting that wakes up the process every 50ms - so we switch
# to a more efficient polling method if there is no activity for
# <fast_poll_time> seconds.
if time.time() - self.last_item_time < self.fast_poll_time:
message = queue.Queue.get(self, block=True, timeout=poll_interval)
else:
time.sleep(poll_interval)
message = queue.Queue.get(self, block=False)
break
except queue.Empty:
self.callback()
self.last_item_time = time.time()
return message
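def _HeartbeatQueueExample():
  """Illustrative usage added by the editor, not part of the original module.
  While get() waits on an empty queue, the callback fires roughly once per
  poll_interval, which is useful for lease renewal or liveness reporting.
  """
  beats = []
  q = HeartbeatQueue(callback=lambda: beats.append(time.time()))
  q.put("work item")
  return q.get(poll_interval=1)  # Returns "work item" immediately.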
class RollingMemoryStream(object):
"""Append-only memory stream that allows writing data in chunks."""
def __init__(self):
self._stream = io.BytesIO()
self._offset = 0
def write(self, b): # pylint: disable=invalid-name
if not self._stream:
raise ArchiveAlreadyClosedError("Attempting to write to a closed stream.")
self._stream.write(b)
self._offset += len(b)
def flush(self): # pylint: disable=invalid-name
pass
def tell(self): # pylint: disable=invalid-name
return self._offset
def close(self): # pylint: disable=invalid-name
self._stream = None
def GetValueAndReset(self):
"""Gets stream buffer since the last GetValueAndReset() call."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to get a value from a closed stream.")
value = self._stream.getvalue()
self._stream.seek(0)
self._stream.truncate()
return value
class ArchiveAlreadyClosedError(Error):
pass
# TODO: These `StreamingZipGenerator` classes exist because in PY2,
# there was no support for streaming. Once support for PY2 is dropped, we can
# switch back to the native implementation that does not rely on dirty hacks.
# TODO(user):pytype: we use a lot of zipfile internals that type checker is
# not aware of.
# pytype: disable=attribute-error,wrong-arg-types
class StreamingZipGeneratorPy2(object):
"""A streaming zip generator that can archive file-like objects."""
FILE_CHUNK_SIZE = 1024 * 1024 * 4
def __init__(self, compression=zipfile.ZIP_STORED):
self._stream = RollingMemoryStream()
self._zip_fd = zipfile.ZipFile(
self._stream, mode="w", compression=compression, allowZip64=True)
self._compression = compression
self._ResetState()
def _ResetState(self):
self.cur_zinfo = None
self.cur_file_size = 0
self.cur_compress_size = 0
self.cur_cmpr = None
self.cur_crc = 0
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None):
"""Generate ZipInfo instance for the given name, compression and stat.
Args:
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Returns:
ZipInfo instance.
Raises:
ValueError: If arcname is not provided.
"""
# Fake stat response.
if st is None:
# TODO(user):pytype: stat_result typing is not correct.
# pytype: disable=wrong-arg-count
st = os.stat_result((0o100644, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# pytype: enable=wrong-arg-count
mtime = time.localtime(st.st_mtime or time.time())
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
raise ValueError("An arcname must be provided.")
zinfo = zipfile.ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self._compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.flag_bits = 0x08 # Setting data descriptor flag.
zinfo.CRC = 0x08074b50 # Predefined CRC for archives using data
# descriptors.
# This fills an empty Info-ZIP Unix extra field.
zinfo.extra = struct.pack(
"<HHIIHH",
0x5855,
12,
0, # time of last access (UTC/GMT)
0, # time of last modification (UTC/GMT)
0, # user ID
0) # group ID
return zinfo
def WriteFileHeader(self, arcname=None, compress_type=None, st=None):
"""Writes a file header."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_zinfo = self._GenerateZipInfo(
arcname=arcname, compress_type=compress_type, st=st)
self.cur_file_size = 0
self.cur_compress_size = 0
if self.cur_zinfo.compress_type == zipfile.ZIP_DEFLATED:
self.cur_cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
self.cur_cmpr = None
self.cur_crc = 0
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_zinfo.header_offset = self._stream.tell()
    # Call _writecheck(self.cur_zinfo) to do sanity checking on the zinfo
    # structure that we've constructed.
self._zip_fd._writecheck(self.cur_zinfo) # pylint: disable=protected-access
# Mark ZipFile as dirty. We have to keep self._zip_fd's internal state
# coherent so that it behaves correctly when close() is called.
self._zip_fd._didModify = True # pylint: disable=protected-access
# Write FileHeader now. It's incomplete, but CRC and uncompressed/compressed
    # sizes will be written later in the data descriptor.
self._stream.write(self.cur_zinfo.FileHeader())
return self._stream.GetValueAndReset()
def WriteFileChunk(self, chunk):
"""Writes file chunk."""
precondition.AssertType(chunk, bytes)
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_file_size += len(chunk)
# TODO(user):pytype: crc32 is not visible outside of zipfile.
# pytype: disable=module-attr
self.cur_crc = zipfile.crc32(chunk, self.cur_crc) & 0xffffffff
# pytype: enable=module-attr
if self.cur_cmpr:
chunk = self.cur_cmpr.compress(chunk)
self.cur_compress_size += len(chunk)
self._stream.write(chunk)
return self._stream.GetValueAndReset()
def WriteFileFooter(self):
"""Writes the file footer (finished the file)."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
if self.cur_cmpr:
buf = self.cur_cmpr.flush()
self.cur_compress_size += len(buf)
self.cur_zinfo.compress_size = self.cur_compress_size
self._stream.write(buf)
else:
self.cur_zinfo.compress_size = self.cur_file_size
self.cur_zinfo.CRC = self.cur_crc
self.cur_zinfo.file_size = self.cur_file_size
    # The data descriptor written below uses the classic 4-byte size fields,
    # which cannot represent files larger than 4 GB. The ZIP64 convention is
    # to write 0xffffffff for compressed and uncompressed size in those
    # cases. The actual size is written by the library for us anyways so
    # those fields are redundant.
    cur_file_size = min(0xffffffff, self.cur_file_size)
    cur_compress_size = min(0xffffffff, self.cur_compress_size)
    # Data descriptor layout (see struct.pack("<LLL", ...) below):
    #
    # crc-32             4 bytes (little endian)
    # compressed size    4 bytes (little endian)
    # uncompressed size  4 bytes (little endian)
self._stream.write(
struct.pack("<LLL", self.cur_crc, cur_compress_size, cur_file_size))
# Register the file in the zip file, so that central directory gets
# written correctly.
self._zip_fd.filelist.append(self.cur_zinfo)
self._zip_fd.NameToInfo[self.cur_zinfo.filename] = self.cur_zinfo
self._ResetState()
return self._stream.GetValueAndReset()
@property
def is_file_write_in_progress(self):
return self.cur_zinfo
def WriteFromFD(self, src_fd, arcname=None, compress_type=None, st=None):
"""Write a zip member from a file like object.
Args:
src_fd: A file like object, must support seek(), tell(), read().
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Raises:
ArchiveAlreadyClosedError: If the zip if already closed.
Yields:
Chunks of binary data.
"""
yield self.WriteFileHeader(
arcname=arcname, compress_type=compress_type, st=st)
while 1:
buf = src_fd.read(1024 * 1024)
if not buf:
break
yield self.WriteFileChunk(buf)
yield self.WriteFileFooter()
def Close(self):
self._zip_fd.close()
value = self._stream.GetValueAndReset()
self._stream.close()
return value
@property
def output_size(self):
return self._stream.tell()
# pytype: enable=attribute-error,wrong-arg-types
# TODO(hanuszczak): Typings for `ZipFile` are ill-typed.
# pytype: disable=attribute-error,wrong-arg-types
class StreamingZipGeneratorPy3(object):
"""A streaming zip generator that can archive file-like objects."""
def __init__(self, compression=zipfile.ZIP_STORED):
self._compression = compression
self._stream = RollingMemoryStream()
self._zipfile = zipfile.ZipFile(
self._stream, mode="w", compression=compression, allowZip64=True)
self._zipopen = None
def __enter__(self):
return self
  def __exit__(self, exc_type, exc_value, traceback):
    del exc_type, exc_value, traceback  # Unused.
    # Do not return Close()'s byte value: a truthy return from __exit__
    # would silently suppress exceptions raised inside the with-block.
    self.Close()
def __del__(self):
if self._zipopen is not None:
self._zipopen.__exit__(None, None, None)
self._zipopen = None
# TODO(hanuszczak): Because we have to use `ZipFile::open`, there is no way to
# specify per-file compression and write custom stat entry (but it should be
# relevant only for dates). Once we remove this class and switch back to the
# native implementation, it should be possible to fill this information again.
def WriteFileHeader(self, arcname=None, compress_type=None, st=None):
del compress_type, st # Unused.
if not self._stream:
raise ArchiveAlreadyClosedError()
self._zipopen = self._zipfile.open(arcname, mode="w")
self._zipopen.__enter__()
return self._stream.GetValueAndReset()
def WriteFileChunk(self, chunk):
precondition.AssertType(chunk, bytes)
if not self._stream:
raise ArchiveAlreadyClosedError()
self._zipopen.write(chunk)
return self._stream.GetValueAndReset()
def WriteFileFooter(self):
if not self._stream:
raise ArchiveAlreadyClosedError()
self._zipopen.__exit__(None, None, None)
self._zipopen = None
return self._stream.GetValueAndReset()
def Close(self):
if self._zipopen is not None:
self._zipopen.__exit__(None, None, None)
self._zipopen = None
self._zipfile.close()
value = self._stream.GetValueAndReset()
self._stream.close()
return value
def WriteFromFD(self, src_fd, arcname=None, compress_type=None, st=None):
"""A convenience method for adding an entire file to the ZIP archive."""
yield self.WriteFileHeader(
arcname=arcname, compress_type=compress_type, st=st)
while True:
buf = src_fd.read(1024 * 1024)
if not buf:
break
yield self.WriteFileChunk(buf)
yield self.WriteFileFooter()
@property
def is_file_write_in_progress(self) -> bool:
return bool(self._zipopen)
@property
def output_size(self):
return self._stream.tell()
# pytype: enable=attribute-error,wrong-arg-types
if compatibility.PY2:
StreamingZipGenerator = StreamingZipGeneratorPy2
else:
StreamingZipGenerator = StreamingZipGeneratorPy3
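def _StreamingZipExample():
  """Illustrative usage added by the editor, not part of the original module.
  Streams an in-memory file into ZIP chunks; in real use each chunk could be
  flushed to a socket or HTTP response as soon as it is produced.
  """
  out = io.BytesIO()
  generator = StreamingZipGenerator(compression=zipfile.ZIP_DEFLATED)
  for chunk in generator.WriteFromFD(io.BytesIO(b"hello"), arcname="hello.txt"):
    out.write(chunk)
  out.write(generator.Close())  # Close() returns the central directory.
  return out.getvalue()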
class StreamingTarGenerator(object):
"""A streaming tar generator that can archive file-like objects."""
FILE_CHUNK_SIZE = 1024 * 1024 * 4
def __init__(self):
super().__init__()
self._stream = RollingMemoryStream()
# TODO(user):pytype: self._stream should be a valid IO object.
# pytype: disable=wrong-arg-types
self._tar_fd = tarfile.open(
mode="w:gz", fileobj=self._stream, encoding="utf-8")
# pytype: enable=wrong-arg-types
self._ResetState()
def _ResetState(self):
self.cur_file_size = 0
self.cur_info = None
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def Close(self):
self._tar_fd.close()
value = self._stream.GetValueAndReset()
self._stream.close()
return value
def WriteFileHeader(self, arcname=None, st=None):
"""Writes file header."""
precondition.AssertType(arcname, Text)
if st is None:
raise ValueError("Stat object can't be None.")
# TODO: In Python 2, name of the file has to be a bytestring.
# Once support for Python 2 is dropped, this line can be removed.
if compatibility.PY2:
arcname = arcname.encode("utf-8")
self.cur_file_size = 0
self.cur_info = self._tar_fd.tarinfo()
self.cur_info.tarfile = self._tar_fd
self.cur_info.type = tarfile.REGTYPE
self.cur_info.name = arcname
self.cur_info.size = st.st_size
self.cur_info.mode = st.st_mode
self.cur_info.mtime = st.st_mtime or time.time()
self._tar_fd.addfile(self.cur_info)
return self._stream.GetValueAndReset()
def WriteFileChunk(self, chunk):
"""Writes file chunk."""
self._tar_fd.fileobj.write(chunk)
self.cur_file_size += len(chunk)
return self._stream.GetValueAndReset()
def WriteFileFooter(self):
"""Writes file footer (finishes the file)."""
if self.cur_file_size != self.cur_info.size:
raise IOError("Incorrect file size: st_size=%d, but written %d bytes." %
(self.cur_info.size, self.cur_file_size))
# TODO(user):pytype: BLOCKSIZE/NUL constants are not visible to type
# checker.
# pytype: disable=module-attr
blocks, remainder = divmod(self.cur_file_size, tarfile.BLOCKSIZE)
if remainder > 0:
self._tar_fd.fileobj.write(tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
blocks += 1
self._tar_fd.offset += blocks * tarfile.BLOCKSIZE
# pytype: enable=module-attr
self._ResetState()
return self._stream.GetValueAndReset()
@property
def is_file_write_in_progress(self):
return self.cur_info
def WriteFromFD(self, src_fd, arcname=None, st=None):
"""Write an archive member from a file like object.
Args:
src_fd: A file like object, must support seek(), tell(), read().
arcname: The name in the archive this should take.
st: A stat object to be used for setting headers.
Raises:
ValueError: If st is omitted.
ArchiveAlreadyClosedError: If the archive was already closed.
IOError: if file size reported in st is different from the one that
was actually read from the src_fd.
Yields:
Chunks of binary data.
"""
yield self.WriteFileHeader(arcname=arcname, st=st)
while 1:
buf = src_fd.read(1024 * 1024)
if not buf:
break
yield self.WriteFileChunk(buf)
yield self.WriteFileFooter()
@property
def output_size(self):
return self._stream.tell()
class Stubber(object):
"""A context manager for doing simple stubs."""
def __init__(self, module, target_name, stub):
self.target_name = target_name
self.module = module
self.stub = stub
def __enter__(self):
self.Start()
def Stop(self):
setattr(self.module, self.target_name, self.old_target)
def Start(self):
self.old_target = getattr(self.module, self.target_name, None)
try:
self.stub.old_target = self.old_target
except AttributeError:
pass
setattr(self.module, self.target_name, self.stub)
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
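def _StubberExample():
  """Illustrative usage added by the editor: stub time.time within a block."""
  with Stubber(time, "time", lambda: 0.0):
    assert time.time() == 0.0
  return time.time()  # The original function is restored on exit.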
class MultiStubber(object):
"""A context manager for doing simple stubs."""
def __init__(self, *args):
self.stubbers = [Stubber(*x) for x in args]
def Start(self):
for x in self.stubbers:
x.Start()
def Stop(self):
for x in self.stubbers:
x.Stop()
def __enter__(self):
self.Start()
def __exit__(self, t, value, traceback):
self.Stop()
def EnsureDirExists(path):
"""Equivalent of makedir -p."""
try:
os.makedirs(path)
except OSError as exc:
# Necessary so we don't hide other errors such as permission denied.
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def MergeDirectories(src: str, dst: str) -> None:
"""Merges the src directory tree into the dst directory tree."""
src_dir = pathlib.Path(src)
dst_dir = pathlib.Path(dst)
for path in src_dir.glob("**/*"):
if path.is_dir():
continue
relative_path = path.relative_to(src_dir)
dst_path = dst_dir / relative_path
EnsureDirExists(str(dst_path.parent))
shutil.copy(str(path), str(dst_path))
def ResolveHostnameToIP(host, port):
"""Resolves a hostname to an IP address."""
ip_addrs = socket.getaddrinfo(host, port, socket.AF_UNSPEC, 0,
socket.IPPROTO_TCP)
# getaddrinfo returns tuples (family, socktype, proto, canonname, sockaddr).
# We are interested in sockaddr which is in turn a tuple
# (address, port) for IPv4 or (address, port, flow info, scope id)
# for IPv6. In both cases, we want the first element, the address.
result = ip_addrs[0][4][0]
# TODO: In Python 2, this value is a byte string instead of UTF-8
# string. To ensure type correctness until support for Python 2 is dropped,
# we always decode this value.
if compatibility.PY2:
result = result.decode("ascii") # pytype: disable=attribute-error
return result
# TODO: This module is way too big right now. It should be split
# into several smaller ones (such as `util.paths`, `util.collections` etc.).
def ProcessIdString():
return "%s@%s:%d" % (psutil.Process().name(), socket.gethostname(),
os.getpid())
def RegexListDisjunction(regex_list: Iterable[bytes]):
precondition.AssertIterableType(regex_list, bytes)
return b"(" + b")|(".join(regex_list) + b")"
def ReadFileBytesAsUnicode(file_obj):
data = file_obj.read()
precondition.AssertType(data, bytes)
return data.decode("utf-8")
def RunOnce(fn):
"""Returns a decorated function that will only pass through the first call.
At first execution, the return value or raised Exception is passed through and
cached. Further calls will not be passed to `fn` and will return or raise
the result of the first call.
Be cautious when returning an Iterator, Generator or mutable value, since the
result is shared by reference among all calls.
Args:
fn: The function to be decorated.
Returns:
A decorated function that will pass through only the first call.
"""
@functools.wraps(fn)
def _OneTimeFunction(*args, **kwargs):
"""Wrapper function that only passes through the first call."""
if not _OneTimeFunction.executed:
_OneTimeFunction.executed = True
try:
_OneTimeFunction.result = fn(*args, **kwargs)
except BaseException as e: # pylint: disable=broad-except
_OneTimeFunction.exception = e
raise # Preserve original stack trace during first invocation.
if _OneTimeFunction.exception is None:
return _OneTimeFunction.result
else:
raise _OneTimeFunction.exception
_OneTimeFunction.executed = False
_OneTimeFunction.exception = None
_OneTimeFunction.result = None
return _OneTimeFunction
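def _RunOnceExample():
  """Illustrative usage added by the editor: the body runs a single time."""
  @RunOnce
  def Initialize():
    print("initializing")
    return 42
  first = Initialize()   # Prints "initializing" and returns 42.
  second = Initialize()  # Returns the cached 42 without printing.
  return first == second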
|
{
"content_hash": "7c5874b12e034dc4277c6989b41f349e",
"timestamp": "",
"source": "github",
"line_count": 1349,
"max_line_length": 97,
"avg_line_length": 27.7175685693106,
"alnum_prop": 0.6564146452354844,
"repo_name": "google/grr",
"id": "b9e33671b8a65e552bc1272972c712ae2ce47543",
"size": "37413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/core/grr_response_core/lib/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
}
|
import objgraph
import arcpy
class MemUsage(object):
def __init__(self, f):
self.f = f
def __call__(self):
print("---objects at start---")
objgraph.show_growth(limit=3)
self.f()
print("---objects at end---")
objgraph.show_growth()
@MemUsage
def search():
rows = arcpy.SearchCursor("C:/temp/address_point.shp", "", "", "STREET_NAM", "STREET_NAM A")
current_street = ""
for row in rows:
if current_street != row.STREET_NAM:
current_street = row.STREET_NAM
print(current_street)
del rows
search()
|
{
"content_hash": "9c6013f8ce125645b6c6eaebd17c32ab",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 96,
"avg_line_length": 20.625,
"alnum_prop": 0.5196969696969697,
"repo_name": "phillipsj/py-profilig-presentation-2014",
"id": "64fa16d20cffe33a54261133502d1812f42defe5",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example-objgraph-decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "129935"
},
{
"name": "JavaScript",
"bytes": "213881"
},
{
"name": "Python",
"bytes": "2995"
}
],
"symlink_target": ""
}
|
"""
Schedule Primitives in TVM
==========================
**Author**: `Ziheng Jiang <https://github.com/ZihengJiang>`_
TVM is a domain specific language for efficient kernel construction.
In this tutorial, we will show you how to schedule the computation by
various primitives provided by TVM.
"""
from __future__ import absolute_import, print_function
import tvm
from tvm import te
import numpy as np
######################################################################
#
# There often exist several methods to compute the same result; however,
# different methods result in different locality and performance. TVM
# therefore asks the user to describe how to execute the computation,
# which is called a **Schedule**.
#
# A **Schedule** is a set of transformations that rewrite the loops of
# the computations in the program.
#
# declare some variables for use later
n = te.var("n")
m = te.var("m")
######################################################################
# A schedule can be created from a list of ops; by default the
# schedule computes tensors serially in row-major order.
# declare a matrix element-wise multiply
A = te.placeholder((m, n), name="A")
B = te.placeholder((m, n), name="B")
C = te.compute((m, n), lambda i, j: A[i, j] * B[i, j], name="C")
s = te.create_schedule([C.op])
# lower transforms the computation from its definition into a real
# callable function. With the argument `simple_mode=True`, it
# returns a readable C-like statement; we use it here to print the
# schedule result.
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# A schedule is composed of multiple stages, and one
# **Stage** represents the schedule for one operation. We provide various
# methods to schedule every stage.
######################################################################
# split
# -----
# :code:`split` can split a specified axis into two axes by
# :code:`factor`.
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] * 2, name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# You can also split an axis by :code:`nparts`, which splits the axis
# in the opposite way to :code:`factor`.
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i], name="B")
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], nparts=32)
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# tile
# ----
# :code:`tile` helps you execute the computation tile by tile over two
# axes.
A = te.placeholder((m, n), name="A")
B = te.compute((m, n), lambda i, j: A[i, j], name="B")
s = te.create_schedule(B.op)
xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5)
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# fuse
# ----
# :code:`fuse` can fuse two consecutive axes of one computation.
A = te.placeholder((m, n), name="A")
B = te.compute((m, n), lambda i, j: A[i, j], name="B")
s = te.create_schedule(B.op)
# tile to four axes first: (i.outer, j.outer, i.inner, j.inner)
xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5)
# then fuse (i.inner, j.inner) into one axis: (i.inner.j.inner.fused)
fused = s[B].fuse(xi, yi)
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# reorder
# -------
# :code:`reorder` can reorder the axes in the specified order.
A = te.placeholder((m, n), name="A")
B = te.compute((m, n), lambda i, j: A[i, j], name="B")
s = te.create_schedule(B.op)
# tile to four axes first: (i.outer, j.outer, i.inner, j.inner)
xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5)
# then reorder the axes: (i.inner, j.outer, i.outer, j.inner)
s[B].reorder(xi, yo, xo, yi)
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# bind
# ----
# :code:`bind` can bind a specified axis to a thread axis, and is often
# used in GPU programming.
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: A[i] * 2, name="B")
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], factor=64)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
print(tvm.lower(s, [A, B], simple_mode=True))
######################################################################
# compute_at
# ----------
# For a schedule that consists of multiple operators, TVM will compute
# tensors at the root separately by default.
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
C = te.compute((m,), lambda i: B[i] * 2, name="C")
s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# :code:`compute_at` can move computation of `B` into the first axis
# of computation of `C`.
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
C = te.compute((m,), lambda i: B[i] * 2, name="C")
s = te.create_schedule(C.op)
s[B].compute_at(s[C], C.op.axis[0])
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# compute_inline
# --------------
# :code:`compute_inline` can mark one stage as inline; the body of the
# computation is then expanded and inserted at the address where the
# tensor is required.
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
C = te.compute((m,), lambda i: B[i] * 2, name="C")
s = te.create_schedule(C.op)
s[B].compute_inline()
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# compute_root
# ------------
# :code:`compute_root` can move computation of one stage to the root.
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
C = te.compute((m,), lambda i: B[i] * 2, name="C")
s = te.create_schedule(C.op)
s[B].compute_at(s[C], C.op.axis[0])
s[B].compute_root()
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# Summary
# -------
# This tutorial provides an introduction to schedule primitives in
# TVM, which permit users to schedule the computation easily and
# flexibly.
#
# In order to get a good performance kernel implementation, the
# general workflow often is:
#
# - Describe your computation via series of operations.
# - Try to schedule the computation with primitives.
# - Compile and run to see the performance difference.
# - Adjust your schedule according to the running result.
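######################################################################
# As a worked illustration of the last two steps (added here; not part
# of the original tutorial), we can build the split schedule from the
# beginning of this page for a CPU target and time it. This sketch
# assumes an LLVM-enabled TVM build; the size 1024 is arbitrary.
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] * 2, name="B")
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
func = tvm.build(s, [A, B], target="llvm")
ctx = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
b = tvm.nd.array(np.zeros(1024, dtype=B.dtype), ctx)
func(a, b)
evaluator = func.time_evaluator(func.entry_name, ctx, number=100)
print("mean runtime: %g s" % evaluator(a, b).mean)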
|
{
"content_hash": "3d7ea0cd60838a5ab9aeeca861391ea5",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 79,
"avg_line_length": 36.130208333333336,
"alnum_prop": 0.5645091538128875,
"repo_name": "sxjscience/tvm",
"id": "eb48dc218cdda18cd6e1bca2dd1ff1d6d5eea23f",
"size": "7722",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tutorials/language/schedule_primitives.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5565032"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6763729"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96967"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
import os
import sys
import codecs
import platform
try:
from setuptools import setup, find_packages, Command
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages, Command
import djcelery_email as distmeta
class RunTests(Command):
description = "Run the django test suite from the tests dir."
user_options = []
def run(self):
this_dir = os.getcwd()
testproj_dir = os.path.join(this_dir, "test_project")
sys.path.append(testproj_dir)
from django.core.management import execute_from_command_line
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
os.chdir(testproj_dir)
execute_from_command_line([__file__, "test"])
os.chdir(this_dir)
def initialize_options(self):
pass
def finalize_options(self):
pass
if os.path.exists("README.rst"):
long_description = codecs.open("README.rst", "r", "utf-8").read()
else:
long_description = "See http://pypi.python.org/pypi/django-celery-email"
setup(
name='django-celery-email',
version=distmeta.__version__,
description=distmeta.__doc__,
author=distmeta.__author__,
author_email=distmeta.__contact__,
url=distmeta.__homepage__,
platforms=["any"],
license="BSD",
packages=find_packages(exclude=['ez_setup', 'test_project', 'test_project.*']),
scripts=[],
zip_safe=False,
install_requires=[
"django-celery>=2.2.0",
],
cmdclass = {"test": RunTests},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Topic :: Communications",
"Topic :: Communications :: Email",
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries :: Python Modules",
],
entry_points={},
long_description=long_description,
)
|
{
"content_hash": "34453aae18c69117174196e78b62653f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 83,
"avg_line_length": 29.435897435897434,
"alnum_prop": 0.6276132404181185,
"repo_name": "andresriancho/django-celery-email",
"id": "ab0e8d0072f29a316c8611c2a6086f0973faabbb",
"size": "2342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9570"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.db.models import Q
from django.urls import reverse
from django_extensions.db.models import (ActivatorModel,
TimeStampedModel)
from djgeojson.fields import PointField
from tagulous.models import TagField
from market.apps.core.models import (RandomSlugModel,
UserProfile)
class PostManager(models.Manager):
def search(self, **kwargs):
qs = super().get_queryset()
# TODO:
# Split query into words, case insensitive search each field:
# owner name, title, body, tags, location
if 'query' in kwargs:
query = kwargs['query']
qs = qs.filter(Q(title__icontains=query) | Q(body__icontains=query))
if 'tags' in kwargs:
tags = kwargs['tags']
# Filter to posts with tags in the provided set
qs = qs.filter(tags__name__in=tags)
return qs
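# A hedged sketch (added for illustration, not wired into PostManager) of the
# multi-word search described in the TODO above: OR together case-insensitive
# per-word matches across title, body and tag names.
def _multi_word_search(qs, query):
    import functools
    import operator
    clauses = [Q(title__icontains=word) | Q(body__icontains=word) |
               Q(tags__name__icontains=word)
               for word in query.split()]
    if not clauses:
        return qs
    # distinct() guards against duplicate rows introduced by the tag join.
    return qs.filter(functools.reduce(operator.or_, clauses)).distinct()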
# Posts submitted by users on the site.
class Post(RandomSlugModel, TimeStampedModel):
UNIT_CHOICES = (
('pound', 'POUND'),
('gallon', 'GALLON'),
('each', 'EACH'),
)
# todo: custom queryset to get active posts
objects = PostManager()
owner = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
# todo: Remove activatormodel?
title = models.CharField(max_length=300)
body = models.TextField(max_length=5000)
# TODO: Use autocomplete_initial=True and specify preset tags?
tags = TagField(max_count=10, force_lowercase=True, space_delimiter=False, blank=True)
price = models.DecimalField(max_digits=7, decimal_places=2)
unit = models.CharField(max_length=80, choices=UNIT_CHOICES, default='each')
# location = models.CharField(max_length=5)
location = PointField()
def get_absolute_url(self):
return reverse('board:detail', kwargs={'slug': self.slug})
def __str__(self):
return self.title + " - $" + str(self.price)
class PostImage(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='images')
image = models.ImageField(blank=True)
|
{
"content_hash": "6bd4d6899b504d9fec41e737bf45a817",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 90,
"avg_line_length": 30.914285714285715,
"alnum_prop": 0.640018484288355,
"repo_name": "justinbot/sdd-farmers",
"id": "9e59d803fef5ce9b6a368e2595fd959e6206ea21",
"size": "2164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "market/market/apps/board/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2367"
},
{
"name": "HTML",
"bytes": "36641"
},
{
"name": "Python",
"bytes": "47439"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import datetime
from django.conf import settings
from django.forms import *
from django.forms.extras import SelectDateWidget
from django.forms.util import ErrorList
from django.test import TestCase
from django.utils import translation
from django.utils.encoding import force_unicode, smart_unicode
from .error_messages import AssertFormErrorsMixin
class GetDate(Form):
mydate = DateField(widget=SelectDateWidget)
class GetNotRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=False)
class GetDateShowHiddenInitial(Form):
mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
class FormsExtraTestCase(TestCase, AssertFormErrorsMixin):
###############
# Extra stuff #
###############
    # The forms library comes with some extra, higher-level Field and Widget classes.
def test_selectdate(self):
w = SelectDateWidget(years=('2007','2008','2009','2010','2011','2012','2013','2014','2015','2016'))
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
self.assertHTMLEqual(w.render('mydate', None), w.render('mydate', ''))
self.assertHTMLEqual(w.render('mydate', '2010-04-15'), """<select name="mydate_month" id="id_mydate_month">
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Accepts a datetime or a string:
self.assertHTMLEqual(w.render('mydate', datetime.date(2010, 4, 15)), w.render('mydate', '2010-04-15'))
# Invalid dates still render the failed date:
self.assertHTMLEqual(w.render('mydate', '2010-02-31'), """<select name="mydate_month" id="id_mydate_month">
<option value="1">January</option>
<option value="2" selected="selected">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected="selected">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Using a SelectDateWidget in a form:
w = SelectDateWidget(years=('2007','2008','2009','2010','2011','2012','2013','2014','2015','2016'), required=False)
self.assertHTMLEqual(w.render('mydate', ''), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
self.assertHTMLEqual(w.render('mydate', '2010-04-15'), """<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
a = GetDate({'mydate_month':'4', 'mydate_day':'1', 'mydate_year':'2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict,
# we must be prepared to accept the input from the "as_hidden"
# rendering as well.
self.assertHTMLEqual(a['mydate'].as_hidden(), '<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />')
b = GetDate({'mydate':'2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month':'2', 'mydate_day':'31', 'mydate_year':'2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month':'1', 'mydate_day':'1', 'mydate_year':'2010'})
self.assertTrue('<label for="id_mydate_month">' in d.as_p())
def test_multiwidget(self):
# MultiWidget and MultiValueField #############################################
# MultiWidgets are widgets composed of other widgets. They are usually
# combined with MultiValueFields - a field that is composed of other fields.
        # MultiWidgets can themselves be composed of other MultiWidgets.
# SplitDateTimeWidget is one example of a MultiWidget.
class ComplexMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = (
TextInput(),
SelectMultiple(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
SplitDateTimeWidget(),
)
super(ComplexMultiWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
data = value.split(',')
return [data[0], data[1], datetime.datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S")]
return [None, None, None]
def format_output(self, rendered_widgets):
return '\n'.join(rendered_widgets)
w = ComplexMultiWidget()
self.assertHTMLEqual(w.render('name', 'some text,JP,2007-04-25 06:24:00'), """<input type="text" name="name_0" value="some text" />
<select multiple="multiple" name="name_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="name_2_0" value="2007-04-25" /><input type="text" name="name_2_1" value="06:24:00" />""")
class ComplexField(MultiValueField):
def __init__(self, required=True, widget=None, label=None, initial=None):
fields = (
CharField(),
MultipleChoiceField(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
SplitDateTimeField()
)
super(ComplexField, self).__init__(fields, required, widget, label, initial)
def compress(self, data_list):
if data_list:
return '%s,%s,%s' % (data_list[0],''.join(data_list[1]),data_list[2])
return None
f = ComplexField(widget=w)
self.assertEqual(f.clean(['some text', ['J','P'], ['2007-04-25','6:24:00']]), 'some text,JP,2007-04-25 06:24:00')
self.assertFormErrors(['Select a valid choice. X is not one of the available choices.'], f.clean, ['some text',['X'], ['2007-04-25','6:24:00']])
# If insufficient data is provided, None is substituted
self.assertFormErrors(['This field is required.'], f.clean, ['some text',['JP']])
class ComplexFieldForm(Form):
field1 = ComplexField(widget=w)
f = ComplexFieldForm()
self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" /><input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr>""")
f = ComplexFieldForm({'field1_0':'some text','field1_1':['J','P'], 'field1_2_0':'2007-04-25', 'field1_2_1':'06:24:00'})
self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_field1_0">Field1:</label></th><td><input type="text" name="field1_0" value="some text" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" /><input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr>""")
self.assertEqual(f.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00')
def test_ipaddress(self):
f = IPAddressField()
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
f = IPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
        # The edge cases of the IPv6 validation code are not deeply tested
        # here; they are covered in the tests for django.utils.ipv6.
f = GenericIPAddressField()
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean('fe80::223:6cff:fe8a:2e8a'), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean('2a02::223:6cff:fe8a:2e8a'), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '256.125.1.5')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv4 address.'], f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertFormErrors(['This field is required.'], f.clean, '')
self.assertFormErrors(['This field is required.'], f.clean, None)
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean('fe80::223:6cff:fe8a:2e8a'), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean('2a02::223:6cff:fe8a:2e8a'), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['Enter a valid IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '127.0.0.')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1.2.3.4.5')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '256.125.1.5')
self.assertEqual(f.clean('fe80::223:6cff:fe8a:2e8a'), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean('2a02::223:6cff:fe8a:2e8a'), '2a02::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '12345:2:3:4')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1::2:3::4')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1::2:3:4:5:6:7:8')
self.assertFormErrors(['Enter a valid IPv4 or IPv6 address.'], f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalising code
f = GenericIPAddressField()
self.assertEqual(f.clean('::ffff:0a0a:0a0a'), '::ffff:10.10.10.10')
self.assertEqual(f.clean('::ffff:10.10.10.10'), '::ffff:10.10.10.10')
self.assertEqual(f.clean('2001:000:a:0000:0:fe:fe:beef'), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean('2001::a:0000:0:fe:fe:beef'), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean('::ffff:0a0a:0a0a'), '10.10.10.10')
def test_smart_unicode(self):
class Test:
def __str__(self):
return 'ŠĐĆŽćžšđ'.encode('utf-8')
class TestU:
def __str__(self):
return 'Foo'
def __unicode__(self):
return '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'
self.assertEqual(smart_unicode(Test()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_unicode(TestU()), '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
self.assertEqual(smart_unicode(1), '1')
self.assertEqual(smart_unicode('foo'), 'foo')
def test_accessing_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
if not self.errors:
data['username'] = data['username'].lower()
return data
f = UserForm({'username': 'SirRobin', 'password': 'blue'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_overriding_errorlist(self):
class DivErrorList(ErrorList):
def __unicode__(self):
return self.as_divs()
def as_divs(self):
if not self: return ''
return '<div class="errorlist">%s</div>' % ''.join(['<div class="error">%s</div>' % force_unicode(e) for e in self])
class CommentForm(Form):
name = CharField(max_length=50, required=False)
email = EmailField()
comment = CharField()
data = dict(email='invalid')
f = CommentForm(data, auto_id=False, error_class=DivErrorList)
self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid e-mail address.</div></div>
<p>Email: <input type="text" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_multipart_encoded_form(self):
class FormWithoutFile(Form):
username = CharField()
class FormWithFile(Form):
username = CharField()
file = FileField()
class FormWithImage(Form):
image = ImageField()
self.assertFalse(FormWithoutFile().is_multipart())
self.assertTrue(FormWithFile().is_multipart())
self.assertTrue(FormWithImage().is_multipart())
def test_field_not_required(self):
b = GetNotRequiredDate({
'mydate_year': '',
'mydate_month': '',
'mydate_day': ''
})
self.assertFalse(b.has_changed())
class FormsExtraL10NTestCase(TestCase):
def setUp(self):
super(FormsExtraL10NTestCase, self).setUp()
self.old_use_l10n = getattr(settings, 'USE_L10N', False)
settings.USE_L10N = True
translation.activate('nl')
def tearDown(self):
translation.deactivate()
settings.USE_L10N = self.old_use_l10n
super(FormsExtraL10NTestCase, self).tearDown()
def test_l10n(self):
w = SelectDateWidget(years=('2007','2008','2009','2010','2011','2012','2013','2014','2015','2016'), required=False)
self.assertEqual(w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-2010')
self.assertHTMLEqual(w.render('date', '13-08-2010'), """<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected="selected">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected="selected">augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>""")
# Years before 1900 work
w = SelectDateWidget(years=('1899',))
self.assertEqual(w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-1899')
def test_l10n_date_changed(self):
"""
Ensure that SelectDateWidget._has_changed() works correctly with a
localized date format.
Refs #17165.
"""
# With Field.show_hidden_initial=False -----------------------
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '2',
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertTrue(b.has_changed())
# With Field.show_hidden_initial=True ------------------------
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 22)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 22))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
def test_l10n_invalid_date_in(self):
# Invalid dates shouldn't be allowed
a = GetDate({'mydate_month':'2', 'mydate_day':'31', 'mydate_year':'2010'})
self.assertFalse(a.is_valid())
# 'Geef een geldige datum op.' = 'Enter a valid date.'
self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})
def test_form_label_association(self):
# label tag is correctly associated with first rendered dropdown
a = GetDate({'mydate_month':'1', 'mydate_day':'1', 'mydate_year':'2010'})
self.assertTrue('<label for="id_mydate_day">' in a.as_p())
|
{
"content_hash": "9e6f9b97fdaf5989e1262f1d2ef7c030",
"timestamp": "",
"source": "github",
"line_count": 780,
"max_line_length": 175,
"avg_line_length": 41.52051282051282,
"alnum_prop": 0.6348422157722473,
"repo_name": "azurestandard/django",
"id": "28b6c12453019e53a2736dc08349c1e5348a7768",
"size": "32418",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/regressiontests/forms/tests/extra.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Scheduler Service
"""
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from cinder import context
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE
from cinder import manager
from cinder import quota
from cinder import rpc
from cinder.scheduler.flows import create_volume
from cinder.volume import rpcapi as volume_rpcapi
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
default='cinder.scheduler.filter_scheduler.'
'FilterScheduler',
help='Default scheduler driver to use')
CONF = cfg.CONF
CONF.register_opt(scheduler_driver_opt)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
class SchedulerManager(manager.Manager):
"""Chooses a host to create volumes."""
RPC_API_VERSION = '1.8'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, scheduler_driver=None, service_name=None,
*args, **kwargs):
if not scheduler_driver:
scheduler_driver = CONF.scheduler_driver
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
self._startup_delay = True
def init_host_with_rpc(self):
ctxt = context.get_admin_context()
self.request_service_capabilities(ctxt)
eventlet.sleep(CONF.periodic_interval)
self._startup_delay = False
def update_service_capabilities(self, context, service_name=None,
host=None, capabilities=None, **kwargs):
"""Process a capability update from a service node."""
if capabilities is None:
capabilities = {}
self.driver.update_service_capabilities(service_name,
host,
capabilities)
def _wait_for_scheduler(self):
        # NOTE(dulek): We're waiting for the scheduler to announce that it's
        # ready, or until CONF.periodic_interval seconds have passed since
        # service startup.
while self._startup_delay and not self.driver.is_ready():
eventlet.sleep(1)
def create_consistencygroup(self, context, topic,
group,
request_spec_list=None,
filter_properties_list=None):
self._wait_for_scheduler()
try:
self.driver.schedule_create_consistencygroup(
context, group,
request_spec_list,
filter_properties_list)
except exception.NoValidHost:
LOG.error(_LE("Could not find a host for consistency group "
"%(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to create consistency group "
"%(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
def create_volume(self, context, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
self._wait_for_scheduler()
try:
flow_engine = create_volume.get_flow(context,
db, self.driver,
request_spec,
filter_properties,
volume_id,
snapshot_id,
image_id)
except Exception:
msg = _("Failed to create scheduler manager volume flow")
LOG.exception(msg)
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
def request_service_capabilities(self, context):
volume_rpcapi.VolumeAPI().publish_service_capabilities(context)
def migrate_volume_to_host(self, context, topic, volume_id, host,
force_host_copy, request_spec,
filter_properties=None):
"""Ensure that the host exists and can accept the volume."""
self._wait_for_scheduler()
def _migrate_volume_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'migration_status': None}}
self._set_volume_state_and_notify('migrate_volume_to_host',
volume_state,
context, ex, request_spec)
try:
tgt_host = self.driver.host_passes_filters(context, host,
request_spec,
filter_properties)
except exception.NoValidHost as ex:
_migrate_volume_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
_migrate_volume_set_error(self, context, ex, request_spec)
else:
volume_ref = db.volume_get(context, volume_id)
volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
tgt_host,
force_host_copy)
def retype(self, context, topic, volume_id,
request_spec, filter_properties=None):
"""Schedule the modification of a volume's type.
:param context: the request context
:param topic: the topic listened on
:param volume_id: the ID of the volume to retype
:param request_spec: parameters for this retype request
:param filter_properties: parameters to filter by
"""
self._wait_for_scheduler()
def _retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations):
if reservations:
QUOTAS.rollback(context, reservations)
if (volume_ref['volume_attachment'] is None or
len(volume_ref['volume_attachment']) == 0):
orig_status = 'available'
else:
orig_status = 'in-use'
volume_state = {'volume_state': {'status': orig_status}}
self._set_volume_state_and_notify('retype', volume_state,
context, ex, request_spec, msg)
volume_ref = db.volume_get(context, volume_id)
reservations = request_spec.get('quota_reservations')
new_type = request_spec.get('volume_type')
if new_type is None:
msg = _('New volume type not specified in request_spec.')
ex = exception.ParameterNotFound(param='volume_type')
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations)
# Default migration policy is 'never'
migration_policy = request_spec.get('migration_policy')
if not migration_policy:
migration_policy = 'never'
try:
tgt_host = self.driver.find_retype_host(context, request_spec,
filter_properties,
migration_policy)
except exception.NoValidHost as ex:
msg = (_("Could not find a host for volume %(volume_id)s with "
"type %(type_id)s.") %
{'type_id': new_type['id'], 'volume_id': volume_id})
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, msg, reservations)
except Exception as ex:
with excutils.save_and_reraise_exception():
_retype_volume_set_error(self, context, ex, request_spec,
volume_ref, None, reservations)
else:
volume_rpcapi.VolumeAPI().retype(context, volume_ref,
new_type['id'], tgt_host,
migration_policy, reservations)
def manage_existing(self, context, topic, volume_id,
request_spec, filter_properties=None):
"""Ensure that the host exists and can accept the volume."""
self._wait_for_scheduler()
def _manage_existing_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'status': 'error'}}
self._set_volume_state_and_notify('manage_existing', volume_state,
context, ex, request_spec)
volume_ref = db.volume_get(context, volume_id)
try:
self.driver.host_passes_filters(context,
volume_ref['host'],
request_spec,
filter_properties)
except exception.NoValidHost as ex:
_manage_existing_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
_manage_existing_set_error(self, context, ex, request_spec)
else:
volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref,
request_spec.get('ref'))
def get_pools(self, context, filters=None):
"""Get active pools from scheduler's cache.
        NOTE(dulek): There's no self._wait_for_scheduler() here because
        get_pools is a blocking RPC call for the c-api. Also, this is an
        admin-only API extension, so it won't hurt the user much to retry
        the request manually.
"""
return self.driver.get_pools(context, filters)
def _set_volume_state_and_notify(self, method, updates, context, ex,
request_spec, msg=None):
# TODO(harlowja): move into a task that just does this later.
if not msg:
msg = (_LE("Failed to schedule_%(method)s: %(ex)s") %
{'method': method, 'ex': six.text_type(ex)})
LOG.error(msg)
volume_state = updates['volume_state']
properties = request_spec.get('volume_properties', {})
volume_id = request_spec.get('volume_id', None)
if volume_id:
db.volume_update(context, volume_id, volume_state)
payload = dict(request_spec=request_spec,
volume_properties=properties,
volume_id=volume_id,
state=volume_state,
method=method,
reason=ex)
rpc.get_notifier("scheduler").error(context,
'scheduler.' + method,
payload)
|
{
"content_hash": "ffcdb64cc84b8cd87cdffd32eb22cb2d",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 79,
"avg_line_length": 42.45387453874539,
"alnum_prop": 0.5280312907431551,
"repo_name": "potsmaster/cinder",
"id": "352db2376aee8044894a910c751aecc267f936f2",
"size": "12279",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/scheduler/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12496416"
},
{
"name": "Shell",
"bytes": "8172"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('adoffice', '0026_category_items_title'),
]
operations = [
migrations.AddField(
model_name='category',
name='items_title_en',
field=models.CharField(default=b'', max_length=254, null=True, verbose_name='Tytu\u0142 Katalogu', blank=True),
),
migrations.AddField(
model_name='category',
name='items_title_pl',
field=models.CharField(default=b'', max_length=254, null=True, verbose_name='Tytu\u0142 Katalogu', blank=True),
),
]
|
{
"content_hash": "0a11693da77c112e63ec9b31597b0a96",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 123,
"avg_line_length": 30.26086956521739,
"alnum_prop": 0.6063218390804598,
"repo_name": "kobox/achilles.pl",
"id": "74fb17ace75fa1d6d9f094df061673b370a978af",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/adoffice/migrations/0027_auto_20150804_0904.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16652"
},
{
"name": "HTML",
"bytes": "44127"
},
{
"name": "JavaScript",
"bytes": "11891"
},
{
"name": "Python",
"bytes": "103708"
}
],
"symlink_target": ""
}
|
import sys
import re
def main():
for filename in sys.argv[1:]:
try:
f = open(filename, 'r')
except IOError as msg:
print(filename, ': can\'t open :', msg)
continue
with f:
line = f.readline()
if not re.match('^#! */usr/local/bin/python', line):
print(filename, ': not a /usr/local/bin/python script')
continue
rest = f.read()
line = re.sub('/usr/local/bin/python',
'/usr/bin/env python', line)
print(filename, ':', repr(line))
with open(filename, "w") as f:
f.write(line)
f.write(rest)
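# Example of the rewrite this script performs (hypothetical input file):
#   before: #! /usr/local/bin/python
#   after:  #! /usr/bin/env python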
if __name__ == '__main__':
main()
|
{
"content_hash": "231c0f8a0923c4d6d43913be54955992",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 28.03846153846154,
"alnum_prop": 0.4663923182441701,
"repo_name": "batermj/algorithm-challenger",
"id": "725300e56a27db4a7a413b11cd074210c51881eb",
"size": "892",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Tools/scripts/fixps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
}
|
import os
from thumblr.dto import ImageMetadata
from thumblr.models import Image, ImageSize
from thumblr.utils.hash import file_hash
def create_image(uploaded_file, image_metadata):
assert isinstance(image_metadata, ImageMetadata)
image = Image()
image.file_name = image_metadata.file_name
image.site_id = image_metadata.site_id
image.content_type_id = image_metadata.content_type_id
image.object_id = image_metadata.object_id
image.image_hash = file_hash(uploaded_file) + os.path.splitext(image_metadata.file_name)[-1]
if uploaded_file.name:
image.image_in_storage.save(uploaded_file.name, uploaded_file, False)
image.image_hash_in_storage.save(image.image_hash, uploaded_file, False)
image.is_main = image_metadata.is_main or False
if image_metadata.order_number:
image.order_number = image_metadata.order_number
original_size = ImageSize.objects.get(name=image_metadata.size_slug)
image.size = original_size
image.save()
return image
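# A minimal usage sketch (hypothetical field values; ImageMetadata is the DTO
# imported above, and uploaded_file would typically come from request.FILES):
#     meta = ImageMetadata(file_name='cat.jpg', site_id=1, content_type_id=7,
#                          object_id=42, is_main=True, size_slug='original')
#     image = create_image(request.FILES['image'], meta)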
def replace_uploaded_image(image, new_uploaded_image):
assert isinstance(image, Image)
image.image_hash = file_hash(new_uploaded_image) + os.path.splitext(image.file_name)[-1]
if new_uploaded_image.name:
image.image_in_storage.save(new_uploaded_image.name, new_uploaded_image, False)
image.image_hash_in_storage.save(image.image_hash, new_uploaded_image, False)
image.save()
def update_image_metadata(image, updated_spec):
assert isinstance(image, Image)
assert isinstance(updated_spec, ImageMetadata)
    if updated_spec.file_name is not None:
        image.file_name = updated_spec.file_name
    if updated_spec.site_id is not None:
        image.site_id = updated_spec.site_id
    if updated_spec.content_type_id is not None:
        image.content_type_id = updated_spec.content_type_id
    if updated_spec.object_id is not None:
        image.object_id = updated_spec.object_id
    if updated_spec.is_main is not None:
        image.is_main = updated_spec.is_main
    if updated_spec.size_slug is not None:
        image_size = ImageSize.objects.get(name=updated_spec.size_slug)
        image.size = image_size
image.save()
|
{
"content_hash": "94b158392db211b786388fdaf626e0c3",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 96,
"avg_line_length": 30.246575342465754,
"alnum_prop": 0.708786231884058,
"repo_name": "unisport/thumblr",
"id": "bdb915cc08671d18662ec1d20e4ccb73bb622ddc",
"size": "2208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thumblr/services/cud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "855"
},
{
"name": "Python",
"bytes": "106757"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
long_description = """
Pipelign
--------
An automated pipeline for multiple sequence alignment.
"""
setup(
name='pipelign',
version='0.1',
author='Mukarram Hossain',
author_email='asmmh2@cam.ac.uk',
packages=find_packages(exclude=[dir_path+'/docs']),
url='https://github.com/asmmhossain/pipelign',
license='MIT',
description='A pipeline for automated alignment',
long_description=long_description,
install_requires=[
'biopython',
'ete3'
],
scripts=[
dir_path+'/bin/pipelign',
dir_path+'/bin/gb2fas'
]
)
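# Typical installation from a source checkout (standard pip/setuptools usage):
#     pip install .
# or, during development:
#     pip install -e .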
|
{
"content_hash": "d98dd937415d5732da6249e38574280b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 55,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.64,
"repo_name": "asmmhossain/pipelign",
"id": "5943b0cc9bfa9b5cb9c164e96232d6512ba0e5cd",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "108"
},
{
"name": "Makefile",
"bytes": "182"
},
{
"name": "Python",
"bytes": "85583"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
}
|
from typing import List
class BaseParseError(ValueError):
pass
class ParseResultError(BaseParseError):
pass
class ParseError(BaseParseError):
pass
class ParseActionError(BaseParseError):
def __init__(self, expr: str, tokens: List, string: str, **kwargs):
tpl = kwargs.pop('tpl', (
'Invalid parse error for `{}` expression. '
'Got `{}` tokens for rule `{}`'
))
        tokens = '[{}]'.format(', '.join(str(t) for t in tokens))
super().__init__(tpl.format(expr, tokens, string))
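if __name__ == '__main__':
    # Minimal demonstration (hypothetical values): prints the message that
    # ParseActionError builds from an expression name, its tokens and the
    # input string.
    try:
        raise ParseActionError('ident', ['foo', 'bar'], 'foo bar baz')
    except ParseActionError as exc:
        print(exc)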
|
{
"content_hash": "491ae81cee1db0004a4eeb14662d3e06",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 23.26086956521739,
"alnum_prop": 0.5981308411214953,
"repo_name": "RulersOfAsgard/alamo-common",
"id": "51177bd0d7bd043312b1a72275cea52eec28365d",
"size": "559",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "alamo_common/parser/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7442"
},
{
"name": "Python",
"bytes": "73282"
}
],
"symlink_target": ""
}
|
"""
Created on Mon Oct 2 22:42:37 2017
@author: Ameen
"""
from django.shortcuts import render
def pathfinder(request):
return render(request, 'pathfinder.html')
|
{
"content_hash": "1a6ad546a7173a544446941564bd260b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 16.8,
"alnum_prop": 0.7202380952380952,
"repo_name": "mdameenh/elysia",
"id": "de7231ee311ebba6bbb9ba19311cca7a37cb31bc",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pathfinder/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4647"
},
{
"name": "HTML",
"bytes": "5344"
},
{
"name": "JavaScript",
"bytes": "585209"
},
{
"name": "Python",
"bytes": "58300"
}
],
"symlink_target": ""
}
|
"""
Django settings for django_db project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vp7k0tp7bh#kb*o1^&3v129--u(_jjqu*04nhh(_5h#rg6j64_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_db.urls'
WSGI_APPLICATION = 'django_db.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "c6a354c3a98a66b0e89f7ddfb333a672",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 71,
"avg_line_length": 24.158536585365855,
"alnum_prop": 0.7218576476527007,
"repo_name": "wfarn/scratchpad",
"id": "bff21f5e506780e8c85bb49e35b6a49dc8d1e864",
"size": "1981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/django_db/django_db/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "115"
},
{
"name": "Go",
"bytes": "548"
},
{
"name": "HCL",
"bytes": "3390"
},
{
"name": "Python",
"bytes": "14684"
},
{
"name": "Ruby",
"bytes": "1819"
},
{
"name": "Shell",
"bytes": "3143"
}
],
"symlink_target": ""
}
|
_q_exports = [
'_q_index',
'query',
]
from quixote.errors import AccessError
from canary.qx_defs import NotLoggedInError
from canary.ui.stats import stats_ui
_q_index = stats_ui._q_index
query = stats_ui.query
def _q_access (request):
if request.session.user == None:
raise NotLoggedInError('Authorized access only.')
if not (request.session.user.is_admin \
or request.session.user.is_editor \
or request.session.user.is_assistant):
raise AccessError("You don't have access to this page.")
|
{
"content_hash": "e74c8cb40f71d4231045cced9cffd968",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 27.3,
"alnum_prop": 0.6794871794871795,
"repo_name": "dchud/sentinel",
"id": "4b8d3c5eace5c9f7aa995c74dc3e24f7d55b864e",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canary/ui/stats/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "151729"
},
{
"name": "Python",
"bytes": "345235"
},
{
"name": "Shell",
"bytes": "775"
}
],
"symlink_target": ""
}
|
def return_truey():
return True
def return_falsey():
return False
|
{
"content_hash": "a9b7c8db0955f2aff49ec8fbdad266ba",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 20,
"avg_line_length": 12.666666666666666,
"alnum_prop": 0.6578947368421053,
"repo_name": "ealonas/Markus",
"id": "337e50d52395046400d3d6b67c515510f694fade",
"size": "76",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automated-tests-files/test_runner/sample_files/uam/submissions/error_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "270"
},
{
"name": "C",
"bytes": "15162"
},
{
"name": "C++",
"bytes": "3255"
},
{
"name": "CSS",
"bytes": "114582"
},
{
"name": "HTML",
"bytes": "589084"
},
{
"name": "Java",
"bytes": "433411"
},
{
"name": "JavaScript",
"bytes": "1419126"
},
{
"name": "Makefile",
"bytes": "1233"
},
{
"name": "PHP",
"bytes": "1518"
},
{
"name": "Pascal",
"bytes": "1960"
},
{
"name": "Python",
"bytes": "165398"
},
{
"name": "Racket",
"bytes": "1336"
},
{
"name": "Ruby",
"bytes": "2039952"
},
{
"name": "Shell",
"bytes": "9845"
},
{
"name": "SourcePawn",
"bytes": "257"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cfehosts.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
{
"content_hash": "f714d4b4b56897e80618b6ed90c4a63a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6211734693877551,
"repo_name": "codingforentrepreneurs/CFE-Django-Hosts",
"id": "8634ef8a96fa3c94a643199c84f096df78840cc1",
"size": "806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "606"
},
{
"name": "Python",
"bytes": "10221"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs strace or dtrace on a test and processes the logs to extract the
dependencies from the source tree.
Automatically extracts directories where all the files are used to make the
dependencies list more compact.
"""
import codecs
import csv
import logging
import optparse
import os
import posixpath
import re
import subprocess
import sys
## OS-specific imports
if sys.platform == 'win32':
from ctypes.wintypes import create_unicode_buffer
from ctypes.wintypes import windll, FormatError # pylint: disable=E0611
from ctypes.wintypes import GetLastError # pylint: disable=E0611
elif sys.platform == 'darwin':
import Carbon.File # pylint: disable=F0401
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
KEY_TRACKED = 'isolate_dependency_tracked'
KEY_UNTRACKED = 'isolate_dependency_untracked'
## OS-specific functions
if sys.platform == 'win32':
def QueryDosDevice(drive_letter):
"""Returns the Windows 'native' path for a DOS drive letter."""
assert re.match(r'^[a-zA-Z]:$', drive_letter), drive_letter
# Guesswork. QueryDosDeviceW never returns the required number of bytes.
chars = 1024
drive_letter = unicode(drive_letter)
p = create_unicode_buffer(chars)
if 0 == windll.kernel32.QueryDosDeviceW(drive_letter, p, chars):
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'QueryDosDevice(%s): %s (%d)' % (
str(drive_letter), FormatError(err), err))
return p.value
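  # Hypothetical example (the volume number varies per machine):
  #   QueryDosDevice('C:')  ->  u'\\Device\\HarddiskVolume1'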
def GetShortPathName(long_path):
"""Returns the Windows short path equivalent for a 'long' path."""
long_path = unicode(long_path)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(long_path) and not long_path.startswith('\\\\?\\'):
long_path = '\\\\?\\' + long_path
chars = windll.kernel32.GetShortPathNameW(long_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetShortPathNameW(long_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'GetShortPathName(%s): %s (%d)' % (
str(long_path), FormatError(err), err))
def GetLongPathName(short_path):
"""Returns the Windows long path equivalent for a 'short' path."""
short_path = unicode(short_path)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(short_path) and not short_path.startswith('\\\\?\\'):
short_path = '\\\\?\\' + short_path
chars = windll.kernel32.GetLongPathNameW(short_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetLongPathNameW(short_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'GetLongPathName(%s): %s (%d)' % (
str(short_path), FormatError(err), err))
def get_current_encoding():
"""Returns the 'ANSI' code page associated to the process."""
return 'cp%d' % int(windll.kernel32.GetACP())
class DosDriveMap(object):
"""Maps \Device\HarddiskVolumeN to N: on Windows."""
# Keep one global cache.
_MAPPING = {}
def __init__(self):
if not self._MAPPING:
        # This is related to the UNC resolver on Windows. Ignore it.
self._MAPPING['\\Device\\Mup'] = None
for letter in (chr(l) for l in xrange(ord('C'), ord('Z')+1)):
try:
letter = '%s:' % letter
mapped = QueryDosDevice(letter)
          # It can happen. Assert until we see it happen in the wild. In
          # practice, prefer the lower drive letter.
assert mapped not in self._MAPPING
if mapped not in self._MAPPING:
self._MAPPING[mapped] = letter
except WindowsError: # pylint: disable=E0602
pass
def to_dos(self, path):
"""Converts a native NT path to DOS path."""
m = re.match(r'(^\\Device\\[a-zA-Z0-9]+)(\\.*)?$', path)
assert m, path
if not m.group(1) in self._MAPPING:
# Unmapped partitions may be accessed by windows for the
# fun of it while the test is running. Discard these.
return None
drive = self._MAPPING[m.group(1)]
if not drive or not m.group(2):
return drive
return drive + m.group(2)
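# A minimal usage sketch with a made-up device path; QueryDosDevice() output
# varies per machine, so the mapping shown is an assumption:
#   drive_map = DosDriveMap()
#   drive_map.to_dos(u'\\Device\\HarddiskVolume1\\Windows\\notepad.exe')
#   # -> u'C:\\Windows\\notepad.exe' when HarddiskVolume1 is mapped to C:,
#   # or None for an unmapped partition.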
def get_native_path_case(root, relative_path):
"""Returns the native path case."""
if sys.platform == 'win32':
    # Windows used to have an option to turn on case sensitivity for the
    # non-Win32 subsystems, but that's out of scope here and isn't supported
    # anymore.
# First process root.
if root:
root = GetLongPathName(GetShortPathName(root)) + os.path.sep
path = os.path.join(root, relative_path) if root else relative_path
# Go figure why GetShortPathName() is needed.
return GetLongPathName(GetShortPathName(path))[len(root):]
elif sys.platform == 'darwin':
# Technically, it's only HFS+ on OSX that is case insensitive. It's
# the default setting on HFS+ but can be changed.
root_ref, _ = Carbon.File.FSPathMakeRef(root)
rel_ref, _ = Carbon.File.FSPathMakeRef(os.path.join(root, relative_path))
return rel_ref.FSRefMakePath()[len(root_ref.FSRefMakePath())+1:]
else:
# Give up on cygwin, as GetLongPathName() can't be called.
return relative_path
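# Illustration (hypothetical paths): on Windows this normalizes the on-disk
# case, so with a root of u'c:\\Windows' the call
#   get_native_path_case(u'c:\\Windows', u'SYSTEM32')
# would return u'System32', matching the case stored by the file system.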
def get_flavor():
"""Returns the system default flavor. Copied from gyp/pylib/gyp/common.py."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
'sunos5': 'solaris',
'freebsd7': 'freebsd',
'freebsd8': 'freebsd',
}
return flavors.get(sys.platform, 'linux')
def isEnabledFor(level):
return logging.getLogger().isEnabledFor(level)
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
def posix_relpath(path, root):
"""posix.relpath() that keeps trailing slash."""
out = posixpath.relpath(path, root)
if path.endswith('/'):
out += '/'
return out
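# Example of the trailing-slash behavior that plain posixpath.relpath() drops:
#   posix_relpath('/a/b', '/a')   # -> 'b'
#   posix_relpath('/a/b/', '/a')  # -> 'b/'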
class Strace(object):
"""strace implies linux."""
IGNORED = (
'/bin',
'/dev',
'/etc',
'/lib',
'/proc',
'/sys',
'/tmp',
'/usr',
'/var',
)
class _Context(object):
"""Processes a strace log line and keeps the list of existent and non
existent files accessed.
Ignores directories.
"""
# This is the most common format. pid function(args) = result
RE_HEADER = re.compile(r'^(\d+)\s+([^\(]+)\((.+?)\)\s+= (.+)$')
# An interrupted function call, only grab the minimal header.
RE_UNFINISHED = re.compile(r'^(\d+)\s+([^\(]+).*$')
UNFINISHED = ' <unfinished ...>'
# A resumed function call.
RE_RESUMED = re.compile(r'^(\d+)\s+<\.\.\. ([^ ]+) resumed> (.+)$')
# A process received a signal.
RE_SIGNAL = re.compile(r'^\d+\s+--- SIG[A-Z]+ .+ ---')
# A process didn't handle a signal.
RE_KILLED = re.compile(r'^(\d+)\s+\+\+\+ killed by ([A-Z]+) \+\+\+$')
# A call was canceled.
RE_UNAVAILABLE = re.compile(r'\)\s+= \? <unavailable>$')
# Arguments parsing.
RE_CHDIR = re.compile(r'^\"(.+?)\"$')
RE_EXECVE = re.compile(r'^\"(.+?)\", \[.+?\], \[.+?\]$')
RE_OPEN2 = re.compile(r'^\"(.*?)\", ([A-Z\_\|]+)$')
RE_OPEN3 = re.compile(r'^\"(.*?)\", ([A-Z\_\|]+), (\d+)$')
RE_RENAME = re.compile(r'^\"(.+?)\", \"(.+?)\"$')
def __init__(self, blacklist):
self._cwd = {}
self.blacklist = blacklist
self.files = set()
self.non_existent = set()
# Key is a tuple(pid, function name)
self._pending_calls = {}
self._line_number = 0
@classmethod
def traces(cls):
prefix = 'handle_'
return [i[len(prefix):] for i in dir(cls) if i.startswith(prefix)]
def on_line(self, line):
self._line_number += 1
line = line.strip()
if self.RE_SIGNAL.match(line):
# Ignore signals.
return
m = self.RE_KILLED.match(line)
if m:
self.handle_exit_group(int(m.group(1)), m.group(2), None, None)
return
if line.endswith(self.UNFINISHED):
line = line[:-len(self.UNFINISHED)]
m = self.RE_UNFINISHED.match(line)
assert m, '%d: %s' % (self._line_number, line)
self._pending_calls[(m.group(1), m.group(2))] = line
return
m = self.RE_UNAVAILABLE.match(line)
if m:
# This usually means a process was killed and a pending call was
# canceled.
# TODO(maruel): Look up the last exit_group() trace just above and make
# sure any self._pending_calls[(pid, anything)] is properly flushed.
return
m = self.RE_RESUMED.match(line)
if m:
pending = self._pending_calls.pop((m.group(1), m.group(2)))
# Reconstruct the line.
line = pending + m.group(3)
m = self.RE_HEADER.match(line)
assert m, '%d: %s' % (self._line_number, line)
return getattr(self, 'handle_%s' % m.group(2))(
int(m.group(1)),
m.group(2),
m.group(3),
m.group(4))
def handle_chdir(self, pid, _function, args, result):
"""Updates cwd."""
if result.startswith('0'):
cwd = self.RE_CHDIR.match(args).group(1)
if not cwd.startswith('/'):
cwd2 = os.path.join(self._cwd[pid], cwd)
logging.debug('handle_chdir(%d, %s) -> %s' % (pid, cwd, cwd2))
self._cwd[pid] = cwd2
else:
logging.debug('handle_chdir(%d, %s)' % (pid, cwd))
self._cwd[pid] = cwd
else:
        assert False, 'Unexpected failure: %s' % result
def handle_clone(self, pid, _function, _args, result):
"""Transfers cwd."""
if result == '? ERESTARTNOINTR (To be restarted)':
return
self._cwd[int(result)] = self._cwd[pid]
def handle_execve(self, pid, _function, args, result):
self._handle_file(pid, self.RE_EXECVE.match(args).group(1), result)
def handle_exit_group(self, pid, _function, _args, _result):
"""Removes cwd."""
del self._cwd[pid]
@staticmethod
def handle_fork(_pid, _function, args, result):
assert False, (args, result)
def handle_open(self, pid, _function, args, result):
args = (self.RE_OPEN3.match(args) or self.RE_OPEN2.match(args)).groups()
if 'O_DIRECTORY' in args[1]:
return
self._handle_file(pid, args[0], result)
def handle_rename(self, pid, _function, args, result):
args = self.RE_RENAME.match(args).groups()
self._handle_file(pid, args[0], result)
self._handle_file(pid, args[1], result)
@staticmethod
def handle_stat64(_pid, _function, args, result):
assert False, (args, result)
@staticmethod
def handle_vfork(_pid, _function, args, result):
assert False, (args, result)
def _handle_file(self, pid, filepath, result):
if result.startswith('-1'):
return
old_filepath = filepath
if not filepath.startswith('/'):
filepath = os.path.join(self._cwd[pid], filepath)
if self.blacklist(filepath):
return
if old_filepath != filepath:
logging.debug(
'_handle_file(%d, %s) -> %s' % (pid, old_filepath, filepath))
else:
logging.debug('_handle_file(%d, %s)' % (pid, filepath))
if filepath not in self.files and filepath not in self.non_existent:
if os.path.isfile(filepath):
self.files.add(filepath)
else:
self.non_existent.add(filepath)
@classmethod
def gen_trace(cls, cmd, cwd, logname):
"""Runs strace on an executable."""
logging.info('gen_trace(%s, %s, %s)' % (cmd, cwd, logname))
silent = not isEnabledFor(logging.INFO)
stdout = stderr = None
if silent:
stdout = stderr = subprocess.PIPE
traces = ','.join(cls._Context.traces())
trace_cmd = ['strace', '-f', '-e', 'trace=%s' % traces, '-o', logname]
child = subprocess.Popen(
trace_cmd + cmd, cwd=cwd, stdout=stdout, stderr=stderr)
out, err = child.communicate()
# Once it's done, inject a chdir() call to cwd to be able to reconstruct
# the full paths.
# TODO(maruel): cwd should be saved at each process creation, so forks needs
# to be traced properly.
if os.path.isfile(logname):
with open(logname) as f:
content = f.read()
with open(logname, 'w') as f:
pid = content.split(' ', 1)[0]
f.write('%s chdir("%s") = 0\n' % (pid, cwd))
f.write(content)
if child.returncode != 0:
print 'Failure: %d' % child.returncode
# pylint: disable=E1103
if out:
print ''.join(out.splitlines(True)[-100:])
if err:
print ''.join(err.splitlines(True)[-100:])
return child.returncode
@classmethod
def parse_log(cls, filename, blacklist):
"""Processes a strace log and returns the files opened and the files that do
not exist.
It does not track directories.
Most of the time, files that do not exist are temporary test files that
should be put in /tmp instead. See http://crbug.com/116251
"""
logging.info('parse_log(%s, %s)' % (filename, blacklist))
context = cls._Context(blacklist)
for line in open(filename):
context.on_line(line)
# Resolve any symlink we hit.
return (
set(os.path.realpath(f) for f in context.files),
set(os.path.realpath(f) for f in context.non_existent))
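# Sketch of the call sequence trace_inputs() below uses for every tracer in
# this file (the command and file names are placeholders):
#   api = Strace()
#   returncode = api.gen_trace(['python', 'foo_test.py'], cwd, logname)
#   files, non_existent = api.parse_log(logname, blacklist=lambda f: False)
# Dtrace and LogmanTrace expose the same gen_trace()/parse_log() pair, which
# is what lets trace_inputs() treat the three tracers interchangeably.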
class Dtrace(object):
"""Uses DTrace framework through dtrace. Requires root access.
Implies Mac OSX.
dtruss can't be used because it has compatibility issues with python.
Also, the pid->cwd handling needs to be done manually since OSX has no way to
get the absolute path of the 'cwd' dtrace variable from the probe.
Also, OSX doesn't populate curpsinfo->pr_psargs properly, see
https://discussions.apple.com/thread/1980539.
"""
IGNORED = (
'/.vol',
'/Library',
'/System',
'/dev',
'/etc',
'/private/var',
'/tmp',
'/usr',
'/var',
)
# pylint: disable=C0301
# To understand the following code, you'll want to take a look at:
# http://developers.sun.com/solaris/articles/dtrace_quickref/dtrace_quickref.html
# https://wikis.oracle.com/display/DTrace/Variables
# http://docs.oracle.com/cd/E19205-01/820-4221/
#
# The list of valid probes can be retrieved with:
# sudo dtrace -l -P syscall | less
D_CODE = """
proc:::start /trackedpid[ppid]/ {
trackedpid[pid] = 1;
current_processes += 1;
printf("%d %d:%d %s_%s(\\"%s\\", %d) = 0\\n",
logindex, ppid, pid, probeprov, probename, execname,
current_processes);
logindex++;
}
proc:::exit /trackedpid[pid] && current_processes == 1/ {
trackedpid[pid] = 0;
current_processes -= 1;
printf("%d %d:%d %s_%s(\\"%s\\", %d) = 0\\n",
logindex, ppid, pid, probeprov, probename, execname,
current_processes);
logindex++;
exit(0);
}
proc:::exit /trackedpid[pid]/ {
trackedpid[pid] = 0;
current_processes -= 1;
printf("%d %d:%d %s_%s(\\"%s\\", %d) = 0\\n",
logindex, ppid, pid, probeprov, probename, execname,
current_processes);
logindex++;
}
/* Finally what we care about! */
syscall::open*:entry /trackedpid[pid]/ {
self->arg0 = arg0;
self->arg1 = arg1;
self->arg2 = arg2;
}
syscall::open*:return /trackedpid[pid] && errno == 0/ {
printf("%d %d:%d %s(\\"%s\\", %d, %d) = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0),
self->arg1, self->arg2, errno);
logindex++;
self->arg0 = 0;
self->arg1 = 0;
self->arg2 = 0;
}
syscall::rename:entry /trackedpid[pid]/ {
self->arg0 = arg0;
self->arg1 = arg1;
}
syscall::rename:return /trackedpid[pid]/ {
printf("%d %d:%d %s(\\"%s\\", \\"%s\\") = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0),
copyinstr(self->arg1), errno);
logindex++;
self->arg0 = 0;
self->arg1 = 0;
}
/* Track chdir, it's painful because it is only receiving relative path */
syscall::chdir:entry /trackedpid[pid]/ {
self->arg0 = arg0;
}
syscall::chdir:return /trackedpid[pid] && errno == 0/ {
printf("%d %d:%d %s(\\"%s\\") = %d\\n",
logindex, ppid, pid, probefunc, copyinstr(self->arg0), errno);
logindex++;
self->arg0 = 0;
}
/* TODO(maruel): *stat* functions and friends
syscall::access:return,
syscall::chdir:return,
syscall::chflags:return,
syscall::chown:return,
syscall::chroot:return,
syscall::getattrlist:return,
syscall::getxattr:return,
syscall::lchown:return,
syscall::lstat64:return,
syscall::lstat:return,
syscall::mkdir:return,
syscall::pathconf:return,
syscall::readlink:return,
syscall::removexattr:return,
syscall::setxattr:return,
syscall::stat64:return,
syscall::stat:return,
syscall::truncate:return,
syscall::unlink:return,
syscall::utimes:return,
*/
"""
@classmethod
def code(cls, pid, cwd):
"""Setups the D code to implement child process tracking.
Injects a fake chdir() trace to simplify parsing. The reason is that the
child process is already running at that point so:
- no proc_start() is logged for it.
- there is no way to figure out the absolute path of cwd in kernel on OSX
Since the child process is already started, initialize current_processes to
1.
"""
pid = str(pid)
cwd = os.path.realpath(cwd).replace('\\', '\\\\').replace('%', '%%')
return (
'dtrace:::BEGIN {\n'
' current_processes = 1;\n'
' logindex = 0;\n'
' trackedpid[') + pid + ('] = 1;\n'
' printf("%d %d:%d chdir(\\"' + cwd + '\\") = 0\\n",\n'
' logindex, 1, ' + pid + ');\n'
' logindex++;\n'
' printf("%d %d:%d %s_%s() = 0\\n",\n'
' logindex, ppid, pid, probeprov, probename);\n'
' logindex++;\n'
'}\n') + cls.D_CODE
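  # For reference, the BEGIN clause generated above emits one synthetic line
  # such as:
  #   0 1:1234 chdir("/base/cwd") = 0
  # (pid and cwd are illustrative). _Context.handle_chdir() below consumes
  # it to seed the cwd of the already-running child process.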
class _Context(object):
"""Processes a dtrace log line and keeps the list of existent and non
existent files accessed.
Ignores directories.
"""
# This is the most common format. index pid function(args) = result
RE_HEADER = re.compile(r'^\d+ (\d+):(\d+) ([a-zA-Z_\-]+)\((.*?)\) = (.+)$')
# Arguments parsing.
RE_CHDIR = re.compile(r'^\"(.+?)\"$')
RE_OPEN = re.compile(r'^\"(.+?)\", (\d+), (\d+)$')
RE_RENAME = re.compile(r'^\"(.+?)\", \"(.+?)\"$')
O_DIRECTORY = 0x100000
def __init__(self, blacklist):
# TODO(maruel): Handling chdir() and cwd in general on OSX is tricky
# because OSX only keeps relative directory names. In addition, cwd is a
# process local variable so forks need to be properly traced and cwd
# saved.
self._cwd = {}
self.blacklist = blacklist
self.files = set()
self.non_existent = set()
def on_line(self, line):
m = self.RE_HEADER.match(line)
assert m, line
fn = getattr(
self,
'handle_%s' % m.group(3).replace('-', '_'),
self._handle_ignored)
return fn(
int(m.group(1)),
int(m.group(2)),
m.group(3),
m.group(4),
m.group(5))
def handle_dtrace_BEGIN(self, _ppid, _pid, _function, args, _result):
pass
def handle_proc_start(self, ppid, pid, _function, _args, result):
"""Transfers cwd."""
assert result == '0'
self._cwd[pid] = self._cwd[ppid]
def handle_proc_exit(self, _ppid, pid, _function, _args, _result):
"""Removes cwd."""
del self._cwd[pid]
def handle_chdir(self, _ppid, pid, _function, args, result):
"""Updates cwd."""
if result.startswith('0'):
cwd = self.RE_CHDIR.match(args).group(1)
if not cwd.startswith('/'):
cwd2 = os.path.join(self._cwd[pid], cwd)
logging.debug('handle_chdir(%d, %s) -> %s' % (pid, cwd, cwd2))
self._cwd[pid] = cwd2
else:
logging.debug('handle_chdir(%d, %s)' % (pid, cwd))
self._cwd[pid] = cwd
else:
        assert False, 'Unexpected failure: %s' % result
def handle_open_nocancel(self, ppid, pid, function, args, result):
return self.handle_open(ppid, pid, function, args, result)
def handle_open(self, _ppid, pid, _function, args, result):
args = self.RE_OPEN.match(args).groups()
flag = int(args[1])
if self.O_DIRECTORY & flag == self.O_DIRECTORY:
# Ignore directories.
return
self._handle_file(pid, args[0], result)
def handle_rename(self, _ppid, pid, _function, args, result):
args = self.RE_RENAME.match(args).groups()
self._handle_file(pid, args[0], result)
self._handle_file(pid, args[1], result)
def _handle_file(self, pid, filepath, result):
if result.startswith(('-1', '2')):
return
orig_filepath = filepath
if not filepath.startswith('/'):
filepath = os.path.join(self._cwd[pid], filepath)
filepath = os.path.normpath(filepath)
if self.blacklist(filepath):
return
# Sadly, still need to filter out directories here;
# saw open_nocancel(".", 0, 0) = 0 lines.
if (filepath not in self.files and
filepath not in self.non_existent and
not os.path.isdir(filepath)):
if orig_filepath:
logging.debug(
'_handle_file(%d, %s) -> %s' % (pid, orig_filepath, filepath))
else:
logging.debug('_handle_file(%d, %s)' % (pid, filepath))
if os.path.isfile(filepath):
self.files.add(filepath)
else:
self.non_existent.add(filepath)
@staticmethod
def _handle_ignored(_ppid, pid, function, args, result):
logging.debug('%d %s(%s) = %s' % (pid, function, args, result))
@classmethod
def gen_trace(cls, cmd, cwd, logname):
"""Runs dtrace on an executable."""
logging.info('gen_trace(%s, %s, %s)' % (cmd, cwd, logname))
silent = not isEnabledFor(logging.INFO)
logging.info('Running: %s' % cmd)
signal = 'Go!'
logging.debug('Our pid: %d' % os.getpid())
# Part 1: start the child process.
stdout = stderr = None
if silent:
stdout = stderr = subprocess.PIPE
child_cmd = [
sys.executable, os.path.join(BASE_DIR, 'trace_child_process.py'),
]
child = subprocess.Popen(
child_cmd + cmd,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
cwd=cwd)
logging.debug('Started child pid: %d' % child.pid)
# Part 2: start dtrace process.
# Note: do not use the -p flag. It's useless if the initial process quits
# too fast, resulting in missing traces from the grand-children. The D code
# manages the dtrace lifetime itself.
trace_cmd = [
'sudo',
'dtrace',
'-x', 'dynvarsize=4m',
'-x', 'evaltime=exec',
'-n', cls.code(child.pid, cwd),
'-o', '/dev/stderr',
'-q',
]
with open(logname, 'w') as logfile:
dtrace = subprocess.Popen(
trace_cmd, stdout=logfile, stderr=subprocess.STDOUT)
logging.debug('Started dtrace pid: %d' % dtrace.pid)
# Part 3: Read until one line is printed, which signifies dtrace is up and
# ready.
with open(logname, 'r') as logfile:
while 'dtrace_BEGIN' not in logfile.readline():
if dtrace.poll() is not None:
break
try:
# Part 4: We can now tell our child to go.
# TODO(maruel): Another pipe than stdin could be used instead. This would
# be more consistent with the other tracing methods.
out, err = child.communicate(signal)
dtrace.wait()
if dtrace.returncode != 0:
print 'dtrace failure: %d' % dtrace.returncode
with open(logname) as logfile:
print ''.join(logfile.readlines()[-100:])
# Find a better way.
os.remove(logname)
else:
        # Sort the log right away to simplify our life. There isn't much
# advantage in keeping it out of order.
cls._sort_log(logname)
if child.returncode != 0:
print 'Failure: %d' % child.returncode
# pylint: disable=E1103
if out:
print ''.join(out.splitlines(True)[-100:])
if err:
print ''.join(err.splitlines(True)[-100:])
except KeyboardInterrupt:
# Still sort when testing.
cls._sort_log(logname)
raise
return dtrace.returncode or child.returncode
@classmethod
def parse_log(cls, filename, blacklist):
"""Processes a dtrace log and returns the files opened and the files that do
not exist.
It does not track directories.
Most of the time, files that do not exist are temporary test files that
should be put in /tmp instead. See http://crbug.com/116251
"""
logging.info('parse_log(%s, %s)' % (filename, blacklist))
context = cls._Context(blacklist)
for line in open(filename, 'rb'):
context.on_line(line)
# Resolve any symlink we hit.
return (
set(os.path.realpath(f) for f in context.files),
set(os.path.realpath(f) for f in context.non_existent))
@staticmethod
def _sort_log(logname):
"""Sorts the log back in order when each call occured.
dtrace doesn't save the buffer in strict order since it keeps one buffer per
CPU.
"""
with open(logname, 'rb') as logfile:
lines = [f for f in logfile.readlines() if f.strip()]
lines = sorted(lines, key=lambda l: int(l.split(' ', 1)[0]))
with open(logname, 'wb') as logfile:
logfile.write(''.join(lines))
class LogmanTrace(object):
"""Uses the native Windows ETW based tracing functionality to trace a child
process.
"""
class _Context(object):
"""Processes a ETW log line and keeps the list of existent and non
existent files accessed.
Ignores directories.
"""
EVENT_NAME = 0
TYPE = 1
PID = 9
CHILD_PID = 20
PARENT_PID = 21
FILE_PATH = 25
PROC_NAME = 26
CMD_LINE = 27
def __init__(self, blacklist):
self.blacklist = blacklist
self.files = set()
self.non_existent = set()
self._processes = set()
self._drive_map = DosDriveMap()
self._first_line = False
def on_csv_line(self, line):
"""Processes a CSV Event line."""
# So much white space!
line = [i.strip() for i in line]
if not self._first_line:
assert line == [
u'Event Name',
u'Type',
u'Event ID',
u'Version',
u'Channel',
u'Level', # 5
u'Opcode',
u'Task',
u'Keyword',
u'PID',
u'TID', # 10
u'Processor Number',
u'Instance ID',
u'Parent Instance ID',
u'Activity ID',
u'Related Activity ID', # 15
u'Clock-Time',
u'Kernel(ms)',
u'User(ms)',
u'User Data',
]
self._first_line = True
return
# As you can see, the CSV is full of useful non-redundant information:
# Event ID
assert line[2] == '0'
# Version
assert line[3] in ('2', '3'), line[3]
# Channel
assert line[4] == '0'
# Level
assert line[5] == '0'
# Task
assert line[7] == '0'
# Keyword
assert line[8] == '0x0000000000000000'
# Instance ID
assert line[12] == ''
# Parent Instance ID
assert line[13] == ''
# Activity ID
assert line[14] == '{00000000-0000-0000-0000-000000000000}'
# Related Activity ID
assert line[15] == ''
if line[0].startswith('{'):
# Skip GUIDs.
return
# Convert the PID in-place from hex.
line[self.PID] = int(line[self.PID], 16)
# By Opcode
handler = getattr(
self,
'handle_%s_%s' % (line[self.EVENT_NAME], line[self.TYPE]),
None)
if not handler:
        # Try to get a universal fallback
handler = getattr(self, 'handle_%s_Any' % line[self.EVENT_NAME], None)
if handler:
handler(line)
else:
assert False, '%s_%s' % (line[self.EVENT_NAME], line[self.TYPE])
def handle_EventTrace_Any(self, line):
pass
def handle_FileIo_Create(self, line):
m = re.match(r'^\"(.+)\"$', line[self.FILE_PATH])
self._handle_file(self._drive_map.to_dos(m.group(1)))
def handle_FileIo_Rename(self, line):
# TODO(maruel): Handle?
pass
def handle_FileIo_Any(self, line):
pass
def handle_Image_DCStart(self, line):
# TODO(maruel): Handle?
pass
def handle_Image_Load(self, line):
# TODO(maruel): Handle?
pass
def handle_Image_Any(self, line):
# TODO(maruel): Handle?
pass
def handle_Process_Any(self, line):
pass
def handle_Process_DCStart(self, line):
"""Gives historic information about the process tree.
Use it to extract the pid of the trace_inputs.py parent process that
started logman.exe.
"""
ppid = int(line[self.PARENT_PID], 16)
if line[self.PROC_NAME] == '"logman.exe"':
# logman's parent is us.
self._processes.add(ppid)
logging.info('Found logman\'s parent at %d' % ppid)
def handle_Process_End(self, line):
      # Check whether it is logman terminating; if so, grab the parent's
      # process pid and inject cwd.
if line[self.PID] in self._processes:
logging.info('Terminated: %d' % line[self.PID])
self._processes.remove(line[self.PID])
def handle_Process_Start(self, line):
"""Handles a new child process started by PID."""
ppid = line[self.PID]
pid = int(line[self.CHILD_PID], 16)
if ppid in self._processes:
if line[self.PROC_NAME] == '"logman.exe"':
# Skip the shutdown call.
return
self._processes.add(pid)
logging.info(
'New child: %d -> %d %s' % (ppid, pid, line[self.PROC_NAME]))
def handle_SystemConfig_Any(self, line):
pass
def _handle_file(self, filename):
"""Handles a file that was touched.
      Interestingly enough, the file always comes with an absolute path.
"""
if (not filename or
self.blacklist(filename) or
os.path.isdir(filename) or
filename in self.files or
filename in self.non_existent):
return
logging.debug('_handle_file(%s)' % filename)
if os.path.isfile(filename):
self.files.add(filename)
else:
self.non_existent.add(filename)
def __init__(self):
# Most ignores need to be determined at runtime.
self.IGNORED = set([os.path.dirname(sys.executable)])
# Add many directories from environment variables.
vars_to_ignore = (
'APPDATA',
'LOCALAPPDATA',
'ProgramData',
'ProgramFiles',
'ProgramFiles(x86)',
'ProgramW6432',
'SystemRoot',
'TEMP',
'TMP',
)
for i in vars_to_ignore:
if os.environ.get(i):
self.IGNORED.add(os.environ[i])
# Also add their short path name equivalents.
for i in list(self.IGNORED):
self.IGNORED.add(GetShortPathName(i))
# Add this one last since it has no short path name equivalent.
self.IGNORED.add('\\systemroot')
self.IGNORED = tuple(sorted(self.IGNORED))
@classmethod
def gen_trace(cls, cmd, cwd, logname):
logging.info('gen_trace(%s, %s, %s)' % (cmd, cwd, logname))
# Use "logman -?" for help.
etl = logname + '.etl'
silent = not isEnabledFor(logging.INFO)
stdout = stderr = None
if silent:
stdout = stderr = subprocess.PIPE
# 1. Start the log collection. Requires administrative access. logman.exe is
# synchronous so no need for a "warmup" call.
# 'Windows Kernel Trace' is *localized* so use its GUID instead.
# The GUID constant name is SystemTraceControlGuid. Lovely.
cmd_start = [
'logman.exe',
'start',
'NT Kernel Logger',
'-p', '{9e814aad-3204-11d2-9a82-006008a86939}',
'(process,img,file,fileio)',
'-o', etl,
'-ets', # Send directly to kernel
]
logging.debug('Running: %s' % cmd_start)
subprocess.check_call(cmd_start, stdout=stdout, stderr=stderr)
# 2. Run the child process.
logging.debug('Running: %s' % cmd)
try:
child = subprocess.Popen(cmd, cwd=cwd, stdout=stdout, stderr=stderr)
out, err = child.communicate()
finally:
# 3. Stop the log collection.
cmd_stop = [
'logman.exe',
'stop',
'NT Kernel Logger',
'-ets', # Send directly to kernel
]
logging.debug('Running: %s' % cmd_stop)
subprocess.check_call(cmd_stop, stdout=stdout, stderr=stderr)
# 4. Convert the traces to text representation.
# Use "tracerpt -?" for help.
LOCALE_INVARIANT = 0x7F
windll.kernel32.SetThreadLocale(LOCALE_INVARIANT)
cmd_convert = [
'tracerpt.exe',
'-l', etl,
'-o', logname,
'-gmt', # Use UTC
'-y', # No prompt
]
    # Normally, 'csv' is sufficient. If complex scripts are used (like eastern
    # languages), use 'csv_utf16'. If localization gets in the way, use 'xml'.
logformat = 'csv'
if logformat == 'csv':
# tracerpt localizes the 'Type' column, for major brainfuck
# entertainment. I can't imagine any sane reason to do that.
cmd_convert.extend(['-of', 'CSV'])
elif logformat == 'csv_utf16':
# This causes it to use UTF-16, which doubles the log size but ensures the
# log is readable for non-ASCII characters.
cmd_convert.extend(['-of', 'CSV', '-en', 'Unicode'])
elif logformat == 'xml':
cmd_convert.extend(['-of', 'XML'])
else:
assert False, logformat
logging.debug('Running: %s' % cmd_convert)
subprocess.check_call(cmd_convert, stdout=stdout, stderr=stderr)
if child.returncode != 0:
print 'Failure: %d' % child.returncode
# pylint: disable=E1103
if out:
print ''.join(out.splitlines(True)[-100:])
if err:
print ''.join(err.splitlines(True)[-100:])
return child.returncode
@classmethod
def parse_log(cls, filename, blacklist):
logging.info('parse_log(%s, %s)' % (filename, blacklist))
# Auto-detect the log format
with open(filename, 'rb') as f:
hdr = f.read(2)
assert len(hdr) == 2
if hdr == '<E':
# It starts with <Events>
logformat = 'xml'
    elif hdr == '\xFF\xFE':
      # UTF-16 little-endian BOM.
logformat = 'csv_utf16'
else:
logformat = 'csv'
context = cls._Context(blacklist)
if logformat == 'csv_utf16':
def utf_8_encoder(unicode_csv_data):
"""Encodes the unicode object as utf-8 encoded str instance"""
for line in unicode_csv_data:
yield line.encode('utf-8')
def unicode_csv_reader(unicode_csv_data, **kwargs):
"""Encodes temporarily as UTF-8 since csv module doesn't do unicode."""
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data), **kwargs)
for row in csv_reader:
# Decode str utf-8 instances back to unicode instances, cell by cell:
yield [cell.decode('utf-8') for cell in row]
# The CSV file is UTF-16 so use codecs.open() to load the file into the
# python internal unicode format (utf-8). Then explicitly re-encode as
# utf8 as str instances so csv can parse it fine. Then decode the utf-8
# str back into python unicode instances. This sounds about right.
for line in unicode_csv_reader(codecs.open(filename, 'r', 'utf-16')):
# line is a list of unicode objects
context.on_csv_line(line)
elif logformat == 'csv':
def ansi_csv_reader(ansi_csv_data, **kwargs):
"""Loads an 'ANSI' code page and returns unicode() objects."""
assert sys.getfilesystemencoding() == 'mbcs'
encoding = get_current_encoding()
for row in csv.reader(ansi_csv_data, **kwargs):
# Decode str 'ansi' instances to unicode instances, cell by cell:
yield [cell.decode(encoding) for cell in row]
# The fastest and smallest format but only supports 'ANSI' file paths.
      # E.g. the filenames are encoded in the 'current' encoding.
for line in ansi_csv_reader(open(filename)):
# line is a list of unicode objects
context.on_csv_line(line)
else:
raise NotImplementedError('Implement %s' % logformat)
return (
set(os.path.realpath(f) for f in context.files),
set(os.path.realpath(f) for f in context.non_existent))
def relevant_files(files, root):
"""Trims the list of files to keep the expected files and unexpected files.
Unexpected files are files that are not based inside the |root| directory.
"""
expected = []
unexpected = []
for f in files:
if f.startswith(root):
f = f[len(root):]
assert f
expected.append(f)
else:
unexpected.append(f)
return sorted(set(expected)), sorted(set(unexpected))
def extract_directories(files, root):
"""Detects if all the files in a directory were loaded and if so, replace the
individual files by the directory entry.
"""
directories = set(os.path.dirname(f) for f in files)
files = set(files)
for directory in sorted(directories, reverse=True):
actual = set(
os.path.join(directory, f) for f in
os.listdir(os.path.join(root, directory))
if not f.endswith(('.svn', '.pyc'))
)
if not (actual - files):
files -= actual
files.add(directory + os.path.sep)
return sorted(files)
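# Worked example with a hypothetical tree: if root/icons/ holds exactly a.png
# and b.png and both were traced, the two files collapse into the directory
# entry:
#   extract_directories(['icons/a.png', 'icons/b.png', 'main.cc'], root)
#   # -> ['icons/', 'main.cc']  (with os.path.sep == '/')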
def pretty_print(variables, stdout):
"""Outputs a gyp compatible list from the decoded variables.
  Similar to pprint.pprint() but with NIH syndrome.
"""
# Order the dictionary keys by these keys in priority.
ORDER = (
'variables', 'condition', 'command', 'relative_cwd', 'read_only',
KEY_TRACKED, KEY_UNTRACKED)
def sorting_key(x):
"""Gives priority to 'most important' keys before the others."""
if x in ORDER:
return str(ORDER.index(x))
return x
def loop_list(indent, items):
for item in items:
if isinstance(item, basestring):
stdout.write('%s\'%s\',\n' % (indent, item))
elif isinstance(item, dict):
stdout.write('%s{\n' % indent)
loop_dict(indent + ' ', item)
stdout.write('%s},\n' % indent)
elif isinstance(item, list):
# A list inside a list will write the first item embedded.
stdout.write('%s[' % indent)
for index, i in enumerate(item):
if isinstance(i, basestring):
stdout.write(
'\'%s\', ' % i.replace('\\', '\\\\').replace('\'', '\\\''))
elif isinstance(i, dict):
stdout.write('{\n')
loop_dict(indent + ' ', i)
if index != len(item) - 1:
x = ', '
else:
x = ''
stdout.write('%s}%s' % (indent, x))
else:
assert False
stdout.write('],\n')
else:
assert False
def loop_dict(indent, items):
for key in sorted(items, key=sorting_key):
item = items[key]
stdout.write("%s'%s': " % (indent, key))
if isinstance(item, dict):
stdout.write('{\n')
loop_dict(indent + ' ', item)
stdout.write(indent + '},\n')
elif isinstance(item, list):
stdout.write('[\n')
loop_list(indent + ' ', item)
stdout.write(indent + '],\n')
elif isinstance(item, basestring):
stdout.write(
'\'%s\',\n' % item.replace('\\', '\\\\').replace('\'', '\\\''))
elif item in (True, False, None):
stdout.write('%s\n' % item)
else:
assert False, item
stdout.write('{\n')
loop_dict(' ', variables)
stdout.write('}\n')
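# Rough shape of the output (an illustration, not captured output): for the
# dict built in trace_inputs() below, pretty_print() writes something like
#   {
#     'conditions': [
#       ['OS=="linux"', {
#         'variables': {
#           'isolate_dependency_tracked': [
#             'file1',
#           ],
#         },
#       }],
#     ],
#   }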
def trace_inputs(logfile, cmd, root_dir, cwd_dir, product_dir, force_trace):
"""Tries to load the logs if available. If not, trace the test.
Symlinks are not processed at all.
Arguments:
- logfile: Absolute path to the OS-specific trace.
- cmd: Command list to run.
- root_dir: Base directory where the files we care about live.
- cwd_dir: Cwd to use to start the process, relative to the root_dir
directory.
- product_dir: Directory containing the executables built by the build
process, relative to the root_dir directory. It is used to
properly replace paths with <(PRODUCT_DIR) for gyp output.
- force_trace: Will force to trace unconditionally even if a trace already
exist.
"""
logging.debug(
'trace_inputs(%s, %s, %s, %s, %s, %s)' % (
logfile, cmd, root_dir, cwd_dir, product_dir, force_trace))
  # It is important to have unambiguous paths.
assert os.path.isabs(root_dir), root_dir
assert os.path.isabs(logfile), logfile
assert not cwd_dir or not os.path.isabs(cwd_dir), cwd_dir
assert not product_dir or not os.path.isabs(product_dir), product_dir
cmd = fix_python_path(cmd)
assert (
(os.path.isfile(logfile) and not force_trace) or os.path.isabs(cmd[0])
), cmd[0]
# Resolve any symlink
root_dir = os.path.realpath(root_dir)
def print_if(txt):
if cwd_dir is None:
print(txt)
flavor = get_flavor()
if flavor == 'linux':
api = Strace()
elif flavor == 'mac':
api = Dtrace()
elif sys.platform == 'win32':
api = LogmanTrace()
else:
print >> sys.stderr, 'Unsupported platform %s' % sys.platform
return 1
if not os.path.isfile(logfile) or force_trace:
if os.path.isfile(logfile):
os.remove(logfile)
print_if('Tracing... %s' % cmd)
cwd = root_dir
# Use the proper relative directory.
if cwd_dir:
cwd = os.path.join(cwd, cwd_dir)
returncode = api.gen_trace(cmd, cwd, logfile)
if returncode and not force_trace:
return returncode
git_path = os.path.sep + '.git' + os.path.sep
svn_path = os.path.sep + '.svn' + os.path.sep
def blacklist(f):
"""Strips ignored paths."""
return (
f.startswith(api.IGNORED) or
f.endswith('.pyc') or
git_path in f or
svn_path in f)
print_if('Loading traces... %s' % logfile)
files, non_existent = api.parse_log(logfile, blacklist)
print_if('Total: %d' % len(files))
print_if('Non existent: %d' % len(non_existent))
for f in non_existent:
print_if(' %s' % f)
expected, unexpected = relevant_files(
files, root_dir.rstrip(os.path.sep) + os.path.sep)
if unexpected:
print_if('Unexpected: %d' % len(unexpected))
for f in unexpected:
print_if(' %s' % f)
# In case the file system is case insensitive.
expected = sorted(set(get_native_path_case(root_dir, f) for f in expected))
simplified = extract_directories(expected, root_dir)
print_if('Interesting: %d reduced to %d' % (len(expected), len(simplified)))
for f in simplified:
print_if(' %s' % f)
if cwd_dir is not None:
def cleanuppath(x):
"""Cleans up a relative path. Converts any os.path.sep to '/' on Windows.
"""
if x:
x = x.rstrip(os.path.sep).replace(os.path.sep, '/')
if x == '.':
x = ''
if x:
x += '/'
return x
# Both are relative directories to root_dir.
cwd_dir = cleanuppath(cwd_dir)
product_dir = cleanuppath(product_dir)
def fix(f):
"""Bases the file on the most restrictive variable."""
logging.debug('fix(%s)' % f)
# Important, GYP stores the files with / and not \.
f = f.replace(os.path.sep, '/')
if product_dir and f.startswith(product_dir):
return '<(PRODUCT_DIR)/%s' % f[len(product_dir):]
else:
# cwd_dir is usually the directory containing the gyp file. It may be
# empty if the whole directory containing the gyp file is needed.
return posix_relpath(f, cwd_dir) or './'
corrected = [fix(f) for f in simplified]
tracked = [f for f in corrected if not f.endswith('/') and ' ' not in f]
untracked = [f for f in corrected if f.endswith('/') or ' ' in f]
variables = {}
if tracked:
variables[KEY_TRACKED] = tracked
if untracked:
variables[KEY_UNTRACKED] = untracked
value = {
'conditions': [
['OS=="%s"' % flavor, {
'variables': variables,
}],
],
}
pretty_print(value, sys.stdout)
return 0
def main():
parser = optparse.OptionParser(
usage='%prog <options> [cmd line...]')
parser.allow_interspersed_args = False
parser.add_option(
'-v', '--verbose', action='count', default=0, help='Use multiple times')
parser.add_option('-l', '--log', help='Log file')
parser.add_option(
'-c', '--cwd',
help='Signal to start the process from this relative directory. When '
'specified, outputs the inputs files in a way compatible for '
'gyp processing. Should be set to the relative path containing the '
'gyp file, e.g. \'chrome\' or \'net\'')
parser.add_option(
'-p', '--product-dir', default='out/Release',
help='Directory for PRODUCT_DIR. Default: %default')
parser.add_option(
'--root-dir', default=ROOT_DIR,
help='Root directory to base everything off. Default: %default')
parser.add_option(
'-f', '--force',
action='store_true',
default=False,
help='Force to retrace the file')
options, args = parser.parse_args()
level = [logging.ERROR, logging.INFO, logging.DEBUG][min(2, options.verbose)]
logging.basicConfig(
level=level,
format='%(levelname)5s %(module)15s(%(lineno)3d):%(message)s')
if not options.log:
parser.error('Must supply a log file with -l')
if not args:
if not os.path.isfile(options.log) or options.force:
parser.error('Must supply a command to run')
else:
args[0] = os.path.abspath(args[0])
if options.root_dir:
options.root_dir = os.path.abspath(options.root_dir)
return trace_inputs(
os.path.abspath(options.log),
args,
options.root_dir,
options.cwd,
options.product_dir,
options.force)
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "cb2ecf85ee2415dc42951e8e4621ed64",
"timestamp": "",
"source": "github",
"line_count": 1451,
"max_line_length": 83,
"avg_line_length": 32.48793935217092,
"alnum_prop": 0.5901569792108613,
"repo_name": "gx1997/chrome-loongson",
"id": "f0e9b17c145439ac530a21b8c278a5ba95c9b0bd",
"size": "47140",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/isolate/trace_inputs.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "1178292"
},
{
"name": "C",
"bytes": "74543799"
},
{
"name": "C++",
"bytes": "120777816"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "18556"
},
{
"name": "Java",
"bytes": "62764"
},
{
"name": "JavaScript",
"bytes": "10089209"
},
{
"name": "Objective-C",
"bytes": "6954601"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "1047454"
},
{
"name": "Python",
"bytes": "6417217"
},
{
"name": "R",
"bytes": "524"
},
{
"name": "Ruby",
"bytes": "837"
},
{
"name": "Shell",
"bytes": "5356327"
},
{
"name": "Tcl",
"bytes": "277077"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib import messages
from django.forms.formsets import formset_factory
from django.forms.models import inlineformset_factory
# Assumption: nestedformset_factory is provided by the django-nested-formset
# package; EditQuestionsView.get_form_class() below relies on it.
from nested_formset import nestedformset_factory
from django.http import JsonResponse
from django.views.generic import (
ListView,
CreateView,
UpdateView
)
from django.db.models import Value
from django.db.models.fields import IntegerField
from django.contrib.auth.decorators import login_required
from django_tables2 import RequestConfig, Column
from .models import Survey, Question, Choice, SurveyNotification
from .forms import (SurveyCreationForm, AnswerChoice, QuestionForm, BaseOptionFormSet, StageAssignmentForm, BaseChoicesFormSet, SurveyMetaForm )
from .tables import SurveysTable, SurveySelectableTable
from .backend import (
store_qa,
fetchSurvey,
assign_survey_to_stage,
assign_patient_to_stage,
fetch_survey,
fetchSurveyQuestions,
store_answer,
softDeleteSurvey,
store_survey_metadata
)
from subjects.tables import PatientSelectableTable
from subjects.models import Subject
from researchers.forms import TreatmentSelectionForm
from surveys import *
PAGE_SIZE = 10
def survey_completed(request):
return render(request, 'survey_completed.html')
def serve_survey(request, key):
notification = None
qa = None
if request.method == 'GET':
notification = fetch_survey(key)
if notification is not None:
AnswerFormSet = formset_factory(AnswerChoice, extra=0, formset=BaseChoicesFormSet)
qa = AnswerFormSet(form_kwargs={'mode': 'QUESTION'}, initial=fetchSurveyQuestions(None, notification.survey.id))
else:
notification = fetch_survey(key)
if notification is not None:
AnswerFormSet = formset_factory(AnswerChoice, formset=BaseChoicesFormSet)
qa = AnswerFormSet(request.POST, form_kwargs={'mode': 'QUESTION'}, initial=fetchSurveyQuestions(None, notification.survey.id))
if qa.is_valid():
outcome = store_answer(notification, qa)
if outcome['result']:
return redirect('survey-completed')
return render(request, 'survey_form.html', {'notification': notification, 'answers_form':qa})
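# Hypothetical URL wiring for the two views above (this module's urls.py is
# not shown here); the route names mirror the redirect() call in
# serve_survey():
#
#   from django.urls import path
#   urlpatterns = [
#       path('survey/<str:key>/', serve_survey, name='serve-survey'),
#       path('survey/completed/', survey_completed, name='survey-completed'),
#   ]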
@login_required
def set_patient_to_stage(request, treatment_id):
form = TreatmentSelectionForm(treatmentId=treatment_id)
if request.method == 'GET':
stage_form = StageAssignmentForm(item_name='paciente', treatment_id=treatment_id)
table = PatientSelectableTable(Subject.objects.filter(treatment_subjects__treatment_id=treatment_id))
RequestConfig(request, paginate={"per_page": PAGE_SIZE}).configure(table)
else:
stage_form = StageAssignmentForm(request.POST, item_name='paciente', treatment_id=treatment_id)
table = PatientSelectableTable(Subject.objects.filter(treatment_subjects__treatment_id=treatment_id))
RequestConfig(request, paginate={"per_page": PAGE_SIZE}).configure(table)
if stage_form.is_valid():
output = assign_patient_to_stage(stage_form, treatment_id)
if output['result']:
messages.success(request, output['status'])
form = StageAssignmentForm(item_name='paciente', treatment_id=treatment_id)
return render(request, 'patient_to_stage.html', {'form': form, 'stage_form': stage_form,'patients_table': table,
'treatment_id':treatment_id})
#return render(request, 'study_template.html', {'form': form, 'patients_table': table, 'treatment_id':treatment_id})
@login_required
def set_survey_to_stage(request, treatment_id):
if request.method == 'GET':
form = StageAssignmentForm(item_name='cuestionario')
table = SurveySelectableTable(Survey.objects.all())
RequestConfig(request, paginate={"per_page": PAGE_SIZE}).configure(table)
else:
form = StageAssignmentForm(data=request.POST, item_name='cuestionario')
table = SurveySelectableTable(Survey.objects.all())
RequestConfig(request, paginate={"per_page": PAGE_SIZE}).configure(table)
if form.is_valid():
output = assign_survey_to_stage(form, treatment_id)
if output['result']:
messages.success(request, output['status'])
form = StageAssignmentForm()
return render(request, 'survey_to_stage.html', {'form': form, 'surveys_table': table})
@login_required
def list_treatment_surveys(request, treatment_id):
if request.method == 'GET':
form = TreatmentSelectionForm(treatmentId=treatment_id)
table = SurveysTable(Survey.objects.filter(disabled=False).annotate(treatment_id=Value(treatment_id, output_field=IntegerField())))
RequestConfig(request, paginate={"per_page": 10}).configure(table)
return render(request, 'survey_list.html', {'form': form, 'surveys_table': table, 'treatment_id':treatment_id})
@login_required()
def delete_survey(request, survey_id, treatment_id):
if request.method == 'GET':
softDeleteSurvey(survey_id, treatment_id)
table = SurveysTable(Survey.objects.filter(disabled=False).annotate(treatment_id=Value(treatment_id, output_field=IntegerField())))
RequestConfig(request, paginate={"per_page": 10}).configure(table)
return render(request, 'survey_list.html', {'surveys_table': table})
@login_required
def edit_survey(request, treatment_id, survey_id, active_tab=None):
form = TreatmentSelectionForm(treatmentId=treatment_id)
    # Build the empty question/options forms up front; the render() call
    # below needs them for both GET and POST requests.
    OptionFormSet = formset_factory(AnswerChoice, min_num=1, extra=0, max_num=10, validate_min=True,
                                    can_delete=True, formset=BaseOptionFormSet)
    optionsformSet = OptionFormSet()
    questionForm = QuestionForm(surveyId=survey_id, answerType=None, treatment_id=treatment_id)
    survey_data = fetchSurvey(treatment_id, survey_id)
    if request.method == 'GET':
        survey_meta_form = SurveyMetaForm(survey_data.getSurveyMetadata())
    else:
        survey_meta_form = SurveyMetaForm(request.POST)
        if survey_meta_form.is_valid():
            output = store_survey_metadata(survey_meta_form, survey_id)
            if output['result']:
                messages.success(request, output['status'])
return render(request, 'survey_edit.html', {'form': form, 'treatment_id': treatment_id, 'survey_id': survey_id,
'active_tab': active_tab, 'survey_data': survey_data,
'survey_meta_form': survey_meta_form, 'options_formset': optionsformSet,
'question_form': questionForm})
@login_required
def define_qa(request, treatment_id=None, survey_id=None, var_type=None):
questionForm = None
OptionFormSet = formset_factory(AnswerChoice, min_num=1, extra=0, max_num=10, validate_min=True, can_delete=True, formset=BaseOptionFormSet)
if request.method == 'GET':
optionsformSet = OptionFormSet()
questionForm = QuestionForm(surveyId=survey_id, answerType=var_type, treatment_id=treatment_id)
else:
optionsformSet = OptionFormSet(data=request.POST)
questionForm = QuestionForm(data=request.POST)
process_qa(questionForm, optionsformSet)
# return render(request, 'qa_created.html')
loc_treatment_id = questionForm.cleaned_data['treatment_id']
loc_survey_id = questionForm.cleaned_data['survey_id']
form = TreatmentSelectionForm(treatmentId=loc_treatment_id)
survey_data = fetchSurvey(loc_treatment_id, loc_survey_id)
survey_meta_form = SurveyMetaForm(survey_data.getSurveyMetadata())
return render(request, 'survey_edit.html',
{'form': form,
'treatment_id': loc_treatment_id,
'survey_id': loc_survey_id,
'active_tab': '2', 'survey_data': survey_data,
'survey_meta_form': survey_meta_form, 'options_formset': optionsformSet,
'question_form': questionForm})
#return render(request, 'answer_def.html', {'options_formset': optionsformSet, 'question_form': questionForm} )
def process_qa(questionForm, optionsformSet):
"""
First layer of validation
:param questionForm:
:param optionsformSet:
    :return: dict with 'result' and 'status' keys describing the outcome
"""
if questionForm.is_valid() and optionsformSet.is_valid():
#print(questionForm.cleaned_data.get("question_text_message"))
#JsonResponse({"alles_gut": "Doch"})
return store_qa(questionForm, optionsformSet)
#return {'result': True, 'status': 'validación ha detectado error'} #store_qa(questionForm, optionsformSet)
else:
return {'result': False, 'status': 'validación ha detectado error'}
class ListSurveysView(ListView):
model = Survey
fields = '__all__'
template_name = 'survey_list.html'
def get_context_data(self, **kwargs):
context = super(ListSurveysView, self).get_context_data(**kwargs)
context['treatment_id'] = self.kwargs.get('treatment_id')
return context
class CreateSurveyView(CreateView):
model = Survey
#fields = '__all__'
template_name = 'create_survey.html'
form_class = SurveyCreationForm
#def __init__(self, *args, **kwargs):
# super(CreateSurveyView, self).__init__(*args, **kwargs)
#self.treatment_id = kwargs.get('treatment_id')
def get_context_data(self, **kwargs):
context = super(CreateSurveyView, self).get_context_data(**kwargs)
context['treatment_id'] = self.kwargs.get('treatment_id')
return context
def get_success_url(self):
return reverse('surveys-list', kwargs={'treatment_id': self.kwargs.get('treatment_id')})
class EditQuestionsView(UpdateView):
model = Survey
fields = '__all__'
template_name = 'question_form.html'
def get_template_names(self):
return ['question_form.html']
def get_form_class(self):
return nestedformset_factory(
Survey,
Question,
#Choice
nested_formset=inlineformset_factory(
Question,
Choice,
fields = '__all__'
)
)
def get_success_url(self):
return reverse('surveys-list')
|
{
"content_hash": "1b807ff71a98698f50eab26ac3be4c16",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 144,
"avg_line_length": 41.99603174603175,
"alnum_prop": 0.6586979117452518,
"repo_name": "luisen14/treatment-tracking-project",
"id": "f2193c3ecffb65c28bfea6481b332d141ea7c856",
"size": "10585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treatment_tracker/surveys/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24928"
},
{
"name": "HTML",
"bytes": "131306"
},
{
"name": "JavaScript",
"bytes": "3403"
},
{
"name": "Python",
"bytes": "120860"
}
],
"symlink_target": ""
}
|
class RemotePHPUnitSettings:
PLUGIN_FOLDER = 'remote-phpunit'
def __init__(self, sublime):
self._settings_file = self.PLUGIN_FOLDER + '.sublime-settings'
self._sublime = sublime
@property
def root(self):
return self._get('root')
@property
def cl_options(self):
return self._get('options')
@property
def path_to_phpunit(self):
return self._get('path_to_phpunit')
@property
def tests_folder(self):
return self._get('tests_folder')
@property
def xml_config(self):
return self._get('xml_config')
def _get(self, name):
settings = self._sublime.load_settings(self._settings_file)
return settings.get(name)
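# Usage sketch (hypothetical): inside a Sublime Text plugin command, inject
# the sublime module so the settings file is re-read lazily on every access:
#
#   settings = RemotePHPUnitSettings(sublime)
#   phpunit = settings.path_to_phpunit  # reads remote-phpunit.sublime-settings
#   flags = settings.cl_options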
|
{
"content_hash": "74e3ae929426846f5b41e79b36994dab",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 23.612903225806452,
"alnum_prop": 0.6092896174863388,
"repo_name": "ldgit/remote-phpunit",
"id": "c20d8b086b1600eb706221417a985e2272bf7887",
"size": "732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/remote_phpunit_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44204"
}
],
"symlink_target": ""
}
|
__author__ = 'Alex Bo'
__email__ = 'bosha@the-bosha.ru'
from torhandlers.templating.base import BaseTemplater
class TornadoTemplater(BaseTemplater):
"""
    Dummy class to support the base Tornado template engine
"""
|
{
"content_hash": "b5fa8a3fe5dcc83e9371e0faa03f9f12",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.7098214285714286,
"repo_name": "bosha/torhandlers",
"id": "c977b1ee512cf84df7b47213d17e342a90a787db",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torhandlers/templating/tornado.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "817"
},
{
"name": "Python",
"bytes": "59317"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hydroshare.settings')
os.environ.setdefault('PYTHONPATH', '/hydroshare/hydroshare')
app = Celery('hydroshare', backend='amqp://')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
{
"content_hash": "84fef9f435de5b127a92eb67a674841c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 28.58823529411765,
"alnum_prop": 0.7613168724279835,
"repo_name": "FescueFungiShare/hydroshare",
"id": "b04b29661818adae2c741f81c4e8f8c2c3dfc00f",
"size": "486",
"binary": false,
"copies": "3",
"ref": "refs/heads/FescueFungiShare-develop",
"path": "hydroshare/celery.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "374952"
},
{
"name": "HTML",
"bytes": "1107800"
},
{
"name": "JavaScript",
"bytes": "1822132"
},
{
"name": "Python",
"bytes": "3599347"
},
{
"name": "R",
"bytes": "4475"
},
{
"name": "Shell",
"bytes": "49970"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import ImproperlyConfigured
from django.db.models.fields import CharField, DecimalField
from django.utils.translation import gettext_lazy as _
from phonenumber_field.modelfields import PhoneNumberField
from oscar.core import validators
from oscar.forms import fields
from oscar.models.fields.autoslugfield import AutoSlugField
AutoSlugField = AutoSlugField
PhoneNumberField = PhoneNumberField
# https://github.com/django/django/blob/64200c14e0072ba0ffef86da46b2ea82fd1e019a/django/db/models/fields/subclassing.py#L31-L44
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
return self
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
class ExtendedURLField(CharField):
description = _("URL")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, *args, **kwargs)
self.validators.append(validators.ExtendedURLValidator())
def formfield(self, **kwargs):
"""
As with CharField, this will cause URL validation to be performed
twice.
"""
defaults = {
'form_class': fields.ExtendedURLField,
}
defaults.update(kwargs)
return super().formfield(**defaults)
def deconstruct(self):
"""
deconstruct() is needed by Django's migration framework
"""
name, path, args, kwargs = super().deconstruct()
# We have a default value for max_length; remove it in that case
if self.max_length == 200:
del kwargs['max_length']
return name, path, args, kwargs
class PositiveDecimalField(DecimalField):
"""
A simple subclass of ``django.db.models.fields.DecimalField`` that
restricts values to be non-negative.
"""
def formfield(self, **kwargs):
"""
Return a :py:class:`django.forms.Field` instantiated with a ``min_value`` of 0.
"""
return super().formfield(min_value=0)
class UppercaseCharField(CharField):
"""
A simple subclass of ``django.db.models.fields.CharField`` that
restricts all text to be uppercase.
"""
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(
cls, name, **kwargs)
setattr(cls, self.name, Creator(self))
def from_db_value(self, value, *args, **kwargs):
return self.to_python(value)
def to_python(self, value):
"""
Cast the supplied value to uppercase
"""
val = super().to_python(value)
if isinstance(val, str):
return val.upper()
else:
return val
class NullCharField(CharField):
"""
CharField that stores '' as None and returns None as ''
Useful when using unique=True and forms. Implies null==blank==True.
    With a plain CharField, unique=True breaks for multiple blank rows since
    '' == ''; storing blanks as NULL sidesteps that.
"""
description = "CharField that stores '' as None and returns None as ''"
def __init__(self, *args, **kwargs):
if not kwargs.get('null', True) or not kwargs.get('blank', True):
raise ImproperlyConfigured(
"NullCharField implies null==blank==True")
kwargs['null'] = kwargs['blank'] = True
super().__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, Creator(self))
def from_db_value(self, value, *args, **kwargs):
value = self.to_python(value)
# If the value was stored as null, return empty string instead
return value if value is not None else ''
def get_prep_value(self, value):
prepped = super().get_prep_value(value)
return prepped if prepped != "" else None
def deconstruct(self):
"""
deconstruct() is needed by Django's migration framework
"""
name, path, args, kwargs = super().deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
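# Minimal usage sketch (hypothetical model, not part of this module):
#
#   from django.db import models
#
#   class Voucher(models.Model):
#       code = UppercaseCharField(max_length=128, unique=True)
#       site = ExtendedURLField(blank=True)
#       partner_ref = NullCharField(max_length=32, unique=True)
#       discount = PositiveDecimalField(max_digits=6, decimal_places=2)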
|
{
"content_hash": "5974565592e1f17641310dae207060b1",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 127,
"avg_line_length": 32.582089552238806,
"alnum_prop": 0.6266605588639487,
"repo_name": "django-oscar/django-oscar",
"id": "d1e4920db44c51f5b31223a9f5ceda25e8ef2454",
"size": "4366",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/oscar/models/fields/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "HTML",
"bytes": "565297"
},
{
"name": "JavaScript",
"bytes": "41944"
},
{
"name": "Makefile",
"bytes": "4234"
},
{
"name": "Python",
"bytes": "2261460"
},
{
"name": "SCSS",
"bytes": "21815"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
}
|
import sys
from alembic.testing import TestBase, config, mock
from sqlalchemy import MetaData, Column, Table, Integer, String, \
ForeignKeyConstraint
from alembic.testing import eq_
py3k = sys.version_info >= (3, )
from ._autogen_fixtures import AutogenFixtureTest
class AutogenerateForeignKeysTest(AutogenFixtureTest, TestBase):
__backend__ = True
def test_remove_fk(self):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('test', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('test2', String(10)),
ForeignKeyConstraint(['test2'], ['table.test']),
mysql_engine='InnoDB')
Table('table', m2,
Column('test', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('test2', String(10)),
mysql_engine='InnoDB'
)
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ['test2'],
'table', ['test'],
conditional_name="servergenerated"
)
def test_add_fk(self):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('id', Integer, primary_key=True),
Column('test', String(10)),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('test2', String(10)),
mysql_engine='InnoDB')
Table('table', m2,
Column('id', Integer, primary_key=True),
Column('test', String(10)),
mysql_engine='InnoDB')
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('test2', String(10)),
ForeignKeyConstraint(['test2'], ['table.test']),
mysql_engine='InnoDB')
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0], "add_fk",
"user", ["test2"],
"table", ["test"]
)
def test_no_change(self):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('id', Integer, primary_key=True),
Column('test', String(10)),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('test2', Integer),
ForeignKeyConstraint(['test2'], ['table.id']),
mysql_engine='InnoDB')
Table('table', m2,
Column('id', Integer, primary_key=True),
Column('test', String(10)),
mysql_engine='InnoDB')
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('test2', Integer),
ForeignKeyConstraint(['test2'], ['table.id']),
mysql_engine='InnoDB')
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_no_change_composite_fk(self):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('id_1', String(10), primary_key=True),
Column('id_2', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('other_id_1', String(10)),
Column('other_id_2', String(10)),
ForeignKeyConstraint(['other_id_1', 'other_id_2'],
['table.id_1', 'table.id_2']),
mysql_engine='InnoDB')
Table('table', m2,
Column('id_1', String(10), primary_key=True),
Column('id_2', String(10), primary_key=True),
mysql_engine='InnoDB'
)
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('other_id_1', String(10)),
Column('other_id_2', String(10)),
ForeignKeyConstraint(['other_id_1', 'other_id_2'],
['table.id_1', 'table.id_2']),
mysql_engine='InnoDB')
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_add_composite_fk_with_name(self):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('id_1', String(10), primary_key=True),
Column('id_2', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('other_id_1', String(10)),
Column('other_id_2', String(10)),
mysql_engine='InnoDB')
Table('table', m2,
Column('id_1', String(10), primary_key=True),
Column('id_2', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('other_id_1', String(10)),
Column('other_id_2', String(10)),
ForeignKeyConstraint(['other_id_1', 'other_id_2'],
['table.id_1', 'table.id_2'],
name='fk_test_name'),
mysql_engine='InnoDB')
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0], "add_fk",
"user", ['other_id_1', 'other_id_2'],
'table', ['id_1', 'id_2'],
name="fk_test_name"
)
def test_remove_composite_fk(self):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('id_1', String(10), primary_key=True),
Column('id_2', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('other_id_1', String(10)),
Column('other_id_2', String(10)),
ForeignKeyConstraint(['other_id_1', 'other_id_2'],
['table.id_1', 'table.id_2'],
name='fk_test_name'),
mysql_engine='InnoDB')
Table('table', m2,
Column('id_1', String(10), primary_key=True),
Column('id_2', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('a1', String(10), server_default="x"),
Column('other_id_1', String(10)),
Column('other_id_2', String(10)),
mysql_engine='InnoDB')
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ['other_id_1', 'other_id_2'],
"table", ['id_1', 'id_2'],
conditional_name="fk_test_name"
)
def test_add_fk_colkeys(self):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('id_1', String(10), primary_key=True),
Column('id_2', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('other_id_1', String(10)),
Column('other_id_2', String(10)),
mysql_engine='InnoDB')
Table('table', m2,
Column('id_1', String(10), key='tid1', primary_key=True),
Column('id_2', String(10), key='tid2', primary_key=True),
mysql_engine='InnoDB')
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('other_id_1', String(10), key='oid1'),
Column('other_id_2', String(10), key='oid2'),
ForeignKeyConstraint(['oid1', 'oid2'],
['table.tid1', 'table.tid2'],
name='fk_test_name'),
mysql_engine='InnoDB')
diffs = self._fixture(m1, m2)
self._assert_fk_diff(
diffs[0], "add_fk",
"user", ['other_id_1', 'other_id_2'],
'table', ['id_1', 'id_2'],
name="fk_test_name"
)
def test_no_change_colkeys(self):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('id_1', String(10), primary_key=True),
Column('id_2', String(10), primary_key=True),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('other_id_1', String(10)),
Column('other_id_2', String(10)),
ForeignKeyConstraint(['other_id_1', 'other_id_2'],
['table.id_1', 'table.id_2']),
mysql_engine='InnoDB')
Table('table', m2,
Column('id_1', String(10), key='tid1', primary_key=True),
Column('id_2', String(10), key='tid2', primary_key=True),
mysql_engine='InnoDB')
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('other_id_1', String(10), key='oid1'),
Column('other_id_2', String(10), key='oid2'),
ForeignKeyConstraint(['oid1', 'oid2'],
['table.tid1', 'table.tid2']),
mysql_engine='InnoDB')
diffs = self._fixture(m1, m2)
eq_(diffs, [])
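# The following tests exercise autogenerate's include_object hook, which lets
# an env.py filter individual ForeignKeyConstraint objects out of the comparison.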
class IncludeHooksTest(AutogenFixtureTest, TestBase):
__backend__ = True
__requires__ = 'fk_names',
def test_remove_connection_fk(self):
m1 = MetaData()
m2 = MetaData()
ref = Table(
'ref', m1, Column('id', Integer, primary_key=True),
mysql_engine='InnoDB')
t1 = Table(
't', m1, Column('x', Integer), Column('y', Integer),
mysql_engine='InnoDB')
t1.append_constraint(
ForeignKeyConstraint([t1.c.x], [ref.c.id], name="fk1")
)
t1.append_constraint(
ForeignKeyConstraint([t1.c.y], [ref.c.id], name="fk2")
)
ref = Table(
'ref', m2, Column('id', Integer, primary_key=True),
mysql_engine='InnoDB')
Table(
't', m2, Column('x', Integer), Column('y', Integer),
mysql_engine='InnoDB')
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, ForeignKeyConstraint) and
type_ == 'foreign_key_constraint'
and reflected and name == 'fk1')
diffs = self._fixture(m1, m2, object_filters=include_object)
self._assert_fk_diff(
diffs[0], "remove_fk",
't', ['y'], 'ref', ['id'],
conditional_name='fk2'
)
eq_(len(diffs), 1)
def test_add_metadata_fk(self):
m1 = MetaData()
m2 = MetaData()
Table(
'ref', m1,
Column('id', Integer, primary_key=True), mysql_engine='InnoDB')
Table(
't', m1,
Column('x', Integer), Column('y', Integer), mysql_engine='InnoDB')
ref = Table(
'ref', m2, Column('id', Integer, primary_key=True),
mysql_engine='InnoDB')
t2 = Table(
't', m2, Column('x', Integer), Column('y', Integer),
mysql_engine='InnoDB')
t2.append_constraint(
ForeignKeyConstraint([t2.c.x], [ref.c.id], name="fk1")
)
t2.append_constraint(
ForeignKeyConstraint([t2.c.y], [ref.c.id], name="fk2")
)
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, ForeignKeyConstraint) and
type_ == 'foreign_key_constraint'
and not reflected and name == 'fk1')
diffs = self._fixture(m1, m2, object_filters=include_object)
self._assert_fk_diff(
diffs[0], "add_fk",
't', ['y'], 'ref', ['id'],
name='fk2'
)
eq_(len(diffs), 1)
def test_change_fk(self):
m1 = MetaData()
m2 = MetaData()
r1a = Table(
'ref_a', m1,
Column('a', Integer, primary_key=True),
mysql_engine='InnoDB'
)
Table(
'ref_b', m1,
Column('a', Integer, primary_key=True),
Column('b', Integer, primary_key=True),
mysql_engine='InnoDB'
)
t1 = Table(
't', m1, Column('x', Integer),
Column('y', Integer), Column('z', Integer),
mysql_engine='InnoDB')
t1.append_constraint(
ForeignKeyConstraint([t1.c.x], [r1a.c.a], name="fk1")
)
t1.append_constraint(
ForeignKeyConstraint([t1.c.y], [r1a.c.a], name="fk2")
)
Table(
'ref_a', m2,
Column('a', Integer, primary_key=True),
mysql_engine='InnoDB'
)
r2b = Table(
'ref_b', m2,
Column('a', Integer, primary_key=True),
Column('b', Integer, primary_key=True),
mysql_engine='InnoDB'
)
t2 = Table(
't', m2, Column('x', Integer),
Column('y', Integer), Column('z', Integer),
mysql_engine='InnoDB')
t2.append_constraint(
ForeignKeyConstraint(
[t2.c.x, t2.c.z], [r2b.c.a, r2b.c.b], name="fk1")
)
t2.append_constraint(
ForeignKeyConstraint(
[t2.c.y, t2.c.z], [r2b.c.a, r2b.c.b], name="fk2")
)
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, ForeignKeyConstraint) and
type_ == 'foreign_key_constraint'
and name == 'fk1'
)
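        # With fk1 filtered out entirely, only fk2's retarget (ref_a -> ref_b,
        # picking up column z) should surface, as a remove_fk/add_fk pair.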
diffs = self._fixture(m1, m2, object_filters=include_object)
self._assert_fk_diff(
diffs[0], "remove_fk",
't', ['y'], 'ref_a', ['a'],
name='fk2'
)
self._assert_fk_diff(
diffs[1], "add_fk",
't', ['y', 'z'], 'ref_b', ['a', 'b'],
name='fk2'
)
eq_(len(diffs), 2)
class AutogenerateFKOptionsTest(AutogenFixtureTest, TestBase):
__backend__ = True
__requires__ = ('sqlalchemy_09', )
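    # Build two otherwise-identical schemas whose single FK differs only in the
    # given options, then return the autogenerate diffs between them.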
def _fk_opts_fixture(self, old_opts, new_opts):
m1 = MetaData()
m2 = MetaData()
Table('table', m1,
Column('id', Integer, primary_key=True),
Column('test', String(10)),
mysql_engine='InnoDB')
Table('user', m1,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('tid', Integer),
ForeignKeyConstraint(['tid'], ['table.id'], **old_opts),
mysql_engine='InnoDB')
Table('table', m2,
Column('id', Integer, primary_key=True),
Column('test', String(10)),
mysql_engine='InnoDB')
Table('user', m2,
Column('id', Integer, primary_key=True),
Column('name', String(50), nullable=False),
Column('tid', Integer),
ForeignKeyConstraint(['tid'], ['table.id'], **new_opts),
mysql_engine='InnoDB')
return self._fixture(m1, m2)
def _expect_opts_supported(self, deferrable=False, initially=False):
if not config.requirements.reflects_fk_options.enabled:
return False
if deferrable and not config.requirements.fk_deferrable.enabled:
return False
if initially and not config.requirements.fk_initially.enabled:
return False
return True
def test_add_ondelete(self):
diffs = self._fk_opts_fixture(
{}, {"ondelete": "cascade"}
)
if self._expect_opts_supported():
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
ondelete=None,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
ondelete="cascade"
)
else:
eq_(diffs, [])
def test_remove_ondelete(self):
diffs = self._fk_opts_fixture(
{"ondelete": "cascade"}, {}
)
if self._expect_opts_supported():
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
ondelete="CASCADE",
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
ondelete=None
)
else:
eq_(diffs, [])
def test_nochange_ondelete(self):
"""test case sensitivity"""
diffs = self._fk_opts_fixture(
{"ondelete": "caSCAde"}, {"ondelete": "CasCade"}
)
eq_(diffs, [])
def test_add_onupdate(self):
diffs = self._fk_opts_fixture(
{}, {"onupdate": "cascade"}
)
if self._expect_opts_supported():
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
onupdate=None,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
onupdate="cascade"
)
else:
eq_(diffs, [])
def test_remove_onupdate(self):
diffs = self._fk_opts_fixture(
{"onupdate": "cascade"}, {}
)
if self._expect_opts_supported():
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
onupdate="CASCADE",
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
onupdate=None
)
else:
eq_(diffs, [])
def test_nochange_onupdate(self):
"""test case sensitivity"""
diffs = self._fk_opts_fixture(
{"onupdate": "caSCAde"}, {"onupdate": "CasCade"}
)
eq_(diffs, [])
def test_nochange_ondelete_restrict(self):
"""test the RESTRICT option which MySQL doesn't report on"""
diffs = self._fk_opts_fixture(
{"ondelete": "restrict"}, {"ondelete": "restrict"}
)
eq_(diffs, [])
def test_nochange_onupdate_restrict(self):
"""test the RESTRICT option which MySQL doesn't report on"""
diffs = self._fk_opts_fixture(
{"onupdate": "restrict"}, {"onupdate": "restrict"}
)
eq_(diffs, [])
def test_nochange_ondelete_noaction(self):
"""test the NO ACTION option which generally comes back as None"""
diffs = self._fk_opts_fixture(
{"ondelete": "no action"}, {"ondelete": "no action"}
)
eq_(diffs, [])
def test_nochange_onupdate_noaction(self):
"""test the NO ACTION option which generally comes back as None"""
diffs = self._fk_opts_fixture(
{"onupdate": "no action"}, {"onupdate": "no action"}
)
eq_(diffs, [])
def test_change_ondelete_from_restrict(self):
"""test the RESTRICT option which MySQL doesn't report on"""
# note that this is impossible to detect if we change
# from RESTRICT to NO ACTION on MySQL.
diffs = self._fk_opts_fixture(
{"ondelete": "restrict"}, {"ondelete": "cascade"}
)
if self._expect_opts_supported():
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
onupdate=None,
ondelete=mock.ANY, # MySQL reports None, PG reports RESTRICT
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
onupdate=None,
ondelete="cascade"
)
else:
eq_(diffs, [])
def test_change_onupdate_from_restrict(self):
"""test the RESTRICT option which MySQL doesn't report on"""
# note that this is impossible to detect if we change
# from RESTRICT to NO ACTION on MySQL.
diffs = self._fk_opts_fixture(
{"onupdate": "restrict"}, {"onupdate": "cascade"}
)
if self._expect_opts_supported():
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
onupdate=mock.ANY, # MySQL reports None, PG reports RESTRICT
ondelete=None,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
onupdate="cascade",
ondelete=None
)
else:
eq_(diffs, [])
def test_ondelete_onupdate_combo(self):
diffs = self._fk_opts_fixture(
{"onupdate": "cascade", "ondelete": "set null"},
{"onupdate": "restrict", "ondelete": "restrict"}
)
if self._expect_opts_supported():
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
onupdate="CASCADE",
ondelete="SET NULL",
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
onupdate="restrict",
ondelete="restrict"
)
else:
eq_(diffs, [])
@config.requirements.fk_initially
def test_add_initially_deferred(self):
diffs = self._fk_opts_fixture(
{}, {"initially": "deferred"}
)
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
initially=None,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
initially="deferred"
)
@config.requirements.fk_initially
def test_remove_initially_deferred(self):
diffs = self._fk_opts_fixture(
{"initially": "deferred"}, {}
)
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
initially="DEFERRED",
deferrable=True,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
initially=None
)
@config.requirements.fk_deferrable
@config.requirements.fk_initially
def test_add_initially_immediate_plus_deferrable(self):
diffs = self._fk_opts_fixture(
{}, {"initially": "immediate", "deferrable": True}
)
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
initially=None,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
initially="immediate",
deferrable=True
)
@config.requirements.fk_deferrable
@config.requirements.fk_initially
def test_remove_initially_immediate_plus_deferrable(self):
diffs = self._fk_opts_fixture(
{"initially": "immediate", "deferrable": True}, {}
)
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
initially=None, # immediate is the default
deferrable=True,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
initially=None,
deferrable=None
)
@config.requirements.fk_initially
@config.requirements.fk_deferrable
def test_add_initially_deferrable_nochange_one(self):
diffs = self._fk_opts_fixture(
{"deferrable": True, "initially": "immediate"},
{"deferrable": True, "initially": "immediate"}
)
eq_(diffs, [])
@config.requirements.fk_initially
@config.requirements.fk_deferrable
def test_add_initially_deferrable_nochange_two(self):
diffs = self._fk_opts_fixture(
{"deferrable": True, "initially": "deferred"},
{"deferrable": True, "initially": "deferred"}
)
eq_(diffs, [])
@config.requirements.fk_initially
@config.requirements.fk_deferrable
def test_add_initially_deferrable_nochange_three(self):
diffs = self._fk_opts_fixture(
{"deferrable": None, "initially": "deferred"},
{"deferrable": None, "initially": "deferred"}
)
eq_(diffs, [])
@config.requirements.fk_deferrable
def test_add_deferrable(self):
diffs = self._fk_opts_fixture(
{}, {"deferrable": True}
)
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
deferrable=None,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
deferrable=True
)
@config.requirements.fk_deferrable
def test_remove_deferrable(self):
diffs = self._fk_opts_fixture(
{"deferrable": True}, {}
)
self._assert_fk_diff(
diffs[0], "remove_fk",
"user", ["tid"],
"table", ["id"],
deferrable=True,
conditional_name="servergenerated"
)
self._assert_fk_diff(
diffs[1], "add_fk",
"user", ["tid"],
"table", ["id"],
deferrable=None
)
|
{
"content_hash": "8538475bfa93e212538b8a9ef478d6f8",
"timestamp": "",
"source": "github",
"line_count": 900,
"max_line_length": 78,
"avg_line_length": 31.82777777777778,
"alnum_prop": 0.4754756502007331,
"repo_name": "chishaku/alembic",
"id": "0a5b1de2ccb7af36d98ae1a14d2ba1b16a932877",
"size": "28645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_autogen_fks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "6390"
},
{
"name": "Python",
"bytes": "948556"
}
],
"symlink_target": ""
}
|
import logging
import random
from time import sleep
from client import Tutor
class ExampleTutor(Tutor):
    def __init__(self, entity_id, api_key, logger=None, run_once=None, args=None):
super().__init__(entity_id, api_key, self.main_callback)
self.run_once = run_once
self.logger = logger
self.event_names = [
'test', 'example', 'add_student',
'remove_student', 'trace']
def main_callback(self):
event = random.choice(self.event_names)
        logger = self.logger or logging.getLogger(__name__)
        logger.debug("Sending a random event: " + event)
response = self.send(event, {'test': 1234})
logger.debug("RECV: " + str(response))
sleep(1)
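        # The return value presumably drives the client's event loop: False
        # stops after one event (run_once), True keeps polling.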
if self.run_once:
return False
else:
return True
|
{
"content_hash": "9c5e8cf096094c8611284dc8fa619d14",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 84,
"avg_line_length": 26.939393939393938,
"alnum_prop": 0.6017997750281214,
"repo_name": "adlnet/HPIT-python-client",
"id": "0ba6e9536d8f69804647ced3ed26713ccf89fccc",
"size": "889",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/tutors/example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73459"
}
],
"symlink_target": ""
}
|
"""This sample application demonstrates the app class way to use configman."""
# there are two ways to invoke this app:
# .../generic_app.py --admin.application=demo3.Demo3App
# .../demo3.py
# this demo differs from demo2.py in the manner in which it works with
# configman. Rather than being a linear procedure, this app defines an app
# class with five features:
# 1) the app class derives from 'RequiredConfig'. This instruments the class
# with the mechanism for discovery of required configuration parameters.
# 2) closely aligned with point 1, this class defines a class level constant
# called 'required_config' that sets up Namespaces and Options to define
# the configuration requirements.
# 3) the app class defines three class level constants that identify the app.
# 'app_name', 'app_version', 'app_description'
# 4) the app class defines a constructor that accepts a DotDict derivative
# of configuration values.
# 5) the app class defines a parameterless 'main' function that executes the
# business logic of the application
import configman as cm
import configman.config_manager as config_man
# the following class embodies the business logic of the application.
class Demo3App(config_man.RequiredConfig):
app_name = 'demo3_app'
app_version = '0.1'
app_description = __doc__
# create the definitions for the parameters that are to come from
# the command line or config file.
required_config = cm.Namespace()
required_config.add_option('text', 'Socorro Forever', 'the input value',
short_form='t')
def __init__(self, config):
self.text = config.text
self.action_fn = Demo3App.action_converter(config.action)
def main(self):
self.action_fn(self.text)
@staticmethod
def echo_action(x):
print x
@staticmethod
def backwards_action(x):
print x[::-1]
@staticmethod
def upper_action(x):
print x.upper()
@staticmethod
def action_converter(action):
try:
return getattr(Demo3App, "%s_action" % action)
except AttributeError:
raise Exception("'%s' is not a valid action" % action)
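# For example, action_converter('echo') resolves to Demo3App.echo_action.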
# normally, all the parameters are defined within the class, but
# the methods of this class itself are used in the configuration parameters.
# Python doesn't allow reference to class members until the class is entirely
# defined. This tag along code injects the final config parameter after
# the class has been fully defined
list_of_actions = [x[:-7] for x in dir(Demo3App) if x.endswith('_action')]
doc_string = 'the action to take [%s]' % ', '.join(list_of_actions)
Demo3App.required_config.add_option('action', 'echo', doc_string,
short_form='a')
# if you'd rather invoke the app directly with its source file, this will
# allow it.
if __name__ == "__main__":
import generic_app
generic_app.main(Demo3App)
|
{
"content_hash": "963085599906200ce22f8cd6379126de",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 38.74025974025974,
"alnum_prop": 0.6842105263157895,
"repo_name": "twobraids/configman_orginal",
"id": "bfbfa01fca9689c71fb40614814e8d02622a4a27",
"size": "4718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demo3.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "257212"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import getopt
import os
import shutil
import sys
from Command import CommandRunner, Usage
all_versions = [
'2.3.7',
'2.4.5',
#'2.5.2',
'2.6',
]
def main(argv=None):
if argv is None:
argv = sys.argv
all = False
downloads_dir = 'Downloads'
downloads_url = 'http://www.python.org/ftp/python'
sudo = 'sudo'
    prefix = '/usr/local'
    # ./configure flags referenced by the command template below; the original
    # left this name undefined, which would raise a KeyError at substitution time.
    configureflags = ''
short_options = 'ad:hnp:q'
long_options = ['all', 'help', 'no-exec', 'prefix=', 'quiet']
helpstr = """\
Usage: install_python.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]
  -a, --all                     Install all Python versions.
-d DIR, --downloads=DIR Downloads directory.
-h, --help Print this help and exit
-n, --no-exec No execute, just print command lines
-p PREFIX, --prefix=PREFIX Installation prefix.
-q, --quiet Quiet, don't print command lines
"""
try:
try:
opts, args = getopt.getopt(argv[1:], short_options, long_options)
except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
if o in ('-a', '--all'):
all = True
elif o in ('-d', '--downloads'):
downloads_dir = a
elif o in ('-h', '--help'):
print(helpstr)
sys.exit(0)
elif o in ('-n', '--no-exec'):
CommandRunner.execute = CommandRunner.do_not_execute
elif o in ('-p', '--prefix'):
prefix = a
elif o in ('-q', '--quiet'):
CommandRunner.display = CommandRunner.do_not_display
except Usage as err:
sys.stderr.write(str(err.msg) + '\n')
sys.stderr.write('use -h to get help\n')
return 2
if all:
if args:
            msg = 'install_python.py: -a and version arguments both specified'
sys.stderr.write(msg)
sys.exit(1)
args = all_versions
cmd = CommandRunner()
for version in args:
python = 'Python-' + version
tar_gz = os.path.join(downloads_dir, python + '.tgz')
        tar_gz_url = '/'.join([downloads_url, version, python + '.tgz'])
cmd.subst_dictionary(locals())
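        # The cmd.run() templates below are presumably %-interpolated against
        # the local variables captured here.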
if not os.path.exists(tar_gz):
if not os.path.exists(downloads_dir):
cmd.run('mkdir %(downloads_dir)s')
cmd.run('wget -O %(tar_gz)s %(tar_gz_url)s')
cmd.run('tar zxf %(tar_gz)s')
cmd.run('cd %(python)s')
cmd.run('./configure --prefix=%(prefix)s %(configureflags)s 2>&1 | tee configure.out')
cmd.run('make 2>&1 | tee make.out')
cmd.run('%(sudo)s make install')
cmd.run('%(sudo)s rm -f %(prefix)s/bin/{idle,pydoc,python,python-config,smtpd.py}')
cmd.run('cd ..')
cmd.run((shutil.rmtree, python), 'rm -rf %(python)s')
if __name__ == "__main__":
sys.exit(main())
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "b8603a47a1ac03cc02972922fabeac27",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 94,
"avg_line_length": 28.28440366972477,
"alnum_prop": 0.5296788842036977,
"repo_name": "timj/scons",
"id": "5c947ac85011f4c3481ac2cb531e17e6ff2a555a",
"size": "3402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/install_python.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "593"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7393581"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52480"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import DeuscoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
COIN = 100000000
MAX_REPLACEMENT_LIMIT = 100
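# Anti-DoS cap: replacements that would evict more than this many transactions
# are rejected by the mempool (cf. BIP125 rule 5).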
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def txToHex(tx):
return binascii.hexlify(tx.serialize()).decode('utf-8')
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
new_addr = node.getnewaddress()
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
if txout['scriptPubKey']['addresses'] == [new_addr]:
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(binascii.hexlify(tx2.serialize()).decode('utf-8'))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(DeuscoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-relaypriority=0", "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print "Running test simple doublespend..."
self.test_simple_doublespend()
print "Running test doublespend chain..."
self.test_doublespend_chain()
print "Running test doublespend tree..."
self.test_doublespend_tree()
print "Running test replacement feeperkb..."
self.test_replacement_feeperkb()
print "Running test spends of conflicting outputs..."
self.test_spends_of_conflicting_outputs()
print "Running test new unconfirmed inputs..."
self.test_new_unconfirmed_inputs()
print "Running test too many replacements..."
self.test_too_many_replacements()
print "Running test opt-in..."
self.test_opt_in()
print "Running test prioritised transactions..."
self.test_prioritised_transactions()
print "Passed\n"
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# Extra 0.1 DEUS fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 DEUS - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False) # transaction mistakenly accepted!
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = 0.0001*COIN
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# 1 DEUS fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = 0.0001*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], 1.2*COIN)
utxo2 = make_utxo(self.nodes[0], 3.0*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(1.1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], 1.1*COIN)
unconfirmed_utxo = make_utxo(self.nodes[0], 0.1*COIN, False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = 0.0001*COIN
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
try:
self.nodes[0].sendrawtransaction(double_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
""" Replacing should only work if orig tx opted in """
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
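        # nSequence 0xffffffff marks the input final; BIP125 opt-in requires a
        # value below 0xfffffffe.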
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
print tx1b_txid
assert(False)
tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(0.9*COIN, CScript([b'c'])), CTxOut(0.9*COIN, CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(0.5*COIN, CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(0.5*COIN, CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(1.01*COIN, CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
{
"content_hash": "63d842305b50ddb812e6c2f6f7617b37",
"timestamp": "",
"source": "github",
"line_count": 583,
"max_line_length": 105,
"avg_line_length": 37.337907375643226,
"alnum_prop": 0.5825523704520397,
"repo_name": "deuscoin-org/deuscoin-core",
"id": "ed9cc845b8ac05266d055cf00b9b47bb855343af",
"size": "22016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/replace-by-fee.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "619655"
},
{
"name": "C++",
"bytes": "4298620"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2101"
},
{
"name": "M4",
"bytes": "147636"
},
{
"name": "Makefile",
"bytes": "97785"
},
{
"name": "Objective-C",
"bytes": "92297"
},
{
"name": "Objective-C++",
"bytes": "7244"
},
{
"name": "Python",
"bytes": "703811"
},
{
"name": "QMake",
"bytes": "2021"
},
{
"name": "Roff",
"bytes": "3831"
},
{
"name": "Shell",
"bytes": "417069"
}
],
"symlink_target": ""
}
|
""" Settings for my_project """
from .base import *
try:
from .local import *
except ImportError as exc:
exc.args = tuple(
['%s (did you rename settings/local-dist.py?)' % exc.args[0]])
raise exc
|
{
"content_hash": "f1d9bc02a106ef4963081c57cc0d7563",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 23.88888888888889,
"alnum_prop": 0.6139534883720931,
"repo_name": "jangeador/django_sample",
"id": "e95bdbeec67b6aa0384c541c51fd569d2b83c2ff",
"size": "215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_project/settings/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11300"
},
{
"name": "JavaScript",
"bytes": "734"
},
{
"name": "Python",
"bytes": "24839"
},
{
"name": "Shell",
"bytes": "8144"
}
],
"symlink_target": ""
}
|
"""Constants for the Risco integration."""
from homeassistant.const import (
CONF_SCAN_INTERVAL,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
)
DOMAIN = "risco"
RISCO_EVENT = "risco_event"
DATA_COORDINATOR = "risco"
EVENTS_COORDINATOR = "risco_events"
DEFAULT_SCAN_INTERVAL = 30
TYPE_LOCAL = "local"
CONF_CODE_ARM_REQUIRED = "code_arm_required"
CONF_CODE_DISARM_REQUIRED = "code_disarm_required"
CONF_RISCO_STATES_TO_HA = "risco_states_to_ha"
CONF_HA_STATES_TO_RISCO = "ha_states_to_risco"
RISCO_GROUPS = ["A", "B", "C", "D"]
RISCO_ARM = "arm"
RISCO_PARTIAL_ARM = "partial_arm"
RISCO_STATES = [RISCO_ARM, RISCO_PARTIAL_ARM, *RISCO_GROUPS]
DEFAULT_RISCO_GROUPS_TO_HA = {group: STATE_ALARM_ARMED_HOME for group in RISCO_GROUPS}
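# By default a single armed group reads as "armed home"; a full arm maps to
# "armed away" below.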
DEFAULT_RISCO_STATES_TO_HA = {
RISCO_ARM: STATE_ALARM_ARMED_AWAY,
RISCO_PARTIAL_ARM: STATE_ALARM_ARMED_HOME,
**DEFAULT_RISCO_GROUPS_TO_HA,
}
DEFAULT_HA_STATES_TO_RISCO = {
STATE_ALARM_ARMED_AWAY: RISCO_ARM,
STATE_ALARM_ARMED_HOME: RISCO_PARTIAL_ARM,
}
DEFAULT_OPTIONS = {
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_CODE_ARM_REQUIRED: False,
CONF_CODE_DISARM_REQUIRED: False,
CONF_RISCO_STATES_TO_HA: DEFAULT_RISCO_STATES_TO_HA,
CONF_HA_STATES_TO_RISCO: DEFAULT_HA_STATES_TO_RISCO,
}
|
{
"content_hash": "0b2636782e9ef9be04c7aadf2550cf5a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 86,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.7046875,
"repo_name": "w1ll1am23/home-assistant",
"id": "9f0e71701c65b915c8e2c8cfa56c595681fea100",
"size": "1280",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/risco/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import os
import sys
import shutil
class NoDirectoriesError(Exception):
"Error thrown when no directories starting with an underscore are found"
class DirHelper:
def __init__(self, is_dir, list_dir, walk, rmtree):
self.is_dir = is_dir
self.list_dir = list_dir
self.walk = walk
self.rmtree = rmtree
class FileSystemHelper:
def __init__(self, open_, path_join, move, exists):
self.open_ = open_
self.path_join = path_join
self.move = move
self.exists = exists
class Replacer:
"Encapsulates a simple text replace"
def __init__(self, from_, to):
self.from_ = from_
self.to = to
def process(self, text):
print('Replace %s to %s' % (self.from_, self.to))
return text.replace(self.from_, self.to)
class FileHandler:
"Applies a series of replacements the contents of a file inplace"
def __init__(self, name, replacers, opener):
self.name = name
self.replacers = replacers
self.opener = opener
def process(self):
text = self.opener(self.name).read()
for replacer in self.replacers:
text = replacer.process(text)
self.opener(self.name, "w").write(text)
class Remover:
def __init__(self, exists, remove):
self.exists = exists
self.remove = remove
def __call__(self, name):
if self.exists(name):
self.remove(name)
class ForceRename:
def __init__(self, renamer, remove):
self.renamer = renamer
self.remove = remove
def __call__(self, from_, to):
self.remove(to)
self.renamer(from_, to)
class VerboseRename:
def __init__(self, renamer, stream):
self.renamer = renamer
self.stream = stream
def __call__(self, from_, to):
self.stream.write(
"Renaming directory '%s' -> '%s'\n" %
(os.path.basename(from_), os.path.basename(to)))
self.renamer(from_, to)
class DirectoryHandler:
'''Encapsulates renaming a directory by removing its first character'''
def __init__(self, name, root, renamer):
self.name = name
self.new_name = name[1:]
self.root = root + os.sep
self.renamer = renamer
def path(self):
return os.path.join(self.root, self.name)
def relative_path(self, directory, filename):
path = directory.replace(self.root, "", 1)
return os.path.join(path, filename)
def new_relative_path(self, directory, filename):
path = self.relative_path(directory, filename)
return path.replace(self.name, self.new_name, 1)
def process(self):
from_ = os.path.join(self.root, self.name)
to = os.path.join(self.root, self.new_name)
self.renamer(from_, to)
class HandlerFactory:
def create_file_handler(self, name, replacers, opener):
return FileHandler(name, replacers, opener)
def create_dir_handler(self, name, root, renamer):
return DirectoryHandler(name, root, renamer)
class OperationsFactory:
def create_force_rename(self, renamer, remover):
return ForceRename(renamer, remover)
def create_verbose_rename(self, renamer, stream):
return VerboseRename(renamer, stream)
def create_replacer(self, from_, to):
return Replacer(from_, to)
def create_remover(self, exists, remove):
return Remover(exists, remove)
class Layout:
"""
Applies a set of operations which result in the layout
of a directory changing
"""
def __init__(self, directory_handlers, file_handlers):
self.directory_handlers = directory_handlers
self.file_handlers = file_handlers
def process(self):
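        # Rewrite file contents before renaming directories so the handlers
        # still see the original underscore-prefixed paths on disk.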
for handler in self.file_handlers:
handler.process()
for handler in self.directory_handlers:
handler.process()
class LayoutFactory:
"Creates a layout object"
def __init__(self, operations_factory, handler_factory, file_helper,
dir_helper, verbose, stream, force):
self.operations_factory = operations_factory
self.handler_factory = handler_factory
self.file_helper = file_helper
self.dir_helper = dir_helper
self.verbose = verbose
self.output_stream = stream
self.force = force
def create_layout(self, path):
contents = self.dir_helper.list_dir(path)
renamer = self.file_helper.move
if self.force:
remove = self.operations_factory.create_remover(
self.file_helper.exists, self.dir_helper.rmtree)
renamer = self.operations_factory.create_force_rename(
renamer, remove)
if self.verbose:
renamer = self.operations_factory.create_verbose_rename(
renamer, self.output_stream)
# Build list of directories to process
directories = [d for d in contents if self.is_underscore_dir(path, d)]
underscore_directories = [
self.handler_factory.create_dir_handler(d, path, renamer)
for d in directories]
if not underscore_directories:
raise NoDirectoriesError()
# Build list of files that are in those directories
replacers = []
for handler in underscore_directories:
for directory, dirs, files in self.dir_helper.walk(handler.path()):
for f in files:
replacers.append(
self.operations_factory.create_replacer(
handler.relative_path(directory, f),
handler.new_relative_path(directory, f)))
# Build list of handlers to process all files
filelist = []
for root, dirs, files in self.dir_helper.walk(path):
for f in files:
if f.endswith(".html") or f.endswith(".fjson"):
filelist.append(
self.handler_factory.create_file_handler(
self.file_helper.path_join(root, f),
replacers,
self.file_helper.open_)
)
if f.endswith(".js"):
filelist.append(
self.handler_factory.create_file_handler(
self.file_helper.path_join(root, f),
[self.operations_factory.create_replacer(
"'_sources/'", "'sources/'")],
self.file_helper.open_
)
)
return Layout(underscore_directories, filelist)
def is_underscore_dir(self, path, directory):
return (self.dir_helper.is_dir(
self.file_helper.path_join(path, directory)) and
directory.startswith("_"))
def sphinx_extension(app, exception):
"Wrapped up as a Sphinx Extension"
if app.builder.name not in ("html", "dirhtml", "json"):
return
if not app.config.sphinx_to_github:
if app.config.sphinx_to_github_verbose:
print("Sphinx-to-github: Disabled, doing nothing.")
return
if exception:
if app.config.sphinx_to_github_verbose:
print("Sphinx-to-github: Exception raised in main build, "
"doing nothing.")
return
dir_helper = DirHelper(
os.path.isdir,
os.listdir,
os.walk,
shutil.rmtree)
file_helper = FileSystemHelper(
open,
os.path.join,
shutil.move,
os.path.exists)
operations_factory = OperationsFactory()
handler_factory = HandlerFactory()
layout_factory = LayoutFactory(
operations_factory,
handler_factory,
file_helper,
dir_helper,
app.config.sphinx_to_github_verbose,
sys.stdout,
force=True)
layout = layout_factory.create_layout(app.outdir)
layout.process()
def setup(app):
"Setup function for Sphinx Extension"
app.add_config_value("sphinx_to_github", True, '')
app.add_config_value("sphinx_to_github_verbose", True, '')
app.connect("build-finished", sphinx_extension)
|
{
"content_hash": "8b568ef136ba3cfbee7a83882edf4225",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 79,
"avg_line_length": 29.380782918149468,
"alnum_prop": 0.5868459302325582,
"repo_name": "quantmind/lux",
"id": "c2b23c562f9277afb04fb53c65cc5c7987c9694b",
"size": "8256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lux/utils/sphinxtogithub.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "906"
},
{
"name": "HTML",
"bytes": "5107"
},
{
"name": "JavaScript",
"bytes": "219127"
},
{
"name": "Makefile",
"bytes": "422"
},
{
"name": "Mako",
"bytes": "1050"
},
{
"name": "PLpgSQL",
"bytes": "140"
},
{
"name": "Python",
"bytes": "615221"
},
{
"name": "Shell",
"bytes": "196"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pybullet_envs.minitaur.envs import minitaur_raibert_controller
from pybullet_envs.minitaur.envs import minitaur_gym_env
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_float("motor_kp", 1.0, "The position gain of the motor.")
flags.DEFINE_float("motor_kd", 0.015, "The speed gain of the motor.")
flags.DEFINE_float("control_latency", 0.006, "The latency between sensor measurement and action"
" execution the robot.")
flags.DEFINE_string("log_path", ".", "The directory to write the log file.")
def speed(t):
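  # Ramp the commanded speed linearly up to max_speed over the first t1
  # seconds, then reverse direction at full speed.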
max_speed = 0.35
t1 = 3
if t < t1:
return t / t1 * max_speed
else:
return -max_speed
def main(argv):
del argv
env = minitaur_gym_env.MinitaurGymEnv(urdf_version=minitaur_gym_env.RAINBOW_DASH_V0_URDF_VERSION,
control_time_step=0.006,
action_repeat=6,
pd_latency=0.0,
control_latency=FLAGS.control_latency,
motor_kp=FLAGS.motor_kp,
motor_kd=FLAGS.motor_kd,
remove_default_joint_damping=True,
leg_model_enabled=False,
render=True,
on_rack=False,
accurate_motor_model_enabled=True,
log_path=FLAGS.log_path)
env.reset()
controller = minitaur_raibert_controller.MinitaurRaibertTrottingController(env.minitaur)
tstart = env.minitaur.GetTimeSinceReset()
for _ in range(1000):
t = env.minitaur.GetTimeSinceReset() - tstart
controller.behavior_parameters = (minitaur_raibert_controller.BehaviorParameters(
desired_forward_speed=speed(t)))
controller.update(t)
env.step(controller.get_action())
#env.close()
if __name__ == "__main__":
tf.app.run(main)
|
{
"content_hash": "776ddab04b682e01cb3764e0b0c6a7f0",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 99,
"avg_line_length": 36.74576271186441,
"alnum_prop": 0.5641143911439115,
"repo_name": "MTASZTAKI/ApertusVR",
"id": "40e6b4799990843078bcfbe037c9da218e69ac79",
"size": "2309",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.9",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/actuatornet/minitaur_raibert_controller_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7599"
},
{
"name": "C++",
"bytes": "1207412"
},
{
"name": "CMake",
"bytes": "165066"
},
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "GLSL",
"bytes": "223507"
},
{
"name": "HLSL",
"bytes": "141879"
},
{
"name": "HTML",
"bytes": "34827"
},
{
"name": "JavaScript",
"bytes": "140550"
},
{
"name": "Python",
"bytes": "1370"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class SongsArtistsConfig(AppConfig):
name = 'songs_artists'
|
{
"content_hash": "b56f26e383cc01cb382f50f3ca789c9f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 20,
"alnum_prop": 0.77,
"repo_name": "forever-Agriculture/lyrics_site",
"id": "a405cb29a4a6d5faed09ea2ed20051e186fb8ddd",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/songs_artists/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "56"
},
{
"name": "Elixir",
"bytes": "3002"
},
{
"name": "HTML",
"bytes": "18146"
},
{
"name": "Makefile",
"bytes": "1586"
},
{
"name": "Nginx",
"bytes": "1546"
},
{
"name": "Python",
"bytes": "15905"
},
{
"name": "Shell",
"bytes": "19047"
}
],
"symlink_target": ""
}
|
"""
Tests for the sprockets.clients.cassandra package
"""
import os
import socket
import time
from cassandra.cluster import Cluster
from cassandra.protocol import SyntaxException
from tornado.testing import AsyncTestCase, gen_test
from sprockets.clients.cassandra import CassandraConnection
class TestCassandraConnectionClass(AsyncTestCase):
def setUp(self):
super(TestCassandraConnectionClass, self).setUp()
self.cluster = Cluster(self.find_cassandra())
self.session = self.cluster.connect()
self.keyspace = 'sprocketstest{0}'.format(int(time.time()*10000))
self.create_fixtures()
self.connection = CassandraConnection()
def tearDown(self):
super(TestCassandraConnectionClass, self).tearDown()
self.session.execute("DROP KEYSPACE {0}".format(self.keyspace))
self.connection.shutdown()
    def find_cassandra(self):
        # Strip the 'cassandra://' scheme and any ':port' suffix, then resolve
        # the hostname; fall back to the bare name if resolution fails.
        uri = os.environ.get('CASSANDRA_URI', 'cassandra://localhost')
        hostname = uri[12:].split(':')[0]
        try:
            _, _, ips = socket.gethostbyname_ex(hostname)
            return ips
        except socket.gaierror:
            return [hostname]
def create_fixtures(self):
self.session.execute(
"CREATE KEYSPACE IF NOT EXISTS {0} WITH REPLICATION = "
"{{'class': 'SimpleStrategy', "
"'replication_factor': 1}}".format(self.keyspace))
self.session.execute("USE {0}".format(self.keyspace))
self.session.execute(
"CREATE TABLE IF NOT EXISTS names (name text PRIMARY KEY)")
self.session.execute(
"INSERT INTO names (name) VALUES ('Peabody')")
@gen_test
def test_several_queries(self):
futures = []
count = 100
for i in range(count):
futures.append(self.connection.execute(
"SELECT name FROM {0}.names".format(self.keyspace)))
results = 0
for future in futures:
yield future
results += 1
self.assertEqual(count, results)
@gen_test
def test_bad_query(self):
with self.assertRaises(SyntaxException):
yield self.connection.execute('goobletygook')
@gen_test
def test_set_keyspace(self):
self.connection.set_keyspace(self.keyspace)
@gen_test
def test_prepared_statement(self):
yield self.connection.execute('use %s' % self.keyspace)
stmt = self.connection.prepare('SELECT * FROM names;', 'get_names')
copy = self.connection.prepare('SELECT * FROM names;', 'get_names')
self.assertIs(stmt, copy, 'Should return the cached statement')
results = yield self.connection.execute(stmt)
self.assertEqual(results[0].name, 'Peabody')
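# Hedged sketch of the caching behaviour exercised by test_prepared_statement
# above (left as a comment because it needs a live Cassandra node):
#
#     conn = CassandraConnection()
#     stmt = conn.prepare('SELECT * FROM names;', 'get_names')
#     assert conn.prepare('SELECT * FROM names;', 'get_names') is stmt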
|
{
"content_hash": "88e119a1cf83aef3142355e8bca59d66",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 75,
"avg_line_length": 34.52564102564103,
"alnum_prop": 0.6435202376531749,
"repo_name": "sprockets/sprockets.clients.cassandra",
"id": "5ad5460bb626ab1efd8a0d4f0b18b08c15135430",
"size": "2693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9622"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'post.views',
url(r'^list/$', 'object_list', name='post_object_list'),
url(r'^(?P<slug>[\w-]+)/$', 'object_detail', name='post_object_detail'),
)
|
{
"content_hash": "e7d28e7e76e9fc88e95cbfc5bc36e2fb",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 76,
"avg_line_length": 33.57142857142857,
"alnum_prop": 0.6340425531914894,
"repo_name": "praekelt/panya-post",
"id": "7142d6cb0f311262a2c471568469797edca38ff7",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "post/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "236"
},
{
"name": "Python",
"bytes": "3840"
}
],
"symlink_target": ""
}
|
import datetime as dt
from collections import OrderedDict
from collections.abc import Iterable, Mapping
from decimal import Decimal
from itertools import chain as iter_chain
from .typing import DSet, DObject, DSetBase
from .metaclass import DObjectMetaClass
# from ._reshape import ReshapeOperator
class dobject(DObject, metaclass=DObjectMetaClass):
def __new__(cls, *args, **kwargs):
instance = super(dobject, cls).__new__(cls) # new instance of dobject
# store values of attributes
super(dobject, instance).__setattr__('__value_dict__', OrderedDict())
attributes = OrderedDict(iter_chain(cls.__dobject_key__.items(),
cls.__dobject_att__.items()))
aggregates = []
seen = set()
if args:
if len(args) > 1:
errmsg = "Do not exceed one positional argument: "
errmsg += "(obj, attr1='', ...) or (attr1='', ...) "
raise ValueError(errmsg)
source_obj = args[0] # reshape the given object or dict
if isinstance(source_obj, Mapping): # like {}
for attr_name, attr in attributes.items():
if attr_name in kwargs:
                        continue  # this value will be set later
if attr_name not in source_obj:
continue
attr_val = source_obj[attr_name]
attr.set_value_unguardedly(instance, attr_val)
seen.add(attr_name)
elif isinstance(source_obj, DObject):
if (cls.__dobject_origin_class__ and
isinstance(source_obj, cls.__dobject_origin_class__)):
subst_mapping = {}
for o_name, n_name in cls.__dobject_mapping__.items():
subst_mapping[n_name] = o_name
if n_name not in cls.__dobject_mapping__:
# _subst=dict(a=b*, b=a) if o_name in mapping
subst_mapping[o_name] = None # mark it not to clone
else:
subst_mapping = {}
for attr_name, attr in attributes.items():
if attr_name in kwargs:
                        continue  # this value will be set later
if attr_name in subst_mapping:
src_attr_name = subst_mapping[attr_name]
if src_attr_name is None:
continue
else:
src_attr_name = attr_name
if not hasattr(source_obj, src_attr_name):
continue
attr_val = getattr(source_obj, src_attr_name)
# if isinstance(attr_val, DSetBase):
# # NOTED: the dominion object is required to replace
# aggregates.append((attr_name, attr, attr_val))
# continue
attr.set_value_unguardedly(instance, attr_val)
seen.add(attr_name)
else:
for attr_name, attr in attributes.items():
if attr_name in kwargs:
                        continue  # this value will be set later
if not hasattr(source_obj, attr_name):
continue
attr_val = getattr(source_obj, attr_name)
attr.set_value_unguardedly(instance, attr_val)
seen.add(attr_name)
for arg_name, arg_value in kwargs.items():
attr = attributes.get(arg_name, None)
if attr is None:
errmsg = "No attribue '%s' defined in %s"
errmsg %= (arg_name, cls.__name__)
raise ValueError(errmsg)
attr.set_value_unguardedly(instance, arg_value)
seen.add(arg_name)
# for attr_name, attr, attr_val in aggregates:
# attr_val = attr.type(attr_val, _dominion = instance)
# # set default values for these left parameters
# for attr_name, attr in parameters.items():
# getattr(instance, attr_name)
# # force it to get chance to check default value
pkey_att_vals = tuple(getattr(instance, attr_name)
for attr_name in cls.__dobject_key__)
setattr(instance, '__dobject_key__',
cls.__dobject_key_class__(instance))
return instance
# def __getattr__(self, name):
# errmsg ='The domain object %s has no field: %s '
# errmsg %= (self.__class__.__name__, name)
# raise AttributeError(errmsg)
def __setattr__(self, name, value):
if hasattr(self, name):
super(dobject, self).__setattr__(name, value)
else:
errmsg ='The domain object %s has no field: %s '
errmsg %= (self.__class__.__name__, name)
raise AttributeError(errmsg)
def __repr__(self):
""" """
values = self.__value_dict__
segs = [repr(self.__dobject_key__)] if self.__dobject_key__ else []
segs += ['%s=%r' % (attr_name, getattr(self, attr_name))
for attr_name in self.__class__.__dobject_att__]
return self.__class__.__name__ + '(' + ', '.join(segs) + ')'
def __eq__(self, other) :
"""
When the primary key attribute is specified, this dobject is equal to
the other if the attribues of primary key are equaled. Otherwise, all
attributes are needed to be equaled if the two dobject are equaled.
"""
if other is None :
return False
# if not isinstance(other, dobject): # it's weird: A() == 9999
#
if not isinstance(other, dobject): #
errmsg = "The value should be a dobject, not '%s' type"
errmsg %= other.__class__.__name__
raise ValueError(errmsg)
# other = self.__class__(other) # it's weird: A() == 9999
if self.__class__.__dobject_key__:
return self.__dobject_key__ == other.__dobject_key__
for attr_name in self.__class__.__dobject_att__.keys():
if getattr(self, attr_name) != getattr(other, attr_name, None):
return False
return True
def __bool__(self):
"""
"""
cls = self.__class__
if not cls.__dobject_att__ and not cls.__dobject_key__:
return False # no attribues defined in this dobject
for attr_name, attr in iter_chain(cls.__dobject_key__.items(),
cls.__dobject_att__.items()):
if attr_name not in self.__value_dict__:
continue # The truth value of attribute is false
attr_val = getattr(self, attr_name)
if attr.default is not None:
if attr_val != attr.default:
return True
elif attr_val:
return True
return False
def __json_object__(self):
"""export dobject as list or dict """
cls = self.__class__
self_attrs = getattr(self, '__value_dict__')
data = OrderedDict()
for attr_name in iter_chain(cls.__dobject_key__, cls.__dobject_att__):
attr_value = getattr(self, attr_name)
if hasattr(attr_value, '__json_object__'):
attr_value = attr_value.__json_object__()
data[attr_name] = attr_value
return data
|
{
"content_hash": "17593ab5eff0dcdbf6f64ec7fbc4ed36",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 36.61244019138756,
"alnum_prop": 0.5065342394145321,
"repo_name": "lcgong/domainics",
"id": "e4eb98f18e9297abb5b509d5710a351942c71aa0",
"size": "7677",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "domainics/domobj/dobject.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "177092"
}
],
"symlink_target": ""
}
|
__author__ = 'phithon'
import tornado.web
from controller.base import BaseHandler
from tornado import gen
import time, pymongo, os, uuid
from bson.objectid import ObjectId
from util.function import random_str, hash, intval
class AdminHandler(BaseHandler):
def initialize(self):
super(AdminHandler, self).initialize()
self.topbar = "admin"
def prepare(self):
super(AdminHandler, self).prepare()
if self.power != "admin":
self.redirect("/")
def post(self, *args, **kwargs):
method = ("%s_action" % args[0]) if len(args) > 0 else "home_action"
if hasattr(self, method):
getattr(self, method)(*args, **kwargs)
else:
self.home_action(*args, **kwargs)
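    # Hedged illustration of the dispatch above (URL shapes assumed from the
    # action names, not from the app's routing table): POST /admin/article
    # arrives as args == ("article",) and is routed to self.article_action,
    # while a bare POST /admin/ falls back to self.home_action.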
@tornado.web.asynchronous
@gen.coroutine
def register_action(self, *args, **kwargs):
method = self.get_body_argument("method", default=None)
if method in ["open"]:
config = self._read_config()
config["global"]["register"] = method
self._write_config(config)
self.flash["register"] = "设置成功"
else:
self.flash["register"] = "注册方法不正确"
self.redirect("/admin/")
@tornado.web.asynchronous
@gen.coroutine
def article_action(self, *args, **kwargs):
method = self.get_body_argument("method", default="")
id = self.get_body_argument("id", default=None)
if method in ("star", "unstar"):
star = True if method == "star" else False
post = yield self.db.article.find_and_modify({
"_id": ObjectId(id)
}, {
"$set": {
"star": star
}
})
content = u"你的文章《%s》被管理员" % post["title"] + (u"加精" if star else u"取消精华") + u"了"
yield self.message(fromuser=None, touser=post["user"], content=content,
jump="/post/%s" % id)
elif method in ("open", "close"):
open = True if method == "open" else False
post = yield self.db.article.find_and_modify({
"_id": ObjectId(id)
}, {
"$set": {
"open": open
}
})
yield self.message(fromuser=None, touser=post["user"], jump="/post/%s" % id,
content=u"你的文章《%s》被管理员%s了" % (post["title"], u"公开" if open else u"取消公开"))
elif method in ("top", "notop"):
top = True if method == "top" else False
post = yield self.db.article.find_and_modify({
"_id": ObjectId(id)
}, {
"$set": {
"top": top
}
})
yield self.message(fromuser=None, touser=post["user"], jump="/post/%s" % id,
content=u"你的文章《%s》被管理员%s了" % (post["title"], u"置顶" if top else u"取消置顶"))
elif method == "del":
post = yield self.db.article.find_and_modify({
"_id": ObjectId(id)
}, remove = True)
if not post:
self.custom_error("不存在这篇文章", jump="/")
yield self.db.member.update({
}, {
"$pull": {
"bookmark": {"id": id}
}
}, multi = True)
yield self.message(fromuser=None, touser=post["user"], jump="/post/%s" % id,
content=u"你的文章《%s》被管理员删除了" % post["title"])
self.redirect("/")
elif method == "rank":
rank = intval(self.get_body_argument("rank"))
post = yield self.db.article.find_one({
"_id": ObjectId(id)
})
if not post:
self.custom_error("不存在这篇文章")
if "rank" in post and post.get("rank") != 0:
self.custom_error("已经评分过啦")
if not (-10 <= rank <= 10):
self.custom_error("评分超出范围拉")
yield self.db.article.find_and_modify({
"_id": ObjectId(id)
}, {
"$set": {"rank": rank}
})
yield self.message(fromuser=None, touser=post["user"],
content=u"你的文章《%s》被管理员" % post["title"] + (u"奖励" if rank > 0 else u"扣除") + u"%d金币" % abs(rank),
jump="/post/%s" % id)
self.redirect("/post/%s" % id)
@tornado.web.asynchronous
@gen.coroutine
def delcomment_action(self, *args, **kwargs):
comid = self.get_body_argument("comid")
postid = self.get_body_argument("postid")
yield self.db.article.find_and_modify({
"_id": ObjectId(postid),
}, {"$pull": {
"comment": {
"_id": {"$eq": ObjectId(comid)}
}
}
})
self.redirect("/post/%s" % postid)
@tornado.web.asynchronous
@gen.coroutine
def edituser_action(self, *args, **kwargs):
id = self.get_body_argument("id")
user = dict(
power = intval(self.get_body_argument("power")),
email = self.get_body_argument("email"),
website = self.get_body_argument("website"),
qq = self.get_body_argument("qq"),
address = self.get_body_argument("address"),
signal = self.get_body_argument("signal"),
)
password = self.get_body_argument("password", default=None)
if password:
user["password"] = yield self.backend.submit(hash.get, password)
user = yield self.db.member.find_and_modify({
"_id": ObjectId(id)
}, {
"$set": user
})
self.redirect("/manage/userdetail/%s" % user['username'])
@tornado.web.asynchronous
@gen.coroutine
def tag_action(self, *args, **kwargs):
id = self.get_body_argument("id")
tag = dict(
name = self.get_body_argument("name"),
intro = self.get_body_argument("intro",default=None)
)
tag = yield self.db.tag.find_and_modify({
"_id": ObjectId(id)
}, {
"$set": tag
})
if not tag:
self.custom_error("不存在这个标签")
else:
self.redirect("/manage/tagdetail/%s" % id)
@tornado.web.asynchronous
@gen.coroutine
def newtag_action(self, *args, **kwargs):
tag = dict(
name = self.get_body_argument("name"),
intro = self.get_body_argument("intro",default=None),
article = 0
)
tag = yield self.db.tag.insert(tag)
self.redirect("/manage/tag")
@tornado.web.asynchronous
@gen.coroutine
def setting_action(self, *args, **kwargs):
config = self._read_config()
config["global"]["site"] = dict(
webname = self.get_body_argument("webname"),
keyword = self.get_body_argument("keyword"),
description = self.get_body_argument("description")
)
config["global"]["register"] = self.get_body_argument("register")
if config["global"]["register"] not in ("open", "close"):
self.custom_error("注册方法不正确")
captcha = self.get_body_arguments("captcha")
for d in ("register", "login", "comment"):
config["global"]["captcha"][d] = True if (d in captcha) else False
key = self.get_body_argument("key", default=None)
if key:
config["global"]["cookie_secret"] = key
self._write_config(config)
self.redirect("/manage/setting")
|
{
"content_hash": "c2581778cc384d17a51c7b5b7b45d14e",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 111,
"avg_line_length": 37.63,
"alnum_prop": 0.5079723624767473,
"repo_name": "resec/superhero",
"id": "4465b4b2a6e6300a3499e2f1c3b07da1051876ab",
"size": "7804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "104759"
},
{
"name": "HTML",
"bytes": "76310"
},
{
"name": "JavaScript",
"bytes": "1766166"
},
{
"name": "Python",
"bytes": "99973"
}
],
"symlink_target": ""
}
|
import json
import collections
Edge = collections.namedtuple("Edge", ["source", "dest", "type"])
class GraphHelper( object ):
def __init__(self):
self.counters = collections.defaultdict(lambda: 0)
self.nodes = set()
self.edges = set()
def dumps(self):
return json.dumps({
"nodes": sorted(self.nodes),
"edges": [{"from":e.source,"to":e.dest,"type":e.type} for e in self.edges]
})
def make(self, node_type):
full_name = node_type + "#" + str(self.counters[node_type])
self.counters[node_type] += 1
self.nodes.add(full_name)
return Node(full_name, self)
    def make_unique(self, node_name):
        if node_name not in self.nodes:
            self.nodes.add(node_name)
        return Node(node_name, self)
class BadEdgeError( Exception ):
pass
class Node( object ):
def __init__(self, identifier, parent):
object.__setattr__(self, 'identifier', identifier)
object.__setattr__(self, 'parent', parent)
def __getattr__(self, edgename):
matching = set(e.dest for e in self.parent.edges
if e.source == self.identifier
and e.type == edgename)
if len(matching) == 0:
return None
elif len(matching) > 1:
raise BadEdgeError("Expected one result for {}.{}, got {}".format(self.identifier, edgename, matching))
return Node(matching.pop(), self.parent)
def __getitem__(self, edgename):
return self.__getattr__(edgename)
    def __setattr__(self, edgename, value):
        if edgename in ["identifier", "parent"]:
            object.__setattr__(self, edgename, value)
            return
matching = set(e for e in self.parent.edges
if e.source == self.identifier
and e.type == edgename)
if len(matching) > 1:
print("WARNING: Setting attr {} on {} clears old values, but has multiple edges {}".format(edgename,self.identifier,matching))
self.parent.edges -= matching
if value is not None:
self.parent.edges.add(Edge(self.identifier, value.identifier, edgename))
def __setitem__(self, edgename, value):
return self.__setattr__(edgename, value)
def getall(self, edgename):
matching = frozenset(e.dest for e in self.parent.edges
if e.source == self.identifier
and e.type == edgename)
return matching
def add(self, edgename, dest):
self.parent.edges.add(Edge(self.identifier, dest.identifier, edgename))
def remove(self, edgename=None, dest=None):
matching = set(e for e in self.parent.edges
if e.source == self.identifier
                       and (dest is None or e.dest == dest.identifier)
                       and (edgename is None or e.type == edgename))
self.parent.edges -= matching
@property
def type(self):
return self.identifier.split("#")[0]
class Story( object ):
def __init__(self):
self.graph = GraphHelper()
self.counter = 1
self.lines = []
def add_line(self, line_str):
assert not "=" in line_str
assert not "\t" in line_str
self.lines.append("{} {}={}".format(self.counter, line_str, self.graph.dumps()))
self.counter += 1
def no_query(self):
self.add_query("","")
def add_query(self, query, answer):
assert not "=" in query + answer
assert not "\t" in query + answer
self.lines.append("{} {}\t{}".format(self.counter, query, answer))
self.counter += 1
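# Hedged usage sketch (not part of the original module): build a two-node
# graph through the attribute protocol above and serialize it.
if __name__ == "__main__":
    g = GraphHelper()
    alice = g.make("person")   # node "person#0"
    bob = g.make("person")     # node "person#1"
    alice.friend = bob         # adds Edge("person#0", "person#1", "friend")
    assert alice.friend.identifier == "person#1"
    print(g.dumps())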
|
{
"content_hash": "29e432da4c7d8e9d0d165aaad023901c",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 138,
"avg_line_length": 33.142857142857146,
"alnum_prop": 0.5641163793103449,
"repo_name": "hexahedria/gated-graph-transformer-network",
"id": "6f1acc4e46efb29be693dd2736204b9178fd1862",
"size": "3712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task_generators/graph_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "14613"
},
{
"name": "Python",
"bytes": "182547"
}
],
"symlink_target": ""
}
|
''' A grouped bar chart using a cleaned up version of the `Auto MPG dataset`_.
This example demonstrates automatic handling of Pandas GroupBy objects and
colormapping nested factors with ``factor_cmap``. A hover tooltip displays
information for each bar.
.. bokeh-example-metadata::
:sampledata: autompg
:apis: bokeh.plotting.figure.vbar, bokeh.transform.factor_cmap
:refs: :ref:`ug_basic_bars_pandas`
:keywords: bars, categorical, colormap, groupby, pandas
.. _Auto MPG dataset: https://archive.ics.uci.edu/ml/datasets/auto+mpg
'''
from bokeh.palettes import Spectral5
from bokeh.plotting import figure, show
from bokeh.sampledata.autompg import autompg_clean as df
from bokeh.transform import factor_cmap
df.cyl = df.cyl.astype(str)
df.yr = df.yr.astype(str)
group = df.groupby(['cyl', 'mfr'])
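# Note on the call below (assumed reading of factor_cmap's start/end slicing):
# end=1 keeps only the first element of each ('cyl', 'mfr') factor, so bars
# are colored by cylinder count alone.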
index_cmap = factor_cmap('cyl_mfr', palette=Spectral5, factors=sorted(df.cyl.unique()), end=1)
p = figure(width=800, height=300, title="Mean MPG by # Cylinders and Manufacturer",
x_range=group, toolbar_location=None, tooltips=[("MPG", "@mpg_mean"), ("Cyl, Mfr", "@cyl_mfr")])
p.vbar(x='cyl_mfr', top='mpg_mean', width=1, source=group,
line_color="white", fill_color=index_cmap, )
p.y_range.start = 0
p.x_range.range_padding = 0.05
p.xgrid.grid_line_color = None
p.xaxis.axis_label = "Manufacturer grouped by # Cylinders"
p.xaxis.major_label_orientation = 1.2
p.outline_line_color = None
show(p)
|
{
"content_hash": "4ba0cb9d7747cb4edb271455f9dab22b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 107,
"avg_line_length": 35.875,
"alnum_prop": 0.7205574912891987,
"repo_name": "bokeh/bokeh",
"id": "7288c3e324e483d3a7af20ea138cec3fe1930cae",
"size": "1435",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "examples/basic/bars/pandas_groupby_nested.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
}
|
from django import forms
from django.conf import settings
from django.utils import timezone
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from wagtail.admin import widgets
from wagtail.core.models import Page, PageViewRestriction
from .models import WagtailAdminModelForm
from .view_restrictions import BaseViewRestrictionForm
class CopyForm(forms.Form):
def __init__(self, *args, **kwargs):
# CopyPage must be passed a 'page' kwarg indicating the page to be copied
self.page = kwargs.pop('page')
self.user = kwargs.pop('user', None)
can_publish = kwargs.pop('can_publish')
super().__init__(*args, **kwargs)
self.fields['new_title'] = forms.CharField(initial=self.page.title, label=_("New title"))
allow_unicode = getattr(settings, 'WAGTAIL_ALLOW_UNICODE_SLUGS', True)
self.fields['new_slug'] = forms.SlugField(initial=self.page.slug, label=_("New slug"), allow_unicode=allow_unicode)
self.fields['new_parent_page'] = forms.ModelChoiceField(
initial=self.page.get_parent(),
queryset=Page.objects.all(),
widget=widgets.AdminPageChooser(can_choose_root=True, user_perms='copy_to'),
label=_("New parent page"),
help_text=_("This copy will be a child of this given parent page.")
)
pages_to_copy = self.page.get_descendants(inclusive=True)
subpage_count = pages_to_copy.count() - 1
if subpage_count > 0:
self.fields['copy_subpages'] = forms.BooleanField(
required=False, initial=True, label=_("Copy subpages"),
help_text=ngettext(
"This will copy %(count)s subpage.",
"This will copy %(count)s subpages.",
subpage_count) % {'count': subpage_count})
if can_publish:
pages_to_publish_count = pages_to_copy.live().count()
if pages_to_publish_count > 0:
# In the specific case that there are no subpages, customise the field label and help text
if subpage_count == 0:
label = _("Publish copied page")
help_text = _("This page is live. Would you like to publish its copy as well?")
else:
label = _("Publish copies")
help_text = ngettext(
"%(count)s of the pages being copied is live. Would you like to publish its copy?",
"%(count)s of the pages being copied are live. Would you like to publish their copies?",
pages_to_publish_count) % {'count': pages_to_publish_count}
self.fields['publish_copies'] = forms.BooleanField(
required=False, initial=False, label=label, help_text=help_text
)
# Note that only users who can publish in the new parent page can create an alias.
# This is because alias pages must always match their original page's state.
self.fields['alias'] = forms.BooleanField(
required=False, initial=False, label=_("Alias"),
help_text=_("Keep the new pages updated with future changes")
)
def clean(self):
cleaned_data = super().clean()
# Make sure the slug isn't already in use
slug = cleaned_data.get('new_slug')
        # Use the new parent page from the form, or the source page's parent if the field is empty
parent_page = cleaned_data.get('new_parent_page') or self.page.get_parent()
# check if user is allowed to create a page at given location.
if not parent_page.permissions_for_user(self.user).can_add_subpage():
self._errors['new_parent_page'] = self.error_class([
_("You do not have permission to copy to page \"%(page_title)s\"") % {'page_title': parent_page.specific_deferred.get_admin_display_title()}
])
# Count the pages with the same slug within the context of our copy's parent page
if slug and parent_page.get_children().filter(slug=slug).count():
self._errors['new_slug'] = self.error_class(
[_("This slug is already in use within the context of its parent page \"%s\"") % parent_page]
)
# The slug is no longer valid, hence remove it from cleaned_data
del cleaned_data['new_slug']
# Don't allow recursive copies into self
if cleaned_data.get('copy_subpages') and (self.page == parent_page or parent_page.is_descendant_of(self.page)):
self._errors['new_parent_page'] = self.error_class(
[_("You cannot copy a page into itself when copying subpages")]
)
return cleaned_data
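# Hedged usage sketch (view-side wiring assumed, not part of this module):
#
#     form = CopyForm(request.POST, page=page, user=request.user,
#                     can_publish=can_publish)
#     if form.is_valid():
#         new_parent = form.cleaned_data['new_parent_page']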
class PageViewRestrictionForm(BaseViewRestrictionForm):
class Meta:
model = PageViewRestriction
fields = ('restriction_type', 'password', 'groups')
class WagtailAdminPageForm(WagtailAdminModelForm):
comment_notifications = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
# Could be set to False by a subclass constructed by TabbedInterface
show_comments_toggle = True
class Meta:
# (dealing with Treebeard's tree-related fields that really should have
# been editable=False)
exclude = ['content_type', 'path', 'depth', 'numchild']
def __init__(self, data=None, files=None, parent_page=None, subscription=None, *args, **kwargs):
self.subscription = subscription
initial = kwargs.pop('initial', {})
if self.subscription:
initial['comment_notifications'] = subscription.comment_notifications
super().__init__(data, files, *args, initial=initial, **kwargs)
self.parent_page = parent_page
if not self.show_comments_toggle:
del self.fields['comment_notifications']
def save(self, commit=True):
# Save comment notifications updates to PageSubscription
if self.show_comments_toggle and self.subscription:
self.subscription.comment_notifications = self.cleaned_data['comment_notifications']
if commit:
self.subscription.save()
return super().save(commit=commit)
def is_valid(self):
comments = self.formsets.get('comments')
# Remove the comments formset if the management form is invalid
if comments and not comments.management_form.is_valid():
del self.formsets['comments']
return super().is_valid()
def clean(self):
cleaned_data = super().clean()
if 'slug' in self.cleaned_data:
if not Page._slug_is_available(
cleaned_data['slug'], self.parent_page, self.instance
):
self.add_error('slug', forms.ValidationError(_("This slug is already in use")))
# Check scheduled publishing fields
go_live_at = cleaned_data.get('go_live_at')
expire_at = cleaned_data.get('expire_at')
# Go live must be before expire
if go_live_at and expire_at:
if go_live_at > expire_at:
msg = _('Go live date/time must be before expiry date/time')
self.add_error('go_live_at', forms.ValidationError(msg))
self.add_error('expire_at', forms.ValidationError(msg))
# Expire at must be in the future
if expire_at and expire_at < timezone.now():
self.add_error('expire_at', forms.ValidationError(_('Expiry date/time must be in the future')))
# Don't allow an existing first_published_at to be unset by clearing the field
if 'first_published_at' in cleaned_data and not cleaned_data['first_published_at']:
del cleaned_data['first_published_at']
return cleaned_data
|
{
"content_hash": "328c0420520a9ea5fa14c0fc0490f2fb",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 156,
"avg_line_length": 45.51445086705202,
"alnum_prop": 0.6176022352044704,
"repo_name": "mixxorz/wagtail",
"id": "1a6b83f38f369d4da40bedc1d537b9a3648cb426",
"size": "7874",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/admin/forms/pages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3390"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "560244"
},
{
"name": "JavaScript",
"bytes": "508189"
},
{
"name": "Makefile",
"bytes": "1014"
},
{
"name": "Python",
"bytes": "5487927"
},
{
"name": "SCSS",
"bytes": "246385"
},
{
"name": "Shell",
"bytes": "6688"
},
{
"name": "TypeScript",
"bytes": "237634"
}
],
"symlink_target": ""
}
|
def permutations(string):
permutations = []
return perm_helper(string, permutations, 0)
def perm_helper(string, perm, index):
if len(string) == index:
return []
else:
current_perm = perm_helper(string, perm, index + 1)
print index
return generate_perm(string[index], current_perm)
def generate_perm(char, permutations):
new_perm = []
print char
if len(permutations) == 0:
return [char]
else:
for perm in permutations:
for i in range(len(perm)):
new = perm[:i] + char + perm[i:]
new_perm.append(new)
new_perm.append(perm+char)
return new_perm
print len(permutations('abcd'))
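# Hedged trace of the insertion scheme above (added note, not the author's):
# generate_perm('d', []) returns ['d']; generate_perm('c', ['d']) inserts 'c'
# at every position, giving ['cd', 'dc']; repeating for 'b' then 'a' yields
# 4! = 24 strings for 'abcd'.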
'''
1. Incorrect handling of base case - ended up using one char twice
'''
|
{
"content_hash": "626ba9ed84024d227c6ed2d059f0b521",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 26.133333333333333,
"alnum_prop": 0.5994897959183674,
"repo_name": "howardwkim/ctci",
"id": "216cb370e039135b6d376fd41b7edb83997577a0",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Chapter8_Recursion_Dynamic_Programming/p7_permutations_without_dups.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "42659"
}
],
"symlink_target": ""
}
|
"""`Trainable` interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
class Trainable(object):
"""Interface for objects that are trainable by, e.g., `Experiment`.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""Trains a model given training data `x` predictions and `y` labels.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`. Note: For classification, label values must
be integers representing the class index (i.e. values from 0 to
n_classes-1).
input_fn: Input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
If input_fn is set, `x`, `y`, and `batch_size` must be `None`.
      steps: Number of steps for which to train the model. If `None`, train
        forever. `steps` works incrementally: calling fit(steps=10) twice
        trains for 20 steps in total. If you do not want incremental
        behaviour, set `max_steps` instead. If set, `max_steps` must be
        `None`.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
train forever. If set, `steps` must be `None`.
Two calls to `fit(steps=100)` means 200 training
iterations. On the other hand, two calls to `fit(max_steps=100)` means
that the second call will not do any iteration since first call did
all 100 steps.
Returns:
`self`, for chaining.
"""
raise NotImplementedError
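# A hedged, minimal illustration of the steps/max_steps contract documented
# above (this class is not part of TensorFlow; it only counts steps):
class _CountingTrainable(Trainable):

  def __init__(self):
    self._global_step = 0

  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    if max_steps is not None:
      # A second call with the same max_steps is a no-op: already at that step.
      self._global_step = max(self._global_step, max_steps)
    elif steps is not None:
      # Incremental: two fit(steps=10) calls train 20 steps in total.
      self._global_step += steps
    return self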
|
{
"content_hash": "c78b4f0ad446fc528757429c6e7ab88c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 44.7962962962963,
"alnum_prop": 0.6614303431169904,
"repo_name": "martinwicke/tensorflow",
"id": "095d22f41f7445f94d31e7fcbb0e196f14f86ab6",
"size": "3109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/trainable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6641"
},
{
"name": "C",
"bytes": "95531"
},
{
"name": "C++",
"bytes": "14099336"
},
{
"name": "CMake",
"bytes": "108489"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "91226"
},
{
"name": "HTML",
"bytes": "533841"
},
{
"name": "Java",
"bytes": "57002"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833475"
},
{
"name": "Makefile",
"bytes": "23478"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "147571"
},
{
"name": "Python",
"bytes": "13775129"
},
{
"name": "Shell",
"bytes": "283430"
},
{
"name": "TypeScript",
"bytes": "750416"
}
],
"symlink_target": ""
}
|
import sys
import os
import shutil
import getopt
from pyarm import fig
from pyarm import clock as clock_mod
## Lionel's old format
#COMMAND_SLICE = slice(8, 14)
#ANGLES_SLICE = slice(2, 4)
#VELOCITIES_SLICE = slice(0, 2)
# Lionel's new format
COMMAND_SLICE = slice(18, 24)
ANGLES_SLICE = slice(10, 12)
VELOCITIES_SLICE = slice(8, 10)
TARGETS_ANGLES_SLICE = slice(2, 4)
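# Hedged sketch of how one log line maps through the slices above (the
# 24-column layout is inferred from the slice bounds, not documented here):
#
#   datas = [float(num) for num in line.split()]
#   commands = datas[COMMAND_SLICE]           # columns 18-23: muscle commands
#   angles = datas[ANGLES_SLICE]              # columns 10-11: joint angles
#   velocities = datas[VELOCITIES_SLICE]      # columns 8-9: joint velocities
#   target = datas[TARGETS_ANGLES_SLICE]      # columns 2-3: target angles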
def usage():
"""Print help message"""
print '''Usage : ./pyarm -d DELTA_TIME [-m MUSCLE] [-a ARM] [-A AGENT] [-g GUI]
[-D GUI_DELTA_TIME] [-s] [-l] FILENAME
Replay a simulation from FILENAME (experimental).
-m, --muscle
the muscle model to use (kambara, mitrovic, li or none)
-a, --arm
the arm model to use (kambara, mitrovic, li or sagittal)
-g, --gui
the graphical user interface to use (tk, gtk, cairo)
-d, --deltatime
timestep value in second (should be near to 0.005 seconds)
-D, --guideltatime
set the interval between two display in milliseconds (default = 0.04)
-s, --screencast
make a screencast
-h, --help
display this help and exit
'''
def main():
"""The main function.
The purpose of this function is to get the list of modules to load and
launch the simulator."""
# Parse options ###########################################################
muscle = 'none'
arm = 'li'
gui = 'tk'
delta_time = None
gui_delta_time = 0.04
screencast = False
unbounded = False
log_file = None
try:
opts, args = getopt.getopt(sys.argv[1:],
'm:a:g:d:D:sh',
["muscle=", "arm=", "gui=", "deltatime=",
"guideltatime=", "screencast", "help"])
except getopt.GetoptError, err:
# will print something like "option -x not recognized"
print str(err)
usage()
sys.exit(1)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-m", "--muscle"):
muscle = a
elif o in ("-a", "--arm"):
arm = a
elif o in ("-g", "--gui"):
gui = a
elif o in ("-d", "--deltatime"):
delta_time = float(a)
elif o in ("-D", "--guideltatime"):
gui_delta_time = float(a)
elif o in ("-s", "--screencast"):
screencast = True
else:
assert False, "unhandled option"
if muscle not in ('none', 'kambara', 'mitrovic', 'li') \
or arm not in ('kambara', 'mitrovic', 'li', 'sagittal') \
or gui not in ('tk', 'gtk', 'cairo'):
usage()
sys.exit(2)
try:
log_file = args[0]
except IndexError: # TODO
usage()
exit(3)
# Init ####################################################################
# Erase the screencast directory
if screencast:
shutil.rmtree('screencast', True)
os.mkdir('screencast')
# Muscle module
if muscle == 'none':
from pyarm.model.muscle import fake_muscle_model as muscle_module
elif muscle == 'kambara':
from pyarm.model.muscle import kambara_muscle_model as muscle_module
elif muscle == 'mitrovic':
from pyarm.model.muscle import mitrovic_muscle_model as muscle_module
elif muscle == 'li':
from pyarm.model.muscle import weiwei_muscle_model as muscle_module
else:
usage()
sys.exit(2)
# Arm module
if arm == 'kambara':
from pyarm.model.arm import kambara_arm_model as arm_module
elif arm == 'mitrovic':
from pyarm.model.arm import mitrovic_arm_model as arm_module
elif arm == 'li':
from pyarm.model.arm import weiwei_arm_model as arm_module
elif arm == 'sagittal':
from pyarm.model.arm import sagittal_arm_model as arm_module
else:
usage()
sys.exit(2)
# GUI module
if gui == 'tk':
from pyarm.gui import tkinter_gui as gui_mod
elif gui == 'gtk':
from pyarm.gui import gtk_gui as gui_mod
elif gui == 'cairo':
raise NotImplementedError()
else:
usage()
sys.exit(2)
# Init instances
arm = arm_module.ArmModel(unbounded)
muscle = muscle_module.MuscleModel()
clock = None
if delta_time is None:
print "error : -d option isn't set"
sys.exit(1)
else:
clock = clock_mod.SimulationtimeClock(delta_time)
gui = gui_mod.GUI(muscle, arm, clock, screencast)
# Miscellaneous initialization
fig.CLOCK = clock
former_gui_time = 0
gui.shoulder_point = [70, 70]
gui.scale = 1200. # px/m (pixels per meter)
# The mainloop ############################################################
fd = file(log_file, 'rU')
line = fd.readline()
while gui.running and line != '': # TODO
if not line.lstrip().startswith('#'):
datas = [float(num) for num in line.split()]
# Update clock
clock.update()
# Get input signals
commands = datas[COMMAND_SLICE]
# Update angles (physics)
arm.angles = datas[ANGLES_SLICE]
arm.velocities = datas[VELOCITIES_SLICE]
torque = [0, 0]
acceleration = [0, 0]
# Update target
gui.target_angle = datas[TARGETS_ANGLES_SLICE]
# Update GUI
current_time = clock.time
if current_time - former_gui_time >= gui_delta_time:
gui.update(commands, torque, acceleration)
former_gui_time = current_time
line = fd.readline()
fd.close()
# Quit ####################################################################
if screencast:
print "Making screencast..."
cmd = "ffmpeg2theora -v 9 -f image2 %(path)s/%%05d.%(format)s -o %(path)s/screencast.ogv" % {'path': gui.screencast_path, 'format': gui.screenshot_format}
print cmd
os.system(cmd)
if __name__ == '__main__':
main()
|
{
"content_hash": "a45031a1422350d228d30becb6d632be",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 162,
"avg_line_length": 28.14814814814815,
"alnum_prop": 0.5342105263157895,
"repo_name": "jeremiedecock/pyarm",
"id": "b3857c7abee48b5c696642a208715e7e022bd9c6",
"size": "6188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tool-replay.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125519"
},
{
"name": "Shell",
"bytes": "1449"
}
],
"symlink_target": ""
}
|
import climate
import numpy as np
import os
import scipy.io.wavfile
import segmentaxis
logging = climate.get_logger('extract-windows')
# relatively prime window sizes, primes < 2**k (window size in ms):
# [3 7 13 31 61] 127 (7.94) 251 (15.68) 509 (31.81) 1021 (63.81)
@climate.annotate(
width=('generate windows of N samples', 'option', None, int),
overlap=('overlap windows by R fraction of width', 'option', None, float),
samplerate=('die if any audio files are not N fps', 'option', None, int),
root='save outputs to this directory',
audio='extract windows from these wav files',
)
def main(width, overlap, samplerate, root, *audio):
if not samplerate:
samplerate = 16000
if not width:
width = 512
if not overlap:
overlap = 0.75
env = np.hanning(width)[None, :].astype('f')
for f in audio:
rate, samples = scipy.io.wavfile.read(f)
logging.info('%s: %d samples at %.1fkHz',
os.path.basename(f), len(samples), rate / 1000)
assert rate == samplerate
assert len(samples.shape) == 1
        # Work in float: in-place subtraction would fail on the int16 samples.
        samples = samples - samples.mean()
X = segmentaxis.segment_axis(samples, width, int(width * overlap)) * env
s = os.path.join(root, os.path.basename(f).replace('.wav', '-wave.npy'))
logging.info('saving %s: %s', s, X.shape)
np.save(s, X.astype('f'))
if __name__ == '__main__':
climate.call(main)
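# Hedged arithmetic note (not part of the tool): segment_axis is called with
# an overlap of int(width * overlap) samples, so the hop between window starts
# is width - int(width * overlap). With the 512-sample / 0.75 defaults that is
# a 128-sample hop, i.e. 8 ms at 16 kHz.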
|
{
"content_hash": "9d4f20ab804ebafe34a78dcc6b03a09b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 34.23809523809524,
"alnum_prop": 0.6161335187760779,
"repo_name": "lmjohns3/speech-experiment",
"id": "9ecce2748a66034ee5d9d675be43c4d114e328d9",
"size": "1461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audio/extract-windows.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42063"
},
{
"name": "Shell",
"bytes": "6933"
}
],
"symlink_target": ""
}
|
"""Tests for the PAM config checks."""
from absl import app
from grr_response_core.lib.parsers import linux_pam_parser
from grr_response_server.check_lib import checks_test_lib
from grr.test_lib import test_lib
class PamConfigTests(checks_test_lib.HostCheckTest):
@classmethod
def setUpClass(cls):
super(PamConfigTests, cls).setUpClass()
cls.LoadCheck("pam.yaml")
cls.parser = linux_pam_parser.PAMParser()
def testPamSshAccess(self):
"""Test we handle when PAM ssh service doesn't require an account."""
good1_contents = "account required pam_access.so\n"
good2_contents = "account required /lib/security/pam_access.so foo\n"
good3_contents = "account required /lib64/security/pam_access.so f b\n"
bad_contents = "account required test.so\n"
pam_good1 = {"/etc/pam.d/ssh": good1_contents}
pam_good2 = {"/etc/pam.d/ssh": good2_contents}
pam_good3 = {"/etc/pam.d/ssh": good3_contents}
pam_bad = {"/etc/pam.d/ssh": bad_contents}
sym = "Missing attribute: PAM ssh service must require an account."
found = ["Expected state was not found"]
results = self.RunChecks(
self.GenFileData("PamConfig", pam_bad, parser=self.parser))
self.assertCheckDetectedAnom("PAM-SSH-PAMACCESS", results, sym, found)
# Now the successful cases.
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good1, parser=self.parser))
self.assertCheckUndetected("PAM-SSH-PAMACCESS", results)
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good2, parser=self.parser))
self.assertCheckUndetected("PAM-SSH-PAMACCESS", results)
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good3, parser=self.parser))
self.assertCheckUndetected("PAM-SSH-PAMACCESS", results)
def testPamSshUnconditionalPermit(self):
"""Test we find when PAM ssh service allows an unconditional auth permit."""
good_contents = "auth done pam_deny.so\n"
bad_contents = "auth done pam_permit.so\n"
pam_good = {"/etc/pam.d/ssh": good_contents}
pam_bad = {"/etc/pam.d/ssh": bad_contents}
# Check the detection case.
sym = "Found: PAM ssh service has unconditional authentication."
found = ["In service 'ssh': auth done pam_permit.so"]
results = self.RunChecks(
self.GenFileData("PamConfig", pam_bad, parser=self.parser))
self.assertCheckDetectedAnom("PAM-SSH-UNCONDITIONAL-PERMIT", results, sym,
found)
# Now the pass case.
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good, parser=self.parser))
self.assertCheckUndetected("PAM-SSH-UNCONDITIONAL-PERMIT", results)
def testPamSshDefaultDeniesAuth(self):
"""Test we detect when PAM ssh service doesn't deny auth by default."""
good1_contents = "auth required pam_deny.so\n"
good2_contents = ("auth [success=ok new_authtok_reqd=ok default=die] "
"pam_unix.so try_first_pass\n")
bad_contents = "auth required pam_foobar.so\n"
pam_good1 = {"/etc/pam.d/ssh": good1_contents}
pam_good2 = {"/etc/pam.d/ssh": good2_contents}
pam_bad = {"/etc/pam.d/ssh": bad_contents}
# Check the detection case.
sym = "Missing attribute: PAM ssh service must default to denying auth."
found = ["Expected state was not found"]
results = self.RunChecks(
self.GenFileData("PamConfig", pam_bad, parser=self.parser))
self.assertCheckDetectedAnom("PAM-SSH-DEFAULT-DENIES-AUTH", results, sym,
found)
# Now the pass cases.
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good1, parser=self.parser))
self.assertCheckUndetected("PAM-SSH-DEFAULT-DENIES-AUTH", results)
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good2, parser=self.parser))
self.assertCheckUndetected("PAM-SSH-DEFAULT-DENIES-AUTH", results)
def testPamSshNoNullPasswords(self):
"""Test we find when PAM ssh service allows an unconditional auth permit."""
good_contents = "password sufficient pam_unix.so\n"
bad_contents = "password requisite /lib/security/pam_unix.so nullok\n"
pam_good = {"/etc/pam.d/ssh": good_contents}
pam_bad = {"/etc/pam.d/ssh": bad_contents}
# Check the detection case.
sym = "Found: PAM ssh service allows unix null password accounts to login."
found = [
"In service 'ssh': password requisite /lib/security/pam_unix.so "
"nullok"
]
results = self.RunChecks(
self.GenFileData("PamConfig", pam_bad, parser=self.parser))
self.assertCheckDetectedAnom("PAM-SSH-NO-NULL-PASSWORDS", results, sym,
found)
# Now the pass case.
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good, parser=self.parser))
self.assertCheckUndetected("PAM-SSH-NO-NULL-PASSWORDS", results)
def testPamSecureDefaults(self):
"""Test we detect when PAM ssh service doesn't deny auth by default."""
good_contents = """
auth required pam_deny.so foobar
password sufficient /lib64/security/pam_warn.so
session requisite /lib/security/pam_deny.so
"""
bad_contents = good_contents + """
auth required pam_permit.so
password done pam_foobar.so test args
"""
pam_good = {"/etc/pam.d/other": good_contents}
pam_bad = {"/etc/pam.d/other": bad_contents}
# Check the detection case.
sym = ("Found: PAM 'other'(the default) config should only be "
"used for denying/logging access.")
found = [
"In service 'other': auth required pam_permit.so",
"In service 'other': password done pam_foobar.so test args"
]
results = self.RunChecks(
self.GenFileData("PamConfig", pam_bad, parser=self.parser))
self.assertCheckDetectedAnom("PAM-SECURE-DEFAULTS", results, sym, found)
# Now the pass cases.
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good, parser=self.parser))
self.assertCheckUndetected("PAM-SECURE-DEFAULTS", results)
def testPamExternalConfigs(self):
"""Test we detect when PAM ssh service doesn't deny auth by default."""
pam_good = {"/etc/pam.d/ssh": "auth include other", "/etc/pam.d/other": ""}
pam_bad = {
"/etc/pam.d/ssh": "auth include non-existant",
"/etc/pam.d/other": "password include /tmp/non-existant"
}
# Check the detection case.
sym = "Found: PAM configuration refers to files outside of /etc/pam.d."
found = [
"/etc/pam.d/ssh -> /etc/pam.d/non-existant",
"/etc/pam.d/other -> /tmp/non-existant"
]
results = self.RunChecks(
self.GenFileData("PamConfig", pam_bad, parser=self.parser))
self.assertCheckDetectedAnom("PAM-EXTERNAL-CONFIG", results, sym, found)
# Now the pass cases.
results = self.RunChecks(
self.GenFileData("PamConfig", pam_good, parser=self.parser))
self.assertCheckUndetected("PAM-SECURE-DEFAULTS", results)
def testPamConfigPermissions(self):
"""Ensure check detects Pam config files that non-root users can edit."""
data = [
self.CreateStat("/etc/pam.d/hit-123", 50, 0, 0o0100640),
self.CreateStat("/etc/pam.d/hit-234", 0, 60, 0o0040777),
self.CreateStat("/etc/pam.d/no-hit-123", 0, 6000, 0o0100440),
self.CreateStat("/etc/pam.d/no-hit-234", 0, 0, 0o0100640),
self.CreateStat("/etc/pam.d/hit-345", 70, 0, 0o0100660)
]
results = self.GenResults(["LinuxPamConfigs"], [data])
check_id = "PAM-CONFIG-FILES-WRITABLE-BY-NON-ROOT-USER"
sym = ("Found: Files or folders in Pam configuration can be modified by "
"non-privileged users.")
found = [
"/etc/pam.d/hit-123 user: 50, group: 0, mode: -rw-r-----",
"/etc/pam.d/hit-234 user: 0, group: 60, mode: drwxrwxrwx",
"/etc/pam.d/hit-345 user: 70, group: 0, mode: -rw-rw----",
]
self.assertCheckDetectedAnom(check_id, results, sym, found)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
{
"content_hash": "82d81a058cc631c66ed6679165e07cef",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 80,
"avg_line_length": 42.984042553191486,
"alnum_prop": 0.6593243410469002,
"repo_name": "google/grr",
"id": "bdc357d23d7e159107e3febf5c89951b7138ce08",
"size": "8129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/checks/pam_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
}
|
from Tkinter import *
import random, sys, time
root = Tk()
root.wm_title("Flappy Bird")
width = 500
height = 500
canvas = Canvas(root, width = width, height = height, bg = "light blue")
canvas.pack()
pipe_speed = 5.0
player_jump = False
score = 0
player_pos = [20, height / 2, 40, height / 2 + 20]
canvas.create_rectangle(player_pos, fill = "blue", tag = "player")
score_id = canvas.create_text(40, 20)
jump_count = 0
player_speed = 3
canvas.insert(score_id, 0, "Score: " + str(score))
def jump_animate():
global player_pos, player_jump, jump_count, player_speed
canvas.move("player", 0, -player_speed)
player_pos[1] -= player_speed
player_pos[3] -= player_speed
player_speed -= 0.09
jump_count += 3
if jump_count >= 100:
player_jump = False
if player_jump:
canvas.after(10, jump_animate)
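# Hedged reading of the constants above (values from the code, interpretation
# assumed): on every 10 ms tick a jump lifts the bird while player_speed
# decays by 0.09 and jump_count grows by 3, so a jump ends after roughly 34
# ticks, after which the +0.15 per-tick gravity in move_items takes over.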
def game_over():
global pipe_speed, pipe_bottom, pipe_pos
canvas.delete("rect")
canvas.delete("player")
game_overid = canvas.create_text(width / 2, height / 2)
canvas.insert(game_overid, 0, "Game Over! Your score was " + str(score) + " pipes!")
pipe_speed = 0
pipe_pos = [width + 50, 0, width + 100, height]
pipe_bottom = pipe_pos
def draw_pipe():
global score
canvas.create_rectangle(list(pipe_pos), fill = "green", tag = "rect", outline = "green")
canvas.create_rectangle(list(pipe_bottom), fill = "green", tag = "rect", outline = "green")
if pipe_pos[2] <= -5:
score += 1
canvas.itemconfig(score_id, text = "Score: " + str(score))
generate_pipe()
canvas.after(1000, draw_pipe)
def generate_pipe():
global pipe_pos, pipe_bottom
pipe_hole = random.randrange(150, height - 150)
pipe_pos = [width - 50, 0, width, pipe_hole]
pipe_bottom = [width - 50, pipe_pos[3] + 135, width, height]
draw_pipe()
def check_hit():
if player_pos[0] <= pipe_pos[2] and player_pos[2] >= pipe_pos[0] and player_pos[1] <= pipe_pos[3] and player_pos[3] >= pipe_pos[1]:
game_over()
if player_pos[0] <= pipe_bottom[2] and player_pos[2] >= pipe_bottom[0] and player_pos[3] <= pipe_bottom[3] and player_pos[3] >= pipe_bottom[1]:
game_over()
if player_pos[3] >= height + 5:
game_over()
canvas.after(10, check_hit)
def move_items():
global player_pos, player_speed
canvas.move("rect", -pipe_speed, 0)
pipe_bottom[0] -= pipe_speed
pipe_bottom[2] -= pipe_speed
pipe_pos[0] -= pipe_speed
pipe_pos[2] -= pipe_speed
if player_jump == False:
canvas.move("player", 0, player_speed)
player_pos[1] += player_speed
player_pos[3] += player_speed
player_speed += 0.15
canvas.after(10, move_items)
def jump(press):
global player_jump, jump_count, player_speed
player_jump = True
player_speed = 3
jump_count = 0
jump_animate()
canvas.bind("<Button-1>", jump)
generate_pipe()
move_items()
check_hit()
root.mainloop()
|
{
"content_hash": "d92f8fc64ca9d96fff39ad437015d5e2",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 147,
"avg_line_length": 31.638297872340427,
"alnum_prop": 0.6183591123066577,
"repo_name": "ananyacleetus/15-112",
"id": "23aaeb7b328c2f0dc457d8f662e8cb09de665bb8",
"size": "2974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flappy_bird.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27600"
}
],
"symlink_target": ""
}
|
'''
Copyright 2016 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class NeonError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
def getMessage(self):
return self.message
|
{
"content_hash": "1d1e28bf6e8093a3a7e10a9af730feb8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 31.076923076923077,
"alnum_prop": 0.7091584158415841,
"repo_name": "alan-wu/neon",
"id": "dd9d7f832061b95c30312421c0b2097bdeadc82a",
"size": "808",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/opencmiss/neon/core/misc/neonerror.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "NSIS",
"bytes": "29534"
},
{
"name": "Python",
"bytes": "1449198"
}
],
"symlink_target": ""
}
|
"""CRUD = Create Read Update Delete"""
import json
import pytest
import responses
from globus_sdk._testing import load_response_set, register_response_set
@pytest.fixture(autouse=True, scope="session")
def _register_group_responses():
group_id = "efdab3ca-cff1-11e4-9b86-123139260d4e"
register_response_set(
"get_group?include=memberships",
dict(
default=dict(
service="groups",
path=f"/groups/{group_id}",
json={
"description": "Ipso facto",
"enforce_session": False,
"group_type": "regular",
"id": group_id,
"memberships": [
{
"group_id": group_id,
"identity_id": "ae332d86-d274-11e5-b885-b31714a110e9",
"membership_fields": {
"department": "Globus Testing",
"email": "sirosen@globus.org",
"field_of_science": "CS",
"institution": "Computation Institute",
"phone": "867-5309",
},
"role": "admin",
"status": "active",
"username": "sirosen@globusid.org",
},
{
"group_id": group_id,
"identity_id": "508e5ef6-cb9b-11e5-abe1-431ce3f42be1",
"membership_fields": {},
"role": "member",
"status": "invited",
"username": "sirosen@xsede.org",
},
{
"group_id": group_id,
"identity_id": "ae2f7f60-d274-11e5-b879-afc598dd59d4",
"membership_fields": {
"institution": "University of Chicago",
"name": "Bryce Allen",
"department": "Globus",
},
"role": "member",
"status": "active",
"username": "ballen@globusid.org",
},
{
"group_id": group_id,
"identity_id": "b0e8f24a-d274-11e5-8c98-8fd1e61c0a76",
"membership_fields": {
"current_project_name": "Petrel support",
"department": "UChicago",
},
"role": "member",
"status": "rejected",
"username": "smartin@globusid.org",
},
{
"group_id": group_id,
"identity_id": "6b487878-d2a1-11e5-b689-a7dd99513a65",
"membership_fields": {
"department": (
"Columbia University department "
"of Witchcraft and History"
),
},
"role": "member",
"status": "active",
"username": "jss2253@columbia.edu",
},
{
"group_id": group_id,
"identity_id": "ae2a1750-d274-11e5-b867-e74762c29f57",
"membership_fields": {},
"role": "member",
"status": "invited",
"username": "bjmc@globusid.org",
},
],
"name": "Claptrap Presents Claptrap's Rough Riders",
"parent_id": None,
"policies": {
"authentication_assurance_timeout": 28800,
"group_members_visibility": "managers",
"group_visibility": "private",
"is_high_assurance": False,
"join_requests": False,
"signup_fields": [],
},
"session_limit": 28800,
"session_timeouts": {
"ae341a98-d274-11e5-b888-dbae3a8ba545": {
"expire_time": "2022-02-08T06:05:54+00:00",
"expires_in": 0,
}
},
},
metadata={
"group_id": group_id,
"known_members": [
{
"role": "admin",
"status": "active",
"username": "sirosen@globusid.org",
},
{
"role": "member",
"status": "invited",
"username": "bjmc@globusid.org",
},
{
"role": "member",
"status": "rejected",
"username": "smartin@globusid.org",
},
],
},
)
),
)
def test_group_list(run_line):
"""
Runs globus group list and validates results
"""
meta = load_response_set("cli.groups").metadata
group1_id = meta["group1_id"]
group2_id = meta["group2_id"]
group1_name = meta["group1_name"]
group2_name = meta["group2_name"]
result = run_line("globus group list")
assert group1_id in result.output
assert group2_id in result.output
assert group1_name in result.output
assert group2_name in result.output
def test_group_show(run_line):
"""
Basic success test for globus group show
"""
meta = load_response_set("cli.groups").metadata
group1_id = meta["group1_id"]
group1_name = meta["group1_name"]
group1_description = meta["group1_description"]
result = run_line(f"globus group show {group1_id}")
assert group1_name in result.output
assert group1_description in result.output
def test_group_create(run_line):
"""
Basic success test for globus group create
"""
meta = load_response_set("cli.groups").metadata
group1_id = meta["group1_id"]
group1_name = meta["group1_name"]
group1_description = meta["group1_description"]
result = run_line(
f"globus group create '{group1_name}' --description '{group1_description}'"
)
assert f"Group {group1_id} created successfully" in result.output
def test_group_update(run_line):
"""
Basic success test for globus group update
Confirms existing values are included in the put document when
not specified by options
"""
meta = load_response_set("cli.groups").metadata
group1_id = meta["group1_id"]
group1_name = meta["group1_name"]
group1_description = meta["group1_description"]
new_name = "New Name"
new_description = "New Description"
# update name
result = run_line(f"globus group update {group1_id} --name '{new_name}'")
assert "Group updated successfully" in result.output
# confirm description is in the put document with the pre-existing value
last_req = responses.calls[-1].request
sent = json.loads(last_req.body)
assert sent["name"] == new_name
assert sent["description"] == group1_description
# update description
result = run_line(
f"globus group update {group1_id} --description '{new_description}'"
)
assert "Group updated successfully" in result.output
# confirm name is in the put document with the pre-existing value
last_req = responses.calls[-1].request
sent = json.loads(last_req.body)
assert sent["name"] == group1_name
assert sent["description"] == new_description
# update both name and description
result = run_line(
f"globus group update {group1_id} "
f"--name '{new_name}' --description '{new_description}'"
)
assert "Group updated successfully" in result.output
# confirm both fields use new value
last_req = responses.calls[-1].request
sent = json.loads(last_req.body)
assert sent["name"] == new_name
assert sent["description"] == new_description
def test_group_delete(run_line):
"""
Basic success test for globus group delete
"""
meta = load_response_set("cli.groups").metadata
group1_id = meta["group1_id"]
result = run_line(f"globus group delete {group1_id}")
assert "Group deleted successfully" in result.output
|
{
"content_hash": "384b064bebd7b8915fd5c2cc2ad263d2",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 83,
"avg_line_length": 36.87044534412956,
"alnum_prop": 0.4513011968815197,
"repo_name": "globus/globus-cli",
"id": "b012a0a4c1ef4e8591661b52f02392801d335658",
"size": "9107",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/functional/groups/test_group_crud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "425"
},
{
"name": "Makefile",
"bytes": "764"
},
{
"name": "Python",
"bytes": "746729"
},
{
"name": "Shell",
"bytes": "776"
}
],
"symlink_target": ""
}
|
import cv2
import numpy as np
import signal, os, subprocess, sys
import time
import threading
import requests
import io
from picamera.array import PiRGBArray
from picamera import PiCamera
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
from fractions import Fraction
#
GPIO.setup(18, GPIO.OUT)
camera = PiCamera()
camera.framerate = 32
#camera.framerate = Fraction(1,6)
raw_capture = PiRGBArray(camera)
output = PiRGBArray(camera)
time.sleep(0.1)
"""
#g = camera.awb_gains
g = (Fraction(1, 1), Fraction(1,1))
print g
camera.exposure_mode = 'off'
camera.shutter_speed = 500000
camera.awb_mode = 'off'
camera.awb_gains = g
camera.capture(output, format="bgr")
img = output.array
b,g,r = cv2.split(img)
cv2.imshow('frame',g)
key = cv2.waitKey(0) & 0xFF
"""
camera.awb_mode = 'off'
camera.awb_gains = (Fraction(1,3), Fraction(1,3))
camera.shutter_speed = 32000
pwm = GPIO.PWM(18, 100)
pwm.start(1)
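# HSV bounds for the red target (assumed from the variable names). Note that
# OpenCV stores 8-bit hue in [0, 179], so an upper hue of 330 effectively
# leaves hue unbounded above.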
redLower = np.array((0, 127, 58))
redUpper = np.array((330, 255, 255))
#camera.awb_gains = (Fraction(2), Fraction(2))
try:
for video_frame in camera.capture_continuous(raw_capture, format="bgr", use_video_port=True):
frame = video_frame.array
#Avisha: ball tracking
cv2.imshow('asdf', frame)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
#construct mask, dilations and erosions to remove noise
mask = cv2.inRange(hsv, redLower, redUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
#find contours in the mask, initialize current center (x,y)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
#only proceed if at least one contour was found
        if len(cnts) > 0:
            #find largest contour, use it to compute min enclosing circle
#and centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
#proceed if radius is min size --NEED TO FIGURE OUT
if radius > 1:
#draw the circle and centroid on the frame,
#then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
#update the points queue
#loop over the set of tracked points
# show the frame to our screen
#cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
#print camera.awb_gains
print (float(camera.awb_gains[0]), float(camera.awb_gains[1]))
print (camera.exposure_speed)
# gains are about 1/3, 1/3
# Our operations on the frame come here
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#gray = frame
#integral_table = cv2.integral(frame)
image_y = int(frame.shape[0])
image_x = int(frame.shape[1])
#cv2.imshow('temp', frame)
key = cv2.waitKey(30) & 0xFF
if key == ord('q'):
break
time.sleep(0.02)
# clear the stream in preparation for the next frame
raw_capture.truncate(0)
finally:
cv2.destroyAllWindows()
camera.close()
pwm.stop()
GPIO.cleanup()
|
{
"content_hash": "0076188615c656417af8ad169c72c029",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 97,
"avg_line_length": 29.392,
"alnum_prop": 0.5713119216113228,
"repo_name": "Cornell-iGEM/iGEM-Detection",
"id": "df9008a1f9ce85430ff822c105d6b8d865384ff5",
"size": "3674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calibration_withballdetection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50403"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name="vumi",
version="0.5.21",
url='http://github.com/praekelt/vumi',
license='BSD',
description="Super-scalable messaging engine for the delivery of SMS, "
"Star Menu and chat messages to diverse audiences in "
"emerging markets and beyond.",
long_description=open('README.rst', 'r').read(),
author='Praekelt Foundation',
author_email='dev@praekeltfoundation.org',
packages=find_packages() + [
'twisted.plugins',
],
package_data={'twisted.plugins': ['twisted/plugins/*.py']},
include_package_data=True,
scripts=[
'vumi/scripts/vumi_tagpools.py',
'vumi/scripts/vumi_redis_tools.py',
'vumi/scripts/vumi_model_migrator.py',
'vumi/scripts/vumi_count_models.py',
'vumi/scripts/vumi_list_messages.py',
],
install_requires=[
'zope.interface',
'Twisted>=13.1.0',
'txAMQP>=0.6.2',
'PyYAML',
'iso8601',
'pyOpenSSL',
'service_identity',
'txssmi>=0.3.0',
'wokkel',
'redis>=2.10.0',
'txredis',
'python-smpp>=0.1.5',
'pytz',
'riak>=2.1',
'txJSON-RPC==0.3.1',
'txTwitter>=0.1.4a',
'treq',
'confmodel>=0.2.0',
'hyperloglog',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
],
)
|
{
"content_hash": "989402a85fc77188dd3e2c7590158617",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 30.74576271186441,
"alnum_prop": 0.5529217199558986,
"repo_name": "vishwaprakashmishra/xmatrix",
"id": "8a31a7f9cb698df0074de29f0a9746370deb942b",
"size": "1814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "29735"
},
{
"name": "JavaScript",
"bytes": "5556"
},
{
"name": "Puppet",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "2968329"
},
{
"name": "Shell",
"bytes": "3435"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.compatibility import is_sequence
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.relational import Equality, Relational
from sympy.core.singleton import S
from sympy.core.symbol import Symbol, Dummy
from sympy.core.sympify import sympify
from sympy.functions.elementary.piecewise import (piecewise_fold,
Piecewise)
from sympy.logic.boolalg import BooleanFunction
from sympy.tensor.indexed import Idx
from sympy.sets.sets import Interval
from sympy.sets.fancysets import Range
from sympy.utilities import flatten
from sympy.utilities.iterables import sift
def _common_new(cls, function, *symbols, **assumptions):
"""Return either a special return value or the tuple,
(function, limits, orientation). This code is common to
both ExprWithLimits and AddWithLimits."""
function = sympify(function)
if hasattr(function, 'func') and isinstance(function, Equality):
lhs = function.lhs
rhs = function.rhs
return Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
for i, li in enumerate(limits):
if len(li) == 4:
function = function.subs(li[0], li[-1])
limits[i] = Tuple(*li[:-1])
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
# Any embedded piecewise functions need to be brought out to the
# top level. We only fold Piecewise that contain the integration
# variable.
reps = {}
symbols_of_integration = set([i[0] for i in limits])
for p in function.atoms(Piecewise):
if not p.has(*symbols_of_integration):
reps[p] = Dummy()
# mask off those that don't
function = function.xreplace(reps)
# do the fold
function = piecewise_fold(function)
# remove the masking
function = function.xreplace({v: k for k, v in reps.items()})
return function, limits, orientation
def _process_limits(*symbols):
"""Process the list of symbols and convert them to canonical limits,
storing them as Tuple(symbol, lower, upper). The orientation of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
"""
limits = []
orientation = 1
for V in symbols:
if isinstance(V, (Relational, BooleanFunction)):
variable = V.atoms(Symbol).pop()
V = (variable, V.as_set())
if isinstance(V, Symbol) or getattr(V, '_diff_wrt', False):
if isinstance(V, Idx):
if V.lower is None or V.upper is None:
limits.append(Tuple(V))
else:
limits.append(Tuple(V, V.lower, V.upper))
else:
limits.append(Tuple(V))
continue
elif is_sequence(V, Tuple):
if len(V) == 2 and isinstance(V[1], Range):
lo = V[1].inf
hi = V[1].sup
dx = abs(V[1].step)
V = [V[0]] + [0, (hi - lo)//dx, dx*V[0] + lo]
V = sympify(flatten(V)) # a list of sympified elements
if isinstance(V[0], (Symbol, Idx)) or getattr(V[0], '_diff_wrt', False):
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval): # 2 -> 3
# Interval
V[1:] = [V[1].start, V[1].end]
elif len(V) == 3:
# general case
                    if V[2] is None and V[1] is not None:
orientation *= -1
V = [newsymbol] + [i for i in V[1:] if i is not None]
if not isinstance(newsymbol, Idx) or len(V) == 3:
if len(V) == 4:
limits.append(Tuple(*V))
continue
if len(V) == 3:
if isinstance(newsymbol, Idx):
# Idx represents an integer which may have
# specified values it can take on; if it is
# given such a value, an error is raised here
# if the summation would try to give it a larger
# or smaller value than permitted. None and Symbolic
# values will not raise an error.
lo, hi = newsymbol.lower, newsymbol.upper
try:
if lo is not None and not bool(V[1] >= lo):
raise ValueError("Summation will set Idx value too low.")
except TypeError:
pass
try:
if hi is not None and not bool(V[2] <= hi):
raise ValueError("Summation will set Idx value too high.")
except TypeError:
pass
limits.append(Tuple(*V))
continue
if len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, orientation
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
pre = _common_new(cls, function, *symbols, **assumptions)
if type(pre) is tuple:
function, limits, _ = pre
else:
return pre
# limits must have upper and lower bounds; the indefinite form
# is not supported. This restriction does not apply to AddWithLimits
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the limit variables.
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def bound_symbols(self):
"""Return only variables that are dummy variables.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i, j, k
>>> Integral(x**i, (i, 1, 3), (j, 2), k).bound_symbols
[i, j]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits if len(l) != 1]
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
{y}
"""
# don't test for any special values -- nominal free symbols
# should be returned, e.g. don't return set() if the
# function is zero -- treat it like an unevaluated expression.
function, limits = self.function, self.limits
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
@property
def is_number(self):
"""Return True if the Sum has no free symbols, else False."""
return not self.free_symbols
def _eval_interval(self, x, a, b):
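        # Swap in (x, a, b) for the limit whose variable is x; all other
        # limits are left unchanged.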
limits = [(i if i[0] != x else (x, a, b)) for i in self.limits]
integrand = self.function
return self.func(integrand, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s, n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x, a
>>> Integral(a*x**2, x).subs(x, 4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
from sympy.core.function import AppliedUndef, UndefinedFunction
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
# which case we ignore. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
if new._diff_wrt:
xab = (new,)
else:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old, AppliedUndef) or isinstance(old, UndefinedFunction):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution can not create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
@property
def has_finite_limits(self):
"""
Returns True if the limits are known to be finite, either by the
explicit bounds, assumptions on the bounds, or assumptions on the
variables. False if known to be infinite, based on the bounds.
None if not enough information is available to determine.
Examples
========
>>> from sympy import Sum, Integral, Product, oo, Symbol
>>> x = Symbol('x')
>>> Sum(x, (x, 1, 8)).has_finite_limits
True
>>> Integral(x, (x, 1, oo)).has_finite_limits
False
>>> M = Symbol('M')
>>> Sum(x, (x, 1, M)).has_finite_limits
>>> N = Symbol('N', integer=True)
>>> Product(x, (x, 1, N)).has_finite_limits
True
See Also
========
has_reversed_limits
"""
ret_None = False
for lim in self.limits:
if len(lim) == 3:
if any(l.is_infinite for l in lim[1:]):
# Any of the bounds are +/-oo
return False
elif any(l.is_infinite is None for l in lim[1:]):
# Maybe there are assumptions on the variable?
if lim[0].is_infinite is None:
ret_None = True
else:
if lim[0].is_infinite is None:
ret_None = True
if ret_None:
return None
return True
@property
def has_reversed_limits(self):
"""
Returns True if the limits are known to be in reversed order, either
by the explicit bounds, assumptions on the bounds, or assumptions on the
variables. False if known to be in normal order, based on the bounds.
None if not enough information is available to determine.
Examples
========
>>> from sympy import Sum, Integral, Product, oo, Symbol
>>> x = Symbol('x')
>>> Sum(x, (x, 8, 1)).has_reversed_limits
True
>>> Sum(x, (x, 1, oo)).has_reversed_limits
False
>>> M = Symbol('M')
>>> Integral(x, (x, 1, M)).has_reversed_limits
>>> N = Symbol('N', integer=True, positive=True)
>>> Sum(x, (x, 1, N)).has_reversed_limits
False
>>> Product(x, (x, 2, N)).has_reversed_limits
>>> Product(x, (x, 2, N)).subs(N, N + 2).has_reversed_limits
False
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.has_empty_sequence
"""
ret_None = False
for lim in self.limits:
if len(lim) == 3:
var, a, b = lim
dif = b - a
if dif.is_extended_negative:
return True
elif dif.is_extended_nonnegative:
continue
else:
ret_None = True
else:
return None
if ret_None:
return None
return False
class AddWithLimits(ExprWithLimits):
r"""Represents unevaluated oriented additions.
Parent class for Integral and Sum.
"""
def __new__(cls, function, *symbols, **assumptions):
pre = _common_new(cls, function, *symbols, **assumptions)
if type(pre) is tuple:
function, limits, orientation = pre
else:
return pre
obj = Expr.__new__(cls, **assumptions)
arglist = [orientation*function] # orientation not used in ExprWithLimits
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def _eval_adjoint(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.conjugate(), *self.limits)
return None
def _eval_transpose(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_factor(self, **hints):
if 1 == len(self.limits):
summand = self.function.factor(**hints)
if summand.is_Mul:
out = sift(summand.args, lambda w: w.is_commutative \
and not set(self.variables) & w.free_symbols)
return Mul(*out[True])*self.func(Mul(*out[False]), \
*self.limits)
else:
summand = self.func(self.function, *self.limits[0:-1]).factor()
if not summand.has(self.variables[-1]):
return self.func(1, [self.limits[-1]]).doit()*summand
elif isinstance(summand, Mul):
return self.func(summand, self.limits[-1]).factor()
return self
def _eval_expand_basic(self, **hints):
from sympy.matrices.matrices import MatrixBase
summand = self.function.expand(**hints)
if summand.is_Add and summand.is_commutative:
return Add(*[self.func(i, *self.limits) for i in summand.args])
elif isinstance(summand, MatrixBase):
return summand.applyfunc(lambda x: self.func(x, *self.limits))
elif summand != self.function:
return self.func(summand, *self.limits)
return self
|
{
"content_hash": "445d571c332d49a66e933c43c7a087a7",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 94,
"avg_line_length": 34.957328385899814,
"alnum_prop": 0.5341789618936419,
"repo_name": "kaushik94/sympy",
"id": "1a7241d7be4a83bbfd98f63b406c7e9555229f13",
"size": "18842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/concrete/expr_with_limits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import datetime as dt
import os
from mock import patch
from decimal import Decimal
from io import StringIO
from django.core import mail
from django.test import TestCase
from apps.plea.models import Court, Case, CaseOffenceFilter
from .models import Result, ResultOffenceData, ResultOffence
from .management.commands.process_results import Command
class ResultTestCase(TestCase):
def setUp(self):
self.test_court = Court.objects.create(
court_code="1234",
region_code="51",
court_name="Test Court",
enabled=True
)
self.test_case1 = Case.objects.create(
case_number="12345678",
urn="51XX0000000",
email="test@test123.com",
sent=True
)
self.test_result1 = Result.objects.create(
urn="51XX0000000",
case_number="12345678",
case=self.test_case1,
date_of_hearing=dt.date.today(),
sent=False,
processed=False,
account_number="12345",
division="100"
)
self.offence1 = ResultOffence.objects.create(
result=self.test_result1,
offence_code="XXXXXXX",
offence_seq_number="001"
)
self.offence2 = ResultOffence.objects.create(
result=self.test_result1,
offence_code="YYYYYYY",
offence_seq_number="002"
)
self.f_code_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINAL CODE"
)
def test_has_valid_offences_with_non_whitelisted_offences(self):
CaseOffenceFilter.objects.create(filter_match="VVVVV", description="A test whitelist entry")
self.assertFalse(self.test_result1.has_valid_offences())
def test_has_valid_offences_with_all_whitelisted_offences(self):
CaseOffenceFilter.objects.create(filter_match="YYYYY", description="A test whitelist entry")
CaseOffenceFilter.objects.create(filter_match="XXXX", description="A test whitelist entry")
self.assertTrue(self.test_result1.has_valid_offences())
def test_can_result_succeeds(self):
result, _ = self.test_result1.can_result()
self.assertTrue(result)
def test_can_result_with_adjourned_offence_is_false(self):
self.f_code_offence.delete()
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="A",
result_short_title="ADJOURNED!"
)
result, _ = self.test_result1.can_result()
self.assertFalse(result)
def test_can_result_with_adjourned_and_withdrawn_offence_is_true(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="A",
result_short_title="ADJOURNED!"
)
self.withdrawn_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="WDRN",
result_short_title="FINE VICTIM SURCHARGE!"
)
result, _ = self.test_result1.can_result()
self.assertTrue(result)
def test_can_result_with_adjourned_and_final_codes_is_true(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="A",
result_short_title="ADJOURNED!"
)
self.withdrawn_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="WDRN",
result_short_title="WITHDRAWN!"
        )
        result, _ = self.test_result1.can_result()
        self.assertTrue(result)
def test_can_result_with_disqualified_code_is_false(self):
self.withdrawn_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="DDDT",
result_short_title="DISQUALIFIED!"
)
result, _ = self.test_result1.can_result()
self.assertFalse(result)
def test_can_result_missing_divcode_or_acc_number(self):
self.test_result1.account_number = ""
self.test_result1.save()
result, _ = self.test_result1.can_result()
self.assertFalse(result)
self.test_result1.account_number = "12345"
self.test_result1.division = ""
self.test_result1.save()
result, _ = self.test_result1.can_result()
self.assertFalse(result)
def test_get_offence_totals_fines(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE"
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(len(fines), 2)
def test_get_offence_totals_fines_wording_english(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"english words £75.00 more english",
result_short_title_welsh="dirwy",
result_wording_welsh=u"I dalu costau o £75.00 welsh"
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(fines[0], u"english words £75.00 more english")
def test_get_offence_totals_fines_wording_welsh(self):
self.test_case1.language = "cy"
self.test_case1.save()
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"english words £75.00 more english",
result_short_title_welsh="dirwy",
result_wording_welsh=u"I dalu costau o £75.00 welsh"
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(fines[0], u"I dalu costau o £75.00 welsh")
def test_get_offence_totals_fines_wording_welsh_but_no_welsh_text(self):
self.test_case1.language = "cy"
self.test_case1.save()
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"english words £75.00 more english",
result_short_title_welsh="dirwy",
result_wording_welsh=u""
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(fines[0], u"english words £75.00 more english")
def test_get_offence_totals_fines_wording_welsh_but_whitespace_welsh_text(self):
self.test_case1.language = "cy"
self.test_case1.save()
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"english words £75.00 more english",
result_short_title_welsh="dirwy",
result_wording_welsh=u" "
)
fines, _, _ = self.test_result1.get_offence_totals()
self.assertEquals(fines[0], u"english words £75.00 more english")
def test_get_offence_totals_endorsements(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="LEP",
result_short_title="ENDORSEMENT",
result_wording="Driving record endorsed with 3 points.",
)
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence2,
result_code="LEA",
result_short_title="ENDORSEMENT",
result_wording="Driving record endorsed with 6 points."
)
_, endorsements, _ = self.test_result1.get_offence_totals()
self.assertEquals(len(endorsements), 2)
self.assertEquals(endorsements[0], "Driving record endorsed with 3 points.")
self.assertEquals(endorsements[1], "Driving record endorsed with 6 points.")
def test_get_offence_totals_total(self):
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINE",
result_wording=u"asdfsadf £75.00 asasdfadfs"
)
self.adjourned_offence = ResultOffenceData.objects.create(
result_offence=self.offence2,
result_code="FVS",
result_short_title="FINE",
result_wording=u"asdfsadf £25.00 asasdfadfs"
)
_, _, total = self.test_result1.get_offence_totals()
self.assertEquals(Decimal("100"), total)
class ProcessResultsTestCase(TestCase):
def setUp(self):
self.test_court = Court.objects.create(
court_code="1234",
region_code="51",
court_name="Test Court",
enabled=True,
)
self.test_case1 = Case.objects.create(
case_number="12345678",
urn="51XX0000000",
sent=True,
email="frank.marsh@marshtech.com"
)
self.test_result1 = Result.objects.create(
urn="51XX0000000",
case_number="12345678",
date_of_hearing=dt.date.today(),
sent=False,
processed=False,
account_number="12345",
division="100"
)
self.offence1 = ResultOffence.objects.create(
result=self.test_result1
)
self.adjourned_result = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="A",
result_short_title="ADJOURNED"
)
self.withdrawn_result = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="WDRN",
result_short_title="WITHDRAWN"
)
self.final_result = ResultOffenceData.objects.create(
result_offence=self.offence1,
result_code="FCOST",
result_short_title="FINAL"
)
self.command = Command(stdout=StringIO())
self.opts = dict(
override_recipient="",
status_email_recipients="",
dry_run=False,
date="")
def test_matching_case_with_email_is_sent(self):
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertTrue(result.sent)
self.assertTrue(result.processed)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].to, [self.test_case1.email])
def test_option_override_recipient(self):
self.opts["override_recipient"] = "override@xyzyzyz.com"
self.command.handle(**self.opts)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].to, ["override@xyzyzyz.com"])
def test_option_dry_run(self):
self.opts["dry_run"] = True
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertFalse(result.processed)
self.assertFalse(result.sent)
def test_option_send_status_email(self):
self.opts["dry_run"] = True
self.opts["status_email_recipients"] = "statusemail@testxyz.com"
self.command.handle(**self.opts)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].to, ["statusemail@testxyz.com"])
@patch('os.environ.get')
def test_subject_includes_env(self, mock_env):
self.opts["dry_run"] = True
self.opts["status_email_recipients"] = "statusemail@testxyz.com"
mock_env.return_value = 'unit_test'
self.command.handle(**self.opts)
self.assertEquals(mail.outbox[0].subject, "[unit_test] make-a-plea resulting status email")
def test_no_supplied_email_no_result(self):
self.test_case1.email = None
self.test_case1.save()
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertTrue(result.processed)
self.assertFalse(result.sent)
def test_no_matching_case_no_email(self):
self.test_case1.delete()
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertTrue(result.processed)
self.assertFalse(result.sent)
def test_case_not_sent_result_not_sent(self):
"""
If the case does not have sent=True, do not send the result email
"""
self.test_case1.sent = False
self.test_case1.save()
self.command.handle(**self.opts)
result = Result.objects.get(id=self.test_result1.id)
self.assertFalse(result.sent)
self.assertTrue(result.processed)
def test_result_sent_not_resent(self):
self.test_result1.sent = True
self.test_result1.save()
self.command.handle(**self.opts)
        result = Result.objects.get(pk=self.test_result1.id)
        self.assertTrue(result.sent)
        self.assertEquals(mail.outbox, [])
def test_result_is_marked_sent_and_processed(self):
assert not self.test_result1.sent
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertTrue(result.sent)
self.assertTrue(result.processed)
def test_date_option(self):
assert self.test_result1.created.date() == dt.date.today()
self.opts["date"] = (dt.date.today()-dt.timedelta(7)).strftime("%d/%m/%Y")
self.command.handle(**self.opts)
result = Result.objects.get(pk=self.test_result1.id)
self.assertFalse(result.sent)
self.assertFalse(result.processed)
def test_forward_email_section_removed_from_plain_text_email(self):
self.command.handle(**self.opts)
search_text = "If you're unsure an email is from the Ministry of Justice"
self.assertNotIn(search_text, mail.outbox[0].body)
def test_forward_email_section_removed_from_html_email(self):
self.command.handle(**self.opts)
search_text = "If you're unsure an email is from the Ministry of Justice"
self.assertNotIn(search_text, mail.outbox[0].alternatives[0][0])
def test_result_for_welsh_case_sent_in_welsh(self):
self.test_case1.language = "cy"
self.test_case1.save()
self.command.handle(**self.opts)
assert mail.outbox[0].subject == 'Canlyniad Cofnodi Ple'
assert 'Eich llys: Test Court' in mail.outbox[0].body
|
{
"content_hash": "17e045bf3025a44e9db71a9ae78165d1",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 100,
"avg_line_length": 30.82045929018789,
"alnum_prop": 0.6141028246291405,
"repo_name": "ministryofjustice/manchester_traffic_offences_pleas",
"id": "9499c5b515bfe42904064377dbffecc4658e8eef",
"size": "14800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/result/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "867"
},
{
"name": "Gherkin",
"bytes": "10122"
},
{
"name": "HTML",
"bytes": "184454"
},
{
"name": "JavaScript",
"bytes": "52955"
},
{
"name": "Python",
"bytes": "792658"
},
{
"name": "SCSS",
"bytes": "43568"
},
{
"name": "Shell",
"bytes": "1766"
}
],
"symlink_target": ""
}
|
"""Composes one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
__all__ = ["LinearOperatorComposition"]
class LinearOperatorComposition(linear_operator.LinearOperator):
"""Composes one or more `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` with action defined by:
```
op_composed(x) := op1(op2(...(opJ(x)...))
```
If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the
[batch] matrix formed with the multiplication `A1 A2...AJ`.
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have
`N_j = M_{j+1}`, in which case the composed operator has shape equal to
`broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the
mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate
batch shapes broadcast. Even if the composed shape is well defined, the
composed operator's methods may fail due to lack of broadcasting ability in
the defining operators' methods.
```python
# Create a 2 x 2 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
operator = LinearOperatorComposition([operator_1, operator_2])
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.apply(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])
  operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 4 x 6 operators.
  operator_46 = LinearOperatorComposition([operator_45, operator_56])
# Create a shape [2, 3, 6, 2] vector.
x = tf.random_normal(shape=[2, 3, 6, 2])
operator.apply(x)
==> Shape [2, 3, 4, 2] Tensor
```
#### Performance
The performance of `LinearOperatorComposition` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite`.
These have the following meaning
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
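
  For illustration (a sketch, not from the original documentation), a caller
  could assert non-singularity up front, reusing `operator_1` and
  `operator_2` from the example above:

  ```python
  operator = LinearOperatorComposition(
      [operator_1, operator_2], is_non_singular=True)
  ```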
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
name=None):
r"""Initialize a `LinearOperatorComposition`.
`LinearOperatorComposition` is initialized with a list of operators
`[op_1,...,op_J]`. For the `apply` method to be well defined, the
composition `op_i.apply(op_{i+1}(x))` must be defined. Other methods have
similar constraints.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_o_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The composition of non-singular operators is always non-singular.")
is_non_singular = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = "_o_".join(operator.name for operator in operators)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorComposition, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension.assert_is_compatible_with(operator.range_dimension)
domain_dimension = operator.domain_dimension
matrix_shape = tensor_shape.TensorShape(
[self.operators[0].range_dimension,
self.operators[-1].domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if self.shape.is_fully_defined():
return ops.convert_to_tensor(
self.shape.as_list(), dtype=dtypes.int32, name="shape")
# Don't check the matrix dimensions. That would add unnecessary Asserts to
# the graph. Things will fail at runtime naturally if shapes are
# incompatible.
matrix_shape = array_ops.stack([
self.operators[0].range_dimension_tensor(),
self.operators[-1].domain_dimension_tensor()
])
# Dummy Tensor of zeros. Will never be materialized.
zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
for operator in self.operators[1:]:
zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
batch_shape = array_ops.shape(zeros)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _apply(self, x, adjoint=False, adjoint_arg=False):
# If self.operators = [A, B], and not adjoint, then
# apply_order_list = [B, A].
# As a result, we return A.apply(B.apply(x))
if adjoint:
apply_order_list = self.operators
else:
apply_order_list = list(reversed(self.operators))
result = apply_order_list[0].apply(
x, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in apply_order_list[1:]:
result = operator.apply(result, adjoint=adjoint)
return result
def _determinant(self):
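    # det(A1 A2 ... AJ) = det(A1) * det(A2) * ... * det(AJ)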
result = self.operators[0].determinant()
for operator in self.operators[1:]:
result *= operator.determinant()
return result
def _log_abs_determinant(self):
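    # log|det(A1 ... AJ)| = log|det(A1)| + ... + log|det(AJ)|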
result = self.operators[0].log_abs_determinant()
for operator in self.operators[1:]:
result += operator.log_abs_determinant()
return result
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# TODO(langmore) Implement solve using solve_ls if some intermediate
# operator maps to a high dimensional space.
# In that case, an exact solve may still be possible.
# If self.operators = [A, B], and not adjoint, then
# solve_order_list = [A, B].
# As a result, we return B.solve(A.solve(x))
if adjoint:
solve_order_list = list(reversed(self.operators))
else:
solve_order_list = self.operators
solution = solve_order_list[0].solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in solve_order_list[1:]:
solution = operator.solve(solution, adjoint=adjoint)
return solution
def _add_to_tensor(self, x):
return self.to_dense() + x
|
{
"content_hash": "d866a857fab460802b1dcfcca1aaf796",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 80,
"avg_line_length": 36.252873563218394,
"alnum_prop": 0.6716339040372015,
"repo_name": "chenjun0210/tensorflow",
"id": "b1557769b222cf5b1d4ce11210b2fb4ddebecacb",
"size": "10151",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/linalg/python/ops/linear_operator_composition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "177254"
},
{
"name": "C++",
"bytes": "22819759"
},
{
"name": "CMake",
"bytes": "140276"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "794578"
},
{
"name": "HTML",
"bytes": "595822"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "13906"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37240"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "210350"
},
{
"name": "Python",
"bytes": "20069220"
},
{
"name": "Shell",
"bytes": "331908"
},
{
"name": "TypeScript",
"bytes": "790493"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.shortcuts import reverse
from django.utils import timezone
import datetime
import decimal
from .models import Budget, BudgetCategory, Wallet, Transaction
class WalletViewsTests(TestCase):
def setUp(self):
# Create a user and give them some data to test with
self.user = User.objects.create_user(id=1, username='test_user', email='test_user@gmail.com', password='tester123')
self.user.save()
self.other_user = User.objects.create_user(id=2, username='test_user2', email='test_user2@gmail.com',
password='123tester')
self.other_user.save()
self.category = BudgetCategory.objects.create(id=1, user_id=1, user=self.user, name='groceries', is_income=False)
self.category.save()
self.wallet = Wallet.objects.create(id=1, user_id=1, name='checking', balance=100, created_time=timezone.now())
self.wallet.save()
self.other_wallet = Wallet.objects.create(id=2, user_id=2, name='debit card', balance=0, created_time=timezone.now())
self.other_wallet.save()
self.transaction = Transaction.objects.create(id=1, amount=10.50, category=self.category, category_id=1,
description='foo', created_time=timezone.now(),
wallet=self.wallet, wallet_id=1, user=self.user, user_id=1)
self.transaction.save()
self.budget = Budget.objects.create(budget_id=1, user_id=1, category_id=1, goal=100, month=datetime.date.today(), wallet_id=1, balance=0, user=self.user)
self.budget.save()
self.login()
def login(self):
self.client.login(username='test_user', password='tester123')
def tearDown(self):
self.budget.delete()
self.wallet.delete()
self.other_wallet.delete()
self.category.delete()
self.user.delete()
self.other_user.delete()
def test_get_all_wallets(self):
resp = self.client.get(reverse('wallets'))
self.assertEqual(resp.status_code, 200)
wallets = resp.context['wallets']
self.assertEqual(len(wallets), 1)
self.assertEqual(wallets[0], self.wallet)
def test_post_to_create_a_new_wallet(self):
resp = self.client.post(reverse('wallets'),
{
'name': 'credit card',
'balance': '10.00'
})
self.assertEqual(resp.status_code, 201)
self.assertTrue(len(resp.context['alerts']['success']) == 1)
wallets = resp.context['wallets']
self.assertEqual(len(wallets), 2)
wallet = Wallet.objects.get(user=self.user, name='credit card')
self.assertEqual(wallet.balance, decimal.Decimal('10.00'))
wallet.delete()
def test_post_to_create_existing_wallet(self):
resp = self.client.post(reverse('wallets'),
{
'name': 'checking',
'balance': '10.00'
})
self.assertEqual(resp.status_code, 200)
self.assertTrue(len(resp.context['alerts']['errors']) == 1)
self.assertEqual(len(Wallet.objects.filter(user=self.user)), 1)
wallet = Wallet.objects.get(id=1)
self.assertEqual(wallet.balance, decimal.Decimal('100'))
def test_new_wallet_view(self):
resp = self.client.get(reverse('new_wallet'))
self.assertEqual(resp.status_code, 200)
def test_get_specific_wallet(self):
resp = self.client.get('/pynny/wallets/1')
self.assertEqual(resp.status_code, 200)
wallet = resp.context['wallet']
self.assertEqual(wallet, self.wallet)
def test_get_nonexistent_wallet(self):
resp = self.client.get('/pynny/wallets/95')
self.assertEqual(resp.status_code, 404)
self.assertEqual(len(resp.context['alerts']['errors']), 1)
def test_get_someone_elses_wallet(self):
resp = self.client.get('/pynny/wallets/2')
self.assertEqual(resp.status_code, 403)
self.assertEqual(len(resp.context['alerts']['errors']), 1)
def test_post_to_delete_a_wallet(self):
resp = self.client.post(
'/pynny/wallets/1',
{
'action': 'delete'
}
)
self.assertEqual(resp.status_code, 200)
with self.assertRaises(Wallet.DoesNotExist):
Wallet.objects.get(id=1)
def test_edit_a_wallet_view(self):
resp = self.client.post(
'/pynny/wallets/1',
{
'action': 'edit'
}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.context['wallet'], self.wallet)
def test_edit_completion(self):
resp = self.client.post(
'/pynny/wallets/1',
{
'action': 'edit_complete',
'name': 'savings'
}
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Wallet.objects.get(id=1).name, 'savings')
self.assertEqual(len(resp.context['alerts']['success']), 1)
|
{
"content_hash": "a0d79c13656e5688171b80a80074baed",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 161,
"avg_line_length": 40.18796992481203,
"alnum_prop": 0.5779232927970065,
"repo_name": "zcking/Pynny",
"id": "68a993fd0ed1142f9b4cef7888c7c9e5b9697927",
"size": "5345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/pynny/test_wallet_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "164000"
},
{
"name": "HTML",
"bytes": "127545"
},
{
"name": "JavaScript",
"bytes": "203"
},
{
"name": "Python",
"bytes": "105317"
}
],
"symlink_target": ""
}
|
import unittest
from cubes.browser import *
from cubes.errors import *
from .common import CubesTestCaseBase
class CutsTestCase(CubesTestCaseBase):
def setUp(self):
super(CutsTestCase, self).setUp()
self.workspace = self.create_workspace(model="browser_test.json")
self.cube = self.workspace.cube("transactions")
self.dim_date = self.cube.dimension("date")
def test_cut_depth(self):
dim = self.cube.dimension("date")
self.assertEqual(1, PointCut(dim, [1]).level_depth())
self.assertEqual(3, PointCut(dim, [1, 1, 1]).level_depth())
self.assertEqual(1, RangeCut(dim, [1], [1]).level_depth())
self.assertEqual(3, RangeCut(dim, [1, 1, 1], [1]).level_depth())
self.assertEqual(1, SetCut(dim, [[1], [1]]).level_depth())
self.assertEqual(3, SetCut(dim, [[1], [1], [1, 1, 1]]).level_depth())
def test_cut_from_dict(self):
# d = {"type":"point", "path":[2010]}
# self.assertRaises(Exception, cubes.cut_from_dict, d)
d = {"type": "point", "path": [2010], "dimension": "date",
"level_depth": 1, "hierarchy": None, "invert": False,
"hidden": False}
cut = cut_from_dict(d)
tcut = PointCut("date", [2010])
self.assertEqual(tcut, cut)
self.assertEqual(dict(d), tcut.to_dict())
self._assert_invert(d, cut, tcut)
d = {"type": "range", "from": [2010], "to": [2012, 10], "dimension":
"date", "level_depth": 2, "hierarchy": None, "invert": False,
"hidden": False}
cut = cut_from_dict(d)
tcut = RangeCut("date", [2010], [2012, 10])
self.assertEqual(tcut, cut)
self.assertEqual(dict(d), tcut.to_dict())
self._assert_invert(d, cut, tcut)
d = {"type": "set", "paths": [[2010], [2012, 10]], "dimension": "date",
"level_depth": 2, "hierarchy": None, "invert": False,
"hidden": False}
cut = cut_from_dict(d)
tcut = SetCut("date", [[2010], [2012, 10]])
self.assertEqual(tcut, cut)
self.assertEqual(dict(d), tcut.to_dict())
self._assert_invert(d, cut, tcut)
self.assertRaises(ArgumentError, cut_from_dict, {"type": "xxx"})
def _assert_invert(self, d, cut, tcut):
cut.invert = True
tcut.invert = True
d["invert"] = True
self.assertEqual(tcut, cut)
self.assertEqual(dict(d), tcut.to_dict())
class StringConversionsTestCase(unittest.TestCase):
def test_cut_string_conversions(self):
cut = PointCut("foo", ["10"])
self.assertEqual("foo:10", str(cut))
self.assertEqual(cut, cut_from_string("foo:10"))
cut = PointCut("foo", ["123_abc_", "10", "_"])
self.assertEqual("foo:123_abc_,10,_", str(cut))
self.assertEqual(cut, cut_from_string("foo:123_abc_,10,_"))
cut = PointCut("foo", ["123_ abc_"])
self.assertEqual(r"foo:123_ abc_", str(cut))
self.assertEqual(cut, cut_from_string("foo:123_ abc_"))
cut = PointCut("foo", ["a-b"])
self.assertEqual("foo:a\-b", str(cut))
self.assertEqual(cut, cut_from_string("foo:a\-b"))
cut = PointCut("foo", ["a+b"])
self.assertEqual("foo:a+b", str(cut))
self.assertEqual(cut, cut_from_string("foo:a+b"))
def test_special_characters(self):
self.assertEqual('\\:q\\-we,a\\\\sd\\;,100',
string_from_path([":q-we", "a\\sd;", 100]))
def test_string_from_path(self):
self.assertEqual('qwe,asd,100',
string_from_path(["qwe", "asd", 100]))
self.assertEqual('', string_from_path([]))
self.assertEqual('', string_from_path(None))
def test_path_from_string(self):
self.assertEqual(["qwe", "asd", "100"],
path_from_string('qwe,asd,100'))
self.assertEqual([], path_from_string(''))
self.assertEqual([], path_from_string(None))
def test_set_cut_string(self):
cut = SetCut("foo", [["1"], ["2", "3"], ["qwe", "asd", "100"]])
self.assertEqual("foo:1;2,3;qwe,asd,100", str(cut))
self.assertEqual(cut, cut_from_string("foo:1;2,3;qwe,asd,100"))
# single-element SetCuts cannot go round trip, they become point cuts
cut = SetCut("foo", [["a+b"]])
self.assertEqual("foo:a+b", str(cut))
self.assertEqual(PointCut("foo", ["a+b"]), cut_from_string("foo:a+b"))
cut = SetCut("foo", [["a-b"]])
self.assertEqual("foo:a\-b", str(cut))
self.assertEqual(PointCut("foo", ["a-b"]), cut_from_string("foo:a\-b"))
def test_range_cut_string(self):
cut = RangeCut("date", ["2010"], ["2011"])
self.assertEqual("date:2010-2011", str(cut))
self.assertEqual(cut, cut_from_string("date:2010-2011"))
cut = RangeCut("date", ["2010"], None)
self.assertEqual("date:2010-", str(cut))
cut = cut_from_string("date:2010-")
if cut.to_path:
self.fail('there should be no to path, is: %s' % (cut.to_path, ))
cut = RangeCut("date", None, ["2010"])
self.assertEqual("date:-2010", str(cut))
cut = cut_from_string("date:-2010")
if cut.from_path:
self.fail('there should be no from path is: %s' % (cut.from_path, ))
cut = RangeCut("date", ["2010", "11", "12"], ["2011", "2", "3"])
self.assertEqual("date:2010,11,12-2011,2,3", str(cut))
self.assertEqual(cut, cut_from_string("date:2010,11,12-2011,2,3"))
cut = RangeCut("foo", ["a+b"], ["1"])
self.assertEqual("foo:a+b-1", str(cut))
self.assertEqual(cut, cut_from_string("foo:a+b-1"))
cut = RangeCut("foo", ["a-b"], ["1"])
self.assertEqual(r"foo:a\-b-1", str(cut))
self.assertEqual(cut, cut_from_string(r"foo:a\-b-1"))
def test_hierarchy_cut(self):
cut = PointCut("date", ["10"], "dqmy")
self.assertEqual("date@dqmy:10", str(cut))
self.assertEqual(cut, cut_from_string("date@dqmy:10"))
class BrowserTestCase(CubesTestCaseBase):
def setUp(self):
super(BrowserTestCase, self).setUp()
self.workspace = self.create_workspace(model="model.json")
self.cube = self.workspace.cube("contracts")
class AggregationBrowserTestCase(BrowserTestCase):
def setUp(self):
super(AggregationBrowserTestCase, self).setUp()
self.browser = AggregationBrowser(self.cube)
def test_cutting(self):
full_cube = Cell(self.cube)
self.assertEqual(self.cube, full_cube.cube)
self.assertEqual(0, len(full_cube.cuts))
cell = full_cube.slice(PointCut("date", [2010]))
self.assertEqual(1, len(cell.cuts))
cell = cell.slice(PointCut("supplier", [1234]))
cell = cell.slice(PointCut("cpv", [50, 20]))
self.assertEqual(3, len(cell.cuts))
self.assertEqual(self.cube, cell.cube)
# Adding existing slice should result in changing the slice properties
cell = cell.slice(PointCut("date", [2011]))
self.assertEqual(3, len(cell.cuts))
def test_multi_slice(self):
full_cube = Cell(self.cube)
cuts_list = (
PointCut("date", [2010]),
PointCut("cpv", [50, 20]),
PointCut("supplier", [1234]))
cell_list = full_cube.multi_slice(cuts_list)
self.assertEqual(3, len(cell_list.cuts))
self.assertRaises(CubesError, full_cube.multi_slice, {})
def test_get_cell_dimension_cut(self):
full_cube = Cell(self.cube)
cell = full_cube.slice(PointCut("date", [2010]))
cell = cell.slice(PointCut("supplier", [1234]))
cut = cell.cut_for_dimension("date")
self.assertEqual(str(cut.dimension), "date")
self.assertRaises(NoSuchDimensionError, cell.cut_for_dimension, "someunknown")
cut = cell.cut_for_dimension("cpv")
self.assertEqual(cut, None)
def test_hierarchy_path(self):
dim = self.cube.dimension("cpv")
hier = dim.hierarchy()
levels = hier.levels_for_path([])
self.assertEqual(len(levels), 0)
levels = hier.levels_for_path(None)
self.assertEqual(len(levels), 0)
levels = hier.levels_for_path([1, 2, 3, 4])
self.assertEqual(len(levels), 4)
names = [level.name for level in levels]
self.assertEqual(names, ['division', 'group', 'class', 'category'])
self.assertRaises(HierarchyError, hier.levels_for_path,
[1, 2, 3, 4, 5, 6, 7, 8])
def test_hierarchy_drilldown_levels(self):
dim = self.cube.dimension("cpv")
hier = dim.hierarchy()
levels = hier.levels_for_path([], drilldown=True)
self.assertEqual(len(levels), 1)
self.assertEqual(levels[0].name, 'division')
levels = hier.levels_for_path(None, drilldown=True)
self.assertEqual(len(levels), 1)
self.assertEqual(levels[0].name, 'division')
def test_slice_drilldown(self):
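        # Each successive drilldown appends one more level to the date path.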
cut = PointCut("date", [])
original_cell = Cell(self.cube, [cut])
cell = original_cell.drilldown("date", 2010)
self.assertEqual([2010], cell.cut_for_dimension("date").path)
cell = cell.drilldown("date", 1)
self.assertEqual([2010, 1], cell.cut_for_dimension("date").path)
cell = cell.drilldown("date", 2)
self.assertEqual([2010, 1, 2], cell.cut_for_dimension("date").path)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AggregationBrowserTestCase))
suite.addTest(unittest.makeSuite(CellsAndCutsTestCase))
return suite
|
{
"content_hash": "9f5aba234bad47d280dc089b605bec85",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 86,
"avg_line_length": 37.207692307692305,
"alnum_prop": 0.5801116394459376,
"repo_name": "she11c0de/cubes",
"id": "878c3ef66d0cafcfb6f7ba0f4e2e5f5e5d3f4cf4",
"size": "9674",
"binary": false,
"copies": "1",
"ref": "refs/heads/unicode-fix",
"path": "tests/test_browser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38599"
},
{
"name": "HTML",
"bytes": "66157"
},
{
"name": "JavaScript",
"bytes": "362898"
},
{
"name": "Python",
"bytes": "795339"
},
{
"name": "VimL",
"bytes": "2215"
}
],
"symlink_target": ""
}
|
from .admins import ReadonlyAdmin
from .models import CustomModelPage
|
{
"content_hash": "2f76612d2aeca852fe07033a1c915f2d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 35,
"avg_line_length": 35,
"alnum_prop": 0.8571428571428571,
"repo_name": "idlesign/django-etc",
"id": "8e2ed62cf5c7ff2c8781bf65ada26e0c427627d5",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etc/admin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "316"
},
{
"name": "Python",
"bytes": "44252"
}
],
"symlink_target": ""
}
|
import socket
import fcntl
import struct
import sl_metro
import time
def param(ifname):
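    # Return the IPv4 address assigned to the given interface, or False when
    # the interface has no address (the SIOCGIFADDR ioctl raises IOError).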
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
            struct.pack('256s', ifname[:15].encode('utf-8'))
)[20:24])
except IOError:
return False
def waiting(ifname):
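    # Poll roughly every 0.5 s (via the sl_metro.Metro timer) until the
    # interface reports an address, then pause briefly before returning.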
metro = sl_metro.Metro(0.5)
is_waiting = True
while is_waiting:
if metro.update():
            ip = param(ifname)
            if ip is False:
                print("waiting for IP")
            else:
                is_waiting = False
                print("Got IP: %s" % ip)
                time.sleep(3)
|
{
"content_hash": "225e4f0847a252e32ca255a87f0154d3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 60,
"avg_line_length": 19.90909090909091,
"alnum_prop": 0.578386605783866,
"repo_name": "DiamondOhana/jphacks",
"id": "6f5a64f2ff6f505850482f5c74e7691c650283dc",
"size": "806",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python_main/sonilab/get_ip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1761"
},
{
"name": "HTML",
"bytes": "785"
},
{
"name": "PHP",
"bytes": "35220"
},
{
"name": "Python",
"bytes": "48892"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
}
|
from django.db.models import get_model, get_models
from django.http import HttpResponse
from django.core.urlresolvers import reverse as _reverse
from serializers import ModelSerializer, Field, RelatedField
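# Content types for each supported serialization format suffix.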
mime_types = {
'json': 'application/json',
'xml': 'application/xml',
'yaml': 'application/yaml',
'csv': 'text/csv',
'html': 'text/html'
}
def reverse(viewname, kwargs=None, request=None, format=None):
"""
Like the regular 'reverse' function, but returns fully qualified urls,
and takes an additional 'format' argument.
"""
    kwargs = dict(kwargs or {})  # copy so the caller's dict is not mutated
    if format:
        kwargs['format'] = format
    url = _reverse(viewname, kwargs=kwargs)
return request.build_absolute_uri(url)
def url_for_object(obj, request, format):
"""
Return the canonical URL for a given model instance.
Use the request to form an absolute URL, rather than a relative one,
and use a format suffix (eg '.json') if one is provided.
"""
app_name = obj._meta.app_label
model_name = obj._meta.object_name.lower()
kwargs = {'app_name': app_name, 'model': model_name, 'pk': obj.pk}
return reverse('autoapi:instance', kwargs=kwargs,
request=request, format=format)
def get_api_root(request, format):
"""
Return a dict of `model label` -> `url`.
"""
ret = {}
for model in get_models():
app_name = model._meta.app_label
model_name = model._meta.object_name.lower()
kwargs = {'app_name': app_name, 'model': model_name}
url = reverse('autoapi:list', kwargs=kwargs,
request=request, format=format)
ret[app_name + '.' + model_name] = url
return ret
class URLField(Field):
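    # Renders as the canonical URL of the object being serialized.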
def field_to_native(self, obj, field_name):
request = self.context['request']
format = self.context['format']
return url_for_object(obj, request, format)
class URLRelatedField(RelatedField):
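    # Renders related objects as hyperlinks instead of primary keys.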
def to_native(self, obj):
request = self.context['request']
format = self.context['format']
return url_for_object(obj, request, format)
class APISerializer(ModelSerializer):
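    # Adds a canonical 'url' field and hyperlinks all model relations.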
url = URLField()
class Meta:
exclude = ('pk', 'id', 'password')
def get_related_field(self, model_field):
return URLRelatedField()
def root(request, format=None):
root = get_api_root(request, format)
format = format or 'html'
content = APISerializer().serialize(format, root)
return HttpResponse(content, mime_types[format])
def list(request, app_name, model, format=None):
queryset = get_model(app_name, model)._default_manager.all()
context = {'request': request, 'format': format}
format = format or 'html'
content = APISerializer().serialize(format, queryset, context)
return HttpResponse(content, mime_types[format])
def instance(request, app_name, model, pk, format=None):
instance = get_model(app_name, model)._default_manager.get(pk=pk)
context = {'request': request, 'format': format}
format = format or 'html'
content = APISerializer().serialize(format, instance, context)
return HttpResponse(content, mime_types[format])
|
{
"content_hash": "4d02324a05c2e22ae027cb6f2b469294",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 74,
"avg_line_length": 31.858585858585858,
"alnum_prop": 0.6537729866835764,
"repo_name": "tomchristie/django-auto-api",
"id": "a6595fb3e7db572699aba90e53d813428ea8f966",
"size": "3154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoapi/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6972"
}
],
"symlink_target": ""
}
|
"""
The Ironic Management Service
"""
import logging
import sys
from oslo_config import cfg
from oslo_log import log
from oslo_service import service
from ironic.common import service as ironic_service
CONF = cfg.CONF
def main():
    # Parse config file and command line options, then start logging
ironic_service.prepare_service(sys.argv)
mgr = ironic_service.RPCService(CONF.host,
'ironic.conductor.manager',
'ConductorManager')
LOG = log.getLogger(__name__)
LOG.debug("Configuration:")
CONF.log_opt_values(LOG, logging.DEBUG)
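    # Launch the conductor RPC service and block until it is stopped.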
launcher = service.launch(CONF, mgr)
launcher.wait()
|
{
"content_hash": "c5c0bddcce779e53a69e8b54b318fe83",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 23,
"alnum_prop": 0.6507246376811594,
"repo_name": "naterh/ironic",
"id": "2867d3e2c3e9796cb5925d7465293fcb882e4d64",
"size": "1375",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ironic/cmd/conductor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3558973"
}
],
"symlink_target": ""
}
|