code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# Thin launcher module: re-export the builder's entry point under the
# conventional name 'main'.
import builder

main = builder.main
|
bin3/bobo
|
bobo/__init__.py
|
Python
|
apache-2.0
| 35
|
from audio.io import *
|
Curly-Mo/audio
|
__init__.py
|
Python
|
mit
| 23
|
__author__ = 'wbtang'

import datetime
import os

# Workspace layout; all paths are relative to the directory the scripts
# run from.
folder_src = '../src'
folder_log = '../log'
folder_release = '../release'
folder_final = '../ex_final'
folder_final_ex1 = '%s/%s' % (folder_final, 'ex1')
folder_final_ex2 = '%s/%s' % (folder_final, 'ex2')
folder_final_ex3 = '%s/%s' % (folder_final, 'ex3')

# Experiment binary and its command file (double underscore keeps the raw
# names module-private).
__name_exe = 'p2p_vod.exe'
__name_parser_ini = 'cmd.ini'
file_exe = '%s/%s' % (folder_release, __name_exe)
file_parser_ini = '%s/%s' % (folder_src, __name_parser_ini)
file_ex3_dump = 'ex3.dump'
file_ex2_log = 'monster_two_world.xls'
file_figure_extend = '.jpg'

# Known graph kinds; graph_type must be reassigned to one of these keys
# before folder_graph()/verify_graph_type() are meaningful.
graphs = {'BTC': True, 'PA': True, 'TWITTER': True}
graph_type = 'I DO NOT KNOW'
def folder_graph():
    """Return the output directory for the currently selected graph type."""
    return '/'.join((folder_final, graph_type))
def get_time():
    """Current local time as a 'YYYY.MM.DD_hh.mm.ss' timestamp string."""
    now = datetime.datetime.now()
    return now.strftime('%Y.%m.%d_%H.%M.%S')
def run_exe(exe, opt, async):
    """Run *exe* with the option string *opt* through os.system.

    When async is true the command is launched in the background: Windows
    uses 'start', elsewhere the shell '&' form is used with stdout/stderr
    appended to a 'trace_screen' file. Otherwise the call blocks until the
    command finishes.

    NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
    module only runs under Python 2 / <= 3.6 as written.
    """
    if async == True:
        if is_windows():
            # 'start' spawns a detached process on Windows.
            cmd = 'start %s %s' % (exe, opt)
        else:
            cmd = 'exec %s %s >> trace_screen &' % (exe, opt)
    else:
        if is_windows():
            cmd = '%s %s' % (exe, opt)
        else:
            cmd = 'exec %s %s' % (exe, opt)
    print('cmd: %s' % cmd)
    os.system(cmd)
def run_py_script(script, async):
    """Run a python *script* (resolved to an absolute path) via run_exe.

    NOTE(review): the 'async' parameter name is Python 2 only (reserved
    keyword from Python 3.7).
    """
    script = get_full_path(script)
    run_exe('python', script, async)
def get_full_path(file):
    """Return the absolute path of *file* with backslashes normalized to '/'.

    The original rebuilt the string one character at a time with slicing
    (quadratic); str.replace does the same normalization in one pass.
    """
    return os.path.abspath(file).replace('\\', '/')
def is_windows():
    """True when running under native Windows (CPython reports 'nt')."""
    windows_os_name = 'nt'
    return windows_os_name == os.name
def default_processor():
    """Default worker count: 3 on Windows ('nt'), 15 on other platforms."""
    return 3 if os.name == 'nt' else 15
def make_script(scripts, jobs):
    """Distribute *jobs* round-robin over generated python *scripts*.

    Script i receives jobs i, i+len(scripts), i+2*len(scripts), ... and is
    launched asynchronously right after being written.

    NOTE(review): 'file' shadows the Python 2 builtin; the 'async=True'
    keyword call is Python 2 only.
    """
    for i in range(len(scripts)):
        file = open(scripts[i], "w")
        file.write('import os\n')
        for j in range(i, len(jobs), len(scripts)):
            # Each generated line runs the experiment binary with one job's options.
            file.write('os.system(\'%s %s\')\n' % (get_full_path(file_exe), jobs[j]))
        file.close()
        run_py_script(scripts[i], async=True)
scale = 1.
class PeerData:
    """One per-peer record parsed from a tab-separated simulation log line."""

    # Class-level defaults, kept for parity with the original definition.
    peer_id = -1
    enter_time = -1
    finish_time = -1
    exit_time = -1
    start_position = -1
    latency = -1
    interrupts = -1
    smoothness = -1

    def __init__(self, data):
        """data: sequence of 8 string fields in log column order."""
        fields = (int(data[0]), int(data[1]), int(data[2]), int(data[3]),
                  int(data[4]), int(data[5]), float(data[6]) * scale,
                  int(data[7]))
        (self.peer_id, self.enter_time, self.finish_time,
         self.start_position, self.latency, self.interrupts,
         self.smoothness, self.exit_time) = fields
def __parse(file):
    """Split a log into (config dict, list of PeerData records).

    Lines starting with a digit are tab-separated peer records; any other
    line is checked for the 'set <key> = <value>' configuration shape.
    """
    conf = {}
    peer_data = []
    for line in file:
        if line[:1].isdigit():
            peer_data.append(PeerData(line.strip().split('\t')))
        else:
            tokens = [tok for tok in line.strip().split(' ') if tok]
            if len(tokens) == 4 and tokens[2] == '=':
                conf[tokens[1]] = tokens[3]
    return conf, peer_data
def read(log_name):
    """Open and parse the log file *log_name*; return (conf, peer_data).

    Fix: the file handle is now closed deterministically via a context
    manager — the original left the handle open (resource leak).
    """
    print('Parse Log: %s' % log_name)
    with open(log_name) as file:
        return __parse(file)
def verify_graph_type():
    """Terminate the process unless the global graph_type is a known graph."""
    if graph_type in graphs:
        print('verify graph: (OK) %s' % graph_type)
        return
    print('unknown graph: %s' % graph_type)
    print('valid graph: %s' % str(graphs))
    exit()
|
iSuneast/p2p_vod
|
p2p_vod.linux_win/py/log_base.py
|
Python
|
mit
| 3,229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class ValidationError(ValueError):
    """Application-specific validation failure (a ValueError subclass)."""
    pass
|
HeathKang/flasky
|
app/exceptions.py
|
Python
|
mit
| 90
|
import re
# Fix: the collections.Iterable alias was deprecated since Python 3.3 and
# removed in Python 3.10; collections.abc is the supported location.
from collections.abc import Iterable
from io import StringIO
from itertools import groupby
from typing import List, Tuple, Callable, Any, IO, cast

from smartchangelog import datetools
from smartchangelog.commit import Commit
class Node:
    """A node of a commit-grouping tree.

    Leaves carry a single Commit in *value*; internal nodes carry a group
    *name*, the grouping *criterion* (a Commit property) and a tuple of
    child nodes.
    """

    def __init__(self, name: str = None, criterion: property = None, children: Tuple['Node', ...] = None,
                 value: Commit = None) -> None:
        self._parent: 'Node' = None  # set by the parent's children setter
        self.name = name
        self.criterion = criterion
        self._children: Tuple['Node', ...] = None
        # Assign through the property setter so children get re-parented.
        self.children = children
        self.value = value

    @property
    def parent(self) -> 'Node':
        """Node this node is attached to, or None for the root."""
        return self._parent

    @property
    def children(self) -> Tuple['Node', ...]:
        return self._children

    @children.setter
    def children(self, children: Tuple['Node', ...]) -> None:
        # Adopt each child so that depth_level() can walk up the tree.
        if children is not None:
            for node in children:
                node._parent = self
        self._children = children

    def depth_level(self) -> int:
        """Distance from the root node (the root itself is level 0)."""
        if self.parent is None:
            return 0
        else:
            return self.parent.depth_level() + 1

    def __len__(self):
        # A leaf counts as 1; an internal node counts the leaves below it.
        if not self.children:
            return 1
        nb_children = 0
        for child in self.children:
            nb_children += len(child)
        return nb_children

    @classmethod
    def print_multilines(cls, name: str, value: str, file: IO):
        """Print ' * name: value', spreading a multi-line value over sub-bullets."""
        if value:
            lines = value.split('\n')
            if len(lines) == 1:
                print(" * {name}: {value}".format(name=name, value=value), file=file)
            else:
                print(" * {name}:".format(name=name), file=file)
                for line in lines:
                    print(" - {line}".format(line=line), file=file)

    @classmethod
    def print_leaf(cls, commit: Commit, file: IO) -> None:
        """Print one commit as a bullet list with its metadata fields."""
        print("* subject: {subject}".format(subject=commit.subject or ''), file=file)
        cls.print_multilines(name='body', value=commit.body, file=file)
        print(" * date: {date}".format(date=datetools.date2str(commit.date)), file=file)
        print(" * author: {author}".format(author=commit.author), file=file)
        print(" * commit: {id}".format(id=commit.id), file=file)

    def print_header(self, node: 'Node', file: IO):
        """Print a markdown heading for *node*; depth encoded as the '#' count."""
        print(
            "{header} {criterion_name}: {name}".format(
                header="#" * (self.depth_level() + 1),
                criterion_name=Commit.property_name(node.criterion),
                name=node.name
            ),
            file=file
        )
        print(file=file)

    def report(self) -> str:
        """Render this subtree as a markdown report string."""
        sio = StringIO()
        with sio:
            if self.children is None:
                # Leaf node: print the commit itself.
                self.print_leaf(commit=self.value, file=sio)
            else:
                for node in self.children:
                    if node.name:
                        self.print_header(node=node, file=sio)
                    print(node.report().strip('\n'), file=sio)
                    print(file=sio)
            # Capture the buffer before the 'with' block closes it.
            string = sio.getvalue()
        return string
class Changelog(List[Commit]):
    """A list of Commit objects with git-log parsing and grouping helpers."""

    @classmethod
    def parse(cls, log: str) -> 'Changelog':
        """Split raw `git log` output into one Commit per 'commit <sha1>' block."""
        # NOTE(review): the pattern is not a raw string; it works because
        # '\n' denotes the same character either way, but r'...' would be
        # the safer convention.
        raw_commits = re.findall('(commit [a-z0-9]{40}\n(?:.|\n)*?)(?=commit [a-z0-9]{40}|$)', log)
        return Changelog([Commit.parse(rc) for rc in raw_commits])

    def groupby(self, *criteria: property) -> Node:
        """Arrange the commits into a Node tree grouped by *criteria* in order.

        With no criteria left, the commits are sorted by date and wrapped
        in a flat node (recursion base case).
        """
        if len(criteria) == 0:
            # Sort
            date_prop = cast(property, Commit.date)
            date_getter = cast(Callable[[Commit], Any], date_prop.fget)
            self.sort(key=date_getter)
            return self.node()
        criteria_list = list(criteria)
        criterion = criteria_list.pop(0)
        criterion_getter = cast(Callable[[Commit], Any], criterion.fget)
        # Filter commits with/without a value for the current criterion.
        # noinspection PyTypeChecker
        categorized_changelog = Changelog([commit for commit in self if criterion_getter(commit) is not None])
        # noinspection PyTypeChecker
        uncategorized_commits = Changelog([commit for commit in self if criterion_getter(commit) is None])
        # Sort — itertools.groupby requires its input sorted by the same key.
        categorized_changelog.sort(key=criterion_getter)
        # Arrange
        raw_result = self.groupby_to_list(groupby(iterable=categorized_changelog, key=criterion_getter))
        children_list: List[Node] = []
        for key, group in raw_result:
            cl = Changelog(group)
            # Recurse with the remaining criteria to build the subtree.
            children_list.append(Node(name=str(key), criterion=criterion, children=cl.groupby(*criteria_list).children))
        if len(uncategorized_commits) > 0:
            children_list.append(uncategorized_commits.node(name="unknown", criterion=criterion))
        children = cast(Tuple[Node], tuple(children_list))
        return Node(children=children)

    def node(self, name: str=None, criterion: property=None) -> Node:
        """Wrap every commit in a leaf Node under a single parent Node."""
        # noinspection PyTypeChecker
        children = cast(Tuple[Node], tuple(Node(value=commit) for commit in self))
        return Node(name=name, criterion=criterion, children=children)

    @classmethod
    def groupby_to_list(cls, iterable: Iterable):
        """Materialize itertools.groupby output into [[key, [items...]], ...]."""
        return [[key, [i for i in group]] for key, group in iterable]
|
ngouzy/smartchangelog
|
smartchangelog/changelog.py
|
Python
|
mit
| 5,198
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run all infrastructure-related tests."""
import os
import subprocess
import sys
# Directory containing this script (infra/bots) and the checkout root two
# levels above it.
INFRA_BOTS_DIR = os.path.dirname(os.path.realpath(__file__))
SKIA_DIR = os.path.abspath(os.path.join(INFRA_BOTS_DIR, os.pardir, os.pardir))
def test(cmd, cwd):
try:
subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return e.output
def python_unit_tests(train):
    """Discover and run the *_test.py unit tests; a no-op in training mode."""
    if train:
        return None
    cmd = ['python', '-m', 'unittest', 'discover', '-s', '.', '-p', '*_test.py']
    return test(cmd, INFRA_BOTS_DIR)
def recipe_test(train):
    """Run the recipe simulation tests, in 'train' or 'run' mode."""
    mode = 'train' if train else 'run'
    cmd = ['python', os.path.join(INFRA_BOTS_DIR, 'recipes.py'), 'test', mode]
    return test(cmd, SKIA_DIR)
def gen_tasks_test(train):
    """Run gen_tasks.go (check mode unless training); report a missing Go toolchain."""
    cmd = ['go', 'run', 'gen_tasks.go']
    if not train:
        cmd.append('--test')
    try:
        return test(cmd, INFRA_BOTS_DIR)
    except OSError:
        return ('Failed to run "%s"; do you have Go installed on your machine?'
                % ' '.join(cmd))
def main():
    """Run every test suite, print all failures to stderr, exit(1) on any.

    NOTE(review): uses Python 2 print statements; not Python 3 compatible.
    """
    train = False
    if '--train' in sys.argv:
        train = True
    tests = (
        python_unit_tests,
        recipe_test,
        gen_tasks_test,
    )
    errs = []
    for t in tests:
        # Each test returns None on success or its output on failure.
        err = t(train)
        if err:
            errs.append(err)
    if len(errs) > 0:
        print >> sys.stderr, 'Test failures:\n'
        for err in errs:
            print >> sys.stderr, '=============================='
            print >> sys.stderr, err
            print >> sys.stderr, '=============================='
        sys.exit(1)
    if train:
        print 'Trained tests successfully.'
    else:
        print 'All tests passed!'


if __name__ == '__main__':
    main()
|
HalCanary/skia-hc
|
infra/bots/infra_tests.py
|
Python
|
bsd-3-clause
| 1,839
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Module for generating tables used by rungeneric1.py.
The generated tables give the ERT and in brackets the 10th to 90th
percentile range divided by two of 100 simulated runs divided by the
best ERT measured during BBOB-2009 (given in the respective first row)
for different target precisions for different functions. If no algorithm
in BBOB-2009 reached the target precision, the absolute values are
given.
The median number of conducted function evaluations is given in
*italics*, if no run reached 1e-7.
#succ is the number of trials that reached the target precision 1e-8
**Bold** entries are statistically significantly better (according to
the rank-sum test) compared to the best algorithm in BBOB-2009, with
p = 0.05 or p = 1e-k where k > 1 is the number following the
\downarrow symbol, with Bonferroni correction by the number of
functions.
"""
from __future__ import absolute_import
import os
import numpy as np
import matplotlib.pyplot as plt
from . import genericsettings, bestalg, toolsstats, pproc
from .pptex import tableLaTeX, tableLaTeXStar, writeFEvals2, writeFEvalsMaxPrec
from .toolsstats import significancetest
from pdb import set_trace
# Target precisions (Df values) used as table columns.
targets = (10., 1., 1e-1, 1e-3, 1e-5, 1e-7)  # targets of the table
finaltarget = 1e-8  # value for determining the success ratio
# NOTE(review): this tuple assignment is dead — it is immediately
# overwritten by the TargetValues instance on the next line.
targetsOfInterest = (10., 1., 1e-1, 1e-3, 1e-5, 1e-7)  # targets of the table
targetsOfInterest = pproc.TargetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-7))
targetf = 1e-8  # value for determining the success ratio
samplesize = genericsettings.simulated_runlength_bootstrap_sample_size  # TODO: change samplesize

# def tablespec(targets):
#
#     i = 0
#     tspec = {'col%d' % i: {'what': 'fname', 'header': r'$\Delta f$', 'format': None}}
#     for t in targets:
#         i = i + 1
#         tspec.update({'col%d' % i: {'what': 'ERT ratio for df=%e' % t,
#                                     'header': r'\multicolumn{2}{@{}c@{}}{1e%+d}' % (int(np.log10(t)),
#                                     'format': writeFEval}})
#     i = i + 1
#     tspec.update({'col%d' % i: {'what': 'nb of success', 'header': r'\#succ',
#                                 'format': '%d'}})

# Legacy LaTeX caption macro, kept for reference.
old_legend = r"""
\newcommand{\tablecaption}[1]{Shown are, for functions #1 and for a
given target difference to the optimal function value \Df: the number
of successful trials (\textbf{$\#$}); the expected running time to
surpass $\fopt+\Df$ (\ERT, see Figure~\ref{fig:ERTgraphs}); the
\textbf{10\%}-tile and \textbf{90\%}-tile of the bootstrap
distribution of \ERT; the average number of function evaluations in
successful trials or, if none was successful, as last entry the median
number of function evaluations to reach the best function value
($\text{RT}_\text{succ}$). If $\fopt+\Df$ was never reached, figures in
\textit{italics} denote the best achieved \Df-value of the median
trial and the 10\% and 90\%-tile trial. Furthermore, N denotes the
number of trials, and mFE denotes the maximum of number of function
evaluations executed in one trial. See Figure~\ref{fig:ERTgraphs} for
the names of functions. }
"""

# Current caption, assembled below from these fragments.
table_caption_one = r"""%
Expected running time (ERT in number of function
evaluations) divided by the best ERT measured during BBOB-2009. The ERT
and in braces, as dispersion measure, the half difference between 90 and
10\%-tile of bootstrapped run lengths appear in the second row of each cell,
the best ERT
"""
table_caption_two1 = r"""%
in the first. The different target \Df-values are shown in the top row.
\#succ is the number of trials that reached the (final) target $\fopt + 10^{-8}$.
"""
table_caption_two2 = r"""%
(preceded by the target \Df-value in \textit{italics}) in the first.
\#succ is the number of trials that reached the target value of the last column.
"""
table_caption_rest = r"""%
The median number of conducted function evaluations is additionally given in
\textit{italics}, if the target in the last column was never reached.
\textbf{Bold} entries are statistically significantly better (according to
the rank-sum test) compared to the best algorithm in BBOB-2009, with
$p = 0.05$ or $p = 10^{-k}$ when the number $k > 1$ is following the
$\downarrow$ symbol, with Bonferroni correction by the number of
functions.
"""
# Fixed-target vs runlength-based variants of the full caption.
table_caption = table_caption_one + table_caption_two1 + table_caption_rest
table_caption_rlbased = table_caption_one + table_caption_two2 + table_caption_rest
def _treat(ds):
    """Assemble per-target ERT statistics for one data set.

    Returns (besttable, wholetable): two one-row numpy record arrays
    holding, per target, the reference algorithm's ERT respectively the
    ERT ratio with its 10-90 percentile half-range, followed by the number
    of successful trials for the final target and the number of runs.

    Bug fix: the original rebound the local name 'table' with the data
    tuple instead of writing it into the array, so 'wholetable' was
    returned still all zeros; the row is now assigned via wholetable[0].
    """
    bestalgentries = bestalg.loadBestAlgorithm(ds.isBiobjective())
    # Rec array: http://docs.scipy.org/doc/numpy/user/basics.rec.html
    bestentry = bestalgentries[(ds.dim, ds.funcId)]
    bestert = bestentry.detERT(targets)
    bestevals, bestalgs = bestentry.detEvals(targets)
    bestfinaldata = bestentry.detEvals([finaltarget])[0][0]
    ert = ds.detERT(targets)
    evals = ds.detEvals(targets)
    finaldata = ds.detEvals([finaltarget])[0]

    # Build the record dtypes: one (pair) field per target ...
    dtype = []
    bestdtype = []
    for i, t in enumerate(targets):
        dtype.append((('ERT ratio (iq 10-90), df=%e' % t, 'df=%e' % t), '2f'))
        bestdtype.append((('best ERT df=%e' % t, 'df=%e' % t), 'f'))
    # ... plus the success count and number of runs ('t' deliberately
    # carries the last target out of the loop here).
    dtype.append((('nb success final target=%e' % t, 'finaltarget=%e' % t), 'i8'))
    dtype.append(('nbruns', 'i8'))
    bestdtype.append((('nb success finaltarget=%e' % t, 'finaltarget=%e' % t), 'i8'))
    bestdtype.append(('nbruns', 'i8'))
    besttable = np.zeros(1, dtype=bestdtype)
    wholetable = np.zeros(1, dtype=dtype)

    # Fill the reference-algorithm row.
    bestdata = list()
    bestdata.extend(bestert)
    bestdata.append(np.sum(np.isnan(bestfinaldata) == False))
    bestdata.append(len(bestfinaldata))
    besttable[0] = tuple(bestdata)

    # Fill the algorithm-under-test row.
    data = list()
    for i, e in enumerate(evals):  # loop over targets
        unsucc = np.isnan(e)
        bt = toolsstats.drawSP(e[unsucc == False], ds.maxevals[unsucc],
                               (10, 90), samplesize)[0]
        data.append((ert[i] / bestert[i], (bt[-1] - bt[0]) / 2. / bestert[i]))
    data.append(np.sum(np.isnan(finaldata) == False))
    data.append(ds.nbRuns())
    wholetable[0] = tuple(data)  # fill with tuple not list nor array!

    # TODO: do the significance test thing here.
    return besttable, wholetable
def _table(data):
res = []
return res
def main2(dsList, dimsOfInterest, outputdir='.', info='', verbose=True):
    """Generate a table of ratio ERT/ERTbest vs target precision.

    1 table per dimension will be generated.

    Rank-sum tests table on "Final Data Points" for only one algorithm.
    that is, for example, using 1/#fevals(ftarget) if ftarget was
    reached and -f_final otherwise as input for the rank-sum test, where
    obviously the larger the better.

    NOTE(review): Python 2 only (iteritems, print statement). Also 'res'
    is reset to [] before being written, and file.write([]) would raise a
    TypeError — this function looks unfinished; confirm before use.
    """
    # TODO: remove dimsOfInterest, was added just for compatibility's sake
    if info:
        info = '_' + info
        # insert a separator between the default file name and the additional
        # information string.

    bestalg.loadBestAlgorithm(dsList.isBiobjective())

    for d, dsdim in dsList.dictByDim().iteritems():
        res = []
        for f, dsfun in sorted(dsdim.dictByFunc().iteritems()):
            assert len(dsfun) == 1, ('Expect one-element DataSetList for a '
                                     'given dimension and function')
            ds = dsfun[0]
            data = _treat(ds)
            res = _table(data)
        res = []
        outputfile = os.path.join(outputdir, 'pptable_%02dD%s.tex' % (d, info))
        f = open(outputfile, 'w')
        f.write(res)
        f.close()
        if verbose:
            print "Table written in %s" % outputfile
def main(dsList, dimsOfInterest, outputdir, info='', verbose=True):
    """Generate a table of ratio ERT/ERTbest vs target precision.

    1 table per dimension will be generated.

    Rank-sum tests table on "Final Data Points" for only one algorithm.
    that is, for example, using 1/#fevals(ftarget) if ftarget was
    reached and -f_final otherwise as input for the rank-sum test, where
    obviously the larger the better.

    NOTE(review): Python 2 only (xrange, print statement). Writes one
    LaTeX file per dimension and splices the HTML version into the
    single-algorithm HTML page at the '<!--pptableHtml-->' marker.
    """
    #TODO: check that it works for any reference algorithm?
    #in the following the reference algorithm is the one given in
    #bestalg.bestalgentries which is the virtual best of BBOB
    dictDim = dsList.dictByDim()
    targetf=1e-8
    if info:
        info = '_' + info
        # insert a separator between the default file name and the additional
        # information string.

    bestalgentries = bestalg.loadBestAlgorithm(dsList.isBiobjective())

    # Build the (shared) header row, LaTeX and HTML in lock-step.
    if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
        header = [r'\#FEs/D']
        headerHtml = ['<thead>\n<tr>\n<th>#FEs/D</th>\n']
        for i in targetsOfInterest.labels():
            header.append(r'\multicolumn{2}{@{}c@{}}{%s}' % i)
            headerHtml.append('<td>%s</td>\n' % i)
    else:
        header = [r'$\Delta f$']
        headerHtml = ['<thead>\n<tr>\n<th>Δ f</th>\n']
        for i in targetsOfInterest.target_values:
            header.append(r'\multicolumn{2}{@{}c@{}}{1e%+d}' % (int(np.log10(i))))
            headerHtml.append('<td>1e%+d</td>\n' % (int(np.log10(i))))
    header.append(r'\multicolumn{2}{|@{}r@{}}{\#succ}')
    headerHtml.append('<td>#succ</td>\n</tr>\n</thead>\n')

    for d in dimsOfInterest:
        table = [header]
        tableHtml = headerHtml
        extraeol = [r'\hline']
        try:
            dictFunc = dictDim[d].dictByFunc()
        except KeyError:
            continue
        funcs = set(dictFunc.keys())
        nbtests = float(len(funcs))  # #funcs tests times one algorithm

        tableHtml.append('<tbody>\n')
        for f in sorted(funcs):
            tableHtml.append('<tr>\n')
            bestalgentry = bestalgentries[(d, f)]
            curline = [r'${\bf f_{%d}}$' % f]
            curlineHtml = ['<th><b>f<sub>%d</sub></b></th>\n' % f]
            bestalgdata = bestalgentry.detERT(targetsOfInterest((f,d)))
            bestalgevals, bestalgalgs = bestalgentry.detEvals(targetsOfInterest((f,d)))
            if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
                #write ftarget:fevals
                for i in xrange(len(bestalgdata[:-1])):
                    temp="%.1e" %targetsOfInterest((f,d))[i]
                    if temp[-2]=="0":
                        # drop a leading zero in the exponent, e.g. 1.0e-08 -> 1.0e-8
                        temp=temp[:-2]+temp[-1]
                    curline.append(r'\multicolumn{2}{@{}c@{}}{\textit{%s}:%s \quad}'
                                   % (temp, writeFEvalsMaxPrec(bestalgdata[i], 2)))
                    curlineHtml.append('<td><i>%s</i>:%s</td>\n'
                                       % (temp, writeFEvalsMaxPrec(bestalgdata[i], 2)))
                temp="%.1e" %targetsOfInterest((f,d))[-1]
                if temp[-2]=="0":
                    temp=temp[:-2]+temp[-1]
                curline.append(r'\multicolumn{2}{@{}c@{}|}{\textit{%s}:%s }'
                               % (temp, writeFEvalsMaxPrec(bestalgdata[-1], 2)))
                curlineHtml.append('<td><i>%s</i>:%s</td>\n'
                                   % (temp, writeFEvalsMaxPrec(bestalgdata[-1], 2)))
                #success
                targetf=targetsOfInterest((f,d))[-1]
            else:
                # write #fevals of the reference alg
                for i in bestalgdata[:-1]:
                    curline.append(r'\multicolumn{2}{@{}c@{}}{%s \quad}'
                                   % writeFEvalsMaxPrec(i, 2))
                    curlineHtml.append('<td>%s</td>\n' % writeFEvalsMaxPrec(i, 2))
                curline.append(r'\multicolumn{2}{@{}c@{}|}{%s}'
                               % writeFEvalsMaxPrec(bestalgdata[-1], 2))
                curlineHtml.append('<td>%s</td>\n' % writeFEvalsMaxPrec(bestalgdata[-1], 2))

            # write the success ratio for the reference alg
            tmp = bestalgentry.detEvals([targetf])[0][0]
            tmp2 = np.sum(np.isnan(tmp) == False)  # count the nb of success
            curline.append('%d' % (tmp2))
            if tmp2 > 0:
                curline.append('/%d' % len(tmp))
                curlineHtml.append('<td>%d/%d</td>\n' % (tmp2, len(tmp)))
            else:
                curlineHtml.append('<td>%d</td>\n' % (tmp2))

            table.append(curline[:])
            tableHtml.extend(curlineHtml[:])
            tableHtml.append('</tr>\n')
            extraeol.append('')

            # generate all data for ranksum test
            assert len(dictFunc[f]) == 1
            entry = dictFunc[f][0]  # take the first element
            ertdata = entry.detERT(targetsOfInterest((f, d)))
            testresbestvs1 = significancetest(bestalgentry, entry,
                                              targetsOfInterest((f, d)))

            tableHtml.append('<tr>\n')
            #for nb, entry in enumerate(entries):
            #curline = [r'\algshort\hspace*{\fill}']
            curline = ['']
            curlineHtml = ['<th></th>\n']
            #data = entry.detERT(targetsOfInterest)
            evals = entry.detEvals(targetsOfInterest((f,d)))
            dispersion = []
            data = []
            # Per target: ERT plus its bootstrapped 10-90 percentile half-range.
            for i in evals:
                succ = (np.isnan(i) == False)
                tmp = i.copy()
                tmp[succ==False] = entry.maxevals[np.isnan(i)]
                #set_trace()
                # TODO: what is the difference between data and ertdata?
                data.append(toolsstats.sp(tmp, issuccessful=succ)[0])
                #if not any(succ):
                    #set_trace()
                if any(succ):
                    tmp2 = toolsstats.drawSP(tmp[succ], tmp[succ==False],
                                             (10, 50, 90), samplesize)[0]
                    dispersion.append((tmp2[-1] - tmp2[0]) / 2.)
                else:
                    dispersion.append(None)
            assert data == ertdata
            for i, ert in enumerate(data):
                alignment = 'c'
                if i == len(data) - 1:  # last element
                    alignment = 'c|'
                nbstars = 0
                z, p = testresbestvs1[i]
                if ert - bestalgdata[i] < 0. and not np.isinf(bestalgdata[i]):
                    evals = entry.detEvals([targetsOfInterest((f,d))[i]])[0]
                    evals[np.isnan(evals)] = entry.maxevals[np.isnan(evals)]
                    bestevals = bestalgentry.detEvals([targetsOfInterest((f,d))[i]])
                    bestevals, bestalgalg = (bestevals[0][0], bestevals[1][0])
                    bestevals[np.isnan(bestevals)] = bestalgentry.maxevals[bestalgalg][np.isnan(bestevals)]
                    evals = np.array(sorted(evals))[0:min(len(evals), len(bestevals))]
                    bestevals = np.array(sorted(bestevals))[0:min(len(evals), len(bestevals))]
                #The conditions for significance are now that ERT < ERT_best and
                # all(sorted(FEvals_best) > sorted(FEvals_current)).
                if ((nbtests * p) < 0.05 and ert - bestalgdata[i] < 0.
                    and z < 0.
                    and (np.isinf(bestalgdata[i])
                         or all(evals < bestevals))):
                    nbstars = -np.ceil(np.log10(nbtests * p))
                isBold = False
                if nbstars > 0:
                    isBold = True

                if np.isinf(bestalgdata[i]):  # if the best did not solve the problem
                    tmp = writeFEvalsMaxPrec(float(ert), 2)
                    if not np.isinf(ert):
                        # NOTE(review): when ert is also inf, tmpHtml keeps
                        # its value from a previous iteration — confirm.
                        tmpHtml = '<i>%s</i>' % (tmp)
                        tmp = r'\textit{%s}' % (tmp)
                        if isBold:
                            tmp = r'\textbf{%s}' % tmp
                            tmpHtml = '<b>%s</b>' % tmpHtml
                    tableentry = (r'\multicolumn{2}{@{}%s@{}}{%s}'
                                  % (alignment, tmp))
                    tableentryHtml = ('<td>%s</td>' % tmpHtml)
                else:
                    # Formatting
                    tmp = float(ert) / bestalgdata[i]
                    assert not np.isnan(tmp)
                    tableentry = writeFEvalsMaxPrec(tmp, 2)
                    tableentryHtml = writeFEvalsMaxPrec(tmp, 2)

                    if np.isinf(tmp) and i == len(data)-1:
                        tableentry = (tableentry
                                      + r'\textit{%s}' % writeFEvals2(np.median(entry.maxevals), 2))
                        tableentryHtml = (tableentryHtml
                                          + ' <i>%s</i>' % writeFEvals2(np.median(entry.maxevals), 2))
                        if isBold:
                            tableentry = r'\textbf{%s}' % tableentry
                            tableentryHtml = '<b>%s</b>' % tableentryHtml
                        elif 11 < 3:  # and significance0vs1 < 0:
                            tableentry = r'\textit{%s}' % tableentry
                            tableentryHtml = '<i>%s</i>' % tableentryHtml
                        tableentry = (r'\multicolumn{2}{@{}%s@{}}{%s}'
                                      % (alignment, tableentry))
                    elif tableentry.find('e') > -1 or (np.isinf(tmp) and i != len(data) - 1):
                        if isBold:
                            tableentry = r'\textbf{%s}' % tableentry
                            tableentryHtml = '<b>%s</b>' % tableentryHtml
                        elif 11 < 3:  # and significance0vs1 < 0:
                            tableentry = r'\textit{%s}' % tableentry
                            tableentryHtml = '<i>%s</i>' % tableentryHtml
                        tableentry = (r'\multicolumn{2}{@{}%s@{}}{%s}'
                                      % (alignment, tableentry))
                    else:
                        # Split at the decimal point so LaTeX can align columns on it.
                        tmp = tableentry.split('.', 1)
                        tmpHtml = tableentryHtml.split('.', 1)
                        if isBold:
                            tmp = list(r'\textbf{%s}' % i for i in tmp)
                            tmpHtml = list('<b>%s</b>' % i for i in tmpHtml)
                        elif 11 < 3:  # and significance0vs1 < 0:
                            tmp = list(r'\textit{%s}' % i for i in tmp)
                            tmpHtml = list('<i>%s</i>' % i for i in tmpHtml)
                        tableentry = ' & .'.join(tmp)
                        tableentryHtml = '.'.join(tmpHtml)
                        if len(tmp) == 1:
                            tableentry += '&'

                superscript = ''
                superscriptHtml = ''
                if nbstars > 0:
                    #tmp = '\hspace{-.5ex}'.join(nbstars * [r'\star'])
                    if z > 0:
                        superscript = r'\uparrow'  #* nbstars
                        superscriptHtml = '↑'
                    else:
                        superscript = r'\downarrow'  #* nbstars
                        superscriptHtml = '↓'
                        # print z, linebest[i], line1
                    if nbstars > 1:
                        superscript += str(int(min((9, nbstars))))
                        superscriptHtml += str(int(min(9, nbstars)))
                        # superscript += str(int(nbstars))

                #if superscript or significance0vs1:
                    #s = ''
                    #if significance0vs1 > 0:
                        #s = '\star'
                    #if significance0vs1 > 1:
                        #s += str(significance0vs1)
                    #s = r'$^{' + s + superscript + r'}$'
                    #if tableentry.endswith('}'):
                        #tableentry = tableentry[:-1] + s + r'}'
                    #else:
                        #tableentry += s

                if dispersion[i]:
                    if not np.isinf(bestalgdata[i]):
                        tmp = writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 1)
                    else:
                        tmp = writeFEvalsMaxPrec(dispersion[i], 1)
                    tableentry += (r'${\scriptscriptstyle(%s)}$' % tmp)
                    tableentryHtml += (' (%s)' % tmp)

                if superscript:
                    s = r'$^{' + superscript + r'}$'
                    shtml = '<sup>' + superscriptHtml + '</sup>'
                    if tableentry.endswith('}'):
                        # keep the superscript inside the closing brace
                        tableentry = tableentry[:-1] + s + r'}'
                    else:
                        tableentry += s
                    tableentryHtml += shtml

                tableentryHtml = tableentryHtml.replace('$\infty$', '∞')
                curlineHtml.append('<td>%s</td>\n' % tableentryHtml)
                curline.append(tableentry)

                #curline.append(tableentry)
                #if dispersion[i] is None or np.isinf(bestalgdata[i]):
                    #curline.append('')
                #else:
                    #tmp = writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 2)
                    #curline.append('(%s)' % tmp)

            # Success count for the final target (first row at or below targetf).
            tmp = entry.evals[entry.evals[:, 0] <= targetf, 1:]
            try:
                tmp = tmp[0]
                curline.append('%d' % np.sum(np.isnan(tmp) == False))
                curlineHtml.append('<td>%d' % np.sum(np.isnan(tmp) == False))
            except IndexError:
                curline.append('%d' % 0)
                curlineHtml.append('<td>%d' % 0)
            curline.append('/%d' % entry.nbRuns())
            curlineHtml.append('/%d</td>\n' % entry.nbRuns())
            table.append(curline[:])
            tableHtml.extend(curlineHtml[:])
            tableHtml.append('</tr>\n')
            extraeol.append(r'\hline')
        extraeol[-1] = ''

        # Write the LaTeX table for this dimension.
        outputfile = os.path.join(outputdir, 'pptable_%02dD%s.tex' % (d, info))
        if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
            spec = r'@{}c@{}|' + '*{%d}{@{ }r@{}@{}l@{}}' % len(targetsOfInterest) + '|@{}r@{}@{}l@{}'
        else:
            spec = r'@{}c@{}|' + '*{%d}{@{}r@{}@{}l@{}}' % len(targetsOfInterest) + '|@{}r@{}@{}l@{}'
        #res = r'\providecommand{\algshort}{%s}' % alg1 + '\n'
        #res += tableLaTeXStar(table, width=r'0.45\textwidth', spec=spec,
                              #extraeol=extraeol)
        res = tableLaTeX(table, spec=spec, extraeol=extraeol)
        f = open(outputfile, 'w')
        f.write(res)
        f.close()

        # Splice the HTML table into the single-algorithm page at the marker.
        res = ("").join(str(item) for item in tableHtml)
        res = '<p><b>%d-D</b></p>\n<table>\n%s</table>\n' % (d, res)

        filename = os.path.join(outputdir, genericsettings.single_algorithm_file_name + '.html')
        lines = []
        with open(filename) as infile:
            for line in infile:
                if '<!--pptableHtml-->' in line:
                    lines.append(res)
                lines.append(line)
        with open(filename, 'w') as outfile:
            for line in lines:
                outfile.write(line)

        if verbose:
            print "Table written in %s" % outputfile
|
NDManh/numbbo
|
code-postprocessing/bbob_pproc/pptable.py
|
Python
|
bsd-3-clause
| 23,192
|
""" Controller for tsl2561 devices. """
import mauzr
from mauzr.serializer import Struct as SS
__author__ = "Alexander Sowitzki"
# Piecewise lux approximation table: (ratio threshold, channel0
# coefficient, channel1 coefficient) per segment.
# NOTE(review): values look like the TSL2561 datasheet's scaled integer
# constants — confirm against the datasheet before changing.
FMAP = ((0, 0, 0), (0x40, 0x01f2, 0x01be), (0x80, 0x214, 0x2d1),
        (0xc0, 0x23f, 0x37b), (0x0100, 0x270, 0x3fe),
        (0x0138, 0x16f, 0x1fc), (0x019a, 0xd2, 0xfb), (0x29a, 0x18, 0x12))
def control(core, cfgbase="tsl2561", **kwargs):
    """ Controller for tsl2561 devices.

    Subscribes to the device's raw channel readings and publishes a
    computed illuminance value plus the configured poll interval.

    :param core: Core instance.
    :type core: object
    :param cfgbase: Configuration entry for this unit.
    :type cfgbase: str
    :param kwargs: Keyword arguments that will be merged into the config.
    :type kwargs: dict
    """

    cfg = core.config[cfgbase]
    cfg.update(kwargs)

    mqtt = core.mqtt
    base = cfg["base"]
    log = core.logger("<TSL2561@{}>".format(base))

    mqtt.setup_publish(base + "illuminance", SS("!f"), 0)
    mqtt.setup_publish(base + "poll_interval", SS("!I"), 0, cfg["interval"])

    def _on_measurement(_topic, channels):
        # Readings near the 16-bit ceiling mean the sensor is saturated
        # and the sample is unusable.
        if True in [ch > 65000 for ch in channels]:
            log.warning("Sensor saturated")
            return
        channels = [ch * 16 for ch in channels]
        # Integer ratio of channel1 to channel0, scaled by 1024 then
        # rounded-halved (guarding against division by zero).
        ratio = 0 if not channels[0] else int(channels[1] * 1024 / channels[0])
        ratio = (ratio + 1) >> 1
        for tres, a, b in FMAP:
            if ratio <= tres:
                f = (a, b)
                break
        # NOTE(review): if ratio exceeds the last FMAP threshold (0x29a)
        # the loop ends without a break and 'f' is unbound -> NameError.
        channels = [ch * fi for ch, fi in zip(channels, f)]
        illuminance = (max(0, channels[0] - channels[1]) + 8192) >> 14
        mqtt.publish(base + "illuminance", illuminance, True)

    mqtt.subscribe(base + "channels", _on_measurement, SS("<HH"), 0)
def main():
    """ Entry point. """
    # Hand this unit's control function to the mauzr CPython runner.
    mauzr.cpython("mauzr", "tsl2561controller", control)
|
eqrx/mauzr
|
mauzr/hardware/tsl2561/controller.py
|
Python
|
agpl-3.0
| 1,735
|
# listenbrainz-server - Server for the ListenBrainz project.
#
# Copyright (C) 2020 MetaBrainz Foundation Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import listenbrainz.db.user as db_user
import listenbrainz.db.missing_musicbrainz_data as db_missing_musicbrainz_data
from listenbrainz.webserver.errors import APIBadRequest, APINotFound, APINoContent
from listenbrainz.webserver.views.api_tools import (DEFAULT_ITEMS_PER_GET,
get_non_negative_param,
MAX_ITEMS_PER_GET)
from flask import Blueprint, jsonify, request
from listenbrainz.webserver.decorators import crossdomain
from listenbrainz.webserver.rate_limiter import ratelimit
missing_musicbrainz_data_api_bp = Blueprint('missing_musicbrainz_data_v1', __name__)
@missing_musicbrainz_data_api_bp.route("/user/<user_name>")
@crossdomain()
@ratelimit()
def get_missing_musicbrainz_data(user_name):
    """ Get musicbrainz data sorted on "listened_at" that the user has submitted to ListenBrainz but has not
        submitted to MusicBrainz.

        A sample response from the endpoint may look like::

        {
            "payload": {
                "last_updated": 1588494361,
                "data": [
                    {
                        "artist_msid": "fd32e967-b874-44b2-809c-3862f714813c",
                        "artist_name": "Red City Radio",
                        "listened_at": "2020-04-29 23:40:47",
                        "recording_msid": "78f63ece-86e1-48bf-a7ff-29793d4a84e6",
                        "release_msid": "47818692-f669-4846-acbc-cb0a69987aee",
                        "release_name": "The Dangers Of Standing Still",
                        "track_name": "Never Bring A Cup Of Water To A Gunfight"
                    },
                    {
                        "artist_msid": "fd32e967-b874-44b2-809c-3862f714813c",
                        "artist_name": "Red City Radio",
                        "listened_at": "2020-04-29 23:37:57",
                        "recording_msid": "d226200a-a9be-4e9e-9f7c-d74a71647893",
                        "release_msid": "47818692-f669-4846-acbc-cb0a69987aee",
                        "release_name": "The Dangers Of Standing Still",
                        "track_name": "Nathaniel Martinez"
                    }
                ],
                "count": 2,
                "offset": 4,
                "total_data_count": 25,
                "user_name": "Vansika"
            }
        }

        :param count: Optional, number of records to return, Default: :data:`~webserver.views.api.DEFAULT_ITEMS_PER_GET`
            Max: :data:`~webserver.views.api.MAX_ITEMS_PER_GET`
        :type count: ``int``
        :param offset: Optional, number of records to skip from the beginning, for pagination.
            Ex. An offset of 5 means the 5 records will be skipped, defaults to 0
        :type offset: ``int``
        :statuscode 200: Successful query, you have data!
        :statuscode 400: Bad request, check ``response['error']`` for more details
        :statuscode 404: User not found.
        :statuscode 204: Missing MusicBrainz data for the user not calculated , empty response will be returned
    """
    # source indicates the *source* script/algorithm by which the missing musicbrainz data was calculated.
    # The source may change in future
    source = 'cf'

    user = db_user.get_by_mb_id(user_name)
    if user is None:
        raise APINotFound("Cannot find user: {}".format(user_name))

    offset = get_non_negative_param('offset', default=0)
    count = get_non_negative_param('count', default=DEFAULT_ITEMS_PER_GET)
    count = min(count, MAX_ITEMS_PER_GET)

    data = db_missing_musicbrainz_data.get_user_missing_musicbrainz_data(user['id'], source)
    if data is None:
        err_msg = 'Missing MusicBrainz data for {} not calculated'.format(user_name)
        raise APINoContent(err_msg)

    missing_musicbrainz_data_list = getattr(data, 'data').dict()['missing_musicbrainz_data']
    # Bug fix: the page must span [offset, offset + count); the original
    # slice [offset:count] returned too few records whenever offset > 0
    # (e.g. offset=4, count=2 produced the empty slice [4:2]).
    missing_musicbrainz_data_list_filtered = missing_musicbrainz_data_list[offset:offset + count]

    payload = {
        'payload': {
            'user_name': user_name,
            'last_updated': int(getattr(data, 'created').timestamp()),
            'count': len(missing_musicbrainz_data_list_filtered),
            'total_data_count': len(missing_musicbrainz_data_list),
            'offset': offset,
            'data': missing_musicbrainz_data_list_filtered
        }
    }

    return jsonify(payload)
|
Freso/listenbrainz-server
|
listenbrainz/webserver/views/missing_musicbrainz_data_api.py
|
Python
|
gpl-2.0
| 5,227
|
# cloudscope.replica.consensus.tag
# Package that implements tag based consensus consistency.
#
# Author: Benjamin Bengfort <bengfort@cs.umd.edu>
# Created: Tue Mar 08 14:28:05 2016 -0500
#
# Copyright (C) 2016 University of Maryland
# For license information, see LICENSE.txt
#
# ID: tag.py [] benjamin@bengfort.com $
"""
Package that implements tag based consensus consistency.
"""
##########################################################################
## Imports
##########################################################################
from cloudscope.config import settings
from cloudscope.simulation.timer import Timer
from cloudscope.replica import Consistency, State
from cloudscope.exceptions import TagRPCException
from cloudscope.exceptions import SimulationException
from cloudscope.replica.store import namespace
from cloudscope.replica.store import WriteLog
from cloudscope.utils.enums import Enum
from .base import ConsensusReplica
from .election import Election
from collections import defaultdict
from collections import namedtuple
from functools import partial
##########################################################################
## Module Constants
##########################################################################
## Timers and timing
SESSION_TIMEOUT = settings.simulation.session_timeout
HEARTBEAT_INTERVAL = settings.simulation.heartbeat_interval
## RPC Messages
## NOTE: tag should be a data structure of {objects: {index, epoch, commit}}
## NOTE: index, epoch, commit are meaningful in different RPC contexts
# Ask the cluster to vote on assigning the tag (a view proposal) to candidate.
RequestTag = namedtuple('RequestTag', 'epoch, tag, candidate')
# A single replica's vote in response to a RequestTag broadcast.
TagResponse = namedtuple('TagResponse', 'epoch, accept')
# Owner-to-follower log replication (empty entries act as a heartbeat).
AppendEntries = namedtuple('AppendEntries', 'epoch, owner, tag, entries')
# Follower's per-object acknowledgment of an AppendEntries message.
AEResponse = namedtuple('AEResponse', 'epoch, success, tag, reason')
# Forward a read/write access to the replica that owns the object.
RemoteAccess = namedtuple('RemoteAccess', 'epoch, access')
# Owner's reply to a forwarded RemoteAccess.
AccessResponse = namedtuple('AccessResponse', 'epoch, success, access')
## Sent with RPC messages to indicate the state of a log per object.
# FIX: the namedtuple typename previously read 'TagState', which did not
# match the bound name; that produced a misleading repr and breaks pickling
# (pickle looks the class up by its typename in this module).
LogState = namedtuple('LogState', 'index, epoch, commit')
## Sent with Append Entries responses to indicate what went wrong.
Reason = Enum('Reason', 'OK, EPOCH, LOG')
##########################################################################
## Tag Replica
##########################################################################
class TagReplica(ConsensusReplica):
    """
    A replica that maintains consistency via tag-based consensus: each
    replica may own a "tag" (a set of object names) and performs
    Raft-style log replication for appends to those objects only.
    """

    def __init__(self, simulation, **kwargs):
        ## Timers for work
        # The session timeout releases the tag after a period of inactivity;
        # the heartbeat interval paces AppendEntries while owning a tag.
        self.session_timeout = kwargs.get('session_timeout', SESSION_TIMEOUT)
        self.heartbeat_interval = kwargs.get('heartbeat_interval', HEARTBEAT_INTERVAL)
        self.session = None
        self.heartbeat = None
        ## Initialize the tag specific settings
        # epoch: monotonic term counter; log: one WriteLog per object name;
        # view: maps each replica to the set of objects it currently owns.
        self.epoch = 0
        self.log = defaultdict(WriteLog)
        self.view = defaultdict(set)
        ## Owner state
        # Per-follower, per-object replication indices (Raft's nextIndex and
        # matchIndex); populated only on transition to the OWNER state.
        self.nextIndex = None
        self.matchIndex = None
        ## Initialize the replica
        super(TagReplica, self).__init__(simulation, **kwargs)
        self.state = State.READY
######################################################################
## Core Methods (Replica API)
######################################################################
    def read(self, name, **kwargs):
        """
        Handle a read access to the named object.

        Convert the read into an access (via the superclass), then:

        - if this replica owns the object's tag, complete the read with the
          latest version in the local log (drop it empty if never written);
        - if a different replica owns the tag, drop the read outright;
        - if nobody owns the tag, log the access, schedule a retry one
          heartbeat interval later, and (on the first attempt only, and only
          when not already tagging) attempt to acquire the tag.

        Returns the access object in all cases.
        """
        # Create the read event using super.
        access = super(TagReplica, self).read(name, **kwargs)
        # Record the number of attempts for the access
        if access.is_local_to(self): access.attempts += 1
        # Increase the session on access.
        self.handle_session()
        # Are we the owner of this tag?
        if self.owns(access.name):
            # TODO: Change to last commit!
            # NOTE(review): this returns lastVersion (latest appended), not
            # the last *committed* version -- weaker than Raft reads; confirm
            # this is intentional.
            version = self.log[access.name].lastVersion
            # If the version is None, bail since we haven't read anything
            if version is None: return access.drop(empty=True)
            # Update the version, complete the read, and log the access
            access.update(version, completed=True)
            access.log(self)
            # Return, we're done reading!
            return access
        # Is there a different owner for the tag?
        owner = self.find_owner(access.name)
        if owner is not None:
            # Right now just drop the read on its face.
            self.sim.logger.info(
                "ownership conflict: dropped {} at {}".format(access, self)
            )
            return access.drop()
        # We're going to acquire the tag!
        else:
            # Log the access from this particular replica.
            access.log(self)
            # We're going to have some read latency, retry the read.
            # NOTE(review): the retry passes the access object back into
            # read() as `name`; the superclass presumably accepts either --
            # confirm against ConsensusReplica.read.
            retry = Timer(
                self.env, self.heartbeat_interval, lambda: self.read(access)
            ).start()
            if access.attempts <= 1 and self.state != State.TAGGING:
                # Request the ownership of the tag
                self.acquire(access.name)
            return access
    def write(self, name, **kwargs):
        """
        Handle a write access to the named object.

        Local writes derive the next version from the latest entry in the
        local log; remote writes must arrive with a version already attached.
        Then:

        - if this replica owns the tag, append the version to the log,
          complete local accesses, and replicate via AppendEntries (unless
          writes are being aggregated onto the next heartbeat);
        - if another replica owns the tag, drop the write;
        - otherwise schedule a retry and attempt to acquire the tag.

        Returns the access object in all cases.
        """
        # Create the write event using super.
        access = super(TagReplica, self).write(name, **kwargs)
        # Increase the session on access.
        self.handle_session()
        # Determine if the write is local or remote
        if access.is_local_to(self):
            # Record the number of attempts for the access
            access.attempts += 1
            # Fetch the latest version from the log.
            latest = self.log[access.name].lastVersion
            # Perform the write
            if latest is None:
                version = namespace(access.name)(self)
            else:
                version = latest.nextv(self)
            # Update the access with the latest version
            access.update(version)
        else:
            # If there is no version, raise an exception
            # NOTE(review): AccessError is not imported in this module, so
            # reaching this raise would itself fail with NameError -- confirm
            # it should come from cloudscope.exceptions.
            if access.version is None:
                raise AccessError(
                    "Attempting a remote write on {} without a version!".format(self)
                )
            # Save the version variable for use below.
            version = access.version
        # Log the access at this replica
        access.log(self)
        # Are we the owner of this tag?
        if self.owns(access.name):
            # Perform the append entries
            self.log[name].append(version, self.epoch)
            # Update the version to track visibility latency
            version.update(self)
            # Complete the access if it was local
            if access.is_local_to(self): access.complete()
            # Now do AppendEntries
            # Also interrupt the heartbeat since we just sent AppendEntries
            if not settings.simulation.aggregate_writes:
                self.send_append_entries()
                if self.heartbeat: self.heartbeat.stop()
            return access
        # Is there a different owner for the tag?
        owner = self.find_owner(name)
        if owner is not None:
            # Right now just drop the write on its face.
            self.sim.logger.info(
                "ownership conflict: dropped {} at {}".format(access, self)
            )
            return access.drop()
        # We're going to acquire the tag!
        else:
            # We're going to have some write latency, retry the write.
            retry = Timer(
                self.env, self.heartbeat_interval, lambda: self.write(access)
            ).start()
            # Request the ownership of the tag
            self.acquire(access.name)
            return access
def run(self):
"""
We have to check in at every heartbeat interval. If we own a tag then
send a heartbeat message, otherwise just keep quiescing.
"""
while True:
if self.state == State.OWNER:
self.heartbeat = Timer(
self.env, self.heartbeat_interval, self.on_heartbeat_timeout
)
yield self.heartbeat.start()
else:
yield self.env.timeout(self.heartbeat_interval)
######################################################################
## Helper Methods
######################################################################
def owns(self, name):
"""
Returns True if the name is in the current view for that owner.
"""
return name in self.view[self]
def find_owner(self, name):
"""
Looks up the owner of the name in the current view.
Returns None if there is no owner fo the tag.
"""
for owner, tag in self.view.items():
if name in tag:
return owner
return None
def acquire(self, tag):
"""
Sends out the acquire tag RPC
"""
# Construct the tag to send out
if not isinstance(tag, (set, frozenset)):
tag = frozenset([tag])
# Make sure to request the tag we already have
tag = frozenset(self.view[self] | tag)
# Request tag with all current tags
self.send_tag_request(tag)
# Log the tag acquisition
self.sim.logger.info(
"{} is atempting to acquire tag {}".format(self, self.tag)
)
def release(self, tag=None):
"""
Sends out the release tag RPC
"""
# Release all currently held tags
if tag is None: tag = self.view[self]
# Construct the tag to send out (if specified)
if not isinstance(tag, (set, frozenset)):
tag = frozenset([tag])
# Request the difference of the tags we already have
tag = frozenset(self.view[self] - tag)
# Request tag with all current tags
self.send_tag_request(tag)
# Log the tag release
self.sim.logger.info(
"{} is atempting to release tag {}".format(self, tag)
)
def handle_session(self):
"""
Starts a session timer if one isn't running, otherwise resets the
currently running session timer on an additional access.
"""
if not self.session:
self.session = Timer(
self.env, self.session_timeout,
partial(self.on_session_timeout, self.env.now)
)
else:
self.session = self.session.reset()
def get_log_state(self, tag=None):
"""
Constructs a log state object for append entries responses, either
for the current tag or simply the current view.
"""
if tag is None:
tag = [obj for view in self.view.values() for obj in view]
return {
obj: LogState(
self.log[obj].lastApplied,
self.log[obj].lastTerm,
self.log[obj].commitIndex
) for obj in tag
}
def send_tag_request(self, tag):
"""
Broadcasts a tag request for the passed in tag.
"""
# Change state to tagging and save tag locally
self.state = State.TAGGING
self.tag = tag
# Request the entire tag in your current view.
tagset = {
owner.id: tagset
for owner, tagset in self.view.items()
}
tagset[self.id] = self.tag
# Send the tag request RPC to each neighbor
rpc = RequestTag(self.epoch, tagset, self)
for neighbor in self.neighbors():
self.send(neighbor, rpc)
    def send_append_entries(self, target=None):
        """
        Sends AppendEntries RPCs to every follower, or only to ``target``.

        For each follower and each owned object, any log entries at or past
        the follower's nextIndex are shipped, together with a LogState
        describing the entry immediately preceding them (used by the
        follower's consistency check) and the owner's commit index. Does
        nothing when not the owner; fails silently if target is not in the
        neighbors list.
        """
        # ownership check
        if not self.state == State.OWNER:
            return
        # Go through follower list.
        # NOTE: dict.iteritems is Python 2 only; this module targets Py2.
        for node, objs in self.nextIndex.iteritems():
            # Filter based on the target supplied.
            if target is not None and node != target:
                continue
            # Construct the entries, or empty for heartbeat
            # The tag contains the state of each item to be sent
            entries = defaultdict(list)
            tag = defaultdict(LogState)
            for obj, nidx in objs.items():
                # A rule directly from the Raft paper
                if self.log[obj].lastApplied >= nidx:
                    entries[obj] = self.log[obj][nidx:]
                # Compute the previous log index and term
                # NOTE(review): assumes the log supports index nidx-1 even at
                # the start of the log (a sentinel entry) -- confirm WriteLog.
                prevLogIndex = nidx - 1
                prevLogTerm = self.log[obj][prevLogIndex].term
                commitIndex = self.log[obj].commitIndex
                # Create the tag state
                tag[obj] = LogState(prevLogIndex, prevLogTerm, commitIndex)
            # Send the append entries message
            self.send(
                node, AppendEntries(
                    self.epoch, self.id, tag, entries
                )
            )
######################################################################
## Event Handlers
######################################################################
def on_state_change(self):
"""
Setting the state decides how the Tag node will interact.
"""
# Do state specific tag modifications
if self.state == State.READY:
self.votes = None
self.tag = None
# Remove owner state
self.nextIndex = None
self.matchIndex = None
# Also interrupt the heartbeat
if self.heartbeat: self.heartbeat.stop()
elif self.state == State.TAGGING:
# Convert to tag acquisition/release
self.epoch += 1
# Create election and vote for self
self.votes = Election([node.id for node in self.quorum()])
self.votes.vote(self.id)
# Also interrupt the heartbeat
if self.heartbeat: self.heartbeat.stop()
elif self.state == State.OWNER:
# Create the next index and match index
self.nextIndex = {
node: {
obj: self.log[obj].lastApplied + 1
for obj in self.view[self]
} for node in self.neighbors()
}
self.matchIndex = {
node: {
obj: 0 for obj in self.view[self]
} for node in self.neighbors()
}
else:
raise SimulationException(
"Unknown Tag Replica State: {!r} set on {}".format(state, self)
)
def on_heartbeat_timeout(self):
"""
Time to send a heartbeat message to all tags.
"""
if not self.state == State.OWNER:
return
# Send heartbeat or aggregated writes
self.send_append_entries()
def on_session_timeout(self, started):
"""
If the session times out then go ahead and release the tag.
"""
duration = self.env.now - started
self.sim.logger.info(
"session on {} terminated at {} ({} ms)".format(
self.id, self.env.now, duration
)
)
self.sim.results.update(
'session length', (self.id, duration)
)
self.session = None
self.release()
    def on_request_tag_rpc(self, msg):
        """
        Votes on another replica's request to acquire a tag.

        The request is rejected when its epoch is behind ours, or when any
        object in the proposed view is (per our view) owned by a replica
        other than the one proposed for it. A TagResponse with the verdict
        is returned to the requester.
        """
        rpc = msg.value
        accept = True
        # The requested epoch must be equal to or greater than local.
        if rpc.epoch < self.epoch: accept = False
        # Ensure that no one else owns the tag in your current view.
        for candidate, tagset in rpc.tag.items():
            # Short circuit: an earlier rejection ends the scan.
            if not accept: break
            for tag in tagset:
                owner = self.find_owner(tag)
                if owner is not None and owner.id != candidate:
                    accept = False
                    break
        # Log the vote decision
        amsg = "accepted" if accept else "did not accept"
        lmsg = "{} {} tag [{}] for {}".format(
            self, amsg, ",".join(rpc.tag[rpc.candidate.id]), rpc.candidate.id
        )
        self.sim.logger.info(lmsg)
        # Send the vote response back to the tag requester
        return self.send(
            msg.source, TagResponse(self.epoch, accept)
        )
    def on_tag_response_rpc(self, msg):
        """
        Tallies votes on an outstanding tag request.

        In the TAGGING state: a response carrying a newer epoch forces a
        retry of the whole request at that epoch; otherwise the vote is
        recorded and, once the election passes, this replica becomes OWNER
        of the requested tag (or drops back to READY when the request was a
        full release) and broadcasts the ownership change. Responses in
        READY/OWNER are stale and ignored.

        Raises:
            TagRPCException: when received in an unexpected state.
        """
        rpc = msg.value
        if self.state == State.TAGGING:
            # If the epoch is greater than the current epoch
            if rpc.epoch > self.epoch:
                # Retry the tag request
                self.epoch = rpc.epoch
                self.send_tag_request(self.tag)
                self.sim.logger.info(
                    "{} retrying tag request for {}".format(self, self.tag)
                )
                # Exit: no more work required!
                return
            # Update the current election
            self.votes.vote(msg.source.id, rpc.accept)
            if self.votes.has_passed():
                # Update our local tag and become owner.
                if self.tag:
                    self.state = State.OWNER
                    self.view[self] = set(self.tag)
                else:
                    self.state = State.READY
                # Send out the ownership change append entries
                self.send_append_entries()
                # Log the new tag owner
                self.sim.logger.info(
                    "{} tag goes to: {}".format(self, self.view[self])
                )
                # Record tag length over time
                self.sim.results.update(
                    'tag size', (self.id, self.env.now, len(self.view[self]))
                )
        elif self.state in (State.READY, State.OWNER):
            # Ignore vote responses if we've changed our state
            return
        else:
            raise TagRPCException(
                "Tag request response in unknown state: '{}'".format(self.state)
            )
    def on_append_entries_rpc(self, msg):
        """
        Follower-side handling of an AppendEntries RPC.

        Rejects the whole message when the sender's epoch is stale. Then,
        per object: performs the Raft-style consistency check against the
        sender's previous index/epoch, truncates on conflict, appends new
        entries, advances the commit index, and replies with an AEResponse
        carrying per-object success flags and log states.
        """
        rpc = msg.value
        # reply false if the epoch < current epoch
        if rpc.epoch < self.epoch:
            self.sim.logger.info(
                "{} doesn't accept append entries in epoch {} for epoch {}".format(
                    self, self.epoch, rpc.epoch
                )
            )
            # Send back the request that you made originally.
            return self.send(
                msg.source, AEResponse(
                    self.epoch,
                    {obj: False for obj in rpc.tag.keys()},
                    rpc.tag, Reason.EPOCH
                )
            )
        # Update the view to match the view of the append entries
        # Update the epoch to match the rpc of the append entries
        self.view[msg.source] = set(rpc.tag.keys())
        if self.epoch < rpc.epoch:
            self.epoch = rpc.epoch
        # Now for each object in the RPC, perform Raft-like append entries.
        # The success tracking is a complete tracking for all objects, will
        # return false even if we need to update the log for only one thing.
        # We will reply back with a state object that has per-object details.
        success = defaultdict(bool)
        state = defaultdict(LogState)
        for obj, prev in rpc.tag.items():
            entries = rpc.entries[obj]
            objlog = self.log[obj]
            # If log doesn't contain an entry at prev index matching epoch.
            if objlog.lastApplied < prev.index or objlog[prev.index].term != prev.epoch:
                # Perform the logging of this state failure
                if objlog.lastApplied < prev.index:
                    self.sim.logger.info(
                        "{} doesn't accept append to {} index {} where last applied is {}".format(
                            self, obj, prev.index, objlog.lastApplied
                        )
                    )
                else:
                    self.sim.logger.info(
                        "{} doesn't accept append to {} due to epoch mismatch: {} vs {}".format(
                            self, obj, prev.epoch, objlog[prev.index].term
                        )
                    )
                # Mark that there is a problem and continue
                success[obj] = False
                # NOTE(review): this uses objlog.lastCommit where other code
                # reads objlog.commitIndex -- confirm WriteLog exposes both.
                state[obj] = LogState(objlog.lastApplied, objlog.lastTerm, objlog.lastCommit)
                continue
            # At this point the entries are accepted because of continue statements
            if entries:
                if objlog.lastApplied >= prev.index:
                    # If existing entry conflicts with a new one (same index, different epochs)
                    # Delete the existing entry and all that follow it.
                    if objlog[prev.index].term != prev.epoch:
                        objlog.truncate(prev.index)
                    if objlog.lastApplied > prev.index:
                        # Better look into what's happening here!
                        raise TagRPCException(
                            "{} is possibly receiving duplicate append entries".format(self)
                        )
                # Append any new entries not already in the log.
                for entry in entries:
                    # Add the entry/epoch to the log
                    objlog.append(*entry)
                    # Update the versions to compute visibilities
                    entry[0].update(self)
                # Log the last write from the append entries
                self.sim.logger.debug(
                    "appending {} entries to {} log on {} (term {}, commit {})".format(
                        len(entries), obj, self, objlog.lastTerm, objlog.commitIndex
                    )
                )
            # Update the commit index and save the state of the object.
            if prev.commit > objlog.commitIndex:
                objlog.commitIndex = min(prev.commit, objlog.lastApplied)
            success[obj] = True
            state[obj] = LogState(objlog.lastApplied, objlog.lastTerm, objlog.lastCommit)
        # Return the response back to the owner
        reason = Reason.OK if all(success.values()) else Reason.LOG
        return self.send(
            msg.source, AEResponse(self.epoch, success, state, reason)
        )
    def on_ae_response_rpc(self, msg):
        """
        Owner-side handling of an AEResponse acknowledgment.

        On per-object success, advance the follower's nextIndex/matchIndex;
        on failure, adopt a newer epoch and/or back off nextIndex and retry.
        Then advance each object's commit index to the highest current-epoch
        entry replicated to a quorum, committing all versions up to it.
        TAGGING retries the tag request on a newer epoch; READY ignores the
        response.

        Raises:
            TagRPCException: on an unknown failure reason or state.
        """
        rpc = msg.value
        retry = False
        if self.state == State.OWNER:
            # Update state of followers in the tag group
            for obj, success in rpc.success.items():
                if success:
                    self.nextIndex[msg.source][obj] = rpc.tag[obj].index + 1
                    self.matchIndex[msg.source][obj] = rpc.tag[obj].index
                else:
                    # If the epoch is not the same, update accordingly.
                    if rpc.epoch > self.epoch:
                        self.epoch = rpc.epoch
                    # If the failure was because of the epoch, simply retry.
                    if rpc.reason == Reason.EPOCH:
                        retry = True
                    # Otherwise decrement the next index and retry.
                    elif rpc.reason == Reason.LOG:
                        self.nextIndex[msg.source][obj] -= 1
                        retry = True
                    else:
                        raise TagRPCException(
                            "Unknown append entries failure reason: {}".format(rpc.reason)
                        )
            # Determine if we can commit the entry
            for obj, state in rpc.tag.items():
                log = self.log[obj]
                # Scan backwards from the newest entry for the highest index
                # acknowledged by a quorum (xrange: Python 2 module).
                for n in xrange(log.lastApplied, log.commitIndex, -1):
                    commit = Election(self.matchIndex.keys())
                    for node, objs in self.matchIndex.items():
                        match = objs[obj]
                        commit.vote(node, match >= n)
                    # Raft rule: only entries of the current epoch may be
                    # committed by counting replicas.
                    if commit.has_passed() and log[n].term == self.epoch:
                        # Commit all versions from the last log to now.
                        for idx in xrange(log.commitIndex, n+1):
                            if not log[idx].version: continue
                            log[idx].version.update(self, commit=True)
                        # Set the commit index and break
                        log.commitIndex = n
                        break
            # If retry, send append entries back to the source.
            if retry: self.send_append_entries(msg.source)
        elif self.state == State.TAGGING:
            # Determine if we need to retry the tagging again.
            if rpc.epoch > self.epoch:
                # Retry the tag request
                self.epoch = rpc.epoch
                self.send_tag_request(self.tag)
                self.sim.logger.info(
                    "{} retrying tag request for {}".format(self, self.tag)
                )
            return
        elif self.state == State.READY:
            # Ignore AE messages if we're not an owner anymore.
            return
        else:
            raise TagRPCException(
                "Response in unknown state: '{}'".format(self.state)
            )
def on_remote_access(self, msg):
"""
Handles remote writes to and from the replicas.
"""
access = msg.value.access
# Ensure that we own the object
if not self.owns(access.name):
return self.send(
msg.source, AccessResponse(self.epoch, False, access)
)
# If we do own the object, then respond:
method = {
'read': self.read,
'write': self.write,
}[access.type]
# Call the remote method with the access.
method(access)
return self.send(
msg.source, AccessResponse(self.epoch, True, access)
)
def on_access_response_rpc(self, msg):
"""
Handles responses to remote accesses.
"""
rpc = msg.value
if rpc.success:
rpc.access.complete()
|
bbengfort/cloudscope
|
cloudscope/replica/consensus/tag.py
|
Python
|
mit
| 27,667
|
#!/usr/bin/python3
# vi:set fileencoding=utf-8 :
"""
Created on 2014-03-22
@author : Laurent Stacul
"""
|
stac47/osm-garmin
|
tests/__init__.py
|
Python
|
gpl-2.0
| 106
|
import random

# Demonstrates random.sample(); the values in the trailing comments are
# example outputs and will differ from run to run.
l = [0, 1, 2, 3, 4]

print(random.sample(l, 3))
# [2, 4, 0]

print(type(random.sample(l, 3)))
# <class 'list'>

print(random.sample(l, 1))
# [3]

print(random.sample(l, 0))
# []

# print(random.sample(l, 10))
# ValueError: Sample larger than population or is negative

print(random.sample(('xxx', 'yyy', 'zzz'), 2))
# ['xxx', 'yyy']

print(random.sample('abcde', 2))
# ['b', 'e']

print(tuple(random.sample(('xxx', 'yyy', 'zzz'), 2)))
# ('xxx', 'yyy')

print(''.join(random.sample('abcde', 2)))
# dc

l_dup = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]

print(random.sample(l_dup, 3))
# [3, 1, 1]

print(set(l_dup))
# {0, 1, 2, 3}

# FIX: random.sample() requires a sequence; passing a set was deprecated in
# Python 3.9 and raises TypeError since 3.11. Convert to a sorted list so
# the population is an ordered sequence first.
print(random.sample(sorted(set(l_dup)), 3))
# [1, 3, 2]
|
nkmk/python-snippets
|
notebook/random_sample.py
|
Python
|
mit
| 688
|
"""
Unit tests over SQLite backend for Crash Database
"""
from apport.report import Report
import os
from unittest import TestCase
from sqlite import CrashDatabase
class CrashDatabaseTestCase(TestCase):
    """
    Unit tests for the SQLite-backed apport CrashDatabase implementation.
    """

    def setUp(self):
        # Crash reports are written under /tmp and served via a file:// URL.
        self.crash_base = os.path.sep + 'tmp'
        self.crash_base_url = 'file://' + self.crash_base + '/'
        self.crash_path = os.path.join(self.crash_base, 'test.crash')
        # A minimal apport report resembling a SIGSEGV in napoleon-solod.
        self.r = Report()
        self.r['ExecutablePath'] = '/usr/bin/napoleon-solod'
        self.r['Package'] = 'libnapoleon-solo1 1.2-1'
        self.r['Signal'] = '11'
        self.r['StacktraceTop'] = """foo_bar (x=2) at crash.c:28
d01 (x=3) at crash.c:29
raise () from /lib/libpthread.so.0
<signal handler called>
__frob (x=4) at crash.c:30"""

    def tearDown(self):
        # Remove any crash files the tests may have written under /tmp.
        if os.path.exists(self.crash_path):
            os.unlink(self.crash_path)
        exe_crash_base = os.path.join(self.crash_base, '1_usr_bin_napoleon-solod')
        if os.path.exists(exe_crash_base):
            os.unlink(exe_crash_base)

    def test_create_db_default(self):
        # Without a dbfile option, the database is created in the home dir.
        try:
            CrashDatabase(None, {})
            self.assertTrue(os.path.isfile(os.path.expanduser('~/crashdb.sqlite')))
        finally:
            os.unlink(os.path.expanduser('~/crashdb.sqlite'))

    def test_crashes_base_url(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        self.assertEqual(cb.base_url, self.crash_base_url)

    def test_crashes_base_url_is_none(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:'})
        self.assertIsNone(cb.base_url)

    def test_upload_download(self):
        # A round trip through upload/download preserves the report fields.
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        crash_id = cb.upload(self.r)
        self.assertEqual(crash_id, 1)
        report = cb.download(1)
        self.assertIsInstance(report, Report)
        self.assertIn('Signal', report)
        self.assertEqual(report['Signal'], '11')

    def test_failed_upload_no_URL(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:'})
        self.assertRaises(ValueError, cb.upload, self.r)

    def test_failed_upload_invalid_URL_scheme(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:'})
        self.r['_URL'] = 'invalid://scheme/path'
        self.assertRaises(ValueError, cb.upload, self.r)

    def test_failed_download(self):
        # Downloading a crash id that was never uploaded must fail.
        cb = CrashDatabase(None, {'dbfile': ':memory:'})
        self.assertRaises(Exception, cb.download, 23232)

    def test_get_id_url(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:'})
        self.assertEqual("#1", cb.get_id_url(None, 1))
        self.assertEqual("#1: napoleon-solod crashed with SIGSEGV in foo_bar()",
                         cb.get_id_url(self.r, 1))

    def test_update(self):
        """
        Test complete update
        """
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        crash_id = cb.upload(self.r)
        self.r['SourcePackage'] = 'adios'
        self.r['Signal'] = u'9'
        cb.update(crash_id, self.r, 'a comment to add')
        report = cb.download(crash_id)
        self.assertIn('SourcePackage', report)
        self.assertEqual(report['Signal'], u'9')

    def test_update_with_key_filter(self):
        """
        Test a partial update
        """
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        crash_id = cb.upload(self.r)
        self.r['SourcePackage'] = 'adios'
        self.r['Signal'] = u'9'
        # Only the filtered keys should be written back to the database.
        cb.update(crash_id, self.r, 'a comment to add', key_filter=('Package', 'SourcePackage'))
        report = cb.download(crash_id)
        self.assertIn('SourcePackage', report)
        self.assertNotEqual(report['Signal'], u'9')

    def test_failed_update_no_URL(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:'})
        self.r['_URL'] = self.crash_base_url + 'test.crash'
        crash_id = cb.upload(self.r)
        del self.r['_URL']
        self.assertRaises(ValueError, cb.update, *(crash_id, self.r, 'comment'))

    def test_get_distro_release(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        crash_id = cb.upload(self.r)
        self.assertIsNone(cb.get_distro_release(crash_id))
        self.r['DistroRelease'] = 'Ubuntu 14.04'
        crash_id = cb.upload(self.r)
        self.assertEqual(cb.get_distro_release(crash_id), 'Ubuntu 14.04')

    def test_get_unretraced(self):
        # A crash stays "unretraced" until a fully symbolized Stacktrace is
        # attached; a trace with unresolved frames makes it unretraced again.
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        self.assertEqual(cb.get_unretraced(), [])
        crash_id = cb.upload(self.r)
        self.assertEqual(cb.get_unretraced(), [crash_id])
        self.r['Stacktrace'] = """
#0  0x00007f96dcfb9f77 in __GI_raise (sig=sig@entry=6) at ../nptl/sysdeps/unix/sysv/linux/raise.c:56
        resultvar = 0
        pid = 1427
        selftid = 1427
#1  0x00007f96dcfbd5e8 in __GI_abort () at abort.c:90
        save_stage = 2
        act = {__sigaction_handler = {sa_handler = 0x0, sa_sigaction = 0x0}, sa_mask = {__val = {140286034336064, 140285996709792, 140285998988405, 5, 0, 752786625060479084, 140285929102568, 140285994568476, 140285996709792, 140285459489344, 140285999015717, 140285994520128, 140285996776629, 140285996776368, 140733249635424, 6}}, sa_flags = 56247888, sa_restorer = 0x18}
        sigs = {__val = {32, 0 <repeats 15 times>}}
#2  0x00007f96e0deccbc in smb_panic_default (why=0x7f96e0df8b1c "internal error") at ../lib/util/fault.c:149
No locals.
#3  smb_panic (why=why@entry=0x7f96e0df8b1c "internal error") at ../lib/util/fault.c:162
No locals.
#4  0x00007f96e0dece76 in fault_report (sig=<optimized out>) at ../lib/util/fault.c:77
        counter = 1
#5  sig_fault (sig=<optimized out>) at ../lib/util/fault.c:88
No locals.
#6  <signal handler called>
No locals.
#7  0x00007f96b9bae711 in sarray_get_safe (indx=<optimized out>, array=<optimized out>) at /build/buildd/gcc-4.8-4.8.1/src/libobjc/objc-private/sarray.h:237
No locals.
#8  objc_msg_lookup (receiver=0x7f96e3485278, op=0x7f96c0fae240 <_OBJC_SELECTOR_TABLE+128>) at /build/buildd/gcc-4.8-4.8.1/src/libobjc/sendmsg.c:448
No locals.
#9  0x00007f96c0da737a in sogo_table_get_row (table_object=<optimized out>, mem_ctx=0x7f96e33e5940, query_type=MAPISTORE_PREFILTERED_QUERY, row_id=1, data=0x7fff035a4e00) at MAPIStoreSOGo.m:1464
        e = <optimized out>
        ret = MAPISTORE_SUCCESS
        wrapper = <optimized out>
        pool = 0x7f96e3485278
        table = <optimized out>
        rc = 0
        __FUNCTION__ = "sogo_table_get_row"
        __PRETTY_FUNCTION__ = "sogo_table_get_row"
"""
        cb.update(crash_id, self.r, "")
        self.assertEqual(cb.get_unretraced(), [])
        self.r['Stacktrace'] = """#8  0x00007ff5aae8e159 in ldb_msg_find_ldb_val (msg=<optimised out>, attr_name=<optimised out>) at ../common/ldb_msg.c:399
        el = <optimised out>
#9  0x00007ff5aae8e669 in ldb_msg_find_attr_as_string (msg=<optimised out>, attr_name=<optimised out>, default_value=0x0) at ../common/ldb_msg.c:584
        v = <optimised out>
#10 0x00007ff5905d0e5f in ?? ()
No symbol table info available.
#11 0x0000000000000081 in ?? ()
No symbol table info available.
#12 0x0000000000000000 in ?? ()
No symbol table info available."""
        cb.update(crash_id, self.r, "")
        self.assertEqual(cb.get_unretraced(), [crash_id])

    def test_get_unfixed(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        self.assertEqual(cb.get_unfixed(), set())
        crash_id = cb.upload(self.r)
        self.assertEqual(cb.get_unfixed(), set([crash_id]))
        # Closing a crash as a duplicate removes it from the unfixed set.
        cb.close_duplicate(self.r, crash_id, crash_id)
        self.assertEqual(cb.get_unfixed(), set())

    def test_close_duplicate(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        crash_id = cb.upload(self.r)
        self.assertIsNone(cb.duplicate_of(crash_id))
        crash_id2 = cb.upload(self.r)
        self.assertIsNone(cb.duplicate_of(crash_id2))
        cb.close_duplicate(self.r, crash_id2, crash_id)
        self.assertEqual(cb.duplicate_of(crash_id2), crash_id)
        # Remove current duplicate thing
        cb.close_duplicate(self.r, crash_id2, None)
        self.assertIsNone(cb.duplicate_of(crash_id2))

    # Tests related with components
    def test_app_components_get_set(self):
        # Setting the same component twice is idempotent.
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        crash_id = cb.upload(self.r)
        self.assertEqual(cb.get_app_components(crash_id), [])
        cb.set_app_components(crash_id, ['sand'])
        self.assertEqual(cb.get_app_components(crash_id), ['sand'])
        cb.set_app_components(crash_id, ['sand'])
        self.assertEqual(cb.get_app_components(crash_id), ['sand'])

    def test_app_components_remove(self):
        cb = CrashDatabase(None, {'dbfile': ':memory:', 'crashes_base_url': self.crash_base_url})
        crash_id = cb.upload(self.r)
        # Removing a named component that was never set raises ValueError,
        # but removing with no name is a no-op.
        self.assertRaises(ValueError, cb.remove_app_component, *(crash_id, 'sand'))
        self.assertIsNone(cb.remove_app_component(crash_id))
        cb.set_app_components(crash_id, ['sand'])
        self.assertIsNone(cb.remove_app_component(crash_id, 'sand'))
        self.assertEqual(cb.get_app_components(crash_id), [])
        cb.set_app_components(crash_id, ['sand'])
        self.assertIsNone(cb.remove_app_component(crash_id))
        self.assertEqual(cb.get_app_components(crash_id), [])
|
icandigitbaby/openchange
|
script/bug-analysis/test_sqlite.py
|
Python
|
gpl-3.0
| 9,687
|
#
# File: courseware/capa/inputtypes.py
#
"""
Module containing the problem elements which render into input objects
- textline
- textbox (aka codeinput)
- schematic
- choicegroup (aka radiogroup, checkboxgroup)
- javascriptinput
- imageinput (for clickable image)
- optioninput (for option list)
- filesubmission (upload a file)
- crystallography
- vsepr_input
- drag_and_drop
- formulaequationinput
- chemicalequationinput
These are matched by *.html files templates/*.html which are mako templates with the
actual html.
Each input type takes the xml tree as 'element', the previous answer as 'value', and the
graded status as 'status'.
"""
# TODO: make hints do something
# TODO: make all inputtypes actually render msg
# TODO: remove unused fields (e.g. 'hidden' in a few places)
# TODO: add validators so that content folks get better error messages.
# Possible todo: make inline the default for textlines and other "one-line" inputs. It probably
# makes sense, but a bunch of problems have markup that assumes block. Bigger TODO: figure out a
# general css and layout strategy for capa, document it, then implement it.
import time
import json
import logging
from lxml import etree
import re
import shlex # for splitting quoted strings
import sys
import pyparsing
import html5lib
import bleach
from .util import sanitize_html
from .registry import TagRegistry
from chem import chemcalc
from calc.preview import latex_preview
import xqueue_interface
from xqueue_interface import XQUEUE_TIMEOUT
from datetime import datetime
from xmodule.stringify import stringify_children
log = logging.getLogger(__name__)
#########################################################################
# Module-level TagRegistry instance shared by the input types defined below.
registry = TagRegistry()  # pylint: disable=C0103
class Status(object):
    """
    Immutable view of a problem's status string.

    Wraps the raw status value and exposes the css class rendered for it
    (``classname``) and a translated, learner-facing label
    (``display_name``).
    """
    # Maps raw status values onto their css class; any status not listed
    # here is used verbatim as its own css class.
    css_classes = {
        # status: css class
        'unsubmitted': 'unanswered',
        'incomplete': 'incorrect',
        'queued': 'processing',
    }

    __slots__ = ('classname', '_status', 'display_name')

    def __init__(self, status, gettext_func=unicode):
        _ = gettext_func
        # Translated labels shown to the learner for each status value.
        labels = {
            'correct': _('correct'),
            'incorrect': _('incorrect'),
            'incomplete': _('incomplete'),
            'unanswered': _('unanswered'),
            'unsubmitted': _('unanswered'),
            'queued': _('processing'),
        }
        self._status = status or ''
        self.classname = self.css_classes.get(status, status)
        self.display_name = labels.get(status, unicode(status))

    def __str__(self):
        return self._status

    def __unicode__(self):
        return self._status.decode('utf8')

    def __repr__(self):
        return 'Status(%r)' % self._status

    def __eq__(self, other):
        return self._status == str(other)
class Attribute(object):
    """
    Allows specifying required and optional attributes for input types.
    """

    # Sentinel distinguishing "no default supplied" (i.e. the attribute is
    # required) from an explicit default of None.
    _sentinel = object()

    def __init__(self, name, default=_sentinel, transform=None, validate=None, render=True):
        """
        Define an attribute

        name (str): then name of the attribute--should be alphanumeric (valid for an XML attribute)
        default (any type): If not specified, this attribute is required.  If specified, use this as the default value
            if the attribute is not specified.  Note that this value will not be transformed or validated.
        transform (function str -> any type): If not None, will be called to transform the parsed value into an internal
            representation.
        validate (function str-or-return-type-of-transform -> unit or exception): If not None, called to validate the
            (possibly transformed) value of the attribute.  Should raise ValueError with a helpful message if
            the value is invalid.
        render (bool): if False, don't include this attribute in the template context.
        """
        self.name = name
        self.default = default
        self.validate = validate
        self.transform = transform
        self.render = render

    def parse_from_xml(self, element):
        """
        Given an etree xml element that should have this attribute, do the obvious thing:
          - look for it.  raise ValueError if not found and required.
          - transform and validate.  pass through any exceptions from transform or validate.
        """
        val = element.get(self.name)
        # Use an identity check (`is`), not equality: a caller-supplied default
        # with a permissive __eq__ must not spuriously match the sentinel.
        if self.default is self._sentinel and val is None:
            raise ValueError(
                'Missing required attribute {0}.'.format(self.name)
            )
        if val is None:
            # not required, so return default
            return self.default
        if self.transform is not None:
            val = self.transform(val)
        if self.validate is not None:
            self.validate(val)
        return val
class InputTypeBase(object):
    """
    Abstract base class for input types.
    """
    # Subclasses must set `template` to the name of the mako template used
    # to render this input; get_html() raises NotImplementedError otherwise.
    template = None
    def __init__(self, system, xml, state):
        """
        Instantiate an InputType class.  Arguments:

        - system    : LoncapaModule instance which provides OS, rendering, and user context.
                      Specifically, must have a render_template function.
        - xml       : Element tree of this Input element
        - state     : a dictionary with optional keys:
                      * 'value'  -- the current value of this input
                          (what the student entered last time)
                      * 'id' -- the id of this input, typically
                          "{problem-location}_{response-num}_{input-num}"
                      * 'status' (answered, unanswered, unsubmitted)
                      * 'input_state' -- dictionary containing any inputtype-specific state
                          that has been preserved
                      * 'feedback' (dictionary containing keys for hints, errors, or other
                          feedback from previous attempt.  Specifically 'message', 'hint',
                          'hintmode'.  If 'hintmode' is 'always', the hint is always displayed.)
        """
        self.xml = xml
        self.tag = xml.tag
        self.capa_system = system
        # NOTE: ID should only come from one place. If it comes from multiple,
        # we use state first, XML second (in case the xml changed, but we have
        # existing state with an old id). Since we don't make this guarantee,
        # we can swap this around in the future if there's a more logical
        # order.
        self.input_id = state.get('id', xml.get('id'))
        if self.input_id is None:
            raise ValueError(
                "input id state is None. xml is {0}".format(etree.tostring(xml))
            )
        self.value = state.get('value', '')
        # Feedback from the previous attempt: message/hint/hintmode (see
        # the docstring above for the expected keys).
        feedback = state.get('feedback', {})
        self.msg = feedback.get('message', '')
        self.hint = feedback.get('hint', '')
        self.hintmode = feedback.get('hintmode', None)
        self.input_state = state.get('input_state', {})
        # put hint above msg if it should be displayed
        if self.hintmode == 'always':
            self.msg = self.hint + ('<br/>' if self.msg else '') + self.msg
        self.status = state.get('status', 'unanswered')
        try:
            # Pre-parse and process all the declared requirements.
            self.process_requirements()
            # Call subclass "constructor" -- means they don't have to worry about calling
            # super().__init__, and are isolated from changes to the input
            # constructor interface.
            self.setup()
        except Exception as err:
            # Something went wrong: add xml to message, but keep the traceback.
            # Python 2 three-argument raise: re-raise as a generic Exception
            # while preserving the original traceback object.
            msg = u"Error in xml '{x}': {err} ".format(
                x=etree.tostring(xml), err=err.message)
            raise Exception, msg, sys.exc_info()[2]
    @classmethod
    def get_attributes(cls):
        """
        Should return a list of Attribute objects (see docstring there for details). Subclasses should override. e.g.
        return [Attribute('unicorn', True), Attribute('num_dragons', 12, transform=int), ...]
        """
        return []
    def process_requirements(self):
        """
        Subclasses can declare lists of required and optional attributes.  This
        function parses the input xml and pulls out those attributes.  This
        isolates most simple input types from needing to deal with xml parsing at all.

        Processes attributes, putting the results in the self.loaded_attributes dictionary.  Also creates a set
        self.to_render, containing the names of attributes that should be included in the context by default.
        """
        # Use local dicts and sets so that if there are exceptions, we don't
        # end up in a partially-initialized state.
        loaded = {}
        to_render = set()
        for attribute in self.get_attributes():
            loaded[attribute.name] = attribute.parse_from_xml(self.xml)
            if attribute.render:
                to_render.add(attribute.name)
        self.loaded_attributes = loaded
        self.to_render = to_render
    def setup(self):
        """
        InputTypes should override this to do any needed initialization.  It is called after the
        constructor, so all base attributes will be set.

        If this method raises an exception, it will be wrapped with a message that includes the
        problem xml.
        """
        pass
    def handle_ajax(self, dispatch, data):
        """
        InputTypes that need to handle specialized AJAX should override this.

        Input:
            dispatch: a string that can be used to determine how to handle the data passed in
            data: a dictionary containing the data that was sent with the ajax call

        Output:
            a dictionary object that can be serialized into JSON. This will be sent back to the Javascript.
        """
        pass
    def _get_render_context(self):
        """
        Should return a dictionary of keys needed to render the template for the input type.

        (Separate from get_html to faciliate testing of logic separately from the rendering)

        The default implementation gets the following rendering context: basic things like value, id, status, and msg,
        as well as everything in self.loaded_attributes, and everything returned by self._extra_context().

        This means that input types that only parse attributes and pass them to the template get everything they need,
        and don't need to override this method.
        """
        context = {
            'id': self.input_id,
            'value': self.value,
            'status': Status(self.status, self.capa_system.i18n.ugettext),
            'msg': self.msg,
            'STATIC_URL': self.capa_system.STATIC_URL,
        }
        # Only the attributes the subclass marked render=True are exposed.
        context.update(
            (a, v) for (a, v) in self.loaded_attributes.iteritems() if a in self.to_render
        )
        context.update(self._extra_context())
        return context
    def _extra_context(self):
        """
        Subclasses can override this to return extra context that should be passed to their templates for rendering.

        This is useful when the input type requires computing new template variables from the parsed attributes.
        """
        return {}
    def get_html(self):
        """
        Return the html for this input, as an etree element.
        """
        if self.template is None:
            raise NotImplementedError("no rendering template specified for class {0}"
                                      .format(self.__class__))
        context = self._get_render_context()
        html = self.capa_system.render_template(self.template, context)
        try:
            output = etree.XML(html)
        except etree.XMLSyntaxError as ex:
            # If `html` contains attrs with no values, like `controls` in <audio controls src='smth'/>,
            # the XML parser will raise an exception, so we fall back to html5parser,
            # which will set empty "" values for such attrs.
            try:
                output = html5lib.parseFragment(html, treebuilder='lxml', namespaceHTMLElements=False)[0]
            except IndexError:
                # html5lib produced no fragment either; re-raise the original XML error.
                raise ex
        return output
    def get_user_visible_answer(self, internal_answer):
        """
        Given the internal representation of the answer provided by the user, return the representation of the answer
        as the user saw it.  Subclasses should override this method if and only if the internal represenation of the
        answer is different from the answer that is displayed to the user.
        """
        return internal_answer
#-----------------------------------------------------------------------------
@registry.register
class OptionInput(InputTypeBase):
    """
    Input type for selecting and Select option input type.

    Example:
    <optioninput options="('Up','Down')" label="Where is the sky?" correct="Up"/><text>The location of the sky</text>

    # TODO: allow ordering to be randomized
    """
    template = "optioninput.html"
    tags = ['optioninput']
    @staticmethod
    def parse_options(options):
        """
        Given options string, convert it into an ordered list of (option_id, option_description) tuples, where
        id==description for now.  TODO: make it possible to specify different id and descriptions.
        """
        # convert single quotes inside option values to html encoded string
        # NOTE(review): this substitution inserts a plain single quote, which looks
        # like a no-op -- presumably it once inserted an html entity (&#39;); confirm.
        options = re.sub(r"([a-zA-Z])('|\\')([a-zA-Z])", r"\1'\3", options)
        options = re.sub(r"\\'", r"'", options)  # replace already escaped single quotes
        # parse the set of possible options, stripping the surrounding parens
        # from the "('a','b')" form before lexing (Python 2: shlex needs bytes).
        lexer = shlex.shlex(options[1:-1].encode('utf8'))
        lexer.quotes = "'"
        # Allow options to be separated by whitespace as well as commas
        lexer.whitespace = ", "
        # remove quotes
        # convert escaped single quotes (html encoded string) back to single quotes
        tokens = [x[1:-1].decode('utf8').replace("'", "'") for x in lexer]
        # make list of (option_id, option_description), with description=id
        return [(t, t) for t in tokens]
    @classmethod
    def get_attributes(cls):
        """
        Convert options to a convenient format.
        """
        return [Attribute('options', transform=cls.parse_options),
                Attribute('label', ''),
                Attribute('inline', False)]
#-----------------------------------------------------------------------------
# TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of
# desired semantics.
@registry.register
class ChoiceGroup(InputTypeBase):
    """
    Radio button or checkbox inputs: multiple choice or true/false

    TODO: allow order of choices to be randomized, following lon-capa spec.  Use
    "location" attribute, ie random, top, bottom.

    Example:
    <choicegroup label="Which foil?">
      <choice correct="false" name="foil1">
        <text>This is foil One.</text>
      </choice>
      <choice correct="false" name="foil2">
        <text>This is foil Two.</text>
      </choice>
      <choice correct="true" name="foil3">
        <text>This is foil Three.</text>
      </choice>
    </choicegroup>
    """
    template = "choicegroup.html"
    tags = ['choicegroup', 'radiogroup', 'checkboxgroup']
    def setup(self):
        i18n = self.capa_system.i18n
        # suffix is '' or [] to change the way the input is handled in --as a scalar or vector
        # value. (VS: would be nice to make this less hackish).
        if self.tag == 'choicegroup':
            self.suffix = ''
            self.html_input_type = "radio"
        elif self.tag == 'radiogroup':
            self.html_input_type = "radio"
            self.suffix = '[]'
        elif self.tag == 'checkboxgroup':
            self.html_input_type = "checkbox"
            self.suffix = '[]'
        else:
            _ = i18n.ugettext
            # Translators: 'ChoiceGroup' is an input type and should not be translated.
            msg = _("ChoiceGroup: unexpected tag {tag_name}").format(tag_name=self.tag)
            raise Exception(msg)
        self.choices = self.extract_choices(self.xml, i18n)
        # Map of choice name -> choice markup, used to show the student the
        # text of what they selected (see get_user_visible_answer).
        self._choices_map = dict(self.choices,)  # pylint: disable=attribute-defined-outside-init
    @classmethod
    def get_attributes(cls):
        # Identity "translation" marker so the literal below is extracted for
        # localization while the runtime value stays the English source string.
        _ = lambda text: text
        return [Attribute("show_correctness", "always"),
                Attribute('label', ''),
                Attribute("submitted_message", _("Answer received."))]
    def _extra_context(self):
        return {'input_type': self.html_input_type,
                'choices': self.choices,
                'name_array_suffix': self.suffix}
    @staticmethod
    def extract_choices(element, i18n):
        """
        Extracts choices for a few input types, such as ChoiceGroup, RadioGroup and
        CheckboxGroup.

        returns list of (choice_name, choice_text) tuples

        TODO: allow order of choices to be randomized, following lon-capa spec.  Use
        "location" attribute, ie random, top, bottom.
        """
        choices = []
        _ = i18n.ugettext
        for choice in element:
            if choice.tag != 'choice':
                msg = u"[capa.inputtypes.extract_choices] {error_message}".format(
                    # Translators: '<choice>' is a tag name and should not be translated.
                    error_message=_("Expected a <choice> tag; got {given_tag} instead").format(
                        given_tag=choice.tag
                    )
                )
                raise Exception(msg)
            choices.append((choice.get("name"), stringify_children(choice)))
        return choices
    def get_user_visible_answer(self, internal_answer):
        # A single (radio) answer is a string; checkbox answers are a list
        # of choice names -- map each back to the choice markup shown.
        if isinstance(internal_answer, basestring):
            return self._choices_map[internal_answer]
        return [self._choices_map[i] for i in internal_answer]
#-----------------------------------------------------------------------------
@registry.register
class JavascriptInput(InputTypeBase):
    """
    Hidden field for javascript to communicate via; also loads the required
    scripts for rendering the problem and passes data to the problem.

    TODO (arjun?): document this in detail.  Initial notes:
        - display_class is a subclass of XProblemClassDisplay (see
          xmodule/xmodule/js/src/capa/display.coffee),
        - display_file is the js script to be in /static/js/ where
          display_class is defined.
    """
    template = "javascriptinput.html"
    tags = ['javascriptinput']

    @classmethod
    def get_attributes(cls):
        """
        Register the attributes.
        """
        # All four attributes are optional, defaulting to None.
        return [Attribute(attr_name, None)
                for attr_name in ('params', 'problem_state',
                                  'display_class', 'display_file')]

    def setup(self):
        # JSON cannot parse the empty string, so substitute the JSON
        # literal 'null' when there is no student-supplied value yet.
        if self.value == "":
            self.value = 'null'
#-----------------------------------------------------------------------------
@registry.register
class JSInput(InputTypeBase):
    """
    Inputtype for general javascript inputs.  Intended to be used with
    customresponse.

    Loads in a sandboxed iframe to help prevent css and js conflicts between
    frame and top-level window.  The iframe sandbox whitelist is:
        - allow-scripts
        - allow-popups
        - allow-forms
        - allow-pointer-lock
    which means the iframe cannot directly access the top-level window
    elements.

    Example:
        <jsinput html_file="/static/test.html"
                 gradefn="grade"
                 height="500"
                 width="400"/>

    See the documentation in docs/data/source/course_data_formats/jsinput.rst
    for more information.
    """
    template = "jsinput.html"
    tags = ['jsinput']

    @classmethod
    def get_attributes(cls):
        """
        Register the attributes.
        """
        return [
            Attribute('params', None),         # extra iframe params
            Attribute('html_file', None),      # document loaded into the iframe
            Attribute('gradefn', "gradefn"),   # name of the grading function
            Attribute('get_statefn', None),    # function to call in iframe
                                               # to get current state
            Attribute('initial_state', None),  # JSON string used as initial state
            Attribute('set_statefn', None),    # function to call in iframe
                                               # to set state
            Attribute('width', "400"),         # iframe width
            Attribute('height', "300"),        # iframe height
            Attribute('sop', None),            # SOP is relaxed only if this
                                               # attribute is set to false
        ]

    def _extra_context(self):
        static_url = self.capa_system.STATIC_URL
        return {
            'jschannel_loader': '{static_url}js/capa/src/jschannel.js'.format(static_url=static_url),
            'jsinput_loader': '{static_url}js/capa/src/jsinput.js'.format(static_url=static_url),
            'saved_state': self.value,
        }
#-----------------------------------------------------------------------------
@registry.register
class TextLine(InputTypeBase):
    """
    A text line input.  Can do math preview if "math"="1" is specified.

    If "trailing_text" is set to a value, then the textline will be shown with
    the value after the text input, and before the checkmark or any
    input-specific feedback.  HTML will not work, but properly escaped HTML
    characters will.  This feature is useful if you would like to specify a
    specific type of units for the text input.

    If the hidden attribute is specified, the textline is hidden and the input
    id is stored in a div with name equal to the value of the hidden attribute.
    This is used e.g. for embedding simulations turned into questions.

    Example:
        <textline math="1" trailing_text="m/s" label="How fast is a cheetah?" />
    renders a text line with a math preview and the text 'm/s' after the end
    of the text line.
    """
    template = "textline.html"
    tags = ['textline']

    @classmethod
    def get_attributes(cls):
        """
        Register the attributes.
        """
        return [
            Attribute('size', None),
            Attribute('label', ''),
            Attribute('hidden', False),
            Attribute('inline', False),
            # Attributes below used in setup(), not rendered directly.
            Attribute('math', None, render=False),
            # TODO: 'dojs' flag is temporary, for backwards compatibility with
            # 8.02x
            Attribute('dojs', None, render=False),
            Attribute('preprocessorClassName', None, render=False),
            Attribute('preprocessorSrc', None, render=False),
            Attribute('trailing_text', ''),
        ]

    def setup(self):
        attrs = self.loaded_attributes
        # TODO: do math checking using ajax instead of using js, so
        # that we only have one math parser.
        self.do_math = bool(attrs['math'] or attrs['dojs'])
        # Preprocessor to insert between raw input and Mathjax; only
        # configured when both the class name and the script are present.
        self.preprocessor = None
        if self.do_math:
            class_name = attrs['preprocessorClassName']
            script_src = attrs['preprocessorSrc']
            if class_name is not None and script_src is not None:
                self.preprocessor = {
                    'class_name': class_name,
                    'script_src': script_src,
                }

    def _extra_context(self):
        return {
            'do_math': self.do_math,
            'preprocessor': self.preprocessor,
        }
#-----------------------------------------------------------------------------
@registry.register
class FileSubmission(InputTypeBase):
    """
    Upload some files (e.g. for programming assignments)
    """
    template = "filesubmission.html"
    tags = ['filesubmission']

    @staticmethod
    def parse_files(files):
        """
        Split a whitespace-separated string like 'a.py b.py c.out' into a
        JSON-encoded list of file names.
        """
        return json.dumps(files.split())

    @classmethod
    def get_attributes(cls):
        """
        Convert the list of allowed files to a convenient format.
        """
        return [
            Attribute('allowed_files', '[]', transform=cls.parse_files),
            Attribute('label', ''),
            Attribute('required_files', '[]', transform=cls.parse_files),
        ]

    def setup(self):
        """
        Do some magic to handle queueing status (render as "queued" instead of
        "incomplete"), pull queue_len from the msg field.
        (TODO: get rid of the queue_len hack).
        """
        _ = self.capa_system.i18n.ugettext
        self.submitted_msg = _("Your files have been submitted. As soon as your submission is"
                               " graded, this message will be replaced with the grader's feedback.")
        # A status of 'incomplete' means the submission is sitting in the
        # queue; in that case 'msg' carries the queue length.
        self.queue_len = 0
        if self.status == 'incomplete':
            self.status = 'queued'
            self.queue_len = self.msg
            self.msg = self.submitted_msg

    def _extra_context(self):
        return {'queue_len': self.queue_len, }
#-----------------------------------------------------------------------------
@registry.register
class CodeInput(InputTypeBase):
    """
    A text area input for code--uses codemirror, does syntax highlighting,
    special tab handling, etc.
    """
    template = "codeinput.html"
    tags = [
        'codeinput',
        'textbox',
        # Another (older) name--at some point we may want to make it use a
        # non-codemirror editor.
    ]

    @classmethod
    def get_attributes(cls):
        """
        Convert options to a convenient format.
        """
        return [
            Attribute('rows', '30'),
            Attribute('cols', '80'),
            Attribute('hidden', ''),
            # For CodeMirror
            Attribute('mode', 'python'),
            Attribute('linenumbers', 'true'),
            # Template expects tabsize to be an int it can do math with
            Attribute('tabsize', 4, transform=int),
        ]

    def setup_code_response_rendering(self):
        """
        Implement special logic: handle queueing state, and default input.
        """
        # With no student input yet, fall back to the default text supplied
        # inside the problem's xml element.
        if not self.value and self.xml.text:
            self.value = self.xml.text.strip()
        # A status of 'incomplete' means the submission is queued; in that
        # case 'msg' carries the queue length.
        self.queue_len = 0
        if self.status == 'incomplete':
            self.status = 'queued'
            self.queue_len = self.msg
            self.msg = bleach.clean(self.submitted_msg)

    def setup(self):
        """ setup this input type """
        _ = self.capa_system.i18n.ugettext
        self.submitted_msg = _("Your answer has been submitted. As soon as your submission is"
                               " graded, this message will be replaced with the grader's feedback.")
        self.setup_code_response_rendering()

    def _extra_context(self):
        """Add queue_len to the template context."""
        return {'queue_len': self.queue_len, }
#-----------------------------------------------------------------------------
@registry.register
class MatlabInput(CodeInput):
    """
    InputType for handling Matlab code input

    Example:
        <matlabinput rows="10" cols="80" tabsize="4">
            Initial Text
        </matlabinput>
    """
    template = "matlabinput.html"
    tags = ['matlabinput']
    def setup(self):
        """
        Handle matlab-specific parsing
        """
        _ = self.capa_system.i18n.ugettext
        submitted_msg = _("Submitted. As soon as a response is returned, "
                          "this message will be replaced by that feedback.")
        self.submitted_msg = submitted_msg
        self.setup_code_response_rendering()
        xml = self.xml
        # Grader payload sent along when the student requests a plot.
        self.plot_payload = xml.findtext('./plot_payload')
        # Check if problem has been queued
        self.queuename = 'matlab'
        self.queue_msg = ''
        # this is only set if we don't have a graded response
        # the graded response takes precedence
        if 'queue_msg' in self.input_state and self.status in ['queued', 'incomplete', 'unsubmitted']:
            self.queue_msg = sanitize_html(self.input_state['queue_msg'])
        if 'queuestate' in self.input_state and self.input_state['queuestate'] == 'queued':
            self.status = 'queued'
            self.queue_len = 1
            self.msg = self.submitted_msg
            # Handle situation if no response from xqueue arrived during specified time.
            if ('queuetime' not in self.input_state or
                    time.time() - self.input_state['queuetime'] > XQUEUE_TIMEOUT):
                self.queue_len = 0
                self.status = 'unsubmitted'
                self.msg = _(
                    'No response from Xqueue within {xqueue_timeout} seconds. Aborted.'
                ).format(xqueue_timeout=XQUEUE_TIMEOUT)
    def handle_ajax(self, dispatch, data):
        """
        Handle AJAX calls directed to this input

        Args:
            - dispatch (str) - indicates how we want this ajax call to be handled
            - data (dict) - dictionary of key-value pairs that contain useful data

        Returns:
            dict - 'success' - whether or not we successfully queued this submission
                 - 'message' - message to be rendered in case of error
        """
        if dispatch == 'plot':
            return self._plot_data(data)
        return {}
    def ungraded_response(self, queue_msg, queuekey):
        """
        Handle the response from the XQueue

        Stores the response in the input_state so it can be rendered later

        Args:
            - queue_msg (str) - message returned from the queue. The message to be rendered
            - queuekey (str) - a key passed to the queue. Will be matched up to verify that this is the response we're waiting for

        Returns:
            nothing
        """
        # check the queuekey against the saved queuekey
        if('queuestate' in self.input_state and self.input_state['queuestate'] == 'queued'
           and self.input_state['queuekey'] == queuekey):
            msg = self._parse_data(queue_msg)
            # save the queue message so that it can be rendered later;
            # clearing queuestate/queuekey marks the submission as no
            # longer pending.
            self.input_state['queue_msg'] = msg
            self.input_state['queuestate'] = None
            self.input_state['queuekey'] = None
    def button_enabled(self):
        """ Return whether or not we want the 'Test Code' button visible

        Right now, we only want this button to show up when a problem has not been
        checked.
        """
        if self.status in ['correct', 'incorrect']:
            return False
        else:
            return True
    def _extra_context(self):
        """ Set up additional context variables"""
        _ = self.capa_system.i18n.ugettext
        queue_msg = self.queue_msg
        if len(self.queue_msg) > 0:  # An empty string cannot be parsed as XML but is okay to include in the template.
            try:
                etree.XML(u'<div>{0}</div>'.format(self.queue_msg))
            except etree.XMLSyntaxError:
                try:
                    html5lib.parseFragment(self.queue_msg, treebuilder='lxml', namespaceHTMLElements=False)[0]
                except (IndexError, ValueError):
                    # If neither can parse queue_msg, it contains invalid xml.
                    queue_msg = u"<span>{0}</span>".format(_("Error running code."))
        extra_context = {
            'queue_len': str(self.queue_len),
            'queue_msg': queue_msg,
            'button_enabled': self.button_enabled(),
            'matlab_editor_js': '{static_url}js/vendor/CodeMirror/octave.js'.format(
                static_url=self.capa_system.STATIC_URL),
            'msg': sanitize_html(self.msg)  # sanitize msg before rendering into template
        }
        return extra_context
    def _parse_data(self, queue_msg):
        """
        Parses the message out of the queue message

        Args:
            queue_msg (str) - a JSON encoded string

        Returns:
            returns the value for the the key 'msg' in queue_msg
        """
        try:
            result = json.loads(queue_msg)
        except (TypeError, ValueError):
            log.error("External message should be a JSON serialized dict."
                      " Received queue_msg = %s", queue_msg)
            raise
        msg = result['msg']
        return msg
    def _plot_data(self, data):
        """
        AJAX handler for the plot button

        Args:
            get (dict) - should have key 'submission' which contains the student submission

        Returns:
            dict - 'success' - whether or not we successfully queued this submission
                 - 'message' - message to be rendered in case of error
        """
        _ = self.capa_system.i18n.ugettext
        # only send data if xqueue exists
        if self.capa_system.xqueue is None:
            return {'success': False, 'message': _('Cannot connect to the queue')}
        # pull relevant info out of get
        response = data['submission']
        # construct xqueue headers
        qinterface = self.capa_system.xqueue['interface']
        qtime = datetime.utcnow().strftime(xqueue_interface.dateformat)
        callback_url = self.capa_system.xqueue['construct_callback']('ungraded_response')
        anonymous_student_id = self.capa_system.anonymous_student_id
        # TODO: Why is this using self.capa_system.seed when we have self.seed???
        queuekey = xqueue_interface.make_hashkey(str(self.capa_system.seed) + qtime +
                                                 anonymous_student_id +
                                                 self.input_id)
        xheader = xqueue_interface.make_xheader(
            lms_callback_url=callback_url,
            lms_key=queuekey,
            queue_name=self.queuename)
        # construct xqueue body
        student_info = {
            'anonymous_student_id': anonymous_student_id,
            'submission_time': qtime
        }
        contents = {
            'grader_payload': self.plot_payload,
            'student_info': json.dumps(student_info),
            'student_response': response,
            'token': getattr(self.capa_system, 'matlab_api_key', None),
            'endpoint_version': "2",
            'requestor_id': anonymous_student_id,
        }
        (error, msg) = qinterface.send_to_queue(header=xheader,
                                                body=json.dumps(contents))
        # save the input state if successful; the saved key/time let
        # ungraded_response() match up and time out the reply.
        if error == 0:
            self.input_state['queuekey'] = queuekey
            self.input_state['queuestate'] = 'queued'
            self.input_state['queuetime'] = time.time()
        return {'success': error == 0, 'message': msg}
#-----------------------------------------------------------------------------
@registry.register
class Schematic(InputTypeBase):
    """
    InputType for the schematic editor
    """
    template = "schematicinput.html"
    tags = ['schematic']

    @classmethod
    def get_attributes(cls):
        """
        Convert options to a convenient format.
        """
        # Everything except 'label' is optional with no default value.
        optional_names = ('height', 'width', 'parts', 'analyses',
                          'initial_value', 'submit_analyses')
        attributes = [Attribute(name, None) for name in optional_names]
        attributes.append(Attribute('label', ''))
        return attributes

    def _extra_context(self):
        """
        Add the schematic-editor setup script to the template context.
        """
        return {
            'setup_script': '{static_url}js/capa/schematicinput.js'.format(
                static_url=self.capa_system.STATIC_URL),
        }
#-----------------------------------------------------------------------------
@registry.register
class ImageInput(InputTypeBase):
    """
    Clickable image as an input field.  Element should specify the image
    source, height, and width, e.g.

    <imageinput src="/static/Figures/Skier-conservation-of-energy.jpg" width="388" height="560" />

    TODO: showanswer for imageimput does not work yet - need javascript to put
    rectangle over acceptable area of image.
    """
    template = "imageinput.html"
    tags = ['imageinput']

    @classmethod
    def get_attributes(cls):
        """
        Note: src, height, and width are all required.
        """
        return [Attribute('src'),
                Attribute('height'),
                Attribute('label', ''),
                Attribute('width'), ]

    def setup(self):
        """
        If value has the form [x,y], parse it and pass along the coordinates
        of the previous answer.
        """
        match = re.match(r'\[([0-9]+),([0-9]+)]',
                         self.value.strip().replace(' ', ''))
        if match is None:
            self.gx, self.gy = 0, 0
        else:
            # Note: we subtract 15 to compensate for the size of the dot on the
            # screen (a 30x30 image--lms/static/images/green-pointer.png).
            self.gx, self.gy = [int(coord) - 15 for coord in match.groups()]

    def _extra_context(self):
        return {'gx': self.gx,
                'gy': self.gy}
#-----------------------------------------------------------------------------
@registry.register
class Crystallography(InputTypeBase):
    """
    An input for crystallography -- user selects 3 points on the axes,
    and we get a plane.

    TODO: what's the actual value format?
    """
    template = "crystallography.html"
    tags = ['crystallography']

    @classmethod
    def get_attributes(cls):
        """
        Note: height, width are required.
        """
        return [Attribute('height'), Attribute('width')]
# -------------------------------------------------------------------------
@registry.register
class VseprInput(InputTypeBase):
    """
    Input for molecular geometry--show possible structures, let student
    pick structure and label positions with atoms or electron pairs.
    """
    template = 'vsepr_input.html'
    tags = ['vsepr_input']

    @classmethod
    def get_attributes(cls):
        """
        Note: height, width, molecules and geometries are required.
        """
        return [Attribute(required_name)
                for required_name in ('height', 'width',
                                      'molecules', 'geometries')]
#-------------------------------------------------------------------------
@registry.register
class ChemicalEquationInput(InputTypeBase):
    """
    An input type for entering chemical equations.  Supports live preview.

    Example:

    <chemicalequationinput size="50"/>

    options: size -- width of the textbox.
    """
    template = "chemicalequationinput.html"
    tags = ['chemicalequationinput']

    @classmethod
    def get_attributes(cls):
        """
        Can set size of text field.
        """
        return [Attribute('size', '20'),
                Attribute('label', ''), ]

    def _extra_context(self):
        """
        TODO (vshnayder): Get rid of this once we have a standard way of requiring js to be loaded.
        """
        previewer_url = '{static_url}js/capa/chemical_equation_preview.js'.format(
            static_url=self.capa_system.STATIC_URL)
        return {'previewer': previewer_url}

    def handle_ajax(self, dispatch, data):
        """
        Since we only have chemcalc preview this input, check to see if it
        matches the corresponding dispatch and send it through if it does
        """
        if dispatch != 'preview_chemcalc':
            return {}
        return self.preview_chemcalc(data)

    def preview_chemcalc(self, data):
        """
        Render an html preview of a chemical formula or equation.  `data`
        should contain a key 'formula' and value 'some formula string'.

        Returns a json dictionary:
        {
           'preview' : 'the-preview-html' or ''
           'error' : 'the-error' or ''
        }
        """
        _ = self.capa_system.i18n.ugettext
        result = {'preview': '', 'error': ''}
        if 'formula' not in data:
            result['error'] = _("No formula specified.")
            return result
        formula = data['formula']
        try:
            result['preview'] = chemcalc.render_to_html(formula)
        except pyparsing.ParseException as err:
            result['error'] = _("Couldn't parse formula: {error_msg}").format(error_msg=err.msg)
        except Exception:
            # this is unexpected, so log
            log.warning(
                "Error while previewing chemical formula", exc_info=True)
            result['error'] = _("Error while rendering preview")
        return result
#-------------------------------------------------------------------------
@registry.register
class FormulaEquationInput(InputTypeBase):
    """
    An input type for entering formula equations.  Supports live preview.

    Example:

    <formulaequationinput size="50" label="Enter the equation for motion"/>

    options: size -- width of the textbox.
    """
    template = "formulaequationinput.html"
    tags = ['formulaequationinput']

    @classmethod
    def get_attributes(cls):
        """
        Can set size of text field.
        """
        return [
            Attribute('size', '20'),
            Attribute('inline', False),
            Attribute('label', ''),
        ]

    def _extra_context(self):
        """
        TODO (vshnayder): Get rid of 'previewer' once we have a standard way of requiring js to be loaded.
        """
        return {
            'previewer': '{static_url}js/capa/src/formula_equation_preview.js'.format(
                static_url=self.capa_system.STATIC_URL),
        }

    def handle_ajax(self, dispatch, get):
        """
        Since we only have formcalc preview this input, check to see if it
        matches the corresponding dispatch and send it through if it does
        """
        if dispatch == 'preview_formcalc':
            return self.preview_formcalc(get)
        return {}

    def preview_formcalc(self, get):
        """
        Render a preview of a formula or equation. `get` should
        contain a key 'formula' with a math expression.

        Returns a json dictionary:
        {
           'preview' : '<some latex>' or ''
           'error' : 'the-error' or ''
           'request_start' : <time sent with request>
        }
        """
        _ = self.capa_system.i18n.ugettext
        result = {'preview': '',
                  'error': ''}
        try:
            formula = get['formula']
        except KeyError:
            result['error'] = _("No formula specified.")
            return result
        # Echoed back so the client can discard stale (out-of-order) previews.
        result['request_start'] = int(get.get('request_start', 0))
        try:
            # TODO add references to valid variables and functions
            # At some point, we might want to mark invalid variables as red
            # or something, and this is where we would need to pass those in.
            result['preview'] = latex_preview(formula)
        except pyparsing.ParseException:
            # The user-facing error deliberately omits parser detail; the raw
            # formula is returned so the client can display what was sent.
            result['error'] = _("Sorry, couldn't parse formula")
            result['formula'] = formula
        except Exception:
            # this is unexpected, so log
            log.warning(
                "Error while previewing formula", exc_info=True
            )
            result['error'] = _("Error while rendering preview")
        return result
#-----------------------------------------------------------------------------
@registry.register
class DragAndDropInput(InputTypeBase):
    """
    Input for drag and drop problems. Allows student to drag and drop images
    and labels onto a base image.
    """
    template = 'drag_and_drop_input.html'
    tags = ['drag_and_drop_input']
    def setup(self):
        def parse(tag, tag_type):
            """Parses a <tag ... /> xml element to a dictionary.

            Args:
                tag: xml etree element <tag...> with attributes
                tag_type: 'draggable' or 'target'.
                    If tag_type is 'draggable': all attributes except id
                    (label, icon, can_reuse) are optional.
                    If tag_type is 'target': all attributes (id, x, y, w, h)
                    are required. (x, y) - coordinates of the center of the
                    target; w, h - width and height of the target.
            Returns:
                Dictionary of values of attributes, e.g.
                dict{'id': smth, 'label': smth, 'icon': smth,
                'can_reuse': smth}.
            """
            # Per-tag-type attribute schema; Attribute._sentinel marks a
            # required attribute (parse_from_xml raises when it is absent).
            tag_attrs = dict()
            tag_attrs['draggable'] = {
                'id': Attribute._sentinel,
                'label': "", 'icon': "",
                'can_reuse': ""
            }
            tag_attrs['target'] = {
                'id': Attribute._sentinel,
                'x': Attribute._sentinel,
                'y': Attribute._sentinel,
                'w': Attribute._sentinel,
                'h': Attribute._sentinel
            }
            dic = dict()
            for attr_name in tag_attrs[tag_type].keys():
                dic[attr_name] = Attribute(attr_name,
                    default=tag_attrs[tag_type][attr_name]).parse_from_xml(tag)
            # When labels are enabled, fall back to the id as the label.
            if tag_type == 'draggable' and not self.no_labels:
                dic['label'] = dic['label'] or dic['id']
            # A draggable may itself carry nested <target> children
            # (targets-on-draggables); parse them recursively.
            if tag_type == 'draggable':
                dic['target_fields'] = [parse(target, 'target') for target in
                    tag.iterchildren('target')]
            return dic
        # add labels to images?:
        self.no_labels = Attribute('no_labels',
                                   default="False").parse_from_xml(self.xml)
        # Everything below is collected into one JSON blob consumed by the
        # client-side drag-and-drop javascript.
        to_js = dict()
        # image drag and drop onto
        to_js['base_image'] = Attribute('img').parse_from_xml(self.xml)
        # outline places on image where to drag and drop
        to_js['target_outline'] = Attribute('target_outline',
                                            default="False").parse_from_xml(self.xml)
        # one draggable per target?
        to_js['one_per_target'] = Attribute('one_per_target',
                                            default="True").parse_from_xml(self.xml)
        # list of draggables
        to_js['draggables'] = [parse(draggable, 'draggable') for draggable in
                               self.xml.iterchildren('draggable')]
        # list of targets
        to_js['targets'] = [parse(target, 'target') for target in
                            self.xml.iterchildren('target')]
        # custom background color for labels:
        label_bg_color = Attribute('label_bg_color',
                                   default=None).parse_from_xml(self.xml)
        if label_bg_color:
            to_js['label_bg_color'] = label_bg_color
        self.loaded_attributes['drag_and_drop_json'] = json.dumps(to_js)
        self.to_render.add('drag_and_drop_json')
#-------------------------------------------------------------------------
@registry.register
class EditAMoleculeInput(InputTypeBase):
    """
    An input type for edit-a-molecule. Integrates with the molecule editor
    java applet.

    Example:

    <editamolecule size="50"/>

    options: size -- width of the textbox.
    """
    template = "editamolecule.html"
    tags = ['editamoleculeinput']

    @classmethod
    def get_attributes(cls):
        """
        Declare the required molecule 'file' attribute and the optional
        'missing' attribute.
        """
        return [
            Attribute('file'),
            Attribute('missing', None),
        ]

    def _extra_context(self):
        """
        Point the template at the applet-loader javascript.
        """
        loader = '{static_url}js/capa/editamolecule.js'.format(
            static_url=self.capa_system.STATIC_URL)
        return {'applet_loader': loader}
#-----------------------------------------------------------------------------
@registry.register
class DesignProtein2dInput(InputTypeBase):
    """
    An input type for design of a protein in 2D. Integrates with the Protex
    java applet.

    Example:

    <designprotein2d width="800" height="500" target_shape="E;NE;NW;W;SW;E;none" />
    """
    template = "designprotein2dinput.html"
    tags = ['designprotein2dinput']

    @classmethod
    def get_attributes(cls):
        """
        Note: width, height, and target_shape are all required.
        """
        return [
            Attribute('width'),
            Attribute('height'),
            Attribute('target_shape'),
        ]

    def _extra_context(self):
        """
        Point the template at the applet-loader javascript.
        """
        loader = '{static_url}js/capa/design-protein-2d.js'.format(
            static_url=self.capa_system.STATIC_URL)
        return {'applet_loader': loader}
#-----------------------------------------------------------------------------
@registry.register
class EditAGeneInput(InputTypeBase):
    """
    An input type for editing a gene. Integrates with the genex GWT
    application.

    Example:

    <editagene genex_dna_sequence="CGAT" genex_problem_number="1"/>
    """
    template = "editageneinput.html"
    tags = ['editageneinput']

    @classmethod
    def get_attributes(cls):
        """
        Note: genex_dna_sequence and genex_problem_number are both required.
        """
        return [
            Attribute('genex_dna_sequence'),
            Attribute('genex_problem_number'),
        ]

    def _extra_context(self):
        """
        Point the template at the applet-loader javascript.
        """
        loader = '{static_url}js/capa/edit-a-gene.js'.format(
            static_url=self.capa_system.STATIC_URL)
        return {'applet_loader': loader}
#---------------------------------------------------------------------
@registry.register
class AnnotationInput(InputTypeBase):
    """
    Input type for annotations: students can enter some notes or other text
    (currently ungraded), and then choose from a set of tags/options, which
    are graded.

    Example:

    <annotationinput>
        <title>Annotation Exercise</title>
        <text>
            They are the ones who, at the public assembly, had put savage derangement [ate] into my thinking
            [phrenes] |89 on that day when I myself deprived Achilles of his honorific portion [geras]
        </text>
        <comment>Agamemnon says that ate or 'derangement' was the cause of his actions: why could Zeus say the same thing?</comment>
        <comment_prompt>Type a commentary below:</comment_prompt>
        <tag_prompt>Select one tag:</tag_prompt>
        <options>
            <option choice="correct">ate - both a cause and an effect</option>
            <option choice="incorrect">ate - a cause</option>
            <option choice="partially-correct">ate - an effect</option>
        </options>
    </annotationinput>

    # TODO: allow ordering to be randomized
    """
    template = "annotationinput.html"
    tags = ['annotationinput']

    def setup(self):
        """Read title, text, comment, prompts and options from self.xml."""
        xml = self.xml
        self.debug = False  # set to True to display extra debug info with input
        self.return_to_annotation = True  # return only works in conjunction with annotatable xmodule
        self.title = xml.findtext('./title', 'Annotation Exercise')
        self.text = xml.findtext('./text')
        self.comment = xml.findtext('./comment')
        self.comment_prompt = xml.findtext(
            './comment_prompt', 'Type a commentary below:')
        self.tag_prompt = xml.findtext('./tag_prompt', 'Select one tag:')
        self.options = self._find_options()
        # Need to provide a value that JSON can parse if there is no
        # student-supplied value yet.
        if self.value == '':
            self.value = 'null'
        self._validate_options()

    def _find_options(self):
        """ Returns an array of dicts where each dict represents an option. """
        elements = self.xml.findall('./options/option')
        return [{
            'id': index,
            'description': option.text,
            'choice': option.get('choice')
        } for (index, option) in enumerate(elements)]

    def _validate_options(self):
        """ Raises a ValueError if the choice attribute is missing or invalid. """
        valid_choices = ('correct', 'partially-correct', 'incorrect')
        for option in self.options:
            choice = option['choice']
            if choice is None:
                raise ValueError('Missing required choice attribute.')
            elif choice not in valid_choices:
                raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(
                    choice, ', '.join(valid_choices)))

    def _unpack(self, json_value):
        """ Unpacks the json input state into a dict. """
        d = json.loads(json_value)
        # Guard against non-dict payloads (e.g. the 'null' default) and
        # malformed 'comment'/'options' entries; fall back to empty values.
        if not isinstance(d, dict):
            d = {}
        comment_value = d.get('comment', '')
        if not isinstance(comment_value, basestring):
            comment_value = ''
        options_value = d.get('options', [])
        if not isinstance(options_value, list):
            options_value = []
        return {
            'options_value': options_value,
            'has_options_value': len(options_value) > 0,  # for convenience
            'comment_value': comment_value,
        }

    def _extra_context(self):
        """Template context: static fields plus the unpacked student state."""
        extra_context = {
            'title': self.title,
            'text': self.text,
            'comment': self.comment,
            'comment_prompt': self.comment_prompt,
            'tag_prompt': self.tag_prompt,
            'options': self.options,
            'return_to_annotation': self.return_to_annotation,
            'debug': self.debug
        }
        extra_context.update(self._unpack(self.value))
        return extra_context
@registry.register
class ChoiceTextGroup(InputTypeBase):
    """
    Groups of radiobutton/checkboxes with text inputs.

    Examples:

    RadioButton problem
    <problem>
        <startouttext/>
        A person rolls a standard die 100 times and records the results.
        On the first roll they received a "1". Given this information
        select the correct choice and fill in numbers to make it accurate.
        <endouttext/>
        <choicetextresponse>
            <radiotextgroup label="What is the correct choice?">
                <choice correct="false">The lowest number rolled was:
                    <decoy_input/> and the highest number rolled was:
                    <decoy_input/> .</choice>
                <choice correct="true">The lowest number rolled was <numtolerance_input answer="1"/>
                    and there is not enough information to determine the highest number rolled.
                </choice>
                <choice correct="false">There is not enough information to determine the lowest
                    number rolled, and the highest number rolled was:
                    <decoy_input/> .
                </choice>
            </radiotextgroup>
        </choicetextresponse>
    </problem>

    CheckboxProblem:
    <problem>
        <startouttext/>
        A person randomly selects 100 times, with replacement, from the list of numbers \(\sqrt{2}\) , 2, 3, 4 ,5 ,6
        and records the results. The first number they pick is \(\sqrt{2}\) Given this information
        select the correct choices and fill in numbers to make them accurate.
        <endouttext/>
        <choicetextresponse>
            <checkboxtextgroup label="What is the answer?">
                <choice correct="true">
                    The lowest number selected was <numtolerance_input answer="1.4142" tolerance="0.01"/>
                </choice>
                <choice correct="false">
                    The highest number selected was <decoy_input/> .
                </choice>
                <choice correct="true">There is not enough information given to determine the highest number
                    which was selected.
                </choice>
                <choice correct="false">There is not enough information given to determine the lowest number
                    selected.
                </choice>
            </checkboxtextgroup>
        </choicetextresponse>
    </problem>

    In the preceding examples the <decoy_input/> is used to generate a textinput html element
    in the problem's display. Since it is inside of an incorrect choice, no answer given
    for it will be correct, and thus specifying an answer for it is not needed.
    """
    template = "choicetext.html"
    tags = ['radiotextgroup', 'checkboxtextgroup']
    def setup(self):
        """
        Performs setup for the initial rendering of the problem.

        `self.html_input_type` determines whether this problem is displayed
        with radiobuttons or checkboxes

        If the initial value of `self.value` is '' change it to {} so that
        the template has an empty dictionary to work with.

        Sets the value of self.choices to be equal to the return value of
        `self.extract_choices`.
        """
        self.text_input_values = {}
        # The XML tag this instance was created from decides the html
        # widget type; any other tag reaching here is a programming error.
        if self.tag == 'radiotextgroup':
            self.html_input_type = "radio"
        elif self.tag == 'checkboxtextgroup':
            self.html_input_type = "checkbox"
        else:
            _ = self.capa_system.i18n.ugettext
            msg = _("{input_type}: unexpected tag {tag_name}").format(
                input_type="ChoiceTextGroup", tag_name=self.tag
            )
            raise Exception(msg)
        if self.value == '':
            # Make `value` an empty dictionary, if it currently has an empty
            # value. This is necessary because the template expects a
            # dictionary.
            self.value = {}
        self.choices = self.extract_choices(self.xml, self.capa_system.i18n)
    @classmethod
    def get_attributes(cls):
        """
        Returns a list of `Attribute` for this problem type
        """
        # Dummy gettext: marks the default message for extraction without
        # translating at class-definition time.
        _ = lambda text: text
        return [
            Attribute("show_correctness", "always"),
            Attribute("submitted_message", _("Answer received.")),
            Attribute("label", ""),
        ]
    def _extra_context(self):
        """
        Returns a dictionary of extra content necessary for rendering this InputType.

        `input_type` is either 'radio' or 'checkbox' indicating whether the choices for
        this problem will have radiobuttons or checkboxes.
        """
        return {
            'input_type': self.html_input_type,
            'choices': self.choices
        }
    @staticmethod
    def extract_choices(element, i18n):
        """
        Extracts choices from the xml for this problem type.

        If we have xml that is as follows (choice names will have been
        assigned by now):

        <radiotextgroup>
            <choice correct = "true" name ="1_2_1_choiceinput_0bc">
                The number
                <numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0" answer="5"/>
                Is the mean of the list.
            </choice>
            <choice correct = "false" name = "1_2_1_choiceinput_1bc">
                False demonstration choice
            </choice>
        </radiotextgroup>

        Choices are used for rendering the problem properly.
        The function will setup choices as follows:

        choices = [
            ("1_2_1_choiceinput_0bc",
                [{'type': 'text', 'contents': "The number", 'tail_text': '',
                  'value': ''
                  },
                 {'type': 'textinput',
                  'contents': "1_2_1_choiceinput0_numtolerance_input_0",
                  'tail_text': 'Is the mean of the list',
                  'value': ''
                  }
                 ]
             ),
            ("1_2_1_choiceinput_1bc",
                [{'type': 'text', 'contents': "False demonstration choice",
                  'tail_text': '',
                  'value': ''
                  }
                 ]
             )
        ]
        """
        _ = i18n.ugettext
        choices = []
        for choice in element:
            if choice.tag != 'choice':
                msg = u"[capa.inputtypes.extract_choices] {0}".format(
                    # Translators: a "tag" is an XML element, such as "<b>" in HTML
                    _("Expected a {expected_tag} tag; got {given_tag} instead").format(
                        expected_tag=u"<choice>",
                        given_tag=choice.tag,
                    )
                )
                raise Exception(msg)
            components = []
            # Text appearing before the first child element of the choice.
            choice_text = ''
            if choice.text is not None:
                choice_text += choice.text
            # Initialize our dict for the next content
            adder = {
                'type': 'text',
                'contents': choice_text,
                'tail_text': '',
                'value': ''
            }
            components.append(adder)
            for elt in choice:
                # for elements in the choice e.g. <text> <numtolerance_input>
                adder = {
                    'type': 'text',
                    'contents': '',
                    'tail_text': '',
                    'value': ''
                }
                tag_type = elt.tag
                # If the current `elt` is a <numtolerance_input> set the
                # `adder`type to 'numtolerance_input', and 'contents' to
                # the `elt`'s name.
                # Treat decoy_inputs and numtolerance_inputs the same in order
                # to prevent students from reading the Html and figuring out
                # which inputs are valid
                if tag_type in ('numtolerance_input', 'decoy_input'):
                    # We set this to textinput, so that we get a textinput html
                    # element.
                    adder['type'] = 'textinput'
                    adder['contents'] = elt.get('name')
                else:
                    adder['contents'] = elt.text
                # Add any tail text("is the mean" in the example)
                adder['tail_text'] = elt.tail if elt.tail else ''
                components.append(adder)
            # Add the tuple for the current choice to the list of choices
            choices.append((choice.get("name"), components))
        return choices
|
wwj718/ANALYSE
|
common/lib/capa/capa/inputtypes.py
|
Python
|
agpl-3.0
| 63,613
|
#!/usr/bin/env python
import sys, os
from setuptools import setup, find_packages
def read(fname):
    # Read a file located next to this setup.py (used for long_description).
    return open(os.path.join(os.path.dirname(__file__), fname)).read()
# Trove classifiers describing the project for PyPI.
classifiers = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Scientific/Engineering :: GIS',
]
setup(name='vectorformats',
      version='0.2',
      description='geographic data serialization/deserialization library',
      long_description=read('doc/Readme.txt'),
      author='VectorFormats (iocast)',
      author_email='vectorformats@live.com',
      url='http://featureserver.org/vectorformats.html',
      #packages=['vectorformats',
      #          'vectorformats.formats',
      #          'vectorformats.lib'],
      # Auto-discover packages instead of listing them by hand.
      packages=find_packages(exclude=["doc", "tests"]),
      install_requires=['dxfwrite>=1.2.0',
                        'simplejson>=2.6.2',
                        'pyspatialite>=3.0.1a0',
                        'pyshp>=1.1.4',
                        'Cheetah>=2.4.4'],
      test_suite = 'tests.test_suite',
      zip_safe=False,
      license="MIT",
      classifiers=classifiers
      )
|
pusateri/vectorformats
|
setup.py
|
Python
|
mit
| 1,365
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSquash(RPackage):
    """Color-Based Plots for Multivariate Visualization

    Functions for color-based visualization of multivariate data, i.e.
    colorgrams or heatmaps. Lower-level functions map numeric values to colors,
    display a matrix as an array of colors, and draw color keys. Higher-level
    plotting functions generate a bivariate histogram, a dendrogram aligned
    with a color-coded matrix, a triangular distance matrix, and more."""
    # CRAN package locations: current release page, source tarball, and the
    # archive listing used for older versions.
    homepage = "https://cloud.r-project.org/package=squash"
    url      = "https://cloud.r-project.org/src/contrib/squash_1.0.8.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/squash"
    # Available releases, newest first; sha256 pins the exact tarball.
    version('1.0.9', sha256='ff381c85071e3407574e3db28d789657f64e7d3f9d74ac123539de22ab8ac6f4')
    version('1.0.8', sha256='e6932c0a461d5c85f7180a31d18a3eb4f28afd6769efe251075a4de12de039f4')
    version('1.0.7', sha256='d2d7182a72dfd93b8b65e775bea11e891c38598fa49a3ed4f92ec1159ffab6f1')
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-squash/package.py
|
Python
|
lgpl-2.1
| 1,180
|
#!/usr/bin/env python3
# Python primary Helper to generate PWM audio signals to control a servos
# Current setup involves sending a mono audio PWM signal from the left (or right) channel to control a servo
# We use a USB soundcard/default audio jack to output audio waveform, but since it is usually 2V peak DC, we need an Op-Amp circuit to boost to ~5V
# Please check documentation for USB powered circuit powering servo and op amp circuit
# WAV files are favoured as signal sources as they are lossless as compared to MP3
# Sound player module fallback is ffmpeg, but for windows systems it is better to install PYAUDIO, since it does not need to access restricted folders to generate a temporary wav file for playing.
# To install pyaudio, some helper packages are needed first: libasound-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg. Hopefully this means it plays the audio file created in situ before the entire Pulsegen class destructs after each audio command. Wholesome, organic, grass-fed audio solution...
# For playing saved .wav files, we should use python sounddevices to choose the output device first
# Made 2019, Wu Mingsong
# mingsongwu [at] outlook [dot] sg
###
from ctypes import *
from contextlib import contextmanager
import time
from pydub import AudioSegment
from pydub.generators import SignalGenerator
# from pydub.playback import play
from extraFunctions import play
import os, sys
SHOWERROR = False
## removing buggy/useless ALSA pyaudio errors. Does not affect audio output.
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
def py_error_handler(filename, line, function, err, fmt):
    # Intentionally swallow every ALSA error message; this is wrapped by
    # ERROR_HANDLER_FUNC below and installed into libasound so that
    # noisy-but-harmless warnings never reach stderr.
    pass
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def noALSAerror():
    """Context manager that silences ALSA/pyaudio noise while audio plays.

    With SHOWERROR False, stderr (fd 2) is redirected to /dev/null for the
    duration of the block; with SHOWERROR True, a no-op error handler is
    installed into libasound instead, so ALSA messages are dropped while
    other stderr output still appears.
    """
    if not SHOWERROR:
        # stackoverflow.com/a/36966379
        # Duplicate stderr, point fd 2 at /dev/null, and restore it in the
        # finally block even if the wrapped code raises.
        devnull = os.open(os.devnull, os.O_WRONLY)
        old_stderr = os.dup(2)
        sys.stderr.flush()
        os.dup2(devnull, 2)
        os.close(devnull)
        try:
            yield
        finally:
            os.dup2(old_stderr, 2)
            os.close(old_stderr)
    else:
        # Route ALSA's error callback to the silent handler, then restore
        # the default handler once the block finishes.
        asound = cdll.LoadLibrary('libasound.so')
        asound.snd_lib_error_set_handler(c_error_handler)
        yield
        asound.snd_lib_error_set_handler(None)
##
class Pulsegen(SignalGenerator):
    """pydub SignalGenerator producing a PWM pulse train for servo control.

    `duty` is the fraction of each cycle spent at full amplitude, `freq` the
    pulse frequency in Hz, `duration` the playback length in ms, and `pan`
    selects the output channel (-1 = left, 1 = right).
    """
    # Pan constants: -1 routes all volume to the left channel, 1 to the right.
    PANLEFT = -1
    PANRIGHT = 1
    def __init__(self, duty, polarity = True, freq = 51, duration = 400, pan = -1, **kwargs):
        super().__init__(**kwargs)
        self.freq = freq
        self.duty = duty
        self.polarity = polarity
        self.duration = duration
        ## pan function is volume equaliser: -1 = 100% left, 1 = 100% right
        self.pan = pan
    def generate(self):
        """Yield one sample at a time: full amplitude for the first
        `duty` fraction of each cycle, silence for the rest."""
        sample_n = 0
        # in samples
        cycle_length = self.sample_rate / float(self.freq)
        pulse_length = cycle_length * self.duty
        while True:
            if (sample_n % cycle_length) < pulse_length:
                # `polarity` flips the pulse between +1.0 and -1.0.
                if self.polarity == True:
                    yield 1.0
                else:
                    yield -1.0
            else:
                yield 0
            sample_n += 1
    def playpulse(self):
        """Render `duration` ms of the pulse train and play it, panned."""
        sound_segment = self.to_audio_segment(self.duration)
        ## pan function is volume equaliser: -1 = 100% left, 1 = 100% right
        sound_segment = sound_segment.pan(self.pan)
        ## setting channels instead is possible, but using stereo output effectively sends mono signal through both channel contacts = stereo output
        #sound_segment = sound_segment.set_channels(1)
        with noALSAerror():
            play(sound_segment)
    def setPan(self, pan):
        # Switch the output channel (-1 left / 1 right) for subsequent plays.
        self.pan = pan
    def __enter__(self):
        # print('\nPulse generator initialising...done\n')
        return self
    def __exit__(self, e_type, e_val, traceback):
        # print('\nPulse generator self destructing...done')
        pass
|
sunjerry019/photonLauncher
|
micron/pwmaudio.py
|
Python
|
apache-2.0
| 3,911
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Lax-Bisimulation metrics."""
import time
from absl import logging
import numpy as np
import ot
from rl_metrics_aaai2021 import metric
class LaxBisimulation(metric.Metric):
    """Implementation of LaxBisimulation metrics.

    See Taylor et al., 2008: "Bounding Performance Loss in Approximate MDP
    Homomorphisms"
    """
    def _compute(self, tolerance, verbose=False):
        """Compute exact/online lax-bisimulation metric up to specified tolerance.

        Iterates the metric operator to a fixed point: each sweep builds a
        state-action distance (reward gap + discounted Wasserstein distance
        between next-state distributions), then collapses it to a state
        distance via a Hausdorff-style max of min-over-action terms.

        Args:
          tolerance: float, maximum difference in metric estimate between successive
            iterations. Once this threshold is past, computation stops.
          verbose: bool, whether to print verbose messages.
        """
        # Initial metric is all zeros.
        curr_metric = np.zeros((self.num_states, self.num_states))
        # Seed above tolerance so the loop runs at least once.
        metric_difference = tolerance * 2.
        i = 1
        exact_metric_differences = []
        start_time = time.time()
        while metric_difference > tolerance:
            new_metric = np.zeros((self.num_states, self.num_states))
            state_action_metric = np.zeros((self.num_states, self.num_actions,
                                            self.num_states, self.num_actions))
            # d((s,a),(t,b)) = |r(s,a)-r(t,b)| + gamma * W1(P(.|s,a), P(.|t,b))
            # where the Wasserstein ground cost is the current state metric.
            for s in range(self.num_states):
                for t in range(self.num_states):
                    for a in range(self.num_actions):
                        for b in range(self.num_actions):
                            next_state_distrib_1 = self.env.transition_probs[s, a, :]
                            next_state_distrib_2 = self.env.transition_probs[t, b, :]
                            rew1 = self.env.rewards[s, a]
                            rew2 = self.env.rewards[t, b]
                            emd = ot.emd2(
                                next_state_distrib_1, next_state_distrib_2, curr_metric)
                            state_action_metric[s, a, t, b] = (
                                abs(rew1 - rew2) + self.gamma * emd)
            # Now that we've updated the state-action metric, we compute the Hausdorff
            # metric.
            for s in range(self.num_states):
                # Only t > s is computed; symmetry fills in the lower triangle
                # (the diagonal stays zero).
                for t in range(s + 1, self.num_states):
                    # First we find \sup_x\inf_y d(x, y) from Definition 5 in paper.
                    max_a = None
                    for a in range(self.num_actions):
                        min_b = np.min(state_action_metric[s, a, t, :])
                        if max_a is None or min_b > max_a:
                            max_a = min_b
                    # Next we find \sup_y\inf_x d(x, y) from Definition 5 in paper.
                    max_b = None
                    for b in range(self.num_actions):
                        min_a = np.min(state_action_metric[s, :, t, b])
                        if max_b is None or min_a > max_b:
                            max_b = min_a
                    new_metric[s, t] = max(max_a, max_b)
                    new_metric[t, s] = new_metric[s, t]
            # Convergence is measured as the max elementwise change.
            metric_difference = np.max(abs(new_metric - curr_metric))
            exact_metric_differences.append(metric_difference)
            if verbose:
                logging.info('Iteration %d: %f', i, metric_difference)
            curr_metric = np.copy(new_metric)
            i += 1
        total_time = time.time() - start_time
        self.metric = curr_metric
        self.statistics = metric.Statistics(
            tolerance, total_time, i, exact_metric_differences)
|
google-research/google-research
|
rl_metrics_aaai2021/lax_bisimulation.py
|
Python
|
apache-2.0
| 3,658
|
import functools
from django.db.models.sql import EmptyResultSet
import commonware.log
from rest_framework.decorators import api_view
from rest_framework.exceptions import ParseError
from rest_framework.mixins import ListModelMixin
from rest_framework.routers import Route, SimpleRouter
from rest_framework.response import Response
from rest_framework.urlpatterns import format_suffix_patterns
import mkt
from mkt.api.paginator import CustomPagination, PageNumberPagination
log = commonware.log.getLogger('z.api')
def list_url(name, **kw):
    """
    Build the ('api_dispatch_list', kwargs) pair used to reverse a
    tastypie-style list endpoint for the given resource name.
    """
    params = dict(kw, resource_name=name)
    return ('api_dispatch_list', params)
def get_url(name, pk, **kw):
    """
    Build the ('api_dispatch_detail', kwargs) pair used to reverse a
    tastypie-style detail endpoint for one object of the named resource.
    """
    params = dict(kw)
    params['resource_name'] = name
    params['pk'] = pk
    return ('api_dispatch_detail', params)
def _collect_form_errors(forms):
errors = {}
if not isinstance(forms, list):
forms = [forms]
for f in forms:
# If we've got form objects, get the error object off it.
# Otherwise assume we've just been passed a form object.
form_errors = getattr(f, 'errors', f)
if isinstance(form_errors, list): # Cope with formsets.
for e in form_errors:
errors.update(e)
continue
errors.update(dict(form_errors.items()))
return errors
def form_errors(forms):
    """
    Raise a DRF ParseError whose detail payload carries every error found
    on the given form(s)/formset(s).
    """
    exc = ParseError()
    exc.detail = {'detail': _collect_form_errors(forms)}
    raise exc
def get_region_from_request(request):
    """
    Return the region for `request`.

    A GET param of region='None' explicitly requests "no region" (returns
    None); otherwise use request.REGION (set by the region middleware),
    falling back to RESTOFWORLD when the middleware did not run.
    """
    if request.GET.get('region') == 'None':
        return None
    return getattr(request, 'REGION', mkt.regions.RESTOFWORLD)
class SubRouter(SimpleRouter):
    """
    Like SimpleRouter, but with the lookup before the prefix, so that it can be
    easily used for sub-actions that are children of a main router.

    This is a convenient way of linking one or more viewsets to a parent one
    without having to set multiple @action and @link manually.
    """
    # Same two routes as SimpleRouter, but with {lookup} (the parent object)
    # placed before {prefix} (the sub-resource) in each URL.
    routes = [
        # List route.
        Route(
            url=r'^{lookup}/{prefix}{trailing_slash}$',
            mapping={
                'get': 'list',
                'post': 'create'
            },
            name='{basename}-list',
            initkwargs={'suffix': 'List'}
        ),
        # Detail route.
        Route(
            url=r'^{lookup}/{prefix}{trailing_slash}$',
            mapping={
                'get': 'retrieve',
                'put': 'update',
                'post': 'detail_post',
                'patch': 'partial_update',
                'delete': 'destroy'
            },
            name='{basename}-detail',
            initkwargs={'suffix': 'Instance'}
        )
    ]
class SubRouterWithFormat(SubRouter):
    """
    SubRouter that also adds the optional format to generated URL patterns.

    This is similar to DRF's DefaultRouter, except it's a SubRouter and we
    don't respect the trailing_slash parameter with the URLs containing the
    format parameter, because that'd make ugly, weird URLs.
    """
    def get_urls(self):
        # Returns the regular SubRouter URLs plus a second set carrying a
        # required ".format" suffix (generated without a trailing slash).
        # Keep trailing slash value...
        trailing_slash = self.trailing_slash
        # Generate base URLs without format.
        base_urls = super(SubRouterWithFormat, self).get_urls()
        # Generate the same URLs, but forcing to omit the trailing_slash.
        self.trailing_slash = ''
        extra_urls = super(SubRouterWithFormat, self).get_urls()
        # Reset trailing slash and add format to our extra URLs.
        self.trailing_slash = trailing_slash
        extra_urls = format_suffix_patterns(extra_urls, suffix_required=True)
        # Return the addition of both lists of URLs.
        return base_urls + extra_urls
class MarketplaceView(object):
    """
    Base view for DRF views.

    It includes:
    - An implementation of handle_exception() that goes with our custom
      exception handler. It stores the request and originating class in the
      exception before it's handed over the the handler, so that the handler
      can in turn properly propagate the got_request_exception signal if
      necessary.
    - A implementation of paginate_queryset() that goes with our custom
      pagination handler. It does tastypie-like offset pagination instead of
      the default page mechanism.
    """
    pagination_class = CustomPagination
    def handle_exception(self, exc):
        # Stash context on the exception for the custom exception handler.
        exc._request = self.request._request
        exc._klass = self.__class__
        return super(MarketplaceView, self).handle_exception(exc)
    def paginate_queryset(self, queryset):
        page = self.request.query_params.get('page')
        offset = self.request.query_params.get('offset')
        # If 'offset' (tastypie-style pagination) parameter isn't present and
        # 'page' is, use page numbers instead.
        if page is not None and offset is None:
            self._paginator = PageNumberPagination()
        return self.paginator.paginate_queryset(
            queryset, self.request, view=self)
    def get_region_from_request(self, request):
        """
        Returns the REGION object for the passed request. If the GET param
        `region` is `'None'`, return `None`. Otherwise, return `request.REGION`
        which will have been set by the RegionMiddleware. If somehow we didn't
        go through the middleware and request.REGION is absent, we fall back to
        RESTOFWORLD.
        """
        # Thin wrapper around the module-level helper of the same name.
        return get_region_from_request(request)
class MultiSerializerViewSetMixin(object):
    """
    Allows attaching multiple serializers to a single viewset. A serializer
    is chosen based on request.GET['serializer'] which is used to look up the
    appropriate serializer in a serializer_classes map. Useful to not have to
    create separate endpoints just to use different serializers (e.g.,
    product-specific serializers, slimmed serializers).
    """
    def get_serializer_class(self):
        """
        Pick the serializer class keyed by the ?serializer=<name> GET param
        in self.serializer_classes, e.g.:

            class MyViewSet(ViewSet):
                serializer_class = MySerializer
                serializer_classes = {
                    'mini': MyMiniSerializer,
                }

        When the param is absent or unknown, fall back to the regular
        get_serializer_class() lookup (self.serializer_class).
        """
        requested = self.request.GET.get('serializer')
        mapping = self.serializer_classes
        if requested in mapping:
            return mapping[requested]
        return super(MultiSerializerViewSetMixin, self).get_serializer_class()
class CORSMixin(object):
    """
    Mixin to enable CORS for DRF API.

    Copies self.cors_allowed_methods (and, when present,
    self.cors_allowed_headers) onto the underlying django request so the
    CORS middleware can emit the matching response headers.
    """
    def finalize_response(self, request, response, *args, **kwargs):
        django_request = request._request
        if not hasattr(django_request, 'CORS'):
            django_request.CORS = self.cors_allowed_methods
        if hasattr(self, 'cors_allowed_headers'):
            django_request.CORS_HEADERS = self.cors_allowed_headers
        return super(CORSMixin, self).finalize_response(
            request, response, *args, **kwargs)
def cors_api_view(methods, headers=None):
    """Decorator factory: a DRF @api_view(methods) that also marks the
    request for CORS with the given methods (and optional headers)."""
    def decorator(view):
        def add_cors(handler):
            @functools.wraps(handler)
            def view_with_cors(request, *args, **kw):
                # Tag the request so the CORS middleware emits the headers.
                request.CORS = methods
                if headers:
                    request.CORS_HEADERS = headers
                return handler(request, *args, **kw)
            return view_with_cors
        # The request.CORS attributes need to be added to the view before
        # the DRF @api_view handler executes.
        return add_cors(api_view(methods)(view))
    return decorator
class SlugOrIdMixin(object):
    """
    Mixin that allows you to pass slugs instead of pk in your URLs. Use with
    any router or urlpattern that relies on a relaxed regexp for pks, like
    (?P<pk>[^/]+) (DRF does this by default).

    If the name of your `slug` is called something else, override
    `self.slug_field`.
    """
    def get_object(self):
        identifier = self.kwargs.get('pk')
        if identifier and not identifier.isdigit():
            # A non-numeric `pk` is really a slug: swap the lookup over to
            # the slug field before delegating to the normal get_object().
            slug_field = getattr(self, 'slug_field', 'slug')
            self.lookup_field = slug_field
            self.kwargs['pk'] = None
            self.kwargs[slug_field] = identifier
        return super(SlugOrIdMixin, self).get_object()
class SilentListModelMixin(ListModelMixin):
    """
    DRF's ListModelMixin that returns a 204_NO_CONTENT rather than flipping a
    500 or 404.
    """
    def list(self, *args, **kwargs):
        try:
            res = super(SilentListModelMixin, self).list(*args, **kwargs)
        except EmptyResultSet:
            # The ORM could not even build a query: treat as "no results".
            return Response([])
        # Downgrade a 404 (e.g. empty page) to an empty list response.
        if res.status_code == 404:
            return Response([])
        return res
|
ingenioustechie/zamboni
|
mkt/api/base.py
|
Python
|
bsd-3-clause
| 8,836
|
'''
blockr.io
'''
import logging
from lib import config, util
def get_host():
    """Return the blockr.io API base URL.

    A truthy ``config.BLOCKCHAIN_SERVICE_CONNECT`` overrides everything;
    otherwise pick the testnet or mainnet endpoint from ``config.TESTNET``.
    """
    default = 'http://twdc.blockr.io' if config.TESTNET else 'http://wdc.blockr.io'
    return config.BLOCKCHAIN_SERVICE_CONNECT or default
def check():
    # Intentional no-op: the blockr.io backend needs no startup
    # connectivity/health check (kept for interface parity with other
    # blockchain service modules).
    pass
def getinfo():
    """Fetch chain info from blockr.io.

    Returns ``{'info': {'blocks': <last block height>}}`` on success,
    or None if the API response does not report success.
    """
    resp = util.get_url(get_host() + '/api/v1/coin/info', abort_on_error=True)
    if 'status' in resp and resp['status'] == 'success':
        return {'info': {'blocks': resp['data']['last_block']['nb']}}
    return None
def listunspent(address):
    """Return the unspent outputs of ``address`` as bitcoind-style dicts.

    Returns a (possibly empty) list on success, or None if the API call
    does not report success.
    """
    resp = util.get_url(get_host() + '/api/v1/address/unspent/{}/'.format(address), abort_on_error=True)
    if 'status' in resp and resp['status'] == 'success':
        # Map blockr's field names onto the bitcoind/insight layout.
        return [{
            'address': address,
            'txid': txo['tx'],
            'vout': txo['n'],
            'ts': 0,
            'scriptPubKey': txo['script'],
            'amount': float(txo['amount']),
            'confirmations': txo['confirmations'],
            'confirmationsFromCache': False,
        } for txo in resp['data']['unspent']]
    return None
def getaddressinfo(address):
    """Return insight-style info for ``address``, or None on any failure.

    Combines two blockr.io calls: address balance info plus the address's
    transaction list.
    """
    infos = util.get_url(get_host() + '/api/v1/address/info/{}'.format(address), abort_on_error=True)
    if 'status' in infos and infos['status'] == 'success':
        txs = util.get_url(get_host() + '/api/v1/address/txs/{}'.format(address), abort_on_error=True)
        if 'status' in txs and txs['status'] == 'success':
            transactions = []
            for tx in txs['data']['txs']:
                transactions.append(tx['tx'])
            return {
                'addrStr': address,
                'balance': infos['data']['balance'],
                'balanceSat': infos['data']['balance'] * config.UNIT,
                'totalReceived': infos['data']['totalreceived'],
                'totalReceivedSat': infos['data']['totalreceived'] * config.UNIT,
                # This backend provides no unconfirmed figures, so they
                # are hard-coded to zero.
                'unconfirmedBalance': 0,
                'unconfirmedBalanceSat': 0,
                'unconfirmedTxApperances': 0,
                'txApperances': txs['data']['nb_txs'],
                'transactions': transactions
            }
    return None
def gettransaction(tx_hash):
    """Fetch a raw transaction from blockr.io and normalize it.

    Returns an insight-style dict for ``tx_hash``, None when the
    transaction is unknown (HTTP 404), and raises Exception on any other
    non-200 response or on a malformed payload.
    """
    url = get_host() + '/api/v1/tx/raw/{}'.format(tx_hash)
    tx = util.get_url(url, abort_on_error=False)
    # Validate explicitly instead of `assert`: asserts are stripped when
    # Python runs with -O, which would let malformed responses through.
    if not (tx and tx.get('status') and tx.get('code')):
        raise Exception("Invalid result (missing status/code), body: %s" % (tx,))
    if tx['code'] == 404:
        return None
    elif tx['code'] != 200:
        raise Exception("Invalid result (code %s), body: %s" % (tx['code'], tx))
    if 'status' in tx and tx['status'] == 'success':
        # Total value of all outputs.
        valueOut = sum(vout['value'] for vout in tx['data']['tx']['vout'])
        return {
            'txid': tx_hash,
            'version': tx['data']['tx']['version'],
            'locktime': tx['data']['tx']['locktime'],
            'blockhash': tx['data']['tx'].get('blockhash', None), #will be None if not confirmed yet...
            'confirmations': tx['data']['tx'].get('confirmations', None),
            'time': tx['data']['tx'].get('time', None),
            'blocktime': tx['data']['tx'].get('blocktime', None),
            'valueOut': valueOut,
            'vin': tx['data']['tx']['vin'],
            'vout': tx['data']['tx']['vout']
        }
    return None
|
Bluejudy/bluejudyd
|
lib/blockchain/blockr.py
|
Python
|
mit
| 3,465
|
import random
from django.http import Http404
from django.http.response import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import View, FormView
from django.views.generic.base import TemplateView
from django_tables2 import RequestConfig
from register.email import send_message_after_invitation
from register.models import Candidate, Bicycle, HandoutEvent, SiteConfiguration
from register.models import UserRegistration, Invitation
from staff.filters import CandidateFilter, BicycleFilter
from staff.forms import CreateCandidateForm, DeleteCandidateForm
from staff.forms import HandoverForm, EventForm, InviteForm, RefundForm
from staff.forms import ModifyCandidateForm, InviteCandidateForm
from staff.tables import CandidateTable, BicycleTable, EventTable
from staff.tables import HandoutEventTable
class ManageView(TemplateView):
    # Staff landing page; only renders the static index template.
    template_name = 'staff/index.html'
class BicycleOverviewView(View):
    """Filterable, paginated table of all bicycles."""
    template_name = 'staff/bicycle_overview.html'
    def get(self, request, *args, **kwargs):
        bike_filter = BicycleFilter(request.GET, queryset=Bicycle.objects.all())
        bike_table = BicycleTable(bike_filter.qs)
        RequestConfig(request, paginate={'per_page': 40}).configure(bike_table)
        return render(request, self.template_name,
                      {'bicycles': bike_table, 'filter': bike_filter})
class EventOverviewView(TemplateView):
    """Paginated table of all handout events."""
    template_name = 'staff/event_overview.html'
    def get(self, request, *args, **kwargs):
        queryset = HandoutEvent.objects.all()
        table = HandoutEventTable(queryset)
        RequestConfig(request, paginate={'per_page': 40}).configure(table)
        context_dict = {'handoutevents': table}
        return render(request, self.template_name, context_dict)
class CreateEventView(FormView):
    """Create a new handout event at a given due date."""
    template_name = 'staff/create_event.html'
    form_class = EventForm
    success_url = reverse_lazy('staff:event_overview')
    def form_valid(self, form):
        due_date = form.cleaned_data['due_date']
        # The due date doubles as a uniqueness key for events.
        if HandoutEvent.objects.filter(due_date=due_date):
            raise Http404("An event on that time and date already exists.")
        event = HandoutEvent.objects.create(due_date=due_date)
        # Redirect straight to the new event's detail page.
        self.success_url = reverse_lazy('staff:event',
                                        kwargs={'event_id': event.id})
        return super(CreateEventView, self).form_valid(form)
class AutoInviteView(FormView):
    """Randomly invite eligible candidates to a handout event.

    For every bicycle-choice category the staff enters how many candidates
    to draw; winners are picked at random from the eligible pool and
    notified by e-mail.
    """
    template_name = 'staff/auto_invite.html'
    form_class = InviteForm
    success_url = reverse_lazy('staff:event_overview')
    def form_valid(self, form):
        event = get_object_or_404(HandoutEvent,
                                  id=form.cleaned_data['event_id'])
        for choice, _ in UserRegistration.BICYCLE_CHOICES:
            number_of_winners = form.cleaned_data['choice_%s' % choice]
            # Eligible: no bicycle yet, registered with contact info, and
            # not already invited more often than the configured maximum.
            eligible = [
                c for c in Candidate.registered_and_without_bicycle(choice)
                if c.invitations.count() <
                SiteConfiguration.get_solo().max_number_of_autoinvites]
            winners = random.sample(eligible,
                                    min(len(eligible), number_of_winners))
            for winner in winners:
                Invitation.objects.create(handout_event=event,
                                          candidate=winner)
                send_message_after_invitation(candidate=winner,
                                              handout_event=event)
        self.success_url = reverse_lazy('staff:event',
                                        kwargs={'event_id': event.id})
        return super(AutoInviteView, self).form_valid(form)
    def get(self, request, event_id, *args, **kwargs):
        event = get_object_or_404(HandoutEvent, id=event_id)
        return render(request, self.template_name,
                      {'event': event,
                       'bike_choices': UserRegistration.BICYCLE_CHOICES})
class EventView(View):
    """Detail page for one handout event: table of invited candidates."""
    template_name = 'staff/event.html'
    def get(self, request, event_id, *args, **kwargs):
        event = get_object_or_404(HandoutEvent, id=event_id)
        # Collect the ids of everyone invited to this event...
        invited_candidates = [
            invitation.candidate.id for invitation in event.invitations.all()
        ]
        # ...and build a candidate table restricted to them.
        queryset = Candidate.objects.filter(id__in=invited_candidates)
        candidate_table = EventTable(data=queryset, event_id=event_id)
        RequestConfig(request, paginate={'per_page': 100}).configure(
            candidate_table)
        context_dict = {
            'candidates': candidate_table,
            'event': event}
        return render(request, self.template_name, context_dict)
class CandidateOverviewView(View):
    """Filterable, paginated table of candidates.

    ``query_set`` is None here; presumably subclasses or URL config
    override it to narrow the listing -- confirm against urls.py.
    """
    template_name = 'staff/candidate_overview.html'
    query_set = None
    def get(self, request, *args, **kwargs):
        candidate_filter = CandidateFilter(request.GET, queryset=self.query_set)
        table = CandidateTable(candidate_filter.qs)
        RequestConfig(request, paginate={'per_page': 40}).configure(table)
        return render(request, self.template_name,
                      {'candidates': table, 'filter': candidate_filter})
class CreateCandidateView(FormView):
    """Manually register a new candidate (name + date of birth)."""
    template_name = 'staff/create_candidate.html'
    form_class = CreateCandidateForm
    success_url = reverse_lazy('staff:candidate_overview')
    def form_valid(self, form):
        form_data = {'first_name': form.cleaned_data['first_name'],
                     'last_name': form.cleaned_data['last_name'],
                     'date_of_birth': form.cleaned_data['date_of_birth']}
        # Name + birth date are treated as a candidate's identity.
        if Candidate.get_matching(**form_data):
            raise Http404("This candidate already exists")
        Candidate.objects.create(**form_data)
        return super(CreateCandidateView, self).form_valid(form)
class CandidateMixin(object):
    """Shared plumbing for candidate detail/action views.

    Candidate pages are reachable from three contexts -- the plain
    candidate list, a handout event, or a bicycle -- and the context
    decides which base template is extended. The context ids are threaded
    through GET/POST parameters and the view's form.
    """
    def get_context_dict(self, candidate_id, event_id, bicycle_id, data=None):
        # ``data`` binds the form on POST; None renders it unbound.
        candidate = get_object_or_404(Candidate, id=candidate_id)
        context_dict = {'candidate': candidate,
                        'base_template_name': 'staff/base_candidate_view.html'}
        if event_id:
            event = get_object_or_404(HandoutEvent, id=event_id)
            context_dict['event'] = event
            context_dict['base_template_name'] = 'staff/base_event_view.html'
        elif bicycle_id:
            bicycle = get_object_or_404(Bicycle, id=bicycle_id)
            context_dict['bicycle'] = bicycle
            context_dict['base_template_name'] = 'staff/base_bicycle_view.html'
        # Views without a form (e.g. the read-only detail page) set
        # form_class to None and get no 'form' entry.
        if self.form_class:
            context_dict['form'] = self.form_class(
                data=data,
                candidate_id=candidate_id,
                event_id=event_id,
                bicycle_id=bicycle_id)
        return context_dict
    def get(self, request, candidate_id):
        event_id = request.GET.get('event_id')
        bicycle_id = request.GET.get('bicycle_id')
        context_dict = self.get_context_dict(candidate_id=candidate_id,
                                             event_id=event_id,
                                             bicycle_id=bicycle_id)
        return render(request, self.template_name, context_dict)
    def post(self, request, candidate_id):
        event_id = request.POST.get('event_id')
        bicycle_id = request.POST.get('bicycle_id')
        context_dict = self.get_context_dict(candidate_id=candidate_id,
                                             event_id=event_id,
                                             bicycle_id=bicycle_id,
                                             data=request.POST)
        form = context_dict['form']
        if form.is_valid():
            self.form_valid(form)
            return HttpResponseRedirect(self.success_url)
        # Invalid form: re-render the same page with form errors.
        return render(request, self.template_name, context_dict)
    def set_success_url(self, form):
        # Point success_url back at the candidate page we came from,
        # preserving the event/bicycle context where possible.
        candidate_id = form.cleaned_data['candidate_id']
        if not Candidate.objects.filter(id=candidate_id):
            raise Http404("Candidate id not found.")
        self.success_url = reverse_lazy('staff:candidate',
                                        kwargs={'candidate_id': candidate_id})
        event_id = form.cleaned_data.get('event_id')
        bicycle_id = form.cleaned_data.get('bicycle_id')
        if event_id:
            event = get_object_or_404(HandoutEvent, id=event_id)
            self.success_url += event.url_parameter
        elif bicycle_id:
            try:
                bicycle = Bicycle.objects.get(id=bicycle_id)
                self.success_url += bicycle.url_parameter
            except Bicycle.DoesNotExist:
                # the bicycle has been handed back
                self.success_url = reverse_lazy('staff:bicycle_overview')
class CandidateView(CandidateMixin, View):
    # Read-only candidate detail page: form_class=None means
    # CandidateMixin.get_context_dict adds no form to the context.
    template_name = 'staff/candidate.html'
    form_class = None
class DeleteCandidateView(CandidateMixin, FormView):
    """Delete a candidate, then return to the surrounding overview."""
    template_name = 'staff/delete_candidate.html'
    form_class = DeleteCandidateForm
    def form_valid(self, form):
        candidate_id = form.cleaned_data['candidate_id']
        candidate = get_object_or_404(Candidate, id=candidate_id)
        candidate.delete()
        # The candidate is gone, so unlike the other candidate actions we
        # cannot use set_success_url (it targets the candidate page);
        # redirect to the overview matching the original context instead.
        event_id = form.cleaned_data.get('event_id')
        bicycle_id = form.cleaned_data.get('bicycle_id')
        if event_id:
            self.success_url = reverse_lazy('staff:event',
                                            kwargs={'event_id': event_id})
        elif bicycle_id:
            self.success_url = reverse_lazy('staff:bicycle_overview')
        else:
            self.success_url = reverse_lazy('staff:candidate_overview')
        return super(DeleteCandidateView, self).form_valid(form)
class ModifyCandidateView(CandidateMixin, FormView):
    """Edit a candidate's name and/or date of birth."""
    template_name = 'staff/modify_candidate.html'
    form_class = ModifyCandidateForm
    def form_valid(self, form):
        candidate_id = form.cleaned_data['candidate_id']
        form_data = {'first_name': form.cleaned_data['first_name'],
                     'last_name': form.cleaned_data['last_name'],
                     'date_of_birth': form.cleaned_data['date_of_birth']}
        # Reject the change if it would collide with a different candidate.
        if Candidate.get_matching(**form_data).exclude(id=candidate_id):
            raise Http404("This candidate already exists")
        Candidate.objects.filter(id=candidate_id).update(**form_data)
        self.set_success_url(form)
        return super(ModifyCandidateView, self).form_valid(form)
class HandoverBicycleView(CandidateMixin, FormView):
    """Hand a bicycle over to a candidate, recording its details."""
    template_name = 'staff/handover_bicycle.html'
    form_class = HandoverForm
    def form_valid(self, form):
        candidate = get_object_or_404(Candidate,
                                      id=form.cleaned_data['candidate_id'])
        if candidate.has_bicycle:
            raise Http404("This Candidate already has a bicycle.")
        # Copy the bicycle attributes straight from the validated form.
        details = {field: form.cleaned_data[field]
                   for field in ('bicycle_number', 'lock_combination',
                                 'color', 'brand', 'general_remarks')}
        Bicycle.objects.create(candidate=candidate, **details)
        self.set_success_url(form)
        return super(HandoverBicycleView, self).form_valid(form)
class RefundBicycleView(CandidateMixin, FormView):
    """Take a bicycle back from a candidate (deletes the bicycle record)."""
    template_name = 'staff/refund_bicycle.html'
    form_class = RefundForm
    def form_valid(self, form):
        candidate_id = form.cleaned_data['candidate_id']
        candidate = get_object_or_404(Candidate, id=candidate_id)
        if not candidate.has_bicycle:
            raise Http404("This Candidate does not have a bicycle.")
        candidate.bicycle.delete()
        self.set_success_url(form)
        return super(RefundBicycleView, self).form_valid(form)
class InviteCandidateView(CandidateMixin, FormView):
    """Invite one candidate to a specific handout event and notify them."""
    template_name = 'staff/invite_candidate.html'
    form_class = InviteCandidateForm
    def form_valid(self, form):
        candidate_id = form.cleaned_data['candidate_id']
        candidate = get_object_or_404(Candidate, id=candidate_id)
        invitation_event_id = form.cleaned_data['invitation_event_id']
        invitation_event = get_object_or_404(
            HandoutEvent, id=invitation_event_id)
        # Guard against inviting the same candidate to an event twice.
        if invitation_event not in candidate.events_not_invited_to:
            raise Http404("The Candidate is already invited to this event.")
        Invitation.objects.create(candidate=candidate,
                                  handout_event=invitation_event)
        send_message_after_invitation(candidate=candidate,
                                      handout_event=invitation_event)
        self.set_success_url(form)
        return super(InviteCandidateView, self).form_valid(form)
|
michaelbratsch/bwb
|
staff/views.py
|
Python
|
gpl-3.0
| 13,376
|
import pychrono.core as chrono
import pychrono.sensor as sens
import numpy as np
import time
import random
def main():
    """Build a scene of drifting boxes and exercise a radar sensor on it.

    Relies on the module-level radar/simulation parameters defined after
    this function (update_rate, horizontal_samples, ..., step_size,
    end_time); main() is only called once those are bound.
    """
    #------------------
    # Create the system
    #------------------
    mphysicalSystem = chrono.ChSystemNSC()
    mphysicalSystem.Set_G_acc(chrono.ChVectorD(0, 0, 0))  # gravity off
    # Visual materials for the assets.
    red = chrono.ChVisualMaterial()
    red.SetDiffuseColor(chrono.ChVectorF(1, 0, 0))
    red.SetSpecularColor(chrono.ChVectorF(1, 1, 1))
    # NOTE(review): `green` is created but never applied below.
    green = chrono.ChVisualMaterial()
    green.SetDiffuseColor(chrono.ChVectorF(0, 1, 0))
    green.SetSpecularColor(chrono.ChVectorF(1, 1, 1))
    #------------------------------
    # add body for sensor to attach
    #------------------------------
    floor = chrono.ChBodyEasyBox(1000, 20, 1, 1000, True, False)
    floor.SetPos(chrono.ChVectorD(0, 0, -1))
    floor.SetBodyFixed(True)
    mphysicalSystem.Add(floor)
    # Row of boxes at y=+1 drifting in -x.
    for i in range(10):
        x = random.uniform(0, 30)
        box = chrono.ChBodyEasyBox(0.5, 0.5, 0.5, 1000, True, False)
        box.SetPos(chrono.ChVectorD(5 + x, 1, 0))
        box.SetPos_dt(chrono.ChVectorD(-0.5, 0, 0))
        mphysicalSystem.Add(box)
        # Tint the floor asset red (kept from the original demo).
        visual_asset = chrono.CastToChVisualization(floor.GetAssets()[0])
        visual_asset.material_list.append(red)
    # Row of boxes at y=-1 drifting in +x.
    for i in range(10):
        x = random.uniform(0, 30)
        box = chrono.ChBodyEasyBox(0.5, 0.5, 0.5, 1000, True, False)
        box.SetPos(chrono.ChVectorD(10 - x, -1, 0))
        box.SetPos_dt(chrono.ChVectorD(0.5, 0, 0))
        mphysicalSystem.Add(box)
        visual_asset = chrono.CastToChVisualization(floor.GetAssets()[0])
        visual_asset.material_list.append(red)
    # -----------------------
    # Create a sensor manager
    # -----------------------
    manager = sens.ChSensorManager(mphysicalSystem)
    # ------------------------------------------------
    # Create a radar and add it to the sensor manager
    # ------------------------------------------------
    offset_pose = chrono.ChFrameD(
        chrono.ChVectorD(0, 0, 1), chrono.Q_from_AngZ(0))
    radar = sens.ChRadarSensor(
        floor,               # body radar is attached to
        update_rate,         # scanning rate in Hz
        offset_pose,         # offset pose
        horizontal_samples,  # number of horizontal samples
        vertical_samples,    # number of vertical channels
        horizontal_fov,      # horizontal field of view
        max_vert_angle,      # vertical field of view (upper bound)
        min_vert_angle,      # vertical field of view (lower bound)
        100.0,               # max radar range
    )
    radar.PushFilter(sens.ChFilterRadarProcess())
    radar.PushFilter(sens.ChFilterRadarVisualizeCluster(960, 1080, 2))
    manager.AddSensor(radar)
    # ---------------
    # Simulate system
    # ---------------
    # (Removed unused locals from the original: orbit_radius, orbit_rate,
    # render_time.)
    ch_time = 0.0
    t1 = time.time()
    while (ch_time < end_time):
        # Update sensor manager: renders/saves/filters automatically.
        manager.Update()
        # Advance the dynamics by one step.
        mphysicalSystem.DoStepDynamics(step_size)
        # Get the current time of the simulation
        ch_time = mphysicalSystem.GetChTime()
    print("Sim time:", end_time, "Wall time:", time.time()-t1)
# -----------------
# radar parameters
# -----------------
# Update rate in Hz
update_rate = 5.0
# Number of horizontal and vertical samples
horizontal_samples = 100
vertical_samples = 100
# Horizontal and vertical field of view (radians)
horizontal_fov = chrono.CH_C_PI /9 # 20 degrees
max_vert_angle = chrono.CH_C_PI / 15
min_vert_angle = -chrono.CH_C_PI / 15
# camera to have same view as radar
aspect_ratio = horizontal_fov / (max_vert_angle - min_vert_angle)
width = 960
height = width / aspect_ratio
# Lag time
# NOTE(review): lag and collection_time are defined but not passed to the
# radar constructor in main() -- confirm whether they should be.
lag = 0
# Collection window for the radar
collection_time = 1. / update_rate # typically 1/update rate
# ---------------------
# Simulation parameters
# ---------------------
# Simulation step size
step_size = 1e-3
# Simulation end time
end_time = 100.0
# These module-level globals must be bound before main() runs, since
# main() reads them directly.
main()
|
projectchrono/chrono
|
src/demos/python/sensor/demo_SEN_radar.py
|
Python
|
bsd-3-clause
| 4,103
|
# Application bootstrap for the CTF engine.
import config
import lib
from flask import Flask, Request
# NOTE(review): flask.ext.* was removed in Flask 1.0; on modern Flask this
# must become `from flask_sqlalchemy import SQLAlchemy`.
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object(config)
# Use the project's custom request subclass from lib.
app.request_class = lib.Request
db = SQLAlchemy(app)
def format_datetime(value):
    """Render a datetime as 'YYYY-MM-DD HH:MM:SS +ZZZZ' (tz empty if naive)."""
    return '{:%Y-%m-%d %H:%M:%S %z}'.format(value)
# Expose the formatter to templates as the `datetime` filter.
app.jinja_env.filters['datetime'] = format_datetime
# Imported for its route-registration side effects; must come after `app`
# exists.
import ctfengine.views
if not app.debug:
    # In production, e-mail tracebacks to the admins.
    import logging
    from logging.handlers import SMTPHandler
    mail_handler = SMTPHandler('127.0.0.1',
            app.config['MAIL_FROM'],
            app.config['ADMINS'], "ctfengine error")
    mail_handler.setFormatter(logging.Formatter('''\
Message type: %(levelname)s
Time: %(asctime)s
%(message)s
'''))
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)
|
mutantmonkey/ctfengine
|
ctfengine/__init__.py
|
Python
|
isc
| 840
|
# qutebrowser config.py: the `config` and `c` objects are injected into the
# namespace by qutebrowser at runtime; the self-assignments below exist only
# to give them type annotations for editors/linters.
from qutebrowser.config.configfiles import ConfigAPI # noqa: F401
from qutebrowser.config.config import ConfigContainer # noqa: F401
config = config # type: ConfigAPI # noqa: F821 pylint: disable=E0602,C0103
c = c # type: ConfigContainer # noqa: F821 pylint: disable=E0602,C0103
# Share the injected objects with the helper package so its modules can
# mutate settings.
from pyconfig import qbv
qbv.c = c
qbv.config = config
# Load autoconfig before the rest of python config
config.load_autoconfig()
# Importing these applies bindings/aliases/themes as an import side effect.
import pyconfig.bindings # noqa
import pyconfig.aliases # noqa
import pyconfig.themes # noqa
# Finally pull in the nyan sub-config.
config.source('qutenyan/nyan.py')
|
jgkamat/dotfiles
|
qutebrowser/.config/qutebrowser/config.py
|
Python
|
gpl-3.0
| 548
|
# Project identity and version metadata.
__project__ = 'ParkFinder'
__version__ = '0.0.0'
# Combined identifier, e.g. "ParkFinder-0.0.0".
VERSION = '{}-{}'.format(__project__, __version__)
|
friendlycode/gr-parks
|
parks/__init__.py
|
Python
|
mit
| 92
|
#!/usr/bin/env python2
# Small phylogeny under the SCJ (Single-Cut-or-Join) model: reconstruct
# genomes for the internal nodes of a given tree from the leaf genomes.
import ringo_config
cfg = ringo_config.RingoConfig()
# pyximport compiles the Cython modules (e.g. scj) on first import.
import pyximport; pyximport.install(build_dir=cfg.pyximport_build())
import argparse
import os
import scj
import file_ops
from model import Genome
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="SCJ algorithms.")
    parser.add_argument("-i", "--input_genomes", required=True, type=str, help="Leaf genomes file.")
    parser.add_argument("-t", "--tree", required=True, type=str, help="Newick Tree file.")
    parser.add_argument("-o", "--output", type=str, help="Output folder. If not given, output is written to the same location of the genomes file.")
    param = parser.parse_args()
    # open files:
    extant_genomes = file_ops.open_genome_file(param.input_genomes)
    tree = file_ops.open_newick_tree(param.tree, label_internal_nodes=True)
    # run SCJ small phylogeny; yields one adjacency set per internal node
    adj_set = scj.scj_small_phylogeny_adjacencies(tree, extant_genomes)
    # NOTE: dict.iteritems() makes this script Python 2 only.
    genomes = {label:Genome.from_adjacency_list(label, adj) for label, adj in adj_set.iteritems()}
    # write output:
    folder = param.output if param.output is not None else os.path.dirname(param.input_genomes)
    file_ops.write_genomes_to_file(genomes, os.path.join(folder, cfg.scj_genomes()))
|
pedrofeijao/RINGO
|
src/ringo/run_scj.py
|
Python
|
mit
| 1,242
|
#
# Copyright (c) 1996-2000 Tyler C. Sarna <tsarna@sarna.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Tyler C. Sarna.
# 4. Neither the name of the author nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Public API of the barcode package.
__all__ = (
    'registerWidget',
    'getCodes',
    'getCodeNames',
    'createBarcodeDrawing',
    'createBarcodeImageInMemory',
)
__version__ = '0.9'
__doc__='''Popular barcodes available as reusable widgets'''
# Registry of every known barcode widget class, in registration order.
_widgets = []
def registerWidget(widget):
    """Add a barcode widget class to the module registry."""
    _widgets.append(widget)
def _reset():
    """Rebuild the widget registry from scratch.

    Also registered as reportlab's rl_config reset hook below, so the
    registry survives a global configuration reset.
    """
    _widgets[:] = []
    from reportlab.graphics.barcode.widgets import (
        BarcodeI2of5, BarcodeCode128, BarcodeStandard93,
        BarcodeExtended93, BarcodeStandard39, BarcodeExtended39,
        BarcodeMSI, BarcodeCodabar, BarcodeCode11, BarcodeFIM,
        BarcodePOSTNET, BarcodeUSPS_4State, BarcodeCode128Auto,
        BarcodeECC200DataMatrix)
    # newer codes will typically get their own module
    from reportlab.graphics.barcode.eanbc import (
        Ean13BarcodeWidget, Ean8BarcodeWidget, UPCA, Ean5BarcodeWidget,
        ISBNBarcodeWidget)
    from reportlab.graphics.barcode.qr import QrCodeWidget
    for widget in (BarcodeI2of5,
                   BarcodeCode128,
                   BarcodeCode128Auto,
                   BarcodeStandard93,
                   BarcodeExtended93,
                   BarcodeStandard39,
                   BarcodeExtended39,
                   BarcodeMSI,
                   BarcodeCodabar,
                   BarcodeCode11,
                   BarcodeFIM,
                   BarcodePOSTNET,
                   BarcodeUSPS_4State,
                   Ean13BarcodeWidget,
                   Ean8BarcodeWidget,
                   UPCA,
                   Ean5BarcodeWidget,
                   ISBNBarcodeWidget,
                   QrCodeWidget,
                   BarcodeECC200DataMatrix,
                   ):
        registerWidget(widget)
# Populate the registry at import time, and re-register whenever
# reportlab's global configuration is reset.
_reset()
from reportlab.rl_config import register_reset
register_reset(_reset)
def getCodes():
    """Returns a dict mapping code names to widgets"""
    # Exported so apps and doc tools can introspect the available barcodes.
    return {widget.codeName: widget for widget in _widgets}
def getCodeNames():
    """Returns sorted list of supported bar code names"""
    # Iterating the dict yields its keys, so no .keys() call is needed.
    return sorted(getCodes())
def createBarcodeDrawing(codeName, **options):
    """This creates and returns a drawing with a barcode.

    codeName selects a widget from getCodes(). width/height/'auto' and
    isoScale control optional scaling; all remaining options are forwarded
    to the widget (only keys in its _attrMap or starting with '_').
    Raises ValueError when the widget reports its value as invalid.
    """
    from reportlab.graphics.shapes import Drawing, Group
    codes = getCodes()
    bcc = codes[codeName]
    width = options.pop('width',None)
    height = options.pop('height',None)
    isoScale = options.pop('isoScale',0)
    # Forward only attributes the widget actually declares.
    kw = {}
    for k,v in options.items():
        if k.startswith('_') or k in bcc._attrMap: kw[k] = v
    bc = bcc(**kw)
    #Robin's new ones validate when setting the value property.
    #Ty Sarna's old ones do not. We need to test.
    if hasattr(bc, 'validate'):
        bc.validate() #raise exception if bad value
    if not bc.valid:
        raise ValueError("Illegal barcode with value '%s' in code '%s'" % (options.get('value',None), codeName))
    #size it after setting the data
    x1, y1, x2, y2 = bc.getBounds()
    w = float(x2 - x1)
    h = float(y2 - y1)
    # Scale factors: only applied for dimensions that were requested
    # explicitly (i.e. neither 'auto' nor None).
    sx = width not in ('auto',None)
    sy = height not in ('auto',None)
    if sx or sy:
        sx = sx and width/w or 1.0
        sy = sy and height/h or 1.0
        if isoScale:
            # Keep the aspect ratio: use a single uniform scale factor.
            if sx<1.0 and sy<1.0:
                sx = sy = max(sx,sy)
            else:
                sx = sy = min(sx,sy)
        w *= sx
        h *= sy
    else:
        sx = sy = 1
    #bc.x = -sx*x1
    #bc.y = -sy*y1
    # The transform both scales and translates the barcode to the origin.
    d = Drawing(width=w,height=h,transform=[sx,0,0,sy,-sx*x1,-sy*y1])
    d.add(bc, "_bc")
    return d
def createBarcodeImageInMemory(codeName, **options):
    """Render a barcode drawing to an in-memory image string.

    Accepts the same keyword arguments as ``createBarcodeDrawing`` plus an
    optional ``format`` (anything ``Drawing.asString`` accepts: gif, pdf,
    tiff, py, ...; default 'png').
    """
    image_format = options.pop('format', 'png')
    drawing = createBarcodeDrawing(codeName, **options)
    return drawing.asString(image_format)
|
EduPepperPDTesting/pepper2013-testing
|
lms/djangoapps/reportlab/graphics/barcode/__init__.py
|
Python
|
agpl-3.0
| 5,911
|
"""
Passing arguments to decorators
------------------------------
Looking back at the example before the one above, you can notice how
redundant the decorators in the example are. 3 decorators(div_decorate,
p_decorate, strong_decorate) each with the same functionality but wrapping
the string with different tags. We can definitely do much better than that.
Why not have a more general implementation for one that takes the tag to
wrap with as a string? Yes please!
"""
def tags(tag_name):
    """Decorator factory: wrap a function's string result in an HTML tag."""
    open_tag = "<%s>" % tag_name
    close_tag = "</%s>" % tag_name
    def decorator(wrapped):
        def wrapper(name):
            return open_tag + wrapped(name) + close_tag
        return wrapper
    return decorator
@tags("p")
def get_text(name):
    # The decorator wraps the result, yielding "<p>Hello <name></p>".
    return "Hello "+name
# NOTE: Python 2 print statement; under Python 3 this needs print(...).
print get_text("John")
|
spradeepv/dive-into-python
|
decorators/passing_args_to_decorators.py
|
Python
|
mit
| 753
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from generator import Generator
import sys
from lxml import etree
import os.path
class PluginGenerator(Generator):
    """Generates one Python plugin module per <plugin> element of an XML
    description file (code is emitted via Generator's __put__/__right__/
    __left__ indentation helpers)."""
    # Class-wide counter: gives each generated plugin a unique numeric id.
    plugin_id = 0
    def __init__(self, xml):
        Generator.__init__(self)
        if os.path.isfile(xml):
            plugin_tree = etree.parse(xml)
            plugins = plugin_tree.xpath("/plugins/plugin")
            for plugin in plugins:
                self.__generate_plugin__(plugin, xml)
        else:
            # NOTE: Python 2 print statement.
            print "XML file: " + xml + " not valid !"
    def __generate_plugin__(self, plugin, xml):
        # Emit one complete plugin file: header, body, footer.
        self.__generate_base__(plugin, xml)
        self.__generate_body__(plugin)
        self.__generate_close__()
    def __generate_base__(self, plugin, xml):
        """Open the output file and write imports + class header."""
        self.__name__ = plugin.get("name")
        self.__reload__ = plugin.get("reload")
        # The generated file lives next to the XML, named after the plugin.
        base = os.path.dirname(xml)
        plugin_str_name = base + "/" + self.__name__.lower() + ".py"
        self.__f__ = open(plugin_str_name, 'w')
        self.__put__("#!/usr/bin/env python\n# -*- coding: utf-8 -*-")
        self.__put__("")
        self.__put__("import os, logging ")
        self.__put__("from voxgenerator.plugin import Plugin")
        self.__addPackageInclusion__(plugin)
        self.__put__("class " + self.__name__ + "(Plugin):")
        self.__right__()
        self.__put__("def __init__(self):")
        self.__right__()
        self.__put__("Plugin.__init__(self, '" + self.__name__ + "')")
        self.__put__("self.__id__ = " + str(PluginGenerator.plugin_id))
        PluginGenerator.plugin_id += 1
    def __generate_body__(self, plugin):
        """Write the command lookup tables and the command methods."""
        commands = plugin.findall("command")
        self.__addfunctionlookup__(commands)
        self.__addcommandlookup__(commands)
        self.__put__("self.__build__(" + self.__reload__ + ")")
        self.__put__("self.__run__()")
        self.__left__()
        self.__addcommandfunction__(commands)
        self.__left__()
    def __addPackageInclusion__(self, plugin):
        """Emit one import line per <package> element."""
        packages = plugin.findall("package")
        for package in packages:
            name = package.get("name")
            module = package.get("module")
            if name is not None and module is not None:
                self.__put__("from " + name + " import " + module)
            else:
                if name is not None and module is None:
                    self.__put__("import " + name)
    def __addfunctionlookup__(self, commands):
        # Map numeric command ids to the generated handler methods.
        id = 0
        for cmd in commands:
            name = cmd.get("name")
            self.__put__("self.__function__[" + str(id) + "] = self." + name)
            id += 1
        self.__put__("")
    def __addcommandlookup__(self, commands):
        # Map numeric command ids to their spoken transcriptions.
        id = 0
        for cmd in commands:
            trans = cmd.get("transcription")
            self.__put__("self.__command__[" + str(id) + "] = '" + trans + "'")
            id += 1
        self.__put__("")
    def __addcommandfunction__(self, commands):
        """Emit one method per command; 'system' commands shell out."""
        # NOTE(review): `id` is incremented but never used in this method.
        id = 0
        for cmd in commands:
            id += 1
            self.__put__("")
            name = cmd.get("name")
            type = cmd.get("type")
            self.__put__("def " + name + "(self):")
            self.__right__()
            exe = cmd.get("exec")
            if exe is not None:
                if type == "system":
                    self.__put__("os.system('" + exe +"')")
                else:
                    self.__put__(exe)
            else:
                self.__put__("raise NotImplementedError('subclasses must override " + name + "()!')")
            self.__left__()
if __name__ == '__main__':
    # Usage: plugin_generator.py <plugins.xml>
    plugin_generator = PluginGenerator(sys.argv[1])
|
benoitfragit/VOXGenerator
|
voxgenerator/generator/plugin_generator.py
|
Python
|
gpl-2.0
| 3,689
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Minimal Flask demo for IBM i: run SQL via ibm_db_dbi and CL commands via
# the XMLSERVICE toolkit.
from flask import Flask, render_template, request
app = Flask(__name__)
import ibm_db_dbi as dbi
from itoolkit import *
from itoolkit.db2.idb2call import * #for local jobs
# Require a driver version new enough for this demo.
version = tuple(map(int, dbi.__version__.split('.')))
if version < (2, 0, 5, 5):
    raise Exception("Need ibm_db_dbi 2.0.5.5 or higher to run, you have " + dbi.__version__)
@app.route('/sample')
def sample():
    # Demo landing page; presumably hosts the SQL/CL forms that post to
    # /query and /cmd -- confirm against templates/sample.html.
    return render_template('sample.html')
@app.route('/query', methods=['POST'])
def query_ibm_db():
    """Run the posted SQL statement and render the result table.

    Reads the statement from the ``sql`` form field, executes it against
    the local database, and renders the column headers plus all rows.

    NOTE: executes arbitrary user-supplied SQL by design (demo console) --
    do not expose this endpoint publicly.
    """
    statement = request.form.get('sql')
    conn = dbi.connect()
    try:
        cur = conn.cursor()
        try:
            cur.execute(statement)
            headers = [descr[0] for descr in cur.description]
            # Materialize the rows before closing; the original handed the
            # live cursor to the template and never closed cursor or
            # connection (resource leak).
            rows = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    return render_template('query.html', headers=headers, rows=rows)
@app.route('/cmd', methods=['POST'])
def cmd_toolkit():
    """Run the posted CL command through XMLSERVICE and show its output."""
    cl_statement = request.form.get('cl')
    # xmlservice
    itool = iToolKit()
    itransport = iDB2Call()  # transport: run via the local DB2 job
    itool.add(iCmd5250(cl_statement, cl_statement))
    itool.call(itransport)
    # Flatten the nested result lists into a single string for the template.
    data = ''
    for output_outer in itool.list_out():
        for output_inner in output_outer:
            data += output_inner
    return render_template('cmd.html', data=data)
# NOTE(review): debug mode while binding 0.0.0.0 exposes the interactive
# debugger to the whole network -- acceptable for a demo, unsafe otherwise.
app.debug = True
app.run(host='0.0.0.0', port=9000,)
|
Club-Seiden/python-for-IBM-i-examples
|
non-wheel/flask-example/sample.py
|
Python
|
mit
| 1,305
|
## Mask generator from MADE: https://github.com/mgermain/MADE
import copy
import theano
import theano.tensor as T
import numpy as np
from theano.sandbox.rng_mrg import MRG_RandomStreams # Limited but works on GPU
from theano.tensor.shared_randomstreams import RandomStreams
# from theano.gpuarray.dnn import GpuDnnSoftmax as mysoftmax
def mysoftmax(x):
    """Numerically-stabilized softmax over *all* elements of ``x``.

    NOTE(review): subtracts the global max and divides by the global sum
    (no axis argument), not a per-row softmax -- assumes callers pass a
    single distribution at a time; confirm before reusing on batches.
    """
    e_x = T.exp(x - x.max())
    return e_x / e_x.sum()
class MaskGenerator(object):
    """Samples and caches connectivity masks for a masked autoencoder.

    Appears to implement MADE-style autoregressive masks (Germain et al.,
    2015 -- TODO confirm): every unit in every layer carries a connectivity
    number, and a weight from unit i (lower layer) to unit j (upper layer)
    is allowed only when number(i) <= number(j) (see _get_mask).
    """
    def __init__(self, input_size, hidden_sizes, l, random_seed=1234):
        # `l` scales the multinomial weights used to sample hidden-unit
        # connectivity numbers (see _get_p).
        self._random_seed = random_seed
        self._mrng = MRG_RandomStreams(seed=random_seed)  # GPU-capable stream (multinomial sampling)
        self._rng = RandomStreams(seed=random_seed)       # CPU stream (ordering shuffles)
        self._hidden_sizes = hidden_sizes
        self._input_size = input_size
        self._l = l
        # Current input ordering: a permutation of 0..input_size-1 stored as floatX.
        self.ordering = theano.shared(value=np.arange(input_size, dtype=theano.config.floatX), name='ordering', borrow=False)
        # Initial layer connectivity: input layer gets ordering+1, hidden layers
        # start at zero (filled in by sample_connectivity below), output layer
        # aliases `ordering` itself.
        self.layers_connectivity = [theano.shared(value=(self.ordering + 1).eval(), name='layer_connectivity_input', borrow=False)]
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity += [theano.shared(value=np.zeros((self._hidden_sizes[i]), dtype=theano.config.floatX), name='layer_connectivity_hidden{0}'.format(i), borrow=False)]
        self.layers_connectivity += [self.ordering]
        ## Theano functions
        # shuffle_ordering: permutes the input ordering in place and keeps the
        # input layer's connectivity (ordering+1) in sync.
        new_ordering = self._rng.shuffle_row_elements(self.ordering)
        self.shuffle_ordering = theano.function(name='shuffle_ordering',
                                                inputs=[],
                                                updates=[(self.ordering, new_ordering), (self.layers_connectivity[0], new_ordering + 1)])
        # Build the per-hidden-layer resampling expressions. Each layer's
        # sampler depends on the previous layer's *update expression*, so the
        # list must be built incrementally.
        self.layers_connectivity_updates = []
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity_updates += [self._get_hidden_layer_connectivity(i)]
        # self.layers_connectivity_updates = [self._get_hidden_layer_connectivity(i) for i in range(len(self._hidden_sizes))] # WTF THIS DO NOT WORK
        # sample_connectivity: draws fresh connectivity numbers for all hidden layers.
        self.sample_connectivity = theano.function(name='sample_connectivity',
                                                   inputs=[],
                                                   updates=[(self.layers_connectivity[i+1], self.layers_connectivity_updates[i]) for i in range(len(self._hidden_sizes))])
        # Save random initial state so reset() can reproduce the exact sequence.
        self._initial_mrng_rstate = copy.deepcopy(self._mrng.rstate)
        self._initial_mrng_state_updates = [state_update[0].get_value() for state_update in self._mrng.state_updates]
        # Ensuring valid initial connectivity
        self.sample_connectivity()
    def reset(self):
        """Restore ordering, RNG state and connectivity to their initial values."""
        # Set Original ordering
        self.ordering.set_value(np.arange(self._input_size, dtype=theano.config.floatX))
        # Reset RandomStreams
        self._rng.seed(self._random_seed)
        # Initial layer connectivity
        self.layers_connectivity[0].set_value((self.ordering + 1).eval())
        for i in range(1, len(self.layers_connectivity)-1):
            self.layers_connectivity[i].set_value(np.zeros((self._hidden_sizes[i-1]), dtype=theano.config.floatX))
        self.layers_connectivity[-1].set_value(self.ordering.get_value())
        # Reset MRG_RandomStreams (GPU)
        self._mrng.rstate = self._initial_mrng_rstate
        for state, value in zip(self._mrng.state_updates, self._initial_mrng_state_updates):
            state[0].set_value(value)
        self.sample_connectivity()
    def _get_p(self, start_choice):
        """Build the (unnormalized) multinomial weight vector for sampling a
        connectivity number in [start_choice, input_size): zero probability
        below start_choice, weight growing linearly (scaled by self._l) above.
        """
        start_choice_idx = (start_choice-1).astype('int32')
        p_vals = T.concatenate([T.zeros((start_choice_idx,)), (self._l * T.arange(start_choice, self._input_size, dtype=theano.config.floatX))])
        p_vals = T.inc_subtensor(p_vals[start_choice_idx], 1.)  # Stupid hack because de multinomial does not contain a safety for numerical imprecision.
        return p_vals
    def _get_hidden_layer_connectivity(self, layerIdx):
        """Symbolic expression sampling new connectivity numbers for hidden
        layer `layerIdx`; each unit's number is >= the minimum number of the
        layer below, which keeps at least one valid path through the network.
        """
        layer_size = self._hidden_sizes[layerIdx]
        if layerIdx == 0:
            p_vals = self._get_p(T.min(self.layers_connectivity[layerIdx]))
        else:
            # Chain off the previous layer's *update* so one sample_connectivity
            # call resamples all layers consistently.
            p_vals = self._get_p(T.min(self.layers_connectivity_updates[layerIdx-1]))
        # #Implementations of np.choose in theano GPU
        # return T.nonzero(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX))[1].astype(dtype=theano.config.floatX)
        # return T.argmax(self._mrng.multinomial(pvals=[self._p_vals] * layer_size, dtype=theano.config.floatX), axis=1)
        # cumsum-of-one-hot trick: converts each multinomial draw (over the
        # reversed weight vector) into its index, entirely on the GPU.
        return T.sum(T.cumsum(self._mrng.multinomial(pvals=T.tile(p_vals[::-1][None, :], (layer_size, 1)), dtype=theano.config.floatX), axis=1), axis=1)
    def _get_mask(self, layerIdxIn, layerIdxOut):
        """Binary mask allowing weight (i -> j) iff number(i) <= number(j)."""
        return (self.layers_connectivity[layerIdxIn][:, None] <= self.layers_connectivity[layerIdxOut][None, :]).astype(theano.config.floatX)
    def get_mask_layer_UPDATE(self, layerIdx):
        # Mask between consecutive layers layerIdx -> layerIdx+1.
        return self._get_mask(layerIdx, layerIdx + 1)
    def get_direct_input_mask_layer_UPDATE(self, layerIdx):
        # Skip-connection mask from the input layer directly to layerIdx.
        return self._get_mask(0, layerIdx)
    def get_direct_output_mask_layer_UPDATE(self, layerIdx):
        # Skip-connection mask from layerIdx directly to the output layer.
        return self._get_mask(layerIdx, -1)
|
ajbrock/Neural-Photo-Editor
|
mask_generator.py
|
Python
|
mit
| 5,220
|
import _plotly_utils.basevalidators
class IndicesValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``indices`` data-array property of ``pointcloud``."""

    def __init__(self, plotly_name="indices", parent_name="pointcloud", **kwargs):
        # "calc" is the default edit type unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "calc")
        super(IndicesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/pointcloud/_indices.py
|
Python
|
mit
| 402
|
# -*- coding: utf-8 -*-
import re
import vim
from orgmode._vim import echom, ORGMODE, realign_tags
from orgmode.menu import Submenu, Separator, ActionEntry
from orgmode.keybinding import Keybinding, Plug, Command
class Hyperlinks(object):
    u""" Hyperlinks plugin: follow/insert/copy org-mode [[uri][description]] links via vim. """
    def __init__(self):
        u""" Initialize plugin """
        object.__init__(self)
        # menu entries this plugin should create
        self.menu = ORGMODE.orgmenu + Submenu(u'Hyperlinks')
        # key bindings for this plugin
        # key bindings are also registered through the menu so only additional
        # bindings should be put in this variable
        self.keybindings = []
        # commands for this plugin
        self.commands = []
    # Matches [[uri]] or [[uri][description]] anchored at the string start.
    uri_match = re.compile(r'^\[{2}(?P<uri>[^][]*)(\]\[(?P<description>[^][]*))?\]{2}')
    @classmethod
    def _get_link(cls, cursor=None):
        u"""
        Get the link the cursor is on and return it's URI and description
        :cursor: None or (Line, Column)
        :returns: None if no link was found, otherwise {uri:URI, description:DESCRIPTION, line:LINE, start:START, end:END} or uri and description could be None if not set
        """
        cursor = cursor if cursor else vim.current.window.cursor
        line = vim.current.buffer[cursor[0] - 1].decode(u'utf-8')
        # if the cursor is on the last bracket, it's not recognized as a hyperlink
        start = line.rfind(u'[[', 0, cursor[1])
        if start == -1:
            # widen the search window past the cursor so sitting on the opening
            # brackets themselves still finds the link
            start = line.rfind(u'[[', 0, cursor[1] + 2)
        end = line.find(u']]', cursor[1])
        if end == -1:
            # likewise when the cursor sits on the closing brackets
            end = line.find(u']]', cursor[1] - 1)
        # extract link
        if start != -1 and end != -1:
            end += 2
            match = Hyperlinks.uri_match.match(line[start:end])
            res = {u'line':line, u'start':start, u'end':end, u'uri':None, u'description':None}
            if match:
                res.update(match.groupdict())
            return res
        # implicit None return when no link surrounds the cursor
    @classmethod
    def follow(cls, action=u'openLink', visual=u''):
        u""" Follow hyperlink. If called on a regular string UTL determines the
        outcome. Normally a file with that name will be opened.
        :action: "copy" if the link should be copied to clipboard, otherwise the link will be opened
        :visual: "visual" if Universal Text Linking should be triggered in visual mode
        :returns: URI or None
        """
        if not int(vim.eval(u'exists(":Utl")')):
            echom(u'Universal Text Linking plugin not installed, unable to proceed.')
            return
        # normalize the free-form arguments to the two values UTL accepts
        action = u'copyLink' if action and action.startswith(u'copy') else u'openLink'
        visual = u'visual' if visual and visual.startswith(u'visual') else u''
        link = Hyperlinks._get_link()
        if link and link[u'uri'] is not None:
            # call UTL with the URI
            vim.command((u'Utl %s %s %s' % (action, visual, link[u'uri'])).encode(u'utf-8'))
            return link[u'uri']
        else:
            # call UTL and let it decide what to do
            vim.command((u'Utl %s %s' % (action, visual)).encode(u'utf-8'))
    @classmethod
    @realign_tags
    def insert(cls, uri=None, description=None):
        u""" Inserts a hyperlink. If no arguments are provided, an interactive
        query will be started.
        :uri: The URI that will be opened
        :description: An optional description that will be displayed instead of the URI
        :returns: (URI, description)
        """
        # when editing an existing link, prefill missing pieces from it
        link = Hyperlinks._get_link()
        if link:
            if uri is None and link[u'uri'] is not None:
                uri = link[u'uri']
            if description is None and link[u'description'] is not None:
                description = link[u'description']
        if uri is None:
            uri = vim.eval(u'input("Link: ", "", "file")')
        elif link:
            uri = vim.eval(u'input("Link: ", "%s", "file")' % link[u'uri'])
        if uri is None:
            # user aborted the prompt
            return
        else:
            uri = uri.decode(u'utf-8')
        if description is None:
            description = vim.eval(u'input("Description: ")').decode(u'utf-8')
        elif link:
            description = vim.eval(u'input("Description: ", "%s")' % link[u'description']).decode(u'utf-8')
        if description is None:
            return
        # splice the new link into the current line, replacing the old link if any
        cursor = vim.current.window.cursor
        cl = vim.current.buffer[cursor[0] - 1].decode(u'utf-8')
        head = cl[:cursor[1] + 1] if not link else cl[:link[u'start']]
        tail = cl[cursor[1] + 1:] if not link else cl[link[u'end']:]
        separator = u''
        if description:
            separator = u']['
        if uri or description:
            vim.current.buffer[cursor[0] - 1] = (u''.join((head, u'[[%s%s%s]]' % (uri, separator, description), tail))).encode(u'utf-8')
        elif link:
            # both fields empty: remove the existing link entirely
            vim.current.buffer[cursor[0] - 1] = (u''.join((head, tail))).encode(u'utf-8')
    def register(self):
        u"""
        Registration of plugin. Key bindings and other initialization should be done.
        """
        self.commands.append(Command(u'OrgHyperlinkFollow', u':py ORGMODE.plugins[u"Hyperlinks"].follow()'))
        self.keybindings.append(Keybinding(u'gl', Plug(u'OrgHyperlinkFollow', self.commands[-1])))
        self.menu + ActionEntry(u'&Follow Link', self.keybindings[-1])
        self.commands.append(Command(u'OrgHyperlinkCopy', u':py ORGMODE.plugins[u"Hyperlinks"].follow(action=u"copy")'))
        self.keybindings.append(Keybinding(u'gyl', Plug(u'OrgHyperlinkCopy', self.commands[-1])))
        self.menu + ActionEntry(u'&Copy Link', self.keybindings[-1])
        self.commands.append(Command(u'OrgHyperlinkInsert', u':py ORGMODE.plugins[u"Hyperlinks"].insert(<f-args>)', arguments=u'*'))
        self.keybindings.append(Keybinding(u'gil', Plug(u'OrgHyperlinkInsert', self.commands[-1])))
        self.menu + ActionEntry(u'&Insert Link', self.keybindings[-1])
        self.menu + Separator()
        # find next link
        self.commands.append(Command(u'OrgHyperlinkNextLink', u":if search('\[\{2}\zs[^][]*\(\]\[[^][]*\)\?\ze\]\{2}', 's') == 0 | echo 'No further link found.' | endif"))
        self.keybindings.append(Keybinding(u'gn', Plug(u'OrgHyperlinkNextLink', self.commands[-1])))
        self.menu + ActionEntry(u'&Next Link', self.keybindings[-1])
        # find previous link
        self.commands.append(Command(u'OrgHyperlinkPreviousLink', u":if search('\[\{2}\zs[^][]*\(\]\[[^][]*\)\?\ze\]\{2}', 'bs') == 0 | echo 'No further link found.' | endif"))
        self.keybindings.append(Keybinding(u'go', Plug(u'OrgHyperlinkPreviousLink', self.commands[-1])))
        self.menu + ActionEntry(u'&Previous Link', self.keybindings[-1])
        self.menu + Separator()
        # Descriptive Links (conceal the [[..][ and ]] markup)
        self.commands.append(Command(u'OrgHyperlinkDescriptiveLinks', u':setlocal cole=2'))
        self.menu + ActionEntry(u'&Descriptive Links', self.commands[-1])
        # Literal Links (show the raw markup)
        self.commands.append(Command(u'OrgHyperlinkLiteralLinks', u':setlocal cole=0'))
        self.menu + ActionEntry(u'&Literal Links', self.commands[-1])
|
j-a-m-l/.dot
|
vim/bundle/vim-orgmode/ftplugin/orgmode/plugins/Hyperlinks.py
|
Python
|
mit
| 6,337
|
import sys
import os.path
import logging
import ply.yacc
from rightarrow.annotations import *
from rightarrow.lexer import Lexer
logger = logging.getLogger(__name__)
class Parser(object):
    """PLY-based parser for rightarrow type expressions.

    NOTE: in every ``p_*`` handler below the docstring IS the grammar
    production consumed by ``ply.yacc`` -- do not edit those docstrings
    as if they were documentation.
    """
    # Token list must be shared with the lexer (PLY requirement).
    tokens = Lexer.tokens
    def __init__(self, debug=False, lexer_class=None):
        self.debug = debug
        self.lexer_class = lexer_class or Lexer # Crufty but works around statefulness in PLY
    def parse(self, string, lexer = None):
        """Parse `string` and return the resulting type AST."""
        lexer = lexer or self.lexer_class()
        return self.parse_token_stream(lexer.tokenize(string))
    def parse_token_stream(self, token_iterator, start_symbol='ty'):
        """Build a fresh yacc parser (tables regenerated each call) and run it
        over `token_iterator`, starting from grammar symbol `start_symbol`."""
        # Since PLY has some crufty aspects and dumps files, we try to keep them local
        # However, we need to derive the name of the output Python file :-/
        output_directory = os.path.dirname(__file__)
        try:
            module_name = os.path.splitext(os.path.split(__file__)[1])[0]
        except:
            module_name = __name__
        parsing_table_module = '_'.join([module_name, start_symbol, 'parsetab'])
        # And we regenerate the parse table every time; it doesn't actually take that long!
        new_parser = ply.yacc.yacc(module=self,
                                   debug=self.debug,
                                   tabmodule = parsing_table_module,
                                   outputdir = output_directory,
                                   write_tables=0,
                                   start = start_symbol,
                                   errorlog = logger)
        return new_parser.parse(lexer = IteratorToTokenStream(token_iterator))
    # ===================== PLY Parser specification =====================
    # ARROW binds tighter than '|', and is right-associative so
    # a -> b -> c parses as a -> (b -> c).
    precedence = [
        ('right', 'ARROW'),
        ('left', '|'),
    ]
    def p_error(self, t):
        # PLY error hook: surface position information instead of recovering.
        raise Exception('Parse error at %s:%s near token %s (%s)' % (t.lineno, t.col, t.value, t.type))
    def p_empty(self, p):
        'empty :'
        pass
    def p_ty_parens(self, p):
        "ty : '(' ty ')'"
        p[0] = p[2]
    def p_ty_var(self, p):
        "ty : TYVAR"
        p[0] = Variable(p[1])
    def p_ty_union(self, p):
        "ty : ty '|' ty"
        p[0] = Union([p[1], p[3]])
    def p_ty_bare(self, p):
        "ty : bare_arg_ty"
        p[0] = p[1]
    def p_ty_funty_bare(self, p):
        # Single-argument function type without parens: a -> b.
        "ty : ty ARROW ty"
        p[0] = Function(arg_types=[p[1]], return_type=p[3])
    def p_ty_funty_complex(self, p):
        # Parenthesized argument list: (a, *b, **c) -> d.
        "ty : '(' maybe_arg_types ')' ARROW ty"
        argument_types=p[2]
        return_type=p[5]
        # Check here whether too many kwarg or vararg types are present
        # Each item in the list uses the dictionary encoding of tagged variants
        arg_types = [argty['arg_type'] for argty in argument_types if 'arg_type' in argty]
        vararg_types = [argty['vararg_type'] for argty in argument_types if 'vararg_type' in argty]
        kwarg_types = [argty['kwarg_type'] for argty in argument_types if 'kwarg_type' in argty]
        if len(vararg_types) > 1:
            raise Exception('Argument list with multiple vararg types: %s' % argument_types)
        if len(kwarg_types) > 1:
            raise Exception('Argument list with multiple kwarg types: %s' % argument_types)
        # All the arguments that are not special
        p[0] = Function(arg_types=arg_types,
                        vararg_type=vararg_types[0] if len(vararg_types) > 0 else None,
                        kwarg_type=kwarg_types[0] if len(kwarg_types) > 0 else None,
                        kwonly_arg_types=None,
                        return_type=return_type)
    # Because a bare function type is equivalent to a single argument in parens, it is not
    # parsed by this rule
    def p_maybe_arg_types(self, p):
        '''
        maybe_arg_types : arg_types ',' arg_ty
                        | empty
        '''
        p[0] = [] if len(p) == 2 else p[1] + [p[3]]
    # Executive decision is this: kwargs and varargs get to be elements of this list ANYWHERE
    # and we check later, to avoid any parsing issues with commas
    def p_arg_types_single(self, p):
        '''
        arg_types : arg_types ',' arg_ty
                  | arg_ty
        '''
        p[0] = [p[1]] if len(p) == 2 else p[1] + [p[3]]
    def p_arg_ty_normal(self, p):
        "arg_ty : ty"
        p[0] = { 'arg_type' : p[1] }
    def p_arg_ty_vararg(self, p):
        "arg_ty : '*' ty"
        p[0] = { 'vararg_type' : p[2] }
    def p_arg_ty_kwarg(self, p):
        "arg_ty : KWARG ty"
        p[0] = { 'kwarg_type' : p[2] }
    # Special types that never require parenthesis
    def p_bare_arg_ty(self, p):
        """
        bare_arg_ty : identifier_ty
                    | dict_ty
                    | list_ty
                    | object_ty
                    | any_ty
        """
        p[0] = p[1]
    def p_identifier_ty(self, p):
        "identifier_ty : ID"
        p[0] = NamedType(p[1])
    def p_list_ty(self, p):
        "list_ty : '[' ty ']'"
        p[0] = List(elem_ty=p[2])
    def p_dict_ty(self, p):
        "dict_ty : '{' ty ':' ty '}'"
        p[0] = Dict(key_ty=p[2], value_ty=p[4])
    def p_any_ty(self, p):
        "any_ty : ANY"
        p[0] = Any()
    def p_object_ty(self, p):
        """
        object_ty : OBJECT '(' ID ')'
                  | OBJECT '(' ID ',' obj_fields ')'
        """
        field_types = {} if len(p) == 5 else p[5]
        p[0] = Object(p[3], **field_types)
    def p_obj_fields(self, p):
        """
        obj_fields : obj_fields ',' obj_field
                   | obj_field
        """
        p[0] = dict([p[1]] if len(p) == 2 else p[1] + [p[3]]) # Note: no checking for dupe fields at the moment
    def p_obj_field(self, p):
        "obj_field : ID ':' ty"
        p[0] = (p[1], p[3])
class IteratorToTokenStream(object):
    """Adapt a plain Python iterator to the ``token()`` interface PLY expects.

    PLY treats a ``None`` return as end-of-input, so StopIteration is
    translated into ``None``.
    """
    def __init__(self, iterator):
        self.iterator = iterator

    def token(self):
        """Return the next token, or None when the iterator is exhausted."""
        try:
            # Bug fix: the original called `self.iterator.next()`, which only
            # exists on Python 2 iterators.  The `next()` builtin works on
            # both Python 2.6+ and Python 3.
            return next(self.iterator)
        except StopIteration:
            return None
if __name__ == '__main__':
    # Smoke-test entry point: parse a type expression read from stdin and
    # echo the resulting AST.
    logging.basicConfig()
    parser = Parser(debug=True)
    # Bug fix: `print expr` is a SyntaxError on Python 3; the parenthesized
    # single-expression form behaves identically on Python 2 and 3.
    print(parser.parse(sys.stdin.read()))
|
kennknowles/python-rightarrow
|
rightarrow/parser.py
|
Python
|
apache-2.0
| 6,194
|
def extractPeaTranslation(item):
    """Feed-parse hook for 'Pea Translation' items.

    Returns None (skip) when the title carries no volume/chapter/fragment
    information or is a preview post; otherwise returns False.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_lc = item['title'].lower()
    if 'preview' in title_lc or not (vol or chp or frag):
        return None
    return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractPeaTranslation.py
|
Python
|
bsd-3-clause
| 215
|
import numpy as __np__
from numpy import sqrt as __sqrt__
from numpy import cos as __cos__
from numpy import sin as __sin__
import matplotlib.pyplot as __plt__
from matplotlib import cm as __cm__
from matplotlib.ticker import LinearLocator as __LinearLocator__
from matplotlib.ticker import FormatStrFormatter as __FormatStrFormatter__
#generate test surface figure
def makecircle(a, r, PR):
max = a.max()
size = __np__.sqrt(a.size)
for i in range(int(size)):
for j in range(int(size)):
if __np__.sqrt(r[i]**2+r[j]**2) > PR:
a[i,j] = max
def testsurface2():
    """Generate and display a synthetic interferogram test surface built from
    two Zernike-like terms (defocus Z4 and astigmatism Z5), show four
    phase-shifted fringe frames, recover the phase, and return the wavefront
    map Z (zeroed outside the unit pupil).

    NOTE(review): this routine is display-heavy (multiple plt.show() calls)
    and blocks until each window is closed.
    """
    lambda_1 = 632*(10**-9)  # HeNe wavelength in meters; currently unused -- TODO confirm
    PR = 1  # pupil radius
    r = __np__.linspace(-PR, PR, 200)
    x, y = __np__.meshgrid(r,r)
    r1 = __np__.sqrt(x**2 + y**2)
    # Zernike coefficients: Z4 (defocus), Z5 (oblique astigmatism)
    Z4 = 1
    Z5 = 0.6
    ZX = Z4 * __np__.sqrt(3)*(2*r1**2-1) + Z5*2*__np__.sqrt(6)*x*y
    OPD = ZX*2/PR  # optical path difference
    ph = 2 * __np__.pi * OPD  # phase map
    # Two-beam interference: I = Ia + Ib + 2*sqrt(Ia*Ib)*cos(phase)
    Ia = 1
    Ib = 1
    Ixy = Ia + Ib + 2 * __np__.sqrt(Ia*Ib) * __np__.cos(ph)
    makecircle(Ixy, r, PR)  # blank everything outside the pupil
    fig = __plt__.figure(figsize=(9, 6), dpi=80)
    __plt__.imshow(-Ixy, extent=[-PR,PR,-PR,PR])
    __plt__.set_cmap('Greys')
    __plt__.show()
    # Four frames with phase steps of 0/45/90/135 degrees.
    # NOTE(review): classic 4-step phase shifting uses 90-degree steps;
    # confirm the 45-degree increments are intended.
    I1 = Ia + Ib + 2 * __np__.sqrt(Ia*Ib) * __np__.cos(ph)
    I2 = Ia + Ib + 2 * __np__.sqrt(Ia*Ib) * __np__.cos(ph+45.0/180*__np__.pi)
    I3 = Ia + Ib + 2 * __np__.sqrt(Ia*Ib) * __np__.cos(ph+90.0/180*__np__.pi)
    I4 = Ia + Ib + 2 * __np__.sqrt(Ia*Ib) * __np__.cos(ph+135.0/180*__np__.pi)
    Ilist = [I1,I2,I3,I4]
    for i in range(4):
        makecircle(Ilist[i], r, PR)
        fig = __plt__.figure(figsize=(9, 6), dpi=80)
        __plt__.imshow(-Ilist[i], extent=[-PR,PR,-PR,PR])
        __plt__.set_cmap('Greys')
        __plt__.show()
    # Phase recovery from the four frames.
    ph1 = __np__.arctan((I4-I2)/(I1-I3))
    Ixy1 = Ia + Ib + 2 * __np__.sqrt(Ia*Ib) * __np__.cos(ph1)
    fig = __plt__.figure(figsize=(9, 6), dpi=80)
    # NOTE(review): this plots the ORIGINAL Ixy, not the recovered Ixy1 --
    # possibly intended to be -Ixy1; confirm.
    __plt__.imshow(-Ixy, extent=[-PR,PR,-PR,PR])
    __plt__.set_cmap('Greys')
    __plt__.show()
    OPD = ph*PR/2  # back from phase to OPD
    Z = OPD
    fig = __plt__.figure(figsize=(6, 6), dpi=80)
    #ax = fig.gca(projection='3d')
    #surf = ax.plot_surface(x, y, Z, rstride=1, cstride=1, cmap=__cm__.RdYlGn,linewidth=0, antialiased=False, alpha = 0.6)
    im = __plt__.pcolormesh(x, y, Z, cmap=__cm__.RdYlGn)
    __plt__.colorbar()
    __plt__.show()
    # Zero the wavefront outside the unit circle (same row/col convention
    # as makecircle).
    for i in range(len(Z)):
        for j in range(len(Z)):
            if r[i]**2+r[j]**2>1:
                Z[i][j]=0
    fig = __plt__.figure(figsize=(6, 6), dpi=80)
    im = __plt__.pcolormesh(x, y, Z, cmap=__cm__.RdYlGn)
    __plt__.colorbar()
    __plt__.show()
    return Z
|
Sterncat/opticspy
|
opticspy/test/test_surface2.py
|
Python
|
mit
| 2,346
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interfaces for Hadoop filesystem access via HttpFs/WebHDFS
"""
import errno
import logging
import posixpath
import stat
import threading
import time
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from desktop.lib.rest import http_client, resource
from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
from hadoop.fs.hadoopfs import Hdfs
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.webhdfs_types import WebHdfsStat, WebHdfsContentSummary
from hadoop.conf import UPLOAD_CHUNK_SIZE
import hadoop.conf
import hadoop.core_site
# Fallback superuser name used when the owner of '/' cannot be determined.
DEFAULT_HDFS_SUPERUSER = 'hdfs'
# The number of bytes to read if not specified
DEFAULT_READ_SIZE = 1024*1024 # 1MB
LOG = logging.getLogger(__name__)
class WebHdfs(Hdfs):
"""
WebHdfs implements the filesystem interface via the WebHDFS rest protocol.
"""
DEFAULT_USER = 'hue' # This should be the user running Hue
TRASH_CURRENT = 'Current'
    def __init__(self, url,
                 fs_defaultfs,
                 logical_name=None,
                 hdfs_superuser=None,
                 security_enabled=False,
                 temp_dir="/tmp"):
        """
        :url: WebHDFS/HttpFs endpoint URL
        :fs_defaultfs: the fs.defaultFS URI (used to build full HDFS paths)
        :hdfs_superuser: explicit superuser; if None it is resolved lazily
                         from the owner of '/' (see the `superuser` property)
        :security_enabled: enables Kerberos auth on the HTTP client
        """
        self._url = url
        self._superuser = hdfs_superuser
        self._security_enabled = security_enabled
        self._temp_dir = temp_dir
        self._fs_defaultfs = fs_defaultfs
        self._logical_name = logical_name
        self._client = self._make_client(url, security_enabled)
        self._root = resource.Resource(self._client)
        # To store user info (per-thread impersonation state)
        self._thread_local = threading.local()
        LOG.debug("Initializing Hadoop WebHdfs: %s (security: %s, superuser: %s)" %
                  (self._url, self._security_enabled, self._superuser))
    @classmethod
    def from_config(cls, hdfs_config):
        """Build a WebHdfs instance from a Hue HDFS config object."""
        fs_defaultfs = hdfs_config.FS_DEFAULTFS.get()
        return cls(url=_get_service_url(hdfs_config),
                   fs_defaultfs=fs_defaultfs,
                   logical_name=hdfs_config.LOGICAL_NAME.get(),
                   security_enabled=hdfs_config.SECURITY_ENABLED.get(),
                   temp_dir=hdfs_config.TEMP_DIR.get())
    def __str__(self):
        return "WebHdfs at %s" % self._url
    def _make_client(self, url, security_enabled):
        """Create the HTTP client, enabling Kerberos auth when required."""
        client = http_client.HttpClient(
            url, exc_class=WebHdfsException, logger=LOG)
        if security_enabled:
            client.set_kerberos_auth()
        return client
    @property
    def uri(self):
        # The WebHDFS endpoint URL this instance talks to.
        return self._url
    @property
    def logical_name(self):
        # HA logical name of the namenode, if configured.
        return self._logical_name
    @property
    def fs_defaultfs(self):
        # The fs.defaultFS URI (e.g. hdfs://namenode:8020).
        return self._fs_defaultfs
    @property
    def security_enabled(self):
        # True when Kerberos authentication is in use.
        return self._security_enabled
    @property
    def superuser(self):
        """HDFS superuser name, resolved lazily from the owner of '/'.

        Falls back to DEFAULT_HDFS_SUPERUSER when the lookup fails.
        """
        if self._superuser is None:
            try:
                # The owner of '/' is usually the superuser
                sb = self.stats('/')
                self._superuser = sb.user
            except Exception, ex:
                LOG.exception('Failed to determine superuser of %s: %s' % (self, ex))
                self._superuser = DEFAULT_HDFS_SUPERUSER
        return self._superuser
    @property
    def user(self):
        """The user currently being impersonated on this thread
        (DEFAULT_USER until setuser() has been called)."""
        try:
            return self._thread_local.user
        except AttributeError:
            return WebHdfs.DEFAULT_USER
@property
def trash_path(self):
try:
return self._thread_local.trash_path[self.user]
except AttributeError:
self._thread_local.trash_paths = {}
self._thread_local.trash_paths[self.user] = self.join(self.get_home_dir(), '.Trash')
except KeyError:
self._thread_local.trash_paths[self.user] = self.join(self.get_home_dir(), '.Trash')
return self._thread_local.trash_paths[self.user]
    @property
    def current_trash_path(self):
        # <trash>/Current -- where new trash entries are placed.
        return self.join(self.trash_path, self.TRASH_CURRENT)
    def _getparams(self):
        """Base query parameters sent with every request: authenticate as the
        Hue service user, impersonate ("doas") the current end user."""
        return {
            "user.name" : WebHdfs.DEFAULT_USER,
            "doas" : self.user
        }
    def setuser(self, user):
        """Set a new user. Return the current user."""
        curr = self.user
        self._thread_local.user = user
        return curr
    def listdir_stats(self, path, glob=None):
        """
        listdir_stats(path, glob=None) -> [ WebHdfsStat ]
        Get directory listing with stats.

        :glob: optional server-side filename filter (passed as 'filter').
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        if glob is not None:
            params['filter'] = glob
        params['op'] = 'LISTSTATUS'
        # `json` here is the decoded response body, not the stdlib module.
        json = self._root.get(path, params)
        filestatus_list = json['FileStatuses']['FileStatus']
        return [ WebHdfsStat(st, path) for st in filestatus_list ]
    def listdir(self, path, glob=None):
        """
        listdir(path, glob=None) -> [ entry names ]
        Get directory entry names without stats.
        """
        dirents = self.listdir_stats(path, glob)
        return [Hdfs.basename(x.path) for x in dirents]
    def get_content_summary(self, path):
        """
        get_content_summary(path) -> WebHdfsContentSummary

        Space/quota/file-count summary for a path (op=GETCONTENTSUMMARY).
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'GETCONTENTSUMMARY'
        json = self._root.get(path, params)
        return WebHdfsContentSummary(json['ContentSummary'])
    def _stats(self, path):
        """This version of stats returns None if the entry is not found"""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'GETFILESTATUS'
        try:
            json = self._root.get(path, params)
            return WebHdfsStat(json['FileStatus'], path)
        except WebHdfsException, ex:
            # Missing paths surface either as a FileNotFoundException payload
            # or a plain HTTP 404; everything else propagates.
            if ex.server_exc == 'FileNotFoundException' or ex.code == 404:
                return None
            raise ex
    def stats(self, path):
        """
        stats(path) -> WebHdfsStat

        Raises IOError(ENOENT) when the path does not exist.
        """
        res = self._stats(path)
        if res is not None:
            return res
        raise IOError(errno.ENOENT, _("File %s not found") % path)
    def exists(self, path):
        # True iff the path exists (file or directory).
        return self._stats(path) is not None
def isdir(self, path):
sb = self._stats(path)
if sb is None:
return False
return sb.isDir
def isfile(self, path):
sb = self._stats(path)
if sb is None:
return False
return not sb.isDir
def _ensure_current_trash_directory(self):
"""Create trash directory for a user if it doesn't exist."""
if self.exists(self.current_trash_path):
self.mkdir(self.current_trash_path)
return self.current_trash_path
    def _trash(self, path, recursive=False):
        """
        _trash(path, recursive=False)
        Move a file or directory to trash.
        Will create a timestamped directory underneath /user/<username>/.Trash.
        Trash must be enabled for this to work.

        Raises IOError(ENOENT) if path is missing, IOError(EISDIR) when a
        directory is trashed without recursive=True, and IOError(EPERM) when
        the path is already inside the trash.
        """
        if not self.exists(path):
            raise IOError(errno.ENOENT, _("File %s not found") % path)
        if not recursive and self.isdir(path):
            raise IOError(errno.EISDIR, _("File %s is a directory") % path)
        if path.startswith(self.trash_path):
            raise IOError(errno.EPERM, _("File %s is already trashed") % path)
        # Make path (with timestamp suffix if necessary)
        # path[1:] strips the leading '/' so the absolute path nests under trash.
        base_trash_path = self.join(self._ensure_current_trash_directory(), path[1:])
        trash_path = base_trash_path
        while self.exists(trash_path):
            trash_path = base_trash_path + str(time.time())
        # Move path to trash path
        self.mkdir(self.dirname(trash_path))
        self.rename(path, trash_path)
    def _delete(self, path, recursive=False):
        """
        _delete(path, recursive=False)
        Delete a file or directory permanently (bypasses trash).
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'DELETE'
        params['recursive'] = recursive and 'true' or 'false'
        result = self._root.delete(path, params)
        # This part of the API is nonsense.
        # The lack of exception should indicate success.
        if not result['boolean']:
            raise IOError(_('Delete failed: %s') % path)
    def remove(self, path, skip_trash=False):
        """Delete a file. Goes through trash unless trash is disabled or
        skip_trash is set."""
        if hadoop.core_site.get_trash_interval() is None or skip_trash:
            self._delete(path, recursive=False)
        else:
            self._trash(path, recursive=False)
    def rmdir(self, path, skip_trash=False):
        """Delete a directory."""
        # NOTE: delegates to remove(), which is non-recursive -- the directory
        # must be empty (or trashable as-is).
        self.remove(path, skip_trash)
    def rmtree(self, path, skip_trash=False):
        """Delete a tree recursively."""
        if hadoop.core_site.get_trash_interval() is None or skip_trash:
            self._delete(path, recursive=True)
        else:
            self._trash(path, recursive=True)
    def restore(self, path):
        """
        restore(path)
        The root of ``path`` will be /users/<current user>/.Trash/<timestamp>.
        Removing the root from ``path`` will provide the original path.
        Ensure parent directories exist and rename path.

        Raises IOError(EPERM) when trash is disabled or path is outside trash,
        IOError(EEXIST) when the original path already exists again.
        """
        if hadoop.core_site.get_trash_interval() is None:
            raise IOError(errno.EPERM, _("Trash is not enabled."))
        if not path.startswith(self.trash_path):
            raise IOError(errno.EPERM, _("File %s is not in trash") % path)
        # Build original path by walking up toward trash_path; the final
        # component reached (the <timestamp> dir) is dropped along with the
        # trash prefix itself.
        original_path = []
        split_path = self.split(path)
        while split_path[0] != self.trash_path:
            original_path.append(split_path[1])
            split_path = self.split(split_path[0])
        original_path.reverse()
        original_path = self.join(posixpath.sep, *original_path)
        # move to original path
        # the path could have been expunged.
        if self.exists(original_path):
            raise IOError(errno.EEXIST, _("Path %s already exists.") % str(smart_str(original_path)))
        self.rename(path, original_path)
    def purge_trash(self):
        """
        purge_trash()
        Purge all trash in users ``trash_path``

        Permanently removes every timestamped snapshot (skip_trash=True).
        """
        if hadoop.core_site.get_trash_interval() is None:
            raise IOError(errno.EPERM, _("Trash is not enabled."))
        for timestamped_directory in self.listdir(self.trash_path):
            self.rmtree(self.join(self.trash_path, timestamped_directory), True)
    def mkdir(self, path, mode=None):
        """
        mkdir(path, mode=None)
        Creates a directory and any parent directory if necessary.

        :mode: optional octal permission (int or string).
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'MKDIRS'
        if mode is not None:
            params['permission'] = safe_octal(mode)
        success = self._root.put(path, params)
        if not success:
            raise IOError(_("Mkdir failed: %s") % path)
    def rename(self, old, new):
        """rename(old, new)

        A relative `new` is resolved against the directory of `old`.
        """
        old = Hdfs.normpath(old)
        if not new.startswith('/'):
            new = Hdfs.join(Hdfs.dirname(old), new)
        new = Hdfs.normpath(new)
        params = self._getparams()
        params['op'] = 'RENAME'
        # Encode `new' because it's in the params
        params['destination'] = smart_str(new)
        result = self._root.put(old, params)
        if not result['boolean']:
            raise IOError(_("Rename failed: %s -> %s") %
                          (str(smart_str(old)), str(smart_str(new))))
    def rename_star(self, old_dir, new_dir):
        """Equivalent to `mv old_dir/* new"""
        if not self.isdir(old_dir):
            raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % old_dir)
        if not self.exists(new_dir):
            self.mkdir(new_dir)
        elif not self.isdir(new_dir):
            raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % new_dir)
        # Move each direct child of old_dir into new_dir.
        ls = self.listdir(old_dir)
        for dirent in ls:
            self.rename(Hdfs.join(old_dir, dirent), Hdfs.join(new_dir, dirent))
    def chown(self, path, user=None, group=None, recursive=False):
        """chown(path, user=None, group=None, recursive=False)

        Set owner and/or group. With recursive=True the change is applied to
        every entry returned by listdir_recursive (defined on the base class
        -- presumably includes nested children; verify whether it includes
        `path` itself).
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'SETOWNER'
        if user is not None:
            params['owner'] = user
        if group is not None:
            params['group'] = group
        if recursive:
            for xpath in self.listdir_recursive(path):
                self._root.put(xpath, params)
        else:
            self._root.put(path, params)
    def chmod(self, path, mode, recursive=False):
        """
        chmod(path, mode, recursive=False)
        `mode' should be an octal integer or string.
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'SETPERMISSION'
        params['permission'] = safe_octal(mode)
        if recursive:
            for xpath in self.listdir_recursive(path):
                self._root.put(xpath, params)
        else:
            self._root.put(path, params)
    def get_home_dir(self):
        """get_home_dir() -> Home directory for the current user"""
        params = self._getparams()
        params['op'] = 'GETHOMEDIRECTORY'
        res = self._root.get(params=params)
        return res['Path']
    def read(self, path, offset, length, bufsize=None):
        """
        read(path, offset, length[, bufsize]) -> data
        Read data from a file.

        Returns "" (empty) when the requested offset is past end-of-file,
        mirroring local file semantics.
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'OPEN'
        params['offset'] = long(offset)
        params['length'] = long(length)
        if bufsize is not None:
            params['bufsize'] = bufsize
        try:
            return self._root.get(path, params)
        except WebHdfsException, ex:
            # Server signals an out-of-range offset with an error message;
            # translate that into an empty read.
            if "out of the range" in ex.message:
                return ""
            raise ex
    def open(self, path, mode='r'):
        """
        DEPRECATED!
        open(path, mode='r') -> File object
        This exists for legacy support and backwards compatibility only.
        Please use read().
        """
        return File(self, path, mode)
    def create(self, path, overwrite=False, blocksize=None,
               replication=None, permission=None, data=None):
        """
        create(path, overwrite=False, blocksize=None, replication=None, permission=None)
        Creates a file with the specified parameters.
        `permission' should be an octal integer or string.

        Uses the WebHDFS two-step redirect protocol: the namenode answers
        with a 307 pointing at a datanode, where `data` is then uploaded
        (see _invoke_with_redirect).
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'CREATE'
        params['overwrite'] = overwrite and 'true' or 'false'
        if blocksize is not None:
            params['blocksize'] = long(blocksize)
        if replication is not None:
            params['replication'] = int(replication)
        if permission is not None:
            params['permission'] = safe_octal(permission)
        self._invoke_with_redirect('PUT', path, params, data)
    def append(self, path, data):
        """
        append(path, data)
        Append data to a given file (same redirect protocol as create()).
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'APPEND'
        self._invoke_with_redirect('POST', path, params, data)
def copyfile(self, src, dst):
sb = self._stats(src)
if sb is None:
raise IOError(errno.ENOENT, _("Copy src '%s' does not exist") % src)
if sb.isDir:
raise IOError(errno.INVAL, _("Copy src '%s' is a directory") % src)
if self.isdir(dst):
raise IOError(errno.INVAL, _("Copy dst '%s' is a directory") % dst)
offset = 0
while True:
data = self.read(src, offset, UPLOAD_CHUNK_SIZE.get())
if offset == 0:
self.create(dst,
overwrite=True,
blocksize=sb.blockSize,
replication=sb.replication,
permission=oct(stat.S_IMODE(sb.mode)),
data=data)
cnt = len(data)
if cnt < UPLOAD_CHUNK_SIZE.get():
break
if offset != 0:
self.append(dst, data)
offset += cnt
    def copy_remote_dir(self, source, destination, dir_mode=0755, owner=None):
        """Recursively copy `source` directory into `destination`, creating
        directories as `owner` with `dir_mode` and chown-ing copied files
        (chown requires superuser privileges)."""
        if owner is None:
            owner = self.DEFAULT_USER
        self.do_as_user(owner, self.mkdir, destination, mode=dir_mode)
        self.do_as_user(owner, self.chmod, destination, mode=dir_mode) # To remove after HDFS-3491
        for stat in self.listdir_stats(source):
            source_file = stat.path
            destination_file = posixpath.join(destination, stat.name)
            if stat.isDir:
                self.copy_remote_dir(source_file, destination_file, dir_mode, owner)
            else:
                self.do_as_user(owner, self.copyfile, source_file, destination_file)
                self.do_as_superuser(self.chown, destination_file, owner, owner)
    def copy(self, src, dest, recursive=False, dir_mode=0755, owner=None):
        """
        Copy file, or directory, in HDFS to another location in HDFS.
        ``src`` -- The directory, or file, to copy from.
        ``dest`` -- the directory, or file, to copy to.
        If 'dest' is a directory that exists, copy 'src' into dest.
        If 'dest' is a file that exists and 'src' is a file, overwrite dest.
        If 'dest' does not exist, create 'src' as 'dest'.
        ``recursive`` -- Recursively copy contents of 'src' to 'dest'.
        This is required for directories.
        ``dir_mode`` and ``owner`` are used to define permissions on the newly
        copied files and directories.
        This method will overwrite any pre-existing files that collide with what is being copied.
        Copying a directory to a file is not allowed.
        """
        if owner is None:
            owner = self.user
        src = self.abspath(src)
        dest = self.abspath(dest)
        if not self.exists(src):
            raise IOError(errno.ENOENT, _("File not found: %s") % src)
        if self.isdir(src):
            # 'src' is directory.
            # Skip if not recursive copy and 'src' is directory.
            if not recursive:
                LOG.debug("Skipping contents of %s" % src)
                return None
            # If 'dest' is a directory change 'dest'
            # to include 'src' basename.
            # create 'dest' if it doesn't already exist.
            if self.exists(dest):
                if self.isdir(dest):
                    dest = self.join(dest, self.basename(src))
                else:
                    raise IOError(errno.EEXIST, _("Destination file %s exists and is not a directory.") % dest)
            self.do_as_user(owner, self.mkdir, dest)
            self.do_as_user(owner, self.chmod, dest, mode=dir_mode)
            # Copy files in 'src' directory to 'dest'.
            self.copy_remote_dir(src, dest, dir_mode, owner)
        else:
            # 'src' is a file.
            # If 'dest' is a directory, then copy 'src' into that directory.
            # Other wise, copy to 'dest'.
            if self.exists(dest) and self.isdir(dest):
                self.copyfile(src, self.join(dest, self.basename(src)))
            else:
                self.copyfile(src, dest)
@staticmethod
def urlsplit(url):
    """Split an HDFS URL; thin convenience delegate to Hdfs.urlsplit."""
    return Hdfs.urlsplit(url)
def get_hdfs_path(self, path):
    """Return *path* fully qualified under this filesystem's fs.defaultFS."""
    relative = path.lstrip('/')
    return posixpath.join(self.fs_defaultfs, relative)
def _invoke_with_redirect(self, method, path, params=None, data=None):
    """
    Issue a request, and expect a redirect, and then submit the data to
    the redirected location. This is used for create, write, etc.

    Returns the response from the redirected request.

    Raises WebHdfsException when the namenode does not answer the first
    leg with a redirect.
    """
    next_url = None
    try:
        # Do not pass data in the first leg.
        self._root.invoke(method, path, params)
    except WebHdfsException, ex:
        # This is expected. We get a 307 redirect.
        # The following call may throw.
        next_url = self._get_redirect_url(ex)
    # If the first leg succeeded without raising, no redirect was issued.
    if next_url is None:
        raise WebHdfsException(
            _("Failed to create '%s'. HDFS did not return a redirect") % path)
    # Now talk to the real thing. The redirect url already includes the params.
    client = self._make_client(next_url, self.security_enabled)
    headers = {'Content-Type': 'application/octet-stream'}
    return resource.Resource(client).invoke(method, data=data, headers=headers)
def _get_redirect_url(self, webhdfs_ex):
    """Retrieve the redirect url from an exception object.

    Re-raises the original WebHdfsException when the wrapped HTTP error
    is missing or is not a redirect status.
    """
    try:
        # The actual HttpError (307) is wrapped inside
        http_error = webhdfs_ex.get_parent_ex()
        if http_error is None:
            raise webhdfs_ex
        # Accept any standard redirect status, not just 307.
        if http_error.response.status_code not in (301, 302, 303, 307):
            LOG.error("Response is not a redirect: %s" % webhdfs_ex)
            raise webhdfs_ex
        return http_error.response.headers['location']
    except Exception, ex:
        LOG.error("Failed to read redirect from response: %s (%s)" %
                  (webhdfs_ex, ex))
        raise webhdfs_ex
def get_delegation_token(self, renewer):
    """get_delegation_token(user) -> Delegation token

    ``renewer`` is the user allowed to renew the returned token.
    """
    # Workaround for HDFS-3988: when security is enabled, issue a harmless
    # authenticated call first before requesting the token.
    if self._security_enabled:
        self.get_home_dir()
    params = self._getparams()
    params['op'] = 'GETDELEGATIONTOKEN'
    params['renewer'] = renewer
    res = self._root.get(params=params)
    # The token comes back as an opaque URL-safe string.
    return res['Token']['urlString']
def do_as_user(self, username, fn, *args, **kwargs):
    """Invoke ``fn(*args, **kwargs)`` while temporarily acting as *username*.

    The previous filesystem user is always restored, even when ``fn``
    (or the user switch itself) raises.
    """
    saved_user = self.user
    try:
        self.setuser(username)
        return fn(*args, **kwargs)
    finally:
        self.setuser(saved_user)
def do_as_superuser(self, fn, *args, **kwargs):
    """Invoke ``fn(*args, **kwargs)`` while acting as the HDFS superuser."""
    return self.do_as_user(self.superuser, fn, *args, **kwargs)
class File(object):
    """
    DEPRECATED!

    Represent an open file on HDFS. This exists to mirror the old thriftfs
    interface, for backwards compatibility only.
    """
    def __init__(self, fs, path, mode='r'):
        # ``fs`` is the WebHdfs filesystem, ``path`` the HDFS path, ``mode``
        # 'r' or 'w' (write mode only supports appending, see append()).
        self._fs = fs
        self._path = normpath(path)
        self._pos = 0
        self._mode = mode
        try:
            self._stat = fs.stats(path)
            if self._stat.isDir:
                raise IOError(errno.EISDIR, _("Is a directory: '%s'") % path)
        except IOError, ex:
            if ex.errno == errno.ENOENT and 'w' in self._mode:
                # Opened for writing and missing: create it, then re-stat.
                self._fs.create(self._path)
                self.stat()
            else:
                raise ex

    def seek(self, offset, whence=0):
        """Set the file pointer to the given spot. @see file.seek"""
        if whence == SEEK_SET:
            self._pos = offset
        elif whence == SEEK_CUR:
            self._pos += offset
        elif whence == SEEK_END:
            # Refresh size from the namenode before seeking from the end.
            self.stat()
            self._pos = self._fs.stats(self._path).size + offset
        else:
            raise IOError(errno.EINVAL, _("Invalid argument to seek for whence"))

    def stat(self):
        """Refresh and return the cached stat record for this file."""
        self._stat = self._fs.stats(self._path)
        return self._stat

    def tell(self):
        """Return the current read position in bytes."""
        return self._pos

    def read(self, length=DEFAULT_READ_SIZE):
        """Read up to ``length`` bytes from the current position and advance it."""
        data = self._fs.read(self._path, self._pos, length)
        self._pos += len(data)
        return data

    def write(self, data):
        """Append the data to the end of the file"""
        self.append(data)

    def append(self, data):
        """Append ``data``; raises EINVAL unless opened with 'w' in mode."""
        if 'w' not in self._mode:
            raise IOError(errno.EINVAL, _("File not open for writing"))
        self._fs.append(self._path, data=data)

    def flush(self):
        # No-op: append() goes straight through to the filesystem call.
        pass

    def close(self):
        # No-op: there is no underlying handle to release.
        pass
def safe_octal(octal_value):
    """
    safe_octal(octal_value) -> octal value in string

    This correctly handles octal values specified as a string or as a numeric.
    """
    try:
        rendered = oct(octal_value)
    except TypeError:
        # Non-numeric input (typically already an octal string).
        rendered = str(octal_value)
    return rendered
def _get_service_url(hdfs_config):
override = hdfs_config.WEBHDFS_URL.get()
if override:
return override
fs_defaultfs = hdfs_config.FS_DEFAULTFS.get()
netloc = Hdfs.urlsplit(fs_defaultfs)[1]
host = netloc.split(':')[0]
port = hadoop.conf.DEFAULT_NN_HTTP_PORT
return "http://%s:%s/webhdfs/v1" % (host, port)
def test_fs_configuration(fs_config):
    """
    This is a config validation method. Returns a list of
    [ (config_variable, error_message) ]

    Probes three things as the superuser: that '/' is readable and owned
    by 'hdfs', that a temp file can be created, and that the superuser
    can chown it. The temp file is always cleaned up.
    """
    fs = WebHdfs.from_config(fs_config)
    fs.setuser(fs.superuser)

    # Access root
    try:
        statbuf = fs.stats('/')
        if statbuf.user != 'hdfs':
            return [(fs_config.WEBHDFS_URL, _("Filesystem root '/' should be owned by 'hdfs'"))]
    except Exception, ex:
        LOG.info("%s -- Validation error: %s" % (fs, ex))
        return [(fs_config.WEBHDFS_URL, _('Failed to access filesystem root'))]

    # Write a file
    tmpname = fs.mktemp(prefix='hue_config_validation')
    try:
        fs.create(tmpname)
    except Exception, ex:
        LOG.info("%s -- Validation error: %s" % (fs, ex))
        return [(fs_config.WEBHDFS_URL,
                 _('Failed to create temporary file "%s"') % tmpname)]

    # Check superuser has super power
    try:  # Finally: delete tmpname
        try:
            fs.chown(tmpname, fs.superuser)
        except Exception, ex:
            LOG.info("%s -- Validation error: %s" % (fs, ex))
            return [(fs_config.WEBHDFS_URL,
                     'Failed to chown file. Please make sure that the filesystem root '
                     'is owned by the cluster superuser ("hdfs" in most cases).')]
    finally:
        try:
            fs.remove(tmpname)
        except Exception, ex:
            LOG.error("Failed to remove '%s': %s" % (tmpname, ex))
            return [(fs_config.WEBHDFS_URL,
                     _('Failed to remove temporary file "%s"') % tmpname)]

    return [ ]
|
pwong-mapr/private-hue
|
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py
|
Python
|
apache-2.0
| 24,483
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django.utils.translation import ugettext_lazy as _
from dialer_contact.models import Phonebook, Contact
from dialer_campaign.models import Campaign, Subscriber
from dialer_campaign.constants import SUBSCRIBER_STATUS
from user_profile.models import UserProfile
from mod_utils.function_def import get_status_value
from dateutil.rrule import rrule, DAILY, HOURLY
from dateutil.parser import parse
from datetime import timedelta
def get_phonebook_list(user):
    """Return (id, label) choices for each phonebook owned by *user*.

    Labels combine the phonebook name with its contact count, e.g.
    "Friends -> 3 contact(s)".
    """
    phonebooks = Phonebook.objects.filter(user=user).order_by('id')
    return [
        (phonebook.id,
         phonebook.name + " -> %d contact(s)" % phonebook.phonebook_contacts())
        for phonebook in phonebooks
    ]
def check_dialer_setting(request, check_for, field_value=''):
    """Check Dialer Setting Limitation for the logged-in user.

    **Attribute**

        * ``check_for`` - which limit to test: "campaign", "contact",
          "frequency", "duration", "retry", "timeout" or "subscriber".
        * ``field_value`` - candidate value compared against the limit for
          the value-based checks (frequency/duration/retry/timeout).

    Returns True when the limit is matched or exceeded, False when it is
    not or when the user has no dialer setting.  NOTE(review): an unknown
    ``check_for`` (or a falsy dialer setting) falls through and returns
    None, which callers treat as falsy — preserved for compatibility.
    """
    try:
        # DialerSettings is linked with the User via the user profile.
        dialer_set_obj = request.user.userprofile.dialersetting
        if dialer_set_obj:
            # check running campaign for User
            if check_for == "campaign":
                # Total campaigns compared with max_cpg
                if Campaign.objects.filter(user=request.user).count() >= dialer_set_obj.max_cpg:
                    # Limit matched or exceeded
                    return True
                else:
                    # Limit not matched
                    return False
            # check contacts limit
            if check_for == "contact":
                # total contacts compared with max_contact
                if Contact.objects.filter(phonebook__user=request.user).count() >= dialer_set_obj.max_contact:
                    # Limit matched or exceeded
                    return True
                # limit not matched
                return False
            # check frequency limit
            if check_for == "frequency":
                if field_value > dialer_set_obj.max_frequency:
                    return True
                return False
            # check call duration limit
            if check_for == "duration":
                if field_value > dialer_set_obj.callmaxduration:
                    return True
                return False
            # check call retry limit
            if check_for == "retry":
                if field_value > dialer_set_obj.maxretry:
                    return True
                return False
            # check call timeout limit
            if check_for == "timeout":
                if field_value > dialer_set_obj.max_calltimeout:
                    return True
                return False
            # check subscriber limit
            if check_for == "subscriber":
                if Subscriber.objects.filter(campaign__user=request.user).count() > dialer_set_obj.max_subr_cpg:
                    return True
                return False
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # propagate. Any lookup failure means the user has no DialerSettings
        # linked, which counts as "no limit reached".
        return False
def dialer_setting_limit(request, limit_for):
    """Return the logged-in user's Dialer Setting limit as a string.

    ``limit_for`` selects the limit, e.g.::

        max_subr_cpg, max_cpg, max_contact, max_frequency,
        max_calltimeout, maxretry, callmaxduration, smscampaign

    Returns False when the user has no dialer setting.  NOTE(review): an
    unknown ``limit_for`` returns None — both falsy, preserved for
    compatibility with existing callers.
    """
    try:
        # DialerSettings is linked with the User via the user profile.
        dialer_set_obj = request.user.userprofile.dialersetting
        if limit_for == "contact":
            return str(dialer_set_obj.max_contact)
        if limit_for == "subscriber":
            return str(dialer_set_obj.max_subr_cpg)
        if limit_for == "campaign":
            return str(dialer_set_obj.max_cpg)
        if limit_for == "frequency":
            return str(dialer_set_obj.max_frequency)
        if limit_for == "duration":
            return str(dialer_set_obj.callmaxduration)
        if limit_for == "retry":
            return str(dialer_set_obj.maxretry)
        if limit_for == "timeout":
            return str(dialer_set_obj.max_calltimeout)
        if limit_for == "smscampaign":
            return str(dialer_set_obj.sms_max_number_campaign)
        if limit_for == "smsfrequency":
            return str(dialer_set_obj.sms_max_frequency)
        if limit_for == "smsmaxretry":
            return str(dialer_set_obj.sms_maxretry)
        if limit_for == "smssubscriber":
            return str(dialer_set_obj.sms_max_number_subscriber_campaign)
    except Exception:
        # Was a bare `except:`; narrowed to Exception. A failed lookup
        # means no DialerSettings is linked to the user.
        return False
def date_range(start, end, q):
    """Return the list of interval start points between *start* and *end*.

    ``q`` selects the granularity: int(q) <= 2 produces one entry per day,
    int(q) >= 3 one entry per hour (both inclusive of *end*'s date).

    >>> from datetime import datetime
    >>> from django.utils.timezone import utc
    >>> s_date = datetime(2012, 7, 11, 0, 0, 0, 0).replace(tzinfo=utc)
    >>> e_date = datetime(2012, 7, 12, 23, 59, 59, 99999).replace(tzinfo=utc)
    >>> date_range(s_date, e_date, 2)
    [datetime.datetime(2012, 7, 11, 0, 0), datetime.datetime(2012, 7, 12, 0, 0)]
    """
    # int(q) is always either <= 2 or >= 3, so these two branches are
    # exhaustive; the original trailing day-step fallback (and the unused
    # day-count computation feeding it) was unreachable and has been removed.
    if int(q) <= 2:
        return list(rrule(DAILY, dtstart=parse(str(start)), until=parse(str(end))))
    return list(rrule(HOURLY, interval=1, dtstart=parse(str(start)), until=parse(str(end))))
def user_dialer_setting(user):
    """Get the Dialer setting for *user*.

    Returns the DialerSetting object, or an empty list when the user has
    no profile/setting.  NOTE(review): the empty-list fallback (rather
    than None) is kept because callers only test the result for truthiness.
    """
    try:
        return UserProfile.objects.get(user=user).dialersetting
    except Exception:
        # Was a bare `except:`; narrowed to Exception. No profile or no
        # dialer setting linked to the user.
        return []
def user_dialer_setting_msg(user):
    """Return a warning message when *user* has no dialer setting, else ''."""
    if user_dialer_setting(user):
        return ''
    return _('your settings are not configured properly, please contact the administrator.')
def get_subscriber_status(value):
    """Get subscriber status name for *value* from SUBSCRIBER_STATUS."""
    return get_status_value(value, SUBSCRIBER_STATUS)
def get_subscriber_disposition(campaign_id, val):
    """Get a subscriber disposition name from a campaign's lead_disposition.

    ``lead_disposition`` is a comma-separated string; ``val`` is the
    1-based position of the wanted entry. Returns '-' when the campaign
    is missing or ``val`` is out of range.
    """
    try:
        dsp_array = Campaign.objects.get(pk=campaign_id).lead_disposition.split(',')
        # Build a 1-based position -> stripped label mapping.
        dsp_dict = dict(enumerate((item.strip() for item in dsp_array), 1))
        return dsp_dict[val]
    except Exception:
        # Was a bare `except:`; narrowed to Exception. Missing campaign,
        # empty disposition string or out-of-range index all yield '-'.
        return '-'
|
tarikgwa/nfd
|
newfies/dialer_campaign/function_def.py
|
Python
|
mpl-2.0
| 7,128
|
""" Sahana Eden Automated Test - HRM001 Create Staff
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
import unittest
from tests.web2unittest import SeleniumUnitTest
from selenium.common.exceptions import NoSuchElementException
from s3 import s3_debug
from tests import *
#import unittest, re, time
import time
class CreateStaff(SeleniumUnitTest):
    """Selenium regression test that creates a staff member via hrm/staff/create."""
    def test_hrm001_create_staff(self):
        """
        @case: HRM001
        @description: Create a Staff

        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
        """
        print "\n"
        # Log in as admin and land directly on the staff-creation form.
        self.login(account="admin", nexturl="hrm/staff/create")
        # Each tuple is (field, value, widget/table hint) consumed by
        # SeleniumUnitTest.create(); "pr_person" fields belong to the
        # embedded person sub-form.
        self.create("hrm_human_resource",
                    [( "organisation_id",
                       "Acme Suppliers",
                       "autocomplete"),
                     ( "first_name",
                       "Robert",
                       "pr_person"),
                     ( "middle_name",
                       "James",
                       "pr_person"),
                     ( "last_name",
                       "Lemon",
                       "pr_person"),
                     ( "email",
                       "rjltestdonotusetest99@romanian.com",
                       "pr_person"),
                     ( "job_role_id",
                       "Administrative Officer",
                       "option"),
                     ( "site_id",
                       "AP Zone",
                       "autocomplete"),
                     ]
                    )
|
anubhav929/eden
|
modules/tests/hrm/create_staff.py
|
Python
|
mit
| 2,825
|
import types
import vcr
from nose.tools import * # flake8: noqa
# Comment line below prevents unittest from deletion in import optimization
# noinspection PyUnresolvedReferences
import unittest
from osf_api_v2_client.utils import (
DotNotator,
StatusCode400orGreaterError
)
from osf_api_v2_client.session import Session
from osf_api_v2_client.settings.local import (
URL, # e.g. 'https://staging2-api.osf.io/v2/'
AUTH1, # authentication details for USER1
AUTH2, # authentication details for USER2
PUBLIC_NODE_ID, # id of a public node
PRIVATE_NODE_ID # id of a private node that is visible to USER1 but
# *not* to USER2
)
# Sessions with different forms of authentication:

# A session authenticated by the user who created the node
# with PRIVATE_NODE_ID
SESSION_AUTH1 = Session(root_url=URL, auth=AUTH1)
# A session authenticated by a user who does NOT have access
# to the node with PRIVATE_NODE_ID
SESSION_AUTH2 = Session(root_url=URL, auth=AUTH2)
# A session that is not authenticated
SESSION_NO_AUTH = Session(root_url=URL)

# Directory prefix under which each test class stores its VCR cassettes.
VCR_CASSETTE_PREFIX = 'fixtures/vcr_cassettes/test_nodes/'
# 'new_episodes' replays recorded HTTP interactions and records any
# request not already present in the cassette.
VCR_RECORD_MODE = 'new_episodes'

# TODO once functionality exists to create public nodes and edit
# a node's private/public setting, add tests for this functionality
# under TestCreateNodes and TestEditNodes.
class TestGetNodes(unittest.TestCase):
    """Node retrieval under each authentication level (contrib / non-contrib / anonymous)."""

    get_nodes_vcr = vcr.VCR(
        cassette_library_dir='{}test_get_nodes'.format(VCR_CASSETTE_PREFIX),
        record_mode=VCR_RECORD_MODE
    )

    @get_nodes_vcr.use_cassette()
    def test_get_node_generator(self):
        """get_node_generator yields exactly num_requested DotNotator nodes."""
        node_generator = SESSION_AUTH1.get_node_generator(num_requested=25)
        assert_true(isinstance(node_generator, types.GeneratorType))
        node_list = []  # Create a list with the nodes in it
        for node in node_generator:
            node_list.append(node)
        assert_equal(len(node_list), 25)
        assert_true(isinstance(node_list[0], DotNotator))

    @get_nodes_vcr.use_cassette()
    def test_get_public_node_auth_contrib(self):
        """A public node is visible to its contributor."""
        public_node = SESSION_AUTH1.get_node(PUBLIC_NODE_ID)
        assert_true(isinstance(public_node, DotNotator))

    @get_nodes_vcr.use_cassette()
    def test_get_public_node_auth_non_contrib(self):
        """A public node is visible to an authenticated non-contributor."""
        public_node = SESSION_AUTH2.get_node(PUBLIC_NODE_ID)
        assert_true(isinstance(public_node, DotNotator))

    @get_nodes_vcr.use_cassette()
    def test_get_public_node_no_auth(self):
        """A public node is visible without authentication."""
        public_node = SESSION_NO_AUTH.get_node(PUBLIC_NODE_ID)
        assert_true(isinstance(public_node, DotNotator))

    @get_nodes_vcr.use_cassette()
    def test_get_private_node_auth_contrib(self):
        """
        The node with PRIVATE_NODE_ID is one created by USER1,
        so it should be visible to USER1.
        """
        private_node = SESSION_AUTH1.get_node(PRIVATE_NODE_ID)
        assert_true(isinstance(private_node, DotNotator))

    @get_nodes_vcr.use_cassette()
    def test_get_private_node_auth_non_contrib(self):
        """
        USER2 is not a contributor to the node with PRIVATE_NODE_ID,
        so it should not be visible.
        """
        with assert_raises(StatusCode400orGreaterError):
            SESSION_AUTH2.get_node(PRIVATE_NODE_ID)

    @get_nodes_vcr.use_cassette()
    def test_get_private_node_no_auth(self):
        """
        Unauthenticated user should not be able to view any
        private node.
        """
        with assert_raises(StatusCode400orGreaterError):
            SESSION_NO_AUTH.get_node(PRIVATE_NODE_ID)
class TestCreateNodes(unittest.TestCase):
    """Node creation: authenticated users may create; anonymous may not."""

    create_nodes_vcr = vcr.VCR(
        cassette_library_dir='{}test_create_nodes'.format(VCR_CASSETTE_PREFIX),
        record_mode=VCR_RECORD_MODE
    )

    @create_nodes_vcr.use_cassette()
    def test_create_private_node_all_params(self):
        """Creating with title, category and description echoes all three back."""
        new_private_node = SESSION_AUTH1.create_node(
            "Private node created with client library", category="",
            description="Hello world!"
        )
        assert_true(isinstance(new_private_node, DotNotator))
        assert_equal(new_private_node.attributes.title,
                     "Private node created with client library")
        assert_equal(new_private_node.attributes.category, "")
        assert_equal(new_private_node.attributes.description, "Hello world!")

    @create_nodes_vcr.use_cassette()
    def test_create_private_nodes_title_param_only(self):
        """Creating with only a title leaves category and description empty."""
        new_private_node = SESSION_AUTH1.create_node(
            "Private node 2 created with client library"
        )
        assert_true(isinstance(new_private_node, DotNotator))
        assert_equal(new_private_node.attributes.title,
                     "Private node 2 created with client library")
        assert_equal(new_private_node.attributes.category, "")
        assert_equal(new_private_node.attributes.description, "")

    @create_nodes_vcr.use_cassette()
    def test_create_private_node_no_auth(self):
        """
        Should not work, because users must be authenticated
        in order to create nodes.
        """
        with assert_raises(StatusCode400orGreaterError):
            new_private_node = SESSION_NO_AUTH.create_node(
                "Private node 3 created with client library"
            )
class TestEditNodes(unittest.TestCase):
    """Node editing: only contributors may edit public or private nodes."""

    edit_nodes_vcr = vcr.VCR(
        cassette_library_dir='{}test_edit_nodes'.format(VCR_CASSETTE_PREFIX),
        record_mode=VCR_RECORD_MODE
    )

    @edit_nodes_vcr.use_cassette()
    def setUp(self):
        # Reset both fixture nodes to known titles/descriptions so each
        # test starts from the same state.
        # TODO this setUp is currently dependent on edit_node() working.
        # How can we make it independent?
        SESSION_AUTH1.edit_node(
            PUBLIC_NODE_ID,
            title="Original public node title",
            description="Original public node description",
            category=""
        )
        SESSION_AUTH1.edit_node(
            PRIVATE_NODE_ID,
            title="Original private node title",
            description="Original private node description",
            category=""
        )

    @edit_nodes_vcr.use_cassette()
    def test_edit_public_node_auth_contrib(self):
        """
        The node with PUBLIC_NODE_ID was created by USER1,
        so it should be editable by USER1.
        """
        edited_public_node = SESSION_AUTH1.edit_node(
            PUBLIC_NODE_ID,
            title="User1's new title",
            description="User1's new description",
            category="data"
        )
        assert_true(isinstance(edited_public_node, DotNotator))
        assert_equal(edited_public_node.attributes.title,
                     "User1's new title")
        assert_equal(edited_public_node.attributes.description,
                     "User1's new description")
        assert_equal(edited_public_node.attributes.category,
                     "data")

    @edit_nodes_vcr.use_cassette()
    def test_edit_public_node_auth_non_contrib(self):
        """
        USER2 is not a contributor to the node with PUBLIC_NODE_ID,
        so it should not be editable by USER2.
        """
        with assert_raises(StatusCode400orGreaterError):
            edited_public_node = SESSION_AUTH2.edit_node(
                PUBLIC_NODE_ID,
                title="User2's new title",
                description="User2's new description",
                category="data",
            )

    @edit_nodes_vcr.use_cassette()
    def test_edit_public_node_no_auth(self):
        """
        The node with PUBLIC_NODE_ID should be visible to
        a session with no authentication, but should not
        be editable by such a session.
        """
        with assert_raises(StatusCode400orGreaterError):
            edited_public_node = SESSION_NO_AUTH.edit_node(
                PUBLIC_NODE_ID,
                title="NoAuth's new title",
                description="NoAuth's new description",
                category="data",
            )

    @edit_nodes_vcr.use_cassette()
    def test_edit_private_node_auth_contributor(self):
        """
        The node with PRIVATE_NODE_ID was created by USER1,
        so it should be editable by USER1.
        """
        private_node = SESSION_AUTH1.edit_node(
            PRIVATE_NODE_ID,
            title="User1's new title",
            description="User1's new description",
            category="data",
        )
        assert_true(isinstance(private_node, DotNotator))
        assert_equal(private_node.attributes.title,
                     "User1's new title")
        assert_equal(private_node.attributes.description,
                     "User1's new description")
        assert_equal(private_node.attributes.category,
                     "data")

    @edit_nodes_vcr.use_cassette()
    def test_edit_private_node_auth_non_contributor(self):
        """
        USER2 is not a contributor to the node with PRIVATE_NODE_ID,
        so the node should not be visible.
        """
        with assert_raises(StatusCode400orGreaterError):
            private_node = SESSION_AUTH2.edit_node(
                PRIVATE_NODE_ID,
                title="User2's new title",
                description="User2's new description",
                category="data",
            )

    @edit_nodes_vcr.use_cassette()
    def test_edit_private_node_no_auth(self):
        """
        Unauthenticated user should not be able to view any
        private node.
        """
        with assert_raises(StatusCode400orGreaterError):
            private_node = SESSION_NO_AUTH.edit_node(
                PRIVATE_NODE_ID,
                title="NoAuth's new title",
                description="NoAuth's new description",
                category="data",
            )
class TestDeleteNodes(unittest.TestCase):
    """Node deletion: only contributors may delete; delete-then-get 404s."""

    delete_nodes_vcr = vcr.VCR(
        cassette_library_dir='{}test_delete_nodes'.format(VCR_CASSETTE_PREFIX),
        record_mode=VCR_RECORD_MODE
    )

    @delete_nodes_vcr.use_cassette()
    def setUp(self):
        # This is currently the id of a node that I created and made
        # public in the GUI, but to make the test self-contained:
        # TODO once it's possible to create public nodes, create
        # a public node with SESSION_AUTH1, recording its id as
        # self.public_node_id, and then delete it in test methods,
        # as done below with the private node.
        # TODO every time this vcr cassette is remade, a new public
        # node id (made by USER1) needs to be put here to be deleted:
        self.public_node_id = '9e2gz'
        # The private node, by contrast, is created fresh each run.
        private_node = SESSION_AUTH1.create_node('Private node to delete')
        self.private_node_id = private_node.id

    @delete_nodes_vcr.use_cassette()
    def test_delete_public_node_auth_non_contrib(self):
        """
        USER2 is not a contributor to the node with
        self.public_node_id, so it should not be deletable by USER2.
        """
        with assert_raises(StatusCode400orGreaterError):
            SESSION_AUTH2.delete_node(
                self.public_node_id
            )

    @delete_nodes_vcr.use_cassette()
    def test_delete_public_node_no_auth(self):
        """
        Unauthenticated user should not be able to delete any node.
        """
        with assert_raises(StatusCode400orGreaterError):
            SESSION_NO_AUTH.delete_node(
                self.public_node_id
            )

    @delete_nodes_vcr.use_cassette()
    def test_delete_public_node_auth_contrib(self):
        """
        The node with self.public_node_id was created by USER1,
        so it should be deletable by USER1.
        """
        SESSION_AUTH1.delete_node(
            self.public_node_id
        )
        # Once deleted, fetching the node must fail.
        with assert_raises(StatusCode400orGreaterError):
            SESSION_AUTH1.get_node(self.public_node_id)

    @delete_nodes_vcr.use_cassette()
    def test_delete_private_node_auth_non_contrib(self):
        """
        USER2 is not a contributor to the node with
        self.private_node_id, so the node should not be visible.
        """
        with assert_raises(StatusCode400orGreaterError):
            SESSION_AUTH2.delete_node(
                self.private_node_id
            )

    @delete_nodes_vcr.use_cassette()
    def test_delete_private_node_no_auth(self):
        """
        Unauthenticated user should not be able to delete any node.
        """
        with assert_raises(StatusCode400orGreaterError):
            SESSION_NO_AUTH.delete_node(
                self.private_node_id
            )

    @delete_nodes_vcr.use_cassette()
    def test_delete_private_node_auth_contrib(self):
        """
        The node with self.private_node_id was created by USER1,
        so it should be deletable by USER1.
        """
        SESSION_AUTH1.delete_node(
            self.private_node_id
        )
        # Once deleted, fetching the node must fail.
        with assert_raises(StatusCode400orGreaterError):
            SESSION_AUTH1.get_node(self.private_node_id)
|
jamiehand/osf_api_v2_client
|
tests/test_nodes.py
|
Python
|
apache-2.0
| 12,910
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for deserializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.core.framework import function_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as function_lib
from tensorflow.python.framework import func_graph as func_graph_lib
from tensorflow.python.framework import function_def_to_graph as function_def_lib
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_tensor(t):
    """Returns True if `t` is a Tensor or a resource-variable handle."""
    return isinstance(t, (ops.Tensor, resource_variable_ops.BaseResourceVariable))
# TODO(edloper): Update this to just use ConcreteFunction.__call__ with the
# structured signature.
def _call_concrete_function(function, inputs):
    """Calls a restored Function with structured inputs.

    This differs from `function.__call__` in that inputs and outputs are
    structured and that it casts inputs to tensors if needed.

    Note: this does not check that non-tensor inputs match. That should be
    done before via `_concrete_function_callable_with`.

    Args:
      function: ConcreteFunction to call.
      inputs: Structured inputs compatible with
        `function.graph.structured_input_signature`.

    Returns:
      The structured function output, or None when the call produces only
      an Operation (no tensor outputs).
    """
    expected_structure = function.graph.structured_input_signature
    flatten_inputs = nest.flatten_up_to(
        expected_structure, inputs, expand_composites=True)
    flatten_expected = nest.flatten(expected_structure, expand_composites=True)
    tensor_inputs = []
    for arg, expected in zip(flatten_inputs, flatten_expected):
        if isinstance(expected, tensor_spec.TensorSpec):
            # Only tensor-typed positions are converted and passed; other
            # positions were already matched against the signature.
            tensor_inputs.append(
                ops.convert_to_tensor(arg, dtype_hint=expected.dtype))
    result = function._call_flat(tensor_inputs, function._captured_inputs)  # pylint: disable=protected-access
    if isinstance(result, ops.Operation):
        return None
    return result
def _try_convert_to_tensor_spec(arg, dtype_hint):
    """Returns None or TensorSpec obtained if `arg` is converted to tensor.

    Returns None when `arg` is not convertible (TypeError/ValueError from
    the conversion).
    """
    try:
        # Note: try conversion in a FuncGraph to avoid polluting current context.
        with func_graph_lib.FuncGraph(name="guess_conversion").as_default():
            result = ops.convert_to_tensor(arg, dtype_hint=dtype_hint)
            return tensor_spec.TensorSpec(shape=result.shape, dtype=result.dtype)
    except (TypeError, ValueError):
        return None
def _concrete_function_callable_with(function, inputs, allow_conversion):
    """Returns whether concrete `function` can be called with `inputs`.

    Args:
      function: ConcreteFunction whose structured_input_signature is matched.
      inputs: Structured (args, kwargs) candidate inputs.
      allow_conversion: If True, non-tensor arguments may be converted to
        tensors when the signature expects a TensorSpec.
    """
    expected_structure = function.graph.structured_input_signature
    try:
        flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
    except (TypeError, ValueError):
        # Structure mismatch: not callable with these inputs.
        return False
    for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
        if isinstance(expected, tensor_spec.TensorSpec):
            if allow_conversion:
                arg = _try_convert_to_tensor_spec(arg, dtype_hint=expected.dtype)
            if not _is_tensor(arg) and not isinstance(arg, tensor_spec.TensorSpec):
                return False
            if arg.dtype != expected.dtype:
                return False
            if not expected.shape.is_compatible_with(arg.shape):
                return False
        elif isinstance(expected, type_spec.TypeSpec):
            if not expected.is_compatible_with(arg):
                return False
        elif _is_tensor(arg):
            # NOTE(review): identity comparison here — presumably matching a
            # captured tensor embedded in the signature; confirm upstream.
            if id(arg) != id(expected):
                return False
        else:
            # Plain Python value: must match by equality.
            if arg != expected:
                return False
    return True
def _deserialize_function_spec_as_nonmethod(function_spec_proto, coder):
    """Deserialize a FunctionSpec object from its proto representation.

    Method specs are converted to non-method form by dropping the leading
    'self' argument; the returned spec always has is_method=False.
    """
    typeless_fullargspec = coder.decode_proto(function_spec_proto.fullargspec)
    # Convert a method function into a non method.
    if function_spec_proto.is_method:
        if not typeless_fullargspec.args:
            raise NotImplementedError(
                "Missing support to deserialize a method function without a named "
                "'self' argument.")
        # Drop the bound 'self' parameter.
        args = typeless_fullargspec.args[1:]
    else:
        args = typeless_fullargspec.args
    # Rebuild the FullArgSpec with the (possibly trimmed) positional args.
    fullargspec = tf_inspect.FullArgSpec(
        args=args,
        varargs=typeless_fullargspec.varargs,
        varkw=typeless_fullargspec.varkw,
        defaults=typeless_fullargspec.defaults,
        kwonlyargs=typeless_fullargspec.kwonlyargs,
        kwonlydefaults=typeless_fullargspec.kwonlydefaults,
        annotations=typeless_fullargspec.annotations)
    input_signature = coder.decode_proto(function_spec_proto.input_signature)
    return function_lib.FunctionSpec(fullargspec=fullargspec,
                                     is_method=False,
                                     input_signature=input_signature)
# TODO(allenl): The fact that we can't derive ConcreteFunction calling
# conventions from the serialized input spec right now is unfortunate. Merging
# these would be good, maybe by adding TensorSpec names to cache keys so renamed
# keyword arguments would yield different ConcreteFunctions.
def setup_bare_concrete_function(saved_bare_concrete_function,
                                 concrete_functions):
    """Makes a restored bare concrete function callable.

    Args:
      saved_bare_concrete_function: SavedBareConcreteFunction proto carrying
        the function name, argument keywords and positional-arg count.
      concrete_functions: map from function name to ConcreteFunction.

    Returns:
      The ConcreteFunction, wired up and added to the default graph.
    """
    # Bare concrete functions accept only flat lists of Tensors with unique
    # names.
    concrete_function = concrete_functions[
        saved_bare_concrete_function.concrete_function_name]
    # pylint: disable=protected-access
    concrete_function._arg_keywords = (
        saved_bare_concrete_function.argument_keywords)
    concrete_function._num_positional_args = (
        saved_bare_concrete_function.allowed_positional_arguments)
    # pylint: enable=protected-access
    concrete_function.add_to_graph()
    return concrete_function
class RestoredFunction(def_function.Function):
    """Wrapper class for a function that has been restored from saved state.

    See `def_function.Function`.
    """
    def __init__(self, python_function, name, function_spec, concrete_functions):
        # `concrete_functions` are the pre-traced signatures restored from
        # the SavedModel; `function_spec` replaces the spec that tracing
        # would normally derive.
        # TODO(mdan): We may enable autograph once exceptions are supported.
        super(RestoredFunction, self).__init__(
            python_function, name, autograph=False)
        self.concrete_functions = concrete_functions
        self._function_spec = function_spec

    def _list_all_concrete_functions_for_serialization(self):
        """Returns the restored concrete functions for re-serialization."""
        return self.concrete_functions

    def _defun_with_scope(self, scope):
        """Re-wraps with a variable scope, preserving the restored spec."""
        func = super(RestoredFunction, self)._defun_with_scope(scope)
        func._function_spec = self._function_spec  # pylint: disable=protected-access
        return func
def recreate_function(saved_function, concrete_functions):
  """Creates a `Function` from a `SavedFunction`.
  Args:
    saved_function: `SavedFunction` proto.
    concrete_functions: map from function name to `ConcreteFunction`.
      As a side effect of this function, the `FunctionSpec` from
      `saved_function` is added to each `ConcreteFunction` in this map.
  Returns:
    A `Function`.
  """
  # TODO(andresp): Construct a `Function` with the cache populated
  # instead of creating a new `Function` backed by a Python layer to
  # glue things together. Current approach is nesting functions deeper for each
  # serialization cycle.
  coder = nested_structure_coder.StructureCoder()
  # Note: handling method functions is tricky since make_decorator does not
  # allows control of "ismethod". Additionally since restored functions do
  # not behave as methods i.e. they always use the same captured tensors
  # independent of the object they are bound to, there is little value on
  # propagating that correctly.
  #
  # Ideally this conversion should happen at serialization time. But since
  # there are SavedModels which have "ismethod" populated and have an extra
  # argument that they expect to be ignored, we do it at deserialization.
  function_spec = _deserialize_function_spec_as_nonmethod(
      saved_function.function_spec,
      coder)
  def restored_function_body(*args, **kwargs):
    """Calls a restored function."""
    # This is the format of function.graph.structured_input_signature. At this
    # point, the args and kwargs have already been canonicalized.
    inputs = (args, kwargs)
    # First try to find a concrete function that can be called without input
    # conversions. This allows one to pick a more specific trace in case there
    # was also a more expensive one that supported tensors.
    for allow_conversion in [False, True]:
      for function_name in saved_function.concrete_functions:
        function = concrete_functions[function_name]
        if _concrete_function_callable_with(function, inputs, allow_conversion):
          return _call_concrete_function(function, inputs)
    # No trace matched: build a readable error describing every available
    # signature so the caller can see why dispatch failed.
    signature_descriptions = []
    def _pretty_format_positional(positional):
      return "Positional arguments ({} total):\n * {}".format(
          len(positional), "\n * ".join(str(a) for a in positional))
    for index, function_name in enumerate(saved_function.concrete_functions):
      concrete_function = concrete_functions[function_name]
      positional, keyword = concrete_function.structured_input_signature
      signature_descriptions.append(
          "Option {}:\n {}\n Keyword arguments: {}"
          .format(index + 1, _pretty_format_positional(positional), keyword))
    raise ValueError(
        "Could not find matching function to call loaded from the SavedModel. "
        "Got:\n {}\n Keyword arguments: {}\n\nExpected "
        "these arguments to match one of the following {} option(s):\n\n{}"
        .format(_pretty_format_positional(args), kwargs,
                len(saved_function.concrete_functions),
                "\n\n".join(signature_descriptions)))
  # Collect the concrete traces in saved order and attach the (nonmethod)
  # FunctionSpec to each — documented side effect on `concrete_functions`.
  concrete_function_objects = []
  for concrete_function_name in saved_function.concrete_functions:
    concrete_function_objects.append(concrete_functions[concrete_function_name])
  for cf in concrete_function_objects:
    cf._set_function_spec(function_spec) # pylint: disable=protected-access
  restored_function = RestoredFunction(
      restored_function_body,
      restored_function_body.__name__,
      function_spec,
      concrete_function_objects)
  # make_decorator gives the returned object the saved argspec so
  # introspection (e.g. help()) reflects the original Python signature.
  return tf_decorator.make_decorator(
      restored_function_body,
      restored_function,
      decorator_argspec=function_spec.fullargspec)
def load_function_def_library(library, load_shared_name_suffix=None):
  """Load a set of functions as concrete functions without captured inputs.
  Functions names are manipulated during load such that they do not overlap
  with previously created ones.
  Args:
    library: FunctionDefLibrary proto message.
    load_shared_name_suffix: If specified, used to uniquify shared
      names. Otherwise, a unique name is generated.
  Returns:
    Map of original function names in the library to instances of
    `ConcreteFunction` without captured inputs.
  Raises:
    ValueError: if functions dependencies have a cycle.
  """
  library_function_names = set(fdef.signature.name for fdef in library.function)
  functions = {}
  renamed_functions = {}
  # Our graph building code currently requires functions to be registered with
  # some tf.Graph in order to import functions using the
  # op-name-is-function-name calling convention. To avoid leaking memory into
  # the global default graph when executing eagerly, we create a temporary
  # Graph.
  #
  # TODO(allenl): Make this Graph creation unnecessary when executing eagerly by
  # fixing function_def_to_graph_def.
  if ops.executing_eagerly_outside_functions():
    graph = ops.Graph()
  else:
    graph = ops.get_default_graph()
  if load_shared_name_suffix is None:
    load_shared_name_suffix = "_load_{}".format(ops.uid())
  # Topological order guarantees each function's dependencies are already in
  # `functions` when its definition is fixed up and imported.
  for fdef in _sort_function_defs(library, library_function_names):
    copy = _fix_fdef(fdef, functions, load_shared_name_suffix)
    # There is no need to copy all functions into the function def graph. It
    # leads to a O(n^2) increase of memory when importing functions and the
    # extra function definitions are a no-op since they already imported as a
    # function before and passed in explicitly (due to the topologic sort
    # import).
    with graph.as_default():
      func_graph = function_def_lib.function_def_to_graph(copy)
    _restore_gradient_functions(func_graph, renamed_functions)
    for dep in _list_function_deps(fdef, library_function_names):
      functions[dep].add_to_graph(func_graph)
    # We do not initialize the new ConcreteFunction's function_spec or
    # arg_keywords here (which are used to parse the structured and flat
    # signatures, respectively). function_spec is set up later by
    # recreate_function(); and arg_keywords by setup_bare_concrete_function().
    func = function_lib.ConcreteFunction(func_graph)
    func.add_to_graph(graph)
    # Index by both the original (saved) name and the renamed (loaded) name.
    functions[fdef.signature.name] = func
    renamed_functions[func.name] = func
    if any(op.type == "TRTEngineOp" for op in func_graph.get_operations()):
      # TODO(b/150708051): Remove this hack once TensorRT SavedModel integration
      # is fixed. Currently it's leaking memory to maintain bug compatibility
      # with previous behavior.
      func.add_to_graph(ops.get_default_graph())
  return functions
def _restore_gradient_functions(func_graph, renamed_functions):
  """Populate function op's _gradient_function with default gradient."""
  for op in func_graph.get_operations():
    # TODO(andresp): This code assumes that the gradient registered for this
    # function call is the default gradient for the function and not a custom
    # one.
    if op.type in ["StatefulPartitionedCall", "PartitionedCall"]:
      # The "f" attribute holds the (already renamed) callee; look it up by
      # its loaded name and attach that function's default gradient.
      function = renamed_functions[compat.as_bytes(
          op.node_def.attr["f"].func.name)]
      op._gradient_function = function._get_gradient_function() # pylint: disable=protected-access
def _sort_function_defs(library, library_function_names):
  """Return a topological sort of the FunctionDefs in a library.

  Args:
    library: FunctionDefLibrary proto message.
    library_function_names: Set of the function names defined in `library`,
      used to recognize which node ops are calls to library functions.

  Returns:
    List of FunctionDef protos ordered so that every function appears after
    all of the functions it depends on (Kahn's algorithm).

  Raises:
    ValueError: if the function dependency graph contains a cycle.
  """
  # edges[d] = names of functions that call d; in_count[f] = number of
  # not-yet-emitted dependencies of f.
  edges = collections.defaultdict(list)
  in_count = collections.defaultdict(lambda: 0)
  for fdef in library.function:
    for dep in _list_function_deps(fdef, library_function_names):
      edges[dep].append(fdef.signature.name)
      in_count[fdef.signature.name] += 1
  # Seed the worklist with functions that have no dependencies.
  ready = [
      fdef.signature.name
      for fdef in library.function
      if in_count[fdef.signature.name] == 0
  ]
  output = []
  while ready:
    node = ready.pop()
    output.append(node)
    for dest in edges[node]:
      in_count[dest] -= 1
      if not in_count[dest]:
        ready.append(dest)
  if len(output) != len(library.function):
    failed_to_resolve = sorted(set(in_count.keys()) - set(output))
    # Fix: the original passed TWO arguments to ValueError (a stray comma
    # instead of string concatenation), so the error rendered as a tuple of
    # strings. Build a single message instead.
    raise ValueError(
        "There is a cyclic-dependency between functions. "
        "Could not resolve %r." % (failed_to_resolve,))
  reverse = {fdef.signature.name: fdef for fdef in library.function}
  return [reverse[x] for x in output]
def fix_node_def(node_def, functions, shared_name_suffix, debug_name):
  """Replace functions calls and shared names in `node_def`."""
  # Custom gradients registered on plain ops cannot be re-associated after
  # load; warn so failures at gradient time are explicable.
  if ("_gradient_op_type" in node_def.attr and
      node_def.op not in ["StatefulPartitionedCall", "PartitionedCall"]):
    logging.warning(
        "Importing a function (%s) with ops with custom gradients. Will likely "
        "fail if a gradient is requested.", debug_name)
  # Rewrite every function reference — the op itself, func-valued attrs, and
  # lists of funcs — to the renamed (loaded) function names.
  if node_def.op in functions:
    node_def.op = functions[node_def.op].name
  for _, attr_value in node_def.attr.items():
    if attr_value.WhichOneof("value") == "func":
      attr_value.func.name = functions[attr_value.func.name].name
    elif attr_value.WhichOneof("value") == "list":
      for fn in attr_value.list.func:
        fn.name = functions[fn.name].name
  # Fix old table creation bug.
  if node_def.op == "HashTableV2":
    if ("use_node_name_sharing" not in node_def.attr or
        not node_def.attr["use_node_name_sharing"].b):
      node_def.attr["use_node_name_sharing"].b = True
      # We are turning on node mame sharing, so have to make sure we don't
      # accidentally share a table resource.
      shared_name_suffix += "_{}".format(ops.uid())
  # TODO(b/124205571): Avoid accidental sharing and destruction of restored
  # resources. For now uniquify "shared_name" when loading functions to avoid
  # sharing.
  # TODO: Add regression test for b/150826922.
  op_def = op_def_registry.get(node_def.op)
  if op_def:
    attr = next((a for a in op_def.attr if a.name == "shared_name"), None)
    if attr:
      # Resolution order for the base shared name: explicit attr value,
      # then the op's declared default, then the node name itself.
      shared_name = None
      if "shared_name" in node_def.attr and node_def.attr["shared_name"].s:
        shared_name = node_def.attr["shared_name"].s
      elif attr.default_value.s:
        shared_name = compat.as_bytes(attr.default_value.s)
      if not shared_name:
        shared_name = compat.as_bytes(node_def.name)
      node_def.attr["shared_name"].s = (
          shared_name + compat.as_bytes(shared_name_suffix))
def _fix_fdef(orig_fdef, functions, shared_name_suffix):
  """Fixes a FunctionDef proto to be loaded in current context.
  In particular, when loading a function library into an eager context, one
  must rename the functions to avoid conflicts with existent functions.
  Args:
    orig_fdef: FunctionDef proto to fix. It is not modified.
    functions: map from function name to a ConcreteFunction instance.
    shared_name_suffix: A unique string for this load which helps to avoid
      `shared_name` collisions across loads. Two functions from the same load
      using the same `shared_name` still need to share, but functions from
      different loads with the same `shared_name` should not.
  Returns:
    A fixed copy of the original FunctionDef.
  """
  # Work on a deep copy so the caller's proto stays untouched.
  fdef = function_pb2.FunctionDef()
  fdef.CopyFrom(orig_fdef)
  for node_def in fdef.node_def:
    fix_node_def(node_def, functions, shared_name_suffix, fdef.signature.name)
  # Strip the "__inference_..._N" wrapper so the loaded name stays readable.
  fdef.signature.name = _clean_function_name(fdef.signature.name)
  return fdef
def _list_function_deps(fdef, library_function_names):
"""Find functions referenced in `fdef`."""
# TODO(andresp): Recurse into list attributes and into NameAttrList attrs both
# when listing deps and when fixing them. `function_def_to_graph` also
# requires fixes.
deps = set()
for node_def in fdef.node_def:
if node_def.op in library_function_names:
deps.add(node_def.op)
else:
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
deps.add(attr_value.func.name)
elif attr_value.WhichOneof("value") == "list":
for fn in attr_value.list.func:
deps.add(fn.name)
return deps
_FUNCTION_WRAPPER_NAME_REGEX = r"^%s(.*)_\d+$" % (function_lib._INFERENCE_PREFIX
) # pylint:disable=protected-access
def _clean_function_name(name):
  """Vanity function to keep the function names comprehensible."""
  # Note: each time a function is wrapped into `function_lib.ConcreteFunction`
  # its name becomes "__inference_<orig>_xyz".
  # Strip that wrapper if present; otherwise return the name unchanged.
  match = re.search(_FUNCTION_WRAPPER_NAME_REGEX, name)
  if match:
    return match.group(1)
  else:
    return name
|
aldian/tensorflow
|
tensorflow/python/saved_model/function_deserialization.py
|
Python
|
apache-2.0
| 20,230
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example populates a specific first party audience segment.
To determine which first party audience segments exist, run
get_first_party_audience_segments.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
AUDIENCE_SEGMENT_ID = 'INSERT_AUDIENCE_SEGMENT_ID_HERE'
def main(client, audience_segment_id):
  """Populate the given first-party audience segment.

  Args:
    client: an initialized ad_manager.AdManagerClient.
    audience_segment_id: ID of the FIRST_PARTY audience segment to populate.
  """
  # Initialize appropriate service.
  audience_segment_service = client.GetService(
      'AudienceSegmentService', version='v202202')

  # Statement selecting only the requested first-party segment.
  statement = (ad_manager.StatementBuilder(version='v202202')
               .Where('Type = :type AND Id = :audience_segment_id')
               .WithBindVariable('audience_segment_id',
                                 int(audience_segment_id))
               .WithBindVariable('type', 'FIRST_PARTY')
               .Limit(1))

  response = audience_segment_service.getAudienceSegmentsByStatement(
      statement.ToStatement())

  # Guard clause: nothing matched the statement.
  if not ('results' in response and len(response['results'])):
    print('No Results Found')
    return

  for segment in response['results']:
    print(
        'Audience segment with id "%s" and name "%s" will be populated.'
        % (segment['id'], segment['name']))

  # Apply the populate action to everything selected by the same statement.
  action = {
      'xsi_type': 'PopulateAudienceSegments'
  }
  populated_audience_segments = (
      audience_segment_service.performAudienceSegmentAction(
          action, statement.ToStatement()))
  print('%s audience segment populated' %
        populated_audience_segments['numChanges'])
if __name__ == '__main__':
  # Initialize client object.
  # Credentials and network settings are read from the default
  # googleads storage file (googleads.yaml).
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, AUDIENCE_SEGMENT_ID)
|
googleads/googleads-python-lib
|
examples/ad_manager/v202202/audience_segment_service/populate_first_party_audience_segments.py
|
Python
|
apache-2.0
| 2,404
|
#!/usr/bin/env python
import locale
locale.setlocale(locale.LC_ALL, 'C')
import sys, tempfile, os, shutil, imp, time
import unittest, subprocess
import logging
import warnings
from xml.dom import minidom
if sys.version_info[0] > 2:
from io import StringIO, BytesIO
else:
from StringIO import StringIO
BytesIO = StringIO
warnings.filterwarnings("ignore", message = 'The CObject type')
# Catch silly mistakes...
os.environ['HOME'] = '/home/idontexist'
os.environ['LANGUAGE'] = 'C'
os.environ['LANG'] = 'C'
sys.path.insert(0, '..')
from zeroinstall.injector import qdom, background, namespaces
from zeroinstall.injector import iface_cache, download, distro, model, handler, reader, trust
from zeroinstall.zerostore import NotStored, Store, Stores; Store._add_with_helper = lambda *unused, **kwargs: False
from zeroinstall import support, cmd
from zeroinstall.support import basedir, tasks
def skipIf(condition, reason):
	"""Decorator factory: neutralise a test method when `condition` holds.

	When skipping, print which test was skipped and why, and substitute a
	no-op; otherwise hand back the underlying function untouched.
	"""
	def wrapped(underlying):
		if not condition:
			return underlying
		# Python 2 functions expose func_name; Python 3 uses __name__.
		if hasattr(underlying, 'func_name'):
			label = underlying.func_name
		else:
			label = underlying.__name__
		print("Skipped %s: %s" % (label, reason))
		def run(self): pass
		return run
	return wrapped
class BackgroundException(Exception):
	"""Raised instead of actually daemonizing during tests."""

def throw_background():
	"""Stub installed over background._detach: tests must never fork."""
	raise BackgroundException("Tried to spawn background process")
dpkgdir = os.path.join(os.path.dirname(__file__), 'dpkg')
empty_feed = qdom.parse(BytesIO(b"""<interface xmlns='http://zero-install.sourceforge.net/2004/injector/interface'>
<name>Empty</name>
<summary>just for testing</summary>
</interface>"""))
import my_dbus
sys.modules['dbus'] = my_dbus
sys.modules['dbus.glib'] = my_dbus
my_dbus.types = my_dbus
sys.modules['dbus.types'] = my_dbus
sys.modules['dbus.mainloop'] = my_dbus
sys.modules['dbus.mainloop.glib'] = my_dbus
mydir = os.path.dirname(__file__)
ocaml_0install = os.path.join(mydir, '..', 'build', 'ocaml', '0install')
class ExecMan(Exception):
	"""Raised by the patched os.execvp when a test tries to run 'man'.

	Stores the argv that would have been executed in `man_args` so tests
	can assert on it.
	"""
	def __init__(self, args):
		self.man_args = args
		super(ExecMan, self).__init__('ExecMan')
# Catch us trying to run the GUI and return a dummy string instead
old_execvp = os.execvp
def test_execvp(prog, args):
	# Redirect attempts to launch the real GUI to the dummy test GUI script.
	if prog == sys.executable and args[1].endswith('/0launch-gui'):
		prog = os.path.join(mydir, 'test-gui')
	# Running 'man' becomes an exception so tests can inspect the argv.
	if prog == 'man':
		raise ExecMan(args)
	# Anything else executes for real.
	return old_execvp(prog, args)
os.execvp = test_execvp
test_locale = (None, None)
assert model.locale
class TestLocale:
	"""Stand-in for the locale module; reports the module-level `test_locale`."""
	LC_ALL = 'LC_ALL' # Note: LC_MESSAGES not present on Windows
	def getlocale(self, x = None):
		# Callers must ask for a specific category, never LC_ALL.
		assert x is not TestLocale.LC_ALL
		return test_locale
class DummyPackageKit:
	"""PackageKit stub: reports itself unavailable and offers no candidates."""
	available = False

	def get_candidates(self, package, factory, prefix):
		# No PackageKit present, so never supply any package candidates.
		return None
class DummyHandler(handler.Handler):
	"""Handler that records a single reported error and re-raises it on wait.

	Downloads are disabled by default; tests opt in via `allow_downloads`.
	"""
	__slots__ = ['ex', 'tb', 'allow_downloads']
	def __init__(self):
		handler.Handler.__init__(self)
		self.ex = None
		self.allow_downloads = False
	def wait_for_blocker(self, blocker):
		# Clear any stale error, wait, then surface a newly reported error with
		# its original traceback so the failure points at the real cause.
		self.ex = None
		handler.Handler.wait_for_blocker(self, blocker)
		if self.ex:
			support.raise_with_traceback(self.ex, self.tb)
	def report_error(self, ex, tb = None):
		# At most one error may be outstanding at a time.
		assert self.ex is None, self.ex
		self.ex = ex
		self.tb = tb
		#import traceback
		#traceback.print_exc()
class DummyKeyInfo:
	"""Fake GPG key-info lookup result: every key gets a single 'bad' vote."""
	def __init__(self, fpr):
		self.blocker = None
		self.fpr = fpr
		# One parsed XML vote element marking this key as bad.
		self.info = [minidom.parseString('<item vote="bad"/>')]
class TestFetcher:
	"""Fetcher stub: only downloads explicitly whitelisted digests/feeds.

	Tests declare what may be fetched via allow_download()/allow_feed_download();
	anything else trips an assertion.
	"""
	def __init__(self, config):
		self.allowed_downloads = set()
		self.allowed_feed_downloads = {}
		self.config = config
	def allow_download(self, digest):
		# Fake downloads only make sense with the in-memory TestStores.
		assert isinstance(self.config.stores, TestStores)
		self.allowed_downloads.add(digest)
	def allow_feed_download(self, url, feed_xml):
		assert isinstance(feed_xml, support.basestring), feed_xml
		self.allowed_feed_downloads[url] = feed_xml
	def download_impls(self, impls, stores):
		# Each allowance is one-shot: consumed as the impl is "downloaded".
		@tasks.async
		def fake_download():
			yield
			for impl in impls:
				assert impl.id in self.allowed_downloads, impl
				self.allowed_downloads.remove(impl.id)
				self.config.stores.add_fake(impl.id)
		return fake_download()
	def download_and_import_feed(self, feed_url, iface_cache, force = False):
		@tasks.async
		def fake_download():
			yield
			feed_xml = self.allowed_feed_downloads.get(feed_url, None)
			assert feed_xml, feed_url
			if not isinstance(feed_xml, bytes):
				feed_xml = feed_xml.encode('utf-8')
			# Import into the cache as a real network fetch would.
			self.config.iface_cache.update_feed_from_network(feed_url, feed_xml, int(time.time()))
			del self.allowed_feed_downloads[feed_url]
		return fake_download()
	def fetch_key_info(self, fingerprint):
		return DummyKeyInfo(fingerprint)
class TestStores:
	"""In-memory implementation store: digests live in a plain set."""
	def __init__(self):
		self.fake_impls = set()

	def add_fake(self, digest):
		"""Pretend an implementation with this digest has been stored."""
		self.fake_impls.add(digest)

	def lookup_maybe(self, digests):
		"""Return a fake path for the first known digest, or None."""
		for candidate in digests:
			if candidate in self.fake_impls:
				return '/fake_store/' + candidate
		return None

	def lookup_any(self, digests):
		"""Like lookup_maybe(), but raise NotStored when nothing matches."""
		path = self.lookup_maybe(digests)
		if path is None:
			raise NotStored()
		return path
class TestConfig:
	"""Minimal Config substitute wiring the test doubles together."""
	# Class-level defaults mirroring the fields tests read off a real Config.
	freshness = 0
	help_with_testing = False
	network_use = model.network_full
	key_info_server = None
	auto_approve_keys = False
	mirror = None
	def __init__(self):
		self.iface_cache = iface_cache.IfaceCache()
		self.handler = DummyHandler()
		self.stores = Stores()
		self.fetcher = TestFetcher(self)
		# The real (module-level) trust database is shared deliberately;
		# BaseTest.setUp resets its state between tests.
		self.trust_db = trust.trust_db
		self.trust_mgr = trust.TrustMgr(self)
class BaseTest(unittest.TestCase):
	"""Common fixture: sandboxed XDG/GNUPG dirs, stub distro, dummy handler."""
	def setUp(self):
		# Never let tests spawn real background processes.
		background._detach = throw_background
		warnings.resetwarnings()
		# Fresh throw-away directories for every XDG location.
		self.config_home = tempfile.mktemp()
		self.cache_home = tempfile.mktemp()
		self.cache_system = tempfile.mktemp()
		self.data_home = tempfile.mktemp()
		self.gnupg_home = tempfile.mktemp()
		os.environ['GNUPGHOME'] = self.gnupg_home
		os.environ['XDG_CONFIG_HOME'] = self.config_home
		os.environ['XDG_CONFIG_DIRS'] = ''
		os.environ['XDG_CACHE_HOME'] = self.cache_home
		os.environ['XDG_CACHE_DIRS'] = self.cache_system
		os.environ['XDG_DATA_HOME'] = self.data_home
		os.environ['XDG_DATA_DIRS'] = ''
		if 'ZEROINSTALL_PORTABLE_BASE' in os.environ:
			del os.environ['ZEROINSTALL_PORTABLE_BASE']
		# basedir caches the XDG paths at import time; reload to pick up ours.
		imp.reload(basedir)
		assert basedir.xdg_config_home == self.config_home
		os.mkdir(self.config_home, 0o700)
		os.mkdir(self.cache_home, 0o700)
		# System cache is read-only (0o500) to mimic a real system dir.
		os.mkdir(self.cache_system, 0o500)
		os.mkdir(self.gnupg_home, 0o700)
		if 'DISPLAY' in os.environ:
			del os.environ['DISPLAY']
		self.config = TestConfig()
		iface_cache.iface_cache = self.config.iface_cache
		logging.getLogger().setLevel(logging.WARN)
		download._downloads = {}
		self.old_path = os.environ['PATH']
		os.environ['PATH'] = self.config_home + ':' + dpkgdir + ':' + self.old_path
		# Pretend the host is Debian, backed by the checked-in dpkg status file.
		distro._host_distribution = distro.DebianDistribution(dpkgdir + '/status')
		distro._host_distribution._packagekit = DummyPackageKit()
		my_dbus.system_services = {}
		# Reset the shared trust database between tests.
		trust.trust_db.watchers = []
		trust.trust_db.keys = None
		trust.trust_db._dry_run = False
	def tearDown(self):
		# Surface any error the DummyHandler captured during the test.
		if self.config.handler.ex:
			support.raise_with_traceback(self.config.handler.ex, self.config.handler.tb)
		shutil.rmtree(self.config_home)
		support.ro_rmtree(self.cache_home)
		shutil.rmtree(self.cache_system)
		shutil.rmtree(self.gnupg_home)
		os.environ['PATH'] = self.old_path
	def run_ocaml(self, args, stdin = None, stderr = subprocess.PIPE, binary = False):
		# Run the OCaml 0install binary; returns (stdout, stderr). Any non-zero
		# exit status is appended to stderr so callers can assert on it.
		child = subprocess.Popen([ocaml_0install] + args,
				stdin = subprocess.PIPE if stdin is not None else None,
				stdout = subprocess.PIPE, stderr = stderr, universal_newlines = not binary)
		out, err = child.communicate(stdin)
		status = child.wait()
		if status:
			msg = "Exit status: %d\n" % status
			if binary:
				msg = msg.encode('utf-8')
			err += msg
		return out, err
	def import_feed(self, url, contents):
		"""contents can be a path or an Element."""
		iface_cache = self.config.iface_cache
		iface_cache.get_interface(url)
		if isinstance(contents, qdom.Element):
			feed = model.ZeroInstallFeed(contents)
		else:
			feed = reader.load_feed(contents)
		iface_cache._feeds[url] = feed
		# Also write the XML into the on-disk cache, as a real fetch would.
		xml = qdom.to_UTF8(feed.feed_element)
		upstream_dir = basedir.save_cache_path(namespaces.config_site, 'interfaces')
		cached = os.path.join(upstream_dir, model.escape(url))
		with open(cached, 'wb') as stream:
			stream.write(xml)
		return feed
	def run_0install(self, args):
		# Run the Python `0install` command in-process with stdout/stderr
		# captured; returns (stdout, stderr). Programming-error exception
		# types are re-raised so they fail the test loudly.
		old_stdout = sys.stdout
		old_stderr = sys.stderr
		try:
			sys.stdout = StringIO()
			sys.stderr = StringIO()
			ex = None
			try:
				cmd.main(args, config = self.config)
			except NameError:
				raise
			except SystemExit:
				pass
			except TypeError:
				raise
			except AttributeError:
				raise
			except AssertionError:
				raise
			except ValueError:
				raise
			except Exception as ex2:
				ex = ex2	# Python 3
				raise
			out = sys.stdout.getvalue()
			err = sys.stderr.getvalue()
			if ex is not None:
				err += str(ex.__class__)
		finally:
			sys.stdout = old_stdout
			sys.stderr = old_stderr
		return (out, err)
|
linuxmidhun/0install
|
tests/basetest.py
|
Python
|
lgpl-2.1
| 8,927
|
__author__ = "CHANN"
__email__ = "chann@chann.kr"
__version__ = "0.0.0"
import os
import sys
import re
from flask import Flask
from flask import render_template
from flask import url_for
from flask import redirect
from flask import request
from flask import flash
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from app.forms import SignUpForm
from app import init
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'signin'
from app import models
from app import views
|
channprj/uptime-robot
|
app/__init__.py
|
Python
|
mit
| 583
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
from argparse import ArgumentParser
from vocab import Vocab
from lmdataset import LMDataset
from lm import NeuralLM
from deepy.dataset import SequentialMiniBatches
from deepy.trainers import SGDTrainer, LearningRateAnnealer
from deepy.layers import LSTM, Dense
logging.basicConfig(level=logging.INFO)
resource_dir = os.path.abspath(os.path.dirname(__file__)) + os.sep + "resources"
vocab_path = os.path.join(resource_dir, "ptb.train.txt")
train_path = os.path.join(resource_dir, "ptb.train.txt")
valid_path = os.path.join(resource_dir, "ptb.valid.txt")
vocab = Vocab(char_based=True)
vocab.load(vocab_path, max_size=1000)
model = NeuralLM(input_dim=vocab.size, input_tensor=3)
model.stack(LSTM(hidden_size=100, output_type="sequence"),
Dense(vocab.size, activation="softmax"))
default_model = os.path.join(os.path.dirname(__file__), "models", "char_lstm_model1.gz")
if __name__ == '__main__':
    ap = ArgumentParser()
    ap.add_argument("--model", default=default_model)
    ap.add_argument("--sample", default="")
    args = ap.parse_args()
    # Resume from a previously saved model if one exists at the given path.
    if os.path.exists(args.model):
        model.load_params(args.model)
    lmdata = LMDataset(vocab, train_path, valid_path, history_len=30, char_based=True, max_tokens=300)
    batch = SequentialMiniBatches(lmdata, batch_size=20)
    trainer = SGDTrainer(model)
    annealer = LearningRateAnnealer()
    # Train, then persist the learned parameters back to the model path.
    trainer.run(batch, epoch_controllers=[annealer])
    model.save_params(args.model)
|
zomux/deepy
|
examples/lm/char_lstm.py
|
Python
|
mit
| 1,531
|
# -*- coding: utf-8 -*-
"""
pyvisa-py.protocols.rpc
~~~~~~~~~~~~~~~~~~~~~~~
Sun RPC version 2 -- RFC1057
This file is drawn from Python's RPC demo, updated for python 3.
XXX There should be separate exceptions for the various reasons why
XXX an RPC can fail, rather than using RuntimeError for everything
XXX The UDP version of the protocol resends requests when it does
XXX not receive a timely reply -- use only for idempotent calls!
XXX There is no provision for call timeout on TCP connections
Original source: http://svn.python.org/projects/python/trunk/Demo/rpc/rpc.py
:copyright: 2014 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import sys
import enum
import xdrlib
import socket
import select
from pyvisa.compat import struct
from ..common import logger
#: Version of the protocol
RPCVERSION = 2
class MessagegType(enum.IntEnum):
    """RPC message direction (RFC 1057).

    NOTE(review): the class name carries a historical typo ("Messageg");
    kept as-is because it is referenced throughout this module.
    """
    call = 0
    reply = 1
class AuthorizationFlavor(enum.IntEnum):
    """RPC authentication flavors (RFC 1057)."""
    null = 0
    unix = 1
    short = 2
    des = 3
class ReplyStatus(enum.IntEnum):
    """Top-level status of an RPC reply."""
    accepted = 0
    denied = 1
class AcceptStatus(enum.IntEnum):
    """Outcome of an accepted call (RFC 1057)."""
    #: RPC executed successfully
    success = 0
    #: remote hasn't exported program
    program_unavailable = 1
    #: remote can't support version
    program_mismatch = 2
    #: program can't support procedure
    procedure_unavailable = 3
    #: procedure can't decode params
    garbage_args = 4
class RejectStatus(enum.IntEnum):
    """Reason a call was denied outright."""
    #: RPC version number != 2
    rpc_mismatch = 0
    #: remote can't authenticate caller
    auth_error = 1
class AuthStatus(enum.IntEnum):
    """Authentication failure detail accompanying an auth_error rejection."""
    ok = 0
    #: bad credentials (seal broken)
    bad_credentials = 1
    #: client must begin new session
    rejected_credentials = 2
    #: bad verifier (seal broken)
    bad_verifier = 3
    #: verifier expired or replayed
    rejected_verifier = 4
    #: rejected for security reasons
    too_weak = 5
# Exceptions
class RPCError(Exception):
    """Base class for errors raised by this RPC layer."""
    pass
class RPCBadFormat(RPCError):
    """Received data that is not a well-formed RPC message."""
    pass
class RPCBadVersion(RPCError):
    """Peer speaks an RPC version other than 2."""
    pass
class RPCGarbageArgs(RPCError):
    """Server accepted the call but could not decode the arguments."""
    pass
class RPCUnpackError(RPCError):
    """Reply could not be unpacked, or the call was rejected/failed."""
    pass
def make_auth_null():
    """Return the opaque body of an AUTH_NULL credential: an empty byte string."""
    return bytes()
class Packer(xdrlib.Packer):
    """XDR packer extended with the RPC message structures of RFC 1057."""
    def pack_auth(self, auth):
        # auth is an (AuthorizationFlavor, opaque-bytes) pair.
        flavor, stuff = auth
        self.pack_enum(flavor)
        self.pack_opaque(stuff)
    def pack_auth_unix(self, stamp, machinename, uid, gid, gids):
        # AUTH_UNIX credential body: timestamp, hostname, ids and group list.
        self.pack_uint(stamp)
        self.pack_string(machinename)
        self.pack_uint(uid)
        self.pack_uint(gid)
        self.pack_uint(len(gids))
        for i in gids:
            self.pack_uint(i)
    def pack_callheader(self, xid, prog, vers, proc, cred, verf):
        # CALL message header: xid, msg type, RPC version, target program /
        # version / procedure, then credential and verifier.
        self.pack_uint(xid)
        self.pack_enum(MessagegType.call)
        self.pack_uint(RPCVERSION)
        self.pack_uint(prog)
        self.pack_uint(vers)
        self.pack_uint(proc)
        self.pack_auth(cred)
        self.pack_auth(verf)
        # Caller must add procedure-specific part of call
    def pack_replyheader(self, xid, verf):
        # Successful (accepted) REPLY header.
        self.pack_uint(xid)
        self.pack_enum(MessagegType.reply)
        self.pack_uint(ReplyStatus.accepted)
        self.pack_auth(verf)
        self.pack_enum(AcceptStatus.success)
        # Caller must add procedure-specific part of reply
class Unpacker(xdrlib.Unpacker):
    """XDR unpacker extended with the RPC message structures of RFC 1057."""
    def unpack_auth(self):
        # Inverse of Packer.pack_auth: (flavor, opaque bytes).
        flavor = self.unpack_enum()
        stuff = self.unpack_opaque()
        return flavor, stuff
    def unpack_callheader(self):
        # Decode a CALL header, validating message type and RPC version.
        xid = self.unpack_uint()
        temp = self.unpack_enum()
        if temp != MessagegType.call:
            raise RPCBadFormat('no CALL but %r' % (temp,))
        temp = self.unpack_uint()
        if temp != RPCVERSION:
            raise RPCBadVersion('bad RPC version %r' % (temp,))
        prog = self.unpack_uint()
        vers = self.unpack_uint()
        proc = self.unpack_uint()
        cred = self.unpack_auth()
        verf = self.unpack_auth()
        return xid, prog, vers, proc, cred, verf
        # Caller must add procedure-specific part of call
    def unpack_replyheader(self):
        # Decode a REPLY header; raises RPCUnpackError (or RPCGarbageArgs)
        # for every denied/failed outcome, returning only on success.
        xid = self.unpack_uint()
        mtype = self.unpack_enum()
        if mtype != MessagegType.reply:
            raise RPCUnpackError('no reply but %r' % (mtype,))
        stat = self.unpack_enum()
        if stat == ReplyStatus.denied:
            stat = self.unpack_enum()
            if stat == RejectStatus.rpc_mismatch:
                low = self.unpack_uint()
                high = self.unpack_uint()
                raise RPCUnpackError('denied: rpc_mismatch: %r' % ((low, high),))
            if stat == RejectStatus.auth_error:
                stat = self.unpack_uint()
                raise RPCUnpackError('denied: auth_error: %r' % (stat,))
            raise RPCUnpackError('denied: %r' % (stat,))
        if stat != ReplyStatus.accepted:
            raise RPCUnpackError('Neither denied nor accepted: %r' % (stat,))
        verf = self.unpack_auth()
        stat = self.unpack_enum()
        if stat == AcceptStatus.program_unavailable:
            raise RPCUnpackError('call failed: program_unavailable')
        if stat == AcceptStatus.program_mismatch:
            low = self.unpack_uint()
            high = self.unpack_uint()
            raise RPCUnpackError('call failed: program_mismatch: %r' % ((low, high),))
        if stat == AcceptStatus.procedure_unavailable:
            raise RPCUnpackError('call failed: procedure_unavailable')
        if stat == AcceptStatus.garbage_args:
            raise RPCGarbageArgs
        if stat != AcceptStatus.success:
            raise RPCUnpackError('call failed: %r' % (stat,))
        return xid, verf
        # Caller must get procedure-specific part of reply
class Client(object):
    """Common base class for clients.

    Subclasses must implement do_call() to actually move bytes; this class
    handles packing the call header and unpacking the reply.
    """
    def __init__(self, host, prog, vers, port):
        self.host = host
        self.prog = prog
        self.vers = vers
        self.port = port
        self.lastxid = 0 # XXX should be more random?
        # Lazily created by mkcred()/mkverf(); default to AUTH_NULL.
        self.cred = None
        self.verf = None
    def make_call(self, proc, args, pack_func, unpack_func):
        # Don't normally override this (but see Broadcast)
        logger.debug('Make call %r, %r, %r, %r', proc, args, pack_func, unpack_func)
        if pack_func is None and args is not None:
            raise TypeError('non-null args with null pack_func')
        self.start_call(proc)
        if pack_func:
            pack_func(args)
        self.do_call()
        if unpack_func:
            result = unpack_func()
        else:
            result = None
        # done() raises if the reply had trailing undecoded bytes.
        self.unpacker.done()
        return result
    def start_call(self, proc):
        # Don't override this
        self.lastxid += 1
        cred = self.mkcred()
        verf = self.mkverf()
        p = self.packer
        p.reset()
        p.pack_callheader(self.lastxid, self.prog, self.vers, proc, cred, verf)
    def do_call(self):
        # This MUST be overridden
        raise RPCError('do_call not defined')
    def mkcred(self):
        # Override this to use more powerful credentials
        if self.cred is None:
            self.cred = (AuthorizationFlavor.null, make_auth_null())
        return self.cred
    def mkverf(self):
        # Override this to use a more powerful verifier
        if self.verf is None:
            self.verf = (AuthorizationFlavor.null, make_auth_null())
        return self.verf
    def call_0(self):
        # Procedure 0 is always like this
        return self.make_call(0, None, None, None)
# Record-Marking standard support
def sendfrag(sock, last, frag):
    """Send one record-marking fragment over `sock`.

    The 4-byte big-endian header carries the fragment length; the high bit
    marks the final fragment of a record.
    """
    length = len(frag)
    if last:
        length |= 0x80000000
    sock.send(struct.pack(">I", length) + frag)
def sendrecord(sock, record):
    """Send a complete record as a single final fragment."""
    logger.debug('Sending record through %s: %s', sock, record)
    sendfrag(sock, 1, record)
def recvfrag(sock):
    """Receive one record-marking fragment from `sock`.

    Returns:
        (is_last, payload): whether this was the final fragment of the
        record, and the fragment's bytes.

    Raises:
        EOFError: if the peer closed before a full header or payload arrived.
    """
    header = sock.recv(4)
    if len(header) < 4:
        raise EOFError
    word = struct.unpack(">I", header[0:4])[0]
    is_last = (word & 0x80000000) != 0
    remaining = int(word & 0x7fffffff)
    chunks = []
    while remaining > 0:
        buf = sock.recv(remaining)
        if not buf:
            raise EOFError
        remaining -= len(buf)
        chunks.append(buf)
    return is_last, b''.join(chunks)
def recvrecord(sock):
    """Receive a complete record: concatenate fragments until the last one."""
    record = b''
    last = 0
    while not last:
        last, frag = recvfrag(sock)
        record = record + frag
    logger.debug('Received record through %s: %r', sock, record)
    return record
class RawTCPClient(Client):
    """Client using TCP to a specific port.
    """
    def __init__(self, host, prog, vers, port):
        Client.__init__(self, host, prog, vers, port)
        self.connect()
        # self.timeout defaults higher than the default 2 second VISA timeout,
        # ensuring that VISA timeouts take precedence.
        self.timeout = 4.0
    def make_call(self, proc, args, pack_func, unpack_func):
        """Overridden to allow for utilizing io_timeout (passed in args)
        """
        # The VXI-11 procedure number determines where in `args` the
        # io_timeout (milliseconds) sits; add 2s of slack on top of it.
        if proc == 11:
            # vxi11.DEVICE_WRITE
            self.timeout = (args[1] / 1000.0) + 2.0
        elif proc in (12, 22):
            # vxi11.DEVICE_READ or vxi11.DEVICE_DOCMD
            self.timeout = (args[2] / 1000.0) + 2.0
        elif proc in (13, 14, 15, 16, 17):
            # vxi11.DEVICE_READSTB, vxi11.DEVICE_TRIGGER, vxi11.DEVICE_CLEAR,
            # vxi11.DEVICE_REMOTE, or vxi11.DEVICE_LOCAL
            self.timeout = (args[3] / 1000.0) + 2.0
        else:
            self.timeout = 4.0
        return super(RawTCPClient, self).make_call(proc, args, pack_func, unpack_func)
    def connect(self):
        logger.debug('RawTCPClient: connecting to socket at (%s, %s)', self.host, self.port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
    def close(self):
        logger.debug('RawTCPClient: closing socket')
        self.sock.close()
    def do_call(self):
        call = self.packer.get_buf()
        # Guard both the send and the receive with select() so a wedged
        # instrument surfaces as socket.timeout rather than a hang.
        r, w, x = select.select([], [self.sock], [], self.timeout)
        if self.sock not in w:
            raise socket.timeout("socket.timeout: The instrument seems to have stopped responding.")
        sendrecord(self.sock, call)
        r, w, x = select.select([self.sock], [], [], self.timeout)
        if self.sock not in r:
            raise socket.timeout("socket.timeout: The instrument seems to have stopped responding.")
        reply = recvrecord(self.sock)
        u = self.unpacker
        u.reset(reply)
        xid, verf = u.unpack_replyheader()
        if xid != self.lastxid:
            # Can't really happen since this is TCP...
            raise RPCError('wrong xid in reply {0} instead of {1}'.format(xid, self.lastxid))
class RawUDPClient(Client):
    """Client using UDP to a specific port.

    UDP is unreliable, so do_call() retries the request with an
    exponential back-off until a reply with a matching xid arrives.
    """
    def __init__(self, host, prog, vers, port):
        Client.__init__(self, host, prog, vers, port)
        self.connect()
    def connect(self):
        # BUG FIX: the log messages in connect()/close() previously said
        # 'RawTCPClient' (copy-paste from the TCP client).
        logger.debug('RawUDPClient: connecting to socket at (%s, %s)', self.host, self.port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.connect((self.host, self.port))
    def close(self):
        logger.debug('RawUDPClient: closing socket')
        self.sock.close()
    def do_call(self):
        """Send the packed call and wait for a matching reply.

        Retries up to 5 times, doubling the per-wait timeout (capped at
        25 s) after each silent interval; raises RPCError when all
        retries are exhausted.
        """
        call = self.packer.get_buf()
        self.sock.send(call)
        BUFSIZE = 8192  # Max UDP buffer size
        timeout = 1
        count = 5
        while 1:
            r, w, x = [self.sock], [], []
            if select:
                r, w, x = select.select(r, w, x, timeout)
            if self.sock not in r:
                # No reply yet: back off and resend.
                count = count - 1
                if count < 0:
                    raise RPCError('timeout')
                if timeout < 25:
                    timeout = timeout * 2
                self.sock.send(call)
                continue
            reply = self.sock.recv(BUFSIZE)
            u = self.unpacker
            u.reset(reply)
            xid, verf = u.unpack_replyheader()
            if xid != self.lastxid:
                # Stale reply to a previous (retried) request; keep waiting.
                continue
            break
class RawBroadcastUDPClient(RawUDPClient):
    """Client using UDP broadcast to a specific port.

    make_call() collects every reply that arrives before the timeout
    and returns them as a list of (reply, fromaddr) pairs.
    """
    def __init__(self, bcastaddr, prog, vers, port):
        RawUDPClient.__init__(self, bcastaddr, prog, vers, port)
        self.reply_handler = None
        self.timeout = 30
    def connect(self):
        # No connect(): broadcast datagrams are sent with sendto(), so
        # the socket only needs SO_BROADCAST enabled.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    def set_reply_handler(self, reply_handler):
        # Optional callback invoked as reply_handler(reply, fromaddr)
        # for each reply as it arrives.
        self.reply_handler = reply_handler
    def set_timeout(self, timeout):
        self.timeout = timeout  # Use None for infinite timeout
    def make_call(self, proc, args, pack_func, unpack_func):
        """Broadcast one call; gather replies until the timeout expires."""
        if pack_func is None and args is not None:
            raise TypeError('non-null args with null pack_func')
        self.start_call(proc)
        if pack_func:
            pack_func(args)
        call = self.packer.get_buf()
        self.sock.sendto(call, (self.host, self.port))
        BUFSIZE = 8192  # Max UDP buffer size (for reply)
        replies = []
        if unpack_func is None:
            def dummy():
                pass
            unpack_func = dummy
        while 1:
            r, w, x = [self.sock], [], []
            if select:
                if self.timeout is None:
                    r, w, x = select.select(r, w, x)
                else:
                    r, w, x = select.select(r, w, x, self.timeout)
            if self.sock not in r:
                # select timed out: stop collecting replies.
                break
            reply, fromaddr = self.sock.recvfrom(BUFSIZE)
            u = self.unpacker
            u.reset(reply)
            xid, verf = u.unpack_replyheader()
            if xid != self.lastxid:
                # Reply to some other call; ignore it.
                continue
            reply = unpack_func()
            self.unpacker.done()
            replies.append((reply, fromaddr))
            if self.reply_handler:
                self.reply_handler(reply, fromaddr)
        return replies
# Port mapper interface
# Program number, version and port number
PMAP_PROG = 100000  # RFC 1057: well-known portmapper RPC program number
PMAP_VERS = 2       # portmapper protocol version implemented here
PMAP_PORT = 111     # well-known portmapper TCP/UDP port
class PortMapperVersion(enum.IntEnum):
    """Procedure numbers of the version-2 port mapper program.

    (Despite the class name these are procedure numbers: they are
    passed as the ``proc`` argument of make_call() below.)
    """
    #: (void) -> void
    null = 0
    #: (mapping) -> bool
    set = 1
    #: (mapping) -> bool
    unset = 2
    #: (mapping) -> unsigned int
    get_port = 3
    #: (void) -> pmaplist
    dump = 4
    #: (call_args) -> call_result
    call_it = 5
# A mapping is (prog, vers, prot, port) and prot is one of:
IPPROTO_TCP = 6   # IANA protocol number for TCP
IPPROTO_UDP = 17  # IANA protocol number for UDP
# A pmaplist is a variable-length list of mappings, as follows:
# either (1, mapping, pmaplist) or (0).
# A call_args is (prog, vers, proc, args) where args is opaque;
# a call_result is (port, res) where res is opaque.
class PortMapperPacker(Packer):
    """Packs port mapper request structures on top of the XDR Packer."""
    def pack_mapping(self, mapping):
        # A mapping is exactly (prog, vers, prot, port), all uints.
        prog, vers, prot, port = mapping
        for field in (prog, vers, prot, port):
            self.pack_uint(field)
    def pack_pmaplist(self, list):
        self.pack_list(list, self.pack_mapping)
    def pack_call_args(self, ca):
        # call_args is (prog, vers, proc, args); args is opaque bytes.
        prog, vers, proc, args = ca
        for field in (prog, vers, proc):
            self.pack_uint(field)
        self.pack_opaque(args)
class PortMapperUnpacker(Unpacker):
    """Unpacks port mapper reply structures on top of the XDR Unpacker."""
    def unpack_mapping(self):
        # Reverse of PortMapperPacker.pack_mapping: four consecutive
        # uints, returned as (prog, vers, prot, port).
        fields = [self.unpack_uint() for _ in range(4)]
        return tuple(fields)
    def unpack_pmaplist(self):
        return self.unpack_list(self.unpack_mapping)
    def unpack_call_result(self):
        # call_result is (port, res); res is opaque bytes.
        port = self.unpack_uint()
        payload = self.unpack_opaque()
        return port, payload
class PartialPortMapperClient(object):
    """Mixin implementing the version-2 port mapper procedures.

    Must be combined with a transport class (one of the Raw*Client
    classes) that supplies make_call(); see TCPPortMapperClient,
    UDPPortMapperClient and BroadcastUDPPortMapperClient below.
    """
    def __init__(self):
        self.packer = PortMapperPacker()
        self.unpacker = PortMapperUnpacker('')
    def set(self, mapping):
        # Register a (prog, vers, prot, port) mapping; returns bool.
        return self.make_call(PortMapperVersion.set, mapping,
                              self.packer.pack_mapping,
                              self.unpacker.unpack_uint)
    def unset(self, mapping):
        # Remove a mapping; returns bool.
        return self.make_call(PortMapperVersion.unset, mapping,
                              self.packer.pack_mapping,
                              self.unpacker.unpack_uint)
    def get_port(self, mapping):
        # Look up the port for a mapping; 0 means "not registered".
        return self.make_call(PortMapperVersion.get_port, mapping,
                              self.packer.pack_mapping,
                              self.unpacker.unpack_uint)
    def dump(self):
        # Return all registered mappings as a pmaplist.
        return self.make_call(PortMapperVersion.dump, None,
                              None,
                              self.unpacker.unpack_pmaplist)
    def callit(self, ca):
        # Indirect call through the portmapper CALLIT procedure.
        return self.make_call(PortMapperVersion.call_it, ca,
                              self.packer.pack_call_args,
                              self.unpacker.unpack_call_result)
class TCPPortMapperClient(PartialPortMapperClient, RawTCPClient):
    """Port mapper client talking TCP to port 111 on *host*."""
    def __init__(self, host):
        RawTCPClient.__init__(self, host, PMAP_PROG, PMAP_VERS, PMAP_PORT)
        PartialPortMapperClient.__init__(self)
class UDPPortMapperClient(PartialPortMapperClient, RawUDPClient):
    """Port mapper client talking UDP to port 111 on *host*."""
    def __init__(self, host):
        RawUDPClient.__init__(self, host, PMAP_PROG, PMAP_VERS, PMAP_PORT)
        PartialPortMapperClient.__init__(self)
class BroadcastUDPPortMapperClient(PartialPortMapperClient, RawBroadcastUDPClient):
    """Port mapper client broadcasting to port 111 on *bcastaddr*."""
    def __init__(self, bcastaddr):
        RawBroadcastUDPClient.__init__(self, bcastaddr, PMAP_PROG, PMAP_VERS, PMAP_PORT)
        PartialPortMapperClient.__init__(self)
class TCPClient(RawTCPClient):
    """A TCP client that finds its server's port through the port mapper.
    """
    def __init__(self, host, prog, vers):
        pmap = TCPPortMapperClient(host)
        try:
            port = pmap.get_port((prog, vers, IPPROTO_TCP, 0))
        finally:
            # Always release the port mapper socket, even when the
            # lookup raises (it previously leaked on error).
            pmap.close()
        if port == 0:
            raise RPCError('program not registered')
        RawTCPClient.__init__(self, host, prog, vers, port)
class UDPClient(RawUDPClient):
    """A UDP client that finds its server's port through the port mapper.
    """
    def __init__(self, host, prog, vers):
        pmap = UDPPortMapperClient(host)
        try:
            port = pmap.get_port((prog, vers, IPPROTO_UDP, 0))
        finally:
            # Always release the port mapper socket, even when the
            # lookup raises (it previously leaked on error).
            pmap.close()
        if port == 0:
            raise RPCError('program not registered')
        RawUDPClient.__init__(self, host, prog, vers, port)
class BroadcastUDPClient(Client):
    """A Broadcast UDP Client that find their server through the Port mapper

    Calls are relayed through the port mapper CALLIT procedure; every
    reply received before the timeout is collected, decoded with the
    caller's unpack function, and optionally forwarded to a
    user-supplied reply handler.
    """
    def __init__(self, bcastaddr, prog, vers):
        self.pmap = BroadcastUDPPortMapperClient(bcastaddr)
        self.pmap.set_reply_handler(self.my_reply_handler)
        self.prog = prog
        self.vers = vers
        self.user_reply_handler = None
        self.addpackers()
    def close(self):
        self.pmap.close()
    def set_reply_handler(self, reply_handler):
        # Callback invoked as reply_handler(result, fromaddr) per reply.
        self.user_reply_handler = reply_handler
    def set_timeout(self, timeout):
        self.pmap.set_timeout(timeout)
    def my_reply_handler(self, reply, fromaddr):
        # Unwrap the CALLIT (port, res) result and decode the opaque
        # payload with the caller's unpack function.
        port, res = reply
        self.unpacker.reset(res)
        result = self.unpack_func()
        self.unpacker.done()
        self.replies.append((result, fromaddr))
        if self.user_reply_handler is not None:
            self.user_reply_handler(result, fromaddr)
    def make_call(self, proc, args, pack_func, unpack_func):
        """Broadcast one call; return a list of (result, fromaddr) pairs."""
        self.packer.reset()
        if pack_func:
            pack_func(args)
        if unpack_func is None:
            def dummy(): pass
            self.unpack_func = dummy
        else:
            self.unpack_func = unpack_func
        self.replies = []
        packed_args = self.packer.get_buf()
        # BUG FIX: this called self.pmap.Callit(...), but the port mapper
        # mixin's method is named callit(); the capitalized name raised
        # AttributeError at runtime.
        self.pmap.callit((self.prog, self.vers, proc, packed_args))
        return self.replies
# Server classes
# These are not symmetric to the Client classes
# XXX No attempt is made to provide authorization hooks yet
class Server(object):
    """Base class for ONC RPC servers.

    Subclasses provide the transport (TCPServer / UDPServer) and one
    ``handle_<proc>`` method per supported procedure number.
    """
    def __init__(self, host, prog, vers, port):
        self.host = host  # Should normally be '' for default interface
        self.prog = prog
        self.vers = vers
        # Should normally be 0 for random port.
        # (Removed a duplicated ``self.port = port`` assignment.)
        self.port = port
        self.addpackers()
    def register(self):
        """Register (prog, vers, prot, port) with the local port mapper."""
        mapping = self.prog, self.vers, self.prot, self.port
        p = TCPPortMapperClient(self.host)
        if not p.set(mapping):
            raise RPCError('register failed')
    def unregister(self):
        """Remove this server's mapping from the local port mapper."""
        mapping = self.prog, self.vers, self.prot, self.port
        p = TCPPortMapperClient(self.host)
        if not p.unset(mapping):
            raise RPCError('unregister failed')
    def handle(self, call):
        """Decode one call record and return the packed reply, or None.

        The header is parsed piecewise (not via unpack_header) so each
        mismatch can be answered with the appropriate RPC error reply.
        """
        # XXX I have no idea if I am using the right error responses!
        self.unpacker.reset(call)
        self.packer.reset()
        xid = self.unpacker.unpack_uint()
        self.packer.pack_uint(xid)
        temp = self.unpacker.unpack_enum()
        if temp != MessagegType.call:
            return None  # Not worthy of a reply
        self.packer.pack_uint(MessagegType.reply)
        temp = self.unpacker.unpack_uint()
        if temp != RPCVERSION:
            self.packer.pack_uint(ReplyStatus.denied)
            self.packer.pack_uint(RejectStatus.rpc_mismatch)
            # Lowest and highest supported RPC version (both RPCVERSION).
            self.packer.pack_uint(RPCVERSION)
            self.packer.pack_uint(RPCVERSION)
            return self.packer.get_buf()
        self.packer.pack_uint(ReplyStatus.accepted)
        self.packer.pack_auth((AuthorizationFlavor.null, make_auth_null()))
        prog = self.unpacker.unpack_uint()
        if prog != self.prog:
            self.packer.pack_uint(AcceptStatus.program_unavailable)
            return self.packer.get_buf()
        vers = self.unpacker.unpack_uint()
        if vers != self.vers:
            self.packer.pack_uint(AcceptStatus.program_mismatch)
            # Lowest and highest supported program version (both self.vers).
            self.packer.pack_uint(self.vers)
            self.packer.pack_uint(self.vers)
            return self.packer.get_buf()
        proc = self.unpacker.unpack_uint()
        methname = 'handle_' + repr(proc)
        try:
            meth = getattr(self, methname)
        except AttributeError:
            self.packer.pack_uint(AcceptStatus.procedure_unavailable)
            return self.packer.get_buf()
        cred = self.unpacker.unpack_auth()
        verf = self.unpacker.unpack_auth()
        try:
            meth()  # Unpack args, call turn_around(), pack reply
        except (EOFError, RPCGarbageArgs):
            # Too few or too many arguments: rebuild the reply from
            # scratch, since the handler may have half-packed one.
            self.packer.reset()
            self.packer.pack_uint(xid)
            self.packer.pack_uint(MessagegType.reply)
            self.packer.pack_uint(ReplyStatus.accepted)
            self.packer.pack_auth((AuthorizationFlavor.null, make_auth_null()))
            self.packer.pack_uint(AcceptStatus.garbage_args)
        return self.packer.get_buf()
    def turn_around(self):
        """Called by handlers once all arguments have been unpacked."""
        try:
            self.unpacker.done()
        except RuntimeError:
            raise RPCGarbageArgs
        self.packer.pack_uint(AcceptStatus.success)
    def handle_0(self):
        # Handle NULL message
        self.turn_around()
    def addpackers(self):
        # Override this to use derived classes from Packer/Unpacker
        self.packer = Packer()
        self.unpacker = Unpacker('')
class TCPServer(Server):
    """RPC server over TCP, using the record-marking helpers
    sendrecord()/recvrecord() for framing."""
    def __init__(self, host, prog, vers, port):
        Server.__init__(self, host, prog, vers, port)
        self.connect()
    def connect(self):
        # Bind only; listening starts in loop()/forkingloop().
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.prot = IPPROTO_TCP
        self.sock.bind((self.host, self.port))
    def loop(self):
        """Accept connections forever, serving each one sequentially."""
        self.sock.listen(0)
        while 1:
            self.session(self.sock.accept())
    def session(self, connection):
        """Serve one accepted connection until EOF or a socket error."""
        sock, (host, port) = connection
        while 1:
            try:
                call = recvrecord(sock)
            except EOFError:
                # Client closed the connection cleanly.
                break
            except socket.error:
                logger.exception('socket error: %r', sys.exc_info()[0])
                break
            reply = self.handle(call)
            if reply is not None:
                sendrecord(sock, reply)
    def forkingloop(self):
        # Like loop but uses forksession()
        self.sock.listen(0)
        while 1:
            self.forksession(self.sock.accept())
    def forksession(self, connection):
        # Like session but forks off a subprocess
        import os
        # Wait for deceased children (reap zombies; the 1 is WNOHANG so
        # this never blocks).
        try:
            while 1:
                pid, sts = os.waitpid(0, 1)
        except os.error:
            pass
        pid = None
        try:
            pid = os.fork()
            if pid:  # Parent
                connection[0].close()
                return
            # Child
            self.session(connection)
        finally:
            # Make sure the child never falls through into the parent's
            # code path after its session ends.
            if pid == 0:
                os._exit(0)
class UDPServer(Server):
    """RPC server over UDP; one datagram per request and per reply."""
    def __init__(self, host, prog, vers, port):
        Server.__init__(self, host, prog, vers, port)
        self.connect()
    def connect(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.prot = IPPROTO_UDP
        self.sock.bind((self.host, self.port))
    def loop(self):
        # Serve forever, one datagram at a time.
        while 1:
            self.session()
    def session(self):
        """Receive one call datagram, handle it, and send back the reply."""
        call, host_port = self.sock.recvfrom(8192)
        reply = self.handle(call)
        if reply is not None:
            self.sock.sendto(reply, host_port)
|
andeh575/pyvisa-py
|
pyvisa-py/protocols/rpc.py
|
Python
|
mit
| 25,745
|
from __future__ import unicode_literals
from django.contrib.auth.views import logout
from django.core.urlresolvers import NoReverseMatch, reverse_lazy
from django.shortcuts import resolve_url
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from .models import UnimportantThing
@override_settings(ROOT_URLCONF='resolve_url.urls')
class ResolveUrlTests(SimpleTestCase):
    """
    Tests for the ``resolve_url`` function.

    ``resolve_url`` accepts URL paths, full URLs, models, view names,
    view callables and lazy reverse results; each case is covered below.
    """
    def test_url_path(self):
        """
        Passing an absolute URL path to ``resolve_url`` returns the same url.
        """
        self.assertEqual('/something/', resolve_url('/something/'))
    def test_relative_path(self):
        """
        Passing a relative URL path to ``resolve_url`` returns the same url.
        """
        self.assertEqual('../', resolve_url('../'))
        self.assertEqual('../relative/', resolve_url('../relative/'))
        self.assertEqual('./', resolve_url('./'))
        self.assertEqual('./relative/', resolve_url('./relative/'))
    def test_full_url(self):
        """
        Passing a full URL to ``resolve_url`` returns the same url.
        """
        url = 'http://example.com/'
        self.assertEqual(url, resolve_url(url))
    def test_model(self):
        """
        Passing a model to ``resolve_url`` calls ``get_absolute_url`` on
        that model instance.
        """
        m = UnimportantThing(importance=1)
        self.assertEqual(m.get_absolute_url(), resolve_url(m))
    def test_view_function(self):
        """
        Passing a view callable to ``resolve_url`` returns the URL path
        mapping to that view.
        """
        resolved_url = resolve_url(logout)
        self.assertEqual('/accounts/logout/', resolved_url)
    def test_lazy_reverse(self):
        """
        The result of reverse_lazy is resolved to a real (text) URL string.
        """
        resolved_url = resolve_url(reverse_lazy('logout'))
        self.assertIsInstance(resolved_url, six.text_type)
        self.assertEqual('/accounts/logout/', resolved_url)
    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_valid_view_name(self):
        """
        Passing a dotted view path (deprecated in Django 2.0, hence the
        ignored warning) returns the URL path mapping to that view.
        """
        resolved_url = resolve_url('django.contrib.auth.views.logout')
        self.assertEqual('/accounts/logout/', resolved_url)
    def test_domain(self):
        """
        Passing a bare domain to ``resolve_url`` returns the same domain.
        """
        self.assertEqual(resolve_url('example.com'), 'example.com')
    def test_non_view_callable_raises_no_reverse_match(self):
        """
        Passing a non-view callable into ``resolve_url`` raises a
        ``NoReverseMatch`` exception.
        """
        with self.assertRaises(NoReverseMatch):
            resolve_url(lambda: 'asdf')
|
gannetson/django
|
tests/resolve_url/tests.py
|
Python
|
bsd-3-clause
| 3,165
|
class Solution(object):
    def numberOfBoomerangs(self, points):
        """
        :type points: List[List[int]]
        :rtype: int

        For each anchor point p, count how many points lie at each
        squared distance; k points at the same distance contribute
        k*(k-1) ordered boomerang pairs (i, j) with i != j.
        O(n^2) time, O(n) extra space per anchor.
        """
        ans = 0
        for p in points:
            dist_count = {}
            for q in points:
                if p == q:
                    continue
                # Squared Euclidean distance (avoids float sqrt);
                # computed once instead of twice per pair as before.
                d = (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2
                dist_count[d] = dist_count.get(d, 0) + 1
            for k in dist_count.values():
                ans += k * (k - 1)
        return ans
|
YiqunPeng/Leetcode-pyq
|
solutions/447NumberOfBoomerangs.py
|
Python
|
gpl-3.0
| 591
|
#!/usr/bin/env python
##
# @license
# Copyright Neekware Inc. All Rights Reserved.
#
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE file at http://neekware.com/license/MIT.html
###
import os
import sys
if __name__ == "__main__":
    # NOTE(review): "settings.stagging" looks like a typo for
    # "settings.staging" (the file lives under bin/staging/), but the
    # settings module may genuinely carry this name -- confirm before
    # renaming.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.stagging")
    # Make the sibling 'api', 'apps' and 'www' trees importable.
    for item in ['api', 'apps', 'www']:
        sys.path.insert(0, os.path.abspath(os.path.join(
            os.path.dirname(__file__), os.pardir, os.pardir, item)))
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
un33k/djangoware
|
bin/staging/manage.py
|
Python
|
mit
| 1,185
|
from celery.execute import send_task
from rest_framework import status
from rest_framework import viewsets
from rest_framework.response import Response
from rockit.core import models
from rockit.core import holders
from rockit.core import resolvers
from rockit.core import serializers
class MixesViewSet(viewsets.ViewSet):
    """
    List all addable mix states in rockit server.

    Mix data is gathered from the per-association Celery workers via
    send_task(); every remote call waits at most 10 seconds.
    """
    def list(self, request):
        """
        Return a list of all addables.
        """
        result = holders.Holder()
        for a in models.Association.objects.all():
            task = send_task("%s.mixes" % a.entry, args=[holders.MixesHolder(a)])
            mixes = task.wait(timeout=10)
            if mixes:
                mixes = resolvers.MixesResolver().resolve(request, mixes)
                result.extend(resolvers.MixesNameResolver().resolve(mixes) if mixes.should_resolve_names() else mixes)
        return Response(result.get_content())
    def details(self, request, *args, **kwargs):
        """
        Return specific options for the requested mix.
        """
        entry = kwargs['entry']
        identifier = kwargs['pk']
        task = send_task("%s.mixes.details" % entry, args=[identifier, holders.MixesDetailsHolder()])
        mixes = task.wait(timeout=10)
        return Response(mixes.get_content())
    def create(self, request):
        """
        Create a new mix.
        """
        holder = request.DATA
        validation = holders.ErrorHolder()
        if not self._validate_common(holder, validation):
            validation = self._validate_container('when', holder['when'], validation)
            validation = self._validate_container('then', holder['then'], validation)
            validation = self._validate_container('finish', holder['finish'], validation)
            if not validation.has_errors():
                when = self._create_mix('when', holder['when'], validation)
                then = self._create_mix('then', holder['then'], validation)
                finish = self._create_mix('finish', holder['finish'], validation)
                action = models.Action.objects.create(name=holder['name'], description=holder['description'])
                self._save_mix(action, models.ActionWhen.objects, when)
                self._save_mix(action, models.ActionThen.objects, then)
                self._save_mix(action, models.ActionFinish.objects, finish)
                # NOTE(review): this returns success=False with 404 even on
                # the successful creation path above; it looks like it
                # should be a success response (e.g. 201). Kept as-is to
                # preserve the existing API -- confirm intended behavior.
                return Response({'success': False}, status=status.HTTP_404_NOT_FOUND)
        return Response({'success': False, "detail": validation.get_errors()}, status=status.HTTP_400_BAD_REQUEST)
    def _create_mix(self, name, holder, validation):
        """Create remote mix entries for each container; returns a list
        of {'id', 'association', 'uuid'} dicts."""
        result = []
        for container in holder:
            association = self._get_association(container['entry'], validation)
            if association:
                entry = association.entry
                uuid = self._get_node_uuid(association, container['id'])
                task = send_task("%s.mixes.%s.create" % (entry, name), args=[uuid, container['criterias']])
                result.append({
                    'id': task.wait(timeout=10),
                    'association': association,
                    'uuid': uuid
                })
        return result
    def _get_association(self, entry, validation):
        """Look up the Association for *entry*; records a validation
        error and returns None when it does not exist."""
        try:
            return models.Association.objects.get(entry=entry)
        except models.Association.DoesNotExist:
            validation.add_error(entry, 'Could not find association in network')
            return None
    def _get_node_uuid(self, association, id):
        """Map a node's association-local id to its uuid, falling back
        to the raw id when no unique node is found."""
        nodes = models.Node.objects.filter(association=association, aid=id)
        # Should get one and only one.
        # BUG FIX: was ``len(nodes) is 1`` -- identity comparison on an
        # int only works by accident of CPython small-int caching.
        if nodes and len(nodes) == 1:
            return nodes[0].uuid
        # Cannot find node with id, send it back
        return id
    def _save_mix(self, action, model, items):
        """Persist the created mix items (those with a remote id)."""
        for item in items:
            if item['id']:
                created = model.create(holder=action, target=item['association'], identifier=item['id'])
    def _validate_mixes(self, container, holder, validation):
        """Check the common shape of one mix container; returns True when
        validation errors exist."""
        if 'entry' not in holder or not holder['entry']:
            validation.add_error(container, 'Entry must be provided')
        if 'id' not in holder or not holder['id']:
            validation.add_error(container, 'Node id must be provided')
        if 'criterias' not in holder or not holder['criterias']:
            validation.add_error(container, 'Criterias cannot be empty')
        return validation.has_errors()
    def _validate_criteria(self, container, holder, validation):
        """Check a single criteria dict; returns True when validation
        errors exist."""
        if 'id' not in holder or not holder['id']:
            validation.add_error(container, 'Critieria id must be provided')
        # NOTE(review): the check below duplicates the 'id' check but its
        # message talks about the criteria *value* -- it probably meant to
        # test a 'value' key. Kept as-is; confirm the expected schema.
        if 'id' not in holder or not holder['id']:
            validation.add_error(container, 'Critieria value must be provided')
        return validation.has_errors()
    def _validate_container(self, name, holder, validation):
        """Validate every container in *holder*, then delegate the final
        validation to the owning worker."""
        for container in holder:
            if not self._validate_mixes(name, container, validation):
                # items() behaves the same on Python 2 and 3
                # (was iteritems(), which no longer exists on Python 3).
                for key, criteria in container['criterias'].items():
                    self._validate_criteria(container['id'], criteria, validation)
                if not validation.has_errors():
                    association = self._get_association(container['entry'], validation)
                    if association:
                        entry = association.entry
                        task = send_task("%s.mixes.%s.validate" % (entry, name), args=[container['id'], container['criterias'], validation])
                        # Wait for the returned validation holder
                        return task.wait(timeout=10)
        return validation
    def _validate_common(self, holder, validation):
        """Check the top-level mix payload; returns True when validation
        errors exist."""
        if 'name' not in holder or not holder['name']:
            validation.add_error('name', 'Name cannot be empty')
        if 'when' not in holder:
            validation.add_error('when', 'When container must be provided')
        if 'then' not in holder or not holder['then']:
            validation.add_error('then', 'Then container must be provided')
        if 'finish' not in holder:
            # NOTE(review): message says 'When' for the 'finish' container
            # -- likely copy-paste; kept verbatim to preserve the API.
            validation.add_error('finish', 'When container must be provided')
        return validation.has_errors()
|
acreations/rockit-server
|
rockit/core/views/mixes.py
|
Python
|
mit
| 6,422
|
#!/usr/bin/python -u
# WARNING: python -u means unbuffered I/O. Without it the messages are
# passed to the parent asynchronously which looks bad in clients.
import sys
import os
import getopt
import rpm
def log(s):
sys.stderr.write("%s\n" % s)
def error_msg(s):
sys.stderr.write("%s\n" % s)
def error_msg_and_die(s):
sys.stderr.write("%s\n" % s)
sys.exit(1)
def xopen(name, mode):
    """Open *name* with *mode*; on IOError report the error and exit.

    Returns the open file object on success.
    """
    try:
        r = open(name, mode)
    except IOError as e:
        # "except ... as" is valid on Python 2.6+ and required on
        # Python 3 (the old "except IOError, e" form is a SyntaxError).
        error_msg_and_die("Can't open '%s': %s" % (name, e))
    return r
def parse_maps(maps_path):
    """Return the set of file paths appearing in a /proc/<pid>/maps file.

    Each line's trailing pathname (from the first '/') is kept; lines
    without a '/' (anonymous mappings) are dropped.  The set()
    uniqifies the list of filenames.
    """
    try:
        f = xopen(maps_path, "r")
        # NOTE(review): the '/' is located in the unstripped line but the
        # slice is taken from the stripped line; this is only correct as
        # long as maps lines never start with whitespace.
        return set([x.strip()[x.find('/'):] for x in f.readlines() if x.find('/') > -1])
    except IOError as e:
        # Python 3 compatible exception syntax (was "except IOError, e").
        error_msg_and_die("Can't read '%s': %s" % (maps_path, e))
if __name__ == "__main__":
    progname = os.path.basename(sys.argv[0])
    help_text = ("Usage: %s [-o OUTFILE] -m PROC_PID_MAP_FILE") % progname
    try:
        opts, args = getopt.getopt(sys.argv[1:], "o:m:h", ["help"])
    except getopt.GetoptError as err:
        # Python 3 compatible exception syntax (was "except ..., err").
        error_msg(err)  # prints something like "option -a not recognized"
        error_msg_and_die(help_text)
    opt_o = None
    memfile = None
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            # print(x) prints the same single value on Python 2 and 3.
            print(help_text)
            exit(0)
        #elif opt == "-v":
        #    verbose += 1
        elif opt == "-o":
            opt_o = arg
        elif opt == "-m":
            memfile = arg
    if not memfile:
        error_msg("MAP_FILE is not specified")
        error_msg_and_die(help_text)
    try:
        # Note that we open -o FILE only when we reach the point
        # when we are definitely going to write something to it
        outfile = sys.stdout
        outname = opt_o
        try:
            dso_paths = parse_maps(memfile)
            for path in dso_paths:
                # Query the RPM database for the package owning each DSO.
                ts = rpm.TransactionSet()
                mi = ts.dbMatch('basenames', path)
                if len(mi):
                    for h in mi:
                        if outname:
                            outfile = xopen(outname, "w")
                            outname = None
                        outfile.write("%s %s (%s) %s\n" %
                            (path,
                             h[rpm.RPMTAG_NEVRA],
                             h[rpm.RPMTAG_VENDOR],
                             h[rpm.RPMTAG_INSTALLTIME])
                        )
        except Exception as ex:
            error_msg_and_die("Can't get the DSO list: %s" % ex)
        outfile.close()
    except:
        # Broad except kept deliberately: any failure here is a write
        # error on the output stream; report it and die.
        if not opt_o:
            opt_o = "<stdout>"
        error_msg_and_die("Error writing to '%s'" % opt_o)
|
rplnt/abrt
|
src/plugins/abrt-action-list-dsos.py
|
Python
|
gpl-2.0
| 2,759
|
from multiprocessing import Process
from multiprocessing import Lock
from multiprocessing import current_process
from multiprocessing import Value
N = 10  # number of items the producer generates and the consumer consumes
def p(almacen, poner, tomar):
    # Producer: generates N values. It may only store into `almacen`
    # (shared Value) after acquiring `poner`, then releases `tomar` so
    # the consumer can read the slot.
    for v in range(N):
        print current_process().name, "produciendo", v
        poner.acquire()
        almacen.value = v
        print current_process().name, "almacenando", v
        tomar.release()
def c(almacen, poner, tomar):
    # Consumer: waits on `tomar` until the producer has stored a value,
    # reads it, then releases `poner` so the next value can be stored.
    for v in range(N):
        tomar.acquire()
        dato = almacen.value
        print current_process().name, "desalmacenando", dato
        poner.release()
        print current_process().name, "consumiendo", dato
if __name__ == "__main__":
    poner = Lock()  # note that a single lock is not enough!!
    tomar = Lock()
    # Start with `tomar` held so the consumer blocks until the producer
    # has stored the first value.
    tomar.acquire()
    almacen = Value('i', -1)  # shared one-slot buffer, -1 = empty
    print "almacen inicial", almacen
    productor = Process(target=p, name="productor", args=(almacen,poner,tomar))
    consumidor = Process(target=c, name="consumidor", args=(almacen,poner,tomar))
    productor.start()
    consumidor.start()
|
tayebzaidi/PPLL_Spr_16
|
productor-consumidor/p_c_lock_value_ok.py
|
Python
|
gpl-3.0
| 1,070
|
# -*- coding: utf-8 -*-
#
# Kivy documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 21 22:37:12 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'autodoc', 'sphinx.ext.todo', 'preprocess', 'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode', 'sphinx.ext.mathjax']
# Todo configuration
todo_include_todos = True
# XXX HACK mathieu: monkey patch the autodoc module, to give a better priority
# for ClassDocumenter, or the cython class will be documented as AttributeClass
import sphinx.ext.autodoc
sphinx.ext.autodoc.ClassDocumenter.priority = 10
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Kivy'
copyright = '2010, The Kivy Authors'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# KIVY_DOC_INCLUDE must be set before importing kivy so the package
# exposes the pieces the docs need.
os.environ['KIVY_DOC_INCLUDE'] = '1'
import kivy
print(kivy.__file__)
# Both |version| and |release| track the installed kivy version.
version = kivy.__version__
release = kivy.__version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# suppress exclusion warnings
exclude_patterns = ['guide/layouts.rst', 'api-kivy.lib.osc*']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'kivy_pygments_theme.KivyStyle'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'fresh.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = '.static/logo-kivy.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Kivydoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
    ('index', 'Kivy.tex', 'Kivy Documentation',
     'The Kivy Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
latex_elements = {
    'fontpkg': r'\usepackage{mathpazo}',
    'papersize': 'a4paper',
    'pointsize': '10pt',
    'preamble': r'\usepackage{kivystyle}'
}
latex_additional_files = ['kivystyle.sty',
                          '../../kivy/data/logo/kivy-icon-512.png']
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
woylaski/notebook
|
graphic/kivy-master/doc/sources/conf.py
|
Python
|
gpl-3.0
| 6,263
|
from __future__ import absolute_import, print_function, division
import numpy as np
import numpy
from six.moves import xrange
import theano
from theano.tensor import basic
from theano.tensor import nlinalg # noqa
from theano import gof, scalar
from theano.gof import Generic
from theano import gradient
from theano.gradient import DisconnectedType, disconnected_type
# Backwards-compatible alias: the rest of this module refers to the
# basic tensor module as `tensor`.
tensor = basic
class CpuContiguous(theano.Op):
    """
    Check to see if the input is c-contiguous,
    if it is, do nothing, else return a contiguous array.
    """
    __props__ = ()
    # The output may alias the input (when it is already contiguous).
    view_map = {0: [0]}
    def make_node(self, x):
        x_ = theano.tensor.as_tensor_variable(x)
        return theano.Apply(self, [x_], [x_.type()])
    def perform(self, node, inputs, output_storage):
        x, = inputs
        y = output_storage[0]
        # If the input is already contiguous pass it through unchanged,
        # else replace it with a contiguous copy.
        if not x.flags['C_CONTIGUOUS']:
            x = x.copy()
        assert x.flags['C_CONTIGUOUS']
        y[0] = x
    def grad(self, inputs, dout):
        # Identity gradient: the op only changes memory layout.
        return [theano.tensor.as_tensor_variable(dout[0])]
    def c_code(self, node, name, inames, onames, sub):
        x, = inames
        y, = onames
        code = """
            if (!PyArray_CHKFLAGS(%(x)s, NPY_ARRAY_C_CONTIGUOUS)){
                // check to see if output is contiguous first
                if (%(y)s != NULL &&
                    PyArray_CompareLists(PyArray_DIMS(%(y)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s)) &&
                    PyArray_CHKFLAGS(%(y)s, NPY_ARRAY_C_CONTIGUOUS)){
                    PyArray_CopyInto(%(y)s, %(x)s);
                }
                else{
                    Py_XDECREF(%(y)s);
                    %(y)s = PyArray_GETCONTIGUOUS(%(x)s);
                }
            }
            else{
                Py_XINCREF(%(x)s);
                Py_XDECREF(%(y)s);
                %(y)s = %(x)s;
            }
            """ % locals()
        return code
    def c_code_cache_version(self):
        return (1,)
# Module-level singleton instance of the op (the op has no state).
cpu_contiguous = CpuContiguous()
class SearchsortedOp(theano.Op):
    """Wrapper of numpy.searchsorted.
    For full documentation, see :func:`searchsorted`.
    See Also
    --------
    searchsorted : numpy-like function to use the SearchsortedOp
    """
    # ``side`` is forwarded to perform()/the C code at runtime via the
    # op-params mechanism (see get_params).
    params_type = Generic()
    __props__ = ("side", )
    def __init__(self, side='left'):
        if side == 'left' or side == 'right':
            self.side = side
        else:
            raise ValueError('\'%(side)s\' is an invalid value for keyword \'side\''
                             % locals())
    def get_params(self, node):
        # Runtime parameter consumed by perform()/c_init_code_struct().
        return self.side
    def make_node(self, x, v, sorter=None):
        x = basic.as_tensor(x, ndim=1)
        v = basic.as_tensor(v)
        # The output has the shape of ``v`` but always dtype int64.
        out_type = v.type.clone(dtype='int64')
        if sorter is None:
            return theano.Apply(self, [x, v], [out_type()])
        else:
            sorter = basic.as_tensor(sorter, ndim=1)
            if (theano.configdefaults.python_int_bitwidth() == 32 and
                    sorter.dtype == 'int64'):
                raise TypeError(
                    "numpy.searchsorted with Python 32bit do not support a"
                    " sorter of int64.")
            if sorter.type not in basic.int_vector_types:
                raise TypeError('sorter must be an integer vector',
                                sorter.type)
            return theano.Apply(self, [x, v, sorter], [out_type()])
    def infer_shape(self, node, shapes):
        # One insertion index per element of ``v``.
        return [shapes[1]]
    def perform(self, node, inputs, output_storage, params):
        x = inputs[0]
        v = inputs[1]
        if len(node.inputs) == 3:
            sorter = inputs[2]
        else:
            sorter = None
        z = output_storage[0]
        z[0] = np.searchsorted(x, v, side=params, sorter=sorter).astype(
            node.outputs[0].dtype)
    def c_support_code_struct(self, node, name):
        # Per-thunk flag caching the comparison of ``side`` with "right";
        # initialized once in c_init_code_struct below.
        return """
            int right_%(name)s;
        """ % locals()
    def c_init_code_struct(self, node, name, sub):
        side = sub['params']
        fail = sub['fail']
        # PyUnicode_Compare returns 0 on equality, so right_<name> is
        # non-zero exactly when side == 'left' (hence the seemingly
        # inverted ternary in c_code below).
        return """
            PyObject* tmp_%(name)s = PyUnicode_FromString("right");
            if (tmp_%(name)s == NULL)
                %(fail)s;
            right_%(name)s = PyUnicode_Compare(%(side)s, tmp_%(name)s);
            Py_DECREF(tmp_%(name)s);
        """ % locals()
    def c_code(self, node, name, inames, onames, sub):
        sorter = None
        if len(node.inputs) == 3:
            x, v, sorter = inames
        else:
            x, v = inames
        if not sorter:
            sorter = "NULL"
        z, = onames
        fail = sub['fail']
        return """
        Py_XDECREF(%(z)s);
        %(z)s = (PyArrayObject*) PyArray_SearchSorted(%(x)s, (PyObject*) %(v)s,
                                                      right_%(name)s ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT, (PyObject*) %(sorter)s);
        if (!%(z)s)
            %(fail)s;
        if (PyArray_TYPE(%(z)s) != NPY_INT64){
            PyObject * tmp = PyArray_Cast(%(z)s, NPY_INT64);
            Py_XDECREF(%(z)s);
            %(z)s = (PyArrayObject*) tmp;
        }
        """ % locals()
    def c_code_cache_version(self):
        return (2,)
    def grad(self, inputs, output_gradients):
        # The output is an integer index, so the gradient w.r.t. x and v
        # is zero everywhere; the sorter input is disconnected.
        num_ins = len(inputs)
        if num_ins == 3:
            x, v, sorter = inputs
        else:
            x, v = inputs
        x_grad = gradient._float_zeros_like(x)
        v_grad = gradient._float_zeros_like(v)
        if num_ins == 3:
            return [x_grad, v_grad, disconnected_type()]
        else:
            return [x_grad, v_grad]
def searchsorted(x, v, side='left', sorter=None):
    """Find indices where elements should be inserted to maintain order.

    Symbolic wrapper around numpy.searchsorted: returns the indices into
    the sorted 1-D tensor `x` at which the elements of `v` could be
    inserted while keeping `x` ordered.

    Parameters
    ----------
    x: 1-D tensor (array-like)
        Input array. If `sorter` is None, then it must be sorted in
        ascending order, otherwise `sorter` must be an array of indices
        which sorts it.
    v: tensor (array-like)
        Contains the values to be inserted into `x`.
    side: {'left', 'right'}, optional.
        If 'left' (default), the index of the first suitable
        location found is given. If 'right', return the last such index. If
        there is no suitable index, return either 0 or N (where N is the length
        of `x`).
    sorter: 1-D tensor of integers (array-like), optional
        Contains indices that sort array `x` into ascending order.
        They are typically the result of argsort.

    Returns
    -------
    indices : tensor of integers (int64)
        Array of insertion points with the same shape as `v`.

    See Also
    --------
    `numpy.searchsorted <https://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.searchsorted.html>`_

    Notes
    -----
    * Binary search is used to find the required insertion points.
    * This Op is working **only on CPU** currently.

    Examples
    --------
    >>> from theano import tensor
    >>> x = tensor.dvector()
    >>> idx = x.searchsorted(3)
    >>> idx.eval({x: [1,2,3,4,5]})
    array(2)
    >>> tensor.extra_ops.searchsorted([1,2,3,4,5], 3).eval()
    array(2)
    >>> tensor.extra_ops.searchsorted([1,2,3,4,5], 3, side='right').eval()
    array(3)
    >>> tensor.extra_ops.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]).eval()
    array([0, 5, 1, 2])

    .. versionadded:: 0.9
    """
    op = SearchsortedOp(side=side)
    return op(x, v, sorter)
class CumOp(theano.Op):
    # See function cumsum/cumprod for docstring
    # mode='add' -> cumulative sum, mode='mul' -> cumulative product.
    __props__ = ("axis", "mode")
    def __init__(self, axis=None, mode='add'):
        if mode not in ('add', 'mul'):
            raise ValueError('%s: Unknown mode "%s"' % (type(self).__name__, mode))
        self.axis = axis
        self.mode = mode
    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        out_type = x.type()
        if self.axis is None:
            out_type = theano.tensor.vector(dtype=x.dtype)  # Flatten
        elif self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError('axis(={0}) out of bounds'.format(self.axis))
        return theano.Apply(self, [x], [out_type])
    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        z = output_storage[0]
        # Dispatch to the matching numpy implementation.
        z[0] = {'add': np.cumsum, 'mul': np.cumprod}[self.mode](x, axis=self.axis)
    def grad(self, inputs, output_gradients):
        x, = inputs
        gi, = output_gradients
        if self.axis is None:
            # Flattened case: the adjoint of cumsum is a reversed cumsum;
            # for cumprod the standard fx*gi trick is used (note: divides
            # by x, so it is only valid where x is nonzero).
            if self.mode == 'add':
                return [cumsum(gi[::-1])[::-1].reshape(x.shape)]
            elif self.mode == 'mul':
                fx = cumprod(x, axis=self.axis)
                return [cumsum(
                    (fx * gi)[::-1])[::-1].reshape(x.shape) / x]
            else:
                raise NotImplementedError(
                    '%s: unknown gradient for mode "%s"' %
                    (type(self).__name__, self.mode))
        reverse_slicing = [slice(None, None, None)] * gi.ndim
        reverse_slicing[self.axis] = slice(None, None, -1)
        reverse_slicing = tuple(reverse_slicing)
        # We need to reverse the gradients along ``self.axis``,
        #  compute cumsum, then reverse again
        if self.mode == 'add':
            return [cumsum(gi[reverse_slicing], self.axis)[reverse_slicing]]
        elif self.mode == 'mul':
            fx = cumprod(x, axis=self.axis)
            return [cumsum(
                (fx * gi)[reverse_slicing], self.axis)[reverse_slicing] / x]
        else:
            raise NotImplementedError(
                '%s: unknown gradient for mode "%s"' %
                (type(self).__name__, self.mode))
    def infer_shape(self, node, shapes):
        if self.axis is None:
            return [(tensor.prod(shapes[0]),)]  # Flatten
        return shapes
    def c_code(self, node, name, inames, onames, sub):
        # Uses numpy's C API (PyArray_CumSum/PyArray_CumProd) directly;
        # NPY_MAXDIMS stands for "flatten" in the axis=None branch.
        x, = inames
        z, = onames
        axis = self.axis
        fail = sub['fail']
        func = dict(mul='CumProd', add='CumSum')[self.mode]
        if self.axis is None or (self.axis == 0 and node.inputs[0].ndim == 1):
            code = """
                npy_intp shape[1] = { PyArray_SIZE(%(x)s) };
                if(!(%(z)s && PyArray_DIMS(%(z)s)[0] == shape[0]))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_%(func)s(
                        %(x)s, NPY_MAXDIMS,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                        %(fail)s;
                    }
                    // Because PyArray_%(func)s returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        else:
            code = """
                if(!(%(z)s && PyArray_CompareLists(PyArray_DIMS(%(z)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s))))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s), PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_%(func)s(
                        %(x)s, %(axis)s,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                        %(fail)s;
                    }
                    // Because PyArray_%(func)s returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        return code
    def c_code_cache_version(self):
        return (7,)
    def __str__(self):
        return "%s{%s, %s}" % (self.__class__.__name__, self.axis, self.mode)
def cumsum(x, axis=None):
    """Compute the cumulative sum of the elements along a given axis.

    Wrapping of numpy.cumsum.

    Parameters
    ----------
    x
        Input tensor variable.
    axis
        Axis along which the cumulative sum is taken. When None (the
        default) the input is flattened before summing.

    .. versionadded:: 0.7
    """
    op = CumOp(axis=axis, mode='add')
    return op(x)
def cumprod(x, axis=None):
    """Compute the cumulative product of the elements along a given axis.

    Wrapping of numpy.cumprod.

    Parameters
    ----------
    x
        Input tensor variable.
    axis
        Axis along which the cumulative product is taken. When None (the
        default) the input is flattened before multiplying.

    .. versionadded:: 0.7
    """
    op = CumOp(axis=axis, mode='mul')
    return op(x)
# CumsumOp and CumprodOp are for compatibility with old version,
# just in case unpickling a theano function with old Ops.
class CumsumOp(theano.Op):
    # Legacy stub: unpickling an old graph that used CumsumOp produces a
    # CumOp with mode='add' instead (__new__ never returns a CumsumOp).
    __props__ = ("axis",)
    def __new__(typ, *args, **kwargs):
        obj = object.__new__(CumOp, *args, **kwargs)
        obj.mode = 'add'
        return obj
class CumprodOp(theano.Op):
    # Legacy stub: unpickling an old graph that used CumprodOp produces a
    # CumOp with mode='mul' instead (__new__ never returns a CumprodOp).
    __props__ = ("axis",)
    def __new__(typ, *args, **kwargs):
        obj = object.__new__(CumOp, *args, **kwargs)
        obj.mode = 'mul'
        return obj
class DiffOp(theano.Op):
    # See function diff for docstring
    __props__ = ("n", "axis")
    def __init__(self, n=1, axis=-1):
        self.n = n
        self.axis = axis
        # numpy return a view in that case (n == 0), so declare the output
        # as a view of the input for Theano's aliasing bookkeeping.
        # TODO, make an optimization that remove this op in this case.
        if n == 0:
            self.view_map = {0: [0]}
    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        return theano.Apply(self, [x], [x.type()])
    def perform(self, node, inputs, output_storage):
        # Delegate to numpy.diff for the forward computation.
        x = inputs[0]
        z = output_storage[0]
        z[0] = np.diff(x, n=self.n, axis=self.axis)
    def grad(self, inputs, outputs_gradients):
        # Bug fixes vs. the previous version:
        # * the parameter ``inputs`` was rebound to its first element,
        #   shadowing the list; use a distinct local name instead.
        # * the two message literals concatenated without a separating
        #   space, producing "...inputs withnumber of dimension...".
        x = inputs[0]
        if x.ndim != 1:
            raise NotImplementedError("Grad is not implemented for inputs "
                                      "with number of dimension other than 1.")
        z = outputs_gradients[0]
        def _grad_helper(z):
            # Adjoint of a single first-order diff on a vector: zero-pad at
            # both ends and subtract, i.e. grad[i] = z[i-1] - z[i].
            pre = basic.concatenate([[0.], z])
            app = basic.concatenate([z, [0.]])
            return pre - app
        # Apply the adjoint n times, matching the n-th order forward diff.
        for k in range(self.n):
            z = _grad_helper(z)
        return [z]
    def infer_shape(self, node, ins_shapes):
        # Differencing shrinks the chosen axis by exactly n elements.
        i0_shapes = ins_shapes[0]
        out_shape = list(i0_shapes)
        out_shape[self.axis] = out_shape[self.axis] - self.n
        return [out_shape]
def diff(x, n=1, axis=-1):
    """Calculate the n-th order discrete difference along the given axis.

    The first order difference is out[i] = a[i + 1] - a[i] along the given
    axis; higher orders are obtained by applying diff recursively.
    Wrapping of numpy.diff.

    Parameters
    ----------
    x
        Input tensor variable.
    n
        The number of times values are differenced, default is 1.
    axis
        The axis along which the difference is taken, default is the last axis.

    .. versionadded:: 0.6
    """
    op = DiffOp(n=n, axis=axis)
    return op(x)
def bincount(x, weights=None, minlength=None, assert_nonneg=False):
    """Count the number of occurrences of each value in an int vector.

    The number of bins (of size 1) is one larger than the largest value in
    `x`, or at least `minlength` when given. Bin ``n`` holds the number of
    occurrences of ``n`` in `x`; with `weights`, each occurrence at
    position ``i`` contributes ``weights[i]`` instead of 1.

    Parameters
    ----------
    x : 1 dimension, nonnegative ints
    weights : array of the same shape as x with corresponding weights.
        Optional.
    minlength : A minimum number of bins for the output array.
        Optional.
    assert_nonneg : A flag that inserts an assert_op to check if
        every input x is nonnegative.
        Optional.

    .. versionadded:: 0.6
    """
    if x.ndim != 1:
        raise TypeError("Inputs must be of dimension 1.")
    if assert_nonneg:
        from theano.tensor.opt import Assert
        x = Assert('Input to bincount has negative values!')(
            x, theano.tensor.all(x >= 0))
    max_value = theano.tensor.cast(x.max() + 1, 'int64')
    if minlength is not None:
        max_value = theano.tensor.maximum(max_value, minlength)
    # Note: we do not use inc_subtensor(out[x], ...) here, since out[x]
    # raises an exception if the indices (x) are int8.
    if weights is None:
        out_dtype, increment = x.dtype, 1
    else:
        out_dtype, increment = weights.dtype, weights
    out = theano.tensor.zeros([max_value], dtype=out_dtype)
    return theano.tensor.advanced_inc_subtensor1(out, increment, x)
def squeeze(x):
    """
    Remove broadcastable dimensions from the shape of an array.

    Returns the input with every broadcastable dimension dropped; the
    result is always `x` itself or a view into `x`.

    .. versionadded:: 0.6

    Parameters
    ----------
    x
        Input data, tensor variable.

    Returns
    -------
    object
        `x` without its broadcastable dimensions.
    """
    kept_axes = [axis for axis in range(x.ndim) if not x.broadcastable[axis]]
    return x.dimshuffle(kept_axes)
def compress(condition, x, axis=None):
    """
    Return selected slices of an array along the given axis.

    Keeps only the slices of `x` (along `axis`) whose corresponding entry
    in `condition` is non-zero; with no axis, `x` is flattened first.
    Corresponds to numpy.compress.

    .. versionadded:: 0.7

    Parameters
    ----------
    x
        Input data, tensor variable.
    condition
        1 dimensional array of non-zero and zero values
        corresponding to indices of slices along a selected axis.

    Returns
    -------
    object
        `x` with selected slices.
    """
    kept_indices = theano.tensor.basic.flatnonzero(condition)
    return x.take(kept_indices, axis=axis)
class RepeatOp(theano.Op):
    # See the repeat function for docstring
    __props__ = ("axis",)
    def __init__(self, axis=None):
        self.axis = axis
    def make_node(self, x, repeats):
        x = basic.as_tensor_variable(x)
        repeats = basic.as_tensor_variable(repeats)
        if repeats.dtype not in tensor.integer_dtypes:
            raise TypeError("repeats.dtype must be an integer.")
        # Some dtypes are not supported by numpy's implementation of repeat.
        # Until another one is available, we should fail at graph construction
        # time, not wait for execution.
        ptr_bitwidth = theano.configdefaults.local_bitwidth()
        if ptr_bitwidth == 64:
            numpy_unsupported_dtypes = ('uint64',)
        if ptr_bitwidth == 32:
            numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
        if repeats.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                ("dtypes %s are not supported by numpy.repeat "
                 "for the 'repeats' parameter, "
                 % str(numpy_unsupported_dtypes)), repeats.dtype)
        if self.axis is None:
            # Flattened output: a single non-broadcastable dimension.
            broadcastable = [False]
        else:
            # A repeat count constant-folded to 1 leaves the broadcast
            # pattern intact; anything else clears it on ``axis``.
            try:
                const_reps = basic.get_scalar_constant_value(repeats)
            except basic.NotScalarConstantError:
                const_reps = None
            if const_reps == 1:
                broadcastable = x.broadcastable
            else:
                broadcastable = list(x.broadcastable)
                broadcastable[self.axis] = False
        out_type = theano.tensor.TensorType(x.dtype, broadcastable)
        return theano.Apply(self, [x, repeats], [out_type()])
    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        repeats = inputs[1]
        z = output_storage[0]
        z[0] = np.repeat(x, repeats=repeats, axis=self.axis)
    def connection_pattern(self, node):
        # The output is connected to x but not to the (integer) repeats.
        return [[True], [False]]
    def grad(self, inputs, gout):
        (x, repeats) = inputs
        (gz,) = gout
        if repeats.ndim == 0:
            # Scalar repeats: reshape gz so the repetitions form their own
            # axis, then sum them back onto the original element.
            if self.axis is None:
                axis = x.ndim
            else:
                if self.axis >= 0:
                    axis = self.axis + 1
                else:
                    axis = self.axis + x.ndim + 1
            shape = [x.shape[k] for k in range(x.ndim)]
            shape.insert(axis, repeats)
            return [gz.reshape(shape, x.ndim + 1).sum(axis=axis),
                    DisconnectedType()()]
        elif repeats.ndim == 1:
            # For this implementation, we would need to specify the length
            # of repeats in order to split gz in the right way to sum
            # the good part.
            raise NotImplementedError()
        else:
            raise ValueError()
    def infer_shape(self, node, ins_shapes):
        i0_shapes = ins_shapes[0]
        repeats = node.inputs[1]
        out_shape = list(i0_shapes)
        # uint64 shape are not supported.
        dtype = None
        if repeats.dtype in ['uint8', 'uint16', 'uint32']:
            dtype = 'int64'
        if self.axis is None:
            if repeats.ndim == 0:
                if len(i0_shapes) == 0:
                    out_shape = [repeats]
                else:
                    res = 1
                    for d in i0_shapes:
                        res = res * d
                    out_shape = (res * repeats, )
            else:
                out_shape = [theano.tensor.sum(repeats, dtype=dtype)]
        else:
            if repeats.ndim == 0:
                out_shape[self.axis] = out_shape[self.axis] * repeats
            else:
                out_shape[self.axis] = theano.tensor.sum(repeats, dtype=dtype)
        return [out_shape]
def repeat(x, repeats, axis=None):
    """Repeat elements of an array.
    It returns an array which has the same shape as `x`, except
    along the given axis. The axis is used to speficy along which
    axis to repeat values. By default, use the flattened input
    array, and return a flat output array.
    The number of repetitions for each element is `repeat`.
    `repeats` is broadcasted to fit the length of the given `axis`.
    Parameters
    ----------
    x
        Input data, tensor variable.
    repeats
        int, scalar or tensor variable
    axis : int, optional
    See Also
    --------
    tensor.tile
    .. versionadded:: 0.6
    """
    repeats = tensor.as_tensor_variable(repeats)
    if repeats.ndim > 1:
        raise ValueError('The dimension of repeats should not exceed 1.')
    if repeats.ndim == 1 and not repeats.broadcastable[0]:
        # Genuine per-element repeat counts: only RepeatOp handles this.
        return RepeatOp(axis=axis)(x, repeats)
    else:
        # Scalar (or broadcastable length-1) repeats: implement with
        # alloc + reshape, which existing graph optimizations handle well.
        if repeats.ndim == 1:
            repeats = repeats[0]
        if x.dtype == 'uint64':
            raise TypeError("theano.tensor.repeat don't support dtype uint64")
        if axis is None:
            axis = 0
            x = x.flatten()
        else:
            if axis >= x.ndim:
                raise ValueError('Axis should not exceed x.ndim-1.')
            if axis < 0:
                axis = x.ndim + axis
        shape = [x.shape[i] for i in xrange(x.ndim)]
        # shape_ is the shape of the intermediate tensor which has
        # an additional dimension comparing to x. We use alloc to
        # allocate space for this intermediate tensor to replicate x
        # along that additional dimension.
        shape_ = shape[:]
        shape_.insert(axis + 1, repeats)
        # shape is now the shape of output, where shape[axis] becomes
        # shape[axis]*repeats.
        shape[axis] = shape[axis] * repeats
        # dims_ is the dimension of that intermediate tensor.
        dims_ = list(numpy.arange(x.ndim))
        dims_.insert(axis + 1, 'x')
        # After the original tensor is duplicated along the additional
        # dimension, we reshape it to the expected output shape, and
        # return the output z.
        z = tensor.alloc(x.dimshuffle(*dims_), *shape_).reshape(shape)
        return z
class Bartlett(gof.Op):
    # See function bartlett for docstring
    __props__ = ()
    def make_node(self, M):
        # M must be an integer scalar; the output is a float64 vector.
        M = tensor.as_tensor_variable(M)
        if M.ndim != 0:
            raise TypeError('%s only works on scalar input'
                            % self.__class__.__name__)
        elif M.dtype not in theano.tensor.integer_dtypes:
            # dtype is a theano attribute here
            raise TypeError('%s only works on integer input'
                            % self.__class__.__name__)
        return gof.Apply(self, [M], [tensor.dvector()])
    def perform(self, node, inputs, out_):
        M = inputs[0]
        out, = out_
        out[0] = numpy.bartlett(M)
    def infer_shape(self, node, in_shapes):
        # numpy.bartlett returns an empty vector for M <= 0, so clamp the
        # symbolic output length at zero for negative M.
        temp = node.inputs[0]
        M = tensor.switch(tensor.lt(temp, 0),
                          tensor.cast(0, temp.dtype),
                          temp)
        return [[M]]
    def grad(self, inputs, output_grads):
        # Input is an integer scalar: no gradient is defined.
        return [None for i in inputs]
bartlett_ = Bartlett()
# I create a function only to have the doc show well.
def bartlett(M):
    """
    Return the Bartlett spectral window in the time-domain.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero. It is often used in signal processing
    for tapering a signal, without generating too much ripple in the
    frequency domain.

    .. versionadded:: 0.6

    Parameters
    ----------
    M : integer scalar
        Number of points in the output window. If zero or less,
        an empty vector is returned.

    Returns
    -------
    vector of doubles
        The triangular window, with the maximum value normalized to one
        (the value one appears only if the number of samples is odd), with
        the first and last samples equal to zero.
    """
    # Delegate to the module-level singleton op.
    return bartlett_(M)
class FillDiagonal(gof.Op):
    # See function fill_diagonal for docstring
    __props__ = ()
    def infer_shape(self, node, in_shapes):
        # Output has exactly the shape of the input array.
        return [in_shapes[0]]
    def make_node(self, a, val):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        if a.ndim < 2:
            raise TypeError('%s: first parameter must have at least'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'
                            % self.__class__.__name__)
        # val may only be upcast to a's dtype, never the other way around.
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError('%s: type of second parameter must be the same as'
                            ' the first\'s' % self.__class__.__name__)
        return gof.Apply(self, [a, val], [a.type()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0].copy()
        val = inputs[1]
        if a.ndim == 2:
            # numpy.fill_diagonal up to date(including 1.6.2) have a
            # bug for tall matrix.
            # For 2-d arrays, we accept rectangular ones.
            step = a.shape[1] + 1
            end = a.shape[1] * a.shape[1]
            # Write the value out into the diagonal.
            a.flat[:end:step] = val
        else:
            numpy.fill_diagonal(a, val)
        output_storage[0][0] = a
    def grad(self, inp, cost_grad):
        """
        Notes
        -----
        The gradient is currently implemented for matrices only.
        """
        a, val = inp
        grad = cost_grad[0]
        if (a.dtype.startswith('complex')):
            return [None, None]
        elif a.ndim > 2:
            raise NotImplementedError('%s: gradient is currently implemented'
                                      ' for matrices only' %
                                      self.__class__.__name__)
        wr_a = fill_diagonal(grad, 0)  # valid for any number of dimensions
        # diag is only valid for matrices
        wr_val = theano.tensor.nlinalg.diag(grad).sum()
        return [wr_a, wr_val]
fill_diagonal_ = FillDiagonal()
# I create a function only to have the doc show well.
def fill_diagonal(a, val):
    """
    Return a copy of an array with every element of its main diagonal
    set to a given scalar value.

    .. versionadded:: 0.6

    Parameters
    ----------
    a
        Rectangular array of at least two dimensions.
    val
        Scalar value to fill the diagonal whose type must be
        compatible with that of array 'a' (i.e. 'val' cannot be viewed
        as an upcast of 'a').

    Returns
    -------
    array
        An array identical to 'a' except that its main diagonal
        is filled with scalar 'val'. (For an array 'a' with a.ndim >=
        2, the main diagonal is the list of locations a[i, i, ..., i]
        (i.e. with indices all identical).)

    Support rectangular matrix and tensor with more than 2 dimensions
    if the later have all dimensions are equals.
    """
    # Delegate to the module-level singleton op.
    return fill_diagonal_(a, val)
class FillDiagonalOffset(gof.Op):
    # See function fill_diagonal_offset for docstring
    __props__ = ()
    def infer_shape(self, node, in_shapes):
        # Output has exactly the shape of the input matrix.
        return [in_shapes[0]]
    def make_node(self, a, val, offset):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        offset = tensor.as_tensor_variable(offset)
        if a.ndim != 2:
            raise TypeError('%s: first parameter must have exactly'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'
                            % self.__class__.__name__)
        elif offset.ndim != 0:
            raise TypeError('%s: third parameter must be a scalar'
                            % self.__class__.__name__)
        # val may only be upcast to a's dtype, never the other way around.
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError('%s: type of second parameter must be the same'
                            ' as the first\'s' % self.__class__.__name__)
        elif offset.dtype not in theano.tensor.integer_dtypes:
            raise TypeError('%s: type of third parameter must be as integer'
                            ' use theano.tensor.cast( input, \'int32/int64\')'
                            % self.__class__.__name__)
        return gof.Apply(self, [a, val, offset], [a.type()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0].copy()
        val = inputs[1]
        offset = inputs[2]
        height, width = a.shape
        """
        Notes
        -----
        The fill_diagonal only support rectangular matrix. The output
        of tall matrix is "wrapped", which is an option in numpy 1.9.0
        but was regarded as a bug in numpy 1.6.2. Here I implement the
        fill_diagonal_offset with unwrapped output, so fill_diagonal_offset
        supports tall matrix.(This make a little difference between the output
        of fill_diagonal and fill_diagonal_offset only in the case of tall
        matrix)
        """
        if offset >= 0:
            start = offset
            num_of_step = min(min(width, height), width - offset)
        else:
            start = - offset * a.shape[1]
            num_of_step = min(min(width, height), height + offset)
        step = a.shape[1] + 1
        end = start + step * num_of_step
        # Write the value out into the diagonal.
        a.flat[start:end:step] = val
        output_storage[0][0] = a
    def grad(self, inp, cost_grad):
        """
        Notes
        -----
        The gradient is currently implemented for matrices only.
        """
        a, val, offset = inp
        grad = cost_grad[0]
        if (a.dtype.startswith('complex')):
            # Bug fix: the op has three inputs (a, val, offset), so three
            # gradient entries must be returned; the previous version
            # returned only two, which broke theano.grad for complex input.
            return [None, None, None]
        # only valid for matrices
        wr_a = fill_diagonal_offset(grad, 0, offset)
        # Symbolic re-derivation of the flat slice written in perform(),
        # valid for both signs of offset.
        offset_abs = basic.abs_(offset)
        pos_offset_flag = basic.ge(offset, 0)
        neg_offset_flag = basic.lt(offset, 0)
        min_wh = basic.minimum(width, height)
        start = offset * pos_offset_flag + offset_abs * width * neg_offset_flag
        num_of_step = basic.minimum(min_wh, width * pos_offset_flag +
                                    height * neg_offset_flag - offset_abs)
        step = a.shape[1] + 1
        end = start + step * num_of_step
        # input of slice should be integer
        start = basic.cast(start, 'int32')
        step = basic.cast(step, 'int32')
        end = basic.cast(end, 'int32')
        wr_val = grad.flatten()[start:end:step].sum()
        wr_offset = theano.gradient.grad_undefined(
            self, 2, offset,
            "offset is not defined for non-integer offset so"
            " fill_diagonal_offset(a,val,offset+eps) is undefined")
        return [wr_a, wr_val, wr_offset]
fill_diagonal_offset_ = FillDiagonalOffset()
def fill_diagonal_offset(a, val, offset):
    """
    Return a copy of a matrix with every element of one of its diagonals
    set to a given scalar value.

    Parameters
    ----------
    a
        Rectangular array of two dimensions.
    val
        Scalar value to fill the diagonal whose type must be
        compatible with that of array 'a' (i.e. 'val' cannot be viewed
        as an upcast of 'a').
    offset
        Scalar value Offset of the diagonal from the main
        diagonal. Can be positive or negative integer.

    Returns
    -------
    array
        An array identical to 'a' except that its offset diagonal
        is filled with scalar 'val'. The output is unwrapped.
    """
    # Delegate to the module-level singleton op.
    return fill_diagonal_offset_(a, val, offset)
def to_one_hot(y, nb_class, dtype=None):
    """
    Return a matrix whose rows are the one hot encodings of the
    corresponding elements of y.

    Parameters
    ----------
    y
        A vector of integer value between 0 and nb_class - 1.
    nb_class : int
        The number of class in y.
    dtype : data-type
        The dtype of the returned matrix. Default floatX.

    Returns
    -------
    object
        A matrix of shape (y.shape[0], nb_class), where each row ``i`` is
        the one hot encoding of the corresponding ``y[i]`` value.
    """
    row_idx = theano.tensor.arange(y.shape[0])
    blank = theano.tensor.zeros((y.shape[0], nb_class), dtype=dtype)
    # Set a single 1 per row, at column y[i].
    return theano.tensor.set_subtensor(blank[row_idx, y], 1)
class Unique(theano.Op):
    """
    Wraps numpy.unique. This op is not implemented on the GPU.
    Examples
    --------
    >>> import numpy as np
    >>> import theano
    >>> x = theano.tensor.vector()
    >>> f = theano.function([x], Unique(True, True, False)(x))
    >>> f([1, 2., 3, 4, 3, 2, 1.])
    [array([ 1.,  2.,  3.,  4.]), array([0, 1, 2, 3]), array([0, 1, 2, 3, 2, 1, 0])]
    >>> y = theano.tensor.matrix()
    >>> g = theano.function([y], Unique(True, True, False)(y))
    >>> g([[1, 1, 1.0], (2, 3, 3.0)])
    [array([ 1.,  2.,  3.]), array([0, 3, 4]), array([0, 0, 0, 1, 2, 2])]
    """
    __props__ = ("return_index", "return_inverse", "return_counts")
    def __init__(self, return_index=False, return_inverse=False,
                 return_counts=False):
        self.return_index = return_index
        self.return_inverse = return_inverse
        self.return_counts = return_counts
        # return_counts only exists in numpy >= 1.9; fail early otherwise.
        numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
        if self.return_counts and bool(numpy_ver < [1, 9]):
            raise RuntimeError(
                "Numpy version = " + np.__version__ +
                ". Option 'return_counts=True' works starting"
                " from version 1.9.0.")
    def make_node(self, x):
        # Outputs follow numpy.unique's ordering: unique values first,
        # then (optionally) index, inverse and counts vectors, all int64.
        x = basic.as_tensor_variable(x)
        outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()]
        typ = basic.TensorType(broadcastable=[False], dtype='int64')
        if self.return_index:
            outputs.append(typ())
        if self.return_inverse:
            outputs.append(typ())
        if self.return_counts:
            outputs.append(typ())
        return theano.Apply(self, [x], outputs)
    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        z = output_storage
        param = {}
        if self.return_index:
            param['return_index'] = True
        if self.return_inverse:
            param['return_inverse'] = True
        if self.return_counts:
            param['return_counts'] = True
        outs = np.unique(x, **param)
        # With no optional outputs numpy returns a bare array instead of
        # a tuple, so the unpacking loop cannot be used.
        if ((not self.return_inverse) and
                (not self.return_index) and
                (not self.return_counts)):
            z[0][0] = outs
        else:
            for i in range(len(outs)):
                z[i][0] = outs[i]
    def infer_shape(self, node, i0_shapes):
        ret = node.fgraph.shape_feature.default_infer_shape(node, i0_shapes)
        if self.return_inverse:
            # The inverse-index output always has as many elements as the
            # flattened input; its slot (1 or 2) depends on whether the
            # return_index output precedes it.
            shape = (basic.prod(i0_shapes[0]), )
            if self.return_index:
                ret[2] = shape
                return ret
            ret[1] = shape
            return ret
        return ret
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/theano/tensor/extra_ops.py
|
Python
|
agpl-3.0
| 37,957
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from authlib.oauth2.rfc6749 import list_to_scope
from authlib.oauth2.rfc6749.models import AuthorizationCodeMixin, TokenMixin
from sqlalchemy.dialects.postgresql import ARRAY, INET
from indico.core.db import db
from indico.core.db.sqlalchemy import UTCDateTime
from indico.util.date_time import now_utc
from indico.util.passwords import TokenProperty
class TokenModelBase(TokenMixin, db.Model):
    """Abstract base for token models implementing authlib's ``TokenMixin``."""
    __abstract__ = True
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    # Only a hash of the token is stored; the clear-text token is handled
    # through the ``access_token`` TokenProperty below.
    access_token_hash = db.Column(
        db.String,
        unique=True,
        index=True,
        nullable=False
    )
    # Stored sorted (see the ``scopes`` setter) for a deterministic
    # DB representation.
    _scopes = db.Column(
        'scopes',
        ARRAY(db.String),
        nullable=False,
        default=[]
    )
    created_dt = db.Column(
        UTCDateTime,
        nullable=False,
        default=now_utc
    )
    # Usage-tracking columns.
    last_used_dt = db.Column(
        UTCDateTime,
        nullable=True
    )
    last_used_ip = db.Column(
        INET,
        nullable=True
    )
    use_count = db.Column(
        db.Integer,
        nullable=False,
        default=0
    )
    # Write-only helper mapping the clear-text token to its stored hash.
    access_token = TokenProperty('access_token_hash')
    @property
    def locator(self):
        return {'id': self.id}
    @property
    def scopes(self):
        """The set of scopes this token has access to."""
        return set(self._scopes)
    @scopes.setter
    def scopes(self, value):
        self._scopes = sorted(value)
    def get_expires_in(self):
        # authlib TokenMixin API; 0 here since these tokens do not expire.
        return 0
    def is_expired(self):
        # authlib TokenMixin API; tokens never expire on their own.
        return False
class OAuthToken(TokenModelBase):
    """OAuth tokens."""
    __tablename__ = 'tokens'
    __table_args__ = {'schema': 'oauth'}
    app_user_link_id = db.Column(
        db.ForeignKey('oauth.application_user_links.id', ondelete='CASCADE'),
        nullable=False,
        index=True
    )
    app_user_link = db.relationship(
        'OAuthApplicationUserLink',
        lazy=False,
        backref=db.backref(
            'tokens',
            lazy='dynamic',
            cascade='all, delete-orphan',
            passive_deletes=True
        )
    )
    @property
    def user(self):
        # Convenience accessor for the user this token belongs to.
        return self.app_user_link.user
    @property
    def application(self):
        # Convenience accessor for the OAuth application of this token.
        return self.app_user_link.application
    def __repr__(self): # pragma: no cover
        return f'<OAuthToken({self.id}, {self.app_user_link_id}, {self.scopes})>'
    def check_client(self, client):
        # authlib TokenMixin API: a token is only valid for its own app.
        return self.application == client
    def get_scope(self):
        # scopes are restricted by what's authorized for the particular user and what's whitelisted for the app
        scopes = self.scopes & set(self.app_user_link.scopes) & set(self.application.allowed_scopes)
        return list_to_scope(sorted(scopes))
    def is_revoked(self):
        # A token is unusable once its user is blocked/deleted or its
        # application has been disabled.
        return self.user.is_blocked or self.user.is_deleted or not self.application.is_enabled
@dataclass(frozen=True)
class OAuth2AuthorizationCode(AuthorizationCodeMixin):
    """Short-lived authorization code (authlib ``AuthorizationCodeMixin``).

    Instances are immutable and expire five minutes after ``auth_time``.
    """
    code: str
    user_id: int
    client_id: str
    # code_challenge/code_challenge_method look like PKCE (RFC 7636)
    # parameters -- confirm against the grant implementation.
    code_challenge: str
    code_challenge_method: str
    redirect_uri: str = ''
    scope: str = ''
    auth_time: datetime = field(default_factory=now_utc)
    def is_expired(self):
        # Codes are only usable for 5 minutes after being issued.
        return now_utc() - self.auth_time > timedelta(minutes=5)
    def get_redirect_uri(self):
        return self.redirect_uri
    def get_scope(self):
        return self.scope
    def get_auth_time(self):
        return self.auth_time
    def get_nonce(self):
        # our grant types do not require nonces
        raise NotImplementedError
|
ThiefMaster/indico
|
indico/core/oauth/models/tokens.py
|
Python
|
mit
| 3,814
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import List, Optional
from flask_appbuilder.security.sqla.models import User
from superset.commands.base import BaseCommand
from superset.commands.exceptions import DeleteFailedError
from superset.connectors.sqla.models import SqlaTable
from superset.datasets.commands.exceptions import (
DatasetBulkDeleteFailedError,
DatasetForbiddenError,
DatasetNotFoundError,
)
from superset.datasets.dao import DatasetDAO
from superset.exceptions import SupersetSecurityException
from superset.extensions import db, security_manager
from superset.views.base import check_ownership
logger = logging.getLogger(__name__)
class BulkDeleteDatasetCommand(BaseCommand):
    """Bulk-delete datasets and clean up their data-access permissions.

    Validates that all requested datasets exist and are owned by the actor,
    deletes them via the DAO, then removes the associated view menu and
    permission-view rows before committing.
    """

    def __init__(self, user: User, model_ids: List[int]):
        self._actor = user
        self._model_ids = model_ids
        self._models: Optional[List[SqlaTable]] = None

    def run(self) -> None:
        """Execute the bulk delete.

        :raises DatasetNotFoundError: if any requested id does not exist.
        :raises DatasetForbiddenError: if the actor does not own a dataset.
        :raises DatasetBulkDeleteFailedError: if the DAO delete fails.
        """
        self.validate()
        if not self._models:
            return None
        try:
            DatasetDAO.bulk_delete(self._models)
            for model in self._models:
                view_menu = (
                    security_manager.find_view_menu(model.get_perm()) if model else None
                )
                if view_menu:
                    # Remove every permission-view pair attached to the
                    # dataset's view menu, then the view menu itself.
                    permission_views = (
                        db.session.query(security_manager.permissionview_model)
                        .filter_by(view_menu=view_menu)
                        .all()
                    )
                    for permission_view in permission_views:
                        db.session.delete(permission_view)
                    db.session.delete(view_menu)
                else:
                    logger.error(
                        "Could not find the data access permission for the dataset",
                        exc_info=True,
                    )
            db.session.commit()
            return None
        except DeleteFailedError as ex:
            logger.exception(ex.exception)
            # Chain the original DAO failure for easier debugging upstream.
            raise DatasetBulkDeleteFailedError() from ex

    def validate(self) -> None:
        # Validate/populate model exists
        self._models = DatasetDAO.find_by_ids(self._model_ids)
        if not self._models or len(self._models) != len(self._model_ids):
            raise DatasetNotFoundError()
        # Check ownership of every dataset before deleting any of them
        for model in self._models:
            try:
                check_ownership(model)
            except SupersetSecurityException as ex:
                raise DatasetForbiddenError() from ex
|
mistercrunch/panoramix
|
superset/datasets/commands/bulk_delete.py
|
Python
|
apache-2.0
| 3,375
|
"""
support for presenting detailed information in failing assertions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from _pytest.assertion import rewrite
from _pytest.assertion import truncate
from _pytest.assertion import util
def pytest_addoption(parser):
    """Register the ``--assert`` command line option (debugconfig group)."""
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--assert",
        action="store",
        dest="assertmode",
        choices=("rewrite", "plain"),
        default="rewrite",
        metavar="MODE",
        help="""Control assertion debugging tools. 'plain'
                            performs no assertion debugging. 'rewrite'
                            (the default) rewrites assert statements in
                            test modules on import to provide assert
                            expression information.""",
    )
def register_assert_rewrite(*names):
    """Register one or more module names to be rewritten on import.

    Ensures the given modules (or every module inside the given packages)
    get their assert statements rewritten.  Call this before the modules
    are imported — typically from a plugin package's ``__init__.py``.

    :raise TypeError: if any of the given module names is not a string.
    """
    for name in names:
        if not isinstance(name, str):
            raise TypeError(
                "expected module names as *args, got {0} instead".format(repr(names))
            )
    # Find the installed rewriting hook; fall back to a no-op stand-in when
    # rewriting is disabled.
    importhook = next(
        (h for h in sys.meta_path if isinstance(h, rewrite.AssertionRewritingHook)),
        None,
    )
    if importhook is None:
        importhook = DummyRewriteHook()
    importhook.mark_rewrite(*names)
class DummyRewriteHook(object):
    """Stand-in import hook used when assertion rewriting is disabled."""

    def mark_rewrite(self, *names):
        """Accept and ignore module names — rewriting is turned off."""
class AssertionState(object):
    """Per-config state for the assertion plugin."""

    def __init__(self, config, mode):
        # No import hook installed yet; install_importhook() fills this in.
        self.hook = None
        self.mode = mode
        # Tracing channel dedicated to assertion machinery.
        self.trace = config.trace.root.get("assertion")
def install_importhook(config):
    """Install the assertion rewrite import hook.

    :raise SystemError: on platforms where rewriting cannot work.
    """
    # Jython has an AST bug that makes the assertion rewriting hook malfunction.
    if sys.platform.startswith("java"):
        raise SystemError("rewrite not supported")

    state = AssertionState(config, "rewrite")
    config._assertstate = state
    rewrite_hook = rewrite.AssertionRewritingHook(config)
    state.hook = rewrite_hook
    # Put our hook first so it sees imports before any other finder.
    sys.meta_path.insert(0, rewrite_hook)
    state.trace("installed rewrite import hook")

    def undo():
        installed = config._assertstate.hook
        if installed is not None and installed in sys.meta_path:
            sys.meta_path.remove(installed)

    config.add_cleanup(undo)
    return rewrite_hook
def pytest_collection(session):
    """Attach the active session to the rewrite hook at collection time.

    Only invoked in processes that actually collect test modules — e.g. not
    in the pytest-xdist master process, which delegates collection.
    """
    assertstate = getattr(session.config, "_assertstate", None)
    if assertstate is not None and assertstate.hook is not None:
        assertstate.hook.set_session(session)
def pytest_runtest_setup(item):
    """Install the pytest_assertrepr_compare hook for this test item.

    The rewrite machinery consults ``util._reprcompare`` (when set) to build
    custom comparison explanations via the ``pytest_assertrepr_compare`` hook.
    """

    def callbinrepr(op, left, right):
        """Build a comparison explanation via pytest_assertrepr_compare.

        Takes the first non-empty hook result and post-processes it:
        * truncates overly verbose output unless configured otherwise
          (e.g. verbose mode);
        * escapes embedded newlines so util.format_explanation() works later;
        * in rewrite mode, doubles %-characters to survive later % formatting.
        """
        hook_result = item.ihook.pytest_assertrepr_compare(
            config=item.config, op=op, left=left, right=right
        )
        for new_expl in hook_result:
            if not new_expl:
                continue
            new_expl = truncate.truncate_if_required(new_expl, item)
            new_expl = [line.replace("\n", "\\n") for line in new_expl]
            res = six.text_type("\n~").join(new_expl)
            if item.config.getvalue("assertmode") == "rewrite":
                res = res.replace("%", "%%")
            return res

    util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
    """Remove the custom comparison hook installed by pytest_runtest_setup."""
    util._reprcompare = None
def pytest_sessionfinish(session):
    """Detach the session from the rewrite hook when the session ends."""
    assertstate = getattr(session.config, "_assertstate", None)
    if assertstate is not None and assertstate.hook is not None:
        assertstate.hook.set_session(None)
# Expose util.assertrepr_compare as this plugin's default implementation of
# the pytest_assertrepr_compare hook.
pytest_assertrepr_compare = util.assertrepr_compare
|
txomon/pytest
|
src/_pytest/assertion/__init__.py
|
Python
|
mit
| 5,171
|
"""
A module container
"""
from NetCatKS.Validators.api.interfaces.message import IMessage
from NetCatKS.Validators.api.interfaces.validators import IValidator, IValidatorResponse
__author__ = 'dimd'

# Public API of this package: the re-exported validator/message interfaces.
__all__ = [
    'IMessage',
    'IValidator',
    'IValidatorResponse'
]
|
dimddev/NetCatKS
|
NetCatKS/Validators/api/interfaces/__init__.py
|
Python
|
bsd-2-clause
| 275
|
# coding=utf-8
# Copyright 2022 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract decoder and MSE decoder.
"""
import gin
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim
@gin.configurable
class MSEDecoder(object):
    """Default MSE decoder."""

    def __call__(self, params, output_size):
        # Linear projection (no activation) from the input features to
        # `output_size` values; predictions are cached for loss() below.
        self._predictions = slim.fully_connected(
            params, output_size, activation_fn=None, scope='pose')
        return self._predictions

    def loss(self, labels):
        # Mean-squared error against labels.action, using the predictions
        # produced by the most recent __call__.
        return tf.losses.mean_squared_error(labels=labels.action,
                                            predictions=self._predictions)
|
google-research/tensor2robot
|
research/vrgripper/mse_decoder.py
|
Python
|
apache-2.0
| 1,146
|
"""
Models for User Information (students, staff, etc)
Migration Notes
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration student --auto description_of_your_change
3. Add the migration file created in edx-platform/common/djangoapps/student/migrations/
"""
from datetime import datetime, timedelta
import hashlib
import json
import logging
from pytz import UTC
import uuid
from collections import defaultdict, OrderedDict
import dogstats_wrapper as dog_stats_api
from urllib import urlencode
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db import models, IntegrityError
from django.db.models import Count
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver, Signal
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_noop
from django_countries.fields import CountryField
from config_models.models import ConfigurationModel
from track import contexts
from eventtracking import tracker
from importlib import import_module
from south.modelsinspector import add_introspection_rules
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import lms.lib.comment_client as cc
from util.model_utils import emit_field_changed_events, get_changed_fields_dict
from util.query import use_read_replica_if_available
from xmodule_django.models import CourseKeyField, NoneToEmptyManager
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from functools import total_ordering
from certificates.models import GeneratedCertificate
from course_modes.models import CourseMode
import analytics
# Fired when a student unenrolls; receivers get the enrollment record and
# whether a refund should be skipped.
UNENROLL_DONE = Signal(providing_args=["course_enrollment", "skip_refund"])
log = logging.getLogger(__name__)
AUDIT_LOG = logging.getLogger("audit")
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore  # pylint: disable=invalid-name
# Human-readable labels for enrollment state transitions (stored in the DB
# for audit/history rows).
UNENROLLED_TO_ALLOWEDTOENROLL = 'from unenrolled to allowed to enroll'
ALLOWEDTOENROLL_TO_ENROLLED = 'from allowed to enroll to enrolled'
ENROLLED_TO_ENROLLED = 'from enrolled to enrolled'
ENROLLED_TO_UNENROLLED = 'from enrolled to unenrolled'
UNENROLLED_TO_ENROLLED = 'from unenrolled to enrolled'
# NOTE(review): the value below ends with "to enrolled" although the constant
# name says TO_UNENROLLED — looks like a copy/paste bug in the stored label.
# The string is persisted to the DB via TRANSITION_STATES choices, so changing
# it would affect existing data; flagging instead of fixing here.
ALLOWEDTOENROLL_TO_UNENROLLED = 'from allowed to enroll to enrolled'
UNENROLLED_TO_UNENROLLED = 'from unenrolled to unenrolled'
DEFAULT_TRANSITION_STATE = 'N/A'
TRANSITION_STATES = (
    (UNENROLLED_TO_ALLOWEDTOENROLL, UNENROLLED_TO_ALLOWEDTOENROLL),
    (ALLOWEDTOENROLL_TO_ENROLLED, ALLOWEDTOENROLL_TO_ENROLLED),
    (ENROLLED_TO_ENROLLED, ENROLLED_TO_ENROLLED),
    (ENROLLED_TO_UNENROLLED, ENROLLED_TO_UNENROLLED),
    (UNENROLLED_TO_ENROLLED, UNENROLLED_TO_ENROLLED),
    (ALLOWEDTOENROLL_TO_UNENROLLED, ALLOWEDTOENROLL_TO_UNENROLLED),
    (UNENROLLED_TO_UNENROLLED, UNENROLLED_TO_UNENROLLED),
    (DEFAULT_TRANSITION_STATE, DEFAULT_TRANSITION_STATE)
)
class AnonymousUserId(models.Model):
    """
    This table contains user, course_Id and anonymous_user_id
    Purpose of this table is to provide user by anonymous_user_id.
    We generate anonymous_user_id using md5 algorithm,
    and use result in hex form, so its length is equal to 32 bytes.
    """
    objects = NoneToEmptyManager()
    user = models.ForeignKey(User, db_index=True)
    anonymous_user_id = models.CharField(unique=True, max_length=32)
    course_id = CourseKeyField(db_index=True, max_length=255, blank=True)
    # NOTE(review): Django only honors unique_together inside `class Meta`
    # with field *names*; this class-level assignment of field objects has no
    # effect as written. Confirm intent and move into Meta (requires a
    # migration) if the composite constraint is actually desired.
    unique_together = (user, course_id)
def anonymous_id_for_user(user, course_id, save=True):
    """
    Return a unique id for a (user, course) pair, suitable for inserting
    into e.g. personalized survey links.
    If user is an `AnonymousUser`, returns `None`
    Keyword arguments:
    save -- Whether the id should be saved in an AnonymousUserId object.
    """
    # This part is for ability to get xblock instance in xblock_noauth handlers, where user is unauthenticated.
    if user.is_anonymous():
        return None
    # Per-request memoization: previously computed ids are cached on the
    # user object itself, keyed by course_id.
    cached_id = getattr(user, '_anonymous_id', {}).get(course_id)
    if cached_id is not None:
        return cached_id
    # include the secret key as a salt, and to make the ids unique across different LMS installs.
    hasher = hashlib.md5()
    hasher.update(settings.SECRET_KEY)
    hasher.update(unicode(user.id))
    if course_id:
        hasher.update(course_id.to_deprecated_string().encode('utf-8'))
    digest = hasher.hexdigest()
    if not hasattr(user, '_anonymous_id'):
        user._anonymous_id = {}  # pylint: disable=protected-access
    user._anonymous_id[course_id] = digest  # pylint: disable=protected-access
    if save is False:
        return digest
    try:
        anonymous_user_id, __ = AnonymousUserId.objects.get_or_create(
            defaults={'anonymous_user_id': digest},
            user=user,
            course_id=course_id
        )
        if anonymous_user_id.anonymous_user_id != digest:
            # A different id was stored previously (e.g. after a SECRET_KEY
            # change); log loudly but still return the newly computed digest.
            log.error(
                u"Stored anonymous user id %r for user %r "
                u"in course %r doesn't match computed id %r",
                user,
                course_id,
                anonymous_user_id.anonymous_user_id,
                digest
            )
    except IntegrityError:
        # Another thread has already created this entry, so
        # continue
        pass
    return digest
def user_by_anonymous_id(uid):
    """
    Return user by anonymous_user_id using AnonymousUserId lookup table.
    Do not raise `django.ObjectDoesNotExist` exception,
    if there is no user for anonymous_student_id,
    because this function will be used inside xmodule w/o django access.
    """
    if uid is None:
        return None
    try:
        # Reverse lookup through the AnonymousUserId table's user FK.
        return User.objects.get(anonymoususerid__anonymous_user_id=uid)
    except ObjectDoesNotExist:
        return None
class UserStanding(models.Model):
    """
    This table contains a student's account's status.
    Currently, we're only disabling accounts; in the future we can imagine
    taking away more specific privileges, like forums access, or adding
    more specific karma levels or probationary stages.
    """
    ACCOUNT_DISABLED = "disabled"
    ACCOUNT_ENABLED = "enabled"
    USER_STANDING_CHOICES = (
        (ACCOUNT_DISABLED, u"Account Disabled"),
        (ACCOUNT_ENABLED, u"Account Enabled"),
    )
    # The student whose standing this row records (at most one row per user).
    user = models.ForeignKey(User, db_index=True, related_name='standing', unique=True)
    account_status = models.CharField(
        blank=True, max_length=31, choices=USER_STANDING_CHOICES
    )
    # The (staff) user who last changed this standing.
    changed_by = models.ForeignKey(User, blank=True)
    standing_last_changed_at = models.DateTimeField(auto_now=True)
class UserProfile(models.Model):
    """This is where we store all the user demographic fields. We have a
    separate table for this rather than extending the built-in Django auth_user.
    Notes:
        * Some fields are legacy ones from the first run of 6.002, from which
          we imported many users.
        * Fields like name and address are intentionally open ended, to account
          for international variations. An unfortunate side-effect is that we
          cannot efficiently sort on last names for instance.
    Replication:
        * Only the Portal servers should ever modify this information.
        * All fields are replicated into relevant Course databases
    Some of the fields are legacy ones that were captured during the initial
    MITx fall prototype.
    """
    class Meta:  # pylint: disable=missing-docstring
        db_table = "auth_userprofile"
    # CRITICAL TODO/SECURITY
    # Sanitize all fields.
    # This is not visible to other users, but could introduce holes later
    user = models.OneToOneField(User, unique=True, db_index=True, related_name='profile')
    name = models.CharField(blank=True, max_length=255, db_index=True)
    meta = models.TextField(blank=True)  # JSON dictionary for future expansion
    courseware = models.CharField(blank=True, max_length=255, default='course.xml')
    # Location is no longer used, but is held here for backwards compatibility
    # for users imported from our first class.
    language = models.CharField(blank=True, max_length=255, db_index=True)
    location = models.CharField(blank=True, max_length=255, db_index=True)
    # Optional demographic data we started capturing from Fall 2012
    # Valid birth years: the last 120 years, newest first.
    this_year = datetime.now(UTC).year
    VALID_YEARS = range(this_year, this_year - 120, -1)
    year_of_birth = models.IntegerField(blank=True, null=True, db_index=True)
    GENDER_CHOICES = (
        ('m', ugettext_noop('Male')),
        ('f', ugettext_noop('Female')),
        # Translators: 'Other' refers to the student's gender
        ('o', ugettext_noop('Other'))
    )
    gender = models.CharField(
        blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES
    )
    # [03/21/2013] removed these, but leaving comment since there'll still be
    # p_se and p_oth in the existing data in db.
    # ('p_se', 'Doctorate in science or engineering'),
    # ('p_oth', 'Doctorate in another field'),
    LEVEL_OF_EDUCATION_CHOICES = (
        ('p', ugettext_noop('Doctorate')),
        ('m', ugettext_noop("Master's or professional degree")),
        ('b', ugettext_noop("Bachelor's degree")),
        ('a', ugettext_noop("Associate degree")),
        ('hs', ugettext_noop("Secondary/high school")),
        ('jhs', ugettext_noop("Junior secondary/junior high/middle school")),
        ('el', ugettext_noop("Elementary/primary school")),
        # Translators: 'None' refers to the student's level of education
        ('none', ugettext_noop("None")),
        # Translators: 'Other' refers to the student's level of education
        ('other', ugettext_noop("Other"))
    )
    level_of_education = models.CharField(
        blank=True, null=True, max_length=6, db_index=True,
        choices=LEVEL_OF_EDUCATION_CHOICES
    )
    mailing_address = models.TextField(blank=True, null=True)
    city = models.TextField(blank=True, null=True)
    country = CountryField(blank=True, null=True)
    goals = models.TextField(blank=True, null=True)
    allow_certificate = models.BooleanField(default=1)
    bio = models.CharField(blank=True, null=True, max_length=3000, db_index=False)
    profile_image_uploaded_at = models.DateTimeField(null=True)
    @property
    def has_profile_image(self):
        """
        Convenience method that returns a boolean indicating whether or not
        this user has uploaded a profile image.
        """
        return self.profile_image_uploaded_at is not None
    def get_meta(self):  # pylint: disable=missing-docstring
        # Parse the `meta` JSON blob; an empty/blank value becomes an empty dict.
        js_str = self.meta
        if not js_str:
            js_str = dict()
        else:
            js_str = json.loads(self.meta)
        return js_str
    def set_meta(self, meta_json):  # pylint: disable=missing-docstring
        # Serialize a dict back into the `meta` JSON blob (does not save()).
        self.meta = json.dumps(meta_json)
    def set_login_session(self, session_id=None):
        """
        Sets the current session id for the logged-in user.
        If session_id doesn't match the existing session,
        deletes the old session object.
        """
        meta = self.get_meta()
        old_login = meta.get('session_id', None)
        if old_login:
            SessionStore(session_key=old_login).delete()
        meta['session_id'] = session_id
        self.set_meta(meta)
        self.save()
    def requires_parental_consent(self, date=None, age_limit=None, default_requires_consent=True):
        """Returns true if this user requires parental consent.
        Args:
            date (Date): The date for which consent needs to be tested (defaults to now).
            age_limit (int): The age limit at which parental consent is no longer required.
                This defaults to the value of the setting 'PARENTAL_CONSENT_AGE_LIMIT'.
            default_requires_consent (bool): True if users require parental consent if they
                have no specified year of birth (default is True).
        Returns:
            True if the user requires parental consent.
        """
        if age_limit is None:
            age_limit = getattr(settings, 'PARENTAL_CONSENT_AGE_LIMIT', None)
            if age_limit is None:
                return False
        # Return True if either:
        # a) The user has a year of birth specified and that year is fewer years in the past than the limit.
        # b) The user has no year of birth specified and the default is to require consent.
        #
        # Note: we have to be conservative using the user's year of birth as their birth date could be
        # December 31st. This means that if the number of years since their birth year is exactly equal
        # to the age limit then we have to assume that they might still not be old enough.
        year_of_birth = self.year_of_birth
        if year_of_birth is None:
            return default_requires_consent
        if date is None:
            date = datetime.now(UTC)
        return date.year - year_of_birth <= age_limit  # pylint: disable=maybe-no-member
@receiver(pre_save, sender=UserProfile)
def user_profile_pre_save_callback(sender, **kwargs):
    """
    Ensure consistency of a user profile before saving it.
    """
    user_profile = kwargs['instance']
    # Remove profile images for users who require parental consent
    if user_profile.requires_parental_consent() and user_profile.has_profile_image:
        user_profile.profile_image_uploaded_at = None
    # Cache "old" field values on the model instance so that they can be
    # retrieved in the post_save callback when we emit an event with new and
    # old field values.
    user_profile._changed_fields = get_changed_fields_dict(user_profile, sender)
@receiver(post_save, sender=UserProfile)
def user_profile_post_save_callback(sender, **kwargs):
    """
    Emit analytics events after saving the UserProfile.
    """
    user_profile = kwargs['instance']
    # pylint: disable=protected-access
    # Uses the _changed_fields snapshot cached by the pre_save callback;
    # `meta` is excluded since it is an opaque JSON blob.
    emit_field_changed_events(
        user_profile,
        user_profile.user,
        sender._meta.db_table,
        excluded_fields=['meta']
    )
@receiver(pre_save, sender=User)
def user_pre_save_callback(sender, **kwargs):
    """
    Capture old fields on the user instance before save and cache them as a
    private field on the current model for use in the post_save callback.
    """
    user = kwargs['instance']
    user._changed_fields = get_changed_fields_dict(user, sender)
@receiver(post_save, sender=User)
def user_post_save_callback(sender, **kwargs):
    """
    Emit analytics events after saving the User.
    """
    user = kwargs['instance']
    # pylint: disable=protected-access
    # Skip noisy/irrelevant fields and never expose the password hash in
    # emitted events (it is listed as hidden instead).
    emit_field_changed_events(
        user,
        user,
        sender._meta.db_table,
        excluded_fields=['last_login', 'first_name', 'last_name'],
        hidden_fields=['password']
    )
class UserSignupSource(models.Model):
    """
    This table contains information about users registering
    via Micro-Sites
    """
    user = models.ForeignKey(User, db_index=True)
    # Hostname/identifier of the microsite the user signed up through.
    site = models.CharField(max_length=255, db_index=True)
def unique_id_for_user(user, save=True):
    """
    Return a unique id for a user, suitable for inserting into
    e.g. personalized survey links.
    Keyword arguments:
    save -- Whether the id should be saved in an AnonymousUserId object.
    """
    # Setting course_id to '' makes it not affect the generated hash,
    # and thus produce the old per-student anonymous id
    return anonymous_id_for_user(user, None, save=save)
# TODO: Should be renamed to generic UserGroup, and possibly
# Given an optional field for type of group
class UserTestGroup(models.Model):
    """A named group of users, e.g. for A/B test cohorts."""
    users = models.ManyToManyField(User, db_index=True)
    name = models.CharField(blank=False, max_length=32, db_index=True)
    description = models.TextField(blank=True)
class Registration(models.Model):
    ''' Allows us to wait for e-mail before user is registered. A
    registration profile is created when the user creates an
    account, but that account is inactive. Once the user clicks
    on the activation key, it becomes active. '''
    class Meta:
        db_table = "auth_registration"
    user = models.ForeignKey(User, unique=True)
    activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
    def register(self, user):
        # MINOR TODO: Switch to crypto-secure key
        self.activation_key = uuid.uuid4().hex
        self.user = user
        self.save()
    def activate(self):
        # Flip the linked auth user to active; the registration row is kept.
        self.user.is_active = True
        self.user.save()
class PendingNameChange(models.Model):
    """A requested (not yet approved) change to a user's full name."""
    user = models.OneToOneField(User, unique=True, db_index=True)
    new_name = models.CharField(blank=True, max_length=255)
    rationale = models.CharField(blank=True, max_length=1024)
class PendingEmailChange(models.Model):
    """A requested email change awaiting confirmation via activation key."""
    user = models.OneToOneField(User, unique=True, db_index=True)
    new_email = models.CharField(blank=True, max_length=255, db_index=True)
    activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)
    def request_change(self, email):
        """Request a change to a user's email.
        Implicitly saves the pending email change record.
        Arguments:
            email (unicode): The proposed new email for the user.
        Returns:
            unicode: The activation code to confirm the change.
        """
        self.new_email = email
        self.activation_key = uuid.uuid4().hex
        self.save()
        return self.activation_key
# Event names used when emitting enrollment lifecycle tracking events.
EVENT_NAME_ENROLLMENT_ACTIVATED = 'edx.course.enrollment.activated'
EVENT_NAME_ENROLLMENT_DEACTIVATED = 'edx.course.enrollment.deactivated'
EVENT_NAME_ENROLLMENT_MODE_CHANGED = 'edx.course.enrollment.mode_changed'
class PasswordHistory(models.Model):
    """
    This model will keep track of past passwords that a user has used
    as well as providing contraints (e.g. can't reuse passwords)
    """
    user = models.ForeignKey(User)
    # Hashed password exactly as stored on the auth User at the time.
    password = models.CharField(max_length=128)
    time_set = models.DateTimeField(default=timezone.now)
    def create(self, user):
        """
        This will copy over the current password, if any of the configuration has been turned on
        """
        # Skip recording history entirely unless at least one advanced
        # security policy that needs it is enabled.
        if not (PasswordHistory.is_student_password_reuse_restricted() or
                PasswordHistory.is_staff_password_reuse_restricted() or
                PasswordHistory.is_password_reset_frequency_restricted() or
                PasswordHistory.is_staff_forced_password_reset_enabled() or
                PasswordHistory.is_student_forced_password_reset_enabled()):
            return
        self.user = user
        self.password = user.password
        self.save()
    @classmethod
    def is_student_password_reuse_restricted(cls):
        """
        Returns whether the configuration which limits password reuse has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE', 0
        )
        return min_diff_pw > 0
    @classmethod
    def is_staff_password_reuse_restricted(cls):
        """
        Returns whether the configuration which limits password reuse has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE', 0
        )
        return min_diff_pw > 0
    @classmethod
    def is_password_reset_frequency_restricted(cls):
        """
        Returns whether the configuration which limits the password reset frequency has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'
        )
        # Returns the (truthy) configured value rather than a strict bool.
        return min_days_between_reset
    @classmethod
    def is_staff_forced_password_reset_enabled(cls):
        """
        Returns whether the configuration which forces password resets to occur has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS'
        )
        return min_days_between_reset
    @classmethod
    def is_student_forced_password_reset_enabled(cls):
        """
        Returns whether the configuration which forces password resets to occur has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_days_pw_reset = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS'
        )
        return min_days_pw_reset
    @classmethod
    def should_user_reset_password_now(cls, user):
        """
        Returns whether a password has 'expired' and should be reset. Note there are two different
        expiry policies for staff and students
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        days_before_password_reset = None
        if user.is_staff:
            if cls.is_staff_forced_password_reset_enabled():
                days_before_password_reset = \
                    settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS']
        elif cls.is_student_forced_password_reset_enabled():
            days_before_password_reset = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS']
        if days_before_password_reset:
            history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
            time_last_reset = None
            if history:
                # first element should be the last time we reset password
                time_last_reset = history[0].time_set
            else:
                # no history, then let's take the date the user joined
                time_last_reset = user.date_joined
            now = timezone.now()
            delta = now - time_last_reset
            return delta.days >= days_before_password_reset
        return False
    @classmethod
    def is_password_reset_too_soon(cls, user):
        """
        Verifies that the password is not getting reset too frequently
        """
        if not cls.is_password_reset_frequency_restricted():
            return False
        history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
        if not history:
            return False
        now = timezone.now()
        delta = now - history[0].time_set
        return delta.days < settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
    @classmethod
    def is_allowable_password_reuse(cls, user, new_password):
        """
        Verifies that the password adheres to the reuse policies
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return True
        if user.is_staff and cls.is_staff_password_reuse_restricted():
            min_diff_passwords_required = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
        elif cls.is_student_password_reuse_restricted():
            min_diff_passwords_required = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
        else:
            min_diff_passwords_required = 0
        # just limit the result set to the number of different
        # password we need
        history = PasswordHistory.objects.filter(user=user).order_by('-time_set')[:min_diff_passwords_required]
        for entry in history:
            # be sure to re-use the same salt
            # NOTE, how the salt is serialized in the password field is dependent on the algorithm
            # in pbkdf2_sha256 [LMS] it's the 3rd element, in sha1 [unit tests] it's the 2nd element
            hash_elements = entry.password.split('$')
            algorithm = hash_elements[0]
            if algorithm == 'pbkdf2_sha256':
                hashed_password = make_password(new_password, hash_elements[2])
            elif algorithm == 'sha1':
                hashed_password = make_password(new_password, hash_elements[1])
            else:
                # This means we got something unexpected. We don't want to throw an exception, but
                # log as an error and basically allow any password reuse
                AUDIT_LOG.error('''
                                Unknown password hashing algorithm "{0}" found in existing password
                                hash, password reuse policy will not be enforced!!!
                                '''.format(algorithm))
                return True
            if entry.password == hashed_password:
                return False
        return True
class LoginFailures(models.Model):
    """
    This model will keep track of failed login attempts
    """
    user = models.ForeignKey(User)
    failure_count = models.IntegerField(default=0)
    # When set, the user is locked out until this timestamp.
    lockout_until = models.DateTimeField(null=True)
    @classmethod
    def is_feature_enabled(cls):
        """
        Returns whether the feature flag around this functionality has been set
        """
        return settings.FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS']
    @classmethod
    def is_user_locked_out(cls, user):
        """
        Static method to return in a given user has his/her account locked out
        """
        try:
            record = LoginFailures.objects.get(user=user)
            if not record.lockout_until:
                return False
            now = datetime.now(UTC)
            until = record.lockout_until
            is_locked_out = until and now < until
            return is_locked_out
        except ObjectDoesNotExist:
            # No failure record means the user was never locked out.
            return False
    @classmethod
    def increment_lockout_counter(cls, user):
        """
        Ticks the failed attempt counter
        """
        record, _ = LoginFailures.objects.get_or_create(user=user)
        record.failure_count = record.failure_count + 1
        max_failures_allowed = settings.MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED
        # did we go over the limit in attempts
        if record.failure_count >= max_failures_allowed:
            # yes, then store when this account is locked out until
            lockout_period_secs = settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS
            record.lockout_until = datetime.now(UTC) + timedelta(seconds=lockout_period_secs)
        record.save()
    @classmethod
    def clear_lockout_counter(cls, user):
        """
        Removes the lockout counters (normally called after a successful login)
        """
        try:
            entry = LoginFailures.objects.get(user=user)
            entry.delete()
        except ObjectDoesNotExist:
            return
class CourseEnrollmentException(Exception):
    """Base class for errors raised during enrollment operations."""
    pass
class NonExistentCourseError(CourseEnrollmentException):
    """Raised when the referenced course does not exist."""
    pass
class EnrollmentClosedError(CourseEnrollmentException):
    """Raised when enrollment in the course is no longer open."""
    pass
class CourseFullError(CourseEnrollmentException):
    """Raised when the course has reached its maximum enrollment."""
    pass
class AlreadyEnrolledError(CourseEnrollmentException):
    """Raised when the student is already actively enrolled."""
    pass
class CourseEnrollmentManager(models.Manager):
    """
    Custom manager for CourseEnrollment with Table-level filter methods.
    """
    def num_enrolled_in(self, course_id):
        """
        Returns the count of active enrollments in a course.
        'course_id' is the course_id to return enrollments
        """
        enrollment_number = super(CourseEnrollmentManager, self).get_query_set().filter(
            course_id=course_id,
            is_active=1
        ).count()
        return enrollment_number
    def is_course_full(self, course):
        """
        Returns a boolean value regarding whether a course has already reached it's max enrollment
        capacity
        """
        is_course_full = False
        # A max of None means unlimited enrollment.
        if course.max_student_enrollments_allowed is not None:
            is_course_full = self.num_enrolled_in(course.id) >= course.max_student_enrollments_allowed
        return is_course_full
    def users_enrolled_in(self, course_id):
        """Return a queryset of User for every user enrolled in the course."""
        return User.objects.filter(
            courseenrollment__course_id=course_id,
            courseenrollment__is_active=True
        )
    def enrollment_counts(self, course_id):
        """
        Returns a dictionary that stores the total enrollment count for a course, as well as the
        enrollment count for each individual mode.
        """
        # Unfortunately, Django's "group by"-style queries look super-awkward
        query = use_read_replica_if_available(
            super(CourseEnrollmentManager, self).get_query_set().filter(course_id=course_id, is_active=True).values(
                'mode').order_by().annotate(Count('mode')))
        total = 0
        enroll_dict = defaultdict(int)
        for item in query:
            enroll_dict[item['mode']] = item['mode__count']
            total += item['mode__count']
        enroll_dict['total'] = total
        return enroll_dict
    def enrolled_and_dropped_out_users(self, course_id):
        """Return a queryset of Users in the course."""
        # Note: intentionally does NOT filter on is_active, so it includes
        # users who later unenrolled.
        return User.objects.filter(
            courseenrollment__course_id=course_id
        )
class CourseEnrollment(models.Model):
    """
    Represents a Student's Enrollment record for a single Course. You should
    generally not manipulate CourseEnrollment objects directly, but use the
    classmethods provided to enroll, unenroll, or check on the enrollment status
    of a given student.
    We're starting to consolidate course enrollment logic in this class, but
    more should be brought in (such as checking against CourseEnrollmentAllowed,
    checking course dates, user permissions, etc.) This logic is currently
    scattered across our views.
    """
    # NOTE(review): looks like a list of fields tracked for model-change
    # analytics by machinery outside this file -- confirm before relying on it.
    MODEL_TAGS = ['course_id', 'is_active', 'mode']
    user = models.ForeignKey(User)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
    # If is_active is False, then the student is not considered to be enrolled
    # in the course (is_enrolled() will return False)
    is_active = models.BooleanField(default=True)
    # Represents the modes that are possible. We'll update this later with a
    # list of possible values.
    mode = models.CharField(default="honor", max_length=100)
    objects = CourseEnrollmentManager()

    class Meta:
        unique_together = (('user', 'course_id'),)
        ordering = ('user', 'course_id')

    def __unicode__(self):
        return (
            "[CourseEnrollment] {}: {} ({}); active: ({})"
        ).format(self.user, self.course_id, self.created, self.is_active)

    @classmethod
    def get_or_create_enrollment(cls, user, course_key):
        """
        Create an enrollment for a user in a class. By default *this enrollment
        is not active*. This is useful for when an enrollment needs to go
        through some sort of approval process before being activated. If you
        don't need this functionality, just call `enroll()` instead.
        Returns a CoursewareEnrollment object.
        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        It is expected that this method is called from a method which has already
        verified the user authentication and access.
        """
        # If we're passing in a newly constructed (i.e. not yet persisted) User,
        # save it to the database so that it can have an ID that we can throw
        # into our CourseEnrollment object. Otherwise, we'll get an
        # IntegrityError for having a null user_id.
        assert(isinstance(course_key, CourseKey))
        if user.id is None:
            user.save()
        enrollment, created = CourseEnrollment.objects.get_or_create(
            user=user,
            course_id=course_key,
        )
        # If we *did* just create a new enrollment, set some defaults
        if created:
            enrollment.mode = "honor"
            enrollment.is_active = False
            enrollment.save()
        return enrollment

    @classmethod
    def get_enrollment(cls, user, course_key):
        """Returns a CoursewareEnrollment object.
        Args:
            user (User): The user associated with the enrollment.
            course_id (CourseKey): The key of the course associated with the enrollment.
        Returns:
            Course enrollment object or None
        """
        try:
            return CourseEnrollment.objects.get(
                user=user,
                course_id=course_key
            )
        except cls.DoesNotExist:
            return None

    @classmethod
    def is_enrollment_closed(cls, user, course):
        """
        Returns a boolean value regarding whether the user has access to enroll in the course. Returns False if the
        enrollment has been closed.
        """
        # Disable the pylint error here, as per ormsbee. This local import was previously
        # in CourseEnrollment.enroll
        from courseware.access import has_access  # pylint: disable=import-error
        return not has_access(user, 'enroll', course)

    def update_enrollment(self, mode=None, is_active=None, skip_refund=False):
        """
        Updates an enrollment for a user in a class. This includes options
        like changing the mode, toggling is_active True/False, etc.
        Also emits relevant events for analytics purposes.
        This saves immediately.
        """
        activation_changed = False
        # if is_active is None, then the call to update_enrollment didn't specify
        # any value, so just leave is_active as it is
        if self.is_active != is_active and is_active is not None:
            self.is_active = is_active
            activation_changed = True
        mode_changed = False
        # if mode is None, the call to update_enrollment didn't specify a new
        # mode, so leave as-is
        if self.mode != mode and mode is not None:
            self.mode = mode
            mode_changed = True
        # Only hit the database when something actually changed.
        if activation_changed or mode_changed:
            self.save()
        if activation_changed:
            if self.is_active:
                self.emit_event(EVENT_NAME_ENROLLMENT_ACTIVATED)
                # Datadog metric, tagged by org/offering/mode.
                dog_stats_api.increment(
                    "common.student.enrollment",
                    tags=[u"org:{}".format(self.course_id.org),
                          u"offering:{}".format(self.course_id.offering),
                          u"mode:{}".format(self.mode)]
                )
            else:
                # Unenrollment: let listeners (e.g. refund handling) react first.
                UNENROLL_DONE.send(sender=None, course_enrollment=self, skip_refund=skip_refund)
                self.emit_event(EVENT_NAME_ENROLLMENT_DEACTIVATED)
                dog_stats_api.increment(
                    "common.student.unenrollment",
                    tags=[u"org:{}".format(self.course_id.org),
                          u"offering:{}".format(self.course_id.offering),
                          u"mode:{}".format(self.mode)]
                )
        if mode_changed:
            # the user's default mode is "honor" and disabled for a course
            # mode change events will only be emitted when the user's mode changes from this
            self.emit_event(EVENT_NAME_ENROLLMENT_MODE_CHANGED)

    def emit_event(self, event_name):
        """
        Emits an event to explicitly track course enrollment and unenrollment.
        """
        try:
            context = contexts.course_context_from_course_id(self.course_id)
            assert(isinstance(self.course_id, CourseKey))
            data = {
                'user_id': self.user.id,
                'course_id': self.course_id.to_deprecated_string(),
                'mode': self.mode,
            }
            with tracker.get_tracker().context(event_name, context):
                tracker.emit(event_name, data)
                if settings.FEATURES.get('SEGMENT_IO_LMS') and settings.SEGMENT_IO_LMS_KEY:
                    tracking_context = tracker.get_tracker().resolve_context()
                    analytics.track(self.user_id, event_name, {
                        'category': 'conversion',
                        'label': self.course_id.to_deprecated_string(),
                        'org': self.course_id.org,
                        'course': self.course_id.course,
                        'run': self.course_id.run,
                        'mode': self.mode,
                    }, context={
                        'Google Analytics': {
                            'clientId': tracking_context.get('client_id')
                        }
                    })
        # Analytics failures must never break enrollment itself: swallow
        # everything and just log.
        except:  # pylint: disable=bare-except
            if event_name and self.course_id:
                log.exception(
                    u'Unable to emit event %s for user %s and course %s',
                    event_name,
                    self.user.username,  # pylint: disable=no-member
                    self.course_id,
                )

    @classmethod
    def enroll(cls, user, course_key, mode="honor", check_access=False):
        """
        Enroll a user in a course. This saves immediately.
        Returns a CoursewareEnrollment object.
        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.
        `course_key` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        `mode` is a string specifying what kind of enrollment this is. The
               default is "honor", meaning honor certificate. Future options
               may include "audit", "verified_id", etc. Please don't use it
               until we have these mapped out.
        `check_access`: if True, we check that an accessible course actually
                exists for the given course_key before we enroll the student.
                The default is set to False to avoid breaking legacy code or
                code with non-standard flows (ex. beta tester invitations), but
                for any standard enrollment flow you probably want this to be True.
        Exceptions that can be raised: NonExistentCourseError,
        EnrollmentClosedError, CourseFullError, AlreadyEnrolledError.  All these
        are subclasses of CourseEnrollmentException if you want to catch all of
        them in the same way.
        It is expected that this method is called from a method which has already
        verified the user authentication.
        Also emits relevant events for analytics purposes.
        """
        # All the server-side checks for whether a user is allowed to enroll.
        try:
            course = modulestore().get_course(course_key)
        except ItemNotFoundError:
            log.warning(
                u"User %s failed to enroll in non-existent course %s",
                user.username,
                course_key.to_deprecated_string(),
            )
            raise NonExistentCourseError
        if check_access:
            if course is None:
                raise NonExistentCourseError
            if CourseEnrollment.is_enrollment_closed(user, course):
                log.warning(
                    u"User %s failed to enroll in course %s because enrollment is closed",
                    user.username,
                    course_key.to_deprecated_string()
                )
                raise EnrollmentClosedError
            if CourseEnrollment.objects.is_course_full(course):
                log.warning(
                    u"User %s failed to enroll in full course %s",
                    user.username,
                    course_key.to_deprecated_string(),
                )
                raise CourseFullError
        # Already-enrolled is always logged, but only raises when check_access
        # was requested (legacy flows tolerate re-enrolling).
        if CourseEnrollment.is_enrolled(user, course_key):
            log.warning(
                u"User %s attempted to enroll in %s, but they were already enrolled",
                user.username,
                course_key.to_deprecated_string()
            )
            if check_access:
                raise AlreadyEnrolledError
        # User is allowed to enroll if they've reached this point.
        enrollment = cls.get_or_create_enrollment(user, course_key)
        enrollment.update_enrollment(is_active=True, mode=mode)
        return enrollment

    @classmethod
    def enroll_by_email(cls, email, course_id, mode="honor", ignore_errors=True):
        """
        Enroll a user in a course given their email. This saves immediately.
        Note that enrolling by email is generally done in big batches and the
        error rate is high. For that reason, we suppress User lookup errors by
        default.
        Returns a CoursewareEnrollment object. If the User does not exist and
        `ignore_errors` is set to `True`, it will return None.
        `email` Email address of the User to add to enroll in the course.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        `mode` is a string specifying what kind of enrollment this is. The
               default is "honor", meaning honor certificate. Future options
               may include "audit", "verified_id", etc. Please don't use it
               until we have these mapped out.
        `ignore_errors` is a boolean indicating whether we should suppress
                        `User.DoesNotExist` errors (returning None) or let it
                        bubble up.
        It is expected that this method is called from a method which has already
        verified the user authentication and access.
        """
        try:
            user = User.objects.get(email=email)
            return cls.enroll(user, course_id, mode)
        except User.DoesNotExist:
            err_msg = u"Tried to enroll email {} into course {}, but user not found"
            log.error(err_msg.format(email, course_id))
            if ignore_errors:
                return None
            raise

    @classmethod
    def unenroll(cls, user, course_id, skip_refund=False):
        """
        Remove the user from a given course. If the relevant `CourseEnrollment`
        object doesn't exist, we log an error but don't throw an exception.
        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        `skip_refund` can be set to True to avoid the refund process.
        """
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_id)
            record.update_enrollment(is_active=False, skip_refund=skip_refund)
        except cls.DoesNotExist:
            log.error(
                u"Tried to unenroll student %s from %s but they were not enrolled",
                user,
                course_id
            )

    @classmethod
    def unenroll_by_email(cls, email, course_id):
        """
        Unenroll a user from a course given their email. This saves immediately.
        User lookup errors are logged but will not throw an exception.
        `email` Email address of the User to unenroll from the course.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        """
        try:
            user = User.objects.get(email=email)
            return cls.unenroll(user, course_id)
        except User.DoesNotExist:
            log.error(
                u"Tried to unenroll email %s from course %s, but user not found",
                email,
                course_id
            )

    @classmethod
    def is_enrolled(cls, user, course_key):
        """
        Returns True if the user is enrolled in the course (the entry must exist
        and it must have `is_active=True`). Otherwise, returns False.
        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        """
        # AnonymousUser can never be enrolled.
        if not user.is_authenticated():
            return False
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_key)
            return record.is_active
        except cls.DoesNotExist:
            return False

    @classmethod
    def is_enrolled_by_partial(cls, user, course_id_partial):
        """
        Returns `True` if the user is enrolled in a course that starts with
        `course_id_partial`. Otherwise, returns False.
        Can be used to determine whether a student is enrolled in a course
        whose run name is unknown.
        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.
        `course_id_partial` (CourseKey) is missing the run component
        """
        assert isinstance(course_id_partial, CourseKey)
        assert not course_id_partial.run  # None or empty string
        course_key = SlashSeparatedCourseKey(course_id_partial.org, course_id_partial.course, '')
        querystring = unicode(course_key.to_deprecated_string())
        try:
            return CourseEnrollment.objects.filter(
                user=user,
                course_id__startswith=querystring,
                is_active=1
            ).exists()
        except cls.DoesNotExist:
            return False

    @classmethod
    def enrollment_mode_for_user(cls, user, course_id):
        """
        Returns the enrollment mode for the given user for the given course
        `user` is a Django User object
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)
        Returns (mode, is_active) where mode is the enrollment mode of the student
        and is_active is whether the enrollment is active.
        Returns (None, None) if the courseenrollment record does not exist.
        """
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_id)
            return (record.mode, record.is_active)
        except cls.DoesNotExist:
            return (None, None)

    @classmethod
    def enrollments_for_user(cls, user):
        """Return a queryset of the user's active enrollments."""
        return CourseEnrollment.objects.filter(user=user, is_active=1)

    def is_paid_course(self):
        """
        Returns True, if course is paid
        """
        paid_course = CourseMode.is_white_label(self.course_id)
        if paid_course or CourseMode.is_professional_slug(self.mode):
            return True
        return False

    def activate(self):
        """Makes this `CourseEnrollment` record active. Saves immediately."""
        self.update_enrollment(is_active=True)

    def deactivate(self):
        """Makes this `CourseEnrollment` record inactive. Saves immediately. An
        inactive record means that the student is not enrolled in this course.
        """
        self.update_enrollment(is_active=False)

    def change_mode(self, mode):
        """Changes this `CourseEnrollment` record's mode to `mode`.  Saves immediately."""
        self.update_enrollment(mode=mode)

    def refundable(self):
        """
        For paid/verified certificates, students may receive a refund if they have
        a verified certificate and the deadline for refunds has not yet passed.
        """
        # In order to support manual refunds past the deadline, set can_refund on this object.
        # On unenrolling, the "UNENROLL_DONE" signal calls CertificateItem.refund_cert_callback(),
        # which calls this method to determine whether to refund the order.
        # This can't be set directly because refunds currently happen as a side-effect of unenrolling.
        # (side-effects are bad)
        if getattr(self, 'can_refund', None) is not None:
            return True
        # If the student has already been given a certificate they should not be refunded
        if GeneratedCertificate.certificate_for_student(self.user, self.course_id) is not None:
            return False
        #TODO - When Course administrators to define a refund period for paid courses then refundable will be supported. # pylint: disable=fixme
        course_mode = CourseMode.mode_for_course(self.course_id, 'verified')
        if course_mode is None:
            return False
        else:
            return True

    @property
    def username(self):
        """Convenience accessor for the enrolled user's username."""
        return self.user.username

    @property
    def course(self):
        """Load this enrollment's course from the modulestore."""
        return modulestore().get_course(self.course_id)

    def is_verified_enrollment(self):
        """
        Check the course enrollment mode is verified or not
        """
        return CourseMode.is_verified_slug(self.mode)
class ManualEnrollmentAudit(models.Model):
    """
    Audit table recording enrollments that were performed manually
    (e.g. by an instructor), who performed them, and why.
    """
    enrollment = models.ForeignKey(CourseEnrollment, null=True)
    enrolled_by = models.ForeignKey(User, null=True)
    enrolled_email = models.CharField(max_length=255, db_index=True)
    time_stamp = models.DateTimeField(auto_now_add=True, null=True)
    state_transition = models.CharField(max_length=255, choices=TRANSITION_STATES)
    reason = models.TextField(null=True)

    @classmethod
    def create_manual_enrollment_audit(cls, user, email, state_transition, reason, enrollment=None):
        """Persist one manual-enrollment audit entry."""
        cls.objects.create(
            enrolled_by=user,
            enrolled_email=email,
            state_transition=state_transition,
            reason=reason,
            enrollment=enrollment
        )

    @classmethod
    def get_manual_enrollment_by_email(cls, email):
        """Return the most recent audit entry for `email`, or None when there is none."""
        try:
            return cls.objects.filter(enrolled_email=email).latest('time_stamp')
        except cls.DoesNotExist:
            return None

    @classmethod
    def get_manual_enrollment(cls, enrollment):
        """Return the most recent audit entry for `enrollment`, or None when there is none."""
        try:
            return cls.objects.filter(enrollment=enrollment).latest('time_stamp')
        except cls.DoesNotExist:
            return None
class CourseEnrollmentAllowed(models.Model):
    """
    Table of users (specified by email address strings) who are allowed to enroll in a specified course.
    The user may or may not (yet) exist.  Enrollment by users listed in this table is allowed
    even if the enrollment time window is past.
    """
    email = models.CharField(max_length=255, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    auto_enroll = models.BooleanField(default=0)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)

    class Meta:  # pylint: disable=missing-docstring
        unique_together = (('email', 'course_id'),)

    def __unicode__(self):
        return "[CourseEnrollmentAllowed] %s: %s (%s)" % (self.email, self.course_id, self.created)

    @classmethod
    def may_enroll_and_unenrolled(cls, course_id):
        """
        Return a QuerySet of students who are allowed to enroll in the course
        but have not already enrolled.
        `course_id` identifies the course for which to compute the QuerySet.
        """
        enrolled_emails = CourseEnrollment.objects.users_enrolled_in(
            course_id=course_id
        ).values_list('email', flat=True)
        allowed = CourseEnrollmentAllowed.objects.filter(course_id=course_id)
        return allowed.exclude(email__in=enrolled_emails)
@total_ordering
class CourseAccessRole(models.Model):
    """
    Maps users to org, courses, and roles. Used by student.roles.CourseRole and OrgRole.
    To establish a user as having a specific role over all courses in the org, create an entry
    without a course_id.
    """
    objects = NoneToEmptyManager()
    user = models.ForeignKey(User)
    # blank org is for global group based roles such as course creator (may be deprecated)
    org = models.CharField(max_length=64, db_index=True, blank=True)
    # blank course_id implies org wide role
    course_id = CourseKeyField(max_length=255, db_index=True, blank=True)
    role = models.CharField(max_length=64, db_index=True)

    class Meta:  # pylint: disable=missing-docstring
        unique_together = ('user', 'org', 'course_id', 'role')

    @property
    def _key(self):
        """
        Comparison key used by __eq__/__hash__/__lt__: role is primary,
        followed by org, course, and then the user's id (avoids fetching
        the related User row).
        """
        return (self.role, self.org, self.course_id, self.user_id)

    def __eq__(self, other):
        """
        Compare by _key (and exact type) instead of the primary key, so two
        role objects can be compared without an extra fetch.
        """
        if type(self) != type(other):
            return False
        return self._key == other._key  # pylint: disable=protected-access

    def __hash__(self):
        return hash(self._key)

    def __lt__(self, other):
        """Lexicographic ordering over _key; total_ordering fills in the rest."""
        return self._key < other._key  # pylint: disable=protected-access

    def __unicode__(self):
        return "[CourseAccessRole] user: {} role: {} org: {} course: {}".format(self.user.username, self.role, self.org, self.course_id)
#### Helper methods for use from python manage.py shell and other classes.
def get_user_by_username_or_email(username_or_email):
    """
    Return a User object, looking up by email if username_or_email contains a
    '@', otherwise by username.
    Raises:
        User.DoesNotExist if lookup fails.
    """
    lookup_field = 'email' if '@' in username_or_email else 'username'
    return User.objects.get(**{lookup_field: username_or_email})
def get_user(email):
    """Return the (User, UserProfile) pair for the account with `email`."""
    user = User.objects.get(email=email)
    profile = UserProfile.objects.get(user=user)
    return user, profile
def user_info(email):
user, u_prof = get_user(email)
print "User id", user.id
print "Username", user.username
print "E-mail", user.email
print "Name", u_prof.name
print "Location", u_prof.location
print "Language", u_prof.language
return user, u_prof
def change_email(old_email, new_email):
    """Change the email address of the account currently using `old_email`."""
    account = User.objects.get(email=old_email)
    account.email = new_email
    account.save()
def change_name(email, new_name):
    """Change the profile name of the account with `email`."""
    _user, profile = get_user(email)
    profile.name = new_name
    profile.save()
def user_count():
print "All users", User.objects.all().count()
print "Active users", User.objects.filter(is_active=True).count()
return User.objects.all().count()
def active_user_count():
    """Return the number of users whose accounts are active."""
    return User.objects.filter(is_active=True).count()
def create_group(name, description):
    """Create and save a UserTestGroup with the given name and description."""
    group = UserTestGroup()
    group.name = name
    group.description = description
    group.save()
def add_user_to_group(user, group):
    """Add the user (by username) to the named UserTestGroup."""
    test_group = UserTestGroup.objects.get(name=group)
    member = User.objects.get(username=user)
    test_group.users.add(member)
    test_group.save()
def remove_user_from_group(user, group):
    """Remove the user (by username) from the named UserTestGroup."""
    test_group = UserTestGroup.objects.get(name=group)
    member = User.objects.get(username=user)
    test_group.users.remove(member)
    test_group.save()
# Well-known group names and their descriptions; used by
# add_user_to_default_group() to lazily create UserTestGroup rows.
DEFAULT_GROUPS = {
    'email_future_courses': 'Receive e-mails about future MITx courses',
    'email_helpers': 'Receive e-mails about how to help with MITx',
    'mitx_unenroll': 'Fully unenrolled -- no further communications',
    '6002x_unenroll': 'Took and dropped 6002x'
}
def add_user_to_default_group(user, group):
    """
    Add the user (by username) to the named default group, creating the
    UserTestGroup lazily from DEFAULT_GROUPS if it doesn't exist yet.
    """
    try:
        test_group = UserTestGroup.objects.get(name=group)
    except UserTestGroup.DoesNotExist:
        test_group = UserTestGroup(name=group, description=DEFAULT_GROUPS[group])
        test_group.save()
    test_group.users.add(User.objects.get(username=user))
    test_group.save()
def create_comments_service_user(user):
    """Mirror the Django user into the comments (discussion) service, if enabled."""
    if not settings.FEATURES['ENABLE_DISCUSSION_SERVICE']:
        # Don't try--it won't work, and it will fill the logs with lots of errors
        return
    try:
        cc.User.from_django_user(user).save()
    except Exception:  # pylint: disable=broad-except
        log = logging.getLogger("edx.discussion")  # pylint: disable=redefined-outer-name
        log.error(
            "Could not create comments service user with id {}".format(user.id),
            exc_info=True
        )
# Define login and logout handlers here in the models file, instead of the views file,
# so that they are more likely to be loaded when a Studio user brings up the Studio admin
# page to login. These are currently the only signals available, so we need to continue
# identifying and logging failures separately (in views).
@receiver(user_logged_in)
def log_successful_login(sender, request, user, **kwargs):  # pylint: disable=unused-argument
    """Handler to log when logins have occurred successfully."""
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        # PII-squelched deployments log only the numeric user id.
        message = u"Login success - user.id: {0}".format(user.id)
    else:
        message = u"Login success - {0} ({1})".format(user.username, user.email)
    AUDIT_LOG.info(message)
@receiver(user_logged_out)
def log_successful_logout(sender, request, user, **kwargs):  # pylint: disable=unused-argument
    """Handler to log when logouts have occurred successfully."""
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        # PII-squelched deployments log only the numeric user id.
        message = u"Logout - user.id: {0}".format(request.user.id)
    else:
        message = u"Logout - {0}".format(request.user)
    AUDIT_LOG.info(message)
@receiver(user_logged_in)
@receiver(user_logged_out)
def enforce_single_login(sender, request, user, signal, **kwargs):  # pylint: disable=unused-argument
    """
    Sets the current session id in the user profile,
    to prevent concurrent logins.
    """
    if not settings.FEATURES.get('PREVENT_CONCURRENT_LOGINS', False):
        return
    # On login record the session key; on logout clear it.
    session_key = request.session.session_key if signal == user_logged_in else None
    if user:
        user.profile.set_login_session(session_key)
class DashboardConfiguration(ConfigurationModel):
    """Dashboard Configuration settings.
    Includes configuration options for the dashboard, which impact behavior and rendering for the application.
    """
    # Window (in seconds) during which a new enrollment counts as "recent".
    recent_enrollment_time_delta = models.PositiveIntegerField(
        default=0,
        help_text="The number of seconds in which a new enrollment is considered 'recent'. "
                  "Used to display notifications."
    )

    @property
    def recent_enrollment_seconds(self):
        """Readable alias for recent_enrollment_time_delta."""
        return self.recent_enrollment_time_delta
class LinkedInAddToProfileConfiguration(ConfigurationModel):
    """
    LinkedIn Add to Profile Configuration
    This configuration enables the "Add to Profile" LinkedIn
    button on the student dashboard.  The button appears when
    users have a certificate available; when clicked,
    users are sent to the LinkedIn site with a pre-filled
    form allowing them to add the certificate to their
    LinkedIn profile.
    """
    # Maps a course mode slug to the (translated) certificate display name
    # used on LinkedIn; _cert_name() falls back to a generic name for
    # unknown modes.
    MODE_TO_CERT_NAME = {
        "honor": _(u"{platform_name} Honor Code Certificate for {course_name}"),
        "verified": _(u"{platform_name} Verified Certificate for {course_name}"),
        "professional": _(u"{platform_name} Professional Certificate for {course_name}"),
        "no-id-professional": _(
            u"{platform_name} Professional Certificate for {course_name}"
        ),
    }
    company_identifier = models.TextField(
        help_text=_(
            u"The company identifier for the LinkedIn Add-to-Profile button "
            u"e.g 0_0dPSPyS070e0HsE9HNz_13_d11_"
        )
    )
    # Deprecated
    dashboard_tracking_code = models.TextField(default="", blank=True)
    trk_partner_name = models.CharField(
        max_length=10,
        default="",
        blank=True,
        help_text=_(
            u"Short identifier for the LinkedIn partner used in the tracking code. "
            u"(Example: 'edx')  "
            u"If no value is provided, tracking codes will not be sent to LinkedIn."
        )
    )

    def add_to_profile_url(self, course_key, course_name, cert_mode, cert_url, source="o", target="dashboard"):
        """Construct the URL for the "add to profile" button.
        Arguments:
            course_key (CourseKey): The identifier for the course.
            course_name (unicode): The display name of the course.
            cert_mode (str): The course mode of the user's certificate (e.g. "verified", "honor", "professional")
            cert_url (str): The download URL for the certificate.
        Keyword Arguments:
            source (str): Either "o" (for onsite/UI), "e" (for emails), or "m" (for mobile)
            target (str): An identifier for the occurrance of the button.
        Returns:
            unicode: the full LinkedIn add-to-profile URL with query string.
        """
        # OrderedDict keeps the query parameters in a stable, predictable order.
        params = OrderedDict([
            ('_ed', self.company_identifier),
            ('pfCertificationName', self._cert_name(course_name, cert_mode).encode('utf-8')),
            ('pfCertificationUrl', cert_url),
            ('source', source)
        ])
        tracking_code = self._tracking_code(course_key, cert_mode, target)
        if tracking_code is not None:
            params['trk'] = tracking_code
        return u'http://www.linkedin.com/profile/add?{params}'.format(
            params=urlencode(params)
        )

    def _cert_name(self, course_name, cert_mode):
        """Name of the certification, for display on LinkedIn. """
        return self.MODE_TO_CERT_NAME.get(
            cert_mode,
            _(u"{platform_name} Certificate for {course_name}")
        ).format(
            platform_name=settings.PLATFORM_NAME,
            course_name=course_name
        )

    def _tracking_code(self, course_key, cert_mode, target):
        """Create a tracking code for the button.
        Tracking codes are used by LinkedIn to collect
        analytics about certifications users are adding
        to their profiles.
        The tracking code format is:
            &trk=[partner name]-[certificate type]-[date]-[target field]
        In our case, we're sending:
            &trk=edx-{COURSE ID}_{COURSE MODE}-{TARGET}
        If no partner code is configured, then this will
        return None, indicating that tracking codes are disabled.
        Arguments:
            course_key (CourseKey): The identifier for the course.
            cert_mode (str): The enrollment mode for the course.
            target (str): Identifier for where the button is located.
        Returns:
            unicode or None
        """
        return (
            u"{partner}-{course_key}_{cert_mode}-{target}".format(
                partner=self.trk_partner_name,
                course_key=unicode(course_key),
                cert_mode=cert_mode,
                target=target
            )
            if self.trk_partner_name else None
        )
class EntranceExamConfiguration(models.Model):
    """
    Represents a Student's entrance exam specific data for a single Course
    """
    user = models.ForeignKey(User, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
    updated = models.DateTimeField(auto_now=True, db_index=True)
    # if skip_entrance_exam is True, then student can skip entrance exam
    # for the course
    skip_entrance_exam = models.BooleanField(default=True)

    class Meta(object):
        """
        Meta class to make user and course_id unique in the table
        """
        unique_together = (('user', 'course_id'), )

    def __unicode__(self):
        return "[EntranceExamConfiguration] %s: %s (%s) = %s" % (
            self.user, self.course_id, self.created, self.skip_entrance_exam
        )

    @classmethod
    def user_can_skip_entrance_exam(cls, user, course_key):
        """
        Return True if given user can skip entrance exam for given course otherwise False.
        """
        if not settings.FEATURES.get('ENTRANCE_EXAMS', False):
            # Feature disabled: nobody may skip.
            return False
        try:
            record = EntranceExamConfiguration.objects.get(user=user, course_id=course_key)
        except EntranceExamConfiguration.DoesNotExist:
            return False
        return record.skip_entrance_exam
class LanguageField(models.CharField):
    """Represents a language from the ISO 639-1 language set."""

    def __init__(self, *args, **kwargs):
        """Creates a LanguageField.
        Accepts all the same kwargs as a CharField, except for max_length and
        choices. help_text defaults to a description of the ISO 639-1 set.
        """
        # max_length and choices are fixed by this field type; drop any
        # caller-supplied overrides.
        kwargs.pop('max_length', None)
        kwargs.pop('choices', None)
        kwargs.setdefault(
            'help_text',
            _("The ISO 639-1 language code for this language."),
        )
        super(LanguageField, self).__init__(
            max_length=16,
            choices=settings.ALL_LANGUAGES,
            *args,
            **kwargs
        )
# Register the custom LanguageField with the migration tool's field
# introspection (add_introspection_rules is presumably South's API --
# the import is outside this view; confirm).
add_introspection_rules([], [r"^student\.models\.LanguageField"])
class LanguageProficiency(models.Model):
    """
    Represents a user's language proficiency.
    Note that we have not found a way to emit analytics change events by using signals directly on this
    model or on UserProfile. Therefore if you are changing LanguageProficiency values, it is important
    to go through the accounts API (AccountsView) defined in
    /edx-platform/openedx/core/djangoapps/user_api/accounts/views.py or its associated api method
    (update_account_settings) so that the events are emitted.
    """
    user_profile = models.ForeignKey(UserProfile, db_index=True, related_name='language_proficiencies')
    code = models.CharField(
        max_length=16,
        blank=False,
        choices=settings.ALL_LANGUAGES,
        help_text=_("The ISO 639-1 language code for this language.")
    )

    class Meta:
        unique_together = (('code', 'user_profile'),)
|
shubhdev/openedx
|
common/djangoapps/student/models.py
|
Python
|
agpl-3.0
| 67,408
|
# -*- coding: utf-8 -*-
"""Tests for the proofreadpage module."""
#
# (C) Pywikibot team, 2015-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import difflib
import json
import pywikibot
from pywikibot.data import api
from pywikibot.proofreadpage import IndexPage, ProofreadPage
from pywikibot.tools import has_module
from tests import unittest_print
from tests.aspects import unittest, require_modules, TestCase
from tests.basepage_tests import (
BasePageMethodsTestBase,
BasePageLoadRevisionsCachingTestBase,
)
class TestProofreadPageInvalidSite(TestCase):

    """Test ProofreadPage class."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def test_invalid_site_source(self):
        """Test ProofreadPage from invalid Site as source."""
        # enwiki has no ProofreadPage extension, so construction must fail.
        with self.assertRaises(pywikibot.UnknownExtension):
            ProofreadPage(self.site, 'title')
class TestBasePageMethodsProofreadPage(BasePageMethodsTestBase):

    """Test behavior of ProofreadPage methods inherited from BasePage."""

    family = 'wikisource'
    code = 'en'

    def setUp(self):
        """Create the ProofreadPage fixture, then run the base setUp."""
        title = 'Page:Popular Science Monthly Volume 1.djvu/12'
        self._page = ProofreadPage(self.site, title)
        super(TestBasePageMethodsProofreadPage, self).setUp()

    def test_basepage_methods(self):
        """Test ProofreadPage methods inherited from superclass BasePage."""
        self._test_invoke()
        self._test_return_datatypes()
class TestLoadRevisionsCachingProofreadPage(
        BasePageLoadRevisionsCachingTestBase):

    """Test site.loadrevisions() caching."""

    family = 'wikisource'
    code = 'en'

    def setUp(self):
        """Create the ProofreadPage fixture, then run the base setUp."""
        title = 'Page:Popular Science Monthly Volume 1.djvu/12'
        self._page = ProofreadPage(self.site, title)
        super(TestLoadRevisionsCachingProofreadPage, self).setUp()

    def test_page_text(self):
        """Test site.loadrevisions() with Page.text."""
        self._test_page_text()
class TestProofreadPageParseTitle(TestCase):

    """Test ProofreadPage._parse_title() function."""

    cached = True

    # Use sites to run parametrized tests.  Each entry maps a page title to
    # the expected (base, base_ext, num) decomposition.
    sites = {
        '1': {
            'family': 'wikisource', 'code': 'en',
            'title': 'Page:Test.djvu/12',
            'tuple': ('Test.djvu', 'djvu', 12),
        },
        '2': {
            'family': 'wikisource', 'code': 'en',
            'title': 'Page:Test djvu/12',
            'tuple': ('Test djvu', '', 12),
        },
        '3': {
            'family': 'wikisource', 'code': 'en',
            'title': 'Page:Test.jpg/12',
            'tuple': ('Test.jpg', 'jpg', 12),
        },
        '4': {
            'family': 'wikisource', 'code': 'en',
            'title': 'Page:Test jpg/12',
            'tuple': ('Test jpg', '', 12),
        },
        '5': {
            'family': 'wikisource', 'code': 'en',
            'title': 'Page:Test.jpg',
            'tuple': ('Test.jpg', 'jpg', None),
        },
        '6': {
            'family': 'wikisource', 'code': 'en',
            'title': 'Page:Test jpg',
            'tuple': ('Test jpg', '', None),
        },
    }

    # NOTE: the previous no-op setUpClass override (which only called super
    # and carried a docstring copy-pasted from TestIndexPageMappings) has
    # been removed; it prepared nothing for these tests.

    def test_parse_title(self, key):
        """Test ProofreadPage._parse_title() function."""
        data = self.sites[key]
        title = data['title']
        base, base_ext, num = data['tuple']
        page = ProofreadPage(self.site, title)
        self.assertEqual(page._base, base)
        self.assertEqual(page._base_ext, base_ext)
        self.assertEqual(page._num, num)
class TestProofreadPageValidSite(TestCase):

    """Test ProofreadPage class."""

    family = 'wikisource'
    code = 'en'
    cached = True

    # Reference data for an existing, proofread page on enwikisource.
    valid = {
        'title': 'Page:Popular Science Monthly Volume 1.djvu/12',
        'index': 'Index:Popular Science Monthly Volume 1.djvu',
        'ql': 4,
        'user': 'T. Mazzei',
        'header': "{{rh|2|''THE POPULAR SCIENCE MONTHLY.''}}",
        'footer': '\n{{smallrefs}}',
        'url_image': ('https://upload.wikimedia.org/wikipedia/commons/'
                      'thumb/a/ac/Popular_Science_Monthly_Volume_1.djvu/'
                      'page12-1024px-Popular_Science_Monthly_Volume_1.djvu'
                      '.jpg'),
    }

    # A not-yet-created page whose scan image nevertheless exists.
    valid_redlink = {
        'title': 'Page:Pywikibot test page 3.jpg',
        'url_image': ('https://upload.wikimedia.org/wikisource/en/3/37/'
                      'Pywikibot_test_page_3.jpg'),
    }

    existing_invalid = {
        'title': 'Main Page',
    }

    existing_unlinked = {
        'title': 'Page:Pywikibot unlinked test page',
    }

    not_existing_invalid = {
        'title': 'User:cannot_exists',
        'title1': 'User:Popular Science Monthly Volume 1.djvu/12'
    }

    div_in_footer = {
        'title': 'Page:Pywikibot unlinked test page',
    }

    # Maps _full_header._has_div -> (header fragment, footer fragment) used
    # when building the expected preloaded page text below.
    class_pagetext_fmt = {
        True: ('<div class="pagetext">\n\n\n', '</div>'),
        False: ('', ''),
    }

    # Template for the text of a freshly preloaded (empty) ProofreadPage.
    fmt = ('<noinclude><pagequality level="1" user="{user}" />'
           '{class_pagetext}</noinclude>'
           '<noinclude>{references}{div_end}</noinclude>')

    def test_valid_site_source(self):
        """Test ProofreadPage from valid Site as source."""
        page = ProofreadPage(self.site, 'Page:dummy test page')
        self.assertEqual(page.namespace(), self.site.proofread_page_ns)

    def test_invalid_existing_page_source(self):
        """Test ProofreadPage from invalid existing Page as source."""
        source = pywikibot.Page(self.site, self.existing_invalid['title'])
        self.assertRaises(ValueError, ProofreadPage, source)

    def test_invalid_not_existing_page_source(self):
        """Test ProofreadPage from invalid not existing Page as source."""
        # namespace is forced
        source = pywikibot.Page(self.site,
                                self.not_existing_invalid['title'])
        fixed_source = pywikibot.Page(self.site,
                                      source.title(with_ns=False),
                                      ns=self.site.proofread_page_ns)
        page = ProofreadPage(fixed_source)
        self.assertEqual(page.title(), fixed_source.title())

    def test_invalid_not_existing_page_source_wrong_ns(self):
        """Test ProofreadPage from Page not existing in non-Page ns."""
        source = pywikibot.Page(self.site,
                                self.not_existing_invalid['title1'])
        self.assertRaises(ValueError, ProofreadPage, source)

    def test_invalid_link_source(self):
        """Test ProofreadPage from invalid Link as source."""
        source = pywikibot.Link(self.not_existing_invalid['title'],
                                source=self.site)
        self.assertRaises(ValueError, ProofreadPage, source)

    def test_valid_link_source(self):
        """Test ProofreadPage from valid Link as source."""
        source = pywikibot.Link(
            self.valid['title'],
            source=self.site,
            default_namespace=self.site.proofread_page_ns)
        page = ProofreadPage(source)
        self.assertEqual(page.title(with_ns=False), source.title)
        self.assertEqual(page.namespace(), source.namespace)

    def test_valid_parsing(self):
        """Test ProofreadPage page parsing functions."""
        page = ProofreadPage(self.site, self.valid['title'])
        self.assertEqual(page.ql, self.valid['ql'])
        self.assertEqual(page.user, self.valid['user'])
        self.assertEqual(page.header, self.valid['header'])
        self.assertEqual(page.footer, self.valid['footer'])

    def test_div_in_footer(self):
        """Test ProofreadPage page parsing functions."""
        page = ProofreadPage(self.site, self.div_in_footer['title'])
        self.assertTrue(page.footer.endswith('</div>'))

    def test_decompose_recompose_text(self):
        """Test ProofreadPage page decomposing/composing text."""
        page = ProofreadPage(self.site, self.valid['title'])
        plain_text = pywikibot.Page(self.site, self.valid['title']).text
        assert page.text
        # Round-tripping through the header/body/footer decomposition must
        # reproduce the wikitext exactly.
        self.assertEqual(plain_text, page.text)

    def test_preload_from_not_existing_page(self):
        """Test ProofreadPage page decomposing/composing text."""
        page = ProofreadPage(self.site, 'Page:dummy test page')
        # Fetch page text to instantiate page._full_header, in order to allow
        # for proper test result preparation.
        page.text
        class_pagetext, div = self.class_pagetext_fmt[
            page._full_header._has_div]
        self.assertEqual(page.text,
                         self.fmt.format(user=self.site.username(),
                                         class_pagetext=class_pagetext,
                                         references='<references/>',
                                         div_end=div))

    def test_preload_from_empty_text(self):
        """Test ProofreadPage page decomposing/composing text."""
        page = ProofreadPage(self.site, 'Page:dummy test page')
        page.text = ''
        class_pagetext, div = self.class_pagetext_fmt[
            page._full_header._has_div]
        self.assertEqual(page.text,
                         self.fmt.format(user=self.site.username(),
                                         class_pagetext=class_pagetext,
                                         references='',
                                         div_end=div))

    def test_json_format(self):
        """Test conversion to json format."""
        page = ProofreadPage(self.site, self.valid['title'])
        # Fetch the page content in JSON content format directly from the
        # API and compare it with the locally composed JSON.
        rvargs = {'rvprop': 'ids|flags|timestamp|user|comment|content',
                  'rvcontentformat': 'application/json',
                  'titles': page,
                  }
        rvgen = self.site._generator(api.PropertyGenerator,
                                     type_arg='info|revisions',
                                     total=1, **rvargs)
        rvgen.set_maximum_items(-1)  # suppress use of rvlimit parameter
        try:
            pagedict = next(iter(rvgen))
            loaded_text = pagedict.get('revisions')[0].get('*')
        except (StopIteration, TypeError, KeyError, ValueError, IndexError):
            loaded_text = ''
        page_text = page._page_to_json()
        self.assertEqual(json.loads(page_text), json.loads(loaded_text))

    @require_modules('bs4')
    @unittest.skip('T181913 and T114318')
    def test_url_image(self):
        """Test fetching of url image of the scan of ProofreadPage."""
        page = ProofreadPage(self.site, self.valid['title'])
        self.assertEqual(page.url_image, self.valid['url_image'])
        page = ProofreadPage(self.site, self.valid_redlink['title'])
        self.assertEqual(page.url_image, self.valid_redlink['url_image'])
        page = ProofreadPage(self.site, self.existing_unlinked['title'])
        # test Exception in property.
        self.assertRaises(ValueError, getattr, page, 'url_image')
class TestPageQuality(TestCase):

    """Test page quality."""

    family = 'wikisource'
    code = 'en'
    cached = True

    def test_applicable_quality_level(self):
        """Test Page.quality_level when applicable."""
        page = ProofreadPage(self.get_site(),
                             'Page:Popular Science Monthly Volume 49.djvu/1')
        self.assertEqual(page.content_model, 'proofread-page')
        self.assertEqual(page.quality_level, 0)
class BS4TestCase(TestCase):

    """Run tests which need bs4 being installed."""

    @classmethod
    def setUpClass(cls):
        """Check whether bs4 module is installed already."""
        if not has_module('bs4'):
            # Print the skip message manually because setUpClass runs before
            # the usual per-test reporting.
            unittest_print(
                'all tests ({module}.{name})\n{doc} ... '
                .format(module=__name__, doc=cls.__doc__, name=cls.__name__),
                end='')
            # skipTest cannot be used with Python 2 for setUpClass
            raise unittest.SkipTest('bs4 not installed')
        super(BS4TestCase, cls).setUpClass()
class TestPageOCR(BS4TestCase):

    """Test page ocr functions."""

    family = 'wikisource'
    code = 'en'
    cached = True

    # Expected (error, text) result per OCR tool.  The reference texts keep
    # each tool's characteristic misreadings as returned by the service;
    # the tests only require a SequenceMatcher ratio > 0.9, not equality.
    data = {'title': 'Page:Popular Science Monthly Volume 1.djvu/10',
            'hocr': (False, 'ENTERED, according to Act of Congress, in the '
                            'year 1872,\nBY D. APPLETON & CO.,\nIn the Office '
                            'of the Librarian of Congress, at '
                            'Washington.\n\n'),
            'ocr': (False, 'EsTEnen, according to Act of Congress, in the '
                           'year 1872,\nBy D. APPLETON & CO.,\nIn the '
                           'Office of the Librarian of Congress, at '
                           'Washington.\n\u000c'),
            'googleOCR': (False, 'ENTERED, according to Act of Congress, in '
                                 'the year 1572,\nBY D. APPLETON & CO.\n'
                                 'In the Office of the Librarian of '
                                 'Congress, at Washington.\n4 334\n'),
            }

    def setUp(self):
        """Test setUp."""
        site = self.get_site()
        title = self.data['title']
        self.page = ProofreadPage(site, title)
        super(TestPageOCR, self).setUp()

    def test_ocr_exceptions(self):
        """Test page.ocr() exceptions."""
        # Unknown ocr_tool values must be rejected.
        self.assertRaises(TypeError, self.page.ocr, ocr_tool='dummy')

    def test_do_hocr(self):
        """Test page._do_hocr()."""
        error, text = self.page._do_hocr()
        if error:
            # Remote OCR service unavailable; do not fail the suite.
            self.skipTest(text)
        ref_error, ref_text = self.data['hocr']
        self.assertEqual(error, ref_error)
        s = difflib.SequenceMatcher(None, text, ref_text)
        self.assertGreater(s.ratio(), 0.9)

    def test_do_ocr_phetools(self):
        """Test page._do_ocr(ocr_tool='phetools')."""
        error, text = self.page._do_ocr(ocr_tool='phetools')
        ref_error, ref_text = self.data['ocr']
        if error:
            self.skipTest(text)
        self.assertEqual(error, ref_error)
        s = difflib.SequenceMatcher(None, text, ref_text)
        self.assertGreater(s.ratio(), 0.9)

    def test_do_ocr_googleocr(self):
        """Test page._do_ocr(ocr_tool='googleOCR')."""
        error, text = self.page._do_ocr(ocr_tool='googleOCR')
        if error:
            self.skipTest(text)
        ref_error, ref_text = self.data['googleOCR']
        self.assertEqual(error, ref_error)
        s = difflib.SequenceMatcher(None, text, ref_text)
        self.assertGreater(s.ratio(), 0.9)

    def test_ocr_googleocr(self):
        """Test page.ocr(ocr_tool='googleOCR')."""
        try:
            text = self.page.ocr(ocr_tool='googleOCR')
        except Exception as exc:
            # ocr() wraps failures in ValueError; anything else is a bug.
            self.assertIsInstance(exc, ValueError)
        else:
            ref_error, ref_text = self.data['googleOCR']
            s = difflib.SequenceMatcher(None, text, ref_text)
            self.assertGreater(s.ratio(), 0.9)
class TestProofreadPageIndexProperty(BS4TestCase):

    """Test ProofreadPage index property."""

    family = 'wikisource'
    code = 'en'
    cached = True

    valid = {
        'title': 'Page:Popular Science Monthly Volume 1.djvu/12',
        'index': 'Index:Popular Science Monthly Volume 1.djvu',
    }

    # A page linked from more than one Index page.
    existing_multilinked = {
        'title': 'Page:Pywikibot test page.djvu/1',
        'index_1': 'Index:Pywikibot test page.djvu',
        'index_2': 'Index:Pywikibot test page 2',
    }

    existing_unlinked = {
        'title': 'Page:Pywikibot unlinked test page',
    }

    def test_index(self):
        """Test index property."""
        # Page with Index.
        page = ProofreadPage(self.site, self.valid['title'])
        index_page = IndexPage(self.site, self.valid['index'])

        # Test property.
        self.assertEqual(page.index, index_page)

        # Test deleter
        del page.index
        self.assertFalse(hasattr(page, '_index'))
        # Test setter with wrong type.
        self.assertRaises(TypeError, setattr, page, 'index', 'invalid index')
        # Test setter with correct type.
        page.index = index_page
        self.assertEqual(page.index, index_page)

        # Page linked to several indexes: the primary one wins; _index caches
        # (primary, [others]).
        page = ProofreadPage(self.site, self.existing_multilinked['title'])
        index_page_1 = IndexPage(self.site,
                                 self.existing_multilinked['index_1'])
        index_page_2 = IndexPage(self.site,
                                 self.existing_multilinked['index_2'])
        self.assertEqual(page.index, index_page_1)
        self.assertNotEqual(page.index, index_page_2)
        self.assertEqual(page._index, (index_page_1, [index_page_2]))

        # Page without Index.
        page = ProofreadPage(self.site, self.existing_unlinked['title'])
        self.assertIsNone(page.index)
        self.assertEqual(page._index, (None, []))
class TestIndexPageInvalidSite(BS4TestCase):

    """Test IndexPage class."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def test_invalid_site_source(self):
        """Test IndexPage from invalid Site as source."""
        # enwiki lacks the ProofreadPage extension, so this must fail.
        with self.assertRaises(pywikibot.UnknownExtension):
            IndexPage(self.site, 'title')
class TestIndexPageValidSite(BS4TestCase):

    """Test IndexPage class."""

    family = 'wikisource'
    code = 'en'
    cached = True

    valid_index_title = 'Index:Popular Science Monthly Volume 1.djvu'
    existing_invalid_title = 'Main Page'
    not_existing_invalid_title = 'User:cannot_exists'

    def test_valid_site_as_source(self):
        """Test IndexPage from valid Site as source."""
        index_page = IndexPage(self.site, 'Index:dummy test page')
        self.assertEqual(index_page.namespace(), self.site.proofread_index_ns)

    def test_invalid_existing_page_as_source(self):
        """Test IndexPage from invalid existing Page as source."""
        source = pywikibot.Page(self.site, self.existing_invalid_title)
        with self.assertRaises(ValueError):
            IndexPage(source)

    def test_invalid_not_existing_page_as_source(self):
        """Test IndexPage from Page not existing in non-Page ns as source."""
        source = pywikibot.Page(self.site, self.not_existing_invalid_title)
        with self.assertRaises(ValueError):
            IndexPage(source)

    def test_invalid_link_as_source(self):
        """Test IndexPage from invalid Link as source."""
        source = pywikibot.Link(self.not_existing_invalid_title,
                                source=self.site)
        with self.assertRaises(ValueError):
            IndexPage(source)

    def test_valid_link_as_source(self):
        """Test IndexPage from valid Link as source."""
        source = pywikibot.Link(
            self.valid_index_title,
            source=self.site,
            default_namespace=self.site.proofread_page_ns)
        index_page = IndexPage(source)
        self.assertEqual(index_page.title(with_ns=False), source.title)
        self.assertEqual(index_page.namespace(), source.namespace)
class TestBasePageMethodsIndexPage(BS4TestCase, BasePageMethodsTestBase):

    """Test behavior of ProofreadPage methods inherited from BasePage."""

    family = 'wikisource'
    code = 'en'

    def setUp(self):
        """Create the IndexPage fixture, then run the base setUp."""
        title = 'Index:Popular Science Monthly Volume 1.djvu'
        self._page = IndexPage(self.site, title)
        super(TestBasePageMethodsIndexPage, self).setUp()

    def test_basepage_methods(self):
        """Test IndexPage methods inherited from superclass BasePage."""
        self._test_invoke()
        self._test_return_datatypes()
class TestLoadRevisionsCachingIndexPage(BS4TestCase,
                                        BasePageLoadRevisionsCachingTestBase):

    """Test site.loadrevisions() caching."""

    family = 'wikisource'
    code = 'en'

    def setUp(self):
        """Create the IndexPage fixture, then run the base setUp."""
        title = 'Index:Popular Science Monthly Volume 1.djvu'
        self._page = IndexPage(self.site, title)
        super(TestLoadRevisionsCachingIndexPage, self).setUp()

    def test_page_text(self):
        """Test site.loadrevisions() with Page.text."""
        self._test_page_text()
@unittest.skip('T193637 and T114318')
class TestIndexPageMappings(BS4TestCase):

    """Test IndexPage class."""

    # Parametrized per-site fixtures: index title, page-name pattern and the
    # expected label/number mappings.
    sites = {
        'enws': {
            'family': 'wikisource',
            'code': 'en',
            'index': 'Index:Popular Science Monthly Volume 1.djvu',
            'num_pages': 804,
            'page': 'Page:Popular Science Monthly Volume 1.djvu/{0}',
            'get_label': [11, 11, '1'],
            'get_number': [[1, {11}],
                           ['Cvr', {1, 9, 10, 804}],
                           ],
            # 'get_page' is filled in setUpClass.
        },
        'dews': {  # dews does not use page convention name/number.
            'family': 'wikisource',
            'code': 'de',
            'index': 'Index:Musen-Almanach für das Jahr 1799',
            'num_pages': 272,
            'page': 'Seite:Schiller_Musenalmanach_1799_{0:3d}.jpg',
            'get_label': [120, 120, '120'],  # page no, title no, label
            'get_number': [[120, {120}],
                           ],
            # 'get_page' is filled in setUpClass.
        },
        'frws': {
            'family': 'wikisource',
            'code': 'fr',
            'index': 'Index:Segard - Hymnes profanes, 1894.djvu',
            'num_pages': 107,
            'page': 'Page:Segard - Hymnes profanes, 1894.djvu/{0}',
            'get_label': [11, 11, '8'],
            'get_number': [[8, {11}],
                           ['-', set(range(1, 4)) | set(range(101, 108))],
                           ],
            # 'get_page' is filled in setUpClass.
        },
    }

    cached = True

    @classmethod
    def setUpClass(cls):
        """Prepare get_page dataset for tests."""
        super(TestIndexPageMappings, cls).setUpClass()
        for key, site_def in cls.sites.items():
            site = cls.get_site(name=key)
            base_title = site_def['page']

            # 'get_page' has same structure as 'get_number'.
            site_def['get_page'] = []
            for label, page_numbers in site_def['get_number']:
                page_set = {ProofreadPage(site, base_title.format(i))
                            for i in page_numbers}
                site_def['get_page'].append([label, page_set])

    def test_check_if_cached(self, key):
        """Test if cache is checked and loaded properly."""
        data = self.sites[key]
        index_page = IndexPage(self.site, self.sites[key]['index'])
        num, title_num, label = data['get_label']

        # First access populates the cache.
        self.assertIs(index_page._cached, False)
        fetched_label = index_page.get_label_from_page_number(num)

        self.assertIs(index_page._cached, True)
        self.assertEqual(label, fetched_label)

        # Check if cache is refreshed.
        index_page._labels_from_page_number[num] = 'wrong cached value'
        self.assertEqual(index_page.get_label_from_page_number(num),
                         'wrong cached value')
        index_page._cached = False
        self.assertEqual(index_page.get_label_from_page_number(num), label)

    def test_num_pages(self, key):
        """Test num_pages property."""
        index_page = IndexPage(self.site, self.sites[key]['index'])
        self.assertEqual(index_page.num_pages, self.sites[key]['num_pages'])

    def test_get_labels(self, key):
        """Test IndexPage page get_label_from_* functions."""
        data = self.sites[key]
        num, title_num, label = data['get_label']
        index_page = IndexPage(self.site, self.sites[key]['index'])
        page_title = self.sites[key]['page'].format(title_num)
        proofread_page = ProofreadPage(self.site, page_title)

        # Get label from number.
        self.assertEqual(index_page.get_label_from_page_number(num), label)
        # Error if number does not exists.
        self.assertRaises(KeyError, index_page.get_label_from_page_number, -1)
        # Get label from page.
        self.assertEqual(index_page.get_label_from_page(proofread_page), label)
        # Error if page does not exists.
        self.assertRaises(KeyError, index_page.get_label_from_page, None)

    def test_get_page_and_number(self, key):
        """Test IndexPage page get_page_number functions."""
        data = self.sites[key]
        index_page = IndexPage(self.site, self.sites[key]['index'])

        # Test get_page_numbers_from_label.
        for label, num_set in data['get_number']:
            # Get set of numbers from label with label as int or str.
            self.assertEqual(index_page.get_page_number_from_label(label),
                             num_set)
            self.assertEqual(index_page.get_page_number_from_label(str(label)),
                             num_set)

        # Error if label does not exists.
        # NOTE(review): this rebinding clobbers 'num_set' with [], so the
        # "Test get_page" loop below iterates nothing, and 'page_set' below
        # leaks from the last loop iteration — confirm this is intended.
        label, num_set = 'dummy label', []
        self.assertRaises(KeyError, index_page.get_page_number_from_label,
                          'dummy label')

        # Test get_page_from_label.
        for label, page_set in data['get_page']:
            # Get set of pages from label with label as int or str.
            self.assertEqual(index_page.get_page_from_label(label),
                             page_set)
            self.assertEqual(index_page.get_page_from_label(str(label)),
                             page_set)

        # Error if label does not exists.
        self.assertRaises(KeyError, index_page.get_page_from_label,
                          'dummy label')

        # Test get_page.
        for n in num_set:
            p = index_page.get_page(n)
            self.assertEqual(index_page.get_number(p), n)

        # Test get_number.
        for p in page_set:
            n = index_page.get_number(p)
            self.assertEqual(index_page.get_page(n), p)

    def test_page_gen(self, key):
        """Test Index page generator."""
        data = self.sites[key]
        num, title_num, label = data['get_label']
        index_page = IndexPage(self.site, self.sites[key]['index'])
        page_title = self.sites[key]['page'].format(title_num)
        proofread_page = ProofreadPage(self.site, page_title)

        # Check start/end limits.
        self.assertRaises(ValueError, index_page.page_gen, -1, 2)
        self.assertRaises(ValueError, index_page.page_gen, 1, -1)
        self.assertRaises(ValueError, index_page.page_gen, 2, 1)

        # Check quality filters.
        gen = index_page.page_gen(num, num, filter_ql=range(5))
        self.assertEqual(list(gen), [proofread_page])
        gen = index_page.page_gen(num, num, filter_ql=[0])
        self.assertEqual(list(gen), [])
class TestIndexPageMappingsRedlinks(BS4TestCase):

    """Test IndexPage mappings with redlinks."""

    family = 'wikisource'
    code = 'en'
    cached = True

    index_name = 'Index:Pywikibot test page.djvu'
    page_names = ['Page:Pywikibot test page.djvu/1',
                  'Page:Pywikibot test page.djvu/2',
                  ]
    missing_name = 'Page:Pywikibot test page.djvu/2'

    @classmethod
    def setUpClass(cls):
        """Prepare tests by creating page instances."""
        super(TestIndexPageMappingsRedlinks, cls).setUpClass()
        cls.index = IndexPage(cls.site, cls.index_name)
        cls.pages = [ProofreadPage(cls.site, name) for name in cls.page_names]
        cls.missing = ProofreadPage(cls.site, cls.missing_name)

    def test_index_redlink(self):
        """Test index property with redlink."""
        self.assertEqual(self.missing.index, self.index)

    def test_get_page_and_number_redlink(self):
        """Test IndexPage page get_page_number functions with redlinks."""
        for proofread_page in self.pages:
            number = self.index.get_number(proofread_page)
            self.assertEqual(self.index.get_page(number), proofread_page)

    def test_page_gen_redlink(self):
        """Test Index page generator with redlinks."""
        # Check start/end limits.
        with self.assertRaises(ValueError):
            self.index.page_gen(-1, 2)
        with self.assertRaises(ValueError):
            self.index.page_gen(1, -1)
        with self.assertRaises(ValueError):
            self.index.page_gen(2, 1)

        gen = self.index.page_gen(1, None, filter_ql=range(5))
        self.assertEqual(list(gen), self.pages)
class TestIndexPageHasValidContent(BS4TestCase):

    """Unit tests for has_valid_content()."""

    family = 'wikisource'
    code = 'en'

    index_name = 'Index:Phosphor (1888).djvu'
    # A well-formed Index template and an unrelated template used to build
    # valid/invalid page texts below.
    valid_template = '{{%s|foo=bar}}' % IndexPage.INDEX_TEMPLATE
    other_template = '{{PoTM|bar=foobar}}'

    @classmethod
    def setUpClass(cls):
        """Prepare tests by creating an IndexPage instance."""
        super(TestIndexPageHasValidContent, cls).setUpClass()
        cls.index = IndexPage(cls.site, cls.index_name)

    def test_has_valid_content_empty(self):
        """Test empty page is invalid."""
        self.index.text = ''
        self.assertFalse(self.index.has_valid_content())

    def test_has_valid_content_non_template(self):
        """Test non-template is invalid."""
        self.index.text = 'foobar'
        self.assertFalse(self.index.has_valid_content())

    def test_has_valid_content_valid(self):
        """Test correct Index template is valid."""
        self.index.text = self.valid_template
        self.assertTrue(self.index.has_valid_content())

    def test_has_valid_content_prefixed(self):
        """Test prefixing Index template is invalid."""
        self.index.text = 'pre {}'.format(self.valid_template)
        self.assertFalse(self.index.has_valid_content())

    def test_has_valid_content_postfixed(self):
        """Test postfixing Index template is invalid."""
        self.index.text = '{}post'.format(self.valid_template)
        self.assertFalse(self.index.has_valid_content())

    def test_has_valid_content_pre_and_postfixed(self):
        """Test pre- and postfixing Index template is invalid."""
        self.index.text = 'pre{}post'.format(self.valid_template)
        self.assertFalse(self.index.has_valid_content())

    def test_has_valid_content_second_template(self):
        """Test postfixing a second template is invalid."""
        self.index.text = self.valid_template + self.other_template
        self.assertFalse(self.index.has_valid_content())

    def test_has_valid_content_wrong_template(self):
        """Test incorrect template is invalid."""
        self.index.text = self.other_template
        self.assertFalse(self.index.has_valid_content())

    def test_has_valid_content_missnamed_template(self):
        """Test misnamed Index template is invalid."""
        self.index.text = '{{%s_bar|foo=bar}}' % IndexPage.INDEX_TEMPLATE
        self.assertFalse(self.index.has_valid_content())

    def test_has_valid_content_nested_template(self):
        """Test nested templates is valid."""
        self.index.text = ('{{%s|foo=%s}}'
                           % (IndexPage.INDEX_TEMPLATE, self.other_template))
        self.assertTrue(self.index.has_valid_content())

    def test_has_valid_content_multiple_valid(self):
        """Test multiple Index templates is invalid."""
        self.index.text = self.valid_template * 2
        self.assertFalse(self.index.has_valid_content())
if __name__ == '__main__':  # pragma: no cover
    # Swallow SystemExit so a direct run exits cleanly after the test report.
    try:
        unittest.main()
    except SystemExit:
        pass
|
PersianWikipedia/pywikibot-core
|
tests/proofreadpage_tests.py
|
Python
|
mit
| 31,607
|
#!/usr/bin/env python
# encoding: utf-8
"""
The :class:`~IPython.core.application.Application` object for the command
line :command:`ipython` program.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
from traitlets.config.loader import Config
from traitlets.config.application import boolean_flag, catch_config_error, Application
from IPython.core import release
from IPython.core import usage
from IPython.core.completer import IPCompleter
from IPython.core.crashhandler import CrashHandler
from IPython.core.formatters import PlainTextFormatter
from IPython.core.history import HistoryManager
from IPython.core.prompts import PromptManager
from IPython.core.application import (
ProfileDir, BaseIPythonApplication, base_flags, base_aliases
)
from IPython.core.magics import ScriptMagics
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.extensions.storemagic import StoreMagics
from IPython.terminal.interactiveshell import TerminalInteractiveShell
from IPython.utils import warn
from IPython.paths import get_ipython_dir
from traitlets import (
Bool, List, Dict,
)
#-----------------------------------------------------------------------------
# Globals, utilities and helpers
#-----------------------------------------------------------------------------
_examples = """
ipython --matplotlib # enable matplotlib integration
ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
ipython --log-level=DEBUG # set logging to DEBUG
ipython --profile=foo # start with profile foo
ipython profile create foo # create profile foo w/ default config files
ipython help profile # show the help for the profile subcmd
ipython locate # print the path to the IPython directory
ipython locate profile foo # print the path to the directory for profile `foo`
"""
#-----------------------------------------------------------------------------
# Crash handler for this application
#-----------------------------------------------------------------------------
class IPAppCrashHandler(CrashHandler):
    """sys.excepthook for IPython itself, leaves a detailed report on disk."""

    def __init__(self, app):
        """Wire the generic CrashHandler up with IPython's contact details."""
        contact_name = release.author
        contact_email = release.author_email
        bug_tracker = 'https://github.com/ipython/ipython/issues'
        super(IPAppCrashHandler, self).__init__(
            app, contact_name, contact_email, bug_tracker
        )

    def make_report(self, traceback):
        """Return a string containing a crash report."""
        sec_sep = self.section_sep
        # Start with parent report
        report = [super(IPAppCrashHandler, self).make_report(traceback)]
        # Add interactive-specific info we may have
        rpt_add = report.append
        try:
            rpt_add(sec_sep + "History of session input:")
            for line in self.app.shell.user_ns['_ih']:
                rpt_add(line)
            rpt_add('\n*** Last line of input (may not be in above history):\n')
            rpt_add(self.app.shell._last_input_line + '\n')
        except Exception:
            # Best effort only: building the report must never raise itself.
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; Exception keeps the best-effort behavior
            # without masking interpreter-exit signals.
            pass
        return ''.join(report)
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
# Start from the application- and shell-level flag tables, then layer the
# terminal-frontend specific flags on top (merged below via flags.update).
flags = dict(base_flags)
flags.update(shell_flags)

frontend_flags = {}


def addflag(*args):
    """Register a boolean --x/--no-x flag pair in ``frontend_flags``.

    Thin wrapper over traitlets' ``boolean_flag``; replaces the previous
    ``addflag = lambda ...`` assignment (PEP 8 E731).
    """
    frontend_flags.update(boolean_flag(*args))

addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
        'Turn on auto editing of files with syntax errors.',
        'Turn off auto editing of files with syntax errors.'
        )
addflag('banner', 'TerminalIPythonApp.display_banner',
        "Display a banner upon starting IPython.",
        "Don't display a banner upon starting IPython."
        )
addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
        """Set to confirm when you try to exit IPython with an EOF (Control-D
    in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
    you can force a direct exit without any confirmation.""",
        "Don't prompt the user when exiting."
        )
addflag('term-title', 'TerminalInteractiveShell.term_title',
        "Enable auto setting the terminal title.",
        "Disable auto setting the terminal title."
        )
# Config preset behind the --classic flag: strip prompts, caching and colors
# so IPython looks and feels like the plain Python REPL.
classic_config = Config()
classic_config.InteractiveShell.cache_size = 0
classic_config.PlainTextFormatter.pprint = False
classic_config.PromptManager.in_template = '>>> '
classic_config.PromptManager.in2_template = '... '
classic_config.PromptManager.out_template = ''
classic_config.InteractiveShell.separate_in = ''
classic_config.InteractiveShell.separate_out = ''
classic_config.InteractiveShell.separate_out2 = ''
classic_config.InteractiveShell.colors = 'NoColor'
classic_config.InteractiveShell.xmode = 'Plain'

frontend_flags['classic']=(
    classic_config,
    "Gives IPython a similar feel to the classic Python prompt."
)
# # log doesn't make so much sense this way anymore
# paa('--log','-l',
# action='store_true', dest='InteractiveShell.logstart',
# help="Start logging to the default log file (./ipython_log.py).")
#
# # quick is harder to implement
frontend_flags['quick']=(
{'TerminalIPythonApp' : {'quick' : True}},
"Enable quick startup with no config files."
)
frontend_flags['i'] = (
{'TerminalIPythonApp' : {'force_interact' : True}},
"""If running code from the command line, become interactive afterwards.
It is often useful to follow this with `--` to treat remaining flags as
script arguments.
"""
)
flags.update(frontend_flags)
aliases = dict(base_aliases)
aliases.update(shell_aliases)
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
# Prints the IPython directory path, or delegates to the `profile` subcommand.
class LocateIPythonApp(BaseIPythonApplication):
    description = """print the path to the IPython dir"""

    subcommands = Dict(dict(
        profile=('IPython.core.profileapp.ProfileLocate',
            "print the path to an IPython profile directory",
        ),
    ))

    def start(self):
        if self.subapp is None:
            print(self.ipython_dir)
        else:
            return self.subapp.start()
class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
    """The terminal IPython application: wires up the interactive shell,
    banner, extensions and startup code, and dispatches subcommands."""
    name = u'ipython'
    description = usage.cl_usage
    crash_handler_class = IPAppCrashHandler
    examples = _examples

    flags = Dict(flags)
    aliases = Dict(aliases)
    classes = List()

    def _classes_default(self):
        """This has to be in a method, for TerminalIPythonApp to be available."""
        return [
            InteractiveShellApp, # ShellApp comes before TerminalApp, because
            self.__class__,      # it will also affect subclasses (e.g. QtConsole)
            TerminalInteractiveShell,
            PromptManager,
            HistoryManager,
            ProfileDir,
            PlainTextFormatter,
            IPCompleter,
            ScriptMagics,
            StoreMagics,
        ]

    # Subcommands that moved to Jupyter; kept as deprecated launchers.
    deprecated_subcommands = dict(
        qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
            """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console."""
        ),
        notebook=('notebook.notebookapp.NotebookApp',
            """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server."""
        ),
        console=('jupyter_console.app.ZMQTerminalIPythonApp',
            """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console."""
        ),
        nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
            "DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats."
        ),
        trust=('nbformat.sign.TrustNotebookApp',
            "DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load."
        ),
        kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
            "DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications."
        ),
    )
    subcommands = dict(
        profile = ("IPython.core.profileapp.ProfileApp",
            "Create and manage IPython profiles."
        ),
        kernel = ("ipykernel.kernelapp.IPKernelApp",
            "Start a kernel without an attached frontend."
        ),
        locate=('IPython.terminal.ipapp.LocateIPythonApp',
            LocateIPythonApp.description
        ),
        history=('IPython.core.historyapp.HistoryApp',
            "Manage the IPython history database."
        ),
    )
    deprecated_subcommands['install-nbextension'] = (
        "notebook.nbextensions.InstallNBExtensionApp",
        "DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files"
    )
    subcommands.update(deprecated_subcommands)

    # *do* autocreate requested profile, but don't create the config file.
    auto_create=Bool(True)
    # configurables
    quick = Bool(False, config=True,
        help="""Start IPython quickly by skipping the loading of config files."""
    )
    def _quick_changed(self, name, old, new):
        # When --quick is set, turn config-file loading into a no-op.
        if new:
            self.load_config_file = lambda *a, **kw: None

    display_banner = Bool(True, config=True,
        help="Whether to display a banner upon starting IPython."
    )

    # if there is code of files to run from the cmd line, don't interact
    # unless the --i flag (App.force_interact) is true.
    force_interact = Bool(False, config=True,
        help="""If a command or file is given via the command-line,
        e.g. 'ipython foo.py', start an interactive shell after executing the
        file or command."""
    )
    def _force_interact_changed(self, name, old, new):
        if new:
            self.interact = True

    def _file_to_run_changed(self, name, old, new):
        # A file/command/module on the command line disables interaction
        # unless --i was also given.
        if new:
            self.something_to_run = True
        if new and not self.force_interact:
            self.interact = False
    # The same logic applies to -c code and -m modules.
    _code_to_run_changed = _file_to_run_changed
    _module_to_run_changed = _file_to_run_changed

    # internal, not-configurable
    something_to_run=Bool(False)

    def parse_command_line(self, argv=None):
        """override to allow old '-pylab' flag with deprecation warning"""
        argv = sys.argv[1:] if argv is None else argv
        if '-pylab' in argv:
            # deprecated `-pylab` given,
            # warn and transform into current syntax
            argv = argv[:] # copy, don't clobber
            idx = argv.index('-pylab')
            warn.warn("`-pylab` flag has been deprecated.\n"
            "    Use `--matplotlib <backend>` and import pylab manually.")
            argv[idx] = '--pylab'
        return super(TerminalIPythonApp, self).parse_command_line(argv)

    @catch_config_error
    def initialize(self, argv=None):
        """Do actions after construct, but before starting the app."""
        super(TerminalIPythonApp, self).initialize(argv)
        if self.subapp is not None:
            # don't bother initializing further, starting subapp
            return
        # print self.extra_args
        if self.extra_args and not self.something_to_run:
            self.file_to_run = self.extra_args[0]
        self.init_path()
        # create the shell
        self.init_shell()
        # and draw the banner
        self.init_banner()
        # Now a variety of things that happen after the banner is printed.
        self.init_gui_pylab()
        self.init_extensions()
        self.init_code()

    def init_shell(self):
        """initialize the InteractiveShell instance"""
        # Create an InteractiveShell instance.
        # shell.display_banner should always be False for the terminal
        # based app, because we call shell.show_banner() by hand below
        # so the banner shows *before* all extension loading stuff.
        self.shell = TerminalInteractiveShell.instance(parent=self,
                        display_banner=False, profile_dir=self.profile_dir,
                        ipython_dir=self.ipython_dir, user_ns=self.user_ns)
        self.shell.configurables.append(self)

    def init_banner(self):
        """optionally display the banner"""
        if self.display_banner and self.interact:
            self.shell.show_banner()
        # Make sure there is a space below the banner.
        if self.log_level <= logging.INFO: print()

    def _pylab_changed(self, name, old, new):
        """Replace --pylab='inline' with --pylab='auto'"""
        if new == 'inline':
            warn.warn("'inline' not available as pylab backend, "
                      "using 'auto' instead.")
            self.pylab = 'auto'

    def start(self):
        # Subcommands take over entirely.
        if self.subapp is not None:
            return self.subapp.start()
        # perform any prexec steps:
        if self.interact:
            self.log.debug("Starting IPython's mainloop...")
            self.shell.mainloop()
        else:
            self.log.debug("IPython not interactive...")
def load_default_config(ipython_dir=None):
    """Return the merged configuration of the ``profile_default`` profile.

    Looks up ``ipython_config`` files under *ipython_dir* (falling back to
    the standard IPython directory).  Useful for embedded shells.
    """
    base_dir = ipython_dir if ipython_dir is not None else get_ipython_dir()
    profile_dir = os.path.join(base_dir, 'profile_default')
    merged = Config()
    for piece in Application._load_config_files("ipython_config",
                                                path=profile_dir):
        merged.update(piece)
    return merged
# Entry point used by the 'ipython' console script and direct execution.
launch_new_instance = TerminalIPythonApp.launch_instance
if __name__ == '__main__':
    launch_new_instance()
|
fzheng/codejam
|
lib/python2.7/site-packages/IPython/terminal/ipapp.py
|
Python
|
mit
| 13,800
|
# coding: utf-8
from .base import *
from .compound import *
from .primitive import *
|
aromanovich/jsl
|
jsl/fields/__init__.py
|
Python
|
bsd-3-clause
| 86
|
'''
Function to load a session created by Django
'''
from django.contrib.sessions.backends.db import SessionStore
def get_session(session_key):
    '''
    Loads and returns a session from the SessionStore based on the session key
    '''
    return SessionStore(session_key=session_key)
def is_valid_key(session_key):
    '''
    Return True when *session_key* refers to an existing session, False
    otherwise (the caller may then create a new one via session.flush()).
    '''
    # Django sometimes hands us None as the session key; not clear why.
    if session_key is None:
        return False
    # Saving under an unknown key makes Django allocate a fresh one --
    # e.g. when the session database was reset while a user was browsing.
    store = SessionStore(session_key=session_key)
    store.save()
    return store.session_key == session_key
|
geometalab/geoconverter
|
OGRgeoConverter/geoconverter/sessionhandler.py
|
Python
|
mit
| 859
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# NetProfile: Setup and entry points
# Copyright © 2013-2017 Alex Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (unicode_literals, print_function,
absolute_import, division)
import sys
try:
    # Optional speedup (Python 2 era): swap the pure-Python 'decimal'
    # module for the C implementation when available.
    import cdecimal
    sys.modules['decimal'] = cdecimal
except ImportError:
    pass
from six import PY3
from babel import Locale
from pyramid.config import Configurator
from pyramid.settings import asbool
from sqlalchemy import engine_from_config
from netprofile.common import cache
from netprofile.common.modules import IModuleManager
from netprofile.common.factory import RootFactory
from netprofile.db.connection import DBSession
from ._version import get_versions
if not PY3: # pragma: no cover
    # Ugly hack to reset Python 2 default string encoding
    # from ASCII to UTF-8.
    reload(sys) # noqa: F821
    sys.setdefaultencoding('utf-8')
__version__ = get_versions()['version']
del get_versions
# Module-level instance identifier (overridable via settings) and the
# module-manager placeholder.
inst_id = 'ru.netprofile'
inst_mm = None
def locale_neg(request):
    """Locale negotiator: explicit query param, then session, then the
    Accept-Language header, then the configured default; 'en' otherwise."""
    available = request.locales
    # 1. explicit ?__locale= request parameter
    selected = request.params.get('__locale')
    # 2. locale remembered in the session
    if selected is None:
        selected = request.session.get('ui.locale')
    # 3. HTTP Accept-Language negotiation
    if selected is None and request.accept_language:
        selected = Locale.negotiate(list(request.accept_language),
                                    list(request.locales),
                                    sep='-')
    if selected:
        selected = str(selected)
    # 4. application-wide default from the Pyramid settings
    if selected is None:
        selected = request.registry.settings.get(
            'pyramid.default_locale_name', 'en')
    if selected in available:
        # Remember the choice for subsequent requests.
        request.session['ui.locale'] = selected
        return selected
    return 'en'
def get_debug(request):
    """Request helper: the 'netprofile.debug' setting (False when unset)."""
    settings = request.registry.settings
    return settings.get('netprofile.debug', False)
def get_locales(request):
    """Map each configured language code to its parsed Babel Locale."""
    configured = request.registry.settings.get('pyramid.available_languages',
                                               '')
    result = {}
    for code in configured.split():
        result[code] = Locale.parse(code)
    return result
def get_current_locale(request):
    """Return the Locale object for the active locale name, or None."""
    locales = request.locales
    name = request.locale_name
    if name not in locales:
        return None
    return locales[name]
def get_csrf(request):
    """Return the session CSRF token as text, or None without a session."""
    if not request.session:
        return None
    token = request.session.get_csrf_token()
    if isinstance(token, bytes):
        token = token.decode()
    return token
class VHostPredicate(object):
    """Route/view predicate matching the 'netprofile.vhost' setting.

    The special value 'MAIN' matches only when no vhost is configured.
    """
    def __init__(self, val, config):
        self.needed = val
        self.current = config.registry.settings.get('netprofile.vhost')

    def text(self):
        return 'vhost = %s' % (self.needed,)

    phash = text

    def __call__(self, context, request):
        if self.needed == 'MAIN':
            return self.current is None
        return self.current == self.needed
def setup_config(settings):
    """Build the shared Pyramid Configurator.

    Normalizes debug/instance settings, binds the global SQLAlchemy
    session to the configured engine, sets up the cache and the locale
    negotiator, and registers the 'vhost' route/view predicates.
    Returns the Configurator (callers add their own routes/views).
    """
    global inst_id
    settings['netprofile.debug'] = asbool(settings.get('netprofile.debug'))
    if 'netprofile.instance_id' in settings:
        inst_id = settings.get('netprofile.instance_id')
    # Bind the module-global DBSession to the configured engine.
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    cache.cache = cache.configure_cache(settings)
    config = Configurator(settings=settings,
                          root_factory=RootFactory,
                          locale_negotiator=locale_neg)
    # Allow routes and views to match on the configured vhost.
    config.add_route_predicate('vhost', VHostPredicate)
    config.add_view_predicate('vhost', VHostPredicate)
    config.include('netprofile.common.crypto')
    return config
def main(global_config, **settings):
    """
    Pyramid WSGI application for most of NetProfile vhosts.
    """
    config = setup_config(settings)
    # Template globals, per-request setup and response post-processing.
    config.add_subscriber(
        'netprofile.common.subscribers.add_renderer_globals',
        'pyramid.events.BeforeRender')
    config.add_subscriber(
        'netprofile.common.subscribers.on_new_request',
        'pyramid.events.ContextFound')
    config.add_subscriber(
        'netprofile.common.subscribers.on_response',
        'pyramid.events.NewResponse')
    # Request helpers (reify=True caches the value per request).
    config.add_request_method(get_locales, str('locales'), reify=True)
    config.add_request_method(get_current_locale,
                              str('current_locale'), reify=True)
    config.add_request_method(get_debug, str('debug_enabled'), reify=True)
    config.add_request_method(get_csrf, str('get_csrf'))
    # Load the core module plus every module enabled for this instance.
    mmgr = config.registry.getUtility(IModuleManager)
    mmgr.load('core')
    mmgr.load_enabled()
    return config.make_wsgi_app()
|
unikmhz/npui
|
netprofile/netprofile/__init__.py
|
Python
|
agpl-3.0
| 5,064
|
from swtk.processors import *
class PassiveVoiceProcessor(Plugin):
    """Flags sentences written in the passive voice.

    A passive construction is detected whenever an auxiliary from
    ``dictionary`` is immediately followed by a past participle (POS
    tag 'VBN').
    """
    help = 'Finds sentences in passive voice.'
    run_priority = 160
    # Auxiliary verbs that can introduce a passive construction.
    dictionary = ['is', 'are', 'were', 'was', 'been', 'be']

    def __init__(self, dictionary_filename='./data/frequent_words.pickle'):
        # NOTE(review): dictionary_filename is accepted for interface
        # compatibility but is currently unused.
        self.counter = 0  # number of passive-voice sentences seen so far

    def process_sentence(self, sentence):
        """Tag *sentence* (and each auxiliary/participle pair) as passive."""
        tokens = sentence.tokens
        aux = frozenset(self.dictionary)  # O(1) membership per token
        indices = [i for i in range(len(tokens) - 1)
                   if tokens[i].word.lower() in aux
                   and tokens[i + 1].pos_tag == 'VBN']
        if indices:
            sentence.reports.append('_passiveVoice')
            self.counter += 1
        for index in indices:
            tokens[index].reports.append('_passiveVerb')
            tokens[index + 1].reports.append('_passiveVerb')

    def finalize(self, paper):
        """Attach the summary report and record the per-paper statistic."""
        sentence_count = paper.stats['sentences']
        percent_info = (' ({:.1f}%)'.format(
                            100 * float(self.counter) / sentence_count)
                        if sentence_count is not None and sentence_count > 0
                        else '')
        # Fix: the singular form previously omitted the percentage that
        # the plural form included.
        if self.counter == 1:
            summary = '1 sentence{}'.format(percent_info)
        else:
            summary = '{} sentences{}'.format(self.counter, percent_info)
        report = Report('Passive voice sentences', None, self.help, summary)
        report.css_classes = [CSS('passiveVoice', 'D0DEFF'),
                              CSS('passiveVerb', 'ECBFEC')]
        paper.reports.append(report)
        paper.stats['passive voice sentences'] = self.counter
|
pkorus/swtk
|
plugins/passive_voice.py
|
Python
|
mit
| 1,379
|
def main(request, response):
    """WPT handler: serve a marker body proving the response hit the network."""
    headers = [("Content-Type", "text/html")]
    body = "FROM_NETWORK"
    return (headers, body)
|
UK992/servo
|
tests/wpt/web-platform-tests/clear-site-data/support/controlled-endpoint.py
|
Python
|
mpl-2.0
| 101
|
"""
pygments.lexers.stata
~~~~~~~~~~~~~~~~~~~~~
Lexer for Stata
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, include, words
from pygments.token import Comment, Keyword, Name, Number, \
String, Text, Operator
from pygments.lexers._stata_builtins import builtins_base, builtins_functions
__all__ = ['StataLexer']
class StataLexer(RegexLexer):
    """
    For `Stata <http://www.stata.com/>`_ do files.
    .. versionadded:: 2.2
    """
    # Syntax based on
    # - http://fmwww.bc.edu/RePEc/bocode/s/synlightlist.ado
    # - https://github.com/isagalaev/highlight.js/blob/master/src/languages/stata.js
    # - https://github.com/jpitblado/vim-stata/blob/master/syntax/stata.vim
    name = 'Stata'
    aliases = ['stata', 'do']
    filenames = ['*.do', '*.ado']
    mimetypes = ['text/x-stata', 'text/stata', 'application/x-stata']
    flags = re.MULTILINE | re.DOTALL

    # State machine: 'root' tries each sub-language in priority order and
    # falls back to plain Text for any unmatched character.
    tokens = {
        'root': [
            include('comments'),
            include('strings'),
            include('macros'),
            include('numbers'),
            include('keywords'),
            include('operators'),
            include('format'),
            (r'.', Text),
        ],
        # Comments are a complicated beast in Stata because they can be
        # nested and there are a few corner cases with that. See:
        # - github.com/kylebarron/language-stata/issues/90
        # - statalist.org/forums/forum/general-stata-discussion/general/1448244
        'comments': [
            (r'(^//|(?<=\s)//)(?!/)', Comment.Single, 'comments-double-slash'),
            (r'^\s*\*', Comment.Single, 'comments-star'),
            (r'/\*', Comment.Multiline, 'comments-block'),
            (r'(^///|(?<=\s)///)', Comment.Special, 'comments-triple-slash')
        ],
        'comments-block': [
            (r'/\*', Comment.Multiline, '#push'),
            # this ends and restarts a comment block. but need to catch this so
            # that it doesn\'t start _another_ level of comment blocks
            (r'\*/\*', Comment.Multiline),
            (r'(\*/\s+\*(?!/)[^\n]*)|(\*/)', Comment.Multiline, '#pop'),
            # Match anything else as a character inside the comment
            (r'.', Comment.Multiline),
        ],
        'comments-star': [
            (r'///.*?\n', Comment.Single,
                ('#pop', 'comments-triple-slash')),
            (r'(^//|(?<=\s)//)(?!/)', Comment.Single,
                ('#pop', 'comments-double-slash')),
            (r'/\*', Comment.Multiline, 'comments-block'),
            (r'.(?=\n)', Comment.Single, '#pop'),
            (r'.', Comment.Single),
        ],
        'comments-triple-slash': [
            (r'\n', Comment.Special, '#pop'),
            # A // breaks out of a comment for the rest of the line
            (r'//.*?(?=\n)', Comment.Single, '#pop'),
            (r'.', Comment.Special),
        ],
        'comments-double-slash': [
            (r'\n', Text, '#pop'),
            (r'.', Comment.Single),
        ],
        # `"compound string"' and regular "string"; note the former are
        # nested.
        'strings': [
            (r'`"', String, 'string-compound'),
            (r'(?<!`)"', String, 'string-regular'),
        ],
        'string-compound': [
            (r'`"', String, '#push'),
            (r'"\'', String, '#pop'),
            (r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
            include('macros'),
            (r'.', String)
        ],
        'string-regular': [
            (r'(")(?!\')|(?=\n)', String, '#pop'),
            (r'\\\\|\\"|\\\$|\\`|\\\n', String.Escape),
            include('macros'),
            (r'.', String)
        ],
        # A local is usually
        #     `\w{0,31}'
        #     `:extended macro'
        #     `=expression'
        #     `[rsen](results)'
        #     `(++--)scalar(++--)'
        #
        # However, there are all sorts of weird rules wrt edge
        # cases. Instead of writing 27 exceptions, anything inside
        # `' is a local.
        #
        # A global is more restricted, so we do follow rules. Note only
        # locals explicitly enclosed ${} can be nested.
        'macros': [
            (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
            (r'\$', Name.Variable.Global, 'macro-global-name'),
            (r'`', Name.Variable, 'macro-local'),
        ],
        'macro-local': [
            (r'`', Name.Variable, '#push'),
            (r"'", Name.Variable, '#pop'),
            (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested'),
            (r'\$', Name.Variable.Global, 'macro-global-name'),
            (r'.', Name.Variable),  # fallback
        ],
        'macro-global-nested': [
            (r'\$(\{|(?=[$`]))', Name.Variable.Global, '#push'),
            (r'\}', Name.Variable.Global, '#pop'),
            (r'\$', Name.Variable.Global, 'macro-global-name'),
            (r'`', Name.Variable, 'macro-local'),
            (r'\w', Name.Variable.Global),  # fallback
            default('#pop'),
        ],
        'macro-global-name': [
            (r'\$(\{|(?=[$`]))', Name.Variable.Global, 'macro-global-nested', '#pop'),
            (r'\$', Name.Variable.Global, 'macro-global-name', '#pop'),
            (r'`', Name.Variable, 'macro-local', '#pop'),
            (r'\w{1,32}', Name.Variable.Global, '#pop'),
        ],
        # Built in functions and statements
        'keywords': [
            (words(builtins_functions, prefix = r'\b', suffix = r'(?=\()'),
             Name.Function),
            (words(builtins_base, prefix = r'(^\s*|\s)', suffix = r'\b'),
             Keyword),
        ],
        # http://www.stata.com/help.cgi?operators
        'operators': [
            (r'-|==|<=|>=|<|>|&|!=', Operator),
            (r'\*|\+|\^|/|!|~|==|~=', Operator)
        ],
        # Stata numbers
        'numbers': [
            # decimal number
            (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[i]?\b',
             Number),
        ],
        # Stata formats
        'format': [
            (r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Other),
            (r'%(21x|16H|16L|8H|8L)', Name.Other),
            (r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg)\S{0,32}', Name.Other),
            (r'%[-~]?\d{1,4}s', Name.Other),
        ]
    }
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/pygments/lexers/stata.py
|
Python
|
apache-2.0
| 6,414
|
# DicomDiff.py
"""Show the difference between two dicom files.
"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from __future__ import print_function
usage = """
Usage:
python DicomDiff.py file1 file2
Results printed in python difflib form - indicated by start of each line:
' ' blank means lines the same
'-' means in file1 but "removed" in file2
'+' means not in file1, but "added" in file2
('?' lines from difflib removed - no use here)
"""
import sys
import dicom
import difflib
# only used as a script
if len(sys.argv) != 3:
    print(usage)
    sys.exit()

# Parse both files up front.
datasets = [dicom.read_file(path) for path in sys.argv[1:3]]

# difflib's compare functions want lists of newline-terminated lines, so
# render each dataset to text and split it into that shape.
rep = [[line + "\n" for line in str(dataset).split("\n")]
       for dataset in datasets]

differ = difflib.Differ()
for line in differ.compare(rep[0], rep[1]):
    # '?' hint lines carry intra-line detail that is of no use here.
    if line[0] != "?":
        print(line)
|
njvack/ge-mri-rtafni
|
upload-host/vendor/dicom/examples/DicomDiff.py
|
Python
|
mit
| 1,262
|
#!/usr/bin/python
# Util file to import in all of the notebooks to allow for easy code re-use
# Calculate Percent of Attendees that did not speak
# Calculate Percent of Attendees that did not speak
def percent_silent(df):
    """Count attendees with zero speaking turns; returns TOTAL/SILENT/VERBOSE."""
    total = len(df)
    silent = sum(1 for _, talks in df.iteritems() if talks == 0)
    return {'TOTAL': total, 'SILENT': silent, 'VERBOSE': total - silent}
# Calculate Percent of Attendees that left
# Calculate Percent of Attendees that left
def percent_left(df):
    """Count attendees marked 0 (left early); returns TOTAL/LEFT/STAYED."""
    total = len(df)
    left = sum(1 for _, stayed in df.iteritems() if stayed == 0)
    return {'TOTAL': total, 'LEFT': left, 'STAYED': total - left}
# Calculate Percent of Attendees along gender
# Calculate Percent of Attendees along gender
def percent_gender(df):
    """Count attendees by gender code (1=female); returns TOTAL/FEMALE/MALE."""
    total = len(df)
    female = sum(1 for _, gender in df.iteritems() if gender == 1)
    return {'TOTAL': total, 'FEMALE': female, 'MALE': total - female}
# Calculate Percent of Talking points by
# Calculate Percent of Talking points by
def percent_talking_gender(df):
    """Tally speakers (talks > 0) by gender code (0=male, 1=female).

    Expects rows of (talks, gender); returns TOTAL/FEMALE/MALE counts.
    """
    total = male = female = 0
    for talks, gender in df.itertuples(index=False):
        if talks <= 0:
            continue
        total += 1
        if gender == 0:
            male += 1
        elif gender == 1:
            female += 1
    return {'TOTAL': total, 'FEMALE': female, 'MALE': male}
|
JesseScott/PolyglotVancouver-Analysis
|
util.py
|
Python
|
gpl-3.0
| 1,542
|
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.urlresolvers import reverse
from jinja2 import Environment
def environment(**options):
    """Jinja2 environment factory exposing Django's static() and url() globals."""
    env = Environment(**options)
    env.globals['static'] = staticfiles_storage.url
    env.globals['url'] = reverse
    return env
|
k3nsuk3/webtex
|
webtex/jinja2.py
|
Python
|
gpl-3.0
| 320
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===============================================================
#
# Filename: report.py
#
# Author: zhangpan
# Email: yunxinyi@gmail.com
# Created: 2015-12-23 13:53:10 CST
# Last-update: 2015-12-23 13:53:10 CST
# Description: ANCHOR
#
# Version: 0.0.1
# Revision: [None]
# Revision history: [None]
# Date Author Remarks: [None]
#
# License:
# Copyright (c) 2015 zhangpan. All rights reserved.
#
# ===============================================================
#
from __future__ import print_function
import os
import re
import sys
import time
import operator
import functools
import FastInt
import numpy as np
import multiprocessing as mp
def timefunc(func):
    '''Decorator that prints the wall-clock and CPU time of every call.'''
    # time.clock() was removed in Python 3.8; use process_time() there.
    # (time.clock exists on Python 2, where process_time does not.)
    cpu_clock = time.clock if hasattr(time, 'clock') else time.process_time

    @functools.wraps(func)
    def decorator(*args, **kwargs):
        ts0 = cpu_clock()
        tm0 = time.time()
        result = func(*args, **kwargs)
        ts1 = cpu_clock()
        tm1 = time.time()
        tsdiff = ts1 - ts0
        tmdiff = tm1 - tm0
        # func.__name__ works on both Python 2 and 3 (func.func_name is
        # Python 2 only and broke this decorator under Python 3).
        print('time cost of {2}: real: {0}, cpu: {1}'.format(
            tmdiff, tsdiff, func.__name__))
        return result
    return decorator
#NOTICE: 12-21 00:00:00: imas * 26706 [ÄÚÒµê] qry=ÄÚÒµê ip=223.98.252.57 rt_ip=10.128.205.21 pn=0 tn= pre=0 s=327126f4b415f974 bd=AD4890C25E3E24A41E13CF3FE2BACB44 cuid=E382D676F6FFB6674CAC94DF6C8DECD9|789195320493468 imei= idm=(2,2,0) src=915, fn=cnkang_cpr tm=(128|11447|16998|8770|0|0|0|0|0|0|0|0|0|0|0|0|15728|0|179|16421|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|0|9307|0|0|0|0|0|0|0|0|0|0|0|0) tt=79203 sp=(51985,8388608) inner=1 ft=37 ppq=0 csfn=(6|9) bsrn=(47|47,) amrn=47 qspn=2 ws=1 di=1(47/47|47) qs_kpi=(0|2|0) it=(0|15480|11339|16111|0|0|15152|0|0|7909|0|0|0|0|0) mem=(5)([18168263][0][0][0][0] 32M[0]) pack=333414 tmo=0 qs_cmd_time=() extra_log=fetch:9,before_write:69852,fengsui:1,all_except_conn:79216,
class Task(mp.Process):
    """Worker process: sums the tm=/tt=/qspn= counters over a slice of log lines."""

    # Extracts the 'tm=(...)' counter list, total time 'tt=' and 'qspn='.
    pattern = re.compile(r'tm=\(([^)]+)\) tt=(\d+).*qspn=(\d+)')

    def __init__(self, queue, lines):
        super(Task, self).__init__()
        self._queue = queue  # result channel back to the parent process
        self._lines = lines  # slice of log lines this worker handles

    def run(self):
        PAT = Task.pattern
        # Accumulated counters; assumes the joined match groups split into
        # 54 '|'-separated integers -- TODO confirm against the log format.
        ST = [0] * 54
        NR = 0  # lines parsed successfully
        NE = 0  # malformed lines
        NL = len(self._lines)
        # Bind hot lookups to locals for speed inside the parse loop.
        addis = FastInt.add
        stderr = sys.stderr
        match = PAT.search
        for line in self._lines:
            try:
                m = match(line)
                #m = PAT.search(line)
                #FastInt.add(self._CNT, '|'.join(m.groups()))
                #ST = map(addis, ST, ('|'.join(m.groups())).split('|'))
                FastInt.addArray(ST, '|'.join(m.groups()))
                #ST = map(addis, ST, '|'.join(m.groups()).split('|'))
                NR += 1
            except Exception as e:
                print('*** malformed line {0}'.format(e), file=stderr)
                NE += 1
        # First element is the parsed-line count, then the counter sums.
        self._queue.put([NR] + ST)
@timefunc
def parse(lines, nproc):
    """Split *lines* across *nproc* worker processes and print averaged stats."""
    nline = len(lines)
    # Ceiling division; '//' keeps this an int under Python 3 as well
    # (plain '/' would produce a float there and break range()).
    step = (nline + nproc - 1) // nproc
    queue = mp.Queue()
    taskq = [Task(queue, lines[i:i + step]) for i in range(0, nline, step)]
    for task in taskq:
        task.start()
        print('+ {0}'.format(task))
    for task in reversed(taskq):
        task.join()
        print('- {0}'.format(task))
    # NOTE(review): results are drained only after join(); if a worker's
    # output fills the queue's pipe buffer this can deadlock -- confirm
    # expected result sizes.
    statq = [queue.get() for task in taskq]
    # functools.reduce works on Python 2 and 3 (bare reduce is Py2-only).
    stat = functools.reduce(operator.add, map(np.array, statq))
    NR = stat[0]
    stat = np.array(stat[1:]) / float(NR)
    print('report'.center(80, '-'))
    print(stat)
    print(''.center(80, '='))
from getopt import (getopt, GetoptError)
def help(name):
    """Print command-line usage for this script."""
    # NOTE: shadows the 'help' builtin; name kept for interface compatibility.
    print('Usage: %s <logfile>' % name)
if __name__ == '__main__':
    # Parse -h/--help and -n/--nproc; default worker count is the CPU count.
    try:
        opts, args = getopt(sys.argv[1:], 'hn:', ['help', 'nproc'])
    except GetoptError as e:
        print(e, file=sys.stderr)
        sys.exit(1)
    nproc = mp.cpu_count()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help(sys.argv[0])
            sys.exit(0)
        elif opt in ('-n', '--nproc'):
            nproc = int(arg)
        else:
            print('invalid opt: {0}'.format(opt), file=sys.stderr)
            sys.exit(1)
    # Exactly one positional argument: the log file to analyse.
    nargs = len(args)
    if nargs == 0 or nargs > 1:
        help(sys.argv[0])
        sys.exit(1)
    fpath = args[0]
    with open(fpath) as f:
        parse(f.readlines(), nproc)
|
oxnz/work-stuff
|
logparse/proc.py
|
Python
|
mit
| 4,272
|
from datetime import datetime
from pyramid.threadlocal import get_current_registry
from pytz import timezone
from sqlalchemy import event
from sqlalchemy import sql
from sqlalchemy import orm
from amnesia.modules.content import Content
from amnesia.modules.account import Account
from amnesia.modules.state import State
from amnesia.modules.content_type import ContentType
from amnesia.modules.tag import Tag
from amnesia.modules.folder import Folder
from amnesia.db import mapper_registry
@event.listens_for(Content, 'before_update', propagate=True)
def updated_listener(mapper, connection, target):
    """Refresh ``target.updated`` with a timezone-aware 'now' on every update."""
    tz_name = 'UTC'
    registry = get_current_registry()
    # Prefer the application's configured timezone when one is available.
    if registry and registry.settings:
        tz_name = registry.settings.get('timezone', 'UTC')
    target.updated = datetime.now(timezone(tz_name))
# Full-text-search weight labels accepted by PostgreSQL's setweight().
PGSQL_FTS_WEIGHTS = frozenset(('a', 'b', 'c', 'd'))
# Weight used when a model declares an unrecognized label in _FTS_.
PGSQL_FTS_DEFAULT_WEIGHT = 'd'
@event.listens_for(Content, 'before_update', propagate=True)
@event.listens_for(Content, 'before_insert', propagate=True)
def update_fts_listener(mapper, connection, target):
    """Recompute the weighted tsvector stored in the 'fts' column.

    Concatenates a setweight(to_tsvector(...)) expression for every
    (field, weight) pair the class declares in ``_FTS_``; stores None
    when full-text search is disabled for the instance.
    """
    combined = None
    if target.is_fts:
        for field, weight in getattr(target.__class__, '_FTS_', ()):
            # Fall back to the default weight on unknown labels.
            if weight.lower() not in PGSQL_FTS_WEIGHTS:
                weight = PGSQL_FTS_DEFAULT_WEIGHT
            vector = sql.func.setweight(
                sql.func.to_tsvector(
                    sql.func.coalesce(getattr(target, field, ''), '')),
                weight)
            combined = vector if combined is None else combined.op('||')(vector)
    target.fts = combined
def includeme(config):
    """Pyramid include hook: imperatively map Content onto its table.

    Pulls in the mapper configuration of the related modules first, then
    registers relationships and deferred column properties.
    """
    tables = config.registry['metadata'].tables
    config.include('amnesia.modules.account.mapper')
    config.include('amnesia.modules.state.mapper')
    config.include('amnesia.modules.content_type.mapper')
    config.include('amnesia.modules.tag.mapper')
    config.include('amnesia.modules.language.mapper')
    # Self-join alias used to count a folder's direct children.
    _count_alias = tables['content'].alias('_count_children')
    mapper_registry.map_imperatively(
        Content, tables['content'],
        polymorphic_on=tables['content'].c.content_type_id,
        properties={
            #################
            # RELATIONSHIPS #
            #################
            'owner': orm.relationship(
                Account,
                lazy='joined',
                innerjoin=True,
                backref=orm.backref('contents', lazy='dynamic',
                                    cascade='all, delete-orphan')
            ),
            'state': orm.relationship(
                State,
                lazy='joined',
                innerjoin=True
            ),
            'type': orm.relationship(
                ContentType,
                lazy='joined',
                innerjoin=True
            ),
            'tags': orm.relationship(
                Tag,
                secondary=tables['content_tag'],
                back_populates='contents'
            ),
            'parent': orm.relationship(
                lambda: Folder,
                foreign_keys=tables['content'].c.container_id,
                innerjoin=True,
                uselist=False,
                backref=orm.backref('children', cascade='all, delete-orphan')
            ),
            #####################
            # COLUMN PROPERTIES #
            #####################
            # 1-based rank of this row within its container, by weight.
            'position_in_container': orm.column_property(
                sql.func.row_number().over(
                    partition_by=tables['content'].c.container_id,
                    order_by=tables['content'].c.weight.desc()
                ),
                deferred=True,
                group='window_func'
            ),
            # Scalar subquery counting direct children of this row.
            'count_children': orm.column_property(
                sql.select(
                    sql.func.count('*')
                ).where(
                    _count_alias.c.container_id == tables['content'].c.id
                ).correlate_except(
                    _count_alias
                ).scalar_subquery(),
                deferred=True
            )
        })
|
silenius/amnesia
|
amnesia/modules/content/mapper.py
|
Python
|
bsd-2-clause
| 4,150
|
import unittest
import warnings
from unittest import mock
from django.db import DatabaseError, connection
from django.test import TestCase
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL tests')
class Tests(TestCase):
def test_nodb_connection(self):
"""
The _nodb_connection property fallbacks to the default connection
database when access to the 'postgres' database is not granted.
"""
def mocked_connect(self):
if self.settings_dict['NAME'] is None:
raise DatabaseError()
return ''
nodb_conn = connection._nodb_connection
self.assertIsNone(nodb_conn.settings_dict['NAME'])
# Now assume the 'postgres' db isn't available
with warnings.catch_warnings(record=True) as w:
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
side_effect=mocked_connect, autospec=True):
warnings.simplefilter('always', RuntimeWarning)
nodb_conn = connection._nodb_connection
self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
self.assertEqual(nodb_conn.settings_dict['NAME'], connection.settings_dict['NAME'])
# Check a RuntimeWarning has been emitted
self.assertEqual(len(w), 1)
self.assertEqual(w[0].message.__class__, RuntimeWarning)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
new_connection = connection.copy()
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
with new_connection.cursor() as cursor:
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Invalidate timezone name cache, because the setting_changed
# handler cannot know about new_connection.
del new_connection.timezone_name
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
with self.settings(TIME_ZONE=new_tz):
new_connection.set_autocommit(False)
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
with new_connection.cursor() as cursor:
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
new_connection = connection.copy()
new_connection.settings_dict['AUTOCOMMIT'] = False
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def test_connect_isolation_level(self):
"""
The transaction level can be configured with
DATABASES ['OPTIONS']['isolation_level'].
"""
import psycopg2
from psycopg2.extensions import (
ISOLATION_LEVEL_READ_COMMITTED as read_committed,
ISOLATION_LEVEL_SERIALIZABLE as serializable,
)
# Since this is a django.test.TestCase, a transaction is in progress
# and the isolation level isn't reported as 0. This test assumes that
# PostgreSQL is configured with the default isolation level.
# Check the level on the psycopg2 connection, not the Django wrapper.
default_level = read_committed if psycopg2.__version__ < '2.7' else None
self.assertEqual(connection.connection.isolation_level, default_level)
new_connection = connection.copy()
new_connection.settings_dict['OPTIONS']['isolation_level'] = serializable
try:
# Start a transaction so the isolation level isn't reported as 0.
new_connection.set_autocommit(False)
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(new_connection.connection.isolation_level, serializable)
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute('SELECT %s', (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ['awef']
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ['ᄲawef']
b = self._select(a)
self.assertEqual(a[0], b[0])
    def test_lookup_cast(self):
        """Text lookups are cast to ::text (or ::citext for CI* field types)."""
        from django.db.backends.postgresql.operations import DatabaseOperations
        do = DatabaseOperations(connection=None)
        lookups = (
            'iexact', 'contains', 'icontains', 'startswith', 'istartswith',
            'endswith', 'iendswith', 'regex', 'iregex',
        )
        for lookup in lookups:
            with self.subTest(lookup=lookup):
                self.assertIn('::text', do.lookup_cast(lookup))
        # Case-insensitive field types must be cast to citext instead.
        for lookup in lookups:
            for field_type in ('CICharField', 'CIEmailField', 'CITextField'):
                with self.subTest(lookup=lookup, field_type=field_type):
                    self.assertIn('::citext', do.lookup_cast(lookup, internal_type=field_type))
    def test_correct_extraction_psycopg2_version(self):
        """psycopg2_version() extracts only the leading numeric components."""
        from django.db.backends.postgresql.base import psycopg2_version
        with mock.patch('psycopg2.__version__', '4.2.1 (dt dec pq3 ext lo64)'):
            self.assertEqual(psycopg2_version(), (4, 2, 1))
        # Pre-release suffixes ("b0.dev1") are not part of the numeric tuple.
        with mock.patch('psycopg2.__version__', '4.2b0.dev1 (dt dec pq3 ext lo64)'):
            self.assertEqual(psycopg2_version(), (4, 2))
|
uranusjr/django
|
tests/backends/postgresql/tests.py
|
Python
|
bsd-3-clause
| 6,323
|
# codecademy course answer  (fixed: "//" is not a Python comment marker)
# Reassign meal on line 7!
meal = 44.50
tax = 0.0675
tip = 0.15
# Add the tax to the cost of the meal.
meal = meal + meal * tax
|
nurhandipa/python
|
codecademy/reassign_in_a_single_line.py
|
Python
|
gpl-3.0
| 119
|
from django.urls import path
from . import views
# URL routes of the account app: overview page, profile editing and actions.
urlpatterns = [
    path('',
         views.AccountView.as_view(),
         name='account'),
    path('profile/',
         views.ProfileUpdateView.as_view(),
         name='account_profile'),
    path('actions/',
         views.ProfileActionsView.as_view(),
         name='account_actions'),
]
|
liqd/a4-meinberlin
|
meinberlin/apps/account/urls.py
|
Python
|
agpl-3.0
| 345
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Create the material-list models (requested / purchased / delivered
    lists and their line items) and link them to contracts, products and
    employees via foreign keys added after model creation."""
    dependencies = [
        ('rh', '0001_initial'),
        ('estoque', '0005_auto_20141001_0953'),
        ('comercial', '0007_auto_20141006_1852'),
        ('almoxarifado', '0003_auto_20140917_0843'),
    ]
    operations = [
        # Line item of a contract's material list (requested vs. fulfilled).
        migrations.CreateModel(
            name='LinhaListaMaterial',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade_requisitada', models.DecimalField(max_digits=10, decimal_places=2)),
                ('quantidade_ja_atendida', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Line item of a purchase list.
        migrations.CreateModel(
            name='LinhaListaMaterialCompra',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Line item of a delivery list.
        migrations.CreateModel(
            name='LinhaListaMaterialEntregue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Purchase list header, optionally tied to a closed contract.
        migrations.CreateModel(
            name='ListaMaterialCompra',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ativa', models.BooleanField(default=True)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # One material list per contract (OneToOne, unlike the FK lists above).
        migrations.CreateModel(
            name='ListaMaterialDoContrato',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ativa', models.BooleanField(default=True)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.OneToOneField(null=True, blank=True, to='comercial.ContratoFechado')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Delivery list header: records who delivered and who received.
        migrations.CreateModel(
            name='ListaMaterialEntregue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entregue', models.BooleanField(default=False)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
                ('entregue_para', models.ForeignKey(related_name=b'entregue_para_set', to='rh.Funcionario')),
                ('entregue_por', models.ForeignKey(related_name=b'entregue_por_set', to='rh.Funcionario')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Attach each line-item model to its parent list and to a product.
        migrations.AddField(
            model_name='linhalistamaterialentregue',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialEntregue'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialentregue',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialcompra',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialCompra'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialcompra',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterial',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialDoContrato'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterial',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
    ]
|
dudanogueira/microerp
|
microerp/almoxarifado/migrations/0004_auto_20141006_1957.py
|
Python
|
lgpl-3.0
| 6,177
|
# coding: utf-8
import logger
import service
import common
import core
import tcp
import http
__version__ = '0.0.1'
# Numeric version tuple derived from the string above, e.g. (0, 0, 1).
VERSION = tuple(int(part) for part in __version__.split('.'))
# Public API of the package.
__all__ = [
    'logger',
    'service',
    'common',
    'core',
    'http',
    'tcp',
]
|
slin1972/unity
|
unity/__init__.py
|
Python
|
apache-2.0
| 261
|
#!/usr/bin/env python
################################################################################
# bitly.py
# pyanalysis
#
# Created by Brian Baughman on 10/26/11.
# Copyright 2011 Brian Baughman. All rights reserved.
################################################################################
from os import environ
from numpy import floor, sin, cos, tan, arcsin, arccos, arctan2, pi, remainder
from timeConv import dttm2LST
def eq2horz(obslat, obslon, doi, RA, dec):
    """Convert equatorial (RA, dec) to horizontal (alt, az) coordinates.

    All angles are given in radians.

    obslat -- observer latitude
    obslon -- observer longitude
    doi    -- datetime object of interest
    RA     -- right ascension of the event
    dec    -- declination of the event

    Returns (alt, az), the Horizontal Geocentric Coordinates.
    """
    # Local sidereal time (hours) -> hour angle of the event.
    lst = dttm2LST(doi, obslon)
    hour_angle = 2. * pi * remainder(lst, 24.) / 24. - RA
    # Standard spherical-triangle relations for the altitude...
    sin_alt = sin(dec) * sin(obslat) + cos(dec) * cos(obslat) * cos(hour_angle)
    altitude = arcsin(sin_alt)
    # ...and for the azimuth, via its sine and cosine fed to arctan2.
    azimuth = arctan2(
        -sin(hour_angle) * cos(dec) / cos(altitude),
        (sin(dec) - sin(obslat) * sin_alt) / (cos(obslat) * cos(altitude)),
    )
    return altitude, azimuth
def horz2eq(obslat, obslon, doi, alt, az):
    """Convert horizontal (alt, az) to equatorial (RA, dec) coordinates.

    All angles are given in radians.

    obslat -- observer latitude
    obslon -- observer longitude
    doi    -- datetime object of interest
    alt    -- altitude angle of the event
    az     -- azimuthal angle of the event

    Returns (RA, dec), the Equatorial Coordinates.
    """
    lst = dttm2LST(doi, obslon)
    # Declination from the spherical triangle.
    sin_dec = sin(alt) * sin(obslat) + cos(alt) * cos(obslat) * cos(az)
    declination = arcsin(sin_dec)
    cos_dec = cos(declination)
    # Hour angle via its sine and cosine, then RA from the sidereal time.
    hour_angle = arctan2(
        -sin(az) * cos(alt) / cos_dec,
        (sin(alt) - sin_dec * sin(obslat)) / (cos_dec * cos(obslat)),
    )
    right_ascension = 2. * pi * remainder(lst, 24.) / 24. - hour_angle
    return right_ascension, declination
|
bbaugh/gcn-parser
|
coordConv.py
|
Python
|
gpl-2.0
| 1,818
|
# -*- coding: utf-8 -*-
# Convert VK-wiki markup to Markdown: level-2 headings, bold/italics,
# internal [[id|..]], [[club|..]], [[public|..]] links and external
# [http..|..] links. Reads input.txt, writes output.txt.
try:
    f1 = open("input.txt", "r", encoding="utf-8")
except IOError:
    print("Не удалось найти входной файл input.txt")
try:
    f2 = open("output.txt", "w", encoding="utf-8")
except IOError:
    print("Не удалось открыть выходной файл output.txt")
import re  # regular-expression support
# All patterns use raw strings so "\[" etc. are real regex escapes rather
# than (deprecated) invalid string escape sequences.
# --- regex for level-2 headings of the form: == xxx ==
zagolovok_level2 = re.compile(r"==.*==")  # greedy quantifier .*
# --- regexes for internal links [[id**|**]], [[club**|**]], [[public**|**]]
#ssylka_inner_tpl = re.compile(r"\[\[.*?\|.*?\]\]")  # [[ | ]] non-greedy .*?
ssylka_inner_id = re.compile(r"\[\[id.*?\|.*?\]\]")  # id
ssylka_inner_club = re.compile(r"\[\[club.*?\|.*?\]\]")  # club
ssylka_inner_public = re.compile(r"\[\[public.*?\|.*?\]\]")  # public
# --- regex for external links of the form [http**|**]
ssylka_outer = re.compile(r"\[http.*?\|.*?\]")
# --- regex detecting a line that should get an extra break (ends with ":")
perenos = re.compile(r":\s*$")
# --------
for stroka in f1.readlines():  # read the input file line by line
    # ---- Convert headings
    if re.match(zagolovok_level2, stroka):
        stroka = stroka.replace("==", "##", 1)
        stroka = stroka.replace("==", "")
    # ---- Convert bold text and italics ----
    stroka = stroka.replace("'''", '**')  # bold - TODO: switch to regexes
    stroka = stroka.replace("''", '*')  # italics - TODO: switch to regexes
    # ---- Convert internal links (id, club, public) ----
    iskomoe = (re.findall(ssylka_inner_id, stroka) +
               re.findall(ssylka_inner_club, stroka) +
               re.findall(ssylka_inner_public, stroka))  # all id/club/public
    if iskomoe:
        for ssylka in iskomoe:  # iterate over links found in this line
            ssylka_id = ssylka.split("|")[0].replace('[[', '')  # link target
            ssylka_name = ssylka.split("|")[1].replace(']]', '')  # link text
            ssylka_new = ('[' + ssylka_name + '](' + 'http://vk.com/' + ssylka_id + ')')
            stroka = stroka.replace(ssylka, ssylka_new)  # swap old for new
    # ---- Convert external links [http**|**] ----
    iskomoe2 = re.findall(ssylka_outer, stroka)
    if iskomoe2:
        for ssylka2 in iskomoe2:
            ssylka2_id = ssylka2.split("|")[0].replace('[http', 'http')
            ssylka2_name = ssylka2.split("|")[1].replace(']', '')
            ssylka2_new = '[' + ssylka2_name + '](' + ssylka2_id + ')'
            stroka = stroka.replace(ssylka2, ssylka2_new)
    # ---- Write the converted line to the output file ----
    if re.search(perenos, stroka):
        f2.write('\n' + stroka)
    else:
        f2.write(stroka)
# --------
f1.close()
f2.close()
|
dimitrius-brest/katalog-poseleniy-RP
|
converter-vkwiki2md/convert2md.py
|
Python
|
cc0-1.0
| 3,623
|
from flask import abort
from flask import render_template
from flask import request
from flask import url_for
from flask.ext.wtf import Form
from wtforms import validators
from wtforms import IntegerField, RadioField, SelectField, TextField
from logcas.bootstrap import *
# forms
class LogIndexForm(Form):
    """Query-string filter form shared by the log listing views."""
    # 1-based page number of the result listing.
    page = IntegerField('Page', default=1,
                        validators=[validators.NumberRange(min=1)])
    # Number of records per page.
    limit = IntegerField('Limit', default=DEFAULT_LIMIT,
                         validators=[validators.NumberRange(min=10, max=200)])
    # Minimum log level; choices come from the shared LEVELMAP.
    levelno = RadioField('Level', default=DEFAULT_LEVELNO, coerce=int,
                         choices=[(k, v) for k, v in sorted(LEVELMAP.items())])
    # Center timestamp for the time-window filter (0 disables it).
    created = IntegerField('Created', default=0,
                           validators=[validators.NumberRange(min=0)])
    # Half-width (in seconds, presumably -- confirm against get_logs) of the
    # time window around `created`.
    span = IntegerField('Span', default=DEFAULT_SPAN,
                        validators=[validators.NumberRange(min=1, max=120)])
    style = SelectField('Style', default=DEFAULT_STYLE,
                        choices=STYLEMAP)
    # NOTE(review): lowercase `validators.length` is a legacy WTForms alias of
    # `Length` -- verify it still exists in the installed WTForms version.
    host = TextField('Host', default="",
                     validators=[validators.length(min=0, max=20)])
# controllers
@app.route('/logs')
def _log_index():
    """Render the live log listing, filtered/paginated via query parameters."""
    forms = LogIndexForm(request.args)
    if not forms.validate():
        abort(400)
    # NOTE: these locals are handed to the template via **locals() below,
    # so their names are part of the template contract -- do not rename.
    style = forms.style.data
    page = forms.page.data
    limit = forms.limit.data
    levelno = forms.levelno.data
    created = forms.created.data
    span = forms.span.data
    host = forms.host.data
    # MongoDB query: records at or above the requested level...
    spec = {'levelno': {'$gte': levelno}}
    if created:
        # ...optionally within [created - span, created + span]...
        spec.update({
            'created': {"$gte": created - span, "$lte": created + span},
        })
    if len(host):
        # ...and optionally from one host only.
        spec.update({'hostname': host})
    counts, logs = get_logs(mongo.db.logs,
                            spec=spec, limit=limit, page=page)
    # Integer floor division under Python 2 (this module imports flask.ext,
    # a Python-2-era API); yields total page count.
    pages = counts / limit + 1
    return render_template('log_index.html', **locals())
@app.route('/archived/logs')
def _archived_log_index():
    """Render the archived log listing; mirrors _log_index but reads the
    `archived_logs` collection and a different template."""
    forms = LogIndexForm(request.args)
    if not forms.validate():
        abort(400)
    # NOTE: these locals are handed to the template via **locals() below,
    # so their names are part of the template contract -- do not rename.
    style = forms.style.data
    page = forms.page.data
    limit = forms.limit.data
    levelno = forms.levelno.data
    created = forms.created.data
    span = forms.span.data
    host = forms.host.data
    spec = {'levelno': {'$gte': levelno}}
    if created:
        spec.update({
            'created': {"$gte": created - span, "$lte": created + span},
        })
    if len(host):
        spec.update({'hostname': host})
    counts, logs = get_logs(mongo.db.archived_logs,
                            spec=spec, limit=limit, page=page)
    # Integer floor division under Python 2; yields total page count.
    pages = counts / limit + 1
    return render_template('archived_log_index.html', **locals())
|
yosshy/logcas
|
logcas/log_index.py
|
Python
|
apache-2.0
| 2,715
|
# (c) 2017, Patrick Deelman <patrick@patrickdeelman.nl>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: passwordstore
version_added: "2.3"
author:
- Patrick Deelman <patrick@patrickdeelman.nl>
short_description: manage passwords with passwordstore.org's pass utility
description:
- Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
It also retrieves YAML style keys stored as multilines in the passwordfile.
options:
_terms:
description: query key
required: True
passwordstore:
description: location of the password store
default: '~/.password-store'
directory:
description: The directory of the password store.
env:
- name: PASSWORD_STORE_DIR
create:
description: Create the password if it does not already exist.
type: bool
default: 'no'
overwrite:
description: Overwrite the password if it does already exist.
type: bool
default: 'no'
returnall:
description: Return all the content of the password, not only the first line.
type: bool
default: 'no'
subkey:
description: Return a specific subkey of the password. When set to C(password), always returns the first line.
default: password
userpass:
description: Specify a password to save, instead of a generated one.
length:
description: The length of the generated password
type: integer
default: 16
backup:
description: Used with C(overwrite=yes). Backup the previous password in a subkey.
type: bool
default: 'no'
version_added: 2.7
nosymbols:
description: use alphanumeric characters
type: bool
default: 'no'
version_added: 2.8
"""
EXAMPLES = """
# Debug is used for examples, BAD IDEA to show passwords on screen
- name: Basic lookup. Fails if example/test doesn't exist
debug:
msg: "{{ lookup('passwordstore', 'example/test')}}"
- name: Create pass with random 16 character password. If password exists just give the password
debug:
var: mypassword
vars:
mypassword: "{{ lookup('passwordstore', 'example/test create=true')}}"
- name: Different size password
debug:
msg: "{{ lookup('passwordstore', 'example/test create=true length=42')}}"
- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
debug:
msg: "{{ lookup('passwordstore', 'example/test create=true overwrite=true')}}"
- name: Create an alphanumeric password
debug: msg="{{ lookup('passwordstore', 'example/test create=true nosymbols=true) }}"
- name: Return the value for user in the KV pair user, username
debug:
msg: "{{ lookup('passwordstore', 'example/test subkey=user')}}"
- name: Return the entire password file content
set_fact:
passfilecontent: "{{ lookup('passwordstore', 'example/test returnall=true')}}"
"""
RETURN = """
_raw:
description:
- a password
"""
import os
import subprocess
import time
from distutils import util
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.encrypt import random_password
from ansible.plugins.lookup import LookupBase
from ansible import constants as C
# backhacked check_output with input for python 2.7
# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
def check_output2(*popenargs, **kwargs):
    """subprocess.check_output work-alike that additionally accepts an
    ``input`` keyword (stdin data), for Python 2.7 compatibility.

    Raises CalledProcessError on non-zero exit or when gpg reports an
    unusable public key; returns captured stdout bytes otherwise.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    if 'stderr' in kwargs:
        raise ValueError('stderr argument not allowed, it will be overridden.')
    if 'input' in kwargs:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict')
        del kwargs['input']
        kwargs['stdin'] = subprocess.PIPE
    else:
        b_inputdata = None
    process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
    try:
        b_out, b_err = process.communicate(b_inputdata)
    except Exception:
        # Make sure the child does not outlive a communicate() failure.
        process.kill()
        process.wait()
        raise
    retcode = process.poll()
    # gpg can exit 0 yet fail to encrypt; treat its error marker as failure.
    if retcode != 0 or \
            b'encryption failed: Unusable public key' in b_out or \
            b'encryption failed: Unusable public key' in b_err:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise subprocess.CalledProcessError(
            retcode,
            cmd,
            to_native(b_out + b_err, errors='surrogate_or_strict')
        )
    return b_out
class LookupModule(LookupBase):
    """Lookup plugin backed by the `pass` (passwordstore.org) CLI."""

    def parse_params(self, term):
        """Split a lookup term into the pass-name and key=value options,
        validating and type-converting them into self.paramvals."""
        # I went with the "traditional" param followed with space separated KV pairs.
        # Waiting for final implementation of lookup parameter parsing.
        # See: https://github.com/ansible/ansible/issues/12255
        params = term.split()
        if len(params) > 0:
            # the first param is the pass-name
            self.passname = params[0]
            # next parse the optional parameters in keyvalue pairs
            try:
                for param in params[1:]:
                    name, value = param.split('=')
                    if name not in self.paramvals:
                        raise AnsibleAssertionError('%s not in paramvals' % name)
                    self.paramvals[name] = value
            except (ValueError, AssertionError) as e:
                raise AnsibleError(e)
        # check and convert values
        try:
            for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
                if not isinstance(self.paramvals[key], bool):
                    self.paramvals[key] = util.strtobool(self.paramvals[key])
        except (ValueError, AssertionError) as e:
            raise AnsibleError(e)
        if not isinstance(self.paramvals['length'], int):
            if self.paramvals['length'].isdigit():
                self.paramvals['length'] = int(self.paramvals['length'])
            else:
                raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
        # Set PASSWORD_STORE_DIR if directory is set
        if self.paramvals['directory']:
            if os.path.isdir(self.paramvals['directory']):
                os.environ['PASSWORD_STORE_DIR'] = self.paramvals['directory']
            else:
                raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))

    def check_pass(self):
        """Fetch the entry with `pass`; populate self.passoutput, self.password
        and self.passdict. Return True if it exists, False if it is missing and
        create=True; raise otherwise."""
        try:
            self.passoutput = to_text(
                check_output2(["pass", self.passname]),
                errors='surrogate_or_strict'
            ).splitlines()
            # First line is the password; later "key: value" lines form a dict.
            self.password = self.passoutput[0]
            self.passdict = {}
            for line in self.passoutput[1:]:
                if ':' in line:
                    name, value = line.split(':', 1)
                    self.passdict[name.strip()] = value.strip()
        except (subprocess.CalledProcessError) as e:
            if e.returncode == 1 and 'not in the password store' in e.output:
                # if pass returns 1 and return string contains 'is not in the password store.'
                # We need to determine if this is valid or Error.
                if not self.paramvals['create']:
                    raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname))
                else:
                    return False
            else:
                raise AnsibleError(e)
        return True

    def get_newpass(self):
        """Return the user-supplied password or generate a random one."""
        if self.paramvals['nosymbols']:
            # First 62 chars of the default set are the alphanumerics.
            chars = C.DEFAULT_PASSWORD_CHARS[:62]
        else:
            chars = C.DEFAULT_PASSWORD_CHARS
        if self.paramvals['userpass']:
            newpass = self.paramvals['userpass']
        else:
            newpass = random_password(length=self.paramvals['length'], chars=chars)
        return newpass

    def update_password(self):
        # generate new password, insert old lines from current result and return new password
        newpass = self.get_newpass()
        datetime = time.strftime("%d/%m/%Y %H:%M:%S")
        msg = newpass + '\n'
        if self.passoutput[1:]:
            # Preserve any existing extra lines (subkeys) of the entry.
            msg += '\n'.join(self.passoutput[1:]) + '\n'
        if self.paramvals['backup']:
            msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
        try:
            check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg)
        except (subprocess.CalledProcessError) as e:
            raise AnsibleError(e)
        return newpass

    def generate_password(self):
        # generate new file and insert lookup_pass: Generated by Ansible on {date}
        # use pwgen to generate the password and insert values with pass -m
        newpass = self.get_newpass()
        datetime = time.strftime("%d/%m/%Y %H:%M:%S")
        msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
        try:
            check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg)
        except (subprocess.CalledProcessError) as e:
            raise AnsibleError(e)
        return newpass

    def get_passresult(self):
        """Return the requested part of an existing entry: full content,
        the password line, a subkey value, or None if the subkey is absent."""
        if self.paramvals['returnall']:
            return os.linesep.join(self.passoutput)
        if self.paramvals['subkey'] == 'password':
            return self.password
        else:
            if self.paramvals['subkey'] in self.passdict:
                return self.passdict[self.paramvals['subkey']]
            else:
                return None

    def run(self, terms, variables, **kwargs):
        """Lookup entry point: for each term, fetch/create/update the pass
        entry and collect the resulting value."""
        result = []
        # Defaults; parse_params overwrites them per term.
        self.paramvals = {
            'subkey': 'password',
            'directory': variables.get('passwordstore'),
            'create': False,
            'returnall': False,
            'overwrite': False,
            'nosymbols': False,
            'userpass': '',
            'length': 16,
            'backup': False,
        }
        for term in terms:
            self.parse_params(term)   # parse the input into paramvals
            if self.check_pass():     # password exists
                if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
                    result.append(self.update_password())
                else:
                    result.append(self.get_passresult())
            else:    # password does not exist
                if self.paramvals['create']:
                    result.append(self.generate_password())
        return result
|
alxgu/ansible
|
lib/ansible/plugins/lookup/passwordstore.py
|
Python
|
gpl-3.0
| 11,134
|
from sgp import SGP
from sgp import SGP2
from sgp import ASGP
__all__ = ['SGP',
'SGP2',
'ASGP']
|
dvro/scikit-protopy
|
protopy/generation/__init__.py
|
Python
|
bsd-2-clause
| 119
|
# -*- coding: utf-8 -*-
from .core import serialize, json_encode
import datetime
import pandas
from unittest import TestCase
# Two-row fixture frame: numeric columns a/b/c, datetime column t, string s.
df = pandas.DataFrame([
    {'a': 1, 'b': 2, 'c': 3, 't': datetime.datetime(2015, 1, 1), 's': 's1'},
    {'a': 2, 'b': 4, 'c': 6, 't': datetime.datetime(2015, 1, 2), 's': 's2'}
])
class CoreTest(TestCase):
    def test_type(self):
        """serialize() output types and the effect of each supported option."""
        # Default output is a rendered string; output_type="json" gives a dict.
        self.assertEqual(type(serialize(df, render_to="chart")), str)
        obj = serialize(df, render_to="chart", output_type="json")
        self.assertEqual(type(obj), dict)
        # zoom option maps to chart.zoomType; invalid values raise.
        obj = serialize(df, render_to="chart", output_type="json", zoom="xy")
        self.assertTrue("chart" in obj)
        self.assertEqual(type(obj["chart"]), dict)
        self.assertTrue("zoomType" in obj["chart"])
        self.assertRaises(ValueError, serialize, df, **{"render_to": "chart", "zoom": "z"})
        # kind="bar" becomes Highcharts "column"; invalid kinds raise.
        obj = serialize(df, render_to="chart", output_type="json", kind="bar")
        self.assertTrue("chart" in obj)
        self.assertEqual(type(obj["chart"]), dict)
        self.assertEqual(obj["chart"].get("type"), "column")
        self.assertRaises(ValueError, serialize, df, **{"render_to": "chart", "kind": "z"})
        # secondary_y adds an opposite y axis.
        obj = serialize(df, render_to="chart", output_type="json", secondary_y="a")
        self.assertTrue(obj.get("yAxis", [])[1].get('opposite'))
        # rot rotates axis labels; loglog makes both axes logarithmic.
        obj = serialize(df, render_to="chart", output_type="json", rot=45, loglog=True)
        self.assertEqual(obj.get('xAxis', {}).get('labels'), {'rotation': 45})
        self.assertEqual(obj.get('yAxis', [])[0].get('labels'), {'rotation': 45})
        self.assertEqual(obj.get('xAxis', {}).get('type'), 'logarithmic')
        # A datetime x column yields a datetime axis.
        obj = serialize(df, render_to="chart", output_type="json", x="t")
        self.assertEqual(obj.get('xAxis', {}).get('type'), 'datetime')
        # Matplotlib-style line styles map to Highcharts dashStyle.
        obj = serialize(df, render_to="chart", output_type="json", x="t", style={"a": ":"})
        for series in obj.get("series"):
            if series["name"] == "a":
                self.assertEqual(series.get("dashStyle"), "Dot")
        self.assertRaises(ValueError, serialize, df, **{"render_to": "chart", "style": {"a": "u"}})
        # stacked area charts set series.stacking.
        obj = serialize(df, render_to="chart", output_type="json", kind="area", stacked=True)
        self.assertEqual(obj.get("series")[0].get("stacking"), "normal")
        # grid=True draws dotted grid lines on both axes.
        obj = serialize(df, render_to="chart", output_type="json", grid=True)
        self.assertEqual(obj.get('xAxis', {}).get('gridLineDashStyle'), 'Dot')
        self.assertEqual(obj.get('xAxis', {}).get('gridLineWidth'), 1)
        self.assertEqual(obj.get('yAxis', [])[0].get('gridLineDashStyle'), 'Dot')
        self.assertEqual(obj.get('yAxis', [])[0].get('gridLineWidth'), 1)
        # Axis limits.
        obj = serialize(df, render_to="chart", output_type="json", xlim=(0, 1), ylim=(0, 1))
        self.assertEqual(obj.get('xAxis', {}).get('min'), 0)
        self.assertEqual(obj.get('xAxis', {}).get('max'), 1)
        self.assertEqual(obj.get('yAxis', [])[0].get('min'), 0)
        self.assertEqual(obj.get('yAxis', [])[0].get('max'), 1)
        # Font size applies to both axes' labels.
        obj = serialize(df, render_to="chart", output_type="json", fontsize=12, figsize=(4, 5))
        self.assertEqual(obj.get('xAxis', {}).get('labels', {}).get('style', {}).get('fontSize'), 12)
        self.assertEqual(obj.get('yAxis', [])[0].get('labels', {}).get('style', {}).get('fontSize'), 12)
        # Title and explicit tick positions.
        obj = serialize(df, render_to="chart", output_type="json", title='Chart', xticks=[1], yticks=[2])
        self.assertTrue(obj.get('title', {}).get('text'))
        self.assertTrue(obj.get('xAxis', {}).get('tickPositions'))
        for yaxis in obj.get('yAxis', []):
            self.assertTrue(yaxis.get('tickPositions'))
        # Custom tooltip and polar charts pass through.
        obj = serialize(df, render_to="chart", output_type="json", fontsize=12, kind='pie', x='s', y=['a'], tooltip={'pointFormat': '{series.name}: <b>{point.percentage:.1f}%</b>'})
        self.assertTrue(obj.get('tooltip'))
        obj = serialize(df, render_to="chart", output_type="json", polar=True, x='s', y=['a'])
        self.assertTrue(obj.get('chart', {}).get('polar'))
    def test_jsonencoder(self):
        """Dates/timestamps are encoded as epoch milliseconds."""
        self.assertEqual(json_encode(datetime.date(1970, 1, 1)), "0")
        self.assertEqual(json_encode(datetime.date(2015, 1, 1)), "1420070400000")
        self.assertEqual(json_encode(datetime.datetime(2015, 1, 1)), "1420070400000")
        self.assertEqual(json_encode(pandas.tslib.Timestamp(1420070400000000000)), "1420070400000")
|
albahnsen/pandas-highcharts
|
pandas_highcharts/tests.py
|
Python
|
mit
| 4,413
|
import logging
import accounts
import log
import scriptlib
from collections import defaultdict
# noinspection PyUnusedLocal
def main(a, args):
    """Print (country title, id) of friends + followers, most frequent first."""
    a.timeout = 10
    counts = defaultdict(int)
    everyone = (scriptlib.getFriends(a, fields='country') +
                scriptlib.getFollowers(a, fields='country'))
    for user in everyone:
        if 'country' in user:
            key = (user['country']['title'], user['country']['id'])
        else:
            key = ('-', 0)  # country not set / hidden
        counts[key] += 1
    for entry in sorted(counts.items(), key=lambda item: item[1], reverse=True):
        print(*entry)
|
kalinochkind/vkbot
|
scripts/countries.py
|
Python
|
mit
| 567
|
# -*- coding: utf-8 -*-
# © <2016> <Moneygrid Project, Lucas Huber, Yannick Buron>
# based on account_wallet by Yannick Buron, Copyright Yannick Buron
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
import exchange_membership
|
moneygrid/vertical-exchange
|
exchange_membership/__init__.py
|
Python
|
gpl-3.0
| 249
|
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for Schema Validation."""
import jsonschema
DEFAULT = 'default'
PROPERTIES = 'properties'
REF = '$ref'
REQUIRED = 'required'
def ExtendWithDefault(validator_class):
    """Takes a validator and makes it set default values on properties.
    Args:
      validator_class: A class to add our overridden validators to
    Returns:
      A validator_class that will set default values
      and ignore required fields
    """
    validate_properties = validator_class.VALIDATORS['properties']
    def SetDefaultsInProperties(validator, user_schema, user_properties,
                                parent_schema):
        # Delegate to SetDefaults, passing along the original 'properties'
        # validator so normal validation still runs after defaulting.
        SetDefaults(validator, user_schema or {}, user_properties,
                    parent_schema, validate_properties)
    # Replace 'properties' with the defaulting wrapper and neuter 'required'
    # so missing-but-defaultable fields do not fail validation.
    return jsonschema.validators.extend(
        validator_class, {PROPERTIES: SetDefaultsInProperties,
                          REQUIRED: IgnoreKeyword})
def SetDefaults(validator, user_schema, user_properties, parent_schema,
                validate_properties):
    """Populate the default values of properties.
    Args:
      validator: A generator that validates the "properties" keyword
          of the schema
      user_schema: Schema which might define defaults, might be a nested
          part of the entire schema file.
      user_properties: User provided values which we are setting defaults on
      parent_schema: Schema object that contains the schema being
          evaluated on this pass, user_schema.
      validate_properties: Validator function, called recursively.
    """
    # items() instead of the Python-2-only iteritems(): equivalent for plain
    # iteration and keeps this module runnable on Python 3.
    for schema_property, subschema in user_schema.items():
        # The ordering of these conditions assumes that '$ref' blocks override
        # all other schema info, which is what the jsonschema library assumes.
        # If the subschema has a reference,
        # see if that reference defines a 'default' value
        if REF in subschema:
            out = ResolveReferencedDefault(validator, subschema[REF])
            user_properties.setdefault(schema_property, out)
        # Otherwise, see if the subschema has a 'default' value
        elif DEFAULT in subschema:
            user_properties.setdefault(schema_property, subschema[DEFAULT])
    # Recursively apply defaults. This is a generator, we must wrap with list()
    list(validate_properties(validator, user_schema,
                             user_properties, parent_schema))
def ResolveReferencedDefault(validator, ref):
    """Resolves a reference, and returns any default value it defines.
    Args:
      validator: A generator that validates the "$ref" keyword
      ref: The target of the "$ref" keyword
    Returns:
      The value of the 'default' field found in the referenced schema,
      or None
    """
    with validator.resolver.resolving(ref) as resolved:
        if DEFAULT in resolved:
            return resolved[DEFAULT]
        # Falls through: implicitly returns None when no default is defined.
def IgnoreKeyword(
        unused_validator, unused_required, unused_instance, unused_schema):
    """JsonSchema validator hook that intentionally accepts everything."""
    return None
|
sparkprime/helm
|
expansion/schema_validation_utils.py
|
Python
|
apache-2.0
| 3,726
|
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
class LandingViewTestCase(TestCase):
    """The landing page resolves and renders successfully."""
    def test_landing(self):
        url = reverse('landing')
        res = self.client.get(url)
        self.assertEqual(200, res.status_code)
class LandingTestCase(TestCase):
    """URL-routing sanity checks for the landing app."""
    def test_no_catch_all(self):
        """Ensure there are no URL routes that catch-all"""
        client = Client()
        # An unknown path must 404, not be swallowed by a wildcard route.
        response = client.get('/this-should-not-be-a-valid-endpoint')
        self.assertEqual(response.status_code, 404)
|
frewsxcv/lop.farm
|
app/landing/tests.py
|
Python
|
mpl-2.0
| 544
|
# -*- encoding: utf-8 -*-
class SIS(object):
    """Mutable parameter container for an SIS model.

    Exposes four read/write parameters through properties:
    Lambda (default -1/4), mu (default 2), u (default 1) and v (default 2).
    """

    def __init__(self):
        # Default parameter values (same as the historical class attributes).
        self._lambda = -1.0 / 4.0
        self._mu = 2
        self._u = 1
        self._v = 2

    @property
    def Lambda(self):
        """Lambda parameter (capitalized: `lambda` is a Python keyword)."""
        return self._lambda

    @Lambda.setter
    def Lambda(self, value):
        self._lambda = value

    @property
    def mu(self):
        """Mu parameter."""
        return self._mu

    @mu.setter
    def mu(self, value):
        self._mu = value

    @property
    def u(self):
        """u parameter."""
        return self._u

    @u.setter
    def u(self, value):
        self._u = value

    @property
    def v(self):
        """v parameter."""
        return self._v

    @v.setter
    def v(self, value):
        self._v = value
|
ElricleNecro/LibThese
|
LibThese/Models/SIS.py
|
Python
|
lgpl-3.0
| 559
|
# Copyright 2020 Binovo IT Human Project SL
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import models, api
class AccountInvoiceRefund(models.TransientModel):
    """Extend the refund wizard so the chosen refund mode is available
    in the context (used downstream by the TicketBAI localization)."""
    _inherit = 'account.invoice.refund'

    @api.multi
    def invoice_refund(self):
        """Read the selected ``filter_refund`` option and re-run the
        standard wizard with it injected into the context."""
        refund_mode = self.read(['filter_refund'])[0]['filter_refund']
        ctx_self = self.with_context(filter_refund=refund_mode)
        return super(AccountInvoiceRefund, ctx_self).invoice_refund()
|
factorlibre/l10n-spain
|
l10n_es_ticketbai/wizard/account_invoice_refund.py
|
Python
|
agpl-3.0
| 475
|
# File: ReportingServer.py ; This file is part of Twister.
# version: 2.009
# Copyright (C) 2012-2013 , Luxoft
# Authors:
# Andrei Costachi <acostachi@luxoft.com>
# Andrei Toma <atoma@luxoft.com>
# Cristi Constantin <crconstantin@luxoft.com>
# Daniel Cioata <dcioata@luxoft.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains the Reporting Server.
It is used to view the results of the test executions.
The reports can be fully customized, by editing the DB.xml file.
The INDEX/ HOME links force the reload of the DB.xml file,
the rest of the links just use the cached data, from last reload.
"""
import os
import sys
import re
import time
import datetime
import json
import mako
import binascii
import MySQLdb
import cherrypy
from collections import OrderedDict
from mako.template import Template
# Twister must be installed and $TWISTER_PATH must point at its root,
# otherwise the server cannot locate templates or the common modules.
TWISTER_PATH = os.getenv('TWISTER_PATH')
if not TWISTER_PATH:
    print('\n$TWISTER_PATH environment variable is not set! Exiting!\n')
    exit(1)
# Make the Twister packages importable.
if TWISTER_PATH not in sys.path:
    sys.path.append(TWISTER_PATH)
from common.helpers import *
from common.tsclogging import *
from common.xmlparser import *
# Old Mako versions are known to break some report templates.
if mako.__version__ < '0.7':
    logWarning('Warning! Mako-template version `{}` is old! Some pages might crash!\n'.format(mako.__version__))
# --------------------------------------------------------------------------------------------------
# # # # C L A S S R e p o r t i n g # # #
# --------------------------------------------------------------------------------------------------
class ReportingServer:
    """CherryPy application that renders Twister test-execution reports.

    Report definitions (queries, fields, redirects) come from each user's
    DB.xml file; query results are fetched over MySQL and rendered through
    Mako templates (HTML pages) or returned as JSON for the AJAX charts.
    """

    # Per-user caches, keyed by user name (class-level dicts shared by all requests).
    db_parser = {}       # user -> DBParser built from the user's DB.xml
    glob_fields = {}     # user -> report field definitions
    glob_reports = {}    # user -> report definitions (queries, type, folder)
    glob_redirects = {}  # user -> redirect-only links
    glob_links = {}      # user -> navigation links shown on every page
    conn = {}            # user -> MySQL connection
    curs = {}            # user -> MySQL cursor

    def __init__(self, project):
        # `project` provides user info lookup and password decryption.
        self.project = project

    def load_config(self, usr, force=False):
        '''
        Read DB Config File for 1 user.

        Returns False on a missing Twister install or DB.xml; otherwise
        (re)builds the per-user caches when `force` is set or nothing is
        cached yet, and reconnects to the database in that case.
        '''
        if not os.path.isdir(userHome(usr) + '/twister/config'):
            logError('Report Server: Cannot find Twister for user `{}` !'.format(usr))
            return False
        # Get the path to DB.XML
        db_file = self.project.getUserInfo(usr, 'db_config')
        if not db_file:
            logError('Report Server: Null DB.XML file for user `{}`! Nothing to do!'.format(usr))
            return False
        # Create database parser IF necessary, or FORCED...
        if force or (usr not in self.db_parser):
            self.db_parser[usr] = DBParser(db_file)
            self.glob_fields[usr] = self.db_parser[usr].getReportFields()
            self.glob_reports[usr] = self.db_parser[usr].getReports()
            self.glob_redirects[usr] = self.db_parser[usr].getRedirects()
            # There are more types of reports:
            # Normal links, like Home, Help and other normal reports
            # Redirect links, that don't contain reports
            # Folders, that don't go anywhere, are just labels for reports
            self.glob_links[usr] = [{'link': 'Home', 'folder': '', 'type': 'link'}] +\
                [{'link': k, 'folder': v.get('folder', ''), 'type': 'link'} for k, v in self.glob_reports[usr].iteritems() ] +\
                [{'link': k, 'folder': '', 'type': 'redir'} for k in self.glob_redirects[usr] ] +\
                [{'link': 'Help', 'folder': '', 'type': 'link'}]
            self.connect_db(usr)

    def connect_db(self, usr):
        '''
        Reconnect to the database.
        '''
        db_config = self.db_parser[usr].db_config
        # Decode database password
        db_password = self.project.decryptText( db_config.get('password') )
        if not db_password:
            logError('Report Server: Cannot decrypt the database password for user `{}`!'.format(usr))
            db_password = '0'
        self.conn[usr] = MySQLdb.connect(host=db_config.get('server'), db=db_config.get('database'),
            user=db_config.get('user'), passwd=db_password)
        self.curs[usr] = self.conn[usr].cursor()

    # Report link 1
    @cherrypy.expose
    def index(self, usr=''):
        """Home page: user picker when no user is given, otherwise the
        user's report index (forces a DB.xml reload)."""
        if not usr:
            users = self.project.listUsers()
            output = Template(filename=TWISTER_PATH + '/server/template/rep_base.htm')
            return output.render(title='Users', usr='#' + '#'.join(users), links=[])
        if not os.path.isdir(userHome(usr) + '/twister/config'):
            return '<br><b>Error! Username `{}` doesn\'t have a Twister config folder!</b>'.format(usr)
        # FORCE re-load all Database XML on INDEX/ HOME links !
        self.load_config(usr, True)
        output = Template(filename=TWISTER_PATH + '/server/template/rep_base.htm')
        return output.render(title='Home', usr=usr, links=self.glob_links[usr])

    # Report link 2
    @cherrypy.expose
    def home(self, usr=''):
        # Alias of index().
        return self.index(usr=usr)

    # Report link 3
    @cherrypy.expose
    def report(self, usr=''):
        # Alias of index().
        return self.index(usr=usr)

    # Report link 4
    @cherrypy.expose
    def reporting(self, usr=''):
        # Alias of index().
        return self.index(usr=usr)

    # Help link
    @cherrypy.expose
    def help(self, usr=''):
        """Static help page, rendered with the user's navigation links."""
        if not usr: return '<br><b>Error! This link should be accessed by passing a username, eg: /help/some_user<b/>'
        if not os.path.isdir(userHome(usr) + '/twister/config'):
            return '<br><b>Error! Username `{}` doesn\'t have a Twister config folder!</b>'.format(usr)
        self.load_config(usr) # Re-load all Database XML
        output = Template(filename=TWISTER_PATH + '/server/template/rep_help.htm')
        return output.render(title='Help', usr=usr, links=self.glob_links[usr])

    # Reporting link
    @cherrypy.expose
    def rep(self, report=None, usr=None, **args):
        """Render one report page.

        Shows the field-selection form when the report's SQL contains
        @variables@ and none were submitted yet; otherwise substitutes
        the chosen values, runs the query (and the optional side-by-side
        compare query) and renders the table/chart skeleton, which then
        fetches its data from /json/.
        """
        if not usr: return '<br><b>Error! This link should be accessed by passing a username, eg: /rep/some_user<b/>'
        if not os.path.isdir(userHome(usr) + '/twister/config'):
            return '<br><b>Error! Username `{}` doesn\'t have a Twister config folder!</b>'.format(usr)
        self.load_config(usr) # Re-load all Database XML
        if usr not in self.conn: self.connect_db(usr)
        # Disable client-side caching for report pages.
        cherrypy.response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
        cherrypy.response.headers['Pragma'] = 'no-cache'
        cherrypy.response.headers['Expires'] = 0
        if not report:
            raise cherrypy.HTTPRedirect('/error')
        if report in self.glob_redirects[usr]:
            redirect_dict = self.glob_redirects[usr][report]['path']
            raise cherrypy.HTTPRedirect(redirect_dict)
        if report not in self.glob_reports[usr]:
            output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
            return output.render(title='Missing report', usr=usr, links=self.glob_links[usr], msg='Report `<b>{}</b>` is not defined!'.format(report))
        # All info about the report, from DB XML
        report_dict = self.glob_reports[usr][report]
        query = report_dict['sqlquery']
        # All variables that must be replaced in Query
        vars_to_replace = re.findall('(@.+?@)', query)
        # ------------------------------------------------------------------------------------------
        # If the user didn't select fields YET :
        # ------------------------------------------------------------------------------------------
        if vars_to_replace and not cherrypy.request.params:
            # Options are defined as: Type, Label, Data
            u_options = OrderedDict()
            for opt in vars_to_replace:
                u_field = self.glob_fields[usr].get(opt.replace('@', ''))
                this_option = {}
                if not u_field:
                    output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
                    return output.render(links=self.glob_links[usr], title=report, usr=usr,
                        msg='Cannot build query!<br><br>Field `<b>{}</b>` is not defined in the fields section!'.format(opt.replace('@', '')))
                this_option['type'] = u_field.get('type')
                this_option['label'] = u_field.get('label')
                # Field type : User Select
                if this_option['type'] == 'UserSelect':
                    u_query = u_field.get('sqlquery')
                    if not u_query:
                        output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
                        return output.render(links=self.glob_links[usr], title=report, usr=usr,
                            msg='Cannot build query!<br><br>Field `<b>{}</b>` doesn\'t have a query!'.format(opt.replace('@', '')))
                    # Execute User Query
                    try:
                        self.curs[usr].execute(u_query)
                    except MySQLdb.Error, e:
                        # Best-effort reconnect before reporting the error.
                        try:
                            self.connect_db(usr)
                        except:
                            pass
                        output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
                        return output.render(links=self.glob_links[usr], title=report, usr=usr,
                            msg='Error in query `{}`!<br><br><b>MySQL Error {}</b>: {}!'.format(u_query, e.args[0], e.args[1]))
                    try:
                        u_vals = self.curs[usr].fetchall()
                    except Exception, e:
                        output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
                        return output.render(links=self.glob_links[usr], title=report, usr=usr,
                            msg='Error in query `{}`!<br><br><b>Exception</b>: {}!'.format(u_query, e))
                    # No data available
                    if not u_vals:
                        this_option['data'] = []
                    # Data has one column
                    elif len(u_vals[0]) == 1:
                        field_data = [ (val[0], val[0]) for val in u_vals ]
                        this_option['data'] = field_data
                    # Data has 2 or more columns
                    else:
                        field_data = [ ( str(val[0]), str(val[0])+': '+'| '.join(val[1:]) ) for val in u_vals ]
                        this_option['data'] = field_data
                # Field type : User Text
                elif this_option['type'] == 'UserText':
                    this_option['data'] = ''
                else:
                    output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
                    return output.render(title=report, links=self.glob_links[usr], usr=usr,
                        msg='Field `<b>{}</b>` is of unknown type: <b>{}</b>!'.format(opt.replace('@', ''), this_option['type']))
                u_options[opt] = this_option
            output = Template(filename=TWISTER_PATH + '/server/template/rep_base.htm')
            return output.render(title=report, usr=usr, links=self.glob_links[usr], options=u_options)
        # ------------------------------------------------------------------------------------------
        # If the user has selected the fields :
        # ------------------------------------------------------------------------------------------
        ajax_links = []
        # ... For normal Queries ...
        for field in vars_to_replace:
            # The value chosen by the user
            u_select = cherrypy.request.params.get(field)
            if not u_select: u_select = ''
            ajax_links.append(field +'='+ u_select)
            # Replace @variables@ with user chosen value
            query = query.replace(field, str(u_select))
        ajax_links = sorted( list(set(ajax_links)) )
        ajax_link = '/report/json/' + report + '/' + usr + '?' + '&'.join(ajax_links)
        user_choices = ('", '.join(ajax_links))
        user_choices = user_choices.replace('@', '').replace('=', '="')+'"'
        del ajax_links
        try:
            self.curs[usr].execute(query)
        except MySQLdb.Error, e:
            # Best-effort reconnect before reporting the error.
            try:
                self.connect_db(usr)
            except:
                pass
            #
            output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
            return output.render(title=report, links=self.glob_links[usr], usr=usr,
                msg='Error in query `{}`!<br><br><b>MySQL Error {}</b>: {}!'.format(query, e.args[0], e.args[1]))
        descr = [desc[0] for desc in self.curs[usr].description]
        # Write DEBUG
        #DEBUG.write(report +' -> '+ user_choices +' -> '+ query + '\n\n') ; DEBUG.flush()
        # ... For Query Compare side by side, the table is double ...
        query_compr = report_dict['sqlcompr']
        if query_compr:
            # All variables that must be replaced in Query
            vars_to_replace = re.findall('(@.+?@)', query_compr)
            for field in vars_to_replace:
                # The value chosen by the user
                u_select = cherrypy.request.params.get(field)
                # Replace @variables@ with user chosen value
                query_compr = query_compr.replace(field, str(u_select))
            try:
                self.curs[usr].execute(query_compr)
            except MySQLdb.Error, e:
                # Best-effort reconnect before reporting the error.
                try:
                    self.connect_db(usr)
                except:
                    pass
                #
                output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
                return output.render(title=report, links=self.glob_links[usr], usr=usr,
                    msg='Error in query `{}`!<br><br><b>MySQL Error {}</b>: {}!'.format(query_compr, e.args[0], e.args[1]))
            headers_tot = [desc[0] for desc in self.curs[usr].description]
            # Update headers: must contain both headers.
            descr = descr + ['vs.'] + headers_tot
            # Write DEBUG
            #DEBUG.write(report +' -> '+ user_choices +' -> '+ query_compr + '\n\n') ; DEBUG.flush()
        output = Template(filename=TWISTER_PATH + '/server/template/rep_base.htm')
        return output.render(usr=usr, title=report, links=self.glob_links[usr], ajax_link=ajax_link, user_choices=user_choices,
            report=descr, chart=report_dict['type'])

    # JSON link
    @cherrypy.expose
    def json(self, report, usr, **args):
        """Return the report's data as JSON for the AJAX charts.

        Supports three query shapes: plain ('sqlquery' only), total
        ('sqltotal': second query, rows become percentages of it) and
        compare ('sqlcompr': second result set appended side by side).
        """
        if not usr:
            output = {'aaData':[], 'error':'Error! This link should be accessed by passing a username, eg: /json/some_report/some_user'}
            return json.dumps(output, indent=2)
        if not os.path.isdir(userHome(usr) + '/twister/config'):
            output = {'aaData':[], 'error':'Error! Username `{}` doesn\'t have a Twister config folder!'.format(usr)}
            return json.dumps(output, indent=2)
        self.load_config(usr) # Re-load all Database XML
        if usr not in self.conn: self.connect_db(usr)
        # JSON responses must never be cached client-side.
        cherrypy.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        cherrypy.response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
        cherrypy.response.headers['Pragma'] = 'no-cache'
        cherrypy.response.headers['Expires'] = 0
        if report not in self.glob_reports[usr]:
            output = {'aaData':[], 'error':'Report `{0}` is not in the list of defined reports!'.format(report)}
            return json.dumps(output, indent=2)
        # All info about the report, from DB XML.
        report_dict = self.glob_reports[usr][report]
        query = report_dict['sqlquery']
        # All variables that must be replaced in Query
        vars_to_replace = re.findall('(@.+?@)', query)
        for field in vars_to_replace:
            # The value chosen by the user
            u_select = cherrypy.request.params.get(field)
            # Replace @variables@ with user chosen value
            query = query.replace(field, str(u_select))
        try:
            self.curs[usr].execute(query)
        except MySQLdb.Error, e:
            # Best-effort reconnect before reporting the error.
            try:
                self.connect_db(usr)
            except:
                pass
            output = {'aaData':[], 'error':'Error in query `{}`! MySQL Error {}: {}!'.format(query, e.args[0], e.args[1])}
            return json.dumps(output, indent=2)
        headers = [desc[0] for desc in self.curs[usr].description]
        rows = self.curs[usr].fetchall()
        del query
        query_total = report_dict['sqltotal']
        query_compr = report_dict['sqlcompr']
        # ... Calculate SQL Query Total ...
        if query_total:
            # All variables that must be replaced in Query
            vars_to_replace = re.findall('(@.+?@)', query_total)
            for field in vars_to_replace:
                # The value chosen by the user
                u_select = cherrypy.request.params.get(field)
                # Replace @variables@ with user chosen value
                query_total = query_total.replace(field, str(u_select))
            try:
                self.curs[usr].execute(query_total)
            except MySQLdb.Error, e:
                # Best-effort reconnect before reporting the error.
                try:
                    self.connect_db(usr)
                except:
                    pass
                output = {'aaData':[], 'error':'Error in query total `{}`! MySQL Error {}: {}!'.format(query_total, e.args[0], e.args[1])}
                return json.dumps(output, indent=2)
            headers_tot = [desc[0] for desc in self.curs[usr].description]
            rows_tot = self.curs[usr].fetchall()
            # Both queries must be shape-compatible to compute percentages.
            if len(headers) != len(headers_tot):
                output = {'aaData':[], 'error':'The first query has {} columns and the second has {} columns!'.format(len(headers), len(headers_tot))}
                return json.dumps(output, indent=2)
            if len(rows) != len(rows_tot):
                output = {'aaData':[], 'error':'The first query has {} rows and the second has {} rows!'.format(len(rows), len(rows_tot))}
                return json.dumps(output, indent=2)
            # Will calculate the new rows like this:
            # The first column of the first query will not be changed
            # The second row of the first query / the second row of the second query * 100
            calc_rows = []
            for i in range(len(rows)):
                row = rows[i]
                tot_row = list(rows_tot[i])
                # Null and None values must be numbers
                if not row[0]: row = (0.0, row[1])
                if not row[1]: row = (row[0], 0.0)
                if not tot_row[0]: tot_row[0] = 0.0
                if not tot_row[1]: tot_row[1] = 0.1
                # Calculate percent...
                percent = '%.2f' % ( float(row[1]) / tot_row[1] * 100.0 )
                # Using the header from Total, because it might be Null in the first query
                calc_rows.append([tot_row[0], float(percent)])
        # ... SQL Query Compare side by side ...
        elif query_compr:
            # All variables that must be replaced in Query
            vars_to_replace = re.findall('(@.+?@)', query_compr)
            for field in vars_to_replace:
                # The value chosen by the user
                u_select = cherrypy.request.params.get(field)
                # Replace @variables@ with user chosen value
                query_compr = query_compr.replace(field, str(u_select))
            try:
                self.curs[usr].execute(query_compr)
            except MySQLdb.Error, e:
                # Best-effort reconnect before reporting the error.
                try:
                    self.connect_db(usr)
                except:
                    pass
                # NOTE(review): this message formats `query_total` — it probably
                # should format `query_compr`; confirm before changing.
                output = {'aaData':[], 'error':'Error in query compare `{}`! MySQL Error {}: {}!'.format(query_total, e.args[0], e.args[1])}
                return json.dumps(output, indent=2)
            headers_tot = [desc[0] for desc in self.curs[usr].description]
            rows_tot = self.curs[usr].fetchall()
            if len(headers) != len(headers_tot): # Must be the same number of columns
                output = {'aaData':[], 'error':'The first query has {} columns and the second has {} columns!'.format(len(headers), len(headers_tot))}
                return json.dumps(output, indent=2)
            headers_len = len(headers)
            rows_max_size = max(len(rows), len(rows_tot))
            calc_rows = []
            # Pad the shorter result set with blanks so rows align.
            for i in range(rows_max_size):
                r1 = rows[i:i+1]
                r2 = rows_tot[i:i+1]
                if not r1: r1 = [' ' for i in range(headers_len)]
                else: r1 = r1[0]
                if not r2: r2 = [' ' for i in range(headers_len)]
                else: r2 = r2[0]
                calc_rows.append( tuple(r1) +(' <---> ',)+ tuple(r2) )
            # Update headers: must contain both headers.
            headers = headers + ['vs.'] + headers_tot
        # ... Normal Query ...
        else:
            calc_rows = rows
        del rows
        if (not calc_rows) or (not calc_rows[0:1]):
            output = {'aaData':[], 'error':'The select is empty!'}
            return json.dumps(output, indent=2)
        # Tell the chart whether the X axis holds datetimes.
        if isinstance(calc_rows[0][0], datetime.datetime):
            isDate = True
        else:
            isDate = False
        # Serialize datetimes as 'YYYY-MM-DD HH:MM:SS' strings.
        dthandler = lambda obj: obj.strftime('%Y-%m-%d %H:%M:%S') if isinstance(obj, datetime.datetime) else None
        return json.dumps({'headers':headers, 'type':report_dict['type'], 'isDate':isDate, 'aaData':calc_rows},
            indent=2, default=dthandler)

    # Error page
    @cherrypy.expose
    def error(self, **args):
        """Explicit 404-style error page."""
        output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
        return output.render(title='Error 404', links=[], msg='Sorry, this page does not exist!')

    # Error page
    @cherrypy.expose
    def default(self, **args):
        """Fallback handler for any unknown URL (CherryPy `default`)."""
        output = Template(filename=TWISTER_PATH + '/server/template/rep_error.htm')
        return output.render(title='Error 404', links=[], msg='Sorry, this page does not exist!')
# Eof()
|
twister/twister.github.io
|
server/ReportingServer.py
|
Python
|
apache-2.0
| 22,585
|
############################################################
## Balistic Motion Simulator, organic ##
## V0.5 ##
## function Cleaned ##
## Copyright 2015, Thomas Boudreaux ##
############################################################
#Libraries to Import
from __future__ import division
from visual import *
from visual.controls import *
import time
import sys
import threading
#Global Variables
# Simulation state flags
impacted = False   # projectile has touched the ground
ended = False      # program termination requested
follow = False     # camera tracks the projectile when True
endtime = True     # keeps the termination prompt looping
paused = False     # simulation paused via 'k' key
dt = .01           # integration time step (s)
height = 0         # initial drop height entered by the user
pos = vector(0,0,0)   # projectile position (bound to the sphere in ObjectDraw)
bpr = 0            # projectile (ball) radius
gp = vector(0,0,0)    # ground box position
gz = vector(0,0,0)    # ground box size
vix = 0            # initial/current horizontal velocity
viy = 0            # current vertical velocity
g = 0              # gravitational acceleration (stored negative = downward)
temp = 0           # air temperature entered by the user
menreturn = 0      # value handed back by YesnoMenu
BoolReturn = 1     # value handed back by BoolMenu (1 = Yes)
infopasser = False # set when the user entered a custom value in YesnoMenu
pressure = 0       # air pressure for drag calculations
R = 287.058        # specific gas constant of dry air (J/(kg*K))
Cd = 0             # drag coefficient entered by the user
Aref = 0           # drag reference area, set in ObjectDraw
mass = 0           # projectile mass entered by the user
# Error Codes (user-facing prompts; do not change wording lightly)
E1 = 'Error, Please enter either Yes or no, program is case sensitive'
E2 = 'Error, Please enter Yes, program is case sensitive'
# Menu Questions
deftemp = 'Would you like to use standard Earth temperature? [Yes/no]: '
enttemp = 'Please enter your desired temerature in the degrees celcius: '
defgrav = 'Would you like to use an Approximation of Earth Normal Gravitational Acceleration at Sea level? [Yes/no]: '
entgrav = 'Please Entered Desired Gravity (enter absolute value of acceletation): '
track = 'Would You like to track the projectile? [Yes/no]: '
defpresh = 'Would you like to use Pressure at Sea Level on Earth? [Yes/no]: '
entpresh = 'Please Enter Desired Pressure for Use in Drag Calculations: '
#Meta Functions
def YesnoMenu(value, question, nextquestion):
    """Ask a Yes/no question; store the answer in the global `menreturn`.

    'Yes' -> menreturn = value (the default); 'no' -> prompt `nextquestion`
    and store the user's typed value, also setting `infopasser`.
    Re-asks on any other answer (case sensitive).
    """
    global E1, menreturn, infopasser
    men = True
    while men is True:
        yesno = raw_input(question)
        if yesno == "Yes":
            menreturn = value
            men = False
        elif yesno == "no":
            # NOTE(review): Python 2 input() evaluates the typed text;
            # raw_input + float() would be safer — confirm before changing.
            menreturn = input(nextquestion)
            men = False
            infopasser = True
        else:
            print E1
            men = True
def BoolMenu(question):
    """Ask a Yes/no question; store 1 (Yes) or 0 (no) in global `BoolReturn`.

    Re-asks on any other answer (case sensitive).
    """
    global E1, BoolReturn
    men = True
    while men is True:
        yesno = raw_input(question)
        if yesno == "Yes":
            BoolReturn = 1
            men = False
        elif yesno == "no":
            BoolReturn = 0
            men = False
        else:
            print E1
            men = True
#Main Functions
def UserInput():
    """Interactively collect all simulation parameters into module globals."""
    global follow, vix, viy, g, follow, height, deftemp, enttemp, temp, track, infopasser, pressure, Cd, mass
    # NOTE(review): Python 2 input() evaluates the typed text — raw_input
    # plus explicit float() would be safer; confirm before changing.
    mass = input('Please Enter Mass of Projectile: ')
    vix = input('Please Enter Initial X Velocity: ')
    viy = input('Please Eneter Initial Y Velocity: ')
    height = input('Please Enter Initial Height: ')
    # Gravity: 9.81 by default or user-entered magnitude; stored negative (downward).
    YesnoMenu(9.81, defgrav, entgrav)
    g = menreturn
    g = -g
    # Temperature: defaults to 16 (prompt says degrees Celsius).
    YesnoMenu(16, deftemp, enttemp)
    temp = menreturn
    # Pressure: defaults to 101.325 (sea level).
    YesnoMenu(101.325, defpresh, entpresh)
    pressure = menreturn
    print pressure
    Cd = input('Please enter Drag coefficient for object: ')
    # Whether the camera should follow the projectile.
    BoolMenu(track)
    follow = bool(BoolReturn)
def PadPlacement():
    """Place a landing-pad box at the projectile's predicted impact point,
    using drag-free kinematics (apex rise from initial speed, then free fall)."""
    global vix, viy, g
    apex_time = viy / g  # computed as in the original; not used below
    apex_rise = (vix ** 2 + viy ** 2) / (2 * abs(g))
    total_drop = apex_rise + height
    fall_time = sqrt((2 * total_drop) / abs(g))
    impact_x = vix * fall_time
    pad = box(pos=(impact_x, -1, 0), size=(100, 2, 100))
def ScreenParam():
    """Configure the camera: disable autoscaling, set a fixed viewing range."""
    global height, pos
    scene.autoscale = 0
    scene.range = (100,height + 25,100)
    #scene.center = (pos.x,pos.y,pos.z)
def ObjectDraw():
    """Build the 3D scene: ground, drop structure and the projectile sphere.

    Publishes the sphere's position/radius and the ground's position/size
    into module globals used by the integration loop.
    """
    global height, pos, bpr, gp, gz, Aref
    ground = box(pos=(0,-1,0), size=(100,2,100))
    DropStructure = box(pos=(0,(.5 * height),0), size=(5,height,5), color=color.green)
    dsp = DropStructure.pos
    dss = DropStructure.size
    # Projectile radius.
    tr = 2
    # Sphere sits on top of the drop structure; leaves a trail as it moves.
    ballistic = sphere(pos=(dsp.x, dsp.y + (.5*dss.y) + (.5*tr),dsp.z), radius = tr, make_trail=True)
    pos = ballistic.pos
    bpr = ballistic.radius
    gp = ground.pos
    gz = ground.size
    bt = ballistic.trail_object.color
    scene.center = (pos.x,pos.y,pos.z) # This is here for ease of variable use; it should really be in ScreenParam()
    # NOTE(review): 4*pi*r**2 is the sphere's total surface area; the drag
    # reference area is usually the cross-section pi*r**2 — confirm intent.
    Aref = 4 * pi * (tr**2)
    print Aref
def keyInput(evt):
    """Keyboard handler.

    'k' pauses the simulation, 'u' unpauses it; the arrow keys pan the
    camera by one unit per press (only while not tracking the projectile).
    """
    global paused, follow, pos
    x = pos.x
    y = pos.y
    z = pos.z
    # BUG FIX: removed stray debug print('four') that fired on every key press.
    s = evt.key
    if (s=='k'):
        print('Pausing Program')
        paused = True
    elif (s=='u'):
        print('Unpausing Program')
        paused = False
    # Movement — BUG FIX: the original panned by inconsistent amounts
    # (right +1, left -2, up +2, down -2) because it re-applied the offset
    # when setting scene.center; now every direction pans by exactly 1.
    elif (s=="right" and follow == False):
        x = x + 1
        scene.center = (x, y, z)
    elif (s=="left" and follow == False):
        x = x - 1
        scene.center = (x, y, z)
    elif (s=="up" and follow == False):
        y = y + 1
        scene.center = (x, y, z)
    elif (s=="down" and follow == False):
        y = y - 1
        scene.center = (x, y, z)
def PositionUpdate():
global impacted, follow, dt, g, pos, viy, vix, gz, gp, paused, R, pressure, temp, Cd, mass
while not impacted:
#Positional Update Per Loop Run
x = 0
rate(100)
D = pressure/(R*temp)
DragY = (1/2)*D*(viy**2)*Cd*Aref
Fnety = -(mass * g) + DragY
NetAy = Fnety / mass
print DragY
DragX = (1/2)*D*(vix**2)*Cd*Aref
Fnetx = DragX
NetAx = Fnetx / mass
print DragX
bp = pos
dx = vix * dt + ((1/2)*NetAx*(dt**2))
bp.x = bp.x + dx
vfy = viy + (NetAy * dt)
dy = ((viy * dt) + ((NetAy * (dt**2))))/2
bp.y = bp.y + dy
viy = vfy
if follow == True:
scene.center = (bp.x, bp.y, 0)
if paused:
x = x + 1
time.sleep(x)
#Impact Check per loop run
if bp.y - (.5 * bpr) <= gp.y + (.5 * gz.y):
impacted = True
break
def ProgramTermination():
    """Block at an exit prompt until the user types 'Yes', then terminate.

    Any other answer prints E2 and re-asks; only sys.exit() leaves the loop.
    """
    # NOTE(review): declares E1 global but actually prints E2 below.
    global ended, endtime, E1
    while not ended:
        while endtime is True:
            end = raw_input('Would you like to end the program? [Yes]: ')
            if end == 'Yes':
                sys.exit()
            else:
                print E2
                endtime = True
def HoldingPattern():
    """Sleep for one short tick while the simulation is paused."""
    if paused is True:
        time.sleep(1e-2)
def main():
    """Entry point: gather user input, build the scene, bind keyboard
    controls, place the landing pad, run the simulation loop until impact,
    then prompt for termination. Call order matters."""
    global paused
    UserInput()
    ScreenParam()
    ObjectDraw()
    scene.bind('keydown', keyInput)
    PadPlacement()
    PositionUpdate()
    ProgramTermination()
#General Math Section
#Function Calls
main()  # run the simulation immediately on import/execution
|
tboudreaux/BallisticSim
|
MainBallistic.py
|
Python
|
gpl-2.0
| 6,306
|
# _ UnionGen.py ____________________________________________________________________________
# File generated automatically by ATOM3. Graph Grammar Rule
# ___________________________________________________________________________________________
from GGrule import *
from ASG_TypesMetaModel import *
from ModelType import *
from Operator import *
from TypeName import *
from LeafType import *
class UnionGen (GGrule):
def __init__(self, parent):
GGrule.__init__(self, 1)
self.TimeDelay = ATOM3Integer(2)
self.exactMatch = 1
self.LHS = ASG_TypesMetaModel(parent)
self.obj32=Operator(parent)
self.obj32.type.setValue( (['X', 'U', '->'], 1) )
self.obj32.type.config = 0
self.obj32.GGLabel.setValue(1)
self.obj32.graphClass_= graph_Operator
if parent.genGraphics:
from graph_Operator import *
new_obj = graph_Operator(215.0,300.0,self.obj32)
else: new_obj = None
self.obj32.graphObject_ = new_obj
self.LHS.addNode(self.obj32)
self.RHS = ASG_TypesMetaModel(parent)
self.obj34=Operator(parent)
self.obj34.type.setValue( (['X', 'U', '->'], 1) )
self.obj34.type.config = 0
self.obj34.GGLabel.setValue(1)
self.obj34.graphClass_= graph_Operator
if parent.genGraphics:
from graph_Operator import *
new_obj = graph_Operator(226.0,344.0,self.obj34)
else: new_obj = None
self.obj34.graphObject_ = new_obj
self.obj340= AttrCalc()
self.obj340.Copy=ATOM3Boolean()
self.obj340.Copy.setValue(('Copy from LHS', 1))
self.obj340.Copy.config = 0
self.obj340.Specify=ATOM3Constraint()
self.obj340.Specify.setValue(('AttrSpecify', (['Python', 'OCL'], 1), (['PREcondition', 'POSTcondition'], 1), (['EDIT', 'SAVE', 'CREATE', 'CONNECT', 'DELETE', 'DISCONNECT', 'TRANSFORM', 'SELECT', 'DRAG', 'DROP', 'MOVE'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), '\n'))
self.obj34.GGset2Any['type']= self.obj340
self.RHS.addNode(self.obj34)
def condition(self, graphID, isograph, atom3i):
    """Rule precondition: applies only when the matched node has not been
    processed yet (FILE_ empty) and every child node already has its FILE_
    slot filled. Side effect: collects the children in self.generatedChilds."""
    node = self.getMatched(graphID, self.LHS.nodeWithLabel(1))
    if node.FILE_: # This node has already been processed!
        return 0
    self.generatedChilds = []
    for child in node.out_connections_: # All childs must have the FILE_ slot filled...
        if child.FILE_: # if this is the case, add to list
            self.generatedChilds.append(child)
        else:
            return 0 # otherwise, the condition does not hold...
    return 1
def action(self, graphID, isograph, atom3i):
"action performed when the rule is applied "
def getElementFromName (list, name):
for element in list:
if element.getClass() == "LeafType":
if element.Type.getValue()[0] == name: return element
elif name == element.getClass() == "ModelType":
if element.MetaModelName.toString() == name: return element
return None
nnode = self.getMatched(graphID, self.LHS.nodeWithLabel(1))
AT3Types = ['Enum', 'String', 'Integer', 'Float', 'List', 'Attribute', 'Boolean', 'Connection', 'Port', 'Constraint', 'Appearance', 'BottomType', 'File', 'Link']
# compose the class name
className = ""
counter = 0
attributesInfo = []
for node in self.generatedChilds: # for each child that has been found...
if counter > 0: className = className+"U"
if node.getClass() == 'LeafType':
className = className + node.Type.getValue()[0] # append the name of the element
val = node.Type.getValue()
if val[1] in AT3Types: attributesInfo.append((val[0], 'ATOM3'+val[1], val[2], node))
else: attributesInfo.append((val[0], val[1], val[2], node))
elif node.getClass() == 'ModelType': # A "Model" node...
className += node.MetaModelName.toString() # add the MetaModel name to the class...
attributesInfo.append(( node.Name.toString(), "ASG_"+node.MetaModelName.toString(), None, node)) # add info. to the attributesInfo list
else:
className = className + node.FILE_
attributesInfo.append((node.FILE_, node.FILE_, None, node))
counter = counter + 1
print "performing action! className = ", className
ind = " "
nnode.FILE_=className
fileName = className+".py"
file = open(atom3i.codeGenDir+"/"+fileName, "w+t")
file.write("from Tkinter import *\n")
file.write("from ATOM3Type import *\n")
# generate imports...
imports = []
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
if not type in imports: imports.append(type)
if type == "ATOM3List": # should look for the initialValue
element = getElementFromName ( self.generatedChilds, name )
if element.getClass() == "LeafType" and element.Type.initialValue:
initialItems = element.Type.initialValue.getValue() # get a list of items...
for item in initialItems:
if not item.getTypeName() in imports: imports.append(item.getTypeName())
if element.Type.initialValue.itemType:
if not element.Type.initialValue.itemType.__name__ in imports: imports.append(element.Type.initialValue.itemType.__name__)
for type in imports:
file.write("from "+type+" import "+type+"\n")
file.write("class "+className+" (ATOM3Type):\n")
file.write(" def __init__(self):\n")
# .........................................................
# Generate the __init__ method
# .........................................................
file.write(ind+"ATOM3Type.__init__(self)\n")
file.write(ind+"self.optMenu = None\n")
file.write(ind+"self.selected= None\n")
file.write(ind+"self.lastSelected= None\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+"self."+name+"= None\n") # for the moment, initialize to None...
file.write("\n")
# .........................................................
# Generate the createComponents method
# .........................................................
file.write(" def createComponents(self):\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+"if not self."+name+":\n")
if node.getClass() == 'LeafType': # we are dealing with a simple type...
node.Type.initialValue.writeConstructor2File(file,ind+" ", "self."+name, 0, 1)
elif node.getClass() == 'ModelType': # a model
file.write(ind+" from "+type+" import "+type+"\n") # import the class...
file.write(ind+" self."+name+"="+type+"()\n")
else:
file.write(ind+" from "+name+" import *\n") # import the class...
file.write(ind+" self."+name+"="+type+"()\n")
file.write("\n")
# .........................................................
# Generate the show method
# .........................................................
file.write(" def show(self, parent, parentWindowInfo=None):\n")
file.write(ind+"ATOM3Type.show(self, parent, parentWindowInfo)\n")
file.write(ind+"self.createComponents()\n")
file.write(ind+"self.showParent = parent\n")
file.write(ind+"self.parentWindowInfo = parentWindowInfo\n")
file.write(ind+"self.selected = StringVar()\n")
file.write(ind+"if not self.lastSelected:\n")
# make 1st element selected...
name, type, valInitial, node = attributesInfo[0]
file.write(ind+" self.selected.set('"+name+"')\n")
file.write(ind+"else:\n")
file.write(ind+" self.selected.set(self.lastSelected)\n")
file.write(ind+"self.label = None\n")
file.write(ind+"self.widget = None\n")
file.write(ind+"self.containerFrame = Frame(parent)\n")
file.write(ind+"Label(self.containerFrame, text='Select Attribute').grid(row=0,column=0,sticky=W)\n")
file.write(ind+"self.optMenu = OptionMenu(self.containerFrame, self.selected")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(",'"+name+"'")
file.write(")\n")
file.write(ind+"self.selected.trace_variable( 'w', self.valueChanged)\n")
file.write(ind+"self.optMenu.grid(row=0,column=1,sticky=W)\n")
counter = 0
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
if counter == 0:
file.write(ind+"if self.lastSelected == '"+name+"':\n")
else:
file.write(ind+"elif self.lastSelected == '"+name+"':\n")
file.write(ind+" self.destroyAllBut(self."+name+")\n")
file.write(ind+" self.show"+name+"(self.showParent, self.parentWindowInfo)\n")
file.write(ind+"return self.containerFrame\n\n")
# .........................................................
# Generate the valueChanged method
# .........................................................
file.write(" def valueChanged(self, param1, param2, param3):\n")
counter = 0
file.write(ind+"value = self.selected.get()\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
if counter == 0:
file.write(ind+"if value == '"+name+"':\n")
else:
file.write(ind+"elif value == '"+name+"':\n")
file.write(ind+" if self.lastSelected != '"+name+"':\n")
file.write(ind+" self.destroyAllBut(self."+name+")\n")
file.write(ind+" self.show"+name+"(self.showParent, self.parentWindowInfo)\n")
# .........................................................
# Generate the destroyAllBut method
# .........................................................
file.write(" def destroyAllBut(self, survivor):\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+"if survivor != self."+name+":\n")
file.write(ind+" self."+name+".destroy()\n")
file.write("\n")
# .........................................................
# Generate the show<name> method
# .........................................................
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(" def show"+name+"(self, parent, parentWindow = None):\n")
file.write(ind+"if self.label and self.widget:\n")
file.write(ind+" self.label.grid_forget()\n")
file.write(ind+" self.widget.grid_forget()\n")
if node.getClass() == "LeafType": # Check if we should create an intermediate button or not
directEditing = node.Type.getValue()[4][1]
elif node.getClass() == "ModelType": # Always create an intermediat button te edit models
directEditing = 0
else: # direct editing for other composite types
directEditing = 1
file.write(ind+"self.label = Label(self.containerFrame, text='"+name+"')\n")
file.write(ind+"self.label.grid(row=1,column=0,sticky=W)\n")
if directEditing:
file.write(ind+"self.widget = self."+name+".show(self.containerFrame, self.parentWindowInfo)\n")
file.write(ind+"self.widget.grid(row=1,column=1,sticky=W)\n")
else:
file.write(ind+"Button( self.containerFrame, text = 'edit', ")
if node.getClass() == "ModelType":
file.write("command = lambda x=self : ATOM3TypeDialog(x.containerFrame, x."+name+", ATOM3TypeDialog.OPEN))")
else:
file.write("command = lambda x=self : ATOM3TypeDialog(x.containerFrame, x."+name+"))")
file.write(".grid(row=1,column=1,sticky=W)\n")
file.write(ind+"self.lastSelected = '"+name+"'\n")
file.write("\n")
# .........................................................
# Generate the toString method
# .........................................................
file.write(" def toString(self, fils = 25, cols = 5):\n")
file.write(ind+"self.createComponents()\n")
file.write(ind+"if self.selected:\n")
file.write(ind+" value = self.selected.get()\n")
file.write(ind+"elif self.lastSelected:\n")
file.write(ind+" value = self.lastSelected\n")
file.write(ind+"else:\n")
file.write(ind+" value = None\n")
counter = 0
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
if counter==0: file.write(ind+"if value == '"+name+"':\n")
else:file.write(ind+"elif value == '"+name+"':\n")
file.write(ind+" return self."+name+".toString(fils, cols)\n")
counter = counter + 1
file.write(ind+"return ''\n")
file.write("\n")
# .........................................................
# Generate the getValue method
# .........................................................
file.write(" def getValue(self):\n")
file.write(ind+"self.createComponents()\n")
file.write(ind+"return (")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write("self."+name+".getValue(),")
file.write(")\n\n")
# .........................................................
# Generate the setValue method
# .........................................................
file.write(" def setValue(self, value):\n")
file.write(ind+"self.createComponents()\n")
file.write(ind+"if value == None:\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+" self."+name+".setNone()\n")
file.write(ind+"else:\n")
counter = 0
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+" self."+name+".setValue(value["+str(counter)+"])\n")
counter = counter + 1
file.write("\n")
# .........................................................
# Generate the writeConstructor2File method
# .........................................................
file.write(' def writeConstructor2File(self, file, indent, objName="at", depth = 0, generatingCode = 0):\n')
file.write(ind+"self.createComponents()\n")
file.write(ind+"file.write(indent+objName+'= "+className+"()\\n')\n")
file.write(ind+"if self.lastSelected:\n")
file.write(ind+" file.write(indent+objName+'.lastSelected= "+'"'+"'+self.lastSelected+'"+'"'+"\\n')\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+" if self.lastSelected == '"+name+"' :\n")
file.write(ind+" file.write(indent+'from "+type+" import "+type+"\\n')\n")
file.write(ind+" file.write(indent+objName+'."+name+" = "+type+"()\\n')\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+"self."+name+".writeConstructor2File(file, indent, objName+'."+name+"', depth, generatingCode)\n")
file.write("\n")
# .........................................................
# Generate the writeValue2File method
# .........................................................
file.write(' def writeValue2File(self, file, indent, objName="at", depth = 0, generatingCode = 0):\n')
file.write(ind+"self.createComponents()\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+"if self.lastSelected == '"+name+"' :\n")
file.write(ind+" file.write(indent+'from "+type+" import "+type+"\\n')\n")
file.write(ind+" file.write(indent+objName+'."+name+" = "+type+"()\\n')\n")
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+"self."+name+".writeValue2File(file, indent, objName+'."+name+"', depth, generatingCode)\n")
file.write(ind+"if self.lastSelected:\n")
file.write(ind+" file.write(indent+objName+'.lastSelected= "+'"'+"'+self.lastSelected+'"+'"'+"\\n')\n")
file.write("\n")
# .........................................................
# Generate the clone method
# .........................................................
file.write(' def clone(self):\n')
file.write(ind+'"Makes an exact copy of itself"\n')
file.write(ind+'cloneObject = '+className+'()\n')
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+'if self.'+name+': cloneObject.'+name+' = self.'+name+'.clone()\n')
file.write(ind+'cloneObject.lastSelected = self.lastSelected\n')
file.write(ind+'return cloneObject\n')
# .........................................................
# Generate the copy method
# ........................................................
file.write(' def copy(self, other):\n')
file.write(ind+'"Copies the content of other into itself"\n')
file.write(ind+'ATOM3Type.copy(self, other)\n')
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+'self.'+name+' = other.'+name+'\n')
file.write(ind+'ASGNode.copy(self,other)\n')
# .........................................................
# Generate the destroy method
# .........................................................
file.write(' def destroy(self):\n')
file.write(ind+'"Destroys (i.e. updates) each field"\n')
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(ind+'if self.'+name+': self.'+name+'.destroy()\n')
file.write('\n\n')
# .........................................................
# Generate the invalid method
# .........................................................
file.write(' def invalid(self):\n')
file.write(' "checks whether the entity is valid or not"\n')
file.write(' inval = 0\n')
for element in attributesInfo: # for each element in attributesInfo...
name, type, valInitial, node = element # unpack element components
file.write(' if self.'+name+': inval = inval or self.'+name+'.invalid()\n')
file.write(' return inval\n\n')
file.close()
|
Balannen/LSMASOMM
|
atom3/Kernel/TypeModels/UnionGen.py
|
Python
|
gpl-3.0
| 20,885
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2011 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Please read the COPYING file.
#
import os
import pisi.api
import pisi.config
from buildfarm import cli, logger, utils
from buildfarm.releasecache import ReleaseCache
from buildfarm.config import configuration as conf
class PisiApi:
    """Thin wrapper around the pisi packaging API used by the build farm.

    Configures the pisi options once at construction time, then exposes
    build/install operations plus accessors for the artifacts produced by
    the most recent build.
    """
    def __init__(self, stdout=None, stderr=None, output_dir = conf.workdir):
        self.options = pisi.config.Options()
        # Override these so that pisi searches for .pisi files in the right locations
        self.options.output_dir = output_dir
        self.options.yes_all = True
        self.options.ignore_file_conflicts = True
        self.options.ignore_package_conflicts = True
        self.options.debug = True
        self.options.verbose = True
        self.options.ignore_check = conf.ignorecheck
        self.options.ignore_sandbox = False
        # Set API options
        pisi.api.set_options(self.options)
        # Set IO streams
        pisi.api.set_io_streams(stdout=stdout, stderr=stderr)
        pisi.api.set_userinterface(cli.CLI(stdout))
        # Populated by build(); None until the first build.
        self.builder = None
    def get_new_packages(self):
        # Packages produced by the last build().
        return self.builder.new_packages
    def get_new_debug_packages(self):
        # Debug packages produced by the last build().
        return self.builder.new_debug_packages
    def get_delta_package_map(self):
        # Return type is a dictionary
        return self.builder.delta_map
    def close(self):
        # Flush any buffered pisi UI log output.
        pisi.api.ctx.ui.flush_logs()
    def build(self, pspec):
        """Build the package described by `pspec`, searching old packages so
        delta packages can be produced against the testing repository, the
        stable repository and the previous distribution release."""
        if not os.path.exists(pspec):
            logger.error("'%s' does not exist." % pspec)
        # NOTE(review): execution continues even when pspec is missing -- the
        # Builder below will fail later; consider returning/raising here.
        if conf.sandboxblacklist and \
                utils.get_package_name_from_path(pspec) in conf.sandboxblacklist:
            logger.info("Disabling sandbox for %s" % pspec)
            pisi.api.ctx.set_option("ignore_sandbox", True)
        logger.info("Building %s" % pspec)
        # NOTE(review): relies on `pisi.operations.build` being importable via
        # the `pisi.api` import at the top of this file -- `pisi.operations`
        # is not imported explicitly here; confirm.
        self.builder = pisi.operations.build.Builder(pspec)
        # This will only make builder search for old packages
        # 2 packages for testing repository
        self.builder.search_old_packages_for_delta(max_count=2,
                search_paths=(utils.get_compiled_packages_directory(),))
        if utils.get_stable_packages_directory():
            # 3 packages for stable repository
            self.builder.search_old_packages_for_delta(max_count=3,
                    search_paths=(utils.get_stable_packages_directory(),))
        # and 1 for the previous distribution release (e.g. 2011.1)
        package_name = utils.get_package_name_from_path(pspec)
        last_distro_release = ReleaseCache().get_last_release(package_name)
        if last_distro_release:
            self.builder.search_old_packages_for_delta(release=last_distro_release,
                    search_paths=(utils.get_stable_packages_directory(),))
        self.builder.build()
        logger.info("Created package(s): %s" % self.builder.new_packages)
    def install(self, pkgs):
        """Install the given package files, reinstalling and ignoring
        file/package conflicts as configured in __init__."""
        pisi.api.install(pkgs, ignore_file_conflicts=self.options.ignore_file_conflicts,
                ignore_package_conflicts=self.options.ignore_package_conflicts,
                reinstall=True)
|
Pardus-Linux/buildfarm
|
buildfarm/pisiinterface.py
|
Python
|
gpl-2.0
| 3,559
|
import frappe
from frappe import _
# Desktop Icon specs for each Healthcare doctype; consumed by
# change_healthcare_desktop_icons() below to recreate the icons.
change_icons_map = [
	{
		"module_name": "Patient",
		"color": "#6BE273",
		"icon": "fa fa-user",
		"doctype": "Patient",
		"type": "link",
		"link": "List/Patient",
		"label": _("Patient")
	},
	{
		"module_name": "Patient Encounter",
		"color": "#2ecc71",
		"icon": "fa fa-stethoscope",
		"doctype": "Patient Encounter",
		"type": "link",
		"link": "List/Patient Encounter",
		"label": _("Patient Encounter"),
	},
	{
		"module_name": "Healthcare Practitioner",
		"color": "#2ecc71",
		"icon": "fa fa-user-md",
		"doctype": "Healthcare Practitioner",
		"type": "link",
		"link": "List/Healthcare Practitioner",
		"label": _("Healthcare Practitioner")
	},
	{
		"module_name": "Patient Appointment",
		"color": "#934F92",
		"icon": "fa fa-calendar-plus-o",
		"doctype": "Patient Appointment",
		"type": "link",
		"link": "List/Patient Appointment",
		"label": _("Patient Appointment")
	},
	{
		"module_name": "Lab Test",
		"color": "#7578f6",
		"icon": "octicon octicon-beaker",
		"doctype": "Lab Test",
		"type": "link",
		"link": "List/Lab Test",
		"label": _("Lab Test")
	}
]
def execute():
	"""Patch entry point: rebuild the Healthcare desktop icons."""
	change_healthcare_desktop_icons()
def change_healthcare_desktop_icons():
	"""Recreate the standard (hidden) Desktop Icons for the Healthcare doctypes.

	Reloads the affected doctypes, then for each entry in ``change_icons_map``
	deletes any existing Desktop Icon for that doctype and recreates it;
	finally the old module-level 'Healthcare' icon is replaced the same way.
	"""
	doctypes = ["patient", "patient_encounter", "healthcare_practitioner",
		"patient_appointment", "lab_test"]
	for doctype in doctypes:
		frappe.reload_doc("healthcare", "doctype", doctype)

	for spec in change_icons_map:
		# Parameterized query instead of str.format: avoids SQL injection and
		# quoting problems with doctype names.
		frappe.db.sql("""
			delete from `tabDesktop Icon`
			where _doctype = %s
		""", (spec['doctype'],))

		desktop_icon = frappe.new_doc("Desktop Icon")
		desktop_icon.hidden = 1
		desktop_icon.standard = 1
		desktop_icon.icon = spec['icon']
		desktop_icon.color = spec['color']
		desktop_icon.module_name = spec['module_name']
		desktop_icon.label = spec['label']
		desktop_icon.app = "erpnext"
		desktop_icon.type = spec['type']
		desktop_icon._doctype = spec['doctype']
		desktop_icon.link = spec['link']
		desktop_icon.save(ignore_permissions=True)

	# Replace the legacy module-level Healthcare icon.
	frappe.db.sql("""
		delete from `tabDesktop Icon`
		where module_name = 'Healthcare' and type = 'module'
	""")
	desktop_icon = frappe.new_doc("Desktop Icon")
	desktop_icon.hidden = 1
	desktop_icon.standard = 1
	desktop_icon.icon = "fa fa-heartbeat"
	desktop_icon.color = "#FF888B"
	desktop_icon.module_name = "Healthcare"
	desktop_icon.label = _("Healthcare")
	desktop_icon.app = "erpnext"
	desktop_icon.type = 'module'
	desktop_icon.save(ignore_permissions=True)
|
chdecultot/erpnext
|
erpnext/patches/v11_0/change_healthcare_desktop_icons.py
|
Python
|
gpl-3.0
| 2,450
|
from string import ascii_letters, digits
from django.shortcuts import get_object_or_404
from ngnotifier.settings import API_KEY
from random import choice
from _sha256 import sha224
from datetime import datetime
from uuid import uuid4
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate
from django.core.exceptions import ObjectDoesNotExist
from django.template import RequestContext
from django.template.loader import render_to_string
from django.views.decorators.cache import never_cache
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from ngnotifier.notifs import send_email
from ngnotifier import settings
from ngnotifier.utils import serializable_object, post_article
from push_notifications.models import GCMDevice, APNSDevice
from rest_framework.decorators import api_view
from ngnotifier.decorators import api_key_required, device_login_required
from ngnotifier.models import NGHost, NGGroup, NGNews, DeviceSession, User, Log
from ngnotifier.api_serializers import NGHostSerializer, NGGroupSerializer,\
NGNewsSerializer, NGNewsDetailSerializer, NGNewsSerializerWithNames,\
NGNewsSerializerWithNamesAndHost
from ngnotifier.views import JSONResponse
@never_cache
@csrf_exempt
@require_http_methods("GET")
def host_detail(request, host_url):
    """Return the serialized NGHost identified by `host_url`, or 400 if unknown."""
    try:
        matching_host = NGHost.objects.get(host=host_url)
    except NGHost.DoesNotExist:
        return HttpResponse(status=400)
    return JSONResponse(NGHostSerializer(matching_host).data)
@never_cache
@csrf_exempt
@require_http_methods("GET")
def host_list(request):
    """Return every known NGHost, serialized."""
    all_hosts = NGHost.objects.all()
    return JSONResponse(NGHostSerializer(all_hosts, many=True).data)
@never_cache
@csrf_exempt
@require_http_methods("GET")
def host_last(request, host):
    """Return the latest news across all groups of `host`, newest first.

    GET params: limit (positive integer, default 1000), names (flag: pass
    empty or 'true' to include author names). 400 on unknown host or bad limit.
    """
    try:
        ng_host = NGHost.objects.get(host=host)
    except NGHost.DoesNotExist:
        return HttpResponse(status=400)

    raw_limit = request.GET.get('limit', '1000')
    if not raw_limit.isdigit() or int(raw_limit) <= 0:
        return HttpResponse(status=400)
    limit = int(raw_limit)

    host_groups = NGGroup.objects.filter(host=ng_host)
    latest_news = (NGNews.objects
                   .filter(groups__in=host_groups)
                   .order_by('-date')[:limit])

    # Flag semantics: '?names' (empty) or '?names=true' enable it.
    with_names = request.GET.get('names', 'false') in ('', 'true')
    if with_names:
        serializer = NGNewsSerializerWithNames(latest_news, many=True)
    else:
        serializer = NGNewsSerializer(latest_news, many=True)
    return JSONResponse(serializer.data)
@never_cache
@csrf_exempt
@require_http_methods("GET")
def group_list(request, host):
    """Return all NGGroups of `host`, ordered by name."""
    groups_of_host = NGGroup.objects.filter(host__host=host).order_by('name')
    return JSONResponse(NGGroupSerializer(groups_of_host, many=True).data)
@never_cache
@csrf_exempt
@require_http_methods("GET")
def news_list(request, host, group):
    """Return top-level news (no father) of `group` on `host`, older than
    `start_date`, newest first.

    GET params:
        start_date: ISO-8601 with timezone (%Y-%m-%dT%H:%M:%S%z); default now.
        limit: positive integer, default 1000.
        names: flag (empty or 'true') -> serializer that includes author names.
    Returns 400 on unknown host/group or invalid parameters.
    """
    try:
        host = NGHost.objects.get(host=host)
    except NGHost.DoesNotExist:
        return HttpResponse(status=400)
    start_date = request.GET.get('start_date', '')
    if start_date != '':
        try:
            s_date = datetime.strptime(start_date, '%Y-%m-%dT%H:%M:%S%z')
            s_date = s_date.replace(tzinfo=None)
        except ValueError:
            return HttpResponse(status=400)
    else:
        s_date = datetime.now()
    # BUG FIX: a non-numeric 'limit' used to raise ValueError in int() and
    # produce an HTTP 500; validate like the sibling views and answer 400.
    limit = request.GET.get('limit', '1000')
    if not limit.isdigit() or int(limit) < 1:
        return HttpResponse(status=400)
    limit = int(limit)
    try:
        group = NGGroup.objects.get(host=host, name=group)
    except NGGroup.DoesNotExist:
        return HttpResponse(status=400)
    n_list = NGNews.objects\
        .filter(groups__in=[group], date__lt=s_date, father='')\
        .order_by('-date')[:limit]
    # Flag semantics: '?names' (empty) or '?names=true' enable it.
    names = request.GET.get('names', 'false') in ('', 'true')
    if names:
        serializer = NGNewsSerializerWithNames(n_list, many=True)
    else:
        serializer = NGNewsSerializer(n_list, many=True)
    return JSONResponse(serializer.data)
@never_cache
@csrf_exempt
@require_http_methods("GET")
def news_list_refresh(request, host, group):
    """Return top-level news of `group` on `host` newer than `end_date`,
    newest first (used to refresh an already-loaded list).

    GET params: end_date (ISO-8601 with tz, default epoch), limit (positive
    integer, default 1000), names (flag). 400 on unknown host/group or bad
    parameters.
    """
    try:
        ng_host = NGHost.objects.get(host=host)
    except NGHost.DoesNotExist:
        return HttpResponse(status=400)

    end_date = request.GET.get('end_date', '')
    if end_date == '':
        floor_date = datetime(1970, 1, 1, 00, 00)
    else:
        try:
            floor_date = datetime.strptime(end_date, '%Y-%m-%dT%H:%M:%S%z')
            floor_date = floor_date.replace(tzinfo=None)
        except ValueError:
            return HttpResponse(status=400)

    raw_limit = request.GET.get('limit', '1000')
    if not raw_limit.isdigit() or int(raw_limit) <= 0:
        return HttpResponse(status=400)
    limit = int(raw_limit)

    try:
        ng_group = NGGroup.objects.get(host=ng_host, name=group)
    except NGGroup.DoesNotExist:
        return HttpResponse(status=400)

    fresh_news = (NGNews.objects
                  .filter(groups__in=[ng_group], date__gt=floor_date, father='')
                  .order_by('-date')[:limit])

    # Flag semantics: '?names' (empty) or '?names=true' enable it.
    with_names = request.GET.get('names', 'false') in ('', 'true')
    if with_names:
        serializer = NGNewsSerializerWithNames(fresh_news, many=True)
    else:
        serializer = NGNewsSerializer(fresh_news, many=True)
    return JSONResponse(serializer.data)
@never_cache
@csrf_exempt
@api_view(['GET'])
def news_detail(request, news_id):
    """Return the detail of the thread containing news `news_id`.

    Walks up the `father` chain until the thread root is reached, then
    serializes the root with all its answers. 400 when the id is unknown,
    500 when a referenced father message cannot be resolved.
    """
    news = None
    while True:
        try:
            news = NGNews.objects.get(id=news_id)
        # BUG FIX: this lookup raises NGNews.DoesNotExist; the previous
        # `except NGGroup.DoesNotExist` could never catch it (Django
        # DoesNotExist is per-model), so an unknown id produced an HTTP 500
        # instead of the intended 400.
        except NGNews.DoesNotExist:
            return HttpResponse(status=400)
        if news.father == '':
            break
        try:
            father = NGNews.objects.get(message_id=news.father)
            news_id = father.id
        except Exception:
            # Broken father reference (missing or ambiguous message_id).
            return HttpResponse(status=500)
    serializer = NGNewsDetailSerializer(news)
    return JSONResponse(serializer.to_representation(news))
@never_cache
@csrf_exempt
@api_view(['GET'])
def search(request, host=None, group=None):
    """Search news by author, title and/or message body.

    GET params:
        term: required, at least 2 characters (else 400).
        limit: positive integer, default 1000.
        start_date: ISO-8601 with timezone; only strictly older news match.
        author/title/message: field-selection flags; when none is set all
            three fields are searched.
        case: case-sensitive matching flag.
        names: serialize with author names (and host names when unscoped).
        web_render: any value -> lightweight payload for the web UI.
    The optional host/group URL arguments narrow the scope (404 when they
    cannot be resolved). Flags are enabled when passed empty ('?flag') or
    as 'true'.
    """
    term = request.GET.get('term', '')
    if len(term) < 2:
        return HttpResponse(status=400)
    limit = request.GET.get('limit', '1000')
    if limit == '' or not limit.isdigit():
        return HttpResponse(status=400)
    limit = int(limit)
    if not limit > 0:
        return HttpResponse(status=400)
    start_date = request.GET.get('start_date', '')
    if start_date != '':
        try:
            s_date = datetime.strptime(start_date, '%Y-%m-%dT%H:%M:%S%z')
            s_date = s_date.replace(tzinfo=None)
        except ValueError:
            return HttpResponse(status=400)
    else:
        # Far-future sentinel: effectively no upper date bound.
        s_date = datetime(2099, 1, 1, 00, 00)
    author = request.GET.get('author', 'false')
    author = True if author == '' else (True if author == 'true' else False)
    title = request.GET.get('title', 'false')
    title = True if title == '' else (True if title == 'true' else False)
    message = request.GET.get('message', 'false')
    message = True if message == '' else (True if message == 'true' else False)
    # No field selected means search everything.
    if not author and not title and not message:
        author = True
        title = True
        message = True
    if host and group:
        try:
            host = NGHost.objects.get(host=host)
            groups = [NGGroup.objects.get(host=host, name=group)]
        except:
            return HttpResponse(status=404)
    elif host:
        try:
            host = NGHost.objects.get(host=host)
            groups = NGGroup.objects.filter(host=host)
        except:
            return HttpResponse(status=404)
    else:
        groups = NGGroup.objects.all()
    case = request.GET.get('case', 'false')
    case = True if (case == '') else (True if case == 'true' else False)
    # Union of the per-field matches, deduplicated through set().
    n_list = []
    if author:
        if case:
            a_list = NGNews.objects.filter(groups__in=groups, date__lt=s_date,
                                           email_from__regex=r'^.*' + term + '.*$')
        else:
            a_list = NGNews.objects.filter(groups__in=groups, date__lt=s_date,
                                           email_from__iregex=r'^.*' + term + '.*$')
        n_list = list(set(n_list) | set(a_list))
    if title:
        if case:
            a_list = NGNews.objects.filter(groups__in=groups, date__lt=s_date,
                                           subject__regex=r'^.*' + term + '.*$')
        else:
            a_list = NGNews.objects.filter(groups__in=groups, date__lt=s_date,
                                           subject__iregex=r'^.*' + term + '.*$')
        n_list = list(set(n_list) | set(a_list))
    if message:
        if case:
            a_list = NGNews.objects.filter(groups__in=groups, date__lt=s_date,
                                           contents__regex=r'^.*' + term + '.*$')
        else:
            a_list = NGNews.objects.filter(groups__in=groups, date__lt=s_date,
                                           contents__iregex=r'^.*' + term + '.*$')
        n_list = list(set(n_list) | set(a_list))
    # Newest first, then truncate to the requested page size.
    n_list.sort(key=lambda x: x.date, reverse=True)
    n_list = n_list[:limit]
    names = request.GET.get('names', 'false')
    names = True if names == '' else (True if names == 'true' else False)
    if names:
        if host:
            serializer = NGNewsSerializerWithNames(n_list, many=True)
        else:
            serializer = NGNewsSerializerWithNamesAndHost(n_list, many=True)
    else:
        serializer = NGNewsSerializer(n_list, many=True)
    web_render = request.GET.get('web_render', False) != False
    if web_render:
        data = [serializable_object(n, light=True) for n in n_list]
        return JsonResponse(data, safe=False)
    else:
        return JSONResponse(serializer.data)
@csrf_exempt
@api_view(['POST'])
@api_key_required
def login_phone(request):
    """Log a mobile device in.

    POST params (all required): username, password, service ('android' or
    'ios'), registration_id (push token), device_name.

    On success returns error=0 plus the user's notification settings, the
    list of registered devices and a fresh session_key. Error codes:
    1 bad credentials, 2 unconfirmed account, 3 bad service (all HTTP 403);
    404 when a parameter is missing.
    """
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    service = request.POST.get('service', '')
    registration_id = request.POST.get('registration_id', '')
    device_name = request.POST.get('device_name', '')
    if username == '' or password == '' or \
            service == '' or registration_id == '' or device_name == '':
        return HttpResponse(status=404)
    if not (service == 'android' or service == 'ios'):
        return JsonResponse({'error': 3, 'message': "The service should be either 'android' or 'ios'"}, safe=False, status=403)
    user = authenticate(username=username, password=password)
    if user is not None:
        if user.is_active:
            # Create a DeviceSession bound to this push registration.
            session = user.create_session(service, registration_id, device_name)
            data = {
                'error': 0,
                'notifs': {
                    'email': user.send_emails,
                    'pushbullet': user.send_pushbullets,
                    'pushbullet_api_key': user.pushbullet_api_key,
                    'devices': [
                        {
                            'id': d.id,
                            'name': d.get_name(),
                            'active': d.is_active(),
                            'type': 'android' if d.service == 'AN' else 'ios'
                        } for d in user.get_devices()
                    ] if user.get_devices() else []
                },
                'session_key': session.session_key
            }
            return JsonResponse(data, safe=False)
        else:
            return JsonResponse({'error': 2, 'message': "You must first confirm your account"}, safe=False, status=403)
    return JsonResponse({'error': 1, 'message': "This account does not exist / the specified password is incorrect"}, safe=False, status=403)
@csrf_exempt
@api_view(['GET'])
@api_key_required
@device_login_required
def logout_phone(request, device_session):
    """Log the device out by deleting its DeviceSession (its session key
    becomes invalid)."""
    device_session.delete()
    return HttpResponse(status=200)
@csrf_exempt
@api_view(['GET'])
@api_key_required
@device_login_required
def group_subscriptions(request, device_session, host):
    """List every group of `host` together with the logged-in user's
    subscription state; 404 when the host cannot be resolved."""
    current_user = device_session.get_user()
    try:
        ng_host = NGHost.objects.get(host=host)
        host_groups = NGGroup.objects.filter(host=ng_host)
    except ObjectDoesNotExist:
        return HttpResponse(status=404)
    payload = []
    for grp in host_groups.all():
        payload.append({'name': grp.name,
                        'subscribed': current_user in grp.followers.all()})
    return JsonResponse(payload, safe=False, status=200)
@csrf_exempt
@api_view(['POST'])
@api_key_required
def register_phone(request):
    """Create an account from the mobile app.

    POST params: username (must be a valid email address), password.
    Sends a confirmation email carrying an activation token. Answers 200 on
    success, 400 with an error payload for invalid/duplicate usernames,
    404 when a parameter is missing, 500 when the mail could not be sent.
    """
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    if username == '' or password == '':
        return HttpResponse(status=404)
    try:
        validate_email(username)
    except ValidationError:
        data = {
            "error" : 2,
            "message" : "Invalid username format (must be a valid email address)"
        }
        return JsonResponse(data, safe=False, status=400)
    try:
        User.objects.get(email=username)
        data = {
            "error" : 1,
            "message" : "This mail is already used in an account"
        }
        return JsonResponse(data, safe=False, status=400)
    except ObjectDoesNotExist:
        # Random activation token stored on the user until confirmation.
        token = sha224(uuid4().hex.encode('utf-8')).hexdigest()
        new_user = User()
        new_user.token = token
        new_user.email = username
        new_user.set_password(password)
        new_user.save()
        context = {
            'user': new_user,
            'token': token,
            'site_url': settings.SITE_URL
        }
        # NOTE(review): `context_instance=RequestContext(...)` was removed in
        # Django 1.10 -- fine on the Django version this project pins; confirm.
        html_content = render_to_string(
            'email/token.html',
            context,
            context_instance=RequestContext(request)
        )
        if send_email(new_user.email, 'NG Notifier', html_content, 'html'):
            return HttpResponse(status=200)
        return HttpResponse(status=500)
@csrf_exempt
@api_view(['POST'])
@api_key_required
def forgot_password_phone(request):
    """Send a password-reset email carrying a freshly generated token.

    Deliberately answers 200 for empty or unknown usernames so the
    endpoint cannot be used to probe which addresses have accounts.
    """
    username = request.POST.get('username', '')
    if username == '':
        return HttpResponse(status=200)
    try:
        token = sha224(uuid4().hex.encode('utf-8')).hexdigest()
        user = User.objects.get(email=username)
        user.token = token
        user.save()
        context = {
            'user': user,
            'token': token,
            'site_url': settings.SITE_URL
        }
        html_content = render_to_string(
            'email/forgot_password.html',
            context,
            context_instance=RequestContext(request)
        )
        if send_email([user.email], 'NG Notifier', html_content, 'html'):
            return HttpResponse(status=200)
    except ObjectDoesNotExist:
        return HttpResponse(status=200)
    # BUG FIX: previously fell through and returned None (a server error in
    # Django) when send_email failed; keep answering 200 so the failure does
    # not reveal whether the account exists.
    return HttpResponse(status=200)
@csrf_exempt
@api_view(['POST'])
@api_key_required
def subscribe_notifications(request):
    """Subscribe a device to push notifications for one or more groups.

    Accepts an optional Session header; when absent, reuses (or creates)
    an anonymous user bound to the device's registration id. Returns the
    list of group names that were actually added.
    """
    service = request.POST.get('service', '')
    registration_id = request.POST.get('registration_id', '')
    host = request.POST.get('host', '')
    newsgroup = request.POST.get('newsgroup', '')
    newsgroup_list = newsgroup.split(sep=',')
    if '' in (service, registration_id, host, newsgroup):
        return HttpResponse(status=404)
    if service not in ('android', 'ios'):
        return JsonResponse({'error': 0, 'message': "The service should be either 'android' or 'ios'"}, safe=False, status=403)
    host_obj = get_object_or_404(NGHost, host=host)
    # Optional login via session header.
    session_key = request.META.get('HTTP_SESSION', '')
    try:
        session = DeviceSession.objects.get(session_key=session_key)
    except ObjectDoesNotExist:
        # No logged-in session: look for an existing session tied to this
        # device's push registration id.
        try:
            if service == 'android':
                session = DeviceSession.objects.get(
                    gcm_device=GCMDevice.objects.get(registration_id=registration_id))
            else:
                session = DeviceSession.objects.get(
                    apns_device=APNSDevice.objects.get(registration_id=registration_id))
        except ObjectDoesNotExist:
            # Nothing known about this device: create a throwaway
            # anonymous account and bind a session to it.
            suffix = ''.join(choice(ascii_letters + digits) for _ in range(32))
            anon = User()
            anon.email = suffix + '@anonymo.us'
            anon.is_active = True
            anon.anonymous = True
            anon.send_emails = False
            anon.save()
            session = anon.create_session(service, registration_id, "anonymous")
    user = session.get_user()
    added = []
    for group_name in newsgroup_list:
        if not group_name:
            continue
        group_obj = get_object_or_404(NGGroup, name=group_name, host=host_obj)
        if user not in group_obj.followers.all():
            user.add_ng_group(group_obj)
            added.append(group_name)
    return JsonResponse({'added': added}, safe=False, status=200)
@csrf_exempt
@api_view(['POST'])
@api_key_required
def unsubscribe_notifications(request):
    """Unsubscribe a device from push notifications for one or more groups.

    Mirror image of subscribe_notifications: resolves the user via the
    optional Session header or the device registration id (creating an
    anonymous account as a last resort), then removes the memberships.
    Returns the list of group names that were actually removed.
    """
    service = request.POST.get('service', '')
    registration_id = request.POST.get('registration_id', '')
    host = request.POST.get('host', '')
    newsgroup = request.POST.get('newsgroup', '')
    newsgroup_list = newsgroup.split(sep=',')
    if '' in (service, registration_id, host, newsgroup):
        return HttpResponse(status=404)
    if service not in ('android', 'ios'):
        return JsonResponse({'error': 0, 'message': "The service should be either 'android' or 'ios'"}, safe=False, status=403)
    host_obj = get_object_or_404(NGHost, host=host)
    # Optional login via session header.
    session_key = request.META.get('HTTP_SESSION', '')
    try:
        session = DeviceSession.objects.get(session_key=session_key)
    except ObjectDoesNotExist:
        # No logged-in session: look for an existing session tied to this
        # device's push registration id.
        try:
            if service == 'android':
                session = DeviceSession.objects.get(
                    gcm_device=GCMDevice.objects.get(registration_id=registration_id))
            else:
                session = DeviceSession.objects.get(
                    apns_device=APNSDevice.objects.get(registration_id=registration_id))
        except ObjectDoesNotExist:
            # Nothing known about this device: create a throwaway
            # anonymous account and bind a session to it.
            suffix = ''.join(choice(ascii_letters + digits) for _ in range(32))
            anon = User()
            anon.email = suffix + '@anonymo.us'
            anon.is_active = True
            anon.anonymous = True
            anon.send_emails = False
            anon.save()
            session = anon.create_session(service, registration_id, "anonymous")
    user = session.get_user()
    removed = []
    for group_name in newsgroup_list:
        if not group_name:
            continue
        group_obj = get_object_or_404(NGGroup, name=group_name, host=host_obj)
        if user in group_obj.followers.all():
            user.del_ng_group(group_obj)
            removed.append(group_name)
    return JsonResponse({'removed': removed}, safe=False, status=200)
@csrf_exempt
@api_view(['POST'])
@api_key_required
@device_login_required
def post_phone(request, device_session):
    """Post an article to one or more groups on behalf of a phone client.

    Requires host, groups (comma separated), subject, name, email and
    contents; father_uid optionally names the article being replied to.
    Logs one 'P' entry per target group on success.
    """
    fields = {key: request.POST.get(key, '') for key in
              ('host', 'groups', 'subject', 'name', 'email', 'contents')}
    father_uid = request.POST.get('father_uid', '')
    if '' in fields.values():
        return HttpResponse(status=404)
    try:
        target_groups = [NGGroup.objects.get(name=g, host__host=fields['host'])
                         for g in fields['groups'].split(',')]
    except ObjectDoesNotExist:
        return HttpResponse(status=400)
    father_news = None
    if father_uid != '':
        father_news = get_object_or_404(NGNews, message_id=father_uid)
    if not post_article(fields['name'], fields['email'], target_groups,
                        fields['subject'], fields['contents'], father_news):
        return HttpResponse(status=500)
    for group in target_groups:
        entry = Log()
        entry.type = 'P'
        entry.user = device_session.get_user()
        entry.description = fields['subject'] + ' ' + fields['name']
        entry.group = group
        entry.save()
    return HttpResponse(status=200)
@csrf_exempt
@api_view(['POST'])
@api_key_required
def update_regid(request):
    """Swap a device's push registration id (GCM or APNS) for a new one.

    Recreates the device object under the new id, keeps the original
    device name, and reattaches it to the same DeviceSession. Unknown
    previous ids are ignored (200) so clients can retry idempotently.
    """
    service = request.POST.get('service', '')
    # BUG FIX: these two gets were written request.POST.get('previous' '')
    # (missing comma) -- implicit string concatenation meant no default was
    # passed, so absent keys produced None and slipped past the '' guard.
    previous = request.POST.get('previous', '')
    new = request.POST.get('new', '')
    if service == '' or previous == '' or new == '':
        return HttpResponse(status=404)
    if previous == new:
        return HttpResponse(status=400)
    try:
        if service == 'android':
            previous_device = GCMDevice.objects.get(registration_id=previous)
            device_name = previous_device.name
            session = DeviceSession.objects.get(gcm_device=previous_device)
            session.gcm_device.delete()
            gcm_device = GCMDevice(registration_id=new, user=session.get_user(), name=device_name)
            gcm_device.save()
            session.gcm_device = gcm_device
        else:
            previous_device = APNSDevice.objects.get(registration_id=previous)
            device_name = previous_device.name
            session = DeviceSession.objects.get(apns_device=previous_device)
            session.apns_device.delete()
            apns_device = APNSDevice(registration_id=new, user=session.get_user(), name=device_name)
            apns_device.save()
            session.apns_device = apns_device
        session.save()
    except ObjectDoesNotExist:
        # Unknown previous id or session: nothing to update.
        pass
    return HttpResponse(status=200)
@csrf_exempt
@api_view(['POST'])
@api_key_required
def get_subscribed_groups(request):
    """Return the names of the groups followed by the user owning a device.

    The user is resolved purely from the push registration id (no session
    header). Responses: 404 on missing fields or unknown device, 403 for
    an unknown service name.
    """
    service = request.POST.get('service', '')
    registration_id = request.POST.get('registration_id', '')
    host = request.POST.get('host', '')
    if service == '' or registration_id == '' or host == '':
        return HttpResponse(status=404)
    if not (service == 'android' or service == 'ios'):
        return JsonResponse({'error': 0, 'message': "The service should be either 'android' or 'ios'"}, safe=False, status=403)
    # NOTE(review): host_obj is validated but never used to restrict the
    # query below -- confirm whether results should be filtered by host.
    host_obj = get_object_or_404(NGHost, host=host)
    # Try to find if an anonymous user already exists for this device
    try:
        if service == 'android':
            device = GCMDevice.objects.get(registration_id=registration_id)
            session = DeviceSession.objects.get(gcm_device=device)
        else:
            device = APNSDevice.objects.get(registration_id=registration_id)
            session = DeviceSession.objects.get(apns_device=device)
    except ObjectDoesNotExist:
        return HttpResponse(status=404)
    user = session.get_user()
    data = {'subscribed_groups': [g.name for g in NGGroup.objects.filter(followers__in=[user])]}
    # BUG FIX: was JSONResponse (not defined in this module); every other
    # view here returns django.http.JsonResponse.
    return JsonResponse(data, safe=False, status=200)
|
Dubrzr/NG-Notifier
|
ngnotifier/api_views.py
|
Python
|
mit
| 23,722
|
#!/usr/bin/env python
# #
# Copyright 2009-2016 Ghent University
#
# This file is part of hanythingondemand
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/hanythingondemand
#
# hanythingondemand is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# hanythingondemand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hanythingondemand. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Destroy an HOD cluster.
@author: Kenneth Hoste (Ghent University)
"""
import os
import sys
from vsc.utils.generaloption import GeneralOption
import hod
import hod.rmscheduler.rm_pbs as rm_pbs
from hod.cluster import cluster_info_exists, cluster_jobid, rm_cluster_info, rm_cluster_localworkdir
from hod.subcommands.subcommand import SubCommand
class DestroyOptions(GeneralOption):
    """Option parser for 'destroy' subcommand."""
    # Reported by the generaloption machinery for --version.
    VERSION = hod.VERSION
    # Accept positional arguments (the cluster label) without an option flag.
    ALLOPTSMANDATORY = False  # let us use optionless arguments.
class DestroySubCommand(SubCommand):
    """
    Implementation of HOD 'destroy' subcommand;
    destroys HOD cluster with specified label, regardless of cluster state, i.e.:
    * delete job (if it is still present)
    * remove hod.d directory corresponding to this cluster
    * remove local work directory used by this cluster

    NOTE: Python 2 code (print statements, raw_input, StandardError).
    """
    CMD = 'destroy'
    HELP = "Destroy an HOD cluster."
    EXAMPLE = "hod destroy <label>"
    def run(self, args):
        """Run 'destroy' subcommand.

        Returns 0 on success; errors are routed through self.report_error /
        self._log_and_raise rather than raised directly.
        """
        optparser = DestroyOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
        try:
            label, jobid = None, None
            # First positional argument (after the subcommand) is the label.
            if len(optparser.args) > 1:
                label = optparser.args[1]
                print "Destroying HOD cluster with label '%s'..." % label
            else:
                self.report_error("No label provided.")
            try:
                jobid = cluster_jobid(label)
                print "Job ID: %s" % jobid
            except ValueError as err:
                # NOTE(review): passes the exception object itself as the
                # message; elsewhere a format string is used -- confirm
                # report_error accepts both.
                self.report_error(err)
            # try to figure out job state
            job_state = None
            pbs = rm_pbs.Pbs(optparser)
            jobs = pbs.state()
            pbsjobs = [job for job in jobs if job.jobid == jobid]
            self.log.debug("Matching jobs for job ID '%s': %s", jobid, pbsjobs)
            if len(pbsjobs) == 1:
                job_state = pbsjobs[0].state
                print "Job status: %s" % job_state
            elif len(pbsjobs) == 0:
                print "(job no longer found)"
            else:
                self.report_error("Multiple jobs found with job ID '%s': %s", jobid, pbsjobs)
            # request confirmation in case the job is currently running
            if job_state == 'R':
                resp = raw_input("Confirm destroying the *running* HOD cluster with label '%s'? [y/n]: " % label)
                if resp != 'y':
                    print "(destruction aborted)"
                    return
            elif job_state in ['C', 'E']:
                # Completed/exiting jobs need no qdel; clear the state so the
                # deletion step below is skipped.
                print "(job has already ended/completed)"
                job_state = None
            print "\nStarting actual destruction of HOD cluster with label '%s'...\n" % label
            # actually destroy HOD cluster by deleting job and removing cluster info dir and local work dir
            if job_state is not None:
                # if job was not successfully deleted, pbs.remove will print an error message
                if pbs.remove(jobid):
                    print "Job with ID %s deleted." % jobid
            rm_cluster_localworkdir(label)
            if cluster_info_exists(label):
                rm_cluster_info(label)
            print "\nHOD cluster with label '%s' (job ID: %s) destroyed." % (label, jobid)
        except StandardError as err:
            self._log_and_raise(err)
        return 0
|
boegel/hanythingondemand
|
hod/subcommands/destroy.py
|
Python
|
gpl-2.0
| 4,546
|
# EFILTER Forensic Query Language
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EFILTER individual object filter and matcher.
"""
__author__ = "Adam Sindelar <adamsh@google.com>"
# pylint: disable=function-redefined
import collections
import re
import six
from efilter import ast
from efilter import dispatch
from efilter import errors
from efilter import protocol
from efilter import query as q
from efilter import scope
from efilter.ext import row_tuple
from efilter.protocols import applicative
from efilter.protocols import associative
from efilter.protocols import boolean
from efilter.protocols import counted
from efilter.protocols import number
from efilter.protocols import ordered
from efilter.protocols import reducer
from efilter.protocols import repeated
from efilter.protocols import structured
from efilter.stdlib import core as std_core
# value: the evaluation result; branch: the last AST branch that decided the
# outcome (only meaningful for boolean AND/OR/NOT queries).
Result = collections.namedtuple("Result", ["value", "branch"])
@dispatch.multimethod
def solve(query, vars):
    """Evaluate the 'query' using variables in 'vars'.
    Canonical implementation of the EFILTER AST's actual behavior. This may
    not be the most optimal way of executing the query, but it is guaranteed
    to have full coverage without falling through to some other implementation.
    Arguments:
        query: The instance of Query to evaluate against data in vars.
        vars: An object implementing IStructured (like a dict) containing
            pairs of variable -> value. Best thing to pass is an instance of
            efilter.scope.ScopeStack, which is what the solver will convert
            'vars' to anyway, eventually.
    Returns:
        Instance of Result, with members set as follows:
            value: The result of evaluation. The type of the result can be
                determined by calling infer_type on 'query'.
            branch: An instance of Expression, representing a subtree of 'query'
                that was that last branch evaluated before a match was produced.
                This only applies to simple queries using AND/OR and NOT
                operators, which evaluate to booleans and can terminate early.
                For other queries this will be set to None.
    """
    # Base multimethod implementation: concrete behavior is registered per
    # AST node type via @solve.implementation below.
    _ = query, vars
    raise NotImplementedError()
def __solve_for_repeated(expr, vars):
    """Helper: solve 'expr' always returning an IRepeated.
    If the result of solving 'expr' is a list or a tuple of IStructured objects
    then treat is as a repeated value of IStructured objects because that's
    what the called meant to do. This is a convenience helper so users of the
    API don't have to create IRepeated objects.
    If the result of solving 'expr' is a scalar then return it as a repeated
    value of one element.
    Arguments:
        expr: Expression to solve.
        vars: The scope.
    Returns:
        IRepeated result of solving 'expr'.
        A booelan to indicate whether the original was repeating.
    """
    var = solve(expr, vars).value
    # Only the first element's protocol is checked; the rest are assumed
    # homogeneous (meld enforces that downstream).
    if (var and isinstance(var, (tuple, list))
            and protocol.implements(var[0], structured.IStructured)):
        return repeated.meld(*var), False
    return var, repeated.isrepeating(var)
def __solve_for_scalar(expr, vars):
    """Helper: solve 'expr' always returning a scalar (not IRepeated).
    If the output of 'expr' is a single value or a single RowTuple with a single
    column then return the value in that column. Otherwise raise.
    Arguments:
        expr: Expression to solve.
        vars: The scope.
    Returns:
        A scalar value (not an IRepeated).
    Raises:
        EfilterTypeError if it cannot get a scalar.
    """
    var = solve(expr, vars).value
    # getvalue raises TypeError when 'var' holds more than one element.
    try:
        scalar = repeated.getvalue(var)
    except TypeError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="Wasn't expecting more than one value here. Got %r."
            % (var,))
    # Unwrap a single-column RowTuple down to its one cell.
    if isinstance(scalar, row_tuple.RowTuple):
        try:
            return scalar.get_singleton()
        except ValueError:
            raise errors.EfilterTypeError(
                root=expr, query=expr.source,
                message="Was expecting a scalar value here. Got %r."
                % (scalar,))
    else:
        return scalar
def __solve_and_destructure_repeated(expr, vars):
    """Helper: solve 'expr' always returning a list of scalars.
    If the output of 'expr' is one or more row tuples with only a single column
    then return a repeated value of values in that column. If there are more
    than one column per row then raise.
    This returns a list because there's no point in wrapping the scalars in
    a repeated value for use internal to the implementing solver.
    Returns:
        Two values:
            - An iterator (not an IRepeated!) of scalars.
            - A boolean to indicate whether the original value was repeating.
    Raises:
        EfilterTypeError if the values don't conform.
    """
    iterable, isrepeating = __solve_for_repeated(expr, vars)
    if iterable is None:
        return (), isrepeating
    if not isrepeating:
        # A lone scalar is wrapped so the caller always gets a sequence.
        return [iterable], False
    values = iter(iterable)
    try:
        value = next(values)
    except StopIteration:
        # Empty repeated value.
        return (), True
    if not isinstance(value, row_tuple.RowTuple):
        result = [value]
        # We skip type checking the remaining values because it'd be slow.
        result.extend(values)
        return result, True
    # RowTuple path: every row must collapse to exactly one column.
    try:
        result = [value.get_singleton()]
        for value in values:
            result.append(value.get_singleton())
        return result, True
    except ValueError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="Was expecting exactly one column in %r." % (value,))
def __nest_scope(expr, outer, inner):
    """Push 'inner' onto 'outer' as a nested lexical scope.

    Produces a friendlier EfilterTypeError when 'inner' cannot serve as a
    scope (e.g. it is a function, or not IStructured at all).
    """
    try:
        return scope.ScopeStack(outer, inner)
    except TypeError:
        if protocol.implements(inner, applicative.IApplicative):
            msg = "Attempting to use a function %r as an object." % inner
        else:
            msg = "Attempting to use %r as an object (IStructured)." % inner
        raise errors.EfilterTypeError(root=expr, query=expr.source,
                                      message=msg)
@solve.implementation(for_type=q.Query)
def solve_query(query, vars):
    """Entry point: solve a whole Query, attaching source text to errors."""
    # Standard library must always be included. Others are optional, and the
    # caller can add them to vars using ScopeStack.
    vars = scope.ScopeStack(std_core.MODULE, vars)
    try:
        return solve(query.root, vars)
    except errors.EfilterError as error:
        # Annotate errors raised deeper in the tree with the query source,
        # unless a more specific subexpression already set it.
        if not error.query:
            error.query = query.source
        raise
@solve.implementation(for_type=ast.Literal)
def solve_literal(expr, vars):
    """A literal evaluates to its stored value; the scope is irrelevant."""
    del vars  # Unused by design.
    return Result(expr.value, ())
@solve.implementation(for_type=ast.Var)
def solve_var(expr, vars):
    """Returns the value of the var named in the expression.

    Translates low-level lookup failures into EFILTER's richer error types
    so the user sees the offending variable and query source.
    """
    try:
        return Result(structured.resolve(vars, expr.value), ())
    except (KeyError, AttributeError) as e:
        # Raise a better exception for accessing a non-existent member.
        raise errors.EfilterKeyError(root=expr, key=expr.value, message=e,
                                     query=expr.source)
    except (TypeError, ValueError) as e:
        # Raise a better exception for what is probably a null pointer error.
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Trying to access member %r of a null." % expr.value)
        else:
            raise errors.EfilterTypeError(
                root=expr, query=expr.source,
                message="%r (vars: %r)" % (e, vars))
    except NotImplementedError:
        # LINT FIX: the exception was previously bound ('as e') but never
        # used in the message below.
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Trying to access member %r of an instance of %r." %
            (expr.value, type(vars)))
@solve.implementation(for_type=ast.Select)
def solve_select(expr, vars):
    """Use IAssociative.select to get key (rhs) from the data (lhs).
    This operation supports both scalars and repeated values on the LHS -
    selecting from a repeated value implies a map-like operation and returns a
    new repeated value.
    """
    data, _ = __solve_for_repeated(expr.lhs, vars)
    key = solve(expr.rhs, vars).value
    try:
        # Map the selection over every LHS value (one value for scalars).
        results = [associative.select(d, key) for d in repeated.getvalues(data)]
    except (KeyError, AttributeError):
        # Raise a better exception for accessing a non-existent key.
        raise errors.EfilterKeyError(root=expr, key=key, query=expr.source)
    except (TypeError, ValueError):
        # Raise a better exception for what is probably a null pointer error.
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Cannot select key %r from a null." % key)
        else:
            raise
    except NotImplementedError:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Cannot select keys from a non-associative value.")
    # Collapse the per-value results back into a single repeated value.
    return Result(repeated.meld(*results), ())
@solve.implementation(for_type=ast.Resolve)
def solve_resolve(expr, vars):
    """Use IStructured.resolve to get member (rhs) from the object (lhs).
    This operation supports both scalars and repeated values on the LHS -
    resolving from a repeated value implies a map-like operation and returns a
    new repeated values.
    """
    objs, _ = __solve_for_repeated(expr.lhs, vars)
    member = solve(expr.rhs, vars).value
    try:
        # Map the member access over every LHS object.
        results = [structured.resolve(o, member)
                   for o in repeated.getvalues(objs)]
    except (KeyError, AttributeError):
        # Raise a better exception for the non-existent member.
        raise errors.EfilterKeyError(root=expr.rhs, key=member,
                                     query=expr.source)
    except (TypeError, ValueError):
        # Is this a null object error?
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Cannot resolve member %r from a null." % member)
        else:
            raise
    except NotImplementedError:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Cannot resolve members from a non-structured value.")
    # Collapse the per-object results back into a single repeated value.
    return Result(repeated.meld(*results), ())
@solve.implementation(for_type=ast.Apply)
def solve_apply(expr, vars):
    """Returns the result of applying function (lhs) to its arguments (rest).
    We use IApplicative to apply the function, because that gives the host
    application an opportunity to compare the function being called against
    a whitelist. EFILTER will never directly call a function that wasn't
    provided through a protocol implementation.
    """
    func = __solve_for_scalar(expr.func, vars)
    args = []
    kwargs = {}
    for arg in expr.args:
        # A Pair argument is a keyword argument (name: value).
        if isinstance(arg, ast.Pair):
            if not isinstance(arg.lhs, ast.Var):
                raise errors.EfilterError(
                    root=arg.lhs,
                    message="Invalid argument name.")
            # NOTE(review): validation checks arg.lhs but the name/value are
            # read via arg.key/arg.value -- presumably aliases on ast.Pair;
            # confirm.
            kwargs[arg.key.value] = solve(arg.value, vars).value
        else:
            args.append(solve(arg, vars).value)
    result = applicative.apply(func, args, kwargs)
    return Result(result, ())
@solve.implementation(for_type=ast.Bind)
def solve_bind(expr, vars):
    """Build a RowTuple from key/value pairs under the bind.
    The Bind subtree is arranged as follows:
    Bind
    | First KV Pair
    | | First Key Expression
    | | First Value Expression
    | Second KV Pair
    | | Second Key Expression
    | | Second Value Expression
    Etc...
    As we evaluate the subtree, each subsequent KV pair is evaluated with
    the all previous bingings already in scope. For example:
    bind(x: 5, y: x + 5) # Will bind y = 10 because x is already available.
    """
    value_expressions = []
    keys = []
    # Keys are evaluated eagerly up front; values are deferred so each can
    # see the bindings established before it.
    for pair in expr.children:
        keys.append(solve(pair.key, vars).value)
        value_expressions.append(pair.value)
    result = row_tuple.RowTuple(ordered_columns=keys)
    # The partially-filled RowTuple itself serves as the innermost scope.
    intermediate_scope = scope.ScopeStack(vars, result)
    for idx, value_expression in enumerate(value_expressions):
        value = solve(value_expression, intermediate_scope).value
        # Update the intermediate bindings so as to make earlier bindings
        # already available to the next child-expression.
        result[keys[idx]] = value
    return Result(result, ())
@solve.implementation(for_type=ast.Repeat)
def solve_repeat(expr, vars):
    """Build a repeated value from the subexpressions' results."""
    # Both the child evaluation and the meld stay inside the try so a
    # TypeError from either is reported as a typed EFILTER error.
    try:
        values = [solve(child, vars).value for child in expr.children]
        return Result(repeated.meld(*values), ())
    except TypeError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="All values in a repeated value must be of the same type.")
@solve.implementation(for_type=ast.Tuple)
def solve_tuple(expr, vars):
    """Build a tuple by evaluating each subexpression in order."""
    members = [solve(child, vars).value for child in expr.children]
    return Result(tuple(members), ())
@solve.implementation(for_type=ast.IfElse)
def solve_ifelse(expr, vars):
    """Evaluate guards in order; solve the branch of the first true one.

    Falls back to the default branch when no guard matches.
    """
    for guard, branch in expr.conditions():
        if boolean.asbool(solve(guard, vars).value):
            return solve(branch, vars)
    return solve(expr.default(), vars)
@solve.implementation(for_type=ast.Map)
def solve_map(expr, vars):
    """Solves the map-form, by recursively calling its RHS with new vars.
    let-forms are binary expressions. The LHS should evaluate to an IAssociative
    that can be used as new vars with which to solve a new query, of which
    the RHS is the root. In most cases, the LHS will be a Var (var).
    Typically, map-forms result from the dotty "dot" (.) operator. For example,
    the query "User.name" will translate to a map-form with the var "User"
    on LHS and a var to "name" on the RHS. With top-level vars being
    something like {"User": {"name": "Bob"}}, the Var on the LHS will
    evaluate to {"name": "Bob"}, which subdict will then be used on the RHS as
    new vars, and that whole form will evaluate to "Bob".
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
    def lazy_map():
        # NOTE: evaluation is lazy -- errors below only surface when the
        # returned repeated value is iterated.
        try:
            for lhs_value in repeated.getvalues(lhs_values):
                yield solve(expr.rhs,
                            __nest_scope(expr.lhs, vars, lhs_value)).value
        except errors.EfilterNoneError as error:
            # Re-root null errors at the map-form for a better message.
            error.root = expr
            raise
    return Result(repeated.lazy(lazy_map), ())
@solve.implementation(for_type=ast.Let)
def solve_let(expr, vars):
    """Solves a let-form by calling RHS with nested scope.

    The LHS must yield an IStructured object, which becomes the innermost
    scope for evaluating the RHS.
    """
    lhs_value = solve(expr.lhs, vars).value
    if not isinstance(lhs_value, structured.IStructured):
        # CONSISTENCY FIX: was query=expr.original; every other error raised
        # in this module attaches expr.source -- 'original' looks like a
        # typo (confirm against the ast.Expression API).
        raise errors.EfilterTypeError(
            root=expr.lhs, query=expr.source,
            message="The LHS of 'let' must evaluate to an IStructured. Got %r."
            % (lhs_value,))
    return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
@solve.implementation(for_type=ast.Filter)
def solve_filter(expr, vars):
    """Keep the LHS values for which the RHS predicate is truthy.

    The result is a lazily-evaluated repeated value.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
    def generate_matches():
        for candidate in repeated.getvalues(lhs_values):
            predicate = solve(expr.rhs,
                              __nest_scope(expr.lhs, vars, candidate))
            if predicate.value:
                yield candidate
    return Result(repeated.lazy(generate_matches), ())
@solve.implementation(for_type=ast.Reducer)
def solve_reducer(expr, vars):
    """Wrap the delegate reducer so each row is first passed through the
    mapper expression (evaluated in a scope nested on that row)."""
    def _mapper(rows):
        # Hoist the attribute lookup out of the loop.
        mapper = expr.mapper
        for row in rows:
            yield solve(mapper, __nest_scope(expr.lhs, vars, row)).value
    delegate = solve(expr.reducer, vars).value
    return Result(reducer.Map(delegate=delegate, mapper=_mapper), ())
@solve.implementation(for_type=ast.Group)
def solve_group(expr, vars):
    """Group LHS rows by the grouper expression and fold the composed
    reducers over each group, processing the input in chunks."""
    rows, _ = __solve_for_repeated(expr.lhs, vars)
    reducers = [solve(child, vars).value for child in expr.reducers]
    r = reducer.Compose(*reducers)
    # Partial (per-chunk) fold results, keyed by group key.
    intermediates = {}
    # To avoid loading too much data into memory we segment the input rows.
    for chunk in reducer.generate_chunks(rows, reducer.DEFAULT_CHUNK_SIZE):
        # Group rows based on the output of the grouper expression.
        groups = {}
        for value in chunk:
            key = solve(expr.grouper, __nest_scope(expr.lhs, vars, value)).value
            grouped_values = groups.setdefault(key, [])
            grouped_values.append(value)
        # Fold each group in this chunk, merge with previous intermediate, if
        # any.
        for key, group in six.iteritems(groups):
            intermediate = reducer.fold(r, group)
            previous = intermediates.get(key)
            if previous:
                intermediate = reducer.merge(r, intermediate, previous)
            intermediates[key] = intermediate
    # This could equally well return a lazy repeated value to avoid finalizing
    # right away. The assumption here is that finalize is cheap, at least
    # compared to fold and merge, which already have to run eagerly. Using a
    # lazy value here would keep the intermediates around in memory, and just
    # doesn't seem worth it.
    results = [reducer.finalize(r, intermediate)
               for intermediate in six.itervalues(intermediates)]
    return Result(repeated.meld(*results), ())
@solve.implementation(for_type=ast.Sort)
def solve_sort(expr, vars):
    """Sort values on the LHS by the value they yield when passed to RHS."""
    values, _ = __solve_for_repeated(expr.lhs, vars)
    values = repeated.getvalues(values)
    key_expression = expr.rhs
    def _sort_key(element):
        nested = __nest_scope(expr.lhs, vars, element)
        return solve(key_expression, nested).value
    ordered_values = ordered.ordered(values, key_func=_sort_key)
    return Result(repeated.meld(*ordered_values), ())
@solve.implementation(for_type=ast.Each)
def solve_each(expr, vars):
    """Return True if RHS evaluates to a true value with each state of LHS.
    If LHS evaluates to a normal IAssociative object then this is the same as
    a regular let-form, except the return value is always a boolean. If LHS
    evaluates to a repeated var (see efilter.protocols.repeated) of
    IAssociative objects then RHS will be evaluated with each state and True
    will be returned only if each result is true.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
    for lhs_value in repeated.getvalues(lhs_values):
        result = solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
        if not result.value:
            # Each is required to return an actual boolean.
            # Short-circuit: keep the failing branch, coerce value to False.
            return result._replace(value=False)
    return Result(True, ())
@solve.implementation(for_type=ast.Any)
def solve_any(expr, vars):
    """Same as Each, except returning True on first true result at LHS."""
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
    try:
        rhs = expr.rhs
    except IndexError:
        # Child 1 is out of range. There is no condition on the RHS.
        # Just see if we have anything on the LHS.
        return Result(len(repeated.getvalues(lhs_values)) > 0, ())
    # Default for an empty LHS: no value matched.
    result = Result(False, ())
    for lhs_value in repeated.getvalues(lhs_values):
        result = solve(rhs, __nest_scope(expr.lhs, vars, lhs_value))
        if result.value:
            # Any is required to return an actual boolean.
            return result._replace(value=True)
    return result
@solve.implementation(for_type=ast.Cast)
def solve_cast(expr, vars):
    """Cast the LHS value to the type named by the RHS.

    Raises EfilterTypeError when the RHS is not a known type, or when the
    conversion itself fails.
    """
    value = solve(expr.lhs, vars).value
    target_type = solve(expr.rhs, vars).value
    if target_type is None:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="Cannot find type named %r." % expr.rhs.value)
    if not isinstance(target_type, type):
        raise errors.EfilterTypeError(
            root=expr.rhs, query=expr.source,
            message="%r is not a type and cannot be used with 'cast'."
            % (target_type,))
    try:
        converted = target_type(value)
    except TypeError:
        raise errors.EfilterTypeError(
            root=expr, query=expr.source,
            message="Invalid cast %s -> %s." % (type(value), target_type))
    return Result(converted, ())
@solve.implementation(for_type=ast.IsInstance)
def solve_isinstance(expr, vars):
    """Typecheck whether the LHS value implements the type on the RHS."""
    lhs_result = solve(expr.lhs, vars)
    try:
        rhs_type = solve(expr.rhs, vars).value
    except errors.EfilterKeyError:
        # Treat an unresolvable name the same as a missing type.
        rhs_type = None
    if rhs_type is None:
        raise errors.EfilterTypeError(
            root=expr.rhs, query=expr.source,
            message="Cannot find type named %r." % expr.rhs.value)
    if not isinstance(rhs_type, type):
        raise errors.EfilterTypeError(
            root=expr.rhs, query=expr.source,
            message="%r is not a type and cannot be used with 'isa'."
            % (rhs_type,))
    return Result(protocol.implements(lhs_result.value, rhs_type), ())
@solve.implementation(for_type=ast.Complement)
def solve_complement(expr, vars):
    """Logical NOT: negate the truthiness of the subexpression's value."""
    inner = solve(expr.value, vars)
    return inner._replace(value=not inner.value)
@solve.implementation(for_type=ast.Intersection)
def solve_intersection(expr, vars):
    """Logical AND with short-circuit; yields the last child's result."""
    result = Result(False, ())
    for subexpr in expr.children:
        result = solve(subexpr, vars)
        if not result.value:
            # Intersections don't preserve the last value the way Unions do.
            return result._replace(value=False)
    return result
@solve.implementation(for_type=ast.Union)
def solve_union(expr, vars):
    """Logical OR with short-circuit; records which child branch matched."""
    for child in expr.children:
        result = solve(child, vars)
        if result.value:
            # Don't replace a matched child branch. Also, preserve the actual
            # value of the last subexpression (as opposed to just returning a
            # boolean).
            if result.branch:
                return result
            return result._replace(branch=child)
    # No child was truthy.
    return Result(False, ())
@solve.implementation(for_type=ast.Pair)
def solve_pair(expr, vars):
    """Evaluate both sides of the pair and return them as a 2-tuple."""
    left = solve(expr.lhs, vars).value
    right = solve(expr.rhs, vars).value
    return Result((left, right), ())
@solve.implementation(for_type=ast.Sum)
def solve_sum(expr, vars):
    """Add up the scalar children; raise a typed error on non-numbers."""
    running = 0
    for term in expr.children:
        addend = __solve_for_scalar(term, vars)
        try:
            running += addend
        except TypeError:
            raise errors.EfilterTypeError(expected=number.INumber,
                                          actual=type(addend),
                                          root=term, query=expr.source)
    return Result(running, ())
@solve.implementation(for_type=ast.Difference)
def solve_difference(expr, vars):
    """Left-fold subtraction over the scalar children (a - b - c - ...)."""
    children = enumerate(expr.children)
    _, first_child = next(children)
    difference = __solve_for_scalar(first_child, vars)
    for idx, child in children:
        val = __solve_for_scalar(child, vars)
        try:
            difference -= val
        except TypeError:
            # Determine which operand's type caused the error: on the first
            # subtraction blame the accumulated LHS, otherwise the new RHS.
            if idx == 1:
                actual_t = type(difference)
            else:
                actual_t = type(val)
            raise errors.EfilterTypeError(expected=number.INumber,
                                          actual=actual_t,
                                          root=expr.children[idx - 1],
                                          query=expr.source)
    return Result(difference, ())
@solve.implementation(for_type=ast.Product)
def solve_product(expr, vars):
    """Multiply all children together; each must be a scalar number."""
    acc = 1
    for subexpr in expr.children:
        factor = __solve_for_scalar(subexpr, vars)
        try:
            acc *= factor
        except TypeError:
            # A non-numeric factor; blame the child that produced it.
            raise errors.EfilterTypeError(expected=number.INumber,
                                          actual=type(factor),
                                          root=subexpr,
                                          query=expr.source)

    return Result(acc, ())
@solve.implementation(for_type=ast.Quotient)
def solve_quotient(expr, vars):
    """Divide the first child by each of the remaining children in turn."""
    children = enumerate(expr.children)
    _, head = next(children)
    acc = __solve_for_scalar(head, vars)

    for idx, subexpr in children:
        divisor = __solve_for_scalar(subexpr, vars)
        try:
            acc /= divisor
        except TypeError:
            # On the very first division the non-numeric operand may be
            # the initial value itself rather than the divisor.
            offender = type(acc) if idx == 1 else type(divisor)
            raise errors.EfilterTypeError(expected=number.INumber,
                                          actual=offender,
                                          root=expr.children[idx - 1],
                                          query=expr.source)

    return Result(acc, ())
@solve.implementation(for_type=ast.Equivalence)
def solve_equivalence(expr, vars):
    """Return True only if every child solves to the same scalar value."""
    remaining = iter(expr.children)
    baseline = __solve_for_scalar(next(remaining), vars)
    for subexpr in remaining:
        # Deliberately `not x == y` (rather than `!=`) to rely solely on
        # the operands' __eq__, as the original comparison did.
        if not __solve_for_scalar(subexpr, vars) == baseline:
            return Result(False, ())

    return Result(True, ())
@solve.implementation(for_type=ast.Membership)
def solve_membership(expr, vars):
    """Decide whether the needle (expr.element) is a member of expr.set.

    Returns:
        Result whose value is the membership outcome.

    Raises:
        errors.EfilterError: if the needle itself is a repeated value.
    """
    # There is an expectation that "foo" in "foobar" will be true, and,
    # simultaneously, that "foo" in ["foobar"] will be false. This is how the
    # analogous operator works in Python, among other languages. Where this
    # mental model breaks down is around repeated values, because, in EFILTER,
    # there is no difference between a tuple of one value and the one value,
    # so that "foo" in ("foobar") is true, while "foo" in ("foobar", "bar") is
    # false and "foo" in ("foo", "bar") is again true. These semantics are a
    # little unfortunate, and it may be that, in the future, the in operator
    # is disallowed on repeated values to prevent ambiguity.
    needle = solve(expr.element, vars).value
    # A repeated needle is ambiguous; reject it outright.
    if repeated.isrepeating(needle):
        raise errors.EfilterError(
            root=expr.element, query=expr.source,
            message=("More than one value not allowed in the needle. "
                     "Got %d values.") % counted.count(needle))
    # We need to fall through to __solve_and_destructure_repeated to handle
    # row tuples correctly.
    haystack, isrepeating = __solve_and_destructure_repeated(expr.set, vars)
    # For non-repeated values just use the first (singleton) value.
    if not isrepeating:
        for straw in haystack:
            haystack = straw
            break
    # Substring semantics for string haystacks.
    if isinstance(haystack, six.string_types):
        return Result(needle in haystack, ())
    # Repeated values of more than one value and collections behave the same.
    # There are no proper sets in EFILTER so O(N) is what we get.
    if isrepeating or isinstance(haystack, (tuple, list)):
        for straw in haystack: # We're all farmers here.
            if straw == needle:
                return Result(True, ())
        return Result(False, ())
    # If haystack is not a repeating value, but it is iterable then it must
    # have originated from outside EFILTER. Lets try to do the right thing and
    # delegate to Python.
    # NOTE(review): only the FIRST element of the iterable is consulted, and
    # the branch slot here is None while every other return uses () — confirm
    # this asymmetry is intentional.
    for straw in haystack:
        return Result(needle in straw, None)
    return Result(False, ())
@solve.implementation(for_type=ast.RegexFilter)
def solve_regexfilter(expr, vars):
    """Search the LHS string for the RHS regex; the match object is truthy."""
    subject = __solve_for_scalar(expr.string, vars)
    pattern = __solve_for_scalar(expr.regex, vars)
    match = re.compile(pattern).search(six.text_type(subject))
    return Result(match, ())
@solve.implementation(for_type=ast.StrictOrderedSet)
def solve_strictorderedset(expr, vars):
    """Return True if the children are in strictly decreasing order (a > b).

    Any None operand yields a False result.
    """
    iterator = iter(expr.children)
    min_ = __solve_for_scalar(next(iterator), vars)
    if min_ is None:
        return Result(False, ())
    for child in iterator:
        val = __solve_for_scalar(child, vars)
        try:
            # NOTE(review): the comparison runs before the `val is None`
            # check, so on Python 3 a None operand raises TypeError and is
            # surfaced as an EfilterTypeError instead of a plain False
            # result — confirm this ordering is intended.
            if not min_ > val or val is None:
                return Result(False, ())
        except TypeError:
            # Incomparable operand types.
            raise errors.EfilterTypeError(expected=type(min_),
                                          actual=type(val),
                                          root=child,
                                          query=expr.source,)
        min_ = val
    return Result(True, ())
@solve.implementation(for_type=ast.PartialOrderedSet)
def solve_partialorderedset(expr, vars):
    """Return True if the children are in non-increasing order (a >= b).

    Any None operand yields a False result.
    """
    iterator = iter(expr.children)
    min_ = __solve_for_scalar(next(iterator), vars)
    if min_ is None:
        return Result(False, ())
    for child in iterator:
        val = __solve_for_scalar(child, vars)
        try:
            # NOTE(review): as in the strict variant, the comparison runs
            # before the `val is None` check; on Python 3 a None operand
            # raises TypeError rather than reaching the None test — confirm
            # this ordering is intended.
            if min_ < val or val is None:
                return Result(False, ())
        except TypeError:
            # Incomparable operand types.
            raise errors.EfilterTypeError(expected=type(min_),
                                          actual=type(val),
                                          root=child,
                                          query=expr.source)
        min_ = val
    return Result(True, ())
|
google/dotty
|
efilter/transforms/solve.py
|
Python
|
apache-2.0
| 30,051
|
from modules.py532lib.NFC import NFC as NFC
# Announce the script's purpose on startup.
print ('Write UID')
# The detected tag's UID is written to the hidden file ".UID" in the
# current working directory.
UID_FILE = ".UID"
def write_uid(uid):
    """Persist the detected tag UID to UID_FILE and stop the NFC reader.

    Registered as the NEWTAG callback; `uid` is whatever value the NFC
    event dispatcher passes for the newly detected tag.
    """
    # Bug fix: the original referenced `f.close` without calling it, so the
    # file handle was never explicitly closed. A context manager guarantees
    # the file is closed (and flushed) even if the write raises.
    with open(UID_FILE, 'w') as f:
        print("Write UID : %s" % str(uid))
        f.write(str(uid))
    NFC.stop()
def stop(uid):
    """Stop the NFC reader; registered as the REMOVETAG callback.

    The `uid` argument is supplied by the NFC event dispatcher but unused.
    """
    NFC.stop()
# Register callbacks: write the UID on tag arrival, stop on tag removal.
NFC.add_event_detect(NFC.NEWTAG,write_uid)
NFC.add_event_detect(NFC.REMOVETAG,stop)
print('Put the disk on plate')
# NOTE(review): presumably begins the NFC polling/event loop that fires the
# callbacks registered above — confirm against the NFC module.
NFC.start()
|
belese/luciphone
|
Luciphone/writeuid.py
|
Python
|
gpl-2.0
| 431
|
import uuid
def custom_receiver(sender, **kwargs):
    """
    Set `youyouid` field with custom uuid.uuid4() ;)
    """
    obj = kwargs['instance']
    target_field = obj._base64field_name

    # Only fill the field when it is still empty or unset.
    if getattr(obj, target_field) not in ['', None]:
        return

    fresh_uuid = str(uuid.uuid4())
    update_kwargs = {target_field: fresh_uuid}
    sender._default_manager.filter(pk=obj.pk).update(**update_kwargs)
|
Alir3z4/django-base64field
|
django_base64field/tests/receivers.py
|
Python
|
bsd-3-clause
| 401
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.