prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
def f(x):
return x + 1
rais | e V | alueError("I do not want to be imported")
|
Here we print an expression that may contain Indexed objects, they
# correspond to arrays in the generated code. The low-level implementation
# involves looping over array elements and possibly storing results in temporary
# variables or accumulate it in the assign_to object.
lhs_printed = self._print(assign_to)
lines = []
# Setup loops over non-dummy indices -- all terms need these
indices = self.get_expression_indices(expr, assign_to)
openloop, closeloop = self._get_loop_opening_ending(indices)
# Setup loops over dummy indices -- each term needs separate treatment
from sympy.tensor import get_contraction_structure
d = get_contraction_structure(expr)
# terms with no summations first
if None in d:
text = CodePrinter.doprint(self, Add(*d[None]))
else:
# If all terms have summations we must initialize array to Zero
text = CodePrinter.doprint(self, 0)
# skip redundant assignments
if text != lhs_printed:
lines.extend(openloop)
if assign_to is not None:
text = self._get_statement("%s = %s" % (lhs_printed, text))
lines.append(text)
lines.extend(closeloop)
for dummies in d:
# then terms with summations
if isinstance(dummies, tuple):
indices = self._sort_optimized(dummies, expr)
openloop_d, closeloop_d = self._get_loop_opening_ending(
indices)
for term in d[dummies]:
if term in d and not ([list(f.keys()) for f in d[term]]
== [[None] for f in d[term]]):
# If one factor in the term has it's own internal
# contractions, those must be computed first.
# (temporary variables?)
raise NotImplementedError(
"FIXME: no support for contractions in factor yet")
else:
# We need the lhs expression as an accumulator for
# the loops, i.e
#
# for (int d=0; d < dim; d++){
# lhs[] = lhs[] + term[][d]
| # } ^.................. the accumulator
#
# We check if the expression already contains the
# lhs, and raise an exception if it does, as that
# syntax is currently undefined. FIXME: What would be
# a good interpretation?
| if assign_to is None:
raise AssignmentError(
"need assignment variable for loops")
if term.has(assign_to):
raise ValueError("FIXME: lhs present in rhs,\
this is undefined in CCodePrinter")
lines.extend(openloop)
lines.extend(openloop_d)
text = "%s = %s" % (lhs_printed, CodePrinter.doprint(
self, assign_to + term))
lines.append(self._get_statement(text))
lines.extend(closeloop_d)
lines.extend(closeloop)
return lines
def get_expression_indices(self, expr, assign_to):
from sympy.tensor import get_indices, get_contraction_structure
rinds, junk = get_indices(expr)
linds, junk = get_indices(assign_to)
# support broadcast of scalar
if linds and not rinds:
rinds = linds
if rinds != linds:
raise ValueError("lhs indices must match non-dummy"
" rhs indices in %s" % expr)
return self._sort_optimized(rinds, assign_to)
def _sort_optimized(self, indices, expr):
if not indices:
return []
# determine optimized loop order by giving a score to each index
# the index with the highest score are put in the innermost loop.
score_table = {}
for i in indices:
score_table[i] = 0
arrays = expr.atoms(C.Indexed)
for arr in arrays:
for p, ind in enumerate(arr.indices):
try:
score_table[ind] += self._rate_index_position(p)
except KeyError:
pass
return sorted(indices, key=lambda x: score_table[x])
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr,
self._print(expr.evalf(self._settings["precision"]))))
return str(expr)
def _print_Dummy(self, expr):
# dummies must be printed as unique symbols
return "%s_%i" % (expr.name, expr.dummy_index) # Dummy
_print_Catalan = _print_NumberSymbol
_print_EulerGamma = _print_NumberSymbol
_print_GoldenRatio = _print_NumberSymbol
def _print_And(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['and']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Or(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['or']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Xor(self, expr):
if self._operators.get('xor') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['xor']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Equivalent(self, expr):
if self._operators.get('equivalent') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['equivalent']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Mul(self, expr):
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
if len(b) == 0:
return sign + '*'.join(a_str)
elif len(b) == 1:
if len(a) == 1 and not (a[0].is_Atom or a[0].is_Add):
return sign + "%s/" % a_str[0] + '*'.join(b_str)
else:
return sign + '*'.join(a_str) + "/%s" % b_str[0]
else:
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
def _print_not_supported(self, expr):
self._not_supported.add(expr)
return self.emptyPrinter(expr)
# The following can not be simply translated into C or Fortran
_print_Basic = _print_not_supported
_print_ComplexInfinity = _print_not_supported
_print_Derivative = _print_not_supported
_pri |
None, "cual entidad remover un atributo?", 'Eliminar Atributo', '')
if dlg.ShowModal() == wx.ID_OK:
response = dlg.GetValue()
for elemento in self.GetActiveChild().entidades:
if elemento.nombre == response:
ejecute.DlgEliminarAtributo(self.GetActiveChild().canvas, elemento)"""
self.GetActiveChild().canvas.Refresh()
def CrearRelacion(self, evt):
ejecute = Relacion()
ejecute.DlgCrearRelacion(self, self.GetActiveChild().canvas, self.GetActiveChild().entidades)
self.GetActiveChild().contadorRelacion += 1
self.GetActiveChild().canvas.Refresh()
def TreeModificarAtributo(self, evt):
ejecute = Atributo()
ejecute.ModificarAtributo(self.GetActiveChild().canvas, self.atributoAcc.entidad, self.atributoAcc)
self.GetActiveChild().canvas.Refresh()
def TreeEliminarAtributo(self, evt):
if self.atributoAcc.claveForanea == True:
dial = wx.MessageDialog(self, self.Idioma(archivo[ATRIBUTO_ELIMINAR_ERROR]) % self.atributoAcc.nombre, 'Error', wx.OK | wx.ICON_ERROR)
dial.ShowModal()
return
dlg = wx.MessageDialog(self.GetActiveChild().canvas, self.Idioma('Want to remove the attribute %s') % self.atributoAcc.nombre, self.Idioma('Delete Attribute %s') % self.atributoAcc.nombre, wx.YES_NO | wx.ICON_QUESTION)
if dlg.ShowModal() == wx.ID_YES:
ejecute = Atributo()
ejecute.EliminarAtributo(self.GetActiveChild().canvas, self.atributoAcc.entidad, self.atributoAcc)
self.GetActiveChild().canvas.Refresh()
def RelacionIdentificadora(self, evt):
self.GetActiveChild().canvas.SetCursor(wx.CROSS_CURSOR)
self.GetActiveChild().relacion = 1
def RelacionNoIdentificadora(self, evt):
self.GetActiveChild().canvas.SetCursor(wx.CROSS_CURSOR)
self.GetActiveChild().relacion = 2
def ModificarRelacion(self, evt):
ejecute = Relacion()
for elemento in self.GetActiveChild().relaciones:
if elemento.Selected():
ejecute.DlgModificarRelacion(elemento, self, self.GetActiveChild().canvas, self.GetActiveChild().entidades)
def EliminarRelacion(self, evt):
ejecute = Relacion()
for elemento in self.GetActiveChild().relaciones:
if elemento.Selected():
ejecute.EliminarRelacion(elemento, self.GetActiveChild().canvas, self.GetActiveChild(), self.GetActiveChild().entidades)
def GenerarScriptSql(self, evt):
script = SQL().ScriptPostgreSQL(self.GetActiveChild())
dlg = Dialogos(self, "Script SQL")
dlg.ScriptSql(script)
dlg.ShowModal()
def GenerarScriptDjango(self, evt):
script = Django().ScriptDjango(self.GetActiveChild())
dlg = Dialogos(self, "Script Django")
dlg.ScriptSql(script)
dlg.ShowModal()
def GuardarScriptSql(self, evt):
script = SQL().ScriptPostgreSQL(self.GetActiveChild())
tempFile = wx.FileDialog(self, message="Guardar SQL", defaultDir=os.path.expanduser("~"), defaultFile="sofiaSQL", wildcard="Archivos SQL (*.sql)|*.sql", style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if tempFile.ShowModal() == wx.ID_OK:
fileSQL = "%s.sql" % tempFile.GetPath()
#nombreArchivoTemporal = tempFile.GetFilename()
file = codecs.open(fileSQL, encoding='UTF-8', mode = 'w+')
file.write(script)
file.close()
def Idioma(self, texto):
if language[self.data["idioma"]] != '':
return self.translation(texto)
else:
return texto
def ActualizarIdioma(self, evt):
dlg = Dialogos(self, self.Idioma("Configuration"))
dlg.Configuracion(self.data)
if dlg.ShowModal() == wx.ID_OK:
countMenuBar = 0
if language[self.data["idioma"]] != '':
se | lf.locale.AddCatalog(language[self.data["idioma"]])
idioma = language[self.data["idioma"]]
for menu in self.menuFile.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for | menu in self.menuVer.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menuTool.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menuHelp.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menuBar.GetMenus():
try:
menu[0].SetTitle(self.translation(menuBar[countMenuBar]))
self.menuBar.Replace(countMenuBar, menu[0], self.translation(menuBar[countMenuBar]))
countMenuBar = countMenuBar + 1
except:
countMenuBar = countMenuBar + 1
for menu in self.menu_tree_entidad.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_tree_atributo.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_tree_relacion.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_entidad.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_atributo.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_relacion.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_relacionIdentificadora.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
for menu in self.menu_relacionNoIdentificadora.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(self.translation(archivo[menu.GetId()]))
menu.SetHelp(self.translation(archivoHelp[menu.GetId()]))
try:
self.SetTitle(self.translation(archivo[TITULO]))
self.GetActiveChild().lienzo.Caption(self.translation("Canvas"))
self.GetActiveChild().nav.Caption(self.translation("Object Browser"))
except:
pass
else:
idioma = 'English'
for menu in self.menuFile.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menuVer.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menuTool.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menuHelp.GetMenuItems():
if menu.GetId() != -2:
menu.SetText(archivo[menu.GetId()])
menu.SetHelp(archivoHelp[menu.GetId()])
for menu in self.menuBar.GetMenus():
try:
menu[0].SetTitle(menuBar[countMenuBar])
self.menuBar.Replace(countMenuBar, menu[0], menuBar[countMenuBar])
countMenuBar = countMenuBar + 1
except:
countMenuBar = countMenuBar + 1
for menu in self.menu_tree_entidad.GetMenuItems():
if |
g>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
from common import gajim
from common import dataforms
from common import ged
import gtkgui_helpers
import dialogs
import vcard
import config
import dataforms_widget
class SearchWindow:
def __init__(self, account, jid):
"""
Create new window
"""
# an account object
self.account = account
self.jid = jid
# retrieving widgets from xml
self.xml = gtkgui_helpers.get_gtk_builder('search_window.ui')
self.window = self.xml.get_object('search_window')
for name in ('label', 'progressbar', 'search_vbox', 'search_button',
'add_contact_button', 'information_button'):
self.__dict__[name] = self.xml.get_object(name)
self.search_button.set_sensitive(False)
# displaying the window
self.xml.connect_signals(self)
self.window.show_all()
self.request_form()
self.pulse_id = GLib.timeout_add(80, self.pulse_callback)
self.is_form = None
# Is there a jid column in results ? if -1: no, else column number
self.jid_column = -1
gajim.ged.register_event_handler('search-form-received', ged.GUI1,
self._nec_search_form_received)
gajim.ged.register_event_handler('search-result-received', ged.GUI1,
self._nec_search_result_received)
def request_form(self):
gajim.connections[self.account].request_search_fields(self.jid)
def pulse_callback(self):
self.progressbar.pulse()
return True
def on_search_window_key_press_event(self, widget, event):
if event.keyval == Gdk.KEY_Escape:
self.window.destroy()
def on_search_window_destroy(self, widget):
if self.pulse_id:
GLib.source_remove(self.pulse_id)
del gajim.interface.instances[self.account]['search'][self.jid]
gajim.ged.remove_event_handler('search-form-received', ged.GUI1,
self._nec_search_form_received)
gajim.ged.remove_event_handler('search-result-received', ged.GUI1,
self._nec_search_result_received)
def on_close_button_clicked(self, button):
self.window.destroy()
def on_search_button_clicked(self, button):
if self.is_form:
self.data_form_widget.data_form.type_ = 'submit'
gajim.connections[self.account].send_search_form(self.jid,
self.data_form_widget.data_form.get_purged(), True)
else:
infos = self.data_form_widget.get_infos()
if 'instructions' in infos:
del infos['instructions']
gajim.connections[self.account].send_search_form(self.jid, infos,
False)
self.search_vbox.remove(self.data_form_widget)
self.progressbar.show()
self.label.set_text(_('Waiting for results'))
self.label.show()
self.pulse_id = GLib.timeout_add(80, self.pulse_callback)
self.search_button.hide()
def on_add_contact_button_clicked(self, widget):
(model, iter_) = self.result_treeview.get_selection().get_selected()
if not iter_:
return
jid = model[iter_][self.jid_column]
dialogs.AddNewContactWindow(self.account, jid)
def on_information_button_clicked(self, widget):
(model, iter_) = self.result_treeview.get_selection().get_selected()
if not iter_:
return
jid = model[iter_][self.jid_column]
if jid in gajim.interface.instances[self.account]['infos']:
gajim.interface.instances[self.account]['infos'][jid].window.present()
else:
contact = gajim.contacts.create_contact(jid=jid, account=self.account)
gajim.interface.instances[self.account]['infos'][jid] = \
vcard.VcardWindow(contact, self.account)
def _nec_search_form_received(self, obj):
if self.pulse_id:
GLib.source_remove(self.pulse_id)
self.progressbar.hide()
self.label.hide()
if obj.is_dataform:
self.is_form = True
self.data_form_widget = dataforms_widget.DataFormWidget()
self.dataform = dataforms.ExtendForm(node=obj.data)
self.data_form_widget.set_sensitive(True)
try:
self.data_form_widget.data_form = self.dataform
except dataforms.Error:
self.label.set_text(_('Error in received dataform'))
| self.label.show()
return
if self.data_form_widget.title:
self.window.set_title('%s - Search - Gajim' % \
self.data_form_widget.title)
else:
self.is_form = False
self.data_form_widget = config.FakeDataForm(obj.data)
self.data_form_widget.show_all()
self.search_vbox.pack_start(self.data_form_widget, True, True, 0)
self.searc | h_button.set_sensitive(True)
def on_result_treeview_cursor_changed(self, treeview):
if self.jid_column == -1:
return
(model, iter_) = treeview.get_selection().get_selected()
if not iter_:
return
if model[iter_][self.jid_column]:
self.add_contact_button.set_sensitive(True)
self.information_button.set_sensitive(True)
else:
self.add_contact_button.set_sensitive(False)
self.information_button.set_sensitive(False)
def _nec_search_result_received(self, obj):
if self.pulse_id:
GLib.source_remove(self.pulse_id)
self.progressbar.hide()
self.label.hide()
if not obj.is_dataform:
if not obj.data:
self.label.set_text(_('No result'))
self.label.show()
return
# We suppose all items have the same fields
sw = Gtk.ScrolledWindow()
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.result_treeview = Gtk.TreeView()
self.result_treeview.connect('cursor-changed',
self.on_result_treeview_cursor_changed)
sw.add(self.result_treeview)
# Create model
fieldtypes = [str]*len(obj.data[0])
model = Gtk.ListStore(*fieldtypes)
# Copy data to model
for item in obj.data:
model.append(item.values())
# Create columns
counter = 0
for field in obj.data[0].keys():
self.result_treeview.append_column(Gtk.TreeViewColumn(field,
Gtk.CellRendererText(), text=counter))
if field == 'jid':
self.jid_column = counter
counter += 1
self.result_treeview.set_model(model)
sw.show_all()
self.search_vbox.pack_start(sw, True, True, 0)
if self.jid_column > -1:
self.add_contact_button.show()
self.information_button.show()
return
self.dataform = dataforms.ExtendForm(node=obj.data)
if len(self.dataform.items) == 0:
# No result
self.label.set_text(_('No result'))
self.label.show()
return
self.data_form_widget.set_sensitive(True)
try:
self.data_form_widget.data_form = self.dataform
except dataforms.Error:
self.label.set_text(_('Error in received dataform' |
definovanou v konstruktoru (self.shape).
U modelu je potřeba brát v potaz polohu objektu. Ta je udávána pomocí
crinfo. To je skupina polí s minimální a maximální hodnotou pro každou osu.
Trénování je prováděno opakovaným voláním funkce train_one().
:param model_margin: stanovuje velikost okraje v modelu. Objekt bude ve
výchozím nastavení vzdálen 0 px od každého okraje.
"""
def __init__(self, shape=[5, 5, 5]):
"""TODO: to be defined1. """
self.model = np.ones(shape)
self.data_number = 0
self.model_margin = [0, 0, 0]
pass
def get_model(self, crinfo, image_shape):
"""
:param image_shape: Size of output image
:param crinfo: Array with min and max index of object for each axis.
[[mi | nx, maxx], [miny, maxy], [minz, maxz]]
"""
# Průměrování
mdl = self.model / se | lf.data_number
print(mdl.shape)
print(crinfo)
# mdl_res = imma.image.resize_to_shape(mdl, crinfo[0][]
uncr = qmisc.uncrop(mdl, crinfo, image_shape, resize=True)
return uncr
def train_one(self, data,voxelSize_mm):
"""
Trenovani shape modelu
data se vezmou a oriznou (jen jatra)
na oriznuta data je aplikovo binarni otevreni - rychlejsi nez morphsnakes
co vznikne je uhlazena cast ktera se odecte od puvodniho obrazu
cimz vzniknou spicky
orezany obraz se nasledne rozparceluje podle velikosti (shape) modelu
pokud pocet voxelu v danem useku prekroci danou mez, je modelu
prirazena nejaka hodnota. Meze jsou nasledujici:
0%-50% => 1
50%-75% => 2
75%-100% => 3
"""
crinfo = qmisc.crinfo_from_specific_data(data, margin=self.model_margin)
datacr = qmisc.crop(data, crinfo=crinfo)
dataShape = self.model.shape
datacrres = self.trainThresholdMap(datacr, voxelSize_mm, dataShape)
self.model += datacrres
self.data_number += 1
# Tady bude super kód pro trénování
def train(self, data_arr):
for data in data_arr:
self.train_one(data)
def objectThreshold(self,objekt,thresholds,values):
'''
Objekt - 3d T/F pole
thresholds = [0,0.5,0.75] zacina nulou
values = [3,2,1]
vrati hodnotu z values odpovidajici thresholds
podle podilu True voxelu obsazenych v 3d poli
zde napriklad 60% =>2, 80% => 1.
'''
bile = np.sum(objekt)
velikost = objekt.shape
velikostCelkem = 1.0
for x in velikost:
velikostCelkem = velikostCelkem*x
podil = bile/velikostCelkem #podil True voxelu
#print podil
#vybrani threshold
final = 0 #vracena hodnota
pomocny = 0 #pomocna promenna
for threshold in thresholds:
if(podil >= threshold ):
final = values[pomocny]
pomocny = pomocny+1
return final
def rozdelData(self,crData,dataShape, nasobitel1=1,nasobitel2 = 2):
'''
crData - vstupni data
dataShape - velikost vraceneho pole
volte 0<nasobitel1 < nasobitel2, vysvetleni nasleduje:
rozdeli pole crData na casti vrati pole rozmeru dataShape
vysledne hodnoty pole jsou urceny funkci objectThreshold(object,thresholds,values)
intervaly prirazeni values [1-3] jsou nasledujici:
[0-prumer*nasobitel1],[prumer*nasobitel1-prumer*nasobitel2],[prumer*nasobitel2 a vice]
'''
'vypocet prumerneho podilu bilych voxelu'
bile = np.sum(crData)
velikost = crData.shape
velikostCelkem = 1.0
for x in velikost:
velikostCelkem = velikostCelkem*x
podil = bile/velikostCelkem #prumerny podil True voxelu
thresholds = [0,nasobitel1*podil,nasobitel2*podil]
values = [3,2,1]
'vybrani voxelu a vytvoreni objektu'
velikostDat = crData.shape
voxelySmer = [0,0,0]
vysledek = np.zeros(dataShape)
for poradi in range(3):
voxelySmer[poradi] = velikostDat[poradi]/dataShape[poradi]
for x in range(dataShape[0]):
for y in range(dataShape[1]):
for z in range(dataShape[2]):
xStart = x * voxelySmer[0]
xKonec = xStart + voxelySmer[0]
yStart = y * voxelySmer[1]
yKonec = yStart + voxelySmer[1]
zStart = z * voxelySmer[2]
zKonec = zStart + voxelySmer[2]
objekt = crData[
int(xStart):int(xKonec),
int(yStart):int(yKonec),
int(zStart):int(zKonec)
]
vysledek[x,y,z] = self.objectThreshold(objekt,thresholds,values)
return vysledek
def vytvorKouli3D(self,voxelSize_mm,polomer_mm):
'''voxelSize:mm = [x,y,z], polomer_mm = r
Vytvari kouli v 3d prostoru postupnym vytvarenim
kruznic podel X (prvni) osy. Predpokladem spravnosti
funkce je ze Y a Z osy maji stejne rozliseni
funkce vyuziva pythagorovu vetu'''
print('zahajeno vytvareni 3D objektu')
x = voxelSize_mm[0]
y = voxelSize_mm[1]
z = voxelSize_mm[2]
xVoxely = int(np.ceil(polomer_mm/x))
yVoxely = int(np.ceil(polomer_mm/y))
zVoxely = int( np.ceil(polomer_mm/z))
rozmery = [xVoxely*2+1,yVoxely*2+1,yVoxely*2+1]
xStred = xVoxely
konec = yVoxely*2+1
koule = np.zeros(rozmery) #pole kam bude ulozen vysledek
for xR in range(xVoxely*2+1):
if(xR == xStred):
print('3D objekt z 50% vytvoren')
c = polomer_mm #nejdelsi strana
a = (xStred-xR )*x
vnitrek = (c**2-a**2)
b = 0.0
if(vnitrek > 0):
b = np.sqrt((c**2-a**2))#pythagorova veta b je v mm
rKruznice = float(b)/float(y)
if(rKruznice == np.NAN):
continue
#print rKruznice #osetreni NAN
kruznice = self.vytvoritTFKruznici(yVoxely,rKruznice)
koule[xR,0:konec,0:konec] = kruznice[0:konec,0:konec]
print('3D objekt uspesne vytvoren')
return koule
def vytvoritTFKruznici(self,polomerPole,polomerKruznice):
'''vytvori 2d pole velikosti 2xpolomerPole+1
s kruznici o polomeru polomerKruznice uprostred '''
radius = polomerPole
r2 = np.arange(-radius, radius+1)**2
dist2 = r2[:, None] + r2
vratit = (dist2 <= polomerKruznice**2).astype(np.int)
return vratit
def trainThresholdMap(self,data3d,voxelSize,dataShape):
structure = self.vytvorKouli3D(voxelSize, 5)
smoothed = ndimage.binary_opening(data3d, structure, 3)
spicky = smoothed != data3d
vysledek = self.rozdelData(spicky,dataShape)
return vysledek
def main():
# logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
logger.addHandler(ch)
# create file handler which logs even debug messages
# fh = logging.FileHandler('log.txt')
# fh.setLevel(logging.DEBUG)
# formatter = logging.Formatter(
# '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# fh.setFormatter(formatter)
# logger.addHandler(fh)
# logger.debug('start')
# input parser
parser = argparse.ArgumentParser(
description=__doc__
)
parser.add_argument(
'-i', '--inputfile',
default=None,
required=True,
help='input file'
)
parser.add_argument(
'-d', '--debug', action='store_true',
help='Debug mode')
args = parser.parse_args()
if args.debug:
ch.setLevel(logging.DEBUG)
if __name__ == "__main__":
main |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-18 14:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('testapp', '0001_initial | '),
]
operations = | [
migrations.AlterField(
model_name='cat',
name='sex',
field=models.CharField(choices=[('F', 'Female'), ('M', 'Male')], max_length=1),
),
]
|
consecutive characters to a
single '_', but this has been removed. This functionality should
possibly be added as a new function."""
return s.replace("/", "\\").lower()
def isabs(s):
"""Return whether a path is absolute.
Trivial in Posix, harder on the Mac or MS-DOS.
For DOS it is absolute if it starts with a slash or backslash (current
volume), or if a pathname after the volume letter and colon starts with
a slash or backslash."""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
def join(a, *p):
"""Join two (or more) paths."""
path = a
for b in p:
if isabs(b):
path = b
elif path == '' or path[-1:] in '/\\:':
path = path + b
else:
path = path + "\\" + b
return path
def splitdrive(p):
"""Split a path into a drive specification (a drive letter followed
by a colon) and path specification.
It is always true that drivespec + pathspec == p."""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
def split(p):
"""Split a path into head (everything up to the last '/') and tail
(the rest). After the trailing '/' is stripped, the invariant
join(head, tail) == p holds.
The resulting head won't end in '/' unless it is the root."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the first dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c in '/\\':
root, ext = root + ext + c, ''
elif c == '.' or ext:
ext = ext + c
else:
root = root + c
return root, ext
def basename(p):
"""Return the tail (basename) part of a path."""
return split(p)[1]
def dirname(p):
"""Ret | urn the head (dirname) part of a path."""
return split(p)[0]
def commonprefix(m):
"""Return the longest prefix of all list elements."""
if not m: return ''
prefix = m[0]
for item in | m:
for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_ATIME]
def islink(path):
"""Is a path a symbolic link?
This will always return false on systems where posix.lstat doesn't exist."""
return 0
def exists(path):
"""Does a path exist?
This is false for dangling symbolic links."""
try:
st = os.stat(path)
except os.error:
return 0
return 1
def isdir(path):
"""Is a path a dos directory?"""
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISDIR(st[stat.ST_MODE])
def isfile(path):
"""Is a path a regular file?"""
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISREG(st[stat.ST_MODE])
def ismount(path):
"""Is a path a mount point?"""
# XXX This degenerates in: 'is this the root?' on DOS
return isabs(splitdrive(path)[1])
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
def expanduser(path):
"""Expand paths beginning with '~' or '~user'.
'~' means $HOME; '~user' means that user's home directory.
If the path doesn't begin with '~', or if the user or $HOME is unknown,
the path is returned unchanged (leaving error reporting to whatever
function is called with the expanded path as argument).
See also module 'glob' for expansion of *, ? and [...] in pathnames.
(A function should also be defined to do full *sh-style environment
variable expansion.)"""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i+1
if i == 1:
if not os.environ.has_key('HOME'):
return path
userhome = os.environ['HOME']
else:
return path
return userhome + path[i:]
def expandvars(path):
"""Expand paths containing shell variable substitutions.
The following rules apply:
- no expansion within single quotes
- no escape character, except for '$$' which is translated into '$'
- ${varname} is accepted.
- varnames can be made out of letters, digits and the character '_'"""
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
if '$' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + "_-"
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen -1
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if os.environ.has_key(var):
res = res + os.environ[var]
except ValueError:
res = res + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:inde |
#!/usr/bin/env python
'''
Command to send dynamic filesystem information to Zagg
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name,import-error
import argparse
import re
from openshift_tools.monitoring.metric_sender import MetricSender
from openshift_tools.monitoring import pminfo
def parse_args():
    """ Parse the CLI arguments and return the argparse namespace. """
    parser = argparse.ArgumentParser(description='Disk metric sender')
    parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
    parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
    parser.add_argument('--filter-pod-pv', action='store_true', default=None,
                        help="Filter out OpenShift Pod PV mounts")
    # Bug fix: a literal '%' in an argparse help string must be escaped as
    # '%%'; otherwise rendering --help raises a formatting error.
    parser.add_argument('--force-send-zeros', action='store_true', default=None,
                        help="Send 0%% full for mounts, useful for clearing existing bad alerts")
    return parser.parse_args()
def filter_out_key_name_chars(metric_dict, filesystem_filter):
    """ Strip the metric-name prefix from every key.

    E.g. 'filesys.full./var' -> '/var' when filesystem_filter is
    'filesys.full.'. Values pass through untouched.
    """
    # .items() instead of the Python-2-only .iteritems(): identical
    # behavior on py2, and forward-compatible with py3.
    return {k.replace(filesystem_filter, ''): v
            for (k, v) in metric_dict.items()}
def filter_out_container_root(metric_dict):
    """ Drop the container-root filesystem entry.

    Docker devicemapper roots look like
    /dev/mapper/docker-<maj>:<min>-<inode>-<hexid>; they are per-container
    and not interesting for host-level monitoring.
    """
    container_root_regex = r'^/dev/mapper/docker-\d+:\d+-\d+-[0-9a-f]+$'
    # .items() instead of the Python-2-only .iteritems().
    return {k: v
            for (k, v) in metric_dict.items()
            if not re.match(container_root_regex, k)}
def filter_out_customer_pv_filesystems(metric_dict):
    """ Remove customer PV devices from the metric dict.

    Filters /dev/xvd?? (two-letter suffix) and /dev/nvme devices numbered
    2 and above; the low-numbered devices are assumed to be node disks.
    """
    # Raw string: '\d' inside a plain string literal is an invalid escape
    # (DeprecationWarning on modern Pythons).
    pv_device = re.compile(r"^/dev/(?:xvd[a-z]{2}|nvme(?:[2-9].*|\d{2,}.*))$")
    # filter out xvda{2} (???) and nvme devices past 2
    return {
        k: v for (k, v) in metric_dict.items() if not pv_device.match(k)
    }
def zero_mount_percentages(metric_dict):
    """ Make all mounts report 0% used.

    Used with --force-send-zeros to clear existing bad alerts.
    """
    # Iterate keys directly: the values were unused, and iteritems() is
    # Python-2-only anyway.
    return {k: 0 for k in metric_dict}
def main():
    """ Main function to run the check """
    # Gathers filesystem fullness and inode usage via pminfo (PCP) and
    # ships both the Zabbix low-level-discovery payload and the per-mount
    # metric values to Zagg through MetricSender.
    args = parse_args()
    metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)
    filesys_full_metric = ['filesys.full']
    # Derived PCP metric: percentage of inodes in use per filesystem.
    filesys_inode_derived_metrics = {'filesys.inodes.pused' :
                                     'filesys.usedfiles / (filesys.usedfiles + filesys.freefiles) * 100'
                                    }
    # Zabbix discovery rule key, item-prototype macro and item keys.
    discovery_key_fs = 'disc.filesys'
    item_prototype_macro_fs = '#OSO_FILESYS'
    item_prototype_key_full = 'disc.filesys.full'
    item_prototype_key_inode = 'disc.filesys.inodes.pused'
    # Get the disk space
    filesys_full_metrics = pminfo.get_metrics(filesys_full_metric)
    filtered_filesys_metrics = filter_out_key_name_chars(filesys_full_metrics, 'filesys.full.')
    filtered_filesys_metrics = filter_out_container_root(filtered_filesys_metrics)
    if args.filter_pod_pv:
        filtered_filesys_metrics = filter_out_customer_pv_filesystems(filtered_filesys_metrics)
    if args.force_send_zeros:
        filtered_filesys_metrics = zero_mount_percentages(filtered_filesys_metrics)
    # Register every surviving mount with the discovery rule, then queue
    # one fullness metric per mount.
    metric_sender.add_dynamic_metric(discovery_key_fs, item_prototype_macro_fs, filtered_filesys_metrics.keys())
    for filesys_name, filesys_full in filtered_filesys_metrics.iteritems():
        metric_sender.add_metric({'%s[%s]' % (item_prototype_key_full, filesys_name): filesys_full})
    # Get filesytem inode metrics
    filesys_inode_metrics = pminfo.get_metrics(derived_metrics=filesys_inode_derived_metrics)
    filtered_filesys_inode_metrics = filter_out_key_name_chars(filesys_inode_metrics, 'filesys.inodes.pused.')
    filtered_filesys_inode_metrics = filter_out_container_root(filtered_filesys_inode_metrics)
    if args.filter_pod_pv:
        filtered_filesys_inode_metrics = filter_out_customer_pv_filesystems(filtered_filesys_inode_metrics)
    if args.force_send_zeros:
        filtered_filesys_inode_metrics = zero_mount_percentages(filtered_filesys_inode_metrics)
    # The inode metrics reuse the discovery payload registered above.
    for filesys_name, filesys_inodes in filtered_filesys_inode_metrics.iteritems():
        metric_sender.add_metric({'%s[%s]' % (item_prototype_key_inode, filesys_name): filesys_inodes})
    # Single batched send at the end.
    metric_sender.send_metrics()
if __name__ == '__main__':
    main()
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for DICOM."""
import os
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_io as tfio
# The DICOM sample files must be downloaded befor running the tests
#
# To download the DICOM samples:
# $ bash dicom_samples.sh download
# $ bash dicom_samples.sh extract
#
# To remopve the DICOM samples:
# $ bash dicom_samples.sh clean_dcm
#
# To remopve all the downloaded files:
# $ bash dicom_samples.sh clean_all
def test_dicom_input():
    """Smoke test: the DICOM entry points are exposed on tfio.image."""
    # Attribute access alone is the test -- it raises AttributeError if
    # any of the ops failed to register.
    _ = (
        tfio.image.decode_dicom_data,
        tfio.image.decode_dicom_image,
        tfio.image.dicom_tags,
    )
@pytest.mark.parametrize(
    "fname, exp_shape",
    [
        ("OT-MONO2-8-colon.dcm", (1, 512, 512, 1)),
        ("CR-MONO1-10-chest.dcm", (1, 440, 440, 1)),
        ("CT-MONO2-16-ort.dcm", (1, 512, 512, 1)),
        ("MR-MONO2-16-head.dcm", (1, 256, 256, 1)),
        ("US-RGB-8-epicard.dcm", (1, 480, 640, 3)),
        ("CT-MONO2-8-abdo.dcm", (1, 512, 512, 1)),
        ("MR-MONO2-16-knee.dcm", (1, 256, 256, 1)),
        ("OT-MONO2-8-hip.dcm", (1, 512, 512, 1)),
        ("US-RGB-8-esopecho.dcm", (1, 120, 256, 3)),
        ("CT-MONO2-16-ankle.dcm", (1, 512, 512, 1)),
        ("MR-MONO2-12-an2.dcm", (1, 256, 256, 1)),
        ("MR-MONO2-8-16x-heart.dcm", (16, 256, 256, 1)),
        ("OT-PAL-8-face.dcm", (1, 480, 640, 3)),
        ("XA-MONO2-8-12x-catheter.dcm", (12, 512, 512, 1)),
        ("CT-MONO2-16-brain.dcm", (1, 512, 512, 1)),
        ("NM-MONO2-16-13x-heart.dcm", (13, 64, 64, 1)),
        ("US-MONO2-8-8x-execho.dcm", (8, 120, 128, 1)),
        ("CT-MONO2-16-chest.dcm", (1, 400, 512, 1)),
        ("MR-MONO2-12-shoulder.dcm", (1, 1024, 1024, 1)),
        ("OT-MONO2-8-a7.dcm", (1, 512, 512, 1)),
        ("US-PAL-8-10x-echo.dcm", (10, 430, 600, 3)),
        ("TOSHIBA_J2K_OpenJPEGv2Regression.dcm", (1, 512, 512, 1)),
    ],
)
def test_decode_dicom_image(fname, exp_shape):
    """test_decode_dicom_image

    Decodes each sample (downloaded by dicom_samples.sh, see module
    comment) and checks the resulting shape. Expected shapes are
    (frames, height, width, channels); the multi-frame samples (e.g. the
    16x heart) fill the leading dimension.
    """
    dcm_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_dicom", fname
    )
    file_contents = tf.io.read_file(filename=dcm_path)
    # on_error="strict" makes decode failures raise instead of returning
    # an empty tensor; color_dim=True always appends a channel dimension.
    dcm_image = tfio.image.decode_dicom_image(
        contents=file_contents,
        dtype=tf.float32,
        on_error="strict",
        scale="auto",
        color_dim=True,
    )
    assert dcm_image.numpy().shape == exp_shape
@pytest.mark.parametrize(
    "fname, tag, exp_value",
    [
        (
            "OT-MONO2-8-colon.dcm",
            tfio.image.dicom_tags.StudyInstanceUID,
            b"1.3.46.670589.17.1.7.1.1.16",
        ),
        ("OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.Rows, b"512"),
        ("OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.Columns, b"512"),
        ("OT-MONO2-8-colon.dcm", tfio.image.dicom_tags.SamplesperPixel, b"1"),
        (
            "US-PAL-8-10x-echo.dcm",
            tfio.image.dicom_tags.StudyInstanceUID,
            b"999.999.3859744",
        ),
        (
            "US-PAL-8-10x-echo.dcm",
            tfio.image.dicom_tags.SeriesInstanceUID,
            b"999.999.94827453",
        ),
        ("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.NumberofFrames, b"10"),
        ("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.Rows, b"430"),
        ("US-PAL-8-10x-echo.dcm", tfio.image.dicom_tags.Columns, b"600"),
    ],
)
def test_decode_dicom_data(fname, tag, exp_value):
    """test_decode_dicom_data

    Reads a single header tag from each sample file. Tag values come back
    as byte strings exactly as stored in the DICOM header -- numeric tags
    like Rows/Columns are not converted to integers.
    """
    dcm_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "test_dicom", fname
    )
    file_contents = tf.io.read_file(filename=dcm_path)
    dcm_data = tfio.image.decode_dicom_data(contents=file_contents, tags=tag)
    assert dcm_data.numpy() == exp_value
def test_dicom_image_shape():
    """test_decode_dicom_image

    Decode + resize through a tf.data pipeline and verify the resulting
    shape. The original version only *built* the pipeline: tf.data is
    lazy, so without iterating the dataset nothing was ever executed or
    checked.
    """
    dcm_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "test_dicom",
        "US-PAL-8-10x-echo.dcm",
    )
    dataset = tf.data.Dataset.from_tensor_slices([dcm_path])
    dataset = dataset.map(tf.io.read_file)
    dataset = dataset.map(lambda e: tfio.image.decode_dicom_image(e, dtype=tf.uint16))
    dataset = dataset.map(lambda e: tf.image.resize(e, (224, 224)))
    # Actually run the pipeline and assert on the output shape:
    # US-PAL-8-10x-echo.dcm decodes to (10, 430, 600, 3) and resize only
    # changes the spatial dimensions.
    for item in dataset:
        assert item.shape == (10, 224, 224, 3)
def test_dicom_image_concurrency():
    """test_decode_dicom_image_currency

    Decodes the same file from 8 parallel tf.data workers (200 times per
    sample file) to exercise thread safety of the DICOM kernels, and
    checks every decoded tensor keeps the expected shape.
    """
    @tf.function
    def preprocess(dcm_content):
        # Read a header tag and decode the image in the same traced
        # function so both ops run inside the parallel map.
        tags = tfio.image.decode_dicom_data(
            dcm_content, tags=[tfio.image.dicom_tags.PatientsName]
        )
        tf.print(tags)
        image = tfio.image.decode_dicom_image(dcm_content, dtype=tf.float32)
        return image
    dcm_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "test_dicom",
        "TOSHIBA_J2K_OpenJPEGv2Regression.dcm",
    )
    dataset = (
        tf.data.Dataset.from_tensor_slices([dcm_path])
        .repeat()
        .map(tf.io.read_file)
        .map(preprocess, num_parallel_calls=8)
        .take(200)
    )
    for i, item in enumerate(dataset):
        print(tf.shape(item), i)
        assert np.array_equal(tf.shape(item), [1, 512, 512, 1])
    # Second pass with a multi-frame color sample.
    dcm_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "test_dicom",
        "US-PAL-8-10x-echo.dcm",
    )
    dataset = (
        tf.data.Dataset.from_tensor_slices([dcm_path])
        .repeat()
        .map(tf.io.read_file)
        .map(preprocess, num_parallel_calls=8)
        .take(200)
    )
    for i, item in enumerate(dataset):
        print(tf.shape(item), i)
        assert np.array_equal(tf.shape(item), [10, 430, 600, 3])
def test_dicom_sequence():
    """test_decode_dicom_sequence

    Verifies tag lookup syntax: nested sequence paths of the form
    [group,element][index]... as well as top-level [group,element] tags,
    with both upper- and lower-case hex spellings.
    """
    dcm_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "test_dicom",
        "2.25.304589190180579357564631626197663875025.dcm",
    )
    dcm_content = tf.io.read_file(filename=dcm_path)
    # Nested sequence: ReferencedSeries -> ReferencedInstance ->
    # ReferencedSOPInstanceUID.
    tags = tfio.image.decode_dicom_data(
        dcm_content, tags=["[0x0008,0x1115][0][0x0008,0x1140][0][0x0008,0x1155]"]
    )
    assert np.array_equal(tags, [b"2.25.211904290918469145111906856660599393535"])
    dcm_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "test_dicom",
        "US-PAL-8-10x-echo.dcm",
    )
    dcm_content = tf.io.read_file(filename=dcm_path)
    tags = tfio.image.decode_dicom_data(dcm_content, tags=["[0x0020,0x000E]"])
    assert np.array_equal(tags, [b"999.999.94827453"])
    # Same tag, bracket-less lower-case form.
    tags = tfio.image.decode_dicom_data(dcm_content, tags=["0x0020,0x000e"])
    assert np.array_equal(tags, [b"999.999.94827453"])
if __name__ == "__main__":
    # Bug fix: the original called test.main(), but no name `test` is ever
    # imported in this file (NameError when run as a script). Run the file
    # through pytest, which is already imported above.
    pytest.main([__file__])
|
# test driver to verify that new version of code works
import opiniongame.config as og_cfg
import opiniongame.IO as og_io
import opiniongame.coupling as og_coupling
import opiniongame.state as og_state
import opiniongame.opinions as og_opinions
import opiniongame.adjacency as og_adj
import opiniongame.selection as og_select
import opiniongame.potentials as og_pot
import opiniongame.core as og_core
import opiniongame.stopping as og_stop
import numpy as np
#
# process command line
#
# Parse and echo the command line.
cmdline = og_cfg.CmdLineArguments()
cmdline.printOut()
#
# load configuration
#
# TODO: add option to generate defaults and save to file
# TODO: interpret args to get filename if specified on cmd line
config = og_cfg.staticParameters()
config.readFromFile('staticParameters.cfg')
config.threshold = 0.01
config.printOut()
#
# seed PRNG: must do this before any random numbers are
# ever sampled during default generation
#
print("SEEDING PRNG: "+str(config.startingseed))
np.random.seed(config.startingseed)
state = og_state.WorldState.fromCmdlineArguments(cmdline, config)
#
# run
#
# Parameter sweep over the tent-potential threshold (tau) and the
# learning rate (alpha); 100 repetitions per (alpha, tau) combination.
tau_list = np.arange(0.45, 0.9, 0.01)
alpha_list = np.arange(0.05, 0.25, 0.01)
numalphas = len(alpha_list)
numtaus = len(tau_list)
numvars = 3
# Per-cell results: [polarized count, not-polarized count, mean iterations].
resultMatrix = np.zeros((numalphas, numtaus, numvars))
for (i, alpha) in enumerate(alpha_list):
    config.learning_rate = alpha
    print("")
    for (j, tau) in enumerate(tau_list):
        print((alpha, tau))
        #
        # functions for use by the simulation engine
        #
        ufuncs = og_cfg.UserFunctions(og_select.FastPairSelection,
                                      og_stop.totalChangeStop,
                                      og_pot.createTent(tau))
        polarized = 0
        notPolarized = 0
        aveIters = 0
        for k in range(100):
            # NOTE(review): `state` is carried over between parameter
            # combinations; opinions are only re-randomized below, after
            # each run -- confirm this is the intended warm-start.
            state = og_core.run_until_convergence(config, state, ufuncs)
            results = og_opinions.isPolarized(state.history[-1], 0.05)
            for result in results:
                if result:
                    polarized += 1
                else:
                    notPolarized += 1
            aveIters += state.iterCount
            state.reset()
            state.initialOpinions = og_opinions.initialize_opinions(config.popSize, config.ntopics)
            # maybe you want to do Consensus and nonConsensus. Finding consensus is easier!
            # assuming pop_size = 20, ten people at 1, nine people at 0 and and one person
            # at 0.5 will be polarization, but, still ...
        resultMatrix[i][j][0] = polarized
        resultMatrix[i][j][1] = notPolarized
        resultMatrix[i][j][2] = aveIters/100.0
# Persist the whole sweep as a MATLAB-style matrix.
rdict = {}
rdict['results'] = resultMatrix
og_io.saveMatrix('output.mat', rdict)
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from nose.plugins.attrib import attr
from nose.tools import nottest
import numpy as np
from neon.util.testing import assert_tensor_equal
@attr('cuda')
class TestGPUTensor(object):
    """Exercise creation, slicing and mutation of the cc2 GPUTensor."""

    def setup(self):
        # Imported lazily so merely collecting the suite does not require
        # a CUDA-capable machine.
        from neon.backends.cc2 import GPUTensor
        self.gpt = GPUTensor

    def _min_shape(self, tensor, shape):
        """Pad `shape` with trailing 1s up to the tensor's minimum rank."""
        missing = tensor._min_dims - len(shape)
        return shape + (1, ) * max(0, missing)

    def test_empty_creation(self):
        tensor = self.gpt([])
        assert tensor.shape == self._min_shape(tensor, (0, ))

    def test_1d_creation(self):
        tensor = self.gpt([1, 2, 3, 4])
        assert tensor.shape == self._min_shape(tensor, (4, ))

    def test_2d_creation(self):
        tensor = self.gpt([[1, 2], [3, 4]])
        assert tensor.shape == self._min_shape(tensor, (2, 2))

    def test_2d_ndarray_creation(self):
        tensor = self.gpt(np.array([[1.5, 2.5], [3.3, 9.2],
                                    [0.111111, 5]]))
        assert tensor.shape == (3, 2)

    @nottest  # TODO: add >2 dimension support to cudanet
    def test_higher_dim_creation(self):
        for shape in ((1, 1, 1), (1, 2, 3, 4), (1, 2, 3, 4, 5, 6, 7)):
            assert self.gpt(np.empty(shape)).shape == shape

    def test_str(self):
        tensor = self.gpt([[1, 2], [3, 4]])
        assert str(tensor) == "[[ 1. 2.]\n [ 3. 4.]]"

    def test_scalar_slicing(self):
        tensor = self.gpt([[1, 2], [3, 4]])
        piece = tensor[1, 0]
        assert piece.shape == (1, 1)
        assert_tensor_equal(piece, self.gpt([[3]]))

    def test_range_slicing(self):
        tensor = self.gpt([[1, 2], [3, 4]])
        piece = tensor[0:2, 0]
        assert piece.shape == (2, 1)
        assert_tensor_equal(piece, self.gpt([1, 3]))

    @nottest  # TODO: add scalar assignment to self.gpt class
    def test_scalar_slice_assignment(self):
        tensor = self.gpt([[1, 2], [3, 4]])
        tensor[1, 0] = 9
        assert_tensor_equal(tensor, self.gpt([[1, 2], [9, 4]]))

    def test_asnumpyarray(self):
        tensor = self.gpt([[1, 2], [3, 4]])
        host = tensor.asnumpyarray()
        assert isinstance(host, np.ndarray)
        assert_tensor_equal(host, np.array([[1, 2], [3, 4]]))

    @nottest  # TODO: fix this for self.gpt
    def test_transpose(self):
        tensor = self.gpt([[1, 2], [3, 4]])
        assert_tensor_equal(tensor.transpose(), self.gpt([[1, 3], [2, 4]]))

    def test_fill(self):
        tensor = self.gpt([[1, 2], [3, 4]])
        tensor.fill(-9.5)
        assert_tensor_equal(tensor, self.gpt([[-9.5, -9.5], [-9.5, -9.5]]))
|
import lxml
import requests
def requests_session():
    """
    Get a suitable requests session for use in SmartBot.

    In particular, this sets the `User-Agent` header to the value of
    'SmartBot'.
    """
    session = requests.Session()
    session.headers["User-Agent"] = "SmartBot"
    return session
def _check_content_type(respon | se, content_type="text/html"):
return response.headers.get("Content-Type", "").startswith(content_type)
def get_title(url):
    """Get the title of a website.

    Returns the stripped <title> text on success, "Timeout!" on a request
    timeout, "No title." when the page lacks a <title> element, and None
    (implicitly) for non-200 responses or non-HTML content types.
    """
    # Bug fix: the module-level `import lxml` does NOT import the
    # `lxml.html` submodule, so `lxml.html.fromstring` below could raise
    # AttributeError. Import the submodule explicitly (cheap after the
    # first call -- it is cached in sys.modules).
    import lxml.html
    try:
        page = requests_session().get(url, timeout=5, stream=True)
        if page.status_code == 200 and _check_content_type(page):
            try:
                tree = lxml.html.fromstring(page.text)
            except ValueError:  # lxml seems to have issues with unicode
                tree = lxml.html.fromstring(page.content)
            title = tree.cssselect("title")[0].text_content()
            return title.strip().replace("\n", "").replace("\r", "")
    except requests.exceptions.Timeout:
        return "Timeout!"
    except IndexError:  # no title element
        return "No title."
def sprunge(data):
    """Upload the data to `sprunge.us` (a popular plain-text paste bin)."""
    response = requests_session().post("http://sprunge.us",
                                       data={"sprunge": data})
    return response.text
|
#-*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http: | //www.rennes.supelec.fr/ren/rd/cidre/ |
#+-------------------------------------------------------------- | -------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Georges Bossert <georges.bossert (a) supelec.fr> |
#| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
import uuid
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import NetzobLogger
from netzob.Common.Utils.Decorators import typeCheck
from netzob.Model.Vocabulary.Domain.Variables.AbstractVariable import AbstractVariable
from netzob.Model.Vocabulary.Domain.Parser.VariableParserResult import VariableParserResult
@NetzobLogger
class VariableParserPath(object):
    """This class denotes one parsing result of a variable against a specified content

    A path tracks the bytes consumed and remaining for one parsing
    alternative, together with the VariableParserResults accumulated
    along the way. `self._logger` is injected by the @NetzobLogger
    decorator.
    """
    def __init__(self,
                 variableParser,
                 consumedData,
                 remainingData,
                 originalVariableParserPath=None):
        # Random unique name, used only in logging/debug output.
        self.name = str(uuid.uuid4())
        self.consumedData = consumedData
        self.remainingData = remainingData
        self.variableParser = variableParser
        # Private copy of the parser memory so this path can diverge from
        # sibling paths without side effects.
        self.memory = self.variableParser.memory.duplicate()
        self.originalVariableParserPath = originalVariableParserPath
        self.variableParserResults = []
        # A forked path inherits the results accumulated by its parent.
        if originalVariableParserPath is not None:
            self.variableParserResults.extend(
                originalVariableParserPath.variableParserResults)
    def getValueToParse(self, variable):
        """Returns the value that is assigned to the specified variable"""
        # NOTE(review): no implementation -- this always returns None;
        # confirm whether any caller relies on it.
    def createVariableParserResult(self, variable, parserResult, consumedData,
                                   remainedData):
        # Record the outcome of parsing `variable`; on success, shift the
        # consumed bytes from remaining to consumed on this path.
        variableParserResult = VariableParserResult(variable, parserResult,
                                                    consumedData, remainedData)
        if parserResult:
            self._logger.debug("New parser result attached to path {0}: {1}".
                               format(self, variableParserResult))
            self.remainingData = variableParserResult.remainedData
            if self.consumedData is None:
                self._logger.debug("consumed is none...")
                self.consumedData = variableParserResult.consumedData
            else:
                self.consumedData.extend(variableParserResult.consumedData)
        else:
            self._logger.debug("creation of an invalid parser result.")
        # Invalid results are recorded too, so the path history is complete.
        self.variableParserResults.append(variableParserResult)
        self._logger.debug(
            "After registering new VariablePathResult, Path is {0}".format(
                self))
    def __str__(self):
        return "Path {0} (consumedData={1}, remainingData={2}".format(
            self.name, self.consumedData, self.remainingData)
    @property
    def consumedData(self):
        # Bytes consumed so far along this path.
        return self.__consumedData
    @consumedData.setter
    def consumedData(self, consumedData):
        self.__consumedData = consumedData
    @property
    def memory(self):
        # Memory snapshot private to this path (never None).
        return self.__memory
    @memory.setter
    def memory(self, memory):
        if memory is None:
            raise Exception("Memory cannot be None")
        self.__memory = memory
|
"""
homeassistant.config
~~~~~~~~~~~~~~~~~~~~
Module to help with parsing and generating configuration files.
"""
import logging
import os
from homeassistant.exceptions import HomeAssistantError
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_TEMPERATURE_UNIT, CONF_NAME,
CONF_TIME_ZONE)
import homeassistant.util.location as loc_util
_LOGGER = logging.getLogger(__name__)
# Name of the YAML configuration file searched for in the config directory.
YAML_CONFIG_FILE = 'configuration.yaml'
# Directory (under APPDATA on Windows, else $HOME) holding the config.
CONFIG_DIR_NAME = '.homeassistant'
DEFAULT_CONFIG = (
    # Tuples (attribute, default, auto detect property, description)
    (CONF_NAME, 'Home', None, 'Name of the location where Home Assistant is '
     'running'),
    (CONF_LATITUDE, None, 'latitude', 'Location required to calculate the time'
     ' the sun rises and sets'),
    (CONF_LONGITUDE, None, 'longitude', None),
    (CONF_TEMPERATURE_UNIT, 'C', None, 'C for Celcius, F for Fahrenheit'),
    (CONF_TIME_ZONE, 'UTC', 'time_zone', 'Pick yours from here: http://en.wiki'
     'pedia.org/wiki/List_of_tz_database_time_zones'),
)
# Components written into a freshly generated configuration, mapped to the
# comment emitted above each one in the YAML file.
DEFAULT_COMPONENTS = {
    'introduction': 'Show links to resources in log and frontend',
    'frontend': 'Enables the frontend',
    'discovery': 'Discover some devices automatically',
    'conversation': 'Allows you to issue voice commands from the frontend',
    'history': 'Enables support for tracking state changes over time.',
    'logbook': 'View all events in a logbook',
    'sun': 'Track the sun',
}
def get_default_config_dir():
    """ Put together the default configuration directory based on OS. """
    if os.name == "nt":
        data_dir = os.getenv('APPDATA')
    else:
        data_dir = os.path.expanduser('~')
    return os.path.join(data_dir, CONFIG_DIR_NAME)
def ensure_config_exists(config_dir, detect_location=True):
    """ Ensures a config file exists in given config dir.
    Creating a default one if needed.
    Returns path to the config file. """
    config_path = find_config_file(config_dir)
    # Guard clause: nothing to do when a config file already exists.
    if config_path is not None:
        return config_path
    print("Unable to find configuration. Creating default one in",
          config_dir)
    return create_default_config(config_dir, detect_location)
def create_default_config(config_dir, detect_location=True):
    """ Creates a default configuration file in given config dir.
    Returns path to new config file if success, None if failed. """
    config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
    # Start from the hard-coded defaults.
    info = {}
    for attr, default, *_ in DEFAULT_CONFIG:
        info[attr] = default
    location_info = detect_location and loc_util.detect_location_info()
    if location_info:
        if location_info.use_fahrenheit:
            info[CONF_TEMPERATURE_UNIT] = 'F'
        # Overwrite defaults with whatever the location lookup detected.
        for attr, default, prop, _ in DEFAULT_CONFIG:
            if prop is not None:
                info[attr] = getattr(location_info, prop) or default
    # Writing files with YAML does not create the most human readable results
    # So we're hard coding a YAML template.
    try:
        with open(config_path, 'w') as config_file:
            config_file.write("homeassistant:\n")
            for attr, _, _, description in DEFAULT_CONFIG:
                if info[attr] is None:
                    continue
                if description:
                    config_file.write(" # {}\n".format(description))
                config_file.write(" {}: {}\n".format(attr, info[attr]))
            config_file.write("\n")
            for component, description in DEFAULT_COMPONENTS.items():
                config_file.write("# {}\n".format(description))
                config_file.write("{}:\n\n".format(component))
        return config_path
    except IOError:
        print('Unable to create default configuration file', config_path)
        return None
def find_config_file(config_dir):
    """ Looks in given directory for supported config files. """
    candidate = os.path.join(config_dir, YAML_CONFIG_FILE)
    if os.path.isfile(candidate):
        return candidate
    return None
def load_config_file(config_path):
    """ Loads given config file. """
    # Only YAML is supported at the moment; delegate directly.
    return load_yaml_config_file(config_path)
def load_yaml_config_file(config_path):
    """ Parse a YAML configuration file.

    Supports a custom `!include other.yaml` tag (resolved relative to the
    including file). Raises HomeAssistantError on parse errors or when the
    top-level document is not a mapping.
    """
    import yaml
    def parse(fname):
        """ Parse a YAML file. """
        try:
            with open(fname, encoding='utf-8') as conf_file:
                # If configuration file is empty YAML returns None
                # We convert that to an empty dict
                # NOTE(review): yaml.load without an explicit Loader can
                # construct arbitrary Python objects from the file; moving
                # to safe_load would also require registering !include on
                # SafeLoader -- confirm before changing.
                return yaml.load(conf_file) or {}
        except yaml.YAMLError:
            error = 'Error reading YAML configuration file {}'.format(fname)
            _LOGGER.exception(error)
            raise HomeAssistantError(error)
    def yaml_include(loader, node):
        """
        Loads another YAML file and embeds it using the !include tag.
        Example:
            device_tracker: !include device_tracker.yaml
        """
        # Included paths are resolved relative to the including file.
        fname = os.path.join(os.path.dirname(loader.name), node.value)
        return parse(fname)
    # Registered on the global default Loader; re-registering the same
    # constructor on every call is harmless.
    yaml.add_constructor('!include', yaml_include)
    conf_dict = parse(config_path)
    if not isinstance(conf_dict, dict):
        _LOGGER.error(
            'The configuration file %s does not contain a dictionary',
            os.path.basename(config_path))
        raise HomeAssistantError()
    return conf_dict
|
from namespace_class import *
# SWIG runtime test for namespace-wrapped classes.
# NOTE: Python 2 syntax (`raise Error, "msg"`); this file cannot run
# under Python 3 as written.
# Constructing Private1 must fail: its constructor is private.
try:
    p = Private1()
    error = 1
except:
    error = 0
if (error):
    raise RuntimeError, "Private1 is private"
# Same check for Private2.
try:
    p = Private2()
    error = 1
except:
    error = 0
if (error):
    raise RuntimeError, "Private2 is private"
# Exercise the namespaced static method and template instantiations
# (BooT_i/BooT_H, FooT_i/FooT_d/FooT_H) exported by the SWIG module.
EulerT3D.toFrame(1,1,1)
b = BooT_i()
b = BooT_H()
f = FooT_i()
f.quack(1)
f = FooT_d()
f.moo(1)
f = FooT_H()
f.foo(Hi)
|
"""
Django settings for BenHoboCo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside | the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Template tag name used by django-solo to fetch singleton model instances.
GET_SOLO_TEMPLATE_TAG_NAME = 'get_solo'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = 'b&r86v3qyzx=d^8p8k4$c!#imhb+jys*$g@yxz8#vt83@r-va_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# NOTE: Local server has to be in the first position!
# NOTE(review): Django strips the port from the Host header before
# matching against ALLOWED_HOSTS, so entries containing ':8000'/':41011'
# will never match -- confirm and drop the port suffixes.
ALLOWED_HOSTS = [
    '127.0.0.1:8000',
    'cs410.cs.ualberta.ca:41011',
]
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'crispy_forms',
    'solo',
    'core',
    'south',
    'images',
    'posts',
    'authors',
    'friends',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'BenHoboCo.urls'
WSGI_APPLICATION = 'BenHoboCo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# NOTE(review): database credentials are hard-coded here; prefer loading
# them from the environment or a non-committed settings file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME':'helix',
        'USER':'myuser',
        'PASSWORD':'mypass',
        'HOST':'leago.btrinh.com',
        'PORT':'3306',
    }
}
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join( BASE_DIR, "static" )
STATICFILES_DIRS = (
    STATIC_PATH,
)
# Templates
TEMPLATE_PATH = os.path.join( BASE_DIR, "templates")
TEMPLATE_DIRS = (
    TEMPLATE_PATH,
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.contrib.auth.context_processors.auth',
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join( BASE_DIR, 'media' )
LOGIN_URL = '/login/'
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable `context_form_group` FK to the RDRFContext model."""
    # Runs after the 0022 merge migration.
    dependencies = [
        ('rdrf', '0022_merge'),
    ]
    operations = [
        migrations.AddField(
            model_name='rdrfcontext',
            name='context_form_group',
            # blank/null so existing rows need no default; SET_NULL keeps
            # contexts alive when their form group is deleted.
            field=models.ForeignKey(blank=True,
                                    to='rdrf.ContextFormGroup',
                                    null=True,
                                    on_delete=models.SET_NULL),
        ),
    ]
|
# -*- coding: utf8 -*-
# SDAPS - Scripts for data acquisition with paper based surveys
# Copyright(C) 2008, Christoph Simon <post@christoph-simon.eu>
# Copyright(C) 2008, Benjamin Berg <benjamin@sipsolutions.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import buddy
class Sheet(buddy.Object):
    """In-memory record of one filled-out questionnaire sheet."""

    def __init__(self):
        self.survey = None
        self.data = {}
        self.images = []
        self.survey_id = None
        self.questionnaire_id = None
        self.global_id = None
        self.valid = 1
        self.quality = 1

    def add_image(self, image):
        """Attach `image` to this sheet and point it back at us."""
        image.sheet = self
        self.images.append(image)

    def get_page_image(self, page):
        """Return the image for `page`, or None if there is none.

        If a page was (erroneously) scanned more than once, the first
        stored copy wins.
        """
        matches = (img for img in self.images
                   if img.page_number == page and
                   img.survey_id == self.survey.survey_id)
        return next(matches, None)
class Image(buddy.Object):
    """One scanned page image (possibly one page of a multi-page TIFF)."""

    def __init__(self):
        # Back-reference to the owning Sheet (set by Sheet.add_image).
        self.sheet = None
        self.filename = ''
        self.tiff_page = 0
        self.rotated = 0
        self.raw_matrix = None
        # Identification data read from the scanned page.
        self.page_number = None
        self.survey_id = None
        self.global_id = None
        self.questionnaire_id = None
|
# -*- coding: utf-8 -*-
"""
**********************
Minimum Dominating Set
**********************
A dominating set for a graph G = (V, E) is a subset D of V such that every
vertex not in D is joined to at least one member of D by some edge. The
domination number gamma(G) is the number of vertices in a smallest dominating
set for G. Given a graph G = (V, E) find a minimum weight dominating set V'.
http://en.wikipedia.org/wiki/Dominating_set
This is reducible to the minimum set cover problem.
"""
# Copyright (C) 2011-2012 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
import networkx as nx
__all__ = ["min_weighted_dominating_set",
"min_edge_dominating_set"]
__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
def min_weighted_dominating_set(graph, weight=None):
    """Return minimum weight dominating set.

    Parameters
    ----------
    graph : NetworkX graph
      Undirected graph

    weight : None or string, optional (default = None)
      If None, every edge has weight/distance/weight 1. If a string, use this
      edge attribute as the edge weight. Any edge attribute not present
      defaults to 1.

    Returns
    -------
    min_weight_dominating_set : set
      Returns a set of vertices whose weight sum is no more than 1 + log w(V)

    References
    ----------
    .. [1] Vazirani, Vijay Approximation Algorithms (2001)
    """
    if not graph:
        raise ValueError("Expected non-empty NetworkX graph!")
    # min cover = min dominating set
    dom_set = set()
    cost_func = dict((node, nd.get(weight, 1))
                     for node, nd in graph.nodes_iter(data=True))
    vertices = set(graph)
    # Each node "covers" its closed neighborhood: itself plus its neighbors.
    sets = dict((node, set([node]) | set(graph[node])) for node in graph)

    def _cost(subset):
        """Cost effectiveness: total weight per vertex not yet chosen.

        len(subset - dom_set) >= 1 for every set still in `sets`, because a
        node is removed from `sets` at the moment it enters dom_set, so its
        own set always contains at least itself uncovered.
        """
        cost = sum(cost_func[node] for node in subset)
        return cost / float(len(subset - dom_set))

    while vertices:
        # find the most cost effective set, and the vertex for that set.
        # BUG FIX: the key used to be (x[0], _cost(x[1])), which compared
        # node labels first — the greedy choice ignored cost effectiveness
        # entirely (and required orderable node labels).  Select purely by
        # cost effectiveness, as the set-cover greedy algorithm requires.
        dom_node, min_set = min(sets.items(),
                                key=lambda x: _cost(x[1]))
        alpha = _cost(min_set)
        # reduce the cost for the rest
        for node in min_set - dom_set:
            cost_func[node] = alpha
        # add the node to the dominating set and reduce what we must cover
        dom_set.add(dom_node)
        del sets[dom_node]
        vertices = vertices - min_set
    return dom_set
def min_edge_dominating_set(graph):
    """Return minimum weight dominating edge set.

    Parameters
    ----------
    graph : NetworkX graph
      Undirected graph

    Returns
    -------
    min_edge_dominating_set : set
      Returns a set of dominating edges whose size is no more than 2 * OPT.
    """
    if not graph:
        raise ValueError("Expected non-empty NetworkX graph!")
    # Any maximal matching is a 2-approximation of the minimum
    # edge dominating set.
    return nx.maximal_matching(graph)
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/class-2-find-the-torsional-angle
import io
import math
import sys
import unittest
class Vector:
    """A minimal 3-component vector with the operations needed below."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def subtract(self, other):
        """Return the component-wise difference self - other."""
        return Vector(self.x - other.x,
                      self.y - other.y,
                      self.z - other.z)

    def dot_product(self, other):
        """Return the scalar (dot) product."""
        return self.x * other.x + self.y * other.y + self.z * other.z

    def cross_product(self, other):
        # NOTE(review): this returns the *negation* of the conventional
        # cross product (the original computed zero - (self x other)).
        # Both uses in torsional_angle negate, so the cosine is unchanged;
        # behavior preserved as-is.
        cx = self.y * other.z - self.z * other.y
        cy = self.z * other.x - self.x * other.z
        cz = self.x * other.y - self.y * other.x
        return Vector(0 - cx, 0 - cy, 0 - cz)

    def value(self):
        """Return the Euclidean norm of the vector."""
        squares = (math.pow(self.x, 2),
                   math.pow(self.y, 2),
                   math.pow(self.z, 2))
        return math.sqrt(sum(squares))
def torsional_angle(a, b, c, d):
    """Return the torsional (dihedral) angle, in degrees, for points a-b-c-d."""
    ab = a.subtract(b)
    bc = b.subtract(c)
    cd = c.subtract(d)
    # Normals of the two planes (each negated by cross_product; the
    # negations cancel in the cosine).
    normal1 = ab.cross_product(bc)
    normal2 = bc.cross_product(cd)
    cosine = normal1.dot_product(normal2) / (normal1.value() * normal2.value())
    return math.degrees(math.acos(cosine))
def main():
    """Read four 3-D points from stdin and print the torsional angle."""
    points = [Vector(*tuple(map(float, input().strip().split())))
              for _ in range(4)]
    print('%.2f' % torsional_angle(*points))
if __name__ == '__main__':  # pragma: no cover
    main()
class TestCode(unittest.TestCase):
    """Golden-file tests: feed *.in to main() and compare against *.out."""

    def generalized_test(self, which):
        in_path = __file__.replace('.py', f'.{which}.in')
        out_path = __file__.replace('.py', f'.{which}.out')
        sys.stdin = open(in_path, 'r')
        sys.stdout = io.StringIO()
        expected = open(out_path, 'r')
        main()
        self.assertEqual(sys.stdout.getvalue(), expected.read())
        for handle in (sys.stdin, sys.stdout, expected):
            handle.close()

    def test_0(self):
        self.generalized_test('0')
|
# Author: Marcin Serwach
# https://github.com/iblis-ms/conan_gbenchmark
from conans import ConanFile, CMake, tools
import os
import sys
import shutil
class GbenchmarkConan(ConanFile):
    """Conan recipe that downloads, builds and packages Google Benchmark 1.3.0."""

    name = 'GBenchmark'
    version = '1.3.0'
    license = 'MIT Licence'
    url = 'https://github.com/iblis-ms/conan_gbenchmark'
    description = 'Conan.io support for Google Benchmark'
    settings = ['os', 'compiler', 'build_type', 'arch', 'cppstd']
    # Benchmark's CMake switches exposed as Conan options.
    options = {
        'BENCHMARK_ENABLE_TESTING': [True, False],
        'BENCHMARK_ENABLE_LTO': [True, False]
    }
    default_options = ('BENCHMARK_ENABLE_TESTING=False',
                       'BENCHMARK_ENABLE_LTO=False'
                       )
    generators = 'cmake'
    # Directory name produced by unpacking the GitHub release archive.
    source_root = 'benchmark-%s' % version
    exports = 'CMakeLists.txt'
    buildFolder = '_build'

    def source(self):
        # Fetch the pinned release archive from GitHub and unpack it.
        zipFileName = "v%s.zip" % self.version
        tools.download("https://github.com/google/benchmark/archive/%s" % zipFileName, zipFileName)
        tools.unzip(zipFileName)

    def build(self):
        cmake = CMake(self)
        # Forward each Conan option to CMake as an ON/OFF definition.
        # NOTE(review): values are compared against the string "True" —
        # presumably Conan stringifies option values here; confirm.
        for (opt, val) in self.options.items():
            if val is not None:
                cmake.definitions[opt] = 'ON' if val == "True" else 'OFF'
        if self.settings.compiler == 'clang' and str(self.settings.compiler.libcxx) == 'libc++':
            cmake.definitions['BENCHMARK_USE_LIBCXX'] = 'YES'
        # Map the target architecture onto benchmark's 32-bit build flag.
        if str(self.settings.compiler) in ['gcc', 'apple-clang', 'clang', 'sun-cc']:
            if str(self.settings.arch) in ['x86_64', 'sparcv9']:
                cmake.definitions['BENCHMARK_BUILD_32_BITS'] = 'OFF'
            elif str(self.settings.arch) in ['x86', 'sparc']:
                cmake.definitions['BENCHMARK_BUILD_32_BITS'] = 'YES'
        sys.stdout.write("cmake " + str(cmake.command_line) + "\n")
        # NOTE(review): source_dir is the *build* folder — this appears to rely
        # on the exported CMakeLists.txt being present there; verify intent.
        cmake.configure(source_dir=self.build_folder, build_dir=self.buildFolder)
        cmake.build()

    def package(self):
        # Headers, then static/import libraries, then documentation files.
        self.copy(pattern='*.h', dst='include', src='%s/include' % self.source_root, keep_path=True)
        self.copy(pattern='*.lib', dst='lib', src=os.path.join(self.buildFolder,'lib'), keep_path=False)
        self.copy(pattern='*.a', dst='lib', src=os.path.join(self.buildFolder,'lib'), keep_path=False)
        for docPatter in ['*.md', 'LICENSE', 'AUTHORS', 'CONTRIBUTORS']:
            self.copy(pattern=docPatter, dst='doc', src=self.source_root, keep_path=False)

    def package_info(self):
        self.cpp_info.libs = ['benchmark']
        if self.settings.os == 'Windows':
            # benchmark requires Shlwapi when linking on Windows.
            self.cpp_info.libs.extend(['Shlwapi'])
|
# -*- coding | : utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class HackernewsscraperItem(scrapy.Item):
    """Item holding one scraped Hacker News entry."""
    # define the fields for your item here like:
    title = scrapy.Field()  # story title text
    link = scrapy.Field()  # story URL
|
from django import forms
from .models import doctor
class ContactForm(forms.Form):
    """Minimal contact form: a single required free-text message."""
    message = forms.CharField()
class SignUpForm(forms.ModelForm):
    """ModelForm backed by the `doctor` model, exposing only name and email."""
    class Meta:
        model = doctor
        fields = ['full_name', 'email']
class areaForm(forms.Form):
    """Form with a single optional text field."""
    # NOTE(review): 'messag' looks like a typo for 'message', but renaming it
    # would change the submitted form's data key (and any template that
    # references it), so it is left as-is.
    messag = forms.CharField(required=False)
|
import os
from segments import Segment, theme
from utils import colors, glyphs
class CurrentDir(Segment):
    """Segment showing the working directory, with $HOME shortened to '~'."""
    bg = colors.background(theme.CURRENTDIR_BG)
    fg = colors.foreground(theme.CURRENTDIR_FG)

    def init(self, cwd):
        home_dir = os.path.expanduser('~')
        self.text = cwd.replace(home_dir, '~')
class ReadOnly(Segment):
    """Segment showing a glyph when the working directory is not writable."""
    bg = colors.background(theme.READONLY_BG)
    fg = colors.foreground(theme.READONLY_FG)

    def init(self, cwd):
        self.text = ' ' + glyphs.WRITE_ONLY + ' '
        # Hide the segment entirely when the directory is writable.
        if os.access(cwd, os.W_OK):
            self.active = False
class Venv(Segment):
    """Segment showing the name of the active Python virtualenv, if any."""
    bg = colors.background(theme.VENV_BG)
    fg = colors.foreground(theme.VENV_FG)

    def init(self):
        env_path = os.getenv('VIRTUAL_ENV')
        if env_path is None:
            # No virtualenv active: suppress the segment.
            self.active = False
            return
        self.text = glyphs.VIRTUAL_ENV + ' ' + os.path.basename(env_path)
import os
import sys
from .interfaces import Interface
from .search import SearchManager
from .cache im | port CacheManager
from .select import Select
from .help import | Inspector
from .users import Users
from .packages import Packages
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure-Python RSA cryptography implementation.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates. There is no support for p12 files.
"""
from __future__ import absolute_import
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import six
from google.auth import _helpers
from google.auth.crypt import base
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----"
_PKCS1_MARKER = ("-----BEGIN RSA PRIVATE KEY-----", "-----END RSA PRIVATE KEY-----")
_PKCS8_MARKER = ("-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----")
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
    """Converts an iterable of 1s and 0s to bytes.

    Combines the list 8 at a time, treating each group of 8 bits
    as a single byte (most significant bit first, via _POW2 weights).

    Args:
        bit_list (Sequence): Sequence of 1s and 0s.

    Returns:
        bytes: The decoded bytes.
    """
    result = bytearray()
    for offset in six.moves.xrange(0, len(bit_list), 8):
        octet = bit_list[offset : offset + 8]
        result.append(sum(weight * bit
                          for weight, bit in six.moves.zip(_POW2, octet)))
    return bytes(result)
class RSAVerifier(base.Verifier):
    """Verifies RSA cryptographic signatures using public keys.

    Args:
        public_key (rsa.key.PublicKey): The public key used to verify
            signatures.
    """

    def __init__(self, public_key):
        self._pubkey = public_key

    @_helpers.copy_docstring(base.Verifier)
    def verify(self, message, signature):
        message = _helpers.to_bytes(message)
        try:
            return rsa.pkcs1.verify(message, signature, self._pubkey)
        except (ValueError, rsa.pkcs1.VerificationError):
            # A malformed or mismatching signature is reported as a failed
            # verification rather than an exception.
            return False

    @classmethod
    def from_string(cls, public_key):
        """Construct an Verifier instance from a public key or public
        certificate string.

        Args:
            public_key (Union[str, bytes]): The public key in PEM format or the
                x509 public key certificate.

        Returns:
            google.auth.crypt._python_rsa.RSAVerifier: The constructed verifier.

        Raises:
            ValueError: If the public_key can't be parsed.
        """
        public_key = _helpers.to_bytes(public_key)
        is_x509_cert = _CERTIFICATE_MARKER in public_key
        # If this is a certificate, extract the public key info.
        if is_x509_cert:
            der = rsa.pem.load_pem(public_key, "CERTIFICATE")
            asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
            # Trailing bytes after the certificate indicate corrupt input.
            if remaining != b"":
                raise ValueError("Unused bytes", remaining)
            # The subjectPublicKey is an ASN.1 BIT STRING (a sequence of
            # bits), so it must be re-packed into bytes before parsing.
            cert_info = asn1_cert["tbsCertificate"]["subjectPublicKeyInfo"]
            key_bytes = _bit_list_to_bytes(cert_info["subjectPublicKey"])
            pubkey = rsa.PublicKey.load_pkcs1(key_bytes, "DER")
        else:
            pubkey = rsa.PublicKey.load_pkcs1(public_key, "PEM")
        return cls(pubkey)
class RSASigner(base.Signer, base.FromServiceAccountMixin):
    """Signs messages with an RSA private key.

    Args:
        private_key (rsa.key.PrivateKey): The private key to sign with.
        key_id (str): Optional key ID used to identify this private key. This
            can be useful to associate the private key with its associated
            public key or certificate.
    """

    def __init__(self, private_key, key_id=None):
        self._key = private_key
        self._key_id = key_id

    @property
    @_helpers.copy_docstring(base.Signer)
    def key_id(self):
        return self._key_id

    @_helpers.copy_docstring(base.Signer)
    def sign(self, message):
        message = _helpers.to_bytes(message)
        # PKCS#1 v1.5 signature over the SHA-256 digest of the message.
        return rsa.pkcs1.sign(message, self._key, "SHA-256")

    @classmethod
    def from_string(cls, key, key_id=None):
        """Construct an Signer instance from a private key in PEM format.

        Args:
            key (str): Private key in PEM format.
            key_id (str): An optional key id used to identify the private key.

        Returns:
            google.auth.crypt.Signer: The constructed signer.

        Raises:
            ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
                PEM format.
        """
        key = _helpers.from_bytes(key)  # PEM expects str in Python 3
        # marker_id identifies which marker pair matched:
        # 0 -> PKCS#1, 1 -> PKCS#8, None -> neither.
        marker_id, key_bytes = pem.readPemBlocksFromFile(
            six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER
        )
        # Key is in pkcs1 format.
        if marker_id == 0:
            private_key = rsa.key.PrivateKey.load_pkcs1(key_bytes, format="DER")
        # Key is in pkcs8.
        elif marker_id == 1:
            key_info, remaining = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
            if remaining != b"":
                raise ValueError("Unused bytes", remaining)
            # The PKCS#8 wrapper carries a DER-encoded PKCS#1 key inside
            # its privateKey octet string.
            private_key_info = key_info.getComponentByName("privateKey")
            private_key = rsa.key.PrivateKey.load_pkcs1(
                private_key_info.asOctets(), format="DER"
            )
        else:
            raise ValueError("No key could be detected.")
        return cls(private_key, key_id=key_id)
|
# coding: utf-8
import numpy as np
from common.functions import *
from common.util import im2col, col2im
class Relu:
    """Rectified linear unit layer: elementwise max(0, x)."""

    def __init__(self):
        # Boolean mask of inputs that were <= 0, reused by backward().
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)
        result = x.copy()
        result[self.mask] = 0
        return result

    def backward(self, dout):
        # The gradient flows through only where the input was positive.
        dout[self.mask] = 0
        return dout
class Sigmoid:
    """Sigmoid activation layer; caches its output for the backward pass."""

    def __init__(self):
        self.out = None

    def forward(self, x):
        self.out = sigmoid(x)
        return self.out

    def backward(self, dout):
        # d sigma/dx = sigma * (1 - sigma), using the cached forward output.
        return dout * (1.0 - self.out) * self.out
class Affine:
    """Fully-connected layer: out = x @ W + b (accepts tensor inputs)."""

    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None
        self.original_x_shape = None
        # Gradients of weights and bias, filled in by backward().
        self.dW = None
        self.db = None

    def forward(self, x):
        # Flatten any trailing dimensions so tensors become 2-D batches.
        self.original_x_shape = x.shape
        self.x = x.reshape(x.shape[0], -1)
        return np.dot(self.x, self.W) + self.b

    def backward(self, dout):
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)
        dx = np.dot(dout, self.W.T)
        # Restore the caller's original input shape (tensor support).
        return dx.reshape(*self.original_x_shape)
class SoftmaxWithLoss:
    """Softmax activation combined with cross-entropy loss."""

    def __init__(self):
        self.loss = None
        self.y = None  # softmax output
        self.t = None  # teacher labels

    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        return self.loss

    def backward(self, dout=1):
        batch_size = self.t.shape[0]
        if self.t.size == self.y.size:
            # One-hot teacher labels: gradient is (y - t) / N.
            return (self.y - self.t) / batch_size
        # Integer class labels: subtract 1 at each true class index.
        dx = self.y.copy()
        dx[np.arange(batch_size), self.t] -= 1
        return dx / batch_size
class Dropout:
    """Dropout regularization (http://arxiv.org/abs/1207.0580).

    At train time each unit is zeroed with probability dropout_ratio;
    at test time activations are scaled by (1 - dropout_ratio) instead.
    """

    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None

    def forward(self, x, train_flg=True):
        if not train_flg:
            return x * (1.0 - self.dropout_ratio)
        self.mask = np.random.rand(*x.shape) > self.dropout_ratio
        return x * self.mask

    def backward(self, dout):
        # Gradients flow only through the units kept in forward().
        return dout * self.mask
class BatchNormalization:
"""
http://arxiv.org/abs/1502.03167
"""
def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
self.gamma = gamma
self.beta = beta
self.momentum = momentum
self.input_shape = None # Conv層の場合は4次元、全結合層の場合は2次元
# テスト時に使用する平均と分散
self.running_mean = running_mean
self.running_var = running_var
# backward時に使用する中間データ
self.batch_size = None
self.xc = None
self.std = None
self.dgamma = None
self.dbeta = None
def forward(self, x, train_flg=True):
self.input_shape = x.shape
if x.ndim != 2:
N, C, H, W = x.shape
x = x.transpose(1, 0, 2, 3).reshape(C, -1)
out = self.__forward(x, train_flg)
return out.reshape(*self.input_shape)
def __forward(self, x, train_flg):
if self.running_mean is None:
N, D = x.shape
self.running_mean = np.zeros(D)
self.running_var = np.zeros(D)
if train_flg:
mu = x.mean(axis=0)
xc = x - mu
var = np.mean(xc**2, axis=0)
std = np.sqrt(var + 10e-7)
xn = xc / std
self.batch_size = x.shape[0]
self.xc = xc
self.xn = xn
self.std = std
self.running_mean = self.momentum * self.running_mean + (1-self.momentum) * mu
self.running_var = self.momentum * self.running_var + (1-self.momentum) * var
else:
xc = x - self.running_mean
xn = xc / ((np.sqrt(self.running_var + 10e-7)))
out = self.gamma * xn + self.beta
return out
def backward(self, dout):
if dout.ndim != 2:
N, C, H, W = dout.shape
dout = dout.transpose(1, 0, 2, 3).reshape(C, -1)
dx = self.__backward(dout)
dx = dx.reshape(*self.input_shape)
return dx
def __backward(self, dout):
dbeta = dout.sum(axis=0)
dgamma = np.sum(self.xn * dout, axis=0)
dxn = self.gamma * dout
dxc = dxn / self.std
dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
dvar = 0.5 * dstd / self.std
dxc += (2.0 / self.batch_size) * self.xc * dvar
dmu = np.sum(dxc, axis=0)
dx = dxc - dmu / self.batch_size
self.dgamma = dgamma
self.dbeta = dbeta
return dx
class Convolution:
    """Convolution layer implemented as a matrix multiply via im2col."""

    def __init__(self, W, b, stride=1, pad=0):
        """
        Args:
            W: filter weights of shape (FN, C, FH, FW).
            b: per-filter bias.
            stride: filter step size.
            pad: zero-padding width.
        """
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad

        # Intermediate data (used during backward)
        self.x = None
        self.col = None
        self.col_W = None

        # Gradients of the weight and bias parameters
        self.dW = None
        self.db = None

    def forward(self, x):
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        # Output spatial dimensions.
        out_h = 1 + int((H + 2*self.pad - FH) / self.stride)
        out_w = 1 + int((W + 2*self.pad - FW) / self.stride)

        # Unfold input patches into rows so convolution becomes a matmul.
        col = im2col(x, FH, FW, self.stride, self.pad)
        col_W = self.W.reshape(FN, -1).T

        out = np.dot(col, col_W) + self.b
        # (N*out_h*out_w, FN) -> (N, FN, out_h, out_w)
        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)

        self.x = x
        self.col = col
        self.col_W = col_W

        return out

    def backward(self, dout):
        FN, C, FH, FW = self.W.shape
        # (N, FN, out_h, out_w) -> (N*out_h*out_w, FN), mirroring forward.
        dout = dout.transpose(0,2,3,1).reshape(-1, FN)

        self.db = np.sum(dout, axis=0)
        self.dW = np.dot(self.col.T, dout)
        self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)

        dcol = np.dot(dout, self.col_W.T)
        # Fold the column-space gradient back into image shape.
        dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)

        return dx
class Pooling:
    """Max-pooling layer implemented via im2col."""

    def __init__(self, pool_h, pool_w, stride=1, pad=0):
        self.pool_h = pool_h
        self.pool_w = pool_w
        self.stride = stride
        self.pad = pad

        # Cached for backward().
        self.x = None
        self.arg_max = None

    def forward(self, x):
        N, C, H, W = x.shape
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)

        # Unfold each pooling window into a row, then take the row max.
        col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        col = col.reshape(-1, self.pool_h*self.pool_w)

        arg_max = np.argmax(col, axis=1)  # remember the winners for backward
        out = np.max(col, axis=1)
        out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

        self.x = x
        self.arg_max = arg_max

        return out

    def backward(self, dout):
        dout = dout.transpose(0, 2, 3, 1)

        pool_size = self.pool_h * self.pool_w
        # Route each gradient back to the position that won the max.
        dmax = np.zeros((dout.size, pool_size))
        dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten()
        dmax = dmax.reshape(dout.shape + (pool_size,))

        dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
        dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w, self.stride, self.pad)

        return dx
|
from setuptools import setup, find_packages
import os
# Package version, also used in the generated PyPI metadata below.
version = '0.5'

# setuptools configuration for the uwosh.emergency.master Plone add-on.
setup(name='uwosh.emergency.master',
      version=version,
      description="",
      # Long description: the README followed by the change history.
      long_description=open("README.txt").read() + "\n" +
                       open(os.path.join("docs", "HISTORY.txt")).read(),
      # Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[
        "Framework :: Plone",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        ],
      keywords='',
      author='Nathan Van Gheem',
      author_email='vangheem@gmail.com',
      url='http://svn.plone.org/svn/plone/plone.example',
      license='GPL',
      # Ship every discovered package except the bundled ez_setup helper.
      packages=find_packages(exclude=['ez_setup']),
      namespace_packages=['uwosh', 'uwosh.emergency'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'setuptools',
          'uwosh.simpleemergency>=1.1',
          'rsa'
      ],
      entry_points="""
      # -*- Entry points: -*-
      [z3c.autoinclude.plugin]
      target = plone
      """,
      )
|
__author__ = 'harsha'
class ForceReply(object):
    """Telegram ForceReply reply-markup object."""

    def __init__(self, force_reply, selective):
        self.force_reply = force_reply
        self.selective = selective

    def get_force_reply(self):
        # Accessor kept for API compatibility with existing callers.
        return self.force_reply

    def get_selective(self):
        return self.selective

    def __str__(self):
        return str(vars(self))
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All | rights reserved.
from __future__ import absolute_import
from . import caffe_train
from digits import test_utils
def test_caffe_imports():
    """Smoke test: caffe's Python dependencies must be importable."""
    # Skipped entirely unless the caffe framework is enabled in this build.
    test_utils.skipIfNotFramework('caffe')
    import numpy
    import google.protobuf
|
# -*- coding: utf-8 -*-
#
# agnez documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import agnez
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Agnez'
copyright = u'2015, Eder Santana'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = agnez.__version__
# The full version, including alpha/beta/rc tags.
release = agnez.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'agnezdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'agnez.tex',
u'Agnez Documentation',
u'Eder Santana', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'agnez',
| u'Agnez Documentation',
[u'Eder Santana'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, ti | tle, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'agnez',
u'Agnez Documentation',
u'Eder Santana',
'agnez',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to upload images to wikipedia.
Arguments:
-keep Keep the filename as is
-filename Target filename without the namespace prefix
-noverify Do not ask for verification of the upload description if one
is given
-abortonwarn: Abort upload on the specified warning type. If no warning type
is specified, aborts on any warning.
-ignorewarn: Ignores specified upload warnings. If no warning type is
specified, ignores all warnings. Use with caution
-chunked: Upload the file in chunks (more overhead, but restartable). If
no value is specified the chunk size is 1 MiB. The value must
be a number which can be preceded by a suffix. The units are:
No suffix: Bytes
'k': Kilobytes (1000 B)
'M': Megabytes (1000000 B)
'Ki': Kibibytes (1024 B)
'Mi': Mebibytes (1024x1024 B)
The suffixes are case insensitive.
-always Don't ask the user anything. This will imply -keep and
-noverify and require that either -abortonwarn or -ignorewarn
is defined for all. It will also require a valid file name and
description. It'll only overwrite files if -ignorewarn includes
the 'exists' warning.
-recursive When the filename is a directory it also uploads the files from
the subdirectories.
-summary Pick a custom edit summary for the bot.
It is possible to combine -abortonwarn and -ignorewarn so that if the specific
warning is given it won't apply the general one but more specific one. So if it
should ignore specific warnings and abort on the rest it's possible by defining
no warning for -abortonwarn and the specific warnings for -ignorewarn. The
order does not matter. If both are unspecific or a warning is specified by
both, it'll prefer aborting.
If any other arguments are given, the first is either URL, filename or
directory to upload, and the rest is a proposed description to go with the
upload. If none of these are given, the user is asked for the directory, file
or URL to upload. The bot will then upload the image to the wiki.
The script will ask for the location of an image(s), if not given as a
parameter, and for a description.
"""
#
# (C) Rob W.W. Hooft, Andre Engels 2003-2004
# (C) Pywikibot team, 2003-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import math
import os
import re
import pywikibot
from pywikibot.bot import suggest_help
from pywikibot.specialbots import UploadRobot
from datetime import date
from pywikibot import config
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
url = u''
description = []
summary = None
keepFilename = False
always = False
useFilename = None
verifyDescription = True
aborts = set()
ignorewarn = set()
chunk_size = 0
chunk_size_regex = r'^-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$'
chunk_size_regex = re.compile(chunk_size_regex, re.I)
recursive = False
# process all global bot args
# returns a list of non-global args, i.e. args for upload.py
for arg in pywikibot.handle_args(args):
if arg:
if arg == '-always':
keepFilename = True
always = True
verifyDescription = False
elif arg == '-recursive':
recursive = True
elif arg.startswith('-keep'):
keepFilename = True
elif arg.startswith('-filename:'):
useFilename = arg[10:]
elif arg.startswith('-summary'):
summary = arg[9:]
elif arg.startswith('-noverify'):
verifyDescription = False
elif arg.startswith('-abortonwarn'):
if len(arg) > len('-abortonwarn:') and aborts is not True:
aborts.add(arg[len('-abortonwarn:'):])
else:
aborts = True
elif arg.startswith('-ignorewarn'):
if len(arg) > len('-ignorewarn:') and ignorewarn is not True:
ignorewarn.add(arg[len('-ignorewarn:'):])
else:
ignorewarn = True
elif arg.startswith('-chunked'):
match = chunk_size_regex.match(arg)
if match:
if match.group(1): # number was in there
base = float(match.group(1 | ))
if match.group(2): # suffix too
suffix = match.group | (2).lower()
if suffix == "k":
suffix = 1000
elif suffix == "m":
suffix = 1000000
elif suffix == "ki":
suffix = 1 << 10
elif suffix == "mi":
suffix = 1 << 20
else:
pass # huh?
else:
suffix = 1
chunk_size = math.trunc(base * suffix)
else:
chunk_size = 1 << 20 # default to 1 MiB
else:
pywikibot.error('Chunk size parameter is not valid.')
elif url == u'':
url = arg
else:
description.append(arg)
description = u' '.join(description)
    # curly brackets need to be doubled in a formatted string
description = """=={{{{int:filedesc}}}}==
{{{{Information
|description={{{{en|1=Native Israeli pronunciation of this Hebrew word}}}}
|date={0}
|source={{{{own}}}}
|author=[[User:{1}|{1}]]
|permission=
|other versions=
}}}}
=={{{{int:license-header}}}}==
{{{{self|cc-zero}}}}
[[Category:Hebrew pronunciation]]""".format(date.today(),config.usernames['commons']['commons'])
while not ("://" in url or os.path.exists(url)):
if not url:
error = 'No input filename given.'
else:
error = 'Invalid input filename given.'
if not always:
error += ' Try again.'
if always:
url = None
break
else:
pywikibot.output(error)
url = pywikibot.input(u'URL, file or directory where files are now:')
if always and ((aborts is not True and ignorewarn is not True) or
not description or url is None):
additional = ''
missing = []
if url is None:
missing += ['filename']
additional = error + ' '
if description is None:
missing += ['description']
if aborts is not True and ignorewarn is not True:
additional += ('Either -ignorewarn or -abortonwarn must be '
'defined for all codes. ')
additional += 'Unable to run in -always mode'
suggest_help(missing_parameters=missing, additional_text=additional)
return False
if os.path.isdir(url):
file_list = []
for directory_info in os.walk(url):
if not recursive:
# Do not visit any subdirectories
directory_info[1][:] = []
for dir_file in directory_info[2]:
file_list.append(os.path.join(directory_info[0], dir_file))
url = file_list
else:
url = [url]
bot = UploadRobot(url, description=description, useFilename=useFilename,
keepFilename=keepFilename,
verifyDescription=verifyDescription,
aborts=aborts, ignoreWarning=ignorewarn,
chunk_size=chunk_size, always=always,
summary="bot upload",
targetSite=pywikibot.Site |
from time import sleep
from os.path import join
import pytest
from cosmo_tester.framework.examples import get_example_deployment
from cosmo_tester.framework.test_hosts import Hosts, VM
from cosmo_tester.test_suites.snapshots import (
create_snapshot,
download_snapshot,
restore_snapshot,
upload_snapshot,
)
@pytest.fixture(scope='function')
def manager_and_vm(request, ssh_key, module_tmpdir, test_config,
                   logger):
    """Yield a (manager, plain VM) host pair, destroying both afterwards."""
    hosts = Hosts(ssh_key, module_tmpdir, test_config, logger, request, 2)
    hosts.instances[0] = VM('master', test_config)
    hosts.instances[1] = VM('centos_7', test_config)

    # Unpacking doubles as a sanity check that exactly two hosts exist.
    manager, vm = hosts.instances

    all_good = True
    try:
        hosts.create()
        yield hosts.instances
    except Exception:
        all_good = False
        raise
    finally:
        # Tell the teardown whether the test run succeeded.
        hosts.destroy(passed=all_good)
@pytest.fixture(scope='function')
def example(manager_and_vm, ssh_key, tmpdir, logger, test_config):
    """Yield an example deployment targeting the fixture's manager and VM."""
    manager, vm = manager_and_vm
    deployment = get_example_deployment(
        manager, ssh_key, logger, 'inplace_restore', test_config, vm)
    try:
        yield deployment
    finally:
        # Only clean up if the test actually installed the deployment.
        if deployment.installed:
            deployment.uninstall()
def test_inplace_restore(manager_and_vm,
                         example,
                         module_tmpdir,
                         logger):
    """End-to-end check of an in-place (same-host) snapshot restore.

    Installs an example deployment, snapshots it, tears the manager down,
    reinstalls re-using the original certificates, restores the snapshot,
    then verifies agents reconnect and the deployment can be uninstalled.
    """
    manager, vm = manager_and_vm

    snapshot_name = 'inplace_restore_snapshot_{0}'.format(manager.image_type)
    snapshot_path = join(str(module_tmpdir), snapshot_name) + '.zip'

    example.upload_and_verify_install()

    create_snapshot(manager, snapshot_name, logger)
    download_snapshot(manager, snapshot_path, snapshot_name, logger)

    # We need the certs to be the same for the 'new' manager otherwise an
    # inplace upgrade can't properly work
    manager.run_command('mkdir /tmp/ssl_backup')
    manager.run_command('cp /etc/cloudify/ssl/* /tmp/ssl_backup',
                        use_sudo=True)

    manager.teardown()
    # The teardown doesn't properly clean up rabbitmq
    manager.run_command('pkill -f rabbitmq', use_sudo=True)
    manager.run_command('rm -rf /var/lib/rabbitmq', use_sudo=True)

    # Point the fresh install at the backed-up certs so the restored
    # snapshot's credentials and the agents' trust chain stay valid.
    manager.install_config['rabbitmq'] = {
        'ca_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem',
        'cert_path': '/tmp/ssl_backup/rabbitmq-cert.pem',
        'key_path': '/tmp/ssl_backup/rabbitmq-key.pem',
    }
    manager.install_config['prometheus'] = {
        'ca_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem',
        'cert_path': '/tmp/ssl_backup/monitoring_cert.pem',
        'key_path': '/tmp/ssl_backup/monitoring_key.pem',
    }
    manager.install_config['ssl_inputs'] = {
        'external_cert_path': '/tmp/ssl_backup/cloudify_external_cert.pem',
        'external_key_path': '/tmp/ssl_backup/cloudify_external_key.pem',
        'internal_cert_path': '/tmp/ssl_backup/cloudify_internal_cert.pem',
        'internal_key_path': '/tmp/ssl_backup/cloudify_internal_key.pem',
        'ca_cert_path': '/tmp/ssl_backup/cloudify_internal_ca_cert.pem',
        'external_ca_cert_path':
            '/tmp/ssl_backup/cloudify_internal_ca_cert.pem',
    }

    manager.bootstrap()

    upload_snapshot(manager, snapshot_path, snapshot_name, logger)
    restore_snapshot(manager, snapshot_name, logger,
                     admin_password=manager.mgr_password)

    manager.wait_for_manager()

    logger.info('Waiting 35 seconds for agents to reconnect. '
                'Agent reconnect retries are up to 30 seconds apart.')
    sleep(35)

    example.uninstall()
|
from pathlib import Path
import re

from setuptools import setup

setup_dir = Path(__file__).resolve().parent

# Extract the version from tldr.py with a regex instead of importing it,
# so build-time dependencies of the module are not required.
# Path.read_text() closes the file, unlike the previous open().read(),
# which leaked the file handle; an explicit encoding avoids depending on
# the build machine's locale.
version = re.search(
    r'__version__ = "(.*)"',
    Path(setup_dir, 'tldr.py').read_text(encoding='utf-8')
)
if version is None:
    raise SystemExit("Could not determine version to use")
version = version.group(1)

with open('requirements.txt', encoding='utf-8') as f:
    required = f.read().splitlines()

setup(
    name='tldr',
    author='Felix Yan',
    author_email='felixonmars@gmail.com',
    url='https://github.com/tldr-pages/tldr-python-client',
    description='command line client for tldr',
    long_description=Path(setup_dir, 'README.md').read_text(encoding='utf-8'),
    long_description_content_type='text/markdown',
    license='MIT',
    py_modules=['tldr'],
    entry_points={
        "console_scripts": [
            "tldr = tldr:cli"
        ]
    },
    data_files=[('share/man/man1', ['docs/man/tldr.1'])],
    install_requires=required,
    tests_require=[
        'pytest',
        'pytest-runner',
    ],
    version=version,
    python_requires='~=3.6',
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: MIT License",
        "Environment :: Console",
        "Intended Audience :: End Users/Desktop",
        "Natural Language :: English",
        "Operating System :: POSIX :: Linux",
        "Operating System :: POSIX :: SunOS/Solaris",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Utilities",
        "Topic :: System"
    ]
)
|
import unittest
import sys
sys.path.insert(0, "..")
from sections.sections import Circle
import test_sections_generic as generic
class TestPhysicalProperties(generic.TestPhysicalProperties, unittest.TestCase):
    """Physical-property checks for the Circle section with r = 3."""

    @classmethod
    def setUpClass(cls):
        cls.sectclass = Circle
        cls.dimensions = {"r": 3.0}
        cls.rp = (5.0, 4.0)
        # Reference values for a circle of radius 3 (area = pi * r**2, etc.).
        cls.A = 28.274333882308138
        cls._I0 = (63.61725123519331, 63.61725123519331, 0.0)
        cls._I = (63.61725123519331, 63.61725123519331, 0.0)
        cls._cog = (0.0, 0.0)

    def test_check_dimensions(self):
        # A circle must have a strictly positive radius.
        for bad_radius in (-1, 0):
            self.assertRaises(ValueError, self.section.set_dimensions,
                              r=bad_radius)
if __name__ == "__main__":
unittest.main()
|
"""API integration tests factories."""
import factory
from django_common.auth_backends import User
from factory.django import DjangoModelFactory
from samaritan.models import Address, ChurchRole, MembershipType, ChurchGroup, Member
class UserFactory(DjangoModelFactory):
    """Factory for users."""

    # NOTE(review): Faker('name') yields full names containing spaces —
    # confirm that is acceptable for a username field.
    username = factory.Faker('name')

    class Meta:
        model = User
class AddressFactory(DjangoModelFactory):
    """Factory for address."""

    # 'number' and 'post_code' use arbitrary words, not digit strings.
    number = factory.Faker('word')
    street = factory.Faker('name')
    locality = factory.Faker('name')
    city = factory.Faker('name')
    post_code = factory.Faker('word')

    class Meta:
        model = Address
class RoleFactory(DjangoModelFactory):
    """Factory for Roles."""

    # A church role is just a display name plus free-text description.
    name = factory.Faker('name')
    description = factory.Faker('text')

    class Meta:
        model = ChurchRole
class GroupFactory(DjangoModelFactory):
    """Factory for Groups."""

    name = factory.Faker('name')
    description = factory.Faker('text')

    class Meta:
        model = ChurchGroup

    @factory.post_generation
    def members(self, create, extracted, **kwargs):
        # Post-generation hook: when the factory is invoked with
        # ``members=[...]`` (passed in via ``extracted``) and the instance
        # is persisted (``create`` is true), attach each given member to
        # the group's ``members`` relation.
        if create and extracted:
            for member in extracted:
                self.members.add(member)
class MembershipTypeFactory(DjangoModelFactory):
    """Membership Type Factory."""

    # Membership types are simple name/description records.
    name = factory.Faker('name')
    description = factory.Faker('text')

    class Meta:
        model = MembershipType
class MemberFactory(DjangoModelFactory):
    """Factory for Members."""

    # NOTE(review): Faker('name') produces full names; first/last name
    # fields may therefore contain spaces — confirm this is intended.
    first_name = factory.Faker('name')
    last_name = factory.Faker('name')
    date_of_birth = factory.Faker('date_this_century')
    telephone = factory.Faker('random_int', min=0, max=99999999)
    # Related records are built via sub-factories.
    address = factory.SubFactory(AddressFactory)
    email = factory.Faker('email')
    details = factory.Faker('text')
    is_baptised = factory.Faker('boolean')
    baptismal_date = factory.Faker('date_this_century')
    baptismal_place = factory.Faker('name')
    is_member = factory.Faker('boolean')
    membership_type = factory.SubFactory(MembershipTypeFactory)
    membership_date = factory.Faker('date_this_year')
    is_active = factory.Faker('boolean')
    notes = factory.Faker('text')
    church_role = factory.SubFactory(RoleFactory)
    gdpr = factory.Faker('boolean')

    class Meta:
        model = Member
|
"""ユーザー設定用モジュール."""
import os
DEBUG = True
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALLED_APPS = [
'app1',
'app2',
]
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = [
# 'wsgiref.validate.validator',
'ngo.wsgi.RedirectAp | p',
'ngo.wsgi.WSGIHandler',
]
"""
以下のように読み込まれていきます
app = None
app = WSGIHandler(None)
app = RedirectApp(app)
app = validator(app)
"""
# TEMPLATES = ('ngo.backends.Ngo', [])
"""
TEMPLATES = (
'ngo.backends.Ngo',
[os.path.join(BASE_DIR, 'template'), os.path.join(BASE_DIR, 'template2')]
)
"""
TEMPLATES = ('ngo.backends.Jinja2', [])
STATICFILES_DIRS = None
"""
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'static2') |
]
"""
STATIC_URL = 'static'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = 'media'
|
plit
else:
shell_split = shlex.split
try:
while True:
arg = itr.next()
if arg.strip() in ['-I', '-isystem']:
# extract paths handling quotes and multiple paths
paths += shell_split(itr.next())[0].split(os.pathsep)
elif arg.startswith('-I'):
paths += shell_split(arg[2:])[0].split(os.pathsep)
except StopIteration:
pass
return paths
try:
multiarch = subprocess.check_output(['gcc', '-print-multiarch']).strip()
except:
multiarch = None
# search /usr/include and its multiarch subdir (if any) by default
paths = [ os.path.join(os.sep, 'usr', 'include')]
if multiarch:
paths += [ os.path.join(paths[0], multiarch) ]
paths += extract_path_from_cflags(os.environ.get('CPPFLAGS', ''))
# check include paths incorrectly configured in CFLAGS, CXXFLAGS
paths += extract_path_from_cflags(os.environ.get('CFLAGS', ''))
paths += extract_path_from_cflags(os.environ.get('CXXFLAGS', ''))
# check include paths incorrectly configured in makeargs
paths += extract_path_from_cflags(config.makeargs)
paths += extract_path_from_cflags(config.module_autogenargs.get
(module_name, ''))
paths += extract_path_from_cflags(config.module_makeargs.get
(module_name, ''))
paths = list(set(paths)) # remove duplicates
return paths
c_include_search_paths = None
for dep_type, value in sysdeps:
if dep_type.lower() == 'path':
if os.path.split(value)[0]:
if not os.path.isfile(value) and not os.access(value, os.X_OK):
return False
else:
found = False
for path in os.environ.get('PATH', '').split(os.pathsep):
filename = os.path.join(path, value)
if (os.path.isfile(filename) and
os.access(filename, os.X_OK)):
found = True
break
if not found:
return False
elif dep_type.lower() == 'c_include':
if c_include_search_paths is None:
c_include_search_paths = get_c_include_search_paths(config)
found = False
for path in c_include_search_paths:
filename = os.path.join(path, value)
if os.path.isfile(filename):
found = True
break
if not found:
return False
return True
class SystemInstall(object):
    """Base class for system-specific package-installation back ends.

    find_best() returns an instance of the first registered subclass whose
    detect() succeeds; subclasses override install().
    """

    def __init__(self):
        # Prefer PolicyKit's pkexec over sudo for privilege escalation.
        if cmds.has_command('pkexec'):
            self._root_command_prefix_args = ['pkexec']
        elif cmds.has_command('sudo'):
            self._root_command_prefix_args = ['sudo']
        else:
            # Call-form raise is valid in both Python 2 and 3, unlike the
            # old "raise SystemExit, msg" statement syntax used before.
            raise SystemExit(_('No suitable root privilege command found; you should install "pkexec"'))

    def install(self, pkgconfig_ids):
        """Takes a list of pkg-config identifiers and uses a system-specific method to install them."""
        raise NotImplementedError()

    @classmethod
    def find_best(cls):
        # Reading a module-level name requires no "global" declaration.
        for possible_cls in _classes:
            if possible_cls.detect():
                return possible_cls()
# PackageKit dbus interface contains bitfield constants which
# aren't introspectable
PK_PROVIDES_ANY = 1                      # match any "provides" type
PK_FILTER_ENUM_NOT_INSTALLED = 1 << 3    # only packages not yet installed
PK_FILTER_ENUM_NEWEST = 1 << 16          # only the newest version of each package
PK_FILTER_ENUM_ARCH = 1 << 18            # only packages for the native architecture
# NOTE: This class is unfinished
class PKSystemInstall(SystemInstall):
    def __init__(self):
        SystemInstall.__init__(self)
        # GLib main loop used to wait for D-Bus transaction signals;
        # created lazily in _get_new_transaction().
        self._loop = None
        # PackageKit 0.8.1 has API breaks in the D-BUS interface, for now
        # we try to support both it and older PackageKit
        # (None means "not probed yet"; set on the first transaction).
        self._using_pk_0_8_1 = None
        # Lazily-initialised D-Bus system bus and PackageKit proxy.
        self._sysbus = None
        self._pkdbus = None
def _on_pk_message(self, msgtype, msg):
logging.info(_('PackageKit: %s' % (msg,)))
def _on_pk_error(self, msgtype, msg):
logging.error(_('PackageKit: %s' % (msg,)))
    def _get_new_transaction(self):
        """Create a fresh PackageKit D-Bus transaction.

        Lazily initialises the GLib main loop, the system bus and the
        PackageKit proxy on first use, probes once which PackageKit D-Bus
        API generation is available, and returns a
        (transaction interface, transaction proxy) pair wired to this
        object's signal handlers.
        """
        if self._loop is None:
            import glib
            self._loop = glib.MainLoop()
        if self._sysbus is None:
            # dbus.glib must be imported so dbus integrates with the GLib
            # main loop used above.
            import dbus.glib
            import dbus
            self._dbus = dbus
            self._sysbus = dbus.SystemBus()
        if self._pkdbus is None:
            self._pkdbus = dbus.Interface(self._sysbus.get_object('org.freedesktop.PackageKit',
                                                                  '/org/freedesktop/PackageKit'),
                                          'org.freedesktop.PackageKit')
        if self._using_pk_0_8_1 is None:
            # First call: probe for the 0.8.1+ API (CreateTransaction);
            # fall back to the older GetTid-based API and remember which
            # one worked.
            try:
                txn_path = self._pkdbus.CreateTransaction()
                txn = self._sysbus.get_object('org.freedesktop.PackageKit', txn_path)
                self._using_pk_0_8_1 = True
            except dbus.exceptions.DBusException:
                tid = self._pkdbus.GetTid()
                txn = self._sysbus.get_object('org.freedesktop.PackageKit', tid)
                self._using_pk_0_8_1 = False
        elif self._using_pk_0_8_1:
            txn_path = self._pkdbus.CreateTransaction()
            txn = self._sysbus.get_object('org.freedesktop.PackageKit', txn_path)
        else:
            tid = self._pkdbus.GetTid()
            txn = self._sysbus.get_object('org.freedesktop.PackageKit', tid)
        txn_tx = self._dbus.Interface(txn, 'org.freedesktop.PackageKit.Transaction')
        # Route transaction signals to this object; Destroy ends the loop
        # that callers run while waiting for the transaction to finish.
        txn.connect_to_signal('Message', self._on_pk_message)
        txn.connect_to_signal('ErrorCode', self._on_pk_error)
        txn.connect_to_signal('Destroy', lambda *args: self._loop.quit())
        return txn_tx, txn
def install(self, uninstalled_pkgconfigs, uninstalled_filenames):
pk_package_ids = set()
if uninstalled_pkgconfigs:
txn_tx, txn = self._get_new_transaction()
txn.connect_to_signal('Package', lambda info, pkid, summary: pk_package_ids.add(pkid))
if self._using_pk_0_8_1:
txn_tx.WhatProvides(PK_FILTER_ENUM_ARCH | PK_FILTER_ENUM_NEWEST |
PK_FILTER_ENUM_NOT_INSTALLED,
PK_PROVIDES_ANY,
['pkgconfig(%s)' % pkg for modname, pkg in
uninstalled_pkgconfigs])
else:
txn_tx.WhatProvides('arch;newest;~installed', 'any',
['pkgconfig(%s)' % pkg for modname, pkg in
uninstalled_pkgconfigs])
self._loop.run()
del txn, txn_tx
if uninstalled_filenames:
txn_tx, txn = self._get_new_transaction()
txn.connect_to_signal('Package', lambda info, pkid, summary: pk_package_ids.add(pkid))
if self._using_pk_0_8_1:
txn_tx.SearchFiles(PK_FILTER_ENUM_ARCH | PK_FILTER_ENUM_NEWEST |
PK_FILTER_ENUM_NOT_INSTALLED,
[pkg for modname, pkg in
uninstalled_filenames])
else:
txn_tx.SearchFiles('arch;newest;~installed',
[pkg for modname, pkg in
uninstalled_filenames])
self._loop.run()
del txn, txn_tx
# On Fedora 17 a file can be in two packages: the normal package and
# an older compat- package. Don't install compat- packages.
pk_package_ids = [pkg for pkg in pk_package_ids
if not pkg.startswith('compat-')]
if len(pk_package_ids) == 0:
logging.info(_('Nothing available to install'))
return
logging.info(_('Installing:\n %s' % ('\n '.join(pk_package_ids, ))))
txn_tx, txn = self._get_new_transaction()
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.op import Operator
class TestSamplingIdOp(OpTest):
    """Checks the sampling_id op: one column index sampled per input row."""

    def setUp(self):
        self.op_type = "sampling_id"
        self.use_mkldnn = False
        self.init_kernel_type()
        # X: 100 rows over 10 candidate ids (random weights).
        self.X = np.random.random((100, 10)).astype('float32')
        self.inputs = {"X": self.X}
        # NOTE(review): self.Y appears to be a placeholder output; only its
        # length is asserted below — the real values come from the op.
        self.Y = np.random.random(100).astype('int64')
        self.outputs = {'Out': self.Y}
        self.attrs = {'max': 1.0, 'min': 0.0, 'seed': 1}

    def test_check_output(self):
        # Run the op twice with the same fixed seed; outputs must match.
        self.check_output_customized(self.verify_output)
        y1 = self.out
        self.check_output_customized(self.verify_output)
        y2 = self.out

        # check dtype
        assert y1.dtype == np.int64
        assert y2.dtype == np.int64

        # check output is index ids of inputs
        inputs_ids = np.arange(self.X.shape[1])
        assert np.isin(y1, inputs_ids).all()
        assert np.isin(y2, inputs_ids).all()

        self.assertTrue(np.array_equal(y1, y2))
        self.assertEqual(len(y1), len(self.Y))

    def verify_output(self, outs):
        # Stash the op's first output so test_check_output can inspect it.
        out = np.array(outs[0])
        self.out = out

    def init_kernel_type(self):
        # Hook for device/kernel-specific subclasses; default is a no-op.
        pass
class TestSamplingIdShape(unittest.TestCase):
    def test_shape(self):
        """sampling_id returns a rank-1 tensor with one id per input row."""
        x = fluid.layers.data(name='x', shape=[3], dtype='float32')
        output = fluid.layers.sampling_id(x)

        exe = fluid.Executor(place=fluid.CPUPlace())
        exe.run(fluid.default_startup_program())

        feed = {
            'x': np.array(
                [[0.2, 0.3, 0.5], [0.2, 0.3, 0.4]], dtype='float32')
        }
        output_np = exe.run(feed=feed, fetch_list=[output])[0]

        # Static-graph shape: unknown batch dim (-1), rank 1.
        self.assertEqual(output.shape[0], -1)
        self.assertEqual(len(output.shape), 1)
        # Concrete run: two input rows give two sampled ids.
        self.assertEqual(output_np.shape[0], 2)
        self.assertEqual(len(output_np.shape), 1)
if __name__ == "__main__":
unittest.main()
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
UNSCOPED_TOKEN_HEADER = 'UNSCOPED_TOKEN'
UNSCOPED_TOKEN = {
"token": {
"issued_at": "2014-06-09T09:48:59.643406Z",
"extras": {},
"methods": ["token"],
"expires_at": "2014-06-09T10:48:59.643375Z",
"user": {
"OS-FEDERATION": {
"identity_provider": {
"id": "testshib"
},
"protocol": {
"id": "saml2"
},
"groups": [
{"id": "1764fa5cf69a49a4918131de5ce4af9a"}
]
},
"id": "testhib%20user",
"name": "testhib user"
}
}
}
SAML_ENCODING = "<?xml version='1.0' encoding='UTF-8'?>"
TOKEN_SAML_RESPONSE = """
<ns2:Response Destination="http://beta.example.com/Shibboleth.sso/POST/ECP"
ID="8c21de08d2f2435c9acf13e72c982846"
IssueInstant="2015-03-25T14:43:21Z"
Version="2.0">
<saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
http://keystone.idp/v3/OS-FEDERATION/saml2/idp
</saml:Issuer>
<ns2:Status>
<ns2:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</ns2:Status>
<saml:Assertion ID="a5f02efb0bff4044b294b4583c7dfc5d"
IssueInstant="2015-03-25T14:43:21Z" Version="2.0">
<saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
http://keystone.idp/v3/OS-FEDERATION/saml2/idp</saml:Issuer>
<xmldsig:Signature>
<xmldsig:SignedInfo>
<xmldsig:CanonicalizationMethod
Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<xmldsig:SignatureMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
| <xmldsig:Reference URI="#a5f02efb0bff4044b294b4583c7dfc5d">
<xmldsig:Transforms>
<xmldsig:Transform
Algorithm="http://www.w3.org/2000/09/xmldsig#
| enveloped-signature"/>
<xmldsig:Transform
Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
</xmldsig:Transforms>
<xmldsig:DigestMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<xmldsig:DigestValue>
0KH2CxdkfzU+6eiRhTC+mbObUKI=
</xmldsig:DigestValue>
</xmldsig:Reference>
</xmldsig:SignedInfo>
<xmldsig:SignatureValue>
m2jh5gDvX/1k+4uKtbb08CHp2b9UWsLw
</xmldsig:SignatureValue>
<xmldsig:KeyInfo>
<xmldsig:X509Data>
<xmldsig:X509Certificate>...</xmldsig:X509Certificate>
</xmldsig:X509Data>
</xmldsig:KeyInfo>
</xmldsig:Signature>
<saml:Subject>
<saml:NameID>admin</saml:NameID>
<saml:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml:SubjectConfirmationData
NotOnOrAfter="2015-03-25T15:43:21.172385Z"
Recipient="http://beta.example.com/Shibboleth.sso/POST/ECP"/>
</saml:SubjectConfirmation>
</saml:Subject>
<saml:AuthnStatement AuthnInstant="2015-03-25T14:43:21Z"
SessionIndex="9790eb729858456f8a33b7a11f0a637e"
SessionNotOnOrAfter="2015-03-25T15:43:21.172385Z">
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:Password
</saml:AuthnContextClassRef>
<saml:AuthenticatingAuthority>
http://keystone.idp/v3/OS-FEDERATION/saml2/idp
</saml:AuthenticatingAuthority>
</saml:AuthnContext>
</saml:AuthnStatement>
<saml:AttributeStatement>
<saml:Attribute Name="openstack_user"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
<saml:Attribute Name="openstack_roles"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
<saml:Attribute Name="openstack_project"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
</saml:AttributeStatement>
</saml:Assertion>
</ns2:Response>
"""
TOKEN_BASED_SAML = ''.join([SAML_ENCODING, TOKEN_SAML_RESPONSE])
ECP_ENVELOPE = """
<ns0:Envelope
xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:profiles:SSO:ecp"
xmlns:ns2="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
xmlns:xmldsig="http://www.w3.org/2000/09/xmldsig#"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ns0:Header>
<ns1:RelayState
ns0:actor="http://schemas.xmlsoap.org/soap/actor/next"
ns0:mustUnderstand="1">
ss:mem:1ddfe8b0f58341a5a840d2e8717b0737
</ns1:RelayState>
</ns0:Header>
<ns0:Body>
{0}
</ns0:Body>
</ns0:Envelope>
""".format(TOKEN_SAML_RESPONSE)
TOKEN_BASED_ECP = ''.join([SAML_ENCODING, ECP_ENVELOPE])
|
# https://codecombat.com/play/level/if-stravaganza?
#
# Debes Comprar & Equipar:
# 1. Reloj de Pulsera Simple
# 2. Programática II
#
# ¡Derrota a los ogros desde dentro de su pro | pio campamento!
# Defeat the ogres from inside their own camp!
while True:
    enemy = hero.findNearestEnemy()
    # Use an if-statement to check whether an enemy exists.
    # Attack the enemy if it exists:
    if enemy:
        # Fix: the previous "hero.attack(enemy) * 2" multiplied the call's
        # return value by 2 and discarded it — a no-op expression, not a
        # double attack. A single attack call is what actually runs.
        hero.attack(enemy)
|
tr:
field_type = "unicode"
else:
field_type = "unicode"
result.append(dict(name=key, type=field_type))
return result
def apply(self, session=None, **kwargs):
result = super(WMISourceType, self).apply(
result_type=self.type_name, **kwargs)
wmi = session.plugins.wmi(query=self.query)
# The wmi plugin may not exist on non-windows systems.
if wmi == None:
return
for collected in wmi.collect():
match = collected["Result"]
row = {}
# If the user did not specify the fields, we must
# deduce them from the first returned row.
if not self.fields:
self.fields = self._guess_returned_fields(match)
result.fields = self.fields
for column in self.fields:
name = column["name"]
type = column["type"]
value = match.get(name)
if value is None:
continue
row[name] = RekallEFilterArtifacts.allowed_types[
type](value)
result.add_result(**row)
yield result
class RegistryKeySourceType(LiveModeSourceMixin, SourceType):
    """Source which globs registry keys and reports key metadata."""

    _field_definitions = [
        dict(name="keys", default=[]),
        dict(name="supported_os", optional=True,
             default=["Windows"]),
    ]

    _FIELDS = [
        dict(name="st_mtime", type="epoch"),
        dict(name="hive", type="unicode"),
        dict(name="key_name", type="unicode"),
        dict(name="value", type="str"),
        dict(name="value_type", type="str"),
    ]

    def apply(self, session=None, **kwargs):
        collector = super(RegistryKeySourceType, self).apply(
            fields=self._FIELDS, result_type="registry_key", **kwargs)

        hit_iter = session.plugins.glob(
            self.keys, path_sep="\\", filesystem="Reg",
            root="\\").collect()
        for hit in hit_iter:
            # Hits are FileInformation objects; copy out only the fields
            # declared in _FIELDS, coercing each to its declared type.
            info = hit["path"]
            converted = {}
            for spec in self._FIELDS:
                field_name = spec["name"]
                caster = RekallEFilterArtifacts.allowed_types[spec["type"]]
                raw = info.get(field_name)
                if raw is not None:
                    converted[field_name] = caster(raw)
            collector.add_result(**converted)

        yield collector
class RegistryValueSourceType(LiveModeSourceMixin, SourceType):
    """Source which globs specific registry values and reports them."""

    def CheckKeyValuePairs(self, source):
        key_value_pairs = source["key_value_pairs"]
        # Each entry must be a dict carrying both "key" and "value".
        for pair in key_value_pairs:
            well_formed = (isinstance(pair, dict) and
                           "key" in pair and "value" in pair)
            if not well_formed:
                raise errors.FormatError(
                    u"key_value_pairs should consist of dicts with key and "
                    "value items.")

        return key_value_pairs

    _field_definitions = [
        dict(name="key_value_pairs", default=[],
             checker=CheckKeyValuePairs),
        dict(name="supported_os", optional=True,
             default=["Windows"]),
    ]

    _FIELDS = [
        dict(name="st_mtime", type="epoch"),
        dict(name="hive", type="unicode"),
        dict(name="key_name", type="unicode"),
        dict(name="value_name", type="unicode"),
        dict(name="value_type", type="str"),
        dict(name="value", type="str"),
    ]

    def apply(self, session=None, **kwargs):
        collector = super(RegistryValueSourceType, self).apply(
            fields=self._FIELDS, result_type="registry_value", **kwargs)

        # Build one glob per key/value pair.
        globs = [u"%s\\%s" % (pair["key"], pair["value"])
                 for pair in self.key_value_pairs]

        for hit in session.plugins.glob(
                globs, path_sep="\\", filesystem="Reg",
                root="\\").collect():
            info = hit["path"]
            converted = {}
            for spec in self._FIELDS:
                field_name = spec["name"]
                caster = RekallEFilterArtifacts.allowed_types[spec["type"]]
                raw = info.get(field_name)
                if raw is not None:
                    converted[field_name] = caster(raw)
            collector.add_result(**converted)

        yield collector
# This lookup table maps between source type name and concrete implementations
# that we support. Artifacts which contain sources which are not implemented
# will be ignored.
SOURCE_TYPES = {
TYPE_INDICATOR_REKALL: RekallEFilterArtifacts,
definitions.TYPE_INDICATOR_FILE: FileSourceType,
definitions.TYPE_INDICATOR_ARTIFACT_GROUP: ArtifactGroupSourceType,
definitions.TYPE_INDICATOR_WMI_QUERY: WMISourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY: RegistryKeySourceType,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE: RegistryValueSourceType,
}
class ArtifactDefinition(_FieldDefinitionValidator):
"""The main artifact class."""
def CheckLabels(self, art_definition):
"""Ensure labels are defined."""
labels = art_definition.get("labels", [])
# Keep unknown labels around in case callers want to check for complete
# label coverage. In most cases it is desirable to allow users to extend
# labels but when super strict validation is required we want to make
# sure that users dont typo a label.
self.undefined_labels = set(labels).difference(definitions.LABELS)
return labels
def BuildSources(self, art_definition):
sources = art_definition["sources"]
result = []
self.unsupported_source_types = []
for source in sources:
if not isinstance(source, dict):
raise errors.FormatError("Source is not a dict.")
source_type_name = source.get("type")
if source_type_name is None:
raise errors.FormatError("Source has no type.")
source_cls = self.source_types.get(source_type_name)
if source_cls:
result.append(source_cls(source, artifact=self))
else:
self.unsupported_source_types.append(source_type_n | ame)
if not result:
if self.unsupported_source_types:
raise errors.FormatError(
"No supported sources: %s" % (
self.unsupported_source_types,))
raise errors.FormatError("No available sources.")
return result
def SupportedOS(self, art_definition):
| supported_os = art_definition.get(
"supported_os", definitions.SUPPORTED_OS)
undefined_supported_os = set(supported_os).difference(
definitions.SUPPORTED_OS)
if undefined_supported_os:
raise errors.FormatError(
u'supported operating system: {} '
u'not defined.'.format(
u', '.join(undefined_supported_os)))
return supported_os
_field_definitions = [
dict(name="name", type=basestring),
dict(name="doc", type=basestring),
dict(name="labels", default=[],
checker=CheckLabels, optional=True),
dict(name="sources", default=[],
checker=BuildSources),
dict(name="supported_os",
checker=SupportedOS, optional=True),
dict(name="conditions", default=[], optional=True),
dict(name="returned_types", default=[], optional=True),
dict(name="provides", type=list, optional=True),
dict(name="urls", type=list, optional=True)
]
name = "unknown"
source_types = SOURCE_TYPES
def __init__(self, data, source_types=None):
self.source_types = source_types or SOURCE_TYPES
self.data = data
try:
self._LoadDefinition(data)
except Exception as e:
exc_info = sys.exc_info()
raise errors.FormatError(
"Definition %s: %s" % (self.name, e))
    def set_implementations(self, source_types):
        # Re-parse this definition with a different source-type registry;
        # returns a new instance, leaving this one untouched.
        return self.__class__(self.data, source_types)
def _LoadDefinition(self, data):
if not isinstance(d |
import sys

# Probe whether gensim's compiled (Cython) word2vec routines are available.
try:
    from gensim.models.word2vec_inner import FAST_VERSION
except ImportError:
    # No compiled extension: training falls back to the slow numpy path.
    print('Failed... fall back to plain numpy (20-80x slower training than the above)')
    sys.exit(-1)
else:
    print('FAST_VERSION ok ! Retrieved with value ', FAST_VERSION)
    sys.exit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 13:15:05 2017
@author: Vijayasai S
"""
# Use python3
from haversine import distance
from datetime import datetime
from dateutil import tz
import my_dbscan as mydb
import alert_update as au
from pymongo import MongoClient
import pandas as pd
import time
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
def _connect_mongo(host, port, username, password, db):
    """Open a MongoDB connection and return a handle to the *db* database.

    Credentials are embedded in a connection URI only when both a username
    and a password are supplied; otherwise a plain host/port client is used.
    """
    if not (username and password):
        client = MongoClient(host, port)
    else:
        uri = 'mongodb://%s:%s@%s:%s/%s' % (username, password, host, port, db)
        client = MongoClient(uri)
    return client[db]
def read_mongo(db, collection, query=None, host='localhost', port=27017, username=None, password=None, no_id=True):
    """Read a Mongo collection into a pandas DataFrame.

    Args:
        db: database name.
        collection: collection name.
        query: optional Mongo filter document; defaults to match-all.
        host, port, username, password: connection parameters.
        no_id: when True, drop Mongo's internal ``_id`` column.

    Returns:
        pandas.DataFrame with one row per matching document.
    """
    # Fix: replaced the mutable default argument ``query={}`` with None.
    if query is None:
        query = {}
    # Connect to MongoDB
    db = _connect_mongo(host=host, port=port, username=username, password=password, db=db)
    # Make a query to the specific DB and Collection
    cursor = db[collection].find(query)
    # Expand the cursor and construct the DataFrame
    df = pd.DataFrame(list(cursor))
    # Fix: an empty result set yields a DataFrame with no columns at all,
    # so guard the drop instead of unconditionally deleting '_id'.
    if no_id and '_id' in df:
        del df['_id']
    return df
def Generate_data(get_col, set_col1, set_col2, time_delay, year, month, startday, endday, starthr, endhr, startmin, endmin):
    """Walk a time window in ``time_delay``-minute steps, rank vehicle
    movement between consecutive snapshots, then cluster and raise alerts.

    Args:
        get_col: source Mongo collection with raw GPS packets.
        set_col1: destination collection for per-interval distance ranks.
        set_col2: destination collection for cumulative distance ranks.
        time_delay: step size in minutes.
        year..endmin: inclusive bounds of the window to scan.

    NOTE(review): stray "|" extraction artifacts were removed and the loop
    nesting reconstructed from the original (whitespace-mangled) source;
    the clustering/alert stage is assumed to run only once a previous
    snapshot exists (count > 0) — confirm against the original file.
    """
    id_dist = [] ; item_id_dist = []
    main_curr_rank = {} ; tot_rank_curr = {}
    count = 0
    client = MongoClient('localhost', 27017)
    db = client.maximus_db
    for day in range(startday, endday + 1):
        for hr in range(starthr, endhr + 1):
            for mins in range(startmin, endmin + 1, time_delay):
                try:
                    # Compute the end of this interval, rolling over the
                    # minute/hour/day boundaries as needed.
                    mins_next = mins + time_delay
                    hr_next = hr
                    if time_delay + mins > 59:
                        mins_next = (time_delay + mins) - 60
                        hr_next += 1
                        if hr_next > 23:
                            hr_next = 0
                            day += 1
                    # Latest packet per unit inside the interval.
                    items = get_col.find({"$and" :[{"packettimestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"packettimestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}]},{"unit_id":1,"latitude":1,"longitude":1,"_id":0}).sort([("packettimestamp", -1)])
                    utc = datetime(year, month, day, hr, mins)
                    utc = utc.replace(tzinfo=from_zone)
                    # Convert time zone (UTC -> local / IST).
                    ist = utc.astimezone(to_zone)
                    data = [] ; item_id = []
                    for item in items:
                        # Keep only the newest packet per unit (sorted desc).
                        if item["unit_id"] not in item_id:
                            item_id.append(item["unit_id"])
                            data.append(item)
                        # id_dist keeps the last-known position per unit.
                        if item["unit_id"] not in item_id_dist:
                            item_id_dist.append(item["unit_id"])
                            id_dist.append(item)
                    u_id = [ids["unit_id"] for ids in id_dist]
                    if count > 0:
                        rank_curr = {} ; lat_curr = {} ; long_curr = {}
                        for item in item_id:
                            if item in u_id:
                                for i in range(len(id_dist)):
                                    if item == id_dist[i]["unit_id"]:
                                        for j in range(len(data)):
                                            if item == data[j]["unit_id"]:
                                                # Distance travelled since the last snapshot.
                                                dist = distance(id_dist[i]["latitude"],data[j]["latitude"],id_dist[i]["longitude"],data[j]["longitude"])
                                                id_dist[i]["latitude"] = data[j]["latitude"]
                                                id_dist[i]["longitude"] = data[j]["longitude"]
                                                rank_curr[item] = dist
                                                lat_curr[item] = id_dist[i]["latitude"]
                                                long_curr[item] = id_dist[i]["longitude"]
                                                try:
                                                    tot_rank_curr[item] = dist + main_curr_rank[item]
                                                    main_curr_rank[item] = dist + main_curr_rank[item]
                                                except Exception:
                                                    # First sighting: no running total yet.
                                                    tot_rank_curr[item] = dist
                                                    main_curr_rank[item] = dist
                        rank_current_sorted = sorted(rank_curr.values(), reverse=True)
                        tot_rank_current_sorted = sorted(tot_rank_curr.values(), reverse=True)
                        for item in item_id:
                            if rank_curr[item] in rank_current_sorted:
                                set_col1.insert([{"latitude":lat_curr[item], "longitude":long_curr[item], "distance_by_interval":rank_curr[item], "unit_id":item, "rank":rank_current_sorted.index(rank_curr[item])+1,"timestamp":ist}])
                                set_col2.insert([{"latitude":lat_curr[item], "longitude":long_curr[item], "distance_by_interval":tot_rank_curr[item], "unit_id":item, "rank":tot_rank_current_sorted.index(tot_rank_curr[item])+1,"timestamp":ist}])
                        ##########################################################################
                        #          CREATING CLUSTERS AND SAVING IT IN DATABASE                   #
                        ##########################################################################
                        table_to_read_1 = "tapola_rank_15_total"
                        eps = 5.0  # cluster radius in KM
                        ride_id = None
                        coll_1 = db.tapola_rank_15_manual_clustering
                        df_1 = read_mongo("maximus_db", table_to_read_1, {"$and" :[{"timestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"timestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}], "ride_id":ride_id})
                        mydb.manual_DBSCAN(df_1, coll_1, eps)
                        print (ist)
                        print ("Creating cluster using manual dbscan algorithm")
                        ##########################################################################
                        #          CREATING ALERTS AND SAVING IT IN DATABASE                     #
                        ##########################################################################
                        table_to_read_2 = "tapola_rank_15_manual_clustering"
                        df_2 = read_mongo("maximus_db", table_to_read_2, {"$and" :[{"timestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"timestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}], "ride_id":ride_id})
                        coll_2 = db.tapola_rank_15_manual_clus_alert
                        au.Generate_alert(df_2, coll_2)
                        print ("Generating alert and saving in the database\n")
                        time.sleep(1)
                    count += 1
                except KeyError:
                    pass
    return
|
# © 2019 ForgeFlow S.L.
# © 2019 Serpent Consulting Services Pvt. Ltd.
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class StockWarehouse(models.Model):
    _inherit = "stock.warehouse"

    def _default_operating_unit(self):
        """Default for operating_unit_id: the first operating unit of the
        current user belonging to the warehouse company (or the environment
        company when the warehouse has none)."""
        company = self.company_id or self.env.company
        for ou in self.env.user.operating_unit_ids:
            # Fix: the original compared ``company == self.company_id``
            # (trivially true whenever company_id is set) and assigned the
            # field as a side effect; a default must match the OU's company
            # and *return* the value.
            if company == ou.company_id:
                return ou

    operating_unit_id = fields.Many2one(
        comodel_name="operating.unit",
        string="Operating Unit",
        default=_default_operating_unit,
    )

    @api.constrains("operating_unit_id", "company_id")
    def _check_company_operating_unit(self):
        """Reject a warehouse whose company differs from its OU's company."""
        for rec in self:
            # Fix: removed a duplicated ``rec.operating_unit_id`` term.
            if (
                rec.operating_unit_id
                and rec.company_id
                and rec.company_id != rec.operating_unit_id.company_id
            ):
                raise UserError(
                    _(
                        "Configuration error. The Company in the Stock Warehouse"
                        " and in the Operating Unit must be the same."
                    )
                )
class StockWarehouseOrderPoint(models.Model):
    _inherit = "stock.warehouse.orderpoint"

    @api.constrains(
        "warehouse_id",
        "location_id",
        "location_id.operating_unit_id",
        "warehouse_id.operating_unit_id",
    )
    def _check_location(self):
        """Ensure a reordering rule's location belongs to the same operating
        unit as its warehouse."""
        # Fix: removed stray "|" extraction artifacts and reordered the
        # guards so warehouse_id is checked before its attribute is read.
        for rec in self:
            if (
                rec.warehouse_id
                and rec.location_id
                and rec.warehouse_id.operating_unit_id
                and rec.warehouse_id.operating_unit_id
                != rec.location_id.operating_unit_id
            ):
                raise UserError(
                    _(
                        "Configuration Error. The Operating Unit of the "
                        "Warehouse and the Location must be the same. "
                    )
                )
|
REEZE: ('Freeze Restrictions', 'mdi:cancel'),
TYPE_FREEZE_PROTECTION: ('Freeze Protection', 'mdi:weather-snowy'),
TYPE_HOT_DAYS: ('Extra Water on Hot Days', 'mdi:thermometer-lines'),
TYPE_HOURLY: ('Hourly Restrictions', 'mdi:cancel'),
TYPE_MONTH: ('Month Restrictions', 'mdi:cancel'),
TYPE_RAINDELAY: ('Rain Delay Restrictions', 'mdi:cancel'),
TYPE_RAINSENSOR: ('Rain Sensor Restrictions', 'mdi:cancel'),
TYPE_WEEKDAY: ('Weekday Restrictions', 'mdi:cancel'),
}
# Maps each sensor type constant to its (friendly name, icon, unit) tuple;
# a unit of None means the reading is dimensionless.
SENSORS = {
    TYPE_FLOW_SENSOR_CLICK_M3: (
        'Flow Sensor Clicks', 'mdi:water-pump', 'clicks/m^3'),
    TYPE_FLOW_SENSOR_CONSUMED_LITERS: (
        'Flow Sensor Consumed Liters', 'mdi:water-pump', 'liter'),
    TYPE_FLOW_SENSOR_START_INDEX: (
        'Flow Sensor Start Index', 'mdi:water-pump', None),
    TYPE_FLOW_SENSOR_WATERING_CLICKS: (
        'Flow Sensor Clicks', 'mdi:water-pump', 'clicks'),
    TYPE_FREEZE_TEMP: ('Freeze Protect Temperature', 'mdi:thermometer', '°C'),
}
# Platform schemas: which monitored_conditions a user may list; by default
# every known binary sensor / sensor type is enabled.
BINARY_SENSOR_SCHEMA = vol.Schema({
    vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)):
        vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)])
})
SENSOR_SCHEMA = vol.Schema({
    vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
        vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
# Service payload schemas for the rainmachine.* services.
SERVICE_ALTER_PROGRAM = vol.Schema({
    vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_ALTER_ZONE = vol.Schema({
    vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SERVICE_PAUSE_WATERING = vol.Schema({
    vol.Required(CONF_SECONDS): cv.positive_int,
})
SERVICE_START_PROGRAM_SCHEMA = vol.Schema({
    vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_START_ZONE_SCHEMA = vol.Schema({
    vol.Required(CONF_ZONE_ID): cv.positive_int,
    vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN):
        cv.positive_int,
})
SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema({
    vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_STOP_ZONE_SCHEMA = vol.Schema({
    vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SWITCH_SCHEMA = vol.Schema({vol.Optional(CONF_ZONE_RUN_TIME): cv.positive_int})
# One entry per physical controller in the YAML configuration.
CONTROLLER_SCHEMA = vol.Schema({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
    vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL):
        cv.time_period,
    vol.Optional(CONF_BINARY_SENSORS, default={}): BINARY_SENSOR_SCHEMA,
    vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
    vol.Optional(CONF_SWITCHES, default={}): SWITCH_SCHEMA,
})
# Top-level component schema; extra keys are allowed for other components.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_CONTROLLERS):
            vol.All(cv.ensure_list, [CONTROLLER_SCHEMA]),
    }),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the RainMachine component.

    Initializes the component's shared storage and imports any YAML-declared
    controllers that are not already configured as config entries.
    """
    hass.data[DOMAIN] = {DATA_CLIENT: {}, DATA_LISTENER: {}}

    if DOMAIN not in config:
        # Nothing declared in YAML; config entries handle the rest.
        return True

    for controller in config[DOMAIN][CONF_CONTROLLERS]:
        ip_address = controller[CONF_IP_ADDRESS]
        if ip_address in configured_instances(hass):
            continue
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={'source': SOURCE_IMPORT},
                data=controller))

    return True
async def async_setup_entry(hass, config_entry):
"""Set up RainMachine as config entry."""
from regenmaschine import login
from regenmaschine.errors import RainMachineError
_verify_domain_control = verify_domain_control(hass, DOMAIN)
websession = aiohttp_client.async_get_clientsession(hass)
try:
client = await login(
config_entry.data[CONF_IP_ADDRESS],
config_entry.data[CONF_PASSWORD],
websession,
port=config_entry.data[CONF_PORT],
ssl=config_entry.data[CONF_SSL])
rainmachine = RainMachine(
client,
config_entry.data.get(CONF_BINARY_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(BINARY_SENSORS)),
config_entry.data.get(CONF_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(SENSORS)),
config_entry.data.get(CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN))
await rainmachine.async_update()
except RainMachineError as err:
_LOGGER.error('An error occurred: %s', err)
raise ConfigEntryNotReady
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine
for component in ('binary_sensor', 'sensor', 'switch'):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
config_entry, component))
async def refresh(event_time):
"""Refresh RainMachine sensor data."""
_LOGGER.debug('Updating RainMachine sensor data')
await rainmachine.async_update()
async_dispatcher_send(hass, SENSOR_UPDATE_TOPIC)
hass.data[DOMAIN][DATA_LISTENER][
config_entry.entry_id] = async_track_time_interval(
hass,
refresh,
timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL]))
@_verify_domain_control
async def disable_program(call):
"""Disable a program."""
await rainmachine.client.programs.disable(
call.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def disable_zone(call):
"""Disable a zone."""
await rainmachine.client.zones.disable(call.data[CONF_ZONE_ID])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
@_verify_domain_control
async def enable_program(call):
"""Enable a program."""
await rainmachine.client.programs.enable(call.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def enable_zone(call):
"""Enable a zone."""
await rainmachine.client.zones.enable(call.data[CONF_ZONE_ID])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
@_verify_domain_control
async def pause_watering(call):
"""Pause watering for a set number of seconds."""
await rainmachine.client.watering.pause_all(call.data[CONF_SECONDS])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def start_program(call):
"""Start a particular program."""
await rainmachine.client.programs.start(call.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def start_zone(call):
"""Start a particular zone for a certain amount of time."""
await rainmachine.client.zones.start(
call.data[CONF_ZONE_ID], call.data[CONF_ZONE_RUN_TIME])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
@_verify_domain_control
async def stop_all(call):
"""Stop all watering."""
await rainmachine.client.wat | ering.stop_all()
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def stop_program(call):
"""Stop a program."""
aw | ait rainmachine.client.programs.stop(call.data[CONF_PROGRAM_ID])
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
@_verify_domain_control
async def stop_zone(call):
"""Stop a zone."""
await rainmachine.client.zones.stop(call.data[CONF_ZONE_ID])
async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
@_verify_domain_control
async def unpause_watering(call):
"""Unpause watering."""
await rainmachine.client.watering.unpause_all()
async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
for service, method, schema in [
('disable_program', disable_program, SERVICE_ALTER_PROGRAM),
('disable_zone', disable_zone, SERVICE_ALTER_ZONE),
('enable_program', enable_program, SERVICE_ALTER_PROGRAM),
('enable_zone', enable_zone, SERVICE_ALTER_ZONE),
('pause |
import unittest
import os
import os.path
import json
# The folder holding the test data
data_path = os.path.dirname(__file__)
# Set the temporal config for testing
os.environ['TIMEVIS_CONFIG'] = os.path.join(data_path, 'config.py')
import timevis
class TestExperiment(unittest.TestCase):
    """Smoke tests for the /api/v2/experiment endpoint of the timevis app."""
    # Fix: removed stray "|" extraction artifacts that broke the class
    # header and test_post body.

    def setUp(self):
        self.app = timevis.app.test_client()
        self.url = '/api/v2/experiment'

    def test_post(self):
        name = os.path.join(data_path, 'post_exp.json')
        with open(name) as file:
            obj = json.load(file)
        resp = self.app.post(self.url, data=json.dumps(obj),
                             content_type='application/json')
        self.assertIsNotNone(resp.data)

    def test_get(self):
        resp = self.app.get(self.url)
        self.assertIsNotNone(resp.data)

    def test_put(self):
        name = os.path.join(data_path, 'put_exp.json')
        with open(name) as file:
            obj = json.load(file)
        resp = self.app.put(self.url, data=json.dumps(obj),
                            content_type='application/json')
        self.assertIsNotNone(resp.data)


if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun 21. Feb 22:22:07 2016
# by: The Resource Compiler for PyQt (Qt v4.8 | .5)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00 | \x00\x02\xf9\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x24\x00\x00\x00\x24\x08\x06\x00\x00\x00\xe1\x00\x98\x98\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe0\x02\x15\x16\x0a\x03\xbc\xda\x23\x1d\x00\x00\x02\x86\x49\x44\
\x41\x54\x58\xc3\xed\x96\x4d\x48\x54\x51\x18\x86\x9f\x7b\x38\x77\
\xfe\xcc\x66\xb4\xd4\x85\x84\xa4\x44\x85\x8b\x1a\xc8\xc8\x08\x04\
\xa3\xa0\xa0\x36\x31\x42\x8b\xa0\x76\x45\x8b\xa4\x55\xfb\x68\xe3\
\x6e\x5a\xb5\x09\x72\x35\x38\xe0\x46\x83\xa0\x8d\x18\xd1\xb4\x30\
\x66\xa9\x26\x39\x3a\x15\x24\x42\x51\xa6\xe1\xfd\x99\xd3\xc2\x9a\
\x4a\x67\xbc\xf7\x72\xef\x8c\x2e\x7a\x97\x87\x7b\xbe\xfb\x9e\xf7\
\x7c\xdf\xc3\xd1\x72\xf9\x19\xf5\x70\x78\x0c\x21\x43\x68\xf6\x0f\
\x34\x55\xc2\x8b\x14\x02\x25\xa3\x94\x2c\x83\x3b\xd7\x2f\x73\xea\
\xf8\x11\x0d\x1f\x92\xe9\xe1\x31\x9a\x3a\x4f\x20\xcc\x15\xfa\x3b\
\xe1\x50\xd7\x41\x4f\x05\xe6\xe6\x17\x98\x78\x07\xb6\xbe\x87\xf4\
\xf0\x38\x7e\x25\x4b\x80\xd4\x43\xa8\x75\x8b\x8e\x03\x1d\xb4\xb7\
\xb7\x7b\x2a\x60\x18\x06\xcc\x2d\x22\xf5\x10\xb6\x52\xfe\x0d\x6d\
\x5e\xc8\xe7\xf3\x64\xb3\x59\xc7\x8d\x42\x08\x52\xa9\x14\xf1\x78\
\x9c\x20\xb5\xc5\x50\x32\x99\x24\x99\x4c\xba\x2e\x50\x28\x14\x6a\
\x6b\xe8\x7f\x42\x5e\x13\xba\x71\xeb\x2e\xee\xb0\x30\x43\x18\xb8\
\x36\xf8\x40\xf9\xc1\x82\x63\x42\xb7\xef\x3f\xae\x2b\x16\xca\x86\
\x94\x90\xcc\x2f\x14\xb1\x6d\xfb\x9f\x0f\xea\x8d\x85\xb2\x21\x11\
\x6d\xe6\xc5\xfb\xaf\xa8\xc5\x4f\xdb\x6e\xa8\x75\xd3\xff\xb9\x32\
\x4d\x43\x8b\x24\x70\xe2\x7e\xad\x9b\x5e\x7a\x9d\x82\xfa\x25\x04\
\xa8\xd5\x65\x9a\x8d\x02\x4d\x4d\x89\xf2\xda\xd2\x4e\x26\xa4\x6c\
\x83\xd4\xa5\x73\x34\xee\x8d\xb3\x6e\x98\x00\xe4\x66\x47\x77\x2e\
\xa1\x8d\x56\xd2\x78\x3a\x31\xc5\xe8\xf3\x1c\x00\x2d\xad\x2d\xdb\
\x26\xf4\xb6\xb8\x5c\x95\x53\x4f\xc6\x5f\x7b\xe6\x94\xeb\x1e\xaa\
\x86\x85\x74\x66\x32\x50\x4e\xb9\x36\x54\x0d\x0b\x41\x73\xaa\xa2\
\xa1\x86\x58\x84\xd6\x7d\xf1\x5f\x91\x7a\xc3\x82\xdf\x1e\x93\xaa\
\x54\x02\xa5\x40\xdb\xf8\x95\x69\x5a\xf4\xf5\x74\xd3\xd7\xd3\x0d\
\xc0\xbd\xf4\x88\xa7\x13\xfb\x9d\x42\x79\xb6\xf7\x18\x93\x53\x6f\
\x08\xc5\x1a\x11\xe6\x2a\x23\xa3\x33\x48\x5d\xaf\xd8\xf7\x6e\xb0\
\xe0\x3b\xa1\x9b\x57\x2f\x6c\x7b\x0b\x03\x83\x43\xca\x0b\x16\x7c\
\x27\xe4\x95\xd4\x4e\x58\xf0\x9d\x10\x01\xab\xee\x09\x79\xe5\x94\
\x93\x16\x8b\x1f\x41\xe8\xfe\x0c\x55\xc2\x82\xdb\xe7\xcb\x96\x16\
\x10\x21\xb4\x58\xc2\xbd\x21\xd7\x58\x70\xc9\x29\xdf\xcf\x0f\x2f\
\x58\x08\x42\x7e\x0f\xc4\xc0\xe0\x90\x6a\x3b\x7c\xba\x2a\xa7\x00\
\x56\xbe\xaf\xa1\x2a\x3c\x5f\x2d\xd3\xe0\x73\xa4\x0b\x11\xdb\xbf\
\xc1\xb4\xd9\x57\xc1\x1e\xaf\x12\xa7\xc2\x21\x9d\x68\x24\x8c\x94\
\x5b\x7f\x35\x3d\x3d\x4d\xe6\xe5\x87\xda\x8e\xfd\x66\x4e\x5d\x39\
\xdf\xcb\xc0\xc5\x33\xae\xf7\x0b\x76\x99\x76\x9d\x21\x59\x8b\xa2\
\x7f\x73\x2a\x16\x0d\xd7\xd7\x90\x13\xa7\x7e\xf7\x95\x73\x21\x85\
\xa6\x02\x18\xfb\x47\x99\x67\x6a\x72\x6a\xb6\xcc\xa9\x36\xf9\x65\
\x13\xa7\xaa\xcb\xb2\x2c\x96\xcc\x04\x25\xbd\x01\x63\xed\x1b\xfd\
\x27\x8f\xf2\x13\x0c\xc0\x8b\x69\x94\xd1\x9d\xcc\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0f\
\x0f\x12\xef\x33\
\x00\x42\
\x00\x61\x00\x74\x00\x63\x00\x68\x00\x53\x00\x61\x00\x76\x00\x65\x00\x4c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x38\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    # Register the embedded resource data (":/plugins/BatchSaveLayers/icon.png")
    # with Qt's resource system; 0x01 is the resource format version.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    # Undo qInitResources(), releasing the registered resource data.
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

# Generated resource modules register themselves on import.
qInitResources()
|
path': inputPath})
hasharray = json.dumps(hasharray)
msg = '{"sign":{"meta":"sign message", "data":%s}}' % (hasharray)
dbb_client = self.plugin.get_client(self)
if not dbb_client.is_paired():
raise Exception("Could not sign message.")
reply = dbb_client.hid_send_encrypt(msg)
self.handler.show_message(_("Signing message ...\r\n\r\n" \
"To continue, touch the Digital Bitbox's blinking light for 3 seconds.\r\n\r\n" \
"To cancel, briefly touch the blinking light or wait for the timeout."))
reply = dbb_client.hid_send_encrypt(msg) # Send twice, first returns an echo for smart verification (not implemented)
self.handler.clear_dialog()
if 'error' in reply:
raise Exception(reply['error']['message'])
if 'sign' not in reply:
raise Exception("Could not sign message.")
for i in range(4):
sig = chr(27 + i + 4) + reply['sign'][0]['sig'].decode('hex')
try:
addr = public_key_to_p2pkh(reply['sign'][0]['pubkey'].decode('hex'))
if verify_message(addr, sig, message):
break
except Exception:
continue
else:
raise Exception("Could not sign message")
except BaseException as e:
self.give_error(e)
return sig
def sign_transaction(self, tx, password):
    """Sign *tx* on the Digital Bitbox hardware wallet (Python 2 code).

    Builds per-input sighashes, sends them to the device in batches of
    ``self.maxInputs`` (each command is sent twice: the first reply is an
    echo for smart verification, which is not implemented), then fills the
    returned DER-encoded signatures back into the transaction inputs.

    Fixes: removed stray "|" extraction artifacts and replaced the
    Python-3-invalid ``<>`` operator with ``!=`` (equivalent in Python 2).
    """
    if tx.is_complete():
        return
    try:
        p2shTransaction = False
        derivations = self.get_tx_derivations(tx)
        hasharray = []
        pubkeyarray = []
        # Build hasharray from inputs
        for i, txin in enumerate(tx.inputs()):
            if txin['type'] == 'coinbase':
                self.give_error("Coinbase not supported") # should never happen
            if txin['type'] in ['p2sh']:
                p2shTransaction = True
            for x_pubkey in txin['x_pubkeys']:
                if x_pubkey in derivations:
                    index = derivations.get(x_pubkey)
                    inputPath = "%s/%d/%d" % (self.get_derivation(), index[0], index[1])
                    inputHash = Hash(tx.serialize_preimage(i).decode('hex')).encode('hex')
                    hasharray_i = {'hash': inputHash, 'keypath': inputPath}
                    hasharray.append(hasharray_i)
                    break
            else:
                self.give_error("No matching x_key for sign_transaction") # should never happen
        # Sanity check: the device signs either all-P2SH or no-P2SH inputs.
        if p2shTransaction:
            for txinput in tx.inputs():
                if txinput['type'] != 'p2sh':
                    self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
        # Build pubkeyarray from outputs (unused because echo for smart verification not implemented)
        if not p2shTransaction:
            for _type, address, amount in tx.outputs():
                assert _type == TYPE_ADDRESS
                info = tx.output_info.get(address)
                if info is not None:
                    index, xpubs, m = info
                    changePath = self.get_derivation() + "/%d/%d" % index
                    changePubkey = self.derive_pubkey(index[0], index[1])
                    pubkeyarray_i = {'pubkey': changePubkey, 'keypath': changePath}
                    pubkeyarray.append(pubkeyarray_i)
        # Build sign command, batching inputs to the device limit.
        dbb_signatures = []
        steps = math.ceil(1.0 * len(hasharray) / self.maxInputs)
        for step in range(int(steps)):
            hashes = hasharray[step * self.maxInputs : (step + 1) * self.maxInputs]
            msg = '{"sign": {"meta":"%s", "data":%s, "checkpub":%s} }' % \
                  (Hash(tx.serialize()).encode('hex'), json.dumps(hashes), json.dumps(pubkeyarray))
            dbb_client = self.plugin.get_client(self)
            if not dbb_client.is_paired():
                raise Exception("Could not sign transaction.")
            reply = dbb_client.hid_send_encrypt(msg)
            if 'error' in reply:
                raise Exception(reply['error']['message'])
            if 'echo' not in reply:
                raise Exception("Could not sign transaction.")
            if steps > 1:
                self.handler.show_message(_("Signing large transaction. Please be patient ...\r\n\r\n" \
                                            "To continue, touch the Digital Bitbox's blinking light for 3 seconds. " \
                                            "(Touch " + str(step + 1) + " of " + str(int(steps)) + ")\r\n\r\n" \
                                            "To cancel, briefly touch the blinking light or wait for the timeout.\r\n\r\n"))
            else:
                self.handler.show_message(_("Signing transaction ...\r\n\r\n" \
                                            "To continue, touch the Digital Bitbox's blinking light for 3 seconds.\r\n\r\n" \
                                            "To cancel, briefly touch the blinking light or wait for the timeout."))
            reply = dbb_client.hid_send_encrypt(msg) # Send twice, first returns an echo for smart verification (not implemented)
            self.handler.clear_dialog()
            if 'error' in reply:
                raise Exception(reply['error']['message'])
            if 'sign' not in reply:
                raise Exception("Could not sign transaction.")
            dbb_signatures.extend(reply['sign'])
        # Fill signatures
        if len(dbb_signatures) != len(tx.inputs()):
            raise Exception("Incorrect number of transactions signed.") # Should never occur
        for i, txin in enumerate(tx.inputs()):
            num = txin['num_sig']
            for pubkey in txin['pubkeys']:
                signatures = filter(None, txin['signatures'])
                if len(signatures) == num:
                    break # txin is complete
                ii = txin['pubkeys'].index(pubkey)
                signed = dbb_signatures[i]
                if signed['pubkey'] != pubkey:
                    continue
                sig_r = int(signed['sig'][:64], 16)
                sig_s = int(signed['sig'][64:], 16)
                sig = sigencode_der(sig_r, sig_s, generator_secp256k1.order())
                txin['signatures'][ii] = sig.encode('hex')
                tx._inputs[i] = txin
    except BaseException as e:
        self.give_error(e, True)
    else:
        print_error("Transaction is_complete", tx.is_complete())
        tx.raw = tx.serialize()
class DigitalBitboxPlugin(HW_PluginBase):
libraries_available = DIGIBOX
keystore_class = DigitalBitbox_KeyStore
client = None
DEVICE_IDS = [
(0x03eb, 0x2402) # Digital Bitbox
]
def __init__(self, parent, config, name):
    # Register the Digital Bitbox USB vendor/product IDs with the device
    # manager, but only when the required libraries could be imported.
    HW_PluginBase.__init__(self, parent, config, name)
    if self.libraries_available:
        self.device_manager().register_devices(self.DEVICE_IDS)
def get_dbb_device(self, device):
    # Open a raw HID handle to the physical device at *device.path*.
    dev = hid.device()
    dev.open_path(device.path)
    return dev
def create_client(self, device, handler):
self.handler = handler
client = self.get_dbb_device(device)
if client <> None:
client = D |
__author__ = 'dimitris'

import os

# Flask configuration.
# Fix: removed stray "|" extraction artifacts that broke two lines below.
# Absolute directory holding this config file; anchors relative paths.
basedir = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = 'knaskndfknasdfiaosifoaignaosdnfoasodfnaodgnas'
PREFERRED_URL_SCHEME = 'https'

# SQLAlchemy configuration: SQLite database stored next to this file.
DB_NAME = 'puzzles.db'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, DB_NAME)

# Cache configuration: in-process simple cache.
CACHE_TYPE = 'simple'
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Package installer for goatools (GO term enrichment tools)."""
from setuptools import setup
from glob import glob

# Trove classifiers describing supported environments and audience.
classifiers = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Topic :: Scientific/Engineering :: Bio-Informatics',
]

# Defines __version__ without importing the (possibly uninstalled) package.
exec(open("goatools/version.py").read())

# Fix: removed stray "|" extraction artifacts from the setup() call and
# from inside the description string.
setup(
    name="goatools",
    version=__version__,
    author='Haibao Tang',
    author_email='tanghaibao@gmail.com',
    packages=['goatools'],
    scripts=glob('scripts/*.py'),
    license='BSD',
    classifiers=classifiers,
    url='http://github.com/tanghaibao/goatools',
    description="Python scripts to find enrichment of GO terms",
    long_description=open("README.rst").read(),
    install_requires=['fisher', 'xlsxwriter', 'statsmodels']
)
|
from StringIO import StringIO
import textwrap
import importer
def test_import_csv():
    """End-to-end check of importer.save_csv: merging a bank export into an
    existing ledger collapses duplicates and preserves existing rows.

    Fix: removed stray "|" extraction artifacts from the def line and from
    the CSV fixture (the expected assertion below shows 'This is a test'
    without the artifact).
    """
    # Existing ledger with one acknowledged ("A") row.
    current = StringIO(textwrap.dedent('''\
        status,qty,type,transaction_date,posting_date,description,amount
        A,,,2016/11/02,,This is a test,$4.53
        '''))
    # Bank export: a duplicated new row plus one row already in `current`.
    new = StringIO(textwrap.dedent('''\
        "Trans Date", "Summary", "Amount"
        5/2/2007, Regal Theaters, $15.99
        11/2/2016, This is a test , $4.53
        5/2/2007, Regal Theaters, $15.99
        '''))
    mapping = {
        'Trans Date': 'transaction_date',
        'Summary': 'description',
        'Amount': 'amount'
    }
    importer.save_csv(current, new, mapping, '%m/%d/%Y')
    lines = current.getvalue().splitlines()
    assert lines[0].rstrip() == 'status,qty,type,transaction_date,posting_date,description,amount'
    # The duplicated export rows collapse into one new ("N") row with qty=2.
    assert lines[1].rstrip() == 'N,2,,2007/05/02,,Regal Theaters,$15.99'
    assert lines[2].rstrip() == 'A,,,2016/11/02,,This is a test,$4.53'
    assert len(lines) == 3
|
ge(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image, avoid_resize_medium=True)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
    # Inverse of the image functional field: store the uploaded image back,
    # resized to the "big" standard size.
    return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
def _is_product_variant(self, cr, uid, ids, name, arg, context=None):
    # Delegates to the overridable _impl hook so subclasses can redefine it.
    return self._is_product_variant_impl(cr, uid, ids, name, arg, context=context)
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
    # A template itself is never a variant; overridden on product.product.
    return dict.fromkeys(ids, False)
def _product_template_price(self, cr, uid, ids, name, arg, context=None):
    """Functional field: pricelist price of each template.

    The pricelist, quantity and partner are read from the context; ids with
    no applicable pricelist price default to 0.0.

    Fixes: removed stray "|" extraction artifacts; added the ``context``
    None-guard used by the sibling methods.
    """
    if context is None:
        context = {}
    plobj = self.pool.get('product.pricelist')
    res = {}
    quantity = context.get('quantity') or 1.0
    pricelist = context.get('pricelist', False)
    partner = context.get('partner', False)
    if pricelist:
        # Support context pricelists specified as display_name or ID for compatibility
        if isinstance(pricelist, basestring):
            pricelist_ids = plobj.name_search(
                cr, uid, pricelist, operator='=', context=context, limit=1)
            pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
        if isinstance(pricelist, (int, long)):
            products = self.browse(cr, uid, ids, context=context)
            qtys = map(lambda x: (x, quantity, partner), products)
            pl = plobj.browse(cr, uid, pricelist, context=context)
            price = plobj._price_get_multi(cr, uid, pl, qtys, context=context)
            for id in ids:
                res[id] = price.get(id, 0.0)
    for id in ids:
        res.setdefault(id, 0.0)
    return res
def get_history_price(self, cr, uid, product_tmpl, company_id, date=None, context=None):
    """Return the cost recorded for *product_tmpl* and *company_id* at or
    before *date* (now by default); 0.0 when no history entry exists."""
    if context is None:
        context = {}
    if date is None:
        date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
    history = self.pool.get('product.price.history')
    domain = [
        ('company_id', '=', company_id),
        ('product_template_id', '=', product_tmpl),
        ('datetime', '<=', date),
    ]
    matches = history.search(cr, uid, domain, limit=1)
    if not matches:
        return 0.0
    return history.read(cr, uid, matches[0], ['cost'], context=context)['cost']
def _set_standard_price(self, cr, uid, product_tmpl_id, value, context=None):
    """Log a standard-price change in product.price.history so the cost of
    a product template can be reconstructed for any past date."""
    if context is None:
        context = {}
    history = self.pool['product.price.history']
    users = self.pool.get('res.users')
    default_company = users.browse(cr, uid, uid, context=context).company_id.id
    # An explicit 'force_company' in the context wins over the user's company.
    vals = {
        'product_template_id': product_tmpl_id,
        'cost': value,
        'company_id': context.get('force_company', default_company),
    }
    history.create(cr, uid, vals, context=context)
def _get_product_variant_count(self, cr, uid, ids, name, arg, context=None):
    """Functional field: number of variants attached to each template."""
    templates = self.browse(cr, uid, ids, context=context)
    return {tmpl.id: len(tmpl.product_variant_ids) for tmpl in templates}
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'product_manager': fields.many2one('res.users','Product Manager'),
'description': fields.text('Description',translate=True,
help="A precise description of the Product, used only for internal information purposes."),
'description_purchase': fields.text('Purchase Description',translate=True,
help="A description of the Product that you want to communicate to your suppliers. "
"This description will be copied to every Purchase Order, Receipt and Supplier Invoice/Refund."),
'description_sale': fields.text('Sale Description',translate=True,
help="A description of the Product that you want to communicate to your customers. "
"This description will be copied to every Sale Order, Delivery Order and Customer Invoice/Refund"),
'type': fields.selection([('consu', 'Consumable'),('service','Service')], 'Product Type', required=True, help="Consumable are product where you don't manage stock, a service is a non-material product provided by a company or an individual."),
'rental': fields.boolean('Can be Rent'),
'categ_id': fields.many2one('product.category','Internal Category', required=True, change_default=True, domain="[('type','=','normal')]" ,help="Select category for the current product"),
'price': fields.function(_product_template_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), help="Base price to compute the customer price. Sometimes called the catalog price."),
'lst_price' : fields.related('list_price', type="float", string='Public Price', digits_compute=dp.get_precision('Product Price')),
'standard_price': fields.property(type = 'float', digits_compute=dp.get_precision('Product Price'),
help="Cost price of the product template used for standard stock valuation in accounting and used as a base price on purchase orders. "
"Expressed in the default unit of measure of the product.",
groups="base.group_user", string="Cost Price"),
'volume': fields.float('Volume', help="The volume in m3."),
'weight': fields.float('Gross Weight', digits_compute=dp.get_precision('Stock Weight'), help="The gross weight in Kg."),
'weight_net': fields.float('Net Weight', digits_compute=dp.get_precision('Stock Weight'), help="The net weight in Kg."),
'warranty': fields.float('Warranty'),
'sale_ok': fields.boolean('Can be Sold', help="Specify if the product can be selected in a sales order line."),
'pricelist_id': fields.dummy(string='Pricelist', relation='product.pricelist', type='many2one'),
'state': fields.selection([('',''),
('draft', 'In Development'),
('sellable','Normal'),
('end','End of Lifecycle'),
('obsolete','Obsolete')], 'Status'),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, help="Default Unit of Measure used for all stock operation."),
'uom_po_id': fields.many2one('product.uom', 'Purchase Unit of Measure', required=True, help="Default Unit of Measure used for purchase orders. It must be in the same category than the default unit of measure."),
'uos_id' : fields.many2one('product.uom', 'Unit of Sale',
help='Specify a unit of measure here if invoicing is made in another unit of measure than inventory. Keep empty to use the default unit of measure.'),
'uos_coeff': fields.float('Unit of Measure -> UOS Coeff', digits_compute= dp.get_precision('Product UoS'),
help='Coefficient to convert default Unit of Measure to Unit of Sale\n'
' uos = uom * coeff'),
'mes_type': fields.selection((('fixed', 'Fixed'), ('variable', 'Variable')), 'Measure Type'),
'company_id': fields.many2one('res.company', 'Company', select=1),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the product, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of |
#!/usr/bin/env python
import sys
from weather import Weather
def main(args):
    """Return a one-line forecast summary for the city named in args[1].

    Falls back to a whimsical "city not found" message when the lookup
    yields no forecast entry for today.
    """
    forecast = Weather().lookup_by_location(args[1]).forecast()
    today = forecast[0]
    if not today:
        return "City not found. It's probably raining meatballs. Please try again."
    # 'text', 'high' and 'low' are string fields in the forecast dict.
    return today['text'] + ' with high of ' + today['high'] + ' and low of ' + today['low']
if __name__ == '__main__':
    # Bug fix: the return value of main() was previously discarded, so the
    # script produced no output at all. Print the forecast summary.
    print(main(sys.argv))
# -*- coding: utf-8 -*-
# This file is | part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.http import HttpResponseRedirect
from shuup import configuration
def toggle_all_seeing(request):
    """Flip the current superuser's "all seeing" configuration flag.

    Non-superusers are bounced straight back to the referring page with
    no change. Always redirects back to HTTP_REFERER.
    """
    redirect = HttpResponseRedirect(request.META["HTTP_REFERER"])
    if request.user.is_superuser:
        key = "is_all_seeing:%d" % request.user.pk
        # Toggle: unset/False becomes True and vice versa.
        configuration.set(None, key, not configuration.get(None, key, False))
    return redirect
|
import sys
from collections import deque

from Catalog.Schema import DBSchema
from Query.Operators.TableScan import TableScan
from Query.Operators.Select import Select
from Query.Operators.Project import Project
from Query.Operators.Union import Union
from Query.Operators.Join import Join
from Query.Operators.GroupBy import GroupBy
from Query.Operators.Sort import Sort
class Plan:
  """
  A data structure implementing query plans.

  Query plans are tree data structures whose nodes are objects
  inheriting from the Query.Operator class.

  Our Query.Plan class tracks the root of the plan tree,
  and provides basic accessors such as the ability to
  retrieve the relations accessed by the query, the query's
  output schema, and plan pretty printing facilities.

  Plan instances delegate their iterator to the root operator,
  enabling direct iteration over query results.

  Plan instances should use the 'prepare' method prior to
  iteration (as done with Database.processQuery), to initialize
  all operators contained in the plan.
  """

  def __init__(self, **kwargs):
    """Build a plan from another plan ('other' kwarg) or a 'root' operator."""
    other = kwargs.get("other", None)
    if other:
      self.fromOther(other)
    elif "root" in kwargs:
      self.root = kwargs["root"]
    else:
      raise ValueError("No root operator specified for query plan")

  def fromOther(self, other):
    """Copy-construct this plan from another plan.

    Bug fix: 'other' was missing from the parameter list, so
    Plan(other=p) raised a TypeError in the original code.
    """
    self.root = other.root

  # Returns the root operator in the query plan.
  # NOTE(review): __init__ stores the operator in the instance attribute
  # 'self.root', which shadows this method on instances; callers read
  # 'plan.root' as an attribute, so this method is effectively unused.
  def root(self):
    return self.root

  # Returns the query result schema.
  def schema(self):
    return self.root.schema()

  # Returns the relations used by the query.
  def relations(self):
    return [op.relationId() for (_, op) in self.flatten() if isinstance(op, TableScan)]

  # Pre-order depth-first flattening of the query tree.
  # Returns a list of (depth, operator) pairs, or None when there is no root.
  def flatten(self):
    if self.root:
      result = []
      pending = deque([(0, self.root)])
      while pending:
        (depth, operator) = pending.popleft()
        children = operator.inputs()
        result.append((depth, operator))
        if children:
          # Bug fix: deque.extendleft inserts in reverse, so feed it the
          # reversed child list to visit children in their original
          # left-to-right order (previously they came out right-to-left).
          pending.extendleft([(depth + 1, c) for c in reversed(children)])
      return result

  # Plan preparation and execution

  # Returns a prepared plan, where every operator has filled in
  # internal parameters necessary for processing data.
  def prepare(self, database):
    if self.root:
      for (_, operator) in self.flatten():
        operator.prepare(database)
      return self
    else:
      raise ValueError("Invalid query plan")

  # Iterator abstraction for query processing.
  # Thus, we can use: "for page in plan: ..."
  def __iter__(self):
    return iter(self.root)

  # Plan and statistics information.

  # Returns a description for the entire query plan, based on the
  # description of each individual operator.
  def explain(self):
    if self.root:
      planDesc = []
      indent = ' ' * 2
      for (depth, operator) in self.flatten():
        planDesc.append(indent * depth + operator.explain())
      return '\n'.join(planDesc)

  # Returns the cost of this query plan. Each operator should determine
  # its own local cost added to the cost of its children.
  def cost(self):
    return self.root.cost()

  # Plan I/O, e.g., for query shipping.
  def pack(self):
    raise NotImplementedError

  def unpack(self):
    raise NotImplementedError
class PlanBuilder:
"""
A query plan builder class that can be used for LINQ-like construction of queries.
A plan builder consists of an operator field, as the running root of the query tree.
Each method returns a plan builder instance, that can be used to further
operators compose with additional builder methods.
A plan builder yields a Query.Plan instance through its finalize() method.
>>> import Database
>>> db = Database.Database()
>>> db.createRelation('employee', [('id', 'int'), ('age', 'int')])
>>> schema = db.relationSchema('employee')
# Populate relation
>>> for tup in [schema.pack(schema.instantiate(i, 2*i+20)) for i in range(20)]:
... _ = db.insertTuple(schema.name, tup)
...
### SELECT * FROM Employee WHERE age < 30
>>> query1 = db.query().fromTable('employee').where("age < 30").finalize()
>>> query1.relations()
['employee']
>>> print(query1.explain()) # doctest: +ELLIPSIS
Select[...,cost=...](predicate='age < 30')
TableScan[...,cost=...](employee)
>>> [schema.unpack(tup).age for page in db.processQuery(query1) for tup in page[1]]
[20, 22, 24, 26, 28]
### SELECT eid FROM Employee WHERE age < 30
>>> query2 = db.query().fromTable('employee').where("age < 30").select({'id': ('id', 'int')}).finalize()
>>> print(query2.explain()) # doctest: +ELLIPSIS
Project[...,cost=...](projections={'id': ('id', 'int')})
Select[...,cost=...](predicate='age < 30')
TableScan[...,cost=...](employee)
>>> [query2.schema().unpack(tup).id for page in db.processQuery(query2) for tup in page[1]]
[0, 1, 2, 3, 4]
### SELECT * FROM Employee UNION ALL Employee
>>> query3 = db.query().fromTable('employee').union(db.query().fromTable('employee')).finalize()
>>> print(query3.explain()) # doctest: +ELLIPSIS
UnionAll[...,cost=...]
TableScan[...,cost=...](employee)
TableScan[...,cost=...](employee)
>>> [query3.schema().unpack(tup).id for page in db.processQuery(query3) for tup in page[1]] # doctest:+ELLIPSIS
[0, 1, 2, ..., 19, 0, 1, 2, ..., 19]
### SELECT * FROM Employee E1 JOIN Employee E2 ON E1.id = E2.id
>>> e2schema = schema.rename('employee2', {'id':'id2', 'age':'age2'})
>>> query4 = db.query().fromTable('employee').join( \
db.query().fromTable('employee'), \
rhsSchema=e2schema, \
method='block-nested-loops', expr='id == id2').finalize()
>>> print(query4.explain()) # doctest: +ELLIPSIS
BNLJoin[...,cost=...](expr='id == id2')
TableScan[...,cost=...](employee)
TableScan[...,cost=...](employee)
>>> q4results = [query4.schema().unpack(tup) for page in db.processQuery(query4) for tup in page[1]]
>>> [(tup.id, tup.id2) for tup in q4results] # doctest:+ELLIPSIS
[(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)]
### Hash join test with the same query.
### SELECT * FROM Employee E1 JOIN Employee E2 ON E1.id = E2.id
>>> e2schema = schema.rename('employee2', {'id':'id2', 'age':'age2'})
>>> keySchema = DBSchema('employeeKey', [('id', 'int')])
>>> keySchema2 = DBSchema('employeeKey2', [('id2', 'int')])
>>> query5 = db.query().fromTable('employee').join( \
db.query().fromTable('employee'), \
rhsSchema=e2schema, \
method='hash', \
lhsHashFn='hash(id) % 4', lhsKeySchema=keySchema, \
rhsHashFn='hash(id2) % 4', rhsKeySchema=keySchema2, \
).finalize()
>>> print(query5.explain()) # doctest: +ELLIPSIS
HashJoin[...,cost=...](lhsKeySchema=employeeKey[(id,int)],rhsKeySchema=employeeKey2[(id2,int)],lhsHashFn='hash(id) % 4',rhsHashFn='hash(id2) % 4')
TableScan[...,cost=...](employee)
TableScan[...,cost=...](employee)
>>> q5results = [query5.schema().unpack(tup) for page in db.processQuery(query5) for tup in page[1]]
>>> [(tup.id, tup.id2) for tup in q5results] # doctest:+ELLIPSIS
[(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)]
>>> sorted([(tup.id, tup.id2) for tup in q5results]) # doctest:+ELLIPSIS
[(0, 0), (1, 1), (2, 2), ..., (18, 18), (19, 19)]
### Group by aggregate query
### SELECT id, max(age) FROM Employee GROUP BY id
>>> aggMinMaxSchema = DBSchema('minmax', [('minAge', 'int'), ('maxAge','int')])
>>> query6 = db.query().fromTable('employee').groupBy( \
groupSchema=keySchema, \
aggSchema=aggMinMaxSchema, \
groupExpr=(lambda e: e.id), \
aggExprs=[(sys.maxsize, lambda acc, e: min(acc, e.age), lambda x: x), \
(0, lambda acc, e: max(acc, e.age), lambda x: x)], \
groupHashFn=(lambda gbVal: hash(gbVal[0]) % 2) \
).finalize()
>>> print(query6.explain()) # doctest: +ELLIPSIS
GroupBy[...,cost=...](groupSchema=employeeKey[(id,int)], aggSchema=minmax[(minAge,int),(maxAge,int)])
TableScan[...,cost=...](employee)
>>> q6results = [query6.schema().unpack(tup) for page in db.processQuery(query6) f |
import streamcorpus as sc
import cuttsum.events
import cuttsum.corpora
from cuttsum.trecdata import SCChunkResource
from cuttsum.pipeline import ArticlesResource, DedupedArticlesResource
import os
import pandas as pd
from datetime import datetime
from collections import defaultdict
import matplotlib.pylab as plt
# Global plotting / pandas display configuration for this report script.
plt.style.use('ggplot')
pd.set_option('display.max_rows', 500)
pd.set_option('display.width', 200)
import locale
# US locale so format_int() groups digits with commas.
locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
def format_int(x):
    """Format integer x with locale-aware thousands grouping.

    Uses locale.format_string instead of locale.format: the latter is
    deprecated (removed in Python 3.12) and the two are identical for a
    single '%d' specifier.
    """
    return locale.format_string("%d", x, grouping=True)
def epoch(dt):
    """Seconds since the Unix epoch for a naive UTC datetime, as an int."""
    delta = dt - datetime(1970, 1, 1)
    return int(delta.total_seconds())
# Resource accessors: raw stream chunks, goose-extracted articles, and
# deduplicated article clusters.
chunk_res = SCChunkResource()
articles_res = ArticlesResource()
ded_articles_res = DedupedArticlesResource()
data = []
# event name -> set of stream ids seen in the unfiltered corpus /
# the 2015 filtered corpus respectively (filled by the two passes below).
event2ids = defaultdict(set)
fltr_event2ids = defaultdict(set)
# Pass 1: hourly article counts per event over the raw corpus.
for event in cuttsum.events.get_events():
    corpus = cuttsum.corpora.get_raw_corpus(event)
    hours = event.list_event_hours()
    hour2ded = defaultdict(int)
    hour2ded_fltr = defaultdict(int)
    ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8)
    if ded_df is not None:
        if event.query_num > 25:
            # 2015 events: remember stream ids for the coverage comparison.
            for ids in ded_df["stream ids"].apply(eval).tolist():
                for id1 in ids:
                    event2ids[event.fs_name()].add(id1)
        for _, row in ded_df.iterrows():
            dt = datetime.utcfromtimestamp(row["earliest"])
            hour = datetime(dt.year, dt.month, dt.day, dt.hour)
            hour2ded[hour] += 1
            if row["match"] == True:
                hour2ded_fltr[hour] += 1
    hour2goose = defaultdict(int)
    for hour in hours:
        path = articles_res.get_chunk_path(event, "goose", hour, corpus)
        if path is None:
            continue
        #print path
        # Chunk filename encodes the article count as its first "-" field.
        fname = os.path.split(path)[1]
        num_goose = int(fname.split("-")[0])
        hour2goose[hour] = num_goose
# goose_df = articles_res.get_stats_df(event, "goose")
# if goose_df is not None:
# for _, row in goose_df.iterrows():
# dt = datetime.utcfromtimestamp(row["hour"])
# hour = datetime(dt.year, dt.month, dt.day, dt.hour)
# hour2goose[hour] = row["goose articles"]
    for hour in hours:
        raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event)
        num_raw_si = 0
        for chunk in raw_chunks:
            # Raw chunk filename encodes its item count as the second field.
            fname = os.path.split(chunk)[1]
            num_raw_si += int(fname.split("-")[1])
        #num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour))
        data.append({
            "event": event.query_id,
            "title": event.title,
            "hour": hour,
            "raw articles": num_raw_si,
            "goose articles": hour2goose[hour],
            "deduped articles": hour2ded[hour],
            "deduped match articles": hour2ded_fltr[hour],
            })
# Pass 2: same counts over the 2015 filtered corpus (query_num >= 26 only).
for event in cuttsum.events.get_events():
    if event.query_num < 26: continue
    corpus = cuttsum.corpora.FilteredTS2015()
    hours = event.list_event_hours()
    hour2ded = defaultdict(int)
    hour2ded_fltr = defaultdict(int)
    ded_df = ded_articles_res.get_stats_df(event, corpus, "goose", .8)
    if ded_df is not None:
        for ids in ded_df["stream ids"].apply(eval).tolist():
            for id1 in ids:
                fltr_event2ids[event.fs_name()].add(id1)
        for _, row in ded_df.iterrows():
            dt = datetime.utcfromtimestamp(row["earliest"])
            hour = datetime(dt.year, dt.month, dt.day, dt.hour)
            hour2ded[hour] += 1
            if row["match"] == True:
                hour2ded_fltr[hour] += 1
    hour2goose = defaultdict(int)
    for hour in hours:
        path = articles_res.get_chunk_path(event, "goose", hour, corpus)
        if path is None:
            continue
        print path
        fname = os.path.split(path)[1]
        num_goose = int(fname.split("-")[0])
        hour2goose[hour] = num_goose
# goose_df = articles_res.get_stats_df(event, "goose")
# if goose_df is not None:
# for _, row in goose_df.iterrows():
# dt = datetime.utcfromtimestamp(row["hour"])
# hour = datetime(dt.year, dt.month, dt.day, dt.hour)
# hour2goose[hour] = row["goose articles"]
    for hour in hours:
        print hour
        raw_chunks = chunk_res.get_chunks_for_hour(hour, corpus, event)
        num_raw_si = 0
        for chunk in raw_chunks:
            fname = os.path.split(chunk)[1]
            #num_raw_si += int(fname.split("-")[1])
            # Filtered-corpus chunk names are unreliable: count items by
            # actually iterating the chunk.
            with sc.Chunk(path=chunk, mode="rb", message=corpus.sc_msg()) as c:
                for si in c:
                    num_raw_si += 1
        #num_fltr_si = len(articles_res.get_si(event, corpus, "goose", hour))
        data.append({
            "event": event.query_id + " (filtered)",
            "title": event.title,
            "hour": hour,
            "raw articles": num_raw_si,
            "goose articles": hour2goose[hour],
            "deduped articles": hour2ded[hour],
            "deduped match articles": hour2ded_fltr[hour],
            })
# Per-event totals, pretty-printed with grouped digits.
df = pd.DataFrame(data)
cols = ["raw articles", "goose articles", "deduped articles",
        "deduped match articles"]
df_sum = df.groupby("event")[cols].sum()
df_sum["raw articles"] = df_sum["raw articles"].apply(format_int)
df_sum["goose articles"] = df_sum["goose articles"].apply(format_int)
df_sum["deduped articles"] = df_sum["deduped articles"].apply(format_int)
df_sum["deduped match articles"] = df_sum["deduped match articles"].apply(format_int)
print df_sum
print
# Overlap between stream ids found in the unfiltered vs. filtered corpora.
coverage = []
for event in cuttsum.events.get_events():
    if event.query_num < 26: continue
    isect = event2ids[event.fs_name()].intersection(fltr_event2ids[event.fs_name()])
    n_isect = len(isect)
    # max(..., 1) guards the ratios against division by zero.
    n_unfltr = max(len(event2ids[event.fs_name()]), 1)
    n_fltr = max(len(fltr_event2ids[event.fs_name()]), 1)
    print event.fs_name()
    print n_isect, float(n_isect) / n_fltr, float(n_isect) / n_unfltr
    coverage.append({
        "event": event.query_id,
        "intersection": n_isect,
        "isect/n_2015F": float(n_isect) / n_fltr,
        "isect/n_2014": float(n_isect) / n_unfltr,
        })
df = pd.DataFrame(coverage)
df_u = df.mean()
df_u["event"] = "mean"
print pd.concat([df, df_u.to_frame().T]).set_index("event")
# NOTE(review): exit() here makes everything below unreachable; presumably
# left in deliberately to stop before the LaTeX/plot stages.
exit()
with open("article_count.tex", "w") as f:
    f.write(df_sum.to_latex())
# Per-event time-series plots (dead code while the exit() above remains).
import os
if not os.path.exists("plots"):
    os.makedirs("plots")
import cuttsum.judgements
ndf = cuttsum.judgements.get_merged_dataframe()
for (event, title), group in df.groupby(["event", "title"]):
    matches = ndf[ndf["query id"] == event]
    #fig = plt.figure()
    group = group.set_index(["hour"])
    #ax = group[["goose articles", "deduped articles", "deduped match articles"]].plot()
    linex = epoch(group.index[10])
    ax = plt.plot(group.index, group["goose articles"], label="goose")
    ax = plt.plot(group.index, group["deduped articles"], label="dedupe")
    ax = plt.plot(group.index, group["deduped match articles"], label="dedupe qmatch")
    # Dashed vertical line at each nugget's first matching update time.
    for nugget, ngroup in matches.groupby("nugget id"):
        times = ngroup["update id"].apply(lambda x: datetime.utcfromtimestamp(int(x.split("-")[0])))
        #ngroup = ngroup.sort("timestamp")
        times.sort()
        times = times.reset_index(drop=True)
        if len(times) == 0: continue
        plt.plot_date(
            (times[0], times[0]),
            (0, plt.ylim()[1]),
            '--', color="black", linewidth=.5, alpha=.5)
    plt.gcf().autofmt_xdate()
    plt.gcf().suptitle(title)
    plt.gcf().savefig(os.path.join("plots", "{}-stream.png".format(event)))
    plt.close("all")
|
from flask import session
from appconfig import *
class UserModel:
    """Service layer for user accounts: authentication, registration,
    lookup and notification e-mail delivery (Python 2 / Flask codebase)."""
    def __init__(self):
        # Imported here rather than at module level to avoid circular imports.
        from models import Tag
        from models import Post
        from models import User
        self.Tag = Tag.Tag
        self.Post = Post.Post
        self.User = User.User
    def login(self, email, password):
        # Verify credentials; on success populate the session and return True.
        user = self.User.query.filter_by(Email = email).first()
        if user and user.check_password(password):
            session['email'] = user.Email
            session['nick'] = user.Nick
            session['Id'] = user.Id
            return True
        return False
    def register(self, email, password, nick, role, id = None):
        # Create a new user, or update an existing one when id is given.
        # Sends a notification e-mail and returns the user's Id.
        from models import db
        if id:
            # Update path: mutate the existing row in place.
            u = self.User.query.filter_by(Id=id).first()
            u.Email = email
            u.Role = role
            u.set_password(password)
            u.Nick = nick
            subject = "You account is updated"
        else:
            u = self.User(nick, email, role, password)
            db.session.add(u)
            subject = "Account is created"
        db.session.commit()
        # NOTE(review): e-mailing the plain-text password is a security liability.
        body = "<p>Hello "+nick+", </p> <p>Your login details for "+URL+" :</p> <p>Username: "+email+" <br />Password: "+password+"</p>"
        self.send_email(subject, email, body, nick)
        return u.Id
    def list(self):
        # All users, or False when the table is empty.
        users = self.User.query.all()
        if users:
            return users
        return False
    def getUser(self, id):
        # The user with the given Id, or False if not found.
        user = self.User.query.filter_by(Id=id).first()
        if user:
            return user
        return False
    def send_email(self, subject, recipients, html_body, nick):
        # Deliver an HTML e-mail through Mandrill; re-raises on API errors.
        import mandrill
        try:
            # SECURITY(review): hard-coded API key; move to configuration.
            mandrill_client = mandrill.Mandrill('ajQ8I81AVELYSYn--6xbmw')
            message = {
                'from_email': ADMINS[0],
                'from_name': 'Blog admin',
                'headers': {'Reply-To': ADMINS[0]},
                'html': html_body,
                'important': True,
                'subject': subject,
                'to': [{'email': recipients,
                        'name': nick,
                        'type': 'to'}],
            }
            # NOTE: 'async' became a keyword in Python 3.7 — this call (and the
            # 'except X, e' syntax below) keep this module Python-2-only.
            result = mandrill_client.messages.send(message=message, async=False)
            '''
            [{'_id': 'abc123abc123abc123abc123abc123',
            'email': 'recipient.email@example.com',
            'reject_reason': 'hard-bounce',
            'status': 'sent'}]
            '''
        except mandrill.Error, e:
            # Mandrill errors are thrown as exceptions
            print 'A mandrill error occurred: %s - %s' % (e.__class__, e)
            # A mandrill error occurred: <class 'mandrill.UnknownSubaccountError'> - No subaccount exists with the id 'customer-123'
            raise
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Simple mathematical captcha."""
from __future__ import unicode_literals
import ast
from base64 import b64encode, b64decode
import hashlib
import operator
from random import SystemRandom
import time
from django.conf import settings
TIMEDELTA = 600
# Supported operators
OPERATORS = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: op | erator.mul,
}
class MathCaptcha(object):
    """Simple arithmetic captcha: a random 'a op b' question plus a
    creation timestamp used for expiry checking."""
    operators = ('+', '-', '*')
    operators_display = {
        '+': '<i class="fa fa-plus"></i>',
        '-': '<i class="fa fa-minus"></i>',
        '*': '<i class="fa fa-times"></i>',
    }
    interval = (1, 10)

    def __init__(self, question=None, timestamp=None):
        # Generate a fresh question / timestamp unless both were supplied
        # (e.g. when reconstructing from a hash).
        self.question = self.generate_question() if question is None else question
        self.timestamp = time.time() if timestamp is None else timestamp

    def generate_question(self):
        """Build a random question string such as '7 + 3'."""
        rng = SystemRandom()
        operation = rng.choice(self.operators)
        low, high = self.interval
        left = rng.randint(low, high)
        right = rng.randint(low, high)
        if operation == '-':
            # Shift the left operand up so the answer is never negative.
            left += high
        return '{0} {1} {2}'.format(left, operation, right)

    @staticmethod
    def from_hash(hashed):
        """Reconstruct a captcha from its hashed representation."""
        question, timestamp = unhash_question(hashed)
        return MathCaptcha(question, timestamp)

    @property
    def hashed(self):
        """Signed, encoded form of the question for round-tripping."""
        return hash_question(self.question, self.timestamp)

    def validate(self, answer):
        """True when the answer is correct and the captcha has not expired."""
        if self.result != answer:
            return False
        return self.timestamp + TIMEDELTA > time.time()

    @property
    def result(self):
        """Numeric answer to the question."""
        return eval_expr(self.question)

    @property
    def display(self):
        """Question with the operator swapped for its HTML icon."""
        left, operation, right = self.question.split()
        return ' '.join((left, self.operators_display[operation], right))
def format_timestamp(timestamp):
    """Render a timestamp as a 10-character zero-padded lowercase hex string."""
    return format(int(timestamp), '>010x')
def checksum_question(question, timestamp):
    """SHA1 hex digest over SECRET_KEY + question + formatted timestamp."""
    payload = '{0}{1}{2}'.format(settings.SECRET_KEY, question, timestamp)
    return hashlib.sha1(payload.encode('utf-8')).hexdigest()
def hash_question(question, timestamp):
    """Encode a question as 'sha1(40 chars) + timestamp(10) + base64(question)'
    so it can later be verified by unhash_question()."""
    stamp = format_timestamp(timestamp)
    digest = checksum_question(question, stamp)
    encoded = b64encode(question.encode('utf-8')).decode('ascii')
    return digest + stamp + encoded
def unhash_question(question):
    """Decode and verify a hashed question.

    Returns (question, timestamp) or raises ValueError when the payload is
    truncated, badly encoded, or fails the checksum.
    """
    if len(question) < 40:
        raise ValueError('Invalid data')
    hexsha = question[:40]
    stamp = question[40:50]
    try:
        text = b64decode(question[50:]).decode('utf-8')
    except (TypeError, UnicodeError):
        raise ValueError('Invalid encoding')
    if checksum_question(text, stamp) != hexsha:
        raise ValueError('Tampered question!')
    return text, int(stamp, 16)
def eval_expr(expr):
    """Evaluate an arithmetic expression used in the captcha.

    >>> eval_expr('2+6')
    8
    >>> eval_expr('2*6')
    12
    """
    tree = ast.parse(expr)
    return eval_node(tree.body[0].value)
def eval_node(node):
    """Recursively evaluate one AST node of a captcha expression.

    Only numbers and whitelisted binary operators are accepted; anything
    else raises ValueError.
    """
    if isinstance(node, ast.BinOp):
        # Binary operation: resolve the operator, then both operands.
        apply_op = eval_node(node.op)
        return apply_op(eval_node(node.left), eval_node(node.right))
    if isinstance(node, ast.Num):
        return node.n
    if isinstance(node, ast.operator):
        # Look up the operator in the OPERATORS whitelist.
        return OPERATORS[type(node)]
    raise ValueError(node)
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this fi | le
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License fo | r the specific language governing permissions and
# limitations under the License.
#
# Tests for Help
from nose.tools import assert_true, assert_equal
from desktop.lib.django_test_util import make_logged_in_client
def test_about():
  """Smoke-test the /help app: landing page, index fallback, footer link."""
  client = make_logged_in_client(username="test", is_superuser=True)
  # The help landing page renders the greeting.
  assert_true("Welcome to Hue!" in client.get('/help/').content)
  # A directory URL defaults to its index.html (from index.md).
  about_default = client.get("/help/about/")
  about_index = client.get("/help/about/index.html")
  assert_equal(about_default.content, about_index.content)
  # The page links back to the per-app help index at the bottom.
  assert_true('href="/help/desktop' in about_default.content)
|
import os
from flask import Flask, request, g
from flask_sqlalchemy import SQLAlchemy
from .decorators import json
# Shared Flask-SQLAlchemy handle; bound to a concrete app in create_app().
db = SQLAlchemy()
def create_app(config_name):
    """Create and wire up a Flask application instance (app factory).

    config_name: basename (without '.py') of a settings file under
    ./config/ to load into app.config.
    Returns the configured Flask app.
    """
    app = Flask(__name__)
    # Apply configuration
    cfg = os.path.join(os.getcwd(), 'config', config_name + '.py')
    app.config.from_pyfile(cfg)
    # initialize extensions
    db.init_app(app)
    # register blueprints
    from .api_v1 import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api/v1')
    # authentication token route
    from .auth import auth
    from .models import User
    @app.route('/api/v1', methods=['GET'])
    @json
    def api_index():
        # Unauthenticated landing payload for the API root.
        return {
            "message": "Welcome to Maisha Goals. Register a new "
            " user or login to get started"}
    @app.route('/auth/register', methods=['POST'])
    @json
    def register_user():
        # Create a user from the POSTed JSON body; 201 + Location header.
        u = User()
        u.import_data(request.json)
        db.session.add(u)
        db.session.commit()
        return {
            'message': 'Your account has been successfuly created'
        }, 201, {'Location': u.get_url()}
    @app.route('/auth/login')
    @auth.login_required
    @json
    def login_user():
        # Issue a fresh token for the credential-verified user (g.user).
        return {'token': g.user.generate_auth_token()}
    return app
|
from com.googlecode.fascinator.api.indexer import SearchRequest
f | rom com.googlecode.fascinator.common.solr import SolrResult
from com.googlecode.fascinator.spring import ApplicationContextProvider
from java.io import ByteArrayInputStream, ByteArrayOutputStream
class MaintenanceData:
    """Jython page script: when the portal is NOT in maintenance mode,
    redirect visitors away from the maintenance page to /home."""
    def __init__(self):
        pass
    def __activate__(self, context):
        # Entry point called by Fascinator with the Velocity context.
        self.velocityContext = context
        self.response = self.velocityContext["response"]
        self.maintenanceModeService = ApplicationContextProvider.getApplicationContext().getBean("maintenanceModeService")
        # NOTE: 'isMaintanceMode' (sic) is the service's actual method name.
        if self.maintenanceModeService.isMaintanceMode() == False:
            self.response.sendRedirect(self.velocityContext["portalPath"]+"/home")
|
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBU | TORS "AS I | S"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for Credential cache library."""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import unittest
import os
import roster_core
from roster_server import credentials
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
class TestCredentialsLibrary(unittest.TestCase):
def setUp(self):
self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
self.cred_instance = credentials.CredCache(self.config_instance,
u'sharrell')
db_instance = self.config_instance.GetDb()
db_instance.CreateRosterDatabase()
data = open(DATA_FILE, 'r').read()
db_instance.StartTransaction()
db_instance.cursor.execute(data)
db_instance.EndTransaction()
db_instance.close()
self.core_instance = roster_core.Core(u'sharrell', self.config_instance)
def is_valid_uuid (self, uuid):
"""
TAKEN FROM THE BLUEZ MODULE
is_valid_uuid (uuid) -> bool
returns True if uuid is a valid 128-bit UUID.
valid UUIDs are always strings taking one of the following forms:
XXXX
XXXXXXXX
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
where each X is a hexadecimal digit (case insensitive)
"""
try:
if len (uuid) == 4:
if int (uuid, 16) < 0: return False
elif len (uuid) == 8:
if int (uuid, 16) < 0: return False
elif len (uuid) == 36:
pieces = uuid.split ("-")
if len (pieces) != 5 or \
len (pieces[0]) != 8 or \
len (pieces[1]) != 4 or \
len (pieces[2]) != 4 or \
len (pieces[3]) != 4 or \
len (pieces[4]) != 12:
return False
[ int (p, 16) for p in pieces ]
else:
return False
except ValueError:
return False
except TypeError:
return False
return True
def testCredentials(self):
self.assertTrue(self.cred_instance.Authenticate(u'sharrell', 'test'))
cred_string = self.cred_instance.GetCredentials(u'sharrell', 'test',
self.core_instance)
self.assertEqual(self.cred_instance.CheckCredential(cred_string,
u'sharrell',
self.core_instance),
u'')
self.assertEqual(self.cred_instance.CheckCredential(u'test', u'sharrell',
self.core_instance),
None)
# Run the test suite when executed directly.
if __name__ == '__main__':
  unittest.main()
|
# Generated by Django 2.0.1 on 2018-01-21 14:23
from django.db import migrations, mod | els
class Migration(migrations.Migration):
    """Adds a 'floating' FloatField (default 0.0) to the SecondObject model."""
    dependencies = [
        ('test_app', '0016_add_filepath'),
    ]
    operations = [
        migrations.AddField(
            model_name='secondobject',
            name='floating',
            field=models.FloatField(default=0.0),
        ),
    ]
|
a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'CoderDojo Twin Cities Python for Minecraft'
copyright = u'by multiple <a href="https://github.com/CoderDojoTC/python-minecraft/graphs/contributors">contributors</a>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CoderDojoTwinCitiesPythonforMinecraftdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'CoderDojoTwinCitiesPythonforMinecraft.tex', u'CoderDojo Twin Cities Python for Minecraft Documentation',
   u'Mike McCallister', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'coderdojotwincitiespythonforminecraft', u'CoderDojo Twin Cities Python for Minecraft Documentation',
     [u'Mike McCallister'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'CoderDojoTwinCitiesPythonforMinecraft', u'CoderDojo Twin Cities Python for Minecraft Documentation',
   u'Mike McCallister', 'CoderDojoTwinCitiesPythonforMinecraft', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "To |
from django.conf.urls import patterns, url

from .views import PhotoListView

# NOTE(review): ``patterns()`` was deprecated in Django 1.8 and removed in
# 1.10; a plain list of url() entries is accepted by old and new Django
# versions alike, so use that instead.  (The ``patterns`` import is kept in
# case other code in this file still references it.)
urlpatterns = [
    # Photo list for a single gallery, selected by slug.
    url(r'^(?P<slug>[\w-]+)/$', PhotoListView.as_view(), name='image'),
]
response")
if deployment_settings.has_module("req"):
div_res.append(menu_box(T("Requests"), "req", "req"))
if deployment_settings.has_module("project"):
div_res.append(menu_box(T("Activities"), "project", "activity"))
#div_additional = DIV(A(DIV(T("Mobile Assess."),
# _class = "menu_box"
# ),
# _href = URL( r=request, c="assess", f= "mobile_basic_assess")
# ))
menu_boxes = DIV(div_sit,
div_arrow_1,
div_dec,
div_arrow_2,
div_res,
#div_additional,
)
# @ToDo: Replace this with an easily-customisable section on the homepage
#settings = db(db.s3_setting.id == 1).select(limitby=(0, 1)).first()
#if settings:
# admin_name = settings.admin_name
# admin_email = settings.admin_email
# admin_tel = settings.admin_tel
#else:
# # db empty and prepopulate is false
# admin_name = T("Sahana Administrator").xml(),
# admin_email = "support@Not Set",
# admin_tel = T("Not Set").xml(),
# Login/Registration forms
self_registration = deployment_settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if 2 not in session.s3.roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML("%s <b>%s</b> %s" % (T("Registered users can"),
T("login"),
| T("to ac | cess the system")))))
if self_registration:
# Provide a Registration box on front page
request.args = ["register"]
auth.messages.submit_button = T("Register")
register_form = auth()
register_div = DIV(H3(T("Register")),
P(XML("%s <b>%s</b>" % (T("If you would like to help, then please"),
T("sign-up now")))))
if session.s3.debug:
validate_script = SCRIPT(_type="text/javascript",
_src=URL(r=request, c="static", f="scripts/S3/jquery.validate.js"))
else:
validate_script = SCRIPT(_type="text/javascript",
_src=URL(r=request, c="static", f="scripts/S3/jquery.validate.pack.js"))
register_div.append(validate_script)
if request.env.request_method == "POST":
post_script = """
// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');
"""
else:
post_script = ""
register_script = SCRIPT("""
$(document).ready(function() {
// Change register/login links to avoid page reload, make back button work.
$('#register-btn').attr('href', '#register');
$('#login-btn').attr('href', '#login');
%s
// Redirect Register Button to unhide
$('#register-btn').click(function() {
// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');
});
// Redirect Login Button to unhide
$('#login-btn').click(function() {
// Hide register form
$('#register_form').addClass('hide');
// Unhide login form
$('#login_form').removeClass('hide');
});
});
""" % post_script)
register_div.append(register_script)
return dict(title = title,
#modules=modules,
menu_boxes=menu_boxes,
#admin_name=admin_name,
#admin_email=admin_email,
#admin_tel=admin_tel,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
# -----------------------------------------------------------------------------
def rapid():
    """ Set/remove rapid data entry flag """
    # Any value other than the literal string "0" enables the flag
    # (a missing "val" defaults to enabled, as before).
    flag = request.vars.get("val", True) != "0"
    session.s3.rapid_data_entry = flag
    response.view = "xml.html"
    return dict(item=str(session.s3.rapid_data_entry))
# -----------------------------------------------------------------------------
def user():
    """
    Auth functions based on arg. See gluon/tools.py

    Dispatches to the matching web2py Auth form (login, register,
    profile, login_next, ...) based on the first request argument and
    returns the form(s) for the view.
    """
    auth.settings.on_failed_authorization = URL(r=request, f="error")
    if request.args and request.args(0) == "login_next":
        # Can redirect the user to another page on first login for workflow (set in 00_settings.py)
        # Note the timestamp of last login through the browser
        if auth.is_logged_in():
            # (sic: "timestmp" is the field name in the auth_user schema)
            db(db.auth_user.id == auth.user.id).update(timestmp = request.utcnow)
    _table_user = auth.settings.table_user
    if request.args and request.args(0) == "profile":
        #_table_user.organisation.writable = False
        # Only the profile form exposes the user's UTC offset for editing.
        _table_user.utc_offset.readable = True
        _table_user.utc_offset.writable = True
    login_form = register_form = None
    if request.args and request.args(0) == "login":
        auth.messages.submit_button = T("Login")
        form = auth()
        login_form = form
    elif request.args and request.args(0) == "register":
        auth.messages.submit_button = T("Register")
        form = auth()
        register_form = form
    else:
        form = auth()
    if request.args and request.args(0) == "profile" and deployment_settings.get_auth_openid():
        # Append the list of linked OpenIDs below the profile form.
        form = DIV(form, openid_login_form.list_user_openids())
    self_registration = deployment_settings.get_security_self_registration()
    # Use Custom Ext views
    # Best to not use an Ext form for login: can't save username/password in browser & can't hit 'Enter' to submit!
    #if request.args(0) == "login":
    #    response.title = T("Login")
    #    response.view = "auth/login.html"
    return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration)
# -------------------------------------------------------------------------
def source():
    """ RESTful CRUD controller for the s3.source resource.

    Delegates all request handling to the shared S3 REST controller.
    """
    return s3_rest_controller("s3", "source")
# -------------------------------------------------------------------------
# About Sahana
def apath(path=""):
"Application path"
import os
from gluon.fileutils import up
opath = up(request.folder)
#TODO: This path manipulation is very OS specific.
while path[:3] == "../": opath, path=up(opath), path[3:]
return os.path.join(opath,path).replace("\\", "/")
def about():
"""
The About page provides details on the software
depedencies and versions available to this instance
of Sahana Eden.
"""
import sys
import subprocess
import string
python_version = sys.version
web2py_version = open(apath("../VERSION"), "r").read()[8:]
sahana_version = open(os.path.join(request.folder, "VERSION"), "r").read()
try:
sqlite_version = (subprocess.Popen(["sqlite3", "-version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()
except:
sqlite_version = T("Not installed or incorrectly configured.")
try:
mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
except:
mysql_version = T("Not installed or incorrectly configured.")
try:
pgsql_reply = (subprocess.Popen(["psql", "--version"], stdout=subprocess.PIPE |
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
from datetime import datetime
class StockScheduleWizard(models.TransientModel):
    """Wizard that (re)schedules the shipping date of a stock picking."""
    _name = "stock.schedule.wizard"

    # Requested shipping date/time (Odoo server datetime string).
    scheduled_date = fields.Datetime('Scheduled shipping date')

    @api.multi
    def action_button_schedule(self):
        """Validate the chosen date and re-plan the picking's sync job.

        Raises:
            ValidationError: if the scheduled date is in the past.
        """
        if self.scheduled_date:
            date_now = str(datetime.now())
            # A positive difference means "now" is past the chosen date.
            difference = datetime.strptime(date_now, '%Y-%m-%d %H:%M:%S.%f') - \
                datetime.strptime(self.scheduled_date, '%Y-%m-%d %H:%M:%S')
            difference = difference.total_seconds() / float(60)
            if difference > 0:
                raise ValidationError(_("Scheduled date must be bigger than current date"))
        picking = self.env['stock.picking'].browse(self.env.context['parent_obj'])
        # Look for a pending queue job already scheduled for this picking.
        cron_id = self.env['queue.job'].search([('model_name', '=', 'stock.picking'),
                                                ('state', '=', 'pending'),
                                                ('record_ids', 'like', picking.id),
                                                ('method_name', '=', 'make_picking_sync')])
        if cron_id:
            if len(cron_id) > 1:
                cron_id = cron_id[0]
            # Drop the stale job when the new date is later than its ETA;
            # _process_picking_scheduled_time() below queues a fresh one.
            if self.scheduled_date > cron_id.eta:
                cron_id.unlink()
        picking.sale_id.scheduled_date = self.scheduled_date
        picking.not_sync = True
        picking._process_picking_scheduled_time()
|
#Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import Quartz
from AppKit import NSEvent, NSScreen
from .base import PyMouseMeta, PyMouseEventMeta
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
class PyMouse(PyMouseMeta):
    """Mouse control for OS X via the Quartz CoreGraphics event APIs."""

    def press(self, x, y, button=1):
        """Press (and hold) *button* at screen position (x, y)."""
        event = Quartz.CGEventCreateMouseEvent(None,
                                               pressID[button],
                                               (x, y),
                                               button - 1)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)

    def release(self, x, y, button=1):
        """Release *button* at screen position (x, y)."""
        event = Quartz.CGEventCreateMouseEvent(None,
                                               releaseID[button],
                                               (x, y),
                                               button - 1)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)

    def move(self, x, y):
        """Move the pointer to (x, y) without any button held."""
        move = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, (x, y), 0)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, move)

    def drag(self, x, y):
        """Drag the pointer to (x, y) with the left button held down."""
        drag = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventLeftMouseDragged, (x, y), 0)
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag)

    def position(self):
        """Return the pointer position as (x, y) with a top-left origin."""
        loc = NSEvent.mouseLocation()
        # NSEvent uses a bottom-left origin; flip y to the top-left convention.
        return loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y

    def screen_size(self):
        """Return the main screen size as (width, height)."""
        return NSScreen.mainScreen().frame().size.width, NSScreen.mainScreen().frame().size.height

    def scroll(self, vertical=None, horizontal=None, depth=None):
        """Generate scroll events on up to three axes.

        Positive values scroll up / right / "out"; negative values scroll
        down / left / "in".  Each axis is emitted as a series of unit
        line-scroll events.
        """
        #Local submethod for generating Mac scroll events in one axis at a time
        def scroll_event(y_move=0, x_move=0, z_move=0, n=1):
            for _ in range(abs(n)):
                scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
                    None,  # No source
                    Quartz.kCGScrollEventUnitLine,  # Unit of measurement is lines
                    3,  # Number of wheels(dimensions)
                    y_move,
                    x_move,
                    z_move)
                Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
        #Execute vertical then horizontal then depth scrolling events
        if vertical is not None:
            vertical = int(vertical)
            if vertical == 0:  # Do nothing with 0 distance
                pass
            elif vertical > 0:  # Scroll up if positive
                scroll_event(y_move=1, n=vertical)
            else:  # Scroll down if negative
                scroll_event(y_move=-1, n=abs(vertical))
        if horizontal is not None:
            horizontal = int(horizontal)
            if horizontal == 0:  # Do nothing with 0 distance
                pass
            elif horizontal > 0:  # Scroll right if positive
                scroll_event(x_move=1, n=horizontal)
            else:  # Scroll left if negative
                scroll_event(x_move=-1, n=abs(horizontal))
        if depth is not None:
            depth = int(depth)
            if depth == 0:  # Do nothing with 0 distance
                pass
            # BUGFIX: this branch previously tested ``vertical`` (which may
            # even be None here), not ``depth``.
            elif depth > 0:  # Scroll "out" if positive
                scroll_event(z_move=1, n=depth)
            else:  # Scroll "in" if negative
                scroll_event(z_move=-1, n=abs(depth))
class PyMouseEvent(PyMouseEventMeta):
    """Listen for global mouse events through a Quartz event tap."""

    def run(self):
        """Install the event tap and pump the run loop until stopped."""
        tap = Quartz.CGEventTapCreate(
            Quartz.kCGSessionEventTap,
            Quartz.kCGHeadInsertEventTap,
            Quartz.kCGEventTapOptionDefault,
            Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) |
            Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) |
            Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) |
            Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) |
            Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) |
            Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) |
            Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp),
            self.handler,
            None)
        loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0)
        loop = Quartz.CFRunLoopGetCurrent()
        Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode)
        Quartz.CGEventTapEnable(tap, True)
        # Run the loop in 5-second slices so self.state is re-checked
        # periodically and the listener can be stopped.
        while self.state:
            Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False)

    def handler(self, proxy, type, event, refcon):
        """Dispatch a tapped event to click()/move(); optionally swallow it."""
        (x, y) = Quartz.CGEventGetLocation(event)
        if type in pressID:
            self.click(x, y, pressID.index(type), True)
        elif type in releaseID:
            self.click(x, y, releaseID.index(type), False)
        else:
            self.move(x, y)
        if self.capture:
            # Replace the event with a null event so it never reaches apps.
            Quartz.CGEventSetType(event, Quartz.kCGEventNull)
        return event
|
# Copyright (C)
#
# Author :
from GIC.Channels.GenericChannel import *
class ChannelTest(GenericChannel):
    """Example search channel for the LibreGeoSocial search engine."""

    # mandatory fields to work on LibreGeoSocial search engine
    MANDATORY_FIELDS = ["latitude", "longitude", "radius", "category"]

    CATEGORIES = [{"id" : "0", "name" : "all", "desc" : "All supported categories "},
                  {"id" : "1", "name" : "category1", "desc" : "Category for..."},
                  ]

    def __init__(self):
        self.options = {}

    def get_categories(self):
        """Return the category descriptors supported by this channel."""
        return self.CATEGORIES

    def get_info(self):
        """Return a short human-readable channel description."""
        return "Channel description"

    def set_options(self, options):
        """
        Fill self.options with the received dictionary
        regarding mandatory and optional fields of your channel
        """
        return True, ""

    def process(self):
        """
        Make the search and return the nodes
        """
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter ``dataset.name`` to a CharField(max_length=150, default=b'')."""

    dependencies = [
        ('dataset', '0009_remove_selection_model'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dataset',
            name='name',
            # NOTE(review): default is a *bytes* literal (Python 2 era
            # autogeneration) — kept as-is to avoid changing migration state.
            field=models.CharField(default=b'', max_length=150),
        ),
    ]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# Version string of this generated package (PEP 440 pre-release).
VERSION = "1.0.0b3"
|
import copy
import glob
import fnmatch
import os
import logging
import yaml
log = logging.getLogger("subiquitycore.netplan")
def _sanitize_inteface_config(iface_config):
    """Redact wifi access-point passwords in *iface_config*, in place."""
    access_points = iface_config.get('access-points', {})
    for _ap_name, ap_cfg in access_points.items():
        if 'password' in ap_cfg:
            ap_cfg['password'] = '<REDACTED>'
def sanitize_interface_config(iface_config):
    """Return a password-redacted deep copy of one interface's config."""
    redacted = copy.deepcopy(iface_config)
    _sanitize_inteface_config(redacted)
    return redacted
def sanitize_config(config):
    """Return a copy of config with passwords redacted."""
    sanitized = copy.deepcopy(config)
    wifis = sanitized.get('network', {}).get('wifis', {})
    for iface_config in wifis.values():
        _sanitize_inteface_config(iface_config)
    return sanitized
class Config:
    """A NetplanConfig represents the network config for a system.

    Call parse_netplan_config() with each piece of yaml config, and then
    call config_for_device to get the config that matches a particular
    network device, if any.
    """

    def __init__(self):
        self.physical_devices = []
        self.virtual_devices = []
        self.config = {}

    def parse_netplan_config(self, config):
        """Parse one YAML document and accumulate its device stanzas."""
        try:
            self.config = config = yaml.safe_load(config)
        # BUGFIX: was ``yaml.ReaderError``, which let scanner/parser errors
        # from malformed YAML propagate instead of being logged and skipped.
        except yaml.YAMLError as e:
            log.info("could not parse config: %s", e)
            return
        if not isinstance(config, dict):
            # e.g. an empty document parses to None
            log.info("config is not a mapping: %r", config)
            return
        network = config.get('network')
        if network is None:
            log.info("no 'network' key in config")
            return
        version = network.get("version")
        if version != 2:
            log.info("network has no/unexpected version %s", version)
            return
        for phys_key in 'ethernets', 'wifis':
            for dev, dev_config in network.get(phys_key, {}).items():
                self.physical_devices.append(_PhysicalDevice(dev, dev_config))
        for virt_key in 'bonds', 'vlans':
            for dev, dev_config in network.get(virt_key, {}).items():
                self.virtual_devices.append(_VirtualDevice(dev, dev_config))

    def config_for_device(self, link):
        """Return a deep copy of the config matching *link*.

        Returns {} for an unmatched physical device; returns None for an
        unmatched virtual device (preserved existing behavior).
        """
        if link.is_virtual:
            for dev in self.virtual_devices:
                if dev.name == link.name:
                    return copy.deepcopy(dev.config)
        else:
            allowed_matches = ('macaddress',)
            match_key = 'match'
            for dev in self.physical_devices:
                if dev.matches_link(link):
                    config = copy.deepcopy(dev.config)
                    # Keep only 'match' keys that uniquely identify this
                    # device; drop the stanza entirely if none remain.
                    if match_key in config:
                        match = {k: v for k, v in config[match_key].items()
                                 if k in allowed_matches}
                        if match:
                            config[match_key] = match
                        else:
                            del config[match_key]
                    return config
            return {}

    def load_from_root(self, root):
        """Parse every netplan config file found under *root*."""
        for path in configs_in_root(root):
            try:
                fp = open(path)
            except OSError:
                log.exception("opening %s failed", path)
                # BUGFIX: without this continue, a failed open fell through
                # and re-used (or NameError'd on) a stale ``fp``.
                continue
            with fp:
                self.parse_netplan_config(fp.read())
class _PhysicalDevice:
    """Matcher for a physical (ethernet/wifi) netplan stanza."""

    def __init__(self, name, config):
        match = config.get('match')
        if match is None:
            # No explicit match stanza: the stanza name is the device name.
            self.match_name = name
            self.match_mac = None
            self.match_driver = None
        else:
            self.match_name = match.get('name')
            self.match_mac = match.get('macaddress')
            self.match_driver = match.get('driver')
        self.config = config
        log.debug(
            "config for %s = %s" % (
                name, sanitize_interface_config(self.config)))

    def matches_link(self, link):
        """Return True when *link* satisfies every configured criterion."""
        if self.match_name is not None and \
                not fnmatch.fnmatch(link.name, self.match_name):
            return False
        if self.match_mac is not None and self.match_mac != link.hwaddr:
            return False
        if self.match_driver is not None and self.match_driver != link.driver:
            return False
        return True
class _VirtualDevice:
    """Holder for a virtual (bond/vlan) netplan stanza, matched by name."""

    def __init__(self, name, config):
        self.name = name
        self.config = config
        log.debug(
            "config for %s = %s" % (
                name, sanitize_interface_config(self.config)))
def configs_in_root(root, masked=False):
    """Return a list of all netplan configs under root.

    The list is ordered in increasing precedence: lib < etc < run,
    then by basename within each precedence level.

    @param masked: if True, include config paths that are masked
    by the same basename in a different directory."""
    if not os.path.isabs(root):
        root = os.path.abspath(root)
    wildcard = "*.yaml"
    # Precedence tags: lower sorts first, i.e. lower precedence.
    dirs = {"lib": "0", "etc": "1", "run": "2"}
    rootlen = len(root)
    paths = []
    for d in dirs:
        paths.extend(glob.glob(os.path.join(root, d, "netplan", wildcard)))

    def mykey(path):
        """returned key is basename + string-precedence based on dir."""
        bname = os.path.basename(path)
        # BUGFIX: was ``path[rootlen + 1]`` (a single character), and the
        # ``dirs`` precedence map was never applied, so files sorted by
        # directory *name* (etc < lib < run) instead of precedence.
        bdir = path[rootlen + 1:]
        bdir = bdir[:bdir.find(os.path.sep)]
        return "%s/%s" % (bname, dirs[bdir])

    if not masked:
        # Later glob passes (etc, run) overwrite earlier ones (lib), so the
        # highest-precedence file for each basename survives.
        paths = {os.path.basename(p): p for p in paths}.values()
    return sorted(paths, key=mykey)
|
warning:: I doubt SPOINTs/EPOINTs work correctly
.. warning:: xref not fully implemented (assumes cid=0)
.. todo:: node_set stil does work on the all the nodes in the big
kdtree loop, which is very inefficient
.. todo:: remove_collapsed_elements is not supported
.. todo:: avoid_collapsed_elements is not supported
"""
if not isinstance(tol, float):
tol = float(tol)
nodes_xyz, model, nids, inew = _eq_nodes_setup(
bdf_filename, tol, renumber_nodes=renumber_nodes,
xref=xref, node_set=node_set, debug=debug)
ieq, slots = _eq_nodes_build_tree(nodes_xyz, nids, tol,
inew=inew, node_set=node_set,
neq_max=neq_max)[1:]
nid_pairs = _eq_nodes_find_pairs(nids, slots, ieq, node_set=node_set)
_eq_nodes_final(nid_pairs, model, tol, node_set=node_set)
if bdf_filename_out is not None:
model.write_bdf(bdf_filename_out, size=size, is_double=is_double)
if crash_on_collapse:
# lazy way to make sure there aren't any collapsed nodes
model2 = BDF(log=log, debug=debug)
model2.read_bdf(bdf_filename_out)
return model
def _eq_nodes_setup(bdf_filename, tol,
                    renumber_nodes=False, xref=True,
                    node_set=None, debug=True):
    """helper function for `bdf_equivalence_nodes`

    Loads (or reuses) the model and builds the arrays the kd-tree
    search needs:

    Returns
    -------
    nodes_xyz : (nnodes, 3) float32 array of node locations
        (via ``get_position()`` when non-basic coordinate systems exist,
        presumably giving global-frame xyz — confirm)
    model : the (possibly newly loaded) BDF model
    nids : int32 array of candidate node ids, in sorted-key order
    inew : index array into ``nids`` (or ``slice(None)``)
    """
    if node_set is not None:
        if renumber_nodes:
            raise NotImplementedError('node_set is not None & renumber_nodes=True')
        #print(type(node_set))
        #print('*node_set', node_set)
        assert len(node_set) > 0, node_set
        # normalize node_set to an int32 numpy array
        if isinstance(node_set, set):
            node_set = asarray(list(node_set), dtype='int32')
        else:
            node_set = asarray(node_set, dtype='int32')
    if isinstance(bdf_filename, string_types):
        # a filename was passed; load it (always cross-referenced)
        xref = True
        model = BDF(debug=debug)
        model.read_bdf(bdf_filename, xref=True)
    else:
        # an already-loaded BDF model was passed
        model = bdf_filename
        model.cross_reference(xref=xref)
    coord_ids = model.coord_ids
    # only the basic frame (cid=0) exists -> raw .xyz can be used directly
    needs_get_position = True if coord_ids == [0] else False
    # quads / tris
    #nids_quads = []
    #eids_quads = []
    #nids_tris = []
    #eids_tris = []
    # map the node ids to the slot in the nids array
    renumber_nodes = False
    inode = 0
    nid_map = {}
    if node_set is not None:
        if PY2:
            all_nids = array(model.nodes.keys(), dtype='int32')
        else:
            all_nids = array(list(model.nodes.keys()), dtype='int32')
        # B - A
        # these are all the nodes that are requested from node_set that are missing
        #   thus len(diff_nodes) == 0
        diff_nodes = setdiff1d(node_set, all_nids)
        if len(diff_nodes) != 0:
            msg = ('The following nodes cannot be found, but are included'
                   ' in the reduced set; nids=%s' % diff_nodes)
            raise RuntimeError(msg)
        # A & B
        # the nodes to analyze are the union of all the nodes and the desired set
        #   which is basically the same as:
        #      nids = unique(node_set)
        nids = intersect1d(all_nids, node_set, assume_unique=True)  # the new values
        if renumber_nodes:
            raise NotImplementedError('node_set is not None & renumber_nodes=True')
        else:
            for nid in all_nids:
                nid_map[inode] = nid
                inode += 1
        #nids = array([node.nid for nid, node in sorted(iteritems(model.nodes))
        #if nid in node_set], dtype='int32')
    else:
        if renumber_nodes:
            # renumber nodes consecutively starting at 1
            for nid, node in sorted(iteritems(model.nodes)):
                node.nid = inode + 1
                nid_map[inode] = nid
                inode += 1
            nnodes = len(model.nodes)
            nids = arange(1, inode + 1, dtype='int32')
            assert nids[-1] == nnodes
        else:
            for nid, node in sorted(iteritems(model.nodes)):
                nid_map[inode] = nid
                inode += 1
            nids = array([node.nid for nid, node in sorted(iteritems(model.nodes))], dtype='int32')
        all_nids = nids
    if needs_get_position:
        nodes_xyz = array([model.nodes[nid].get_position()
                           for nid in nids], dtype='float32')
    else:
        nodes_xyz = array([model.nodes[nid].xyz
                           for nid in nids], dtype='float32')
    if node_set is not None:
        assert nodes_xyz.shape[0] == len(nids)
    # NOTE(review): dead code (``if 0:``) kept for reference.
    if 0:
        # I forget entirely what this block of code is for, but my general
        # recollection was that it checked that all the nodes that were
        # referenced were included in the nids list.  I'd rather break that
        # check in order to support nodes_set.
        #
        # It's also possible that it's here, so you only consider nodes that
        # are associated...
        # there is some set of points that are used on the elements that
        # will be considered.
        #
        # Presumably this is enough to capture all the node ids and NOT
        # spoints, but I doubt it...
        spoint_epoint_nid_set = set([])
        for eid, element in sorted(iteritems(model.elements)):
            spoint_epoint_nid_set.update(element.node_ids)
        for eid, element in sorted(iteritems(model.masses)):
            spoint_epoint_nid_set.update(element.node_ids)
        if model.spoints and model.epoints:
            nids_new = spoint_epoint_nid_set - model.spoints.points - model.epoints.points
        elif model.spoints:
            nids_new = spoint_epoint_nid_set - model.spoints.points
        elif model.epoints:
            nids_new = spoint_epoint_nid_set - model.epoints.points
        else:
            nids_new = spoint_epoint_nid_set
        if None in nids_new:
            nids_new.remove(None)
        # autosorts the data
        nids_new = unique(list(nids_new))
        assert isinstance(nids_new[0], integer_types), type(nids_new[0])
        missing_nids = list(set(nids_new) - set(all_nids))
        if missing_nids:
            missing_nids.sort()
            msg = 'There are missing nodes...\n'  # TODO: in what???
            msg = 'missing nids=%s' % str(missing_nids)
            raise RuntimeError(msg)
        # get the node_id mapping for the kdtree
        inew = searchsorted(nids, nids_new, side='left')
        # print('nids_new =', nids_new)
    else:
        inew = slice(None)
    #assert np.array_equal(nids[inew], nids_new), 'some nodes are not defined'
    return nodes_xyz, model, nids, inew
def _eq_nodes_find_pairs(nids, slots, ieq, node_set=None):
    """helper function for `bdf_equivalence_nodes`

    Builds the list of ``(nid1, nid2)`` node-id pairs that the kd-tree
    search flagged as coincident candidates.

    nids : array of node ids, indexed the same way as ``ieq``
    slots : (row_indices, col_indices) into ``ieq``
    ieq : 2d array mapping a (row, col) slot to the index of the partner node
    node_set : optional set of node ids; pairs where neither id is in the
        set are skipped
    """
    row_indices, col_indices = slots
    pairs = []
    for row, col in zip(row_indices, col_indices):
        partner = ieq[row, col]
        nid_a = nids[row]
        nid_b = nids[partner]
        # a node paired with itself is not an equivalence
        if nid_a == nid_b:
            continue
        # when a restriction set is given, at least one node must be in it
        if node_set is not None and nid_a not in node_set and nid_b not in node_set:
            continue
        pairs.append((nid_a, nid_b))
    return pairs
def _eq_nodes_final(nid_pairs, model, tol, node_set=None):
"""apply nodal equivalencing to model"""
for (nid1, nid2) in nid_pairs:
node1 = model.nodes[nid1]
node2 = model.nodes[nid2]
# TODO: doesn't use get position...
distance = norm(node1.xyz - node2.xyz)
#print(' irow=%s->n1=%s icol=%s->n2=%s' % (irow, nid1, icol, nid2))
if distance > tol:
#print(' *n1=%-4s xyz=%s\n *n2=%-4s xyz=%s\n *distance=%s\n' % (
# nid1, list_print(node1.xyz),
# nid2, list_print(node2.xyz),
# distance))
continue
if node_set is not None:
assert nid1 in node_set, 'nid1=%s node_set=%s' % (nid1, node_set)
assert nid2 in node_set, 'nid2=%s node_set=%s' % (nid2, node_set)
#print(' n1=%-4s xyz=%s\n n2=%-4s xyz=%s\n distance=%s\n' % (
#nid1, str(node1.xyz),
#nid2, str(node2.xyz),
|
#
# THIS FILE WAS AUTOGENERATED BY makeSip6.py
# Do not edit this file manually. All changes will be lost.
#
"""
# TOP2049 Open Source programming suite
#
# Microchip PIC24f08kl201 SIP6
#
# Copyright (c) 2014 Pavel Stemberk <stemberk@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from .microchip16_common import *
from .configWords import klx0x_fuseDesc
class Chip_Pic24f08kl201sip6(Chip_Microchip16_common):
    """TOP2049 chip definition for the PIC24F08KL201 (SIP6 adapter)."""

    # Programming voltages.
    voltageVDD = 3.3
    voltageVPP = 8
    logicalFlashProgramMemorySize = 0x800000
    logicalFlashConfigurationMemorySize = 0x800000
    hasEEPROM = False

    def __init__(self):
        Chip_Microchip16_common.__init__(self,
            chipPackage = "DIP10",
            chipPinVCC = 9,
            chipPinsVPP = 10,
            chipPinGND = 8,
            signature=b"\x06\x4b",
            # flashPageSize (in number of 24bit words)
            flashPageSize=0x15FE // 2 + 2,
            # flashPageSize=0x40,
            flashPages=1,
            # eepromPageSize (in 16bit words)
            eepromPageSize=0,
            eepromPages=0,
            # all 7 words uses lowest byte only
            fuseBytes=2 * 9
            )
        self.configWordAddr = 0xF80000
        # self.osccalBackupAddr = self.userIDLocationAddr + self.userIDLocationSize
# Fuse/config-word description shared by the KLx0x family (from configWords).
fuseDesc = klx0x_fuseDesc
# Register this chip definition (module-import side effect).
ChipDescription(
    Chip_Pic24f08kl201sip6,
    bitfile = "microchip16sip6",
    chipID="pic24f08kl201sip6",
    runtimeID = (0xDF05, 0x01),
    chipVendors="Microchip",
    description = "PIC24F08KL201 - ICD",
    packages = (("DIP10", ""), ),
    fuseDesc=fuseDesc,
    maintainer="Pavel Stemberk <stemberk@gmail.com>",
)
|
#!/usr/bin/env python
'''
Created on 18 gru 2014
@author: ghalajko
'''
from lvstop.screen import Screen
from lvstop import loop
if __name__ == '__main__':
    # Run the interactive loop inside the Screen context manager so the
    # terminal is restored on exit; Ctrl-C exits quietly.  The redundant
    # bare ``except: raise`` clause was removed — uncaught exceptions
    # propagate identically without it.
    with Screen() as scr:
        try:
            scr.main_loop(loop)
        except KeyboardInterrupt:
            pass
#!/usr/bin/env python3
# CGI page: generate a ".csv" file of the values of a Redis "lists" key.
#
# First check that the key is present in the submitted form.
# The HTML handling is done from Python via the local helper libraries.
import cgi
import cgitb
import html
# Report errors to the web/http server.
cgitb.enable()
# Local libraries: mjl (Json, Files), mhl (Html), flt (T w/ Redis)
import mjl, mhl, flt
import redis, subprocess
# General parameters
TestoPagina="Genera file \".csv\" dei valori di chiave \"lists\" Redis"
DirBase="/var/www"
ConfigFile=DirBase+"/conf/config.json"
#ExecFile="/cgi-bin/<exefile>"
# Redis "key"
RedisKey = "*" # all keys
# Form field name(s)
FormName = "rkey"
# Open the Redis database using the helper from the local library.
MyDB = flt.OpenDBFile(ConfigFile)
# Start web page - boilerplate HTML blocks from the library.
print (mhl.MyHtml())
print (mhl.MyHtmlHead())
# Write the page title/text.
print ("<h1>","<center>",TestoPagina,"</center>","</h1>")
#print ("<hr/>","<br/>")
# Optional help/annotation
#print ("Non ho rinominato i campi e non sono stato a riordinare le voci.<br/>")
form=cgi.FieldStorage()
if FormName not in form:
    print ("<h2>ERRORE: Non e` stata passata la chiave Redis</h2>")
else:
    # NOTE: cgi.escape() was removed in Python 3.8; html.escape(quote=False)
    # is the documented drop-in replacement (escapes &, <, > only).
    RedisKey = html.escape(form[FormName].value, quote=False)
    RedisKeyStart = html.escape(form["VStart"].value, quote=False)
    RedisKeyStop = html.escape(form["VStop"].value, quote=False)
    print ("La chiave viene passata come argomento ad un'altro programma, quindi l'unico feedback possibile e` 0 se e` andato a buon fine, o 1 se c'e` stato un'errore.</br></br>")
    print ("Comando eseguito:</br>/var/www/cgi-bin/setsVals2csv.py {0:s} {1:s} {2:s}</br></br>".format(RedisKey, RedisKeyStart, RedisKeyStop))
    print (subprocess.call(['/var/www/cgi-bin/setsVals2csv.py', RedisKey, RedisKeyStart, RedisKeyStop]))
# End web page
print (mhl.MyHtmlBottom())
|
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
elif _typemag == 'ph':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 3,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [M1, 'calibrated mag']})
else:
agnkey.agnsqldef.updatevalue(_datatable, 'mag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [9999, 'calibrated mag']})
if np.isfinite(DM11):
agnkey.agnsqldef.updatevalue(_datatable, 'dmag', DM11,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'dmag': [DM11, 'calibrated mag error']})
else:
agnkey.agnsqldef.updatevalue(_datatable, 'dmag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'dmag': [9999, 'calibrated mag error']})
except:
print 'module mysqldef not found'
else:
Z2 = float(string.split(dicti[_filter][img]['ZP' + filters1[_filter].upper() + col.upper()])[1])
C2 = float(string.split(dicti[_filter][img]['ZP' + filters1[_filter].upper() + col.upper()])[2])
Z1 = float(
string.split(dicti[_filter2][img2]['ZP' + filters1[_filter2].upper() + col.upper()])[1])
C1 = float(
string.split(dicti[_filter2][img2]['ZP' + filters1[_filter2].upper() + col.upper()])[2])
M1, M2 = agnkey.agnabsphotdef.finalmag(Z1, Z2, C1, C2, mag1, mag0)
DZ1 = 0.0
DZ2 = 0.0
dc1, dc2, dz1, dz2, dm1, dm2 = agnkey.agnabsphotdef.erroremag(Z1, Z2, mag0, mag1, C1, C2, 1)
DM22 = np.sqrt((dm1 * dmag0) ** 2 + (dz1 * DZ1) ** 2 + (dm2 * dmag1) ** 2 + (dz2 * DZ2) ** 2)
if _interactive:
print '\n#### example computation '
print 'Z1 Z1 C1 C2 mag1 mag 2'
print 'M1 M2 '
print Z1, Z2, C1, C2, mag1, mag0
print M1, M2
try:
if np.isfinite(M2) and M2 < 999:
agnkey.agnsqldef.updatevalue(_datatable, 'mag', M2,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
if _typemag == 'fit':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 2,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
elif _typemag == 'ph':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 3,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [M2, 'calibrated mag']})
else:
agnkey.agnsqldef.updatevalue(_datatable, 'mag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [9999, 'calibrated mag']})
if np.isfinite(DM22):
agnkey.agnsqldef.updatevalue(_datatable, 'dmag', DM22,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'dmag': [DM22, 'calibrated mag error']})
else:
agnkey.agnsqldef.updatevalue(_datatable, 'dmag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'dmag': [9999, 'calibrated mag error']})
except:
print 'module mysqldef not found'
print _filter, col
else:
if dicti[_filter][img]['telescope'] in ['lsc', '1m0-04', '1m0-05', '1m0-06', '1m0-09']:
kk = agnkey.sites.extintion('ctio')
elif dicti[_filter][img]['telescope'] in ['elp', '1m0-08']:
kk = agnkey.sites.extintion('mcdonald')
elif dicti[_filter][img]['telescope'] in ['cpt', '1m0-12', '1m0-10', '1m0-13']:
kk = agnkey.sites.extintion('southafrica')
elif dicti[_filter][img]['telescope'] in ['ftn']:
kk = agnkey.sites.extintion('mauna')
elif dicti[_filter][img]['telescope'] in ['1m0-03', '1m0-11', 'coj', 'fts']:
kk = agnkey.sites.extintion('siding')
else:
print _filter, img, dicti[_filter][img]
sys.exit('problem with dicti')
Z1 = ''
for ww in dicti[_filter][img].keys( | ):
if 'ZP' + filters1[_filter].upper | () == ww[0:3] and float(
string.split(dicti[_filter][img][ww])[1]) < 99:
Z1 = float(string.split(dicti[_filter][img][ww])[1])
C1 = float(string.split(dicti[_filter][img][ww])[2])
break
# mag0=dicti[_filter][img][namemag[_typemag][0]]+2.5*math.log10(dicti[_filter][img]['exptime'])-kk[filters1[_filter]]*dicti[_filter][img]['airmass']
mag0 = dicti[_filter][img][namemag[_typemag][0]] - kk[filters1[_filter]] * dicti[_filter][img][
'airmass']
dmag0 = dicti[_filter][img][namemag[_typemag][1]]
if Z1 and mag0 < 99:
M1 = mag0 + Z1
agnkey.agnsqldef.updatevalue(_datatable, 'mag', M1,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
if _typemag == 'fit':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 2,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
elif _typemag == 'ph':
agnkey.agnsqldef.updatevalue(_datatable, 'magtype', 3,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [M1, 'calibrated mag']})
else:
print 'no other filters with calibration in ' + _filter + ' band'
print img, _filter, mag0, dmag0, Z1, C1
agnkey.agnsqldef.updatevalue(_datatable, 'mag', 9999,
re.sub('sn2.fits', 'fits', string.split(img, '/')[-1]))
agnkey.util.updateheader(img, 0, {'mag': [9999, 'calibrated mag']})
# try:
# except:
print |
"""
Model to hold edx-video-pipeline configurations.
"""
from __future__ import absolute_import
from config_models.models import ConfigurationModel
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import ugettext_lazy as _
from opaque_keys.edx.django.models import CourseKeyField
import six
class VideoPipelineIntegration(ConfigurationModel):
    """
    Manages configuration for connecting to the edx-video-pipeline service and using its API.
    .. no_pii:
    """
    # OAuth client name used when talking to the pipeline service.
    client_name = models.CharField(
        max_length=100,
        default='VEDA-Prod',
        null=False,
        blank=False,
        help_text=_('Oauth client name of video pipeline service.')
    )
    # Internal (not user-facing) base URL of the pipeline REST API.
    api_url = models.URLField(
        verbose_name=_('Internal API URL'),
        help_text=_('edx-video-pipeline API URL.')
    )
    # Username of the service account associated with the pipeline integration.
    service_username = models.CharField(
        max_length=100,
        default='veda_service_user',
        null=False,
        blank=False,
        help_text=_('Username created for Video Pipeline Integration, e.g. veda_service_user.')
    )
    def get_service_user(self):
        """Return the User object matching ``service_username``.

        Raises ``User.DoesNotExist`` if the account has not been created.
        """
        # NOTE: We load the user model here to avoid issues at startup time that result from the hacks
        # in lms/startup.py.
        User = get_user_model()  # pylint: disable=invalid-name
        return User.objects.get(username=self.service_username)
class VideoUploadsEnabledByDefault(ConfigurationModel):
    """
    Platform-wide switch for the "video uploads enabled by default" feature.
    .. no_pii:
    """
    # When set, the feature applies to every course, overriding any
    # per-course CourseVideoUploadsEnabledByDefault rows.
    enabled_for_all_courses = models.BooleanField(default=False)

    @classmethod
    def feature_enabled(cls, course_id):
        """
        Report whether the feature is available for the given course.

        The global flag must be enabled. If it is enabled for all courses the
        feature is available everywhere; otherwise the most recent
        CourseVideoUploadsEnabledByDefault row for this course decides.

        Arguments:
            course_id (CourseKey): course id for whom feature will be checked.
        """
        if not cls.is_enabled():
            return False
        if cls.current().enabled_for_all_courses:
            return True
        course_flag = (CourseVideoUploadsEnabledByDefault.objects
                       .filter(course_id=course_id)
                       .order_by('-change_date')
                       .first())
        return course_flag.enabled if course_flag else False

    def __unicode__(self):
        return u"VideoUploadsEnabledByDefault: enabled {is_enabled}".format(
            is_enabled=VideoUploadsEnabledByDefault.current().is_enabled()
        )
class CourseVideoUploadsEnabledByDefault(ConfigurationModel):
    """
    Per-course switch for the "video uploads enabled by default" feature.
    Only effective while the matching global flag is enabled.
    .. no_pii:
    """
    KEY_FIELDS = ('course_id',)
    course_id = CourseKeyField(max_length=255, db_index=True)

    def __unicode__(self):
        prefix = "" if self.enabled else "Not "
        return u"Course '{course_key}': Video Uploads {not_enabled}Enabled by default.".format(
            course_key=six.text_type(self.course_id),
            not_enabled=prefix
        )
|
"""
Django settings for SimpleMooc project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#f3*&^_56z9tj4=l%7+0gzg17o(sw&%(use@zt+_k@=y(ke2f5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# libs
'taggit',
# apps
'SimpleMooc.core',
'SimpleMooc.courses',
'SimpleMooc.accounts',
'SimpleMooc.forum',
)
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
TEMPLATE_CON | TEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'd | jango.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'SimpleMooc.urls'
WSGI_APPLICATION = 'SimpleMooc.wsgi.application'
# Database
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "SimpleMooc", "media")
MEDIA_URL = "/media/"
# Email
# EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
DEFAULT_FROM_EMAIL = "Nome <anderson.bcc.uag@gmail.com>"
EMAIL_USE_TLS = True
EMAIL_HOST = "smtp.gmail.com"
EMAIL_HOST_USER = "anderson.bcc.uag@gmail.com"
EMAIL_HOST_PASSWORD = "123"
EMAIL_PORT = "587"
CONTACT_EMAIL = "anderson.adss.hotmail@gmail.com"
# auth
LOGIN_URL = "accounts:login"
LOGIN_REDIRECT_URL = "core:home"
LOGOUT_URL = "accounts:logout"
AUTH_USER_MODEL = "accounts.User"
# Heroku settings
DATABASES = {
'default': dj_database_url.config(),
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, "SimpleMooc", "core", "static/"),)
try:
from SimpleMooc.local_settings import *
except ImportError:
pass
|
"""
This module is used to generate graphs that show the interaction between authors either through multiple edges or
through edge weights. There is an edge from one author to another if the former sent a message to the latter. These
graphs depict thread-wise interaction of the authors for the entire mailing list and these interactions are labelled
in chronological order to help identify the flow of messages across authors.
"""
import json
from util.read_utils import *
def add_to_multigraph(graph_obj, discussion_graph, json_data, nbunch, label_prefix=''):
    """
    Recursively add labelled sender->recipient edges for the messages in
    *nbunch* and all of their successors in *discussion_graph*.

    Each message contributes one edge per To/Cc recipient, labelled with a
    dotted chronological index (e.g. '0', '0.1', ...) built from
    *label_prefix* so the flow of a thread can be followed.

    :param graph_obj: MultiDiGraph-like object receiving the edges.
    :param discussion_graph: graph whose successors() yields replies to a message.
    :param json_data: mapping from message node id to its headers (From/To/Cc).
    :param nbunch: iterable of message node ids to process at this level.
    :param label_prefix: dotted prefix identifying the parent message.
    """
    i = 0
    for node in sorted(nbunch):
        node_attr = json_data[node]
        if node_attr['Cc'] is None:
            addr_list = node_attr['To']
        else:
            addr_list = node_attr['To'] | node_attr['Cc']
        for to_address in addr_list:
            graph_obj.add_edge(node_attr['From'], to_address, label=label_prefix + str(i))
        successors = [int(x) for x in discussion_graph.successors(node)]
        # FIX: the old test "is not None" was always true (a list comprehension
        # never yields None); recurse only when there are actual successors.
        if successors:
            add_to_multigraph(graph_obj, discussion_graph, json_data, successors, label_prefix + str(i) + '.')
        i += 1
def author_interaction_multigraph(discussion_graph, json_data, limit=10):
    """
    Draw one labelled author-interaction multigraph per weakly connected
    component of *discussion_graph*, saving PNGs under author_multi/.
    At most *limit* components are drawn (a non-positive limit means all).
    """
    drawn = 0
    for component in nx.weakly_connected_component_subgraphs(discussion_graph):
        interaction = nx.MultiDiGraph()
        root = min(int(node) for node in component.nodes())
        add_to_multigraph(interaction, discussion_graph, json_data, [root])
        agraph = nx.to_agraph(interaction)
        agraph.draw("author_multi/" + str(root) + '.png', prog='circo')
        drawn += 1
        if 0 < limit == drawn:
            break
def add_to_weighted_graph(graph_obj, discussion_graph, json_data, nbunch, node_enum=None):
    """
    Recursively add weighted sender->recipient edges for the messages in
    *nbunch* and all of their successors in *discussion_graph*.

    Authors are enumerated on first sight and referenced by their index in
    *node_enum*; each edge's 'label' attribute counts how many messages were
    sent between that ordered pair of authors.

    :param graph_obj: DiGraph-like object receiving the weighted edges.
    :param discussion_graph: graph whose successors() yields replies to a message.
    :param json_data: mapping from message node id to its headers (From/To/Cc).
    :param nbunch: iterable of message node ids to process at this level.
    :param node_enum: running list of author addresses (also serves as the
        author numbering). Defaults to a fresh list per top-level call.
    """
    # FIX: the old signature used a mutable default (node_enum=list()),
    # which leaked author numbering across independent top-level calls.
    if node_enum is None:
        node_enum = []
    for node in sorted(nbunch):
        node_attr = json_data[node]
        if node_attr['Cc'] is None:
            addr_list = node_attr['To']
        else:
            addr_list = node_attr['To'] | node_attr['Cc']
        if node_attr['From'] not in node_enum:
            node_enum.append(node_attr['From'])
        from_node = node_enum.index(node_attr['From'])
        for to_address in addr_list:
            if to_address not in node_enum:
                node_enum.append(to_address)
            to_node = node_enum.index(to_address)
            if not graph_obj.has_edge(from_node, to_node):
                graph_obj.add_edge(from_node, to_node, label=1)
            else:
                graph_obj[from_node][to_node]['label'] += 1
        succ_nbunch = [int(x) for x in discussion_graph.successors(node)]
        # A list is never None; recurse only when there are real successors.
        if succ_nbunch:
            add_to_weighted_graph(graph_obj, discussion_graph, json_data, succ_nbunch, node_enum)
def author_interaction_weighted_graph(discussion_graph, json_data, limit=10):
    """
    Render one weighted author-interaction digraph per weakly connected
    component of *discussion_graph*, saving PNGs under author_weighted/.
    At most *limit* components are drawn (a non-positive limit means all).
    """
    drawn = 0
    for component in nx.weakly_connected_component_subgraphs(discussion_graph):
        weighted = nx.DiGraph()
        root = min(int(node) for node in component.nodes())
        add_to_weighted_graph(weighted, discussion_graph, json_data, [root], [])
        agraph = nx.to_agraph(weighted)
        agraph.draw("author_weighted/" + str(root) + '.png', prog='circo')
        drawn += 1
        if 0 < limit == drawn:
            break
def weighted_multigraph():
    """Build the discussion graph from the CSV/JSON artifacts and render both
    the weighted and the multigraph author-interaction views (20 components
    each). Reads graph_nodes.csv, graph_edges.csv and clean_data.json from
    the working directory."""
    # Time limit can be specified here in the form of a timestamp in one of the identifiable formats and all messages
    # that have arrived after this timestamp will be ignored.
    time_limit = None
    # If true, then messages that belong to threads that have only a single author are ignored.
    ignore_lat = True
    if time_limit is None:
        # Default to "now" so every existing message is considered.
        time_limit = time.strftime("%a, %d %b %Y %H:%M:%S %z")
    msgs_before_time = set()
    time_limit = get_datetime_object(time_limit)
    print("All messages before", time_limit, "are being considered.")
    discussion_graph = nx.DiGraph()
    email_re = re.compile(r'[\w\.-]+@[\w\.-]+')
    json_data = dict()
    # Add nodes into NetworkX graph by reading from CSV file
    if not ignore_lat:
        with open("graph_nodes.csv", "r") as node_file:
            # Each row: message id ; sender ; timestamp
            for pair in node_file:
                node = pair.split(';', 2)
                if get_datetime_object(node[2].strip()) < time_limit:
                    node[0] = int(node[0])
                    msgs_before_time.add(node[0])
                    from_addr = email_re.search(node[1].strip())
                    from_addr = from_addr.group(0) if from_addr is not None else node[1].strip()
                    discussion_graph.add_node(node[0], time=node[2].strip(), color="#ffffff", style='bold', sender=from_addr)
            # Redundant under "with" (the context manager closes the file), but harmless.
            node_file.close()
        print("Nodes added.")
        # Add edges into NetworkX graph by reading from CSV file
        with open("graph_edges.csv", "r") as edge_file:
            for pair in edge_file:
                edge = pair.split(';')
                edge[0] = int(edge[0])
                edge[1] = int(edge[1])
                # Keep an edge only when both endpoints passed the time filter.
                if edge[0] in msgs_before_time and edge[1] in msgs_before_time:
                    discussion_graph.add_edge(*edge)
            edge_file.close()
        print("Edges added.")
    else:
        lone_author_threads = get_lone_author_threads(False)
        # Add nodes into NetworkX graph only if they are not a part of a thread that has only a single author
        with open("graph_nodes.csv", "r") as node_file:
            for pair in node_file:
                node = pair.split(';', 2)
                node[0] = int(node[0])
                if get_datetime_object(node[2].strip()) < time_limit and node[0] not in lone_author_threads:
                    msgs_before_time.add(node[0])
                    from_addr = email_re.search(node[1].strip())
                    from_addr = from_addr.group(0) if from_addr is not None else node[1].strip()
                    discussion_graph.add_node(node[0], time=node[2].strip(), color="#ffffff", style='bold', sender=from_addr)
            node_file.close()
        print("Nodes added.")
        # Add edges into NetworkX graph only if they are not a part of a thread that has only a single author
        with open("graph_edges.csv", "r") as edge_file:
            for pair in edge_file:
                edge = pair.split(';')
                edge[0] = int(edge[0])
                edge[1] = int(edge[1])
                if edge[0] not in lone_author_threads and edge[1] not in lone_author_threads:
                    if edge[0] in msgs_before_time and edge[1] in msgs_before_time:
                        discussion_graph.add_edge(*edge)
            edge_file.close()
        print("Edges added.")
    # Load the message headers; lines_per_n yields one 9-line JSON object at a time.
    with open('clean_data.json', 'r') as json_file:
        for chunk in lines_per_n(json_file, 9):
            json_obj = json.loads(chunk)
            # print("\nFrom", json_obj['From'], "\nTo", json_obj['To'], "\nCc", json_obj['Cc'])
            # Normalize headers to bare e-mail addresses (sets for To/Cc).
            from_addr = email_re.search(json_obj['From'])
            json_obj['From'] = from_addr.group(0) if from_addr is not None else json_obj['From']
            json_obj['To'] = set(email_re.findall(json_obj['To']))
            json_obj['Cc'] = set(email_re.findall(json_obj['Cc'])) if json_obj['Cc'] is not None else None
            # print("\nFrom", json_obj['From'], "\nTo", json_obj['To'], "\nCc", json_obj['Cc'])
            # NOTE(review): keyed by Message-ID, while the add_to_* helpers index
            # json_data by the integer node id from the CSV -- confirm these match.
            json_data[json_obj['Message-ID']] = json_obj
    print("JSON data loaded.")
    author_interaction_weighted_graph(discussion_graph, json_data, limit=20)
    author_interaction_multigraph(discussion_graph, json_data, limit=20)
|
#! | /usr/bin/python
# encoding: utf-8
import sys
from gist import create_workflow, | set_github_token
from workflow import Workflow, web
from workflow.background import run_in_background, is_running
def main(wf):
    """Store the GitHub token passed as the first workflow argument."""
    token = wf.args[0]
    # Only persist a non-empty token (truthiness of a string == len > 0).
    if token:
        set_github_token(wf, token)


if __name__ == '__main__':
    wf = create_workflow()
    sys.exit(wf.run(main))
|
"""scons.Node.Alias
Alias nodes.
This creates a hash of global Aliases (dummy targets).
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Alias.py 2014/07/05 09:42:21 garyo"
import collections
import SCons.Errors
import SCons.Node
import SCons.Util
class AliasNameSpace(collections.UserDict):
    """Registry mapping alias names to their Alias nodes."""

    def Alias(self, name, **kw):
        """Return the Alias node for *name*, creating and caching it on
        first use. If *name* is already an Alias node, return it as-is."""
        if isinstance(name, SCons.Node.Alias.Alias):
            return name
        if name not in self:
            self[name] = SCons.Node.Alias.Alias(name, **kw)
        return self[name]

    def lookup(self, name, **kw):
        """Return the Alias node registered under *name*, or None."""
        if name in self:
            return self[name]
        return None
class AliasNodeInfo(SCons.Node.NodeInfoBase):
    """Per-node serialized info for Alias nodes; only the content
    signature (csig) is persisted."""
    current_version_id = 1
    field_list = ['csig']
    def str_to_node(self, s):
        # Resolve a stored alias name back to its node in the global namespace.
        return default_ans.Alias(s)
class AliasBuildInfo(SCons.Node.BuildInfoBase):
    """Build-info record for Alias nodes (no fields beyond the base class)."""
    current_version_id = 1
class Alias(SCons.Node.Node):
    """A node for a phony (dummy) target that lives outside the filesystem."""
    NodeInfo = AliasNodeInfo
    BuildInfo = AliasBuildInfo
    def __init__(self, name):
        SCons.Node.Node.__init__(self)
        self.name = name
    def str_for_display(self):
        # Quote the alias name when shown in build output.
        return '"' + self.__str__() + '"'
    def __str__(self):
        return self.name
    def make_ready(self):
        # Compute the content signature up front.
        self.get_csig()
    # Keep a handle on the base implementation; convert() swaps the no-op
    # build() below for this real one when the alias acquires a builder.
    really_build = SCons.Node.Node.build
    is_up_to_date = SCons.Node.Node.children_are_up_to_date
    def is_under(self, dir):
        # Make Alias nodes get built regardless of
        # what directory scons was run from. Alias nodes
        # are outside the filesystem:
        return 1
    def get_contents(self):
        """The contents of an alias is the concatenation
        of the content signatures of all its sources."""
        childsigs = [n.get_csig() for n in self.children()]
        return ''.join(childsigs)
    def sconsign(self):
        """An Alias is not recorded in .sconsign files"""
        pass
    #
    #
    #
    def changed_since_last_build(self, target, prev_ni):
        # Changed if the current content signature differs from the stored
        # one; a missing stored signature counts as changed.
        cur_csig = self.get_csig()
        try:
            return cur_csig != prev_ni.csig
        except AttributeError:
            return 1
    def build(self):
        """A "builder" for aliases."""
        pass
    def convert(self):
        # Switch this alias to real build behavior (see really_build above).
        try: del self.builder
        except AttributeError: pass
        self.reset_executor()
        self.build = self.really_build
    def get_csig(self):
        """
        Generate a node's content signature, the digested signature
        of its content.
        node - the node
        cache - alternate node to use for the signature cache
        returns - the content signature
        """
        # Return the cached signature when present; compute and cache otherwise.
        try:
            return self.ninfo.csig
        except AttributeError:
            pass
        contents = self.get_contents()
        csig = SCons.Util.MD5signature(contents)
        self.get_ninfo().csig = csig
        return csig
# Module-level singleton namespace holding all Alias nodes.
default_ans = AliasNameSpace()
# Let Node.arg2nodes resolve plain strings through the alias namespace.
SCons.Node.arg2nodes_lookups.append(default_ans.lookup)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
def f(x):
    """Return the constant 42; *x* is accepted but ignored.

    Returns
    -------
    int
        Always 42.
    """
    result = 42
    return result
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
js_to_json,
parse_iso8601,
)
class NetzkinoIE(InfoExtractor):
    """Extractor for netzkino.de movie pages."""
    _VALID_URL = r'https?://(?:www\.)?netzkino\.de/\#!/(?P<category>[^/]+)/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.netzkino.de/#!/scifikino/rakete-zum-mond',
        'md5': '92a3f8b76f8d7220acce5377ea5d4873',
        'info_dict': {
            'id': 'rakete-zum-mond',
            'ext': 'mp4',
            'title': 'Rakete zum Mond (Endstation Mond, Destination Moon)',
            'comments': 'mincount:3',
            'description': 'md5:1eddeacc7e62d5a25a2d1a7290c64a28',
            'upload_date': '20120813',
            'thumbnail': r're:https?://.*\.jpg$',
            'timestamp': 1344858571,
            'age_limit': 12,
        },
        'params': {
            'skip_download': 'Download only works from Germany',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        category_id = mobj.group('category')
        video_id = mobj.group('id')
        # Movie metadata lives in the category listing; locate our slug in it.
        api_url = 'http://api.netzkino.de.simplecache.net/capi-2.0a/categories/%s.json?d=www' % category_id
        api_info = self._download_json(api_url, video_id)
        info = next(
            p for p in api_info['posts'] if p['slug'] == video_id)
        custom_fields = info['custom_fields']
        # The CDN URL templates are embedded in the site's production JS.
        production_js = self._download_webpage(
            'http://www.netzkino.de/beta/dist/production.min.js', video_id,
            note='Downloading player code')
        avo_js = self._search_regex(
            r'var urlTemplate=(\{.*?"\})',
            production_js, 'URL templates')
        templates = self._parse_json(
            avo_js, video_id, transform_source=js_to_json)
        suffix = {
            'hds': '.mp4/manifest.f4m',
            'hls': '.mp4/master.m3u8',
            'pmd': '.mp4',
        }
        film_fn = custom_fields['Streaming'][0]
        formats = [{
            'format_id': key,
            'ext': 'mp4',
            'url': tpl.replace('{}', film_fn) + suffix[key],
        } for key, tpl in templates.items()]
        self._sort_formats(formats)
        comments = [{
            'timestamp': parse_iso8601(c.get('date'), delimiter=' '),
            'id': c['id'],
            'author': c['name'],
            'html': c['content'],
            'parent': 'root' if c.get('parent', 0) == 0 else c['parent'],
        } for c in info.get('comments', [])]
        # FIX: 'FSK' may be absent from custom_fields; the old code indexed
        # the result of .get() unconditionally and crashed with TypeError.
        fsk = custom_fields.get('FSK')
        return {
            'id': video_id,
            'formats': formats,
            'comments': comments,
            'title': info['title'],
            'age_limit': int_or_none(fsk[0]) if fsk else None,
            'timestamp': parse_iso8601(info.get('date'), delimiter=' '),
            'description': clean_html(info.get('content')),
            'thumbnail': info.get('thumbnail'),
            'playlist_title': api_info.get('title'),
            'playlist_id': category_id,
        }
|
"""Auto-generated file, do not edit by hand. BL metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BL = PhoneMetadata(id='BL', country_code=590, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:590|(?:69|80)\\d|976)\\d{6}', possible_length=(9,)),
fixed_line=PhoneNumberDesc(national_number_pattern=' | 590(?:2[7-9]|5[12]|87)\\d{4}', example_number='590271234', possible_length=(9,)),
mobile=PhoneNumberDesc(national_number_pattern='69(?:0\\d\\d|1(?:2[2-9]|3[0-5]))\\d{4}', example_number='690001234', possible_length=(9,)),
toll_free=PhoneNumberDe | sc(national_number_pattern='80[0-5]\\d{6}', example_number='800012345', possible_length=(9,)),
voip=PhoneNumberDesc(national_number_pattern='976[01]\\d{5}', example_number='976012345', possible_length=(9,)),
national_prefix='0',
national_prefix_for_parsing='0',
mobile_number_portable_region=True)
|
mock_create_router,
mock_delete_router):
scenario = network.NeutronNetworks()
subnets_per_network = 1
subnet_cidr_start = "default_cidr"
net = {
"network": {
"id": "network-id"
}
}
subnet = {
"subnet": {
"name": "subnet-name",
"id": "subnet-id",
"enable_dhcp": False
}
}
router = {
"router": {
"name": "router-name",
"id": "router-id"
}
}
mock_create_router.return_value = router
mock_create_network_and_subnets.return_value = (net, [subnet])
mock_clients("neutron").add_interface_router = mock.Mock()
# Default options
scenario.create_and_delete_routers(
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, {}, subnets_per_network, subnet_cidr_start)])
mock_create_router.assert_has_calls(
[mock.call({})] * subnets_per_network)
mock_clients("neutron").add_interface_router.assert_has_calls(
[mock.call(router["router"]["id"],
{"subnet_id": subnet["subnet"]["id"]})
] * subnets_per_network)
mock_delete_router.assert_has_calls(
[mock.call(router)] * subnets_per_network)
mock_create_network_and_subnets.reset_mock()
mock_create_router.reset_mock()
mock_clients("neutron").add_interface_router.reset_mock()
mock_delete_router.reset_mock()
# Custom options
subnet_cidr_start = "custom_cidr"
subnet_create_args = {"allocation_pools": []}
router_create_args = {"admin_state_up": False}
scenario.create_and_delete_routers(
subnet_create_args=subnet_create_args,
subnet_cidr_start="custom_cidr",
subnets_per_network=subnets_per_network,
router_create_args=router_create_args)
mock_create_network_and_subnets.assert_has_calls(
[mock.call({}, subnet_create_args, subnets_per_network,
subnet_cidr_start)])
mock_create_router.assert_has_calls(
[mock.call(router_create_args)] * subnets_per_network)
mock_clients("neutron").add_interface_router.assert_has_calls(
[mock.call(router["router"]["id"],
{"subnet_id": subnet["subnet"]["id"]})
] * subnets_per_network)
mock_delete_router.assert_has_calls(
[mock.call(router)] * subnets_per_network)
@mock.patch(NEUTRON_NETWORKS + "._generate_random_name")
@mock.patch(NEUTRON_NETWORKS + "._list_ports")
@mock.patch(NEUTRON_NETWORKS + "._create_port")
@mock.patch(NEUTRON_NETWORKS + "._create_network")
def test_create_and_list_ports(self,
mock_create_network,
mock_create_port,
mock_list,
mock_random_name):
scenario = network.NeutronNetworks()
mock_random_name.return_value = "random-name"
net = {"network": {"id": "fake-id"}}
mock_create_network.return_value = net
ports_per_network = 10
self.assertRaises(TypeError, scenario.create_and_list_ports)
mock_create_network.reset_mock()
# Defaults
scenario.create_and_list_ports(ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({})
self.assertEqual(mock_create_port.mock_calls,
[mock.call(net, {})] * ports_per_network)
mock_list.assert_called_once_with()
mock_create_network.reset_mock()
mock_create_port.reset_mock()
mock_list.reset_mock()
# Custom options
scenario.create_and_list_ports(
network_create_args={"name": "given-name"},
port_create_args={"allocation_pools": []},
ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({"name": "given-name"})
self.assertEqual(
mock_create_port.mock_calls,
[mock.call(net, {"allocation_pools": []})] * ports_per_network)
mock_list.assert_called_once_with()
@mock.patch(NEUTRON_NETWORKS + "._generate_random_name")
@mock.patch(NEUTRON_NETWORKS + "._update_port")
@mock.patch(NEUTRON_NETWORKS + "._create_port", return_value={
"port": {
"name": "port-name",
"id": "port-id",
"admin_state_up": True
}
})
@mock.patch(NEUTRON_NETWORKS + "._create_network", return_value={
"network": {
"id": "fake-id"
}
})
def test_create_and_update_ports(self,
mock_create_network,
mock_create_port,
mock_update_port,
mock_random_name):
scenario = network.NeutronNetworks()
mock_random_name.return_value = "random-name"
ports_per_network = 10
port_update_args = {
"name": "_updated",
"admin_state_up": False
}
# Defaults
scenario.create_and_update_ports(
port_update_args=port_update_args,
ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({})
mock_create_port.assert_has_calls(
[mock.call({"network": {"id": "fake-id"}},
{})] * ports_per_network)
mock_update_port.assert_has_calls(
[mock.call(mock_create_port.return_value, port_update_args)
] * ports_per_network)
mock_create_network.reset_mock()
mock_create_port.reset_mock()
mock_update_port.reset_mock()
# Custom options
scenario.create_and_update_ports(
port_update_args=port_update_args,
network_create_args={"name": "given-name"},
port_create_args={"allocation_pools": []},
ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({"name": "given-name"})
mock_create_port.assert_h | as_calls(
[mock.call({"network": {"id": "fake-id"}},
{"allocation_pools": []})] * ports_per_network)
mock_update_port.assert_has_calls(
| [mock.call(mock_create_port.return_value, port_update_args)
] * ports_per_network)
@mock.patch(NEUTRON_NETWORKS + "._generate_random_name")
@mock.patch(NEUTRON_NETWORKS + "._delete_port")
@mock.patch(NEUTRON_NETWORKS + "._create_port")
@mock.patch(NEUTRON_NETWORKS + "._create_network")
def test_create_and_delete_ports(self,
mock_create_network,
mock_create_port,
mock_delete,
mock_random_name):
scenario = network.NeutronNetworks()
mock_random_name.return_value = "random-name"
net = {"network": {"id": "fake-id"}}
mock_create_network.return_value = net
ports_per_network = 10
self.assertRaises(TypeError, scenario.create_and_delete_ports)
mock_create_network.reset_mock()
# Default options
scenario.create_and_delete_ports(ports_per_network=ports_per_network)
mock_create_network.assert_called_once_with({})
self.assertEqual(mock_create_port.mock_calls,
[mock.call(net, {})] * ports_per_network)
self.assertEqual(mock_delete.mock_calls,
[mock.call(mock_create_port())] * ports_per_network)
mock_create_network.reset_mock()
mock_create_port.reset_mock()
mock_delete.reset_mock()
# Custom options
scenario.create_and_delete_ports(
network_create_args={ |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Chassis100ChassisActions(object):
    """
    Swagger model for the Chassis.1.0.0 ChassisActions resource.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        Chassis100ChassisActions - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # attribute name -> swagger type of the attribute
        self.swagger_types = {
            'oem': 'object',
            'chassis_reset': 'Chassis100Reset'
        }
        # attribute name -> key used in the JSON definition
        self.attribute_map = {
            'oem': 'Oem',
            'chassis_reset': '#Chassis.Reset'
        }
        self._oem = None
        self._chassis_reset = None
    @property
    def oem(self):
        """
        Gets the oem of this Chassis100ChassisActions.
        :return: The oem of this Chassis100ChassisActions.
        :rtype: object
        """
        return self._oem
    @oem.setter
    def oem(self, oem):
        """
        Sets the oem of this Chassis100ChassisActions.
        :param oem: The oem of this Chassis100ChassisActions.
        :type: object
        """
        self._oem = oem
    @property
    def chassis_reset(self):
        """
        Gets the chassis_reset of this Chassis100ChassisActions.
        :return: The chassis_reset of this Chassis100ChassisActions.
        :rtype: Chassis100Reset
        """
        return self._chassis_reset
    @chassis_reset.setter
    def chassis_reset(self, chassis_reset):
        """
        Sets the chassis_reset of this Chassis100ChassisActions.
        :param chassis_reset: The chassis_reset of this Chassis100ChassisActions.
        :type: Chassis100Reset
        """
        self._chassis_reset = chassis_reset
    def to_dict(self):
        """
        Returns the model properties as a dict.

        Nested models (anything with a to_dict method) are serialized
        recursively, including inside lists.
        """
        result = {}
        # dict.items() replaces six.iteritems -- identical behavior on
        # Python 3 without the six dependency
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: the previous implementation compared __dict__ directly,
        which raised AttributeError for operands without a __dict__
        (e.g. ints) and treated instances of unrelated classes with the
        same attributes as equal. Non-matching types now delegate via
        NotImplemented, which evaluates to False.
        """
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
############################### | ################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# ______________ | _______________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#from Cleanup import *
#from Documentation import *
#from ForEach import *
#from Install import *
#from Lint import *
#from Missing import *
#from Replay import *
#from SanityCheck import *
#from Testing import *
#from Update import *
#from Upgrade import *
|
tRaises(AttributeError):
Sequence('ACGT').observed_chars = {'a', 'b', 'c'}
def test_eq_and_ne(self):
seq_a = Sequence("A")
seq_b = Sequence("B")
self.assertTrue(seq_a == seq_a)
self.assertTrue(Sequence("a") == Sequence("a"))
self.assertTrue(Sequence("a", metadata={'id': 'b'}) ==
Sequence("a", metadata={'id': 'b'}))
self.assertTrue(Sequence("a",
metadata={'id': 'b', 'description': 'c'}) ==
Sequence("a",
metadata={'id': 'b', 'description': 'c'}))
self.assertTrue(Sequence("a", metadata={'id': 'b', 'description': 'c'},
positional_metadata={'quality': [1]}) ==
Sequence("a", metadata={'id': 'b', 'description': 'c'},
positional_metadata={'quality': [1]}))
self.assertTrue(seq_a != seq_b)
self.assertTrue(SequenceSubclass("a") != Sequence("a"))
self.assertTrue(Sequence("a") != Sequence("b"))
self.assertTrue(Sequence("a") != Sequence("a", metadata={'id': 'b'}))
self.assertTrue(Sequence("a", metadata={'id': 'c'}) !=
Sequence("a",
metadata={'id': 'c', 'description': 't'}))
self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
Sequence("a"))
self.assertTrue(Sequence("a", positional_metadata={'quality': [1]}) !=
Sequence("a", positional_metadata={'quality': [2]}))
self.assertTrue(Sequence("c", positional_metadata={'quality': [3]}) !=
Sequence("b", positional_metadata={'quality': [3]}))
self.assertTrue(Sequence("a", metadata={'id': 'b'}) !=
Sequence("c", metadata={'id': 'b'}))
def test_eq_sequences_without_metadata_compare_equal(self):
self.assertTrue(Sequence('') == Sequence(''))
self.assertTrue(Sequence('z') == Sequence('z'))
self.assertTrue(
Sequence('ACGT') == Sequence('ACGT'))
def test_eq_sequences_with_metadata_compare_equal(self):
seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'qual': [1, 2, 3, 4]})
seq2 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'qual': [1, 2, 3, 4]})
self.assertTrue(seq1 == seq2)
# order shouldn't matter
self.assertTrue(seq2 == seq1)
def test_eq_sequences_from_different_sources_compare_equal(self):
# sequences that have the same data but are constructed from different
# types of data should compare equal
seq1 = Sequence('ACGT', metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'quality': (1, 2, 3, 4)})
seq2 = Sequence(np.array([65, 67, 71, 84], dtype=np.uint8),
metadata={'id': 'foo', 'desc': 'abc'},
positional_metadata={'quality': np.array([1, 2, 3,
4])})
self.assertTrue(seq1 == seq2)
def test_eq_type_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = SequenceSubclass('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_metadata_mismatch(self):
# both provided
seq1 = Sequence('ACGT', metadata={'id': 'foo'})
seq2 = Sequence('ACGT', metadata={'id': 'bar'})
self.assertFalse(seq1 == seq2)
# one provided
seq1 = Sequence('ACGT', metadata={'id': 'foo'})
seq2 = Sequence('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_positional_metadata_mismatch(self):
# both provided
seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
seq2 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 5]})
self.assertFalse(seq1 == seq2)
# one provided
seq1 = Sequence('ACGT', positional_metadata={'quality': [1, 2, 3, 4]})
seq2 = Sequence('ACGT')
self.assertFalse(seq1 == seq2)
def test_eq_sequence_mismatch(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('TGCA')
self.assertFalse(seq1 == seq2)
def test_eq_handles_missing_metadata_efficiently(self):
seq1 = Sequence('ACGT')
seq2 = Sequence('ACGT')
self.assertTrue(seq1 == seq2)
# metadata attributes shou | ld be None and not initialized to a "missing"
# representation
self.assertIsNone(seq1._metadata)
self.assertIsNone(seq1._positional_metadata)
self.assertIsNone(seq2._metadata)
self.assertIsNone(seq2._positional_metadata)
def test_getitem_gives_new_sequence(self):
seq = Sequence("Sequence string !1@2#3?.,")
self.assertFalse(seq is seq[:])
def test_getitem_with_int_ha | s_positional_metadata(self):
s = "Sequence string !1@2#3?.,"
length = len(s)
seq = Sequence(s, metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("S", {'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': np.array([0])})
self.assertEqual(seq[0], eseq)
eseq = Sequence(",", metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality':
np.array([len(seq) - 1])})
self.assertEqual(seq[len(seq) - 1], eseq)
eseq = Sequence("t", metadata={'id': 'id', 'description': 'dsc'},
positional_metadata={'quality': [10]})
self.assertEqual(seq[10], eseq)
def test_single_index_to_slice(self):
a = [1, 2, 3, 4]
self.assertEqual(slice(0, 1), _single_index_to_slice(0))
self.assertEqual([1], a[_single_index_to_slice(0)])
self.assertEqual(slice(-1, None),
_single_index_to_slice(-1))
self.assertEqual([4], a[_single_index_to_slice(-1)])
def test_is_single_index(self):
self.assertTrue(_is_single_index(0))
self.assertFalse(_is_single_index(True))
self.assertFalse(_is_single_index(bool()))
self.assertFalse(_is_single_index('a'))
def test_as_slice_if_single_index(self):
self.assertEqual(slice(0, 1), _as_slice_if_single_index(0))
slice_obj = slice(2, 3)
self.assertIs(slice_obj,
_as_slice_if_single_index(slice_obj))
def test_slice_positional_metadata(self):
seq = Sequence('ABCDEFGHIJ',
positional_metadata={'foo': np.arange(10),
'bar': np.arange(100, 110)})
self.assertTrue(pd.DataFrame({'foo': [0], 'bar': [100]}).equals(
seq._slice_positional_metadata(0)))
self.assertTrue(pd.DataFrame({'foo': [0], 'bar': [100]}).equals(
seq._slice_positional_metadata(slice(0, 1))))
self.assertTrue(pd.DataFrame({'foo': [0, 1],
'bar': [100, 101]}).equals(
seq._slice_positional_metadata(slice(0, 2))))
self.assertTrue(pd.DataFrame(
{'foo': [9], 'bar': [109]}, index=[9]).equals(
seq._slice_positional_metadata(9)))
def test_getitem_with_int_no_positional_metadata(self):
seq = Sequence("Sequence string !1@2#3?.,",
metadata={'id': 'id2', 'description': 'no_qual'})
eseq = Sequence("t", metadata={'id': 'id2', 'description': 'no_qual'})
self.assertEqual(seq[10], eseq)
def test_getitem_with_slice_has_positional_metadata(self):
s = "0123456789abcdef"
length = len(s)
seq = Sequence(s, metadata={'id': 'id3', 'description': 'dsc3'},
positional_metadata={'quality': np.arange(length)})
eseq = Sequence("012", metadata={'id': 'id3', 'description': 'dsc3'},
po |
olor=True, bitmapdpi=100,
antialias=True, quality=85, backcolor='#ffffff00',
pdfdpi=150, svgtextastext=False):
"""Initialise export class. Parameters are:
doc: document to write
filename: output | filename
pagenumber: pagenumber to export or list of pages for some formats
color: use color | or try to use monochrome
bitmapdpi: assume this dpi value when writing images
antialias: antialias text and lines when writing bitmaps
quality: compression factor for bitmaps
backcolor: background color default for bitmaps (default transparent).
pdfdpi: dpi for pdf and eps files
svgtextastext: write text in SVG as text, rather than curves
"""
self.doc = doc
self.filename = filename
self.pagenumber = pagenumber
self.color = color
self.bitmapdpi = bitmapdpi
self.antialias = antialias
self.quality = quality
self.backcolor = backcolor
self.pdfdpi = pdfdpi
self.svgtextastext = svgtextastext
def export(self):
"""Export the figure to the filename."""
ext = os.path.splitext(self.filename)[1].lower()
if ext in ('.eps', '.ps', '.pdf'):
self.exportPDFOrPS(ext)
elif ext in ('.png', '.jpg', '.jpeg', '.bmp', '.tiff', '.xpm'):
self.exportBitmap(ext)
elif ext == '.svg':
self.exportSVG()
elif ext == '.selftest':
self.exportSelfTest()
elif ext == '.pic':
self.exportPIC()
elif ext == '.emf' and hasemf:
self.exportEMF()
else:
raise RuntimeError("File type '%s' not supported" % ext)
    def renderPage(self, page, size, dpi, painter):
        """Render a document page to the given painter.

        page: page number to draw
        size: (width, height) of the output area
        dpi: (horizontal, vertical) resolution tuple
        painter: paint target; it is ended by this call and must not be
            reused afterwards

        Drawing goes through a PaintHelper configured for direct painting,
        so the document paints straight onto `painter`.
        """
        helper = painthelper.PaintHelper(size, dpi=dpi, directpaint=painter)
        # clip all drawing to the page rectangle
        painter.setClipRect( qt4.QRectF(
            qt4.QPointF(0,0), qt4.QPointF(*size)) )
        painter.save()
        self.doc.paintTo(helper, page)
        painter.restore()
        # painting is finished here; the painter is unusable afterwards
        painter.end()
def getSinglePage(self):
"""Check single number of pages or throw exception,
else return page number."""
try:
if len(self.pagenumber) != 1:
raise RuntimeError(
'Can only export a single page in this format')
return self.pagenumber[0]
except TypeError:
return self.pagenumber
    def exportBitmap(self, ext):
        """Export the selected page to a bitmap format.

        ext: filename extension including the leading dot (e.g. '.png');
            chooses the image format handed to QImageWriter.

        PNG output keeps an alpha channel; all other formats are written
        opaque. Resolution comes from self.bitmapdpi, background from
        self.backcolor, compression from self.quality.
        """
        format = ext[1:] # setFormat() doesn't want the leading '.'
        if format == 'jpeg':
            format = 'jpg'
        page = self.getSinglePage()
        # get size for bitmap's dpi
        dpi = self.bitmapdpi
        size = self.doc.pageSize(page, dpi=(dpi,dpi))
        # create real output image
        backqcolor = utils.extendedColorToQColor(self.backcolor)
        if format == 'png':
            # transparent output
            image = qt4.QImage(size[0], size[1],
                               qt4.QImage.Format_ARGB32_Premultiplied)
        else:
            # non transparent output
            image = qt4.QImage(size[0], size[1],
                               qt4.QImage.Format_RGB32)
            backqcolor.setAlpha(255)
        image.setDotsPerMeterX(dpi*m_inch)
        image.setDotsPerMeterY(dpi*m_inch)
        if backqcolor.alpha() == 0:
            image.fill(qt4.qRgba(0,0,0,0))
        else:
            image.fill(backqcolor.rgb())
        # paint to the image
        painter = painthelper.DirectPainter(image)
        painter.setRenderHint(qt4.QPainter.Antialiasing, self.antialias)
        painter.setRenderHint(qt4.QPainter.TextAntialiasing, self.antialias)
        self.renderPage(page, size, (dpi,dpi), painter)
        # write image to disk
        writer = qt4.QImageWriter()
        writer.setFormat(qt4.QByteArray(format))
        writer.setFileName(self.filename)
        # enable LZW compression for TIFFs
        writer.setCompression(1)
        try:
            # try to enable optimal JPEG compression using new
            # options added in Qt 5.5
            writer.setOptimizedWrite(True)
            writer.setProgressiveScanWrite(True)
        except AttributeError:
            pass
        if format == 'png':
            # min quality for png as it makes no difference to output
            # and makes file size smaller
            writer.setQuality(0)
        else:
            writer.setQuality(self.quality)
        writer.write(image)
    def exportPDFOrPS(self, ext):
        """Export to EPS, PS or PDF format.

        ext: '.eps', '.ps' or '.pdf' (chooses the QPrinter output format).

        After Qt renders the pages, the output file is post-processed to fix
        page sizes / bounding boxes, written to a temporary file, and the
        temporary file then replaces the original.
        """
        # setup printer with requested parameters
        printer = qt4.QPrinter()
        printer.setResolution(self.pdfdpi)
        printer.setFullPage(True)
        printer.setColorMode(
            qt4.QPrinter.Color if self.color else qt4.QPrinter.GrayScale)
        printer.setOutputFormat(
            qt4.QPrinter.PdfFormat if ext=='.pdf' else
            qt4.QPrinter.PostScriptFormat)
        printer.setOutputFileName(self.filename)
        printer.setCreator('Veusz %s' % utils.version())
        # convert page to list if necessary
        try:
            pages = list(self.pagenumber)
        except TypeError:
            pages = [self.pagenumber]
        if len(pages) != 1 and ext == '.eps':
            raise RuntimeError(
                'Only single pages allowed for .eps. Use .ps instead.')
        # render ranges and return size of each page
        sizes = self.doc.printTo(printer, pages)
        # We have to modify the page sizes or bounding boxes to match
        # the document. This is copied to a temporary file.
        # NOTE(review): randint does not guarantee a unique temporary name;
        # a collision would clobber an existing file -- confirm acceptable.
        tmpfile = "%s.tmp.%i" % (self.filename, random.randint(0,1000000))
        if ext == '.eps' or ext == '.ps':
            # only 1 size allowed for PS, so use maximum
            maxsize = sizes[0]
            for size in sizes[1:]:
                maxsize = max(size[0], maxsize[0]), max(size[1], maxsize[1])
            fixupPSBoundingBox(self.filename, tmpfile, printer.width(), maxsize)
        elif ext == '.pdf':
            # change pdf bounding box and correct pdf index
            with open(self.filename, 'rb') as fin:
                text = fin.read()
            text = scalePDFMediaBox(text, printer.width(), sizes)
            text = fixupPDFIndices(text)
            with open(tmpfile, 'wb') as fout:
                fout.write(text)
        else:
            raise RuntimeError('Invalid file type')
        # replace original by temporary
        os.remove(self.filename)
        os.rename(tmpfile, self.filename)
def exportSVG(self):
"""Export document as SVG"""
page = self.getSinglePage()
dpi = svg_export.dpi * 1.
size = self.doc.pageSize(
page, dpi=(dpi,dpi), integer=False)
with codecs.open(self.filename, 'w', 'utf-8') as f:
paintdev = svg_export.SVGPaintDevice(
f, size[0]/dpi, size[1]/dpi, writetextastext=self.svgtextastext)
painter = painthelper.DirectPainter(paintdev)
self.renderPage(page, size, (dpi,dpi), painter)
def exportSelfTest(self):
"""Export document for testing"""
page = self.getSinglePage()
dpi = svg_export.dpi * 1.
size = width, height = self.doc.pageSize(
page, dpi=(dpi,dpi), integer=False)
f = open(self.filename, 'w')
paintdev = selftest_export.SelfTestPaintDevice(f, width/dpi, height/dpi)
painter = painthelper.DirectPainter(paintdev)
self.renderPage(page, size, (dpi,dpi), painter)
f.close()
def exportPIC(self):
"""Export document as Qt PIC"""
page = self.getSinglePage()
pic = qt4.QPicture()
painter = painthelper.DirectPainter(pic)
dpi = (pic.logicalDpiX(), pic.logicalDpiY())
size = self.doc.pageSize(page, dpi=dpi)
self.renderPage(page, size, dpi, painter)
pic.save(self.filename)
def exportEMF(self):
"""Ex |
_code=None, layerType=None, limit=100,
offset=0, url_user=None, scenario_id=None, squid=None, **params):
"""GET request. Individual layer, count, or list.
"""
# Layer type:
# 0 - Anything
# 1 - Environmental layer
# 2 - ? (Not implemented yet)
if layerType is None or layerType == 0:
if path_layer_id is None:
return self._list_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
before_time=before_time, epsg_code=epsg_code, limit=limit,
offset=offset, squid=squid)
if path_layer_id.lower() == 'count':
return self._count_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
before_time=before_time, epsg_code=epsg_code, squid=squid)
return self._get_layer(path_layer_id, env_layer=False)
if path_layer_id is None:
return self._list_env_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
alt_pred_code=alt_pred_code, before_time=before_time,
date_code=date_code, env_code=env_code,
env_type_id=env_type_id, epsg_code=epsg_code,
gcm_code=gcm_code, limit=limit, offset=offset,
scenario_id=scenario_id)
if path_layer_id.lower() == 'count':
return self._count_env_layers(
self.get_user_id(url_user=url_user), after_time=after_time,
alt_pred_code=alt_pred_code, before_time=before_time,
date_code=date_code, env_code=env_code,
env_type_id=env_type_id, epsg_code=epsg_code,
gcm_code=gcm_code, scenario_code=scenario_id)
return self._get_layer(path_layer_id, env_layer=True)
# ................................
    def _count_env_layers(self, user_id, after_time=None, alt_pred_code=None,
                          before_time=None, date_code=None, env_code=None,
                          env_type_id=None, epsg_code=None, gcm_code=None,
                          scenario_code=None):
        """Count environmental layer objects matching the specified criteria

        Args:
            user_id: The user to count environmental layers for. Note that
                this may not be the same user logged into the system
            after_time: Count layers modified after this time (Modified
                Julian Day)
            alt_pred_code: Count layers with this alternate prediction code
            before_time: Count layers modified before this time (Modified
                Julian Day)
            date_code: Count layers with this date code
            env_code: Count layers with this environment code
            env_type_id: Count layers with this environmental type
            epsg_code: Count layers with this EPSG code
            gcm_code: Count layers with this GCM code
            scenario_code: Count layers from this scenario (previously
                misdocumented as ``scenario_id``)

        Returns:
            dict: {'count': <number of matching environmental layers>}
        """
        layer_count = self.scribe.count_env_layers(
            user_id=user_id, env_code=env_code, gcm_code=gcm_code,
            alt_pred_code=alt_pred_code, date_code=date_code,
            after_time=after_time, before_time=before_time, epsg=epsg_code,
            env_type_id=env_type_id, scenario_code=scenario_code)
        return {'count': layer_count}
# ................................
    def _count_layers(self, user_id, after_time=None, before_time=None,
                      epsg_code=None, squid=None):
        """Return a count of layers matching the specified criteria

        Args:
            user_id: The user to count layers for. Note that this may not be
                the same user that is logged into the system
            after_time: Count layers modified after this time (Modified
                Julian Day)
            before_time: Count layers modified before this time (Modified
                Julian Day)
            epsg_code: Count layers that have this EPSG code
            squid: Count layers with this species identifier

        Returns:
            dict: {'count': <number of matching layers>}
        """
        layer_count = self.scribe.count_layers(
            user_id=user_id, squid=squid, after_time=after_time,
            before_time=before_time, epsg=epsg_code)
        return {'count': layer_count}
# ................................
def _get_layer(self, path_layer_id, env_layer=False):
"""Attempt to get a layer
"""
try:
_ = int(path_layer_id)
except ValueError:
raise cherrypy.HTTPError(
HTTPStatus.BAD_REQUEST,
'{} is not a valid layer ID'.format(path_layer_id))
if env_layer:
lyr = self.scribe.get_env_layer(lyr_id=path_layer_id)
else:
lyr = self.scribe.get_layer(lyr_id=path_layer_id)
if lyr is None:
raise cherrypy.HTTPError(
HTTPStatus.NOT_FOUND,
'Environmental layer {} was not found'.format(path_layer_id))
if check_user_permission(self.get_user_id(), lyr, HTTPMethod.GET):
return lyr
raise cherrypy.HTTPError(
HTTPStatus.FORBIDDEN,
'User {} does not have permission to access layer {}'.format(
self.get_user_id(), path_layer_id))
# ................................
def _list_env_layers(self, user_id, after_time=None, alt_pred_code=None,
before_time=None, date_code=None, env_code=None,
env_type_id=None, epsg_code=None, gcm_code=None,
limit=100, offset=0, scenario_id=None):
"""List environmental layer objects matching the specified criteria
Args:
user_id: The user to list environmental layers for. Note that this
may not be the same user logged into the system
after_time: (optional) Return layers modified after this time
(Modified Julian Day)
alt_pred_code: (optional) Return layers with this alternate
prediction code
before_time: (optional) Return layers modified before this time
(Modified Julian Day)
date_code: (optional) Return layers with this date code
env_code: (optional) Return layers with this environment code
env_type_id: (optional) Return layers with this environmental type
epsg_code: (optional) Return layers with this EPSG code
gcm_code: (optional) Return layers with this GCM code
limit: (optional) Return this number of layers, at most
offset: (optional) Offset the returned layers by this number
scenario_id: (optional) Return layers from this scenario
"""
lyr_atoms = self.scribe.list_env_layers(
offset, limit, user_id=user_id, env_code=env_code,
gcm_code=gcm_code, alt_pred_code=alt_pred_code,
da | te_code=date_code, after_time=after_time,
before_time=before_time, epsg=epsg_code, env_type_id=env_type_id)
return lyr_atoms
# ................................
def _list_layers(self, user_id, after_time=None, before_time=None,
epsg_code=None, limit=100, offset=0, squid=None):
"""Return a list of | layers matching the specified criteria
Args:
user_id: The user to list layers for. Note that this may not be
the same user that is logged into the system
after_time: List layers modified after this time (Modified Julian
Day)
before_time: List layers modified before this time (Modified Julian
Day)
epsg_code: Return layers that have this EPSG code
limit: Return this number of layers, at most
offset: Offset the returned layers by this number
squid: Return layers with this species identifie |
from unittest import skipIf
from django.conf import settings
def skipIfDefaultUser(test_func):
    """
    Skip a test if a default user model is in use.
    """
    using_default = settings.AUTH_USER_MODEL == "auth.User"
    return skipIf(using_default, "Default user model in use")(test_func)
def skipIfCustomUser(test_func):
    """
    Skip a test if a custom user model is in use.
    """
    using_custom = settings.AUTH_USER_MODEL != "auth.User"
    return skipIf(using_custom, "Custom user model in use")(test_func)
|
""" Tests for OAuth Dispatch python API module. """
import unittest
from django.conf import settings
from django.http import HttpRequest
from django.test import TestCase
from oauth2_provider.models import AccessToken
from common.djangoapps.student.tests.factories import UserFactory
OAUTH_PROVIDER_ENABLED = settings.FEATURES.get('ENABLE_OAUTH2_PROVIDER')
if OAUTH_PROVIDER_ENABLED:
from openedx.core.djangoapps.oauth_dispatch import api
from openedx.core.djangoapps.oauth_dispatch.adapters import DOTAdapter
from openedx.core.djangoapps.oauth_dispatch.tests.constants import DUMMY_REDIRECT_URL
EXPECTED_DEFAULT_EXPIRES_IN = 36000
@unittest.skipUnless(OAUTH_PROVIDER_ENABLED, 'OAuth2 not enabled')
class TestOAuthDispatchAPI(TestCase):
    """ Tests for oauth_dispatch's api.py module. """
    def setUp(self):
        super().setUp()
        self.adapter = DOTAdapter()
        self.user = UserFactory()
        self.client = self.adapter.create_public_client(
            name='public app',
            user=self.user,
            redirect_uri=DUMMY_REDIRECT_URL,
            client_id='public-client-id',
        )
    def _assert_dict_subset(self, subset, dictionary):
        """Assert every key/value pair of ``subset`` is in ``dictionary``.

        Local replacement for unittest's assertDictContainsSubset, which
        was deprecated in Python 3.2 and removed in Python 3.12.
        """
        for key, expected in subset.items():
            assert key in dictionary, key
            assert dictionary[key] == expected
    def _assert_stored_token(self, stored_token_value, expected_token_user, expected_client):
        """Check the persisted AccessToken matches the user and client."""
        stored_access_token = AccessToken.objects.get(token=stored_token_value)
        assert stored_access_token.user.id == expected_token_user.id
        assert stored_access_token.application.client_id == expected_client.client_id
        assert stored_access_token.application.user.id == expected_client.user.id
    def test_create_token_success(self):
        token = api.create_dot_access_token(HttpRequest(), self.user, self.client)
        assert token['access_token']
        assert token['refresh_token']
        self._assert_dict_subset(
            {
                'token_type': 'Bearer',
                'expires_in': EXPECTED_DEFAULT_EXPIRES_IN,
                'scope': '',
            },
            token,
        )
        self._assert_stored_token(token['access_token'], self.user, self.client)
    def test_create_token_another_user(self):
        token = api.create_dot_access_token(HttpRequest(), UserFactory(), self.client)
        # the token must be bound to the passed-in user, not self.user;
        # fetch it back to compare
        stored = AccessToken.objects.get(token=token['access_token'])
        assert stored.user.id != self.user.id
        assert stored.application.client_id == self.client.client_id
        assert stored.application.user.id == self.client.user.id
    def test_create_token_overrides(self):
        expires_in = 4800
        token = api.create_dot_access_token(
            HttpRequest(), self.user, self.client, expires_in=expires_in, scopes=['profile'],
        )
        self._assert_dict_subset({'scope': 'profile'}, token)
        self._assert_dict_subset({'expires_in': expires_in}, token)
|
# encoding: utf8
from __future__ import unicode_literals
from ..symbols import *
TAG_MAP = {
    # Maps UniDic part-of-speech tag strings to Universal Dependencies POS.
    # Explanation of Unidic tags:
    # https://www.gavo.t.u-tokyo.ac.jp/~mine/japanese/nlp+slp/UNIDIC_manual.pdf
    # Universal Dependencies Mapping:
    # http://universaldependencies.org/ja/overview/morphology.html
    # http://universaldependencies.org/ja/pos/all.html
    "記号,一般,*,*":{POS: PUNCT}, # this includes characters used to represent sounds like ドレミ
    "記号,文字,*,*":{POS: PUNCT}, # this is for Greek and Latin characters used as symbols, as in math
    "感動詞,フィラー,*,*": {POS: INTJ},
    "感動詞,一般,*,*": {POS: INTJ},
    # this is specifically for unicode full-width space
    "空白,*,*,*": {POS: X},
    "形状詞,一般,*,*":{POS: ADJ},
    "形状詞,タリ,*,*":{POS: ADJ},
    "形状詞,助動詞語幹,*,*":{POS: ADJ},
    "形容詞,一般,*,*":{POS: ADJ},
    "形容詞,非自立可能,*,*":{POS: AUX}, # XXX ADJ if alone, AUX otherwise
    "助詞,格助詞,*,*":{POS: ADP},
    "助詞,係助詞,*,*":{POS: ADP},
    "助詞,終助詞,*,*":{POS: PART},
    "助詞,準体助詞,*,*":{POS: SCONJ}, # の as in 走るのが速い
    "助詞,接続助詞,*,*":{POS: SCONJ}, # verb ending て
    "助詞,副助詞,*,*":{POS: PART}, # ばかり, つつ after a verb
    "助動詞,*,*,*":{POS: AUX},
    "接続詞,*,*,*":{POS: SCONJ}, # XXX: might need refinement
    "接頭辞,*,*,*":{POS: NOUN},
    "接尾辞,形状詞的,*,*":{POS: ADJ}, # がち, チック
    "接尾辞,形容詞的,*,*":{POS: ADJ}, # -らしい
    "接尾辞,動詞的,*,*":{POS: NOUN}, # -じみ
    "接尾辞,名詞的,サ変可能,*":{POS: NOUN}, # XXX see 名詞,普通名詞,サ変可能,*
    "接尾辞,名詞的,一般,*":{POS: NOUN},
    "接尾辞,名詞的,助数詞,*":{POS: NOUN},
    "接尾辞,名詞的,副詞可能,*":{POS: NOUN}, # -後, -過ぎ
    "代名詞,*,*,*":{POS: PRON},
    "動詞,一般,*,*":{POS: VERB},
    "動詞,非自立可能,*,*":{POS: VERB}, # XXX VERB if alone, AUX otherwise
    "動詞,非自立可能,*,*,AUX":{POS: AUX},
    "動詞,非自立可能,*,*,VERB":{POS: VERB},
    "副詞,*,*,*":{POS: ADV},
    "補助記号,AA,一般,*":{POS: SYM}, # text art
    "補助記号,AA,顔文字,*":{POS: SYM}, # kaomoji
    "補助記号,一般,*,*":{POS: SYM},
    "補助記号,括弧開,*,*":{POS: PUNCT}, # open bracket
    "補助記号,括弧閉,*,*":{POS: PUNCT}, # close bracket
    "補助記号,句点,*,*":{POS: PUNCT}, # period or other EOS marker
    "補助記号,読点,*,*":{POS: PUNCT}, # comma
    "名詞,固有名詞,一般,*":{POS: PROPN}, # general proper noun
    "名詞,固有名詞,人名,一般":{POS: PROPN}, # person's name
    "名詞,固有名詞,人名,姓":{POS: PROPN}, # surname
    "名詞,固有名詞,人名,名":{POS: PROPN}, # first name
    "名詞,固有名詞,地名,一般":{POS: PROPN}, # place name
    "名詞,固有名詞,地名,国":{POS: PROPN}, # country name
    "名詞,助動詞語幹,*,*":{POS: AUX},
    "名詞,数詞,*,*":{POS: NUM}, # includes Chinese numerals
    "名詞,普通名詞,サ変可能,*":{POS: NOUN}, # XXX: sometimes VERB in UDv2; suru-verb noun
    "名詞,普通名詞,サ変可能,*,NOUN":{POS: NOUN},
    "名詞,普通名詞,サ変可能,*,VERB":{POS: VERB},
    "名詞,普通名詞,サ変形状詞可能,*":{POS: NOUN}, # ex: 下手
    "名詞,普通名詞,一般,*":{POS: NOUN},
    "名詞,普通名詞,形状詞可能,*":{POS: NOUN}, # XXX: sometimes ADJ in UDv2
    "名詞,普通名詞,形状詞可能,*,NOUN":{POS: NOUN},
    "名詞,普通名詞,形状詞可能,*,ADJ":{POS: ADJ},
    "名詞,普通名詞,助数詞可能,*":{POS: NOUN}, # counter / unit
    "名詞,普通名詞,副詞可能,*":{POS: NOUN},
    "連体詞,*,*,*":{POS: ADJ}, # XXX this has exceptions based on literal token
    "連体詞,*,*,*,ADJ":{POS: ADJ},
    "連体詞,*,*,*,PRON":{POS: PRON},
    "連体詞,*,*,*,DET":{POS: DET},
}
|
from | unittest import TestCase
from pyrake.c | ontrib.spidermiddleware.urllength import UrlLengthMiddleware
from pyrake.http import Response, Request
from pyrake.spider import Spider
class TestUrlLengthMiddleware(TestCase):
    def test_process_spider_output(self):
        """Requests whose URL exceeds maxlength are dropped from the output."""
        res = Response('http://pyraketest.org')
        short_url_req = Request('http://pyraketest.org/')
        long_url_req = Request('http://pyraketest.org/this_is_a_long_url')
        reqs = [short_url_req, long_url_req]
        mw = UrlLengthMiddleware(maxlength=25)
        spider = Spider('foo')
        out = list(mw.process_spider_output(res, reqs, spider))
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual instead
        self.assertEqual(out, [short_url_req])
|
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND | , either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Priority Group Model."""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengi | ne.ext import db
from django.utils.translation import ugettext
from soc.models import linkable
class PriorityGroup(linkable.Linkable):
  """The PriorityGroup model.
  """
  #: the priority of this group, 0 being lower than 1
  priority = db.IntegerProperty(required=False, default=0)
  #: the human readable name of this priority group
  name = db.StringProperty(required=False)
|
# -*- coding: utf-8 -*-
#
# metasci documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 7 22:29:49 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Determine if we're on Read the Docs server
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On Read the Docs, we need to mock a few third-party modules so we don't get
# ImportErrors when building documentation
# NOTE(review): these mocks are installed unconditionally, not only when
# on_rtd is true -- confirm this is intended.
try:
    # Python 3: mock lives in the standard library
    from unittest.mock import MagicMock
except ImportError:
    # Python 2 fallback: third-party mock backport
    from mock import Mock as MagicMock
MOCK_MODULES = ['numpy', 'numpy.polynomial', 'numpy.polynomial.polynomial',
                'h5py', 'pandas', 'opencg']
# Register mock instances so importing these names succeeds without the
# real packages installed.
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
import numpy as np
# 'numpy' here resolves to the mock; give it the Polynomial attribute
# referenced while building the docs
np.polynomial.Polynomial = MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.napoleon',
              'sphinx.ext.mathjax',
              'sphinx.ext.autosummary',
              'sphinx.ext.intersphinx',
              'sphinx.ext.viewcode',
              'sphinx_numfig',
              'notebook_sphinxext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenMC'
copyright = u'2011-2016, Massachusetts Institute of Technology'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.8"
# The full version, including alpha/beta/rc tags.
release = "0.8.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
#pygments_style = 'friendly'
#pygments_style = 'bw'
#pygments_style = 'fruity'
#pygments_style = 'manni'
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages
if not on_rtd:
    # Read the Docs injects its own theme; only configure it for local builds.
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# NOTE(review): indentation was reconstructed -- html_logo is assumed to be
# top-level (applies to all builds, including RTD); confirm against upstream.
html_logo = '_images/openmc_logo.png'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "OpenMC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
    """Sphinx extension hook: register the theme-override stylesheet."""
    # NOTE(review): Sphinx renamed add_stylesheet to add_css_file in 1.8 and
    # removed the old name in 4.0 -- confirm the Sphinx version pinned here.
    app.add_stylesheet('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'openmcdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'openmc.tex', u'OpenMC Documentation',
     u'Massachusetts Institute of Technology', 'manual'),
]
latex_elements = {
    # Extra packages and numbering setup injected into the LaTeX preamble.
    'preamble': r"""
\usepackage{enumitem}
\usepackage{amsfonts}
\usepackage{amsmath}
\setlistdepth{99}
\usepackage{tikz}
\usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy}
\usepackage{fixltx2e}
\hypersetup{bookmarksdepth=3}
\setcounter{tocdepth}{2}
\numberwithin{equation}{section}
""",
    # Empty string suppresses the automatically generated index page.
    'printindex': r""
}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
#Autodocumentation Flags
#autodoc_member_order = "groupwise"
#autoclass_content = "both"
autosummary_generate = True
napoleon_use_ivar = True
# Cross-reference targets for external projects used by :class:/:func: roles.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
    'matplotlib': ('http://matplotlib.org/', None)
}
|
import json
import shakedown
import dcos
from dcos import marathon
from enum import Enum
from tests.command import (
cassandra_api_url,
spin,
WAIT_TIME_IN_SECONDS
)
# States that the Cassandra plan endpoint may report; each member's value is
# the literal string used by the service API.
PlanState = Enum('PlanState', [(state, state) for state in (
    'ERROR', 'WAITING', 'PENDING', 'IN_PROGRESS', 'COMPLETE')])
def filter_phase(plan, phase_name):
    """Return the first phase in *plan* whose 'name' matches, else None."""
    matches = (p for p in plan['phases'] if p['name'] == phase_name)
    return next(matches, None)
def get_phase_index(plan, phase_name):
    """Return the position of the named phase within *plan*, or -1."""
    for position, phase in enumerate(plan['phases']):
        if phase['name'] == phase_name:
            return position
    return -1
# Module-level retry counter shared with get_and_verify_plan below.
counter = 0


def get_and_verify_plan(predicate=lambda r: True, wait_time=WAIT_TIME_IN_SECONDS):
    """Poll the Cassandra 'plan' endpoint until *predicate* accepts its JSON body.

    Repeatedly GETs the plan URL via spin(); each response body is parsed as
    JSON and handed to *predicate*. Returns the final response decoded as
    JSON once the predicate succeeds. Timeout/retry semantics come from
    spin() (defined in tests.command, not visible here).
    """
    global counter
    plan_url = cassandra_api_url('plan')

    def fn():
        # Return the error response instead of raising so spin() can keep
        # polling through transient HTTP failures.
        try:
            return dcos.http.get(plan_url)
        except dcos.errors.DCOSHTTPException as err:
            return err.response

    def success_predicate(result):
        global counter
        message = 'Request to {} failed'.format(plan_url)
        # A body that is not valid JSON counts as a failure; polling continues.
        try:
            body = result.json()
        except ValueError:
            return False, message
        # NOTE(review): 'counter' is incremented (capped at 3) and reset on
        # success, but it is never read to change the outcome -- it looks
        # vestigial. Confirm before removing. (Indentation was reconstructed
        # here; verify predicate() is meant to run unconditionally.)
        if counter < 3:
            counter += 1
        pred_res = predicate(body)
        if pred_res:
            counter = 0
        return pred_res, message

    return spin(fn, success_predicate, wait_time=wait_time).json()
def get_marathon_uri():
    """Return the URL of the cluster's Marathon instance."""
    cluster_url = shakedown.dcos_url()
    return '{0}/marathon'.format(cluster_url)
def get_marathon_client():
    """Get a Marathon client bound to this cluster's Marathon URL."""
    # Fix: the original docstring text was garbled by a stray '|' character.
    return marathon.Client(get_marathon_uri())
def strip_meta(app):
    """Remove Marathon-injected metadata fields from an app definition.

    Mutates *app* in place and also returns it for call-chaining. Fixes the
    garbled `app.pop('fetch' | )` call (a stray '|' made it a syntax error)
    and uses pop's default so missing keys no longer raise KeyError,
    making the helper safe on partially-populated app dicts.
    """
    for key in ('fetch', 'version', 'versionInfo'):
        app.pop(key, None)
    return app
|
# Author: Martin Oehler <oehler@knopper.net> 2013
# License: GPL V2
from django.forms import ModelForm
from django.forms import Form
from django.forms import ModelChoiceField
from django.forms.widgets import RadioSelect
from django.forms.widgets import CheckboxSelectMultiple
from django.forms.widgets import TextInput
from django.forms.widgets import Textarea
from django.forms.widgets import DateInput
from django.contrib.admin import widgets
from linboweb.linboserver.models import partition
from linboweb.linboserver.models import partitionSelection
from linboweb.linboserver.models import os
from linboweb.linboserver.models import vm
from linboweb.linboserver.models import client
from linboweb.linboserver.models import clientGroup
from linboweb.linboserver.models import pxelinuxcfg
class partitionForm(ModelForm):
    """ModelForm for editing a partition record."""
    # NOTE(review): no 'fields'/'exclude' declared -- acceptable on the Django
    # versions of this file's era (2013), but Django >= 1.8 raises without one.
    class Meta:
        model = partition
class partitionSelectionForm(ModelForm):
    """ModelForm for editing a partitionSelection record."""
    class Meta:
        model = partitionSelection
class osForm(ModelForm):
    """ModelForm for the os model with an explicit partition-selection picker."""
    # Dropdown over all saved partitionSelection rows.
    partitionselection = ModelChoiceField(queryset=partitionSelection.objects.all())

    class Meta:
        model = os
class vmForm(ModelForm):
cla | ss Meta:
model = vm
class clientForm(ModelForm):
    """ModelForm for the client model with an explicit PXELINUX config picker."""
    # Dropdown over all saved pxelinuxcfg rows.
    pxelinuxconfiguration = ModelChoiceField(queryset=pxelinuxcfg.objects.all())

    class Meta:
        model = client
class clientGroupForm(ModelForm):
    """ModelForm for editing a clientGroup record.

    Fix: the model reference was garbled by a stray '|' character
    ('clientG | roup'), which made the module unparseable.
    """
    class Meta:
        model = clientGroup
class pxelinuxcfgForm(ModelForm):
    """ModelForm for a pxelinuxcfg record; edits the raw config in a textarea."""
    class Meta:
        model = pxelinuxcfg
        widgets = {
            # Wide, tall editor for the raw PXELINUX configuration text.
            'configuration': Textarea(attrs={'cols': 80, 'rows': 40}),
        }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.