code
stringlengths 1
199k
|
|---|
import shlex
import sys
import notify2
import os
import gi
gi.require_version("AppIndicator3", "0.1")
from gi.repository import AppIndicator3
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
_VERSION = "1.0"
_SETTINGS_FILE = os.getenv("HOME") + "/.sshplus"
_ABOUT_TXT = """A simple application starter as appindicator.
To add items to the menu, edit the file <i>.sshplus</i> in your home directory. Each entry must be on a new line in this format:
<tt>NAME|COMMAND|ARGS</tt>
If the item is clicked in the menu, COMMAND with arguments ARGS will be executed. ARGS can be empty. To insert a separator, add a line which only contains "sep". Lines starting with "#" will be ignored. You can set an unclickable label with the prefix "label:". To insert a nested menu, use the prefix "folder:menu name". Subsequent items will be inserted in this menu, until a line containing an empty folder name is found: "folder:". After that, subsequent items get inserted in the parent menu. That means that more than one level of nested menus can be created.
Example file:
<tt><small>
Show top|gnome-terminal|-x top
sep
label:SSH connections
folder:Home
SSH Ex|gnome-terminal|-x ssh user@1.2.3.4
folder:
SSH Ex|gnome-terminal|-x ssh user@1.2.3.4
label:RDP connections
RDP Ex|rdesktop|-T "RDP-Server" -r sound:local 1.2.3.4
</small></tt>
Copyright 2011 Anil Gulecha
Incorporating changes from simplestarter, Benjamin Heil, http://www.bheil.net
Released under GPL3, http://www.gnu.org/licenses/gpl-3.0.html"""
_EDIT_CONFIG = """To add items to the menu, edit the file <i>.sshplus</i> in your home directory. Each entry must be on a new line in this format:
<tt>NAME|COMMAND|ARGS</tt>
If the item is clicked in the menu, COMMAND with arguments ARGS will be executed. ARGS can be empty. To insert a separator, add a line which only contains "sep". Lines starting with "#" will be ignored. You can set an unclickable label with the prefix "label:". To insert a nested menu, use the prefix "folder:menu name". Subsequent items will be inserted in this menu, until a line containing an empty folder name is found: "folder:". After that, subsequent items get inserted in the parent menu. That means that more than one level of nested menus can be created.
Example file:
<tt><small>
Show top|gnome-terminal|-x top
sep
label:SSH connections
folder:Home
SSH Ex|gnome-terminal|-x ssh user@1.2.3.4
folder:
SSH Ex|gnome-terminal|-x ssh user@1.2.3.4
label:RDP connections
RDP Ex|rdesktop|-T "RDP-Server" -r sound:local 1.2.3.4
</small></tt>"""
def menuitem_response(w, item):
    """Dispatch a menu-item activation.

    *item* is either one of the internal sentinel strings ("_about", "_edit",
    "_refresh", "_quit", "folder") or an entry dict with "cmd"/"args" keys as
    produced by get_sshplusconfig().
    """
    if item == "_about":
        show_help_dlg(_ABOUT_TXT)
    elif item == "_edit":
        edit_config_file()
    elif item == "_refresh":
        # Re-read the settings file and swap the indicator's menu in place.
        newmenu = build_menu()
        ind.set_menu(newmenu)  # 'ind' is the module-level AppIndicator instance
        notify2.init("sshplus")
        notify2.Notification(
            "SSHplus refreshed", '"%s" has been read! Menu list was refreshed!' % _SETTINGS_FILE
        ).show()
    elif item == "_quit":
        sys.exit(0)
    elif item == "folder":
        # Folder items only open their submenu; nothing to execute.
        pass
    else:
        print(item)
        # Launch the configured command without blocking the GUI main loop.
        os.spawnvp(os.P_NOWAIT, item["cmd"], [item["cmd"]] + item["args"])
        # Reap an already-finished child (non-blocking) to avoid zombies.
        os.wait3(os.WNOHANG)
def show_help_dlg(msg, error=False):
    """Show a modal SSHplus dialog with *msg* as Pango-markup body text.

    When *error* is True an error icon is used and edit_config_file() is
    invoked so the user can fix the configuration immediately.
    """
    # Single construction path (the original duplicated the MessageDialog
    # creation in both branches).  The placeholder primary text is
    # immediately overwritten by set_markup() below.
    dlg_icon = Gtk.MessageType.ERROR if error else Gtk.MessageType.INFO
    md = Gtk.MessageDialog(
        None, 0, dlg_icon, Gtk.ButtonsType.OK, "This is an INFO MessageDialog"
    )
    if error:
        edit_config_file()
    try:
        md.set_markup("<b>SSHplus %s</b>" % _VERSION)
        md.format_secondary_markup(msg)
        md.run()
    finally:
        # Always destroy the dialog, even if run() raises.
        md.destroy()
def edit_config_file():
    """Open the settings file in the user's preferred editor, creating it first
    (and showing the help dialog) if it does not exist yet."""
    if not os.path.isfile(_SETTINGS_FILE):
        # open()+close() is more portable than os.mknod(), which requires
        # elevated privileges for regular files on some platforms (e.g. macOS).
        open(_SETTINGS_FILE, "a").close()
        show_help_dlg(
            "<b>No <i>.sshplus</i> config file found, we created one for you!\n\nPlease edit the"
            " file and reload the config.</b>\n\n%s"
            % _EDIT_CONFIG,
            error=True,
        )
    # Hand the file to the desktop's default handler without blocking the GUI.
    os.spawnvp(os.P_NOWAIT, "xdg-open", ["xdg-open", _SETTINGS_FILE])
    os.wait3(os.WNOHANG)
def add_separator(menu):
    """Append a visible separator row to *menu*."""
    item = Gtk.SeparatorMenuItem()
    item.show()
    menu.append(item)
def add_menu_item(menu, caption, item=None):
    """Append a labelled entry to *menu* and return it.

    A truthy *item* is attached as the payload of the "activate" signal
    (handled by menuitem_response); otherwise the entry is a passive label.
    """
    entry = Gtk.MenuItem.new_with_label(caption)
    if not item:
        entry.set_sensitive(False)
    else:
        entry.connect("activate", menuitem_response, item)
    entry.show()
    menu.append(entry)
    return entry
def get_sshplusconfig():
    """Parse the settings file into a list of menu entries.

    Returns a list whose elements are either the literal string "sep" or a
    dict with "name", "cmd" and "args" keys ("name" is the sentinel "LABEL"
    or "FOLDER" for label/folder lines).  Returns [] if the file is missing.
    Malformed lines are reported on stdout and skipped.
    """
    if not os.path.exists(_SETTINGS_FILE):
        return []
    app_list = []
    # 'with' guarantees the handle is closed even if parsing raises
    # (the original used a manual try/finally around open()/close()).
    with open(_SETTINGS_FILE, "r") as f:
        for line in f:
            line = line.rstrip()
            if not line or line.startswith("#"):
                continue  # blank lines and comments
            elif line == "sep":
                app_list.append("sep")
            elif line.startswith("label:"):
                app_list.append({"name": "LABEL", "cmd": line[6:], "args": ""})
            elif line.startswith("folder:"):
                app_list.append({"name": "FOLDER", "cmd": line[7:], "args": ""})
            else:
                try:
                    name, cmd, args = line.split("|", 2)
                    app_list.append(
                        {
                            "name": name,
                            "cmd": cmd,
                            # shlex honors quoting in ARGS, e.g. -T "RDP-Server"
                            "args": [n.replace("\n", "") for n in shlex.split(args)],
                        }
                    )
                except ValueError:
                    print("The following line has errors and will be ignored:\n%s" % line)
    return app_list
def build_menu():
    """Build the Gtk menu tree from the settings file.

    Exits the process with an error dialog if the settings file is missing.
    Returns the fully populated top-level Gtk.Menu.
    """
    if not os.path.exists(_SETTINGS_FILE):
        show_help_dlg(
            "<b>ERROR: No .sshmenu file found in home directory</b>\n\n%s" % _ABOUT_TXT, error=True
        )
        sys.exit(1)
    app_list = get_sshplusconfig()
    menu = Gtk.Menu()
    # Stack of menus; menus[-1] is the menu currently being filled, which lets
    # "folder:" entries nest arbitrarily deep.
    menus = [menu]
    for app in app_list:
        if app == "sep":
            add_separator(menus[-1])
        elif app["name"] == "FOLDER" and not app["cmd"]:
            # An empty folder name closes the current submenu (back to parent),
            # but never pops the top-level menu itself.
            if len(menus) > 1:
                menus.pop()
        elif app["name"] == "FOLDER":
            menu_item = add_menu_item(menus[-1], app["cmd"], "folder")
            menus.append(Gtk.Menu())
            menu_item.set_submenu(menus[-1])
        elif app["name"] == "LABEL":
            # item=None makes the entry an insensitive (unclickable) label.
            add_menu_item(menus[-1], app["cmd"], None)
        else:
            add_menu_item(menus[-1], app["name"], app)
    # Add SSHplus options folder to the end of the Menu
    add_separator(menu)
    menu_item = add_menu_item(menus[-1], "SSHplus Options", "folder")
    menus.append(Gtk.Menu())
    menu_item.set_submenu(menus[-1])
    add_menu_item(menus[-1], "Options", None)
    add_menu_item(menus[-1], "Edit", "_edit")
    add_menu_item(menus[-1], "Refresh", "_refresh")
    add_menu_item(menus[-1], "About", "_about")
    add_separator(menus[-1])
    add_menu_item(menus[-1], "Quit", "_quit")
    menus.pop()
    return menu
if __name__ == "__main__":
ind = AppIndicator3.Indicator.new(
"SSHplus", "utilities-terminal", AppIndicator3.IndicatorCategory.APPLICATION_STATUS
)
ind.set_label("Launch", "none")
ind.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
if not os.path.exists(_SETTINGS_FILE):
edit_config_file()
appmenu = build_menu()
ind.set_menu(appmenu)
Gtk.main()
|
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the intellifin app: Company, Exchange and News."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                # default=dict instead of default={}: a literal {} is a single
                # mutable object shared between all model instances.
                ('source', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('financial_data', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('stock_data', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
            ],
        ),
        migrations.CreateModel(
            name='Exchange',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('source', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
            ],
        ),
        migrations.CreateModel(
            name='News',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('source', models.URLField(max_length=255)),
                ('source_title', models.CharField(max_length=255)),
                ('companies', models.ManyToManyField(related_name='news', to='intellifin.Company')),
            ],
        ),
        # Added separately because Exchange is created after Company.
        migrations.AddField(
            model_name='company',
            name='exchange',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='companies', to='intellifin.Exchange'),
        ),
    ]
|
from threading import Thread
from time import sleep
import random
from adapt.intent import IntentBuilder
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_core.question_parser import LILACSQuestionParser
from mycroft.skills.LILACS_knowledge.knowledgeservice import KnowledgeService
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(__name__)
class LILACSChatbotSkill(MycroftSkill):
    """Chatbot skill that answers free-form utterances with ConceptNet usages.

    See https://github.com/ElliotTheRobot/LILACS-mycroft-core/issues/19
    """

    def __init__(self):
        super(LILACSChatbotSkill, self).__init__(name="ChatbotSkill")
        # initialize your variables
        self.reload_skill = False
        self.active = True
        self.parser = None
        self.service = None
        self.TIMEOUT = 2  # minutes between keep-alive "bump" utterances

    def initialize(self):
        # register intents
        self.parser = LILACSQuestionParser()
        self.service = KnowledgeService(self.emitter)
        self.build_intents()
        # make thread to keep active
        self.make_bump_thread()

    def ping(self):
        """Emit a fake utterance periodically so this skill stays at the top of
        the active-skill list (and thus keeps receiving converse() calls)."""
        while True:
            if self.active:
                self.emitter.emit(Message("recognizer_loop:utterance",
                                          {"source": "LILACS_chatbot_skill",
                                           "utterances": [
                                               "bump chat to active skill list"]}))
            # Sleep in 1-second slices (60 * TIMEOUT seconds total) rather than
            # one long sleep, mirroring the original counter loop.
            for _ in range(60 * self.TIMEOUT):
                sleep(1)

    def make_bump_thread(self):
        # Daemon thread: must not keep the process alive on shutdown.
        timer_thread = Thread(target=self.ping)
        timer_thread.setDaemon(True)
        timer_thread.start()

    def build_intents(self):
        # build intents
        deactivate_intent = IntentBuilder("DeactivateChatbotIntent") \
            .require("deactivateChatBotKeyword").build()
        activate_intent = IntentBuilder("ActivateChatbotIntent") \
            .require("activateChatBotKeyword").build()
        bump_intent = IntentBuilder("BumpChatBotSkillIntent"). \
            require("bumpChatBotKeyword").build()
        # register intents
        self.register_intent(deactivate_intent, self.handle_deactivate_intent)
        self.register_intent(activate_intent, self.handle_activate_intent)
        # Bug fix: pass the handler itself -- the original *called* the method
        # and registered its None return value as the intent handler.
        self.register_intent(bump_intent, self.handle_set_on_top_active_list)

    def handle_set_on_top_active_list(self, message=None):
        # dummy intent just to bump curiosity skill to top of active skill list
        # called on a timer in order to always use converse method
        pass

    def handle_deactivate_intent(self, message):
        self.active = False
        self.speak_dialog("chatbot_off")

    def handle_activate_intent(self, message):
        self.active = True
        self.speak_dialog("chatbot_on")

    def stop(self):
        self.handle_deactivate_intent("global stop")

    def converse(self, transcript, lang="en-us"):
        """Try to answer the utterance; True if handled, False otherwise."""
        # parse 1st utterance for entitys; ignore our own keep-alive bumps
        if self.active and "bump chat" not in transcript[0] and "bump curiosity" not in transcript[0]:
            nodes, parents, synonims = self.parser.tag_from_dbpedia(transcript[0])
            self.log.info("nodes: " + str(nodes))
            self.log.info("parents: " + str(parents))
            self.log.info("synonims: " + str(synonims))
            # get concept net , talk
            possible_responses = []
            for node in nodes:
                try:
                    # renamed from 'dict' to avoid shadowing the builtin
                    data = self.service.adquire(node, "concept net")
                    usages = data["concept net"]["surfaceText"]
                    for usage in usages:
                        possible_responses.append(usage.replace("[", "").replace("]", ""))
                except Exception:
                    self.log.info("could not get reply for node " + node)
            try:
                # say something random
                reply = random.choice(possible_responses)
                self.speak(reply)
                return True
            except Exception:
                self.log.error("Could not get chatbot response for: " + transcript[0])
                # dont know what to say
                # TODO ask user a question and play du,mb
                return False
        # tell intent skill you did not handle intent
        return False
def create_skill():
    """Factory entry point used by the Mycroft skill loader."""
    return LILACSChatbotSkill()
|
from __future__ import unicode_literals
from __future__ import print_function
from builtins import str as text
import traceback
import subprocess
import wx
import wx.lib.filebrowsebutton
from ooffice import *
class DocumentDialog(wx.Dialog):
    """Modal dialog that generates a document from *modifications* and lets the
    user save it, open it, or send it by email, optionally converting to PDF.

    NOTE(review): relies on names star-imported from ooffice (IsOODocument,
    GenerateDocument, convert_to_pdf, SendDocument, normalize_filename,
    config, database, sys, os, ...) -- confirm against that module.
    """

    def __init__(self, parent, modifications):
        self.modifications = modifications
        self.document_generated = False  # set True once generation succeeds
        # Instead of calling wx.Dialog.__init__ we precreate the dialog
        # so we can set an extra style that must be set before
        # creation, and then we create the GUI object using the Create
        # method.
        pre = wx.PreDialog()
        pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
        pre.Create(parent, -1, "Génération de document")
        # This next step is the most important, it turns this Python
        # object into the real wrapper of the dialog (instead of pre)
        # as far as the wxPython extension is concerned.
        self.PostCreate(pre)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(wx.StaticText(self, -1, "Format :"), 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
        # Output-format choices depend on the template type and the platform
        # (PDF export is only offered on win32 here).
        if not IsOODocument(modifications.template):
            self.format = wx.Choice(self, -1, choices=["Texte"])
        elif sys.platform == 'win32':
            self.format = wx.Choice(self, -1, choices=["LibreOffice", "PDF"])
        else:
            self.format = wx.Choice(self, -1, choices=["LibreOffice"])
        self.format.SetSelection(0)
        self.Bind(wx.EVT_CHOICE, self.onFormat, self.format)
        # NOTE(review): the proportion argument appears to be missing here, so
        # the flags value is passed as the proportion -- confirm intended layout.
        sizer.Add(self.format, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
        default_output = normalize_filename(modifications.default_output)
        self.extension = os.path.splitext(default_output)[-1]
        wildcard = "OpenDocument (*%s)|*%s|PDF files (*.pdf)|*.pdf" % (self.extension, self.extension)
        self.fbb = wx.lib.filebrowsebutton.FileBrowseButton(self, -1,
                                                            size=(600, -1),
                                                            labelText="Nom de fichier :",
                                                            startDirectory=config.documents_directory,
                                                            initialValue=os.path.join(config.documents_directory, default_output),
                                                            fileMask=wildcard,
                                                            fileMode=wx.SAVE)
        sizer.Add(self.fbb, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
        self.sizer.Add(sizer, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
        # Progress bar updated during (multi-)document generation.
        self.gauge = wx.Gauge(self, -1, size=(-1, 10))
        self.gauge.SetRange(100)
        self.sizer.Add(self.gauge, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.LEFT | wx.TOP, 5)
        line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
        self.sizer.Add(line, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.TOP | wx.BOTTOM, 5)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.sauver_ouvrir = wx.Button(self, -1, "Sauver et ouvrir")
        self.sauver_ouvrir.SetDefault()
        self.Bind(wx.EVT_BUTTON, self.OnSauverOuvrir, self.sauver_ouvrir)
        sizer.Add(self.sauver_ouvrir, 0, wx.LEFT | wx.RIGHT, 5)
        self.sauver = wx.Button(self, -1, "Sauver")
        self.Bind(wx.EVT_BUTTON, self.OnSauver, self.sauver)
        sizer.Add(self.sauver, 0, wx.RIGHT, 5)
        if modifications.multi:
            button = wx.Button(self, -1, "Sauver individuellement")
            self.Bind(wx.EVT_BUTTON, self.OnSauverUnitaire, button)
            sizer.Add(button, 0, wx.RIGHT, 5)
        if modifications.email:
            self.sauver_envoyer = wx.Button(self, -1, "Sauver et envoyer par email")
            self.Bind(wx.EVT_BUTTON, self.OnSauverEnvoyer, self.sauver_envoyer)
            sizer.Add(self.sauver_envoyer, 0, wx.RIGHT, 5)
            if modifications.multi is False and not modifications.email_to:
                self.sauver_envoyer.Disable()
        if database.creche.caf_email:
            # NOTE(review): this rebinds self.sauver_envoyer, so the CAF button
            # replaces the plain email button as the stored reference.
            self.sauver_envoyer = wx.Button(self, -1, "Sauver et envoyer par email à la CAF")
            self.Bind(wx.EVT_BUTTON, self.OnSauverEnvoyerCAF, self.sauver_envoyer)
            sizer.Add(self.sauver_envoyer, 0, wx.LEFT | wx.RIGHT, 5)
        # btnsizer.Add(self.ok)
        btn = wx.Button(self, wx.ID_CANCEL)
        sizer.Add(btn, 0, wx.RIGHT, 5)
        self.sizer.Add(sizer, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
        self.SetSizer(self.sizer)
        self.sizer.Fit(self)
        self.CenterOnScreen()

    def onFormat(self, _):
        # Keep the file extension in sync with the selected output format.
        filename = os.path.splitext(self.fbb.GetValue())[0]
        if self.format.GetSelection() == 0:
            self.fbb.SetValue(filename + self.extension, None)
        else:
            self.fbb.SetValue(filename + ".pdf", None)

    def Sauver(self):
        """Generate the document(s) to the chosen path; convert to PDF if the
        chosen name ends in .pdf.  Shows a dialog on errors, then closes."""
        self.fbb.Disable()
        self.sauver.Disable()
        if self.sauver_ouvrir:
            self.sauver_ouvrir.Disable()
        self.filename = self.fbb.GetValue()
        f, e = os.path.splitext(self.filename)
        if e == ".pdf":
            # Generate under the OpenDocument name first, convert afterwards.
            self.pdf = True
            self.oo_filename = f + self.extension
        else:
            self.pdf = False
            self.oo_filename = self.filename
        # Remember the directory for the next invocation of the dialog.
        config.documents_directory = os.path.dirname(self.filename)
        dlg = None
        try:
            if self.modifications.multi is not False:
                # One output file per entry; accumulate per-label errors.
                errors = {}
                simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
                for i, (filename, modifs) in enumerate(simple_modifications):
                    self.gauge.SetValue((100 * i) / len(simple_modifications))
                    errors.update(GenerateDocument(modifs, filename=filename))
                    if self.pdf:
                        f, e = os.path.splitext(filename)
                        convert_to_pdf(filename, f + ".pdf")
                        os.remove(filename)
            else:
                # Single-document mode: strip the per-person placeholder.
                self.filename = self.filename.replace(" <prenom> <nom>", "")
                self.oo_filename = self.oo_filename.replace(" <prenom> <nom>", "")
                errors = GenerateDocument(self.modifications, filename=self.oo_filename, gauge=self.gauge)
                if self.pdf:
                    convert_to_pdf(self.oo_filename, self.filename)
                    os.remove(self.oo_filename)
            self.document_generated = True
            if errors:
                message = "Document %s généré avec des erreurs :\n" % self.filename
                for label in errors.keys():
                    message += '\n' + label + ' :\n '
                    message += '\n '.join(errors[label])
                dlg = wx.MessageDialog(self, message, 'Message', wx.OK | wx.ICON_WARNING)
        except IOError:
            # Typically the target file is already open in another program.
            print(sys.exc_info())
            dlg = wx.MessageDialog(self, "Impossible de sauver le document. Peut-être est-il déjà ouvert ?", 'Erreur',
                                   wx.OK | wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
            return
        except Exception as e:
            # Last-resort handler: show the traceback in a dialog.
            info = sys.exc_info()
            message = ' [type: %s value: %s traceback: %s]' % (info[0], info[1], traceback.extract_tb(info[2]))
            dlg = wx.MessageDialog(self, message, 'Erreur', wx.OK | wx.ICON_WARNING)
        if dlg:
            dlg.ShowModal()
            dlg.Destroy()
        self.EndModal(wx.ID_OK)

    def OnSauver(self, _):
        # "Sauver" always produces a single merged document.
        self.modifications.multi = False
        self.Sauver()

    def OnSauverOuvrir(self, event):
        # Save, then open the result with the matching viewer.
        self.OnSauver(event)
        if self.document_generated:
            if self.filename.endswith(".pdf"):
                StartAcrobatReader(self.filename)
            else:
                StartLibreOffice(self.filename)

    def OnSauverUnitaire(self, _):
        self.Sauver()

    def OnSauverEnvoyer(self, event):
        # Save, then email the generated document(s).
        self.OnSauverUnitaire(event)
        if self.document_generated:
            if self.modifications.multi is not False:
                # Multi mode: confirm the (possibly long) recipient list first.
                simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
                emails = '\n'.join(
                    [" - %s (%s)" % (modifs.email_subject, ", ".join(modifs.email_to)) for filename, modifs in
                     simple_modifications])
                if len(emails) > 1000:
                    emails = emails[:1000] + "\n..."
                dlg = wx.MessageDialog(self, "Ces emails seront envoyés :\n" + emails, 'Confirmation',
                                       wx.OK | wx.CANCEL | wx.ICON_WARNING)
                response = dlg.ShowModal()
                dlg.Destroy()
                if response != wx.ID_OK:
                    return
                for filename, modifs in simple_modifications:
                    if self.pdf:
                        # The generated file was converted; send the .pdf name.
                        oo_filename = filename
                        filename, e = os.path.splitext(oo_filename)
                        filename += ".pdf"
                    try:
                        SendDocument(filename, modifs)
                    except Exception as e:
                        dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (filename, e),
                                               'Erreur', wx.OK | wx.ICON_WARNING)
                        dlg.ShowModal()
                        dlg.Destroy()
            else:
                try:
                    SendDocument(self.filename, self.modifications)
                except Exception as e:
                    dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (self.filename, e),
                                           'Erreur', wx.OK | wx.ICON_WARNING)
                    dlg.ShowModal()
                    dlg.Destroy()

    def OnSauverEnvoyerCAF(self, event):
        # Save, then email the document to the CAF with a dedicated cover note.
        self.OnSauver(event)
        if self.document_generated:
            try:
                root, ext = os.path.splitext(self.modifications.introduction_filename)
                introduction_filename = root + " CAF" + ext
                SendDocument(self.filename, self.modifications, to=[database.creche.caf_email], introduction_filename=GetTemplateFile(introduction_filename))
            except Exception as e:
                dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (self.filename, e), "Erreur", wx.OK | wx.ICON_WARNING)
                dlg.ShowModal()
                dlg.Destroy()
def StartLibreOffice(filename):
    """Open *filename* with LibreOffice/OpenOffice.

    On Windows the document is loaded through the OOo UNO bridge; elsewhere a
    soffice/libreoffice binary is launched as a subprocess.
    """
    if sys.platform == 'win32':
        # NOTE(review): urllib.pathname2url is the Python 2 location (Python 3
        # moved it to urllib.request); 'urllib', 'sys' and the OOo helpers are
        # presumably provided by "from ooffice import *" -- confirm.
        filename = "".join(["file:", urllib.pathname2url(os.path.abspath(filename.encode("utf-8")))])
        # print filename
        try:
            StarDesktop, objServiceManager, core_reflection = getOOoContext()
            StarDesktop.LoadComponentFromURL(filename, "_blank", 0, MakePropertyValues(objServiceManager, [
                ["ReadOnly", False],
                ["Hidden", False]]))
        except Exception as e:
            print("Exception ouverture LibreOffice", e)
            dlg = wx.MessageDialog(None, "Impossible d'ouvrir le document\n%r" % e, "Erreur", wx.OK|wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
    else:
        # Try known install locations in order until one launches.
        paths = []
        if sys.platform == "darwin":
            paths.append("/Applications/LibreOffice.app/Contents/MacOS/soffice")
            paths.append("/Applications/OpenOffice.app/Contents/MacOS/soffice")
        else:
            paths.append("/usr/bin/libreoffice")
            paths.append("ooffice")
        for path in paths:
            try:
                print(path, filename)
                subprocess.Popen([path, filename])
                return
            except Exception as e:
                print(e)
                pass
        # No candidate could be launched.
        dlg = wx.MessageDialog(None, "Impossible de lancer OpenOffice / LibreOffice", 'Erreur', wx.OK|wx.ICON_WARNING)
        dlg.ShowModal()
        dlg.Destroy()
# Known DDE server names for the various Acrobat Reader versions, newest first.
DDE_ACROBAT_STRINGS = ["AcroviewR15", "AcroviewA15", "AcroviewR12", "AcroviewA12", "AcroviewR11", "AcroviewA11",
                       "AcroviewR10", "AcroviewA10", "acroview"]
# Lazily created DDE server shared by all StartAcrobatReader() calls.
dde_server = None
def StartAcrobatReader(filename):
    """Open *filename* in Acrobat Reader (Windows only) via DDE.

    Launches the registered PDF handler, then retries for up to ~10 seconds to
    establish a DDE conversation with any known Acrobat server name.  Shows an
    error dialog if the document could not be opened.
    """
    global dde_server
    import win32api
    import win32ui
    import dde
    filename = str(os.path.abspath(filename))
    path, name = os.path.split(filename)
    reader = win32api.FindExecutable(name, path)
    os.spawnl(os.P_NOWAIT, reader[1], " ")
    for t in range(10):
        time.sleep(1)
        last_error = None
        for acrobat in DDE_ACROBAT_STRINGS:
            try:
                if not dde_server:
                    dde_server = dde.CreateServer()
                    dde_server.Create('Gertrude')
                c = dde.CreateConversation(dde_server)
                c.ConnectTo(acrobat, 'control')
                c.Exec('[DocOpen("%s")]' % (filename,))
                return
            except Exception as e:
                # Bug fix: 'e' was referenced after the except block in the
                # original, which is a NameError on Python 3 (the alias is
                # unbound once the handler exits).  Keep the last error instead.
                last_error = e
        print("Impossible de lancer acrobat reader ; prochain essai dans 1s ...", last_error)
    dlg = wx.MessageDialog(None, "Impossible d'ouvrir le document", 'Erreur', wx.OK | wx.ICON_WARNING)
    dlg.ShowModal()
    dlg.Destroy()
|
from ..rerequest import TemplateRequest
# Bootstrap request: normalizes any ur.se / urplay.se URL into a canonical
# http://<domain>.se/<path> request shared by both service variants below.
init_req = TemplateRequest(
    re = r'(http://)?(www\.)?(?P<domain>ur(play)?)\.se/(?P<req_url>.+)',
    encode_vars = lambda v: { 'req_url': 'http://%(domain)s.se/%(req_url)s' % v } )
# HLS (HTML5) variant: scrapes the file_html5 reference and builds an m3u8
# playlist URL; backslash escapes from the embedded JSON are stripped.
hls = { 'title': 'UR-play', 'url': 'http://urplay.se/', 'feed_url': 'http://urplay.se/rss',
        'items': [init_req,
                  TemplateRequest(
                      re = r'file_html5":\s?"(?P<final_url>[^"]+)".*?"subtitles":\s?"(?P<subtitles>[^",]*)',
                      encode_vars = lambda v: { 'final_url': ('http://130.242.59.75/%(final_url)s/playlist.m3u8' % v).replace('\\', ''),
                                                'suffix-hint': 'mp4',
                                                'subtitles': v.get('subtitles', '').replace('\\', '') % v } )] }
# RTMP (Flash) fallback: scrapes file_flash and builds an rtmp:// playpath.
rtmp = { 'items': [init_req,
                   TemplateRequest(
                       re = r'file_flash":\s?"(?P<final_url>[^"]+\.(?P<ext>mp[34]))".*?"subtitles":\s?"(?P<subtitles>[^",]*)',
                       encode_vars = lambda v: { 'final_url': ('rtmp://130.242.59.75/ondemand playpath=%(ext)s:/%(final_url)s app=ondemand' % v).replace('\\', ''),
                                                 'suffix-hint': 'flv',
                                                 'rtmpdump-realtime': True,
                                                 'subtitles': v.get('subtitles', '').replace('\\', '') % v } )] }
# Order matters: HLS is tried first, RTMP is the fallback.
services = [hls, rtmp]
|
class Object:
    """Minimal named value holder exposing a read-only *name*."""

    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        """The name given at construction time (read-only)."""
        return self._name

    def __repr__(self):
        # Debug-friendly representation, e.g. Object('foo').
        return "%s(%r)" % (type(self).__name__, self._name)
|
from planarprocess import *
from gds_helpers import *
from itertools import cycle
# Cross-section window along x for the GDS cut.
xmin, xmax = -5, 5
layers = gds_cross_section('mypmos.gds', [(0,xmin), (0, xmax)], 'gdsmap.map')
# NOTE(review): this bare list literal is a no-op expression -- it appears to
# document the layer names expected from the GDS map (doctest-style output).
['P-Active-Well', 'Active-Cut', 'N-Well', 'Metal-2', 'Metal-1', 'P-Select',
'N-Select', 'Transistor-Poly', 'Via1']
wafer = Wafer(1., 5., 0, xmax - xmin)
# Well implant.
nw = layers['N-Well']
wafer.implant(.7, nw, outdiffusion=5., label='N-Well')
# Field oxide everywhere outside the active area; thin gate oxide inside it.
de = layers['P-Active-Well']
fox = wafer.grow(.5, wafer.blank_mask().difference(de),
                 y_offset=-.2, outdiffusion=.1)
gox = wafer.grow(.05, de, outdiffusion=.05, base=wafer.wells,
                 label='Gate oxide')
# Gate poly on top of the gate oxide.
gp = layers['Transistor-Poly']
poly = wafer.grow(.25, gp, outdiffusion=.25, label='Gate poly')
# Source/drain implants: select masks intersected with the active area,
# minus the poly gate (self-aligned).
np = layers['N-Select'].intersection(
    layers['P-Active-Well']).difference(gp)
nplus = wafer.implant(.1, np, outdiffusion=.1, target=wafer.wells, source=gox,
                      label='N+')
pp = layers['P-Select'].intersection(
    layers['P-Active-Well']).difference(gp)
pplus = wafer.implant(.1, pp, outdiffusion=.1, target=wafer.wells, source=gox,
                      label='P+')
# Inter-layer dielectric with contact holes etched through it (negative grow).
mld_thickness = .5
mld = wafer.grow(mld_thickness, wafer.blank_mask(), outdiffusion=.1)
ct = layers['Active-Cut']
contact = wafer.grow(-mld_thickness*1.1, ct, consuming=[mld, gox], base=wafer.air,
                     outdiffusion=.05, outdiffusion_vertices=3)
# Metal-1, planarized ILD, vias, Metal-2.
m1 = layers['Metal-1']
metal1 = wafer.grow(.6, m1, outdiffusion=.1, label='Metal-1')
ild_thickness = 1.2
ild1 = wafer.grow(ild_thickness, wafer.blank_mask(), outdiffusion=.1)
wafer.planarize()
v1 = layers['Via1']
via1 = wafer.grow(-ild_thickness*1.1, v1, consuming=[ild1], base=wafer.air,
                  outdiffusion=.05, outdiffusion_vertices=3)
m2 = layers['Metal-2']
metal2 = wafer.grow(1., m2, outdiffusion=.1, label='Metal-2')
# Plot styling: solid fills for non-well solids, hatches for the rest.
custom_style = {s: {} for s in wafer.solids}
for solid, color in {
        fox: '.4', gox: 'r', poly: 'g', mld: 'k',
        ild1: '.3', contact: '.5', via1: '.5',
        metal1: '.7', metal2: '.8'}.items():
    custom_style[solid].update(dict(facecolor=color, edgecolor='k'))
for solid in wafer.solids:
    if solid not in wafer.wells:
        custom_style[solid].update(dict(hatch=None, fill=True))
base_hatches = r'\/'  # r'/\|-+xoO.*'
hatches = cycle(list(base_hatches) + [h1+h2 for h1 in base_hatches
                                      for h2 in base_hatches])
colors = cycle('krgbcmy')
# Draw the air region first, far behind everything else.
plot_geometryref(wafer.air, hatch='.', fill=False, linewidth=0, color=(.9,.9,.9),
                 zorder=-100)
zorder = -99
for solid in wafer.solids:
    style = dict(hatch=next(hatches), fill=False,
                 edgecolor=next(colors), zorder=zorder)
    zorder += 1
    style.update(custom_style.get(solid, {}))
    plot_geometryref(solid, **style)
# NOTE(review): pyplot is presumably provided by "from planarprocess import *"
# or "from gds_helpers import *" -- confirm.
pyplot.legend()
pyplot.savefig('mypmos-x.png')
pyplot.show()
|
from setuptools import setup, find_packages
# Package metadata for the cyprj-to-makefile generator.
setup(
    name="CyprjToMakefile",
    version="0.1",
    description="Generate Makefiles from Cypress cyprj files.",
    author="Simon Marchi",
    author_email="simon.marchi@polymtl.ca",
    url="https://github.com/simark/cyprj-to-makefile",
    license="GPLv3",
    packages=find_packages(),
    install_requires=['jinja2'],
    # Ship the Makefile template alongside the package code.
    package_data={
        'cyprj_to_makefile': ['Makefile.tpl'],
    },
    entry_points={
        'console_scripts': [
            'cyprj-to-makefile = cyprj_to_makefile.cyprj_to_makefile:main',
        ],
    },
)
|
from .submaker import Submaker
from inception.tools.signapk import SignApk
import shutil
import os
from inception.constants import InceptionConstants
class UpdatezipSubmaker(Submaker):
    """Builds (and optionally signs) the flashable update zip from a package dir."""

    def make(self, updatePkgDir):
        """Create the update zip next to *updatePkgDir* and return its path.

        If an "update.keys" entry names a host key pair, the archive is first
        written as update_unsigned.zip and then signed with SignApk into the
        final output name.
        """
        keys_name = self.getValue("keys")
        signingKeys = self.getMaker().getConfig().getKeyConfig(keys_name) if keys_name else None
        updateBinaryKey, updateBinary = self.getTargetBinary("update-binary")
        assert updateBinary, "%s is not set" % updateBinaryKey
        if keys_name:
            assert signingKeys, "update.keys is '%s' but __config__.host.keys.%s is not set" % (keys_name, keys_name)
            signingKeys = signingKeys["private"], signingKeys["public"]
        # Recovery expects the updater binary at this fixed path inside the zip.
        shutil.copy(updateBinary, os.path.join(updatePkgDir, "META-INF/com/google/android/update-binary"))
        updateZipPath = updatePkgDir + "/../"
        updateZipPath += "update_unsigned" if signingKeys else "update"
        shutil.make_archive(updateZipPath, "zip", updatePkgDir)
        updateZipPath += ".zip"
        if signingKeys:
            javaKey, javaPath = self.getHostBinary("java")
            signApkKey, signApkPath = self.getHostBinary("signapk")
            assert signApkPath, "%s is not set" % signApkKey
            assert os.path.exists(signApkPath), "'%s' from %s does not exist" % (signApkPath, signApkKey)
            assert os.path.exists(javaPath), "'%s' from %s does not exist" % (javaPath, javaKey)
            signApk = SignApk(javaPath, signApkPath)
            targetPath = updatePkgDir + "/../" + InceptionConstants.OUT_NAME_UPDATE
            signApk.sign(updateZipPath, targetPath, signingKeys[0], signingKeys[1])
            updateZipPath = targetPath
        return updateZipPath
|
PYS_SERVICE_MOD_PRE='pys_'  # prefix for service module names
PYS_HEAD_LEN=12  # length of the message header, in bytes
PYS_MAX_BODY_LEN=10485760  # maximum message body length (10 MiB)
|
import hashlib, time
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from poetry.models import Poem, Theme
from user.models import Contributor
# Validation error texts (Kazakh, roughly "this parameter is required").
# NOTE(review): my_default_errors appears unused in this module -- the forms
# below all reference error_messages; confirm before removing.
my_default_errors = {
    'required': 'Еңгізуге міндетті параметр'
}
error_messages = {'required': 'Толтыруға маңызды параметр'}
class UserAuthenticateForm(forms.ModelForm):
    """Login form bound to User; collects email and password.

    Labels and validation messages are in Kazakh.
    """
    email = forms.EmailField(required=True, error_messages=error_messages)
    password = forms.CharField(
        required=True,
        label='Құпиясөз',  # "Password"
        error_messages=error_messages,
        widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('email', 'password')
        labels = {
            'email': 'Email',
            'password': 'Құпиясөз',
        }
class UserCreateForm(UserCreationForm):
    """Registration form: creates an inactive User plus its Contributor profile.

    The username is the email address; an activation code is derived from the
    email and the current timestamp.
    """
    email = forms.EmailField(required=True, error_messages=error_messages)
    full_name = forms.CharField(required=True, label='Есіміңіз', error_messages=error_messages)
    password1 = forms.CharField(required=True, label='Құпиясөз', widget=forms.PasswordInput,
                                error_messages=error_messages)
    password2 = forms.CharField(required=True, label='Құпиясөзді қайталаңыз', widget=forms.PasswordInput,
                                error_messages=error_messages)

    class Meta:
        model = User
        fields = ('full_name', 'email', 'password1', 'password2')

    def save(self, commit=True):
        """Build the User; when *commit* is true also persist the Contributor
        profile and add the user to the site-users group."""
        user = super(UserCreateForm, self).save(commit=False)
        user.email = self.cleaned_data["email"]
        user.username = user.email
        user.is_active = 0  # account stays disabled until activation
        # Renamed from 'hash' to avoid shadowing the builtin.
        # NOTE: md5 here is only an activation-token generator, not password
        # hashing; passwords are handled by UserCreationForm itself.
        activation_source = '%s%s' % (user.email, time.time())
        if commit:
            user.save()
            user.contributor = Contributor(user_id=user, full_name=self.cleaned_data["full_name"],
                                           activation_code=hashlib.md5(activation_source.encode('utf-8')).hexdigest())
            user.contributor.save()
            group = self.get_user_group()
            user.groups.add(group)
        return user

    def get_user_group(self):
        """Group every self-registered user is added to."""
        return Group.objects.get(name='site-users')

    def clean_email(self):
        """Reject emails that already belong to a registered user."""
        email = self.cleaned_data.get('email')
        user = User.objects.filter(email=email).first()
        if user:
            raise forms.ValidationError("Бұл email-мен колднушы тіркелген.")
        return email

    def clean_password2(self):
        """Require a confirmed, matching password of at least 6 characters."""
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if not password2:
            raise forms.ValidationError("Құпиясөзді растаңыз")
        if password1 != password2:
            raise forms.ValidationError("Құпиясөздер бір біріне сәйкес емес. Қайта теріңіз")
        if len(password2) < 6:
            raise forms.ValidationError('Кемінде 6 символ')
        return super(UserCreateForm, self).clean_password2()
class UserEditForm(forms.ModelForm):
    """Profile edit form for a Contributor: full name and a short site status."""
    text_status = forms.CharField(
        widget=forms.Textarea(attrs={'rows': 5, 'cols': 100}),
        label='Сайттағы статусыңыз (250 символ)',  # "Your status on the site (250 chars)"
        error_messages=error_messages)

    class Meta:
        model = Contributor
        fields = ('full_name', 'text_status')
        labels = {
            'full_name': 'Есіміңіз',
            'text_status': 'Сайттағы статусыңыз (250 символ)',
        }
        error_messages = {
            'full_name': error_messages
        }
class OfferPoemFrom(forms.ModelForm):
    """Form for contributors to submit a poem.

    NOTE(review): the class name looks like a typo for OfferPoemForm, but
    renaming would break importers -- left as-is.
    """
    theme = forms.MultipleChoiceField(
        label="Тақырып",  # "Theme"
        widget=forms.SelectMultiple,
        error_messages=error_messages,
        # NOTE(review): this queryset is evaluated once at import time, so
        # Theme rows added later won't appear until the process restarts --
        # consider a callable or ModelMultipleChoiceField.
        choices=Theme.objects.values_list('id', 'name').all()
    )

    class Meta:
        model = Poem
        fields = ('author', 'title', 'content', 'theme',)
        labels = {
            'author': 'Автор',
            'title': 'Шығарма аты',
            'content': 'Текст',
        }
        error_messages = {
            'author': error_messages,
            'title': error_messages,
            'content': error_messages,
            'theme': error_messages
        }
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ApplicationForm.requires_development flag (defaults to False)."""

    dependencies = [
        ('app', '0054_add_field_user_to_productionform'),
    ]

    operations = [
        migrations.AddField(
            model_name='applicationform',
            name='requires_development',
            field=models.BooleanField(default=False, verbose_name='requires_development'),
        ),
    ]
|
__all__ = ["speedtest_exceptions", "speedtest"]
from . import sendtest
|
import os
import re
import sys
"""
* Perform initial configuration to ensure that the server is set up to work with Burton's format
sudo chown -R ubuntu:ubuntu /var/www
mkdir -p /var/www/default/public_html
mv /var/www/html/index.html /var/www/default/public_html # Ubuntu >=14.04
mv /var/www/index.html /var/www/default/public_html # Ubuntu <14.04
rm -rf /var/www/html
sudo vim /etc/apache2/sites-available/000-default.conf # Ubuntu >=14.04
sudo vim /etc/apache2/sites-available/default # Ubuntu <14.04
sudo a2enmod ssl
sudo service apache2 restart
* Enable / disable .htaccess for a site
* PHP configuration
"""
# Module-level handle to the caller-supplied environment (set by main());
# used by the helpers below for its .prompt attribute.
environment = ''
def main(env):
    """Interactive menu loop for website configuration.

    Returns True when the user chooses "Go Back"; exits the process on "-".
    """
    global environment
    environment = env
    handlers = {'1': restart_apache, '2': add_website, '3': add_ssl}
    while True:
        print("\nConfigure Websites\n")
        print("Please select an operation:")
        print(" 1. Restart Apache")
        print(" 2. Add a new website")
        print(" 3. Add SSL to website")
        print(" 0. Go Back")
        print(" -. Exit")
        choice = input(environment.prompt)
        if choice == '0':
            return True
        if choice == '-':
            sys.exit()
        action = handlers.get(choice)
        if action is not None:
            action()
        else:
            print("Invalid input.")
def restart_apache():
    """Restart Apache via `sudo service`; report failures.

    Always returns True (callers ignore the result and use it as a menu
    action). Implements the former TODO: a non-zero exit status from
    os.system — typically missing sudo rights — now prints an error.
    """
    print("\nAttempting to restart Apache:")
    result = os.system("sudo service apache2 restart")
    # Keep printing the raw exit status, as the original did.
    print(result)
    if result != 0:
        print('\n\nError!')
        print('The current user does not have permission to perform this action.')
    return True
def add_website():
    """Interactively create a new Apache vhost from the bundled template.

    Prompts for a site name, writes the filled-in template to /tmp, moves it
    into sites-available with sudo, creates the docroot/log dirs, enables the
    site and restarts Apache. Returns True.
    """
    global environment
    print('\nAdd website.\n')
    # Use context managers so file handles are always closed.
    with open('./example-files/apache-site', 'r') as input_file:
        input_file_text = input_file.read()
    site_name = input('Website name (without www or http)' + environment.prompt)
    new_filename = '/etc/apache2/sites-available/%s.conf' % (site_name,)
    tmp_filename = '/tmp/%s.conf' % (site_name,)
    # TODO: Check that site_name is legal for both a domain name and a filename.
    while os.path.isfile(new_filename):
        print('Site exists! Please choose another.')
        site_name = input('Website name (without www or http)' + environment.prompt)
        new_filename = '/etc/apache2/sites-available/%s.conf' % (site_name,)
        tmp_filename = '/tmp/%s.conf' % (site_name,)
    new_config = re.sub('SITE', site_name, input_file_text)
    # FIX: initialise tmp_move so the permission check below cannot raise
    # NameError when the PermissionError path is taken (the original left it
    # unbound in that case).
    tmp_move = -1
    try:
        with open(tmp_filename, 'w') as output_file:
            output_file.write(new_config)
        tmp_move = os.system("sudo mv %s %s" % (tmp_filename, new_filename))
    except PermissionError:
        print('\n\nError!')
        print('The current user does not have permission to perform this action.')
        #print('Please run Burton with elevated permissions to resolve this error.\n\n')
    if tmp_move != 0:
        print('\n\nError!')
        print('The current user does not have permission to perform this action.')
        #print('Please run Burton with elevated permissions to resolve this error.\n\n')
    current_user = str(os.getuid())
    result = os.system('sudo mkdir -p /var/www/%s/public_html/' % (site_name,))
    result = os.system('sudo mkdir -p /var/www/%s/logs/' % (site_name,))
    # FIX: the original supplied only two arguments for three %s placeholders,
    # which raises TypeError at runtime; site_name was missing.
    result = os.system('sudo chown -R %s:%s /var/www/%s/'
                       % (current_user, current_user, site_name))
    result = os.system('sudo a2ensite %s.conf' % (site_name,))
    restart_apache()
    return True
def add_ssl():
    """Prompt for a site URL and print the openssl CSR-generation command.

    Only prints the command (optionally with a wildcard CN); nothing is
    executed. Returns True.
    """
    global environment
    print("\nAdd SSL to website.\n")
    print("Please enter the URL of the website.\n")
    site_name = input(environment.prompt)
    print("Is this a wildcard certificate? (y/N)\n")
    answer = input(environment.prompt)
    is_wildcard = answer.lower() == 'y'
    if is_wildcard:
        print("Generating wildcard cert for *.%s" % (site_name,))
    else:
        print("Generating cert for %s" % (site_name,))
    cn_prefix = '*.' if is_wildcard else ''
    # http://serverfault.com/questions/649990/non-interactive-creation-of-ssl-certificate-requests
    #command_template = 'openssl req -new -newkey rsa:2048 -nodes -sha256 -keyout foobar.com.key -out foobar.com.csr -subj "/C=US/ST=New foobar/L=foobar/O=foobar foobar, Inc./CN=foobar.com/emailAddress=foobar@foobar.com"'
    command_template = ("openssl req -new -newkey rsa:2048 -nodes -sha256 "
                        "-keyout %s.key -out %s.csr -subj \"/CN=%s%s\"")
    print(command_template % (site_name, site_name, cn_prefix, site_name))
    return True
|
# Gallery URL routing: both /<id>/ and / map to the gallery index view.
# NOTE(review): `django.conf.urls.defaults` and string-based `patterns()` were
# removed in modern Django (1.6/1.10) — this file assumes a legacy release;
# confirm the project's Django version before touching it.
from django.conf.urls.defaults import *

urlpatterns = patterns('',
    (r'^(\d+)/$', 'onpsx.gallery.views.index'),
    (r'^$', 'onpsx.gallery.views.index'),
)
|
import io
import os
import six
import pytest
from pytest_pootle.factories import (
LanguageDBFactory, ProjectDBFactory, StoreDBFactory,
TranslationProjectFactory)
from pytest_pootle.utils import update_store
from translate.storage.factory import getclass
from django.db.models import Max
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from pootle.core.delegate import (
config, format_classes, format_diffs, formats)
from pootle.core.models import Revision
from pootle.core.delegate import deserializers, serializers
from pootle.core.url_helpers import to_tp_relative_path
from pootle.core.plugin import provider
from pootle.core.serializers import Serializer, Deserializer
from pootle_app.models import Directory
from pootle_config.exceptions import ConfigurationError
from pootle_format.exceptions import UnrecognizedFiletype
from pootle_format.formats.po import PoStoreSyncer
from pootle_format.models import Format
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_statistics.models import (
SubmissionFields, SubmissionTypes)
from pootle_store.constants import (
NEW, OBSOLETE, PARSED, POOTLE_WINS, TRANSLATED)
from pootle_store.diff import DiffableStore, StoreDiff
from pootle_store.models import Store
from pootle_store.util import parse_pootle_revision
from pootle_translationproject.models import TranslationProject
def _update_from_upload_file(store, update_file,
                             content_type="text/x-gettext-translation",
                             user=None, submission_type=None):
    """Update `store` from the file at path `update_file`, simulating an upload.

    Reads the file, wraps it in a SimpleUploadedFile, parses it with the
    matching toolkit class and applies it via `store.update`, propagating
    `user` and `submission_type` for attribution.
    """
    # FIX: open in binary mode — SimpleUploadedFile content must be bytes;
    # text mode would hand over unicode on Python 3 and can mangle PO payloads.
    with open(update_file, "rb") as f:
        upload = SimpleUploadedFile(os.path.basename(update_file),
                                    f.read(),
                                    content_type)
    test_store = getclass(upload)(upload.read())
    store_revision = parse_pootle_revision(test_store)
    store.update(test_store, store_revision=store_revision,
                 user=user, submission_type=submission_type)
def _store_as_string(store):
    """Serialize `store` through its syncer, stamping Pootle headers if the
    format supports metadata."""
    converted = store.syncer.convert(store.syncer.file_class)
    # FIXME We need those headers on import
    # However some formats just don't support setting metadata
    if hasattr(converted, "updateheader"):
        converted.updateheader(add=True, X_Pootle_Path=store.pootle_path)
        converted.updateheader(
            add=True, X_Pootle_Revision=store.get_max_unit_revision())
    return str(converted)
@pytest.mark.django_db
def test_delete_mark_obsolete(project0_nongnu, project0, store0):
    """Tests that the in-DB Store and Directory are marked as obsolete
    after the on-disk file ceased to exist.

    Refs. #269.
    """
    tp = TranslationProjectFactory(
        project=project0, language=LanguageDBFactory())
    store = StoreDBFactory(
        translation_project=tp,
        parent=tp.directory)
    store.update(store.deserialize(store0.serialize()))
    store.sync()
    pootle_path = store.pootle_path
    # Remove on-disk file
    os.remove(store.file.path)
    # Update stores by rescanning TP
    tp.scan_files()
    # Now files that ceased to exist should be marked as obsolete
    updated_store = Store.objects.get(pootle_path=pootle_path)
    assert updated_store.obsolete
    # The units they contained are obsolete too
    assert not updated_store.units.exists()
    assert updated_store.unit_set.filter(state=OBSOLETE).exists()
    obs_unit = updated_store.unit_set.filter(state=OBSOLETE).first()
    # FIX: this comparison was a bare expression in the original and was
    # never actually asserted.
    assert obs_unit.submission_set.count() == 0
@pytest.mark.django_db
def test_sync(project0_nongnu, project0, store0):
    """Tests that the new on-disk file is created after sync for existing
    in-DB Store if the corresponding on-disk file ceased to exist.
    """
    tp = TranslationProjectFactory(
        project=project0, language=LanguageDBFactory())
    store = StoreDBFactory(
        translation_project=tp,
        parent=tp.directory)
    # Seed the fresh store with store0's content; nothing exists on disk yet.
    store.update(store.deserialize(store0.serialize()))
    assert not store.file.exists()
    store.sync()
    assert store.file.exists()
    # Removing the synced file and syncing again must recreate it.
    os.remove(store.file.path)
    assert not store.file.exists()
    store.sync()
    assert store.file.exists()
@pytest.mark.django_db
def test_update_from_ts(store0, test_fs, member):
    """Updating from an uploaded .ts file appends its units and credits
    them to the uploading user with SubmissionTypes.UPLOAD."""
    store0.parsed = True
    orig_units = store0.units.count()
    # Snapshot existing timestamps so the new units can be shown to be newer.
    existing_created_at = store0.units.aggregate(
        Max("creation_time"))["creation_time__max"]
    existing_mtime = store0.units.aggregate(
        Max("mtime"))["mtime__max"]
    old_revision = store0.data.max_unit_revision
    with test_fs.open(['data', 'ts', 'tutorial', 'en', 'tutorial.ts']) as f:
        store = getclass(f)(f.read())
        store0.update(
            store,
            submission_type=SubmissionTypes.UPLOAD,
            user=member)
    # First appended unit is singular; the following one carries plurals.
    assert not store0.units[orig_units].hasplural()
    unit = store0.units[orig_units + 1]
    assert unit.submission_set.count() == 0
    assert unit.hasplural()
    assert unit.creation_time >= existing_created_at
    assert unit.creation_time >= existing_mtime
    # Creation and change are both attributed to the uploading member.
    unit_source = unit.unit_source
    assert unit_source.created_with == SubmissionTypes.UPLOAD
    assert unit_source.created_by == member
    assert unit.change.changed_with == SubmissionTypes.UPLOAD
    assert unit.change.submitted_by == member
    assert unit.change.submitted_on >= unit.creation_time
    assert unit.change.reviewed_by is None
    assert unit.change.reviewed_on is None
    assert unit.revision > old_revision
@pytest.mark.django_db
def test_update_ts_plurals(store_po, test_fs, ts):
    """Updating an existing plural unit from a second .ts file records one
    target submission that mirrors the unit's change metadata."""
    project = store_po.translation_project.project
    filetype_tool = project.filetype_tool
    project.filetypes.add(ts)
    filetype_tool.set_store_filetype(store_po, ts)
    # Initial import creates the plural unit — no submissions yet.
    with test_fs.open(['data', 'ts', 'add_plurals.ts']) as f:
        file_store = getclass(f)(f.read())
        store_po.update(file_store)
    unit = store_po.units[0]
    assert unit.hasplural()
    assert unit.submission_set.count() == 0
    # Second import changes the target -> exactly one new submission.
    with test_fs.open(['data', 'ts', 'update_plurals.ts']) as f:
        file_store = getclass(f)(f.read())
        store_po.update(file_store)
    unit = store_po.units[0]
    assert unit.hasplural()
    assert unit.submission_set.count() == 1
    update_sub = unit.submission_set.first()
    assert update_sub.revision == unit.revision
    assert update_sub.creation_time == unit.change.submitted_on
    assert update_sub.submitter == unit.change.submitted_by
    assert update_sub.new_value == unit.target
    assert update_sub.type == unit.change.changed_with
    assert update_sub.field == SubmissionFields.TARGET
    # this fails 8(
    # from pootle.core.utils.multistring import unparse_multistring
    # assert (
    #     unparse_multistring(update_sub.new_value)
    #     == unparse_multistring(unit.target))
@pytest.mark.django_db
def test_update_with_non_ascii(store0, test_fs):
    """Non-ASCII target text in an uploaded PO must survive the update intact."""
    store0.state = PARSED
    orig_units = store0.units.count()
    path = 'data', 'po', 'tutorial', 'en', 'tutorial_non_ascii.po'
    with test_fs.open(path) as f:
        store = getclass(f)(f.read())
        store0.update(store)
    # The file appends one unit; it is the first unit past the old count.
    last_unit = store0.units[orig_units]
    updated_target = "Hèḽḽě, ŵôrḽḓ"
    assert last_unit.target == updated_target
    assert last_unit.submission_set.count() == 0
    # last_unit.target = "foo"
    # last_unit.save()
    # this should now have a submission with the old target
    # but it fails
    # assert last_unit.submission_set.count() == 1
    # update_sub = last_unit.submission_set.first()
    # assert update_sub.old_value == updated_target
    # assert update_sub.new_value == "foo"
@pytest.mark.django_db
def test_update_unit_order(project0_nongnu, ordered_po, ordered_update_ttk):
    """Tests unit order after a specific update.
    """
    # Set last sync revision
    ordered_po.sync()
    assert ordered_po.file.exists()
    # Unit ids encode "original index -> expected index".
    expected_unit_list = ['1->2', '2->4', '3->3', '4->5']
    updated_unit_list = [unit.unitid for unit in ordered_po.units]
    assert expected_unit_list == updated_unit_list
    original_revision = ordered_po.get_max_unit_revision()
    ordered_po.update(
        ordered_update_ttk,
        store_revision=original_revision)
    # New units ("X->") are interleaved with the survivors in file order.
    expected_unit_list = [
        'X->1', '1->2', '3->3', '2->4',
        '4->5', 'X->6', 'X->7', 'X->8']
    updated_unit_list = [unit.unitid for unit in ordered_po.units]
    assert expected_unit_list == updated_unit_list
    unit = ordered_po.units.first()
    assert unit.revision > original_revision
    assert unit.submission_set.count() == 0
@pytest.mark.django_db
def test_update_save_changed_units(project0_nongnu, store0, member, system):
    """Tests that any update saves changed units only.
    """
    # not sure if this is testing anything
    store = store0
    # Set last sync revision
    store.sync()
    store.update(store.file.store)
    unit_list = list(store.units)
    # Point the store at an alternative on-disk file and update from it.
    store.file = 'tutorial/ru/update_save_changed_units_updated.po'
    store.update(store.file.store, user=member)
    updated_unit_list = list(store.units)
    # nothing changed
    for index in range(0, len(unit_list)):
        unit = unit_list[index]
        updated_unit = updated_unit_list[index]
        assert unit.revision == updated_unit.revision
        assert unit.mtime == updated_unit.mtime
        assert unit.target == updated_unit.target
@pytest.mark.django_db
def test_update_set_last_sync_revision(project0_nongnu, tp0, store0, test_fs):
    """Tests setting last_sync_revision after store creation.
    """
    unit = store0.units.first()
    unit.target = "UPDATED TARGET"
    unit.save()
    store0.sync()
    # Store is already parsed and store.last_sync_revision should be equal to
    # max unit revision
    assert store0.last_sync_revision == store0.get_max_unit_revision()
    # store.last_sync_revision is not changed after empty update
    saved_last_sync_revision = store0.last_sync_revision
    store0.updater.update_from_disk()
    assert store0.last_sync_revision == saved_last_sync_revision
    # Keep a serialized copy so the original content can be restored below.
    # NOTE(review): `orig` is a str later written through a binary-mode
    # handle — fine on Python 2, would need encoding on Python 3.
    orig = str(store0)
    update_file = test_fs.open(
        "data/po/tutorial/ru/update_set_last_sync_revision_updated.po",
        "r")
    with update_file as sourcef:
        with open(store0.file.path, "wb") as targetf:
            targetf.write(sourcef.read())
    # Re-fetch to drop cached state before updating from disk.
    store0 = Store.objects.get(pk=store0.pk)
    # any non-empty update sets last_sync_revision to next global revision
    next_revision = Revision.get() + 1
    store0.updater.update_from_disk()
    assert store0.last_sync_revision == next_revision
    # store.last_sync_revision is not changed after empty update (even if it
    # has unsynced units)
    item_index = 0
    next_unit_revision = Revision.get() + 1
    dbunit = store0.units.first()
    dbunit.target = "ANOTHER DB TARGET UPDATE"
    dbunit.save()
    assert dbunit.revision == next_unit_revision
    store0.updater.update_from_disk()
    assert store0.last_sync_revision == next_revision
    # Non-empty update sets store.last_sync_revision to next global revision
    # (even the store has unsynced units). There is only one unsynced unit in
    # this case so its revision should be set next to store.last_sync_revision
    next_revision = Revision.get() + 1
    with open(store0.file.path, "wb") as targetf:
        targetf.write(orig)
    store0 = Store.objects.get(pk=store0.pk)
    store0.updater.update_from_disk()
    assert store0.last_sync_revision == next_revision
    # Get unsynced unit in DB. Its revision should be greater
    # than store.last_sync_revision to allow to keep this change during
    # update from a file
    dbunit = store0.units[item_index]
    assert dbunit.revision == store0.last_sync_revision + 1
@pytest.mark.django_db
def test_update_upload_defaults(store0, system):
    """With no explicit user/type, an update is attributed to the `system`
    user as SubmissionTypes.SYSTEM and adds target + state submissions."""
    store0.state = PARSED
    unit = store0.units.first()
    original_revision = unit.revision
    last_sub_pk = unit.submission_set.order_by(
        "id").values_list("id", flat=True).last() or 0
    update_store(
        store0,
        [(unit.source, "%s UPDATED" % unit.source, False)],
        store_revision=Revision.get() + 1)
    unit = store0.units[0]
    # FIX: the original asserted `submitted_by == system` twice and built
    # the `new_subs` queryset twice; the duplicates are dropped here.
    assert unit.change.submitted_by == system
    assert unit.change.submitted_on >= unit.creation_time
    assert (
        unit.submission_set.last().type
        == SubmissionTypes.SYSTEM)
    assert unit.revision > original_revision
    # there should be 2 new subs - state_change and target_change
    new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
    assert new_subs.count() == 2
    target_sub = new_subs[0]
    assert target_sub.old_value == ""
    assert target_sub.new_value == unit.target
    assert target_sub.field == SubmissionFields.TARGET
    assert target_sub.type == SubmissionTypes.SYSTEM
    assert target_sub.submitter == system
    assert target_sub.revision == unit.revision
    assert target_sub.creation_time == unit.change.submitted_on
    state_sub = new_subs[1]
    assert state_sub.old_value == "0"
    assert state_sub.new_value == "200"
    assert state_sub.field == SubmissionFields.STATE
    assert state_sub.type == SubmissionTypes.SYSTEM
    assert state_sub.submitter == system
    assert state_sub.revision == unit.revision
    assert state_sub.creation_time == unit.change.submitted_on
@pytest.mark.django_db
def test_update_upload_member_user(store0, system, member):
    """An upload by `member` credits the change to member/UPLOAD while unit
    creation stays attributed to system/SYSTEM."""
    store0.state = PARSED
    original_unit = store0.units.first()
    original_revision = original_unit.revision
    last_sub_pk = original_unit.submission_set.order_by(
        "id").values_list("id", flat=True).last() or 0
    update_store(
        store0,
        [(original_unit.source, "%s UPDATED" % original_unit.source, False)],
        user=member,
        store_revision=Revision.get() + 1,
        submission_type=SubmissionTypes.UPLOAD)
    unit = store0.units[0]
    assert unit.change.submitted_by == member
    assert unit.change.changed_with == SubmissionTypes.UPLOAD
    assert unit.change.submitted_on >= unit.creation_time
    assert unit.change.reviewed_on is None
    assert unit.revision > original_revision
    unit_source = unit.unit_source
    # FIX: these two checks were bare comparisons in the original and were
    # never actually asserted.
    assert unit_source.created_by == system
    assert unit_source.created_with == SubmissionTypes.SYSTEM
    # there should be 2 new subs - state_change and target_change
    new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
    assert new_subs.count() == 2
    target_sub = new_subs[0]
    assert target_sub.old_value == ""
    assert target_sub.new_value == unit.target
    assert target_sub.field == SubmissionFields.TARGET
    assert target_sub.type == SubmissionTypes.UPLOAD
    assert target_sub.submitter == member
    assert target_sub.revision == unit.revision
    assert target_sub.creation_time == unit.change.submitted_on
    state_sub = new_subs[1]
    assert state_sub.old_value == "0"
    assert state_sub.new_value == "200"
    assert state_sub.field == SubmissionFields.STATE
    assert state_sub.type == SubmissionTypes.UPLOAD
    assert state_sub.submitter == member
    assert state_sub.revision == unit.revision
    assert state_sub.creation_time == unit.change.submitted_on
@pytest.mark.django_db
def test_update_upload_submission_type(store0):
    """An explicit submission_type stamps the change and new submissions,
    while the unit's creation stays attributed to SYSTEM."""
    store0.state = PARSED
    unit = store0.units.first()
    last_sub_pk = unit.submission_set.order_by(
        "id").values_list("id", flat=True).last() or 0
    update_store(
        store0,
        [(unit.source, "%s UPDATED" % unit.source, False)],
        submission_type=SubmissionTypes.UPLOAD,
        store_revision=Revision.get() + 1)
    unit_source = store0.units[0].unit_source
    unit_change = store0.units[0].change
    assert unit_source.created_with == SubmissionTypes.SYSTEM
    assert unit_change.changed_with == SubmissionTypes.UPLOAD
    # there should be 2 new subs - state_change and target_change
    # and both should show as by UPLOAD
    new_subs = unit.submission_set.filter(id__gt=last_sub_pk)
    assert (
        list(new_subs.values_list("type", flat=True))
        == [SubmissionTypes.UPLOAD] * 2)
@pytest.mark.django_db
def test_update_upload_new_revision(store0, member):
    """An upload at a newer store revision replaces the old unit: the
    original is obsoleted and the replacement is credited to the uploader."""
    original_revision = store0.data.max_unit_revision
    old_unit = store0.units.first()
    update_store(
        store0,
        [("Hello, world", "Hello, world UPDATED", False)],
        submission_type=SubmissionTypes.UPLOAD,
        store_revision=Revision.get() + 1,
        user=member)
    old_unit.refresh_from_db()
    assert old_unit.state == OBSOLETE
    assert len(store0.units) == 1
    unit = store0.units[0]
    unit_source = unit.unit_source
    assert unit.revision > original_revision
    assert unit_source.created_by == member
    assert unit.change.submitted_by == member
    assert unit.creation_time == unit.change.submitted_on
    assert unit.change.reviewed_by is None
    assert unit.change.reviewed_on is None
    assert unit.target == "Hello, world UPDATED"
    assert unit.submission_set.count() == 0
@pytest.mark.django_db
def test_update_upload_again_new_revision(store0, member, member2):
    """A second upload at a newer revision updates the previously-uploaded
    unit in place, recording one target submission for the second uploader."""
    store = store0
    assert store.state == NEW
    original_unit = store0.units[0]
    # First upload: obsoletes the original unit, adds the replacement.
    update_store(
        store,
        [("Hello, world", "Hello, world UPDATED", False)],
        submission_type=SubmissionTypes.UPLOAD,
        store_revision=Revision.get() + 1,
        user=member)
    original_unit.refresh_from_db()
    assert original_unit.state == OBSOLETE
    store = Store.objects.get(pk=store0.pk)
    assert store.state == PARSED
    created_unit = store.units[0]
    assert created_unit.target == "Hello, world UPDATED"
    assert created_unit.state == TRANSLATED
    assert created_unit.submission_set.count() == 0
    old_unit_revision = store.data.max_unit_revision
    # Second upload (WEB, member2): same source, new target.
    update_store(
        store0,
        [("Hello, world", "Hello, world UPDATED AGAIN", False)],
        submission_type=SubmissionTypes.WEB,
        user=member2,
        store_revision=Revision.get() + 1)
    assert created_unit.submission_set.count() == 1
    update_sub = created_unit.submission_set.first()
    store = Store.objects.get(pk=store0.pk)
    assert store.state == PARSED
    unit = store.units[0]
    unit_source = unit.unit_source
    assert unit.revision > old_unit_revision
    assert unit.target == "Hello, world UPDATED AGAIN"
    # Creation credit stays with the first uploader; the change moves on.
    assert unit_source.created_by == member
    assert unit_source.created_with == SubmissionTypes.UPLOAD
    assert unit.change.submitted_by == member2
    assert unit.change.submitted_on >= unit.creation_time
    assert unit.change.reviewed_by is None
    assert unit.change.reviewed_on is None
    assert unit.change.changed_with == SubmissionTypes.WEB
    assert update_sub.creation_time == unit.change.submitted_on
    assert update_sub.type == unit.change.changed_with
    assert update_sub.field == SubmissionFields.TARGET
    assert update_sub.submitter == unit.change.submitted_by
    assert update_sub.old_value == created_unit.target
    assert update_sub.new_value == unit.target
    assert update_sub.revision == unit.revision
@pytest.mark.django_db
def test_update_upload_old_revision_unit_conflict(store0, admin, member):
    """An upload at an expired revision with a conflicting target must not
    overwrite the unit; the conflict is stored as a suggestion instead."""
    original_revision = Revision.get()
    original_unit = store0.units[0]
    update_store(
        store0,
        [("Hello, world", "Hello, world UPDATED", False)],
        submission_type=SubmissionTypes.UPLOAD,
        store_revision=original_revision + 1,
        user=admin)
    unit = store0.units[0]
    unit_source = unit.unit_source
    assert unit_source.created_by == admin
    updated_revision = unit.revision
    assert (
        unit_source.created_with
        == SubmissionTypes.UPLOAD)
    assert unit.change.submitted_by == admin
    assert (
        unit.change.changed_with
        == SubmissionTypes.UPLOAD)
    last_submit_time = unit.change.submitted_on
    assert last_submit_time >= unit.creation_time
    # load update with expired revision and conflicting unit
    update_store(
        store0,
        [("Hello, world", "Hello, world CONFLICT", False)],
        submission_type=SubmissionTypes.WEB,
        store_revision=original_revision,
        user=member)
    unit = store0.units[0]
    assert unit.submission_set.count() == 0
    unit_source = unit.unit_source
    # unit target is not updated and revision remains the same
    assert store0.units[0].target == "Hello, world UPDATED"
    assert unit.revision == updated_revision
    unit_source = original_unit.unit_source
    # FIX: the checks below were bare comparisons in the original and were
    # never actually asserted.
    assert unit_source.created_by == admin
    assert unit_source.created_with == SubmissionTypes.SYSTEM
    assert unit.change.changed_with == SubmissionTypes.UPLOAD
    assert unit.change.submitted_by == admin
    assert unit.change.submitted_on == last_submit_time
    assert unit.change.reviewed_by is None
    assert unit.change.reviewed_on is None
    # but suggestion is added
    suggestion = store0.units[0].get_suggestions()[0]
    assert suggestion.target == "Hello, world CONFLICT"
    assert suggestion.user == member
@pytest.mark.django_db
def test_update_upload_new_revision_new_unit(store0, member):
    """A unit added by an upload at a new revision is created and fully
    credited to the uploading user with the given submission type."""
    file_name = "pytest_pootle/data/po/tutorial/en/tutorial_update_new_unit.po"
    store0.state = PARSED
    old_unit_revision = store0.data.max_unit_revision
    _update_from_upload_file(
        store0,
        file_name,
        user=member,
        submission_type=SubmissionTypes.WEB)
    unit = store0.units.last()
    unit_source = unit.unit_source
    # the new unit has been added
    assert unit.submission_set.count() == 0
    assert unit.revision > old_unit_revision
    assert unit.target == 'Goodbye, world'
    assert unit_source.created_by == member
    assert unit_source.created_with == SubmissionTypes.WEB
    assert unit.change.submitted_by == member
    assert unit.change.changed_with == SubmissionTypes.WEB
@pytest.mark.django_db
def test_update_upload_old_revision_new_unit(store0, member2):
    """A brand-new unit in an old-revision upload is still added, because it
    does not conflict with any previously obsoleted unit."""
    store0.units.delete()
    store0.state = PARSED
    old_unit_revision = store0.data.max_unit_revision
    # load initial update
    _update_from_upload_file(
        store0,
        "pytest_pootle/data/po/tutorial/en/tutorial_update.po")
    # load old revision with new unit
    file_name = "pytest_pootle/data/po/tutorial/en/tutorial_update_old_unit.po"
    _update_from_upload_file(
        store0,
        file_name,
        user=member2,
        submission_type=SubmissionTypes.WEB)
    # the unit has been added because its not already obsoleted
    assert store0.units.count() == 2
    unit = store0.units.last()
    unit_source = unit.unit_source
    # the new unit has been added
    assert unit.submission_set.count() == 0
    assert unit.revision > old_unit_revision
    assert unit.target == 'Goodbye, world'
    assert unit_source.created_by == member2
    assert unit_source.created_with == SubmissionTypes.WEB
    assert unit.change.submitted_by == member2
    assert unit.change.changed_with == SubmissionTypes.WEB
def _test_store_update_indexes(store, *test_args):
# make sure indexes are not fooed indexes only have to be unique
indexes = [x.index for x in store.units]
assert len(indexes) == len(set(indexes))
def _test_store_update_units_before(*test_args):
    """Verify the fate of each unit that existed before the update.

    `test_args` is the parametrised tuple: (store, units_update,
    store_revision, resolve_conflict, units_before, member, member2).
    Units absent from the update keep their target; units present in it are
    updated, obsoleted or turned into suggestions depending on whether
    store_revision is newer than the unit's revision and on the
    conflict-resolution policy.
    """
    # test what has happened to the units that were present before the update
    (store, units_update, store_revision, resolve_conflict,
     units_before, member_, member2) = test_args
    updates = {unit[0]: unit[1] for unit in units_update}
    for unit, change in units_before:
        updated_unit = store.unit_set.get(unitid=unit.unitid)
        if unit.source not in updates:
            # unit is not in update, target should be left unchanged
            assert updated_unit.target == unit.target
            assert updated_unit.change.submitted_by == change.submitted_by
            # depending on unit/store_revision should be obsoleted
            if unit.isobsolete() or store_revision >= unit.revision:
                assert updated_unit.isobsolete()
            else:
                assert not updated_unit.isobsolete()
        else:
            # unit is in update
            if store_revision >= unit.revision:
                assert not updated_unit.isobsolete()
            elif unit.isobsolete():
                # the unit has been obsoleted since store_revision
                assert updated_unit.isobsolete()
            else:
                assert not updated_unit.isobsolete()
            if not updated_unit.isobsolete():
                if store_revision >= unit.revision:
                    # file store wins outright
                    assert updated_unit.target == updates[unit.source]
                    if unit.target != updates[unit.source]:
                        # unit has changed, or was resurrected
                        assert updated_unit.change.submitted_by == member2
                        # damn mysql microsecond precision
                        if change.submitted_on.time().microsecond != 0:
                            assert (
                                updated_unit.change.submitted_on
                                != change.submitted_on)
                    elif unit.isobsolete():
                        # unit has changed, or was resurrected
                        assert updated_unit.change.reviewed_by == member2
                        # damn mysql microsecond precision
                        if change.reviewed_on.time().microsecond != 0:
                            assert (
                                updated_unit.change.reviewed_on
                                != change.reviewed_on)
                    else:
                        assert (
                            updated_unit.change.submitted_by
                            == change.submitted_by)
                        assert (
                            updated_unit.change.submitted_on
                            == change.submitted_on)
                    assert updated_unit.get_suggestions().count() == 0
                else:
                    # conflict found
                    suggestion = updated_unit.get_suggestions()[0]
                    if resolve_conflict == POOTLE_WINS:
                        assert updated_unit.target == unit.target
                        assert (
                            updated_unit.change.submitted_by
                            == change.submitted_by)
                        assert suggestion.target == updates[unit.source]
                        assert suggestion.user == member2
                    else:
                        assert updated_unit.target == updates[unit.source]
                        assert updated_unit.change.submitted_by == member2
                        assert suggestion.target == unit.target
                        assert suggestion.user == change.submitted_by
def _test_store_update_ordering(*test_args):
    """Verify the post-update unit order: surviving units that were only in
    the DB come first, then the updated-file units in file order."""
    (store, units_update, store_revision, resolve_conflict_,
     units_before, member_, member2_) = test_args
    updates = {unit[0]: unit[1] for unit in units_update}
    old_units = {unit.source: unit for unit, change in units_before}
    # test ordering
    new_unit_list = []
    for unit, change_ in units_before:
        # DB-only units survive if active, absent from the update, and newer
        # than the uploaded store's revision.
        add_unit = (not unit.isobsolete()
                    and unit.source not in updates
                    and unit.revision > store_revision)
        if add_unit:
            new_unit_list.append(unit.source)
    for source, target_, is_fuzzy_ in units_update:
        if source in old_units:
            old_unit = old_units[source]
            # A pre-existing unit stays (or is resurrected) unless it was
            # obsoleted after store_revision.
            should_add = (not old_unit.isobsolete()
                          or old_unit.revision <= store_revision)
            if should_add:
                new_unit_list.append(source)
        else:
            new_unit_list.append(source)
    assert new_unit_list == [x.source for x in store.units]
def _test_store_update_units_now(*test_args):
    """Verify every currently-active unit either came from the update or is
    a pre-existing non-obsolete unit newer than store_revision."""
    (store, units_update, store_revision, resolve_conflict_,
     units_before, member_, member2_) = test_args
    # test that all the current units should be there
    updates = {unit[0]: unit[1] for unit in units_update}
    old_units = {unit.source: unit for unit, change in units_before}
    for unit in store.units:
        assert (
            unit.source in updates
            or (old_units[unit.source].revision > store_revision
                and not old_units[unit.source].isobsolete()))
@pytest.mark.django_db
def test_store_update(param_update_store_test):
    """Run the full battery of store-update invariants for one parametrised
    update scenario."""
    checks = (
        _test_store_update_indexes,
        _test_store_update_units_before,
        _test_store_update_units_now,
        _test_store_update_ordering,
    )
    for check in checks:
        check(*param_update_store_test)
@pytest.mark.django_db
def test_store_file_diff(store_diff_tests):
    """Check StoreDiff's derived views (source/target units, obsoleted and
    updated sets, and diff() keys) against the raw DB querysets."""
    diff, store, update_units, store_revision = store_diff_tests
    assert diff.target_store == store
    assert diff.source_revision == store_revision
    # Source units (minus the header at index 0) match the update tuples.
    # state == 50 encodes "fuzzy" in the flattened unit dict.
    assert (
        update_units
        == [(x.source, x.target, x.isfuzzy())
            for x in diff.source_store.units[1:]]
        == [(v['source'], v['target'], v['state'] == 50)
            for v in diff.source_units.values()])
    assert diff.active_target_units == [x.source for x in store.units]
    assert diff.target_revision == store.get_max_unit_revision()
    assert (
        diff.target_units
        == {unit["source_f"]: unit
            for unit
            in store.unit_set.values("source_f", "index", "target_f",
                                     "state", "unitid", "id", "revision",
                                     "developer_comment", "translator_comment",
                                     "locations", "context")})
    diff_diff = diff.diff()
    if diff_diff is not None:
        assert (
            sorted(diff_diff.keys())
            == ["add", "index", "obsolete", "update"])
    # obsoleted units have no index - so just check they are all they match
    obsoleted = (store.unit_set.filter(state=OBSOLETE)
                 .filter(revision__gt=store_revision)
                 .values_list("source_f", flat=True))
    assert len(diff.obsoleted_target_units) == obsoleted.count()
    assert all(x in diff.obsoleted_target_units for x in obsoleted)
    assert (
        diff.updated_target_units
        == list(store.units.filter(revision__gt=store_revision)
                .values_list("source_f", flat=True)))
@pytest.mark.django_db
def test_store_repr():
    """str() mirrors the syncer serialization; repr() shows the pootle_path."""
    store = Store.objects.first()
    assert str(store) == str(store.syncer.convert(store.syncer.file_class))
    assert repr(store) == u"<Store: %s>" % store.pootle_path
@pytest.mark.django_db
def test_store_po_deserializer(test_fs, store_po):
    """Deserializing a complex PO yields one DB unit per toolkit unit,
    excluding the PO header (toolkit unit 0)."""
    with test_fs.open("data/po/complex.po") as test_file:
        test_string = test_file.read()
        ttk_po = getclass(test_file)(test_string)
    store_po.update(store_po.deserialize(test_string))
    assert len(ttk_po.units) - 1 == store_po.units.count()
@pytest.mark.django_db
def test_store_po_serializer(test_fs, store_po):
    """Round-trip: serializing the updated store reproduces the same number
    of units as the original PO file."""
    with test_fs.open("data/po/complex.po") as test_file:
        test_string = test_file.read()
        ttk_po = getclass(test_file)(test_string)
    store_po.update(store_po.deserialize(test_string))
    store_io = io.BytesIO(store_po.serialize())
    store_ttk = getclass(store_io)(store_io.read())
    assert len(store_ttk.units) == len(ttk_po.units)
@pytest.mark.django_db
def test_store_po_serializer_custom(test_fs, store_po):
    """A project-configured custom serializer receives the store as context
    and the default serialization output as original_data."""

    class SerializerCheck(object):
        # Mutated by EGSerializer.output below to capture what it was given.
        original_data = None
        context = None

    checker = SerializerCheck()

    class EGSerializer(Serializer):

        @property
        def output(self):
            checker.original_data = self.original_data
            checker.context = self.context

    @provider(serializers, sender=Project)
    def provide_serializers(**kwargs):
        return dict(eg_serializer=EGSerializer)

    with test_fs.open("data/po/complex.po") as test_file:
        test_string = test_file.read()
        # ttk_po = getclass(test_file)(test_string)
    store_po.update(store_po.deserialize(test_string))
    # add config to the project
    project = store_po.translation_project.project
    config.get(project.__class__, instance=project).set_config(
        "pootle.core.serializers",
        ["eg_serializer"])
    store_po.serialize()
    assert checker.context == store_po
    # Serialized payload must be a bytes str, not unicode (Python 2 check).
    assert (
        not isinstance(checker.original_data, six.text_type)
        and isinstance(checker.original_data, str))
    assert checker.original_data == _store_as_string(store_po)
@pytest.mark.django_db
def test_store_po_deserializer_custom(test_fs, store_po):
    """A project-configured custom deserializer receives the store as
    context and the raw input as original_data."""

    class DeserializerCheck(object):
        # Mutated by EGDeserializer.output below to capture what it was given.
        original_data = None
        context = None

    checker = DeserializerCheck()

    class EGDeserializer(Deserializer):

        @property
        def output(self):
            checker.context = self.context
            checker.original_data = self.original_data
            return self.original_data

    @provider(deserializers, sender=Project)
    def provide_deserializers(**kwargs):
        return dict(eg_deserializer=EGDeserializer)

    with test_fs.open("data/po/complex.po") as test_file:
        test_string = test_file.read()
    # add config to the project
    project = store_po.translation_project.project
    config.get().set_config(
        "pootle.core.deserializers",
        ["eg_deserializer"],
        project)
    store_po.deserialize(test_string)
    assert checker.original_data == test_string
    assert checker.context == store_po
@pytest.mark.django_db
def test_store_base_serializer(store_po):
    """The base Serializer just records its context object and raw data."""
    payload = "SOME DATA"
    result = Serializer(store_po, payload)
    assert result.context == store_po
    assert result.data == payload
@pytest.mark.django_db
def test_store_base_deserializer(store_po):
    """The base Deserializer just records its context object and raw data."""
    payload = "SOME DATA"
    result = Deserializer(store_po, payload)
    assert result.context == store_po
    assert result.data == payload
@pytest.mark.django_db
def test_store_set_bad_deserializers(store_po):
    """Setting "pootle.core.deserializers" validates the value: it must be
    a list naming registered deserializers.
    """
    project = store_po.translation_project.project
    # no deserializer registered under this name - rejected
    with pytest.raises(ConfigurationError):
        config.get(project.__class__, instance=project).set_config(
            "pootle.core.deserializers",
            ["DESERIALIZER_DOES_NOT_EXIST"])
    class EGDeserializer(object):
        pass
    @provider(deserializers)
    def provide_deserializers(**kwargs):
        return dict(eg_deserializer=EGDeserializer)
    # must be list
    with pytest.raises(ConfigurationError):
        config.get(project.__class__, instance=project).set_config(
            "pootle.core.deserializers",
            "eg_deserializer")
    with pytest.raises(ConfigurationError):
        config.get(project.__class__, instance=project).set_config(
            "pootle.core.deserializers",
            dict(serializer="eg_deserializer"))
    # a list of registered deserializer names is accepted
    config.get(project.__class__, instance=project).set_config(
        "pootle.core.deserializers",
        ["eg_deserializer"])
@pytest.mark.django_db
def test_store_set_bad_serializers(store_po):
    """Setting "pootle.core.serializers" validates the value: it must be
    a list naming registered serializers.
    """
    project = store_po.translation_project.project
    # no serializer registered under this name - rejected
    with pytest.raises(ConfigurationError):
        config.get(project.__class__, instance=project).set_config(
            "pootle.core.serializers",
            ["SERIALIZER_DOES_NOT_EXIST"])
    class EGSerializer(Serializer):
        pass
    @provider(serializers)
    def provide_serializers(**kwargs):
        return dict(eg_serializer=EGSerializer)
    # must be list
    with pytest.raises(ConfigurationError):
        config.get(project.__class__, instance=project).set_config(
            "pootle.core.serializers",
            "eg_serializer")
    with pytest.raises(ConfigurationError):
        config.get(project.__class__, instance=project).set_config(
            "pootle.core.serializers",
            dict(serializer="eg_serializer"))
    # a list of registered serializer names is accepted
    config.get(project.__class__, instance=project).set_config(
        "pootle.core.serializers",
        ["eg_serializer"])
@pytest.mark.django_db
def test_store_create_by_bad_path(project0):
    """create_by_path rejects invalid pootle paths with specific errors."""
    # bad project name
    with pytest.raises(Project.DoesNotExist):
        Store.objects.create_by_path(
            "/language0/does/not/exist.po")
    # bad language code
    with pytest.raises(Language.DoesNotExist):
        Store.objects.create_by_path(
            "/does/project0/not/exist.po")
    # project and project code dont match
    with pytest.raises(ValueError):
        Store.objects.create_by_path(
            "/language0/project1/store.po",
            project=project0)
    # bad store.ext
    with pytest.raises(ValueError):
        Store.objects.create_by_path(
            "/language0/project0/store_by_path.foo")
    # subdir doesnt exist
    path = '/language0/project0/path/to/subdir.po'
    with pytest.raises(Directory.DoesNotExist):
        Store.objects.create_by_path(
            path, create_directory=False)
    # tp doesnt exist and create_tp is disabled
    path = '/%s/project0/notp.po' % LanguageDBFactory().code
    with pytest.raises(TranslationProject.DoesNotExist):
        Store.objects.create_by_path(
            path, create_tp=False)
@pytest.mark.django_db
def test_store_create_by_path(po_directory):
    """create_by_path creates (or fetches existing) stores at tp paths."""
    paths = (
        '/language0/project0/path.po',             # create in tp
        '/language0/project0/path.po',             # again - fetch existing
        '/language0/project0/subdir0/exists.po',   # in existing subdir
        '/language0/project0/path/to/subdir.po')   # in new subdirs
    for pootle_path in paths:
        created = Store.objects.create_by_path(pootle_path)
        assert created.pootle_path == pootle_path
@pytest.mark.django_db
def test_store_create_by_path_with_project(project0):
    """create_by_path works the same when the project is passed explicitly."""
    paths = (
        '/language0/project0/path2.po',            # in tp
        '/language0/project0/subdir0/exists2.po',  # in existing subdir
        '/language0/project0/path/to/subdir2.po')  # in new subdirs
    for pootle_path in paths:
        created = Store.objects.create_by_path(
            pootle_path, project=project0)
        assert created.pootle_path == pootle_path
@pytest.mark.django_db
def test_store_create_by_new_tp_path(po_directory):
    """create_by_path creates the translation project for a new language."""
    templates = (
        '/%s/project0/tp.po',
        '/%s/project0/with/subdir/tp.po')
    for path_template in templates:
        language = LanguageDBFactory()
        pootle_path = path_template % language.code
        created = Store.objects.create_by_path(pootle_path)
        assert created.pootle_path == pootle_path
        assert created.translation_project.language == language
@pytest.mark.django_db
def test_store_create(tp0):
    """Store.filetype is resolved from the extension against the project's
    filetypes, in the order the filetypes were added to the project.
    """
    tp = tp0
    project = tp.project
    registry = formats.get()
    po = Format.objects.get(name="po")
    # two extra formats sharing the "po" extension
    po2 = registry.register("special_po_2", "po")
    po3 = registry.register("special_po_3", "po")
    xliff = Format.objects.get(name="xliff")
    project.filetypes.add(xliff)
    project.filetypes.add(po2)
    project.filetypes.add(po3)
    # po is first in the project's filetype list, so .po resolves to it
    store = Store.objects.create(
        name="store.po",
        parent=tp.directory,
        translation_project=tp)
    assert store.filetype == po
    assert not store.is_template
    store = Store.objects.create(
        name="store.pot",
        parent=tp.directory,
        translation_project=tp)
    # not in source_language folder
    assert not store.is_template
    # .pot resolves to the po format (its template extension)
    assert store.filetype == po
    store = Store.objects.create(
        name="store.xliff",
        parent=tp.directory,
        translation_project=tp)
    assert store.filetype == xliff
    # push po to the back of the queue
    project.filetypes.remove(po)
    project.filetypes.add(po)
    # now po2 is the first filetype matching the .po extension
    store = Store.objects.create(
        name="another_store.po",
        parent=tp.directory,
        translation_project=tp)
    assert store.filetype == po2
    # .pot still resolves to po - presumably template-extension matching
    # prefers it; TODO confirm against the registry implementation
    store = Store.objects.create(
        name="another_store.pot",
        parent=tp.directory,
        translation_project=tp)
    assert store.filetype == po
    # NOTE(review): no assertion on this store - a
    # "assert store.filetype == xliff" looks intended here
    store = Store.objects.create(
        name="another_store.xliff",
        parent=tp.directory,
        translation_project=tp)
    # extensions matching no project filetype are rejected
    with pytest.raises(UnrecognizedFiletype):
        store = Store.objects.create(
            name="another_store.foo",
            parent=tp.directory,
            translation_project=tp)
@pytest.mark.django_db
def test_store_create_name_with_slashes_or_backslashes(tp0):
    """Test Stores are not created with (back)slashes on their name."""
    for bad_name in ("slashed/name.po", "backslashed\\name.po"):
        with pytest.raises(ValidationError):
            Store.objects.create(
                name=bad_name,
                parent=tp0.directory,
                translation_project=tp0)
@pytest.mark.django_db
def test_store_get_file_class():
    """syncer.file_class resolves the translate-toolkit class for a store,
    preferring classes registered through the format_classes provider and
    keyed on the store's filetype (not its filename).
    """
    store = Store.objects.filter(
        translation_project__project__code="project0",
        translation_project__language__code="language0").first()
    # this matches because po is recognised by ttk
    assert store.syncer.file_class == getclass(store)
    # file_class is cached so lets delete it
    del store.syncer.__dict__["file_class"]
    class CustomFormatClass(object):
        pass
    @provider(format_classes)
    def format_class_provider(**kwargs):
        return dict(po=CustomFormatClass)
    # we get the CustomFormatClass as it was registered
    assert store.syncer.file_class is CustomFormatClass
    # the Store.filetype is used in this case not the name
    store.name = "new_store_name.foo"
    del store.syncer.__dict__["file_class"]
    assert store.syncer.file_class is CustomFormatClass
    # lets register a foo filetype
    format_registry = formats.get()
    foo_filetype = format_registry.register("foo", "foo")
    store.filetype = foo_filetype
    store.save()
    # oh no! not recognised by ttk
    del store.syncer.__dict__["file_class"]
    with pytest.raises(ValueError):
        store.syncer.file_class
    @provider(format_classes)
    def another_format_class_provider(**kwargs):
        return dict(foo=CustomFormatClass)
    # works now
    assert store.syncer.file_class is CustomFormatClass
    # clean up so the providers don't leak into other tests
    format_classes.disconnect(format_class_provider)
    format_classes.disconnect(another_format_class_provider)
@pytest.mark.django_db
def test_store_get_template_file_class(po_directory, templates):
    """file_class resolution for a template store uses the filetype's
    registered name when consulting format_classes.
    """
    project = ProjectDBFactory(source_language=templates)
    tp = TranslationProjectFactory(language=templates, project=project)
    format_registry = formats.get()
    # "foo" format whose template files use the "bar" extension
    foo_filetype = format_registry.register("foo", "foo", template_extension="bar")
    tp.project.filetypes.add(foo_filetype)
    store = Store.objects.create(
        name="mystore.bar",
        translation_project=tp,
        parent=tp.directory)
    # oh no! not recognised by ttk
    with pytest.raises(ValueError):
        store.syncer.file_class
    class CustomFormatClass(object):
        pass
    @provider(format_classes)
    def format_class_provider(**kwargs):
        return dict(foo=CustomFormatClass)
    # a class registered under "foo" now resolves for the template store
    assert store.syncer.file_class == CustomFormatClass
    # clean up so the provider doesn't leak into other tests
    format_classes.disconnect(format_class_provider)
@pytest.mark.django_db
def test_store_create_templates(po_directory, templates):
    """A .pot store created in a templates tp is a po-format template."""
    project = ProjectDBFactory(source_language=templates)
    tp = TranslationProjectFactory(language=templates, project=project)
    po = Format.objects.get(name="po")
    template_store = Store.objects.create(
        name="mystore.pot",
        translation_project=tp,
        parent=tp.directory)
    assert template_store.is_template
    assert template_store.filetype == po
@pytest.mark.django_db
def test_store_get_or_create_templates(po_directory, templates):
    """get_or_create behaves like create for a new template store."""
    project = ProjectDBFactory(source_language=templates)
    tp = TranslationProjectFactory(language=templates, project=project)
    po = Format.objects.get(name="po")
    template_store, __ = Store.objects.get_or_create(
        name="mystore.pot",
        translation_project=tp,
        parent=tp.directory)
    assert template_store.is_template
    assert template_store.filetype == po
@pytest.mark.django_db
def test_store_diff(diffable_stores):
    """Diffing two identical stores yields no changes."""
    target, source = diffable_stores
    differ = StoreDiff(
        target, source, target.get_max_unit_revision() + 1)
    assert not differ.diff()
    assert differ.target_store == target
    assert differ.source_store == source
@pytest.mark.django_db
def test_store_diff_delete_target_unit(diffable_stores):
    """A unit deleted from the target (but live in the source) is always
    re-added, whatever the source revision.
    """
    target, source = diffable_stores
    # delete a unit in the target store
    removed = target.units.first()
    removed.delete()

    def _assert_readded(source_revision):
        # the unit is re-added (it is not obsolete in the source)
        result = StoreDiff(target, source, source_revision).diff()
        assert result["add"][0][0].source_f == removed.source_f
        assert len(result["add"]) == 1
        assert len(result["index"]) == 0
        assert len(result["obsolete"]) == 0
        assert result['update'] == (set(), {})

    _assert_readded(target.get_max_unit_revision())
    _assert_readded(0)
@pytest.mark.django_db
def test_store_diff_delete_source_unit(diffable_stores):
    """A unit missing from the source is obsoleted in the target only when
    the source revision is current.
    """
    target, source = diffable_stores
    # delete a unit in the source store
    removed = source.units.first()
    removed.delete()
    # at the max revision the matching target unit gets obsoleted
    result = StoreDiff(
        target, source, target.get_max_unit_revision()).diff()
    matching = target.units.get(unitid=removed.unitid)
    assert result["obsolete"] == [matching.pk]
    assert len(result["obsolete"]) == 1
    assert len(result["add"]) == 0
    assert len(result["index"]) == 0
    # below the target's max revision the unit is ignored - it is assumed
    # to have been previously deleted
    assert not StoreDiff(
        target, source, target.get_max_unit_revision() - 1).diff()
@pytest.mark.django_db
def test_store_diff_delete_obsoleted_target_unit(diffable_stores):
    """A source-deleted unit that is already obsolete in the target
    produces no diff.
    """
    target, source = diffable_stores
    # delete a unit in the source store
    removed = source.units.first()
    removed.delete()
    # and obsolete the matching unit in the target
    matching = target.units.get(unitid=removed.unitid)
    matching.makeobsolete()
    matching.save()
    # already obsolete - nothing to do
    assert not StoreDiff(
        target, source, target.get_max_unit_revision() + 1).diff()
@pytest.mark.django_db
def test_store_diff_obsoleted_target_unit(diffable_stores):
    """A target-obsoleted unit is unobsoleted when the source revision is
    newer, and left alone when it is older.
    """
    target, source = diffable_stores
    # obsolete a unit in the target only
    obsoleted = target.units.first()
    obsoleted.makeobsolete()
    obsoleted.save()
    # higher revision: the unit is marked for update (unobsoleted)
    result = StoreDiff(
        target, source, target.get_max_unit_revision() + 1).diff()
    assert result["update"][0] == set([obsoleted.pk])
    assert len(result["update"][1]) == 1
    assert result["update"][1][obsoleted.unitid]["dbid"] == obsoleted.pk
    # lower revision: no change
    assert not StoreDiff(
        target, source, target.get_max_unit_revision() - 1).diff()
@pytest.mark.django_db
def test_store_diff_update_target_unit(diffable_stores):
    """A unit edited in the target is marked for update at any revision."""
    target, source = diffable_stores
    # update a unit in the target
    updated = target.units.first()
    updated.target_f = "Some other string"
    updated.save()
    for revision in (target.get_max_unit_revision() + 1, 0):
        result = StoreDiff(target, source, revision).diff()
        assert result["update"][0] == set([updated.pk])
        assert result["update"][1] == {}
        assert len(result["add"]) == 0
        assert len(result["index"]) == 0
@pytest.mark.django_db
def test_store_diff_update_source_unit(diffable_stores):
    """A unit edited in the source marks the matching target unit for
    update at any revision.
    """
    target, source = diffable_stores
    # update a unit in the source store
    updated = source.units.first()
    updated.target_f = "Some other string"
    updated.save()
    matching = target.units.get(
        unitid=updated.unitid)
    for revision in (target.get_max_unit_revision() + 1, 0):
        result = StoreDiff(target, source, revision).diff()
        assert result["update"][0] == set([matching.pk])
        assert result["update"][1] == {}
        assert len(result["add"]) == 0
        assert len(result["index"]) == 0
@pytest.mark.django_db
def test_store_diff_custom(diffable_stores):
    """A DiffableStore registered for the target's filetype is used."""
    target, source = diffable_stores

    class CustomDiffableStore(DiffableStore):
        pass

    @provider(format_diffs)
    def format_diff_provider(**kwargs):
        return {
            target.filetype.name: CustomDiffableStore}

    differ = StoreDiff(
        target, source, target.get_max_unit_revision() + 1)
    assert isinstance(differ.diffable, CustomDiffableStore)
@pytest.mark.django_db
def test_store_diff_delete_obsoleted_source_unit(diffable_stores):
    """A target-deleted unit that is obsolete in the source produces no
    diff.
    """
    target_store, source_store = diffable_stores
    # delete a unit in the target store
    remove_unit = target_store.units.first()
    remove_unit.delete()
    # and obsolete the same unit in the source
    obsolete_unit = source_store.units.get(unitid=remove_unit.unitid)
    obsolete_unit.makeobsolete()
    obsolete_unit.save()
    # as the unit is already obsolete - nothing
    differ = StoreDiff(
        target_store,
        source_store,
        target_store.get_max_unit_revision() + 1)
    assert not differ.diff()
@pytest.mark.django_db
def test_store_syncer(tp0):
    """The syncer exposes its store's tp, language, project and source
    language.
    """
    store = tp0.stores.live().first()
    syncer = store.syncer
    tp = store.translation_project
    assert isinstance(syncer, PoStoreSyncer)
    assert syncer.file_class == getclass(store)
    assert syncer.translation_project == tp
    assert syncer.language == tp.language
    assert syncer.project == tp.project
    assert syncer.source_language == tp.project.source_language
@pytest.mark.django_db
def test_store_syncer_obsolete_unit(tp0):
    """obsolete_unit deletes untranslated file units; translated ones are
    obsoleted only when not conservative.
    """
    store = tp0.stores.live().first()
    unit = store.units.filter(state=TRANSLATED).first()
    file_unit = store.syncer.unit_sync_class(unit).create_unit(
        store.syncer.file_class.UnitClass)
    # the freshly created file unit has no target - it is simply deleted,
    # conservative or not
    for conservative in (True, False):
        obsoleted, deleted = store.syncer.obsolete_unit(
            file_unit, conservative)
        assert not obsoleted
        assert deleted
    # give the file unit a translation
    file_unit.target = unit.target
    # conservative: nothing changes
    obsoleted, deleted = store.syncer.obsolete_unit(file_unit, True)
    assert not obsoleted
    assert not deleted
    # not conservative: the unit is obsoleted
    obsoleted, deleted = store.syncer.obsolete_unit(file_unit, False)
    assert obsoleted
    assert not deleted
@pytest.mark.django_db
def test_store_syncer_sync_store(tp0, dummy_store_syncer):
    """sync reports the expected changes; conservative makes no difference
    here.
    """
    store = tp0.stores.live().first()
    DummyStoreSyncer, __, expected = dummy_store_syncer
    disk_store = store.syncer.convert()
    for conservative in (expected["conservative"], False):
        expected["conservative"] = conservative
        syncer = DummyStoreSyncer(store, expected=expected)
        changed, result = syncer.sync(
            disk_store,
            expected["last_revision"],
            update_structure=expected["update_structure"],
            conservative=expected["conservative"])
        assert changed is True
        assert result["updated"] == expected["changes"]
@pytest.mark.django_db
def test_store_syncer_sync_store_no_changes(tp0, dummy_store_syncer):
    """With no changes, sync reports nothing updated, conservative or not."""
    store = tp0.stores.live().first()
    DummyStoreSyncer, __, expected = dummy_store_syncer
    disk_store = store.syncer.convert()
    DummyStoreSyncer(store, expected=expected)
    expected["changes"] = []
    for conservative in (True, False):
        expected["conservative"] = conservative
        syncer = DummyStoreSyncer(store, expected=expected)
        changed, result = syncer.sync(
            disk_store,
            expected["last_revision"],
            expected["update_structure"],
            expected["conservative"])
        assert changed is False
        assert not result.get("updated")
@pytest.mark.django_db
def test_store_syncer_sync_store_structure(tp0, dummy_store_syncer):
    """With update_structure set, sync reports the obsoleted/deleted/added
    counts supplied by the dummy syncer fixture.
    """
    store = tp0.stores.live().first()
    DummyStoreSyncer, DummyDiskStore, expected = dummy_store_syncer
    disk_store = DummyDiskStore(expected)
    expected["update_structure"] = True
    expected["changes"] = []
    dummy_syncer = DummyStoreSyncer(store, expected=expected)
    result = dummy_syncer.sync(
        disk_store,
        expected["last_revision"],
        expected["update_structure"],
        expected["conservative"])
    # structure changed; counts come from the fixture's expectations
    assert result[0] is True
    assert result[1]["updated"] == []
    assert result[1]["obsolete"] == 8
    assert result[1]["deleted"] == 9
    assert result[1]["added"] == 10
    # with nothing to obsolete or add, sync reports no change
    expected["obsolete_units"] = []
    expected["new_units"] = []
    expected["changes"] = []
    dummy_syncer = DummyStoreSyncer(store, expected=expected)
    result = dummy_syncer.sync(
        disk_store,
        expected["last_revision"],
        expected["update_structure"],
        expected["conservative"])
    assert result[0] is False
@pytest.mark.django_db
def test_store_syncer_sync_update_structure(dummy_store_structure_syncer, tp0):
    """update_structure returns (obsoleted, deleted, added) counts derived
    from what obsolete_unit reports for each obsolete unit.
    """
    store = tp0.stores.live().first()
    DummyStoreSyncer, DummyDiskStore, DummyUnit = dummy_store_structure_syncer
    expected = dict(
        unit_class="FOO",
        conservative=True,
        # (obsoleted, deleted) result reported per obsolete unit
        obsolete_delete=(True, True),
        obsolete_units=["a", "b", "c"])
    expected["new_units"] = [
        DummyUnit(unit, expected=expected)
        for unit in ["5", "6", "7"]]
    syncer = DummyStoreSyncer(store, expected=expected)
    disk_store = DummyDiskStore(expected)
    result = syncer.update_structure(
        disk_store,
        expected["obsolete_units"],
        expected["new_units"],
        expected["conservative"])
    # each obsolete unit counts once toward obsoleted and once toward
    # deleted, depending on the flags in obsolete_delete
    obsolete_units = (
        len(expected["obsolete_units"])
        if expected["obsolete_delete"][0]
        else 0)
    deleted_units = (
        len(expected["obsolete_units"])
        if expected["obsolete_delete"][1]
        else 0)
    new_units = len(expected["new_units"])
    assert result == (obsolete_units, deleted_units, new_units)
def _test_get_new(results, syncer, old_ids, new_ids):
assert list(results) == list(
syncer.store.findid_bulk(
[syncer.dbid_index.get(uid)
for uid
in new_ids - old_ids]))
def _test_get_obsolete(results, disk_store, syncer, old_ids, new_ids):
assert list(results) == list(
disk_store.findid(uid)
for uid
in old_ids - new_ids
if (disk_store.findid(uid)
and not disk_store.findid(uid).isobsolete()))
@pytest.mark.django_db
def test_store_syncer_obsolete_units(dummy_store_syncer_units, tp0):
    """get_units_to_obsolete yields the live disk units in old_ids that are
    missing from new_ids.
    """
    store = tp0.stores.live().first()
    disk_store = store.syncer.convert()
    # empty id sets - nothing to obsolete
    expected = dict(
        old_ids=set(),
        new_ids=set(),
        disk_ids={})
    syncer = dummy_store_syncer_units(store, expected=expected)
    results = syncer.get_units_to_obsolete(
        disk_store, expected["old_ids"], expected["new_ids"])
    _test_get_obsolete(
        results, disk_store, syncer,
        expected["old_ids"], expected["new_ids"])
    expected = dict(
        old_ids=set(["2", "3", "4"]),
        new_ids=set(["3", "4", "5"]),
        disk_ids={"3": "foo", "4": "bar", "5": "baz"})
    # rebuild the syncer so it carries the updated expectations - mirrors
    # test_store_syncer_new_units; previously the stale syncer was reused
    syncer = dummy_store_syncer_units(store, expected=expected)
    results = syncer.get_units_to_obsolete(
        disk_store, expected["old_ids"], expected["new_ids"])
    _test_get_obsolete(
        results, disk_store, syncer, expected["old_ids"], expected["new_ids"])
@pytest.mark.django_db
def test_store_syncer_new_units(dummy_store_syncer_units, tp0):
    """get_new_units yields the db units in new_ids missing from old_ids."""
    store = tp0.stores.live().first()
    cases = (
        dict(old_ids=set(), new_ids=set(), disk_ids={}, db_ids={}),
        dict(
            old_ids=set(["2", "3", "4"]),
            new_ids=set(["3", "4", "5"]),
            db_ids={"3": "foo", "4": "bar", "5": "baz"}))
    for expected in cases:
        syncer = dummy_store_syncer_units(store, expected=expected)
        results = syncer.get_new_units(
            expected["old_ids"], expected["new_ids"])
        _test_get_new(
            results, syncer, expected["old_ids"], expected["new_ids"])
@pytest.mark.django_db
def test_store_path(store0):
    """Store.path is the pootle_path relative to the translation project."""
    expected = to_tp_relative_path(store0.pootle_path)
    assert store0.path == expected
@pytest.mark.django_db
def test_store_sync_empty(project0_nongnu, tp0, caplog):
    """Syncing an empty store writes the file; re-syncing without changes
    leaves the file's mtime untouched and logs "nothing changed".
    """
    store = StoreDBFactory(
        name="empty.po",
        translation_project=tp0,
        parent=tp0.directory)
    store.sync()
    assert os.path.exists(store.file.path)
    modified = os.stat(store.file.path).st_mtime
    store.sync()
    assert modified == os.stat(store.file.path).st_mtime
    # warning message - nothing changes
    store.sync(conservative=True, only_newer=False)
    assert "nothing changed" in caplog.records[-1].message
    assert modified == os.stat(store.file.path).st_mtime
@pytest.mark.django_db
def test_store_sync_template(project0_nongnu, templates_project0, caplog):
    """A conservative sync leaves a template file untouched; a full sync
    rewrites it.
    """
    template = templates_project0.stores.first()
    template.sync()
    synced_at = os.stat(template.file.path).st_mtime
    # change a unit in the db
    unit = template.units.first()
    unit.target = "NEW TARGET"
    unit.save()
    template.sync(conservative=True)
    assert synced_at == os.stat(template.file.path).st_mtime
    template.sync(conservative=False)
    assert synced_at != os.stat(template.file.path).st_mtime
@pytest.mark.django_db
def test_store_update_with_state_change(store0, admin):
    """Updating units with flipped fuzziness changes their state in the db."""
    # unit id -> (source, target, flipped fuzzy flag)
    units = {
        unit.id: (unit.source, unit.target, not unit.isfuzzy())
        for unit in store0.units}
    update_store(
        store0,
        units=units.values(),
        store_revision=store0.data.max_unit_revision,
        user=admin)
    for unit_id, (__, __, fuzzy) in units.items():
        assert store0.units.get(id=unit_id).isfuzzy() == fuzzy
@pytest.mark.django_db
def test_update_xliff(store_po, test_fs, xliff):
    """A store switched to the xliff filetype updates from xliff files."""
    project = store_po.translation_project.project
    filetype_tool = project.filetype_tool
    project.filetypes.add(xliff)
    filetype_tool.set_store_filetype(store_po, xliff)
    with test_fs.open(['data', 'xliff', 'welcome.xliff']) as fh:
        store_po.update(getclass(fh)(fh.read()))
    unit = store_po.units[0]
    assert unit.istranslated()
    with test_fs.open(['data', 'xliff', 'updated_welcome.xliff']) as fh:
        store_po.update(getclass(fh)(fh.read()))
    # the unit's source changed with the second update
    assert store_po.units.get(id=unit.id).source != unit.source
@pytest.mark.django_db
def test_update_resurrect(store_po, test_fs):
    """Obsolete units are resurrected only when the incoming
    store_revision is not older than the store's max unit revision.
    """
    with test_fs.open(['data', 'po', 'obsolete.po']) as f:
        file_store = getclass(f)(f.read())
    store_po.update(file_store)
    obsolete_units = store_po.unit_set.filter(state=OBSOLETE)
    obsolete_ids = list(obsolete_units.values_list('id', flat=True))
    assert len(obsolete_ids) > 0
    with test_fs.open(['data', 'po', 'resurrected.po']) as f:
        file_store = getclass(f)(f.read())
    store_revision = store_po.data.max_unit_revision
    # set store_revision as we do in update_stores cli command
    # stale revision: the units stay obsolete
    store_po.update(file_store, store_revision=store_revision - 1)
    obsolete_units = store_po.unit_set.filter(state=OBSOLETE)
    assert obsolete_units.count() == len(obsolete_ids)
    for unit in obsolete_units.filter(id__in=obsolete_ids):
        assert unit.isobsolete()
    # set store_revision as we do in update_stores cli command
    # current revision: the units are resurrected
    store_po.update(file_store, store_revision=store_revision)
    units = store_po.units.filter(id__in=obsolete_ids)
    assert units.count() == len(obsolete_ids)
    for unit in units:
        assert not unit.isobsolete()
@pytest.mark.django_db
def test_store_comment_update(store0, member):
    """Updating a unit's translator comment from a file records the
    comment, the commenting user, and exactly one COMMENT submission.
    """
    ttk = store0.deserialize(store0.serialize())
    fileunit = ttk.units[-1]
    fileunit.removenotes()
    fileunit.addnote("A new comment")
    unit = store0.findid(fileunit.getid())
    # remember the latest submission pk so new submissions can be isolated
    last_sub_pk = unit.submission_set.order_by(
        "id").values_list("id", flat=True).last() or 0
    store0.update(
        ttk, store_revision=store0.data.max_unit_revision + 1,
        user=member
    )
    assert ttk.units[-1].getnotes("translator") == "A new comment"
    unit = store0.units.get(id=unit.id)
    assert unit.translator_comment == "A new comment"
    assert unit.change.commented_by == member
    # exactly one new submission, recording the comment change
    new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
    assert new_subs.count() == 1
    comment_sub = new_subs[0]
    assert comment_sub.old_value == ""
    assert comment_sub.new_value == "A new comment"
    assert comment_sub.field == SubmissionFields.COMMENT
    assert comment_sub.type == SubmissionTypes.SYSTEM
    assert comment_sub.submitter == member
    assert comment_sub.revision == unit.revision
    assert comment_sub.creation_time == unit.change.commented_on
|
import pilas
import json
from pilas.escena import Base
from general import General
from individual import Individual
class jugadores(Base):
    """Scene offering the choice between the "General" and "Individual"
    views, plus a way back to the previous scene.
    """

    def __init__(self):
        Base.__init__(self)

    def fondo(self):
        # background image for this scene
        pilas.fondos.Fondo("data/img/fondos/aplicacion.jpg")

    def general(self):
        # button click sound, then switch to the General scene
        self.sonido_boton.reproducir()
        pilas.almacenar_escena(General())

    def individual(self):
        # button click sound, then switch to the Individual scene
        self.sonido_boton.reproducir()
        pilas.almacenar_escena(Individual())

    def volver(self):
        # button click sound, then return to the previous scene
        self.sonido_boton.reproducir()
        pilas.recuperar_escena()

    def iniciar(self):
        # scene entry point: set up background, sounds and interface
        self.fondo()
        self.sonido_boton = pilas.sonidos.cargar("data/audio/boton.ogg")
        self.interfaz()
        self.mostrar()

    def interfaz(self):
        # menu options mapped to their handlers
        opciones = [
            ("General", self.general),
            ("Individual", self.individual),
            ("Volver", self.volver)]
        menu = pilas.actores.Menu(
            opciones, y=50, fuente="data/fonts/American Captain.ttf")
        menu.escala = 1.3
        titulo = pilas.actores.Actor(
            "data/img/enunciados/estadisticas.png", y=250)
        titulo.escala = 0.3
|
import logging
logging.basicConfig()
from enum import Enum
# module-level logger for loopabull; default verbosity is INFO
logger = logging.getLogger('loopabull')
logger.setLevel(logging.INFO)
class Result(Enum):
    """Enumeration of outcomes reported by loopabull processing."""
    runfinished = 1  # a run completed
    runerrored = 2   # a run ended with an error
    unrouted = 3     # the event matched no route
    error = 4        # generic failure
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
# Optional third-party imports: on failure, keep the traceback so
# check_required_library() can report it through fail_json later.
REQUESTS_IMP_ERR = None
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    REQUESTS_IMP_ERR = traceback.format_exc()
    HAS_REQUESTS = False
PYVMOMI_IMP_ERR = None
try:
    from pyVim import connect
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    PYVMOMI_IMP_ERR = traceback.format_exc()
    HAS_PYVMOMI = False
VSPHERE_IMP_ERR = None
try:
    from com.vmware.vapi.std_client import DynamicID
    from vmware.vapi.vsphere.client import create_vsphere_client
    from com.vmware.vapi.std.errors_client import Unauthorized
    from com.vmware.content.library_client import Item
    from com.vmware.vcenter_client import (Folder,
                                           Datacenter,
                                           ResourcePool,
                                           Datastore,
                                           Cluster,
                                           Host)
    HAS_VSPHERE = True
except ImportError:
    VSPHERE_IMP_ERR = traceback.format_exc()
    HAS_VSPHERE = False
from ansible.module_utils.basic import env_fallback, missing_required_lib
class VmwareRestClient(object):
    def __init__(self, module):
        """
        Constructor

        Args:
            module: AnsibleModule whose params carry the connection
                options (hostname, username, password, validate_certs).
        """
        self.module = module
        self.params = module.params
        # fail fast if requests / PyVmomi / vSphere SDK are missing
        self.check_required_library()
        self.api_client = self.connect_to_vsphere_client()
# Helper function
def get_error_message(self, error):
"""
Helper function to show human readable error messages.
"""
err_msg = []
if not error.messages:
if isinstance(error, Unauthorized):
return "Authorization required."
return "Generic error occurred."
for err in error.messages:
err_msg.append(err.default_message % err.args)
return " ,".join(err_msg)
    def check_required_library(self):
        """
        Check required libraries

        Fails the module (with the captured import traceback) when any of
        requests, PyVmomi or the vSphere Automation SDK is unavailable.
        """
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'),
                                  exception=REQUESTS_IMP_ERR)
        if not HAS_PYVMOMI:
            self.module.fail_json(msg=missing_required_lib('PyVmomi'),
                                  exception=PYVMOMI_IMP_ERR)
        if not HAS_VSPHERE:
            self.module.fail_json(
                msg=missing_required_lib('vSphere Automation SDK',
                                         url='https://code.vmware.com/web/sdk/65/vsphere-automation-python'),
                exception=VSPHERE_IMP_ERR)
@staticmethod
def vmware_client_argument_spec():
return dict(
hostname=dict(type='str',
fallback=(env_fallback, ['VMWARE_HOST'])),
username=dict(type='str',
fallback=(env_fallback, ['VMWARE_USER']),
aliases=['user', 'admin']),
password=dict(type='str',
fallback=(env_fallback, ['VMWARE_PASSWORD']),
aliases=['pass', 'pwd'],
no_log=True),
protocol=dict(type='str',
default='https',
choices=['https', 'http']),
validate_certs=dict(type='bool',
fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']),
default=True),
)
    def connect_to_vsphere_client(self):
        """
        Connect to vSphere API Client with Username and Password

        Returns:
            The authenticated vSphere client.

        Fails the module when credentials are missing or login fails.
        """
        username = self.params.get('username')
        password = self.params.get('password')
        hostname = self.params.get('hostname')
        session = requests.Session()
        # honour validate_certs for TLS verification of the session
        session.verify = self.params.get('validate_certs')
        if not all([hostname, username, password]):
            self.module.fail_json(msg="Missing one of the following : hostname, username, password."
                                      " Please read the documentation for more information.")
        client = create_vsphere_client(
            server=hostname,
            username=username,
            password=password,
            session=session)
        if client is None:
            self.module.fail_json(msg="Failed to login to %s" % hostname)
        return client
def get_tags_for_object(self, tag_service=None, tag_assoc_svc=None, dobj=None):
"""
Return list of tag objects associated with an object
Args:
dobj: Dynamic object
tag_service: Tag service object
tag_assoc_svc: Tag Association object
Returns: List of tag objects associated with the given object
"""
# This method returns list of tag objects only,
# Please use get_tags_for_dynamic_obj for more object details
tags = []
if not dobj:
return tags
if not tag_service:
tag_service = self.api_client.tagging.Tag
if not tag_assoc_svc:
tag_assoc_svc = self.api_client.tagging.TagAssociation
tag_ids = tag_assoc_svc.list_attached_tags(dobj)
for tag_id in tag_ids:
tags.append(tag_service.get(tag_id))
return tags
def get_tags_for_dynamic_obj(self, mid=None, type=None):
"""
Return list of tag object details associated with object
Args:
mid: Dynamic object for specified object
type: Type of DynamicID to lookup
Returns: List of tag object details associated with the given object
"""
tags = []
if mid is None:
return tags
dynamic_managed_object = DynamicID(type=type, id=mid)
temp_tags_model = self.get_tags_for_object(dobj=dynamic_managed_object)
category_service = self.api_client.tagging.Category
for tag_obj in temp_tags_model:
tags.append({
'id': tag_obj.id,
'category_name': category_service.get(tag_obj.category_id).name,
'name': tag_obj.name,
'description': tag_obj.description,
'category_id': tag_obj.category_id,
})
return tags
    def get_tags_for_cluster(self, cluster_mid=None):
        """
        Return list of tag object associated with cluster
        Args:
            cluster_mid: Dynamic object for cluster
        Returns: List of tag object associated with the given cluster
        """
        # delegates with the vSphere managed-object type for clusters
        return self.get_tags_for_dynamic_obj(mid=cluster_mid, type='ClusterComputeResource')
def get_tags_for_hostsystem(self, hostsystem_mid=None):
    """
    Return tag detail dicts attached to a host system.
    Args:
        hostsystem_mid: Managed object ID of the host system
    Returns: List of tag detail dicts (see get_tags_for_dynamic_obj)
    """
    # Thin convenience wrapper fixing the DynamicID type.
    return self.get_tags_for_dynamic_obj(mid=hostsystem_mid, type='HostSystem')
def get_tags_for_vm(self, vm_mid=None):
    """
    Return tag detail dicts attached to a virtual machine.
    Args:
        vm_mid: Managed object ID of the virtual machine
    Returns: List of tag detail dicts (see get_tags_for_dynamic_obj)
    """
    # Thin convenience wrapper fixing the DynamicID type.
    return self.get_tags_for_dynamic_obj(mid=vm_mid, type='VirtualMachine')
def get_vm_tags(self, tag_service=None, tag_association_svc=None, vm_mid=None):
    """
    Return the names of the tags attached to a virtual machine.
    Args:
        tag_service: Tag service object
        tag_association_svc: Tag association object
        vm_mid: Managed object ID of the virtual machine
    Returns: List of tag names associated with the given virtual machine
    """
    # Names only; use get_tags_for_vm for full tag object details.
    if vm_mid is None:
        return []
    dynamic_obj = DynamicID(type='VirtualMachine', id=vm_mid)
    attached = self.get_tags_for_object(tag_service, tag_association_svc, dynamic_obj)
    return [tag.name for tag in attached]
def get_library_item_by_name(self, name):
    """
    Return the identifier of the content-library item with the given name.
    Args:
        name (str): The name of item to look for
    Returns:
        str: The item ID, or None if no item matches
    """
    spec = Item.FindSpec(name=name)
    matches = self.api_client.content.library.Item.find(spec)
    return matches[0] if matches else None
def get_datacenter_by_name(self, datacenter_name):
    """
    Return the identifier of the named datacenter, or None.
    Note: assumes at most one datacenter carries the given name.
    """
    spec = Datacenter.FilterSpec(names={datacenter_name})
    summaries = self.api_client.vcenter.Datacenter.list(spec)
    if not summaries:
        return None
    return summaries[0].datacenter
def get_folder_by_name(self, datacenter_name, folder_name):
    """
    Return the identifier of the named VM folder inside the named
    datacenter, or None if either lookup fails.
    """
    datacenter = self.get_datacenter_by_name(datacenter_name)
    if not datacenter:
        return None
    spec = Folder.FilterSpec(
        type=Folder.Type.VIRTUAL_MACHINE,
        names={folder_name},
        datacenters={datacenter},
    )
    summaries = self.api_client.vcenter.Folder.list(spec)
    if not summaries:
        return None
    return summaries[0].folder
def get_resource_pool_by_name(self, datacenter_name, resourcepool_name):
    """
    Return the identifier of the named resource pool inside the named
    datacenter, or None. A falsy pool name matches any pool.
    """
    datacenter = self.get_datacenter_by_name(datacenter_name)
    if not datacenter:
        return None
    # None means "no name filter" to the vSphere API.
    pool_names = {resourcepool_name} if resourcepool_name else None
    spec = ResourcePool.FilterSpec(datacenters={datacenter}, names=pool_names)
    summaries = self.api_client.vcenter.ResourcePool.list(spec)
    if not summaries:
        return None
    return summaries[0].resource_pool
def get_datastore_by_name(self, datacenter_name, datastore_name):
    """
    Return the identifier of the named datastore inside the named
    datacenter, or None. A falsy datastore name matches any datastore.
    """
    datacenter = self.get_datacenter_by_name(datacenter_name)
    if not datacenter:
        return None
    # None means "no name filter" to the vSphere API.
    ds_names = {datastore_name} if datastore_name else None
    spec = Datastore.FilterSpec(datacenters={datacenter}, names=ds_names)
    summaries = self.api_client.vcenter.Datastore.list(spec)
    if not summaries:
        return None
    return summaries[0].datastore
def get_cluster_by_name(self, datacenter_name, cluster_name):
    """
    Return the identifier of the named cluster inside the named
    datacenter, or None. A falsy cluster name matches any cluster.
    """
    datacenter = self.get_datacenter_by_name(datacenter_name)
    if not datacenter:
        return None
    # None means "no name filter" to the vSphere API.
    cluster_names = {cluster_name} if cluster_name else None
    spec = Cluster.FilterSpec(datacenters={datacenter}, names=cluster_names)
    summaries = self.api_client.vcenter.Cluster.list(spec)
    if not summaries:
        return None
    return summaries[0].cluster
def get_host_by_name(self, datacenter_name, host_name):
    """
    Return the identifier of the named host inside the named
    datacenter, or None. A falsy host name matches any host.
    """
    datacenter = self.get_datacenter_by_name(datacenter_name)
    if not datacenter:
        return None
    # None means "no name filter" to the vSphere API.
    host_names = {host_name} if host_name else None
    spec = Host.FilterSpec(datacenters={datacenter}, names=host_names)
    summaries = self.api_client.vcenter.Host.list(spec)
    if not summaries:
        return None
    return summaries[0].host
@staticmethod
def search_svc_object_by_name(service, svc_obj_name=None):
"""
Return service object by name
Args:
service: Service object
svc_obj_name: Name of service object to find
Returns: Service object if found else None
"""
if not svc_obj_name:
return None
for svc_object in service.list():
svc_obj = service.get(svc_object)
if svc_obj.name == svc_obj_name:
return svc_obj
return None
|
class CachedProperty(object):
    """A property that is only computed once per instance and
    then stores the result in _cached_properties of the object.
    Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
    """

    def __init__(self, func):
        # Propagate the wrapped function's docstring to the descriptor.
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        """Return the cached value, computing and storing it on first access."""
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        propname = self.func.__name__
        if not hasattr(obj, '_cached_properties'):
            obj._cached_properties = {}
        if propname not in obj._cached_properties:
            obj._cached_properties[propname] = self.func(obj)
        return obj._cached_properties[propname]

    @staticmethod
    def clear(obj):
        """Clear all cached property values of *obj*."""
        if hasattr(obj, '_cached_properties'):
            obj._cached_properties = {}

    @staticmethod
    def is_cached(obj, propname):
        """Return True if *propname* already has a cached value on *obj*."""
        # Return the boolean expression directly instead of an if/else
        # returning literal True/False; also removed dead commented-out code.
        return hasattr(obj, '_cached_properties') and propname in obj._cached_properties
|
from vsg.rules import token_prefix
from vsg import token
# Token types whose identifiers this rule validates.
lTokens = [token.signal_declaration.identifier]
class rule_008(token_prefix):
    '''
    This rule checks for valid prefixes on signal identifiers.
    Default signal prefix is *s\_*.
    |configuring_prefix_and_suffix_rules_link|
    **Violation**
    .. code-block:: vhdl
    signal wr_en : std_logic;
    signal rd_en : std_logic;
    **Fix**
    .. code-block:: vhdl
    signal s_wr_en : std_logic;
    signal s_rd_en : std_logic;
    '''

    def __init__(self):
        # Rule identity: group 'signal', number '008', applied to the
        # signal-declaration identifier tokens collected above.
        token_prefix.__init__(self, 'signal', '008', lTokens)
        self.solution = 'Signal identifiers'
        self.prefixes = ['s_']
|
import urllib2, json, os, sys, re
def download_asset(path, url):
    """Download *url* into directory *path*.

    Returns the local file path on success, or None when the file is
    already present (download skipped). Exits the process on failure.
    """
    file_name = os.path.basename(url)
    asset_path = os.path.join(path, file_name)
    if os.path.exists(asset_path):
        # Already downloaded on a previous run; signal "skipped" with None.
        return None
    try:
        if not os.path.exists(path):
            os.makedirs(path)
        f = urllib2.urlopen(url)
        with open(asset_path, "wb") as local_file:
            local_file.write(f.read())
    except Exception as e:
        # BUG FIX: the original returned from a `finally` block, which
        # swallowed the SystemExit raised by sys.exit() and silently
        # returned a possibly-partial path instead of aborting.
        sys.exit('Failed to fetch IDE. Error: {0}'.format(e))
    return asset_path
def handle_release_assets(assets):
    """Select the single x86_64 IDE asset from *assets* and download it.

    Exits the process when zero or more than one asset matches.
    """
    ide_assets = [a for a in assets
                  if re.match(r'redhawk-ide.+?(?=x86_64)', a['name'])]
    if not ide_assets:
        sys.exit('Failed to find the IDE asset')
    if len(ide_assets) > 1:
        sys.exit('Found too many IDE assets matching that description...?')
    return download_asset('downloads', ide_assets[0]['browser_download_url'])
def run(pv):
    """Return the downloaded IDE asset path for release tag *pv*.

    Exits the process when the release (or its IDE asset) is not found.
    """
    RELEASES_URL = 'http://api.github.com/repos/RedhawkSDR/redhawk/releases'
    releases = json.loads(urllib2.urlopen(RELEASES_URL).read())
    releases = [r for r in releases if r['tag_name'] == pv]
    if not releases:
        # BUG FIX: the original wrapped this in try/finally with a
        # `return` in the finally block, which swallowed the SystemExit
        # raised by sys.exit() and returned '' instead of aborting.
        sys.exit('Failed to find the release: {0}'.format(pv))
    return handle_release_assets(releases[0]['assets'])
if __name__ == '__main__':
    # First argument is the version
    # NOTE: Python 2 script (statement-form print, urllib2 above).
    asset = run(sys.argv[1])
    print asset
|
from setuptools import setup
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as fh:
        return fh.read()
setup(name='MapperTools',
      packages=['MapperTools'],
      version='0.1',
      description='A python 2.7 implementation of Mapper algorithm for Topological Data Analysis',
      keywords='mapper TDA python',
      long_description=readme(),
      url='http://github.com/alpatania',
      author='Alice Patania',
      author_email='alice.patania@gmail.com',
      license='MIT',
      classifiers=['Development Status :: 3 - Alpha',
                   'License :: OSI Approved :: MIT License',
                   'Programming Language :: Python :: 2.7'],
      # FIX: 'collections' is a standard-library module, not a PyPI
      # distribution, and scikit-learn is published as 'scikit-learn'
      # (the bare 'sklearn' name is a deprecated alias) -- the previous
      # list broke `pip install`.
      install_requires=['hdbscan', 'scikit-learn', 'pandas'],
      include_package_data=True,
      zip_safe=False)
|
from .meta import classproperty
class AtomData(object):
    """Static tables of amino-acid atom data and name conversions.

    FIX: dict.iteritems() (Python 2 only) replaced with dict.items(),
    which behaves the same here on both Python 2 and 3.
    """

    # Maximum ASA for each residue
    # from Miller et al. 1987, JMB 196: 641-656
    total_asa = {
        'A': 113.0,
        'R': 241.0,
        'N': 158.0,
        'D': 151.0,
        'C': 140.0,
        'Q': 189.0,
        'E': 183.0,
        'G': 85.0,
        'H': 194.0,
        'I': 182.0,
        'L': 180.0,
        'K': 211.0,
        'M': 204.0,
        'F': 218.0,
        'P': 143.0,
        'S': 122.0,
        'T': 146.0,
        'W': 259.0,
        'Y': 229.0,
        'V': 160.0,
    }

    @classmethod
    def is_surface(cls, resn, asa, total_asa=None, cutoff=0.1):
        """Return True if ratio of residue ASA to max ASA >= cutoff"""
        if total_asa is None:
            total_asa = cls.total_asa
        resn = resn.upper()
        if len(resn) == 3:
            resn = cls.three_to_one[resn]
        return float(asa) / total_asa[resn] >= cutoff

    three_to_full = {
        'Val': 'Valine', 'Ile': 'Isoleucine', 'Leu': 'Leucine',
        'Glu': 'Glutamic acid', 'Gln': 'Glutamine',
        'Asp': 'Aspartic acid', 'Asn': 'Asparagine', 'His': 'Histidine',
        'Trp': 'Tryptophan', 'Phe': 'Phenylalanine', 'Tyr': 'Tyrosine',
        'Arg': 'Arginine', 'Lys': 'Lysine',
        'Ser': 'Serine', 'Thr': 'Threonine',
        'Met': 'Methionine', 'Ala': 'Alanine',
        'Gly': 'Glycine', 'Pro': 'Proline', 'Cys': 'Cysteine'}

    three_to_one = {
        'VAL': 'V', 'ILE': 'I', 'LEU': 'L', 'GLU': 'E', 'GLN': 'Q',
        'ASP': 'D', 'ASN': 'N', 'HIS': 'H', 'TRP': 'W', 'PHE': 'F', 'TYR': 'Y',
        'ARG': 'R', 'LYS': 'K', 'SER': 'S', 'THR': 'T', 'MET': 'M', 'ALA': 'A',
        'GLY': 'G', 'PRO': 'P', 'CYS': 'C'}

    # .items() instead of .iteritems(): works on Python 2 and 3. (The
    # leftmost iterable of a comprehension is evaluated in class scope,
    # so referring to three_to_one here is valid.)
    one_to_three = {o: t for t, o in three_to_one.items()}

    @classproperty
    def one_to_full(cls):
        """
        This can't see three_to_full unless explicitly passed because
        dict comprehensions create their own local scope
        """
        return {o: cls.three_to_full[t.title()] for t, o in cls.three_to_one.items()}

    res_atom_list = dict(
        ALA=['C', 'CA', 'CB', 'N', 'O'],
        ARG=['C', 'CA', 'CB', 'CD', 'CG', 'CZ', 'N', 'NE', 'NH1', 'NH2', 'O'],
        ASN=['C', 'CA', 'CB', 'CG', 'N', 'ND2', 'O', 'OD1'],
        ASP=['C', 'CA', 'CB', 'CG', 'N', 'O', 'OD1', 'OD2'],
        CYS=['C', 'CA', 'CB', 'N', 'O', 'SG'],
        GLN=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'NE2', 'O', 'OE1'],
        GLU=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'O', 'OE1', 'OE2'],
        GLY=['C', 'CA', 'N', 'O'],
        HIS=['C', 'CA', 'CB', 'CD2', 'CE1', 'CG', 'N', 'ND1', 'NE2', 'O'],
        ILE=['C', 'CA', 'CB', 'CD1', 'CG1', 'CG2', 'N', 'O'],
        LEU=['C', 'CA', 'CB', 'CD1', 'CD2', 'CG', 'N', 'O'],
        LYS=['C', 'CA', 'CB', 'CD', 'CE', 'CG', 'N', 'NZ', 'O'],
        MET=['C', 'CA', 'CB', 'CE', 'CG', 'N', 'O', 'SD'],
        PHE=['C', 'CA', 'CB', 'CD1', 'CD2',
             'CE1', 'CE2', 'CG', 'CZ', 'N', 'O'],
        PRO=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'O'],
        SER=['C', 'CA', 'CB', 'N', 'O', 'OG'],
        THR=['C', 'CA', 'CB', 'CG2', 'N', 'O', 'OG1'],
        TRP=['C', 'CA', 'CB', 'CD1', 'CD2', 'CE2',
             'CE3', 'CG', 'CH2', 'CZ2', 'CZ3', 'N', 'NE1', 'O'],
        TYR=['C', 'CA', 'CB', 'CD1', 'CD2',
             'CE1', 'CE2', 'CG', 'CZ', 'N', 'O', 'OH'],
        VAL=['C', 'CA', 'CB', 'CG1', 'CG2', 'N', 'O'],
    )

    # Atom quadruples defining each chi dihedral per residue.
    all_chi = dict(
        chi1=dict(
            ARG=['N', 'CA', 'CB', 'CG'],
            ASN=['N', 'CA', 'CB', 'CG'],
            ASP=['N', 'CA', 'CB', 'CG'],
            CYS=['N', 'CA', 'CB', 'SG'],
            GLN=['N', 'CA', 'CB', 'CG'],
            GLU=['N', 'CA', 'CB', 'CG'],
            HIS=['N', 'CA', 'CB', 'CG'],
            ILE=['N', 'CA', 'CB', 'CG1'],
            LEU=['N', 'CA', 'CB', 'CG'],
            LYS=['N', 'CA', 'CB', 'CG'],
            MET=['N', 'CA', 'CB', 'CG'],
            PHE=['N', 'CA', 'CB', 'CG'],
            PRO=['N', 'CA', 'CB', 'CG'],
            SER=['N', 'CA', 'CB', 'OG'],
            THR=['N', 'CA', 'CB', 'OG1'],
            TRP=['N', 'CA', 'CB', 'CG'],
            TYR=['N', 'CA', 'CB', 'CG'],
            VAL=['N', 'CA', 'CB', 'CG1'],
        ),
        chi2=dict(
            ARG=['CA', 'CB', 'CG', 'CD'],
            ASN=['CA', 'CB', 'CG', 'OD1'],
            ASP=['CA', 'CB', 'CG', 'OD1'],
            GLN=['CA', 'CB', 'CG', 'CD'],
            GLU=['CA', 'CB', 'CG', 'CD'],
            HIS=['CA', 'CB', 'CG', 'ND1'],
            ILE=['CA', 'CB', 'CG1', 'CD1'],
            LEU=['CA', 'CB', 'CG', 'CD1'],
            LYS=['CA', 'CB', 'CG', 'CD'],
            MET=['CA', 'CB', 'CG', 'SD'],
            PHE=['CA', 'CB', 'CG', 'CD1'],
            PRO=['CA', 'CB', 'CG', 'CD'],
            TRP=['CA', 'CB', 'CG', 'CD1'],
            TYR=['CA', 'CB', 'CG', 'CD1'],
        ),
        chi3=dict(
            ARG=['CB', 'CG', 'CD', 'NE'],
            GLN=['CB', 'CG', 'CD', 'OE1'],
            GLU=['CB', 'CG', 'CD', 'OE1'],
            LYS=['CB', 'CG', 'CD', 'CE'],
            MET=['CB', 'CG', 'SD', 'CE'],
        ),
        chi4=dict(
            ARG=['CG', 'CD', 'NE', 'CZ'],
            LYS=['CG', 'CD', 'CE', 'NZ'],
        ),
        chi5=dict(
            ARG=['CD', 'NE', 'CZ', 'NH1'],
        ),
    )

    # Alternative chi definitions for symmetric/ambiguous side chains.
    alt_chi = dict(
        chi1=dict(
            VAL=['N', 'CA', 'CB', 'CG2'],
        ),
        chi2=dict(
            ASP=['CA', 'CB', 'CG', 'OD2'],
            LEU=['CA', 'CB', 'CG', 'CD2'],
            PHE=['CA', 'CB', 'CG', 'CD2'],
            TYR=['CA', 'CB', 'CG', 'CD2'],
        ),
    )

    # All atoms participating in any chi dihedral, per residue.
    chi_atoms = dict(
        ARG=set(['CB', 'CA', 'CG', 'NE', 'N', 'CZ', 'NH1', 'CD']),
        ASN=set(['CB', 'CA', 'N', 'CG', 'OD1']),
        ASP=set(['CB', 'CA', 'N', 'CG', 'OD1', 'OD2']),
        CYS=set(['CB', 'CA', 'SG', 'N']),
        GLN=set(['CB', 'CA', 'CG', 'N', 'CD', 'OE1']),
        GLU=set(['CB', 'CA', 'CG', 'N', 'CD', 'OE1']),
        HIS=set(['ND1', 'CB', 'CA', 'CG', 'N']),
        ILE=set(['CG1', 'CB', 'CA', 'CD1', 'N']),
        LEU=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
        LYS=set(['CB', 'CA', 'CG', 'CE', 'N', 'NZ', 'CD']),
        MET=set(['CB', 'CA', 'CG', 'CE', 'N', 'SD']),
        PHE=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
        PRO=set(['CB', 'CA', 'N', 'CG', 'CD']),
        SER=set(['OG', 'CB', 'CA', 'N']),
        THR=set(['CB', 'CA', 'OG1', 'N']),
        TRP=set(['CB', 'CA', 'CG', 'CD1', 'N']),
        TYR=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
        VAL=set(['CG1', 'CG2', 'CB', 'CA', 'N']),
    )
|
import sys
import subprocess

# Run the shell test suite and exit with its return code.
# FIX: the original used Popen(..., shell=True) and read
# communicate()[0], which is always None when stdout is not piped --
# the captured "text" variable was dead. subprocess.call with an
# argument list avoids the shell and returns the exit status directly.
sys.exit(subprocess.call(['sh', 'test.sh']))
|
# Console-script shim: dispatches to the 'easy_install' entry point of a
# pinned setuptools version and exits with its return value.
__requires__ = 'setuptools==0.6c11'
import sys
from pkg_resources import load_entry_point

sys.exit(
    load_entry_point('setuptools==0.6c11', 'console_scripts', 'easy_install')()
)
|
# Scrape HTTP(S) proxies from www.xici.net.co, test each one against a
# known page, and record the working ones in proxy.txt.
import urllib.request
from bs4 import BeautifulSoup
import time

# Browser-like headers; defined here but only the per-request User-Agent
# set below is actually sent -- kept for reference.
req_header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
              'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
              #'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
              'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
              'Accept-Encoding':'en-us',
              'Connection':'keep-alive',
              'Referer':'http://www.baidu.com/'
              }
req_timeout = 5  # seconds allowed per proxy test request
testUrl = "http://www.baidu.com/"  # page fetched through each candidate proxy
testStr = "wahaha"  # marker searched for in the fetched page
file1 = open('proxy.txt' , 'w')  # working proxies are written here
import http.cookiejar
cjar = http.cookiejar.CookieJar()
cookies = urllib.request.HTTPCookieProcessor(cjar)
checked_num = 0  # proxies that passed the test fetch
grasp_num = 0  # proxy entries scraped from the listing pages
# Scrape listing pages 1 and 2.
for page in range(1, 3):
    # req = urllib2.Request('http://www.xici.net.co/nn/' + str(page), None, req_header)
    # html_doc = urllib2.urlopen(req, None, req_timeout).read()
    req = urllib.request.Request('http://www.xici.net.co/nn/' + str(page))
    req.add_header('User-Agent',
                   "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1")
    html_doc = urllib.request.urlopen(req).read().decode('utf-8')
    # html_doc = urllib2.urlopen('http://www.xici.net.co/nn/' + str(page)).read()
    # NOTE(review): no parser argument -- bs4 picks one and warns; also
    # find() returns None if the table is missing, which would raise here.
    soup = BeautifulSoup(html_doc)
    trs = soup.find('table', id='ip_list').find_all('tr')
    print(trs)
    # First row is the table header; data rows follow.
    for tr in trs[1:]:
        tds = tr.find_all('td')
        ip = tds[1].text.strip()
        port = tds[2].text.strip()
        protocol = tds[5].text.strip()
        if protocol == 'HTTP' or protocol == 'HTTPS':
            #of.write('%s=%s:%s\n' % (protocol, ip, port))
            print('%s=%s:%s' % (protocol, ip, port))
            grasp_num +=1
            proxyHandler = urllib.request.ProxyHandler({"http": r'http://%s:%s' % (ip, port)})
            opener = urllib.request.build_opener(cookies, proxyHandler)
            opener.addheaders = [('User-Agent',
                                  'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]
            t1 = time.time()
            try:
                req = opener.open(testUrl, timeout=req_timeout)
                result = req.read()
                timeused = time.time() - t1
                # NOTE(review): result is bytes while testStr is str, so
                # bytes.find(str) raises TypeError on Python 3 -- every
                # proxy lands in the except branch; testStr likely needs
                # to be b"wahaha". Verify intended behavior.
                pos = result.find(testStr)
                if pos > 1:
                    file1.write(protocol+"\t"+ip+"\t"+port+"\n")
                    checked_num+=1
                    print(checked_num, grasp_num)
                else:
                    continue
            except Exception as e:
                print(str(e))
                continue
file1.close()
print(checked_num,grasp_num)
|
# Monte-Carlo estimate of pi with TensorFlow 1.x: draw uniform points in
# [-1, 1]^2, count the fraction inside the unit circle, and plot how the
# running estimate converges to math.pi.
import tensorflow as tf
import matplotlib.pyplot as plt
import math

x_node = tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32,
                           name='x_node')
y_node = tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32,
                           name='y_node')
times = 5000  # number of iterations (range(1, times) draws times-1 samples)
hits = 0  # samples that landed inside the unit circle
pis = []  # running pi estimates, one per iteration, for the plot
with tf.Session() as session:
    for i in range(1, times):
        # Each session.run evaluates its random node afresh, giving an
        # independent uniform draw per coordinate.
        x = session.run(x_node)
        y = session.run(y_node)
        if x*x + y*y < 1:
            hits += 1
            pass
        # Running estimate: area ratio (hits/i) times the 4-unit square.
        pi = 4 * float(hits) / i
        print(pi)
        pis.append(pi)
        pass
    pass
plt.plot(pis)
# Horizontal reference line at the true value of pi.
plt.plot([0, times], [math.pi, math.pi])
plt.show()
|
import sys
import os
import configparser
import logging
log = logging.getLogger(__name__)
class gcfg(object):
    """Gazee configuration manager.

    Locates the data directory, reads/creates app.ini, and exports the
    [GLOBAL] section as module-level constants via globalize().
    """
    datapath = None  # resolved data directory
    cfgpath = None  # full path to app.ini
    # Values used to seed the [DEFAULT] section of app.ini.
    defaults = {'bind_address': '127.0.0.1',
                'port': '4242',
                'data_dir': '~/.gazee',
                'temp_dir': '',
                'comic_path': '',
                'comic_scan_interval': '60',
                'comics_per_page': '15',
                'thumb_maxwidth': '300',
                'thumb_maxheight': '400',
                'image_script': '0',
                'mylar_db': '',
                'ssl_key': '',
                'ssl_cert': '',
                'web_text_color': 'ffffff',
                'main_color': '757575',
                'accent_color': 'bdbdbd'}

    def __init__(self, data_override=None):
        """Create the config, optionally forcing the data dir to *data_override*."""
        self.cfg = configparser.ConfigParser()
        self.datapath = data_override
        self.logpath = None
        self.dbpath = None
        self.sessionspath = None
        print("Created a new gcfg...")
        if self.datapath is not None:
            self.datapath = os.path.realpath(os.path.expanduser(self.datapath))
        if self.datapath is None and data_override is not None:
            log.error("Somehow the datapath is now None.")
        self.configRead()
        log.debug("Initialized configation... in %s", __name__)

    def create_init_dirs(self, data_dir):
        ''' Sets up the data_dir plus the two paths that aren't
            configurable, and are relative to the data_dir - the
            log_dir and db_dir
        '''
        if self.datapath is not None and data_dir is None:
            log.error("data_dir is None while datapath is not.")
        self.datapath = data_dir
        self.logpath = os.path.join(self.datapath, "logs")
        self.dbpath = os.path.join(self.datapath, "db")
        self.sessionspath = os.path.join(self.datapath, "sessions")
        # 0o700: directories may hold private data; owner-only access.
        if not os.path.exists(self.logpath):
            os.makedirs(self.logpath, 0o700)
        if not os.path.exists(self.dbpath):
            os.makedirs(self.dbpath, 0o700)
        if not os.path.exists(self.sessionspath):
            os.makedirs(self.sessionspath, 0o700)

    def find_config(self):
        ''' Looks for where the data dir is located.
            Once it finds the dir, it calls create_
        '''
        dirfound = None  # directory in which app.ini was found
        firstdir = None  # first existing candidate directory
        cfgfound = None  # full path of the found app.ini, if any
        if self.datapath is not None:
            # Explicit data dir: offer to create it if missing.
            if not os.path.exists(self.datapath):
                msg = 'Path %s does not exist.\n\nDo you wish to create it? [y/n]: ' % self.datapath
                if self.get_yn(msg):
                    try:
                        os.makedirs(self.datapath)
                    except PermissionError:
                        print("You don't have the permissions to create that path.\nExiting.")
                        sys.exit(1)
                else:
                    print("Exiting.")
                    sys.exit(1)
            firstdir = dirfound = self.datapath
            cfile = os.path.join(dirfound, "app.ini")
            if os.path.exists(cfile):
                cfgfound = cfile
            else:
                cfgfound = None
        else:
            # No explicit dir: probe the usual candidate locations.
            dirs = ['data', '~/.gazee', '../data']
            for d in dirs:
                ddir = os.path.realpath(os.path.expanduser(d))
                cfile = os.path.join(ddir, "app.ini")
                if os.path.exists(ddir) and os.path.isdir(ddir):
                    if firstdir is None:
                        firstdir = ddir
                    dirfound = ddir
                    if os.path.exists(cfile):
                        cfgfound = cfile
                        break
        if dirfound is None:
            log.error("Data directory not found!")
            return False
        # NOTE(review): datapath is always reset to the first existing
        # candidate even when app.ini was found in a later one -- confirm
        # this is intended.
        dirfound = firstdir
        self.datapath = dirfound
        self.create_init_dirs(dirfound)
        if cfgfound is not None:
            log.debug('cfgfound=%s', cfgfound)
            self.cfgpath = cfgfound
        else:
            # No config anywhere: write a fresh app.ini seeded from defaults.
            cfile = os.path.join(self.datapath, 'app.ini')
            self.cfg['GLOBAL'] = {}
            self.cfg['DEFAULT'] = self.defaults
            self.cfg.set('DEFAULT', 'data_dir', self.datapath)
            self.cfg.set('DEFAULT', 'image_script', self.defaults['image_script'])
            cfgfound = cfile
            self.cfgpath = cfgfound
            self.configWrite()
        # Runtime-only values; set after writing so they stay out of the file.
        self.cfg.set('GLOBAL', 'data_dir', self.datapath)
        self.cfg.set('GLOBAL', 'log_dir', self.logpath)
        self.cfg.set('GLOBAL', 'db_dir', self.dbpath)
        self.cfg.set('GLOBAL', 'sessions_dir', self.sessionspath)
        return True

    def configWrite(self):
        ''' Write self.cfg to disk
        '''
        with open(self.cfgpath, 'w') as configfile:
            self.cfg.write(configfile)
        return True

    def globalize(self):
        ''' Place the cfg variables into the self.config
            scope
        '''
        mod = sys.modules[__name__]
        for vn in self.cfg['GLOBAL']:
            # Upper-cased name is used both as the module attribute and for
            # lookups (configparser option names are case-insensitive).
            vn = vn.upper()
            v = self.cfg.get('GLOBAL', vn)
            if vn in ['PORT', 'COMIC_SCAN_INTERVAL', 'IMAGE_SCRIPT',
                      'COMICS_PER_PAGE', 'THUMB_MAXWIDTH', 'THUMB_MAXHEIGHT']:
                # Numeric settings: fall back to the default when unset.
                if v == '':
                    v = self.cfg.get('DEFAULT', vn)
                v = int(v, 10)
            setattr(mod, vn, v)

    def get_yn(self, msg):
        """Prompt until the user answers y or n; return True for y."""
        while True:
            v = input(msg)
            if v.lower() in ['y', 'n']:
                break
            print("\nInvalid response. Enter 'y' or 'n'.")
        return v.lower() == 'y'

    def get_path(self, name):
        """Prompt for a path called *name*, offering to create it if absent."""
        p = None
        while True:
            prompt = 'Please enter %s: ' % name
            p = input(prompt)
            if not os.path.exists(p):
                msg = 'Path %s does not exist.\n\nDo you wish to create it? [y/n]: ' % p
                if self.get_yn(msg):
                    try:
                        os.makedirs(p)
                    except PermissionError:
                        print("You don't have the permissions to create that path.\n")
                        continue
                else:
                    print("Not creating directory: %s" % p)
                    continue
            break
        return p

    def configRead(self):
        ''' Read the app.ini config file.
        '''
        print("configRead() being called...")
        dp = self.find_config()
        if dp is None or self.datapath is None:
            log.error("Failed to find_config()")
            sys.exit(1)
        self.cfgpath = os.path.join(self.datapath, 'app.ini')
        self.cfg.read(self.cfgpath)
        # Backfill any defaults missing from an older config file.
        for k in self.defaults.keys():
            if k not in self.cfg['DEFAULT']:
                v = self.defaults[k]
                log.info("Setting default[%s] = %s", k, v)
                self.cfg['DEFAULT'][k] = v
        if 'GLOBAL' not in self.cfg:
            log.info("Resetting GLOBAL cfg...")
            self.cfg['GLOBAL'] = {}
            self.cfg.set('GLOBAL', 'data_dir', self.datapath)
        # Interactively prompt for the two required user-specific paths.
        if 'comic_path' not in self.cfg['GLOBAL'] or self.cfg.get('GLOBAL', 'comic_path') in [None, '']:
            cpath = self.get_path("your comic share's path")
            if cpath is not None:
                self.cfg.set('GLOBAL', 'comic_path', cpath)
        if 'temp_dir' not in self.cfg['GLOBAL'] or self.cfg.get('GLOBAL', 'temp_dir') in [None, '']:
            tdir = self.get_path('a directory for temporary (large) file storage')
            if tdir is not None:
                self.cfg.set('GLOBAL', 'temp_dir', tdir)
        self.configWrite()
        # Runtime-only values; set after writing so they stay out of the file.
        self.cfg.set('GLOBAL', 'log_dir', self.logpath)
        self.cfg.set('GLOBAL', 'db_dir', self.dbpath)
        self.cfg.set('GLOBAL', 'sessions_dir', self.sessionspath)
        self.globalize()
        return True

    def updateCfg(self, newvals):
        ''' Update the self.cfg with newvals, which should be
            a dict in the form {'GLOBAL': {'varname': 'varval'}}
        '''
        log.debug(newvals)
        for k in newvals['GLOBAL'].keys():
            # configparser only stores strings; map None to ''.
            if not isinstance(newvals['GLOBAL'][k], str):
                if newvals['GLOBAL'][k] is None:
                    newvals['GLOBAL'][k] = ''
                else:
                    log.debug("newvals['GLOBAL'][%s] is type %s",
                              k, str(type(newvals['GLOBAL'][k])))
            self.cfg.set('GLOBAL', k, newvals['GLOBAL'][k])
        self.configWrite()
        self.globalize()
        return True
|
import pytest
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.base_geometry import Point
from mathmaker.lib.core.geometry import Polygon
@pytest.fixture
def p1():
    """Quadrilateral ABCD with labeled sides and marked angles."""
    poly = Polygon([Point('A', 0.5, 0.5),
                    Point('B', 3, 1),
                    Point('C', 3.2, 4),
                    Point('D', 0.8, 3)
                    ])
    for side, length in zip(poly.side, (4, 3, 2, 6.5)):
        side.label = Value(length, unit='cm')
    for angle, measure in zip(poly.angle, (64, 128, 32, 256)):
        angle.label = Value(measure, unit="\\textdegree")
        angle.mark = 'simple'
    return poly
def test_p1_into_euk(p1):
    """Check Polygon's generated euk file."""
    # FIX: backslashes doubled ('\\sffamily') to avoid the invalid escape
    # sequence '\s' (DeprecationWarning today, SyntaxError in future
    # Python). The resulting string values are byte-for-byte unchanged.
    assert p1.into_euk() == \
        'box -0.1, -0.1, 3.8, 4.6\n\n'\
        'A = point(0.5, 0.5)\n'\
        'B = point(3, 1)\n'\
        'C = point(3.2, 4)\n'\
        'D = point(0.8, 3)\n'\
        '\n'\
        'draw\n'\
        ' (A.B.C.D)\n'\
        ' $\\rotatebox{11}{\\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
        ' $\\rotatebox{86}{\\sffamily 3~cm}$ B 86 - 8.9 deg 4.9\n'\
        ' $\\rotatebox{23}{\\sffamily 2~cm}$ C 203 - 12.2 deg 4.2\n'\
        ' $\\rotatebox{83}{\\sffamily 6.5~cm}$ D 263 - 12.9 deg 4.1\n'\
        ' $\\rotatebox{47.3}{\\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
        ' $\\rotatebox{-41.3}{\\sffamily 128\\textdegree}$ B 138.7 deg 2.7\n'\
        ' $\\rotatebox{54.3}{\\sffamily 32\\textdegree}$ C 234.3 deg 2.7\n'\
        ' $\\rotatebox{322.7}{\\sffamily 256\\textdegree}$ D 322.7 deg 2.7\n'\
        ' "A" A 227.3 deg, font("sffamily")\n'\
        ' "B" B 318.7 deg, font("sffamily")\n'\
        ' "C" C 54.3 deg, font("sffamily")\n'\
        ' "D" D 142.7 deg, font("sffamily")\n'\
        'end\n\n'\
        'label\n'\
        ' B, A, D simple\n'\
        ' C, B, A simple\n'\
        ' D, C, B simple\n'\
        ' A, D, C simple\n'\
        'end\n'
def test_p1_rename_errors(p1):
    """Check wrong arguments trigger exceptions when renaming."""
    # A non-string argument is a TypeError...
    with pytest.raises(TypeError):
        p1.rename(5678)
    # ...and a name whose length doesn't match the vertex count is a
    # ValueError.
    with pytest.raises(ValueError):
        p1.rename('KJLIZ')
def test_p1_renamed(p1):
    """Check renaming Polygon is OK."""
    # FIX: backslashes doubled ('\\sffamily') to avoid the invalid escape
    # sequence '\s' (DeprecationWarning today, SyntaxError in future
    # Python). The resulting string values are byte-for-byte unchanged.
    p1.rename('YOGA')
    assert p1.into_euk() == \
        'box -0.1, -0.1, 3.8, 4.6\n\n'\
        'A = point(0.5, 0.5)\n'\
        'G = point(3, 1)\n'\
        'O = point(3.2, 4)\n'\
        'Y = point(0.8, 3)\n'\
        '\n'\
        'draw\n'\
        ' (A.G.O.Y)\n'\
        ' $\\rotatebox{11}{\\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
        ' $\\rotatebox{86}{\\sffamily 3~cm}$ G 86 - 8.9 deg 4.9\n'\
        ' $\\rotatebox{23}{\\sffamily 2~cm}$ O 203 - 12.2 deg 4.2\n'\
        ' $\\rotatebox{83}{\\sffamily 6.5~cm}$ Y 263 - 12.9 deg 4.1\n'\
        ' $\\rotatebox{47.3}{\\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
        ' $\\rotatebox{-41.3}{\\sffamily 128\\textdegree}$ G 138.7 deg 2.7\n'\
        ' $\\rotatebox{54.3}{\\sffamily 32\\textdegree}$ O 234.3 deg 2.7\n'\
        ' $\\rotatebox{322.7}{\\sffamily 256\\textdegree}$ Y 322.7 deg 2.7\n'\
        ' "A" A 227.3 deg, font("sffamily")\n'\
        ' "G" G 318.7 deg, font("sffamily")\n'\
        ' "O" O 54.3 deg, font("sffamily")\n'\
        ' "Y" Y 142.7 deg, font("sffamily")\n'\
        'end\n\n'\
        'label\n'\
        ' G, A, Y simple\n'\
        ' O, G, A simple\n'\
        ' Y, O, G simple\n'\
        ' A, Y, O simple\n'\
        'end\n'
|
from __future__ import absolute_import
import re
import abc
class AddressbookError(Exception):
    """Raised for errors while querying an address book."""
    pass
class AddressBook(object):
    """can look up email addresses and realnames for contacts.
    .. note::
        This is an abstract class that leaves :meth:`get_contacts`
        unspecified. See :class:`AbookAddressBook` and
        :class:`ExternalAddressbook` for implementations.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, ignorecase=True):
        # Case-insensitive matching by default.
        self.reflags = re.IGNORECASE if ignorecase else 0

    @abc.abstractmethod
    def get_contacts(self):  # pragma no cover
        """list all contacts tuples in this abook as (name, email) tuples"""
        return []

    def lookup(self, query=''):
        """looks up all contacts where name or address match query"""
        pattern = re.compile('.*%s.*' % query, self.reflags)
        return [(name, email)
                for name, email in self.get_contacts()
                if pattern.match(name) or pattern.match(email)]
|
import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
    """Processing method that smooths every curve with a median filter."""
    can_be_applied = True
    _steps = [
        {
            'class': Confirmation,
            'title': 'Apply median filter',
            'desc': 'Press Forward to apply Median Filter.',
        },
    ]
    description = """
Median filter is smoothing algorithm similar to the Savitzky-Golay, however instead of fitting of the polynomial,
the middle point of the window is moved to the value of median of the points in the window. The median filter is
most usefull for removal of spikes from the signal (single point, large amplitude errors).
"""

    @classmethod
    def __str__(cls):
        return "Median Filter"

    def apply(self, user, dataset):
        # Refuse to apply a procedure that has not been completed.
        if self.model.completed is not True:
            raise VoltPyNotAllowed('Incomplete procedure.')
        self.__perform(dataset)

    def __perform(self, dataset):
        # Replace each curve's y-vector with its median-filtered version.
        for curve_data in dataset.curves_data.all():
            filtered = medfilt(curve_data.yVector)
            dataset.updateCurve(self.model, curve_data, filtered)
        dataset.save()

    def finalize(self, user):
        self.__perform(self.model.dataset)
        self.model.step = None
        self.model.completed = True
        self.model.save()
        return True
|
"""
Django settings for lwc project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
SECRET_KEY = '7fm_f66p8e!p%o=sr%d&cue(%+bh@@j_y6*b3d@t^c5%i8)1)2'
DEBUG = True
ALLOWED_HOSTS = []
SHARER_URL = "http://127.0.0.1:8000/?ref="
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'joins',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'lwc.middleware.ReferMiddleware',
]
ROOT_URLCONF = 'lwc.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lwc.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', 'static_dirs'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MyGarden.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
from osweb.projects.ManageProject import ManageProject
from osweb.projects.projects_data import ProjectsData
|
def test_generators():
    """Run the Edalizer over the 'generate' core and check that both
    generator invocations produced their core and input files."""
    import os
    import tempfile
    from fusesoc.config import Config
    from fusesoc.coremanager import CoreManager
    from fusesoc.edalizer import Edalizer
    from fusesoc.librarymanager import Library
    from fusesoc.vlnv import Vlnv

    cores_dir = os.path.join(os.path.dirname(__file__),
                             "capi2_cores", "misc", "generate")
    core_manager = CoreManager(Config())
    core_manager.add_library(Library("edalizer", cores_dir))
    core = core_manager.get_core(Vlnv("::generate"))

    build_root = tempfile.mkdtemp(prefix="export_")
    cache_root = tempfile.mkdtemp(prefix="export_cache_")

    edalizer = Edalizer(
        toplevel=core.name,
        flags={"tool": "icarus"},
        core_manager=core_manager,
        cache_root=cache_root,
        work_root=os.path.join(build_root, "work"),
        export_root=os.path.join(build_root, "exported_files"),
        system_name=None,
    )
    edalizer.run()

    # One generated directory per generator invocation.
    for gen_name in ("testgenerate_without_params", "testgenerate_with_params"):
        gendir = os.path.join(cache_root, "generated", "generate-%s_0" % gen_name)
        assert os.path.isfile(os.path.join(gendir, "generated.core"))
        assert os.path.isfile(os.path.join(gendir, "%s_input.yml" % gen_name))
|
from bollinger import bands, plot, strategies
import argparse

# Command-line front end: either plot Bollinger bands for each symbol or
# print an invest/don't-invest suggestion according to a chosen strategy.
parser = argparse.ArgumentParser(description="plots bollinger bands or suggests investments", epilog="example: bolly.py plot AMZN FB")
parser.add_argument("action", metavar="ACTION", choices=["plot", "suggest"], help="either plot or suggest")
parser.add_argument("symbols", metavar="SYMBOL", nargs="+", help="stock symbols")
parser.add_argument("-s", "--strategy", choices=["uponce", "downonce", "moreup", "moredown"], default="moredown", help="selects invesment strategy")
args = parser.parse_args()

if args.action == "plot":
    for ticker in args.symbols:
        print("plot [ %s ]: " % (ticker), end="")
        series = bands.Bands(ticker)
        series.fetch()
        try:
            plot.Plot(series).save()
            print("OK")
        except Exception as err:
            print("FAIL: (%s)" % (err))

if args.action == "suggest":
    # argparse's "choices" guarantees args.strategy is one of these keys.
    strategy_classes = {
        "uponce": strategies.UpOnce,
        "downonce": strategies.DownOnce,
        "moreup": strategies.MoreUp,
        "moredown": strategies.MoreDown,
    }
    for ticker in args.symbols:
        print("suggest [ %s ]: " % (ticker), end="")
        series = bands.Bands(ticker)
        series.fetch()
        try:
            chosen = strategy_classes[args.strategy](series)
            print("YES" if chosen.invest() else "NO")
        except Exception as err:
            print("FAIL: (%s)" % (err))
|
import sys
import csv
from itertools import izip  # Python 2 only; this module predates Python 3
# Package version, exposed both as a tuple and as a dotted string.
VERSION = (0, 9, 4)
__version__ = ".".join(map(str, VERSION))
# Names re-exported verbatim from the stdlib csv module (see loop below).
pass_throughs = [
    'register_dialect',
    'unregister_dialect',
    'get_dialect',
    'list_dialects',
    'field_size_limit',
    'Dialect',
    'excel',
    'excel_tab',
    'Sniffer',
    'QUOTE_ALL',
    'QUOTE_MINIMAL',
    'QUOTE_NONNUMERIC',
    'QUOTE_NONE',
    'Error'
]
__all__ = [
    'reader',
    'writer',
    'DictReader',
    'DictWriter',
] + pass_throughs
# Copy each pass-through attribute from csv into this module's namespace so
# "import unicodecsv as csv" is a drop-in replacement.
for prop in pass_throughs:
    globals()[prop] = getattr(csv, prop)
def _stringify(s, encoding, errors):
if s is None:
return ''
if isinstance(s, unicode):
return s.encode(encoding, errors)
elif isinstance(s, (int, float)):
pass # let csv.QUOTE_NONNUMERIC do its thing.
elif not isinstance(s, str):
s = str(s)
return s
def _stringify_list(l, encoding, errors='strict'):
    """Encode every element of iterable *l* via ``_stringify``.

    A non-iterable argument surfaces as ``csv.Error`` so callers see a
    csv-domain failure rather than a bare ``TypeError``.
    """
    try:
        return [_stringify(s, encoding, errors) for s in iter(l)]
    except TypeError, e:  # Python 2 "except ..., e" syntax; kept as-is
        raise csv.Error(str(e))
def _unicodify(s, encoding):
if s is None:
return None
if isinstance(s, (unicode, int, float)):
return s
elif isinstance(s, str):
return s.decode(encoding)
return s
class UnicodeWriter(object):
    """``csv.writer`` wrapper that encodes unicode rows before writing.

    Every row is passed through ``_stringify_list`` so unicode values are
    encoded with the configured encoding/error policy while numbers are
    left alone for ``csv.QUOTE_NONNUMERIC``.
    """
    def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
                 *args, **kwds):
        self.encoding = encoding
        self.encoding_errors = errors
        self.writer = csv.writer(f, dialect, *args, **kwds)

    def writerow(self, row):
        """Encode *row* and hand it to the wrapped writer."""
        encoded_row = _stringify_list(row, self.encoding, self.encoding_errors)
        self.writer.writerow(encoded_row)

    def writerows(self, rows):
        """Write every row in *rows* through :meth:`writerow`."""
        for single_row in rows:
            self.writerow(single_row)

    @property
    def dialect(self):
        """Dialect actually in use by the wrapped ``csv.writer``."""
        return self.writer.dialect
# Module-level alias so ``unicodecsv.writer`` mirrors ``csv.writer``.
writer = UnicodeWriter
class UnicodeReader(object):
    """Iterator wrapping ``csv.reader`` that decodes each cell to unicode.

    Python 2 only (uses the ``unicode`` builtin and the ``.next()``
    iterator protocol).
    """
    def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
                 **kwds):
        # csv "format parameters" that may be supplied instead of a dialect.
        format_params = ['delimiter', 'doublequote', 'escapechar',
            'lineterminator', 'quotechar', 'quoting', 'skipinitialspace']
        if dialect is None:
            # Default to excel only when no individual format parameter was
            # given; otherwise let csv.reader build a dialect from kwds.
            if not any([kwd_name in format_params for kwd_name in kwds.keys()]):
                dialect = csv.excel
        self.reader = csv.reader(f, dialect, **kwds)
        self.encoding = encoding
        self.encoding_errors = errors
    def next(self):
        """Return the next row with all non-float cells decoded to unicode."""
        row = self.reader.next()
        # Bind to locals: cheap micro-optimization for the per-cell loop.
        encoding = self.encoding
        encoding_errors = self.encoding_errors
        float_ = float
        unicode_ = unicode
        try:
            val = [(value if isinstance(value, float_) else unicode_(value, encoding, encoding_errors))
                   for value in row]
        except UnicodeDecodeError as e:
            # attempt a different encoding...
            # NOTE(review): falls back to Latin-1 for this row only
            # (self.encoding is not updated) and still applies the original
            # errors policy — confirm this is the intended behavior.
            encoding = 'ISO-8859-1'
            val = [(value if isinstance(value, float_) else unicode_(value, encoding, encoding_errors))
                   for value in row]
        return val
    def __iter__(self):
        return self
    @property
    def dialect(self):
        # Dialect resolved by the wrapped csv.reader.
        return self.reader.dialect
    @property
    def line_num(self):
        # Number of physical lines consumed so far (csv.reader semantics).
        return self.reader.line_num
# Module-level alias so ``unicodecsv.reader`` mirrors ``csv.reader``.
reader = UnicodeReader
class DictWriter(csv.DictWriter):
    """
    ``csv.DictWriter`` subclass whose underlying row writer encodes
    unicode values (see ``UnicodeWriter``).

    >>> from cStringIO import StringIO
    >>> f = StringIO()
    >>> w = DictWriter(f, ['a', u'ñ', 'b'], restval=u'î')
    >>> w.writerow({'a':'1', u'ñ':'2'})
    >>> w.writerow({'a':'1', u'ñ':'2', 'b':u'ø'})
    >>> w.writerow({'a':u'é', u'ñ':'2'})
    >>> f.seek(0)
    >>> r = DictReader(f, fieldnames=['a', u'ñ'], restkey='r')
    >>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'î']}
    True
    >>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'\xc3\xb8']}
    True
    >>> r.next() == {'a': u'\xc3\xa9', u'ñ':'2', 'r': [u'\xc3\xae']}
    True
    """
    def __init__(self, csvfile, fieldnames, restval='', extrasaction='raise', dialect='excel', encoding='utf-8', errors='strict', *args, **kwds):
        self.encoding = encoding
        csv.DictWriter.__init__(
            self, csvfile, fieldnames, restval, extrasaction, dialect, *args, **kwds)
        # Replace the plain csv writer installed by DictWriter.__init__ with
        # the encoding-aware one.
        self.writer = UnicodeWriter(
            csvfile, dialect, encoding=encoding, errors=errors, *args, **kwds)
        self.encoding_errors = errors
    def writeheader(self):
        """Write the field names as the first row."""
        # NOTE(review): the encoded list below is computed but never used —
        # the header dict is built from self.fieldnames directly, and
        # writerow re-encodes anyway. Looks like dead code; confirm intent
        # before removing.
        fieldnames = _stringify_list(
            self.fieldnames, self.encoding, self.encoding_errors)
        header = dict(zip(self.fieldnames, self.fieldnames))
        self.writerow(header)
class DictReader(csv.DictReader):
    """
    ``csv.DictReader`` subclass that decodes cells (and the dict keys)
    to unicode via ``UnicodeReader``.  Python 2 only.

    >>> from cStringIO import StringIO
    >>> f = StringIO()
    >>> w = DictWriter(f, fieldnames=['name', 'place'])
    >>> w.writerow({'name': 'Cary Grant', 'place': 'hollywood'})
    >>> w.writerow({'name': 'Nathan Brillstone', 'place': u'øLand'})
    >>> w.writerow({'name': u'Willam ø. Unicoder', 'place': u'éSpandland'})
    >>> f.seek(0)
    >>> r = DictReader(f, fieldnames=['name', 'place'])
    >>> print r.next() == {'name': 'Cary Grant', 'place': 'hollywood'}
    True
    >>> print r.next() == {'name': 'Nathan Brillstone', 'place': u'øLand'}
    True
    >>> print r.next() == {'name': u'Willam ø. Unicoder', 'place': u'éSpandland'}
    True
    """
    def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
                 dialect='excel', encoding='utf-8', errors='strict', *args,
                 **kwds):
        if fieldnames is not None:
            # Byte-string field names keep DictReader's internal matching
            # consistent with what UnicodeReader yields.
            fieldnames = _stringify_list(fieldnames, encoding)
        csv.DictReader.__init__(
            self, csvfile, fieldnames, restkey, restval, dialect, *args, **kwds)
        # Replace the plain csv reader with the decoding-aware one.
        self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
                                    errors=errors, *args, **kwds)
        if fieldnames is None and not hasattr(csv.DictReader, 'fieldnames'):
            # Python 2.5 fieldnames workaround.
            # (http://bugs.python.org/issue3436)
            reader = UnicodeReader(
                csvfile, dialect, encoding=encoding, *args, **kwds)
            self.fieldnames = _stringify_list(reader.next(), reader.encoding)
        # Unicode twins of the (byte-string) field names / restkey, used to
        # build the result dict in next().
        self.unicode_fieldnames = [_unicodify(f, encoding) for f in
                                   self.fieldnames]
        self.unicode_restkey = _unicodify(restkey, encoding)
    def next(self):
        """Return the next row as a dict keyed by the unicode field names."""
        row = csv.DictReader.next(self)
        result = dict((uni_key, row[str_key]) for (str_key, uni_key) in
                      izip(self.fieldnames, self.unicode_fieldnames))
        # Overflow cells (more values than field names) land under restkey.
        rest = row.get(self.restkey)
        if rest:
            result[self.unicode_restkey] = rest
        return result
|
import os,re,sys,pprint,shutil
from pathlib import Path
PACKAGES_DIR = "../packages"
def errorExit(msg):
    """Report a fatal error on stdout and terminate with exit status 1."""
    print(msg)
    raise SystemExit(1)
def isPathDisabled(path):
    """Return True when any component of *path* marks it as disabled.

    A component counts as disabled when it starts with "_disabled",
    case-insensitively (e.g. "_Disabled_old/pkg").
    """
    return any(part.lower().startswith("_disabled") for part in path.parts)
# Split the two merged build scripts back into one file per package.
# Markers of the form ########START:[name] / ########END:[name] delimit each
# original file inside the merged sources.
depsFolder = Path("_deps_split")
prodFolder = Path("_prods_split")
merged_deps = Path("merged_deps.py")
merged_prods = Path("merged_prods.py")
if not os.path.isfile(merged_deps):
    errorExit("Merged depends file does not exist")
if not os.path.isfile(merged_prods):
    errorExit("Merged products file does not exist")
# Recreate both output folders from scratch so stale splits never linger.
for outFolder in (depsFolder, prodFolder):
    if os.path.isdir(outFolder):
        print("Clearing old split folder:" + str(outFolder))
        shutil.rmtree(outFolder)
    os.makedirs(outFolder)
things = { "merged_deps.py" : depsFolder, "merged_prods.py" : prodFolder, }
for mergefile_name in things:
    enableWrite = False
    curFile = None
    print("Splitting " +mergefile_name+ " into seperate files in " + str(things[mergefile_name]))
    with open(mergefile_name, "r", encoding="utf-8") as f:
        mergedFile = f.read().split("\n")
    fileBuffer = ""
    for line in mergedFile:
        # Fix: patterns must be raw strings — "\[" in a plain literal is an
        # invalid escape sequence (SyntaxWarning on modern Python).
        startR = re.search(r"^########START:\[(.+)\]$", line)
        endR = re.search(r"^########END:\[(.+)\]$", line)
        if endR != None:
            enableWrite = False
            # NOTE(review): an END marker without a preceding START leaves
            # curFile as None and would crash here — the merged input is
            # assumed to be well-formed.
            curFile.write(fileBuffer.rstrip("\n"))
            curFile.close()
        if enableWrite:
            fileBuffer += line + "\n"
        if startR != None:
            # Start buffering a new output file named by the marker.
            enableWrite = True
            fileBuffer = ""
            curFile = open(os.path.join(things[mergefile_name], startR.groups()[0]), "w", encoding="utf-8")
print("Done")
|
from unittest import mock
from configman.dotdict import DotDict
from socorro.lib.task_manager import TaskManager, default_task_func
class TestTaskManager:
    """Unit tests for socorro's single-threaded TaskManager."""
    def test_constuctor1(self):
        # (sic: method name typo "constuctor" kept for test-ID stability)
        # A fresh manager keeps its config, falls back to the default task
        # function, and starts with the quit flag cleared.
        config = DotDict()
        config.quit_on_empty_queue = False
        tm = TaskManager(config)
        assert tm.config == config
        assert tm.task_func == default_task_func
        assert tm.quit is False
    def test_get_iterator(self):
        # _get_iterator must accept a plain iterable, a callable returning
        # an iterator, and an object implementing __iter__.
        config = DotDict()
        config.quit_on_empty_queue = False
        tm = TaskManager(config, job_source_iterator=range(1))
        assert list(tm._get_iterator()) == [0]
        def an_iter(self):
            yield from range(5)
        tm = TaskManager(config, job_source_iterator=an_iter)
        assert list(tm._get_iterator()) == [0, 1, 2, 3, 4]
        class X:
            def __init__(self, config):
                self.config = config
            def __iter__(self):
                yield from self.config
        tm = TaskManager(config, job_source_iterator=X(config))
        assert list(tm._get_iterator()) == list(config.keys())
    def test_blocking_start(self):
        config = DotDict()
        config.idle_delay = 1
        config.quit_on_empty_queue = False
        class MyTaskManager(TaskManager):
            def _responsive_sleep(self, seconds, wait_log_interval=0, wait_reason=""):
                # Interrupt the blocking loop after a few sleeps.  The
                # AttributeError branch lazily initializes self.count on the
                # first call (no __init__ override needed).
                try:
                    if self.count >= 2:
                        raise KeyboardInterrupt
                    self.count += 1
                except AttributeError:
                    self.count = 0
        tm = MyTaskManager(config, task_func=mock.Mock())
        waiting_func = mock.Mock()
        tm.blocking_start(waiting_func=waiting_func)
        # NOTE(review): expected call counts depend on TaskManager's
        # internal batching (not visible here) — confirm against its source.
        assert tm.task_func.call_count == 10
        assert waiting_func.call_count == 0
    def test_blocking_start_with_quit_on_empty(self):
        # With quit_on_empty_queue the loop must exit on its own, without
        # the KeyboardInterrupt trick used above.
        config = DotDict()
        config.idle_delay = 1
        config.quit_on_empty_queue = True
        tm = TaskManager(config, task_func=mock.Mock())
        waiting_func = mock.Mock()
        tm.blocking_start(waiting_func=waiting_func)
        assert tm.task_func.call_count == 10
        assert waiting_func.call_count == 0
|
import argparse
import datetime
import os
import re
def version_str(args):
    """Render the dotted "major.minor.maintenance" version string."""
    components = [str(args.major), str(args.minor), str(args.maintenance)]
    return ".".join(components)
def file_process(name, rule, args):
    """Rewrite file *name* by passing its content through *rule*.

    *rule* is called as ``rule(text, args)`` and must return the new file
    content, or a falsy value to signal "no change" (the file is then left
    untouched).

    Bug fixed: the original opened the file a second time inside
    ``with open(name) as dest:``, shadowing the context-managed handle, so
    the write handle was never deterministically closed and the ``with``
    only closed a redundant read handle. A single write-mode context
    manager is used now.
    """
    print("--- Processing " + os.path.basename(name))
    with open(name) as source:
        data = rule(source.read(), args)
    if not data:
        return
    print("Writing " + name)
    with open(name, "w") as dest:
        dest.write(data)
def changelog_rule(data, args):
    """Insert a new release section into NEWS.md content.

    Turns the "[Unreleased]" heading into "[Unreleased]" plus a dated
    section for the new version, then rewrites the GitHub compare links
    accordingly.  Returns the updated text, or None when either
    substitution did not apply exactly once.
    """
    new_version = version_str(args)
    today = datetime.date.today().isoformat()
    text, hits = re.subn(
        r"## \[Unreleased\]",
        r"## [Unreleased]\n\n## [" + new_version + r"] - " + today,
        data)
    if hits != 1:
        return None
    text, hits = re.subn(
        r"(\[Unreleased)(\]: https://github.com/morinim/vita/compare/v)(.+)(\.\.\.HEAD)",
        r"\g<1>\g<2>" + new_version + r"\g<4>\n[" + new_version + r"\g<2>\g<3>...v" + new_version,
        text)
    return text if hits == 1 else None
def doxygen_rule(data, args):
    """Update the "\\mainpage VITA vX.Y.Z" banner in the doxygen header.

    Returns the updated text, or None when no banner was found.
    """
    pattern = r"([\s]+)(\*[\s]+\\mainpage VITA v)([\d]+)\.([\d]+)\.([\d]+)([\s]*)"
    replacement = r"\g<1>\g<2>" + version_str(args) + r"\g<6>"
    updated, hits = re.subn(pattern, replacement, data)
    return updated if hits > 0 else None
def get_cmd_line_options():
    """Build the command-line parser: -v plus major/minor/maintenance."""
    parser = argparse.ArgumentParser(
        description="Helps to set up a new version of Vita")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Turn on verbose mode")
    # Positional version components, all required integers.
    parser.add_argument("major", type=int)
    parser.add_argument("minor", type=int)
    parser.add_argument("maintenance", type=int)
    return parser
def main():
    """Drive the release bump: patch the files, then print follow-up steps."""
    args = get_cmd_line_options().parse_args()
    version = version_str(args)
    print("Setting version to v" + version)
    # Stamp the new version into the changelog and the doxygen banner.
    file_process("../NEWS.md", changelog_rule, args)
    file_process("../doc/doxygen/doxygen.h", doxygen_rule, args)
    print("\n\nRELEASE NOTE\n")
    print("1. Build. cmake -DCMAKE_BUILD_TYPE=Release -B build/ src/ ; cmake --build build/")
    print("2. Check. cd build/ ; ./tests")
    print('3. Commit. git commit -am "[DOC] Changed revision number to v'
          + version + '"')
    print("4. Tag. git tag -a v" + version + " -m \"tag message\"")
    print("\nRemember to 'git push' both code and tag. For the tag:\n")
    print(" git push origin [tagname]\n")
# Standard script entry point.
if __name__ == "__main__":
    main()
|
import json
import mock
from django.test import TestCase
from django.core.urlresolvers import reverse
class TestAPI(TestCase):
    """HTTP-level tests for the LDAP lookup API endpoints.

    Each test patches ``ldap.initialize`` so no real LDAP server is
    contacted; ``connection.search_s`` is given a side effect that fakes
    directory lookups based on the filter string.
    """
    @mock.patch('ldap.initialize')
    def test_exists(self, mocked_initialize):
        connection = mock.MagicMock()
        mocked_initialize.return_value = connection
        url = reverse('api:exists')
        # Missing ?mail= parameter must yield a JSON 400.
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
        # check that 400 Bad Request errors are proper JSON
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertEqual(
            json.loads(response.content),
            {'error': "missing key 'mail'"}
        )
        # Empty mail value is also rejected.
        response = self.client.get(url, {'mail': ''})
        self.assertEqual(response.status_code, 400)
        # Fake directory containing exactly one user.
        result = {
            'abc123': {'uid': 'abc123', 'mail': 'peter@example.com'},
        }
        def search_s(base, scope, filterstr, *args, **kwargs):
            if 'peter@example.com' in filterstr:
                # if 'hgaccountenabled=TRUE' in filterstr:
                #     return []
                return result.items()
            return []
        connection.search_s.side_effect = search_s
        # Known address -> JSON true.
        response = self.client.get(url, {'mail': 'peter@example.com'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertEqual(json.loads(response.content), True)
        # Unknown address -> JSON false.
        response = self.client.get(url, {'mail': 'never@heard.of.com'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content), False)
        # response = self.client.get(url, {'mail': 'peter@example.com',
        #                                  'hgaccountenabled': ''})
        # self.assertEqual(response.status_code, 200)
        # self.assertEqual(json.loads(response.content), False)
        # Extra attribute filters are passed through and still match.
        response = self.client.get(url, {'mail': 'peter@example.com',
                                         'gender': 'male'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content), True)
    @mock.patch('ldap.initialize')
    def test_employee(self, mocked_initialize):
        connection = mock.MagicMock()
        mocked_initialize.return_value = connection
        url = reverse('api:employee')
        # Missing / empty mail -> 400, as in test_exists.
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
        response = self.client.get(url, {'mail': ''})
        self.assertEqual(response.status_code, 400)
        result = {
            'abc123': {'uid': 'abc123',
                       'mail': 'peter@mozilla.com',
                       'sn': u'B\xe3ngtsson'},
        }
        def search_s(base, scope, filterstr, *args, **kwargs):
            if 'peter@example.com' in filterstr:
                return result.items()
            return []
        connection.search_s.side_effect = search_s
        response = self.client.get(url, {'mail': 'peter@example.com'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertEqual(json.loads(response.content), True)
        response = self.client.get(url, {'mail': 'never@heard.of.com'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertEqual(json.loads(response.content), False)
    @mock.patch('ldap.initialize')
    def test_ingroup(self, mocked_initialize):
        connection = mock.MagicMock()
        mocked_initialize.return_value = connection
        url = reverse('api:in-group')
        # Both mail and cn are required; each missing/empty case is a 400.
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)
        response = self.client.get(url, {'mail': ''})
        self.assertEqual(response.status_code, 400)
        response = self.client.get(url, {'mail': 'peter@example.com'})
        self.assertEqual(response.status_code, 400)
        response = self.client.get(url, {'mail': 'peter@example.com',
                                         'cn': ''})
        self.assertEqual(response.status_code, 400)
        result = {
            'abc123': {'uid': 'abc123', 'mail': 'peter@example.com'},
        }
        def search_s(base, scope, filterstr, *args, **kwargs):
            # Group membership lookups search under ou=groups; everything
            # else is treated as a plain user lookup.
            if 'ou=groups' in base:
                if (
                    'peter@example.com' in filterstr and
                    'cn=CrashStats' in filterstr
                ):
                    return result.items()
            else:
                # basic lookup
                if 'peter@example.com' in filterstr:
                    return result.items()
            return []
        connection.search_s.side_effect = search_s
        response = self.client.get(url, {'mail': 'not@head.of.com',
                                         'cn': 'CrashStats'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content), False)
        response = self.client.get(url, {'mail': 'peter@example.com',
                                         'cn': 'CrashStats'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content), True)
        response = self.client.get(url, {'mail': 'peter@example.com',
                                         'cn': 'NotInGroup'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content), False)
|
"""
Monitor the WR31 door enclosure
"""
import time
import sys
import sarcli
import idigidata
def millisecond_timestamp():
    """
    Return a timestamp, in milliseconds
    :return ms_timestamp: int, Timestamp in milliseconds
    """
    return int(time.time() * 1000)
def cli_command(cmd):
    """
    Send a command to the SarOS CLI and receive the response
    :param cmd: str, Command to run
    :return response: str, Response to cmd
    """
    session = sarcli.open()
    session.write(cmd)
    reply = session.read()
    session.close()
    return reply
class SmsAlert(object):
    """
    Send an SMS alert
    """
    def __init__(self, destination, custom_text):
        # Phone number to notify and a prefix applied to every message.
        self.custom_text = custom_text
        self.destination = destination

    def send_alert(self, message):
        """
        Send an SMS alert
        :param message: str, Content of SMS message
        :return response: str, Response to sendsms command
        """
        body = "{0}: {1}".format(self.custom_text, message)
        sendsms_cmd = 'sendsms ' + self.destination + ' "' + body + '" '
        return cli_command(sendsms_cmd)
class DatapointAlert(object):
    """
    Send a Datapoint alert
    """
    def __init__(self, destination):
        # Device Cloud stream ID the datapoints are written to.
        self.destination = destination
    def send_alert(self, message):
        """
        Send a Datapoint alert
        :param message: str, Datapoint content
        :return response: tuple, Result code of datapoint upload attempt
        """
        timestamp = millisecond_timestamp()
        # XML DataPoint payload uploaded to Device Cloud; indentation inside
        # the literal is part of the payload and kept as-is.
        dpoint = """\
        <DataPoint>
            <dataType>STRING</dataType>
            <data>{0}</data>
            <timestamp>{1}</timestamp>
            <streamId>{2}</streamId>
        </DataPoint>""".format(message, timestamp, self.destination)
        response = idigidata.send_to_idigi(dpoint, "DataPoint/stream.xml")
        return response
class DoorMonitor(object):
    """
    Provides methods to monitor the enclosure door status

    Python 2 code (uses a print statement in monitor_switch).
    """
    def __init__(self, alert_list):
        # Last observed D1 status; empty so the first reading always alerts.
        self.d1_status = ""
        self.alert_list = alert_list
    @classmethod
    def switch_status(cls):
        """
        Reads line status and sends an alert if the status is different
        :return status: str, Door status, "OPEN" or "CLOSED"
        """
        response = cli_command("gpio dio")
        # NOTE(review): if the D1 line below is absent from the response,
        # ``status`` is never assigned and the return raises
        # UnboundLocalError — confirm "gpio dio" always reports D1.
        if "D1: DOUT=OFF, DIN=LOW" in response:
            if not "D0: DOUT=ON" in response:
                # Door is closed
                status = "CLOSED"
        else:
            # Door is open
            status = "OPEN"
        return status
    def send_alert(self, text):
        """
        :param text: str, Alert content
        :return:
        """
        # Fan the message out to every configured alert mechanism.
        for alert in self.alert_list:
            alert.send_alert(text)
    def monitor_switch(self):
        """
        Runs line monitoring and alerting in a loop
        :return:
        """
        while True:
            status = self.switch_status()
            # Only alert on a change of state, then remember the new state.
            if status != self.d1_status:
                print "WR31 door is: {0}".format(status)
                self.send_alert(status)
                self.d1_status = status
            time.sleep(.5)
# Entry point.  Optional argv: [1] SMS destination number, [2] custom
# message prefix.  A Datapoint alert is always configured; SMS only when a
# destination number was given.
if __name__ == '__main__':
    ALERT_FUNCTIONS = [DatapointAlert("WR31_door")]
    if len(sys.argv) >= 3:
        CUSTOM_TEXT = sys.argv[2]
    else:
        CUSTOM_TEXT = "WR31 Door"
    if len(sys.argv) >= 2:
        ALERT_FUNCTIONS.append(SmsAlert(sys.argv[1], CUSTOM_TEXT))
    MONITOR = DoorMonitor(ALERT_FUNCTIONS)
    MONITOR.monitor_switch()
|
import copy
import logging
import os
import time
from datetime import datetime
from hashlib import sha1
import newrelic.agent
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
from past.builtins import long
from treeherder.etl.artifact import (serialize_artifact_json_blobs,
store_job_artifacts)
from treeherder.etl.common import get_guid_root
from treeherder.model.models import (BuildPlatform,
FailureClassification,
Job,
JobGroup,
JobLog,
JobType,
Machine,
MachinePlatform,
Option,
OptionCollection,
Product,
Push,
ReferenceDataSignatures,
TaskclusterMetadata)
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def _get_number(s):
    """Best-effort conversion of *s* to ``long``; unparsable values (or
    None) become 0."""
    try:
        value = long(s)
    except (ValueError, TypeError):
        value = 0
    return value
def _remove_existing_jobs(data):
    """
    Remove jobs from data where we already have them in the same state.

    1. split the incoming jobs into pending, running and complete.
    2. fetch the ``job_guids`` from the db that are in the same state as they
       are in ``data``.
    3. build a new list of jobs in ``new_data`` that are not already in
       the db and pass that back. It could end up empty at that point.
    """
    guids = [datum['job']['job_guid'] for datum in data]
    stored_states = {
        guid: state for (guid, state) in Job.objects.filter(
            guid__in=guids).values_list('guid', 'state')
    }
    kept = []
    for datum in data:
        incoming = datum['job']
        known_state = stored_states.get(incoming['job_guid'])
        if not known_state:
            # Never seen this job before: keep it.
            kept.append(datum)
            continue
        # should not transition from running to pending,
        # or completed to any other state
        if known_state == 'completed':
            continue
        if incoming['state'] == 'pending' and known_state == 'running':
            continue
        kept.append(datum)
    return kept
def _load_job(repository, job_datum, push_id):
    """
    Load a job into the treeherder database

    If the job is a ``retry`` the ``job_guid`` will have a special
    suffix on it. But the matching ``pending``/``running`` job will not.
    So we append the suffixed ``job_guid`` to ``retry_job_guids``
    so that we can update the job_id_lookup later with the non-suffixed
    ``job_guid`` (root ``job_guid``). Then we can find the right
    ``pending``/``running`` job and update it with this ``retry`` job.

    Returns the (possibly truncated) job_guid that was stored.
    """
    # --- Reference data rows (get-or-create), defaulting to 'unknown'. ---
    build_platform, _ = BuildPlatform.objects.get_or_create(
        os_name=job_datum.get('build_platform', {}).get('os_name', 'unknown'),
        platform=job_datum.get('build_platform', {}).get('platform', 'unknown'),
        architecture=job_datum.get('build_platform', {}).get('architecture',
                                                             'unknown'))
    machine_platform, _ = MachinePlatform.objects.get_or_create(
        os_name=job_datum.get('machine_platform', {}).get('os_name', 'unknown'),
        platform=job_datum.get('machine_platform', {}).get('platform', 'unknown'),
        architecture=job_datum.get('machine_platform', {}).get('architecture',
                                                               'unknown'))
    option_names = job_datum.get('option_collection', [])
    option_collection_hash = OptionCollection.calculate_hash(
        option_names)
    if not OptionCollection.objects.filter(
            option_collection_hash=option_collection_hash).exists():
        # in the unlikely event that we haven't seen this set of options
        # before, add the appropriate database rows
        options = []
        for option_name in option_names:
            option, _ = Option.objects.get_or_create(name=option_name)
            options.append(option)
        for option in options:
            OptionCollection.objects.create(
                option_collection_hash=option_collection_hash,
                option=option)
    machine, _ = Machine.objects.get_or_create(
        name=job_datum.get('machine', 'unknown'))
    job_type, _ = JobType.objects.get_or_create(
        symbol=job_datum.get('job_symbol') or 'unknown',
        name=job_datum.get('name') or 'unknown')
    job_group, _ = JobGroup.objects.get_or_create(
        name=job_datum.get('group_name') or 'unknown',
        symbol=job_datum.get('group_symbol') or 'unknown')
    product_name = job_datum.get('product_name', 'unknown')
    if not product_name.strip():
        product_name = 'unknown'
    product, _ = Product.objects.get_or_create(name=product_name)
    # --- Normalize free-form fields to their DB column lengths. ---
    job_guid = job_datum['job_guid']
    job_guid = job_guid[0:50]
    who = job_datum.get('who') or 'unknown'
    who = who[0:50]
    reason = job_datum.get('reason') or 'unknown'
    reason = reason[0:125]
    state = job_datum.get('state') or 'unknown'
    state = state[0:25]
    build_system_type = job_datum.get('build_system_type', 'buildbot')
    reference_data_name = job_datum.get('reference_data_name', None)
    default_failure_classification = FailureClassification.objects.get(
        name='not classified')
    # Signature hash over the full reference-data tuple identifies this
    # job configuration.
    sh = sha1()
    sh.update(''.join(
        map(str,
            [build_system_type, repository.name, build_platform.os_name,
             build_platform.platform, build_platform.architecture,
             machine_platform.os_name, machine_platform.platform,
             machine_platform.architecture,
             job_group.name, job_group.symbol, job_type.name,
             job_type.symbol, option_collection_hash,
             reference_data_name])).encode('utf-8'))
    signature_hash = sh.hexdigest()
    # Should be the buildername in the case of buildbot (if not provided
    # default to using the signature hash)
    if not reference_data_name:
        reference_data_name = signature_hash
    signature, _ = ReferenceDataSignatures.objects.get_or_create(
        name=reference_data_name,
        signature=signature_hash,
        build_system_type=build_system_type,
        repository=repository.name, defaults={
            'first_submission_timestamp': time.time(),
            'build_os_name': build_platform.os_name,
            'build_platform': build_platform.platform,
            'build_architecture': build_platform.architecture,
            'machine_os_name': machine_platform.os_name,
            'machine_platform': machine_platform.platform,
            'machine_architecture': machine_platform.architecture,
            'job_group_name': job_group.name,
            'job_group_symbol': job_group.symbol,
            'job_type_name': job_type.name,
            'job_type_symbol': job_type.symbol,
            'option_collection_hash': option_collection_hash
        })
    tier = job_datum.get('tier') or 1
    result = job_datum.get('result', 'unknown')
    submit_time = datetime.fromtimestamp(
        _get_number(job_datum.get('submit_timestamp')))
    start_time = datetime.fromtimestamp(
        _get_number(job_datum.get('start_timestamp')))
    end_time = datetime.fromtimestamp(
        _get_number(job_datum.get('end_timestamp')))
    # first, try to create the job with the given guid (if it doesn't
    # exist yet)
    job_guid_root = get_guid_root(job_guid)
    if not Job.objects.filter(guid__in=[job_guid, job_guid_root]).exists():
        # This could theoretically already have been created by another process
        # that is running updates simultaneously.  So just attempt to create
        # it, but allow it to skip if it's the same guid.  The odds are
        # extremely high that this is a pending and running job that came in
        # quick succession and are being processed by two different workers.
        Job.objects.get_or_create(
            guid=job_guid,
            defaults={
                "repository": repository,
                "signature": signature,
                "build_platform": build_platform,
                "machine_platform": machine_platform,
                "machine": machine,
                "option_collection_hash": option_collection_hash,
                "job_type": job_type,
                "job_group": job_group,
                "product": product,
                "failure_classification": default_failure_classification,
                "who": who,
                "reason": reason,
                "result": result,
                "state": state,
                "tier": tier,
                "submit_time": submit_time,
                "start_time": start_time,
                "end_time": end_time,
                "last_modified": datetime.now(),
                "push_id": push_id
            }
        )
    # Can't just use the ``job`` we would get from the ``get_or_create``
    # because we need to try the job_guid_root instance first for update,
    # rather than a possible retry job instance.
    try:
        job = Job.objects.get(guid=job_guid_root)
    except ObjectDoesNotExist:
        job = Job.objects.get(guid=job_guid)
    # add taskcluster metadata if applicable
    if all([k in job_datum for k in ['taskcluster_task_id', 'taskcluster_retry_id']]):
        try:
            TaskclusterMetadata.objects.create(
                job=job,
                task_id=job_datum['taskcluster_task_id'],
                retry_id=job_datum['taskcluster_retry_id'])
        except IntegrityError:
            # Metadata row already exists for this job; nothing to do.
            pass
    # Update job with any data that would have changed
    Job.objects.filter(id=job.id).update(
        guid=job_guid,
        signature=signature,
        build_platform=build_platform,
        machine_platform=machine_platform,
        machine=machine,
        option_collection_hash=option_collection_hash,
        job_type=job_type,
        job_group=job_group,
        product=product,
        result=result,
        state=state,
        tier=tier,
        submit_time=submit_time,
        start_time=start_time,
        end_time=end_time,
        last_modified=datetime.now(),
        push_id=push_id)
    # --- Artifacts ---------------------------------------------------------
    artifacts = job_datum.get('artifacts', [])
    has_text_log_summary = any(x for x in artifacts
                               if x['name'] == 'text_log_summary')
    if artifacts:
        artifacts = serialize_artifact_json_blobs(artifacts)
        # need to add job guid to artifacts, since they likely weren't
        # present in the beginning
        for artifact in artifacts:
            if not all(k in artifact for k in ("name", "type", "blob")):
                raise ValueError(
                    "Artifact missing properties: {}".format(artifact))
            # Ensure every artifact has a ``job_guid`` value.
            # It is legal to submit an artifact that doesn't have a
            # ``job_guid`` value.  But, if missing, it should inherit that
            # value from the job itself.
            if "job_guid" not in artifact:
                artifact["job_guid"] = job_guid
        store_job_artifacts(artifacts)
    # --- Log references and parse scheduling -------------------------------
    log_refs = job_datum.get('log_references', [])
    job_logs = []
    if log_refs:
        for log in log_refs:
            name = log.get('name') or 'unknown'
            name = name[0:50]
            url = log.get('url') or 'unknown'
            url = url[0:255]
            # this indicates that a summary artifact was submitted with
            # this job that corresponds to the buildbot_text log url.
            # Therefore, the log does not need parsing.  So we should
            # ensure that it's marked as already parsed.
            if has_text_log_summary and name == 'buildbot_text':
                parse_status = JobLog.PARSED
            else:
                # Map the submitter's textual parse status onto the
                # JobLog status constant; default to PENDING.
                parse_status_map = dict([(k, v) for (v, k) in
                                         JobLog.STATUSES])
                mapped_status = parse_status_map.get(
                    log.get('parse_status'))
                if mapped_status:
                    parse_status = mapped_status
                else:
                    parse_status = JobLog.PENDING
            jl, _ = JobLog.objects.get_or_create(
                job=job, name=name, url=url, defaults={
                    'status': parse_status
                })
            job_logs.append(jl)
        _schedule_log_parsing(job, job_logs, result)
    return job_guid
def _schedule_log_parsing(job, job_logs, result):
    """Kick off the initial task that parses the log data.

    log_data is a list of job log objects and the result for that job
    """
    # importing here to avoid an import loop
    from treeherder.log_parser.tasks import parse_logs

    parseable_names = {
        "errorsummary_json",
        "buildbot_text",
        "builds-4h"
    }
    # Only logs that are still pending AND of a known type get parsed; a
    # log submitted as already-parsed is the submitter's responsibility.
    ids_to_parse = [
        job_log.id for job_log in job_logs
        if job_log.status == JobLog.PENDING and job_log.name in parseable_names
    ]
    # TODO: Replace the use of different queues for failures vs not with the
    # RabbitMQ priority feature (since the idea behind separate queues was
    # only to ensure failures are dealt with first if there is a backlog).
    if result != 'success':
        queue, priority = 'log_parser_fail', 'failures'
    else:
        queue, priority = 'log_parser', "normal"
    parse_logs.apply_async(queue=queue,
                           args=[job.id, ids_to_parse, priority])
def store_job_data(repository, originalData):
    """
    Store job data instances into jobs db

    :param repository: Repository model instance the jobs belong to
    :param originalData: list of job datum dicts; deep-copied before use so
        the caller's data is never mutated

    Example:
    [
        {
            "revision": "24fd64b8251fac5cf60b54a915bffa7e51f636b5",
            "job": {
                "job_guid": "d19375ce775f0dc166de01daa5d2e8a73a8e8ebf",
                "name": "xpcshell",
                "desc": "foo",
                "job_symbol": "XP",
                "group_name": "Shelliness",
                "group_symbol": "XPC",
                "product_name": "firefox",
                "state": "TODO",
                "result": 0,
                "reason": "scheduler",
                "who": "sendchange-unittest",
                "submit_timestamp": 1365732271,
                "start_timestamp": "20130411165317",
                "end_timestamp": "1365733932",
                "machine": "tst-linux64-ec2-314",
                "build_platform": {
                    "platform": "Ubuntu VM 12.04",
                    "os_name": "linux",
                    "architecture": "x86_64"
                },
                "machine_platform": {
                    "platform": "Ubuntu VM 12.04",
                    "os_name": "linux",
                    "architecture": "x86_64"
                },
                "option_collection": {
                    "opt": true
                },
                "log_references": [
                    {
                        "url": "http://ftp.mozilla.org/pub/...",
                        "name": "unittest"
                    }
                ],
                artifacts:[{
                    type:" json | img | ...",
                    name:"",
                    log_urls:[
                        ]
                    blob:""
                }],
            },
            "superseded": []
        },
        ...
    ]
    """
    data = copy.deepcopy(originalData)
    # Ensure that we have job data to process
    if not data:
        return
    # remove any existing jobs that already have the same state
    data = _remove_existing_jobs(data)
    if not data:
        return
    superseded_job_guid_placeholders = []
    # TODO: Refactor this now that store_job_data() is only over called with one job at a time.
    for datum in data:
        try:
            # TODO: this might be a good place to check the datum against
            # a JSON schema to ensure all the fields are valid. Then
            # the exception we caught would be much more informative. That
            # being said, if/when we transition to only using the pulse
            # job consumer, then the data will always be vetted with a
            # JSON schema before we get to this point.
            job = datum['job']
            revision = datum['revision']
            superseded = datum.get('superseded', [])
            # a short revision is treated as a prefix of the full 40-char sha
            revision_field = 'revision__startswith' if len(revision) < 40 else 'revision'
            filter_kwargs = {'repository': repository, revision_field: revision}
            push_id = Push.objects.values_list('id', flat=True).get(**filter_kwargs)
            # load job
            job_guid = _load_job(repository, job, push_id)
            for superseded_guid in superseded:
                superseded_job_guid_placeholders.append(
                    # superseded by guid, superseded guid
                    [job_guid, superseded_guid]
                )
        except Exception as e:
            # Surface the error immediately unless running in production, where we'd
            # rather report it on New Relic and not block storing the remaining jobs.
            # TODO: Once buildbot support is removed, remove this as part of
            # refactoring this method to process just one job at a time.
            if 'DYNO' not in os.environ:
                raise
            logger.exception(e)
            # make more fields visible in new relic for the job
            # where we encountered the error
            datum.update(datum.get("job", {}))
            newrelic.agent.record_exception(params=datum)
            # skip any jobs that hit errors in these stages.
            continue
    # Update the result/state of any jobs that were superseded by those ingested above.
    if superseded_job_guid_placeholders:
        # NOTE(review): each placeholder is [superseding guid, superseded guid],
        # so ``superseded_by_guid`` below is actually the guid of the job being
        # superseded -- the filter is correct, but the name is misleading.
        for (job_guid, superseded_by_guid) in superseded_job_guid_placeholders:
            Job.objects.filter(guid=superseded_by_guid).update(
                result='superseded',
                state='completed')
|
import os
import re
import shutil
import zipfile
from . import Collection
from .hooks import runHook
from .lang import _
from .utils import ids2str, json, splitFields
class Exporter(object):
    """Base class for exporters: holds the collection and an optional
    deck id (``did``) restricting what gets exported."""

    def __init__(self, col, did=None):
        self.col = col
        # deck id to limit the export to; None exports the whole collection
        self.did = did

    def exportInto(self, path):
        """Export to *path*, delegating the actual writing to doExport()."""
        self._escapeCount = 0
        # use a context manager so the file is closed even if doExport raises
        with open(path, "wb") as file:
            self.doExport(file)

    def escapeText(self, text):
        "Escape newlines, tabs, CSS and quotechar."
        text = text.replace("\n", "<br>")
        text = text.replace("\t", " " * 8)
        # drop inline stylesheets so they don't pollute the text export
        text = re.sub("(?i)<style>.*?</style>", "", text)
        if "\"" in text:
            # CSV-style quoting: double embedded quotes and wrap the field
            text = "\"" + text.replace("\"", "\"\"") + "\""
        return text

    def cardIds(self):
        """Return the card ids to export (whole collection, or the selected
        deck including its subdecks); also sets self.count."""
        if not self.did:
            cids = self.col.db.list("select id from cards")
        else:
            cids = self.col.decks.cids(self.did, children=True)
        self.count = len(cids)
        return cids
class TextCardExporter(Exporter):
    """Export cards as tab-separated question/answer pairs, one per line."""

    key = _("Cards in Plain Text")
    ext = ".txt"
    hideTags = True

    def __init__(self, col):
        Exporter.__init__(self, col)

    def doExport(self, file):
        """Write each card as "question<TAB>answer\\n", utf-8 encoded."""
        ids = sorted(self.cardIds())
        # strids = ids2str(ids)

        def esc(s):
            # strip off the repeated question in answer if exists
            s = re.sub("(?si)^.*<hr id=answer>\n*", "", s)
            return self.escapeText(s)

        # accumulate rows in a list and join once -- the original built the
        # output with repeated ``+=``, which is quadratic on large decks
        rows = []
        for cid in ids:
            c = self.col.getCard(cid)
            rows.append(esc(c.q()) + "\t" + esc(c.a()) + "\n")
        file.write("".join(rows).encode("utf-8"))
class TextNoteExporter(Exporter):
    """Export notes (not individual cards) as tab-separated text."""
    key = _("Notes in Plain Text")
    ext = ".txt"
    def __init__(self, col):
        Exporter.__init__(self, col)
        # prepend the note identifier as the first column
        self.includeID = False
        # append the note's tags as the last column
        self.includeTags = True
    def doExport(self, file):
        """Write one line per note: [id]\\tfield1\\tfield2...[\\ttags]."""
        cardIds = self.cardIds()
        data = []
        # NOTE(review): the query selects ``guid`` but the loop binds it to
        # ``id`` -- the "note id" column written below is actually the guid.
        for id, flds, tags in self.col.db.execute("""
select guid, flds, tags from notes
where id in
(select nid from cards
where cards.id in %s)""" % ids2str(cardIds)):
            row = []
            # note id
            if self.includeID:
                row.append(str(id))
            # fields
            row.extend([self.escapeText(f) for f in splitFields(flds)])
            # tags
            if self.includeTags:
                row.append(tags.strip())
            data.append("\t".join(row))
        self.count = len(data)
        out = "\n".join(data)
        file.write(out.encode("utf-8"))
class AnkiExporter(Exporter):
    """Export a deck (or the whole collection) into a bare .anki2 file."""
    key = _("Anki 2.0 Deck")
    ext = ".anki2"
    def __init__(self, col):
        Exporter.__init__(self, col)
        # also export scheduling information (card state, revlog)
        self.includeSched = False
        # collect referenced media file names into self.mediaFiles
        self.includeMedia = True
    def exportInto(self, path):
        """Build a fresh collection at *path* containing the selected cards.

        Side effects: sets self.mediaFiles (media referenced by exported
        notes), self.mediaDir, and self.count (number of exported cards).
        """
        # create a new collection at the target
        try:
            os.unlink(path)
        except (IOError, OSError):
            pass
        self.dst = Collection(path)
        self.src = self.col
        # find cards
        if not self.did:
            cids = self.src.db.list("select id from cards")
        else:
            cids = self.src.decks.cids(self.did, children=True)
        # copy cards, noting used nids
        nids = {}
        data = []
        for row in self.src.db.execute(
            "select * from cards where id in " + ids2str(cids)):
            # row[1] is the card's note id
            nids[row[1]] = True
            data.append(row)
        self.dst.db.executemany(
            "insert into cards values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
            data)
        # notes
        strnids = ids2str(list(nids.keys()))
        notedata = []
        for row in self.src.db.all(
            "select * from notes where id in "+strnids):
            # remove system tags if not exporting scheduling info
            if not self.includeSched:
                row = list(row)
                # row[5] is the tags column
                row[5] = self.removeSystemTags(row[5])
            notedata.append(row)
        self.dst.db.executemany(
            "insert into notes values (?,?,?,?,?,?,?,?,?,?,?)",
            notedata)
        # models used by the notes
        mids = self.dst.db.list(
            "select distinct mid from notes where id in " + strnids)
        # card history and revlog
        if self.includeSched:
            data = self.src.db.all(
                "select * from revlog where cid in " + ids2str(cids))
            self.dst.db.executemany(
                "insert into revlog values (?,?,?,?,?,?,?,?,?)",
                data)
        else:
            # need to reset card state
            self.dst.sched.resetCards(cids)
        # models - start with zero
        self.dst.models.models = {}
        for m in self.src.models.all():
            if int(m['id']) in mids:
                self.dst.models.update(m)
        # decks
        if not self.did:
            dids = []
        else:
            # selected deck plus all of its subdecks
            dids = [self.did] + [
                x[1] for x in self.src.decks.children(self.did)]
        dconfs = {}
        for d in self.src.decks.all():
            # deck id 1 is the default deck, which dst already has
            if str(d['id']) == "1":
                continue
            if dids and d['id'] not in dids:
                continue
            if not d['dyn'] and d['conf'] != 1:
                if self.includeSched:
                    # remember non-default deck confs so we can copy them below
                    dconfs[d['conf']] = True
            if not self.includeSched:
                # scheduling not included, so reset deck settings to default
                d = dict(d)
                d['conf'] = 1
            self.dst.decks.update(d)
        # copy used deck confs
        for dc in self.src.decks.allConf():
            if dc['id'] in dconfs:
                self.dst.decks.updateConf(dc)
        # find used media
        media = {}
        self.mediaDir = self.src.media.dir()
        if self.includeMedia:
            for row in notedata:
                # row[6] is the flds column, row[2] the model id
                flds = row[6]
                mid = row[2]
                for file in self.src.media.filesInStr(mid, flds):
                    media[file] = True
            if self.mediaDir:
                # files starting with "_" are shared assets (fonts, js, ...)
                for fname in os.listdir(self.mediaDir):
                    if fname.startswith("_"):
                        media[fname] = True
        self.mediaFiles = list(media.keys())
        self.dst.crt = self.src.crt
        # todo: tags?
        self.count = self.dst.cardCount()
        self.dst.setMod()
        self.postExport()
        self.dst.close()
    def postExport(self):
        # overwrite to apply customizations to the deck before it's closed,
        # such as update the deck description
        pass
    def removeSystemTags(self, tags):
        """Strip the scheduling-related 'marked' and 'leech' tags."""
        return self.src.tags.remFromStr("marked leech", tags)
class AnkiPackageExporter(AnkiExporter):
    """Export into a zipped .apkg file containing the collection + media."""
    key = _("Anki Deck Package")
    ext = ".apkg"
    def __init__(self, col):
        AnkiExporter.__init__(self, col)
    def exportInto(self, path):
        """Write a .apkg zip at *path*; chooses verbatim vs filtered export."""
        # open a zip file
        z = zipfile.ZipFile(path, "w", zipfile.ZIP_DEFLATED)
        # if all decks and scheduling included, full export
        if self.includeSched and not self.did:
            media = self.exportVerbatim(z)
        else:
            # otherwise, filter
            media = self.exportFiltered(z, path)
        # media map
        z.writestr("media", json.dumps(media))
        z.close()
    def exportFiltered(self, z, path):
        """Export a filtered collection plus its media; return the media map."""
        # export into the anki2 file
        colfile = path.replace(".apkg", ".anki2")
        AnkiExporter.exportInto(self, colfile)
        z.write(colfile, "collection.anki2")
        # and media
        self.prepareMedia()
        media = {}
        # media files are stored in the zip under ascending numeric names;
        # the returned map translates those numbers back to real file names
        for c, file in enumerate(self.mediaFiles):
            c = str(c)
            mpath = os.path.join(self.mediaDir, file)
            if os.path.exists(mpath):
                z.write(mpath, c)
                media[c] = file
        # tidy up intermediate files
        os.unlink(colfile)
        p = path.replace(".apkg", ".media.db2")
        if os.path.exists(p):
            os.unlink(p)
        # NOTE(review): the chdir before rmtree looks load-bearing for
        # relative paths -- confirm before reordering these two calls
        os.chdir(self.mediaDir)
        shutil.rmtree(path.replace(".apkg", ".media"))
        return media
    def exportVerbatim(self, z):
        """Zip the live collection file as-is; return the media map."""
        # close our deck & write it into the zip file, and reopen
        self.count = self.col.cardCount()
        self.col.close()
        z.write(self.col.path, "collection.anki2")
        self.col.reopen()
        # copy all media
        if not self.includeMedia:
            return {}
        media = {}
        mdir = self.col.media.dir()
        for c, file in enumerate(os.listdir(mdir)):
            c = str(c)
            mpath = os.path.join(mdir, file)
            if os.path.exists(mpath):
                z.write(mpath, c)
                media[c] = file
        return media
    def prepareMedia(self):
        # chance to move each file in self.mediaFiles into place before media
        # is zipped up
        pass
def exporters():
    """Return (label, exporter class) pairs for the export dialog, after
    letting add-ons extend the list via the ``exportersList`` hook."""
    # renamed from ``id``, which shadowed the builtin of the same name
    def entry(obj):
        # label shown to the user: "Key (*.ext)"
        return ("%s (*%s)" % (obj.key, obj.ext), obj)
    exps = [
        entry(AnkiPackageExporter),
        entry(TextNoteExporter),
        entry(TextCardExporter),
    ]
    runHook("exportersList", exps)
    return exps
|
from . import res_partner
|
{
'name': "MIS Builder Cost Center Filter",
'version': '8.0.1.0.0',
'category': 'Reporting',
'summary': """
Add Cost Center filters to MIS Reports
""",
'author':
'ICTSTUDIO,'
'ACSONE SA/NV,'
'Odoo Community Association (OCA)',
'website': "http://www.ictstudio.eu",
'license': 'AGPL-3',
'depends': [
'mis_builder',
'account_cost_center'
],
'data': [
'views/mis_report_view.xml',
'views/mis_builder_cost_center.xml',
],
'qweb': [
'static/src/xml/mis_widget.xml'
],
}
|
__license__ = "GNU Affero General Public License, Ver.3"
__author__ = "Pablo Alvarez de Sotomayor Posadillo"
import os
import os.path
import subprocess
import httplib
from datetime import datetime
from django import forms
from django.http import HttpResponse, HttpResponseRedirect
from django.http import Http404
from django.template import RequestContext
from django.template.loader import render_to_string
from kirinki.config import Config
from kirinki.common import ErrorClear
from kirinki.mainviewer import MainViewer
from kirinki.models import streaming
from kirinki.models import video
from kirinki.message import Message
from kirinki.user import LoginForm
class StreamingController():
    '''Class that implements the Streaming controller'''
    def __init__(self, request):
        if request.session.get('isConfig', False) is False:
            Config.getSession(request.session)
        # Left block: show the login form to anonymous visitors only
        leftBlocks = []
        if not request.session['user'].is_authenticated():
            leftBlocks = [render_to_string('kirinki/section.html', {'title' : 'login', 'content': render_to_string('kirinki/form.html', {'form' : LoginForm(), 'action' : request.session['base_url'] + '/login'}, context_instance=RequestContext(request))})]
        # Center block: one section per active streaming
        centerBlocks = []
        try:
            videoStr = streaming.objects.all()
            # BUGFIX: the loop previously *assigned* centerBlocks on each
            # iteration, so only the last streaming was ever rendered; append
            # instead.  Loop variable also renamed from ``video``, which
            # shadowed the imported ``video`` model.
            for stream in videoStr:
                centerBlocks.append(render_to_string('kirinki/section.html', {'title' : 'login', 'content': str(stream.idStreaming)}))
        except streaming.DoesNotExist:
            pass
        self.render = MainViewer(request).render(leftBlocks, centerBlocks, [])
    def getRender(self):
        '''This method return the html rendered'''
        return self.render
class StrForm(forms.Form):
    # Form to start a stream: either re-stream an uploaded video (isVideo)
    # or relay an external source described by IP/port/muxer.
    # NOTE: field order is rendering order in Django forms -- keep it.
    isVideo = forms.BooleanField(label='Emitir Video',
                                 required=False)
    srcIP = forms.IPAddressField(label='Ip de origen',
                                 required=False)
    srcPort = forms.IntegerField(label='Puerto de origen',
                                 required=False)
    # choice values are the VLC mux names; labels are the user-facing formats
    srcMux = forms.ChoiceField(label='Multiplexor de origen',
                               choices=[('ogg', 'ogg'), ('ffmpeg{mux=flv}', 'mp4'), ('webm', 'webm')],
                               required=False)
    # populated at runtime with the current user's videos (see userVideos)
    vStream = forms.ChoiceField(label='Video a emitir',
                                choices=[],
                                required=True)
class StreamController():
    '''Class to implement the Stream controller'''
    def __init__(self, request):
        if request.session.get('isConfig', False) is False:
            Config.getSession(request.session)
        if request.method == 'GET':
            # GET request: render the streaming form with defaults
            form = StrForm(error_class=ErrorClear)
            form.fields['isVideo'].initial = False
            form.fields['srcIP'].initial = request.META['REMOTE_ADDR']
            form.fields['srcPort'].initial = 9000
            form.fields['vStream'].choices = self.userVideos(request)
            self.render = MainViewer(request).render([], [render_to_string('kirinki/form.html', {'form' : form, 'action' : request.session['base_url'] + '/stream', 'id' : 'stream'}, context_instance=RequestContext(request))], [])
        elif request.method == 'POST':
            # POST request: validate and spawn the cvlc streaming process
            form = StrForm(request.POST, error_class=ErrorClear)
            form.fields['isVideo'].initial = False
            form.fields['srcIP'].initial = request.META['REMOTE_ADDR']
            form.fields['srcPort'].initial = 9000
            form.fields['vStream'].choices = self.userVideos(request)
            # Check if the form data is valid and try to start the streaming
            if form.is_valid():
                try:
                    v = video.objects.filter(idVideo=form.cleaned_data['vStream'])[0]
                except video.DoesNotExist:
                    v = None
                if form.cleaned_data['isVideo'] is True and v is not None:
                    # BUGFIX: this was ``clvc = None`` (typo) while the Popen
                    # handle was assigned to ``cvlc``, so the check below
                    # always saw None and the streaming row was never saved.
                    cvlc = None
                    if v.format == 'video/mp4':
                        cvlc = subprocess.Popen(["/usr/bin/cvlc " + v.path + " --sout '#http{mux=ffmpeg{mux=flv},dst=" + request.session['strIP'] + ":" + request.session['strPort'] + "/} -no-sout-rtp-sap -no-sout-standard-sap -sout-keep' --ttl 12"],
                                                shell=True)
                    elif v.format == 'video/webm':
                        cvlc = subprocess.Popen(["/usr/bin/cvlc " + v.path + " --sout '#http{mux=webm,dst=" + request.session['strIP'] + ":" + request.session['strPort'] + "/} -no-sout-rtp-sap -no-sout-standard-sap -sout-keep' --ttl 12"],
                                                shell=True)
                    elif v.format == 'video/ogg':
                        cvlc = subprocess.Popen(["/usr/bin/cvlc " + v.path + " --sout '#http{mux=ogg,dst=" + request.session['strIP'] + ":" + request.session['strPort'] + "/} -no-sout-rtp-sap -no-sout-standard-sap -sout-keep' --ttl 12"],
                                                shell=True)
                    else:
                        Message.pushMessage(request, Message.ERROR,'Video type not supported')
                    if cvlc is not None:
                        # record the running stream so it can be listed/stopped
                        vStream = streaming(src=form.cleaned_data['srcIP'], port=form.cleaned_data['srcPort'], mux=form.cleaned_data['srcMux'], vMode=form.cleaned_data['isVideo'], pid=cvlc.pid,video=v, owner=request.session['user'])
                        vStream.save()
                        Message.pushMessage(request, Message.INFO,'Video streaming')
                elif form.cleaned_data['isVideo'] is False:
                    # external source: relay from srcIP:srcPort with srcMux
                    if form.cleaned_data['srcMux'] != "ffmpeg{mux=flv}" and form.cleaned_data['srcMux'] != "webm" and form.cleaned_data['srcMux'] != "ogg":
                        Message.pushMessage(request, Message.ERROR,'Video type not supported')
                    else:
                        cvlc = subprocess.Popen(["/usr/bin/cvlc http://" + str(form.cleaned_data['srcIP']) + ":" + str(form.cleaned_data['srcPort']) + " --sout '#http{mux=" + str(form.cleaned_data['srcMux']) + ",dst=" + request.session['strIP'] + ":" + request.session['strPort'] + "/} -no-sout-rtp-sap -no-sout-standard-sap -sout-keep' --ttl 12"],
                                                shell=True)
                        vStream = streaming(src=form.cleaned_data['srcIP'], port=form.cleaned_data['srcPort'], mux=form.cleaned_data['srcMux'], vMode=form.cleaned_data['isVideo'], pid=cvlc.pid,video=v, owner=request.session['user'])
                        vStream.save()
                        Message.pushMessage(request, Message.ERROR, 'External video streaming.')
                else:
                    Message.pushMessage(request, Message.ERROR, 'If you select the video mode you must select a video.')
                # os.waitpid(p.pid, 0)[1]
                self.render = HttpResponseRedirect('/streaming')
            else:
                # invalid form: report field errors and bounce back
                for error in form.errors:
                    Message.pushMessage(request, Message.ERROR, 'Error en ' + error + ': ' + str(form._errors[error]))
                if request.META.get('HTTP_REFERER', False) is not False:
                    self.render = HttpResponseRedirect(request.META['HTTP_REFERER'])
                else:
                    self.render = HttpResponseRedirect('/index')
        else:
            raise Http404
    def userVideos(self, request):
        '''This method return the videos owned by the actual user.'''
        init = []
        try:
            videos = video.objects.filter(owner=request.session['user'])
            for v in videos:
                init.append((v.idVideo, v.name))
        except video.DoesNotExist:
            pass
        return init
    def getRender(self):
        '''This method return the html rendered'''
        return self.render
class VideoController():
    '''Class to implement the Video controller'''
    # Definition of the video actions
    LIST = 0
    VIEW = 1
    DELETE = 2
    REFERENCE = 3
    def __init__(self, request, action=0, key=None):
        '''Render the requested video action.

        action is one of LIST/VIEW/DELETE/REFERENCE; key is the video id
        for VIEW and DELETE.
        '''
        if request.session.get('isConfig', False) is False:
            Config.getSession(request.session)
        # Blocks assigned to the left area
        leftBlocks = []
        if not request.session['user'].is_authenticated():
            leftBlocks = [render_to_string('kirinki/section.html', {'title' : 'login', 'content': render_to_string('kirinki/form.html', {'form' : LoginForm(), 'action' : request.session['base_url'] + '/login'}, context_instance=RequestContext(request))})]
        else:
            try:
                myVideos = video.objects.filter(owner = request.session['user'])
                leftBlocks = [render_to_string('kirinki/section.html', {'title' : 'Mis vídeos', 'content' : render_to_string('kirinki/myVideo.html', {'videos' : myVideos, 'session' : request.session}).encode('utf-8')})]
            except video.DoesNotExist:
                pass
        # Blocks assigned to the center area
        centerBlocks = []
        if action == self.LIST:
            try:
                videoList = video.objects.all()
                centerBlocks = [render_to_string('kirinki/section.html', {'title' : 'Lista de videos', 'content': render_to_string('kirinki/videoList.html', {'videos' : videoList, 'session' : request.session}).encode('utf-8')})]
            except video.DoesNotExist:
                pass
        elif action == self.VIEW:
            if key is not None:
                try:
                    v = video.objects.get(idVideo=key)
                    # base media path (no extension) used to probe for the
                    # transcoded variants next to the original upload
                    bfile = '/media/'+v.path[v.path.rfind('/')+1:v.path.rfind('.')]
                    src = {'orig' : request.session['base_url'] + '/media/'+v.path[v.path.rfind('/')+1:]}
                    if os.path.exists(v.path[:v.path.rfind('.')]+'.ogv'):
                        src['ogv'] = request.session['base_url'] +bfile+'.ogv'
                    if os.path.exists(v.path[:v.path.rfind('.')]+'.webm'):
                        src['webm'] = request.session['base_url'] +bfile+'.webm'
                    if os.path.exists(v.path[:v.path.rfind('.')]+'.mp4'):
                        src['mp4'] = request.session['base_url'] +bfile+'.mp4'
                    if os.path.exists(v.path[:v.path.rfind('.')]+'.flv'):
                        src['flv'] = request.session['base_url'] +bfile+'.flv'
                    # flash player fallbacks for browsers without HTML5 video
                    src['flash'] = request.session['base_url']+'/static/flowplayer/flowplayer-3.2.5.swf'
                    src['flash_str'] = request.session['base_url']+'/static/flowplayer.pseudostreaming/flowplayer.pseudostreaming-3.2.5.swf'
                    centerBlocks = [render_to_string('kirinki/section.html', {'title' : v.name, 'content': render_to_string('kirinki/video.html', {'controls' : True, 'src' : src})})]
                except video.DoesNotExist:
                    pass
        elif action == self.DELETE:
            # only the owner's video can be deleted (filtered in the query)
            try:
                v = video.objects.get(idVideo=key, owner=request.session['user'])
                name = v.name
                os.remove(v.path)
                v.delete()
                centerBlocks = ['<p>Video ' + name + ' deleted.</p>']
            except video.DoesNotExist:
                pass
        elif action == self.REFERENCE:
            pass
        else:
            # Error. Action not defined
            # NOTE(review): Http404 must be imported from django.http for
            # this to work -- verify the module's imports
            raise Http404
        # Blocks assigned to the right area
        # Ultimos subidos, ultimos usuarios que han subido, usuarios que mas han subido, ...
        rightBlocks = []
        self.render = MainViewer(request).render(leftBlocks, centerBlocks, rightBlocks)
    def getRender(self):
        '''This method returns the html generated'''
        return self.render
class UploadForm(forms.Form):
    # Form for uploading a video with optional transcoding targets.
    # NOTE: field order is rendering order in Django forms -- keep it.
    title = forms.CharField(label='Título',
                            min_length=5,
                            max_length=80,
                            required=True)
    description = forms.CharField(label='Descripción',
                                  min_length=5,
                                  max_length=250,
                                  required=True)
    fileUpload = forms.FileField(label='Fichero',
                                 required=True)
    # optional transcodes performed after upload
    convertMP4 = forms.BooleanField(label='Convertir a mp4',
                                    required=False)
    convertOGG = forms.BooleanField(label='Convertir a ogg',
                                    required=False)
    convertWEBM = forms.BooleanField(label='Convertir a webm',
                                     required=False)
class UploadController():
    '''Class to implement the Upload controller. This class will be merged with the VideoController'''
    def __init__(self, request):
        '''GET renders the upload form; POST stores the uploaded file and
        creates the video record.'''
        if request.session.get('isConfig', False) is False:
            Config.getSession(request.session)
        if request.method == 'GET':
            # GET request
            leftBlocks = [self.getMyVideos(request.session)]
            centerBlocks = [self.getUploadVideo(request.session['base_url'], request)]
            self.render = MainViewer(request).render(leftBlocks, centerBlocks, [])
        elif request.method == 'POST':
            # POST request.
            form = UploadForm(request.POST, request.FILES, error_class=ErrorClear)
            if form.is_valid():
                upFile = request.FILES['fileUpload']
                if upFile.size > 0:
                    # store the upload under upload_path (if configured),
                    # chunk by chunk to avoid loading it all in memory
                    path = ''
                    if request.session.get('upload_path', False):
                        path = request.session['upload_path']+'/'
                    path += upFile.name
                    destination = open(path, 'wb+')
                    for chunk in upFile.chunks():
                        destination.write(chunk)
                    destination.close()
                    v = video(name=form.cleaned_data['title'], description=form.cleaned_data['description'], path=path, format=upFile.content_type, pub_date=datetime.now(), owner=request.session['user'])
                    v.save()
                    # transcoding hooks -- not implemented yet
                    if form.cleaned_data['convertMP4'] and path[v.path.rfind('.'):].lower() != 'mp4':
                        pass
                    if form.cleaned_data['convertOGG'] and path[v.path.rfind('.'):].lower() != 'ogg':
                        pass
                    if form.cleaned_data['convertWEBM'] and path[v.path.rfind('.'):].lower() != 'web':
                        pass
                    if path[v.path.rfind('.'):].lower() != 'flv':
                        pass
            else:
                # invalid form: report field errors and bounce back
                for error in form.errors:
                    Message.pushMessage(request, Message.ERROR, 'Error en ' + error + ': ' + str(form._errors[error]))
            if request.META.get('HTTP_REFERER', False) is not False:
                self.render = HttpResponseRedirect(request.META['HTTP_REFERER'])
            else:
                self.render = HttpResponseRedirect('/index')
        else:
            raise Http404
    def getMyVideos(self, session):
        '''This method return the videos owned by the actual user.'''
        content = ''
        try:
            myVideos = video.objects.filter(owner = session['user'])
            content = render_to_string('kirinki/myVideo.html', {'videos' : myVideos, 'session' : session}).encode('utf-8')
        except video.DoesNotExist:
            pass
        return render_to_string('kirinki/section.html', {'title' : 'Mis vídeos', 'content' : content})
    def getUploadVideo(self, base_url, request):
        '''Render the upload form section.

        NOTE(review): the form is bound to request.POST/request.FILES even on
        GET, which renders an empty-but-validated form -- confirm intended.
        '''
        content = render_to_string('kirinki/form.html', {'form' : UploadForm(request.POST, request.FILES, error_class=ErrorClear), 'action' : base_url + '/upload', 'upload' : True}, context_instance=RequestContext(request))
        return render_to_string('kirinki/section.html', {'title' : 'Subir vídeo', 'content' : content})
    def getRender(self):
        '''This method returns the html generated'''
        return self.render
|
# Configuration definition for adapting the properties of a fitting run.
from pts.modeling.core.environment import load_modeling_environment_cwd
from pts.modeling.config.component import definition
# Load the modeling environment from the current working directory
environment = load_modeling_environment_cwd()
runs = environment.fitting_runs
# Property categories that can be adapted
properties = ["representation", "filters", "ranges", "genetic", "grid", "units", "types"]
# Work on a copy so the imported base definition is not mutated
definition = definition.copy()
# The fitting run: fixed when there is exactly one, required otherwise
if runs.empty: raise RuntimeError("No fitting runs are present")
elif runs.has_single: definition.add_fixed("name", "name of the fitting run", runs.single_name)
else: definition.add_required("name", "string", "name of the fitting run", choices=runs.names)
definition.add_positional_optional("properties", "string_list", "properties to adapt", default=properties, choices=properties)
# Name-based filters for selecting which properties to adapt
definition.add_optional("contains", "string", "only adapt properties containing this string in their name")
definition.add_optional("not_contains", "string", "don't adapt properties containing this string in their name")
definition.add_optional("exact_name", "string", "only adapt properties with this exact string as their name")
definition.add_optional("exact_not_name", "string", "don't adapt properties with this exact string as their name")
definition.add_optional("startswith", "string", "only adapt properties whose name starts with this string")
definition.add_optional("endswith", "string", "only adapt properties whose name starts with this string")
definition.add_flag("save", "save adapted properties", True)
|
import os
from importlib import import_module
from django.core.management.base import BaseCommand
from django.utils import translation
from django.conf import settings
def get_modules():
    """Return the file names found in the utils/upgrade directory."""
    upgrade_dir = os.path.join(settings.BASE_DIR, 'utils', 'upgrade')
    # only the top level matters; take the first walk() entry
    _, _, filenames = next(os.walk(upgrade_dir))
    return filenames
class Command(BaseCommand):
    """
    Upgrades Janeway
    """
    help = "Upgrades an install from one version to another."

    def add_arguments(self, parser):
        """Adds arguments to Django's management command-line parser.

        :param parser: the parser to which the required arguments will be added
        :return: None
        """
        parser.add_argument('--path', required=False)

    def handle(self, *args, **options):
        """Run the requested upgrade module, or list the available ones.

        :param options: optional 'path' naming a module in utils/upgrade
            (without the .py extension)
        :return: None
        """
        if not options.get('path'):
            print('No upgrade selected. Available upgrade paths: ')
            for file in get_modules():
                module_name = file.split('.')[0]
                print('- {module_name}'.format(module_name=module_name))
            # BUGFIX: the hint previously said ``--script``, but the only
            # argument registered above is ``--path``.
            print('To run an upgrade use the following: `python3 manage.py run_upgrade --path 12_13`')
        else:
            translation.activate('en')
            upgrade_module_name = options.get('path')
            upgrade_module_path = 'utils.upgrade.{module_name}'.format(module_name=upgrade_module_name)
            try:
                upgrade_module = import_module(upgrade_module_path)
                upgrade_module.execute()
            except ImportError as e:
                print('There was an error running the requested upgrade: ')
                print(e)
|
from .naive import StratNaive
import random
import numpy as np
class BetaDecreaseStrat(StratNaive):
    """Naive strategy whose vocabulary-update ``beta`` decreases by a fixed
    step on every speaker/hearer update, clamped at zero."""

    def __init__(self, vu_cfg, time_scale=0.9, **strat_cfg2):
        StratNaive.__init__(self, vu_cfg=vu_cfg, **strat_cfg2)
        # step size is 1/time_scale per interaction
        self.time_scale = time_scale

    def _decay_beta(self):
        # shared by both update paths (previously duplicated inline)
        self.voc_update.beta = max(0, self.voc_update.beta - 1. / self.time_scale)

    def update_speaker(self, ms, w, mh, voc, mem, bool_succ, context=[]):
        self._decay_beta()
        return self.voc_update.update_speaker(ms, w, mh, voc, mem, bool_succ, context)

    def update_hearer(self, ms, w, mh, voc, mem, bool_succ, context=[]):
        self._decay_beta()
        return self.voc_update.update_hearer(ms, w, mh, voc, mem, bool_succ, context)
|
from django.db import connection
from django.conf import settings
from django.utils import timezone
from taiga.projects.history import services as history_service
from taiga.projects.history.choices import HistoryType
from . import tasks
def _get_project_webhooks(project):
webhooks = []
for webhook in project.webhooks.all():
webhooks.append({
"id": webhook.pk,
"url": webhook.url,
"key": webhook.key,
})
return webhooks
def on_new_history_entry(sender, instance, created, **kwargs):
    """Signal handler: schedule webhook tasks for a new history entry.

    Returns None (doing nothing) when webhooks are disabled, the entry is
    hidden, the referenced object no longer exists, or the history type has
    no associated webhook task.
    """
    if not settings.WEBHOOKS_ENABLED:
        return None
    if instance.is_hidden:
        return None
    model = history_service.get_model_from_key(instance.key)
    pk = history_service.get_pk_from_key(instance.key)
    try:
        obj = model.objects.get(pk=pk)
    except model.DoesNotExist:
        # Catch simultaneous DELETE request
        return None
    webhooks = _get_project_webhooks(obj.project)
    if instance.type == HistoryType.create:
        task = tasks.create_webhook
        extra_args = []
    elif instance.type == HistoryType.change:
        task = tasks.change_webhook
        extra_args = [instance]
    elif instance.type == HistoryType.delete:
        task = tasks.delete_webhook
        extra_args = []
    else:
        # BUGFIX: an unknown history type previously fell through and crashed
        # below with an UnboundLocalError on ``task``; bail out explicitly
        return None
    by = instance.owner
    date = timezone.now()
    webhooks_args = []
    for webhook in webhooks:
        args = [webhook["id"], webhook["url"], webhook["key"], by, date, obj] + extra_args
        webhooks_args.append(args)
    # single lambda, so the late-binding capture of task/webhooks_args is safe
    connection.on_commit(lambda: _execute_task(task, webhooks_args))
def _execute_task(task, webhooks_args):
    """Invoke *task* once per webhook argument list.

    Dispatches through celery (``task.delay``) when it is enabled, otherwise
    calls the task synchronously.
    """
    for args in webhooks_args:
        runner = task.delay if settings.CELERY_ENABLED else task
        runner(*args)
|
'''
'''
import unittest, copy
from testRoot import RootClass
from noink.user_db import UserDB
from noink.entry_db import EntryDB
class AddEntry(RootClass):
    """Check that an added entry keeps the title it was stored with."""

    def test_AddEntry(self):
        user_db = UserDB()
        entry_db = EntryDB()
        author = user_db.add("jontest", "pass", "Jon Q. Testuser")
        title = 'Little Buttercup'
        body = ('There once was a man from Nantucket,'
                'who kept his wife in a Bucket.'
                "Wait... how'd she fit in that bucket anyway?")
        # deep-copy the title so the comparison below can't be fooled by
        # the DB layer holding the same object
        stored = entry_db.add(copy.deepcopy(title), body, author)
        self.assertTrue(stored.title == title)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from unittest import TestCase
from micall.drivers.run_info import RunInfo
from micall.drivers.sample import Sample
from micall.drivers.sample_group import SampleGroup
class RunInfoTest(TestCase):
    """Tests for RunInfo.get_all_samples."""

    def test_get_all_samples(self):
        # two groups: a paired group (1a/1b) and a singleton (2)
        run_info = RunInfo(
            sample_groups=[SampleGroup(Sample(fastq1='1a_R1_001.fastq'),
                                       Sample(fastq1='1b_R1_001.fastq')),
                           SampleGroup(Sample(fastq1='2_R1_001.fastq'))])

        fastq_paths = [sample.fastq1 for sample in run_info.get_all_samples()]

        # samples come back flattened in group order
        expected_fastq_paths = ['1a_R1_001.fastq',
                                '1b_R1_001.fastq',
                                '2_R1_001.fastq']
        self.assertEqual(expected_fastq_paths, fastq_paths)
|
import os
import arrow
import magic
import hashlib
import logging
import requests
from io import BytesIO
from PIL import Image
from flask import json
from .image import get_meta
from .video import get_meta as video_meta
import base64
from superdesk.errors import SuperdeskApiError
logger = logging.getLogger(__name__)
def hash_file(afile, hasher, blocksize=65536):
    """Feed *afile* through *hasher* in *blocksize* chunks; return hex digest."""
    while True:
        chunk = afile.read(blocksize)
        if not chunk:
            break
        hasher.update(chunk)
    return hasher.hexdigest()
def get_file_name(file):
    # content-addressed name: sha256 hex digest of the file's bytes
    return hash_file(file, hashlib.sha256())
def download_file_from_url(url):
    """Fetch *url* and return (stream, stub file name, mime type).

    Raises SuperdeskApiError.internalError on any non-200/201 response.
    """
    rv = requests.get(url, timeout=15)
    if rv.status_code not in (200, 201):
        raise SuperdeskApiError.internalError('Failed to retrieve file from URL: %s' % url)
    # sniff the mime type from the content rather than trusting headers
    # NOTE(review): .decode() implies from_buffer returns bytes here --
    # depends on the installed magic library; confirm
    mime = magic.from_buffer(rv.content, mime=True).decode('UTF-8')
    ext = mime.split('/')[1]
    name = 'stub.' + ext
    return BytesIO(rv.content), name, mime
def download_file_from_encoded_str(encoded_str):
    """Decode a data-URI style base64 string into (stream, name, mime).

    Expects the form ``data:<mime>;base64,<payload>``.
    """
    parts = encoded_str.split(';base64,')
    header = parts[0]
    mime = header.split(':')[1]
    ext = header.split('/')[1]
    name = 'web_capture.' + ext
    payload = base64.b64decode(parts[1])
    return BytesIO(payload), name, mime
def process_file_from_stream(content, content_type=None):
    """Read a file-like object and return (file_name, content_type, metadata).

    file_name is the sha256 of the content, metadata is json-encoded and
    includes the byte length. The stream is left rewound to position 0.
    """
    content_type = content_type or content.content_type
    content = BytesIO(content.read())
    if 'application/' in content_type:
        # generic type from the client: sniff the real type from the bytes
        content_type = magic.from_buffer(content.getvalue(), mime=True).decode('UTF-8')
        content.seek(0)
    file_type, ext = content_type.split('/')
    try:
        metadata = process_file(content, file_type)
    except OSError:  # error from PIL when image is supposed to be an image but is not.
        raise SuperdeskApiError.internalError('Failed to process file')
    file_name = get_file_name(content)
    content.seek(0)
    metadata = encode_metadata(metadata)
    metadata.update({'length': json.dumps(len(content.getvalue()))})
    return file_name, content_type, metadata
def encode_metadata(metadata):
    """Lowercase the keys and json-encode the values for string storage."""
    return {key.lower(): json.dumps(value) for key, value in metadata.items()}
def decode_metadata(metadata):
    """Inverse of encode_metadata: lowercase keys and decode each value."""
    return {key.lower(): decode_val(value) for key, value in metadata.items()}
def decode_val(string_val):
    """Format dates that elastic will try to convert automatically."""
    val = json.loads(string_val)
    try:
        arrow.get(val, 'YYYY-MM-DD')  # test if it will get matched by elastic
        # normalize to arrow's default ISO string representation
        return str(arrow.get(val))
    except (Exception):
        # not a date-looking value: return it decoded as-is
        return val
def process_file(content, type):
    """Dispatch metadata extraction on the broad file *type*.

    Unknown types yield an empty metadata dict.
    """
    handlers = {
        'image': process_image,
        'audio': process_video,
        'video': process_video,
    }
    handler = handlers.get(type)
    return handler(content, type) if handler else {}
def process_video(content, type):
    """Extract audio/video metadata, leaving the stream rewound."""
    content.seek(0)
    meta = video_meta(content)
    content.seek(0)
    return meta
def process_image(content, type):
    """Extract image metadata, leaving the stream rewound."""
    content.seek(0)
    meta = get_meta(content)
    content.seek(0)
    return meta
def crop_image(content, file_name, cropping_data):
    """Crop the image stream *content* to *cropping_data* (a PIL crop box).

    Returns (True, cropped_stream) on success or (False, content) if saving
    the crop fails.
    NOTE(review): implicitly returns None when cropping_data is falsy --
    confirm callers always pass crop data before relying on the tuple shape.
    """
    if cropping_data:
        file_ext = os.path.splitext(file_name)[1][1:]
        if file_ext in ('JPG', 'jpg'):
            # PIL expects the 'jpeg' format name, not the file extension
            file_ext = 'jpeg'
        logger.debug('Opened image from stream, going to crop it s')
        content.seek(0)
        img = Image.open(content)
        cropped = img.crop(cropping_data)
        logger.debug('Cropped image from stream, going to save it')
        try:
            out = BytesIO()
            cropped.save(out, file_ext)
            out.seek(0)
            return (True, out)
        except Exception as exc:
            # BUGFIX: was ``except Exception as io``, shadowing the
            # imported io module
            logger.exception(exc)
            return (False, content)
|
"""
Copyright 2012 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import cgi
import Cookie
import dbSession
import dbShared
import MySQLdb
import ghShared
import ghLists
from jinja2 import Environment, FileSystemLoader
# --- Determine the script URL for link generation (empty when not set) -----
try:
    url = os.environ['SCRIPT_NAME']
except KeyError:
    url = ''
uiTheme = ''
form = cgi.FieldStorage()

# --- Cookie handling: prefer cookies, fall back to form fields when the
# --- browser sent none (useCookies flags which source was available) -------
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
    cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
    useCookies = 0

if useCookies:
    try:
        currentUser = cookies['userID'].value
    except KeyError:
        currentUser = ''
    try:
        loginResult = cookies['loginAttempt'].value
    except KeyError:
        loginResult = 'success'
    try:
        sid = cookies['gh_sid'].value
    except KeyError:
        sid = form.getfirst('gh_sid', '')
    try:
        uiTheme = cookies['uiTheme'].value
    except KeyError:
        uiTheme = ''
else:
    # No cookies: user id is unknown; read login state and session from the form.
    currentUser = ''
    loginResult = form.getfirst('loginAttempt', '')
    sid = form.getfirst('gh_sid', '')

logged_state = 0
linkappend = ''
disableStr = ''
# Sanitize the session id before it is used in database queries.
sid = dbShared.dbInsertSafe(sid)

if loginResult == None:
    loginResult = 'success'

# Resolve the session (30-day expiry, in seconds); a non-empty result is the
# logged-in user's identifier.
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
    logged_state = 1
    currentUser = sess
    if (uiTheme == ''):
        uiTheme = dbShared.getUserAttr(currentUser, 'themeName')
    if (useCookies == 0):
        # Cookies unavailable: carry the session id in generated links instead.
        linkappend = 'gh_sid=' + sid
else:
    disableStr = ' disabled="disabled"'
    if (uiTheme == ''):
        uiTheme = 'crafter'

pictureName = dbShared.getUserAttr(currentUser, 'pictureName')

# Emit the HTTP header and render the page (Python 2 CGI-style output).
print 'Content-type: text/html\n'
env = Environment(loader=FileSystemLoader('templates'))
env.globals['BASE_SCRIPT_URL'] = ghShared.BASE_SCRIPT_URL
template = env.get_template('waypointmaps.html')
print template.render(uiTheme=uiTheme, loggedin=logged_state, currentUser=currentUser, loginResult=loginResult, linkappend=linkappend, url=url, pictureName=pictureName, imgNum=ghShared.imgNum, galaxyList=ghLists.getGalaxyList(), planetList=ghLists.getPlanetList())
|
import os
import sys
import nose
from subprocess import CalledProcessError, check_output as run
from functools import partial
# Closure Linter executable and the options passed on every invocation.
GJSLINT_COMMAND = 'gjslint'
GJSLINT_OPTIONS = ['--strict']

# Folder containing the JavaScript sources, and the files to lint.
JS_BASE_FOLDER = os.path.join('skylines', 'public', 'js')
JS_FILES = [
    'baro.js',
    'fix-table.js',
    'flight.js',
    'general.js',
    'map.js',
    'phase-table.js',
    'topbar.js',
    'tracking.js',
    'units.js',
]
def test_js_files():
    """Nose generator test: yield one gjslint check per registered JS file."""
    for js_file in JS_FILES:
        check = partial(run_gjslint, js_file)
        # The description is what nose prints for each generated test case.
        check.description = 'gjslint {}'.format(js_file)
        yield check
def run_gjslint(filename):
    """Run gjslint on *filename* (relative to JS_BASE_FOLDER).

    Raises AssertionError when the linter reports errors, and OSError when
    the gjslint executable cannot be run at all.

    Note: the original used the Python-2-only ``except E, e`` / ``print x``
    forms; ``except E as e`` and ``print(x)`` behave identically on
    Python 2.6+ while also being valid Python 3.
    """
    path = os.path.join(JS_BASE_FOLDER, filename)
    args = [GJSLINT_COMMAND]
    args.extend(GJSLINT_OPTIONS)
    args.append(path)
    try:
        run(args)
    except CalledProcessError as e:
        # Surface the linter's output so the test failure is actionable.
        print(e.output)
        raise AssertionError('gjslint has found errors.')
    except OSError:
        raise OSError('Failed to run gjslint. Please check that you have '
                      'installed it properly.')
if __name__ == "__main__":
    # Hand this module to nose so it collects and runs the generator tests.
    sys.argv.append(__name__)
    nose.run()
|
from . import sale_order
from . import purchase_order
|
"""
Learning Tools Interoperability (LTI) module.
Resources
---------
Theoretical background and detailed specifications of LTI can be found on:
http://www.imsglobal.org/LTI/v1p1p1/ltiIMGv1p1p1.html
This module is based on the version 1.1.1 of the LTI specifications by the
IMS Global authority. For authentication, it uses OAuth1.
When responding back to the LTI tool provider, we must issue a correct
response. Types of responses and their message payload is available at:
Table A1.2 Interpretation of the 'CodeMajor/severity' matrix.
http://www.imsglobal.org/gws/gwsv1p0/imsgws_wsdlBindv1p0.html
A resource to test the LTI protocol (PHP realization):
http://www.imsglobal.org/developers/LTI/test/v1p1/lms.php
We have also begun to add support for LTI 1.2/2.0. We will keep this
docstring in synch with what support is available. The first LTI 2.0
feature to be supported is the REST API results service, see specification
at
http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
What is supported:
------------------
1.) Display of simple LTI in iframe or a new window.
2.) Multiple LTI components on a single page.
3.) The use of multiple LTI providers per course.
4.) Use of advanced LTI component that provides back a grade.
A) LTI 1.1.1 XML endpoint
a.) The LTI provider sends back a grade to a specified URL.
b.) Currently only action "update" is supported. "Read", and "delete"
actions initially weren't required.
B) LTI 2.0 Result Service JSON REST endpoint
(http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html)
a.) Discovery of all such LTI http endpoints for a course. External tools GET from this discovery
endpoint and receive URLs for interacting with individual grading units.
(see lms/djangoapps/courseware/views.py:get_course_lti_endpoints)
b.) GET, PUT and DELETE in LTI Result JSON binding
(http://www.imsglobal.org/lti/ltiv2p0/mediatype/application/vnd/ims/lis/v2/result+json/index.html)
for a provider to synchronize grades into edx-platform. Reading, Setting, and Deleting
Numeric grades between 0 and 1 and text + basic HTML feedback comments are supported, via
GET / PUT / DELETE HTTP methods respectively
"""
import datetime
from django.utils.timezone import UTC
import logging
import oauthlib.oauth1
from oauthlib.oauth1.rfc5849 import signature
import hashlib
import base64
import urllib
import textwrap
import bleach
from lxml import etree
from webob import Response
import mock
from xml.sax.saxutils import escape
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.x_module import XModule, module_attr
from xmodule.course_module import CourseDescriptor
from xmodule.lti_2_util import LTI20ModuleMixin, LTIError
from pkg_resources import resource_string
from xblock.core import String, Scope, List, XBlock
from xblock.fields import Boolean, Float
log = logging.getLogger(__name__)

# No-op gettext marker: tags strings for translation extraction without
# translating at import time.
_ = lambda text: text

# Opening anchor tag for links into the edX LTI docs; paired with a literal
# "</a>" wherever it is interpolated into help text.
DOCS_ANCHOR_TAG_OPEN = (
    "<a target='_blank' "
    "href='http://edx.readthedocs.org/projects/ca/en/latest/exercises_tools/lti_component.html'>"
)
class LTIFields(object):
    """
    Fields to define and obtain LTI tool from provider are set here,
    except credentials, which should be set in course settings::
        `lti_id` is id to connect tool with credentials in course settings. It should not contain :: (double semicolon)
        `launch_url` is launch URL of tool.
        `custom_parameters` are additional parameters to navigate to proper book and book page.
    For example, for Vitalsource provider, `launch_url` should be
    *https://bc-staging.vitalsource.com/books/book*,
    and to get to proper book and book page, you should set custom parameters as::
        vbid=put_book_id_here
        book_location=page/put_page_number_here
    Default non-empty URL for `launch_url` is needed due to oauthlib demand (URL scheme should be presented)::
        https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136
    """
    # --- Author-facing settings (Scope.settings) ---------------------------
    display_name = String(
        display_name=_("Display Name"),
        help=_(
            "Enter the name that students see for this component. "
            "Analytics reports may also use the display name to identify this component."
        ),
        scope=Scope.settings,
        default="LTI",
    )
    lti_id = String(
        display_name=_("LTI ID"),
        help=_(
            "Enter the LTI ID for the external LTI provider. "
            "This value must be the same LTI ID that you entered in the "
            "LTI Passports setting on the Advanced Settings page."
            "<br />See {docs_anchor_open}the edX LTI documentation{anchor_close} for more details on this setting."
        ).format(
            docs_anchor_open=DOCS_ANCHOR_TAG_OPEN,
            anchor_close="</a>"
        ),
        default='',
        scope=Scope.settings
    )
    launch_url = String(
        display_name=_("LTI URL"),
        help=_(
            "Enter the URL of the external tool that this component launches. "
            "This setting is only used when Hide External Tool is set to False."
            "<br />See {docs_anchor_open}the edX LTI documentation{anchor_close} for more details on this setting."
        ).format(
            docs_anchor_open=DOCS_ANCHOR_TAG_OPEN,
            anchor_close="</a>"
        ),
        default='http://www.example.com',
        scope=Scope.settings)
    custom_parameters = List(
        display_name=_("Custom Parameters"),
        help=_(
            "Add the key/value pair for any custom parameters, such as the page your e-book should open to or "
            "the background color for this component."
            "<br />See {docs_anchor_open}the edX LTI documentation{anchor_close} for more details on this setting."
        ).format(
            docs_anchor_open=DOCS_ANCHOR_TAG_OPEN,
            anchor_close="</a>"
        ),
        scope=Scope.settings)
    open_in_a_new_page = Boolean(
        display_name=_("Open in New Page"),
        help=_(
            "Select True if you want students to click a link that opens the LTI tool in a new window. "
            "Select False if you want the LTI content to open in an IFrame in the current page. "
            "This setting is only used when Hide External Tool is set to False. "
        ),
        default=True,
        scope=Scope.settings
    )
    has_score = Boolean(
        display_name=_("Scored"),
        help=_(
            "Select True if this component will receive a numerical score from the external LTI system."
        ),
        default=False,
        scope=Scope.settings
    )
    weight = Float(
        display_name=_("Weight"),
        help=_(
            "Enter the number of points possible for this component. "
            "The default value is 1.0. "
            "This setting is only used when Scored is set to True."
        ),
        default=1.0,
        scope=Scope.settings,
        values={"min": 0},
    )
    # --- Per-learner state (Scope.user_state) ------------------------------
    module_score = Float(
        help=_("The score kept in the xblock KVS -- duplicate of the published score in django DB"),
        default=None,
        scope=Scope.user_state
    )
    score_comment = String(
        help=_("Comment as returned from grader, LTI2.0 spec"),
        default="",
        scope=Scope.user_state
    )
    hide_launch = Boolean(
        display_name=_("Hide External Tool"),
        help=_(
            "Select True if you want to use this component as a placeholder for syncing with an external grading "
            "system rather than launch an external tool. "
            "This setting hides the Launch button and any IFrames for this component."
        ),
        default=False,
        scope=Scope.settings
    )
    # Users will be presented with a message indicating that their e-mail/username would be sent to a third
    # party application. When "Open in New Page" is not selected, the tool automatically appears without any user action.
    ask_to_send_username = Boolean(
        display_name=_("Request user's username"),
        # Translators: This is used to request the user's username for a third party service.
        # Usernames can only be requested if "Open in New Page" is set to True.
        help=_(
            "Select True to request the user's username. You must also set Open in New Page to True to get the user's information."
        ),
        default=False,
        scope=Scope.settings
    )
    ask_to_send_email = Boolean(
        display_name=_("Request user's email"),
        # Translators: This is used to request the user's email for a third party service.
        # Emails can only be requested if "Open in New Page" is set to True.
        help=_(
            "Select True to request the user's email address. You must also set Open in New Page to True to get the user's information."
        ),
        default=False,
        scope=Scope.settings
    )
    description = String(
        display_name=_("LTI Application Information"),
        help=_(
            "Enter a description of the third party application. If requesting username and/or email, use this text box to inform users "
            "why their username and/or email will be forwarded to a third party application."
        ),
        default="",
        scope=Scope.settings
    )
    button_text = String(
        display_name=_("Button Text"),
        help=_(
            "Enter the text on the button used to launch the third party application."
        ),
        default="",
        scope=Scope.settings
    )
    accept_grades_past_due = Boolean(
        display_name=_("Accept grades past deadline"),
        help=_("Select True to allow third party systems to post grades past the deadline."),
        default=True,
        scope=Scope.settings
    )
class LTIModule(LTIFields, LTI20ModuleMixin, XModule):
    """
    Module provides LTI integration to course.
    Except usual Xmodule structure it proceeds with OAuth signing.
    How it works::
    1. Get credentials from course settings.
    2. There is minimal set of parameters need to be signed (presented for Vitalsource)::
        user_id
        oauth_callback
        lis_outcome_service_url
        lis_result_sourcedid
        launch_presentation_return_url
        lti_message_type
        lti_version
        roles
        *+ all custom parameters*
    These parameters should be encoded and signed by *OAuth1* together with
    `launch_url` and *POST* request type.
    3. Signing proceeds with client key/secret pair obtained from course settings.
       That pair should be obtained from LTI provider and set into course settings by course author.
       After that signature and other OAuth data are generated.
       OAuth data which is generated after signing is usual::
           oauth_callback
           oauth_nonce
           oauth_consumer_key
           oauth_signature_method
           oauth_timestamp
           oauth_version
    4. All that data is passed to form and sent to LTI provider server by browser via
       autosubmit via JavaScript.
       Form example::
           <form
               action="${launch_url}"
               name="ltiLaunchForm-${element_id}"
               class="ltiLaunchForm"
               method="post"
               target="ltiLaunchFrame-${element_id}"
               encType="application/x-www-form-urlencoded"
           >
               <input name="launch_presentation_return_url" value="" />
               <input name="lis_outcome_service_url" value="" />
               <input name="lis_result_sourcedid" value="" />
               <input name="lti_message_type" value="basic-lti-launch-request" />
               <input name="lti_version" value="LTI-1p0" />
               <input name="oauth_callback" value="about:blank" />
               <input name="oauth_consumer_key" value="${oauth_consumer_key}" />
               <input name="oauth_nonce" value="${oauth_nonce}" />
               <input name="oauth_signature_method" value="HMAC-SHA1" />
               <input name="oauth_timestamp" value="${oauth_timestamp}" />
               <input name="oauth_version" value="1.0" />
               <input name="user_id" value="${user_id}" />
               <input name="role" value="student" />
               <input name="oauth_signature" value="${oauth_signature}" />
               <input name="custom_1" value="${custom_param_1_value}" />
               <input name="custom_2" value="${custom_param_2_value}" />
               <input name="custom_..." value="${custom_param_..._value}" />
               <input type="submit" value="Press to Launch" />
           </form>
    5. LTI provider has same secret key and it signs data string via *OAuth1* and compares signatures.
       If signatures are correct, LTI provider redirects iframe source to LTI tool web page,
       and LTI tool is rendered to iframe inside course.
       Otherwise error message from LTI provider is generated.
    """

    # Client-side assets for this module's JavaScript behavior.
    js = {
        'js': [
            resource_string(__name__, 'js/src/lti/lti.js')
        ]
    }
    css = {'scss': [resource_string(__name__, 'css/lti/lti.scss')]}
    js_module_name = "LTI"

    def get_input_fields(self):
        """Build the OAuth-signed parameter dict rendered as hidden form inputs."""
        # LTI provides a list of default parameters that might be passed as
        # part of the POST data. These parameters should not be prefixed.
        # Likewise, The creator of an LTI link can add custom key/value parameters
        # to a launch which are to be included with the launch of the LTI link.
        # In this case, we will automatically add `custom_` prefix before this parameters.
        # See http://www.imsglobal.org/LTI/v1p1p1/ltiIMGv1p1p1.html#_Toc316828520
        PARAMETERS = [
            "lti_message_type",
            "lti_version",
            "resource_link_title",
            "resource_link_description",
            "user_image",
            "lis_person_name_given",
            "lis_person_name_family",
            "lis_person_name_full",
            "lis_person_contact_email_primary",
            "lis_person_sourcedid",
            "role_scope_mentor",
            "context_type",
            "context_title",
            "context_label",
            "launch_presentation_locale",
            "launch_presentation_document_target",
            "launch_presentation_css_url",
            "launch_presentation_width",
            "launch_presentation_height",
            "launch_presentation_return_url",
            "tool_consumer_info_product_family_code",
            "tool_consumer_info_version",
            "tool_consumer_instance_guid",
            "tool_consumer_instance_name",
            "tool_consumer_instance_description",
            "tool_consumer_instance_url",
            "tool_consumer_instance_contact_email",
        ]

        client_key, client_secret = self.get_client_key_secret()

        # parsing custom parameters to dict
        custom_parameters = {}
        for custom_parameter in self.custom_parameters:
            try:
                # Split on the first '=' only so values may themselves contain '='.
                param_name, param_value = [p.strip() for p in custom_parameter.split('=', 1)]
            except ValueError:
                _ = self.runtime.service(self, "i18n").ugettext
                msg = _('Could not parse custom parameter: {custom_parameter}. Should be "x=y" string.').format(
                    custom_parameter="{0!r}".format(custom_parameter)
                )
                raise LTIError(msg)

            # LTI specs: 'custom_' should be prepended before each custom parameter, as pointed in link above.
            if param_name not in PARAMETERS:
                param_name = 'custom_' + param_name

            custom_parameters[unicode(param_name)] = unicode(param_value)

        return self.oauth_params(
            custom_parameters,
            client_key,
            client_secret,
        )

    def get_context(self):
        """
        Returns a context.
        """
        # use bleach defaults. see https://github.com/jsocol/bleach/blob/master/bleach/__init__.py
        # ALLOWED_TAGS are
        # ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'strong', 'ul']
        #
        # ALLOWED_ATTRIBUTES are
        #     'a': ['href', 'title'],
        #     'abbr': ['title'],
        #     'acronym': ['title'],
        #
        # This lets all plaintext through.
        sanitized_comment = bleach.clean(self.score_comment)

        return {
            'input_fields': self.get_input_fields(),

            # These parameters do not participate in OAuth signing.
            'launch_url': self.launch_url.strip(),
            'element_id': self.location.html_id(),
            'element_class': self.category,
            'open_in_a_new_page': self.open_in_a_new_page,
            'display_name': self.display_name,
            'form_url': self.runtime.handler_url(self, 'preview_handler').rstrip('/?'),
            'hide_launch': self.hide_launch,
            'has_score': self.has_score,
            'weight': self.weight,
            'module_score': self.module_score,
            'comment': sanitized_comment,
            'description': self.description,
            'ask_to_send_username': self.ask_to_send_username,
            'ask_to_send_email': self.ask_to_send_email,
            'button_text': self.button_text,
            'accept_grades_past_due': self.accept_grades_past_due,
        }

    def get_html(self):
        """
        Renders parameters to template.
        """
        return self.system.render_template('lti.html', self.get_context())

    @XBlock.handler
    def preview_handler(self, _, __):
        """
        This is called to get context with new oauth params to iframe.
        """
        template = self.system.render_template('lti_form.html', self.get_context())
        return Response(template, content_type='text/html')

    def get_user_id(self):
        """Return the URL-quoted anonymous student id for this runtime."""
        user_id = self.runtime.anonymous_student_id
        assert user_id is not None
        return unicode(urllib.quote(user_id))

    def get_outcome_service_url(self, service_name="grade_handler"):
        """
        Return URL for storing grades.
        To test LTI on sandbox we must use http scheme.
        While testing locally and on Jenkins, mock_lti_server use http.referer
        to obtain scheme, so it is ok to have http(s) anyway.
        The scheme logic is handled in lms/lib/xblock/runtime.py
        """
        return self.runtime.handler_url(self, service_name, thirdparty=True).rstrip('/?')

    def get_resource_link_id(self):
        """
        This is an opaque unique identifier that the TC guarantees will be unique
        within the TC for every placement of the link.
        If the tool / activity is placed multiple times in the same context,
        each of those placements will be distinct.
        This value will also change if the item is exported from one system or
        context and imported into another system or context.
        This parameter is required.
        Example:  u'edx.org-i4x-2-3-lti-31de800015cf4afb973356dbe81496df'
        Hostname, edx.org,
        makes resource_link_id change on import to another system.
        Last part of location, location.name - 31de800015cf4afb973356dbe81496df,
        is random hash, updated by course_id,
        this makes resource_link_id unique inside single course.
        First part of location is tag-org-course-category, i4x-2-3-lti.
        Location.name itself does not change on import to another course,
        but org and course_id change.
        So together with org and course_id in a form of
        i4x-2-3-lti-31de800015cf4afb973356dbe81496df this part of resource_link_id:
        makes resource_link_id to be unique among courses inside same system.
        """
        return unicode(urllib.quote("{}-{}".format(self.system.hostname, self.location.html_id())))

    def get_lis_result_sourcedid(self):
        """
        This field contains an identifier that indicates the LIS Result Identifier (if any)
        associated with this launch.  This field identifies a unique row and column within the
        TC gradebook.  This field is unique for every combination of context_id / resource_link_id / user_id.
        This value may change for a particular resource_link_id / user_id  from one launch to the next.
        The TP should only retain the most recent value for this field for a particular resource_link_id / user_id.
        This field is generally optional, but is required for grading.
        """
        return "{context}:{resource_link}:{user_id}".format(
            context=urllib.quote(self.context_id),
            resource_link=self.get_resource_link_id(),
            user_id=self.get_user_id()
        )

    def get_course(self):
        """
        Return course by course id.
        """
        return self.descriptor.runtime.modulestore.get_course(self.course_id)

    @property
    def context_id(self):
        """
        Return context_id.
        context_id is an opaque identifier that uniquely identifies the context (e.g., a course)
        that contains the link being launched.
        """
        return self.course_id.to_deprecated_string()

    @property
    def role(self):
        """
        Get system user role and convert it to LTI role.
        """
        roles = {
            'student': u'Student',
            'staff': u'Administrator',
            'instructor': u'Instructor',
        }
        # Unknown roles fall back to the least-privileged LTI role.
        return roles.get(self.system.get_user_role(), u'Student')

    def oauth_params(self, custom_parameters, client_key, client_secret):
        """
        Signs request and returns signature and OAuth parameters.
        `custom_paramters` is dict of parsed `custom_parameter` field
        `client_key` and `client_secret` are LTI tool credentials.
        Also *anonymous student id* is passed to template and therefore to LTI provider.
        """
        client = oauthlib.oauth1.Client(
            client_key=unicode(client_key),
            client_secret=unicode(client_secret)
        )

        # Must have parameters for correct signing from LTI:
        body = {
            u'user_id': self.get_user_id(),
            u'oauth_callback': u'about:blank',
            u'launch_presentation_return_url': '',
            u'lti_message_type': u'basic-lti-launch-request',
            u'lti_version': 'LTI-1p0',
            u'roles': self.role,

            # Parameters required for grading:
            u'resource_link_id': self.get_resource_link_id(),
            u'lis_result_sourcedid': self.get_lis_result_sourcedid(),

            u'context_id': self.context_id,
        }

        if self.has_score:
            body.update({
                u'lis_outcome_service_url': self.get_outcome_service_url()
            })

        self.user_email = ""
        self.user_username = ""

        # Username and email can't be sent in studio mode, because the user object is not defined.
        # To test functionality test in LMS
        if callable(self.runtime.get_real_user):
            real_user_object = self.runtime.get_real_user(self.runtime.anonymous_student_id)
            try:
                self.user_email = real_user_object.email
            except AttributeError:
                self.user_email = ""
            try:
                self.user_username = real_user_object.username
            except AttributeError:
                self.user_username = ""

        if self.open_in_a_new_page:
            if self.ask_to_send_username and self.user_username:
                body["lis_person_sourcedid"] = self.user_username
            if self.ask_to_send_email and self.user_email:
                body["lis_person_contact_email_primary"] = self.user_email

        # Appending custom parameter for signing.
        body.update(custom_parameters)

        headers = {
            # This is needed for body encoding:
            'Content-Type': 'application/x-www-form-urlencoded',
        }

        try:
            __, headers, __ = client.sign(
                unicode(self.launch_url.strip()),
                http_method=u'POST',
                body=body,
                headers=headers)
        except ValueError:  # Scheme not in url.
            # https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136
            # Stubbing headers for now:
            headers = {
                u'Content-Type': u'application/x-www-form-urlencoded',
                u'Authorization': u'OAuth oauth_nonce="80966668944732164491378916897", \
oauth_timestamp="1378916897", oauth_version="1.0", oauth_signature_method="HMAC-SHA1", \
oauth_consumer_key="", oauth_signature="frVp4JuvT1mVXlxktiAUjQ7%2F1cw%3D"'}

        params = headers['Authorization']
        # Parse headers to pass to template as part of context:
        params = dict([param.strip().replace('"', '').split('=') for param in params.split(',')])
        params[u'oauth_nonce'] = params[u'OAuth oauth_nonce']
        del params[u'OAuth oauth_nonce']

        # oauthlib encodes signature with
        # 'Content-Type': 'application/x-www-form-urlencoded'
        # so '='' becomes '%3D'.
        # We send form via browser, so browser will encode it again,
        # So we need to decode signature back:
        params[u'oauth_signature'] = urllib.unquote(params[u'oauth_signature']).decode('utf8')

        # Add LTI parameters to OAuth parameters for sending in form.
        params.update(body)
        return params

    def max_score(self):
        # Only scored components have a maximum score.
        return self.weight if self.has_score else None

    @XBlock.handler
    def grade_handler(self, request, suffix):  # pylint: disable=unused-argument
        """
        This is called by courseware.module_render, to handle an AJAX call.
        Used only for grading. Returns XML response.
        Example of request body from LTI provider::
            <?xml version = "1.0" encoding = "UTF-8"?>
                <imsx_POXEnvelopeRequest xmlns = "some_link (may be not required)">
                  <imsx_POXHeader>
                    <imsx_POXRequestHeaderInfo>
                      <imsx_version>V1.0</imsx_version>
                      <imsx_messageIdentifier>528243ba5241b</imsx_messageIdentifier>
                    </imsx_POXRequestHeaderInfo>
                  </imsx_POXHeader>
                  <imsx_POXBody>
                    <replaceResultRequest>
                      <resultRecord>
                        <sourcedGUID>
                          <sourcedId>feb-123-456-2929::28883</sourcedId>
                        </sourcedGUID>
                        <result>
                          <resultScore>
                            <language>en-us</language>
                            <textString>0.4</textString>
                          </resultScore>
                        </result>
                      </resultRecord>
                    </replaceResultRequest>
                  </imsx_POXBody>
                </imsx_POXEnvelopeRequest>
        Example of correct/incorrect answer XML body:: see response_xml_template.
        """
        response_xml_template = textwrap.dedent("""\
            <?xml version="1.0" encoding="UTF-8"?>
            <imsx_POXEnvelopeResponse xmlns = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
                <imsx_POXHeader>
                    <imsx_POXResponseHeaderInfo>
                        <imsx_version>V1.0</imsx_version>
                        <imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier>
                        <imsx_statusInfo>
                            <imsx_codeMajor>{imsx_codeMajor}</imsx_codeMajor>
                            <imsx_severity>status</imsx_severity>
                            <imsx_description>{imsx_description}</imsx_description>
                            <imsx_messageRefIdentifier>
                            </imsx_messageRefIdentifier>
                        </imsx_statusInfo>
                    </imsx_POXResponseHeaderInfo>
                </imsx_POXHeader>
                <imsx_POXBody>{response}</imsx_POXBody>
            </imsx_POXEnvelopeResponse>
        """)
        # Returns when `action` is unsupported.
        # Supported actions:
        #   - replaceResultRequest.
        unsupported_values = {
            'imsx_codeMajor': 'unsupported',
            'imsx_description': 'Target does not support the requested operation.',
            'imsx_messageIdentifier': 'unknown',
            'response': ''
        }
        # Returns if:
        #   - past due grades are not accepted and grade is past due
        #   - score is out of range
        #   - can't parse response from TP;
        #   - can't verify OAuth signing or OAuth signing is incorrect.
        failure_values = {
            'imsx_codeMajor': 'failure',
            'imsx_description': 'The request has failed.',
            'imsx_messageIdentifier': 'unknown',
            'response': ''
        }

        if not self.accept_grades_past_due and self.is_past_due():
            failure_values['imsx_description'] = "Grade is past due"
            return Response(response_xml_template.format(**failure_values), content_type="application/xml")

        try:
            imsx_messageIdentifier, sourcedId, score, action = self.parse_grade_xml_body(request.body)
        except Exception as e:
            error_message = "Request body XML parsing error: " + escape(e.message)
            log.debug("[LTI]: " + error_message)
            failure_values['imsx_description'] = error_message
            return Response(response_xml_template.format(**failure_values), content_type="application/xml")

        # Verify OAuth signing.
        try:
            self.verify_oauth_body_sign(request)
        except (ValueError, LTIError) as e:
            failure_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
            error_message = "OAuth verification error: " + escape(e.message)
            failure_values['imsx_description'] = error_message
            log.debug("[LTI]: " + error_message)
            return Response(response_xml_template.format(**failure_values), content_type="application/xml")

        # The last ':'-separated piece of the sourcedId is the anonymous user id.
        real_user = self.system.get_real_user(urllib.unquote(sourcedId.split(':')[-1]))
        if not real_user:  # that means we can't save to database, as we do not have real user id.
            failure_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
            failure_values['imsx_description'] = "User not found."
            return Response(response_xml_template.format(**failure_values), content_type="application/xml")

        if action == 'replaceResultRequest':
            self.set_user_module_score(real_user, score, self.max_score())

            values = {
                'imsx_codeMajor': 'success',
                'imsx_description': 'Score for {sourced_id} is now {score}'.format(sourced_id=sourcedId, score=score),
                'imsx_messageIdentifier': escape(imsx_messageIdentifier),
                'response': '<replaceResultResponse/>'
            }
            log.debug("[LTI]: Grade is saved.")
            return Response(response_xml_template.format(**values), content_type="application/xml")

        unsupported_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
        log.debug("[LTI]: Incorrect action.")
        return Response(response_xml_template.format(**unsupported_values), content_type='application/xml')

    @classmethod
    def parse_grade_xml_body(cls, body):
        """
        Parses XML from request.body and returns parsed data
        XML body should contain nsmap with namespace, that is specified in LTI specs.
        Returns tuple: imsx_messageIdentifier, sourcedId, score, action
        Raises Exception if can't parse.
        """
        lti_spec_namespace = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0"
        namespaces = {'def': lti_spec_namespace}

        data = body.strip().encode('utf-8')
        parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')
        root = etree.fromstring(data, parser=parser)

        imsx_messageIdentifier = root.xpath("//def:imsx_messageIdentifier", namespaces=namespaces)[0].text or ''
        sourcedId = root.xpath("//def:sourcedId", namespaces=namespaces)[0].text
        score = root.xpath("//def:textString", namespaces=namespaces)[0].text
        # The action is the tag name of the first child of imsx_POXBody, with
        # the LTI namespace prefix stripped off.
        action = root.xpath("//def:imsx_POXBody", namespaces=namespaces)[0].getchildren()[0].tag.replace('{' + lti_spec_namespace + '}', '')

        # Raise exception if score is not float or not in range 0.0-1.0 regarding spec.
        score = float(score)
        if not 0 <= score <= 1:
            raise LTIError('score value outside the permitted range of 0-1.')

        return imsx_messageIdentifier, sourcedId, score, action

    def verify_oauth_body_sign(self, request, content_type='application/x-www-form-urlencoded'):
        """
        Verify grade request from LTI provider using OAuth body signing.
        Uses http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html::
            This specification extends the OAuth signature to include integrity checks on HTTP request bodies
            with content types other than application/x-www-form-urlencoded.
        Arguments:
            request: DjangoWebobRequest.
        Raises:
            LTIError if request is incorrect.
        """

        client_key, client_secret = self.get_client_key_secret()
        headers = {
            'Authorization': unicode(request.headers.get('Authorization')),
            'Content-Type': content_type,
        }

        sha1 = hashlib.sha1()
        sha1.update(request.body)
        oauth_body_hash = base64.b64encode(sha1.digest())
        oauth_params = signature.collect_parameters(headers=headers, exclude_oauth_signature=False)
        oauth_headers = dict(oauth_params)
        oauth_signature = oauth_headers.pop('oauth_signature')
        # Two candidate URIs are tried because the URL the provider signed may
        # differ from the one this server reconstructs (e.g. proxying).
        mock_request_lti_1 = mock.Mock(
            uri=unicode(urllib.unquote(self.get_outcome_service_url())),
            http_method=unicode(request.method),
            params=oauth_headers.items(),
            signature=oauth_signature
        )
        mock_request_lti_2 = mock.Mock(
            uri=unicode(urllib.unquote(request.url)),
            http_method=unicode(request.method),
            params=oauth_headers.items(),
            signature=oauth_signature
        )
        if oauth_body_hash != oauth_headers.get('oauth_body_hash'):
            log.error(
                "OAuth body hash verification failed, provided: {}, "
                "calculated: {}, for url: {}, body is: {}".format(
                    oauth_headers.get('oauth_body_hash'),
                    oauth_body_hash,
                    self.get_outcome_service_url(),
                    request.body
                )
            )
            raise LTIError("OAuth body hash verification is failed.")

        if (not signature.verify_hmac_sha1(mock_request_lti_1, client_secret) and not
                signature.verify_hmac_sha1(mock_request_lti_2, client_secret)):
            log.error("OAuth signature verification failed, for "
                      "headers:{} url:{} method:{}".format(
                          oauth_headers,
                          self.get_outcome_service_url(),
                          unicode(request.method)
                      ))
            raise LTIError("OAuth signature verification has failed.")

    def get_client_key_secret(self):
        """
        Obtains client_key and client_secret credentials from current course.
        """
        course = self.get_course()
        for lti_passport in course.lti_passports:
            try:
                lti_id, key, secret = [i.strip() for i in lti_passport.split(':')]
            except ValueError:
                _ = self.runtime.service(self, "i18n").ugettext
                msg = _('Could not parse LTI passport: {lti_passport}. Should be "id:key:secret" string.').format(
                    lti_passport='{0!r}'.format(lti_passport)
                )
                raise LTIError(msg)

            if lti_id == self.lti_id.strip():
                return key, secret
        # No matching passport: return empty credentials rather than raising.
        return '', ''

    def is_past_due(self):
        """
        Is it now past this problem's due date, including grace period?
        """
        due_date = self.due  # pylint: disable=no-member
        if self.graceperiod is not None and due_date:  # pylint: disable=no-member
            close_date = due_date + self.graceperiod  # pylint: disable=no-member
        else:
            close_date = due_date
        return close_date is not None and datetime.datetime.now(UTC()) > close_date
class LTIDescriptor(LTIFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
    """
    Descriptor for LTI Xmodule.
    """
    module_class = LTIModule
    # Proxy the module's handlers/helpers onto the descriptor so the runtime
    # can dispatch to them through the descriptor object.
    grade_handler = module_attr('grade_handler')
    preview_handler = module_attr('preview_handler')
    lti_2_0_result_rest_handler = module_attr('lti_2_0_result_rest_handler')
    clear_user_module_score = module_attr('clear_user_module_score')
    get_outcome_service_url = module_attr('get_outcome_service_url')
|
from datetime import datetime, date
import pytest
from pytz import UTC
from uber.config import c
from uber.models import Attendee, Session
from uber.site_sections import summary
@pytest.fixture
def birthdays():
    """Insert 17 volunteer attendees whose birthdays straddle year, month and
    leap-day boundaries; yield their ids and delete them afterwards."""
    dates = [
        date(1964, 12, 30),
        date(1964, 12, 31),
        date(1964, 1, 1),
        date(1964, 1, 2),
        date(1964, 1, 9),
        date(1964, 1, 10),
        date(1964, 1, 11),
        date(1964, 1, 12),
        date(1964, 1, 30),
        date(1964, 1, 31),
        date(1964, 2, 1),
        date(1964, 2, 2),
        date(1964, 2, 27),
        date(1964, 2, 28),
        date(1964, 2, 29),
        date(1964, 3, 1),
        date(1964, 3, 2)]
    attendees = [
        Attendee(
            placeholder=True,
            first_name='Born on',
            last_name=d.strftime('%B %-d, %Y'),
            ribbon=c.VOLUNTEER_RIBBON,
            staffing=True,
            birthdate=d)
        for d in dates]
    ids = []
    with Session() as session:
        session.bulk_insert(attendees)
        ids = [a.id for a in attendees]
    yield ids
    # teardown: remove the fixture rows again
    with Session() as session:
        session.query(Attendee).filter(Attendee.id.in_(ids)).delete(
            synchronize_session=False)
class TestBirthdayCalendar(object):
    """Checks row counts and contents of the birthday calendar views."""

    @pytest.mark.parametrize('year', [None, 2027, 2028])
    def test_attendee_birthday_calendar(
            self, admin_attendee, year, birthdays, monkeypatch):
        if year:
            assert str(year)
            response = summary.Root().attendee_birthday_calendar(year=year)
        else:
            assert str(datetime.now(UTC).year)
            response = summary.Root().attendee_birthday_calendar()
        body = response.decode('utf-8') if isinstance(response, bytes) else response
        # all 17 fixture birthdays, plus one header line
        assert len(body.strip().split('\n')) == (17 + 1)

    @pytest.mark.parametrize('epoch,eschaton,expected', [
        (datetime(2018, 1, 10), datetime(2018, 1, 11), 2),  # Normal dates
        (datetime(2017, 12, 31), datetime(2018, 1, 1), 2),  # Crossing the year
        (datetime(2018, 1, 31), datetime(2018, 2, 1), 2),  # Crossing the month
        (datetime(2018, 2, 28), datetime(2018, 3, 1), 3),  # Leap day
        (datetime(2018, 1, 1), datetime(2018, 3, 4), 15),  # Multi-month
        (datetime(2017, 12, 28), datetime(2018, 3, 4), 17),  # Everybody
    ])
    def test_event_birthday_calendar(
            self, admin_attendee, epoch, eschaton, expected, birthdays, monkeypatch):
        monkeypatch.setattr(c, 'EPOCH', epoch)
        monkeypatch.setattr(c, 'ESCHATON', eschaton)
        response = summary.Root().event_birthday_calendar()
        body = response.decode('utf-8') if isinstance(response, bytes) else response
        # birthdays inside the event window, plus one header line
        assert len(body.strip().split('\n')) == (expected + 1)

    def test_event_birthday_calendar_correct_birthday_years(
            self, admin_attendee, birthdays, monkeypatch):
        monkeypatch.setattr(c, 'EPOCH', datetime(2017, 12, 31))
        monkeypatch.setattr(c, 'ESCHATON', datetime(2018, 1, 1))
        response = summary.Root().event_birthday_calendar()
        body = response.decode('utf-8') if isinstance(response, bytes) else response
        assert '"Born on December 31, 1964\'s Birthday",2017-12-31' in body
        assert '"Born on January 1, 1964\'s Birthday",2018-01-01' in body
        # two matching rows plus one header line
        assert len(body.strip().split('\n')) == (2 + 1)
|
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.conf import settings
from django.core import urlresolvers
import hashlib
import re
register = Library()
class ViewNode(Node):
    """Template node for the ``{% view %}`` tag.

    Renders another view inline (server side) or, when
    ``settings.USE_AJAX_REQUESTS`` is set, emits a jQuery snippet that loads
    the view's URL asynchronously into a placeholder ``<div>``.
    """
    def __init__(self, parser, token):
        """Parse the tag arguments: a url/view variable plus args/kwargs."""
        self.args = []
        self.kwargs = {}
        tokens = token.split_contents()
        if len(tokens) < 2:
            raise TemplateSyntaxError("%r tag requires one or more arguments" % token.contents.split()[0])
        tokens.pop(0)  # discard the tag name itself (was an unused local)
        self.url_or_view = tokens.pop(0)
        for argument in tokens:
            equals = argument.find("=")
            if equals == -1:
                self.args.append(argument)
            else:
                self.kwargs[str(argument[:equals])] = argument[equals + 1:]
    def render(self, context):
        """Render the referenced view inline, or return the ajax loader JS."""
        # FIX: removed leftover debug print() calls from the render path.
        if 'request' not in context:
            return ""
        request = context['request']
        # get the url for the view
        url = Variable(self.url_or_view).resolve(context)
        if not settings.USE_AJAX_REQUESTS:
            # NOTE(review): comment/flag naming is confusing here — this
            # branch renders server-side when USE_AJAX_REQUESTS is False;
            # kept as-is to preserve behaviour.
            urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
            resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
            # get the view function
            view, args, kwargs = resolver.resolve(url)
            try:
                if callable(view):
                    ret = view(context['request'], *args, **kwargs).render()
                    return ret.rendered_content
                raise Exception("%r is not callable" % view)
            # FIX: narrowed from a bare ``except:`` which also swallowed
            # SystemExit and KeyboardInterrupt.
            except Exception:
                if settings.TEMPLATE_DEBUG:
                    raise
        else:
            # Placeholder div plus a jQuery loader for the URL.
            return """<div id="%(div_id)s">loading ...</div>
            <script>
            $.get( "%(url)s", function( data ) {
            $( "#%(div_id)s" ).html( data );
            });
            </script>""" % {'div_id': url.replace("/", ""), 'url': url}
        # Fallback: resolution/rendering failed outside DEBUG.
        return ""
register.tag('view', ViewNode)
|
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
    """SIGCHLD handler: reap every terminated child without blocking."""
    while True:
        try:
            # WNOHANG -> return immediately instead of blocking.
            pid, status = os.waitpid(-1, os.WNOHANG)
            print(
                'Child {pid} terminated with status {status}'
                '\n'.format(pid=pid, status=status)
            )
        except OSError:
            # No children left to wait for (e.g. ECHILD): done.
            return
        if pid == 0:  # no more zombies
            return
class WSGIServer(object):
    """Minimal forking WSGI server: one os.fork()-ed child per connection.

    NOTE(review): this module is Python 2 code (``StringIO`` import,
    str-based socket writes); it must be ported deliberately to run on 3.
    """
    address_family = socket.AF_INET
    socket_type = socket.SOCK_STREAM
    # Backlog handed to listen(); pending connections beyond it are refused.
    request_queue_size = 1024
    def __init__(self, server_address):
        """Create, bind and activate the listening socket."""
        # Create a listening socket
        self.listen_socket = listen_socket = socket.socket(
            self.address_family,
            self.socket_type
        )
        # Allow to reuse the same address
        listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind
        listen_socket.bind(server_address)
        # Activate
        listen_socket.listen(self.request_queue_size)
        # Get server host name and port
        host, port = self.listen_socket.getsockname()[:2]
        self.server_name = socket.getfqdn(host)
        self.server_port = port
        # Return headers set by Web framework/Web application
        self.headers_set = []
    def set_app(self, application):
        """Store the WSGI application callable to serve."""
        self.application = application
    def serve_forever(self):
        """Accept connections forever; fork a child to handle each one."""
        listen_socket = self.listen_socket
        while True:
            try:
                self.client_connection, client_address = listen_socket.accept()
            except IOError as e:
                code, msg = e.args
                # restart 'accept' if it was interrupted
                if code == errno.EINTR:
                    continue
                else:
                    raise
            pid = os.fork()
            if pid == 0:  # child
                listen_socket.close()  # close child copy
                # Handle one request and close the client connection.
                self.handle_one_request()
                os._exit(0)
            else:  # parent
                self.client_connection.close()  # close parent copy
    def handle_one_request(self):
        """Read one request, run the WSGI app, send the response (child side)."""
        # NOTE(review): single recv(1024) assumes the whole request fits in
        # one small read — fine for the demo, not for real traffic.
        self.request_data = request_data = self.client_connection.recv(1024)
        # Print formatted request data a la 'curl -v'
        print(''.join(
            '< {line}\n'.format(line=line)
            for line in request_data.splitlines()
        ))
        self.parse_request(request_data)
        # Construct environment dictionary using request data
        env = self.get_environ()
        # It's time to call our application callable and get
        # back a result that will become HTTP response body
        result = self.application(env, self.start_response)
        # Construct a response and send it back to the client
        self.finish_response(result)
    def parse_request(self, text):
        """Split the request line into method, path and HTTP version."""
        request_line = text.splitlines()[0]
        request_line = request_line.rstrip('\r\n')
        # Break down the request line into components
        (self.request_method,  # GET
        self.path,  # /hello
        self.request_version  # HTTP/1.1
        ) = request_line.split()
    def get_environ(self):
        """Build the WSGI environ dict for the current request."""
        env = {}
        # The following code snippet does not follow PEP8 conventions
        # but it's formatted the way it is for demonstration purposes
        # to emphasize the required variables and their values
        #
        # Required WSGI variables
        env['wsgi.version'] = (1, 0)
        env['wsgi.url_scheme'] = 'http'
        env['wsgi.input'] = StringIO.StringIO(self.request_data)
        env['wsgi.errors'] = sys.stderr
        env['wsgi.multithread'] = False
        env['wsgi.multiprocess'] = False
        env['wsgi.run_once'] = False
        # Required CGI variables
        env['REQUEST_METHOD'] = self.request_method  # GET
        env['PATH_INFO'] = self.path  # /hello
        env['SERVER_NAME'] = self.server_name  # localhost
        env['SERVER_PORT'] = str(self.server_port)  # 8888
        return env
    def start_response(self, status, response_headers, exc_info=None):
        """WSGI start_response callable: record status + headers for later."""
        # Add necessary server headers
        server_headers = [
            ('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
            ('Server', 'WSGIServer 0.2'),
        ]
        self.headers_set = [status, response_headers + server_headers]
        # To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
        # for now.
        # return self.finish_response
    def finish_response(self, result):
        """Assemble the HTTP response from the app result and send it."""
        try:
            status, response_headers = self.headers_set
            response = 'HTTP/1.1 {status}\r\n'.format(status=status)
            for header in response_headers:
                response += '{0}: {1}\r\n'.format(*header)
            response += '\r\n'
            for data in result:
                response += data
            # Print formatted response data a la 'curl -v'
            print(''.join(
                '> {line}\n'.format(line=line)
                for line in response.splitlines()
            ))
            self.client_connection.sendall(response)
        finally:
            # Always close the per-request connection, even on failure.
            self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
    """Install the SIGCHLD reaper and return a configured WSGIServer."""
    signal.signal(signal.SIGCHLD, grim_reaper)
    httpd = WSGIServer(server_address)
    httpd.set_app(application)
    return httpd
if __name__ == '__main__':
    # Usage: server.py module:callable  (e.g. "app:application")
    if len(sys.argv) < 2:
        sys.exit('Provide a WSGI application object as module:callable')
    module_name, app_name = sys.argv[1].split(':')
    module = __import__(module_name)
    application = getattr(module, app_name)
    httpd = make_server(SERVER_ADDRESS, application)
    print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
    httpd.serve_forever()
|
from . import models
from . import wizards
|
{
'name': "Better validation for Attendance",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Jörn Mankiewicz",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '8.0.0.1',
# any module necessary for this one to work correctly
'depends': ['base','hr_attendance','hr_timesheet_improvement'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/hr_attendance.xml',
],
# only loaded in demonstration mode
'demo': [
'demo.xml',
],
}
|
import os
import subprocess
import sys
import time
# Invoke script.py with a "delete" event for every .mp3 found under the
# folder given as the first command-line argument.
FOLDERPATH = sys.argv[1]
walk = os.walk(FOLDERPATH)
FSEVENT = "delete"
for item in walk:
    # NOTE(review): backslash separator assumes Windows-style paths — confirm.
    FILEPATHPREFIX = item[0] + "\\"
    for song in item[2]:
        if song.endswith(".mp3"):
            FILEPATH = "%s%s" % (FILEPATHPREFIX, song)
            # FIX: pass arguments as a list instead of interpolating them
            # into a shell string via os.system — file names containing
            # quotes or shell metacharacters broke the command (and allowed
            # shell injection).
            subprocess.call(["python", "script.py", song, FILEPATH, FSEVENT])
|
from tests.mock_navitia import navitia_response
# Mock Navitia response fixture: one base_schedule vehicle_journey query.
response = navitia_response.NavitiaResponse()
response.queries = [
    "vehicle_journeys/?filter=vehicle_journey.has_code(source, Code-orders)&since=20120615T120000Z&until=20120615T190000Z&data_freshness=base_schedule&depth=2"
    # request time is UTC -> 12:00 is 8:00 local in Sherbrooke
]
response.response_code = 200
response.json_response = """
{
"disruptions": [],
"feed_publishers": [
{
"id": "builder",
"license": "ODBL",
"name": "departure board",
"url": "www.canaltp.fr"
}
],
"links": [
],
"pagination": {
"items_on_page": 1,
"items_per_page": 25,
"start_page": 0,
"total_result": 1
},
"vehicle_journeys": [
{
"calendars": [
{
"active_periods": [
{
"begin": "20120615",
"end": "20130615"
}
],
"week_pattern": {
"friday": true,
"monday": false,
"saturday": false,
"sunday": false,
"thursday": false,
"tuesday": false,
"wednesday": false
}
}
],
"disruptions": [],
"id": "R:vj1",
"name": "R:vj1",
"stop_times": [
{
"arrival_time": "100000",
"departure_time": "100000",
"utc_arrival_time": "140000",
"utc_departure_time": "140000",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:14"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR1"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR1",
"label": "StopR1",
"links": [],
"name": "StopR1",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR1",
"label": "StopR1",
"links": [],
"name": "StopR1",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "101000",
"departure_time": "101000",
"utc_arrival_time": "140100",
"utc_departure_time": "140100",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:15"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR2"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR2",
"label": "StopR2",
"links": [],
"name": "StopR2",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR2",
"label": "StopR2",
"links": [],
"name": "StopR2",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "102000",
"departure_time": "102000",
"utc_arrival_time": "140200",
"utc_departure_time": "140200",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:16"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR3"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR3",
"label": "StopR3",
"links": [],
"name": "StopR3",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR3",
"label": "StopR3",
"links": [],
"name": "StopR3",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "103000",
"departure_time": "103000",
"utc_arrival_time": "140300",
"utc_departure_time": "140300",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR4"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR4",
"label": "StopR4",
"links": [],
"name": "StopR4",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR4",
"label": "StopR4",
"links": [],
"name": "StopR4",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "104000",
"departure_time": "104000",
"utc_arrival_time": "140400",
"utc_departure_time": "140400",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR5"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR5",
"label": "StopR5",
"links": [],
"name": "StopR5",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR5",
"label": "StopR5",
"links": [],
"name": "StopR5",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "105000",
"departure_time": "105000",
"utc_arrival_time": "140500",
"utc_departure_time": "140500",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR6"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR6",
"label": "StopR6",
"links": [],
"name": "StopR6",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR6",
"label": "StopR6",
"links": [],
"name": "StopR6",
"timezone": "America/Montreal"
}
}
}
],
"trip": {
"id": "R:vj1",
"name": "R:vj1"
},
"validity_pattern": {
"beginning_date": "20120614",
"days": "100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010"
}
}
]
}
"""
|
import argparse
import socket
import time
import yarp
EMSG_YARP_NOT_FOUND = "Could not connect to the yarp server. Try running 'yarp detect'."
EMSG_ROBOT_NOT_FOUND = 'Could not connect to the robot at %s:%s'
class EZModule(yarp.RFModule):
    """ The EZBModule class provides a base class for developing modules for the JD robot.

    NOTE(review): this file uses Python 2 ``raise E, msg`` syntax and will
    not run on Python 3 without porting.
    """
    # Default IP Address and Port for the JD Humanoid Robot.
    TCP_IP = '192.168.1.1'
    TCP_PORT = 23
    # Existing motor ID's are D0-D9, D12-D14 and D16-D18 there are more limits
    # (min, max) angle in degrees per motor index.
    LIMITS = [ (30, 180),
               (70, 170),
               (0, 170),
               (0, 170),
               (0, 60),
               (0, 180),
               (0, 90),
               (0, 60),
               (0, 180),
               (0, 180),
               (0, 180),
               (0, 160),
               (0, 180),
               (0, 130),
               (0, 180),
               (0, 160),
               (0, 180),
               (50, 130),
               (0, 180),
               (0, 180),
               (0, 180) ]
    def __init__(self, ip, port, prefix):
        """ Store the robot endpoint and the yarp name prefix.

        @param ip - IP address of the robot
        @param port - TCP port of the robot (converted to int)
        @param prefix - optional prefix for all yarp port names
        """
        yarp.RFModule.__init__(self)
        self.ip = ip
        self.port = int(port)
        self.prefix = prefix
    def configure(self, rf):
        """ Set the module name and open + attach the RPC port. """
        name = self.__class__.__name__
        if self.prefix:
            name = self.prefix + '/' + name
        self.setName(name)
        # RPC Port
        self.rpc_port = yarp.RpcServer()
        # name settings
        port_name = '/%s/%s' % (name, 'rpc')
        if not self.rpc_port.open(port_name):
            raise RuntimeError, EMSG_YARP_NOT_FOUND
        self.attach_rpc_server(self.rpc_port)
        return True
    def interruptModule(self):
        """ Interrupt the RPC port and every attribute ending in 'Port'. """
        self.rpc_port.interrupt()
        for x in dir(self):
            if x.endswith('Port') and 'interrupt' in dir(getattr(self, x)):
                getattr(self, x).interrupt()
        return True
    def close(self):
        """ Close the RPC port and every attribute ending in 'Port'. """
        self.rpc_port.close()
        for x in dir(self):
            if x.endswith('Port') and 'close' in dir(getattr(self, x)):
                getattr(self, x).close()
        return True
    def getPeriod(self):
        """ Period (seconds) between updateModule() calls. """
        return 0.1
    def updateModule(self):
        # XXX: I do not know why we need that, but if method is empty the module gets stuck
        time.sleep(0.000001)
        return True
    def createInputPort(self, name, mode = 'unbuffered'):
        """ This method returns an input port.

        @param name - name suffix; ':i' is appended and the result is added
                      to the module's port name
        @param mode - 'buffered', 'rpcclient', 'rpcserver' or 'unbuffered'
                      (default) port type
        @result port
        """
        return self.__createPort(name + ':i', None, mode)
    def __createPort(self, name, target = None, mode = 'unbuffered'):
        """ This method returns a port object.

        @param name - yarp name suffix for the port
        @param target - optional name of a port to add as output connection
        @param mode - 'buffered', 'rpcclient', 'rpcserver' or 'unbuffered'
                      (default) port type
        @result port
        """
        # create port
        if mode == 'buffered':
            port = yarp.BufferedPortBottle()
        elif mode == 'rpcclient':
            port = yarp.RpcClient()
        elif mode == 'rpcserver':
            port = yarp.RpcServer()
        else:
            port = yarp.Port()
        # build port name: ['', prefix?, ClassName, name] -> '/prefix/Class/name'
        port_name = ['']
        # prefix handling
        if hasattr(self, 'prefix') and self.prefix:
            port_name.append(self.prefix)
        port_name.append(self.__class__.__name__)
        port_name.append(name)
        # open port
        if not port.open('/'.join(port_name)):
            raise RuntimeError, EMSG_YARP_NOT_FOUND
        # add output if given
        if target:
            port.addOutput(target)
        # subclasses that maintain a _ports list get the port registered
        if hasattr(self, '_ports'):
            self._ports.append(port)
        return port
    def createOutputPort(self, name, target = None, mode = 'unbuffered'):
        """ This method returns an output port.

        @param name - name suffix; ':o' is appended and the result is added
                      to the module's port name
        @param target - optional name of a port to add as output connection
        @param mode - 'buffered', 'rpcclient', 'rpcserver' or 'unbuffered'
                      (default) port type
        @result port
        """
        return self.__createPort(name + ':o', target, mode)
def createArgParser():
    """ This method parses the command line arguments common to JD modules.

    NOTE: despite the name, this returns the *parsed* arguments (the result
    of ``parser.parse_args()``), not the parser object itself.
    @return argparse.Namespace with ``ip``, ``port`` and ``name`` attributes
    """
    parser = argparse.ArgumentParser(description='Create a JDModule to control the JD robot.')
    parser.add_argument( '-i', '--ip',
                        dest = 'ip',
                        default = str(EZModule.TCP_IP),
                        help = 'IP address for the JD robot.')
    parser.add_argument( '-p', '--port',
                        dest = 'port',
                        default = str(EZModule.TCP_PORT),
                        help = 'Port for the JD robot')
    parser.add_argument( '-n', '--name',
                        dest = 'name',
                        default = '',
                        help = 'Name prefix for Yarp port names')
    return parser.parse_args()
def main(module_cls):
    """ Run an EZModule-based class as a standalone yarp module.

    @param module_cls - an EZModule based class that can be started as a standalone module.
    """
    cli = createArgParser()
    yarp.Network.init()
    rf = yarp.ResourceFinder()
    rf.setVerbose(True)
    # rf.configure(argc,argv);
    instance = module_cls(cli.ip, cli.port, cli.name)
    instance.runModule(rf)
    yarp.Network.fini()
|
import os
from xbrowse_server import xbrowse_controls
from django.core.management.base import BaseCommand
from xbrowse_server.base.models import Project, Individual, VCFFile
from xbrowse_server import sample_management
class Command(BaseCommand):
    """Attach a VCF file to a project (or one individual) and optionally load it."""

    def add_arguments(self, parser):
        parser.add_argument('args', nargs='*')
        parser.add_argument('--indiv-id')
        parser.add_argument('--cohort-id')
        parser.add_argument('--clear', action="store_true", help="Whether to clear any previously-added VCF paths before adding this one")
        parser.add_argument('--load', action="store_true", help="Whether to also load the VCF data, and not just add record its path in the meta-data tables")

    def handle(self, *args, **options):
        # positional args: <project_id> <vcf_path>
        project_id = args[0]
        project = Project.objects.get(project_id=project_id)
        vcf_file_path = os.path.abspath(args[1])
        vcf_file, _created = VCFFile.objects.get_or_create(file_path=vcf_file_path)
        if options.get('clear'):
            # detach all previously registered VCFs from every individual
            for individual in project.individual_set.all():
                individual.vcf_files.clear()
        indiv_id = options.get('indiv_id')
        if indiv_id:
            individual = Individual.objects.get(project=project, indiv_id=indiv_id)
            sample_management.add_vcf_file_to_individual(individual, vcf_file)
        else:
            sample_management.add_vcf_file_to_project(project, vcf_file)
        if options.get('load'):
            print("Loading VCF into project store")
            xbrowse_controls.load_project(project_id, vcf_files=[vcf_file_path])
            print("Loading VCF datastore")
            xbrowse_controls.load_project_datastore(project_id, vcf_files=[vcf_file_path])
|
from __future__ import print_function
import time
from flask import Flask, session, url_for
from flask_debugtoolbar import DebugToolbarExtension
from weblablib import WebLab, requires_active, weblab_user, poll
app = Flask(__name__)
app.config['SECRET_KEY'] = 'something random' # e.g., run: os.urandom(32) and put the output here
app.config['WEBLAB_USERNAME'] = 'weblabdeusto' # This is the http_username you put in WebLab-Deusto
app.config['WEBLAB_PASSWORD'] = 'password' # This is the http_password you put in WebLab-Deusto
# Scope the session cookie to /lab so it does not clash with other apps on the host.
app.config['SESSION_COOKIE_NAME'] = 'lab'
app.config['SESSION_COOKIE_PATH'] = '/lab'
app.config['WEBLAB_SESSION_ID_NAME'] = 'lab_session_id'
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
# WebLab-Deusto integration; callback_url is the public entry point it calls.
weblab = WebLab(app, callback_url='/lab/public')
toolbar = DebugToolbarExtension(app)
@weblab.initial_url
def initial_url():
    """
    This returns the landing URL (e.g., where the user will be forwarded).
    """
    # WebLab-Deusto calls this to know where to send a newly assigned user.
    return url_for('.lab')
@weblab.on_start
def on_start(client_data, server_data):
    """
    In this code, you can do something to setup the experiment. It is
    called for every user, before they start using it.
    """
    print("New user!")
    print(weblab_user)
@weblab.on_dispose
def on_stop():
    """
    In this code, you can do something to clean up the experiment. It is
    guaranteed to be run.
    """
    print("User expired. Here you should clean resources")
    print(weblab_user)
@app.route('/lab/')
@requires_active
def lab():
    """
    This is your code. If you provide @requires_active to any other URL, it is secured.
    """
    current = weblab_user
    template = "Hello %s. You didn't poll in %.2f seconds (timeout configured to %s). Total time left: %s"
    return template % (current.username, current.time_without_polling, weblab.timeout, current.time_left)
@app.route("/")
def index():
    """Landing page with a link into the lab."""
    lab_url = url_for('.lab')
    return "<html><head></head><body><a href='{}'>Access to the lab</a></body></html>".format(lab_url)
if __name__ == '__main__':
    # Developer convenience: print the commands needed to run via `flask run`.
    print("Run the following:")
    print()
    print(" (optionally) $ export FLASK_DEBUG=1")
    print(" $ export FLASK_APP={}".format(__file__))
    print(" $ flask run")
    print()
|
"""
Class_LabExperimBased provides functionalities for data handling of data obtained in lab experiments in the field of (waste)water treatment.
Copyright (C) 2016 Chaim De Mulder
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see http://www.gnu.org/licenses/.
"""
import sys
import matplotlib.pyplot as plt #plotten in python
import warnings as wn
from wwdata.Class_HydroData import HydroData
class LabExperimBased(HydroData):
    """
    Superclass for a HydroData object, expanding the functionalities with
    specific functions for data gathered in lab experiments.

    Attributes
    ----------
    timedata_column : str
        name of the column containing the time data
    data_type : str
        type of the data provided
    experiment_tag : str
        A tag identifying the experiment; can be a date or a code used by
        the producer/owner of the data.
    time_unit : str
        The time unit in which the time data is given
    units : array
        The units of the variables in the columns
    """
    def __init__(self, data, timedata_column='index', data_type='NAT',
                 experiment_tag='No tag given', time_unit=None):
        """
        Initialisation of a LabExperimBased object, based on a previously
        defined HydroData object.
        """
        HydroData.__init__(self, data, timedata_column=timedata_column,
                           data_type=data_type, experiment_tag=experiment_tag,
                           time_unit=time_unit)

    def hours(self, time_column='index'):
        """
        Calculates the hours from the relative time values and stores them
        in a new 'h' column.

        Parameters
        ----------
        time_column : string
            column containing the relative time values; default to index
        """
        if time_column == 'index':
            self.data['index'] = self.time.values
            # BUG FIX: the original referenced a non-existent 'indexes'
            # column here, raising KeyError on the default path.
            # NOTE(review): formula assumes day-based relative times -> hours;
            # confirm the intended role of shift(1).
            self.data['h'] = (self.data['index']) * 24 + self.data['index'].shift(1)
            self.data['h'].fillna(0, inplace=True)
            self.data.drop('index', axis=1, inplace=True)
        else:
            self.data['h'] = (self.data[time_column]) * 24 + self.data[time_column].shift(1)
            self.data['h'].fillna(0, inplace=True)

    def add_conc(self, column_name, x, y, new_name='default'):
        """
        Calculates the concentration values of the given column and adds them
        as a new column to the DataFrame.

        Parameters
        ----------
        column_name : str
            column with values
        x : int
            multiplication factor (applied to every value)
        y : int
            multiplication factor (applied to every value)
        new_name : str
            name of the new column, default to 'column_name + mg/L'
        """
        if new_name == 'default':
            new_name = column_name + ' ' + 'mg/L'
        self.data[new_name] = self.data[column_name].values * x * y

    ## Instead of this function: define a dataframe/dict with conversion or
    ## concentration factors, so that you can have a function that automatically
    ## converts all parameters in the frame to concentrations
    def check_ph(self, ph_column='pH', thresh=0.4):
        """
        Warns when the maximal change in pH during the experiment exceeds
        the threshold; otherwise stores the change as ``delta_ph``.

        Parameters
        ----------
        ph_column : str
            column with pH-values, default to 'pH'
        thresh : int
            threshold value for warning, default to '0.4'
        """
        dph = self.data[ph_column].max() - self.data[ph_column].min()
        if dph > thresh:
            wn.warn('Strong change in pH during experiment!')
        else:
            # NOTE(review): delta_ph is only stored when the change stays
            # below the threshold; behaviour kept as-is.
            self.delta_ph = dph

    def in_out(self, columns):
        """
        Summed difference between start and end values of the given columns
        (start_values - end_values).

        Parameters
        ----------
        columns : array of strings
        """
        indexes = self.time.values
        inv = 0
        outv = 0
        # single pass accumulating both start and end values
        for column in columns:
            inv += self.data[column][indexes[0]]
            outv += self.data[column][indexes[-1]]
        return inv - outv

    def removal(self, columns):
        """
        Total removal of nitrogen: (1 - (end_values / start_values)).

        Parameters
        ----------
        columns : array of strings
        """
        indexes = self.time.values
        inv = 0
        outv = 0
        for column in columns:
            inv += self.data[column][indexes[0]]
            outv += self.data[column][indexes[-1]]
        return 1 - (outv / inv)

    def calc_slope(self, columns, time_column='h'):
        """
        Calculates the slope of the selected columns and stores each result
        in a new '<column> slope' column.

        Parameters
        ----------
        columns : array of strings
            columns to calculate the slope for
        time_column : str
            time used for calculation; default to 'h'
        """
        # NOTE(review): shift(1) - current is the *negated* forward
        # difference; kept as-is to preserve behaviour — confirm the sign.
        for column in columns:
            self.data[column + " " + 'slope'] = (
                (self.data[column].shift(1) - self.data[column])
                / (self.data[time_column] - self.data[time_column].shift(1)))

    def plot(self, columns, time_column='index'):
        """
        Plots the selected columns against the chosen time column.
        (docstring fixed: previously copy-pasted from calc_slope)

        Parameters
        ----------
        columns : array of strings
            columns to plot
        time_column : str
            x-axis values; default to 'index'

        Returns
        -------
        fig, ax : matplotlib figure and axes
        """
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111)
        if time_column == 'index':
            for column in columns:
                ax.plot(self.time, self.data[column], marker='o')
        else:
            for column in columns:
                ax.plot(self.data[time_column], self.data[column], marker='o')
        ax.legend()
        return fig, ax
def _print_removed_output(original,new,type_):
"""
function printing the output of functions that remove datapoints.
Parameters
----------
original : int
original length of the dataset
new : int
length of the new dataset
type_ : str
'removed' or 'dropped'
"""
print('Original dataset:',original,'datapoints')
print('New dataset:',new,'datapoints')
print(original-new,'datapoints ',type_)
def _log_removed_output(log_file,original,new,type_):
"""
function writing the output of functions that remove datapoints to a log file.
Parameters
----------
log_file : str
string containing the directory to the log file to be written out
original : int
original length of the dataset
new : int
length of the new dataset
type_ : str
'removed' or 'dropped'
"""
log_file = open(log_file,'a')
log_file.write(str('\nOriginal dataset: '+str(original)+' datapoints; new dataset: '+
str(new)+' datapoints'+str(original-new)+' datapoints ',type_))
log_file.close()
|
import source_navigation_steps
import functional_test
class TestSourceInterfaceNotFound(
    functional_test.FunctionalTest,
    source_navigation_steps.SourceNavigationStepsMixin):
    """Functional test: requesting a missing source page shows 'not found'."""
    def test_not_found(self):
        # Delegates to the shared navigation-steps mixin helper.
        self._source_not_found()
|
from common.log import logUtils as log
from constants import clientPackets
from constants import serverPackets
def handle(userToken, packetData):
    """Handle a 'set away message' packet: store the message on the token,
    confirm via a FokaBot private message, and log the change."""
    username = userToken.username
    # Parse the raw packet into a dict
    parsed = clientPackets.setAwayMessage(packetData)
    awayMessage = parsed["awayMessage"]
    # Set token away message
    userToken.awayMessage = awayMessage
    # Send private message from fokabot
    if awayMessage == "":
        fokaMessage = "Your away message has been reset"
    else:
        fokaMessage = "Your away message is now: {}".format(awayMessage)
    userToken.enqueue(serverPackets.sendMessage("FokaBot", username, fokaMessage))
    log.info("{} has changed their away message to: {}".format(username, awayMessage))
|
"""
WSGI config for tumuli project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is constructed.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tumuli.settings")
# Module-level WSGI callable that servers (gunicorn, uwsgi, ...) look up.
application = get_wsgi_application()
|
"""course_discovery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
import os
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import logout
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from course_discovery.apps.core import views as core_views
# Register ModelAdmin classes from all installed apps.
admin.autodiscover()
# Login is delegated to the edX OpenID Connect provider; query_string=True
# preserves e.g. ?next=... across the redirect.
login = RedirectView.as_view(url=reverse_lazy('social:begin', args=['edx-oidc']), permanent=False, query_string=True)
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/', include('course_discovery.apps.api.urls', namespace='api')),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^auto_auth/$', core_views.AutoAuth.as_view(), name='auto_auth'),
    url(r'^health/$', core_views.health, name='health'),
    url(r'^login/$', login, name='login'),
    url(r'^logout/$', logout, name='logout'),
    url('', include('social.apps.django_app.urls', namespace='social')),
]
# NOTE(review): os.environ.get returns a string, so any non-empty value —
# even "false" — enables the toolbar branch here; confirm that is intended.
if settings.DEBUG and os.environ.get('ENABLE_DJANGO_TOOLBAR', False):  # pragma: no cover
    import debug_toolbar  # pylint: disable=import-error
    urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
|
from odoo.tests.common import TransactionCase
class TestProjectDuplicateSubtask(TransactionCase):
    """Tests for the "duplicate subtasks" action on project tasks."""

    def setUp(self):
        super().setUp()
        Task = self.env["project.task"]
        self.project1 = self.env["project.project"].create({"name": "Project 1"})
        # One parent task with two subtasks in the same project.
        self.task1 = Task.create(
            {"name": "name1", "project_id": self.project1.id}
        )
        self.subtask1 = Task.create(
            {"name": "2", "project_id": self.project1.id, "parent_id": self.task1.id}
        )
        self.subtask2 = Task.create(
            {"name": "3", "project_id": self.project1.id, "parent_id": self.task1.id}
        )

    def test_check_subtasks(self):
        """Duplicating a task with two subtasks copies both children."""
        self.task1.action_duplicate_subtasks()
        copied_task = self.env["project.task"].search(
            [("name", "ilike", self.task1.name), ("name", "ilike", "copy")]
        )
        self.assertEqual(
            len(copied_task.child_ids), 2, "Two subtasks should have been created"
        )
|
from django.conf.urls import url
from .decorators import captcha_required
from captcha import views
# Captcha app routes; "new/" serves a freshly generated captcha.
urlpatterns = [
    url(r'^new/', views.new_captcha, name='new_captcha'),
]
|
# Odoo module manifest: Romanian localization of invoice/voucher/payment
# and bank statement reports.
{
    "name": "Romania - Invoice Report ",
    "summary": "Localizare Terrabit",
    "version": "14.0.3.0.3",
    # Adjacent string literals concatenate with no separating space, so the
    # author renders as "Dorin Hongu,Odoo Community Association (OCA)".
    "author": "Dorin Hongu," "Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/l10n-romania",
    "license": "AGPL-3",
    "category": "Generic Modules",
    "depends": [
        "base",
        "account",
        "l10n_ro_config",
        "purchase",
        # "deltatech_watermark"
    ],
    # Report/view definitions loaded on install.
    "data": [
        "views/invoice_report.xml",
        "views/voucher_report.xml",
        "views/payment_report.xml",
        # 'views/account_invoice_view.xml',
        "views/account_voucher_report.xml",
        "views/account_bank_statement_view.xml",
        "views/statement_report.xml",
        # 'views/res_partner_view.xml',
    ],
}
|
from odoo import fields, models, api
from datetime import datetime
class OutletLoss(models.Model):
    """Revenue lost when a sale order line is moved to the outlet
    (sold at the reduced outlet price instead of the regular price)."""
    _name = 'outlet.loss'

    @api.multi
    @api.depends('qty', 'price_outlet', 'price_unit')
    def _get_outlet_loss(self):
        # Loss per record: quantity times the price delta (negative unit
        # margin yields a negative "loss", i.e. a gain).
        for loss in self:
            loss.total_lost = loss.qty * (loss.price_outlet - loss.price_unit)

    product_id = fields.Many2one('product.product', 'Product')
    price_unit = fields.Float('Price')
    price_outlet = fields.Float('Outlet Price')
    total_lost = fields.Float("Outlet Loss", compute=_get_outlet_loss,
                              store=True, readonly=True)
    # BUG FIX: the original used default=fields.datetime.now(), which is
    # evaluated once when the module is imported, so every record silently
    # shared the server-start date. Passing the callable (no parentheses)
    # defers evaluation to record creation and respects the user timezone.
    date_move = fields.Date('Move to outlet on',
                            default=fields.Date.context_today)
    outlet_ok = fields.Boolean('Outlet')
    order_line_id = fields.Many2one('sale.order.line', 'Order Line')
    qty = fields.Float('Quantity')
    percent = fields.Float('Outlet Percent')
|
import tempfile
from datetime import datetime
import flask_testing
from flask import url_for
import iis
from iis.models import User
from iis.extensions import db
class BaseTestCase(flask_testing.TestCase):
    """Shared fixture for iis view tests.

    The class itself doubles as the Flask configuration object: the
    upper-case class attributes below are read by ``iis.create_app``.
    """

    # Flask / extension configuration consumed by create_app().
    DB_FILE = tempfile.mkstemp()
    SQLALCHEMY_DATABASE_URI = "sqlite:///" + DB_FILE[1]
    LOGGING = {"version": 1}
    TESTING = True
    WTF_CSRF_ENABLED = False
    USER_ENABLE_LOGIN_WITHOUT_CONFIRM = True

    def create_app(self):
        """Build the app under test and keep the user manager around."""
        app, user_manager = iis.create_app(self.__class__)[0:2]
        self.user_manager = user_manager
        return app

    def setUp(self):
        # Fresh schema plus a default admin account for every test.
        db.create_all()
        self.create_user("admin", "passW1")

    def tearDown(self):
        db.session.remove()
        db.drop_all()

    def login(self, username=None, password=None):
        """POST the login form (defaults to the admin fixture) and return
        the matching User row."""
        creds = {
            "username": username or "admin",
            "password": password or "passW1",
        }
        self.client.post(url_for('user.login'), data=creds,
                         follow_redirects=False)
        return User.query.filter_by(username=creds["username"]).one()

    def logout(self):
        self.client.get(url_for("user.logout"))

    def create_user(self, username, password):
        """Insert and commit an active, already-confirmed user; return it."""
        user = User(
            username=username,
            password=self.user_manager.hash_password(password),
            email=username + "@localhost",
            confirmed_at=datetime.fromtimestamp(0.0),
            active=True,
        )
        db.session.add(user)
        db.session.commit()
        return user

    def assertLoginRequired(self, url):
        """Assert that ``url`` redirects anonymous visitors to the login page."""
        self.logout()
        res = self.client.get(url)
        self.assertEqual(302, res.status_code)
        self.assertIn(url_for('user.login'), res.headers['Location'])
|
import os
from setuptools import setup
# Read the long description while __file__'s directory is still resolvable,
# before the chdir below.
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# Allow "python setup.py ..." to be run from any working directory.
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
    name='django-isegory',
    version='0.1',
    packages=['isegory'],
    include_package_data=True,
    license='AGPL',
    description='A simple Django app to declare the provenance of a dataset.',
    long_description=README,
    url='http://github.com/jdelacueva/django-isegory/',
    author='Javier de la Cueva',
    author_email='jdelacueva@derecho-internet.org',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: AGPL',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
|
import uuid
from bottle import Bottle, request, response, abort
import bcrypt
from recall.data import whitelist, blacklist
from recall import convenience as c
from recall import plugins, jobs, messages
# Bottle sub-application for the "people" endpoints; the shared plugins
# from recall.plugins provide exception mapping, JSON rendering, auth
# and CORS handling.
app = Bottle()
app.install(plugins.exceptions)
app.install(plugins.ppjson)
app.install(plugins.auth)
app.install(plugins.cors)
app.error_handler = plugins.handler_dict
logger = c.logger("people")
@app.get("/")
def users():
    """Listing every user is not supported yet."""
    abort(503, "Not yet implemented")
@app.get("/<who>/")
def user_(who):
    """Return the public profile fields of the user whose email is ``who``."""
    try:
        # whitelist raises TypeError when find_one returns None (no such user)
        return whitelist(
            c.db().users.find_one({"email": who}),
            ["email", "firstName", "pseudonym"],
        )
    except TypeError:
        logger.warn("Asked about {email}, but that is not a user".format(
            email=who))
        abort(404, "User not found")
@app.get("/<who>/self")
def _self(who, user):
    """Return the full (private) profile, but only to the account owner."""
    if who == user["email"]:
        return whitelist(user, ["pseudonym",
                                "firstName",
                                "surname",
                                "email",
                                "private_email"])
    # Requesting someone else's private view is a bad request.
    response.status = 400
@app.post("/<who>/")
def request_invite(who):
    """Create an unverified account for ``who`` and queue an invite email
    carrying the verification key. Responds 202 Accepted."""
    # FIXME: Don't allow the pseudonym "public"
    user = whitelist(request.json, [
        "pseudonym",
        "firstName",
        "surname",
        "private_email",
        "token",
    ])
    if "private_email" not in user:
        abort(400, "You must provide a private_email field")
    # Server-generated fields: one-time verification key, signup time and
    # the public email taken from the URL (never from the request body).
    user["email_key"] = str(uuid.uuid4())
    user["registered"] = c.unixtime()
    user["email"] = who
    # The unique index makes a duplicate signup fail at insert time.
    c.db().users.ensure_index("email", unique=True)
    c.db().users.insert(user, safe=True)
    response.status = 202
    logger.info("{email} subscribed".format(email=who))
    jobs.enqueue(messages.SendInvite(user))
@app.post("/<who>/<email_key>")
def verify_email(who, email_key):
    """Complete signup: set the password of the account matching
    ``email_key`` and mark it verified (exactly once)."""
    # Use the cheapest bcrypt work factor in test/debug mode; the full
    # default cost would make the test suite slow.
    if "RECALL_TEST_MODE" in c.settings or "RECALL_DEBUG_MODE" in c.settings:
        salt = bcrypt.gensalt(1)
    else:
        salt = bcrypt.gensalt()
    password_hash = bcrypt.hashpw(request.json["password"], salt)
    # Atomically claim the not-yet-verified account for this key; the
    # '$exists: False' guard prevents a second verification from matching.
    spec = {"email_key": email_key, "verified": {"$exists": False}}
    update = {"$set": {"password_hash": password_hash,
                       "verified": c.unixtime()}}
    success = c.db().users.update(spec, update, safe=True)["updatedExisting"]
    if not success:
        # Distinguish "already verified" (403) from "unknown key" (404).
        if c.db().users.find_one({"email_key": email_key}):
            logger.warn("{email} tried to verify a second time".format(email=who))
            abort(403, "Already verified")
        else:
            logger.warn("Someone tried to verify with a key, but it doesn't exist")
            abort(404, "Don't know that key")
    user = c.db().users.find_one({"email_key": email_key})
    response.status = 201
    # Never echo the hash, the key or the raw _id back to the client.
    return blacklist(user, ["_id", "email_key", "password_hash"])
|
from collections import defaultdict
from fs.errors import ResourceNotFoundError
import logging
import inspect
import re
from path import path
from django.http import Http404
from django.conf import settings
from .module_render import get_module
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import Location, XML_MODULESTORE_TYPE
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError
from courseware.model_data import FieldDataCache
from static_replace import replace_static_urls
from courseware.access import has_access
import branding
log = logging.getLogger(__name__)
def get_request_for_thread():
    """Walk up the stack, return the nearest first argument named "request"."""
    # Holding a frame object creates a reference cycle (frame -> locals ->
    # frame), so ``frame`` is explicitly deleted in the finally clause.
    frame = None
    try:
        for f in inspect.stack()[1:]:
            frame = f[0]
            code = frame.f_code
            # Match plain functions (request, ...) as well as methods
            # (self, request, ...).
            if code.co_varnames[:1] == ("request",):
                return frame.f_locals["request"]
            elif code.co_varnames[:2] == ("self", "request",):
                return frame.f_locals["request"]
    finally:
        del frame
def get_course(course_id, depth=0):
    """
    Given a course id, return the corresponding course descriptor.

    Raises ValueError when course_id is unknown or malformed; this is
    appropriate for internal use.

    depth: number of levels of children for the modulestore to cache;
    None means infinite depth, the default fetches no children.
    """
    try:
        location = CourseDescriptor.id_to_location(course_id)
        return modulestore().get_instance(course_id, location, depth=depth)
    except InvalidLocationError:
        raise ValueError("Invalid location: {}".format(course_id))
    except (KeyError, ItemNotFoundError):
        raise ValueError("Course not found: {}".format(course_id))
def get_course_by_id(course_id, depth=0):
    """
    Given a course id, return the corresponding course descriptor.

    Raises Http404 when course_id is unknown or malformed — suitable for
    use directly from view code.

    depth: number of levels of children for the modulestore to cache;
    None means infinite depth.
    """
    try:
        location = CourseDescriptor.id_to_location(course_id)
        return modulestore().get_instance(course_id, location, depth=depth)
    except InvalidLocationError:
        raise Http404("Invalid location")
    except (KeyError, ItemNotFoundError):
        raise Http404("Course not found.")
def get_course_with_access(user, course_id, action, depth=0):
    """
    Look up the course descriptor for course_id, verify that ``user`` may
    perform ``action`` on it, and return the descriptor.

    Raises Http404 if the course_id is invalid or the user lacks access.

    depth: number of levels of children for the modulestore to cache;
    None means infinite depth.
    """
    course = get_course_by_id(course_id, depth=depth)
    if has_access(user, course, action):
        return course
    # Deliberately raise a non-specific error to avoid leaking info about
    # access control settings.
    raise Http404("Course not found.")
def get_opt_course_with_access(user, course_id, action):
    """
    Same as get_course_with_access, except that a course_id of None
    short-circuits to None without performing any access checks.
    """
    if course_id is None:
        return None
    return get_course_with_access(user, course_id, action)
def course_image_url(course):
    """Try to look up the image url for the course. If it's not found,
    log an error and return the dead link"""
    # XML-backed courses (and any course with a static_asset_path) serve
    # the image straight from static files; otherwise build a c4x asset
    # location and resolve it through the contentstore.
    if course.static_asset_path or modulestore().get_modulestore_type(course.location.course_id) == XML_MODULESTORE_TYPE:
        return '/static/' + (course.static_asset_path or getattr(course, 'data_dir', '')) + "/images/course_image.jpg"
    else:
        loc = course.location.replace(tag='c4x', category='asset', name=course.course_image)
        _path = StaticContent.get_url_path_from_location(loc)
        return _path
def find_file(filesystem, dirs, filename):
    """
    Search ``dirs`` (in order) on ``filesystem`` for ``filename``.

    filesystem: an OSFS filesystem
    dirs: a list of path objects
    filename: a string

    Returns the first existing ``d / filename``; raises
    ResourceNotFoundError when none of the directories contains the file.
    """
    for candidate in (path(d) / filename for d in dirs):
        if filesystem.exists(candidate):
            return candidate
    raise ResourceNotFoundError("Could not find {0}".format(filename))
def get_course_about_section(course, section_key):
    """
    This returns the snippet of html to be rendered on the course about page,
    given the key for the section.
    Valid keys:
    - overview
    - title
    - university
    - number
    - short_description
    - description
    - key_dates (includes start, end, exams, etc)
    - video
    - course_staff_short
    - course_staff_extended
    - requirements
    - syllabus
    - textbook
    - faq
    - more_info
    - ocw_links

    Returns None when the matching "about" item is missing from the
    modulestore; raises KeyError for a key not handled below.
    """
    # Many of these are stored as html files instead of some semantic
    # markup. This can change without effecting this interface when we find a
    # good format for defining so many snippets of text/html.
    if section_key in ['short_description', 'description', 'key_dates', 'video',
                       'course_staff_short', 'course_staff_extended',
                       'requirements', 'syllabus', 'textbook', 'faq', 'more_info',
                       'number', 'instructors', 'overview',
                       'effort', 'end_date', 'prerequisites', 'ocw_links']:
        try:
            # This helper takes no request argument, so recover the current
            # request from the call stack for the rendering pipeline.
            request = get_request_for_thread()
            loc = course.location.replace(category='about', name=section_key)
            # Use an empty cache
            field_data_cache = FieldDataCache([], course.id, request.user)
            about_module = get_module(
                request.user,
                request,
                loc,
                field_data_cache,
                course.id,
                not_found_ok=True,
                wrap_xmodule_display=False,
                static_asset_path=course.static_asset_path
            )
            html = ''
            if about_module is not None:
                html = about_module.render('student_view').content
            return html
        except ItemNotFoundError:
            log.warning("Missing about section {key} in course {url}".format(
                key=section_key, url=course.location.url()))
            return None
    elif section_key == "title":
        return course.display_name_with_default
    elif section_key == "university":
        return course.display_org_with_default
    # NOTE(review): 'number' is also in the list above, so this branch is
    # unreachable — confirm which behaviour is intended.
    elif section_key == "number":
        return course.display_number_with_default
    raise KeyError("Invalid about key " + str(section_key))
def get_course_info_section(request, course, section_key):
    """
    This returns the snippet of html to be rendered on the course info page,
    given the key for the section.
    Valid keys:
    - handouts
    - guest_handouts
    - updates
    - guest_updates

    Returns '' when the matching course_info module does not exist.
    """
    # Build the course_info location for the requested section.
    loc = Location(course.location.tag, course.location.org, course.location.course, 'course_info', section_key)
    # Use an empty cache
    field_data_cache = FieldDataCache([], course.id, request.user)
    info_module = get_module(
        request.user,
        request,
        loc,
        field_data_cache,
        course.id,
        wrap_xmodule_display=False,
        static_asset_path=course.static_asset_path
    )
    html = ''
    if info_module is not None:
        html = info_module.render('student_view').content
    return html
def get_course_syllabus_section(course, section_key):
    """
    This returns the snippet of html to be rendered on the syllabus page,
    given the key for the section.
    Valid keys:
    - syllabus
    - guest_syllabus

    Returns a placeholder string when the file is missing; raises KeyError
    for any other section key.
    """
    # Many of these are stored as html files instead of some semantic
    # markup. This can change without effecting this interface when we find a
    # good format for defining so many snippets of text/html.
    if section_key in ['syllabus', 'guest_syllabus']:
        try:
            filesys = course.system.resources_fs
            # first look for a run-specific version
            dirs = [path("syllabus") / course.url_name, path("syllabus")]
            filepath = find_file(filesys, dirs, section_key + ".html")
            with filesys.open(filepath) as html_file:
                return replace_static_urls(
                    html_file.read().decode('utf-8'),
                    getattr(course, 'data_dir', None),
                    course_id=course.location.course_id,
                    static_asset_path=course.static_asset_path,
                )
        except ResourceNotFoundError:
            log.exception("Missing syllabus section {key} in course {url}".format(
                key=section_key, url=course.location.url()))
            return "! Syllabus missing !"
    raise KeyError("Invalid about key " + str(section_key))
def get_courses_by_university(user, domain=None):
    '''
    Returns dict of lists of courses available, keyed by course.org (ie university).
    Courses are sorted by course.number.
    '''
    # get_courses already filters out errored/inaccessible courses and
    # sorts by course.number; just bucket the result by organization.
    universities = defaultdict(list)
    for course in get_courses(user, domain):
        universities[course.org].append(course)
    return universities
def get_courses(user, domain=None):
    '''
    Returns a list of courses available, sorted by course.number
    '''
    visible = [
        course for course in branding.get_visible_courses(domain)
        if has_access(user, course, 'see_exists')
    ]
    return sorted(visible, key=lambda course: course.number)
def sort_by_announcement(courses):
    """
    Sorts a list of courses by their announcement date. If the date is
    not available, sort them by their start date.
    """
    # course.sorting_score already encodes the distance from the
    # announcement/start day; ascending order puts the nearest first.
    return sorted(courses, key=lambda course: course.sorting_score)
def get_cms_course_link_by_id(course_id):
    """
    Returns a proto-relative link to course_index for editing the course in cms, assuming that the course is actually
    cms-backed. If course_id is improperly formatted, just return the root of the cms
    """
    host = "//{}/".format(settings.CMS_BASE)  # protocol-relative
    match = re.match(r'^(?P<org>[^/]+)/(?P<course>[^/]+)/(?P<name>[^/]+)$', course_id)
    if not match:
        return host
    return "{host}{org}/{course}/course/{name}".format(host=host, **match.groupdict())
|
import io
import pytest
import databot
import pandas as pd
from databot.db.utils import Row
from databot.exporters.utils import flatten_nested_lists, flatten_nested_dicts, get_level_keys, flatten, sort_fields
from databot.exporters import jsonl
from databot.exporters import pandas
@pytest.fixture
def data():
    # Sample nested payload shared by the flatten tests below.
    return {
        'a': 1,
        'b': 2,
        'c': {
            'x': 1,
            'y': 2,
            'z': ['foo', 'bar', 'baz'],
        }
    }
def test_flatten_rows_update(data):
    # ``update`` entries may be databot expressions evaluated per row.
    # NOTE(review): the ``data`` fixture is requested but unused here.
    rows = [
        Row(key=1, value={'text': 'abc'}),
        Row(key=1, value={'text': 'abcde'}),
    ]
    update = {'size': databot.this.value.text.apply(len)}
    assert list(flatten(rows, include=['key', 'size'], update=update)) == [
        ('key', 'size'),
        (1, 3),
        (1, 5),
    ]
def test_flattenjson():
    # flatten_nested_lists expands each nested list into one row per element.
    rows = [
        {'key': 1, 'value': {'foo': 'bar', 'events': [
            {'name': 'Event 1', 'date': '2017-01-01', 'people': ['a', 'b']},
            {'name': 'Event 2', 'date': '2017-01-02', 'people': ['a']},
        ]}},
        {'key': 2, 'value': {'foo': 'baz', 'events': [
            {'name': 'Event 3', 'date': '2017-01-03', 'people': ['x', 'y']},
            {'name': 'Event 4', 'date': '2017-01-04', 'people': ['z']},
        ]}},
    ]
    assert list(map(dict, flatten_nested_lists(rows, include={('key',), ('value', 'events', 'date')}))) == [
        {('key',): 1, ('value', 'events', 'date'): '2017-01-01'},
        {('key',): 1, ('value', 'events', 'date'): '2017-01-02'},
        {('key',): 2, ('value', 'events', 'date'): '2017-01-03'},
        {('key',): 2, ('value', 'events', 'date'): '2017-01-04'},
    ]
    assert list(map(dict, flatten_nested_lists(rows, include={('key',), ('value', 'events', 'people')}))) == [
        {('key',): 1, ('value', 'events', 'people'): 'a'},
        {('key',): 1, ('value', 'events', 'people'): 'b'},
        {('key',): 1, ('value', 'events', 'people'): 'a'},
        {('key',): 2, ('value', 'events', 'people'): 'x'},
        {('key',): 2, ('value', 'events', 'people'): 'y'},
        {('key',): 2, ('value', 'events', 'people'): 'z'},
    ]
    # Without a narrow include (or none at all) every leaf value appears.
    assert [{v for k, v in x} for x in flatten_nested_lists(rows, include=[('key',), ('value',)])] == [
        {1, 'bar', '2017-01-01', 'Event 1', 'a'},
        {1, 'bar', '2017-01-01', 'Event 1', 'b'},
        {1, 'bar', '2017-01-02', 'Event 2', 'a'},
        {2, 'baz', '2017-01-03', 'Event 3', 'x'},
        {2, 'baz', '2017-01-03', 'Event 3', 'y'},
        {2, 'baz', '2017-01-04', 'Event 4', 'z'},
    ]
    assert [{v for k, v in x} for x in flatten_nested_lists(rows)] == [
        {1, 'bar', '2017-01-01', 'Event 1', 'a'},
        {1, 'bar', '2017-01-01', 'Event 1', 'b'},
        {1, 'bar', '2017-01-02', 'Event 2', 'a'},
        {2, 'baz', '2017-01-03', 'Event 3', 'x'},
        {2, 'baz', '2017-01-03', 'Event 3', 'y'},
        {2, 'baz', '2017-01-04', 'Event 4', 'z'},
    ]
def test_flatten_nested_dicts():
    # A flat dict flattens to one ((key-path), value) pair per entry.
    assert set(flatten_nested_dicts({'a': 1, 'b': 2, 'c': 3})) == {
        (('a',), 1),
        (('b',), 2),
        (('c',), 3),
    }
def test_flatten_nested_dicts_include():
    # ``include`` selects which key paths are emitted.
    assert set(flatten_nested_dicts({'a': 1, 'b': 2, 'c': 3}, include=[('b',), ('a',), ('c',)])) == {
        (('b',), 2),
        (('a',), 1),
        (('c',), 3),
    }
def test_get_level_keys():
    # Without include keys sort alphabetically; with include they follow
    # the include order and act as a filter for the current level.
    assert list(get_level_keys(keys=['c', 'b', 'a'], field=(), include=())) == ['a', 'b', 'c']
    assert list(get_level_keys(keys=['c', 'b', 'a'], field=(), include=[('b',), ('a',), ('c',)])) == ['b', 'a', 'c']
    assert list(get_level_keys(keys=['c', 'b', 'a'], field=('x',), include=())) == ['a', 'b', 'c']
    assert list(get_level_keys(keys=['c', 'b', 'a'], field=('x',), include=[('x', 'b',), ('x', 'c',)])) == ['b', 'c']
    assert list(get_level_keys(keys=['c', 'b', 'a'], field=(), include=[('b',), ('x',)])) == ['b']
    assert list(get_level_keys(keys=['c', 'b', 'a'], field=('x', 'y'), include=[('x',)])) == ['a', 'b', 'c']
def test_flatten():
    # flatten yields a header tuple first, then one row per nested element.
    rows = [
        Row(key=1, value={'foo': 'bar', 'events': [
            {'name': 'Event 1', 'date': '2017-01-01', 'people': ['a', 'b']},
            {'name': 'Event 2', 'date': '2017-01-02', 'people': ['a']},
        ]}),
        Row(key=2, value={'foo': 'baz', 'events': [
            {'name': 'Event 3', 'date': '2017-01-03', 'people': ['x', 'y']},
            {'name': 'Event 4', 'date': '2017-01-04', 'people': ['z']},
        ]}),
    ]
    assert list(flatten(rows)) == [
        ('events.date', 'events.name', 'events.people', 'foo', 'key'),
        ('2017-01-01', 'Event 1', 'a', 'bar', 1),
        ('2017-01-01', 'Event 1', 'b', 'bar', 1),
        ('2017-01-02', 'Event 2', 'a', 'bar', 1),
        ('2017-01-03', 'Event 3', 'x', 'baz', 2),
        ('2017-01-03', 'Event 3', 'y', 'baz', 2),
        ('2017-01-04', 'Event 4', 'z', 'baz', 2),
    ]
    assert list(flatten(rows, include=('key', 'foo', 'events.people'))) == [
        ('key', 'foo', 'events.people'),
        (1, 'bar', 'a'),
        (1, 'bar', 'b'),
        (1, 'bar', 'a'),
        (2, 'baz', 'x'),
        (2, 'baz', 'y'),
        (2, 'baz', 'z'),
    ]
    # Excluding the nested list collapses duplicates back to one row per key.
    assert list(flatten(rows, include=('key', 'foo'))) == [
        ('key', 'foo'),
        (1, 'bar'),
        (2, 'baz'),
    ]
def test_sort_fields():
    # Helper: express dotted names as tuples and back for readability.
    def _(fields, include):
        fields = [tuple(x.split('.')) for x in fields]
        include = [tuple(x.split('.')) for x in include]
        return ['.'.join(x) for x in sort_fields(fields, include)]
    assert _(['c', 'b', 'a'], []) == ['a', 'b', 'c']
    assert _(['c', 'b', 'a'], ['a', 'c']) == ['a', 'c']
    assert _(['x.c', 'x.b', 'x.a'], ['x']) == ['x.a', 'x.b', 'x.c']
    assert _(['z', 'x.b', 'x.a'], ['x', 'z']) == ['x.a', 'x.b', 'z']
def test_flatten_rows_update_without_include(data):
    # Without include, updated fields merge with the flattened value keys.
    rows = [
        Row(key=1, value={'text': 'abc'}),
        Row(key=1, value={'text': 'abcde'}),
    ]
    update = {'size': databot.this.value.text.apply(len)}
    assert list(flatten(rows, update=update)) == [
        ('key', 'size', 'text'),
        (1, 3, 'abc'),
        (1, 5, 'abcde'),
    ]
def test_flatten_rows_callable_update(data):
    # A callable update replaces the row entirely with its return value.
    rows = [
        Row(key=1, value={'text': 'abc'}),
        Row(key=1, value={'text': 'abcde'}),
    ]
    def update(row):
        return {'size': len(row.value['text'])}
    assert list(flatten(rows, update=update)) == [
        ('size',),
        (3,),
        (5,),
    ]
def test_flatten_rows_include(data):
    # Missing fields are padded with None.
    rows = [
        Row(key=1, value={'a': 1}),
        Row(key=2, value={'b': 2}),
    ]
    assert list(flatten(rows, include=['a', 'b'])) == [
        ('a', 'b'),
        (1, None),
        (None, 2),
    ]
def test_flatten_rows_include_value(data):
    # Scalar (non-dict) values are exposed under the 'value' column.
    rows = [
        Row(key=1, value='a'),
        Row(key=2, value='b'),
    ]
    assert list(flatten(rows, include=['key', 'value'])) == [
        ('key', 'value'),
        (1, 'a'),
        (2, 'b'),
    ]
def test_flatten_rows_value(data):
    # Same as above, but with the default (no include) column selection.
    rows = [
        Row(key=1, value='a'),
        Row(key=2, value='b'),
    ]
    assert list(flatten(rows)) == [
        ('key', 'value'),
        (1, 'a'),
        (2, 'b'),
    ]
def test_flatten_int_key(data):
    # Non-string dict keys are stringified when building dotted columns.
    rows = [
        Row(key=1, value={'year': {2000: 1, 2001: 2}}),
        Row(key=2, value={'year': {2000: 3, 2001: 4}}),
    ]
    assert list(flatten(rows)) == [
        ('key', 'year.2000', 'year.2001'),
        (1, 1, 2),
        (2, 3, 4),
    ]
def test_flatten_list(data):
    # Lists of dicts produce one output row per list element.
    rows = [
        Row(key=1, value={'events': [
            {'name': 'Event 1', 'date': '2017-01-01'},
            {'name': 'Event 2', 'date': '2017-02-01'},
        ]}),
        Row(key=2, value={'events': [
            {'name': 'Event 3', 'date': '2017-03-01'},
            {'name': 'Event 4', 'date': '2017-04-01'},
        ]}),
    ]
    assert list(flatten(rows)) == [
        ('events.date', 'events.name', 'key'),
        ('2017-01-01', 'Event 1', 1),
        ('2017-02-01', 'Event 2', 1),
        ('2017-03-01', 'Event 3', 2),
        ('2017-04-01', 'Event 4', 2),
    ]
def test_jsonl(bot):
    # Scalar values are exported as {"key": ..., "value": ...} lines.
    pipe = bot.define('p1').append([('1', 'a'), ('2', 'b')])
    stream = io.StringIO()
    jsonl.export(stream, pipe.rows())
    assert stream.getvalue().splitlines() == [
        '{"key": "1", "value": "a"}',
        '{"key": "2", "value": "b"}',
    ]
def test_jsonl_dict(bot):
    # Dict values are merged into the JSON object alongside "key".
    pipe = bot.define('p1').append([('1', {'a': 2}), ('2', {'b': 3})])
    stream = io.StringIO()
    jsonl.export(stream, pipe.rows())
    assert stream.getvalue().splitlines() == [
        '{"key": "1", "a": 2}',
        '{"key": "2", "b": 3}',
    ]
def test_pandas_rows_to_dataframe_items():
    # The second argument selects which column becomes the index.
    rows = [
        [1, 'a', 'x'],
        [2, 'b', 'y'],
    ]
    assert list(pandas.rows_to_dataframe_items(rows, 0)) == [
        (1, ['a', 'x']),
        (2, ['b', 'y'])
    ]
    assert list(pandas.rows_to_dataframe_items(rows, 2)) == [
        ('x', [1, 'a']),
        ('y', [2, 'b'])
    ]
def test_pandas(bot):
    # Exporting to the pandas module (``pd``) yields a key-indexed DataFrame.
    pipe = bot.define('p1').append([
        (1, {'a': 10}),
        (2, {'a': 20}),
    ])
    assert [dict(x._asdict()) for x in pipe.export(pd).itertuples()] == [
        {'Index': 1, 'a': 10.0},
        {'Index': 2, 'a': 20.0},
    ]
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, render
from base.models.person import Person
from base.views.learning_units.common import get_learning_unit_identification_context
@login_required
def learning_unit_identification(request, learning_unit_year_id):
    """Render the identification page of a learning unit year, choosing the
    external or regular template and checking the matching permission."""
    person = get_object_or_404(Person, user=request.user)
    context = get_learning_unit_identification_context(learning_unit_year_id, person)

    # External learning units use a dedicated template and permission.
    if context['learning_unit_year'].is_external():
        template = "learning_unit/external/read.html"
        permission = 'base.can_access_externallearningunityear'
    else:
        template = "learning_unit/identification.html"
        permission = 'base.can_access_learningunit'

    if not person.user.has_perm(permission):
        raise PermissionDenied
    return render(request, template, context)
|
"""
Views for contract feature
"""
import logging
from edxmako.shortcuts import render_to_response
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from biz.djangoapps.ga_manager.models import Manager
log = logging.getLogger(__name__)
LOGIN_ADMIN = 1
LOGIN_ERROR = -1
LOGIN_DEFAULT = 0
LOGIN_ERROR_AUTH = -2
def index(request):
    """
    lists content of Login

    GET renders the business login form (or redirects an already
    authenticated, active user). POST validates the credentials and,
    when the user holds at least one biz manager role, logs them in and
    redirects to ``next`` or biz:index.
    """
    next_url = request.GET.get('next', '')
    if request.user.is_active:
        if request.user.is_authenticated():
            if next_url == '':
                return redirect(reverse('biz:index'))
            else:
                return redirect(next_url)
    account_check = LOGIN_DEFAULT
    post_email = request.POST.get('email', '')
    # BUG FIX: default to '' (previously None) so the length check below
    # cannot raise TypeError when a POST omits the password field.
    post_password = request.POST.get("password", '')
    post_remember = False
    if request.method == 'POST':
        next_url = request.POST.get("next", '')
        if "remember" in request.POST:
            post_remember = True
        # Reject empty or oversized fields before touching the database.
        if not 0 < len(post_email) <= 255:
            log.info('Login failed - email length over')
            account_check = LOGIN_ERROR
        if not 0 < len(post_password) <= 255:
            log.info('Login failed - password length over')
            account_check = LOGIN_ERROR
        if User.objects.filter(email=post_email, is_active=True).exists():
            user = User.objects.get(email=post_email, is_active=True)
        else:
            log.info("Login failed - password for {0} is invalid".format(post_email))
            account_check = LOGIN_ERROR
        if account_check == LOGIN_ERROR:
            return render_to_response('gx_login/login.html', {'account_check': account_check, 'next_url': next_url, 'email': post_email})
        if user.check_password(post_password):
            # Only users holding at least one biz manager role may proceed.
            mgs = Manager.get_managers(user)
            if any([mg.is_aggregator() for mg in mgs]):
                account_check = LOGIN_ADMIN
            if any([mg.is_director() for mg in mgs]):
                account_check = LOGIN_ADMIN
            if any([mg.is_manager() for mg in mgs]):
                account_check = LOGIN_ADMIN
            if any([mg.is_platformer() for mg in mgs]):
                account_check = LOGIN_ADMIN
            if account_check == LOGIN_ADMIN:
                # Auto Updating Last Login Datetime
                user = authenticate(username=user.username, password=post_password)
                login(request, user)
                if post_remember:
                    # Session Retention 7 days
                    request.session.set_expiry(604800)
                else:
                    # Expire the session when the browser is closed.
                    request.session.set_expiry(0)
                if next_url == '':
                    return redirect(reverse('biz:index'))
                else:
                    return redirect(next_url)
            else:
                account_check = LOGIN_ERROR_AUTH
        else:
            log.info('Login failed - password mismatch')
            account_check = LOGIN_ERROR
    return render_to_response('gx_login/login.html', {'account_check': account_check, 'next_url': next_url, 'email': post_email})
|
import os
# NOTE(review): this is a VoltDB "run.py" driver script; the ``VOLT`` object
# is injected into the module namespace by the voltdb command-line tool, not
# imported here.
@VOLT.Command(description = 'Build the Voter application and catalog.',
              options = VOLT.BooleanOption('-C', '--conditional', 'conditional',
                                           'only build when the catalog file is missing'))
def build(runner):
    # With -C, skip the build when voter.jar already exists.
    if not runner.opts.conditional or not os.path.exists('voter.jar'):
        runner.java.compile('obj', 'src/voter/*.java', 'src/voter/procedures/*.java')
        runner.call('volt.compile', '-c', 'obj', '-o', 'voter.jar', 'ddl.sql')
@VOLT.Command(description = 'Clean the Voter build output.')
def clean(runner):
    runner.shell('rm', '-rfv', 'obj', 'debugoutput', 'voter.jar', 'voltdbroot')
@VOLT.Server('create',
             description = 'Start the Voter VoltDB server.',
             command_arguments = 'voter.jar',
             classpath = 'obj')
def server(runner):
    # Build (conditionally) before launching the server.
    runner.call('build', '-C')
    runner.go()
# NOTE(review): "async" became a reserved keyword in Python 3.7; this script
# presumably targets the Python 2 interpreter shipped with VoltDB — confirm
# before porting, since renaming would also rename the CLI verb.
@VOLT.Java('voter.AsyncBenchmark', classpath = 'obj',
           description = 'Run the Voter asynchronous benchmark.')
def async(runner):
    runner.call('build', '-C')
    runner.go()
@VOLT.Java('voter.SyncBenchmark', classpath = 'obj',
           description = 'Run the Voter synchronous benchmark.')
def sync(runner):
    runner.call('build', '-C')
    runner.go()
@VOLT.Java('voter.JDBCBenchmark', classpath = 'obj',
           description = 'Run the Voter JDBC benchmark.')
def jdbc(runner):
    runner.call('build', '-C')
    runner.go()
@VOLT.Java('voter.SimpleBenchmark', classpath = 'obj',
           description = 'Run the Voter simple benchmark.')
def simple(runner):
    runner.call('build', '-C')
    runner.go()
|
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
    """AFIP incoterm catalog entry (AFIP code plus display name)."""
    _name = 'afip.incoterm'
    _description = 'Afip Incoterm'
    # Code assigned by AFIP for this incoterm.
    afip_code = fields.Char(
        'Code', required=True)
    name = fields.Char(
        'Name', required=True)
class afip_point_of_sale(models.Model):
    """AFIP point of sale: a numbered emission point (manual, preprinted,
    online, ...) that belongs to a company and groups journals."""
    _name = 'afip.point_of_sale'
    _description = 'Afip Point Of Sale'

    prefix = fields.Char(
        'Prefix'
    )
    sufix = fields.Char(
        'Sufix'
    )
    type = fields.Selection([
        ('manual', 'Manual'),
        ('preprinted', 'Preprinted'),
        ('online', 'Online'),
        # Added by other modules:
        # ('electronic', 'Electronic'),
        # ('fiscal_printer', 'Fiscal Printer'),
        ],
        'Type',
        default='manual',
        required=True,
    )
    name = fields.Char(
        compute='get_name',
    )
    number = fields.Integer(
        'Number', required=True
    )
    company_id = fields.Many2one(
        'res.company', 'Company', required=True,
        default=lambda self: self.env['res.company']._company_default_get(
            'afip.point_of_sale')
    )
    journal_ids = fields.One2many(
        'account.journal',
        'point_of_sale_id',
        'Journals',
    )
    document_sequence_type = fields.Selection(
        [('own_sequence', 'Own Sequence'),
         ('same_sequence', 'Same Invoice Sequence')],
        string='Document Sequence Type',
        default='own_sequence',
        required=True,
        help="Use own sequence or invoice sequence on Debit and Credit Notes?"
    )
    journal_document_class_ids = fields.One2many(
        'account.journal.afip_document_class',
        compute='get_journal_document_class_ids',
        string='Documents Classes',
    )

    @api.one
    @api.depends('type', 'sufix', 'prefix', 'number')
    def get_name(self):
        """Build the display name: '<number as %04d> - [prefix] <type label> [sufix]'."""
        # TODO: take the translated label from the selection instead of
        # hard-coding the type names here.
        type_labels = {
            'manual': 'Manual',
            'preprinted': 'Preimpresa',
            'online': 'Online',
            'electronic': 'Electronica',
        }
        # BUG FIX: the original if/elif chain left ``name`` unbound — an
        # UnboundLocalError — for selection values added by other modules
        # (e.g. 'fiscal_printer'). Fall back to the raw type code.
        name = type_labels.get(self.type, self.type)
        if self.prefix:
            name = '%s %s' % (self.prefix, name)
        if self.sufix:
            name = '%s %s' % (name, self.sufix)
        name = '%04d - %s' % (self.number, name)
        self.name = name

    @api.one
    @api.depends('journal_ids.journal_document_class_ids')
    def get_journal_document_class_ids(self):
        """Collect the document classes of every journal of this point of sale."""
        journal_document_class_ids = self.env[
            'account.journal.afip_document_class'].search([
                ('journal_id.point_of_sale_id', '=', self.id)])
        self.journal_document_class_ids = journal_document_class_ids

    _sql_constraints = [('number_unique', 'unique(number, company_id)',
                         'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
    # AFIP document class: a kind of fiscal document (invoice, note, receipt,
    # ...) with the numeric code AFIP assigns to it.
    _name = 'afip.document_class'
    _description = 'Afip Document Class'

    name = fields.Char(
        'Name', size=120)
    doc_code_prefix = fields.Char(
        'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
    # Numeric code assigned by AFIP to this document class.
    afip_code = fields.Integer(
        'AFIP Code', required=True)
    document_letter_id = fields.Many2one(
        'afip.document_letter', 'Document Letter')
    report_name = fields.Char(
        'Name on Reports',
        help='Name that will be printed in reports, for example "CREDIT NOTE"')
    # Drives automatic journal selection and which menus show the document.
    document_type = fields.Selection([
        ('invoice', 'Invoices'),
        ('credit_note', 'Credit Notes'),
        ('debit_note', 'Debit Notes'),
        ('receipt', 'Receipt'),
        ('ticket', 'Ticket'),
        ('in_document', 'In Document'),
        ('other_document', 'Other Documents')
        ],
        string='Document Type',
        help='It defines some behaviours on automatic journal selection and\
 in menus where it is shown.')
    active = fields.Boolean(
        'Active', default=True)
class afip_document_letter(models.Model):
    """AFIP document letter (A, B, C, ...) plus the responsabilities
    allowed to issue and to receive documents with this letter."""
    _name = 'afip.document_letter'
    _description = 'Afip Document letter'

    name = fields.Char(string='Name', size=64, required=True)
    afip_document_class_ids = fields.One2many(
        'afip.document_class', 'document_letter_id',
        string='Afip Document Classes')
    # Responsabilities that may issue documents with this letter.
    issuer_ids = fields.Many2many(
        'afip.responsability', 'afip_doc_letter_issuer_rel',
        'letter_id', 'responsability_id', string='Issuers')
    # Responsabilities that may receive documents with this letter.
    receptor_ids = fields.Many2many(
        'afip.responsability', 'afip_doc_letter_receptor_rel',
        'letter_id', 'responsability_id', string='Receptors')
    active = fields.Boolean(string='Active', default=True)
    vat_discriminated = fields.Boolean(
        string='Vat Discriminated on Invoices?',
        help="If True, the vat will be discriminated on invoice report.")

    _sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
    """AFIP VAT responsability category of a partner/company."""
    _name = 'afip.responsability'
    _description = 'AFIP VAT Responsability'

    name = fields.Char(string='Name', size=64, required=True)
    code = fields.Char(string='Code', size=8, required=True)
    active = fields.Boolean(string='Active', default=True)
    # Letters this responsability can issue / receive; inverse side of the
    # relation tables declared on afip.document_letter.
    issued_letter_ids = fields.Many2many(
        'afip.document_letter', 'afip_doc_letter_issuer_rel',
        'responsability_id', 'letter_id', string='Issued Document Letters')
    received_letter_ids = fields.Many2many(
        'afip.document_letter', 'afip_doc_letter_receptor_rel',
        'responsability_id', 'letter_id', string='Received Document Letters')
    vat_tax_required_on_sales_invoices = fields.Boolean(
        string='VAT Tax Required on Sales Invoices?',
        help='If True, then a vay tax is mandatory on each sale invoice for companies of this responsability',
    )

    _sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
                        ('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
    """Catalog of AFIP identification document types."""
    _name = 'afip.document_type'
    _description = 'AFIP document types'

    name = fields.Char(string='Name', size=120, required=True)
    code = fields.Char(string='Code', size=16, required=True)
    # Numeric code assigned by AFIP to this document type.
    afip_code = fields.Integer(string='AFIP Code', required=True)
    active = fields.Boolean(string='Active', default=True)
|
"""Add is_loud and pronouns columns to PanelApplicant
Revision ID: bba880ef5bbd
Revises: 8f8419ebcf27
Create Date: 2019-07-20 02:57:17.794469
"""
revision = 'bba880ef5bbd'
down_revision = '8f8419ebcf27'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
    """Adds parenthesis around SQLite datetime defaults for utcnow."""
    reflected_default = column_info['default']
    if reflected_default == "datetime('now', 'utc')":
        column_info['default'] = utcnow_server_default
# Reflection kwargs that normalize SQLite datetime defaults during reflection.
sqlite_reflect_kwargs = {
    'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
def upgrade():
    """Apply: add the two pronoun columns to panel_applicant and the
    is_loud flag to panel_application."""
    pronoun_columns = (
        sa.Column('other_pronouns', sa.Unicode(), server_default='', nullable=False),
        sa.Column('pronouns', sa.Unicode(), server_default='', nullable=False),
    )
    for column in pronoun_columns:
        op.add_column('panel_applicant', column)
    op.add_column('panel_application',
                  sa.Column('is_loud', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
    """Revert: drop the columns added by this revision."""
    op.drop_column('panel_application', 'is_loud')
    for column_name in ('pronouns', 'other_pronouns'):
        op.drop_column('panel_applicant', column_name)
|
def keysetter(key):
    """Return a setter ``f(obj, value)`` that assigns *value* at the
    dot-separated *key* path inside nested mappings of *obj*.

    Raises TypeError when *key* is not a string.
    """
    if not isinstance(key, str):
        raise TypeError('key name must be a string')
    parts = key.split('.')
    intermediate, leaf = parts[:-1], parts[-1]

    def setter(obj, value):
        target = obj
        for part in intermediate:
            target = target[part]
        target[leaf] = value

    return setter
def keygetter(key):
    """Return a getter ``g(obj)`` resolving the dot-separated *key* path
    through nested mappings of *obj*.

    Raises TypeError when *key* is not a string.
    """
    if not isinstance(key, str):
        raise TypeError('key name must be a string')

    def getter(obj):
        # Walk the path one mapping level per dotted component.
        current = obj
        for part in key.split('.'):
            current = current[part]
        return current

    return getter
def resolve_key(obj, key):
    """Follow the dot-separated *key* path through nested mappings of *obj*
    and return the value found at the end of the path."""
    current = obj
    for part in key.split('.'):
        current = current[part]
    return current
|
from odoo import api, fields, models
class EventType(models.Model):
    """Extend event.type with a stored, user-editable community-menu flag."""
    _inherit = "event.type"

    community_menu = fields.Boolean(
        "Community Menu", compute="_compute_community_menu",
        readonly=False, store=True,
        help="Display community tab on website")

    @api.depends('website_menu')
    def _compute_community_menu(self):
        # The community tab defaults to whatever the website-menu flag is;
        # being readonly=False/store=True, users may still override it.
        for record in self:
            record.community_menu = record.website_menu
|
import pytest
import json
from django.urls import reverse
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_watch_task(client):
    """POSTing to the watch endpoint as a project admin returns 200."""
    admin = f.UserFactory.create()
    task = f.create_task(owner=admin, milestone=None)
    f.MembershipFactory.create(project=task.project, user=admin, is_admin=True)
    client.login(admin)
    response = client.post(reverse("tasks-watch", args=(task.id,)))
    assert response.status_code == 200
def test_unwatch_task(client):
    """POSTing to the unwatch endpoint as a project admin returns 200.

    Fix: this test previously POSTed to the "tasks-watch" endpoint,
    exactly duplicating test_watch_task instead of exercising
    "tasks-unwatch" (the endpoint name used elsewhere in this module).
    """
    user = f.UserFactory.create()
    task = f.create_task(owner=user, milestone=None)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    url = reverse("tasks-unwatch", args=(task.id,))
    client.login(user)
    response = client.post(url)
    assert response.status_code == 200
def test_list_task_watchers(client):
    """The watchers list endpoint returns the watching user."""
    watcher = f.UserFactory.create()
    task = f.TaskFactory(owner=watcher)
    f.MembershipFactory.create(project=task.project, user=watcher, is_admin=True)
    f.WatchedFactory.create(content_object=task, user=watcher)
    client.login(watcher)
    response = client.get(reverse("task-watchers-list", args=(task.id,)))
    assert response.status_code == 200
    assert response.data[0]['id'] == watcher.id
def test_get_task_watcher(client):
    """The watcher detail endpoint returns the requested watcher's id."""
    user = f.UserFactory.create()
    task = f.TaskFactory(owner=user)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    watched = f.WatchedFactory.create(content_object=task, user=user)
    client.login(user)
    detail_url = reverse("task-watchers-detail", args=(task.id, watched.user.id))
    response = client.get(detail_url)
    assert response.status_code == 200
    assert response.data['id'] == watched.user.id
def test_get_task_watchers(client):
    """Task detail exposes the watcher ids and total watcher count."""
    user = f.UserFactory.create()
    task = f.TaskFactory(owner=user)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    f.WatchedFactory.create(content_object=task, user=user)
    client.login(user)
    response = client.get(reverse("tasks-detail", args=(task.id,)))
    assert response.status_code == 200
    assert response.data['watchers'] == [user.id]
    assert response.data['total_watchers'] == 1
def test_get_task_is_watcher(client):
    """Watching then unwatching a task is reflected in the detail payload."""
    user = f.UserFactory.create()
    task = f.create_task(owner=user, milestone=None)
    f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
    detail_url = reverse("tasks-detail", args=(task.id,))
    watch_url = reverse("tasks-watch", args=(task.id,))
    unwatch_url = reverse("tasks-unwatch", args=(task.id,))
    client.login(user)

    def assert_watch_state(watchers, is_watcher):
        # Re-fetch the detail payload and compare the watcher fields.
        response = client.get(detail_url)
        assert response.status_code == 200
        assert response.data['watchers'] == watchers
        assert response.data['is_watcher'] == is_watcher

    assert_watch_state([], False)
    assert client.post(watch_url).status_code == 200
    assert_watch_state([user.id], True)
    assert client.post(unwatch_url).status_code == 200
    assert_watch_state([], False)
def test_remove_task_watcher(client):
    """PATCHing an empty watchers list removes the current watcher."""
    watcher = f.UserFactory.create()
    project = f.ProjectFactory.create()
    task = f.TaskFactory(project=project,
                         user_story=None,
                         status__project=project,
                         milestone__project=project)
    task.add_watcher(watcher)
    # A plain member role with just enough permissions to edit the task.
    role = f.RoleFactory.create(
        project=project, permissions=['modify_task', 'view_tasks'])
    f.MembershipFactory.create(project=project, user=watcher, role=role)
    client.login(watcher)
    payload = json.dumps({"version": task.version, "watchers": []})
    response = client.json.patch(
        reverse("tasks-detail", args=(task.id,)), payload)
    assert response.status_code == 200
    assert response.data['watchers'] == []
    assert response.data['is_watcher'] == False
|
import subprocess
def release():
    """Build an sdist and upload it via setup.py.

    NOTE(review): ``setup.py upload`` is deprecated in favour of twine;
    confirm before modernising, since switching would change behaviour.
    """
    command = ["python3", "setup.py", "sdist", "upload"]
    subprocess.call(command)
|
from . import BaseWordChoice
class WordPreference(BaseWordChoice):
    """Word-choice strategy that prefers a remembered word per meaning."""

    def pick_w(self, m, voc, mem, context=None):
        """Pick a word for meaning *m*.

        Prefers the word stored in ``mem['prefered words']`` when it is
        still known for *m*; otherwise falls back to a random known word,
        then to a new unknown word, then to the least-used known word.

        Fix: *context* previously defaulted to a shared mutable list
        (``context=[]``); it is unused here, but now defaults to None so
        no list instance is shared across calls.
        """
        if m in voc.get_known_meanings():
            preferences = mem['prefered words']
            if m in preferences:
                w = preferences[m]
                # The remembered word may have been forgotten since.
                if w not in voc.get_known_words(m=m):
                    w = voc.get_random_known_w(m=m)
            else:
                w = voc.get_random_known_w(m=m)
        elif voc.get_unknown_words():
            w = voc.get_new_unknown_w()
        else:
            w = voc.get_random_known_w(option='min')
        return w
class PlaySmart(WordPreference):
    """WordPreference backed by the 'wordpreference_smart' memory policy."""

    def __init__(self, *args, **kwargs):
        super(PlaySmart, self).__init__(
            memory_policies=[{'mem_type': 'wordpreference_smart'}],
            *args, **kwargs)
class PlayLast(WordPreference):
    """WordPreference backed by the 'wordpreference_last' memory policy."""

    def __init__(self, *args, **kwargs):
        super(PlayLast, self).__init__(
            memory_policies=[{'mem_type': 'wordpreference_last'}],
            *args, **kwargs)
class PlayFirst(WordPreference):
    """WordPreference backed by the 'wordpreference_first' memory policy."""

    def __init__(self, *args, **kwargs):
        super(PlayFirst, self).__init__(
            memory_policies=[{'mem_type': 'wordpreference_first'}],
            *args, **kwargs)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Make maprenderingjob.track optional (nullable + blank).

    dependencies = [
        ('maposmatic', '0004_maprenderingjob_track'),
    ]

    operations = [
        migrations.AlterField(
            model_name='maprenderingjob',
            name='track',
            # bytes upload_to path kept as-is (Python 2 era migration).
            field=models.FileField(null=True, upload_to=b'upload/tracks/', blank=True),
        ),
    ]
|
"""
Base test case for the course API views.
"""
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from lms.djangoapps.courseware.tests.factories import StaffFactory
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class BaseCourseViewTest(SharedModuleStoreTestCase, APITestCase):
    """
    Base test class for course data views.

    Subclasses set ``view_name`` and call :meth:`get_url` to build the
    endpoint URL for the shared test course created in ``setUpClass``.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    view_name = None  # The name of the view to use in reverse() call in self.get_url()

    @classmethod
    def setUpClass(cls):
        # Build the shared course, the two users, and the course tree once
        # per test class (SharedModuleStoreTestCase semantics).
        super(BaseCourseViewTest, cls).setUpClass()
        cls.course = CourseFactory.create(display_name='test course', run="Testing_course")
        cls.course_key = cls.course.id
        cls.password = 'test'
        # One unprivileged student and one course-staff user.
        cls.student = UserFactory(username='dummy', password=cls.password)
        cls.staff = StaffFactory(course_key=cls.course.id, password=cls.password)
        cls.initialize_course(cls.course)

    @classmethod
    def initialize_course(cls, course):
        """
        Sets up the structure of the test course.

        Layout: one chapter with two sequentials.  subsection1 holds one
        vertical containing a video and a problem; subsection2 holds two
        verticals, the second of which contains two videos.
        """
        course.self_paced = True
        cls.store.update_item(course, cls.staff.id)
        cls.section = ItemFactory.create(
            parent_location=course.location,
            category="chapter",
        )
        cls.subsection1 = ItemFactory.create(
            parent_location=cls.section.location,
            category="sequential",
        )
        unit1 = ItemFactory.create(
            parent_location=cls.subsection1.location,
            category="vertical",
        )
        ItemFactory.create(
            parent_location=unit1.location,
            category="video",
        )
        ItemFactory.create(
            parent_location=unit1.location,
            category="problem",
        )
        cls.subsection2 = ItemFactory.create(
            parent_location=cls.section.location,
            category="sequential",
        )
        # unit2 is deliberately left without children — presumably to give
        # subsection2 an empty first vertical; confirm before removing.
        unit2 = ItemFactory.create(
            parent_location=cls.subsection2.location,
            category="vertical",
        )
        unit3 = ItemFactory.create(
            parent_location=cls.subsection2.location,
            category="vertical",
        )
        ItemFactory.create(
            parent_location=unit3.location,
            category="video",
        )
        ItemFactory.create(
            parent_location=unit3.location,
            category="video",
        )

    def get_url(self, course_id):
        """
        Helper function to create the url for ``self.view_name`` with the
        given course_id.
        """
        return reverse(
            self.view_name,
            kwargs={
                'course_id': course_id
            }
        )
|
from django.contrib.auth.models import User, UserManager
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.db.models import signals, Avg, Q
from datetime import date
import os
from django.conf import settings
def create_profile_for_user(sender, **kwargs):
    '''
    Signal handler: every time a User is created, a Profile is created
    too, seeded from the user's field values.

    Fix: replaced dict.has_key() (removed in Python 3) with the ``in``
    operator.
    '''
    if kwargs['created']:
        profile = Profile()
        instance_fields = kwargs['instance'].__dict__
        # Provide defaults for profile-only fields the User lacks.
        if "birth_date" not in instance_fields:
            profile.birth_date = date.today()
        if "address" not in instance_fields:
            profile.address = _("address")
        profile.__dict__.update(instance_fields)
        profile.save()
class Profile(User):
    '''
    User with timebank settings.

    NOTE(review): this class contained unresolved merge-conflict markers
    (<<<<<<< / ======= / >>>>>>>) which made the module unimportable.
    The HEAD side was kept throughout (Slovenian labels, organisation
    fields, 600-minute starting balance); confirm against the intended
    branch.
    '''
    photo = models.ImageField(_("Avatar"), blank=True, null=True,
        upload_to=os.path.join(settings.STATIC_DOC_ROOT, "photos"))
    # Fix: default=date.today (callable) instead of date.today() which
    # froze the default at import time.
    birth_date = models.DateField(_("Rojstni datum"), default=date.today)
    address = models.CharField(_("Naslov"), max_length=100, default=_("address"))
    org_name = models.CharField(_("Ime organizacije"), max_length=30, default=_("org_name"))
    first_name1 = models.CharField(_("Ime zastopnika"), max_length=30, default=_("first_name"))
    last_name1 = models.CharField(_("Priimek zastopnika"), max_length=30, default=_("last_name"))
    email1 = models.CharField(_("E-mail zastopnika"), max_length=30, default=_("email"))
    # credits in minutes
    balance = models.IntegerField(default=600)

    def balance_hours(self):
        # Return an int when the balance is a whole number of hours,
        # otherwise a float (Python 2 division semantics).
        if self.balance % 60 == 0:
            return self.balance/60
        return self.balance/60.0

    description = models.TextField(_("Opis"), max_length=300,
        blank=True)
    land_line = models.CharField(_("Stacionarni telefon"), max_length=20)
    mobile_tlf = models.CharField(_("Mobilni telefon"), max_length=20)
    email_updates = models.BooleanField(_(u"Želim prejemati novice Časovne banke"),
        default=True)
    # Saving the user language allows sending emails to him in his desired
    # language (among other things)
    lang_code = models.CharField(_("Jezik"), max_length=10, default='')

    class Meta:
        verbose_name = _("user")
        verbose_name_plural = _("users")

    def __unicode__(self):
        return self.username

    # Use UserManager to get the create_user method, etc.
    objects = UserManager()

    def __eq__(self, value):
        return value and self.id == value.id or False

    def transfers_pending(self):
        '''
        Transfers from this user which are not in a final state
        '''
        from serv.models import Transfer
        # NOTE(review): both Q objects filter on credits_payee, so the OR
        # is a no-op; one side probably should be the payer field.
        # Confirm against the Transfer model before changing.
        return Transfer.objects.filter(Q(credits_payee=self) \
            | Q(credits_payee=self)).filter(status__in=['r', 'd'])

    def karma(self):
        '''
        Average of the user's transfer scores
        '''
        karma = self.transfers_received.aggregate(Avg('rating_score'))
        if karma['rating_score__avg']:
            return int(karma['rating_score__avg'])
        else:
            return 0
|
# Odoo module manifest for the Products Management Group addon.
{
    'name': 'Products Management Group',
    'version': '13.0.1.0.0',
    'category': 'base.module_category_knowledge_management',
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    # Modules that must be installed first.
    'depends': [
        'sale',
    ],
    # Data files loaded on install/update.
    'data': [
        'security/product_management_security.xml',
    ],
    # Marked not installable in this branch.
    'installable': False,
}
|
from __future__ import absolute_import
from math import isinf, isnan
from warnings import warn
# SBO terms describing pseudo-reactions that are exempt from mass balance.
NOT_MASS_BALANCED_TERMS = {"SBO:0000627",  # EXCHANGE
                           "SBO:0000628",  # DEMAND
                           "SBO:0000629",  # BIOMASS
                           "SBO:0000631",  # PSEUDOREACTION
                           "SBO:0000632",  # SINK
                           }


def check_mass_balance(model):
    """Map each mass-unbalanced reaction of *model* to its imbalance dict."""
    unbalanced = {}
    for rxn in model.reactions:
        # Exchange/demand/sink style pseudo-reactions are skipped.
        if rxn.annotation.get("SBO") in NOT_MASS_BALANCED_TERMS:
            continue
        imbalance = rxn.check_mass_balance()
        if imbalance:
            unbalanced[rxn] = imbalance
    return unbalanced
def check_reaction_bounds(model):
    """Return error strings for reactions of *model* whose bounds are
    inverted (lower > upper), infinite, or NaN.

    Deprecated: the optlang solver interfaces perform this check now.
    """
    warn("no longer necessary, done by optlang solver interfaces",
         DeprecationWarning)
    errors = []
    for rxn in model.reactions:
        lower, upper = rxn.lower_bound, rxn.upper_bound
        if lower > upper:
            errors.append("Reaction '%s' has lower bound > upper bound" %
                          rxn.id)
        if isinf(lower):
            errors.append("Reaction '%s' has infinite lower_bound" %
                          rxn.id)
        elif isnan(lower):
            errors.append("Reaction '%s' has NaN for lower_bound" %
                          rxn.id)
        if isinf(upper):
            errors.append("Reaction '%s' has infinite upper_bound" %
                          rxn.id)
        elif isnan(upper):
            errors.append("Reaction '%s' has NaN for upper_bound" %
                          rxn.id)
    return errors
def check_metabolite_compartment_formula(model):
    """Return error strings for metabolites whose formula is set but not
    purely alphanumeric."""
    errors = []
    for met in model.metabolites:
        formula = met.formula
        # None and empty formulas are both skipped.
        if formula and not formula.isalnum():
            errors.append("Metabolite '%s' formula '%s' not alphanumeric" %
                          (met.id, formula))
    return errors
|
import math, os
from bup import _helpers, helpers
from bup.helpers import sc_page_size
_fmincore = getattr(helpers, 'fmincore', None)
# Split/packing tuning constants.  NOTE: this is a Python 2 module — the
# git mode constants below use the legacy 0NNN octal literal syntax.
BLOB_MAX = 8192*4 # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024  # bytes read from the input per chunk
MAX_PER_TREE = 256  # max entries per tree level before squishing upward
progress_callback = None  # optional callable(nbytes) set by callers
fanout = 16  # tree fanout control; see split_to_shalist
# Git mode bits for the objects we write.
GIT_MODE_FILE = 0100644
GIT_MODE_TREE = 040000
GIT_MODE_SYMLINK = 0120000
assert(GIT_MODE_TREE != 40000) # 0xxx should be treated as octal
class Buf:
    """Growable byte buffer with a cheap consume-from-front cursor.

    Python 2 only: relies on the builtin buffer() for zero-copy slices.
    """
    def __init__(self):
        self.data = ''  # backing bytes; consumed prefix dropped on next put()
        self.start = 0  # index of the first unconsumed byte

    def put(self, s):
        # Drop the already-consumed prefix and append s.
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0

    def peek(self, count):
        # Zero-copy view of the next count bytes; does not advance.
        return buffer(self.data, self.start, count)

    def eat(self, count):
        # Advance the cursor without returning data.
        self.start += count

    def get(self, count):
        # Return and consume the next count bytes (zero-copy view).
        v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        # Number of unconsumed bytes remaining.
        return len(self.data) - self.start
def _fadvise_pages_done(fd, first_page, count):
    """Tell the kernel (via fadvise) that *count* pages of *fd* starting
    at *first_page* are no longer needed and may be evicted."""
    assert(first_page >= 0)
    assert(count >= 0)
    if count <= 0:
        return
    _helpers.fadvise_done(fd,
                          first_page * sc_page_size,
                          count * sc_page_size)
def _nonresident_page_regions(status_bytes, max_region_len=None):
    """Return (start_page, count) pairs in ascending start_page order for
    each contiguous region of nonresident pages indicated by the
    mincore() status_bytes. Limit the number of pages in each region
    to max_region_len."""
    assert(max_region_len is None or max_region_len > 0)
    start = None  # first page of the current nonresident run, or None
    for i, x in enumerate(status_bytes):
        in_core = x & helpers.MINCORE_INCORE
        if start is None:
            if not in_core:
                start = i  # a nonresident run begins here
        else:
            count = i - start  # length of the run so far
            if in_core:
                # Run ended at a resident page: emit it.
                yield (start, count)
                start = None
            elif max_region_len and count >= max_region_len:
                # Run reached the size cap: emit it and start a new run
                # at the current page.
                yield (start, count)
                start = i
    if start is not None:
        # Trailing run extending to the end of status_bytes.
        yield (start, len(status_bytes) - start)
def _uncache_ours_upto(fd, offset, first_region, remaining_regions):
    """Uncache the pages of fd indicated by first_region and
    remaining_regions that are before offset, where each region is a
    (start_page, count) pair. The final region must have a start_page
    of None."""
    region_start, region_len = first_region
    # Flush every region lying entirely below offset, then return the
    # first region that is not yet fully behind us.
    while region_start is not None and \
            (region_start + region_len) * sc_page_size <= offset:
        _fadvise_pages_done(fd, region_start, region_len)
        region_start, region_len = next(remaining_regions, (None, None))
    return (region_start, region_len)
def readfile_iter(files, progress=None):
    """Yield successive BLOB_READ_SIZE chunks from each file in *files*.

    When fmincore is available and the file has a real fd, pages that
    were nonresident before we started are dropped from the OS cache
    once we have streamed past them, so splitting large inputs does not
    evict the rest of the page cache.
    """
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        # rpr yields (start_page, count) regions that were NOT resident
        # when we started, i.e. pages this read faulted in and may drop.
        fd = rpr = rstart = rlen = None
        if _fmincore and hasattr(f, 'fileno'):
            fd = f.fileno()
            max_chunk = max(1, (8 * 1024 * 1024) / sc_page_size)
            rpr = _nonresident_page_regions(_fmincore(fd), max_chunk)
            rstart, rlen = next(rpr, (None, None))
        while 1:
            # NOTE(review): progress is reported with the *previous*
            # chunk's length, before the next read — confirm this pacing
            # is intentional before changing it.
            if progress:
                progress(filenum, len(b))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if rpr:
                rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
            if not b:
                break
            yield b
        if rpr:
            # Drop any remaining regions now that the file is finished.
            rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
def _splitbuf(buf, basebits, fanbits):
    """Yield (blob, level) pairs split out of *buf* at rolling-checksum
    boundaries found by _helpers.splitbuf.

    *level* says how many fanout levels the split closes (0 for forced
    splits).  Python 2 only: blobs are zero-copy buffer() views.
    """
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs:
            if ofs > BLOB_MAX:
                # Never emit a blob larger than BLOB_MAX; a forced cut
                # carries no fanout information, hence level 0.
                ofs = BLOB_MAX
                level = 0
            else:
                level = (bits-basebits)//fanbits # integer division
            buf.eat(ofs)
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0
def _hashsplit_iter(files, progress):
    """Yield (blob, level) pairs for the concatenation of *files*,
    splitting at content-defined boundaries."""
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    fanbits = int(math.log(fanout or 128, 2))
    pending = Buf()
    for chunk in readfile_iter(files, progress):
        pending.put(chunk)
        for split in _splitbuf(pending, basebits, fanbits):
            yield split
    if pending.used():
        # Flush whatever tail did not end on a split boundary.
        yield pending.get(pending.used()), 0
def _hashsplit_iter_keep_boundaries(files, progress):
    """Like _hashsplit_iter, but restarts splitting at each file boundary."""
    for real_filenum, f in enumerate(files):
        if progress:
            # The inner iterator only ever sees a single file, so its
            # filenum is meaningless; bind the real one as a default arg.
            prog = lambda filenum, nbytes, _fn=real_filenum: progress(_fn, nbytes)
        else:
            prog = None
        for split in _hashsplit_iter([f], progress=prog):
            yield split
def hashsplit_iter(files, keep_boundaries, progress):
    """Dispatch to the per-file or cross-file splitter."""
    splitter = (_hashsplit_iter_keep_boundaries if keep_boundaries
                else _hashsplit_iter)
    return splitter(files, progress)
# Running total of bytes split so far; updated by split_to_blobs.
total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
    """Yield (sha, size, level) for each split blob.

    Each blob is written via *makeblob*; the module-wide total_split
    counter is bumped and progress_callback (if set) is invoked with the
    blob length.
    """
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = makeblob(blob)
        total_split += len(blob)
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)
def _make_shalist(l):
ofs = 0
l = list(l)
total = sum(size for mode,sha,size, in l)
vlen = len('%x' % total)
shalist = []
for (mode, sha, size) in l:
shalist.append((mode, '%0*x' % (vlen,ofs), sha))
ofs += size
assert(ofs == total)
return (shalist, total)
def _squish(maketree, stacks, n):
    """Collapse stack levels 0..n-1 (and any level holding MAX_PER_TREE
    or more entries) into trees one level up.

    stacks[i] holds (mode, sha, size) triples waiting to be combined
    into a tree at level i+1.
    """
    i = 0
    while i < n or len(stacks[i]) >= MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            # A single entry is promoted as-is; no point wrapping it in a
            # one-entry tree.
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = maketree(shalist)
            stacks[i+1].append((GIT_MODE_TREE, tree, size))
        stacks[i] = []
        i += 1
def split_to_shalist(makeblob, maketree, files,
                     keep_boundaries, progress=None):
    """Split *files* into blobs and assemble them into a git tree shalist,
    returning the shalist of the top-level tree."""
    sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
    # NOTE(review): this assert makes the `if not fanout` branch below
    # unreachable (module-level fanout defaults to 16) — looks like
    # debug leftover; confirm before removing either line.
    assert(fanout != 0)
    if not fanout:
        # Flat mode: one shalist entry per blob, no intermediate trees.
        shal = []
        for (sha,size,level) in sl:
            shal.append((GIT_MODE_FILE, sha, size))
        return _make_shalist(shal)[0]
    else:
        # Fanout mode: accumulate blobs at level 0 and let _squish fold
        # them into subtrees as the split levels dictate.
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append((GIT_MODE_FILE, sha, size))
            _squish(maketree, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(maketree, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]
def split_to_blob_or_tree(makeblob, maketree, files,
                          keep_boundaries, progress=None):
    """Split *files* and return (git_mode, sha): a blob when everything
    fits in one piece, otherwise a tree; empty input yields an empty blob."""
    shalist = list(split_to_shalist(makeblob, maketree,
                                    files, keep_boundaries, progress))
    count = len(shalist)
    if count == 0:
        return (GIT_MODE_FILE, makeblob(''))
    if count == 1:
        mode, _, sha = shalist[0]
        return (mode, sha)
    return (GIT_MODE_TREE, maketree(shalist))
def open_noatime(name):
    """Open *name* for buffered binary reading without updating its atime.

    If wrapping the fd in a file object fails, the fd is closed (best
    effort) before the original error is re-raised.
    """
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        try:
            os.close(fd)
        except:
            # Intentionally best-effort: we are already propagating the
            # original error.
            pass
        raise
|
"""Test of ARIA horizontal sliders using Firefox."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(PauseAction(10000))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Tab to Volume Horizontal Slider",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: 'Volume horizontal slider 0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"2. Volume Right Arrow",
["BRAILLE LINE: 'Volume 1 % horizontal slider'",
" VISIBLE: 'Volume 1 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '1 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"3. Volume Right Arrow",
["BRAILLE LINE: 'Volume 2 % horizontal slider'",
" VISIBLE: 'Volume 2 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '2 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"4. Volume Left Arrow",
["BRAILLE LINE: 'Volume 1 % horizontal slider'",
" VISIBLE: 'Volume 1 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '1 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"5. Volume Left Arrow",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"6. Volume Up Arrow",
["BRAILLE LINE: 'Volume 1 % horizontal slider'",
" VISIBLE: 'Volume 1 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '1 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"7. Volume Up Arrow",
["BRAILLE LINE: 'Volume 2 % horizontal slider'",
" VISIBLE: 'Volume 2 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '2 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"8. Volume Down Arrow",
["BRAILLE LINE: 'Volume 1 % horizontal slider'",
" VISIBLE: 'Volume 1 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '1 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"9. Volume Down Arrow",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Page_Up"))
sequence.append(utils.AssertPresentationAction(
"10. Volume Page Up",
["BRAILLE LINE: 'Volume 25 % horizontal slider'",
" VISIBLE: 'Volume 25 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '25 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Page_Down"))
sequence.append(utils.AssertPresentationAction(
"11. Volume Page Down",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("End"))
sequence.append(utils.AssertPresentationAction(
"12. Volume End",
["BRAILLE LINE: 'Volume 100 % horizontal slider'",
" VISIBLE: 'Volume 100 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '100 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Home"))
sequence.append(utils.AssertPresentationAction(
"13. Volume Home",
["BRAILLE LINE: 'Volume 0 % horizontal slider'",
" VISIBLE: 'Volume 0 % horizontal slider', cursor=1",
"SPEECH OUTPUT: '0 %'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"14. Tab to Food Quality Horizontal Slider",
["KNOWN ISSUE: The double-presentation is because of the authoring, putting the name and value into the description",
"BRAILLE LINE: 'Food Quality terrible horizontal slider'",
" VISIBLE: 'Food Quality terrible horizontal', cursor=1",
"SPEECH OUTPUT: 'Food Quality horizontal slider terrible.'",
"SPEECH OUTPUT: 'Food Quality: terrible (1 of 5)'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"15. Food Quality Right Arrow",
["BRAILLE LINE: 'Food Quality bad horizontal slider'",
" VISIBLE: 'Food Quality bad horizontal slid', cursor=1",
"SPEECH OUTPUT: 'bad'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"16. Food Quality Right Arrow",
["BRAILLE LINE: 'Food Quality decent horizontal slider'",
" VISIBLE: 'Food Quality decent horizontal s', cursor=1",
"SPEECH OUTPUT: 'decent'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"17. Food Quality Left Arrow",
["BRAILLE LINE: 'Food Quality bad horizontal slider'",
" VISIBLE: 'Food Quality bad horizontal slid', cursor=1",
"SPEECH OUTPUT: 'bad'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"18. Food Quality Up Arrow",
["BRAILLE LINE: 'Food Quality decent horizontal slider'",
" VISIBLE: 'Food Quality decent horizontal s', cursor=1",
"SPEECH OUTPUT: 'decent'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"19. Food Quality Down Arrow",
["BRAILLE LINE: 'Food Quality bad horizontal slider'",
" VISIBLE: 'Food Quality bad horizontal slid', cursor=1",
"SPEECH OUTPUT: 'bad'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"20. Food Quality Down Arrow",
["BRAILLE LINE: 'Food Quality terrible horizontal slider'",
" VISIBLE: 'Food Quality terrible horizontal', cursor=1",
"SPEECH OUTPUT: 'terrible'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Page_Up"))
sequence.append(utils.AssertPresentationAction(
"21. Food Quality Page Up",
["BRAILLE LINE: 'Food Quality bad horizontal slider'",
" VISIBLE: 'Food Quality bad horizontal slid', cursor=1",
"SPEECH OUTPUT: 'bad'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Page_Down"))
sequence.append(utils.AssertPresentationAction(
"22. Food Quality Page Down",
["BRAILLE LINE: 'Food Quality terrible horizontal slider'",
" VISIBLE: 'Food Quality terrible horizontal', cursor=1",
"SPEECH OUTPUT: 'terrible'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("End"))
sequence.append(utils.AssertPresentationAction(
"23. Food Quality End",
["BRAILLE LINE: 'Food Quality excellent horizontal slider'",
" VISIBLE: 'Food Quality excellent horizonta', cursor=1",
"SPEECH OUTPUT: 'excellent'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Home"))
sequence.append(utils.AssertPresentationAction(
"24. Food Quality Home",
["BRAILLE LINE: 'Food Quality terrible horizontal slider'",
" VISIBLE: 'Food Quality terrible horizontal', cursor=1",
"SPEECH OUTPUT: 'terrible'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.