# Under MIT license, see LICENSE.txt
""" Livre des stratégies. """
from .HumanControl import HumanControl
from .SimpleDefense import SimpleDefense
from .SimpleOffense import SimpleOffense
from .DoNothing import DoNothing
class StrategyBook(object):
"""
Cette classe est capable de récupérer les stratégies enregistrés dans la
configuration des stratégies et de les exposer au Behavior Tree en
charge de sélectionner la stratégie courante.
"""
def __init__(self, p_info_manager):
self.strategy_book = {'SimpleDefense' : SimpleDefense,
'SimpleOffense' : SimpleOffense,
'HumanControl' : HumanControl,
'DoNothing' : DoNothing }
self.info_manager = p_info_manager
def get_strategies_name_list(self):
return list(self.strategy_book.keys())
def ball_in_offense_zone(self):
        self.team_zone_side = "left" # dummy constant TODO: find a way to ask the InfoManager for our initial zone
self.ball_x_position = self.info_manager.get_ball_position().x
if self.team_zone_side == "left":
return self.ball_x_position > 0
return self.ball_x_position < 0
def most_opponents_in_our_zone(self):
pass
def get_optimal_strategy(self):
# simple choice
if self.ball_in_offense_zone():
self.chosen_strategy = SimpleOffense
else:
self.chosen_strategy = SimpleDefense
        # self.chosen_strategy = DoNothing  # uncomment to force DoNothing while debugging
return self.chosen_strategy
def get_strategy(self, strategy_name):
return self.strategy_book[strategy_name]
    def debug_show_all_players_tactics(self):
        for i in range(6):
            debug_string = "Robot: " + str(i) + " " + str(self.info_manager.get_player_tactic(i))
            print(debug_string)
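# A minimal usage sketch (illustrative only; it assumes an InfoManager
# instance named info_manager exists, which this module does not provide):
#
#     book = StrategyBook(info_manager)
#     print(book.get_strategies_name_list())
#     current_strategy = book.get_optimal_strategy()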
|
#!/usr/bin/env python3
import re
import json # turns out the dump function of the json5 module just calls the normal json module (╯°□°)╯︵ ┻━┻
INPUT = "cards-DevOpsAgainstHumanity.csv"
META_NAME = "DevOps Against Humanity"
DELIM = ","
QUOTE = "\""
SKIPLINES = 2
def parse_csv(line):
a = []
tmp = ""
at_elem_start = True
in_quotes = False
in_escape = False
for c in line:
if at_elem_start:
if c == DELIM: # empty element
a.append("")
continue
in_quotes = (c == QUOTE)
if not in_quotes:
tmp += c
at_elem_start = False
continue
if c == QUOTE and in_quotes and not in_escape:
in_escape = True
elif c == QUOTE and in_quotes and in_escape:
tmp += QUOTE
in_escape = False
elif (c == DELIM and in_quotes and in_escape) or (c == DELIM and not in_quotes):
a.append(tmp)
tmp = ""
in_escape = False
at_elem_start = True
else:
tmp += c
a.append(tmp)
return a
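# Hedged example of the state machine above (traced by hand):
#     parse_csv('a,"b,""c""",')  ->  ['a', 'b,"c"', '']
# i.e. a quoted field may contain the delimiter, and a doubled quote inside
# quotes unescapes to a single quote character.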
r_blank = re.compile(r"_+")
odict = {}
odict["questions"] = []
odict["answers"] = []
odict["meta"] = {}
odict["meta"]["name"] = META_NAME
ifd = open(INPUT, "r")
for i in range(SKIPLINES):
ifd.readline()
n = 0
while True:
l = ifd.readline()
if not l:
break
l = l.rstrip("\r\n")
l = parse_csv(l)
if l[0] != "":
odict["answers"].append(l[0])
n += 1
if l[1] != "":
tmp = {}
tmp["text"] = re.sub(r_blank, "%s", l[1])
# pick is inferred from number of %s
odict["questions"].append(tmp)
n += 1
ifd.close()
ofd = open(INPUT.replace(".csv", ".json5"), "w")
json.dump(odict, ofd, indent=2, sort_keys=True)
ofd.close()
print("Processed %d cards." % (n, ))
|
# Copyright (C) 2010, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from django.core.management.base import BaseCommand, CommandError
from django.core.files import File
from django.db.utils import IntegrityError
import sys
import os
import csv
import json
from optparse import make_option
from books.models import Book, Status
class Command(BaseCommand):
help = "Adds a book collection (via a CSV file)"
args = 'Absolute path to CSV file'
option_list = BaseCommand.option_list + (
make_option('--json',
action='store_true',
dest='is_json_format',
default=False,
help='The file is in JSON format'),
)
def _handle_csv(self, csvpath):
"""
Store books from a file in CSV format.
WARN: does not handle tags
"""
csvfile = open(csvpath)
dialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
reader = csv.reader(csvfile, dialect)
#TODO: Figure out if this is a valid CSV file
for row in reader:
path = row[0]
title = row[1]
author = row[2]
summary = row[3]
f = open(path)
book = Book(book_file = File(f), a_title = title, a_author = author, a_summary = summary)
book.save()
def _handle_json(self, jsonpath):
"""
Store books from a file in JSON format.
"""
jsonfile = open(jsonpath)
data_list = json.loads(jsonfile.read())
for d in data_list:
# Get a Django File from the given path:
f = open(d['book_path'])
d['book_file'] = File(f)
del d['book_path']
            if 'cover_path' in d:
f_cover = open(d['cover_path'])
d['cover_img'] = File(f_cover)
del d['cover_path']
            if 'a_status' in d:
d['a_status'] = Status.objects.get(status = d['a_status'])
            tags = d.pop('tags', [])  # tolerate entries without a 'tags' key
book = Book(**d)
try:
book.save() # must save item to generate Book.id before creating tags
                for tag in tags:
                    book.tags.add(tag)
book.save() # save again after tags are generated
except IntegrityError as e:
if str(e) == "column file_sha256sum is not unique":
print "The book (", d['book_file'], ") was not saved because the file already exsists in the database."
else:
raise CommandError('Error adding file %s: %s' % (d['book_file'], sys.exc_info()[1]))
def handle(self, filepath='', *args, **options):
if not os.path.exists(filepath):
raise CommandError("%r is not a valid path" % filepath)
if options['is_json_format']:
self._handle_json(filepath)
else:
self._handle_csv(filepath)
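# Hedged sketch of the JSON layout _handle_json consumes, inferred from the
# keys it reads ('book_path', 'cover_path', 'a_status', 'tags'); the values
# below are illustrative placeholders, not real data:
#
#     [
#         {
#             "book_path": "/books/example.epub",
#             "cover_path": "/books/example.png",
#             "a_title": "Example Title",
#             "a_author": "Example Author",
#             "a_status": "some-status",
#             "tags": ["some-tag"]
#         }
#     ]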
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, api, models
class infrastructure_rename_db_name(models.TransientModel):
_name = "infrastructure.rename_database.name"
_description = "Infrastructure Rename Database Name Wizard"
name = fields.Char(
'New Database Name',
size=64,
required=True
)
# database_type_id = fields.Many2one(
# 'infrastructure.database_type',
# string='Database Type',
# required=True,
# )
    # TODO remove as we no longer use db prefix
# @api.onchange('database_type_id')
# def onchange_database_type_id(self):
# if self.database_type_id:
# self.name = self.database_type_id.prefix + '_'
# TODO send suggested backup data
@api.multi
def action_confirm(self):
active_id = self._context.get('active_id')
if not active_id:
return False
active_record = self.env['infrastructure.database'].browse(active_id)
active_record.rename_db(self.name)
# active_record.database_type_id = self.database_type_id
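    # Hedged usage note (illustrative): the wizard reads the record to rename
    # from the context, so it is expected to be launched from an
    # infrastructure.database form action, e.g.:
    #
    #     wizard = env['infrastructure.rename_database.name'].with_context(
    #         active_id=database.id).create({'name': 'new_db_name'})
    #     wizard.action_confirm()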
|
import os, sys
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QApplication, QFileDialog
from PyQt5.QtGui import QIntValidator, QDoubleValidator
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from comsyl.autocorrelation.CompactAFReader import CompactAFReader
from orangecontrib.comsyl.widgets.gui.ow_comsyl_widget import OWComsylWidget
from orangecontrib.comsyl.util.preprocessor import ComsylPreprocessorData
from orangecontrib.comsyl.util.python_script import PythonConsole
from orangecontrib.comsyl.util.messages import showConfirmMessage
class OWComsylPropagateBeamline(OWComsylWidget):
name = "Propagate Beamline Script"
description = "COMSYL Propagate Beamline"
icon = "icons/propagator.png"
maintainer = "Manuel Sanchez del Rio"
maintainer_email = "srio(@at@)esrf.eu"
priority = 46
category = ""
keywords = ["COMSYL", "coherent modes"]
inputs = [("COMSYL modes" , CompactAFReader, "setCompactAFReader" ),
("COMSYL preprocessor beamline" , ComsylPreprocessorData, "setPreprocessor" ),]
outputs = [{"name":"COMSYL modes",
"type":CompactAFReader,
"doc":"COMSYL modes",
"id":"COMSYL modes"} ]
COMSYL_AF_FILE = ""
BL_PICKLE_FILE = ""
    MODE_INDEX = Setting(2) # maximum mode index
REFERENCE_SOURCE = Setting(0)
DIRECTORY_NAME = "tmp_comsyl_propagation"
PYTHON_INTERPRETER = sys.executable
IMAGE_WIDTH = 890
IMAGE_HEIGHT = 680
def __init__(self, show_automatic_box=True):
super().__init__(show_automatic_box=show_automatic_box)
button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
button = gui.button(button_box, self, "Refresh Script", callback=self.refresh_script)
font = QFont(button.font())
font.setBold(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
font = QFont(button.font())
font.setItalic(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button.setFixedWidth(150)
gui.separator(self.controlArea)
gen_box = oasysgui.widgetBox(self.controlArea, "COMSYL Beamline Propagation", addSpace=False, orientation="vertical", height=530, width=self.CONTROL_AREA_WIDTH-5)
figure_box0 = oasysgui.widgetBox(gen_box, "", addSpace=True, orientation="horizontal")
self.id_comsyl_af_file = oasysgui.lineEdit(figure_box0, self, "COMSYL_AF_FILE", "Comsyl File with Modes:",
labelWidth=90, valueType=str, orientation="horizontal")
gui.button(figure_box0, self, "...", callback=self.select_comsyl_af_file)
figure_box = oasysgui.widgetBox(gen_box, "", addSpace=True, orientation="horizontal")
self.id_bl_pickle_file = oasysgui.lineEdit(figure_box, self, "BL_PICKLE_FILE", "BL Pickle File:",
labelWidth=90, valueType=str, orientation="horizontal")
gui.button(figure_box, self, "...", callback=self.select_bl_pickle_file)
oasysgui.lineEdit(gen_box, self, "MODE_INDEX",
label="Maximum Mode index", addSpace=False,
valueType=int, validator=QIntValidator(), orientation="horizontal", labelWidth=150)
oasysgui.lineEdit(gen_box, self, "DIRECTORY_NAME", "Temporal Directory", labelWidth=160, valueType=str, orientation="horizontal")
oasysgui.lineEdit(gen_box, self, "PYTHON_INTERPRETER", "Python interpreter", labelWidth=160, valueType=str, orientation="horizontal")
tabs_setting = oasysgui.tabWidget(self.mainArea)
tabs_setting.setFixedHeight(self.IMAGE_HEIGHT)
tabs_setting.setFixedWidth(self.IMAGE_WIDTH)
tab_scr = oasysgui.createTabPage(tabs_setting, "Python Script")
tab_out = oasysgui.createTabPage(tabs_setting, "System Output")
self.pythonScript = oasysgui.textArea(readOnly=False)
self.pythonScript.setStyleSheet("background-color: white; font-family: Courier, monospace;")
self.pythonScript.setMaximumHeight(self.IMAGE_HEIGHT - 250)
script_box = oasysgui.widgetBox(tab_scr, "", addSpace=False, orientation="vertical", height=self.IMAGE_HEIGHT - 10, width=self.IMAGE_WIDTH - 10)
script_box.layout().addWidget(self.pythonScript)
console_box = oasysgui.widgetBox(script_box, "", addSpace=True, orientation="vertical",
height=150, width=self.IMAGE_WIDTH - 10)
self.console = PythonConsole(self.__dict__, self)
console_box.layout().addWidget(self.console)
self.shadow_output = oasysgui.textArea()
out_box = oasysgui.widgetBox(tab_out, "System Output", addSpace=True, orientation="horizontal", height=self.IMAGE_WIDTH - 45)
out_box.layout().addWidget(self.shadow_output)
button_box = oasysgui.widgetBox(tab_scr, "", addSpace=True, orientation="horizontal")
gui.button(button_box, self, "Run Script", callback=self.execute_script, height=40)
gui.button(button_box, self, "Save Script to File", callback=self.save_script, height=40)
#############################
# self.refresh_script()
#
def select_comsyl_af_file(self):
self.id_comsyl_af_file.setText(oasysgui.selectFileFromDialog(self,
self.COMSYL_AF_FILE, "Select Input File",
file_extension_filter="COMSYL Files (*.npz)"))
def select_bl_pickle_file(self):
self.id_bl_pickle_file.setText(oasysgui.selectFileFromDialog(self,
self.BL_PICKLE_FILE, "Select Input File",
file_extension_filter="COMSYL Beamline Pickle Files (*.p)"))
def setCompactAFReader(self, af):
        if af is not None:
self.COMSYL_AF_FILE = af._af._io.fromFile()
self.refresh_script()
def setPreprocessor(self, data):
try:
self.BL_PICKLE_FILE = data.get_beamline_pickle_file()
self.refresh_script()
        except Exception:
            pass  # input without a beamline pickle file; nothing to refresh
def execute_script(self):
if showConfirmMessage(message = "Do you confirm launching a COMSYL propagation?",
informative_text="This is a long and resource-consuming process: launching it within the OASYS environment is highly discouraged." + \
"The suggested solution is to save the script into a file and to launch it in a different environment."):
self._script = str(self.pythonScript.toPlainText())
self.console.write("\nRunning script:\n")
self.console.push("exec(_script)")
self.console.new_prompt(sys.ps1)
def save_script(self):
file_name = QFileDialog.getSaveFileName(self, "Save File to Disk", os.getcwd(), filter='*.py')[0]
        if file_name is not None:
            if file_name.strip() != "":
file = open(file_name, "w")
file.write(str(self.pythonScript.toPlainText()))
file.close()
QtWidgets.QMessageBox.information(self, "QMessageBox.information()",
"File " + file_name + " written to disk",
QtWidgets.QMessageBox.Ok)
def refresh_script(self):
dd = {"COMSYL_AF_FILE": self.COMSYL_AF_FILE,
"BL_PICKLE_FILE": self.BL_PICKLE_FILE,
"DIRECTORY_NAME": self.DIRECTORY_NAME,
"PYTHON_INTERPRETER": self.PYTHON_INTERPRETER,
"MODE_INDEX": self.MODE_INDEX,
}
self.pythonScript.setText(self.script_template().format_map(dd))
def script_template(self):
return """import pickle
from comsyl.waveoptics.ComsylWofryBeamline import ComsylWofryBeamline
from comsyl.waveoptics.SRWAdapter import ComsylSRWBeamline
from comsyl.autocorrelation.CompactAFReader import CompactAFReader
comsyl_beamline = pickle.load(open("{BL_PICKLE_FILE}","rb"))
filename = "{COMSYL_AF_FILE}"
af_oasys = CompactAFReader.initialize_from_file(filename)
af_comsyl = af_oasys.get_af()
# **source position correction**
source_position=af_comsyl.info().sourcePosition()
if source_position == "entrance":
source_offset = af_comsyl._undulator.length() * 0.5
elif source_position == "center":
source_offset = 0.0
else:
raise Exception("Unhandled source position")
print("Using source position entrance z=%f" % source_offset)
comsyl_beamline.add_undulator_offset(source_offset)
af_propagated = comsyl_beamline.propagate_af(af_comsyl,
directory_name="{DIRECTORY_NAME}",
af_output_file_root="{DIRECTORY_NAME}/propagated_beamline",
maximum_mode={MODE_INDEX},
python_to_be_used="{PYTHON_INTERPRETER}")
#rediagonalization **uncomment to proceed**
#af_propagated.diagonalizeModes({MODE_INDEX})
#af_propagated.save("{DIRECTORY_NAME}/rediagonalized")
"""
if __name__ == '__main__':
from PyQt5.QtWidgets import QApplication
app = QApplication([])
ow = OWComsylPropagateBeamline()
ow.COMSYL_AF_FILE = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz"
ow.BL_PICKLE_FILE = "/scisoft/xop2.4/extensions/shadowvui/shadow3-scripts/HIGHLIGHTS/bl.p"
ow.refresh_script()
ow.show()
app.exec_()
ow.saveSettings()
|
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests different ways to use the public tf-numpy module."""
import numpy as onp
import tensorflow as tf
import tensorflow.experimental.numpy as np1
from tensorflow.experimental import numpy as np2 # pylint: disable=reimported
np3 = tf.experimental.numpy
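# np1, np2 and np3 above all resolve to the same tf.experimental.numpy
# module; the test below checks that the three access paths agree.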
class PublicSymbolTest(tf.test.TestCase):
def testSimple(self):
a = 0.1
b = 0.2
for op in [np1.add, np2.add, np3.add]:
self.assertAllClose(onp.add(a, b), op(a, b))
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
tf.test.main()
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Criteria.created'
db.add_column('dnstorm_criteria', 'created',
self.gf('django.db.models.fields.DateTimeField')(default='2000-01-01', auto_now_add=True, blank=True),
keep_default=False)
# Adding field 'Criteria.updated'
db.add_column('dnstorm_criteria', 'updated',
self.gf('django.db.models.fields.DateTimeField')(default='2000-01-01', auto_now=True, blank=True),
keep_default=False)
# Adding field 'Idea.description'
db.add_column('dnstorm_idea', 'description',
self.gf('django.db.models.fields.TextField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Criteria.created'
db.delete_column('dnstorm_criteria', 'created')
# Deleting field 'Criteria.updated'
db.delete_column('dnstorm_criteria', 'updated')
# Deleting field 'Idea.description'
db.delete_column('dnstorm_idea', 'description')
models = {
u'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': u"orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'app.alternative': {
'Meta': {'object_name': 'Alternative', 'db_table': "'dnstorm_alternative'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'alternative_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2001-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['app.Idea']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "'dnstorm_comment'"},
'alternative': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Alternative']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Criteria']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Idea']", 'null': 'True', 'blank': 'True'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.criteria': {
'Meta': {'object_name': 'Criteria', 'db_table': "'dnstorm_criteria'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'criteria_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'fmt': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'min': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'order': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'result': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '60', 'populate_from': "'name'", 'unique_with': '()'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'app.idea': {
'Meta': {'object_name': 'Idea', 'db_table': "'dnstorm_idea'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'idea_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.ideacriteria': {
'Meta': {'object_name': 'IdeaCriteria', 'db_table': "'dnstorm_idea_criteria'"},
'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Criteria']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Idea']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_currency': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'value_number': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'value_scale': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'value_time': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'app.invitation': {
'Meta': {'object_name': 'Invitation', 'db_table': "'dnstorm_invitation'"},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'app.option': {
'Meta': {'object_name': 'Option', 'db_table': "'dnstorm_option'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'app.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "'dnstorm_problem'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'author'", 'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'collaborator': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborator'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('ckeditor.fields.RichTextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'}),
'open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '60', 'populate_from': "'title'", 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.vote': {
'Meta': {'object_name': 'Vote', 'db_table': "'dnstorm_vote'"},
'alternative': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_alternative'", 'null': 'True', 'to': u"orm['app.Alternative']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_comment'", 'null': 'True', 'to': u"orm['app.Alternative']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2001-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_idea'", 'null': 'True', 'to': u"orm['app.Idea']"}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['app']
|
#!/usr/bin/env python3
from collections import defaultdict
from operator import attrgetter
import re

from ck2parser import (rootpath, vanilladir, is_codename, Obj,
                       get_province_id_name_map, SimpleParser)
from print_time import print_time
VANILLA_HISTORY_WARN = True
results = {True: defaultdict(list),
False: defaultdict(list)}
def check_title(parser, v, path, titles, lhs=False, line=None):
if isinstance(v, str):
v_str = v
else:
v_str = v.val
if is_codename(v_str) and v_str not in titles:
if line is None:
line = '<file>'
else:
v_lines = line.inline_str(parser)[0].splitlines()
line = next((l for l in v_lines if not re.match(r'\s*#', l)),
v_lines[0])
results[lhs][path].append(line)
return False
return True
def check_titles(parser, path, titles):
def recurse(tree):
if tree.has_pairs:
for p in tree:
n, v = p
v_is_obj = isinstance(v, Obj)
check_title(parser, n, path, titles, v_is_obj, p)
if v_is_obj:
recurse(v)
else:
check_title(parser, v, path, titles, line=p)
else:
for v in tree:
check_title(parser, v, path, titles, line=v)
try:
recurse(parser.parse_file(path))
except:
print(path)
raise
def check_regions(parser, titles, duchies_de_jure):
bad_titles = []
missing_duchies = list(duchies_de_jure)
region_duchies = defaultdict(list)
path, tree = next(parser.parse_files('map/geographical_region.txt'))
for n, v in tree:
world = n.val.startswith('world_')
for n2, v2 in v:
if n2.val == 'regions':
for v3 in v2:
for duchy in region_duchies.get(v3.val, []):
try:
missing_duchies.remove(duchy)
except ValueError:
pass
region_duchies[n.val].append(duchy)
elif n2.val == 'duchies':
for v3 in v2:
if is_codename(v3.val):
check_title(parser, v3, path, titles, line=v3)
region_duchies[n.val].append(v3.val)
if v3.val in titles and v3.val not in duchies_de_jure:
bad_titles.append(v3.val)
elif world and v3.val in missing_duchies:
missing_duchies.remove(v3.val)
return bad_titles, missing_duchies
def check_province_history(parser, titles):
id_name_map = get_province_id_name_map(parser)
for path in parser.files('history/provinces/*.txt'):
number, name = path.stem.split(' - ')
if id_name_map.get(int(number)) == name:
check_titles(parser, path, titles)
def process_landed_titles(parser):
titles_list = []
title_liege_map = {}
title_vassals_map = defaultdict(set)
misogyny = []
for path, tree in parser.parse_files('common/landed_titles/*.txt'):
try:
dfs = list(reversed(tree))
while dfs:
n, v = dfs.pop()
if is_codename(n.val):
if n.val not in titles_list:
titles_list.append(n.val)
if v.get('title') and not v.get('title_female'):
misogyny.append(n.val)
for n2, v2 in v:
if is_codename(n2.val):
title_liege_map[n2.val] = n.val
title_vassals_map[n.val].add(n2.val)
dfs.extend(reversed(v))
except:
print(path)
raise
return titles_list, title_liege_map, title_vassals_map, misogyny
@print_time
def main():
parser = SimpleParser()
parser.moddirs = [rootpath / 'SWMH-BETA/SWMH']
# parser.moddirs.extend([rootpath / 'EMF/EMF', rootpath / 'EMF/EMF+SWMH'])
titles_list, title_liege_map, title_vassals_map, misogyny = (
process_landed_titles(parser))
titles = set(titles_list)
check_province_history(parser, titles)
start_date = parser.parse_file('common/defines.txt')['start_date'].val
for path, tree in parser.parse_files('history/titles/*.txt',
memcache=True):
if tree.contents:
title = path.stem
good = check_title(parser, title, path, titles)
if (VANILLA_HISTORY_WARN and not good and
not any(d in path.parents for d in parser.moddirs)):
# newpath = parser.moddirs[0] / 'history/titles' / path.name
# newpath.open('w').close()
print('Should override {} with blank file'.format(
'<vanilla>' / path.relative_to(vanilladir)))
else:
check_titles(parser, path, titles)
# update de jure changed before start_date
for n, v in sorted(tree, key=attrgetter('key.val')):
if n.val > start_date:
break
for n2, v2 in v:
if n2.val == 'de_jure_liege':
old_liege = title_liege_map.get(title)
if old_liege:
title_vassals_map[old_liege].discard(title)
title_liege_map[title] = v2.val
title_vassals_map[v2.val].add(title)
parser.flush(path)
duchies_de_jure = [t for t, v in title_vassals_map.items()
if t[0] == 'd' and v]
bad_region_titles, missing_duchies = check_regions(parser, titles,
duchies_de_jure)
for _ in parser.parse_files('history/characters/*.txt'):
pass # just parse it to see if it parses
globs = [
'events/*.txt',
'decisions/*.txt',
'common/laws/*.txt',
'common/objectives/*.txt',
'common/minor_titles/*.txt',
'common/job_titles/*.txt',
'common/job_actions/*.txt',
'common/religious_titles/*.txt',
'common/cb_types/*.txt',
'common/scripted_triggers/*.txt',
'common/scripted_effects/*.txt',
'common/achievements.txt'
]
for glob in globs:
for path in parser.files(glob):
check_titles(parser, path, titles)
with (rootpath / 'check_titles.txt').open('w') as fp:
if bad_region_titles:
print('Titular titles in regions:\n\t', end='', file=fp)
print(*bad_region_titles, sep=' ', file=fp)
if missing_duchies:
print('De jure duchies not found in "world_" regions:\n\t', end='',
file=fp)
print(*missing_duchies, sep=' ', file=fp)
for lhs in [True, False]:
if results[lhs]:
if lhs:
print('Undefined references as SCOPE:', file=fp)
else:
print('Undefined references:', file=fp)
for path, titles in sorted(results[lhs].items()):
if titles:
for modpath in parser.moddirs:
if modpath in path.parents:
rel_path = ('<{}>'.format(modpath.name) /
path.relative_to(modpath))
break
else:
rel_path = '<vanilla>' / path.relative_to(vanilladir)
print('\t' + str(rel_path), *titles, sep='\n\t\t', file=fp)
if misogyny:
print('Title defines title but not title_female:\n\t', end='',
file=fp)
print(*misogyny, sep=' ', file=fp)
if __name__ == '__main__':
main()
|
from vsg import parser
from vsg import rule
from vsg import violation
from vsg.rules import utils as rules_utils
class single_space_before_token(rule.Rule):
'''
    Checks for a single space before a given token.
Parameters
----------
name : string
The group the rule belongs to.
identifier : string
unique identifier. Usually in the form of 00N.
lTokens : token type object list
A list of tokens to check for a single space after.
'''
def __init__(self, name, identifier, lTokens):
rule.Rule.__init__(self, name=name, identifier=identifier)
self.solution = None
self.phase = 2
self.lTokens = lTokens
def _get_tokens_of_interest(self, oFile):
return oFile.get_token_and_n_tokens_before_it(self.lTokens, 1)
def _analyze(self, lToi):
for oToi in lToi:
lTokens = oToi.get_tokens()
if isinstance(lTokens[0], parser.carriage_return):
continue
if not isinstance(lTokens[0], parser.whitespace):
sSolution = 'Ensure a single space before ' + lTokens[1].get_value()
oViolation = violation.New(oToi.get_line_number(), oToi, sSolution)
oViolation.set_action('insert')
self.add_violation(oViolation)
else:
if lTokens[0].get_value() != ' ':
sSolution = 'Ensure a single space before ' + lTokens[1].get_value()
oViolation = violation.New(oToi.get_line_number(), oToi, sSolution)
oViolation.set_action('adjust')
self.add_violation(oViolation)
def _fix_violation(self, oViolation):
lTokens = oViolation.get_tokens()
sAction = oViolation.get_action()
if sAction == 'insert':
rules_utils.insert_whitespace(lTokens, 1)
elif sAction == 'adjust':
lTokens[0].set_value(' ')
oViolation.set_tokens(lTokens)
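# Hedged subclass sketch (illustrative only; some_token stands in for a real
# token class from vsg's token modules and is not defined here):
#
#     class rule_00N(single_space_before_token):
#         def __init__(self):
#             single_space_before_token.__init__(
#                 self, name='example_group', identifier='00N',
#                 lTokens=[some_token])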
|
from mysqlConn import DbConnect
import argparse
import operator
from math import log,fabs
import pprint
#DB connector and cursor
db = DbConnect()
db_conn = db.get_connection()
cur2 = db_conn.cursor()
#Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("GENRE1")
parser.add_argument("GENRE2")
parser.add_argument("MODEL")
args = parser.parse_args()
##########################################
#General computation
#########################################
#1. Getting total number of movies in genre1 U genre2
cur2.execute("SELECT COUNT(distinct movieid) FROM mlmovies_clean where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result0 = cur2.fetchone()
total_movie_count = float(result0[0])
if args.MODEL== "TF-IDF-DIFF":
###############################
#MODEL = TF_IDF_DIFF
###############################
#===============================================================================================
    #Subtask-1 : Calculate the weighted unique movie count returned by a tag for the set of movies in genre1 U genre2
#===============================================================================================
cur2.execute("SELECT COUNT(distinct movieid) FROM mlmovies_clean where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result0 = cur2.fetchone()
total_movie_count = result0[0]
    #Since we already have the TF value and its data, we now generate the required data for idf.
    #IDF here is taken to be the weighted number of movie-genres that carry a certain tag, so the idf calculation is
    # total movie-genres / sum of weights of movie-genres with a particular tag
    #Calculate the total weighted movie-genre count for each tag.
    #weighted count for an occurrence of a tag = tag_newness
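    #As a formula (hedged restatement of the comments above, matching the idf/tf-idf code below):
    #  idf(t) = ln( total_movie_count / weighted_genre_movie_count[t] )
    #  tf-idf(t) = tf(t) * idf(t)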
    weighted_genre_movie_count = {}
    cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres=%s OR genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
genre_tag_id=""
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID. For each tag weight, add the rank_weight as well.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round((float(genre_tag_newness)),10)
if tagName in weighted_genre_movie_count:
weighted_genre_movie_count[tagName] = round((weighted_genre_movie_count[tagName] + tagWeight), 10)
else:
weighted_genre_movie_count[tagName] = tagWeight
# ===============================================================================
#Subtask-2: Get the TF , IDF and TF-IDF for the genres
#===============================================================================
data_dictionary_tf_genre1 = {}
data_dictionary_tf_idf_genre1 = {}
total_tag_newness_weight = 0
#Get all movies of genre 1.
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round(float(genre_tag_newness),10)
total_tag_newness_weight = total_tag_newness_weight + tagWeight
#For TF
if tagName in data_dictionary_tf_genre1:
data_dictionary_tf_genre1[tagName] = round((data_dictionary_tf_genre1[tagName] + tagWeight),10)
else:
data_dictionary_tf_genre1[tagName] = tagWeight
    # Set the weight of all other tags to zero. Calculate the tf, idf and tf-idf values for the tags that exist.
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key = keyVal[0]
if key in data_dictionary_tf_genre1:
data_dictionary_tf_genre1[key] = round((float(data_dictionary_tf_genre1[key]) / float(total_tag_newness_weight)),10)
            data_dictionary_tf_idf_genre1[key] = round(log(float(total_movie_count) / weighted_genre_movie_count[key]), 10)
data_dictionary_tf_idf_genre1[key] = round((data_dictionary_tf_genre1[key] * data_dictionary_tf_idf_genre1[key]), 10)
else:
data_dictionary_tf_genre1[key] = 0.0
#genre_model_value_tf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#genre_model_value_tfidf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#Get all movies of a specific genre 2.
#--------------------------------------
data_dictionary_tf_genre2 = {}
data_dictionary_tf_idf_genre2 = {}
total_tag_newness_weight = 0
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round(float(genre_tag_newness),10)
total_tag_newness_weight = total_tag_newness_weight + tagWeight
#For TF
if tagName in data_dictionary_tf_genre2:
data_dictionary_tf_genre2[tagName] = round((data_dictionary_tf_genre2[tagName] + tagWeight),10)
else:
data_dictionary_tf_genre2[tagName] = tagWeight
    # Set the weight of all other tags to zero.
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key=keyVal[0]
if key in data_dictionary_tf_genre2:
data_dictionary_tf_genre2[key] = round((float(data_dictionary_tf_genre2[key]) / float(total_tag_newness_weight)),10)
            data_dictionary_tf_idf_genre2[key] = round(log(float(total_movie_count) / weighted_genre_movie_count[key]), 10)
data_dictionary_tf_idf_genre2[key] = round((data_dictionary_tf_genre2[key] * data_dictionary_tf_idf_genre2[key]), 10)
else:
data_dictionary_tf_genre2[key] = 0.0
#genre_model_value_tf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#genre_model_value_tfidf_genre2 = sorted(data_dictionary_tf_genre2.items(), key=operator.itemgetter(1), reverse=True)
#--------------------------------------------------------------------------------------------------------------
#Subtask-3 : Calculate the DIFF vector
    #Manhattan distance is used since it works better in high dimensions than higher-order Minkowski distances.
    diff_vector = {}
    #It makes more sense to keep +ve, 0 and -ve values, as they clearly state the difference
    #between genre1 and genre2.
for key in data_dictionary_tf_idf_genre1:
if key in data_dictionary_tf_idf_genre2:
diff_vector[key] = data_dictionary_tf_idf_genre1[key] - data_dictionary_tf_idf_genre2[key]
else:
diff_vector[key] = data_dictionary_tf_idf_genre1[key]
for key in data_dictionary_tf_idf_genre2:
if key in diff_vector:
continue
else:
diff_vector[key] = 0 - data_dictionary_tf_idf_genre2[key]
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key = keyVal[0]
if key in diff_vector:
            continue
else:
diff_vector[key] = 0.0
genre_diff = sorted(diff_vector.items(), key=operator.itemgetter(1), reverse=True)
#pprint.pprint(genre_model_value_tfidf_genre1)
#pprint.pprint(genre_model_value_tfidf_genre2)
pprint.pprint(genre_diff)
elif args.MODEL == "P-DIFF1" :
###############################
#MODEL = P-DIFF-1
###############################
# ===============================================================================
#Subtask-1: Calculate the number of movies for a given tag for genre1 and genre2
#and total movies in genre1
#================================================================================
dd_r1_genre1 = {}
dd_m1_genre2 = {}
M = total_movie_count #Movies in genre1 U genre2
cur2.execute("SELECT count(movieid) FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchone()
R = float(result1[0]) #Movies in genre1
#Calculation for genre1. r = movies in genre1 with tag t
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_r1_genre1:
dd_r1_genre1[tagName] = (dd_r1_genre1[tagName] + 1)
else:
dd_r1_genre1[tagName] = 1
#Calculation for m=movies in genre1 U genre 2 with tag t
cur2.execute("SELECT distinct(movieid) FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_m1_genre2:
dd_m1_genre2[tagName] = (dd_m1_genre2[tagName] + 1)
else:
dd_m1_genre2[tagName] = 1
#print dd_r1_genre1
#print dd_m1_genre2
#Subtask:2 - Calculate the pdiff1 using the given formula
pdiff_wt_genre1={}
for tag in dd_m1_genre2:
r=0
if tag in dd_r1_genre1:
r = float(dd_r1_genre1[tag])
m = float(dd_m1_genre2[tag])
val1=0
val2=0
val3=0
val4=0
        #r = 0 means that the tag never occurs for the genre.
        #R = r means that the tag occurs for every movie of the genre, so its frequency is 1 and its
        #discriminating power is 0. In both scenarios we ignore such a tag.
        #m >= r always, since M is a union.
        #Add the probability of the tag in M to avoid edge cases (ref: Salton & Buckley)
        p_tag = float(m / M)
        #Since the maximum probability is 1, this smoothing term makes little
        #difference for values below 1.
val1 = float(float(r + p_tag)/(R-r+1))
val3 = float(float(r + p_tag)/(R + 1))
val2 = float((m-r+p_tag)/(M-m-R+r+1))
val4 = float((m-r+p_tag)/(M-R+1))
pdiff_wt_genre1[tag] = float(log(float(val1/val2),2)) * float(val3 - val4)
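        #In symbols (a hedged restatement of val1..val4 above, with p = m/M):
        #  weight(t) = log2( ((r+p)/(R-r+1)) / ((m-r+p)/(M-m-R+r+1)) )
        #             * ( (r+p)/(R+1) - (m-r+p)/(M-R+1) )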
#Make weight of other tags to zero
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyval in tagName:
key = keyval[0]
if key in pdiff_wt_genre1:
continue
else:
pdiff_wt_genre1[key] = 0
pprint.pprint(sorted(pdiff_wt_genre1.items(), key=operator.itemgetter(1), reverse=True))
elif args.MODEL == "P-DIFF2":
###############################
#MODEL = P-DIFF-2
###############################
# ===============================================================================
#Subtask-1: Calculate the number of movies for a given tag for genre1 and genre2
#and total movies in genre2
#================================================================================
dd_r1_genre1 = {}
dd_m1_genre2 = {}
M = total_movie_count #Movies in genre1 U genre2
cur2.execute("SELECT count(movieid) FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchone()
    R = float(result1[0]) #Movies in genre2
#Calculation for genre2. r = movies in genre2 without tag t. We first get the value of movies in genre2 with tag t then
#subtract that value from total movies there in genre2, for each tag
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_r1_genre1:
dd_r1_genre1[tagName] = (dd_r1_genre1[tagName] + 1)
else:
dd_r1_genre1[tagName] = 1
#Calculation for genre2. m=movies in genre1 U genre 2 without tag t. Subtract later from M to get movies in genre1 or genre2
#without a tag
cur2.execute("SELECT distinct(movieid) FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
        #Select distinct tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_m1_genre2:
dd_m1_genre2[tagName] = (dd_m1_genre2[tagName] + 1)
else:
dd_m1_genre2[tagName] = 1
    #Subtask:2 - Calculate the pdiff2 using the given formula
pdiff_wt_genre1={}
for tag in dd_m1_genre2:
r = R
if tag in dd_r1_genre1:
r = R - float(dd_r1_genre1[tag])
m = M - float(dd_m1_genre2[tag])
val1=0
val2=0
val3=0
val4=0
        #r = 0 means that the tag never occurs for the genre.
        #R = r means that the tag occurs for every movie of the genre, so its frequency is 1 and its
        #discriminating power is 0. In both scenarios we ignore such a tag.
        #m >= r always, since M is a union.
        #Add the probability of the tag being absent from M to avoid edge cases (ref: Salton & Buckley)
        p_tag = float(m / M)
        #Since the maximum probability is 1, this smoothing term makes little
        #difference for values below 1.
val1 = float(float(r + p_tag)/(R-r+1))
val3 = float(float(r + p_tag)/(R + 1))
val2 = float((m-r+p_tag)/(M-m-R+r+1))
val4 = float((m-r+p_tag)/(M-R+1))
pdiff_wt_genre1[tag] = float(log(float(val1/val2),2)) * (float(val3 - val4))
#Make weight of other tags to zero
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyval in tagName:
key = keyval[0]
if key in pdiff_wt_genre1:
continue
else:
pdiff_wt_genre1[key] = 0
pprint.pprint(sorted(pdiff_wt_genre1.items(), key=operator.itemgetter(1), reverse=True))
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# take a list of files and (optionally) a list of paths
# return the first existing file found in the paths
# [file1, file2, file3], [path1, path2, path3]
# search order is:
# path1/file1
# path1/file2
# path1/file3
# path2/file1
# path2/file2
# path2/file3
# path3/file1
# path3/file2
# path3/file3
# first file found with os.path.exists() is returned
# if no file matches, a LookupError is raised
# EXAMPLES
# - name: copy first existing file found to /some/file
# action: copy src=$item dest=/some/file
# with_first_found:
# - files: foo ${inventory_hostname} bar
# paths: /tmp/production /tmp/staging
# that will look for files in this order:
# /tmp/production/foo
# /tmp/production/${inventory_hostname}
# /tmp/production/bar
# /tmp/staging/foo
# /tmp/staging/${inventory_hostname}
# /tmp/staging/bar
# - name: copy first existing file found to /some/file
# action: copy src=$item dest=/some/file
# with_first_found:
# - files: /some/place/foo ${inventory_hostname} /some/place/else
# that will look for files in this order:
# /some/place/foo
# $relative_path/${inventory_hostname}
# /some/place/else
# example - including tasks:
# tasks:
# - include: $item
# with_first_found:
# - files: generic
# paths: tasks/staging tasks/production
# this will include the tasks in the file generic where it is found first (staging or production)
# example simple file lists
#tasks:
#- name: first found file
# action: copy src=$item dest=/etc/file.cfg
# with_first_found:
# - files: foo.${inventory_hostname} foo
# example skipping if no matched files
# First_found also offers the ability to control whether or not failing
# to find a file returns an error or not
#
#- name: first found file - or skip
# action: copy src=$item dest=/etc/file.cfg
# with_first_found:
# - files: foo.${inventory_hostname}
# skip: true
# example a role with default configuration and configuration per host
# you can set multiple terms with their own files and paths to look through.
# consider a role that sets some configuration per host falling back on a default config.
#
#- name: some configuration template
# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
# with_first_found:
# - files:
# - ${inventory_hostname}/etc/file.cfg
# paths:
# - ../../../templates.overwrites
# - ../../../templates
# - files:
# - etc/file.cfg
# paths:
# - templates
# the above will return an empty list if the files cannot be found at all
# if skip is unspecified or if it is set to false then it will raise an
# error which can be caught by ignore_errors: true for that action.
# finally - if you want you can use it, in place to replace first_available_file:
# you simply cannot use the - files, path or skip options. simply replace
# first_available_file with with_first_found and leave the file listing in place
#
#
# - name: with_first_found like first_available_file
# action: copy src=$item dest=/tmp/faftest
# with_first_found:
# - ../files/foo
# - ../files/bar
# - ../files/baz
# ignore_errors: true
import os
from lookups.lookup import LookupError
from lookups.lookup import LookupBase
from lookups.utils.boolean import boolean
class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
anydict = False
skip = False
for term in terms:
if isinstance(term, dict):
anydict = True
total_search = []
if anydict:
for term in terms:
if isinstance(term, dict):
files = term.get('files', [])
paths = term.get('paths', [])
skip = boolean(term.get('skip', False))
filelist = files
if isinstance(files, basestring):
files = files.replace(',', ' ')
files = files.replace(';', ' ')
filelist = files.split(' ')
pathlist = paths
if paths:
if isinstance(paths, basestring):
paths = paths.replace(',', ' ')
paths = paths.replace(':', ' ')
paths = paths.replace(';', ' ')
pathlist = paths.split(' ')
if not pathlist:
total_search = filelist
else:
for path in pathlist:
for fn in filelist:
f = os.path.join(path, fn)
total_search.append(f)
else:
total_search.append(term)
else:
total_search = self._flatten(terms)
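        # note the for/else below: the else clause runs only when the loop
        # finishes without returning, i.e. when no candidate file existed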
for fn in total_search:
            if os.path.exists(fn):
return [fn]
else:
if skip:
return []
else:
err = "No file was found when using with_first_found. " + \
"Use the 'skip: true' option to allow this task to be skipped if no files are found"
raise LookupError(err)
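# illustrative sketch (not part of the plugin): given the dict-style term
#   terms = [{'files': 'foo bar', 'paths': '/tmp/production /tmp/staging'}]
# the loops above expand total_search to
#   ['/tmp/production/foo', '/tmp/production/bar',
#    '/tmp/staging/foo', '/tmp/staging/bar']
# and run() returns the first entry that exists on disk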
|
# coding: utf-8
import time
import json
import urllib
import urllib2
import logging
from bs4 import BeautifulSoup as Soup
logging.basicConfig(level=logging.DEBUG)
url = 'https://play.google.com/store/getreviews?authuser=0'
headers = {
'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
'user-agent': 'Mozilla/5.0'
}
payload = {
'id': 'com.google.android.apps.maps',
'reviewType': 0,
'pageNum': 0,
'reviewSortOrder': 4,
'xhr': 1,
'hl': 'ko'
}
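# form fields for the getreviews endpoint (field meanings are assumptions
# from observed behaviour): 'id' is the app's package name, 'pageNum' pages
# through the reviews, and 'hl' sets the response locale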
def parse():
values = urllib.urlencode(payload)
req = urllib2.Request(url, values, headers)
response = urllib2.urlopen(req)
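    # the first 5 bytes are an anti-JSON-hijacking prefix (assumed to follow
    # Google's ")]}'"-style convention), so skip them before decoding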
data = json.loads(response.read()[5:])
    soup = Soup(data[0][2], 'html.parser')
for review in soup.select('.single-review'):
body = review.select('.review-body')[0].text
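        # the star rating is rendered as a CSS width such as "width: 80%;";
        # dropping the trailing "%;" and dividing by 20 maps 0-100% to 0-5 stars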
rating = int(review.select('.current-rating')[0]['style'].split(':')[1].strip()[:-2])/20
        if rating == 1:
logging.warning(body)
while True:
logging.info('start parsing')
parse()
logging.info('parsing ends')
    logging.info('sleeping for 60s')
time.sleep(60)
|
#Task 12. Variant 28.
#1-50. Develop the game "Tic-Tac-Toe" (see M. Dawson, "Programming in Python", ch. 6).
#Сароквашин Максим
#29.05.2016
X="X"
O="O"
EMPTY=" "
TIE="Ничья"
NUM_SQUARES=9
def display_instruct():
    print('''
Welcome to the arena of the grandest intellectual contests of all time.
Your brain and my processor will face off over the Tic-Tac-Toe board.
To make a move, enter a number from 0 to 8. Each number corresponds to a
board square, as shown below:
0 | 1 | 2
---------
3 | 4 | 5
---------
6 | 7 | 8''')
def ask_yes_no(question):
response = None
while response not in ("y", "n"):
response = input(question).lower()
return response
def ask_number(question, low, high):
response = None
while response not in range(low, high):
response = int(input(question))
return response
def pieces():
    go_first = ask_yes_no("Do you want to make the first move? (y/n): ")
if go_first == "y":
print("\nНу что ж, даю тебе фору: играй крестиками.")
human = X
computer = O
else:
print("\nТвоя удаль тебя погубит... Буду начинать я.")
computer = X
human = O
return computer, human
def new_board():
board = []
for square in range(NUM_SQUARES):
board.append(EMPTY)
return board
def display_board(board):
print("\n\t", board[0], "|", board[1], "|", board[2])
print("\t", "---------")
print("\t", board[3], "|", board[4], "|", board[5])
print("\t", "---------")
print("\t", board[6], "|", board[7], "|", board[8])
def legal_moves(board):
moves = []
for square in range(NUM_SQUARES):
if board[square] == EMPTY:
moves.append(square)
return moves
def winner(board):
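    # the eight winning lines: three rows, three columns, two diagonals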
WAYS_TO_WIN = ((0, 1, 2), (3, 4, 5), (6, 7, 8), (0, 3, 6), (1, 4, 7), (2, 5, 8), (0, 4, 8), (2, 4, 6))
for row in WAYS_TO_WIN:
if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:
winner = board[row[0]]
return winner
if EMPTY not in board:
return TIE
return None
def human_move(board, human):
legal = legal_moves(board)
move = None
while move not in legal:
        move = ask_number("Your move. Choose one of the squares (0-8): ", 0, NUM_SQUARES)
if move not in legal:
print("\nСмешной человек! Это поле уже занято. Выбери другое.\n")
print("Ладно...")
return move
def computer_move(board, computer, human):
board = board[:]
BEST_MOVES = (4, 0, 2, 6, 8, 1, 3, 5, 7)
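    # Strategy (using the board copy made above so trial moves don't touch
    # the real board): 1) take any immediately winning move; 2) otherwise
    # block any move with which the human would win next; 3) otherwise take
    # the best free square - centre first, then corners, then edges.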
print("Я выберу поле номер", end = " ")
for move in legal_moves(board):
board[move] = computer
if winner(board) == computer:
print(move)
return move
board[move] = EMPTY
for move in legal_moves(board):
board[move] = human
if winner(board) == human:
print(move)
return move
board[move] = EMPTY
for move in BEST_MOVES:
if move in legal_moves(board):
print(move)
return move
def next_turn(turn):
if turn == X:
return O
else:
return X
def congrat_winner(the_winner, computer, human):
if the_winner != TIE:
print("Три", the_winner, "в ряд!\n")
else:
print("Ничья!\n")
if the_winner == computer:
print("Kaк я и предсказывал. победа в очередной раз осталась за мной.\nВот еще один довод в пользу того. что компьютеры превосходят людей решительно во всем.")
elif the_winner == human:
print("О нет, этого не может быть! Неужели ты как-то сумел перехитрить меня, белковый?\nКлянусь: я, компьютер, не допущу этого больше никогда!")
elif the_winner == TIE:
print("Тебе несказанно повезло, дружок: ты сумел свести игру вничью.\nРадуйся же сегодняшнему успеху! Завтра уже не суждено его повторить.")
def main():
display_instruct()
computer, human = pieces()
turn = X
board = new_board()
display_board(board)
while not winner(board):
if turn == human:
move = human_move(board, human)
board[move] = human
else:
move = computer_move(board, computer, human)
board[move] = computer
display_board(board)
        turn = next_turn(turn)
    the_winner = winner(board)
congrat_winner(the_winner, computer, human)
main()
input("\n\nНажмите Enter, чтобы выйти.")
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from eventlet import greenthread
import mock
import os_xenapi
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import fixture as config_fixture
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from nova.compute import flavors
from nova.compute import power_state
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import hardware
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import utils as image_utils
from nova.virt.xenapi import vm_utils
CONF = nova.conf.CONF
XENSM_TYPE = 'xensm'
ISCSI_TYPE = 'iscsi'
def get_fake_connection_data(sr_type):
fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
'server': 'myserver',
'serverpath': '/local/scratch/myname',
'sr_type': 'nfs',
'introduce_sr_keys': ['server',
'serverpath',
'sr_type'],
'vdi_uuid': 'falseVDI'},
ISCSI_TYPE: {'volume_id': 'fake_volume_id',
'target_lun': 1,
'target_iqn': 'fake_iqn:volume-fake_volume_id',
'target_portal': u'localhost:3260',
'target_discovered': False}, }
return fakes[sr_type]
def _fake_noop(*args, **kwargs):
return
class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
pass
class LookupTestCase(VMUtilsTestBase):
def setUp(self):
super(LookupTestCase, self).setUp()
self.session = mock.Mock()
self.name_label = 'my_vm'
def test_normal(self):
self.session.call_xenapi.return_value = ['x']
result = vm_utils.lookup(self.session, self.name_label)
self.assertEqual('x', result)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label)
def test_no_result(self):
self.session.call_xenapi.return_value = []
result = vm_utils.lookup(self.session, self.name_label)
self.assertIsNone(result)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label)
def test_too_many(self):
self.session.call_xenapi.return_value = ['a', 'b']
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label)
def test_rescue_none(self):
self.session.call_xenapi.side_effect = [[], ['x']]
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('x', result)
self.session.call_xenapi.assert_has_calls([
mock.call("VM.get_by_name_label", self.name_label + '-rescue'),
mock.call("VM.get_by_name_label", self.name_label)])
def test_rescue_found(self):
self.session.call_xenapi.return_value = ['y']
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('y', result)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label + '-rescue')
def test_rescue_too_many(self):
self.session.call_xenapi.return_value = ['a', 'b', 'c']
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label,
check_rescue=True)
self.session.call_xenapi.assert_called_once_with(
"VM.get_by_name_label", self.name_label + '-rescue')
class GenerateConfigDriveTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'safe_find_sr')
@mock.patch.object(vm_utils, "create_vdi", return_value='vdi_ref')
@mock.patch.object(vm_utils.instance_metadata, "InstanceMetadata")
@mock.patch.object(vm_utils.configdrive, 'ConfigDriveBuilder')
@mock.patch.object(vm_utils.utils, 'execute')
@mock.patch.object(vm_utils.volume_utils, 'stream_to_vdi')
@mock.patch.object(vm_utils.os.path, 'getsize', return_value=100)
@mock.patch.object(vm_utils, 'create_vbd', return_value='vbd_ref')
@mock.patch.object(vm_utils.utils, 'tempdir')
def test_no_admin_pass(self, mock_tmpdir, mock_create_vbd, mock_size,
mock_stream, mock_execute, mock_builder,
mock_instance_metadata, mock_create_vdi,
mock_find_sr):
mock_tmpdir.return_value.__enter__.return_value = '/mock'
with mock.patch.object(six.moves.builtins, 'open') as mock_open:
mock_open.return_value.__enter__.return_value = 'open_fd'
vm_utils.generate_configdrive('session', 'context', 'instance',
'vm_ref', 'userdevice',
'network_info')
mock_size.assert_called_with('/mock/configdrive.vhd')
mock_open.assert_called_with('/mock/configdrive.vhd')
mock_execute.assert_called_with('qemu-img', 'convert', '-Ovpc',
'/mock/configdrive',
'/mock/configdrive.vhd')
mock_instance_metadata.assert_called_with(
'instance', content=None, extra_md={},
network_info='network_info', request_context='context')
mock_stream.assert_called_with('session', 'instance', 'vhd',
'open_fd', 100, 'vdi_ref')
@mock.patch.object(vm_utils, "destroy_vdi")
@mock.patch.object(vm_utils, 'safe_find_sr')
@mock.patch.object(vm_utils, "create_vdi", return_value='vdi_ref')
@mock.patch.object(vm_utils.instance_metadata, "InstanceMetadata",
side_effect=test.TestingException)
def test_vdi_cleaned_up(self, mock_instance_metadata, mock_create,
mock_find_sr, mock_destroy):
self.assertRaises(test.TestingException, vm_utils.generate_configdrive,
'session', None, None, None, None, None)
mock_destroy.assert_called_once_with('session', 'vdi_ref')
class XenAPIGetUUID(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_get_sys_hypervisor_uuid',
return_value='2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
def test_get_this_vm_uuid_new_kernel(self, mock_get_sys_hypervisor_uuid):
result = vm_utils.get_this_vm_uuid(None)
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', result)
mock_get_sys_hypervisor_uuid.assert_called_once_with()
@mock.patch('nova.virt.xenapi.vm_utils._get_sys_hypervisor_uuid',
side_effect=IOError(13, 'Permission denied'))
@mock.patch('nova.privsep.xenapi.xenstore_read',
side_effect=[('27', ''),
('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', '')])
def test_get_this_vm_uuid_old_kernel_reboot(self, fake_read, fake_uuid):
result = vm_utils.get_this_vm_uuid(None)
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', result)
fake_read.assert_has_calls([
mock.call('domid'),
mock.call('/local/domain/27/vm')])
fake_uuid.assert_called_once_with()
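# Minimal no-op stand-in for an os-xenapi session; individual tests patch
# whichever of these methods they need to observe.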
class FakeSession(object):
def call_xenapi(self, *args):
pass
def call_plugin(self, *args):
pass
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
pass
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, *args, **kwargs):
pass
class FetchVhdImageTestCase(VMUtilsTestBase):
def setUp(self):
super(FetchVhdImageTestCase, self).setUp()
self.context = context.get_admin_context()
self.context.auth_token = 'auth_token'
self.session = FakeSession()
self.instance = {"uuid": "uuid"}
self.image_handler = image_utils.get_image_handler(
CONF.xenserver.image_handler)
self.flags(group='glance', api_servers=['http://localhost:9292'])
make_uuid_stack_patcher = mock.patch.object(
vm_utils, '_make_uuid_stack', return_value=["uuid_stack"])
self.addCleanup(make_uuid_stack_patcher.stop)
self.mock_make_uuid_stack = make_uuid_stack_patcher.start()
get_sr_path_patcher = mock.patch.object(
vm_utils, 'get_sr_path', return_value='sr_path')
self.addCleanup(get_sr_path_patcher.stop)
self.mock_get_sr_path = get_sr_path_patcher.start()
def _stub_glance_download_vhd(self, raise_exc=None):
call_plugin_patcher = mock.patch.object(
self.session, 'call_plugin_serialized_with_retry')
self.addCleanup(call_plugin_patcher.stop)
self.mock_call_plugin = call_plugin_patcher.start()
if raise_exc:
self.mock_call_plugin.side_effect = raise_exc
else:
self.mock_call_plugin.return_value = {'root': {'uuid': 'vdi'}}
def _assert_make_uuid_stack_and_get_sr_path(self):
self.mock_make_uuid_stack.assert_called_once_with()
self.mock_get_sr_path.assert_called_once_with(self.session)
def _assert_call_plugin_serialized_with_retry(self):
self.mock_call_plugin.assert_called_once_with(
'glance.py',
'download_vhd2',
0,
mock.ANY,
mock.ANY,
extra_headers={'X-Auth-Token': 'auth_token',
'X-Roles': '',
'X-Tenant-Id': None,
'X-User-Id': None,
'X-Identity-Status': 'Confirmed'},
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path')
@mock.patch.object(vm_utils, '_check_vdi_size')
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value="sr")
def test_fetch_vhd_image_works_with_glance(self, mock_safe_find_sr,
mock_scan_sr,
mock_check_vdi_size):
self._stub_glance_download_vhd()
result = vm_utils._fetch_vhd_image(self.context, self.session,
self.instance, 'image_id',
self.image_handler)
self.assertEqual("vdi", result['root']['uuid'])
mock_safe_find_sr.assert_called_once_with(self.session)
mock_scan_sr.assert_called_once_with(self.session, "sr")
mock_check_vdi_size.assert_called_once_with(self.context, self.session,
self.instance, "vdi")
self._assert_call_plugin_serialized_with_retry()
self._assert_make_uuid_stack_and_get_sr_path()
@mock.patch.object(vm_utils, 'destroy_vdi',
side_effect=exception.StorageError(reason=""))
@mock.patch.object(FakeSession, 'call_xenapi', return_value="ref")
@mock.patch.object(
vm_utils, '_check_vdi_size',
side_effect=exception.FlavorDiskSmallerThanImage(flavor_size=0,
image_size=1))
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value="sr")
def test_fetch_vhd_image_cleans_up_vdi_on_fail(
self, mock_safe_find_sr, mock_scan_sr, mock_check_vdi_size,
mock_call_xenapi, mock_destroy_vdi):
self._stub_glance_download_vhd()
self.assertRaises(exception.FlavorDiskSmallerThanImage,
vm_utils._fetch_vhd_image, self.context, self.session,
self.instance, 'image_id', self.image_handler)
mock_safe_find_sr.assert_called_once_with(self.session)
mock_scan_sr.assert_called_once_with(self.session, "sr")
mock_check_vdi_size.assert_called_once_with(self.context, self.session,
self.instance, "vdi")
mock_call_xenapi.assert_called_once_with("VDI.get_by_uuid", "vdi")
mock_destroy_vdi.assert_called_once_with(self.session, "ref")
self._assert_call_plugin_serialized_with_retry()
self._assert_make_uuid_stack_and_get_sr_path()
def test_fetch_vhd_image_download_exception(self):
self._stub_glance_download_vhd(raise_exc=RuntimeError)
self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
self.context, self.session, self.instance, 'image_id',
self.image_handler)
self._assert_call_plugin_serialized_with_retry()
self._assert_make_uuid_stack_and_get_sr_path()
class TestImageCompression(VMUtilsTestBase):
def test_image_compression(self):
        # Tests the unset default (None) and an explicitly configured level.
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=6, group='xenserver')
self.assertEqual(vm_utils.get_compression_level(), 6)
class ResizeHelpersTestCase(VMUtilsTestBase):
def setUp(self):
super(ResizeHelpersTestCase, self).setUp()
self.context = context.RequestContext('user', 'project')
@mock.patch('nova.privsep.fs.ext_journal_disable')
@mock.patch('nova.privsep.fs.ext_journal_enable')
@mock.patch('nova.privsep.fs.resize_partition')
@mock.patch('nova.privsep.fs.resize2fs')
@mock.patch('nova.privsep.fs.e2fsck')
def test_resize_part_and_fs_down_succeeds(
self, mock_fsck, mock_resize2fs, mock_resize,
mock_disable_journal, mock_enable_journal):
dev_path = '/dev/fake'
partition_path = '%s1' % dev_path
vm_utils._resize_part_and_fs('fake', 0, 20, 10, 'boot')
mock_fsck.assert_has_calls([
mock.call(partition_path)])
mock_resize2fs.assert_has_calls([
mock.call(partition_path, [0], size='10s')])
mock_resize.assert_has_calls([
mock.call(dev_path, 0, 9, True)])
mock_disable_journal.assert_has_calls([
mock.call(partition_path)])
mock_enable_journal.assert_has_calls([
mock.call(partition_path)])
@mock.patch.object(vm_utils.LOG, 'debug')
def test_log_progress_if_required(self, mock_debug):
current = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(current))
time_fixture.advance_time_seconds(
vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
vm_utils._log_progress_if_required(1, current, 2)
mock_debug.assert_called_once_with(
"Sparse copy in progress, %(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": 50.0, "left": 1})
@mock.patch.object(vm_utils.LOG, 'debug')
def test_log_progress_if_not_required(self, mock_debug):
current = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(current))
time_fixture.advance_time_seconds(
vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
vm_utils._log_progress_if_required(1, current, 2)
mock_debug.assert_not_called()
@mock.patch('nova.privsep.fs.ext_journal_disable')
@mock.patch('nova.privsep.fs.resize2fs',
side_effect=processutils.ProcessExecutionError)
@mock.patch('nova.privsep.fs.e2fsck')
def test_resize_part_and_fs_down_fails_disk_too_big(
self, mock_fsck, mock_resize2fs, mock_disable_journal):
self.assertRaises(exception.ResizeError,
vm_utils._resize_part_and_fs,
"fake", 0, 20, 10, "boot")
mock_fsck.assert_has_calls([mock.call('/dev/fake1')])
@mock.patch('nova.privsep.fs.ext_journal_disable')
@mock.patch('nova.privsep.fs.ext_journal_enable')
@mock.patch('nova.privsep.fs.resize_partition')
@mock.patch('nova.privsep.fs.resize2fs')
@mock.patch('nova.privsep.fs.e2fsck')
def test_resize_part_and_fs_up_succeeds(
self, mock_fsck, mock_resize2fs, mock_resize,
mock_disable_journal, mock_enable_journal):
dev_path = '/dev/fake'
partition_path = '%s1' % dev_path
vm_utils._resize_part_and_fs('fake', 0, 20, 30, '')
mock_fsck.assert_has_calls([
mock.call(partition_path)])
mock_resize2fs.assert_has_calls([
mock.call(partition_path, [0])])
mock_resize.assert_has_calls([
mock.call(dev_path, 0, 29, False)])
mock_disable_journal.assert_has_calls([
mock.call(partition_path)])
mock_enable_journal.assert_has_calls([
mock.call(partition_path)])
def test_resize_disk_throws_on_zero_size(self):
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0)
self.assertRaises(exception.ResizeError, vm_utils.resize_disk,
"session", "instance", "vdi_ref", flavor)
def test_auto_config_disk_returns_early_on_zero_size(self):
vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
class CheckVDISizeTestCase(VMUtilsTestBase):
def setUp(self):
super(CheckVDISizeTestCase, self).setUp()
self.context = 'fakecontext'
self.session = 'fakesession'
self.instance = objects.Instance(uuid=uuids.fake)
self.flavor = objects.Flavor()
self.vdi_uuid = 'fakeuuid'
self.stub_out('nova.objects.Instance.get_flavor',
lambda *a, **kw: self.flavor)
@mock.patch.object(vm_utils, '_get_vdi_chain_size',
return_value=1073741824)
def test_not_too_large(self, mock_get_vdi_chain_size):
self.flavor.root_gb = 1
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
mock_get_vdi_chain_size.assert_called_once_with(self.session,
self.vdi_uuid)
@mock.patch.object(vm_utils, '_get_vdi_chain_size',
return_value=11811160065) # 10GB overhead allowed
def test_too_large(self, mock_get_vdi_chain_size):
self.flavor.root_gb = 1
self.assertRaises(exception.FlavorDiskSmallerThanImage,
vm_utils._check_vdi_size, self.context,
self.session, self.instance, self.vdi_uuid)
mock_get_vdi_chain_size.assert_called_once_with(self.session,
self.vdi_uuid)
def test_zero_root_gb_disables_check(self):
self.flavor.root_gb = 0
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
def setUp(self):
super(GetInstanceForVdisForSrTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='http://localhost',
connection_password='test_pass',
group='xenserver')
def test_get_instance_vdis_for_sr(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
vdi_1 = fake.create_vdi('vdiname1', sr_ref)
vdi_2 = fake.create_vdi('vdiname2', sr_ref)
for vdi_ref in [vdi_1, vdi_2]:
fake.create_vbd(vm_ref, vdi_ref)
stubs.stubout_session(self, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([vdi_1, vdi_2], result)
def test_get_instance_vdis_for_sr_no_vbd(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
stubs.stubout_session(self, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([], result)
class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'lookup', return_value='ignored')
def test_lookup_call(self, mock_lookup):
vm_utils.vm_ref_or_raise('session', 'somename')
mock_lookup.assert_called_once_with('session', 'somename')
@mock.patch.object(vm_utils, 'lookup', return_value='vmref')
def test_return_value(self, mock_lookup):
self.assertEqual(
'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
mock_lookup.assert_called_once_with('session', 'somename')
class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'lookup', return_value=None)
def test_exception_raised(self, mock_lookup):
self.assertRaises(
exception.InstanceNotFound,
lambda: vm_utils.vm_ref_or_raise('session', 'somename')
)
mock_lookup.assert_called_once_with('session', 'somename')
@mock.patch.object(vm_utils, 'lookup', return_value=None)
def test_exception_msg_contains_vm_name(self, mock_lookup):
try:
vm_utils.vm_ref_or_raise('session', 'somename')
except exception.InstanceNotFound as e:
self.assertIn('somename', six.text_type(e))
mock_lookup.assert_called_once_with('session', 'somename')
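# A class-level patch applies to every test method below; the resulting
# safe_find_sr mock is passed in as each method's last argument.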
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
class CreateCachedImageTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateCachedImageTestCase, self).setUp()
self.session = self.get_fake_session()
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD,
'image_handler'))
@mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD,
'image_handler'))
def test_no_cow_no_ext(self, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
'vdi_ref', None, None, None,
'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD,
'image_handler'))
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'vdi_uuid',
'file': None}})
def test_noncached(self, mock_fetch_image, mock_clone_vdi,
mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
None, None, None, None, None,
None, None, 'vdi_uuid']
self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD,
'image_handler'))
class DestroyCachedImageTestCase(VMUtilsTestBase):
def setUp(self):
super(DestroyCachedImageTestCase, self).setUp()
self.session = self.get_fake_session()
@mock.patch.object(vm_utils, '_find_cached_images')
@mock.patch.object(vm_utils, 'destroy_vdi')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(time, 'time')
def test_destroy_cached_image_out_of_keep_days(self,
mock_time,
mock_walk_vdi_chain,
mock_destroy_vdi,
mock_find_cached_images):
fake_cached_time = '0'
mock_find_cached_images.return_value = {'fake_image_id': {
'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}}
self.session.call_xenapi.return_value = 'fake_uuid'
mock_walk_vdi_chain.return_value = ('just_one',)
mock_time.return_value = 2 * 3600 * 24
fake_keep_days = 1
expected_return = set()
expected_return.add('fake_uuid')
uuid_return = vm_utils.destroy_cached_images(self.session,
'fake_sr_ref', False, False, fake_keep_days)
mock_find_cached_images.assert_called_once()
mock_walk_vdi_chain.assert_called_once()
mock_time.assert_called()
mock_destroy_vdi.assert_called_once()
self.assertEqual(expected_return, uuid_return)
@mock.patch.object(vm_utils, '_find_cached_images')
@mock.patch.object(vm_utils, 'destroy_vdi')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(time, 'time')
def test_destroy_cached_image(self, mock_time, mock_walk_vdi_chain,
mock_destroy_vdi, mock_find_cached_images):
fake_cached_time = '0'
mock_find_cached_images.return_value = {'fake_image_id': {
'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}}
self.session.call_xenapi.return_value = 'fake_uuid'
mock_walk_vdi_chain.return_value = ('just_one',)
mock_time.return_value = 2 * 3600 * 24
fake_keep_days = 1
expected_return = set()
expected_return.add('fake_uuid')
uuid_return = vm_utils.destroy_cached_images(self.session,
'fake_sr_ref', False, False, fake_keep_days)
mock_find_cached_images.assert_called_once()
mock_walk_vdi_chain.assert_called_once()
mock_destroy_vdi.assert_called_once()
self.assertEqual(expected_return, uuid_return)
@mock.patch.object(vm_utils, '_find_cached_images')
@mock.patch.object(vm_utils, 'destroy_vdi')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(time, 'time')
def test_destroy_cached_image_cached_time_not_exceed(
self, mock_time, mock_walk_vdi_chain,
mock_destroy_vdi, mock_find_cached_images):
fake_cached_time = '0'
mock_find_cached_images.return_value = {'fake_image_id': {
'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}}
self.session.call_xenapi.return_value = 'fake_uuid'
mock_walk_vdi_chain.return_value = ('just_one',)
mock_time.return_value = 1 * 3600 * 24
fake_keep_days = 2
expected_return = set()
uuid_return = vm_utils.destroy_cached_images(self.session,
'fake_sr_ref', False, False, fake_keep_days)
mock_find_cached_images.assert_called_once()
mock_walk_vdi_chain.assert_called_once()
mock_destroy_vdi.assert_not_called()
self.assertEqual(expected_return, uuid_return)
@mock.patch.object(vm_utils, '_find_cached_images')
@mock.patch.object(vm_utils, 'destroy_vdi')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(time, 'time')
def test_destroy_cached_image_no_cached_time(
self, mock_time, mock_walk_vdi_chain,
mock_destroy_vdi, mock_find_cached_images):
mock_find_cached_images.return_value = {'fake_image_id': {
'vdi_ref': 'fake_vdi_ref', 'cached_time': None}}
self.session.call_xenapi.return_value = 'fake_uuid'
mock_walk_vdi_chain.return_value = ('just_one',)
fake_keep_days = 2
expected_return = set()
uuid_return = vm_utils.destroy_cached_images(self.session,
'fake_sr_ref', False, False, fake_keep_days)
mock_find_cached_images.assert_called_once()
mock_walk_vdi_chain.assert_called_once()
mock_destroy_vdi.assert_not_called()
self.assertEqual(expected_return, uuid_return)
@mock.patch.object(vm_utils, 'is_vm_shutdown', return_value=True)
class ShutdownTestCase(VMUtilsTestBase):
def test_hardshutdown_should_return_true_when_vm_is_shutdown(
self, mock_is_vm_shutdown):
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.assertTrue(vm_utils.hard_shutdown_vm(
session, instance, vm_ref))
mock_is_vm_shutdown.assert_called_once_with(session, vm_ref)
def test_cleanshutdown_should_return_true_when_vm_is_shutdown(
self, mock_is_vm_shutdown):
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.assertTrue(vm_utils.clean_shutdown_vm(
session, instance, vm_ref))
mock_is_vm_shutdown.assert_called_once_with(session, vm_ref)
@mock.patch.object(FakeSession, 'call_xenapi', return_value='vbd_ref')
class CreateVBDTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateVBDTestCase, self).setUp()
self.session = FakeSession()
self.vbd_rec = self._generate_vbd_rec()
def _generate_vbd_rec(self):
vbd_rec = {}
vbd_rec['VM'] = 'vm_ref'
vbd_rec['VDI'] = 'vdi_ref'
vbd_rec['userdevice'] = '0'
vbd_rec['bootable'] = False
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
return vbd_rec
def test_create_vbd_default_args(self, mock_call_xenapi):
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
self.assertEqual(result, "vbd_ref")
mock_call_xenapi.assert_called_once_with('VBD.create', self.vbd_rec)
def test_create_vbd_osvol(self, mock_call_xenapi):
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
osvol=True)
self.assertEqual(result, "vbd_ref")
mock_call_xenapi.assert_has_calls([
mock.call('VBD.create', self.vbd_rec),
mock.call('VBD.add_to_other_config', "vbd_ref", "osvol", "True")])
def test_create_vbd_extra_args(self, mock_call_xenapi):
self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
self.vbd_rec['type'] = 'a'
self.vbd_rec['mode'] = 'RO'
self.vbd_rec['bootable'] = True
self.vbd_rec['empty'] = True
self.vbd_rec['unpluggable'] = False
result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
vbd_type="a", read_only=True, bootable=True,
empty=True, unpluggable=False)
self.assertEqual(result, "vbd_ref")
mock_call_xenapi.assert_called_once_with('VBD.create', self.vbd_rec)
@mock.patch.object(vm_utils, 'create_vbd', return_value='vbd_ref')
def test_attach_cd(self, mock_create_vbd, mock_call_xenapi):
mock_call_xenapi.return_value = None
result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
self.assertEqual(result, "vbd_ref")
mock_create_vbd.assert_called_once_with(
self.session, "vm_ref", None, 1, vbd_type='cd', read_only=True,
bootable=True, empty=True, unpluggable=False)
mock_call_xenapi.assert_called_once_with('VBD.insert', 'vbd_ref',
'vdi_ref')
class UnplugVbdTestCase(VMUtilsTestBase):
@mock.patch.object(greenthread, 'sleep')
def test_unplug_vbd_works(self, mock_sleep):
session = self.get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
self.assertEqual(0, mock_sleep.call_count)
def test_unplug_vbd_raises_unexpected_error(self):
session = self.get_fake_session()
session.XenAPI.Failure = fake.Failure
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
session.call_xenapi.side_effect = test.TestingException()
self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_detached_works(self):
error = "DEVICE_ALREADY_DETACHED"
session = self.get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
session = self.get_fake_session("")
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
    def _test_unplug_vbd_retries(self, mock_sleep, error):
        session = self.get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'
        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
        self.assertEqual(11, session.call_xenapi.call_count)
        self.assertEqual(10, mock_sleep.call_count)
    def test_unplug_vbd_retries_with_neg_val(self):
session = self.get_fake_session()
self.flags(num_vbd_unplug_retries=-1, group='xenserver')
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
@mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_retries_on_rejected(self, mock_sleep):
        self._test_unplug_vbd_retries(mock_sleep,
                                      "DEVICE_DETACH_REJECTED")
@mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_retries_on_internal_error(self, mock_sleep):
        self._test_unplug_vbd_retries(mock_sleep,
                                      "INTERNAL_ERROR")
@mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_retries_on_missing_pv_drivers_error(self, mock_sleep):
        self._test_unplug_vbd_retries(mock_sleep,
                                      "VM_MISSING_PV_DRIVERS")
class VDIOtherConfigTestCase(VMUtilsTestBase):
"""Tests to ensure that the code is populating VDI's `other_config`
    attribute with the correct metadata.
"""
def setUp(self):
super(VDIOtherConfigTestCase, self).setUp()
class _FakeSession(object):
def call_xenapi(self, operation, *args, **kwargs):
# VDI.add_to_other_config -> VDI_add_to_other_config
method = getattr(self, operation.replace('.', '_'), None)
if method:
return method(*args, **kwargs)
self.operation = operation
self.args = args
self.kwargs = kwargs
self.session = _FakeSession()
self.context = context.get_admin_context()
self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
'name': 'myinstance'}
def test_create_vdi(self):
# Some images are registered with XenServer explicitly by calling
# `create_vdi`
vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
'myvdi', 'root', 1024, read_only=True)
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, self.session.args[0]['other_config'])
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'fake-uuid'}})
def test_create_image(self, mock_vm_utils):
# Other images are registered implicitly when they are dropped into
# the SR by a dom0 plugin or some other process
self.flags(cache_images='none', group='xenserver')
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
# Stubbing on the session object and not class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
vm_utils.create_image(self.context, self.session, self.fake_instance,
'myvdi', 'image1', vm_utils.ImageType.DISK_VHD,
'image_handler')
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
@mock.patch.object(os_xenapi.client.vm_management, 'receive_vhd')
@mock.patch.object(vm_utils, 'scan_default_sr')
@mock.patch.object(vm_utils, 'get_sr_path')
def test_import_migrated_vhds(self, mock_sr_path, mock_scan_sr,
mock_recv_vhd):
# Migrated images should preserve the `other_config`
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
# Stubbing on the session object and not class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
mock_sr_path.return_value = {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
vm_utils._import_migrated_vhds(self.session, self.fake_instance,
"disk_label", "root", "vdi_label")
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
mock_scan_sr.assert_called_once_with(self.session)
mock_recv_vhd.assert_called_with(
self.session, "disk_label",
{'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}, mock.ANY)
mock_sr_path.assert_called_once_with(self.session)
class GenerateDiskTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch('nova.privsep.fs.mkfs',
                side_effect=test.TestingException())
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_with_no_fs_given(self, mock_create_vbd,
mock_create_vdi, mock_findsr,
mock_dom0ref, mock_mkfs,
mock_attached_here):
session = self.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'user', 10, None, None)
mock_attached_here.assert_called_once_with(session, 'vdi_ref',
read_only=False,
dom0=True)
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch('nova.privsep.fs.mkfs')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils.utils, 'make_dev_path',
return_value='/dev/fake_devp1')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_swap(self, mock_create_vbd, mock_make_path,
mock_create_vdi,
mock_findsr, mock_dom0ref, mock_mkfs,
mock_attached_here):
session = self.get_fake_session()
vdi_dev = mock.MagicMock()
mock_attached_here.return_value = vdi_dev
vdi_dev.__enter__.return_value = 'fakedev'
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'user', 10, 'swap',
'swap-1')
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False,
dom0=True)
# As swap is supported in dom0, mkfs will run there
session.call_plugin_serialized.assert_any_call(
'partition_utils.py', 'mkfs', 'fakedev', '1', 'swap', 'swap-1')
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch('nova.privsep.fs.mkfs')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils.utils, 'make_dev_path',
return_value='/dev/fake_devp1')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_ephemeral(self, mock_create_vbd, mock_make_path,
mock_create_vdi, mock_findsr,
mock_dom0ref, mock_mkfs,
mock_attached_here):
session = self.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'ephemeral', 10, 'ext4',
'ephemeral-1')
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False,
dom0=True)
# As ext4 is not supported in dom0, mkfs will run in domU
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False)
mock_mkfs.assert_called_with('ext4', '/dev/fake_devp1',
'ephemeral-1')
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, '_get_dom0_ref',
                       side_effect=test.TestingException())
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
def test_generate_disk_ensure_cleanup_called(self, mock_destroy_vdis,
mock_dom0ref,
mock_create_vdi,
mock_findsr):
session = self.get_fake_session()
instance = {'uuid': 'fake_uuid'}
self.assertRaises(test.TestingException, vm_utils._generate_disk,
session, instance, None, '2', 'name', 'user', 10,
None, None)
mock_destroy_vdis.assert_called_once_with(session, ['vdi_ref'])
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_ephemeral_no_vmref(self, mock_create_vbd,
mock_dom0_ref,
mock_attached_here,
mock_create_vdi,
mock_findsr):
session = self.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vdi_ref = vm_utils._generate_disk(
session, instance,
None, None, 'name', 'user', 10, None, None)
mock_attached_here.assert_called_once_with(session, 'vdi_ref',
read_only=False, dom0=True)
self.assertFalse(mock_create_vbd.called)
@mock.patch.object(vm_utils, '_generate_disk')
class GenerateEphemeralTestCase(VMUtilsTestBase):
def setUp(self):
super(GenerateEphemeralTestCase, self).setUp()
self.session = "session"
self.instance = "instance"
self.vm_ref = "vm_ref"
self.name_label = "name"
self.ephemeral_name_label = "name ephemeral"
self.userdevice = 4
self.fs_label = "ephemeral"
def test_get_ephemeral_disk_sizes_simple(self, mock_generate_disk):
result = vm_utils.get_ephemeral_disk_sizes(20)
expected = [20]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_three_disks_2000(self,
mock_generate_disk):
result = vm_utils.get_ephemeral_disk_sizes(4030)
expected = [2000, 2000, 30]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_two_disks_1024(self, mock_generate_disk):
result = vm_utils.get_ephemeral_disk_sizes(2048)
expected = [1024, 1024]
self.assertEqual(expected, list(result))
def test_generate_ephemeral_adds_one_disk(self, mock_generate_disk):
mock_generate_disk.return_value = self.userdevice
vm_utils.generate_ephemeral(
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 20)
mock_generate_disk.assert_called_once_with(
self.session, self.instance, self.vm_ref, str(self.userdevice),
self.ephemeral_name_label, 'ephemeral', 20480, None, self.fs_label)
def test_generate_ephemeral_adds_multiple_disks(self, mock_generate_disk):
mock_generate_disk.side_effect = [self.userdevice,
self.userdevice + 1,
self.userdevice + 2]
vm_utils.generate_ephemeral(
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4030)
mock_generate_disk.assert_has_calls([
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice), self.ephemeral_name_label,
'ephemeral', 2048000, None, self.fs_label),
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice + 1),
self.ephemeral_name_label + " (1)",
'ephemeral', 2048000, None, self.fs_label + "1"),
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice + 2),
self.ephemeral_name_label + " (2)",
'ephemeral', 30720, None, self.fs_label + "2")])
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
def test_generate_ephemeral_cleans_up_on_error(
self, mock_safe_destroy_vdis, mock_generate_disk):
mock_generate_disk.side_effect = [self.userdevice,
self.userdevice + 1,
exception.NovaException]
self.assertRaises(
exception.NovaException, vm_utils.generate_ephemeral,
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4096)
mock_safe_destroy_vdis.assert_called_once_with(self.session, [4, 5])
mock_generate_disk.assert_has_calls([
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice), self.ephemeral_name_label,
'ephemeral', 1048576, None, self.fs_label),
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice + 1),
self.ephemeral_name_label + " (1)",
'ephemeral', 1048576, None, self.fs_label + "1"),
mock.call(self.session, self.instance, self.vm_ref,
str(self.userdevice + 2),
"name ephemeral (2)",
'ephemeral', 1048576, None, 'ephemeral2')])
@mock.patch.object(vm_utils, '_write_partition')
@mock.patch.object(vm_utils.utils, 'temporary_chown')
@mock.patch.object(vm_utils.utils, 'make_dev_path', return_value='some_path')
class StreamDiskTestCase(VMUtilsTestBase):
def setUp(self):
super(StreamDiskTestCase, self).setUp()
# NOTE(matelakat): This might hide the fail reason, as test runners
# are unhappy with a mocked out open.
self.image_service_func = mock.Mock()
def test_non_ami(self, mock_make_dev_path, mock_temporary_chown,
mock_write_partition):
mock_temporary_chown.return_value.__enter__.return_value = None
mock_open = mock.mock_open()
with mock.patch.object(six.moves.builtins, 'open', mock_open):
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.KERNEL, None, 'dev')
mock_make_dev_path.assert_called_once_with('dev')
mock_temporary_chown.assert_called_once_with('some_path')
mock_write_partition.assert_not_called()
mock_open.assert_called_once_with('some_path', 'wb')
fake_file = mock_open()
fake_file.seek.assert_called_once_with(0)
self.image_service_func.assert_called_once_with(fake_file)
def test_ami_disk(self, mock_make_dev_path, mock_temporary_chown,
mock_write_partition):
mock_temporary_chown.return_value.__enter__.return_value = None
mock_open = mock.mock_open()
with mock.patch.object(six.moves.builtins, 'open', mock_open):
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.DISK, 100, 'dev')
mock_write_partition.assert_called_once_with("session", 100, 'dev')
mock_make_dev_path.assert_called_once_with('dev')
mock_temporary_chown.assert_called_once_with('some_path')
mock_open.assert_called_once_with('some_path', 'wb')
fake_file = mock_open()
fake_file.seek.assert_called_once_with(vm_utils.MBR_SIZE_BYTES)
self.image_service_func.assert_called_once_with(fake_file)
@mock.patch('os_xenapi.client.session.XenAPISession.call_xenapi')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
class VMUtilsSRPath(VMUtilsTestBase):
def setUp(self):
super(VMUtilsSRPath, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='http://localhost',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.session.is_local_connection = False
def test_defined(self, mock_safe_find_sr, mock_call_xenapi):
self.session.host_ref = "host_ref"
mock_call_xenapi.return_value = {'pbd_ref': {'device_config':
{'path': 'sr_path'}}}
self.assertEqual('sr_path', vm_utils.get_sr_path(self.session))
mock_safe_find_sr.assert_called_once_with(self.session)
mock_call_xenapi.assert_called_once_with(
'PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"')
def test_default(self, mock_safe_find_sr, mock_call_xenapi):
self.session.host_ref = "host_ref"
mock_call_xenapi.side_effect = [
{'pbd_ref': {'device_config': {}}},
{'uuid': 'sr_uuid', 'type': 'ext'}]
self.assertEqual('/var/run/sr-mount/sr_uuid',
vm_utils.get_sr_path(self.session))
mock_safe_find_sr.assert_called_once_with(self.session)
mock_call_xenapi.assert_has_calls([
mock.call('PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"'),
mock.call("SR.get_record", "sr_ref")])
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateKernelRamdiskTestCase, self).setUp()
self.context = "context"
self.session = FakeSession()
self.instance = {"kernel_id": None, "ramdisk_id": None}
self.name_label = "name"
self.stub_out('os_xenapi.client.session.XenAPISession.call_xenapi',
lambda *a, **k: None)
def test_create_kernel_and_ramdisk_no_create(self):
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual((None, None), result)
@mock.patch.object(uuidutils, 'generate_uuid',
side_effect=['fake_uuid1', 'fake_uuid2'])
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_and_ramdisk_create_both_cached(
self, mock_ramdisk, mock_generate_uuid):
kernel_id = "kernel"
ramdisk_id = "ramdisk"
self.instance["kernel_id"] = kernel_id
self.instance["ramdisk_id"] = ramdisk_id
mock_ramdisk.side_effect = ["k", "r"]
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", "r"), result)
mock_generate_uuid.assert_has_calls([mock.call(), mock.call()])
@mock.patch.object(uuidutils, 'generate_uuid', return_value='fake_uuid1')
@mock.patch.object(vm_utils, '_fetch_disk_image',
return_value={"kernel": {"file": "k"}})
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_and_ramdisk_create_kernel_not_cached(
self, mock_ramdisk, mock_fetch_disk_image, mock_generate_uuid):
kernel_id = "kernel"
self.instance["kernel_id"] = kernel_id
mock_ramdisk.return_value = ""
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", None), result)
mock_generate_uuid.assert_called_once_with()
mock_ramdisk.assert_called_once_with(self.session, kernel_id,
'fake_uuid1')
mock_fetch_disk_image.assert_called_once_with(
self.context, self.session, self.instance, self.name_label,
kernel_id, 0)
@mock.patch.object(uuidutils, 'generate_uuid')
@mock.patch.object(vm_utils, '_fetch_disk_image')
def _test_create_kernel_image(self, cache_images, mock_fetch_disk_image,
mock_generate_uuid):
kernel_id = "kernel"
self.instance["kernel_id"] = kernel_id
self.flags(cache_images=cache_images, group='xenserver')
if cache_images == 'all':
mock_generate_uuid.return_value = 'fake_uuid1'
else:
mock_fetch_disk_image.return_value = {
"kernel": {"file": "new_image", "uuid": None}}
result = vm_utils._create_kernel_image(self.context,
self.session,
self.instance,
self.name_label,
kernel_id, 0)
if cache_images == 'all':
self.assertEqual(result, {"kernel":
{"file": "cached_image", "uuid": None}})
mock_generate_uuid.assert_called_once_with()
mock_fetch_disk_image.assert_not_called()
else:
self.assertEqual(result, {"kernel":
{"file": "new_image", "uuid": None}})
mock_fetch_disk_image.assert_called_once_with(
self.context, self.session, self.instance, self.name_label,
kernel_id, 0)
mock_generate_uuid.assert_not_called()
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_image_cached_config(self, mock_ramdisk):
mock_ramdisk.return_value = "cached_image"
self._test_create_kernel_image('all')
mock_ramdisk.assert_called_once_with(self.session, "kernel",
"fake_uuid1")
def test_create_kernel_image_uncached_config(self):
self._test_create_kernel_image('none')
class ScanSrTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, "_scan_sr")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
mock_safe_find_sr.return_value = "sr_ref"
self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))
mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")
def test_scan_sr_works(self):
session = mock.Mock()
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
def test_scan_sr_unknown_error_fails_once(self):
session = mock.Mock()
session.XenAPI.Failure = fake.Failure
session.call_xenapi.side_effect = test.TestingException
self.assertRaises(test.TestingException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
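    # _scan_sr retries known SR_BACKEND_FAILURE_40 failures with exponential
    # backoff (2, 4, then 8 seconds) before re-raising, as verified below.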
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
session.call_xenapi.side_effect = FakeException
self.assertRaises(FakeException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(4, session.call_xenapi.call_count)
mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
def fake_call_xenapi(*args):
fake_call_xenapi.count += 1
if fake_call_xenapi.count != 2:
raise FakeException()
fake_call_xenapi.count = 0
session.call_xenapi.side_effect = fake_call_xenapi
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(2, session.call_xenapi.call_count)
mock_sleep.assert_called_once_with(2)
@mock.patch.object(flavors, 'extract_flavor',
return_value={
'memory_mb': 1024,
'vcpus': 1,
'vcpu_weight': 1.0,
})
class CreateVmTestCase(VMUtilsTestBase):
def test_vss_provider(self, mock_extract):
self.flags(vcpu_pin_set="2,3")
session = self.get_fake_session()
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="windows",
system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
vm_utils.create_vm(session, instance, "label",
"kernel", "ramdisk")
vm_rec = {
'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '', 'tags': [],
'VCPUs_max': '4',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': uuids.nova_uuid},
'name_label': 'label',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '4',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': 'true',
'acpi': 'true'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False
}
session.call_xenapi.assert_called_once_with("VM.create", vm_rec)
def test_invalid_cpu_mask_raises(self, mock_extract):
self.flags(vcpu_pin_set="asdf")
session = mock.Mock()
instance = objects.Instance(uuid=uuids.fake, system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self.assertRaises(exception.Invalid,
vm_utils.create_vm,
session, instance, "label",
"kernel", "ramdisk")
def test_destroy_vm(self, mock_extract):
session = mock.Mock()
instance = objects.Instance(uuid=uuids.fake)
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
def test_destroy_vm_silently_fails(self, mock_extract):
session = mock.Mock()
exc = test.TestingException()
session.XenAPI.Failure = test.TestingException
session.VM.destroy.side_effect = exc
instance = objects.Instance(uuid=uuids.fake)
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
class DetermineVmModeTestCase(VMUtilsTestBase):
def _fake_object(self, updates):
return fake_instance.fake_instance_obj(None, **updates)
def test_determine_vm_mode_returns_xen_mode(self):
instance = self._fake_object({"vm_mode": "xen"})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_mode(self):
instance = self._fake_object({"vm_mode": "hvm"})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_linux(self):
instance = self._fake_object({"vm_mode": None, "os_type": "linux"})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_for_windows(self):
instance = self._fake_object({"vm_mode": None, "os_type": "windows"})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_by_default(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_VHD(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD))
def test_determine_vm_mode_returns_xen_for_DISK(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK))
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
def test_vm_get_vbd_refs(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")
def test_vbd_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
session.call_xenapi.assert_called_once_with("VBD.get_record",
"vbd_ref")
def test_vdi_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.get_record",
"vdi_ref")
def test_vdi_snapshot(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.snapshot",
"vdi_ref", {})
def test_vdi_get_virtual_size(self):
session = mock.Mock()
session.call_xenapi.return_value = "123"
self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
"ref")
@mock.patch.object(vm_utils, '_get_resize_func_name')
def test_vdi_resize(self, mock_get_resize_func_name):
session = mock.Mock()
mock_get_resize_func_name.return_value = "VDI.fake"
vm_utils._vdi_resize(session, "ref", 123)
session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
mock_get_size.return_value = (1024 ** 3) - 1
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3 + 1
instance = {"uuid": "a"}
self.assertRaises(exception.ResizeError,
vm_utils.update_vdi_virtual_size,
"s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
vdi_get_rec.return_value = {}
result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
self.assertEqual(('vdi_ref', {}), result)
vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
vbd_get_rec.assert_called_once_with(session, "a")
vdi_get_rec.assert_called_once_with(session, "vdi_ref")
def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
self.assertRaises(exception.NovaException,
vm_utils.get_vdi_for_vm_safely,
session, "vm_ref", userdevice='1')
self.assertEqual([], vdi_get_rec.call_args_list)
self.assertEqual(2, len(vbd_get_rec.call_args_list))
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
def fake_vbd_get_rec(session, vbd_ref):
return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}
def fake_vdi_get_uuid(session, vdi_ref):
return vdi_ref
vm_get_vbd_refs.return_value = ["0", "2"]
vbd_get_rec.side_effect = fake_vbd_get_rec
vdi_get_uuid.side_effect = fake_vdi_get_uuid
def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
expected = ['vdi_ref_0', 'vdi_ref_2']
self.assertEqual(expected, list(result))
def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
min_userdevice=1)
expected = ["vdi_ref_2"]
self.assertEqual(expected, list(result))
class GetAllVdisTestCase(VMUtilsTestBase):
def test_get_all_vdis_in_sr(self):
def fake_get_rec(record_type, ref):
if ref == "2":
return "vdi_rec_2"
session = mock.Mock()
session.call_xenapi.return_value = ["1", "2"]
session.get_rec.side_effect = fake_get_rec
sr_ref = "sr_ref"
actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
self.assertEqual(actual, [('2', 'vdi_rec_2')])
session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
def test_snapshot_attached_here(self, mock_impl):
def fake_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
self.assertEqual("session", session)
self.assertEqual("instance", instance)
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("label", label)
self.assertEqual('0', userdevice)
self.assertIsNone(post_snapshot_callback)
yield "fake"
mock_impl.side_effect = fake_impl
with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
"label") as result:
self.assertEqual("fake", result)
mock_impl.assert_called_once_with("session", "instance", "vm_ref",
"label", '0', None)
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vdi_snapshot')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
mock_vdi_snapshot, mock_vdi_get_uuid,
mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
session = "session"
instance = {"uuid": "uuid"}
mock_callback = mock.Mock()
mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
{"SR": "sr_ref",
"uuid": "vdi_uuid"})
mock_vdi_snapshot.return_value = "snap_ref"
mock_vdi_get_uuid.return_value = "snap_uuid"
mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]
try:
with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
"label", '2', mock_callback) as result:
self.assertEqual(["a", "b"], result)
raise test.TestingException()
self.assertTrue(False)
except test.TestingException:
pass
mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
'2')
mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
"sr_ref", "vdi_ref", ['a', 'b'])
mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
mock.call(session, "snap_uuid")])
mock_callback.assert_called_once_with(
task_state="image_pending_upload")
mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
instance, ['a', 'b'], "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid"])
self.assertFalse(mock_sleep.called)
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
mock_count):
mock_count.return_value = 2
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertFalse(mock_sleep.called)
self.assertTrue(mock_count.called)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
self.assertRaises(exception.NovaException,
vm_utils._wait_for_vhd_coalesce, "session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertTrue(mock_count.called)
self.assertEqual(20, mock_sleep.call_count)
self.assertEqual(20, mock_scan_sr.call_count)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(2, mock_scan_sr.call_count)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_count_children(self, mock_get_all_vdis_in_sr):
vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
mock_get_all_vdis_in_sr.return_value = vdis
self.assertEqual(2, vm_utils._count_children('session',
'parent1', 'sr'))
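# A sketch of the counting logic test_count_children exercises (assumed
# shape, for illustration; the real helper is nova's vm_utils._count_children
# and consumes the (vdi_ref, vdi_rec) pairs yielded by _get_all_vdis_in_sr):
def _count_children_sketch(vdis, parent_uuid):
    return sum(1 for _ref, rec in vdis
               if rec.get('sm_config', {}).get('vhd-parent') == parent_uuid)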
class ImportMigratedDisksTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance)
expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
mock_root.assert_called_once_with(session, instance)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks_import_root_false(self, mock_root,
mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance,
import_root=False)
expected = {'root': None, 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
self.assertEqual(0, mock_root.call_count)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrated_root_disk(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = {"uuid": "uuid", "name": "name"}
result = vm_utils._import_migrated_root_disk("s", instance)
self.assertEqual("foo", result)
mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
"name")
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrate_ephemeral_disks(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = objects.Instance(id=1, uuid=uuids.fake)
instance.old_flavor = objects.Flavor(ephemeral_gb=4000)
result = vm_utils._import_migrate_ephemeral_disks("s", instance)
self.assertEqual({'4': 'foo', '5': 'foo'}, result)
inst_uuid = instance.uuid
inst_name = instance.name
expected_calls = [mock.call("s", instance,
"%s_ephemeral_1" % inst_uuid,
"ephemeral",
"%s ephemeral (1)" % inst_name),
mock.call("s", instance,
"%s_ephemeral_2" % inst_uuid,
"ephemeral",
"%s ephemeral (2)" % inst_name)]
self.assertEqual(expected_calls, mock_migrate.call_args_list)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_import_migrate_ephemeral_disks_use_old_flavor(self,
mock_get_sizes):
mock_get_sizes.return_value = []
instance = objects.Instance(id=1, uuid=uuids.fake, ephemeral_gb=2000)
instance.old_flavor = objects.Flavor(ephemeral_gb=4000)
vm_utils._import_migrate_ephemeral_disks("s", instance)
mock_get_sizes.assert_called_once_with(4000)
@mock.patch.object(os_xenapi.client.vm_management, 'receive_vhd')
@mock.patch.object(vm_utils, '_set_vdi_info')
@mock.patch.object(vm_utils, 'scan_default_sr')
@mock.patch.object(vm_utils, 'get_sr_path')
def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
mock_set_info, mock_recv_vhd):
session = mock.Mock()
instance = {"uuid": "uuid"}
mock_recv_vhd.return_value = {"root": {"uuid": "a"}}
session.call_xenapi.return_value = "vdi_ref"
mock_get_sr_path.return_value = "sr_path"
result = vm_utils._import_migrated_vhds(session, instance,
'chain_label', 'disk_type', 'vdi_label')
expected = {'uuid': "a", 'ref': "vdi_ref"}
self.assertEqual(expected, result)
mock_get_sr_path.assert_called_once_with(session)
mock_recv_vhd.assert_called_once_with(session, 'chain_label',
'sr_path', mock.ANY)
mock_scan_sr.assert_called_once_with(session)
session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
'vdi_label', 'disk_type', instance)
def test_get_vhd_parent_uuid_rec_provided(self):
session = mock.Mock()
vdi_ref = 'vdi_ref'
vdi_rec = {'sm_config': {}}
self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
vdi_ref,
vdi_rec))
self.assertFalse(session.call_xenapi.called)
class MigrateVHDTestCase(VMUtilsTestBase):
def _assert_transfer_called(self, session, label):
session.call_plugin_serialized.assert_called_once_with(
'migration.py', 'transfer_vhd', instance_uuid=label, host="dest",
vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_root(self, mock_trans_vhd):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2)
mock_trans_vhd.assert_called_once_with(session, "a",
"dest", "vdi_uuid", "sr_path",
2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_ephemeral(self, mock_trans_vhd):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2, 2)
mock_trans_vhd.assert_called_once_with(session, "a_ephemeral_2",
"dest", "vdi_uuid", "sr_path",
2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_converts_exceptions(self, mock_trans_vhd):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
mock_trans_vhd.side_effect = test.TestingException()
instance = {"uuid": "a"}
self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
session, instance, "vdi_uuid", "dest", "sr_path", 2)
mock_trans_vhd.assert_called_once_with(session, "a",
"dest", "vdi_uuid", "sr_path",
2)
class StripBaseMirrorTestCase(VMUtilsTestBase):
def test_strip_base_mirror_from_vdi_works(self):
session = mock.Mock()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
def test_strip_base_mirror_from_vdi_hides_error(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_xenapi.side_effect = test.TestingException()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
@mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
def test_strip_base_mirror_from_vdis(self, mock_strip):
def call_xenapi(method, arg):
if method == "VM.get_VBDs":
return ['VBD_ref_1', 'VBD_ref_2']
if method == "VBD.get_VDI":
return 'VDI' + arg[3:]
return "Unexpected call_xenapi: %s.%s" % (method, arg)
session = mock.Mock()
session.call_xenapi.side_effect = call_xenapi
vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
expected = [mock.call('VM.get_VBDs', "vm_ref"),
mock.call('VBD.get_VDI', "VBD_ref_1"),
mock.call('VBD.get_VDI', "VBD_ref_2")]
self.assertEqual(expected, session.call_xenapi.call_args_list)
expected = [mock.call(session, "VDI_ref_1"),
mock.call(session, "VDI_ref_2")]
self.assertEqual(expected, mock_strip.call_args_list)
class DeviceIdTestCase(VMUtilsTestBase):
def test_device_id_is_none_if_not_specified_in_meta_data(self):
image_meta = objects.ImageMeta.from_dict({})
session = mock.Mock()
session.product_version = (6, 1, 0)
self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'xenapi_device_id': '0002'}})
session = mock.Mock()
session.product_version = (6, 2, 0)
self.assertEqual(2,
vm_utils.get_vm_device_id(session, image_meta))
session.product_version = (6, 3, 1)
self.assertEqual(2,
vm_utils.get_vm_device_id(session, image_meta))
def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'xenapi_device_id': '0002'}})
session = mock.Mock()
session.product_version = (6, 0)
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 2 specified is not supported by "
"hypervisor version (6, 0)", exc.message)
        session.product_version = '6a'
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 2 specified is not supported by "
"hypervisor version 6a", exc.message)
class CreateVmRecordTestCase(VMUtilsTestBase):
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_linux(self, mock_extract_flavor):
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="linux")
self._test_create_vm_record(mock_extract_flavor, instance, False)
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_windows(self, mock_extract_flavor):
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="windows")
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self._test_create_vm_record(mock_extract_flavor, instance, True)
def _test_create_vm_record(self, mock_extract_flavor, instance,
is_viridian):
session = self.get_fake_session()
flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
mock_extract_flavor.return_value = flavor
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor(memory_mb=1024,
vcpus=1,
vcpu_weight=2)
vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
device_id=2)
is_viridian_str = str(is_viridian).lower()
expected_vm_rec = {
'VCPUs_params': {'cap': '0', 'weight': '2'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '',
'tags': [],
'VCPUs_max': '1',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': uuids.nova_uuid},
'name_label': 'name',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '1',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': is_viridian_str,
'acpi': 'true', 'device_id': '0002'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False}
session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
def test_list_vms(self):
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='http://localhost',
connection_password='test_pass',
group='xenserver')
fake.create_vm("foo1", "Halted")
vm_ref = fake.create_vm("foo2", "Running")
stubs.stubout_session(self, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.list_vms(driver._session))
# Will have 3 VMs - but one is Dom0 and one is not running on the host
self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
self.assertEqual(len(result), 1)
result_keys = [key for (key, value) in result]
self.assertIn(vm_ref, result_keys)
class ChildVHDsTestCase(test.NoDBTestCase):
all_vdis = [
("my-vdi-ref",
{"uuid": "my-uuid", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("non-parent",
{"uuid": "uuid-1", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("diff-parent",
{"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child",
{"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child-snap",
{"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": True, "other_config": {}}),
]
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_defaults(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
self.assertJsonEqual(['uuid-child', 'uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_only_snapshots(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_chain(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref",
["my-uuid", "other-uuid"], old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
def test_is_vdi_a_snapshot_works(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {}}
self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_base_images_false(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {"image-id": "fake"}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
vdi_rec = {"is_a_snapshot": False,
"other_config": {}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
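# A sketch of the predicate pinned down by the three tests above (assumed
# shape; the real helper is nova's vm_utils._is_vdi_a_snapshot). Cached
# Glance base images are snapshots too but must not be treated as deletable
# ones, hence the 'image-id' exclusion:
def _is_vdi_a_snapshot_sketch(vdi_rec):
    return (vdi_rec['is_a_snapshot'] and
            'image-id' not in vdi_rec['other_config'])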
class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
instance = {"uuid": "fake"}
mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
vm_utils.remove_old_snapshots("session", instance, "vm_ref")
mock_delete.assert_called_once_with("session", instance,
["uuid1", "uuid2"], "sr_ref")
mock_get.assert_called_once_with("session", "vm_ref")
mock_walk.assert_called_once_with("session", "vdi")
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
instance = {"uuid": "fake"}
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid"], "sr")
self.assertFalse(mock_child.called)
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
instance = {"uuid": "fake"}
mock_child.return_value = []
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with("session", "sr", ["uuid2"],
old_snapshots_only=True)
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
mock_destroy, mock_scan):
instance = {"uuid": "fake"}
mock_child.return_value = ["suuid1", "suuid2"]
session = mock.Mock()
session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
vm_utils._delete_snapshots_in_vdi_chain(session, instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with(session, "sr", ["uuid2"],
old_snapshots_only=True)
session.VDI.get_by_uuid.assert_has_calls([
mock.call("suuid1"), mock.call("suuid2")])
mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
mock_scan.assert_called_once_with(session, "sr")
class ResizeFunctionTestCase(test.NoDBTestCase):
def _call_get_resize_func_name(self, brand, version):
session = mock.Mock()
session.product_brand = brand
session.product_version = version
return vm_utils._get_resize_func_name(session)
def _test_is_resize(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize", result)
def _test_is_resize_online(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize_online", result)
def test_xenserver_5_5(self):
self._test_is_resize_online("XenServer", (5, 5, 0))
def test_xenserver_6_0(self):
self._test_is_resize("XenServer", (6, 0, 0))
def test_xcp_1_1(self):
self._test_is_resize_online("XCP", (1, 1, 0))
def test_xcp_1_2(self):
self._test_is_resize("XCP", (1, 2, 0))
def test_xcp_2_0(self):
self._test_is_resize("XCP", (2, 0, 0))
def test_random_brand(self):
self._test_is_resize("asfd", (1, 1, 0))
def test_default(self):
self._test_is_resize(None, None)
def test_empty(self):
self._test_is_resize("", "")
class VMInfoTests(VMUtilsTestBase):
def setUp(self):
super(VMInfoTests, self).setUp()
self.session = mock.Mock()
def test_get_power_state_valid(self):
# Save on test setup calls by having these simple tests in one method
self.session.call_xenapi.return_value = "Running"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.RUNNING)
self.session.call_xenapi.return_value = "Halted"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SHUTDOWN)
self.session.call_xenapi.return_value = "Paused"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.PAUSED)
self.session.call_xenapi.return_value = "Suspended"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SUSPENDED)
self.session.call_xenapi.return_value = "Crashed"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.CRASHED)
def test_get_power_state_invalid(self):
self.session.call_xenapi.return_value = "Invalid"
self.assertRaises(KeyError,
vm_utils.get_power_state, self.session, "ref")
_XAPI_record = {'power_state': 'Running',
'memory_static_max': str(10 << 10),
'memory_dynamic_max': str(9 << 10),
'VCPUs_max': '5'}
def test_compile_info(self):
def call_xenapi(method, *args):
if method.startswith('VM.get_') and args[0] == 'dummy':
return self._XAPI_record[method[7:]]
self.session.call_xenapi.side_effect = call_xenapi
info = vm_utils.compile_info(self.session, "dummy")
self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING),
info)
|
import json
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QModelIndex, Qt
from PyQt5.QtWidgets import QWidget, QSpacerItem, QSizePolicy, QShortcut
from PyQt5.QtGui import QKeySequence
from txplayagui.ui.library import Ui_LibraryWidget
from txplayagui.library import LibraryModel
from txplayagui.utilities import unwrapMime
class LibraryWidget(Ui_LibraryWidget, QWidget):
rescanStarted = pyqtSignal()
itemsActivated = pyqtSignal(list)
def __init__(self, parent=None):
QWidget.__init__(self, parent)
Ui_LibraryWidget.setupUi(self, self)
self.rescanButton.show()
self.scanProgressBar.hide()
self.libraryModel = LibraryModel()
self.treeView.setModel(self.libraryModel)
self.libraryModel.toggleRow.connect(self.onToggleRow)
self.rescanButton.clicked.connect(self.rescanClicked)
self.treeView.doubleClicked.connect(self.onTreeViewDoubleClicked)
self.querySearchBox.textChanged.connect(self.onQueryChanged)
self.clearSearchButton.clicked.connect(self.onQueryClear)
# shortcuts
releaseSearchboxShortcut = QShortcut(QKeySequence('Esc'), self.querySearchBox)
releaseSearchboxShortcut.setContext(Qt.WidgetShortcut)
releaseSearchboxShortcut.activated.connect(self.onReleaseSearchbox)
scrollLibraryShortcut = QShortcut(QKeySequence(Qt.Key_Down), self.querySearchBox)
scrollLibraryShortcut.setContext(Qt.WidgetShortcut)
scrollLibraryShortcut.activated.connect(self.onScrollLibrary)
activateTracksShortcut = QShortcut(QKeySequence(Qt.Key_Return), self.treeView)
activateTracksShortcut.setContext(Qt.WidgetShortcut)
activateTracksShortcut.activated.connect(self.onActivateTracks)
@pyqtSlot()
def rescanClicked(self):
from txplayagui.client import rescanLibrary
self.rescanButton.hide()
self.scanControlsLayout.removeItem(self.scanControlsLayout.itemAt(2))
self.scanProgressBar.show()
self.scanResponse = rescanLibrary()
self.scanResponse.lineReceived.connect(self.scanProgress)
self.rescanStarted.emit()
@pyqtSlot(str)
def scanProgress(self, progress):
data = json.loads(progress.rstrip())
if 'scanprogress' in data:
progress = data['scanprogress']
self.setProgress(progress)
else:
self.scanResponse.close()
self.scanResponse.deleteLater()
self.rescanFinished(data['library'])
@pyqtSlot(int, QModelIndex, bool)
def onToggleRow(self, row, parentIndex, isShown):
self.treeView.setRowHidden(row, parentIndex, not isShown)
@pyqtSlot(QModelIndex)
def onTreeViewDoubleClicked(self, index):
hashes = self._getHashes(index)
if len(hashes) == 0:
return
self.itemsActivated.emit(hashes)
@pyqtSlot(unicode)
def onQueryChanged(self, query):
if len(query) > 2:
self.libraryModel.filter(query)
elif query == '':
return self.libraryModel.showAll()
@pyqtSlot()
def onQueryClear(self):
self.querySearchBox.setText('')
self.querySearchBox.setFocus()
def onReleaseSearchbox(self):
self.setFocus()
def onScrollLibrary(self):
self.treeView.setCurrentIndex(self.libraryModel.headIndex())
self.treeView.setFocus()
def onActivateTracks(self):
collectedHashes = []
for index in self.treeView.selectedIndexes():
for hash_ in self._getHashes(index):
if hash_ not in collectedHashes:
collectedHashes.append(hash_)
if len(collectedHashes) == 0:
return
self.itemsActivated.emit(collectedHashes)
def _getHashes(self, index):
mimeData = unwrapMime(self.libraryModel.mimeData([index]))
item = mimeData['items'][0]
try:
return [item['hash']]
except KeyError:
if 'album' in item:
return self.libraryModel.albumHashes(index)
return []
def setProgress(self, value):
self.scanProgressBar.setValue(value)
def rescanFinished(self, data):
self.libraryModel.loadData(data)
self.rescanButton.show()
spacerItem = QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.scanControlsLayout.addItem(spacerItem)
self.scanProgressBar.hide()
self.scanProgressBar.setValue(0)
# apply filter if active
query = self.querySearchBox.text().lower()
if len(query) > 2:
self.libraryModel.filter(query)
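# Illustrative stand-alone launcher (not part of the original module): in the
# real application this widget is embedded in the txplaya GUI and talks to a
# running txplaya backend via txplayagui.client.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication
    def _on_activated(hashes):
        print('activated tracks: %s' % (hashes,))
    app = QApplication(sys.argv)
    library = LibraryWidget()
    library.itemsActivated.connect(_on_activated)
    library.show()
    sys.exit(app.exec_())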
|
__all__ = ['CostCalculator', 'EodMarketData', 'MarketDataColumns', 'TxnColumns', 'PositionColumns', 'PlColumns',
'TxnPlColumns']
class CostCalculator(object):
"""Define the methods necessary to be able to calculator the premium for a trade."""
def get_premium(self, qty, px, ts=None):
raise NotImplementedError()
def get_mkt_val(self, qty, px, ts=None):
raise NotImplementedError()
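class FlatFeeCostCalculator(CostCalculator):
    """Illustrative implementation only (not part of this module): premium is
    the signed notional plus a flat per-share fee. The sign convention
    (buys cost money, so premium is negative for positive qty) is an
    assumption here, not something the interface mandates."""
    def __init__(self, fee_per_share=0.):
        self.fee_per_share = fee_per_share
    def get_mkt_val(self, qty, px, ts=None):
        return qty * px
    def get_premium(self, qty, px, ts=None):
        return -self.get_mkt_val(qty, px, ts) - abs(qty) * self.fee_per_share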
class EodMarketData(object):
def get_eod_frame(self):
"""Return an end of day DataFrame with columns ('close', 'mktval', 'dvd')"""
raise NotImplementedError()
class MarketDataColumns(object):
CLOSE = 'close'
MKT_VAL = 'mkt_val'
DVDS = 'dvds'
class TxnColumns(object):
DT = 'date'
TS = 'txn_ts'
PID = 'pid'
TID = 'tid'
QTY = 'txn_qty'
PX = 'txn_px'
FEES = 'txn_fees'
PREMIUM = 'txn_premium'
OPEN_VAL = 'open_val'
POS = 'pos'
INTENT = 'txn_intent'
ACTION = 'txn_action'
DESCRIPTIONS = {
DT: 'Date-only portion of transaction',
TS: 'Timestamp of transaction',
PID: 'position id',
TID: 'trade id',
QTY: 'quantity',
PX: 'price',
FEES: 'fees',
PREMIUM: 'premium',
OPEN_VAL: 'open value of position',
POS: 'position quantity',
INTENT: 'trade intent',
ACTION: 'trade action',
}
class PlColumns(object):
DT = 'date'
DVDS = 'dvds'
FEES = 'fees'
RPL_GROSS = 'rpl_gross'
RPL = 'rpl'
UPL = 'upl'
PL = 'pl'
DESCRIPTIONS = {
DT: 'p/l date',
DVDS: 'dividends',
FEES: 'fees',
RPL_GROSS: 'realized gross p/l (TOT_VAL - OPEN_VAL)',
RPL: 'realized pl (RPL_GROSS + FEES + DVDS)',
UPL: 'unrealized pl (MKT_VAL + OPEN_VAL)',
PL: 'Total p/l (UPL + RPL)'
}
ALL = [DT, DVDS, FEES, RPL_GROSS, RPL, UPL, PL]
LTDS = [DVDS, FEES, RPL_GROSS, RPL, UPL, PL]
class TxnPlColumns(object):
DT = 'date'
PID = TxnColumns.PID
TID = TxnColumns.TID
POS = 'pos'
TXN_QTY = 'txn_qty'
TXN_PX = 'txn_px'
TXN_FEES = 'txn_fees'
TXN_PREMIUM = 'txn_premium'
TXN_INTENT = 'txn_intent'
TXN_ACTION = 'txn_action'
CLOSE_PX = 'close'
OPEN_VAL = 'open_val'
MKT_VAL = 'mkt_val'
TOT_VAL = 'total_val'
DVDS = 'dvds'
FEES = 'fees'
RPL_GROSS = 'rpl_gross'
RPL = 'rpl'
UPL = 'upl'
PL = 'pl'
DESCRIPTIONS = {
DT: 'p/l date',
POS: 'end of day position quantity',
CLOSE_PX: 'end of day closing price',
OPEN_VAL: 'open value of the position',
MKT_VAL: 'market value',
TOT_VAL: 'total of trade premiums',
DVDS: 'dividends',
FEES: 'fees',
RPL_GROSS: 'realized gross p/l (TOT_VAL - OPEN_VAL)',
RPL: 'realized pl (RPL_GROSS + FEES + DVDS)',
UPL: 'unrealized pl (MKT_VAL + OPEN_VAL)',
PL: 'Total p/l (UPL + RPL)'
}
class PositionColumns(object):
PID = 'pid'
SIDE = 'side'
OPEN_DT = 'open_dt'
CLOSE_DT = 'close_dt'
OPEN_QTY = 'open_qty'
OPEN_PX = 'open_px'
CLOSE_PX = 'close_px'
OPEN_PREMIUM = 'open_premium'
PL = 'pl'
DURATION = 'duration'
NUM_TXNS = 'ntxns'
RET = 'ret'
STATE = 'state'
|
from rtxlib.executionstrategy.ForeverStrategy import start_forever_strategy
from rtxlib.executionstrategy.StepStrategy import start_step_strategy
from rtxlib.executionstrategy.SelfOptimizerStrategy import start_self_optimizer_strategy
from rtxlib.executionstrategy.SequencialStrategy import start_sequential_strategy
from rtxlib import log_results, error, info
from rtxlib.executionstrategy.UncorrelatedSelfOptimizerStrategy import start_uncorrelated_self_optimizer_strategy
def run_execution_strategy(wf):
""" we run the correct execution strategy """
applyInitKnobs(wf)
try:
# start the right execution strategy
if wf.execution_strategy["type"] == "sequential":
log_results(wf.folder, wf.execution_strategy["knobs"][0].keys() + ["result"], append=False)
start_sequential_strategy(wf)
elif wf.execution_strategy["type"] == "self_optimizer":
log_results(wf.folder, wf.execution_strategy["knobs"].keys() + ["result"], append=False)
start_self_optimizer_strategy(wf)
elif wf.execution_strategy["type"] == "uncorrelated_self_optimizer":
log_results(wf.folder, wf.execution_strategy["knobs"].keys() + ["result"], append=False)
start_uncorrelated_self_optimizer_strategy(wf)
elif wf.execution_strategy["type"] == "step_explorer":
log_results(wf.folder, wf.execution_strategy["knobs"].keys() + ["result"], append=False)
start_step_strategy(wf)
elif wf.execution_strategy["type"] == "forever":
start_forever_strategy(wf)
except RuntimeError:
error("Stopped the whole workflow as requested by a RuntimeError")
# finished
info(">")
applyDefaultKnobs(wf)
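# For orientation, the dispatch above expects a workflow whose
# execution_strategy looks like the following (an illustrative shape inferred
# from the branches; knob names and values are placeholders):
#
#   wf.execution_strategy = {
#       "type": "step_explorer",
#       "knobs": {"some_knob": [0.0, 1.0, 0.1]},
#   }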
def applyInitKnobs(wf):
    """ apply the pre_workflow_knobs (if given) before the workflow starts """
if "pre_workflow_knobs" in wf.execution_strategy:
try:
info("> Applied the pre_workflow_knobs")
wf.change_provider["instance"] \
.applyChange(wf.change_event_creator(wf.execution_strategy["pre_workflow_knobs"]))
        except Exception:
            error("applying the pre_workflow_knobs did not work")
def applyDefaultKnobs(wf):
""" we are done, so revert to default if given """
if "post_workflow_knobs" in wf.execution_strategy:
try:
info("> Applied the post_workflow_knobs")
wf.change_provider["instance"] \
.applyChange(wf.change_event_creator(wf.execution_strategy["post_workflow_knobs"]))
        except Exception:
            error("applying the post_workflow_knobs did not work")
|
''' Pipelined bi-directional LSTM model.
This model stacks the separate biLSTM NLU and biLSTM SAP models together,
and its weights are initialized from those of the separate models. In addition,
for the SAP task, the decision threshold on the output layer is tuned
on dev data.
Author : Xuesong Yang
Email : xyang45@illinois.edu
Created Date: Dec. 31, 2016
'''
import numpy as np
from utils import checkExistence, get_windowedVec, eval_intentPredict, getActPred
from AgentActClassifyingModel import writeUtterActTxt
from DataSetCSVagentActPred import DataSetCSVagentActPred
import os
import argparse
def load_model_NLU(model_weights, test_data):
from SlotTaggingModel_multitask import SlotTaggingModel
params = ['train_data', 'dev_data', 'epoch_nb', 'batch_size', 'embedding_size', 'hidden_size',
'dropout_ratio', 'optimizer', 'patience', 'loss', 'test_tag_only', 'test_intent_only', 'threshold']
argparams = {key: None for key in params}
argparams['weights_fname'] = model_weights
argparams['model_folder'] = os.path.dirname(model_weights).replace('/weights', '', 1)
argparams['test_data'] = test_data
model = SlotTaggingModel(**argparams)
model.load_model()
return model
#def load_model_Policy(model_weights, test_data, threshold):
def load_model_Policy(model_weights):
from AgentActClassifyingModel import AgentActClassifying
params = ['train_data', 'dev_data', 'test_data', 'epoch_nb', 'batch_size', 'hidden_size',
'dropout_ratio', 'optimizer', 'patience', 'loss', 'threshold']
argparams = {key: None for key in params}
argparams['weights_fname'] = model_weights
argparams['model_folder'] = os.path.dirname(model_weights).replace('/weights', '', 1)
argparams['threshold'] = 1.0
# argparams['test_data'] = test_data
model = AgentActClassifying(**argparams)
model.load_model()
return model
def readTagPredTxt(tag_pred_txt, userTag2id, sample_nb, userTag_vocab_size):
checkExistence(tag_pred_txt)
indicator = np.zeros((sample_nb, userTag_vocab_size))
with open(tag_pred_txt, 'rb') as f:
for idx, line in enumerate(f):
for tag in line.strip().split():
tag = 'tag-{}'.format(tag)
if tag in userTag2id:
pos = userTag2id[tag] - 1
else:
pos = 0
indicator[idx, pos] = 1.
return indicator
def readIntentPredTxt(intent_pred_txt, userIntent2id, sample_nb, userIntent_vocab_size):
checkExistence(intent_pred_txt)
indicator = np.zeros((sample_nb, userIntent_vocab_size))
with open(intent_pred_txt, 'rb') as f:
for idx, line in enumerate(f):
for intent in line.strip().split(';'):
if intent == 'null':
continue
intent = 'intent-{}'.format(intent)
if intent in userIntent2id:
pos = userIntent2id[intent] - 1
else:
pos = 0
indicator[idx, pos] = 1.
return indicator
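# Illustrative example of the two readers above: with userTag2id mapping
# 'tag-B-food' -> 3 and 'tag-I-food' -> 4, a prediction line "B-food I-food"
# yields a row with ones at columns 2 and 3 (ids are shifted down by one;
# unknown labels fall back to column 0).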
def pipelinePrediction(test_data, tag_model_weights, intent_model_weights, act_model_weights, result_folder, tuneTh=True, threshold=None):
# load slot tagging model, and make prediction
tag_model = load_model_NLU(tag_model_weights, test_data)
tag_model.test_tag_flag = True
tag_model.model_folder = result_folder
tag_model.predict()
tag_pred_txt = '{}/test_result/tag_{}.pred'.format(tag_model.model_folder, os.path.basename(tag_model_weights).split('_')[0])
tag_pred_indicator = readTagPredTxt(tag_pred_txt, test_data.userTag2id,
len(test_data.userTag_txt), test_data.userTag_vocab_size)
# load user intent model and make prediction
intent_model = load_model_NLU(intent_model_weights, test_data)
intent_model.test_intent_flag = True
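    # NOTE: threshold_intent here is the module-level global assigned in
    # __main__, not the ``threshold`` parameter of this function (that one
    # configures the agent-act model further down).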
intent_model.threshold = threshold_intent
intent_model.model_folder = result_folder
intent_model.predict()
intent_pred_txt = '{}/test_result/intent_{}.pred'.format(intent_model.model_folder, os.path.basename(intent_model_weights).split('_')[0])
intent_pred_indicator = readIntentPredTxt(intent_pred_txt, test_data.userIntent2id,
len(test_data.userIntent_txt), test_data.userIntent_vocab_size)
# merge indicators of slot tagging and user intents, and generate windowed tagIntent matrix
assert len(tag_pred_indicator) == len(intent_pred_indicator), 'sample_nb is not equal.'
nlu_vecBin = np.hstack((tag_pred_indicator, intent_pred_indicator))
# load agent act model and make prediction
act_model = load_model_Policy(act_model_weights)
act_model.model_folder = result_folder
nlu_vecBin_windowed = get_windowedVec(nlu_vecBin, act_model.window_size)
if tuneTh:
# tune threshold
print('Tuning threshold on Dev ...')
act_probs = act_model.model.predict(nlu_vecBin_windowed)
precision, recall, fscore, accuracy_frame, act_threshold = eval_intentPredict(act_probs, test_data.agentAct_vecBin)
print('AgentActPred on Dev: precision={:.4f}, recall={:.4f}, fscore={:.4f}, accuracy_frame={:.4f}, threshold={:.4f}'.format(precision, recall, fscore, accuracy_frame, act_threshold))
dev_pred_txt = getActPred(act_probs, act_threshold, test_data.id2agentAct)
dev_results_fname = '{}/act_dev.pred'.format(act_model.model_folder)
writeUtterActTxt(test_data.userUtter_txt, dev_pred_txt, dev_results_fname)
print('Write dev results: {}'.format(dev_results_fname))
return act_threshold
else:
# make prediction based on well-tuned threshold
assert threshold is not None, 'Argument required: threshold for agent action prediction.'
act_model.threshold = threshold
act_model.test_data = test_data
act_model.test_data.userTagIntent_vecBin = nlu_vecBin_windowed
act_model.predict()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-npz', dest='npz_file', help='.npz file that contains the instance of DataSetCSVagentAct class')
parser.add_argument('--intent-weights', dest='intent_weights', help='.h5 weights for best user intent model')
parser.add_argument('--tag-weights', dest='tag_weights', help='.h5 weights for best user slot tagging model')
parser.add_argument('--act-weights', dest='act_weights', help='.h5 weights for oracle agent act model')
parser.add_argument('--intent-threshold', dest='intent_threshold', type=float, help='decision threshold for intent model')
parser.add_argument('--tune', dest='tune_threshold', action='store_true', help='tune decision threshold for act model if this option is activated.')
parser.add_argument('--act-threshold', dest='act_threshold', type=float, help='decision threshold for agent act model')
parser.add_argument('--model-folder', dest='model_folder', help='model folder')
args = parser.parse_args()
argparams = vars(args)
pid = os.getpid()
npz_file = argparams['npz_file']
intent_model_weights = argparams['intent_weights']
tag_model_weights = argparams['tag_weights']
act_model_weights = argparams['act_weights']
threshold_intent = argparams['intent_threshold']
tune_threshold = argparams['tune_threshold']
threshold_act = argparams['act_threshold']
# validate params
checkExistence(npz_file)
checkExistence(intent_model_weights)
checkExistence(tag_model_weights)
checkExistence(act_model_weights)
assert threshold_intent is not None, 'Argument required: --intent-threshold'
for key in sorted(argparams.keys()):
print('\t{}={}'.format(key, argparams[key]))
# load test data
data_npz = np.load(npz_file)
if tune_threshold:
dev_result_folder = './model/pipe_{}/dev'.format(pid)
if not os.path.exists(dev_result_folder):
os.makedirs(dev_result_folder)
print('\tdev_result_folder={}'.format(dev_result_folder))
dev_data = data_npz['dev_data'][()]
assert isinstance(dev_data, DataSetCSVagentActPred)
act_threshold = pipelinePrediction(dev_data, tag_model_weights, intent_model_weights, act_model_weights, dev_result_folder, tuneTh=True)
else:
assert threshold_act is not None, 'Argument required: --act-threshold.'
assert argparams['model_folder'] is not None, 'Argument required: --model-folder'
test_result_folder = '{}/test'.format(argparams['model_folder'])
if not os.path.exists(test_result_folder):
os.makedirs(test_result_folder)
print('\ttest_result_folder={}'.format(test_result_folder))
test_data = data_npz['test_data'][()]
assert isinstance(test_data, DataSetCSVagentActPred)
pipelinePrediction(test_data, tag_model_weights, intent_model_weights, act_model_weights, test_result_folder, tuneTh=False, threshold=threshold_act)
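# Example invocations (illustrative; all paths are placeholders):
#
#   python <this_script> --data-npz data.npz --tag-weights tag.h5 \
#       --intent-weights intent.h5 --act-weights act.h5 \
#       --intent-threshold 0.5 --tune
#
#   python <this_script> --data-npz data.npz --tag-weights tag.h5 \
#       --intent-weights intent.h5 --act-weights act.h5 \
#       --intent-threshold 0.5 --act-threshold 0.4 --model-folder ./model/pipe_123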
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import sys
import glob, os
import shutil
import cPickle
import babel.messages.pofile as pofile
# Path to the local calibre-web checkout, i.e. the location of its translation (po/mo) files
FILEPATH="D:\\Desktop\\calibre-web\\"
with open('iso639.pickle', 'rb') as f:
need_iso = cPickle.load(f)
workdir = os.getcwd()
os.chdir(FILEPATH) # .encode(sys.getfilesystemencoding()
# Extract all messages from the source code and create a template file
p = subprocess.Popen("pybabel extract --no-wrap -F babel.cfg -o messages.pot cps"
,shell=True,stdout=subprocess.PIPE, stdin=subprocess.PIPE)
p.wait()
# update all translation files with the new content of the template file
# adding --ignore-obsolete will delete all obsolete translations
p = subprocess.Popen("pybabel update --no-wrap -i "+FILEPATH+"messages.pot -d "+FILEPATH+"cps/translations".encode(sys.getfilesystemencoding()),
shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
p.wait()
# Include calibre iso639 translations of language names
out_iso = dict()
os.chdir(workdir)
invers_lang_table = [x for x in need_iso['3bto3t'].values()]
for file in glob.glob1("./translations", "*.po"):
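    # The language code is assumed to sit between a fixed 23-character
    # filename prefix and the '.po' extension (calibre's iso639 po naming).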
langcode=file[23:-3]
translateFile=open(FILEPATH+"cps\\translations\\"+langcode+"\\LC_MESSAGES\\messages.po")
mergedTranslation=pofile.read_po(translateFile,locale=langcode)
translateFile.close()
languageFile=open("./translations/"+file)
LanguageTranslation=pofile.read_po(languageFile)
languageFile.close()
print("Merging: " + langcode)
# for msg in LanguageTranslation._messages._keys:
iso_translations = dict()
# code3t = need_iso['3bto3t'].values().index(need_iso['2to3'][langcode])
# iso_translations[invers_lang_table.index(code3t)] =
for msg in LanguageTranslation:
if msg.id:
# msg=LanguageTranslation.__getitem__(msg)
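            # The first auto comment is assumed to carry the ISO 639-3 code
            # after a fixed 9-character prefix (format of calibre's iso639
            # po files).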
lCode = msg.auto_comments[0][9:]
if lCode in need_iso['codes3t']:
mergedTranslation.add(msg.id, msg.string, auto_comments=msg.auto_comments)
if msg.string:
iso_translations[lCode] = msg.string
else:
iso_translations[lCode] = msg.id
# mergedTranslation.header_comment=mergedTranslation.header_comment+LanguageTranslation.header_comment
shutil.move(os.path.join(FILEPATH,"cps\\translations\\"+langcode+"\\LC_MESSAGES\\messages.po"), os.path.join(FILEPATH,"cps\\translations\\"+langcode+"\\LC_MESSAGES\\messages_all.po"))
targetFile = open(FILEPATH + "cps\\translations\\" + langcode + "\\LC_MESSAGES\\messages.po",'w')
pofile.write_po(targetFile,mergedTranslation,ignore_obsolete=True)
targetFile.close()
out_iso[langcode]=iso_translations
# Add English to the translation table
for msg in LanguageTranslation:
if msg.id:
lCode = msg.auto_comments[0][9:]
if lCode in need_iso['codes3t']:
iso_translations[lCode] = msg.id
out_iso['en'] = iso_translations
# write language name table
with open(os.path.join(FILEPATH,'cps','translations','iso639.pickle'), 'wb') as f:
cPickle.dump(out_iso,f)
# Generate .mo files
p = subprocess.Popen("pybabel compile -d " + FILEPATH + "cps/translations".encode(sys.getfilesystemencoding()),
shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
p.wait()
# Restore the original po files (move messages_all.po back to messages.po)
# now that the merged catalogs have been compiled to .mo
for file in glob.glob1("./translations", "*.po"):
langcode=file[23:-3]
file_path = FILEPATH+"cps\\translations\\"+langcode+"\\LC_MESSAGES\\"
shutil.move(os.path.join(file_path, "messages_all.po"), os.path.join(file_path, "messages.po"))
# start all tests
# Server.startServer()
|
# Licensed under the GPLv3 - see LICENSE
import numpy as np
from astropy.utils import lazyproperty
import astropy.units as u
from ..vlbi_base.base import (make_opener, VLBIFileBase, VLBIFileReaderBase,
VLBIStreamBase, VLBIStreamReaderBase,
VLBIStreamWriterBase)
from .header import Mark4Header
from .payload import Mark4Payload
from .frame import Mark4Frame
from .file_info import Mark4FileReaderInfo
__all__ = ['Mark4FileReader', 'Mark4FileWriter',
'Mark4StreamBase', 'Mark4StreamReader', 'Mark4StreamWriter',
'open']
# Look-up table for the number of bits in a byte.
nbits = ((np.arange(256)[:, np.newaxis] >> np.arange(8) & 1)
.sum(1).astype(np.int16))
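# For example, nbits[0b10110001] == 4, since four of the eight bits are set.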
class Mark4FileReader(VLBIFileReaderBase):
"""Simple reader for Mark 4 files.
Wraps a binary filehandle, providing methods to help interpret the data,
such as `locate_frame`, `read_frame` and `get_frame_rate`.
Parameters
----------
fh_raw : filehandle
Filehandle of the raw binary data file.
    ntrack : int or None, optional
Number of Mark 4 bitstreams. Can be determined automatically as
part of locating the first frame.
decade : int or None
Decade in which the observations were taken. Can instead pass an
approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the observation time. Used only
if ``decade`` is not given.
"""
def __init__(self, fh_raw, ntrack=None, decade=None, ref_time=None):
self.ntrack = ntrack
self.decade = decade
self.ref_time = ref_time
super().__init__(fh_raw)
def __repr__(self):
return ("{name}(fh_raw={s.fh_raw}, ntrack={s.ntrack}, "
"decade={s.decade}, ref_time={s.ref_time})"
.format(name=self.__class__.__name__, s=self))
info = Mark4FileReaderInfo()
def read_header(self):
"""Read a single header from the file.
Returns
-------
header : `~baseband.mark4.Mark4Header`
"""
return Mark4Header.fromfile(self, ntrack=self.ntrack,
decade=self.decade, ref_time=self.ref_time)
    def read_frame(self, verify=True):
        """Read a single frame (header plus payload).
        Parameters
        ----------
        verify : bool, optional
            Whether to do basic checks of frame integrity. Default: `True`.
        Returns
        -------
        frame : `~baseband.mark4.Mark4Frame`
            With ``.header`` and ``.data`` properties that return the
            :class:`~baseband.mark4.Mark4Header` and data encoded in the
            frame, respectively.
        """
return Mark4Frame.fromfile(self.fh_raw, self.ntrack,
decade=self.decade, ref_time=self.ref_time,
verify=verify)
def get_frame_rate(self):
"""Determine the number of frames per second.
The frame rate is calculated from the time elapsed between the
first two frames, as inferred from their time stamps.
Returns
-------
frame_rate : `~astropy.units.Quantity`
Frames per second.
"""
with self.temporary_offset():
self.seek(0)
self.locate_frame()
header0 = self.read_header()
self.seek(header0.payload_nbytes, 1)
header1 = self.read_header()
# Mark 4 specification states frames-lengths range from 1.25 ms
# to 160 ms.
tdelta = header1.fraction[0] - header0.fraction[0]
return np.round(1 / tdelta) * u.Hz
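    # Worked example (illustrative): for 1.25 ms frames the header fractions
    # differ by 0.00125 s, giving np.round(1 / 0.00125) = 800 frames/s.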
def locate_frame(self, forward=True, maximum=None):
"""Locate the frame nearest the current position.
The search is for the following pattern:
* 32*tracks bits set at offset bytes
* 1*tracks bits unset before offset
* 32*tracks bits set at offset+2500*tracks bytes
This reflects 'sync_pattern' of 0xffffffff for a given header and one
a frame ahead, which is in word 2, plus the lsb of word 1, which is
'system_id'.
        If the file does not have ``ntrack`` set, it will be auto-determined.
Parameters
----------
forward : bool, optional
Whether to search forwards or backwards. Default: `True`.
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
offset : int or `None`
Byte offset of the next frame. `None` if the search was not
successful.
"""
fh = self.fh_raw
file_pos = fh.tell()
# Use initializer value (determines ntrack if not already given).
ntrack = self.ntrack
if ntrack is None:
fh.seek(0)
ntrack = self.determine_ntrack(maximum=maximum)
if ntrack is None:
raise ValueError("cannot determine ntrack automatically. "
"Try passing in an explicit value.")
if forward and fh.tell() >= file_pos:
return fh.tell()
fh.seek(file_pos)
nset = np.ones(32 * ntrack // 8, dtype=np.int16)
nunset = np.ones(ntrack // 8, dtype=np.int16)
frame_nbytes = ntrack * 2500
fh.seek(0, 2)
filesize = fh.tell()
if filesize < frame_nbytes:
fh.seek(file_pos)
return None
if maximum is None:
maximum = 2 * frame_nbytes
# Loop over chunks to try to find the frame marker.
step = frame_nbytes // 2
# Read a bit more at every step to ensure we don't miss a "split"
# header.
block = step + 160 * ntrack // 8
if forward:
iterate = range(max(min(file_pos, filesize - block), 0),
max(min(file_pos + maximum, filesize - block + 1),
1),
step)
else:
iterate = range(min(max(file_pos - step, 0), filesize - block),
min(max(file_pos - step - maximum - 1, -1),
filesize - block),
-step)
for frame in iterate:
fh.seek(frame)
data = np.frombuffer(fh.read(block), dtype=np.uint8)
assert len(data) == block
# Find header pattern.
databits1 = nbits[data]
nosync = np.convolve(databits1[len(nunset):] < 6, nset, 'valid')
nolow = np.convolve(databits1[:-len(nset)] > 1, nunset, 'valid')
wrong = nosync + nolow
possibilities = np.where(wrong == 0)[0]
# Check candidates by seeing whether there is a sync word
# a frame size ahead. (Note: loop can be empty.)
for possibility in possibilities[::1 if forward else -1]:
# Real start of possible header.
frame_start = frame + possibility - 63 * ntrack // 8
if (forward and frame_start < file_pos or
not forward and frame_start > file_pos):
continue
# Check there is a header following this.
check = frame_start + frame_nbytes
if check >= filesize - 32 * 2 * ntrack // 8 - len(nunset):
# But do before this one if we're beyond end of file.
check = frame_start - frame_nbytes
if check < 0: # Assume OK if only one frame fits in file.
if frame_start + frame_nbytes > filesize:
continue
else:
break
fh.seek(check + 32 * 2 * ntrack // 8)
check_data = np.frombuffer(fh.read(len(nunset)),
dtype=np.uint8)
databits2 = nbits[check_data]
if np.all(databits2 >= 6):
break # Got it!
else: # None of them worked, so do next block.
continue
fh.seek(frame_start)
return frame_start
fh.seek(file_pos)
return None
def determine_ntrack(self, maximum=None):
"""Determines the number of tracks, by seeking the next frame.
Uses `locate_frame` to look for the first occurrence of a frame from
the current position for all supported ``ntrack`` values. Returns the
first ``ntrack`` for which `locate_frame` is successful, setting
the file's ``ntrack`` property appropriately, and leaving the
file pointer at the start of the frame.
Parameters
----------
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
ntrack : int or None
Number of Mark 4 bitstreams. `None` if no frame was found.
"""
# Currently only 16, 32 and 64-track frames supported.
old_ntrack = self.ntrack
for ntrack in 16, 32, 64:
try:
self.ntrack = ntrack
if self.locate_frame(maximum=maximum) is not None:
return ntrack
except Exception:
self.ntrack = old_ntrack
raise
self.ntrack = old_ntrack
return None
def find_header(self, forward=True, maximum=None):
"""Find the nearest header from the current position.
If successful, the file pointer is left at the start of the header.
Parameters
----------
forward : bool, optional
Seek forward if `True` (default), backward if `False`.
maximum : int, optional
Maximum number of bytes forward to search through.
Default: twice the frame size (``20000 * ntrack // 8``).
Returns
-------
header : :class:`~baseband.mark4.Mark4Header` or None
Retrieved Mark 4 header, or `None` if nothing found.
"""
offset = self.locate_frame(forward=forward)
if offset is None:
return None
header = self.read_header()
self.fh_raw.seek(offset)
return header
class Mark4FileWriter(VLBIFileBase):
"""Simple writer for Mark 4 files.
Adds `write_frame` method to the VLBI binary file wrapper.
"""
def write_frame(self, data, header=None, **kwargs):
"""Write a single frame (header plus payload).
Parameters
----------
data : `~numpy.ndarray` or `~baseband.mark4.Mark4Frame`
If an array, a header should be given, which will be used to
get the information needed to encode the array, and to construct
the Mark 4 frame.
header : `~baseband.mark4.Mark4Header`
Can instead give keyword arguments to construct a header. Ignored
if payload is a :class:`~baseband.mark4.Mark4Frame` instance.
**kwargs :
If ``header`` is not given, these are used to initialize one.
"""
if not isinstance(data, Mark4Frame):
data = Mark4Frame.fromdata(data, header, **kwargs)
return data.tofile(self.fh_raw)
class Mark4StreamBase(VLBIStreamBase):
"""Base for Mark 4 streams."""
def __init__(self, fh_raw, header0, sample_rate=None, squeeze=True,
subset=(), fill_value=0., verify=True):
super().__init__(
fh_raw, header0=header0, sample_rate=sample_rate,
samples_per_frame=header0.samples_per_frame,
unsliced_shape=(header0.nchan,),
bps=header0.bps, complex_data=False, squeeze=squeeze,
subset=subset, fill_value=fill_value, verify=verify)
self._frame_rate = int(round((self.sample_rate /
self.samples_per_frame).to_value(u.Hz)))
class Mark4StreamReader(Mark4StreamBase, VLBIStreamReaderBase):
"""VLBI Mark 4 format reader.
Allows access to a Mark 4 file as a continuous series of samples. Parts
of the data stream replaced by header values are filled in.
Parameters
----------
fh_raw : filehandle
Filehandle of the raw Mark 4 stream.
sample_rate : `~astropy.units.Quantity`, optional
Number of complete samples per second, i.e. the rate at which each
channel is sampled. If `None`, will be inferred from scanning two
frames of the file.
ntrack : int or None, optional
Number of Mark 4 bitstreams. If `None` (default), will attempt to
automatically detect it by scanning the file.
decade : int or None
Decade of the observation start time (e.g. ``2010`` for 2018), needed to
remove ambiguity in the Mark 4 time stamp. Can instead pass an
approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the start time of the observations.
Used only if ``decade`` is not given.
squeeze : bool, optional
If `True` (default), remove any dimensions of length unity from
decoded data.
subset : indexing object, optional
Specific channels of the complete sample to decode (after possible
squeezing). If an empty tuple (default), all channels are read.
fill_value : float or complex, optional
Value to use for invalid or missing data. Default: 0.
verify : bool, optional
Whether to do basic checks of frame integrity when reading. The first
frame of the stream is always checked. Default: `True`.
"""
_sample_shape_maker = Mark4Payload._sample_shape_maker
def __init__(self, fh_raw, sample_rate=None, ntrack=None, decade=None,
ref_time=None, squeeze=True, subset=(), fill_value=0.,
verify=True):
if decade is None and ref_time is None:
raise TypeError("Mark 4 stream reader requires either decade or "
"ref_time to be passed in.")
# Get binary file reader.
fh_raw = Mark4FileReader(fh_raw, ntrack=ntrack, decade=decade,
ref_time=ref_time)
# Find first header, determining ntrack if needed.
header0 = fh_raw.find_header()
assert header0 is not None, (
"could not find a first frame using ntrack={}. Perhaps "
"try ntrack=None for auto-determination.".format(ntrack))
self._offset0 = fh_raw.tell()
super().__init__(
fh_raw, header0=header0, sample_rate=sample_rate,
squeeze=squeeze, subset=subset, fill_value=fill_value,
verify=verify)
# Use reference time in preference to decade so that a stream wrapping
# a decade will work.
self.fh_raw.decade = None
self.fh_raw.ref_time = self.start_time
@lazyproperty
def _last_header(self):
"""Last header of the file."""
last_header = super()._last_header
# Infer the decade, assuming the end of the file is no more than
# 4 years away from the start.
last_header.infer_decade(self.start_time)
return last_header
def _read_frame(self, index):
self.fh_raw.seek(self._offset0 + index * self.header0.frame_nbytes)
frame = self.fh_raw.read_frame(verify=self.verify)
# Set decoded value for invalid data.
frame.fill_value = self.fill_value
# TODO: add check that we got the right frame.
return frame
class Mark4StreamWriter(Mark4StreamBase, VLBIStreamWriterBase):
"""VLBI Mark 4 format writer.
Encodes and writes sequences of samples to file.
Parameters
----------
fh_raw : filehandle
Which will write filled sets of frames to storage.
header0 : `~baseband.mark4.Mark4Header`
Header for the first frame, holding time information, etc. Can instead
give keyword arguments to construct a header (see ``**kwargs``).
sample_rate : `~astropy.units.Quantity`
Number of complete samples per second, i.e. the rate at which each
channel is sampled. Needed to calculate header timestamps.
squeeze : bool, optional
If `True` (default), `write` accepts squeezed arrays as input, and
adds any dimensions of length unity.
**kwargs
If no header is given, an attempt is made to construct one from these.
For a standard header, this would include the following.
--- Header keywords : (see :meth:`~baseband.mark4.Mark4Header.fromvalues`)
time : `~astropy.time.Time`
Start time of the file. Sets bcd-encoded unit year, day, hour, minute,
second in the header.
ntrack : int
Number of Mark 4 bitstreams (equal to number of channels times
``fanout`` times ``bps``)
bps : int
Bits per elementary sample.
fanout : int
Number of tracks over which a given channel is spread out.
"""
_sample_shape_maker = Mark4Payload._sample_shape_maker
def __init__(self, fh_raw, header0=None, sample_rate=None, squeeze=True,
**kwargs):
if header0 is None:
header0 = Mark4Header.fromvalues(**kwargs)
super().__init__(fh_raw=fh_raw, header0=header0,
sample_rate=sample_rate, squeeze=squeeze)
# Set up initial payload with right shape.
samples_per_payload = (
header0.samples_per_frame * header0.payload_nbytes //
header0.frame_nbytes)
self._payload = Mark4Payload.fromdata(
np.zeros((samples_per_payload, header0.nchan), np.float32),
header0)
def _make_frame(self, frame_index):
header = self.header0.copy()
header.update(time=self.start_time + frame_index /
self._frame_rate * u.s)
# Reuse payload.
return Mark4Frame(header, self._payload)
open = make_opener('Mark4', globals(), doc="""
--- For reading a stream : (see `~baseband.mark4.base.Mark4StreamReader`)
sample_rate : `~astropy.units.Quantity`, optional
Number of complete samples per second, i.e. the rate at which each channel
is sampled. If not given, will be inferred from scanning two frames of
the file.
ntrack : int, optional
Number of Mark 4 bitstreams. If `None` (default), will attempt to
automatically detect it by scanning the file.
decade : int or None
Decade of the observation start time (e.g. ``2010`` for 2018), needed to
remove ambiguity in the Mark 4 time stamp (default: `None`). Can instead
pass an approximate ``ref_time``.
ref_time : `~astropy.time.Time` or None
Reference time within 4 years of the start time of the observations. Used
only if ``decade`` is not given.
squeeze : bool, optional
If `True` (default), remove any dimensions of length unity from
decoded data.
subset : indexing object, optional
Specific channels of the complete sample to decode (after possible
squeezing). If an empty tuple (default), all channels are read.
fill_value : float or complex, optional
Value to use for invalid or missing data. Default: 0.
verify : bool, optional
Whether to do basic checks of frame integrity when reading. The first
frame of the stream is always checked. Default: `True`.
--- For writing a stream : (see `~baseband.mark4.base.Mark4StreamWriter`)
header0 : `~baseband.mark4.Mark4Header`
Header for the first frame, holding time information, etc. Can instead
give keyword arguments to construct a header (see ``**kwargs``).
sample_rate : `~astropy.units.Quantity`
Number of complete samples per second, i.e. the rate at which each channel
is sampled. Needed to calculate header timestamps.
squeeze : bool, optional
If `True` (default), writer accepts squeezed arrays as input, and adds
any dimensions of length unity.
file_size : int or None, optional
When writing to a sequence of files, the maximum size of one file in bytes.
If `None` (default), the file size is unlimited, and only the first
file will be written to.
**kwargs
If the header is not given, an attempt will be made to construct one
with any further keyword arguments. See
:class:`~baseband.mark4.base.Mark4StreamWriter`.
Returns
-------
Filehandle
:class:`~baseband.mark4.base.Mark4FileReader` or
:class:`~baseband.mark4.base.Mark4FileWriter` (binary), or
:class:`~baseband.mark4.base.Mark4StreamReader` or
:class:`~baseband.mark4.base.Mark4StreamWriter` (stream)
Notes
-----
Although it is not generally expected to be useful for Mark 4, like for
other formats one can also pass to ``name`` a list, tuple, or subclass of
`~baseband.helpers.sequentialfile.FileNameSequencer`. For writing to multiple
files, the ``file_size`` keyword must be passed or only the first file will be
written to. One may also pass in a `~baseband.helpers.sequentialfile` object
(opened in 'rb' mode for reading or 'w+b' for writing), though for typical use
cases it is practically identical to passing in a list or template.
""")
|
#! /usr/bin/python
# -*- coding:utf-8 -*-
"""
Author: AsherYang
Email: ouyangfan1991@gmail.com
Date: 2018/5/6
Desc: Attribute class, covering category attributes and goods-detail attributes.
_id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
cate_id VARCHAR(50),
goods_id VARCHAR(50),
attr_market_year VARCHAR(20),
attr_size VARCHAR(5),
attr_color VARCHAR(10),
foreign key (goods_id) references ffstore_goods(goods_id) on delete cascade on update cascade
"""
class DbAttribute:
def __init__(self):
self._cate_id = None
self._goods_id = None
self._attr_market_year = None
self._attr_size = None
self._attr_color = None
@property
def cate_id(self):
return self._cate_id
@cate_id.setter
def cate_id(self, value):
self._cate_id = value
@property
def goods_id(self):
return self._goods_id
@goods_id.setter
def goods_id(self, value):
self._goods_id = value
@property
def attr_market_year(self):
return self._attr_market_year
@attr_market_year.setter
def attr_market_year(self, value):
self._attr_market_year = value
@property
def attr_size(self):
return self._attr_size
@attr_size.setter
def attr_size(self, value):
self._attr_size = value
@property
def attr_color(self):
return self._attr_color
@attr_color.setter
def attr_color(self, value):
self._attr_color = value
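# A minimal usage sketch (values hypothetical): populating an attribute
# record through the property setters defined above.
#
#     attr = DbAttribute()
#     attr.cate_id = '1001'
#     attr.goods_id = 'G20180506'
#     attr.attr_market_year = '2018'
#     attr.attr_size = 'XL'
#     attr.attr_color = 'red'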
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: Luis Ernesto Garcia Medina (ernesto_gm@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Childrens of Employees",
"version" : "1.0",
"author" : "Vauxoo",
"category" : "hr",
"description" : """
This module adds Date Start, SSN and children of employees
---------------------------------------------------------""",
"website" : "http://www.vauxoo.com/",
"license" : "AGPL-3",
"depends" : ["hr"],
"data" : ["hr_children_view.xml",
'security/ir.model.access.csv',],
"installable" : True,
"active" : False,
}
|
import datetime
import pytz
import re
try:
import simplejson as json
except ImportError:
import json
from functools import wraps
from flask import request, g, current_app
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from alerta.app import app, db
from alerta.app.exceptions import RejectException, RateLimit, BlackoutPeriod
from alerta.app.metrics import Counter, Timer
from alerta.plugins import Plugins
LOG = app.logger
plugins = Plugins()
reject_counter = Counter('alerts', 'rejected', 'Rejected alerts', 'Number of rejected alerts')
error_counter = Counter('alerts', 'errored', 'Errored alerts', 'Number of errored alerts')
duplicate_timer = Timer('alerts', 'duplicate', 'Duplicate alerts', 'Total time to process number of duplicate alerts')
correlate_timer = Timer('alerts', 'correlate', 'Correlated alerts', 'Total time to process number of correlated alerts')
create_timer = Timer('alerts', 'create', 'Newly created alerts', 'Total time to process number of new alerts')
pre_plugin_timer = Timer('plugins', 'prereceive', 'Pre-receive plugins', 'Total number of pre-receive plugins')
post_plugin_timer = Timer('plugins', 'postreceive', 'Post-receive plugins', 'Total number of post-receive plugins')
def jsonp(func):
"""Wraps JSONified output for JSONP requests."""
@wraps(func)
def decorated(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
data = str(func(*args, **kwargs).data)
content = str(callback) + '(' + data + ')'
mimetype = 'application/javascript'
return current_app.response_class(content, mimetype=mimetype)
else:
return func(*args, **kwargs)
return decorated
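# A minimal usage sketch (the route and view name are hypothetical): stacking
# @jsonp under the route decorator lets a JSON endpoint answer `?callback=fn`
# requests with a JSONP-wrapped payload.
#
#     @app.route('/ping', methods=['GET'])
#     @jsonp
#     def ping():
#         return current_app.response_class('{"status": "ok"}',
#                                           mimetype='application/json')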
def absolute_url(path=''):
return urljoin(request.base_url.rstrip('/'), app.config.get('BASE_URL', '') + path)
def add_remote_ip(request, alert):
if request.headers.getlist("X-Forwarded-For"):
alert.attributes.update(ip=request.headers.getlist("X-Forwarded-For")[0])
else:
alert.attributes.update(ip=request.remote_addr)
PARAMS_EXCLUDE = [
'_',
'callback',
'token',
'api-key'
]
def parse_fields(p):
params = p.copy()
query_time = datetime.datetime.utcnow()
for s in PARAMS_EXCLUDE:
if s in params:
del params[s]
if params.get('q', None):
query = json.loads(params['q'])
del params['q']
else:
query = dict()
if g.get('customer', None):
query['customer'] = g.get('customer')
page = params.get('page', 1)
if 'page' in params:
del params['page']
page = int(page)
if params.get('from-date', None):
try:
from_date = datetime.datetime.strptime(params['from-date'], '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError as e:
LOG.warning('Could not parse from-date query parameter: %s', e)
raise
from_date = from_date.replace(tzinfo=pytz.utc)
del params['from-date']
else:
from_date = None
if params.get('to-date', None):
try:
to_date = datetime.datetime.strptime(params['to-date'], '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError as e:
LOG.warning('Could not parse to-date query parameter: %s', e)
raise
to_date = to_date.replace(tzinfo=pytz.utc)
del params['to-date']
else:
to_date = query_time
to_date = to_date.replace(tzinfo=pytz.utc)
if from_date and to_date:
query['lastReceiveTime'] = {'$gt': from_date, '$lte': to_date}
elif to_date:
query['lastReceiveTime'] = {'$lte': to_date}
if params.get('duplicateCount', None):
query['duplicateCount'] = int(params.get('duplicateCount'))
del params['duplicateCount']
if params.get('repeat', None):
query['repeat'] = True if params.get('repeat', 'true') == 'true' else False
del params['repeat']
sort = list()
direction = 1
if params.get('reverse', None):
direction = -1
del params['reverse']
if params.get('sort-by', None):
for sort_by in params.getlist('sort-by'):
if sort_by in ['createTime', 'receiveTime', 'lastReceiveTime']:
sort.append((sort_by, -direction)) # reverse chronological
else:
sort.append((sort_by, direction))
del params['sort-by']
else:
sort.append(('lastReceiveTime', -direction))
group = list()
if 'group-by' in params:
group = params.get('group-by')
del params['group-by']
if 'limit' in params:
limit = params.get('limit')
del params['limit']
else:
limit = app.config['QUERY_LIMIT']
limit = int(limit)
ids = params.getlist('id')
if len(ids) == 1:
query['$or'] = [{'_id': {'$regex': '^' + ids[0]}}, {'lastReceiveId': {'$regex': '^' + ids[0]}}]
del params['id']
elif ids:
query['$or'] = [{'_id': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}, {'lastReceiveId': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}]
del params['id']
if 'fields' in params:
fields = dict([(field, True) for field in params.get('fields').split(',')])
fields.update({'resource': True, 'event': True, 'environment': True, 'createTime': True, 'receiveTime': True, 'lastReceiveTime': True})
del params['fields']
elif 'fields!' in params:
fields = dict([(field, False) for field in params.get('fields!').split(',')])
del params['fields!']
else:
fields = dict()
for field in params:
value = params.getlist(field)
if len(value) == 1:
value = value[0]
if field.endswith('!'):
if value.startswith('~'):
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$ne'] = value
else:
if value.startswith('~'):
query[field] = dict()
query[field]['$regex'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field] = value
else:
if field.endswith('!'):
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value, re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$nin'] = value
else:
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field] = dict()
query[field]['$regex'] = re.compile(value, re.IGNORECASE)
else:
query[field] = dict()
query[field]['$in'] = value
return query, fields, sort, group, page, limit, query_time
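# A worked sketch of parse_fields (request values hypothetical): a query
# string such as
#     ?status=open&severity=major&severity=minor&sort-by=createTime
# would, per the logic above, produce roughly
#     query = {'status': 'open',
#              'severity': {'$in': ['major', 'minor']},
#              'lastReceiveTime': {'$lte': <query_time>}}
#     sort = [('createTime', -1)]   # chronological fields sort newest-first
# together with the default page, limit, fields and group values.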
def process_alert(alert):
for plugin in plugins.routing(alert):
started = pre_plugin_timer.start_timer()
try:
alert = plugin.pre_receive(alert)
except (RejectException, RateLimit):
reject_counter.inc()
pre_plugin_timer.stop_timer(started)
raise
except Exception as e:
error_counter.inc()
pre_plugin_timer.stop_timer(started)
raise RuntimeError("Error while running pre-receive plug-in '%s': %s" % (plugin.name, str(e)))
if not alert:
error_counter.inc()
pre_plugin_timer.stop_timer(started)
raise SyntaxError("Plug-in '%s' pre-receive hook did not return modified alert" % plugin.name)
pre_plugin_timer.stop_timer(started)
if db.is_blackout_period(alert):
raise BlackoutPeriod("Suppressed alert during blackout period")
try:
if db.is_duplicate(alert):
started = duplicate_timer.start_timer()
alert = db.save_duplicate(alert)
duplicate_timer.stop_timer(started)
elif db.is_correlated(alert):
started = correlate_timer.start_timer()
alert = db.save_correlated(alert)
correlate_timer.stop_timer(started)
else:
started = create_timer.start_timer()
alert = db.create_alert(alert)
create_timer.stop_timer(started)
except Exception as e:
error_counter.inc()
raise RuntimeError(e)
for plugin in plugins.routing(alert):
started = post_plugin_timer.start_timer()
try:
plugin.post_receive(alert)
except Exception as e:
error_counter.inc()
post_plugin_timer.stop_timer(started)
raise RuntimeError("Error while running post-receive plug-in '%s': %s" % (plugin.name, str(e)))
post_plugin_timer.stop_timer(started)
return alert
def process_status(alert, status, text):
for plugin in plugins.routing(alert):
try:
plugin.status_change(alert, status, text)
except RejectException:
reject_counter.inc()
raise
except Exception as e:
error_counter.inc()
raise RuntimeError("Error while running status plug-in '%s': %s" % (plugin.name, str(e)))
|
from otp.ai.AIBaseGlobal import *
from otp.avatar import DistributedAvatarAI
import SuitPlannerBase
import SuitBase
import SuitDNA
from direct.directnotify import DirectNotifyGlobal
from toontown.battle import SuitBattleGlobals
class DistributedSuitBaseAI(DistributedAvatarAI.DistributedAvatarAI, SuitBase.SuitBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSuitBaseAI')
def __init__(self, air, suitPlanner):
DistributedAvatarAI.DistributedAvatarAI.__init__(self, air)
SuitBase.SuitBase.__init__(self)
self.sp = suitPlanner
self.maxHP = 10
self.currHP = 10
self.zoneId = 0
self.dna = SuitDNA.SuitDNA()
self.virtual = 0
self.skeleRevives = 0
self.maxSkeleRevives = 0
self.reviveFlag = 0
self.buildingHeight = None
return
def generate(self):
DistributedAvatarAI.DistributedAvatarAI.generate(self)
def delete(self):
self.sp = None
del self.dna
DistributedAvatarAI.DistributedAvatarAI.delete(self)
return
def requestRemoval(self):
if self.sp != None:
self.sp.removeSuit(self)
else:
self.requestDelete()
return
def setLevel(self, lvl = None):
attributes = SuitBattleGlobals.SuitAttributes[self.dna.name]
if lvl:
self.level = lvl - attributes['level'] - 1
else:
self.level = SuitBattleGlobals.pickFromFreqList(attributes['freq'])
self.notify.debug('Assigning level ' + str(lvl))
if hasattr(self, 'doId'):
self.d_setLevelDist(self.level)
hp = attributes['hp'][self.level]
self.maxHP = hp
self.currHP = hp
def getLevelDist(self):
return self.getLevel()
def d_setLevelDist(self, level):
self.sendUpdate('setLevelDist', [level])
def setupSuitDNA(self, level, type, track):
dna = SuitDNA.SuitDNA()
dna.newSuitRandom(type, track)
self.dna = dna
self.track = track
self.setLevel(level)
return None
def getDNAString(self):
if self.dna:
return self.dna.makeNetString()
else:
self.notify.debug('No dna has been created for suit %d!' % self.getDoId())
return ''
def b_setBrushOff(self, index):
self.setBrushOff(index)
self.d_setBrushOff(index)
return None
def d_setBrushOff(self, index):
self.sendUpdate('setBrushOff', [index])
def setBrushOff(self, index):
pass
def d_denyBattle(self, toonId):
self.sendUpdateToAvatarId(toonId, 'denyBattle', [])
def b_setSkeleRevives(self, num):
if num == None:
num = 0
self.setSkeleRevives(num)
self.d_setSkeleRevives(self.getSkeleRevives())
return
def d_setSkeleRevives(self, num):
self.sendUpdate('setSkeleRevives', [num])
def getSkeleRevives(self):
return self.skeleRevives
def setSkeleRevives(self, num):
if num == None:
num = 0
self.skeleRevives = num
if num > self.maxSkeleRevives:
self.maxSkeleRevives = num
return
def getMaxSkeleRevives(self):
return self.maxSkeleRevives
def useSkeleRevive(self):
self.skeleRevives -= 1
self.currHP = self.maxHP
self.reviveFlag = 1
def reviveCheckAndClear(self):
returnValue = 0
if self.reviveFlag == 1:
returnValue = 1
self.reviveFlag = 0
return returnValue
def getHP(self):
return self.currHP
def setHP(self, hp):
if hp > self.maxHP:
self.currHP = self.maxHP
else:
self.currHP = hp
return None
def b_setHP(self, hp):
self.setHP(hp)
self.d_setHP(hp)
def d_setHP(self, hp):
self.sendUpdate('setHP', [hp])
def releaseControl(self):
return None
def getDeathEvent(self):
return 'cogDead-%s' % self.doId
def resume(self):
self.notify.debug('resume, hp=%s' % self.currHP)
if self.currHP <= 0:
messenger.send(self.getDeathEvent())
self.requestRemoval()
return None
def prepareToJoinBattle(self):
pass
def b_setSkelecog(self, flag):
self.setSkelecog(flag)
self.d_setSkelecog(flag)
def setSkelecog(self, flag):
SuitBase.SuitBase.setSkelecog(self, flag)
def d_setSkelecog(self, flag):
self.sendUpdate('setSkelecog', [flag])
def isForeman(self):
return 0
def isSupervisor(self):
return 0
def setVirtual(self, virtual):
pass
def getVirtual(self):
return 0
def isVirtual(self):
return self.getVirtual()
|
from steve.backend.sqlitedb import SDB
from steve.system import System
class Constellation(object):
def __init__(self, universe, data):
self.universe = universe
self.regionID = data[0]
self.uid = data[1]
self.name = data[2]
self.x = data[3]
self.y = data[4]
self.z = data[5]
self.xMin = data[6]
self.xMax = data[7]
self.yMin = data[8]
self.yMax = data[9]
self.zMin = data[10]
self.zMax = data[11]
self.factionID = data[12]
self.radius = data[13]
self._systems = {}
@property
def system(self):
        if len(self._systems) == 0:
            query = 'SELECT * from mapSolarSystems WHERE constellationID = %s' % self.uid
for entry in SDB.queryAll(query):
system = System(self.universe, entry)
self._systems[system.name] = system
self._systems[system.uid] = system
return self._systems
@property
def region(self):
return self.universe.regions[self.regionID]
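# A minimal usage sketch (data values hypothetical): systems are loaded
# lazily on the first access and cached under both their name and uid.
#
#     constellation = Constellation(universe, row)  # `row` from mapConstellations
#     system = constellation.system['Jita']         # triggers the query once
#     same = constellation.system[system.uid]       # served from the cache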
|
#!/usr/bin/python
"""Convert certbot private_key.json to manuale's account.json
Source: https://gist.github.com/JonLundy/f25c99ee0770e19dc595
./jwk_convert.py private_key.json > private-key.asn1
openssl asn1parse -genconf private-key.asn1 -noout -out private-key.der
openssl rsa -inform DER -in private-key.der -outform PEM -out private-key.key
echo -n '{"key": "' > account.json
paste -s -d '|' private-key.key | sed -e 's/|/\\n/g' | tr -d '\n' >> account.json
echo '", "uri": "https://acme-v01.api.letsencrypt.org/acme/reg/9999999"}' >> account.json # From regr.json
"""
import sys
import json
import base64
import binascii
with open(sys.argv[1]) as fp:
PKEY = json.load(fp)
def enc(data):
    # Pad to a multiple of 4 characters ('-_' selects the base64url alphabet).
    missing_padding = -len(data) % 4
    if missing_padding:
        data += b'=' * missing_padding
    return '0x' + binascii.hexlify(base64.b64decode(data, b'-_')).upper()
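# Worked example: enc('AQAB') (the usual RSA public exponent) decodes to the
# bytes 01 00 01 and therefore returns '0x010001', i.e. 65537.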
for k, v in PKEY.items():
if k == 'kty':
continue
PKEY[k] = enc(v.encode())
print "asn1=SEQUENCE:private_key\n[private_key]\nversion=INTEGER:0"
print "n=INTEGER:{}".format(PKEY[u'n'])
print "e=INTEGER:{}".format(PKEY[u'e'])
print "d=INTEGER:{}".format(PKEY[u'd'])
print "p=INTEGER:{}".format(PKEY[u'p'])
print "q=INTEGER:{}".format(PKEY[u'q'])
print "dp=INTEGER:{}".format(PKEY[u'dp'])
print "dq=INTEGER:{}".format(PKEY[u'dq'])
print "qi=INTEGER:{}".format(PKEY[u'qi'])
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ------------------------------------
# file: corpus.py
# date: Wed February 26 21:29 2014
# author:
# Maarten Versteegh
# github.com/mwv
# maartenversteegh AT gmail DOT com
#
# Licensed under GPLv3
# ------------------------------------
"""corpus: interface functions to the transcriptions of the buckeye corpus
"""
from __future__ import division
import os
import os.path as path
from re import compile, match
from collections import namedtuple
import json
import util
datadir = path.join(os.environ['HOME'],
'data', 'BUCKEYE')
phncorrecteddir = path.join(os.environ['HOME'], 'data', 'output',
'lrec_buckeye', 'phon_corrected', 'phn')
__phonesp = compile(r"^ *(?P<end>\d+(?:\.\d*)?|\.\d+) +\d+ +(?P<symbol>.+)$")
__wordsp = compile(r"^ *(?P<end>\d+(?:\.\d*)?|\.\d+)"
                   r" +\d+ +(?P<symbol>.+?);.*$")
__triphonep = compile(r"^(?P<pre>.+?)-(?P<symbol>.+?)\+(?P<post>.+?)$")
Interval = namedtuple('Interval', ['start', 'end'])
Interval.__repr__ = lambda x: '[{0}, {1}]'.format(x.start, x.end)
FileSet = namedtuple('FileSet', ['phones', 'words', 'txt', 'wav'])
Fragment = namedtuple('Fragment', ['fname', 'phones', 'interval'])
with open('buckeye_foldings.json') as fid:
__fold = json.load(fid)
def parse_goldfile(f):
curr_fname1 = None
curr_fname2 = None
curr_fragments = []
for line in open(f):
if line.startswith('s'):
if curr_fname1 is None:
curr_fname1 = line.strip()
elif curr_fname2 is None:
curr_fname2 = line.strip()
else:
raise ValueError('attempting to read filename while filenames'
' have already been read.')
elif line.strip() == '':
for fragment in curr_fragments:
yield fragment
curr_fname1 = None
curr_fname2 = None
curr_fragments = []
else:
if curr_fname1 is None or curr_fname2 is None:
raise ValueError('attempting to read intervals while'
' filenames are None')
s = line.strip().split(' ')
phonseq = tuple(s[0].split('-'))
interval1 = Interval(float(s[1]), float(s[2]))
interval2 = Interval(float(s[3]), float(s[4]))
curr_fragments.append(Fragment(curr_fname1,
phonseq,
interval1))
curr_fragments.append(Fragment(curr_fname2,
phonseq,
interval2))
for fragment in curr_fragments:
yield fragment
def readmlf(fname):
"""Read triphone mlf"""
result = []
current_intervals = None
current_symbols = None
current_fname = None
current_contexts = None
in_file = False
for line in open(fname):
if line.startswith('"'):
current_fname = line.strip().split('/')[1].split('.')[0]
in_file = True
current_intervals = []
current_symbols = []
current_contexts = []
continue
elif line.startswith('<s>'):
# just ignore
continue
elif (line.startswith('</s>')
or line.startswith('#!MLF!#')):
# just ignore
continue
elif line.startswith('.'):
result.append((current_fname, current_symbols,
current_intervals, current_contexts))
current_fname = None
current_symbols = None
current_intervals = None
current_contexts = None
in_file = False
continue
elif not in_file:
raise ValueError('error parsing line: {0}'.format(line))
# now we are in_file and parsing interval lines
line = line.strip().split()
current_intervals.append(Interval(int(line[0]), int(line[1])))
m = match(__triphonep, line[2])
if m is None:
continue
symbol = m.group('symbol')
current_symbols.append(symbol)
pre = m.group('pre')
post = m.group('post')
current_contexts.append((pre, post))
return result
def extract_content(filename, filetype, foldphones=False):
"""For txt files, return a list of utterances.
For phones and words files, return a list of (symbol, Interval) pairs.
Arguments:
:param filename: filename
:param filetype: must be one of 'phones', 'words', 'txt'
"""
if filetype == 'txt':
s = []
for line in open(filename, 'r'):
s.append(line.strip().split(' '))
else:
s = []
start_prev = 0.0
for line in open(filename):
if filetype == 'phones':
line = line.strip().split()
if foldphones:
symbol = fold(line[2])
else:
symbol = line[2]
s.append((symbol, Interval(float(line[0]), float(line[1]))))
continue
elif filetype == 'words':
m = match(__wordsp, line)
else:
raise ValueError("filetype must be one of "
"'phones', 'words', 'txt'")
if m is None:
continue
end = float(m.group('end'))
symbol = m.group('symbol') \
.replace(';', '').replace('*', '').strip()
if symbol == '':
continue
if foldphones and filetype == 'phones':
symbol = fold(symbol)
s.append((symbol, Interval(start_prev, end)))
start_prev = end
return s
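# A minimal usage sketch (filename hypothetical): iterating over folded phone
# symbols and their time intervals from a corrected .phn file.
#
#     for symbol, interval in extract_content('s0101a.phn', 'phones',
#                                             foldphones=True):
#         print symbol, interval.start, interval.end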
def get_filesets():
for wavfile in util.rglob(datadir, '*.wav'):
if path.basename(wavfile).startswith('._'):
continue
wordsfile = path.splitext(wavfile)[0] + '.words'
txtfile = path.splitext(wavfile)[0] + '.txt'
phonesfile = path.join(phncorrecteddir,
path.splitext(path.basename(wavfile))[0]
+ '.phn')
# phonesfile = path.splitext(wavfile)[0] + '.phones'
if not (path.exists(wordsfile) and
path.exists(txtfile) and
path.exists(phonesfile)):
continue
yield FileSet(wav=wavfile,
phones=phonesfile,
txt=txtfile,
words=wordsfile)
def fold(phone):
try:
return __fold[phone]
except KeyError:
return phone
def phongold():
for phnfile, _, _, _ in get_filesets():
bname = path.splitext(path.basename(phnfile))[0]
for idx, pair in enumerate(util.split(extract_content(phnfile,
'phones'),
lambda x: x[0] == '__')):
try:
phones, intervals = zip(*pair)
except ValueError as e:
print bname, pair
raise e
yield bname + '_{0}'.format(idx), phones, intervals
|
#!/usr/bin/env python
import sys
try:
import readline
except ImportError:
import pyreadline as readline
import os
import code
import rlcompleter
lib_path = os.path.abspath(os.path.join('..', 'src'))
sys.path.append(lib_path)
lib_path = os.path.abspath(os.path.join('..', '..', 'ARSDKBuildUtils', 'Utils', 'Python'))
sys.path.append(lib_path)
from Bybop_Discovery import *
import Bybop_Device
print('Searching for devices')
from zeroconf import ZeroconfServiceTypes
print('\n'.join(ZeroconfServiceTypes.find()))
print('done.')
discovery = Discovery([DeviceID.BEBOP_DRONE, DeviceID.JUMPING_SUMO, DeviceID.AIRBORNE_NIGHT, DeviceID.JUMPING_NIGHT])
discovery.wait_for_change()
devices = discovery.get_devices()
#discovery.stop()
if not devices:
print('Oops ...')
sys.exit(1)
device = devices.itervalues().next()
print('Will connect to ' + get_name(device))
d2c_port = 43210
controller_type = "PC"
controller_name = "bybop shell"
drone = Bybop_Device.create_and_connect(device, d2c_port, controller_type, controller_name)
if drone is None:
print('Unable to connect to a product')
sys.exit(1)
drone.dump_state()
vars = globals().copy()
vars.update(locals())
readline.set_completer(rlcompleter.Completer(vars).complete)
readline.parse_and_bind("tab: complete")
shell = code.InteractiveConsole(vars)
# drone.jump(0) # jump forward
# drone.jump(1) # jump up
# drone.move_forward(20) # move forwards
# drone.move_forward(-20) # move backwards
# drone.move(0,50) # turn right?
# drone.move(0,-50) # turn left?
# drone.spin() # spin around
# drone.simpleAnimation(0)
# drone.simpleAnimation(9)
# Currently known values:
# - 0 : stop
# - 1 : spin
# - 2 : tap
# - 3 : slowshake
# - 4 : metronome
# - 5 : ondulation
# - 6 : spinjump
# - 7 : spintoposture
# - 8 : spiral
# - 9 : slalom
# """
shell.interact()
drone.stop()
|
from functools import partial
from graphql.utilities import build_schema
from graphql.validation.rules.unique_directive_names import UniqueDirectiveNamesRule
from .harness import assert_sdl_validation_errors
assert_errors = partial(assert_sdl_validation_errors, UniqueDirectiveNamesRule)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_unique_directive_names():
def no_directive():
assert_valid(
"""
type Foo
"""
)
def one_directive():
assert_valid(
"""
directive @foo on SCHEMA
"""
)
def many_directives():
assert_valid(
"""
directive @foo on SCHEMA
directive @bar on SCHEMA
directive @baz on SCHEMA
"""
)
def directive_and_non_directive_definitions_named_the_same():
assert_valid(
"""
query foo { __typename }
fragment foo on foo { __typename }
type foo
directive @foo on SCHEMA
"""
)
def directives_named_the_same():
assert_errors(
"""
directive @foo on SCHEMA
directive @foo on SCHEMA
""",
[
{
"message": "There can be only one directive named '@foo'.",
"locations": [(2, 24), (4, 24)],
}
],
)
def adding_new_directive_to_existing_schema():
schema = build_schema("directive @foo on SCHEMA")
assert_valid("directive @bar on SCHEMA", schema=schema)
def adding_new_directive_with_standard_name_to_existing_schema():
schema = build_schema("type foo")
assert_errors(
"directive @skip on SCHEMA",
[
{
"message": "Directive '@skip' already exists in the schema."
" It cannot be redefined.",
"locations": [(1, 12)],
}
],
schema,
)
def adding_new_directive_to_existing_schema_with_same_named_type():
schema = build_schema("type foo")
assert_valid("directive @foo on SCHEMA", schema=schema)
def adding_conflicting_directives_to_existing_schema():
schema = build_schema("directive @foo on SCHEMA")
assert_errors(
"directive @foo on SCHEMA",
[
{
"message": "Directive '@foo' already exists in the schema."
" It cannot be redefined.",
"locations": [(1, 12)],
}
],
schema,
)
|
# The Kano World stage
#
# Copyright (C) 2015 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
import threading
import subprocess
from gi.repository import Gtk, GLib
from kano_init_flow.stage import Stage
from kano_init_flow.ui.scene import Scene, Placement
from kano.logging import logger
from kano.network import is_internet
class KanoWorld(Stage):
"""
The Kano World registration stage
"""
id = 'kano-world'
_root = __file__
def __init__(self, ctl):
super(KanoWorld, self).__init__(ctl)
self._ctl = ctl
def first_scene(self):
if not is_internet():
self.next_stage()
return
s = self._setup_first_scene()
self._ctl.main_window.push(s)
def next_stage(self):
self._ctl.next_stage()
def _setup_first_scene(self):
self._is_on = False
self._scene = scene = Scene(self._ctl.main_window)
scene.set_background(
self.media_path('world-registration-scene-1600x1200.png'),
self.media_path('world-registration-scene-1920x1080.png')
)
scene.add_profile_icon()
scene.add_widget(
Gtk.Image.new_from_file(self.media_path('rocket-animation-file-medium.gif')),
Placement(0.05, 0.5),
Placement(0.05, 0.5)
)
scene.add_widget(
Gtk.Image.new_from_file(self.media_path('world-widget-animation.gif')),
Placement(0.95, 0.05, 0),
Placement(0.95, 0.05, 0)
)
# Launch the settings on top
thread = threading.Thread(target=self.launch_registration)
thread.daemon = True
thread.start()
return scene
def launch_registration(self):
try:
p = subprocess.Popen(['/usr/bin/kano-login', '-r'])
p.wait()
except Exception:
logger.debug("kano-login failed to launch")
GLib.idle_add(self.next_stage)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
database_opts = [
cfg.StrOpt('sqlite_db',
deprecated_group='DEFAULT',
default='rally.sqlite',
help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
deprecated_group='DEFAULT',
default=True,
help='If True, SQLite uses synchronous mode'),
cfg.StrOpt('backend',
default='sqlalchemy',
deprecated_name='db_backend',
deprecated_group='DEFAULT',
help='The backend to use for db'),
cfg.StrOpt('connection',
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='Timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='Maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='Interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
cfg.BoolOpt('use_db_reconnect',
default=False,
help='Enable the experimental use of database reconnect '
'on connection lost'),
cfg.IntOpt('db_retry_interval',
default=1,
help='seconds between db connection retries'),
cfg.BoolOpt('db_inc_retry_interval',
default=True,
help='Whether to increase interval between db connection '
'retries, up to db_max_retry_interval'),
cfg.IntOpt('db_max_retry_interval',
default=10,
help='max seconds between db connection retries, if '
'db_inc_retry_interval is enabled'),
cfg.IntOpt('db_max_retries',
default=20,
help='maximum db connection retries before error is raised. '
'(setting -1 implies an infinite retry count)'),
]
CONF = cfg.CONF
CONF.register_opts(database_opts, 'database')
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def list_opts():
"""Returns a list of oslo.config options available in the library.
The returned list includes all oslo.config options which may be registered
at runtime by the library.
Each element of the list is a tuple. The first element is the name of the
group under which the list of elements in the second element will be
registered. A group name of None corresponds to the [DEFAULT] group in
config files.
The purpose of this is to allow tools like the Oslo sample config file
generator to discover the options exposed to users by this library.
:returns: a list of (group_name, opts) tuples
"""
return [('database', copy.deepcopy(database_opts))]
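# A minimal usage sketch (the connection string is hypothetical): once the
# options are registered above, callers read them from the 'database' group.
#
#     set_defaults(sql_connection='sqlite:///rally.sqlite',
#                  sqlite_db='rally.sqlite')
#     print(CONF.database.connection)    # 'sqlite:///rally.sqlite'
#     print(CONF.database.max_retries)   # 10, unless overridden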
|
#! /usr/bin/env python3.4
# -*- coding:utf-8 -*-
#
# Krypton - A little tool for GAMESS (US) users
#
# Copyright (C) 2012-20.. Mathias M.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os import listdir, path, makedirs
from lib.config import *
from lib.parser import extract_basis_set, extract_ECPs
################################################################################
def add_basis_set(bsid, basis_set_file):
"""
Add the basis set of a basis set file from the BSE portal into the folder
used as database.
bsid : ID to use for the basis set (STO-3G, 6-31G, etc.)
basis_set_file : GAMESS (US) input file from the BSE portal
"""
basis_set = extract_basis_set(basis_set_file)
ECPs = extract_ECPs(basis_set_file)
elements = list()
if bsid in listdir(DB):
elements = get_elements(bsid)
else:
makedirs(DB+"/"+bsid)
for element, coeffs in basis_set.items():
if element not in elements:
with open(DB+"/"+bsid+"/"+element+".txt", "w") as f:
for coeff in coeffs:
f.write(coeff+"\n")
if ECPs:
if "ECP" not in listdir(DB+"/"+bsid):
makedirs(DB+"/"+bsid+"/ECP")
elements = get_elements(bsid, True)
for element, coeffs in ECPs.items():
if element not in elements:
with open(DB+"/"+bsid+"/ECP/"+element+".txt", "w") as f:
for coeff in coeffs:
f.write(coeff+"\n")
################################################################################
def load_basis_set(bsid):
"""
Extract the basis set from the database.
bsid : ID of the basis set
return : dictionary = list of strings for each atom
example: {'H':['S 3','1 3.425 0.154','2 0.623 0.535'], 'C': ...}
"""
basis_set = dict()
if not path.isdir(DB):
raise Exception("ERROR: There is no database.")
if bsid not in listdir(DB):
raise Exception("ERROR: Basis set "+bsid+" does not exist.")
for element_file in listdir(DB+"/"+bsid):
if element_file != "ECP":
element = element_file.split(".")[0]
with open(DB+"/"+bsid+"/"+element_file) as f:
basis_set[element] = []
for line in f:
basis_set[element].append(line.rstrip())
return basis_set
################################################################################
def get_elements(bsid, ECP=False):
    """
    Return the elements available in the database for the basis set bsid.
    bsid : ID of the basis set
    ECP : if True, list the elements with ECPs instead
    return : list of elements
    """
    elements = list()
    if bsid not in listdir(DB):
        raise Exception("ERROR: Basis set "+bsid+" does not exist.")
    bs_path = DB+"/"+bsid  # avoid shadowing os.path imported above
    if ECP:
        bs_path += "/ECP"
    for element in listdir(bs_path):
        if element.endswith(".txt"):
            elements.append(element.split(".")[0])
    return elements
################################################################################
def list_basis_sets():
"""
Print the available basis sets in the database and their atoms.
"""
if not path.isdir(DB):
raise Exception("ERROR: There is no database.")
for bsid in listdir(DB):
line = bsid + " : "
for elements in get_elements(bsid):
line += elements
line += " "
if "ECP" in listdir(DB+"/"+bsid):
line += "(ECP :"
ECPs = get_elements(bsid, True)
for ECP in ECPs:
line += " "
line += ECP
line += ")"
print(line)
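################################################################################
# A minimal usage sketch (the file name is hypothetical): importing a basis
# set file downloaded from the BSE portal, then reading it back from the
# database.
#
#     add_basis_set("6-31G", "6-31G.gamess.txt")
#     basis_set = load_basis_set("6-31G")
#     print(basis_set["H"][0])   # e.g. 'S   3'
#     list_basis_sets()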
|
"""
<Program>
namespace.py
<Started>
September 2009
<Author>
Justin Samuel
<Purpose>
This is the namespace layer that ensures separation of the namespaces of
untrusted code and our code. It provides a single public function to be
used to setup the context in which untrusted code is exec'd (that is, the
context that is seen as the __builtins__ by the untrusted code).
The general idea is that any function or object that is available between
trusted and untrusted code gets wrapped in a function or object that does
validation when the function or object is used. In general, if user code
is not calling any functions improperly, neither the user code nor our
trusted code should ever notice that the objects and functions they are
dealing with have been wrapped by this namespace layer.
All of our own api functions are wrapped in NamespaceAPIFunctionWrapper
objects whose wrapped_function() method is mapped in to the untrusted
code's context. When called, the wrapped_function() method performs
argument, return value, and exception validation as well as additional
wrapping and unwrapping, as needed, that is specific to the function
that was ultimately being called. If the return value or raised exceptions
are not considered acceptable, a NamespaceViolationError is raised. If the
arguments are not acceptable, a TypeError is raised.
Note that callback functions that are passed from untrusted user code
to trusted code are also wrapped (these are arguments to wrapped API
functions, so we get to wrap them before calling the underlying function).
The reason we wrap these is so that we can intercept calls to the callback
functions and wrap arguments passed to them, making sure that handles
passed as arguments to the callbacks get wrapped before user code sees them.
The function and object wrappers have been defined based on the API as
documented at https://seattle.cs.washington.edu/wiki/RepyLibrary
Example of using this module (this is really the only way to use the module):
import namespace
usercontext = {}
namespace.wrap_and_insert_api_functions(usercontext)
safe.safe_exec(usercode, usercontext)
The above code will result in the dict usercontext being populated with keys
that are the names of the functions available to the untrusted code (such as
'open') and the values are the wrapped versions of the actual functions to be
called (such as 'emulfile.emulated_open').
Note that some functions wrapped by this module lose some python argument
flexibility. Wrapped functions can generally only have keyword args in
situations where the arguments are optional. Using keyword arguments for
required args may not be supported, depending on the implementation of the
specific argument check/wrapping/unwrapping helper functions for that
particular wrapped function. If this becomes a problem, it can be dealt with
by complicating some of the argument checking/wrapping/unwrapping code in
this module to make the checking functions more flexible in how they take
their arguments.
Implementation details:
The majority of the code in this module is made up of helper functions to do
argument checking, etc. for specific wrapped functions.
The most important parts to look at in this module for maintenance and
auditing are the following:
USERCONTEXT_WRAPPER_INFO
The USERCONTEXT_WRAPPER_INFO is a dictionary that defines the API
functions that are wrapped and inserted into the user context when
wrap_and_insert_api_functions() is called.
FILE_OBJECT_WRAPPER_INFO
LOCK_OBJECT_WRAPPER_INFO
TCP_SOCKET_OBJECT_WRAPPER_INFO
TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO
UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO
VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO
The above six dictionaries define the methods available on the wrapped
objects that are returned by wrapped functions. Additionally, timerhandle
and commhandle objects are wrapped but instances of these do not have any
public methods and so no *_WRAPPER_INFO dictionaries are defined for them.
NamespaceObjectWrapper
NamespaceAPIFunctionWrapper
The above two classes are the only two types of objects that will be
allowed in untrusted code. In fact, instances of NamespaceAPIFunctionWrapper
are never actually allowed in untrusted code. Rather, each function that
is wrapped has a single NamespaceAPIFunctionWrapper instance created
when wrap_and_insert_api_functions() is called and what is actually made
available to the untrusted code is the wrapped_function() method of each
of the corresponding NamespaceAPIFunctionWrapper instances.
NamespaceInternalError
If this error is raised anywhere (along with any other unexpected exceptions),
it should result in termination of the running program (see the except blocks
in NamespaceAPIFunctionWrapper.wrapped_function).
"""
import types
# To check if objects are thread.LockType objects.
import thread
import emulcomm
import emulfile
import emulmisc
import emultimer
import nonportable
import safe # Used to get SafeDict
import tracebackrepy
import virtual_namespace
from exception_hierarchy import *
# Save a copy of a few functions not available at runtime.
_saved_getattr = getattr
_saved_callable = callable
_saved_hash = hash
_saved_id = id
##############################################################################
# Public functions of this module to be called from the outside.
##############################################################################
def wrap_and_insert_api_functions(usercontext):
"""
This is the main public function in this module at the current time. It will
wrap each function in the usercontext dict in a wrapper with custom
restrictions for that specific function. These custom restrictions are
defined in the dictionary USERCONTEXT_WRAPPER_INFO.
"""
_init_namespace()
for function_name in USERCONTEXT_WRAPPER_INFO:
function_info = USERCONTEXT_WRAPPER_INFO[function_name]
wrapperobj = NamespaceAPIFunctionWrapper(function_info)
usercontext[function_name] = wrapperobj.wrapped_function
##############################################################################
# Helper functions for the above public function.
##############################################################################
# Whether _init_namespace() has already been called.
initialized = False
def _init_namespace():
"""
Performs one-time initialization of the namespace module.
"""
global initialized
if not initialized:
initialized = True
_prepare_wrapped_functions_for_object_wrappers()
# These dictionaries will ultimately contain keys whose names are allowed
# methods that can be called on the objects and values which are the wrapped
# versions of the functions which are exposed to users. If a dictionary
# is empty, it means no methods can be called on a wrapped object of that type.
file_object_wrapped_functions_dict = {}
lock_object_wrapped_functions_dict = {}
tcp_socket_object_wrapped_functions_dict = {}
tcp_server_socket_object_wrapped_functions_dict = {}
udp_server_socket_object_wrapped_functions_dict = {}
virtual_namespace_object_wrapped_functions_dict = {}
def _prepare_wrapped_functions_for_object_wrappers():
"""
Wraps functions that will be used whenever a wrapped object is created.
After this has been called, the dictionaries such as
file_object_wrapped_functions_dict have been populated and therefore can be
used by functions such as wrap_socket_obj().
"""
objects_tuples = [(FILE_OBJECT_WRAPPER_INFO, file_object_wrapped_functions_dict),
(LOCK_OBJECT_WRAPPER_INFO, lock_object_wrapped_functions_dict),
(TCP_SOCKET_OBJECT_WRAPPER_INFO, tcp_socket_object_wrapped_functions_dict),
(TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO, tcp_server_socket_object_wrapped_functions_dict),
(UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO, udp_server_socket_object_wrapped_functions_dict),
(VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO, virtual_namespace_object_wrapped_functions_dict)]
for description_dict, wrapped_func_dict in objects_tuples:
for function_name in description_dict:
function_info = description_dict[function_name]
wrapperobj = NamespaceAPIFunctionWrapper(function_info, is_method=True)
wrapped_func_dict[function_name] = wrapperobj.wrapped_function
##############################################################################
# Helper functions.
##############################################################################
def _handle_internalerror(message, exitcode):
"""
Terminate the running program. This is used rather than
tracebackrepy.handle_internalerror directly in order to make testing easier."""
tracebackrepy.handle_internalerror(message, exitcode)
def _is_in(obj, sequence):
"""
A helper function to do identity ("is") checks instead of equality ("==")
when using X in [A, B, C] type constructs. So you would write:
if _is_in(type(foo), [int, long]):
instead of:
if type(foo) in [int, long]:
"""
for item in sequence:
if obj is item:
return True
return False
##############################################################################
# Constants that define which functions should be wrapped and how. These are
# used by the functions wrap_and_insert_api_functions() and
# wrap_builtin_functions().
##############################################################################
class BaseProcessor(object):
"""Base type for ValueProcess and ObjectProcessor."""
class ValueProcessor(BaseProcessor):
"""
This is for simple/builtin types and combinations of them. Basically,
anything that needs to be copied when used as an argument or return
value and doesn't need to be wrapped or unwrapped as it passes through
the namespace layer.
"""
def check(self):
raise NotImplementedError
def copy(self, val):
return _copy(val)
class ObjectProcessor(BaseProcessor):
"""
This is for anything that needs to be wrapped or unwrapped (not copied)
as it passes through the namespace layer.
"""
def check(self):
raise NotImplementedError
def wrap(self, val):
raise NotImplementedError
def unwrap(self, val):
return val._wrapped__object
class Str(ValueProcessor):
"""Allows str or unicode."""
def __init__(self, maxlen=None, minlen=None):
self.maxlen = maxlen
self.minlen = minlen
def check(self, val):
if not _is_in(type(val), [str, unicode]):
raise RepyArgumentError("Invalid type %s" % type(val))
if self.maxlen is not None:
if len(val) > self.maxlen:
raise RepyArgumentError("Max string length is %s" % self.maxlen)
if self.minlen is not None:
if len(val) < self.minlen:
raise RepyArgumentError("Min string length is %s" % self.minlen)
class Int(ValueProcessor):
"""Allows int or long."""
def __init__(self, min=0):
self.min = min
def check(self, val):
if not _is_in(type(val), [int, long]):
raise RepyArgumentError("Invalid type %s" % type(val))
if val < self.min:
raise RepyArgumentError("Min value is %s." % self.min)
class NoneOrInt(ValueProcessor):
"""Allows a NoneType or an int. This doesn't enforce min limit on the
ints."""
def check(self, val):
if val is not None and not _is_in(type(val), [int, long]):
raise RepyArgumentError("Invalid type %s" % type(val))
class StrOrInt(ValueProcessor):
"""Allows a string or int. This doesn't enforce max/min/length limits on the
strings and ints."""
def check(self, val):
if not _is_in(type(val), [int, long, str, unicode]):
raise RepyArgumentError("Invalid type %s" % type(val))
class StrOrNone(ValueProcessor):
"""Allows str, unicode, or None."""
def check(self, val):
if val is not None:
Str().check(val)
class Float(ValueProcessor):
"""Allows float, int, or long."""
def __init__(self, allow_neg=False):
self.allow_neg = allow_neg
def check(self, val):
if not _is_in(type(val), [int, long, float]):
raise RepyArgumentError("Invalid type %s" % type(val))
if not self.allow_neg:
if val < 0:
raise RepyArgumentError("Must be non-negative.")
class Bool(ValueProcessor):
"""Allows bool."""
def check(self, val):
if type(val) is not bool:
raise RepyArgumentError("Invalid type %s" % type(val))
class ListOfStr(ValueProcessor):
"""Allows lists of strings. This doesn't enforce max/min/length limits on the
strings and ints."""
def check(self, val):
if not type(val) is list:
raise RepyArgumentError("Invalid type %s" % type(val))
for item in val:
Str().check(item)
class List(ValueProcessor):
"""Allows lists. The list may contain anything."""
def check(self, val):
if not type(val) is list:
raise RepyArgumentError("Invalid type %s" % type(val))
class Dict(ValueProcessor):
"""Allows dictionaries. The dictionaries may contain anything."""
def check(self, val):
if not type(val) is dict:
raise RepyArgumentError("Invalid type %s" % type(val))
class DictOfStrOrInt(ValueProcessor):
"""
Allows a dictionary that only contains string keys and str or int values.
This doesn't enforce max/min/length limits on the strings and ints.
"""
def check(self, val):
if not type(val) is dict:
raise RepyArgumentError("Invalid type %s" % type(val))
for key, value in val.items():
Str().check(key)
StrOrInt().check(value)
class Func(ValueProcessor):
"""Allows a user-defined function object."""
def check(self, val):
if not _is_in(type(val), [types.FunctionType, types.LambdaType, types.MethodType]):
raise RepyArgumentError("Invalid type %s" % type(val))
class NonCopiedVarArgs(ValueProcessor):
"""Allows any number of arguments. This must be the last arg listed. """
def check(self, val):
pass
def copy(self, val):
return val
class File(ObjectProcessor):
"""Allows File objects."""
def check(self, val):
if not isinstance(val, emulfile.emulated_file):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("file", val, file_object_wrapped_functions_dict)
class Lock(ObjectProcessor):
"""Allows Lock objects."""
def check(self, val):
if not isinstance(val, emulmisc.emulated_lock):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("lock", val, lock_object_wrapped_functions_dict)
class UDPServerSocket(ObjectProcessor):
"""Allows UDPServerSocket objects."""
def check(self, val):
if not isinstance(val, emulcomm.UDPServerSocket):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("socket", val, udp_server_socket_object_wrapped_functions_dict)
class TCPServerSocket(ObjectProcessor):
"""Allows TCPServerSocket objects."""
def check(self, val):
if not isinstance(val, emulcomm.TCPServerSocket):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("socket", val, tcp_server_socket_object_wrapped_functions_dict)
class TCPSocket(ObjectProcessor):
"""Allows TCPSocket objects."""
def check(self, val):
if not isinstance(val, emulcomm.EmulatedSocket):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("socket", val, tcp_socket_object_wrapped_functions_dict)
class VirtualNamespace(ObjectProcessor):
"""Allows VirtualNamespace objects."""
def check(self, val):
if not isinstance(val, virtual_namespace.VirtualNamespace):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("VirtualNamespace", val,
virtual_namespace_object_wrapped_functions_dict)
class SafeDict(ValueProcessor):
"""Allows SafeDict objects."""
# TODO: provide a copy function that won't actually copy so that
# references are maintained.
def check(self, val):
if not isinstance(val, safe.SafeDict):
raise RepyArgumentError("Invalid type %s" % type(val))
class DictOrSafeDict(ValueProcessor):
"""Allows SafeDict objects or regular dict objects."""
# TODO: provide a copy function that won't actually copy so that
# references are maintained.
def check(self, val):
if type(val) is not dict:
SafeDict().check(val)
# These are the functions in the user's name space excluding the builtins we
# allow. Each function is a key in the dictionary. Each value is a dictionary
# that defines the functions to be used by the wrapper when a call is
# performed. It is the same dictionary that is passed to the constructor of
# the NamespaceAPIFunctionWrapper class to create the actual wrappers.
# The public function wrap_and_insert_api_functions() uses this dictionary as
# the basis for what is populated in the user context. Any function
# defined here will be wrapped and made available to untrusted user code.
USERCONTEXT_WRAPPER_INFO = {
'gethostbyname' :
{'func' : emulcomm.gethostbyname,
'args' : [Str()],
'return' : Str()},
'getmyip' :
{'func' : emulcomm.getmyip,
'args' : [],
'return' : Str()},
'sendmessage' :
{'func' : emulcomm.sendmessage,
'args' : [Str(), Int(), Str(), Str(), Int()],
'return' : Int()},
'listenformessage' :
{'func' : emulcomm.listenformessage,
'args' : [Str(), Int()],
'return' : UDPServerSocket()},
'openconnection' :
{'func' : emulcomm.openconnection,
'args' : [Str(), Int(), Str(), Int(), Float()],
# 'raise' : [AddressBindingError, PortRestrictedError, PortInUseError,
# ConnectionRefusedError, TimeoutError, RepyArgumentError],
'return' : TCPSocket()},
'listenforconnection' :
{'func' : emulcomm.listenforconnection,
'args' : [Str(), Int()],
'return' : TCPServerSocket()},
'openfile' :
{'func' : emulfile.emulated_open,
'args' : [Str(maxlen=120), Bool()],
'return' : File()},
'listfiles' :
{'func' : emulfile.listfiles,
'args' : [],
'return' : ListOfStr()},
'removefile' :
{'func' : emulfile.removefile,
'args' : [Str(maxlen=120)],
'return' : None},
'exitall' :
{'func' : emulmisc.exitall,
'args' : [],
'return' : None},
'createlock' :
{'func' : emulmisc.createlock,
'args' : [],
'return' : Lock()},
'getruntime' :
{'func' : emulmisc.getruntime,
'args' : [],
'return' : Float()},
'randombytes' :
{'func' : emulmisc.randombytes,
'args' : [],
'return' : Str(maxlen=1024, minlen=1024)},
'createthread' :
{'func' : emultimer.createthread,
'args' : [Func()],
'return' : None},
'sleep' :
{'func' : emultimer.sleep,
'args' : [Float()],
'return' : None},
'log' :
{'func' : emulmisc.log,
'args' : [NonCopiedVarArgs()],
'return' : None},
'getthreadname' :
{'func' : emulmisc.getthreadname,
'args' : [],
'return' : Str()},
'createvirtualnamespace' :
{'func' : virtual_namespace.createvirtualnamespace,
'args' : [Str(), Str()],
'return' : VirtualNamespace()},
'getresources' :
{'func' : nonportable.get_resources,
'args' : [],
'return' : (Dict(), Dict(), List())},
'getlasterror' :
{'func' : emulmisc.getlasterror,
'args' : [],
'return' : StrOrNone()},
}
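# Editor sketch (not part of the original source): each entry above is the
# constructor argument for NamespaceAPIFunctionWrapper, defined further down
# in this file. Wrapping 'getmyip' by hand would look like the following;
# the public wrap_and_insert_api_functions() does this for every entry.
# (usercontext here is a hypothetical user-code namespace dict.)
#
#   wrapper = NamespaceAPIFunctionWrapper(USERCONTEXT_WRAPPER_INFO['getmyip'])
#   usercontext['getmyip'] = wrapper.wrapped_function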
FILE_OBJECT_WRAPPER_INFO = {
'close' :
{'func' : emulfile.emulated_file.close,
'args' : [],
'return' : None},
'readat' :
{'func' : emulfile.emulated_file.readat,
'args' : [NoneOrInt(), Int(min=0)],
'return' : Str()},
'writeat' :
{'func' : emulfile.emulated_file.writeat,
'args' : [Str(), Int(min=0)],
'return' : None},
}
TCP_SOCKET_OBJECT_WRAPPER_INFO = {
'close' :
{'func' : emulcomm.EmulatedSocket.close,
'args' : [],
'return' : Bool()},
'recv' :
{'func' : emulcomm.EmulatedSocket.recv,
'args' : [Int(min=1)],
'return' : Str()},
'send' :
{'func' : emulcomm.EmulatedSocket.send,
'args' : [Str()],
'return' : Int(min=0)},
}
# TODO: Figure out which real object should be wrapped. It doesn't appear
# to be implemented yet as there is no "getconnection" in the repy_v2 source.
TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO = {
'close' :
{'func' : emulcomm.TCPServerSocket.close,
'args' : [],
'return' : Bool()},
'getconnection' :
{'func' : emulcomm.TCPServerSocket.getconnection,
'args' : [],
'return' : (Str(), Int(), TCPSocket())},
}
UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO = {
'close' :
{'func' : emulcomm.UDPServerSocket.close,
'args' : [],
'return' : Bool()},
'getmessage' :
{'func' : emulcomm.UDPServerSocket.getmessage,
'args' : [],
'return' : (Str(), Int(), Str())},
}
LOCK_OBJECT_WRAPPER_INFO = {
'acquire' :
# A string for the target func indicates that the function by this name
# on the instance is what should be wrapped.
{'func' : 'acquire',
'args' : [Bool()],
'return' : Bool()},
'release' :
# A string for the target func indicates that the function by this name
# on the instance is what should be wrapped.
{'func' : 'release',
'args' : [],
'return' : None},
}
VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO = {
# Evaluate must take a dict or SafeDict, and can
# only return a SafeDict. We must _not_ copy the
# dict since that will screw up the references in the dict.
'evaluate' :
{'func' : 'evaluate',
'args' : [DictOrSafeDict()],
'return' : SafeDict()},
}
##############################################################################
# The classes we define from which actual wrappers are instantiated.
##############################################################################
def _copy(obj, objectmap=None):
"""
<Purpose>
Create a deep copy of an object without using the python 'copy' module.
Using copy.deepcopy() doesn't work because builtins like id and hasattr
aren't available when this is called.
<Arguments>
obj
The object to make a deep copy of.
objectmap
A mapping between original objects and the corresponding copy. This is
used to handle circular references.
<Exceptions>
TypeError
If an object is encountered that we don't know how to make a copy of.
NamespaceViolationError
If an unexpected error occurs while copying. This isn't the greatest
solution, but in general the idea is we just need to abort the wrapped
function call.
<Side Effects>
A new reference is created to every non-simple type of object. That is,
everything except objects of type str, unicode, int, etc.
<Returns>
The deep copy of obj with circular/recursive references preserved.
"""
try:
# If this is a top-level call to _copy, create a new objectmap for use
# by recursive calls to _copy.
if objectmap is None:
objectmap = {}
# If this is a circular reference, use the copy we already made.
elif _saved_id(obj) in objectmap:
return objectmap[_saved_id(obj)]
# types.InstanceType is included because the user can provide an instance
# of a class of their own in the list of callback args to settimer.
if _is_in(type(obj), [str, unicode, int, long, float, complex, bool, frozenset,
types.NoneType, types.FunctionType, types.LambdaType,
types.MethodType, types.InstanceType]):
return obj
elif type(obj) is list:
temp_list = []
# Need to save this in the objectmap before recursing because lists
# might have circular references.
objectmap[_saved_id(obj)] = temp_list
for item in obj:
temp_list.append(_copy(item, objectmap))
return temp_list
elif type(obj) is tuple:
temp_list = []
for item in obj:
temp_list.append(_copy(item, objectmap))
# I'm not 100% confident on my reasoning here, so feel free to point
# out where I'm wrong: There's no way for a tuple to directly contain
# a circular reference to itself. Instead, it has to contain, for
# example, a dict which has the same tuple as a value. In that
# situation, we can avoid infinite recursion and properly maintain
# circular references in our copies by checking the objectmap right
# after we do the copy of each item in the tuple. The existence of the
# dictionary would keep the recursion from being infinite because those
# are properly handled. That just leaves making sure we end up with
# only one copy of the tuple. We do that here by checking to see if we
# just made a copy as a result of copying the items above. If so, we
# return the one that's already been made.
if _saved_id(obj) in objectmap:
return objectmap[_saved_id(obj)]
retval = tuple(temp_list)
objectmap[_saved_id(obj)] = retval
return retval
elif type(obj) is set:
temp_list = []
# We can't just store this list object in the objectmap because it isn't
# a set yet. If it's possible to have a set contain a reference to
# itself, this could result in infinite recursion. However, sets can
# only contain hashable items so I believe this can't happen.
for item in obj:
temp_list.append(_copy(item, objectmap))
retval = set(temp_list)
objectmap[_saved_id(obj)] = retval
return retval
elif type(obj) is dict:
temp_dict = {}
# Need to save this in the objectmap before recursing because dicts
# might have circular references.
objectmap[_saved_id(obj)] = temp_dict
for key, value in obj.items():
temp_key = _copy(key, objectmap)
temp_dict[temp_key] = _copy(value, objectmap)
return temp_dict
# We don't copy certain objects. This is because copying an emulated file
# object, for example, will cause the destructor of the original one to
# be invoked, which will close the actual underlying file. As the object
# is wrapped and the client does not have access to it, it's safe to not
# wrap it.
elif isinstance(obj, (NamespaceObjectWrapper, emulfile.emulated_file,
emulcomm.EmulatedSocket, emulcomm.TCPServerSocket,
emulcomm.UDPServerSocket, thread.LockType,
virtual_namespace.VirtualNamespace)):
return obj
else:
raise TypeError("_copy is not implemented for objects of type " + str(type(obj)))
except Exception, e:
raise NamespaceInternalError("_copy failed on " + str(obj) + " with message " + str(e))
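# Editor sketch (not part of the original source): _copy preserves circular
# references, so a list that contains itself copies to a new list whose
# first element is the copy itself rather than the original.
def _example_copy_preserves_cycles():
  original = []
  original.append(original)          # original[0] is original
  duplicate = _copy(original)
  assert duplicate is not original   # a new list was made...
  assert duplicate[0] is duplicate   # ...and the cycle points at the copy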
class NamespaceInternalError(Exception):
"""Something went wrong and we should terminate."""
class NamespaceObjectWrapper(object):
"""
Instances of this class are used to wrap handles and objects returned by
api functions to the user code.
The methods that can be called on these instances are mostly limited to
what is in the allowed_functions_dict passed to the constructor. The
exception is that a simple __repr__() is defined as well as an __iter__()
and next(). However, instances won't really be iterable unless a next()
method is defined in the allowed_functions_dict.
"""
def __init__(self, wrapped_type_name, wrapped_object, allowed_functions_dict):
"""
<Purpose>
Constructor
<Arguments>
self
wrapped_type_name
The name (a string) of the type of the wrapped object. For example,
this could be "timerhandle".
wrapped_object
The actual object to be wrapped.
allowed_functions_dict
A dictionary of the allowed methods that can be called on the object.
The keys should be the names of the methods, the values are the
wrapped functions that will be called.
"""
# Only one underscore at the front so python doesn't do its own mangling
# of the name. We're not trying to keep this private in the private class
# variable sense of python where nothing is really private; instead we just
# want a double-underscore in there as extra protection against untrusted
# code being able to access the values.
self._wrapped__type_name = wrapped_type_name
self._wrapped__object = wrapped_object
self._wrapped__allowed_functions_dict = allowed_functions_dict
def __getattr__(self, name):
"""
When a method is called on an instance, we look for the method in the
allowed_functions_dict that was provided to the constructor. If there
is such a method in there, we return a function that will properly
invoke the method with the correct 'self' as the first argument.
"""
if name in self._wrapped__allowed_functions_dict:
wrapped_func = self._wrapped__allowed_functions_dict[name]
def __do_func_call(*args, **kwargs):
return wrapped_func(self._wrapped__object, *args, **kwargs)
return __do_func_call
else:
# This is the standard way of handling "it doesn't exist as far as we
# are concerned" in __getattr__() methods.
raise AttributeError, name
def __iter__(self):
"""
We provide __iter__() as part of the class rather than through __getattr__
because python won't look for the attribute in the object to determine if
the object is iterable, instead it will look directly at the class the
object is an instance of. See the docstring for next() for more info.
"""
return self
def next(self):
"""
We provide next() as part of the class rather than through __getattr__
because python won't look for the attribute in the object to determine if
the object is iterable, instead it will look directly at the class the
object is an instance of. We don't want everything that is wrapped to
be considered iterable, though, so we return a TypeError if this gets
called but there isn't a wrapped next() method.
"""
if "next" in self._wrapped__allowed_functions_dict:
return self._wrapped__allowed_functions_dict["next"](self._wrapped__object)
raise TypeError("You tried to iterate a non-iterator of type " + str(type(self._wrapped__object)))
def __repr__(self):
return "<Namespace wrapped " + self._wrapped__type_name + ": " + repr(self._wrapped__object) + ">"
def __hash__(self):
return _saved_hash(self._wrapped__object)
def __eq__(self, other):
"""In addition to __hash__, this is necessary for use as dictionary keys."""
# We could either assume "other" is a wrapped object and try to compare
# its wrapped object against this wrapped object, or we could just compare
# the hashes of each. If we try to unwrap the other object, it means you
# couldn't compare a wrapped object to an unwrapped one.
return _saved_hash(self) == _saved_hash(other)
def __ne__(self, other):
"""
It's good for consistency to define __ne__ if one is defining __eq__,
though this is not needed for using objects as dictionary keys.
"""
return _saved_hash(self) != _saved_hash(other)
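# Editor sketch (not part of the original source): how a lock would be
# wrapped by hand, assuming _prepare_wrapped_functions_for_object_wrappers()
# has already populated lock_object_wrapped_functions_dict.
def _example_wrap_lock():
  lock = emulmisc.createlock()
  wrapped = NamespaceObjectWrapper("lock", lock,
                                   lock_object_wrapped_functions_dict)
  wrapped.acquire(True)   # dispatched through __getattr__ to the wrapped func
  wrapped.release()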
class NamespaceAPIFunctionWrapper(object):
"""
Instances of this class exist solely to provide function wrapping. This is
done by creating an instance of the class and then making available the
instance's wrapped_function() method to any code that should only be allowed
to call the wrapped version of the function.
"""
def __init__(self, func_dict, is_method=False):
"""
<Purpose>
Constructor.
<Arguments>
self
func_dict
A dictionary with the following keys:
func (required) -- a function or a string of the name
of the method on the underlying object.
args (required)
return (required)
is_method -- if this is an object's method being wrapped
rather than a regular function.
<Exceptions>
None
<Side Effects>
None
<Returns>
None
"""
# Required in func_dict.
self.__func = func_dict["func"]
self.__args = func_dict["args"]
self.__return = func_dict["return"]
self.__is_method = is_method
# Make sure that the __target_func really is a function or a string
# indicating a function by that name on the underlying object should
# be called.
if not _saved_callable(self.__func) and type(self.__func) is not str:
raise TypeError("The func was neither callable nor a string when " +
"constructing a namespace-wrapped function. The object " +
"used for target_func was: " + repr(self.__func))
if type(self.__func) is str:
self.__func_name = self.__func
else:
self.__func_name = self.__func.__name__
def _process_args(self, args):
args_to_return = []
for index in range(len(args)):
# Armon: If there are more arguments than there are type specifications
# and we are using NonCopiedVarArgs, then check against that.
if index >= len(self.__args) and isinstance(self.__args[-1], NonCopiedVarArgs):
arg_type = self.__args[-1]
else:
arg_type = self.__args[index]
# We only copy simple types, which means we only copy ValueProcessor not
# ObjectProcessor arguments.
if isinstance(arg_type, ValueProcessor):
temparg = arg_type.copy(args[index])
elif isinstance(arg_type, ObjectProcessor):
temparg = arg_type.unwrap(args[index])
else:
raise NamespaceInternalError("Unknown argument expectation.")
arg_type.check(temparg)
args_to_return.append(temparg)
return args_to_return
def _process_retval_helper(self, processor, retval):
try:
if isinstance(processor, ValueProcessor):
tempretval = processor.copy(retval)
processor.check(tempretval)
elif isinstance(processor, ObjectProcessor):
processor.check(retval)
tempretval = processor.wrap(retval)
elif processor is None:
if retval is not None:
raise InternalRepyError("Expected None but wasn't.")
tempretval = None
else:
raise InternalRepyError("Unknown retval expectation.")
return tempretval
except RepyArgumentError, err:
raise InternalRepyError("Invalid retval type: %s" % err)
def _process_retval(self, retval):
try:
# Allow the return value to be a tuple of processors.
if type(retval) is tuple:
if len(retval) != len(self.__return):
raise InternalRepyError("Returned tuple of wrong size: %s" % str(retval))
tempretval = []
for index in range(len(retval)):
tempitem = self._process_retval_helper(self.__return[index], retval[index])
tempretval.append(tempitem)
tempretval = tuple(tempretval)
else:
tempretval = self._process_retval_helper(self.__return, retval)
except Exception, e:
raise InternalRepyError(
"Function '" + self.__func_name + "' returned with unallowed return type " +
str(type(retval)) + " : " + str(e))
return tempretval
def wrapped_function(self, *args, **kwargs):
"""
<Purpose>
Act as the function that is wrapped but perform all required sanitization
and checking of data that goes into and comes out of the underlying
function.
<Arguments>
self
*args
**kwargs
The arguments to the underlying function.
<Exceptions>
NamespaceViolationError
If some aspect of the arguments or function call is not allowed.
Anything else that the underlying function may raise.
<Side Effects>
Anything that the underlying function may do.
<Returns>
Anything that the underlying function may return.
"""
try:
# We don't allow keyword args.
if kwargs:
raise RepyArgumentError("Keyword arguments not allowed when calling %s." %
self.__func_name)
if self.__is_method:
# This is a method of an object instance rather than a standalone function.
# The "self" argument will be passed implicitly by python in some cases, so
# we remove it from the args we check. For the others, we'll add it back in
# after the check.
args_to_check = args[1:]
else:
args_to_check = args
if len(args_to_check) != len(self.__args):
if not self.__args or not isinstance(self.__args[-1], NonCopiedVarArgs):
raise RepyArgumentError("Function '" + self.__func_name +
"' takes " + str(len(self.__args)) + " arguments, not " +
str(len(args_to_check)) + " as you provided.")
args_copy = self._process_args(args_to_check)
args_to_use = None
# If it's a string rather than a function, then this is our convention
# for indicating that we want to wrap the function of this particular
# object. We use this if the function to wrap isn't available without
# having the object around, such as with real lock objects.
if type(self.__func) is str:
func_to_call = _saved_getattr(args[0], self.__func)
args_to_use = args_copy
else:
func_to_call = self.__func
if self.__is_method:
# Sanity check the object we're adding back in as the "self" argument.
if not isinstance(args[0], (NamespaceObjectWrapper, emulfile.emulated_file,
emulcomm.EmulatedSocket, emulcomm.TCPServerSocket,
emulcomm.UDPServerSocket, thread.LockType,
virtual_namespace.VirtualNamespace)):
raise NamespaceInternalError("Wrong type for 'self' argument.")
# If it's a method but the function was not provided as a string, we
# actually do have to add the first argument back in. Yes, this whole
# area of code is ugly.
args_to_use = [args[0]] + args_copy
else:
args_to_use = args_copy
retval = func_to_call(*args_to_use)
return self._process_retval(retval)
except RepyException:
# TODO: this should be changed to RepyError along with all references to
# RepyException in the rest of the repy code.
# We allow any RepyError to continue up to the client code.
raise
except:
# Code evaluated inside a `VirtualNamespace` may raise arbitrary
# errors, including plain Python exceptions. Reraise these errors
# so that the calling user code sees them.
# (Otherwise, things like `NameError`s in a virtual namespace
# crash the sandbox despite being wrapped in `try`/`except`,
# see SeattleTestbed/repy_v2#132.)
if type(args[0]) == virtual_namespace.VirtualNamespace:
raise
# Non-`RepyException`s outside of `VirtualNamespace` methods
# are unexpected and indicative of a programming error on
# our side, so we terminate.
_handle_internalerror("Unexpected exception from within Repy API", 843)
|
# -*- coding: utf-8 -*-
from operator import itemgetter
from uuid import uuid4
from typing import (
Set,
List,
Tuple,
Iterable,
Iterator,
Optional
)
from cadnano import (
app,
setBatch,
util
)
from cadnano.addinstancecmd import AddInstanceCommand
from cadnano.proxies.cnenum import (
EnumType,
GridEnum,
ModEnum,
PointEnum,
ViewSendEnum
)
from cadnano.proxies.cnobject import CNObject
from cadnano.objectinstance import ObjectInstance
from cadnano.proxies.cnproxy import (
ProxySignal,
UndoStack
)
from cadnano.docmodscmd import (
AddModCommand,
ModifyModCommand,
RemoveModCommand
)
from cadnano.fileio.decode import decodeFile
from cadnano.fileio.encode import encodeToFile
from cadnano.part import Part
from cadnano.part.nucleicacidpart import NucleicAcidPart
from cadnano.part.refreshsegmentscmd import RefreshSegmentsCommand
from cadnano.oligo import Oligo
from cadnano.strandset import StrandSet
from cadnano.strand import Strand
from cadnano.cntypes import (
DocCtrlT,
DocT,
WindowT
)
# Type Aliases
EndsSelected = Tuple[bool, bool]
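# e.g. (True, False) means only the low-index end of a strand is selected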
class Document(CNObject):
"""
The Document class is the root of the model. It has two main purposes:
1. Serve as the parent of all Part objects within the model.
2. Track all sub-model actions on its undoStack.
Args:
parent (CNObject): optional, defaults to None
Attributes:
view_names (list): views the document should support
filter_set (set): filters that should be applied when selecting.
"""
def __init__(self, parent=None):
super(Document, self).__init__(parent)
self._undostack = us = UndoStack() # notice NO parent, what does this mean?
us.setUndoLimit(30)
self._children = set() # for storing a reference to Parts (and Assemblies)
self._instances = set() # for storing instances of Parts (and Assemblies)
self._app_window = None
# the dictionary maintains what is selected
self._selection_dict = {}
self._active_part = None
self._filename = None
# the added list is what was recently selected or deselected
self._strand_selected_changed_dict = {}
self.view_names = []
self.filter_set: Set[str] = set()
self._mods = {} # modifications keyed by mod id
this_app = app()
this_app.documentWasCreatedSignal.emit(self)
# end def
# SIGNALS #
# Signal 1. Connected to the ViewRoots
documentPartAddedSignal = ProxySignal(object, CNObject, name='documentPartAddedSignal')
"""`Document`, `Part`"""
documentAssemblyAddedSignal = ProxySignal(object, CNObject, name='documentAssemblyAddedSignal')
"""`Document`, `Assembly`"""
documentSelectionFilterChangedSignal = ProxySignal(object, name='documentSelectionFilterChangedSignal')
documentPreXoverFilterChangedSignal = ProxySignal(str, name='documentPreXoverFilterChangedSignal')
documentViewResetSignal = ProxySignal(CNObject, name='documentViewResetSignal')
documentClearSelectionsSignal = ProxySignal(CNObject, name='documentClearSelectionsSignal')
documentChangeViewSignalingSignal = ProxySignal(int, name='documentChangeViewSignalingSignal')
# Signal 2. Connected to the ModTool
documentModAddedSignal = ProxySignal(object, object, object, name='documentModAddedSignal')
documentModRemovedSignal = ProxySignal(object, object, name='documentModRemovedSignal')
documentModChangedSignal = ProxySignal(object, object, object, name='documentModChangedSignal')
# SLOTS #
# ACCESSORS #
def undoStack(self) -> UndoStack:
"""This is the actual undoStack to use for all commands. Any children
needing to perform commands should just ask their parent for the
undoStack, and eventually the request will get here.
"""
return self._undostack
def children(self) -> Set[CNObject]:
"""Returns a list of parts associated with the document.
Returns:
list: list of all child objects
"""
return self._children
def addRefObj(self, child: CNObject):
"""For adding Part and Assembly object references
Args:
child (object):
"""
self._children.add(child)
def addInstance(self, instance: ObjectInstance):
"""Add an ObjectInstance to the list of instances
Args:
instance:
"""
self._instances.add(instance)
def removeInstance(self, instance: ObjectInstance):
""" Remove an ObjectInstance from the list of instances
Args:
instance:
"""
self._instances.remove(instance)
self.documentClearSelectionsSignal.emit(self)
def removeAllChildren(self):
"""Used to reset the document. Not undoable."""
self.documentClearSelectionsSignal.emit(self)
for child in list(self._children):
child.remove(use_undostack=True)
self.undoStack().clear()
self.deactivateActivePart()
# end def
def setFilterSet(self, filter_list: List[str]):
""" Set the Document filter list.
Emits `documentSelectionFilterChangedSignal`
Args:
filter_list: list of filter key names
"""
assert isinstance(filter_list, list)
vhkey = 'virtual_helix'
fs = self.filter_set
if vhkey in filter_list and vhkey not in fs:
self.clearAllSelected()
if vhkey in fs and vhkey not in filter_list:
self.clearAllSelected()
self.filter_set = fs = set(filter_list)
self.documentSelectionFilterChangedSignal.emit(fs)
# end def
def removeRefObj(self, child: CNObject):
""" Remove child Part or Assembly
Args:
child:
"""
self._children.remove(child)
# end def
def activePart(self) -> Part:
return self._active_part
# end def
def setActivePart(self, part: Part):
self._active_part = part
# end def
def deactivateActivePart(self):
self._active_part = None
# end def
def changeViewSignaling(self, signal_enum: int = ViewSendEnum.ALL):
'''Turn view signaling on and off for enabled slots in views.
Signals the root item in each view
Args:
signal_enum: default turns on all view signals
'''
self.documentChangeViewSignalingSignal.emit(signal_enum)
# end def
def fileName(self) -> str:
return self._filename
# end def
def setFileName(self, fname: str):
self._filename = fname
# end def
def writeToFile(self, filename: str, legacy: bool = False):
""" Convenience wrapper for `encodeToFile` to set the `document`
argument to `self`
Args:
filename: full path file name
legacy: attempt to export cadnano2 format
"""
encodeToFile(filename, self, legacy)
# end def
def readFile(self, filename: str) -> DocT:
"""Convenience wrapper for ``decodeFile`` to always emit_signals and
set the ``document`` argument to ``self``
Args:
filename: full path file name
Returns:
self ``Document`` object with data decoded from ``filename``
"""
print("reading file", filename)
return decodeFile(filename, document=self, emit_signals=True)
# end def
# def assemblies(self):
# """Returns a list of assemblies associated with the document."""
# return self._assemblies
# PUBLIC METHODS FOR QUERYING THE MODEL #
def addStrandToSelection(self, strand: Strand, value: EndsSelected):
""" Add `Strand` object to Document selection
Args:
strand:
value: of the form::
(is low index selected, is high index selected)
"""
ss = strand.strandSet()
if ss in self._selection_dict:
self._selection_dict[ss][strand] = value
else:
self._selection_dict[ss] = {strand: value}
self._strand_selected_changed_dict[strand] = value
# end def
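# Editor note (illustrative): after the call above, _selection_dict has
# the shape {strandset: {strand: (is_low_selected, is_high_selected)}},
# e.g. {ss: {strand: (True, False)}} when only the low end is selected.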
def removeStrandFromSelection(self, strand: Strand) -> bool:
"""Remove ``Strand`` object from Document selection
Args:
strand:
Returns:
``True`` if successful, ``False`` otherwise
"""
ss = strand.strandSet()
if ss in self._selection_dict:
temp = self._selection_dict[ss]
if strand in temp:
del temp[strand]
if len(temp) == 0:
del self._selection_dict[ss]
self._strand_selected_changed_dict[strand] = (False, False)
return True
else:
return False
else:
return False
# end def
def addVirtualHelicesToSelection(self, part: Part, id_nums: Iterable[int]):
"""If the ``Part`` isn't in the ``_selection_dict`` its not
going to be in the changed_dict either, so go ahead and add
Args:
part: The Part
id_nums: List of virtual helix ID numbers
"""
selection_dict = self._selection_dict
if part not in selection_dict:
selection_dict[part] = s_set = set()
else:
s_set = selection_dict[part]
changed_set = set()
for id_num in id_nums:
if id_num not in s_set:
s_set.add(id_num)
changed_set.add(id_num)
if len(changed_set) > 0:
part.partVirtualHelicesSelectedSignal.emit(part, changed_set, True)
# end def
def removeVirtualHelicesFromSelection(self, part: Part, id_nums: Iterable[int]):
"""Remove from the ``Part`` selection the ``VirtualHelix`` objects
specified by id_nums.
Args:
part:
id_nums:
"""
# print("remove called", id(part), id_nums, self._selection_dict.get(part))
selection_dict = self._selection_dict
if part in selection_dict:
s_set = selection_dict[part]
changed_set = set()
for id_num in id_nums:
if id_num in s_set:
s_set.remove(id_num)
if len(s_set) == 0:
del selection_dict[part]
changed_set.add(id_num)
if len(changed_set) > 0:
part.partVirtualHelicesSelectedSignal.emit(part, changed_set, False)
# end def
def selectedOligos(self) -> Set[Oligo]:
"""As long as one endpoint of a strand is in the selection, then the
oligo is considered selected.
Returns:
Set of zero or more selected :obj:`Oligos`
"""
s_dict = self._selection_dict
selected_oligos = set()
for ss in s_dict.keys():
for strand in ss:
selected_oligos.add(strand.oligo())
# end for
# end for
return selected_oligos
# end def
def clearAllSelected(self):
"""Clear all selections
emits documentClearSelectionsSignal
"""
# print("clearAllSelected")
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._strand_selected_changed_dict = {}
self.documentClearSelectionsSignal.emit(self)
# end def
def isModelStrandSelected(self, strand: Strand) -> bool:
ss = strand.strandSet()
if ss in self._selection_dict:
if strand in self._selection_dict[ss]:
return True
else:
return False
else:
return False
# end def
def isVirtualHelixSelected(self, part: Part, id_num: int) -> bool:
"""For a given ``Part``
Args:
part: ``Part`` in question
id_num: ID number of a virtual helix
Returns:
``True`` if ``id_num`` is selected else ``False``
"""
if part in self._selection_dict:
return id_num in self._selection_dict[part]
else:
return False
# end def
def isOligoSelected(self, oligo: Oligo) -> bool:
"""Determine if given ``Oligo`` is selected
Args:
oligo: ``Oligo`` object
Returns:
``True`` if ``oligo`` is selected otherwise ``False``
"""
strand5p = oligo.strand5p()
for strand in strand5p.generator3pStrand():
if self.isModelStrandSelected(strand):
return True
return False
# end def
def selectOligo(self, oligo: Oligo):
"""Select given ``Oligo``
Args:
oligo: ``Oligo`` object
"""
strand5p = oligo.strand5p()
both_ends = (True, True)
for strand in strand5p.generator3pStrand():
self.addStrandToSelection(strand, both_ends)
self.updateStrandSelection()
# end def
def deselectOligo(self, oligo: Oligo):
"""Deselect given ``Oligo``
Args:
oligo: ``Oligo`` object
"""
strand5p = oligo.strand5p()
for strand in strand5p.generator3pStrand():
self.removeStrandFromSelection(strand)
self.updateStrandSelection()
# end def
def getSelectedStrandValue(self, strand: Strand) -> EndsSelected:
"""Strand is an object to look up
it is pre-vetted to be in the dictionary
Args:
strand: ``Strand`` object in question
Returns:
Tuple of the end point selection
"""
return self._selection_dict[strand.strandSet()][strand]
# end def
def sortedSelectedStrands(self, strandset: StrandSet) -> List[Tuple[Strand, EndsSelected]]:
"""Get the selected strand items of a ``StrandSet``, sorted from low
to high index
Args:
strandset: :obj:`StrandSet` to get selected strands from
Returns:
List of ``(Strand, (is_low, is_high))`` selection items
"""
out_list = list(self._selection_dict[strandset].items())
def getLowIdx(x): return Strand.lowIdx(itemgetter(0)(x))
out_list.sort(key=getLowIdx)
return out_list
# end def
def determineStrandSetBounds(self, selected_strand_list: List[Tuple[Strand, EndsSelected]],
strandset: StrandSet) -> Tuple[int, int]:
"""Determine the bounds of a :class:`StrandSet` ``strandset`` among a
a list of selected strands in that same ``strandset``
Args:
selected_strand_list: list of ``( Strands, (is_low, is_high) )`` items
strandset: of interest
Returns:
tuple: min low bound and min high bound index
"""
length = strandset.length()
min_high_delta = min_low_delta = max_ss_idx = length - 1 # init the return values
ss_dict = self._selection_dict[strandset]
for strand, value in selected_strand_list:
idx_low, idx_high = strand.idxs()
low_neighbor, high_neighbor = strandset.getNeighbors(strand)
# print(low_neighbor, high_neighbor)
if value[0]: # the end is selected
if low_neighbor is None:
temp = idx_low - 0
else:
if low_neighbor in ss_dict:
value_N = ss_dict[low_neighbor]
# we only care if the low neighbor is not selected
temp = min_low_delta if value_N[1] else idx_low - low_neighbor.highIdx() - 1
# end if
else: # not selected
temp = idx_low - low_neighbor.highIdx() - 1
# end else
if temp < min_low_delta:
min_low_delta = temp
# end if
# check the other end of the strand
if not value[1]:
temp = idx_high - idx_low - 1
if temp < min_high_delta:
min_high_delta = temp
# end if
if value[1]:
if high_neighbor is None:
temp = max_ss_idx - idx_high
else:
if high_neighbor in ss_dict:
value_N = ss_dict[high_neighbor]
# we only care if the high neighbor is not selected
temp = min_high_delta if value_N[0] else high_neighbor.lowIdx() - idx_high - 1
# end if
else: # not selected
temp = high_neighbor.lowIdx() - idx_high - 1
# end else
# end else
if temp < min_high_delta:
min_high_delta = temp
# end if
# check the other end of the strand
if not value[0]:
temp = idx_high - idx_low - 1
if temp < min_low_delta:
min_low_delta = temp
# end if
# end for
return (min_low_delta, min_high_delta)
# end def
def getSelectionBounds(self) -> Tuple[int, int]:
"""Get the index bounds of a strand selection
Returns:
tuple: of :obj:`int`
"""
min_low_delta = -1
min_high_delta = -1
for strandset in self._selection_dict.keys():
selected_list = self.sortedSelectedStrands(strandset)
temp_low, temp_high = self.determineStrandSetBounds(selected_list, strandset)
if temp_low < min_low_delta or min_low_delta < 0:
min_low_delta = temp_low
if temp_high < min_high_delta or min_high_delta < 0:
min_high_delta = temp_high
return (min_low_delta, min_high_delta)
# end def
def deleteStrandSelection(self, use_undostack: bool = True):
"""Delete selected strands. First iterates through all selected strands
and extracts refs to xovers and strands. Next, calls removeXover
on xoverlist as part of its own macroed command for isolation
purposes. Finally, calls removeStrand on all strands that were
fully selected (low and high), or had at least one non-xover
endpoint selected.
"""
xoList = []
strand_dict = {}
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
part = strand.part()
idx_low, idx_high = strand.idxs()
strand5p = strand.connection5p()
strand3p = strand.connection3p()
# both ends are selected
strand_dict[strand] = selected[0] and selected[1]
# only look at 3' ends to handle xover deletion
sel3p = selected[0] if idx_low == strand.idx3Prime() else selected[1]
if sel3p: # is idx3p selected?
if strand3p: # is there an xover
xoList.append((part, strand, strand3p, use_undostack))
else: # idx3p is a selected endpoint
strand_dict[strand] = True
else:
if not strand5p: # idx5p is a selected endpoint
strand_dict[strand] = True
if use_undostack and xoList:
self.undoStack().beginMacro("Delete xovers")
for part, strand, strand3p, useUndo in xoList:
NucleicAcidPart.removeXover(part, strand, strand3p, useUndo)
self.removeStrandFromSelection(strand)
self.removeStrandFromSelection(strand3p)
self._selection_dict = {}
self.documentClearSelectionsSignal.emit(self)
if use_undostack:
if xoList: # end xover macro if it was started
self.undoStack().endMacro()
if True in strand_dict.values():
self.undoStack().beginMacro("Delete selection")
else:
return # nothing left to do
for strand, delete in strand_dict.items():
if delete:
strand.strandSet().removeStrand(strand)
if use_undostack:
self.undoStack().endMacro()
# end def
def resizeSelection(self, delta: int, use_undostack: bool = True):
"""Moves the selected idxs by delta by first iterating over all strands
to calculate new idxs (method will return if snap-to behavior would
create illegal state), then applying a resize command to each strand.
Args:
delta:
use_undostack: optional, default is ``True``
"""
resize_list = []
vh_set = set()
# calculate new idxs
part = None
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
if part is None:
part = strand.part()
idx_low, idx_high = strand.idxs()
new_low, new_high = strand.idxs()
delta_low = delta_high = delta
# process xovers to get revised delta
if selected[0] and strand.connectionLow():
new_low = part.xoverSnapTo(strand, idx_low, delta)
if new_low is None:
return
delta_high = new_low - idx_low
if selected[1] and strand.connectionHigh():
new_high = part.xoverSnapTo(strand, idx_high, delta)
if new_high is None:
return
delta_low = new_high - idx_high
# process endpoints
if selected[0] and not strand.connectionLow():
new_low = idx_low + delta_low
if selected[1] and not strand.connectionHigh():
new_high = idx_high + delta_high
if new_low > new_high: # check for illegal state
return
vh_set.add(strand.idNum())
resize_list.append((strand, new_low, new_high))
# end for
# end for
# execute the resize commands
us = self.undoStack()
if use_undostack:
us.beginMacro("Resize Selection")
for strand, idx_low, idx_high in resize_list:
Strand.resize(strand,
(idx_low, idx_high),
use_undostack,
update_segments=False)
if resize_list:
cmd = RefreshSegmentsCommand(part, vh_set)
if use_undostack:
us.push(cmd)
else:
cmd.redo()
if use_undostack:
us.endMacro()
# end def
def updateStrandSelection(self):
"""Do it this way in the future when we have
a better signaling architecture between views.
For now, individual objects need to emit signals
"""
oligos_selected_set = set()
oligos_set = set()
for obj, value in self._strand_selected_changed_dict.items():
oligo = obj.oligo()
oligos_set.add(oligo)
if True in value:
oligos_selected_set.add(oligo)
obj.strandSelectedChangedSignal.emit(obj, value)
# end for
for oligo in oligos_selected_set:
oligo.oligoSelectedChangedSignal.emit(oligo, True)
oligos_deselected_set = oligos_set - oligos_selected_set
for oligo in oligos_deselected_set:
oligo.oligoSelectedChangedSignal.emit(oligo, False)
self._strand_selected_changed_dict = {}
# end def
def resetViews(self):
"""This is a fast way to clear selections and the views.
We could manually deselect each item in the dict, but we'll just
let them be garbage collected.
(The dictionary maintains what is selected.)
"""
# print("reset views")
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._strand_selected_changed_dict = {}
self.documentViewResetSignal.emit(self)
# end def
def makeNew(self, fname: str = "untitled.json"):
"""For use in creating a new ``Document``
Args:
fname: new filename, default is ``untitled.json``
"""
self.clearAllSelected()
self.resetViews()
setBatch(True)
self.removeAllChildren() # clear out old parts
setBatch(False)
self.undoStack().clear() # reset undostack
self.deactivateActivePart()
self._filename = fname
# end def
def setViewNames(self, view_name_list: List[str], do_clear: bool = False):
"""Tell the model what views the document should support
Allows non-visible views to be used.
Intended to be called at application launch only at present.
Args:
view_name_list: List of view names like `slice`, `path`, or `inspector`
do_clear: optional, whether to clear the existing names; defaults to ``False``
"""
view_names = [] if do_clear else self.view_names
for view_name in view_name_list:
if view_name not in view_names:
view_names.append(view_name)
self.view_names = view_names
# end def
# PUBLIC METHODS FOR EDITING THE MODEL #
def createNucleicAcidPart( self,
use_undostack: bool = True,
grid_type: EnumType = GridEnum.NONE,
is_lattice: bool = True
) -> NucleicAcidPart:
"""Create and store a new DnaPart and instance, and return the instance.
Args:
use_undostack: optional, defaults to True
grid_type: optional default to GridEnum.NONE
Returns
new :obj:`NucleicAcidPart`
"""
dna_part = NucleicAcidPart(document=self, grid_type=grid_type, is_lattice=is_lattice)
self._addPart(dna_part, use_undostack=use_undostack)
return dna_part
# end def
def getParts(self) -> Iterator[Part]:
"""Get all child :obj:`Part` in the document
Yields:
the next :obj:`Part` in the set of children
"""
for item in self._children:
if isinstance(item, Part):
yield item
# end def
def getPartByUUID(self, uuid: str) -> Part:
"""Get the part given the uuid string
Args:
uuid: of the part
Returns:
Part
Raises:
KeyError: no part with that UUID
"""
for item in self._children:
if isinstance(item, Part) and item.uuid == uuid:
return item
raise KeyError("Part with uuid {} not found".format(uuid))
# end def
# PUBLIC SUPPORT METHODS #
def appWindow(self) -> WindowT:
return self._app_window
# end def
def setAppWindow(self, app_window: WindowT):
"""Called by :meth:`CNMainWindow.setDocument` method."""
self._app_window = app_window
# end def
# PRIVATE SUPPORT METHODS #
def _addPart(self, part: Part, use_undostack: bool = True):
"""Add part to the document via AddInstanceCommand.
"""
c = AddInstanceCommand(self, part)
util.doCmd(self, c, use_undostack)
# end def
def createMod( self,
params: dict,
mid: str = None,
use_undostack: bool = True) -> Tuple[dict, str]:
"""Create a modification
Args:
params:
mid: optional, modification ID string
use_undostack: optional, default is ``True``
Returns:
tuple of :obj:`dict`, :obj:`str` of form::
(dictionary of modification parameters, modification ID string)
Raises:
KeyError: Duplicate mod ID
"""
if mid is None:
mid = uuid4().hex
elif mid in self._mods:
raise KeyError("createMod: Duplicate mod id: {}".format(mid))
name = params.get('name', mid)
color = params.get('color', '#00FF00')
seq5p = params.get('seq5p', '')
seq3p = params.get('seq3p', '')
seqInt = params.get('seqInt', '')
note = params.get('note', '')
cmdparams = {
'props': {'name': name,
'color': color,
'note': note,
'seq5p': seq5p,
'seq3p': seq3p,
'seqInt': seqInt,
},
'ext_locations': set(), # external mods, mod belongs to idx outside of strand
'int_locations': set() # internal mods, mod belongs between idx and idx + 1
}
item = {'name': name,
'color': color,
'note': note,
'seq5p': seq5p,
'seq3p': seq3p,
'seqInt': seqInt
}
c = AddModCommand(self, cmdparams, mid)
util.doCmd(self, c, use_undostack=use_undostack)
return item, mid
# end def
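# Editor sketch (values hypothetical): typical use would be
#   item, mid = doc.createMod({'name': 'biotin', 'color': '#0066cc',
#                              'seq5p': 'TT'})
#   doc.modifyMod({'color': '#cc0000'}, mid)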
def modifyMod(self, params: dict, mid: str, use_undostack: bool = True):
"""Modify an existing modification
Args:
params:
mid: optional, modification ID string
use_undostack: optional, default is ``True``
"""
if mid in self._mods:
c = ModifyModCommand(self, params, mid)
util.doCmd(self, c, use_undostack=use_undostack)
# end def
def destroyMod(self, mid: str, use_undostack: bool = True):
"""Destroy an existing modification
Args:
mid: optional, modification ID string
use_undostack: optional, default is ``True``
"""
if mid in self._mods:
c = RemoveModCommand(self, mid)
util.doCmd(self, c, use_undostack=use_undostack)
# end def
def getMod(self, mid: str) -> Optional[dict]:
"""Get an existing modification
Args:
mid: modification ID string
Returns:
dict or None
"""
return self._mods.get(mid)
# end def
def getModProperties(self, mid: str) -> Optional[dict]:
"""Get an existing modification properties
Args:
mid: modification ID string
Returns:
dict or None
"""
return self._mods.get(mid)['props']
# end def
def getModLocationsSet(self, mid: str, is_internal: bool) -> set:
"""Get an existing modification's locations in a ``Document``
(``Part``, Virtual Helix ID, ``Strand``)
Args:
mid: modification ID string
is_internal:
Returns:
set
"""
if is_internal:
return self._mods[mid]['int_locations']
else:
return self._mods[mid]['ext_locations']
# end def
def addModInstance(self, mid: str, is_internal: bool, part: Part, key: str):
"""Add an instance of a modification to the Document
Args:
mid: modification id string
is_internal:
part: associated Part
key: key of the modification at the part level
"""
location_set = self.getModLocationsSet(mid, is_internal)
doc_key = ''.join((part.uuid, ',', key))
location_set.add(doc_key)
# end def
def removeModInstance(self, mid: str, is_internal: bool, part: Part, key: str):
"""Remove an instance of a modification from the Document
Args:
mid: modification id string
is_internal:
part: associated Part
key: key of the modification at the part level
"""
location_set = self.getModLocationsSet(mid, is_internal)
doc_key = ''.join((part.uuid, ',', key))
location_set.remove(doc_key)
# end def
def modifications(self) -> dict:
"""Get a copy of the dictionary of the modifications in this ``Document``
Returns:
dictionary of the modifications
"""
mods = self._mods
res = {}
for mid in list(mods.keys()):
mod_dict = mods[mid]
res[mid] = {'props': mod_dict['props'].copy(),
'int_locations': list(mod_dict['int_locations']),
'ext_locations': list(mod_dict['ext_locations'])
}
return res
# end def
def getModStrandIdx(self, key: str) -> Tuple[Part, Strand, int]:
"""Convert a key of a mod instance relative to a part
to a part, a strand and an index
Args:
key: Mod key
Returns:
tuple of the form::
(Part, Strand, and index)
"""
keylist = key.split(',')
part_uuid = keylist[0]
id_num = int(keylist[1])
is_fwd = int(keylist[2]) # enumeration of StrandEnum.FWD or StrandEnum.REV
idx = int(keylist[3])
part = self.getPartByUUID(part_uuid)
strand = part.getStrand(is_fwd, id_num, idx)
return part, strand, idx
# end def
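# Editor note (illustrative): a mod instance key is the comma-separated
# string 'part_uuid,id_num,is_fwd,idx', so a key like 'f81d...,5,1,21'
# resolves to (part, the forward strand of helix 5, index 21).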
def getModSequence(self, mid: str, mod_type: int) -> Tuple[str, str]:
"""Getter for the modification sequence give by the arguments
Args:
mid: mod id or ``None``
mod_type: [ModEnum.END_5PRIME, ModEnum.END_3PRIME]
Returns:
tuple: of :obj:`str` of form::
(sequence, name)
"""
mod_dict = self._mods.get(mid)
name = '' if mid is None else mod_dict['name']
if mod_type == ModEnum.END_5PRIME:
seq = '' if mid is None else mod_dict['seq5p']
elif mod_type == ModEnum.END_3PRIME:
seq = '' if mid is None else mod_dict['seq3p']
else:
seq = '' if mid is None else mod_dict['seqInt']
return seq, name
# end def
def getGridType(self) -> EnumType:
"""Get the current Grid type
Returns:
The current Grid type
"""
if self.activePart():
return self.activePart().getGridType()
# end def
def setGridType(self, grid_type: EnumType):
"""Set the current Grid type
"""
if self.activePart():
self.activePart().setGridType(grid_type)
# end def
# end class
|
#!/usr/bin/env python
"""
Sad attempt at a drop-in replacement for KSB's excellent xapply
"""
import os
import subprocess
import signal
import sys
import argparse
import time
VERSION = 0.1
class Dicer(object):
"""
Emulate KSB's dicer
Working on de-uglying it
"""
def __init__(self, fmat="", escape='%'):
# just make pylint happy
self._fmat = ""
# making these attributes so I can refactor dice later
self._diceon = 0
self._select = ""
# Our configurable escape sequence
self.escape = escape
# Set the format string with the setter
self.fmat = fmat
# Number of iterations (for %u expansion)
self.i = 1
def reset(self, fmat, escape):
"""
Start over without reinstantiating
"""
self.escape = escape
# Set the format string with the setter
self.fmat = fmat
# Number of iterations (for %u expansion)
self.i = 1
@property
def fmat(self):
"""
Just return the private attribute
"""
return self._fmat
@fmat.setter
def fmat(self, value):
"""
If the format string contains no escape sequence, assume we're appending a la xargs
"""
escape_on = False
# Assume we don't need to do anything
self._fmat = value
# Bail (the format already has a substitution) if we find an escape
# followed by anything other than another escape
for char in value:
if escape_on:
if char == self.escape:
escape_on = False
continue
else:
return True
if char == self.escape:
escape_on = True
# If we haven't found a valid escape sequence by now, tack one on
self._fmat = "%s %s1" % (value, self.escape)
def dice(self, intext):
"""
Do the work
"""
# keep track of state
self._diceon = 0
# which input stream
self._select = ""
# separator character
seperator = ""
# which field
field = ""
# our output
out = ""
for char in self.fmat:
if char == self.escape:
self._diceon = 1
elif self._diceon == 1:
if is_int(char):
self._select = str(self._select) + str(char)
elif char == "u":
out += str(self.i)
self._select = ""
self._diceon = 0
elif char == "[":
self._diceon = 2
elif char == self.escape:
out = out + self.escape
self._select = ""
self._diceon = 0
else:
self._diceon = 0
self._select = int(self._select) - 1
out = out + intext[self._select].rstrip() + char
self._select = ""
elif self._diceon == 2:
if is_int(char):
self._select = str(self._select) + str(char)
else:
self._select = int(self._select) - 1
seperator = str(char)
if char == ' ':
seperator = None
self._diceon = 4
elif self._diceon == 4:
field = char
self._diceon = 5
elif self._diceon == 5:
if is_int(char):
field = "%d%d" % (field, char)
elif char == "]":
field = int(field) - int(1)
if field < len(intext[self._select].split(seperator)):
text = intext[self._select].split(seperator)[int(field)]
out = str(out) + text.rstrip()
self._diceon = 0
field = ""
seperator = ""
self._select = ""
else:
out = str(out) + "%%[1%d%d%c" % (seperator, field, char)
self._diceon = 0
field = ""
seperator = ""
self._select = ""
else:
out = str(out) + str(char)
# Clean up if we end on a substitution
if self._diceon == 1:
self._select = int(self._select) - 1
out = out + intext[self._select]
self.i += 1
return out
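# Editor sketch (not part of the original script): worked examples of the
# dicer format language. %N substitutes input line N; %[N cM] substitutes
# field M of line N split on the character c; %u substitutes the iteration
# count.
def _example_dicer():
    assert Dicer("ping %1 -c 1").dice(["host1\n"]) == "ping host1 -c 1"
    assert Dicer("echo %[1:1]").dice(["a:b:c\n"]) == "echo a"
    repeat = Dicer("job %u")
    assert repeat.dice(["x\n"]) == "job 1"
    assert repeat.dice(["x\n"]) == "job 2"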
class MLogger(object):
"""
Do logging
"""
def __init__(self, verbosity=0, error=sys.stderr, out=sys.stdout):
self.verbosity = verbosity
# grab just the name of the process, not the full path
self.name = sys.argv[0]
self.name = self.name.split('/')[-1]
self.error = error
self.out = out
def verbose(self, level, msg):
"""
Print a message based on verbosity
"""
if self.verbosity > level:
self.error.write("%s: %s\n" % (self.name, msg))
def message(self, msg):
"""
Unconditionally print a message
"""
self.out.write("%s: %s\n" % (self.name, msg))
class ParaDo(object):
"""
Managed Parallelized Processes.
Basically keep X number of plates spinning.
"""
def __init__(self, maxjobs):
"""
Takes:
number of jobs to run in parallel
"""
self.maxjobs = maxjobs
self.jobs = []
def startjob(self, cmd):
"""
Kick off jobs in parallel
"""
while True:
if len(self.jobs) >= self.maxjobs:
for (pid, pcmd) in self.jobs:
try:
os.waitpid(pid, os.WNOHANG)
except OSError:
self.jobs.remove((pid, pcmd))
LOG.verbose(2, "%s: finished!" % (str(pcmd)))
else:
break
LOG.verbose(1, "%s" % (str(cmd)))
self.jobs.append((subprocess.Popen(cmd, shell=True).pid, cmd))
def waitout(self):
"""
Make sure all of our kids are gone
"""
while len(self.jobs) > 0:
for (pid, pcmd) in self.jobs:
try:
os.waitpid(pid, os.WNOHANG)
except OSError:
self.jobs.remove((pid, pcmd))
LOG.verbose(2, "%s: finished!" % (str(pcmd)))
def kill(self):
"""
Kill off my children
Really just in theory. Unused
"""
shot = []
while len(self.jobs) > 0:
for (pid, pcmd) in self.jobs:
try:
os.waitpid(pid, os.WNOHANG)
except OSError:
self.jobs.remove((pid, pcmd))
LOG.verbose(2, "%s: finished!" % (str(pcmd)))
# TERM then KILL each
mysig = signal.SIGTERM
if pid in shot:
mysig = signal.SIGKILL
LOG.verbose(2,
"Shooting pid %s with signal %d" % (pid, mysig))
shot.append(pid)
os.kill(pid, mysig)
time.sleep(1)
class Mfitter(object):
"""
Multi-file iterator
"""
def __init__(self, paths, padding):
"""
Takes:
list of paths
"""
self.files = []
self.padding = padding
for path in paths:
if path == "-":
self.files.append(sys.stdin)
else:
self.files.append(open(path, 'r'))
def __iter__(self):
"""
I'm my own grandpa
"""
return self
def reset(self):
"""
seek all inputs back to 0
"""
for mfile in self.files:
mfile.seek(0)
def next(self):
"""
Returns:
list of lines
"""
out = []
done = 0
for mfile in self.files:
try:
out.append(mfile.next())
except StopIteration:
out.append(self.padding)
done = done + 1
if done >= len(self.files):
raise StopIteration
return out
def is_int(text):
"""
Return True if text is an integer
"""
try:
int(text)
return True
except ValueError:
return False
def num_cpus():
"""
Return the number of physical CPU cores
I intentionally don't do platform detection. I just check to see if
the method works, in case someone else implements the same interface.
I don't want to think about how to do this on windows.
"""
    # This works on Linux (maybe elsewhere)
    proc_path = '/proc/cpuinfo'
    if os.path.isfile(proc_path):
        cpuinfo = open(proc_path, 'r')
        for line in cpuinfo:
            # The line looks like "cpu cores\t: 4"
            if line[0:9] == "cpu cores":
                return int(line.split(':')[1].strip())
    # This works on BSD, MacOS (maybe elsewhere); it is also the fallback
    # when /proc/cpuinfo exists but lacks a "cpu cores" line.
    LOG.verbose(2, "No usable /proc/cpuinfo, trying sysctl")
    try:
        out = subprocess.check_output(['sysctl', 'hw.ncpu'])
    except subprocess.CalledProcessError as ex:
        # we got nothin, so we'll assume 1 core
        msg = "Could not determine number of processors: %s exited %d" % \
            (ex.cmd, ex.returncode)
        LOG.verbose(2, msg)
        return 1
    return int(out.split(':')[1].strip())
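# Editor's note: the standard library's multiprocessing.cpu_count() is a
# simpler alternative, though it reports *logical* CPUs rather than
# physical cores:
#
#   import multiprocessing
#   jobs = multiprocessing.cpu_count()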
def pargs():
"""
Parse Arguments
"""
prog = "papply"
parser = argparse.ArgumentParser(prog=prog,
description="Run jobs in parallel")
halp = 'Number of parallel jobs (default = number of cpu cores)'
parser.add_argument('-P', '--parallel', dest='parallel', metavar="jobs",
type=int, default=num_cpus(), help=halp)
parser.add_argument('-p', '--padding', dest='padding',
type=str, default="",
help="string to pad files with when they lack input")
parser.add_argument('-v', '--verbose', dest='verbosity', action="count",
default=0, help='Increase verbosity')
parser.add_argument('-V', '--version', action='version',
version='%s %s' % (prog, VERSION))
    parser.add_argument('-f', '--use-file', dest='usefile',
                        action='store_true', default=False,
                        help='Treat each input as a file to read lines from')
parser.add_argument('-a', '--escape', dest='escape', metavar='c',
default='%', help='Escape character (default = %%)')
parser.add_argument('command')
parser.add_argument('input',
type=str, nargs="+",
help='input string(s) or file(s) if -f has been specified')
opts = parser.parse_args()
if opts.usefile:
opts.list = Mfitter(opts.input, opts.padding)
else:
# make a list of single item lists
# just to have the same structure as Mfitter
opts.list = []
for text in opts.input:
opts.list.append([text])
LOG.verbosity = opts.verbosity
return opts
def main():
"""
Do Stuff
"""
opts = pargs()
pjob = ParaDo(opts.parallel)
dice = Dicer(opts.command, opts.escape)
for item in opts.list:
cmd = dice.dice(item)
pjob.startjob(cmd)
pjob.waitout()
if __name__ == '__main__':
LOG = MLogger()
main()
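# Example invocation (hypothetical file names), based on the substitution
# handling in Dicer above: "%1" at the end of the command expands to the
# first input of each run, so this keeps at most 4 gzips in flight:
#
#   papply -P 4 'gzip %1' a.log b.log c.log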
|
# -*- coding: utf-8 -*-
import os
import sys
import textwrap
import _pytest._code
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.nodes import Collector
from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
class TestModule(object):
def test_failing_import(self, testdir):
modcol = testdir.getmodulecol("import alksdjalskdjalkjals")
pytest.raises(Collector.CollectError, modcol.collect)
def test_import_duplicate(self, testdir):
a = testdir.mkdir("a")
b = testdir.mkdir("b")
p = a.ensure("test_whatever.py")
p.pyimport()
del sys.modules["test_whatever"]
b.ensure("test_whatever.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*import*mismatch*",
"*imported*test_whatever*",
"*%s*" % a.join("test_whatever.py"),
"*not the same*",
"*%s*" % b.join("test_whatever.py"),
"*HINT*",
]
)
def test_import_prepend_append(self, testdir, monkeypatch):
syspath = list(sys.path)
monkeypatch.setattr(sys, "path", syspath)
root1 = testdir.mkdir("root1")
root2 = testdir.mkdir("root2")
root1.ensure("x456.py")
root2.ensure("x456.py")
p = root2.join("test_x456.py")
monkeypatch.syspath_prepend(str(root1))
p.write(
textwrap.dedent(
"""\
import x456
def test():
assert x456.__file__.startswith({!r})
""".format(
str(root2)
)
)
)
with root2.as_cwd():
reprec = testdir.inline_run("--import-mode=append")
reprec.assertoutcome(passed=0, failed=1)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_syntax_error_in_module(self, testdir):
modcol = testdir.getmodulecol("this is a syntax error")
pytest.raises(modcol.CollectError, modcol.collect)
pytest.raises(modcol.CollectError, modcol.collect)
def test_module_considers_pluginmanager_at_import(self, testdir):
modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',")
pytest.raises(ImportError, lambda: modcol.obj)
def test_invalid_test_module_name(self, testdir):
a = testdir.mkdir("a")
a.ensure("test_one.part1.py")
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*test_one.part1*",
"Hint: make sure your test modules/packages have valid Python names.",
]
)
@pytest.mark.parametrize("verbose", [0, 1, 2])
def test_show_traceback_import_error(self, testdir, verbose):
"""Import errors when collecting modules should display the traceback (#1976).
With low verbosity we omit pytest and internal modules, otherwise show all traceback entries.
"""
testdir.makepyfile(
foo_traceback_import_error="""
from bar_traceback_import_error import NOT_AVAILABLE
""",
bar_traceback_import_error="",
)
testdir.makepyfile(
"""
import foo_traceback_import_error
"""
)
args = ("-v",) * verbose
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*",
"Traceback:",
"*from bar_traceback_import_error import NOT_AVAILABLE",
"*cannot import name *NOT_AVAILABLE*",
]
)
assert result.ret == 2
stdout = result.stdout.str()
for name in ("_pytest", os.path.join("py", "_path")):
if verbose == 2:
assert name in stdout
else:
assert name not in stdout
def test_show_traceback_import_error_unicode(self, testdir):
"""Check test modules collected which raise ImportError with unicode messages
are handled properly (#2336).
"""
testdir.makepyfile(
u"""
# -*- coding: utf-8 -*-
raise ImportError(u'Something bad happened ☺')
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*",
"Traceback:",
"*raise ImportError*Something bad happened*",
]
)
assert result.ret == 2
class TestClass(object):
def test_class_with_init_warning(self, testdir):
testdir.makepyfile(
"""
class TestClass1(object):
def __init__(self):
pass
"""
)
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
[
"*cannot collect test class 'TestClass1' because it has a __init__ constructor"
]
)
def test_class_subclassobject(self, testdir):
testdir.getmodulecol(
"""
class test(object):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*collected 0*"])
def test_static_method(self, testdir):
"""Support for collecting staticmethod tests (#2528, #2699)"""
testdir.getmodulecol(
"""
import pytest
class Test(object):
@staticmethod
def test_something():
pass
@pytest.fixture
def fix(self):
return 1
@staticmethod
def test_fix(fix):
assert fix == 1
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*collected 2 items*", "*2 passed in*"])
def test_setup_teardown_class_as_classmethod(self, testdir):
testdir.makepyfile(
test_mod1="""
class TestClassMethod(object):
@classmethod
def setup_class(cls):
pass
def test_1(self):
pass
@classmethod
def teardown_class(cls):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_issue1035_obj_has_getattr(self, testdir):
modcol = testdir.getmodulecol(
"""
class Chameleon(object):
def __getattr__(self, name):
return True
chameleon = Chameleon()
"""
)
colitems = modcol.collect()
assert len(colitems) == 0
def test_issue1579_namedtuple(self, testdir):
testdir.makepyfile(
"""
import collections
TestCase = collections.namedtuple('TestCase', ['a'])
"""
)
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
"*cannot collect test class 'TestCase' "
"because it has a __new__ constructor*"
)
def test_issue2234_property(self, testdir):
testdir.makepyfile(
"""
class TestCase(object):
@property
def prop(self):
raise NotImplementedError()
"""
)
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
@pytest.mark.filterwarnings(
"ignore:usage of Generator.Function is deprecated, please use pytest.Function instead"
)
class TestGenerator(object):
def test_generative_functions(self, testdir):
modcol = testdir.getmodulecol(
"""
def func1(arg, arg2):
assert arg == arg2
def test_gen():
yield func1, 17, 3*5
yield func1, 42, 6*7
"""
)
colitems = modcol.collect()
assert len(colitems) == 1
gencol = colitems[0]
assert isinstance(gencol, pytest.Generator)
gencolitems = gencol.collect()
assert len(gencolitems) == 2
assert isinstance(gencolitems[0], pytest.Function)
assert isinstance(gencolitems[1], pytest.Function)
assert gencolitems[0].name == "[0]"
assert gencolitems[0].obj.__name__ == "func1"
def test_generative_methods(self, testdir):
modcol = testdir.getmodulecol(
"""
def func1(arg, arg2):
assert arg == arg2
class TestGenMethods(object):
def test_gen(self):
yield func1, 17, 3*5
yield func1, 42, 6*7
"""
)
gencol = modcol.collect()[0].collect()[0].collect()[0]
assert isinstance(gencol, pytest.Generator)
gencolitems = gencol.collect()
assert len(gencolitems) == 2
assert isinstance(gencolitems[0], pytest.Function)
assert isinstance(gencolitems[1], pytest.Function)
assert gencolitems[0].name == "[0]"
assert gencolitems[0].obj.__name__ == "func1"
def test_generative_functions_with_explicit_names(self, testdir):
modcol = testdir.getmodulecol(
"""
def func1(arg, arg2):
assert arg == arg2
def test_gen():
yield "seventeen", func1, 17, 3*5
yield "fortytwo", func1, 42, 6*7
"""
)
colitems = modcol.collect()
assert len(colitems) == 1
gencol = colitems[0]
assert isinstance(gencol, pytest.Generator)
gencolitems = gencol.collect()
assert len(gencolitems) == 2
assert isinstance(gencolitems[0], pytest.Function)
assert isinstance(gencolitems[1], pytest.Function)
assert gencolitems[0].name == "['seventeen']"
assert gencolitems[0].obj.__name__ == "func1"
assert gencolitems[1].name == "['fortytwo']"
assert gencolitems[1].obj.__name__ == "func1"
def test_generative_functions_unique_explicit_names(self, testdir):
# generative
modcol = testdir.getmodulecol(
"""
def func(): pass
def test_gen():
yield "name", func
yield "name", func
"""
)
colitems = modcol.collect()
assert len(colitems) == 1
gencol = colitems[0]
assert isinstance(gencol, pytest.Generator)
pytest.raises(ValueError, "gencol.collect()")
def test_generative_methods_with_explicit_names(self, testdir):
modcol = testdir.getmodulecol(
"""
def func1(arg, arg2):
assert arg == arg2
class TestGenMethods(object):
def test_gen(self):
yield "m1", func1, 17, 3*5
yield "m2", func1, 42, 6*7
"""
)
gencol = modcol.collect()[0].collect()[0].collect()[0]
assert isinstance(gencol, pytest.Generator)
gencolitems = gencol.collect()
assert len(gencolitems) == 2
assert isinstance(gencolitems[0], pytest.Function)
assert isinstance(gencolitems[1], pytest.Function)
assert gencolitems[0].name == "['m1']"
assert gencolitems[0].obj.__name__ == "func1"
assert gencolitems[1].name == "['m2']"
assert gencolitems[1].obj.__name__ == "func1"
def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir):
o = testdir.makepyfile(
"""
from __future__ import print_function
def test_generative_order_of_execution():
import py, pytest
test_list = []
expected_list = list(range(6))
def list_append(item):
test_list.append(item)
def assert_order_of_execution():
print('expected order', expected_list)
print('but got ', test_list)
assert test_list == expected_list
for i in expected_list:
yield list_append, i
yield assert_order_of_execution
"""
)
reprec = testdir.inline_run(o, SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 7
assert not skipped and not failed
def test_order_of_execution_generator_different_codeline(self, testdir):
o = testdir.makepyfile(
"""
from __future__ import print_function
def test_generative_tests_different_codeline():
import py, pytest
test_list = []
expected_list = list(range(3))
def list_append_2():
test_list.append(2)
def list_append_1():
test_list.append(1)
def list_append_0():
test_list.append(0)
def assert_order_of_execution():
print('expected order', expected_list)
print('but got ', test_list)
assert test_list == expected_list
yield list_append_0
yield list_append_1
yield list_append_2
yield assert_order_of_execution
"""
)
reprec = testdir.inline_run(o, SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 4
assert not skipped and not failed
def test_setupstate_is_preserved_134(self, testdir):
        # yield-based tests are messy with respect to setupstate because
# during collection they already invoke setup functions
# and then again when they are run. For now, we want to make sure
# that the old 1.3.4 behaviour is preserved such that all
# yielded functions all share the same "self" instance that
# has been used during collection.
o = testdir.makepyfile(
"""
setuplist = []
class TestClass(object):
def setup_method(self, func):
#print "setup_method", self, func
setuplist.append(self)
self.init = 42
def teardown_method(self, func):
self.init = None
def test_func1(self):
pass
def test_func2(self):
yield self.func2
yield self.func2
def func2(self):
assert self.init
def test_setuplist():
# once for test_func2 during collection
# once for test_func1 during test run
# once for test_func2 during test run
#print setuplist
assert len(setuplist) == 3, len(setuplist)
assert setuplist[0] == setuplist[2], setuplist
assert setuplist[1] != setuplist[2], setuplist
"""
)
reprec = testdir.inline_run(o, "-v", SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 4
assert not skipped and not failed
class TestFunction(object):
@pytest.fixture
def ignore_parametrized_marks_args(self):
"""Provides arguments to pytester.runpytest() to ignore the warning about marks being applied directly
to parameters.
"""
return ("-W", "ignore:Applying marks directly to parameters")
def test_getmodulecollector(self, testdir):
item = testdir.getitem("def test_func(): pass")
modcol = item.getparent(pytest.Module)
assert isinstance(modcol, pytest.Module)
assert hasattr(modcol.obj, "test_func")
@pytest.mark.filterwarnings("default")
def test_function_as_object_instance_ignored(self, testdir):
testdir.makepyfile(
"""
class A(object):
def __call__(self, tmpdir):
0/0
test_a = A()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"collected 0 items",
"*test_function_as_object_instance_ignored.py:2: "
"*cannot collect 'test_a' because it is not a function.",
]
)
def test_function_equality(self, testdir, tmpdir):
from _pytest.fixtures import FixtureManager
config = testdir.parseconfigure()
session = testdir.Session(config)
session._fixturemanager = FixtureManager(session)
def func1():
pass
def func2():
pass
f1 = pytest.Function(
name="name", parent=session, config=config, args=(1,), callobj=func1
)
assert f1 == f1
f2 = pytest.Function(name="name", config=config, callobj=func2, parent=session)
assert f1 != f2
def test_issue197_parametrize_emptyset(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize('arg', [])
def test_function(arg):
pass
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=1)
def test_single_tuple_unwraps_values(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize(('arg',), [(1,)])
def test_function(arg):
assert arg == 1
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_issue213_parametrize_value_no_equal(self, testdir):
testdir.makepyfile(
"""
import pytest
class A(object):
def __eq__(self, other):
raise ValueError("not possible")
@pytest.mark.parametrize('arg', [A()])
def test_function(arg):
assert arg.__class__.__name__ == "A"
"""
)
reprec = testdir.inline_run("--fulltrace")
reprec.assertoutcome(passed=1)
def test_parametrize_with_non_hashable_values(self, testdir):
"""Test parametrization with non-hashable values."""
testdir.makepyfile(
"""
archival_mapping = {
'1.0': {'tag': '1.0'},
'1.2.2a1': {'tag': 'release-1.2.2a1'},
}
import pytest
@pytest.mark.parametrize('key value'.split(),
archival_mapping.items())
def test_archival_to_version(key, value):
assert key in archival_mapping
assert value == archival_mapping[key]
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=2)
def test_parametrize_with_non_hashable_values_indirect(self, testdir):
"""Test parametrization with non-hashable values with indirect parametrization."""
testdir.makepyfile(
"""
archival_mapping = {
'1.0': {'tag': '1.0'},
'1.2.2a1': {'tag': 'release-1.2.2a1'},
}
import pytest
@pytest.fixture
def key(request):
return request.param
@pytest.fixture
def value(request):
return request.param
@pytest.mark.parametrize('key value'.split(),
archival_mapping.items(), indirect=True)
def test_archival_to_version(key, value):
assert key in archival_mapping
assert value == archival_mapping[key]
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=2)
def test_parametrize_overrides_fixture(self, testdir):
"""Test parametrization when parameter overrides existing fixture with same name."""
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def value():
return 'value'
@pytest.mark.parametrize('value',
['overridden'])
def test_overridden_via_param(value):
assert value == 'overridden'
@pytest.mark.parametrize('somevalue', ['overridden'])
def test_not_overridden(value, somevalue):
assert value == 'value'
assert somevalue == 'overridden'
@pytest.mark.parametrize('other,value', [('foo', 'overridden')])
def test_overridden_via_multiparam(other, value):
assert other == 'foo'
assert value == 'overridden'
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=3)
def test_parametrize_overrides_parametrized_fixture(self, testdir):
"""Test parametrization when parameter overrides existing parametrized fixture with same name."""
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[1, 2])
def value(request):
return request.param
@pytest.mark.parametrize('value',
['overridden'])
def test_overridden_via_param(value):
assert value == 'overridden'
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
def test_parametrize_overrides_indirect_dependency_fixture(self, testdir):
"""Test parametrization when parameter overrides a fixture that a test indirectly depends on"""
testdir.makepyfile(
"""
import pytest
fix3_instantiated = False
@pytest.fixture
def fix1(fix2):
return fix2 + '1'
@pytest.fixture
def fix2(fix3):
return fix3 + '2'
@pytest.fixture
def fix3():
global fix3_instantiated
fix3_instantiated = True
return '3'
@pytest.mark.parametrize('fix2', ['2'])
def test_it(fix1):
assert fix1 == '21'
assert not fix3_instantiated
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
@pytest.mark.filterwarnings("ignore:Applying marks directly to parameters")
def test_parametrize_with_mark(self, testdir):
items = testdir.getitems(
"""
import pytest
@pytest.mark.foo
@pytest.mark.parametrize('arg', [
1,
pytest.mark.bar(pytest.mark.baz(2))
])
def test_function(arg):
pass
"""
)
keywords = [item.keywords for item in items]
assert (
"foo" in keywords[0]
and "bar" not in keywords[0]
and "baz" not in keywords[0]
)
assert "foo" in keywords[1] and "bar" in keywords[1] and "baz" in keywords[1]
def test_function_equality_with_callspec(self, testdir, tmpdir):
items = testdir.getitems(
"""
import pytest
@pytest.mark.parametrize('arg', [1,2])
def test_function(arg):
pass
"""
)
assert items[0] != items[1]
assert not (items[0] == items[1])
def test_pyfunc_call(self, testdir):
item = testdir.getitem("def test_func(): raise ValueError")
config = item.config
class MyPlugin1(object):
def pytest_pyfunc_call(self, pyfuncitem):
raise ValueError
class MyPlugin2(object):
def pytest_pyfunc_call(self, pyfuncitem):
return True
config.pluginmanager.register(MyPlugin1())
config.pluginmanager.register(MyPlugin2())
config.hook.pytest_runtest_setup(item=item)
config.hook.pytest_pyfunc_call(pyfuncitem=item)
def test_multiple_parametrize(self, testdir):
modcol = testdir.getmodulecol(
"""
import pytest
@pytest.mark.parametrize('x', [0, 1])
@pytest.mark.parametrize('y', [2, 3])
def test1(x, y):
pass
"""
)
colitems = modcol.collect()
assert colitems[0].name == "test1[2-0]"
assert colitems[1].name == "test1[2-1]"
assert colitems[2].name == "test1[3-0]"
assert colitems[3].name == "test1[3-1]"
def test_issue751_multiple_parametrize_with_ids(self, testdir):
modcol = testdir.getmodulecol(
"""
import pytest
@pytest.mark.parametrize('x', [0], ids=['c'])
@pytest.mark.parametrize('y', [0, 1], ids=['a', 'b'])
class Test(object):
def test1(self, x, y):
pass
def test2(self, x, y):
pass
"""
)
colitems = modcol.collect()[0].collect()[0].collect()
assert colitems[0].name == "test1[a-c]"
assert colitems[1].name == "test1[b-c]"
assert colitems[2].name == "test2[a-c]"
assert colitems[3].name == "test2[b-c]"
def test_parametrize_skipif(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.skipif('True')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_skip_if(x):
assert x < 2
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *")
def test_parametrize_skip(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.skip('')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_skip(x):
assert x < 2
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *")
def test_parametrize_skipif_no_skip(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.skipif('False')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_skipif_no_skip(x):
assert x < 2
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 1 failed, 2 passed in *")
def test_parametrize_xfail(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.xfail('True')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_xfail(x):
assert x < 2
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 2 passed, 1 xfailed in *")
def test_parametrize_passed(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.xfail('True')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_xfail(x):
pass
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 2 passed, 1 xpassed in *")
def test_parametrize_xfail_passed(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.xfail('False')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_passed(x):
pass
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 3 passed in *")
def test_function_original_name(self, testdir):
items = testdir.getitems(
"""
import pytest
@pytest.mark.parametrize('arg', [1,2])
def test_func(arg):
pass
"""
)
assert [x.originalname for x in items] == ["test_func", "test_func"]
class TestSorting(object):
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0 # NOQA
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert fn != 3
assert fn != modcol
assert fn != [1, 2, 3]
assert [1, 2, 3] != fn
assert modcol != fn
def test_allow_sane_sorting_for_decorators(self, testdir):
modcol = testdir.getmodulecol(
"""
def dec(f):
g = lambda: f(2)
g.place_as = f
return g
def test_b(y):
pass
test_b = dec(test_b)
def test_a(y):
pass
test_a = dec(test_a)
"""
)
colitems = modcol.collect()
assert len(colitems) == 2
assert [item.name for item in colitems] == ["test_b", "test_a"]
class TestConftestCustomization(object):
def test_pytest_pycollect_module(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_pycollect_makemodule(path, parent):
if path.basename == "test_xyz.py":
return MyModule(path, parent)
"""
)
testdir.makepyfile("def test_some(): pass")
testdir.makepyfile(test_xyz="def test_func(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*<Module*test_pytest*", "*<MyModule*xyz*"])
def test_customized_pymakemodule_issue205_subdir(self, testdir):
b = testdir.mkdir("a").mkdir("b")
b.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makemodule():
outcome = yield
mod = outcome.get_result()
mod.obj.hello = "world"
"""
)
)
b.join("test_module.py").write(
textwrap.dedent(
"""\
def test_hello():
assert hello == "world"
"""
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_customized_pymakeitem(self, testdir):
b = testdir.mkdir("a").mkdir("b")
b.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem():
outcome = yield
if outcome.excinfo is None:
result = outcome.get_result()
if result:
for func in result:
func._some123 = "world"
"""
)
)
b.join("test_module.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture()
def obj(request):
return request.node._some123
def test_hello(obj):
assert obj == "world"
"""
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_pytest_pycollect_makeitem(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
pass
def pytest_pycollect_makeitem(collector, name, obj):
if name == "some":
return MyFunction(name, collector)
"""
)
testdir.makepyfile("def some(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyFunction*some*"])
def test_makeitem_non_underscore(self, testdir, monkeypatch):
modcol = testdir.getmodulecol("def _hello(): pass")
values = []
monkeypatch.setattr(
pytest.Module, "makeitem", lambda self, name, obj: values.append(name)
)
values = modcol.collect()
assert "_hello" not in values
def test_issue2369_collect_module_fileext(self, testdir):
"""Ensure we can collect files with weird file extensions as Python
modules (#2369)"""
# We'll implement a little finder and loader to import files containing
# Python source code whose file extension is ".narf".
testdir.makeconftest(
"""
import sys, os, imp
from _pytest.python import Module
class Loader(object):
def load_module(self, name):
return imp.load_source(name, name + ".narf")
class Finder(object):
def find_module(self, name, path=None):
if os.path.exists(name + ".narf"):
return Loader()
sys.meta_path.append(Finder())
def pytest_collect_file(path, parent):
if path.ext == ".narf":
return Module(path, parent)"""
)
testdir.makefile(
".narf",
"""\
def test_something():
assert 1 + 1 == 2""",
)
# Use runpytest_subprocess, since we're futzing with sys.meta_path.
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("*1 passed*")
def test_setup_only_available_in_subdir(testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
def pytest_runtest_setup(item):
assert item.fspath.purebasename == "test_in_sub1"
def pytest_runtest_call(item):
assert item.fspath.purebasename == "test_in_sub1"
def pytest_runtest_teardown(item):
assert item.fspath.purebasename == "test_in_sub1"
"""
)
)
sub2.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
def pytest_runtest_setup(item):
assert item.fspath.purebasename == "test_in_sub2"
def pytest_runtest_call(item):
assert item.fspath.purebasename == "test_in_sub2"
def pytest_runtest_teardown(item):
assert item.fspath.purebasename == "test_in_sub2"
"""
)
)
sub1.join("test_in_sub1.py").write("def test_1(): pass")
sub2.join("test_in_sub2.py").write("def test_2(): pass")
result = testdir.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
def test_modulecol_roundtrip(testdir):
modcol = testdir.getmodulecol("pass", withinit=False)
trail = modcol.nodeid
newcol = modcol.session.perform_collect([trail], genitems=0)[0]
assert modcol.name == newcol.name
class TestTracebackCutting(object):
def test_skip_simple(self):
excinfo = pytest.raises(pytest.skip.Exception, 'pytest.skip("xxx")')
assert excinfo.traceback[-1].frame.code.name == "skip"
assert excinfo.traceback[-1].ishidden()
def test_traceback_argsetup(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def hello(request):
raise ValueError("xyz")
"""
)
p = testdir.makepyfile("def test(hello): pass")
result = testdir.runpytest(p)
assert result.ret != 0
out = result.stdout.str()
assert "xyz" in out
assert "conftest.py:5: ValueError" in out
numentries = out.count("_ _ _") # separator for traceback entries
assert numentries == 0
result = testdir.runpytest("--fulltrace", p)
out = result.stdout.str()
assert "conftest.py:5: ValueError" in out
numentries = out.count("_ _ _ _") # separator for traceback entries
assert numentries > 3
def test_traceback_error_during_import(self, testdir):
testdir.makepyfile(
"""
x = 1
x = 2
x = 17
asd
"""
)
result = testdir.runpytest()
assert result.ret != 0
out = result.stdout.str()
assert "x = 1" not in out
assert "x = 2" not in out
result.stdout.fnmatch_lines([" *asd*", "E*NameError*"])
result = testdir.runpytest("--fulltrace")
out = result.stdout.str()
assert "x = 1" in out
assert "x = 2" in out
result.stdout.fnmatch_lines([">*asd*", "E*NameError*"])
def test_traceback_filter_error_during_fixture_collection(self, testdir):
"""integration test for issue #995.
"""
testdir.makepyfile(
"""
import pytest
def fail_me(func):
ns = {}
exec('def w(): raise ValueError("fail me")', ns)
return ns['w']
@pytest.fixture(scope='class')
@fail_me
def fail_fixture():
pass
def test_failing_fixture(fail_fixture):
pass
"""
)
result = testdir.runpytest()
assert result.ret != 0
out = result.stdout.str()
assert "INTERNALERROR>" not in out
result.stdout.fnmatch_lines(["*ValueError: fail me*", "* 1 error in *"])
def test_filter_traceback_generated_code(self):
"""test that filter_traceback() works with the fact that
py.code.Code.path attribute might return an str object.
In this case, one of the entries on the traceback was produced by
dynamically generated code.
See: https://bitbucket.org/pytest-dev/py/issues/71
This fixes #995.
"""
from _pytest.python import filter_traceback
try:
ns = {}
exec("def foo(): raise ValueError", ns)
ns["foo"]()
except ValueError:
_, _, tb = sys.exc_info()
tb = _pytest._code.Traceback(tb)
assert isinstance(tb[-1].path, str)
assert not filter_traceback(tb[-1])
def test_filter_traceback_path_no_longer_valid(self, testdir):
"""test that filter_traceback() works with the fact that
py.code.Code.path attribute might return an str object.
In this case, one of the files in the traceback no longer exists.
This fixes #1133.
"""
from _pytest.python import filter_traceback
testdir.syspathinsert()
testdir.makepyfile(
filter_traceback_entry_as_str="""
def foo():
raise ValueError
"""
)
try:
import filter_traceback_entry_as_str
filter_traceback_entry_as_str.foo()
except ValueError:
_, _, tb = sys.exc_info()
testdir.tmpdir.join("filter_traceback_entry_as_str.py").remove()
tb = _pytest._code.Traceback(tb)
assert isinstance(tb[-1].path, str)
assert filter_traceback(tb[-1])
class TestReportInfo(object):
def test_itemreport_reportinfo(self, testdir, linecomp):
testdir.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
def reportinfo(self):
return "ABCDE", 42, "custom"
def pytest_pycollect_makeitem(collector, name, obj):
if name == "test_func":
return MyFunction(name, parent=collector)
"""
)
item = testdir.getitem("def test_func(): pass")
item.config.pluginmanager.getplugin("runner")
assert item.location == ("ABCDE", 42, "custom")
def test_func_reportinfo(self, testdir):
item = testdir.getitem("def test_func(): pass")
fspath, lineno, modpath = item.reportinfo()
assert fspath == item.fspath
assert lineno == 0
assert modpath == "test_func"
def test_class_reportinfo(self, testdir):
modcol = testdir.getmodulecol(
"""
# lineno 0
class TestClass(object):
def test_hello(self): pass
"""
)
classcol = testdir.collect_by_name(modcol, "TestClass")
fspath, lineno, msg = classcol.reportinfo()
assert fspath == modcol.fspath
assert lineno == 1
assert msg == "TestClass"
@pytest.mark.filterwarnings(
"ignore:usage of Generator.Function is deprecated, please use pytest.Function instead"
)
def test_generator_reportinfo(self, testdir):
modcol = testdir.getmodulecol(
"""
# lineno 0
def test_gen():
def check(x):
assert x
yield check, 3
"""
)
gencol = testdir.collect_by_name(modcol, "test_gen")
fspath, lineno, modpath = gencol.reportinfo()
assert fspath == modcol.fspath
assert lineno == 1
assert modpath == "test_gen"
genitem = gencol.collect()[0]
fspath, lineno, modpath = genitem.reportinfo()
assert fspath == modcol.fspath
assert lineno == 2
assert modpath == "test_gen[0]"
"""
def test_func():
pass
def test_genfunc():
def check(x):
pass
yield check, 3
class TestClass(object):
def test_method(self):
pass
"""
def test_reportinfo_with_nasty_getattr(self, testdir):
# https://github.com/pytest-dev/pytest/issues/1204
modcol = testdir.getmodulecol(
"""
# lineno 0
class TestClass(object):
def __getattr__(self, name):
return "this is not an int"
def test_foo(self):
pass
"""
)
classcol = testdir.collect_by_name(modcol, "TestClass")
instance = classcol.collect()[0]
fspath, lineno, msg = instance.reportinfo()
def test_customized_python_discovery(testdir):
testdir.makeini(
"""
[pytest]
python_files=check_*.py
python_classes=Check
python_functions=check
"""
)
p = testdir.makepyfile(
"""
def check_simple():
pass
class CheckMyApp(object):
def check_meth(self):
pass
"""
)
p2 = p.new(basename=p.basename.replace("test", "check"))
p.move(p2)
result = testdir.runpytest("--collect-only", "-s")
result.stdout.fnmatch_lines(
["*check_customized*", "*check_simple*", "*CheckMyApp*", "*check_meth*"]
)
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
def test_customized_python_discovery_functions(testdir):
testdir.makeini(
"""
[pytest]
python_functions=_test
"""
)
testdir.makepyfile(
"""
def _test_underscore():
pass
"""
)
result = testdir.runpytest("--collect-only", "-s")
result.stdout.fnmatch_lines(["*_test_underscore*"])
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collector_attributes(testdir):
testdir.makeconftest(
"""
import pytest
def pytest_pycollect_makeitem(collector):
assert collector.Function == pytest.Function
assert collector.Class == pytest.Class
assert collector.Instance == pytest.Instance
assert collector.Module == pytest.Module
"""
)
testdir.makepyfile(
"""
def test_hello():
pass
"""
)
result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_customize_through_attributes(testdir):
testdir.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
pass
class MyInstance(pytest.Instance):
Function = MyFunction
class MyClass(pytest.Class):
Instance = MyInstance
def pytest_pycollect_makeitem(collector, name, obj):
if name.startswith("MyTestClass"):
return MyClass(name, parent=collector)
"""
)
testdir.makepyfile(
"""
class MyTestClass(object):
def test_hello(self):
pass
"""
)
result = testdir.runpytest("--collect-only", SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*MyClass*", "*MyFunction*test_hello*"])
def test_unorderable_types(testdir):
testdir.makepyfile(
"""
class TestJoinEmpty(object):
pass
def make_test():
class Test(object):
pass
Test.__name__ = "TestFoo"
return Test
TestFoo = make_test()
"""
)
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_collect_functools_partial(testdir):
"""
Test that collection of functools.partial object works, and arguments
to the wrapped functions are dealt correctly (see #811).
"""
testdir.makepyfile(
"""
import functools
import pytest
@pytest.fixture
def fix1():
return 'fix1'
@pytest.fixture
def fix2():
return 'fix2'
def check1(i, fix1):
assert i == 2
assert fix1 == 'fix1'
def check2(fix1, i):
assert i == 2
assert fix1 == 'fix1'
def check3(fix1, i, fix2):
assert i == 2
assert fix1 == 'fix1'
assert fix2 == 'fix2'
test_ok_1 = functools.partial(check1, i=2)
test_ok_2 = functools.partial(check1, i=2, fix1='fix1')
test_ok_3 = functools.partial(check1, 2)
test_ok_4 = functools.partial(check2, i=2)
test_ok_5 = functools.partial(check3, i=2)
test_ok_6 = functools.partial(check3, i=2, fix1='fix1')
test_fail_1 = functools.partial(check2, 2)
test_fail_2 = functools.partial(check3, 2)
"""
)
result = testdir.inline_run()
result.assertoutcome(passed=6, failed=2)
@pytest.mark.filterwarnings("default")
def test_dont_collect_non_function_callable(testdir):
"""Test for issue https://github.com/pytest-dev/pytest/issues/331
In this case an INTERNALERROR occurred trying to report the failure of
    a test like this one because pytest failed to get the source lines.
"""
testdir.makepyfile(
"""
class Oh(object):
def __call__(self):
pass
test_a = Oh()
def test_real():
pass
"""
)
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
[
"*collected 1 item*",
"*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
"*1 passed, 1 warnings in *",
]
)
def test_class_injection_does_not_break_collection(testdir):
"""Tests whether injection during collection time will terminate testing.
In this case the error should not occur if the TestClass itself
is modified during collection time, and the original method list
is still used for collection.
"""
testdir.makeconftest(
"""
from test_inject import TestClass
def pytest_generate_tests(metafunc):
TestClass.changed_var = {}
"""
)
testdir.makepyfile(
test_inject='''
class TestClass(object):
def test_injection(self):
"""Test being parametrized."""
pass
'''
)
result = testdir.runpytest()
assert (
"RuntimeError: dictionary changed size during iteration"
not in result.stdout.str()
)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_syntax_error_with_non_ascii_chars(testdir):
"""Fix decoding issue while formatting SyntaxErrors during collection (#578)
"""
testdir.makepyfile(
u"""
# -*- coding: UTF-8 -*-
☃
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*ERROR collecting*", "*SyntaxError*", "*1 error in*"])
def test_skip_duplicates_by_default(testdir):
"""Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)
Ignore duplicate directories.
"""
a = testdir.mkdir("a")
fh = a.join("test_a.py")
fh.write(
textwrap.dedent(
"""\
import pytest
def test_real():
pass
"""
)
)
result = testdir.runpytest(a.strpath, a.strpath)
result.stdout.fnmatch_lines(["*collected 1 item*"])
def test_keep_duplicates(testdir):
"""Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)
Use --keep-duplicates to collect tests from duplicate directories.
"""
a = testdir.mkdir("a")
fh = a.join("test_a.py")
fh.write(
textwrap.dedent(
"""\
import pytest
def test_real():
pass
"""
)
)
result = testdir.runpytest("--keep-duplicates", a.strpath, a.strpath)
result.stdout.fnmatch_lines(["*collected 2 item*"])
def test_package_collection_infinite_recursion(testdir):
testdir.copy_example("collect/package_infinite_recursion")
result = testdir.runpytest()
result.stdout.fnmatch_lines("*1 passed*")
def test_package_collection_init_given_as_argument(testdir):
"""Regression test for #3749"""
p = testdir.copy_example("collect/package_init_given_as_arg")
result = testdir.runpytest(p / "pkg" / "__init__.py")
result.stdout.fnmatch_lines("*1 passed*")
def test_package_with_modules(testdir):
"""
.
└── root
├── __init__.py
├── sub1
│ ├── __init__.py
│ └── sub1_1
│ ├── __init__.py
│ └── test_in_sub1.py
└── sub2
            └── sub2
└── test_in_sub2.py
"""
root = testdir.mkpydir("root")
sub1 = root.mkdir("sub1")
sub1.ensure("__init__.py")
sub1_test = sub1.mkdir("sub1_1")
sub1_test.ensure("__init__.py")
sub2 = root.mkdir("sub2")
sub2_test = sub2.mkdir("sub2")
sub1_test.join("test_in_sub1.py").write("def test_1(): pass")
sub2_test.join("test_in_sub2.py").write("def test_2(): pass")
# Execute from .
result = testdir.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
# Execute from . with one argument "root"
result = testdir.runpytest("-v", "-s", "root")
result.assert_outcomes(passed=2)
# Chdir into package's root and execute with no args
root.chdir()
result = testdir.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
def test_package_ordering(testdir):
"""
.
└── root
├── Test_root.py
├── __init__.py
├── sub1
│ ├── Test_sub1.py
│ └── __init__.py
└── sub2
            └── sub2
└── test_sub2.py
"""
testdir.makeini(
"""
[pytest]
python_files=*.py
"""
)
root = testdir.mkpydir("root")
sub1 = root.mkdir("sub1")
sub1.ensure("__init__.py")
sub2 = root.mkdir("sub2")
sub2_test = sub2.mkdir("sub2")
root.join("Test_root.py").write("def test_1(): pass")
sub1.join("Test_sub1.py").write("def test_2(): pass")
sub2_test.join("test_sub2.py").write("def test_3(): pass")
# Execute from .
result = testdir.runpytest("-v", "-s")
result.assert_outcomes(passed=3)
|
import os
import io
import textwrap
import pytest
import libconf
CURDIR = os.path.abspath(os.path.dirname(__file__))
# Tests for load() and loads()
##############################
def test_loads_maintains_dict_order():
config = libconf.loads(u'''l: 1; i: 5; b: 3; c: 1; o: 9; n: 0; f: 7;''')
assert ''.join(config.keys()) == 'libconf'
def test_example_config():
example_file = os.path.join(CURDIR, 'test_e2e.cfg')
with io.open(example_file, 'r', encoding='utf-8') as f:
c = libconf.load(f, includedir=CURDIR)
assert c.appconfig.version == 37
assert c.appconfig['version-long'] == 370000000000000
assert c.appconfig['version-autolong'] == 370000000000000
assert c.appconfig.name == "libconf"
assert c.appconfig.delimiter == False
assert c.appconfig.works == True
assert c.appconfig.allows == 0xA
assert c.appconfig['eol-comments'] == 0xA
assert c.appconfig.list == (3, "chicken", (), dict(group=True))
assert c.appconfig.sub_group.sub_sub_group.yes == "yes"
assert c.appconfig.sub_group.sub_sub_group['include-works'] == True
assert c.appconfig.sub_group.arr == [1, 2]
    assert c.appconfig.sub_group.str == "Strings are joined despite comments"
def test_string_merging():
# Unicode characters are supported, \u escapes not.
input = u"""s = "abc\x21def\n" /* comment */ "newline-" # second comment
"here \u2603 \\u2603";"""
assert libconf.loads(input).s == u"abc\x21def\nnewline-here \u2603 \\u2603"
def test_nonexisting_include_raises():
input = u'''@include "/NON_EXISTING_FILE/DOESNT_EXIST"'''
with pytest.raises(libconf.ConfigParseError):
libconf.loads(input)
def test_circular_include_raises():
circular_file = os.path.join(CURDIR, 'circular1.cfg')
with io.open(circular_file, 'r', encoding='utf-8') as f:
with pytest.raises(libconf.ConfigParseError):
libconf.load(f, includedir=CURDIR)
def test_loads_of_bytes_throws():
with pytest.raises(TypeError) as excinfo:
libconf.loads(b'')
assert 'libconf.loads' in str(excinfo.value)
def test_load_of_BytesIO_throws():
with pytest.raises(TypeError) as excinfo:
libconf.load(io.BytesIO(b'a: "37";'))
assert 'libconf.load' in str(excinfo.value)
def test_lists_support_trailing_comma():
config = libconf.loads(u'''a: (1, 2, 3,);''')
assert config.a == (1, 2, 3)
def test_arrays_support_trailing_comma():
config = libconf.loads(u'''a: [1, 2, 3,];''')
assert config.a == [1, 2, 3]
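def test_list_vs_array_types():
    # Editor's sketch based on the two tests above: parenthesized libconfig
    # lists load as Python tuples, while bracketed arrays load as lists.
    config = libconf.loads(u'''l: (1, 2); a: [1, 2];''')
    assert isinstance(config.l, tuple)
    assert isinstance(config.a, list)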
# Tests for dump() and dumps()
##############################
def test_dump_special_characters():
d = {'a': ({'b': [u"\x00 \n \x7f abc \xff \u2603"]},)}
s = libconf.dumps(d)
expected = textwrap.dedent(u'''\
a =
(
{
b =
[
"\\x00 \\n \\x7f abc \xff \u2603"
];
}
);
''')
assert s == expected
# Tests for dump-load round trips
#################################
def test_dumps_roundtrip():
example_file = os.path.join(CURDIR, 'test_e2e.cfg')
with io.open(example_file, 'r', encoding='utf-8') as f:
c = libconf.load(f, includedir=CURDIR)
c_dumped = libconf.loads(libconf.dumps(c))
assert c == c_dumped
def test_dump_roundtrip():
example_file = os.path.join(CURDIR, 'test_e2e.cfg')
with io.open(example_file, 'r', encoding='utf-8') as f:
c = libconf.load(f, includedir=CURDIR)
with io.StringIO() as f:
libconf.dump(c, f)
f.seek(0)
c_dumped = libconf.load(f, includedir=CURDIR)
assert c == c_dumped
def test_dump_special_characters_roundtrip():
d = {'a': ({'b': [u"\x00 \n \x7f abc \xff \u2603"]},)}
d2 = libconf.loads(libconf.dumps(d))
assert d == d2
def test_roundtrip_preserves_config_entry_order():
config = libconf.loads(u'''l: 1; i: 5; b: 3; c: 1; o: 9; n: 0; f: 7;''')
dumped = libconf.dumps(config)
reloaded = libconf.loads(dumped)
assert ''.join(reloaded.keys()) == 'libconf'
def test_roundtrip_of_int64_values():
s = u'a=2L;'
s2 = libconf.dumps(libconf.loads(s))
assert s == s2.replace(' ', '').replace('\n', '')
|
#! /usr/bin/python
# by edward silher for collecting gps data in conjunction with AIS data
# edwardsihler@ursusonline.net
import serial
import subprocess
import os
import sys
from gps import *
from time import *
import time
import threading
gpsd = None  # setting the global variable
os.system('clear') #clear the terminal (optional)
class GpsPoller(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
global gpsd #bring it in scope
gpsd = gps(mode=WATCH_ENABLE) #starting the stream of info
self.current_value = None
self.running = True #setting the thread running to true
def run(self):
global gpsd
        while self.running:
            # continue to loop and grab EACH set of gpsd info to clear the buffer
            gpsd.next()
# def utc(self):
# return gpsd.utc
def main(argv):
    # find the port with the AIS receiver on it
usbPort = subprocess.check_output("dmesg | grep \"FTDI.*now attached to ttyUSB\"", shell=True)
i = usbPort.rfind("ttyUSB")
aisPort = '/dev/' + usbPort[i:].strip()
#aisPort = '/dev/ttyUSB0'
ais = serial.Serial(aisPort, 38400, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE)
global gpsp
gpsp = GpsPoller() # create the thread
try:
gpsp.start() # start it up
while True:
#It may take a second or two to get good data
#print gpsd.fix.latitude,', ',gpsd.fix.longitude,' Time: ',gpsd.utc
#os.system('clear')
msg = ''
msg = str(gpsd.utc)
msg += ", " + str(gpsd.fix.latitude)
msg += ", " + str(gpsd.fix.longitude )
print (msg)
#try:
# msg += ", " + ais.readline().strip()
# print(msg)
#except serial.SerialException:
# print(msg)
# time.sleep(5)
#print (msg)
#time.sleep(5) #set to whatever
except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
print "\nKilling Thread..."
gpsp.running = False
gpsp.join() # wait for the thread to finish what it's doing
print "Done.\nExiting."
if __name__ == '__main__':
main(sys.argv[1:])
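# Expected output, one comma-separated line per GPS poll (illustrative
# values; gpsd.utc is an ISO-8601 timestamp):
#
#   2016-01-01T00:00:00.000Z, 47.6062, -122.3321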
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import port as neutron_port
from heat.engine.resources.openstack.neutron import subnet
from heat.engine.resources.openstack.nova import server_network_mixin
from heat.engine.resources import scheduler_hints as sh
from heat.engine.resources import server_base
from heat.engine import support
from heat.engine import translation
from heat.rpc import api as rpc_api
cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config')
cfg.CONF.import_opt('default_user_data_format', 'heat.common.config')
LOG = logging.getLogger(__name__)
NOVA_MICROVERSIONS = (MICROVERSION_TAGS, MICROVERSION_STR_NETWORK,
MICROVERSION_NIC_TAGS) = ('2.26', '2.37', '2.42')
class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
server_network_mixin.ServerNetworkMixin):
"""A resource for managing Nova instances.
A Server resource manages the running virtual machine instance within an
OpenStack cloud.
"""
PROPERTIES = (
NAME, IMAGE, BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2,
FLAVOR, FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME,
ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS,
SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA,
RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY,
ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT, USER_DATA_UPDATE_POLICY,
TAGS, DEPLOYMENT_SWIFT_DATA
) = (
'name', 'image', 'block_device_mapping', 'block_device_mapping_v2',
'flavor', 'flavor_update_policy', 'image_update_policy', 'key_name',
'admin_user', 'availability_zone', 'security_groups', 'networks',
'scheduler_hints', 'metadata', 'user_data_format', 'user_data',
'reservation_id', 'config_drive', 'diskConfig', 'personality',
'admin_pass', 'software_config_transport', 'user_data_update_policy',
'tags', 'deployment_swift_data'
)
_BLOCK_DEVICE_MAPPING_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name', 'volume_id',
'snapshot_id',
'volume_size',
'delete_on_termination',
)
_BLOCK_DEVICE_MAPPING_V2_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME,
BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_IMAGE_ID,
BLOCK_DEVICE_MAPPING_IMAGE,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_SWAP_SIZE,
BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
BLOCK_DEVICE_MAPPING_DISK_BUS,
BLOCK_DEVICE_MAPPING_BOOT_INDEX,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE,
BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT,
) = (
'device_name',
'volume_id',
'image_id',
'image',
'snapshot_id',
'swap_size',
'device_type',
'disk_bus',
'boot_index',
'volume_size',
'delete_on_termination',
'ephemeral_size',
'ephemeral_format'
)
_NETWORK_KEYS = (
NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT,
NETWORK_SUBNET, NETWORK_PORT_EXTRA, NETWORK_FLOATING_IP,
ALLOCATE_NETWORK, NIC_TAG,
) = (
'uuid', 'network', 'fixed_ip', 'port',
'subnet', 'port_extra_properties', 'floating_ip',
'allocate_network', 'tag',
)
_IFACE_MANAGED_KEYS = (NETWORK_PORT, NETWORK_ID,
NETWORK_FIXED_IP, NETWORK_SUBNET)
_SOFTWARE_CONFIG_FORMATS = (
HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG
) = (
'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG'
)
_SOFTWARE_CONFIG_TRANSPORTS = (
POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE
) = (
'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE'
)
_ALLOCATE_TYPES = (
NETWORK_NONE, NETWORK_AUTO,
) = (
'none', 'auto',
)
_DEPLOYMENT_SWIFT_DATA_KEYS = (
CONTAINER, OBJECT
) = (
'container', 'object',
)
ATTRIBUTES = (
NAME_ATTR, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS,
INSTANCE_NAME, ACCESSIPV4, ACCESSIPV6, CONSOLE_URLS, TAGS_ATTR,
OS_COLLECT_CONFIG
) = (
'name', 'addresses', 'networks', 'first_address',
'instance_name', 'accessIPv4', 'accessIPv6', 'console_urls', 'tags',
'os_collect_config'
)
# Image Statuses
IMAGE_STATUSES = (IMAGE_ACTIVE, IMAGE_ERROR,
IMAGE_DELETED) = ('active', 'error', 'deleted')
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Server name.'),
update_allowed=True
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the image to boot with.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
update_allowed=True
),
BLOCK_DEVICE_MAPPING: properties.Schema(
properties.Schema.LIST,
_('Block device mappings for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
required=True
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to boot from. Only one '
'of volume_id or snapshot_id should be '
'provided.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the volume, in GB. It is safe to '
'leave this blank and have the Compute service '
'infer the size.')
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
)
),
BLOCK_DEVICE_MAPPING_V2: properties.Schema(
properties.Schema.LIST,
_('Block device mappings v2 for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
                        _('The ID of a volume to attach; it may be used as '
                          'either a boot or a non-boot device for the '
                          'server.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the image to create a volume from.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='9.0.0',
message=_('Use property %s.') %
BLOCK_DEVICE_MAPPING_IMAGE,
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='7.0.0',
previous_status=support.SupportStatus(
version='5.0.0')
)
),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
BLOCK_DEVICE_MAPPING_IMAGE: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the image '
'to create a volume from.'),
support_status=support.SupportStatus(version='7.0.0'),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_SWAP_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the swap, in MB.')
),
BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the local ephemeral block device, '
'in GB.'),
support_status=support.SupportStatus(version='8.0.0'),
constraints=[constraints.Range(min=1)]
),
BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT: properties.Schema(
properties.Schema.STRING,
                        _('The format of the local ephemeral block device. '
                          'If no format is specified, the default value '
                          'defined in the nova configuration file is used.'),
constraints=[
constraints.AllowedValues(['ext2', 'ext3', 'ext4',
'xfs', 'ntfs'])
],
support_status=support.SupportStatus(version='8.0.0')
),
BLOCK_DEVICE_MAPPING_DEVICE_TYPE: properties.Schema(
properties.Schema.STRING,
                        _('Device type: currently only disk and cdrom are '
                          'distinguished.'),
constraints=[
constraints.AllowedValues(['cdrom', 'disk']),
],
),
BLOCK_DEVICE_MAPPING_DISK_BUS: properties.Schema(
properties.Schema.STRING,
                        _('Bus of the device: the hypervisor driver chooses '
                          'a suitable default if omitted.'),
constraints=[
constraints.AllowedValues(['ide', 'lame_bus',
'scsi', 'usb',
'virtio']),
],
),
BLOCK_DEVICE_MAPPING_BOOT_INDEX: properties.Schema(
properties.Schema.INTEGER,
_('Integer used for ordering the boot disks. If '
'it is not specified, value "0" will be set '
'for bootable sources (volume, snapshot, image); '
'value "-1" will be set for non-bootable sources.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
                        _('Size of the block device in GB. If it is omitted, '
                          'the hypervisor driver calculates the size.'),
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
),
support_status=support.SupportStatus(version='2015.1')
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the flavor to boot onto.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
FLAVOR_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply a flavor update; either by requesting '
'a server resize or by replacing the entire server.'),
default='RESIZE',
constraints=[
constraints.AllowedValues(['RESIZE', 'REPLACE']),
],
update_allowed=True
),
IMAGE_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply an image-id update; either by '
'requesting a server rebuild or by replacing '
'the entire server.'),
default='REBUILD',
constraints=[
constraints.AllowedValues(['REBUILD', 'REPLACE',
'REBUILD_PRESERVE_EPHEMERAL']),
],
update_allowed=True
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keypair to inject into the server.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
]
),
ADMIN_USER: properties.Schema(
properties.Schema.STRING,
_('Name of the administrative user to use on the server.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('The default cloud-init user set up for each image '
'(e.g. "ubuntu" for Ubuntu 12.04+, "fedora" for '
'Fedora 19+ and "cloud-user" for CentOS/RHEL 6.5).'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.1',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Name of the availability zone for server placement.')
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('List of security group names or IDs. Cannot be used if '
'neutron ports are associated with this server; assign '
'security groups to the ports instead.'),
default=[]
),
NETWORKS: properties.Schema(
properties.Schema.LIST,
            _('An ordered list of NICs to be added to this server, with '
              'information about connected networks, fixed IPs, ports, '
              'etc.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
NETWORK_UUID: properties.Schema(
properties.Schema.STRING,
_('ID of network to create a port on.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % NETWORK_ID,
version='2014.1'
)
),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('Name or ID of network to create a port on.'),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
ALLOCATE_NETWORK: properties.Schema(
properties.Schema.STRING,
                        _('A special string value for network allocation. '
                          'If "auto", a network already available to the '
                          'project is used, or one is created automatically '
                          'if none exists; if "none", no networking is '
                          'allocated for the created server. Supported by '
                          'the Nova API since version "2.37". This property '
                          'can not be used with other network keys.'),
support_status=support.SupportStatus(version='9.0.0'),
constraints=[
constraints.AllowedValues(
[NETWORK_NONE, NETWORK_AUTO])
],
update_allowed=True,
),
NETWORK_FIXED_IP: properties.Schema(
properties.Schema.STRING,
_('Fixed IP address to specify for the port '
'created on the requested network.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
NETWORK_PORT: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port to associate with this '
'server.'),
constraints=[
constraints.CustomConstraint('neutron.port')
]
),
NETWORK_PORT_EXTRA: properties.Schema(
properties.Schema.MAP,
                        _('Dict of extra properties for the port. Used only '
                          'if the port property is not specified when '
                          'creating the port.'),
schema=neutron_port.Port.extra_properties_schema,
support_status=support.SupportStatus(version='6.0.0')
),
NETWORK_SUBNET: properties.Schema(
properties.Schema.STRING,
                        _('Subnet in which to allocate the IP address for '
                          'the port, used when creating the port from '
                          'derived properties. If the subnet is specified, '
                          'the network property becomes optional.'),
support_status=support.SupportStatus(version='5.0.0')
),
NETWORK_FLOATING_IP: properties.Schema(
properties.Schema.STRING,
_('ID of the floating IP to associate.'),
support_status=support.SupportStatus(version='6.0.0')
),
NIC_TAG: properties.Schema(
properties.Schema.STRING,
_('Port tag. Heat ignores any update on this property '
'as nova does not support it.'),
support_status=support.SupportStatus(version='9.0.0')
)
},
),
update_allowed=True
),
SCHEDULER_HINTS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key-value pairs specified by the client to help '
'boot a server.')
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key/value metadata to store for this server. Both '
'keys and values must be 255 characters or less. Non-string '
'values will be serialized to JSON (and the serialized '
'string must be 255 characters or less).'),
update_allowed=True,
default={}
),
USER_DATA_FORMAT: properties.Schema(
properties.Schema.STRING,
_('How the user_data should be formatted for the server. For '
'HEAT_CFNTOOLS, the user_data is bundled as part of the '
'heat-cfntools cloud-init boot configuration data. For RAW '
'the user_data is passed to Nova unmodified. '
'For SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.'),
default=cfg.CONF.default_user_data_format,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
]
),
SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
properties.Schema.STRING,
_('How the server should receive the metadata required for '
'software configuration. POLL_SERVER_CFN will allow calls to '
'the cfn API action DescribeStackResource authenticated with '
'the provided keypair. POLL_SERVER_HEAT will allow calls to '
'the Heat API resource-show using the provided keystone '
'credentials. POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling. ZAQAR_MESSAGE will '
'create a dedicated zaqar queue and post the metadata '
'for polling.'),
default=cfg.CONF.default_software_config_transport,
update_allowed=True,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
]
),
USER_DATA_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply a user_data update; either by '
'ignoring it or by replacing the entire server.'),
default='REPLACE',
constraints=[
constraints.AllowedValues(['REPLACE', 'IGNORE']),
],
support_status=support.SupportStatus(version='6.0.0'),
update_allowed=True
),
USER_DATA: properties.Schema(
properties.Schema.STRING,
_('User data script to be executed by cloud-init. Changes cause '
'replacement of the resource by default, but can be ignored '
'altogether by setting the `user_data_update_policy` property.'),
default='',
update_allowed=True
),
RESERVATION_ID: properties.Schema(
properties.Schema.STRING,
_('A UUID for the set of servers being requested.')
),
CONFIG_DRIVE: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, enable config drive on the server.')
),
DISK_CONFIG: properties.Schema(
properties.Schema.STRING,
_('Control how the disk is partitioned when the server is '
'created.'),
constraints=[
constraints.AllowedValues(['AUTO', 'MANUAL']),
]
),
PERSONALITY: properties.Schema(
properties.Schema.MAP,
_('A map of files to create/overwrite on the server upon boot. '
'Keys are file names and values are the file contents.'),
default={}
),
ADMIN_PASS: properties.Schema(
properties.Schema.STRING,
_('The administrator password for the server.'),
update_allowed=True
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('Server tags. Supported since client version 2.26.'),
support_status=support.SupportStatus(version='8.0.0'),
schema=properties.Schema(properties.Schema.STRING),
update_allowed=True
),
DEPLOYMENT_SWIFT_DATA: properties.Schema(
properties.Schema.MAP,
_('Swift container and object to use for storing deployment data '
'for the server resource. The parameter is a map value '
'with the keys "container" and "object", and the values '
'are the corresponding container and object names. The '
'software_config_transport parameter must be set to '
'POLL_TEMP_URL for swift to be used. If not specified, '
'and software_config_transport is set to POLL_TEMP_URL, a '
'container will be automatically created from the resource '
'name, and the object name will be a generated uuid.'),
support_status=support.SupportStatus(version='9.0.0'),
default={},
update_allowed=True,
schema={
CONTAINER: properties.Schema(
properties.Schema.STRING,
_('Name of the container.'),
constraints=[
constraints.Length(min=1)
]
),
OBJECT: properties.Schema(
properties.Schema.STRING,
_('Name of the object.'),
constraints=[
constraints.Length(min=1)
]
)
}
)
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name of the server.'),
type=attributes.Schema.STRING
),
ADDRESSES: attributes.Schema(
            _('A dict of all network addresses with corresponding port_id '
              'and subnets. Each network will have two keys in the dict: '
              'the network name and the network id. The port ID may be '
              'obtained through the following expression: ``{get_attr: '
              '[<server>, addresses, <network name_or_id>, 0, port]}``. '
              'The subnets may be obtained through the following '
              'expression: ``{get_attr: [<server>, addresses, <network '
              'name_or_id>, 0, subnets]}``. The network may be obtained '
              'through the following expression: ``{get_attr: [<server>, '
              'addresses, <network name_or_id>, 0, network]}``.'),
type=attributes.Schema.MAP,
support_status=support.SupportStatus(
version='11.0.0',
status=support.SUPPORTED,
message=_('The attribute was extended to include subnets and '
'network with version 11.0.0.'),
previous_status=support.SupportStatus(
status=support.SUPPORTED
)
)
),
NETWORKS_ATTR: attributes.Schema(
            _('A dict of assigned network addresses of the form: '
              '{"public": [ip1, ip2...], "private": [ip3, ip4], '
              '"public_uuid": [ip1, ip2...], "private_uuid": [ip3, ip4]}. '
              'Each network will have two keys in the dict: the network '
              'name and the network id.'),
type=attributes.Schema.MAP
),
FIRST_ADDRESS: attributes.Schema(
_('Convenience attribute to fetch the first assigned network '
'address, or an empty string if nothing has been assigned at '
'this time. Result may not be predictable if the server has '
'addresses from more than one network.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use the networks attribute instead of '
'first_address. For example: "{get_attr: '
'[<server name>, networks, <network name>, 0]}"'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
INSTANCE_NAME: attributes.Schema(
_('AWS compatible instance name.'),
type=attributes.Schema.STRING
),
ACCESSIPV4: attributes.Schema(
_('The manually assigned alternative public IPv4 address '
'of the server.'),
type=attributes.Schema.STRING
),
ACCESSIPV6: attributes.Schema(
_('The manually assigned alternative public IPv6 address '
'of the server.'),
type=attributes.Schema.STRING
),
CONSOLE_URLS: attributes.Schema(
_("URLs of server's consoles. "
"To get a specific console type, the requested type "
"can be specified as parameter to the get_attr function, "
"e.g. get_attr: [ <server>, console_urls, novnc ]. "
"Currently supported types are "
"novnc, xvpvnc, spice-html5, rdp-html5, serial and webmks."),
support_status=support.SupportStatus(version='2015.1'),
type=attributes.Schema.MAP
),
TAGS_ATTR: attributes.Schema(
_('Tags from the server. Supported since client version 2.26.'),
support_status=support.SupportStatus(version='8.0.0'),
type=attributes.Schema.LIST
),
OS_COLLECT_CONFIG: attributes.Schema(
_('The os-collect-config configuration for the server\'s local '
'agent to be configured to connect to Heat to retrieve '
'deployment data.'),
support_status=support.SupportStatus(version='9.0.0'),
type=attributes.Schema.MAP,
cache_mode=attributes.Schema.CACHE_NONE
),
}
default_client_name = 'nova'
def translation_rules(self, props):
neutron_client_plugin = self.client_plugin('neutron')
glance_client_plugin = self.client_plugin('glance')
rules = [
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
translation_path=[self.NETWORKS, self.NETWORK_ID],
value_name=self.NETWORK_UUID),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.FLAVOR],
client_plugin=self.client_plugin('nova'),
finder='find_flavor_by_name_or_id'),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.IMAGE],
client_plugin=glance_client_plugin,
finder='find_image_by_name_or_id'),
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
translation_path=[self.BLOCK_DEVICE_MAPPING_V2,
self.BLOCK_DEVICE_MAPPING_IMAGE],
value_name=self.BLOCK_DEVICE_MAPPING_IMAGE_ID),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.BLOCK_DEVICE_MAPPING_V2,
self.BLOCK_DEVICE_MAPPING_IMAGE],
client_plugin=glance_client_plugin,
finder='find_image_by_name_or_id'),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.NETWORKS, self.NETWORK_ID],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_NETWORK),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.NETWORKS, self.NETWORK_SUBNET],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_SUBNET),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
translation_path=[self.NETWORKS, self.NETWORK_PORT],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_PORT)
]
return rules
def __init__(self, name, json_snippet, stack):
super(Server, self).__init__(name, json_snippet, stack)
if self.user_data_software_config():
self._register_access_key()
self.default_collectors = ['ec2']
def _config_drive(self):
# This method is overridden by the derived CloudServer resource
return self.properties[self.CONFIG_DRIVE]
def user_data_raw(self):
return self.properties[self.USER_DATA_FORMAT] == self.RAW
def user_data_software_config(self):
return self.properties[
self.USER_DATA_FORMAT] == self.SOFTWARE_CONFIG
def get_software_config(self, ud_content):
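        # If ud_content is a software config UUID, return the stored config;
        # a NotFound from the RPC call is swallowed by the context manager,
        # so we fall through and return the content unchanged.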
with self.rpc_client().ignore_error_by_name('NotFound'):
sc = self.rpc_client().show_software_config(
self.context, ud_content)
return sc[rpc_api.SOFTWARE_CONFIG_CONFIG]
return ud_content
def handle_create(self):
security_groups = self.properties[self.SECURITY_GROUPS]
user_data_format = self.properties[self.USER_DATA_FORMAT]
ud_content = self.properties[self.USER_DATA]
if self.user_data_software_config() or self.user_data_raw():
if uuidutils.is_uuid_like(ud_content):
# attempt to load the userdata from software config
ud_content = self.get_software_config(ud_content)
metadata = self.metadata_get(True) or {}
if self.user_data_software_config():
self._create_transport_credentials(self.properties)
self._populate_deployments_metadata(metadata, self.properties)
userdata = self.client_plugin().build_userdata(
metadata,
ud_content,
instance_user=None,
user_data_format=user_data_format)
availability_zone = self.properties[self.AVAILABILITY_ZONE]
instance_meta = self.properties[self.METADATA]
if instance_meta:
instance_meta = self.client_plugin().meta_serialize(
instance_meta)
scheduler_hints = self._scheduler_hints(
self.properties[self.SCHEDULER_HINTS])
nics = self._build_nics(self.properties[self.NETWORKS],
security_groups=security_groups)
block_device_mapping = self._build_block_device_mapping(
self.properties[self.BLOCK_DEVICE_MAPPING])
block_device_mapping_v2 = self._build_block_device_mapping_v2(
self.properties[self.BLOCK_DEVICE_MAPPING_V2])
reservation_id = self.properties[self.RESERVATION_ID]
disk_config = self.properties[self.DISK_CONFIG]
admin_pass = self.properties[self.ADMIN_PASS] or None
personality_files = self.properties[self.PERSONALITY]
key_name = self.properties[self.KEY_NAME]
flavor = self.properties[self.FLAVOR]
image = self.properties[self.IMAGE]
server = None
try:
server = self.client().servers.create(
name=self._server_name(),
image=image,
flavor=flavor,
key_name=key_name,
security_groups=security_groups,
userdata=userdata,
meta=instance_meta,
scheduler_hints=scheduler_hints,
nics=nics,
availability_zone=availability_zone,
block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
reservation_id=reservation_id,
config_drive=self._config_drive(),
disk_config=disk_config,
files=personality_files,
admin_pass=admin_pass)
finally:
# Avoid a race condition where the thread could be canceled
# before the ID is stored
if server is not None:
self.resource_id_set(server.id)
return server.id
def check_create_complete(self, server_id):
check = self.client_plugin()._check_active(server_id)
if check:
if self.properties[self.TAGS]:
self._update_server_tags(self.properties[self.TAGS])
self.store_external_ports()
return check
def _update_server_tags(self, tags):
server = self.client().servers.get(self.resource_id)
self.client().servers.set_tags(server, tags)
def handle_check(self):
server = self.client().servers.get(self.resource_id)
status = self.client_plugin().get_status(server)
checks = [{'attr': 'status', 'expected': 'ACTIVE', 'current': status}]
self._verify_check_conditions(checks)
def get_live_resource_data(self):
try:
server = self.client().servers.get(self.resource_id)
server_data = server.to_dict()
active = self.client_plugin()._check_active(server)
if not active:
                # The specific exception type does not matter here; the
                # resource's update method just logs it as a warning.
raise exception.Error(_('Server %s is not '
'in ACTIVE state') % self.name)
except Exception as ex:
if self.client_plugin().is_not_found(ex):
raise exception.EntityNotFound(entity='Resource',
name=self.name)
raise
if self.client_plugin().is_version_supported(MICROVERSION_TAGS):
tag_server = self.client().servers.get(self.resource_id)
server_data['tags'] = tag_server.tag_list()
return server, server_data
def parse_live_resource_data(self, resource_properties, resource_data):
server, server_data = resource_data
result = {
# there's a risk that flavor id will be int type, so cast to str
self.FLAVOR: six.text_type(server_data.get(self.FLAVOR)['id']),
self.IMAGE: six.text_type(server_data.get(self.IMAGE)['id']),
self.NAME: server_data.get(self.NAME),
self.METADATA: server_data.get(self.METADATA),
self.NETWORKS: self._get_live_networks(server, resource_properties)
}
if 'tags' in server_data:
result.update({self.TAGS: server_data['tags']})
return result
def _get_live_networks(self, server, props):
reality_nets = self._add_attrs_for_address(server,
extend_networks=False)
reality_net_ids = {}
client_plugin = self.client_plugin('neutron')
for net_key in reality_nets:
try:
net_id = client_plugin.find_resourceid_by_name_or_id(
client_plugin.RES_TYPE_NETWORK,
net_key)
except Exception as ex:
if (client_plugin.is_not_found(ex) or
client_plugin.is_no_unique(ex)):
net_id = None
else:
raise
if net_id:
reality_net_ids[net_id] = reality_nets.get(net_key)
resource_nets = props.get(self.NETWORKS)
result_nets = []
for net in resource_nets or []:
net_id = self._get_network_id(net)
if reality_net_ids.get(net_id):
for idx, address in enumerate(reality_net_ids.get(net_id)):
if address['addr'] == net[self.NETWORK_FIXED_IP]:
result_nets.append(net)
reality_net_ids.get(net_id).pop(idx)
break
for key, value in six.iteritems(reality_nets):
for address in reality_nets[key]:
new_net = {self.NETWORK_ID: key,
self.NETWORK_FIXED_IP: address['addr']}
if address['port'] not in [port['id']
for port in self._data_get_ports()]:
new_net.update({self.NETWORK_PORT: address['port']})
result_nets.append(new_net)
return result_nets
@classmethod
def _build_block_device_mapping(cls, bdm):
if not bdm:
return None
bdm_dict = {}
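        # Nova's legacy BDM format maps a device name to a string of the
        # form '<id>:<snap or empty>:<volume_size>:<delete_on_termination>',
        # e.g. {'vdb': '<snapshot-uuid>:snap:10:True'} (illustrative).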
for mapping in bdm:
mapping_parts = []
snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if snapshot_id:
mapping_parts.append(snapshot_id)
mapping_parts.append('snap')
else:
volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID)
mapping_parts.append(volume_id)
mapping_parts.append('')
volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE)
delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
if volume_size:
mapping_parts.append(str(volume_size))
else:
mapping_parts.append('')
if delete:
mapping_parts.append(str(delete))
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
bdm_dict[device_name] = ':'.join(mapping_parts)
return bdm_dict
@classmethod
def _build_block_device_mapping_v2(cls, bdm_v2):
if not bdm_v2:
return None
bdm_v2_list = []
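        # Each mapping is translated into a dict shaped like Nova's
        # block_device_mapping_v2 API argument, e.g. (illustrative):
        # {'uuid': '<volume-uuid>', 'source_type': 'volume',
        #  'destination_type': 'volume', 'boot_index': 0,
        #  'delete_on_termination': False}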
for mapping in bdm_v2:
bmd_dict = None
if mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID),
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID),
'source_type': 'snapshot',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE),
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE):
bmd_dict = {
'source_type': 'blank',
'destination_type': 'local',
'boot_index': -1,
'delete_on_termination': True,
'guest_format': 'swap',
'volume_size': mapping.get(
cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE),
}
elif (mapping.get(cls.BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE) or
mapping.get(cls.BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT)):
bmd_dict = {
'source_type': 'blank',
'destination_type': 'local',
'boot_index': -1,
'delete_on_termination': True
}
ephemeral_size = mapping.get(
cls.BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE)
if ephemeral_size:
bmd_dict.update({'volume_size': ephemeral_size})
ephemeral_format = mapping.get(
cls.BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT)
if ephemeral_format:
bmd_dict.update({'guest_format': ephemeral_format})
            # NOTE(prazumovsky): Nova does not accept an empty device name,
            # so only pass it through when it is set.
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
if device_name:
bmd_dict[cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME] = device_name
update_props = (cls.BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
cls.BLOCK_DEVICE_MAPPING_DISK_BUS,
cls.BLOCK_DEVICE_MAPPING_BOOT_INDEX,
cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
for update_prop in update_props:
if mapping.get(update_prop) is not None:
bmd_dict[update_prop] = mapping.get(update_prop)
if bmd_dict:
bdm_v2_list.append(bmd_dict)
return bdm_v2_list
def _get_subnets_attr(self, fixed_ips):
subnets = []
try:
for fixed_ip in fixed_ips:
if fixed_ip.get('subnet_id'):
subnets.append(self.client('neutron').show_subnet(
fixed_ip['subnet_id'])['subnet'])
except Exception as ex:
LOG.warning("Failed to fetch resource attributes: %s", ex)
return
return subnets
def _get_network_attr(self, network_id):
try:
return self.client('neutron').show_network(network_id)['network']
except Exception as ex:
LOG.warning("Failed to fetch resource attributes: %s", ex)
return
def _add_attrs_for_address(self, server, extend_networks=True):
"""Adds port id, subnets and network attributes to addresses list.
This method is used only for resolving attributes.
:param server: The server resource
:param extend_networks: When False the network is not extended, i.e
the net is returned without replacing name on
id.
"""
nets = copy.deepcopy(server.addresses) or {}
ifaces = server.interface_list()
ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
iface.mac_addr), iface.port_id)
for iface in ifaces)
for net_name in nets:
for addr in nets[net_name]:
addr['port'] = ip_mac_mapping_on_port_id.get(
(addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
# _get_live_networks() uses this method to get reality_nets.
# We don't need to get subnets and network in that case. Only
                # do the external calls if extend_networks is true, i.e.
                # called from _resolve_attribute().
if not extend_networks:
continue
try:
port = self.client('neutron').show_port(
addr['port'])['port']
except Exception as ex:
addr['subnets'], addr['network'] = None, None
LOG.warning("Failed to fetch resource attributes: %s", ex)
continue
addr['subnets'] = self._get_subnets_attr(port['fixed_ips'])
addr['network'] = self._get_network_attr(port['network_id'])
if extend_networks:
return self._extend_networks(nets)
else:
return nets
def _extend_networks(self, networks):
"""Method adds same networks with replaced name on network id.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(networks)
client_plugin = self.client_plugin('neutron')
for key in list(nets.keys()):
try:
net_id = client_plugin.find_resourceid_by_name_or_id(
client_plugin.RES_TYPE_NETWORK,
key)
except Exception as ex:
if (client_plugin.is_not_found(ex) or
client_plugin.is_no_unique(ex)):
net_id = None
else:
raise
if net_id:
nets[net_id] = nets[key]
return nets
def _resolve_attribute(self, name):
if self.resource_id is None:
return
if name == self.FIRST_ADDRESS:
return self.client_plugin().server_to_ipaddress(
self.resource_id) or ''
if name == self.OS_COLLECT_CONFIG:
return self.metadata_get().get('os-collect-config', {})
if name == self.NAME_ATTR:
return self._server_name()
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return ''
if name == self.ADDRESSES:
return self._add_attrs_for_address(server)
if name == self.NETWORKS_ATTR:
return self._extend_networks(server.networks)
if name == self.INSTANCE_NAME:
return getattr(server, 'OS-EXT-SRV-ATTR:instance_name', None)
if name == self.ACCESSIPV4:
return server.accessIPv4
if name == self.ACCESSIPV6:
return server.accessIPv6
if name == self.CONSOLE_URLS:
return self.client_plugin('nova').get_console_urls(server)
if name == self.TAGS_ATTR:
if self.client_plugin().is_version_supported(MICROVERSION_TAGS):
return self.client().servers.tag_list(server)
return None
def add_dependencies(self, deps):
super(Server, self).add_dependencies(deps)
# Depend on any Subnet in this template with the same
# network_id as the networks attached to this server.
# It is not known which subnet a server might be assigned
# to so all subnets in a network should be created before
# the servers in that network.
try:
nets = self.properties[self.NETWORKS]
except (ValueError, TypeError):
# Properties errors will be caught later in validation,
# where we can report them in their proper context.
return
if not nets:
return
for res in six.itervalues(self.stack):
if res.has_interface('OS::Neutron::Subnet'):
try:
subnet_net = res.properties.get(subnet.Subnet.NETWORK)
except (ValueError, TypeError):
# Properties errors will be caught later in validation,
# where we can report them in their proper context.
continue
# Be wary of the case where we do not know a subnet's
# network. If that's the case, be safe and add it as a
# dependency.
if not subnet_net:
deps += (self, res)
continue
for net in nets:
                # Match on network_id: the subnet may reference the same
                # network, possibly one created by this stack. Either way,
                # the server should still wait on the subnet.
net_id = net.get(self.NETWORK_ID)
if net_id and net_id == subnet_net:
deps += (self, res)
break
# If we don't know a given net_id right now, it's
# plausible this subnet depends on it.
if not net_id:
deps += (self, res)
break
def _update_flavor(self, after_props):
flavor = after_props[self.FLAVOR]
handler_args = checker_args = {'args': (flavor,)}
prg_resize = progress.ServerUpdateProgress(self.resource_id,
'resize',
handler_extra=handler_args,
checker_extra=checker_args)
prg_verify = progress.ServerUpdateProgress(self.resource_id,
'verify_resize')
return prg_resize, prg_verify
def _update_image(self, after_props):
image_update_policy = after_props[self.IMAGE_UPDATE_POLICY]
instance_meta = after_props[self.METADATA]
if instance_meta is not None:
instance_meta = self.client_plugin().meta_serialize(
instance_meta)
personality_files = after_props[self.PERSONALITY]
image = after_props[self.IMAGE]
preserve_ephemeral = (
image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
password = after_props[self.ADMIN_PASS]
kwargs = {'password': password,
'preserve_ephemeral': preserve_ephemeral,
'meta': instance_meta,
'files': personality_files}
prg = progress.ServerUpdateProgress(self.resource_id,
'rebuild',
handler_extra={'args': (image,),
'kwargs': kwargs})
return prg
def _update_networks(self, server, after_props):
updaters = []
new_networks = after_props[self.NETWORKS]
old_networks = self.properties[self.NETWORKS]
security_groups = after_props[self.SECURITY_GROUPS]
if not server:
server = self.client().servers.get(self.resource_id)
interfaces = server.interface_list()
remove_ports, add_nets = self.calculate_networks(
old_networks, new_networks, interfaces, security_groups)
for port in remove_ports:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
handler_extra={'args': (port,)},
checker_extra={'args': (port,)})
)
for args in add_nets:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
handler_extra={'kwargs': args},
checker_extra={'args': (args['port_id'],)})
)
return updaters
def needs_replace_with_prop_diff(self, changed_properties_set,
after_props, before_props):
"""Needs replace based on prop_diff."""
if self.FLAVOR in changed_properties_set:
flavor_update_policy = (
after_props.get(self.FLAVOR_UPDATE_POLICY) or
before_props.get(self.FLAVOR_UPDATE_POLICY))
if flavor_update_policy == 'REPLACE':
return True
if self.IMAGE in changed_properties_set:
image_update_policy = (
after_props.get(self.IMAGE_UPDATE_POLICY) or
before_props.get(self.IMAGE_UPDATE_POLICY))
if image_update_policy == 'REPLACE':
return True
if self.USER_DATA in changed_properties_set:
ud_update_policy = (
after_props.get(self.USER_DATA_UPDATE_POLICY) or
before_props.get(self.USER_DATA_UPDATE_POLICY))
return ud_update_policy == 'REPLACE'
def needs_replace_failed(self):
if not self.resource_id:
return True
with self.client_plugin().ignore_not_found:
server = self.client().servers.get(self.resource_id)
return server.status in ('ERROR', 'DELETED', 'SOFT_DELETED')
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
updaters = super(Server, self).handle_update(
json_snippet,
tmpl_diff,
prop_diff)
server = None
after_props = json_snippet.properties(self.properties_schema,
self.context)
if self.METADATA in prop_diff:
server = self.client_plugin().get_server(self.resource_id)
self.client_plugin().meta_update(server,
after_props[self.METADATA])
if self.TAGS in prop_diff:
self._update_server_tags(after_props[self.TAGS] or [])
if self.NAME in prop_diff:
if not server:
server = self.client_plugin().get_server(self.resource_id)
self.client_plugin().rename(server, after_props[self.NAME])
if self.NETWORKS in prop_diff:
updaters.extend(self._update_networks(server, after_props))
if self.FLAVOR in prop_diff:
updaters.extend(self._update_flavor(after_props))
if self.IMAGE in prop_diff:
updaters.append(self._update_image(after_props))
elif self.ADMIN_PASS in prop_diff:
if not server:
server = self.client_plugin().get_server(self.resource_id)
server.change_password(after_props[self.ADMIN_PASS])
# NOTE(pas-ha) optimization is possible (starting first task
# right away), but we'd rather not, as this method already might
# have called several APIs
return updaters
def check_update_complete(self, updaters):
"""Push all updaters to completion in list order."""
for prg in updaters:
if not prg.called:
handler = getattr(self.client_plugin(), prg.handler)
prg.called = handler(*prg.handler_args,
**prg.handler_kwargs)
return False
if not prg.complete:
check_complete = getattr(self.client_plugin(), prg.checker)
prg.complete = check_complete(*prg.checker_args,
**prg.checker_kwargs)
break
status = all(prg.complete for prg in updaters)
if status:
self.store_external_ports()
return status
def _validate_block_device_mapping(self):
# either volume_id or snapshot_id needs to be specified, but not both
# for block device mapping.
bdm = self.properties[self.BLOCK_DEVICE_MAPPING] or []
bdm_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] or []
image = self.properties[self.IMAGE]
if bdm and bdm_v2:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2)
bootable = image is not None
for mapping in bdm:
device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
if device_name == 'vda':
bootable = True
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is not None and snapshot_id is not None:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is None and snapshot_id is None:
msg = _('Either volume_id or snapshot_id must be specified for'
' device mapping %s') % device_name
raise exception.StackValidationFailed(message=msg)
bootable_devs = [image]
for mapping in bdm_v2:
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
image_id = mapping.get(self.BLOCK_DEVICE_MAPPING_IMAGE)
boot_index = mapping.get(self.BLOCK_DEVICE_MAPPING_BOOT_INDEX)
swap_size = mapping.get(self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
ephemeral = (mapping.get(
self.BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE) or mapping.get(
self.BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT))
property_tuple = (volume_id, snapshot_id, image_id, swap_size,
ephemeral)
if property_tuple.count(None) < 4:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
self.BLOCK_DEVICE_MAPPING_IMAGE,
self.BLOCK_DEVICE_MAPPING_SWAP_SIZE,
self.BLOCK_DEVICE_MAPPING_EPHEMERAL_SIZE,
self.BLOCK_DEVICE_MAPPING_EPHEMERAL_FORMAT
)
if property_tuple.count(None) == 5:
msg = _('Either volume_id, snapshot_id, image_id, swap_size, '
'ephemeral_size or ephemeral_format must be '
'specified.')
raise exception.StackValidationFailed(message=msg)
if any((volume_id is not None, snapshot_id is not None,
image_id is not None)):
                # When boot_index is omitted, Nova treats volume, snapshot
                # and image sources as boot_index=0, so count them as
                # bootable here as well.
if boot_index is None or boot_index == 0:
bootable = True
bootable_devs.append(volume_id)
bootable_devs.append(snapshot_id)
bootable_devs.append(image_id)
if not bootable:
msg = _('Neither image nor bootable volume is specified for '
'instance %s') % self.name
raise exception.StackValidationFailed(message=msg)
if bdm_v2 and len(list(
dev for dev in bootable_devs if dev is not None)) != 1:
msg = _('Multiple bootable sources for instance %s.') % self.name
raise exception.StackValidationFailed(message=msg)
def _validate_image_flavor(self, image, flavor):
try:
image_obj = self.client_plugin('glance').get_image(image)
flavor_obj = self.client_plugin().get_flavor(flavor)
except Exception as ex:
# Flavor or image may not have been created in the backend
# yet when they are part of the same stack/template.
if (self.client_plugin().is_not_found(ex) or
self.client_plugin('glance').is_not_found(ex)):
return
raise
else:
if image_obj.status.lower() != self.IMAGE_ACTIVE:
msg = _('Image status is required to be %(cstatus)s not '
'%(wstatus)s.') % {
'cstatus': self.IMAGE_ACTIVE,
'wstatus': image_obj.status}
raise exception.StackValidationFailed(message=msg)
# validate image/flavor combination
if flavor_obj.ram < image_obj.min_ram:
msg = _('Image %(image)s requires %(imram)s minimum ram. '
'Flavor %(flavor)s has only %(flram)s.') % {
'image': image, 'imram': image_obj.min_ram,
'flavor': flavor, 'flram': flavor_obj.ram}
raise exception.StackValidationFailed(message=msg)
# validate image/flavor disk compatibility
if flavor_obj.disk < image_obj.min_disk:
msg = _('Image %(image)s requires %(imsz)s GB minimum '
'disk space. Flavor %(flavor)s has only '
'%(flsz)s GB.') % {
'image': image, 'imsz': image_obj.min_disk,
'flavor': flavor, 'flsz': flavor_obj.disk}
raise exception.StackValidationFailed(message=msg)
def validate(self):
"""Validate any of the provided params."""
super(Server, self).validate()
if self.user_data_software_config():
if 'deployments' in self.t.metadata():
msg = _('deployments key not allowed in resource metadata '
'with user_data_format of SOFTWARE_CONFIG')
raise exception.StackValidationFailed(message=msg)
self._validate_block_device_mapping()
# make sure the image exists if specified.
image = self.properties[self.IMAGE]
flavor = self.properties[self.FLAVOR]
if image:
self._validate_image_flavor(image, flavor)
networks = self.properties[self.NETWORKS] or []
for network in networks:
self._validate_network(network)
has_str_net = self._str_network(networks) is not None
if has_str_net:
if len(networks) != 1:
msg = _('Property "%s" can not be specified if '
'multiple network interfaces set for '
'server.') % self.ALLOCATE_NETWORK
raise exception.StackValidationFailed(message=msg)
# Check if str_network is allowed to use
if not self.client_plugin().is_version_supported(
MICROVERSION_STR_NETWORK):
msg = (_('Cannot use "%s" property - compute service '
'does not support the required api '
'microversion.') % self.ALLOCATE_NETWORK)
raise exception.StackValidationFailed(message=msg)
# record if any networks include explicit ports
has_port = any(n[self.NETWORK_PORT] is not None for n in networks)
# if 'security_groups' present for the server and explicit 'port'
# in one or more entries in 'networks', raise validation error
if has_port and self.properties[self.SECURITY_GROUPS]:
raise exception.ResourcePropertyConflict(
self.SECURITY_GROUPS,
"/".join([self.NETWORKS, self.NETWORK_PORT]))
# Check if nic tag is allowed to use
if self._is_nic_tagged(networks=networks):
if not self.client_plugin().is_version_supported(
MICROVERSION_NIC_TAGS):
                msg = (_('Cannot use "%s" property in networks - '
                         'nova does not support required '
                         'api microversion.') % self.NIC_TAG)
raise exception.StackValidationFailed(message=msg)
# Check if tags is allowed to use
if self.properties[self.TAGS]:
if not self.client_plugin().is_version_supported(
MICROVERSION_TAGS):
msg = (_('Cannot use "%s" property - nova does not support '
'required api microversion.') % self.TAGS)
raise exception.StackValidationFailed(message=msg)
# retrieve provider's absolute limits if it will be needed
metadata = self.properties[self.METADATA]
personality = self.properties[self.PERSONALITY]
if metadata or personality:
limits = self.client_plugin().absolute_limits()
# verify that the number of metadata entries is not greater
# than the maximum number allowed in the provider's absolute
# limits
if metadata:
            msg = _('Instance metadata must not contain more than %s '
                    'entries. This is the maximum number allowed by your '
                    'service provider.') % limits['maxServerMeta']
self._check_maximum(len(metadata),
limits['maxServerMeta'], msg)
# verify the number of personality files and the size of each
# personality file against the provider's absolute limits
if personality:
msg = _("The personality property may not contain "
"greater than %s entries.") % limits['maxPersonality']
self._check_maximum(len(personality),
limits['maxPersonality'], msg)
for path, contents in personality.items():
msg = (_("The contents of personality file \"%(path)s\" "
"is larger than the maximum allowed personality "
"file size (%(max_size)s bytes).") %
{'path': path,
'max_size': limits['maxPersonalitySize']})
self._check_maximum(len(bytes(contents.encode('utf-8'))
) if contents is not None else 0,
limits['maxPersonalitySize'], msg)
def _delete(self):
if self.user_data_software_config():
self._delete_queue()
self._delete_user()
self._delete_temp_url()
# remove internal and external ports
self._delete_internal_ports()
self.data_delete('external_ports')
if self.resource_id is None:
return
try:
self.client().servers.delete(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return
return progress.ServerDeleteProgress(self.resource_id)
def handle_snapshot_delete(self, state):
if state[1] != self.FAILED and self.resource_id:
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
return progress.ServerDeleteProgress(
self.resource_id, image_id, False)
return self._delete()
def handle_delete(self):
return self._delete()
def check_delete_complete(self, prg):
if not prg:
return True
if not prg.image_complete:
image = self.client_plugin('glance').get_image(prg.image_id)
if image.status.lower() in (self.IMAGE_ERROR,
self.IMAGE_DELETED):
raise exception.Error(image.status)
elif image.status.lower() == self.IMAGE_ACTIVE:
prg.image_complete = True
if not self._delete():
return True
return False
return self.client_plugin().check_delete_server_complete(
prg.server_id)
def handle_suspend(self):
"""Suspend a server.
Note we do not wait for the SUSPENDED state, this is polled for by
check_suspend_complete in a similar way to the create logic so we can
take advantage of coroutines.
"""
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
            # if the server has already been suspended successfully,
            # there is no need to suspend it again
if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug('suspending server %s', self.resource_id)
server.suspend()
return server.id
def check_suspend_complete(self, server_id):
cp = self.client_plugin()
server = cp.fetch_server(server_id)
if not server:
return False
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s',
{'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED'
else:
exc = exception.ResourceUnknownStatus(
result=_('Suspend of server %s failed') % server.name,
resource_status=status)
raise exc
def handle_resume(self):
"""Resume a server.
Note we do not wait for the ACTIVE state, this is polled for by
check_resume_complete in a similar way to the create logic so we can
take advantage of coroutines.
"""
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
            # if the server has already been resumed successfully,
            # there is no need to resume it again
if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug('resuming server %s', self.resource_id)
server.resume()
return server.id
def check_resume_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_snapshot(self):
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
self.data_set('snapshot_image_id', image_id)
return image_id
def check_snapshot_complete(self, image_id):
image = self.client_plugin('glance').get_image(image_id)
if image.status.lower() == self.IMAGE_ACTIVE:
return True
elif image.status.lower() in (self.IMAGE_ERROR, self.IMAGE_DELETED):
raise exception.Error(image.status)
return False
def handle_delete_snapshot(self, snapshot):
image_id = snapshot['resource_data'].get('snapshot_image_id')
with self.client_plugin('glance').ignore_not_found:
self.client('glance').images.delete(image_id)
def handle_restore(self, defn, restore_data):
image_id = restore_data['resource_data']['snapshot_image_id']
props = dict((k, v) for k, v in self.properties.data.items()
if v is not None)
for key in [self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2,
self.NETWORKS]:
if props.get(key) is not None:
props[key] = list(dict((k, v) for k, v in prop.items()
if v is not None)
for prop in props[key])
props[self.IMAGE] = image_id
return defn.freeze(properties=props)
def prepare_for_replace(self):
# if the server has not been created yet, do nothing
if self.resource_id is None:
return
self.prepare_ports_for_replace()
def restore_prev_rsrc(self, convergence=False):
self.restore_ports_after_rollback(convergence=convergence)
def resource_mapping():
return {
'OS::Nova::Server': Server,
}
|
from rest_framework import authentication
from rest_framework import exceptions
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from jwt_auth.models import TokenBlackList, User
import re
import jwt
def validate_token(token):
"""
Validate token
"""
# check whether the token has been blacklisted
try:
TokenBlackList.objects.get(token=token)
raise exceptions.AuthenticationFailed('Invalid token')
except ObjectDoesNotExist:
pass
try:
user_info = jwt.decode(token, settings.SECRET_KEY)
user = User.objects.get(id=user_info['uid'])
except jwt.InvalidTokenError as e:
if isinstance(e, jwt.ExpiredSignatureError):
raise exceptions.AuthenticationFailed('Expired token')
else:
raise exceptions.AuthenticationFailed('Invalid token')
return (user, token)
class JwtAuthentication(authentication.BaseAuthentication):
"""
custom jwt authentication implement
"""
def authenticate(self, request):
# check the request path is excluded or not
paths = list(settings.AUTHENTICATION_EXCLUDE['ALL']) if 'ALL' in settings.AUTHENTICATION_EXCLUDE else []
try:
paths.extend(list(settings.AUTHENTICATION_EXCLUDE[request.method]))
except KeyError:
pass
for path in paths:
match = re.search(path, request.path)
if match:
return None
        # do authentication
        try:
            # Django exposes the header as HTTP_AUTHORIZATION in request.META
            auth = request.META['HTTP_AUTHORIZATION'].split(' ')
            auth_type, token = auth[0], auth[1]
        except (KeyError, IndexError):
            raise exceptions.AuthenticationFailed(
                'The resource requires a token to authenticate')
        if auth_type.lower() == 'token':
            return validate_token(token)
        else:
            raise exceptions.AuthenticationFailed(
                'Invalid authentication header')
def authenticate_header(self, request):
return 'Token'
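# To enable this class, point DRF at it in settings (module path is
# illustrative):
#   REST_FRAMEWORK = {
#       'DEFAULT_AUTHENTICATION_CLASSES': (
#           'jwt_auth.authentication.JwtAuthentication',
#       ),
#   }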
|
#!/usr/bin/env python
import sys, os, subprocess
def nmapScriptsScan(ip, port):
print "[-] Starting nmap ms-sql script scan for " + ip + ":" + port
nmapCmd = "nmap -sV -Pn -v -p "+port+" --script=ms-sql* -oN reports/sql/"+ip+"_"+port+"_nmap "+ip+ " >> reports/sql/"+ip+"_"+port+"_nmapOutput.txt"
subprocess.check_output(nmapCmd, shell=True)
print "[-] Completed nmap ms-sql script scan for " + ip + ":" + port
def hydraScan(ip, port):
    print "[-] Starting hydra ms-sql against " + ip + ":" + port
    hydraCmd = "hydra -L wordlists/users.txt -P wordlists/passwords.txt -f -e n -o reports/sql/"+ip+"_"+port+"_hydra.txt -u "+ip+" -s "+port+" mssql"
    try:
        results = subprocess.check_output(hydraCmd, shell=True)
        resultarr = results.split("\n")
        for result in resultarr:
            if "login:" in result:
                print "[*] Valid ms-sql credentials found: " + result
                resultList = result.split()
                username = resultList[4]
                # hydra omits the password column when the password is blank
                password = resultList[6] if len(resultList) > 6 else ''
    except:
        print "[-] No valid ms-sql credentials found"
    print "[-] Completed hydra ms-sql against " + ip + ":" + port
def main():
if len(sys.argv) != 3:
print "Passed: ",sys.argv
print "Usage: sql-scan.py <ip> <port> "
sys.exit(0)
ip = str(sys.argv[1])
port = str(sys.argv[2])
nmapScriptsScan( ip, port)
hydraScan( ip, port)
if __name__ == "__main__":
    main()
|
'''
This module should be run to recreate the files that we generate automatically
(i.e.: modules that shouldn't be traced and cython .pyx)
'''
from __future__ import print_function
import os
import struct
import re
def is_python_64bit():
return (struct.calcsize('P') == 8)
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
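# The modules converted below mark Cython-only code with comment markers of
# the following form (illustrative):
#   # IFDEF CYTHON
#   # <cython code, uncommented in the generated .pyx>
#   # ELSE
#   <pure-python code, commented out in the generated .pyx>
#   # ENDIF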
def get_cython_contents(filename):
if filename.endswith('.pyc'):
filename = filename[:-1]
state = 'regular'
replacements = []
new_contents = []
with open(filename, 'r') as stream:
for line in stream:
strip = line.strip()
if state == 'regular':
if strip == '# IFDEF CYTHON':
state = 'cython'
new_contents.append('%s -- DONT EDIT THIS FILE (it is automatically generated)\n' % line.replace('\n', '').replace('\r', ''))
continue
new_contents.append(line)
elif state == 'cython':
if strip == '# ELSE':
state = 'nocython'
new_contents.append(line)
continue
elif strip == '# ENDIF':
state = 'regular'
new_contents.append(line)
continue
if strip == '#':
continue
assert strip.startswith('# '), 'Line inside # IFDEF CYTHON must start with "# ". Found: %s' % (strip,)
strip = strip.replace('# ', '', 1).strip()
if strip.startswith('cython_inline_constant:'):
strip = strip.replace('cython_inline_constant:', '')
word_to_replace, replacement = strip.split('=')
replacements.append((word_to_replace.strip(), replacement.strip()))
continue
line = line.replace('# ', '', 1)
new_contents.append(line)
elif state == 'nocython':
if strip == '# ENDIF':
state = 'regular'
new_contents.append(line)
continue
new_contents.append('# %s' % line)
assert state == 'regular', 'Error: # IFDEF CYTHON found without # ENDIF'
ret = ''.join(new_contents)
for (word_to_replace, replacement) in replacements:
ret = re.sub(r"\b%s\b" % (word_to_replace,), replacement, ret)
return ret
def _generate_cython_from_files(target, modules):
contents = ['''from __future__ import print_function
# Important: Autogenerated file.
# DO NOT edit manually!
# DO NOT edit manually!
''']
for mod in modules:
contents.append(get_cython_contents(mod.__file__))
with open(target, 'w') as stream:
stream.write(''.join(contents))
def generate_dont_trace_files():
template = '''# Important: Autogenerated file.
# DO NOT edit manually!
# DO NOT edit manually!
from _pydevd_bundle.pydevd_constants import IS_PY3K
LIB_FILE = 1
PYDEV_FILE = 2
DONT_TRACE_DIRS = {
%(pydev_dirs)s
}
DONT_TRACE = {
# commonly used things from the stdlib that we don't want to trace
'Queue.py':LIB_FILE,
'queue.py':LIB_FILE,
'socket.py':LIB_FILE,
'weakref.py':LIB_FILE,
'_weakrefset.py':LIB_FILE,
'linecache.py':LIB_FILE,
'threading.py':LIB_FILE,
'dis.py':LIB_FILE,
# things from pydev that we don't want to trace
'_pydev_execfile.py':PYDEV_FILE,
%(pydev_files)s
}
if IS_PY3K:
# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)
DONT_TRACE['io.py'] = LIB_FILE
# Don't trace common encodings too
DONT_TRACE['cp1252.py'] = LIB_FILE
DONT_TRACE['utf_8.py'] = LIB_FILE
DONT_TRACE['codecs.py'] = LIB_FILE
'''
pydev_files = []
pydev_dirs = []
exclude_dirs = [
'.git',
'.settings',
'build',
'build_tools',
'dist',
'pydevd.egg-info',
'pydevd_attach_to_process',
'pydev_sitecustomize',
'stubs',
'tests',
'tests_mainloop',
'tests_python',
'tests_runfiles',
'test_pydevd_reload',
'third_party',
'__pycache__',
'pydev_ipython',
'vendored',
'.mypy_cache',
]
for root, dirs, files in os.walk(root_dir):
for d in dirs:
if 'pydev' in d and d != 'pydevd.egg-info':
# print(os.path.join(root, d))
pydev_dirs.append(" '%s': PYDEV_FILE," % (d,))
for d in exclude_dirs:
try:
dirs.remove(d)
            except ValueError:
pass
for f in files:
if f.endswith('.py'):
if f not in (
'__init__.py',
'runfiles.py',
'pydev_coverage.py',
'pydev_pysrc.py',
'setup.py',
'setup_cython.py',
'interpreterInfo.py',
'conftest.py',
):
pydev_files.append(" '%s': PYDEV_FILE," % (f,))
contents = template % (dict(
pydev_files='\n'.join(sorted(pydev_files)),
pydev_dirs='\n'.join(sorted(pydev_dirs)),
))
assert 'pydevd.py' in contents
assert 'pydevd_dont_trace.py' in contents
with open(os.path.join(root_dir, '_pydevd_bundle', 'pydevd_dont_trace_files.py'), 'w') as stream:
stream.write(contents)
def remove_if_exists(f):
try:
if os.path.exists(f):
os.remove(f)
    except OSError:
        import traceback
        traceback.print_exc()
def generate_cython_module():
remove_if_exists(os.path.join(root_dir, '_pydevd_bundle', 'pydevd_cython.pyx'))
target = os.path.join(root_dir, '_pydevd_bundle', 'pydevd_cython.pyx')
curr = os.environ.get('PYDEVD_USE_CYTHON')
try:
os.environ['PYDEVD_USE_CYTHON'] = 'NO'
from _pydevd_bundle import pydevd_additional_thread_info_regular
from _pydevd_bundle import pydevd_frame, pydevd_trace_dispatch_regular
_generate_cython_from_files(target, [pydevd_additional_thread_info_regular, pydevd_frame, pydevd_trace_dispatch_regular])
finally:
if curr is None:
del os.environ['PYDEVD_USE_CYTHON']
else:
os.environ['PYDEVD_USE_CYTHON'] = curr
if __name__ == '__main__':
generate_dont_trace_files()
generate_cython_module()
|
import os
import os.path
from tempfile import mkdtemp
from shutil import rmtree
from evaluation_constants import MODEL_SUMMARIES_PATTERN, SYSTEM_SUMMARIES_PATTERN
from pyrouge import Rouge155
""" Class that runs ROUGE to compare the output of a custom summarization tool
comparing it to a 'gold standard' reference summary.
"""
ROUGE_PATH = os.path.join(os.getcwd(), 'ROUGE-RELEASE-1.5.5')
ROUGE_DATA_PATH = os.path.join(ROUGE_PATH, 'data')
SYSTEM_DIR = "system"
MODEL_DIR = "model"
CONFIG_FILENAME = "config.xml"
# Rouge options as used in the DUC2007 competition:
# http://www-nlpir.nist.gov/projects/duc/duc2007/tasks.html#main
ROUGE_OPTIONS = [
'-e', ROUGE_DATA_PATH, # Specify ROUGE_EVAL_HOME directory where the ROUGE data files can be found.
'-n', '2', # Compute ROUGE-1 and ROUGE-2.
'-x', # Do not calculate ROUGE-L.
'-m', # Apply Porter stemmer on both models and peers.
    '-2', '4', # Compute skip bigram (ROUGE-S) co-occurrence with a maximum skip distance of 4.
'-u', # Include unigram in Skip Bigram (ROUGE-S).
    '-c', '95', # Specify CF% (0 <= CF <= 100) confidence interval to compute.
'-r', '1000', # Specify the number of sampling point in bootstrap resampling (default is 1000).
'-f', 'A', # Scores are averaged over multiple models.
'-p', '0.5', # Compute F-measure with alpha = 0.5.
'-t', '0', # Use model unit as the counting unit.
'-a' # Evaluate all systems.
]
def create_temporary_directories():
tempdir = mkdtemp()
# Creates the temp directories to hold the rouge files.
new_system_dir = os.path.join(tempdir, SYSTEM_DIR)
os.mkdir(new_system_dir)
new_model_dir = os.path.join(tempdir, MODEL_DIR)
os.mkdir(new_model_dir)
return tempdir
def evaluate_summary(model_directory, system_directory):
tempdir = create_temporary_directories()
rouge_instance = Rouge155(ROUGE_PATH, verbose=False, rouge_args=' '.join(ROUGE_OPTIONS))
# Converts the gold references files to rouge format.
model_input_dir = model_directory
model_output_dir = os.path.join(tempdir, MODEL_DIR)
rouge_instance.convert_summaries_to_rouge_format(model_input_dir, model_output_dir)
# Converts the summary file to rouge format.
system_output_dir = os.path.join(tempdir, SYSTEM_DIR)
rouge_instance.convert_summaries_to_rouge_format(system_directory, system_output_dir)
# Writes the configuration file.
config_filename = os.path.join(tempdir, CONFIG_FILENAME)
rouge_instance.write_config_static(system_output_dir, SYSTEM_SUMMARIES_PATTERN,
model_output_dir, MODEL_SUMMARIES_PATTERN,
config_filename, 1)
# Runs ROUGE comparing the gold reference summaries with the recently generated.
output = rouge_instance.evaluate_static(ROUGE_PATH, config_filename, ROUGE_OPTIONS)
# Removes the temporal directories.
rmtree(tempdir)
return rouge_instance.output_to_dict(output)
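# Usage sketch (directory names illustrative; the exact score keys depend on
# pyrouge's output_to_dict):
#   scores = evaluate_summary('gold_summaries/', 'system_summaries/')
#   print(scores.get('rouge_2_f_score'))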
|
#!/usr/bin/env python3
from cereal import car
from selfdrive.car.subaru.values import CAR, PREGLOBAL_CARS
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness, gen_empty_fingerprint
from selfdrive.car.interfaces import CarInterfaceBase
class CarInterface(CarInterfaceBase):
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 4.0
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
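    # fingerprint is assumed to map each CAN bus index to a dict of
    # {message_id: payload_size}; e.g. "0x228 in fingerprint[0]" below
    # checks whether a given message was seen on bus 0.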
ret = CarInterfaceBase.get_std_params(candidate, fingerprint)
ret.carName = "subaru"
ret.radarOffCan = True
if candidate in PREGLOBAL_CARS:
ret.safetyModel = car.CarParams.SafetyModel.subaruLegacy
ret.enableBsm = 0x25c in fingerprint[0]
else:
ret.safetyModel = car.CarParams.SafetyModel.subaru
ret.enableBsm = 0x228 in fingerprint[0]
# Subaru port is a community feature, since we don't own one to test
ret.communityFeature = True
ret.dashcamOnly = candidate in PREGLOBAL_CARS
ret.steerRateCost = 0.7
ret.steerLimitTimer = 0.4
if candidate == CAR.ASCENT:
ret.mass = 2031. + STD_CARGO_KG
ret.wheelbase = 2.89
ret.centerToFront = ret.wheelbase * 0.5
ret.steerRatio = 13.5
ret.steerActuatorDelay = 0.3 # end-to-end angle controller
ret.lateralTuning.pid.kf = 0.00003
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 20.], [0., 20.]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.0025, 0.1], [0.00025, 0.01]]
if candidate == CAR.IMPREZA:
ret.mass = 1568. + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.5
ret.steerRatio = 15
ret.steerActuatorDelay = 0.4 # end-to-end angle controller
ret.lateralTuning.pid.kf = 0.00005
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 20.], [0., 20.]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2, 0.3], [0.02, 0.03]]
if candidate == CAR.FORESTER:
ret.mass = 1568. + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.5
ret.steerRatio = 17 # learned, 14 stock
ret.steerActuatorDelay = 0.1
ret.lateralTuning.pid.kf = 0.000038
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 14., 23.], [0., 14., 23.]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.01, 0.065, 0.2], [0.001, 0.015, 0.025]]
if candidate in [CAR.FORESTER_PREGLOBAL, CAR.OUTBACK_PREGLOBAL_2018]:
ret.safetyParam = 1 # Outback 2018-2019 and Forester have reversed driver torque signal
ret.mass = 1568 + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.5
ret.steerRatio = 20 # learned, 14 stock
ret.steerActuatorDelay = 0.1
ret.lateralTuning.pid.kf = 0.000039
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 10., 20.], [0., 10., 20.]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.01, 0.05, 0.2], [0.003, 0.018, 0.025]]
if candidate == CAR.LEGACY_PREGLOBAL:
ret.mass = 1568 + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.5
ret.steerRatio = 12.5 # 14.5 stock
ret.steerActuatorDelay = 0.15
ret.lateralTuning.pid.kf = 0.00005
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 20.], [0., 20.]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.1, 0.2], [0.01, 0.02]]
if candidate == CAR.OUTBACK_PREGLOBAL:
ret.mass = 1568 + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.5
ret.steerRatio = 20 # learned, 14 stock
ret.steerActuatorDelay = 0.1
ret.lateralTuning.pid.kf = 0.000039
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0., 10., 20.], [0., 10., 20.]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.01, 0.05, 0.2], [0.003, 0.018, 0.025]]
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront)
return ret
# returns a car.CarState
def update(self, c, can_strings):
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
ret = self.CS.update(self.cp, self.cp_cam)
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid
ret.steeringRateLimited = self.CC.steer_rate_limited if self.CC is not None else False
ret.events = self.create_common_events(ret).to_msg()
self.CS.out = ret.as_reader()
return self.CS.out
def apply(self, c):
can_sends = self.CC.update(c.enabled, self.CS, self.frame, c.actuators,
c.cruiseControl.cancel, c.hudControl.visualAlert,
c.hudControl.leftLaneVisible, c.hudControl.rightLaneVisible, c.hudControl.leftLaneDepart, c.hudControl.rightLaneDepart)
self.frame += 1
return can_sends
|
#!/usr/bin/env python
#
# wiringPi requires root privileges: it opens /dev/mem, even though this
# script only uses the I2C bus. There is no way around that, so run this
# script with sudo.
#
#
import wiringpi as wp
#
# Define a function to decode BCD encoded numbers.
def decBCD(num):
    # Integer division keeps the result correct on both Python 2 and 3.
    return (num // 16) * 10 + (num % 16)
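# For example, decBCD(0x25) == 25: the high nibble carries the tens digit
# and the low nibble the ones digit.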
#
# Open the RTC
#
fc = wp.wiringPiI2CSetup(0x68)  # 0x68 is the standard DS1307/DS3231 RTC address
#
# We read the registers one at a time.
secs = wp.wiringPiI2CReadReg8(fc,0x00)
mins = wp.wiringPiI2CReadReg8(fc,0x01)
hour = wp.wiringPiI2CReadReg8(fc,0x02)
day = wp.wiringPiI2CReadReg8(fc,0x03)
dat = wp.wiringPiI2CReadReg8(fc,0x04)
mon = wp.wiringPiI2CReadReg8(fc,0x05)
yr = wp.wiringPiI2CReadReg8(fc,0x06)
cent = wp.wiringPiI2CReadReg8(fc,0x07)
temp1 = wp.wiringPiI2CReadReg8(fc,0x11)  # temperature MSB (DS3231); read but unused below
temp2 = wp.wiringPiI2CReadReg8(fc,0x12)  # temperature LSB (DS3231); read but unused below
year = decBCD(yr)
month = decBCD(mon & 0x7f)
date = decBCD(dat)
if (mon & 0x80)>0:
year+=2100
else:
year+=2000
if (hour&0x40)>0: # Test for 12 or 24 hour clock. 1=12 hour 0=24 hour
hours = decBCD(hour&0x1F)
if (hour&0x20)>0:
ampm = "PM"
else:
ampm = "AM"
print "{2}:{1:02d}:{0:02d} {3} ({4}) {5}-{6}-{7}".format(decBCD(secs),decBCD(mins),hours,ampm,day,year,month,date)
else:
hours = decBCD(hour&0x3F)
print "{2}:{1:02d}:{0:02d} ({3}) {4}-{5}-{6}".format(decBCD(secs),decBCD(mins),hours,day,year,month,date)
|
# -*- coding: utf-8 -*-
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# simplified BSD-3 license
import os.path as op
import warnings
import inspect
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from nose.tools import assert_true, assert_raises, assert_equal
from mne import find_events, pick_types
from mne.io import read_raw_egi
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.egi.egi import _combine_triggers
from mne.utils import run_tests_if_main
from mne.datasets.testing import data_path, requires_testing_data
warnings.simplefilter('always') # enable b/c these tests throw warnings
FILE = inspect.getfile(inspect.currentframe())
base_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
egi_fname = op.join(base_dir, 'test_egi.raw')
egi_txt_fname = op.join(base_dir, 'test_egi.txt')
@requires_testing_data
def test_io_egi_mff():
"""Test importing EGI MFF simple binary files"""
egi_fname_mff = op.join(data_path(), 'EGI', 'test_egi.mff')
raw = read_raw_egi(egi_fname_mff, include=None)
assert_true('RawMff' in repr(raw))
include = ['DIN1', 'DIN2', 'DIN3', 'DIN4', 'DIN5', 'DIN7']
raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname_mff,
include=include, channel_naming='EEG %03d')
assert_equal('eeg' in raw, True)
eeg_chan = [c for c in raw.ch_names if 'EEG' in c]
assert_equal(len(eeg_chan), 129)
picks = pick_types(raw.info, eeg=True)
assert_equal(len(picks), 129)
assert_equal('STI 014' in raw.ch_names, True)
events = find_events(raw, stim_channel='STI 014')
assert_equal(len(events), 8)
assert_equal(np.unique(events[:, 1])[0], 0)
assert_true(np.unique(events[:, 0])[0] != 0)
assert_true(np.unique(events[:, 2])[0] != 0)
assert_raises(ValueError, read_raw_egi, egi_fname_mff, include=['Foo'],
preload=False)
assert_raises(ValueError, read_raw_egi, egi_fname_mff, exclude=['Bar'],
preload=False)
for ii, k in enumerate(include, 1):
assert_true(k in raw.event_id)
assert_true(raw.event_id[k] == ii)
def test_io_egi():
"""Test importing EGI simple binary files."""
# test default
with open(egi_txt_fname) as fid:
data = np.loadtxt(fid)
t = data[0]
data = data[1:]
    data *= 1e-6  # convert from µV to V
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw = read_raw_egi(egi_fname, include=None)
assert_true('RawEGI' in repr(raw))
assert_equal(len(w), 1)
assert_true(w[0].category == RuntimeWarning)
msg = 'Did not find any event code with more than one event.'
assert_true(msg in '%s' % w[0].message)
data_read, t_read = raw[:256]
assert_allclose(t_read, t)
assert_allclose(data_read, data, atol=1e-10)
include = ['TRSP', 'XXX1']
with warnings.catch_warnings(record=True): # preload=None
raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname,
include=include)
assert_equal('eeg' in raw, True)
eeg_chan = [c for c in raw.ch_names if c.startswith('E')]
assert_equal(len(eeg_chan), 256)
picks = pick_types(raw.info, eeg=True)
assert_equal(len(picks), 256)
assert_equal('STI 014' in raw.ch_names, True)
events = find_events(raw, stim_channel='STI 014')
assert_equal(len(events), 2) # ground truth
assert_equal(np.unique(events[:, 1])[0], 0)
assert_true(np.unique(events[:, 0])[0] != 0)
assert_true(np.unique(events[:, 2])[0] != 0)
    # test trigger functionality
    triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
events_ids = [12, 24]
new_trigger = _combine_triggers(triggers, events_ids)
assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24]))
assert_raises(ValueError, read_raw_egi, egi_fname, include=['Foo'],
preload=False)
assert_raises(ValueError, read_raw_egi, egi_fname, exclude=['Bar'],
preload=False)
for ii, k in enumerate(include, 1):
assert_true(k in raw.event_id)
assert_true(raw.event_id[k] == ii)
run_tests_if_main()
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Notmuch(AutotoolsPackage):
"""Notmuch is a mail indexer.
    Essentially, it is a very thin front end on top of Xapian.
"""
homepage = "https://notmuchmail.org/"
url = "https://notmuchmail.org/releases/notmuch-0.23.7.tar.gz"
version('0.23.7', '1ad339b6d0c03548140434c7bcdf0624')
depends_on('zlib')
depends_on('talloc')
depends_on('gmime@2.6:')
depends_on('xapian-core')
|
"""
Extend pandas with custom array types.
"""
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union
import numpy as np
from pandas._typing import DtypeObj
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
if TYPE_CHECKING:
from pandas.core.arrays import ExtensionArray
class ExtensionDtype:
"""
A custom data type, to be paired with an ExtensionArray.
See Also
--------
extensions.register_extension_dtype: Register an ExtensionType
with pandas as class decorator.
extensions.ExtensionArray: Abstract base class for custom 1-D array types.
Notes
-----
The interface includes the following abstract methods that must
be implemented by subclasses:
* type
* name
The following attributes and methods influence the behavior of the dtype in
pandas operations
* _is_numeric
* _is_boolean
* _get_common_dtype
Optionally one can override construct_array_type for construction
with the name of this dtype via the Registry. See
:meth:`extensions.register_extension_dtype`.
* construct_array_type
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
ExtensionDtypes are required to be hashable. The base class provides
a default implementation, which relies on the ``_metadata`` class
attribute. ``_metadata`` should be a tuple containing the strings
that define your data type. For example, with ``PeriodDtype`` that's
the ``freq`` attribute.
**If you have a parametrized dtype you should set the ``_metadata``
class property**.
Ideally, the attributes in ``_metadata`` will match the
parameters to your ``ExtensionDtype.__init__`` (if any). If any of
the attributes in ``_metadata`` don't implement the standard
``__eq__`` or ``__hash__``, the default implementations here will not
work.
.. versionchanged:: 0.24.0
Added ``_metadata``, ``__hash__``, and changed the default definition
of ``__eq__``.
For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
can be implemented: this method receives a pyarrow Array or ChunkedArray
as only argument and is expected to return the appropriate pandas
ExtensionArray for this dtype and the passed values::
class ExtensionDtype:
def __from_arrow__(
self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
) -> ExtensionArray:
...
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
"""
_metadata: Tuple[str, ...] = ()
def __str__(self) -> str:
return self.name
def __eq__(self, other: Any) -> bool:
"""
Check whether 'other' is equal to self.
By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
* it's an instance of this type and all of the attributes
in ``self._metadata`` are equal between `self` and `other`.
Parameters
----------
other : Any
Returns
-------
bool
"""
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
return all(
getattr(self, attr) == getattr(other, attr) for attr in self._metadata
)
return False
def __hash__(self) -> int:
return hash(tuple(getattr(self, attr) for attr in self._metadata))
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
@property
def na_value(self) -> object:
"""
Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary.
"""
return np.nan
@property
def type(self) -> Type:
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
raise AbstractMethodError(self)
@property
def kind(self) -> str:
"""
A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, which is probably 'O' for object if
the extension type cannot be represented as a built-in NumPy
type.
See Also
--------
numpy.dtype.kind
"""
return "O"
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
raise AbstractMethodError(self)
@property
def names(self) -> Optional[List[str]]:
"""
Ordered list of field names, or None if there are no fields.
This is for compatibility with NumPy arrays, and may be removed in the
future.
"""
return None
@classmethod
def construct_array_type(cls) -> Type["ExtensionArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
raise NotImplementedError
@classmethod
def construct_from_string(cls, string: str):
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[H]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# error: Non-overlapping equality check (left operand type: "str", right
# operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
assert isinstance(cls.name, str), (cls, type(cls.name))
if string != cls.name:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
return cls()
@classmethod
def is_dtype(cls, dtype: object) -> bool:
"""
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
if isinstance(dtype, str):
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
return False
@property
def _is_numeric(self) -> bool:
"""
Whether columns with this dtype should be considered numeric.
By default ExtensionDtypes are assumed to be non-numeric.
They'll be excluded from operations that exclude non-numeric
columns, like (groupby) reductions, plotting, etc.
"""
return False
@property
def _is_boolean(self) -> bool:
"""
Whether this dtype should be considered boolean.
By default, ExtensionDtypes are assumed to be non-numeric.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
return False
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
"""
Return the common dtype, if one exists.
Used in `find_common_type` implementation. This is for example used
to determine the resulting dtype in a concat operation.
If no common dtype exists, return None (which gives the other dtypes
the chance to determine a common dtype). If all dtypes in the list
return None, then the common dtype will be "object" dtype (this means
it is never needed to return "object" dtype from this method itself).
Parameters
----------
dtypes : list of dtypes
The dtypes for which to determine a common dtype. This is a list
of np.dtype or ExtensionDtype instances.
Returns
-------
Common dtype (np.dtype or ExtensionDtype) or None
"""
if len(set(dtypes)) == 1:
# only itself
return self
else:
return None
def register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]:
"""
Register an ExtensionType with pandas as class decorator.
.. versionadded:: 0.24.0
This enables operations like ``.astype(name)`` for the name
of the ExtensionDtype.
Returns
-------
callable
A class decorator.
Examples
--------
>>> from pandas.api.extensions import register_extension_dtype
>>> from pandas.api.extensions import ExtensionDtype
>>> @register_extension_dtype
... class MyExtensionDtype(ExtensionDtype):
... name = "myextension"
"""
registry.register(cls)
return cls
class Registry:
"""
Registry for dtype inference.
    The registry allows one to map a string repr of an extension
dtype to an extension dtype. The string alias can be used in several
places, including
* Series and Index constructors
* :meth:`pandas.array`
* :meth:`pandas.Series.astype`
Multiple extension types can be registered.
These are tried in order.
"""
def __init__(self):
self.dtypes: List[Type[ExtensionDtype]] = []
def register(self, dtype: Type[ExtensionDtype]) -> None:
"""
Parameters
----------
dtype : ExtensionDtype class
"""
if not issubclass(dtype, ExtensionDtype):
raise ValueError("can only register pandas extension dtypes")
self.dtypes.append(dtype)
def find(
self, dtype: Union[Type[ExtensionDtype], str]
) -> Optional[Type[ExtensionDtype]]:
"""
Parameters
----------
dtype : Type[ExtensionDtype] or str
Returns
-------
        The first matching dtype, otherwise None.
"""
if not isinstance(dtype, str):
dtype_type = dtype
if not isinstance(dtype, type):
dtype_type = type(dtype)
if issubclass(dtype_type, ExtensionDtype):
return dtype
return None
for dtype_type in self.dtypes:
try:
return dtype_type.construct_from_string(dtype)
except TypeError:
pass
return None
registry = Registry()
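# Example (a sketch): a minimal ExtensionDtype subclass. 'DecimalDtype' and
# 'DecimalArray' are hypothetical names standing in for a real implementation.
#
#     import decimal
#
#     @register_extension_dtype
#     class DecimalDtype(ExtensionDtype):
#         name = "decimal"
#         type = decimal.Decimal
#
#         @classmethod
#         def construct_array_type(cls):
#             return DecimalArray
#
# After registration, registry.find("decimal") resolves the string alias to a
# DecimalDtype instance via the default construct_from_string.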
|
# xfce4-rss-plugin - an RSS aggregator for the Xfce 4 Panel
# Copyright (c) 2006 Adriano Winter Bess <adriano@xfce.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License ONLY.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import pygtk
pygtk.require("2.0")
import gtk
import gobject
from gettext import gettext as _
class PropertiesDialog (gtk.Dialog):
def __init__ (self, config):
gtk.Dialog.__init__ (self, _("RSS Aggregator"),
None, gtk.DIALOG_MODAL|gtk.DIALOG_NO_SEPARATOR,
(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.config = config
self.set_border_width (8)
self.vbox.set_homogeneous (False)
self.set_default_size (600, 400)
model = gtk.ListStore (gobject.TYPE_STRING, gobject.TYPE_STRING)
for feed in config.traverse_feeds ():
model.append ((feed['name'], feed['url']))
self.build_view (model)
vb = gtk.VBox (spacing=8)
but = gtk.Button (stock=gtk.STOCK_ADD)
but.connect ("clicked", add_cb, self.feeds_view)
vb.pack_start (but, False)
but = gtk.Button (stock=gtk.STOCK_REMOVE)
but.connect ("clicked", remove_cb, self.feeds_view)
vb.pack_start (but, False)
lab = gtk.Label (_("RSS Feeds:"))
lab.set_alignment (0.0, 0.5)
self.vbox.pack_start (lab, False)
hb = gtk.HBox (spacing=8)
hb.pack_start (self.feeds_view)
align = gtk.Alignment (0.5, 0.5)
align.add (vb)
hb.pack_start (align, False)
self.vbox.pack_start (hb)
self.connect ("response", response_cb, None)
def build_view (self, model):
tv = gtk.TreeView (model)
tv.set_headers_visible (True)
tv.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
col = gtk.TreeViewColumn (_("Name"))
cell = gtk.CellRendererText ()
cell.set_property ("editable", True)
cell.connect ("edited", edited_cb, (model, 0))
col.pack_start (cell)
tv.append_column (col)
col.set_attributes (cell, text=0)
col.set_sort_column_id (0)
tv.set_search_column (0)
col = gtk.TreeViewColumn (_("URL"))
cell = gtk.CellRendererText ()
cell.set_property ("editable", True)
cell.connect ("edited", edited_cb, (model, 1))
col.pack_start (cell)
tv.append_column (col)
col.set_attributes (cell, text=1)
self.feeds_view = tv
def response_cb (dlg, rid, dummy):
model = dlg.feeds_view.get_model ()
dlg.config.clear_feeds ()
for feed in model:
dlg.config.add_feed (feed[0], feed[1])
dlg.config.write_config ()
dlg.destroy ()
def edited_cb (cell, path, text, data):
model, column = data
model[path][column] = text
def remove_cb (but, feeds_view):
selection = feeds_view.get_selection ()
if selection.count_selected_rows () > 0:
(model, rows) = selection.get_selected_rows ()
refs = list ()
for path in rows:
refs.append (gtk.TreeRowReference (model, path))
for ref in refs:
model.remove (model.get_iter (ref.get_path ()))
def add_cb (but, feeds_view):
model = feeds_view.get_model ()
model.append ((_("Name"), "http://"))
|
import unittest
from decimal import Decimal
from httpretty import HTTPretty, httprettified
import openexchangerates
from datetime import date as Date
class TestOpenExchangeRates(unittest.TestCase):
_FIXTURE_CURRENCIES = """{
"AED": "United Arab Emirates Dirham",
"AFN": "Afghan Afghani",
"ALL": "Albanian Lek"
}
"""
_FIXTURE_LATEST = """{
"disclaimer": "<Disclaimer data>",
"license": "<License data>",
"timestamp": 1358150409,
"base": "USD",
"rates": {
"AED": 3.666311,
"AFN": 51.2281,
"ALL": 104.748751
}
}
"""
_FIXTURE_HISTORICAL = """{
"disclaimer": "<Disclaimer data>",
"license": "<License data>",
"timestamp": 1358150409,
"base": "USD",
"rates": {
"AED": 3.666311,
"AFN": 51.2281,
"ALL": 104.748751
}
}
"""
@httprettified
def test_historical(self):
"""Tests openexchangerates.OpenExchangeRateClient.historical``"""
client = openexchangerates.OpenExchangeRatesClient('DUMMY_API_KEY')
date = Date.fromtimestamp(1358150409)
HTTPretty.register_uri(HTTPretty.GET, client.ENDPOINT_HISTORICAL %
date.strftime("%Y-%m-%d"),
                               body=self._FIXTURE_HISTORICAL)
historical = client.historical(date)
self.assertIn('rates', historical)
rates = historical['rates']
self.assertEqual(len(rates), 3)
self.assertIn('AED', rates)
self.assertEqual(rates['AED'], Decimal('3.666311'))
self.assertIn('AFN', rates)
self.assertEqual(rates['AFN'], Decimal('51.2281'))
self.assertIn('ALL', rates)
self.assertEqual(rates['ALL'], Decimal('104.748751'))
@httprettified
def test_currencies(self):
"""Tests ``openexchangerates.OpenExchangeRateClient\.currencies``"""
client = openexchangerates.OpenExchangeRatesClient('DUMMY_API_KEY')
HTTPretty.register_uri(HTTPretty.GET, client.ENDPOINT_CURRENCIES,
body=self._FIXTURE_CURRENCIES)
currencies = client.currencies()
self.assertEqual(len(currencies), 3)
self.assertIn('AED', currencies)
self.assertIn('AFN', currencies)
self.assertIn('ALL', currencies)
@httprettified
def test_latest(self):
"""Tests openexchangerates.OpenExchangeRateClient.latest``"""
client = openexchangerates.OpenExchangeRatesClient('DUMMY_API_KEY')
HTTPretty.register_uri(HTTPretty.GET, client.ENDPOINT_LATEST,
body=self._FIXTURE_LATEST)
latest = client.latest()
self.assertIn('rates', latest)
rates = latest['rates']
self.assertEqual(len(rates), 3)
self.assertIn('AED', rates)
self.assertEqual(rates['AED'], Decimal('3.666311'))
self.assertIn('AFN', rates)
self.assertEqual(rates['AFN'], Decimal('51.2281'))
self.assertIn('ALL', rates)
self.assertEqual(rates['ALL'], Decimal('104.748751'))
@httprettified
def test_exception(self):
"""Tests ``openexchangerates.OpenExchangeRateClientException``"""
client = openexchangerates.OpenExchangeRatesClient('DUMMY_API_KEY')
HTTPretty.register_uri(HTTPretty.GET, client.ENDPOINT_LATEST,
status=404)
        with self.assertRaises(
                openexchangerates.OpenExchangeRatesClientException):
            client.latest()
|
import math
from itertools import zip_longest
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
from prompt_toolkit.application.current import get_app
from prompt_toolkit.buffer import CompletionState
from prompt_toolkit.completion import Completion
from prompt_toolkit.data_structures import Point
from prompt_toolkit.filters import (
Condition,
FilterOrBool,
has_completions,
is_done,
to_filter,
)
from prompt_toolkit.formatted_text import (
StyleAndTextTuples,
fragment_list_width,
to_formatted_text,
)
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.layout.utils import explode_text_fragments
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
from prompt_toolkit.utils import get_cwidth
from .containers import ConditionalContainer, HSplit, ScrollOffsets, Window
from .controls import GetLinePrefixCallable, UIContent, UIControl
from .dimension import Dimension
from .margins import ScrollbarMargin
if TYPE_CHECKING:
from prompt_toolkit.key_binding.key_bindings import KeyBindings
NotImplementedOrNone = object
__all__ = [
"CompletionsMenu",
"MultiColumnCompletionsMenu",
]
E = KeyPressEvent
class CompletionsMenuControl(UIControl):
"""
Helper for drawing the complete menu to the screen.
:param scroll_offset: Number (integer) representing the preferred amount of
completions to be displayed before and after the current one. When this
is a very high number, the current completion will be shown in the
middle most of the time.
"""
# Preferred minimum size of the menu control.
    # (The CompletionsMenu class defines a width of 8, and there is a
    # scrollbar of 1.)
MIN_WIDTH = 7
def has_focus(self) -> bool:
return False
def preferred_width(self, max_available_width: int) -> Optional[int]:
complete_state = get_app().current_buffer.complete_state
if complete_state:
menu_width = self._get_menu_width(500, complete_state)
menu_meta_width = self._get_menu_meta_width(500, complete_state)
return menu_width + menu_meta_width
else:
return 0
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
complete_state = get_app().current_buffer.complete_state
if complete_state:
return len(complete_state.completions)
else:
return 0
def create_content(self, width: int, height: int) -> UIContent:
"""
Create a UIContent object for this control.
"""
complete_state = get_app().current_buffer.complete_state
if complete_state:
completions = complete_state.completions
index = complete_state.complete_index # Can be None!
# Calculate width of completions menu.
menu_width = self._get_menu_width(width, complete_state)
menu_meta_width = self._get_menu_meta_width(
width - menu_width, complete_state
)
show_meta = self._show_meta(complete_state)
def get_line(i: int) -> StyleAndTextTuples:
c = completions[i]
is_current_completion = i == index
result = _get_menu_item_fragments(
c, is_current_completion, menu_width, space_after=True
)
if show_meta:
result += self._get_menu_item_meta_fragments(
c, is_current_completion, menu_meta_width
)
return result
return UIContent(
get_line=get_line,
cursor_position=Point(x=0, y=index or 0),
line_count=len(completions),
)
return UIContent()
def _show_meta(self, complete_state: CompletionState) -> bool:
"""
Return ``True`` if we need to show a column with meta information.
"""
return any(c.display_meta_text for c in complete_state.completions)
def _get_menu_width(self, max_width: int, complete_state: CompletionState) -> int:
"""
Return the width of the main column.
"""
return min(
max_width,
max(
self.MIN_WIDTH,
max(get_cwidth(c.display_text) for c in complete_state.completions) + 2,
),
)
def _get_menu_meta_width(
self, max_width: int, complete_state: CompletionState
) -> int:
"""
Return the width of the meta column.
"""
def meta_width(completion: Completion) -> int:
return get_cwidth(completion.display_meta_text)
if self._show_meta(complete_state):
return min(
max_width, max(meta_width(c) for c in complete_state.completions) + 2
)
else:
return 0
def _get_menu_item_meta_fragments(
self, completion: Completion, is_current_completion: bool, width: int
) -> StyleAndTextTuples:
if is_current_completion:
style_str = "class:completion-menu.meta.completion.current"
else:
style_str = "class:completion-menu.meta.completion"
text, tw = _trim_formatted_text(completion.display_meta, width - 2)
padding = " " * (width - 1 - tw)
return to_formatted_text(
cast(StyleAndTextTuples, []) + [("", " ")] + text + [("", padding)],
style=style_str,
)
def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone":
"""
Handle mouse events: clicking and scrolling.
"""
b = get_app().current_buffer
if mouse_event.event_type == MouseEventType.MOUSE_UP:
# Select completion.
b.go_to_completion(mouse_event.position.y)
b.complete_state = None
        elif mouse_event.event_type == MouseEventType.SCROLL_DOWN:
            # Scroll down: select a later completion.
            b.complete_next(count=3, disable_wrap_around=True)
        elif mouse_event.event_type == MouseEventType.SCROLL_UP:
            # Scroll up: select an earlier completion.
            b.complete_previous(count=3, disable_wrap_around=True)
return None
def _get_menu_item_fragments(
completion: Completion,
is_current_completion: bool,
width: int,
space_after: bool = False,
) -> StyleAndTextTuples:
"""
Get the style/text tuples for a menu item, styled and trimmed to the given
width.
"""
if is_current_completion:
style_str = "class:completion-menu.completion.current %s %s" % (
completion.style,
completion.selected_style,
)
else:
style_str = "class:completion-menu.completion " + completion.style
text, tw = _trim_formatted_text(
completion.display, (width - 2 if space_after else width - 1)
)
padding = " " * (width - 1 - tw)
return to_formatted_text(
cast(StyleAndTextTuples, []) + [("", " ")] + text + [("", padding)],
style=style_str,
)
def _trim_formatted_text(
formatted_text: StyleAndTextTuples, max_width: int
) -> Tuple[StyleAndTextTuples, int]:
"""
Trim the text to `max_width`, append dots when the text is too long.
Returns (text, width) tuple.
"""
width = fragment_list_width(formatted_text)
# When the text is too wide, trim it.
if width > max_width:
result = [] # Text fragments.
remaining_width = max_width - 3
for style_and_ch in explode_text_fragments(formatted_text):
ch_width = get_cwidth(style_and_ch[1])
if ch_width <= remaining_width:
result.append(style_and_ch)
remaining_width -= ch_width
else:
break
result.append(("", "..."))
return result, max_width - remaining_width
else:
return formatted_text, width
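# For example (a sketch), _trim_formatted_text([("", "hello world")], 8)
# keeps "hello", appends the dots, and reports a width of 8 ("hello...").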
class CompletionsMenu(ConditionalContainer):
# NOTE: We use a pretty big z_index by default. Menus are supposed to be
# above anything else. We also want to make sure that the content is
# visible at the point where we draw this menu.
def __init__(
self,
max_height: Optional[int] = None,
scroll_offset: Union[int, Callable[[], int]] = 0,
extra_filter: FilterOrBool = True,
display_arrows: FilterOrBool = False,
z_index: int = 10 ** 8,
) -> None:
extra_filter = to_filter(extra_filter)
display_arrows = to_filter(display_arrows)
super().__init__(
content=Window(
content=CompletionsMenuControl(),
width=Dimension(min=8),
height=Dimension(min=1, max=max_height),
scroll_offsets=ScrollOffsets(top=scroll_offset, bottom=scroll_offset),
right_margins=[ScrollbarMargin(display_arrows=display_arrows)],
dont_extend_width=True,
style="class:completion-menu",
z_index=z_index,
),
# Show when there are completions but not at the point we are
# returning the input.
filter=has_completions & ~is_done & extra_filter,
)
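# Example (a sketch): when building a custom layout, this menu is typically
# wrapped in a Float anchored at the cursor ('input_window' is assumed to
# exist in the surrounding application code):
#
#     from prompt_toolkit.layout.containers import Float, FloatContainer
#     body = FloatContainer(
#         content=input_window,
#         floats=[Float(xcursor=True, ycursor=True,
#                       content=CompletionsMenu(max_height=16,
#                                               scroll_offset=1))],
#     )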
class MultiColumnCompletionMenuControl(UIControl):
"""
Completion menu that displays all the completions in several columns.
When there are more completions than space for them to be displayed, an
arrow is shown on the left or right side.
`min_rows` indicates how many rows will be available in any possible case.
When this is larger than one, it will try to use less columns and more
rows until this value is reached.
    Be careful when passing in too large a value: if fewer than the given
    number of rows are available, more columns would have been required, but
    `preferred_width` doesn't know about that and reports too small a value.
    This results in fewer completions being displayed and additional scrolling.
(It's a limitation of how the layout engine currently works: first the
widths are calculated, then the heights.)
:param suggested_max_column_width: The suggested max width of a column.
The column can still be bigger than this, but if there is place for two
columns of this width, we will display two columns. This to avoid that
if there is one very wide completion, that it doesn't significantly
reduce the amount of columns.
"""
_required_margin = 3 # One extra padding on the right + space for arrows.
def __init__(self, min_rows: int = 3, suggested_max_column_width: int = 30) -> None:
assert min_rows >= 1
self.min_rows = min_rows
self.suggested_max_column_width = suggested_max_column_width
self.scroll = 0
# Info of last rendering.
self._rendered_rows = 0
self._rendered_columns = 0
self._total_columns = 0
self._render_pos_to_completion: Dict[Tuple[int, int], Completion] = {}
self._render_left_arrow = False
self._render_right_arrow = False
self._render_width = 0
def reset(self) -> None:
self.scroll = 0
def has_focus(self) -> bool:
return False
def preferred_width(self, max_available_width: int) -> Optional[int]:
"""
Preferred width: prefer to use at least min_rows, but otherwise as much
as possible horizontally.
"""
complete_state = get_app().current_buffer.complete_state
if complete_state is None:
return 0
column_width = self._get_column_width(complete_state)
result = int(
column_width
* math.ceil(len(complete_state.completions) / float(self.min_rows))
)
# When the desired width is still more than the maximum available,
# reduce by removing columns until we are less than the available
# width.
while (
result > column_width
and result > max_available_width - self._required_margin
):
result -= column_width
return result + self._required_margin
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
"""
Preferred height: as much as needed in order to display all the completions.
"""
complete_state = get_app().current_buffer.complete_state
if complete_state is None:
return 0
column_width = self._get_column_width(complete_state)
column_count = max(1, (width - self._required_margin) // column_width)
return int(math.ceil(len(complete_state.completions) / float(column_count)))
def create_content(self, width: int, height: int) -> UIContent:
"""
Create a UIContent object for this menu.
"""
complete_state = get_app().current_buffer.complete_state
if complete_state is None:
return UIContent()
column_width = self._get_column_width(complete_state)
self._render_pos_to_completion = {}
_T = TypeVar("_T")
def grouper(
n: int, iterable: Iterable[_T], fillvalue: Optional[_T] = None
) -> Iterable[List[_T]]:
" grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx "
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
        def is_current_completion(completion: Completion) -> bool:
            " Returns True when this completion is the currently selected one. "
            return (
                complete_state is not None
                and complete_state.complete_index is not None
                and completion == complete_state.current_completion
            )
# Space required outside of the regular columns, for displaying the
# left and right arrow.
HORIZONTAL_MARGIN_REQUIRED = 3
# There should be at least one column, but it cannot be wider than
# the available width.
column_width = min(width - HORIZONTAL_MARGIN_REQUIRED, column_width)
# However, when the columns tend to be very wide, because there are
# some very wide entries, shrink it anyway.
if column_width > self.suggested_max_column_width:
            # `column_width` can still be bigger than `suggested_max_column_width`,
            # but if there is place for two columns, we divide by two.
            column_width //= column_width // self.suggested_max_column_width
visible_columns = max(1, (width - self._required_margin) // column_width)
columns_ = list(grouper(height, complete_state.completions))
rows_ = list(zip(*columns_))
# Make sure the current completion is always visible: update scroll offset.
selected_column = (complete_state.complete_index or 0) // height
self.scroll = min(
selected_column, max(self.scroll, selected_column - visible_columns + 1)
)
render_left_arrow = self.scroll > 0
render_right_arrow = self.scroll < len(rows_[0]) - visible_columns
# Write completions to screen.
fragments_for_line = []
for row_index, row in enumerate(rows_):
fragments: StyleAndTextTuples = []
middle_row = row_index == len(rows_) // 2
# Draw left arrow if we have hidden completions on the left.
if render_left_arrow:
fragments.append(("class:scrollbar", "<" if middle_row else " "))
elif render_right_arrow:
# Reserve one column empty space. (If there is a right
# arrow right now, there can be a left arrow as well.)
fragments.append(("", " "))
# Draw row content.
for column_index, c in enumerate(row[self.scroll :][:visible_columns]):
if c is not None:
fragments += _get_menu_item_fragments(
c, is_current_completion(c), column_width, space_after=False
)
# Remember render position for mouse click handler.
for x in range(column_width):
self._render_pos_to_completion[
(column_index * column_width + x, row_index)
] = c
else:
fragments.append(("class:completion", " " * column_width))
# Draw trailing padding for this row.
# (_get_menu_item_fragments only returns padding on the left.)
if render_left_arrow or render_right_arrow:
fragments.append(("class:completion", " "))
# Draw right arrow if we have hidden completions on the right.
if render_right_arrow:
fragments.append(("class:scrollbar", ">" if middle_row else " "))
elif render_left_arrow:
fragments.append(("class:completion", " "))
# Add line.
fragments_for_line.append(
to_formatted_text(fragments, style="class:completion-menu")
)
self._rendered_rows = height
self._rendered_columns = visible_columns
self._total_columns = len(columns_)
self._render_left_arrow = render_left_arrow
self._render_right_arrow = render_right_arrow
self._render_width = (
column_width * visible_columns + render_left_arrow + render_right_arrow + 1
)
def get_line(i: int) -> StyleAndTextTuples:
return fragments_for_line[i]
return UIContent(get_line=get_line, line_count=len(rows_))
def _get_column_width(self, complete_state: CompletionState) -> int:
"""
Return the width of each column.
"""
return max(get_cwidth(c.display_text) for c in complete_state.completions) + 1
    def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone":
"""
Handle scroll and click events.
"""
b = get_app().current_buffer
def scroll_left() -> None:
b.complete_previous(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = max(0, self.scroll - 1)
def scroll_right() -> None:
b.complete_next(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = min(
self._total_columns - self._rendered_columns, self.scroll + 1
)
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
scroll_right()
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
scroll_left()
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
x = mouse_event.position.x
y = mouse_event.position.y
# Mouse click on left arrow.
if x == 0:
if self._render_left_arrow:
scroll_left()
# Mouse click on right arrow.
elif x == self._render_width - 1:
if self._render_right_arrow:
scroll_right()
# Mouse click on completion.
else:
completion = self._render_pos_to_completion.get((x, y))
if completion:
b.apply_completion(completion)
return None
def get_key_bindings(self) -> "KeyBindings":
"""
Expose key bindings that handle the left/right arrow keys when the menu
is displayed.
"""
from prompt_toolkit.key_binding.key_bindings import KeyBindings
kb = KeyBindings()
@Condition
def filter() -> bool:
" Only handle key bindings if this menu is visible. "
app = get_app()
complete_state = app.current_buffer.complete_state
# There need to be completions, and one needs to be selected.
if complete_state is None or complete_state.complete_index is None:
return False
# This menu needs to be visible.
return any(window.content == self for window in app.layout.visible_windows)
def move(right: bool = False) -> None:
buff = get_app().current_buffer
complete_state = buff.complete_state
if complete_state is not None and complete_state.complete_index is not None:
# Calculate new complete index.
new_index = complete_state.complete_index
if right:
new_index += self._rendered_rows
else:
new_index -= self._rendered_rows
if 0 <= new_index < len(complete_state.completions):
buff.go_to_completion(new_index)
        # NOTE: is_global is required because the completion menu will
        # never be focused.
@kb.add("left", is_global=True, filter=filter)
def _left(event: E) -> None:
move()
@kb.add("right", is_global=True, filter=filter)
def _right(event: E) -> None:
move(True)
return kb
class MultiColumnCompletionsMenu(HSplit):
"""
Container that displays the completions in several columns.
When `show_meta` (a :class:`~prompt_toolkit.filters.Filter`) evaluates
to True, it shows the meta information at the bottom.
"""
def __init__(
self,
min_rows: int = 3,
suggested_max_column_width: int = 30,
show_meta: FilterOrBool = True,
extra_filter: FilterOrBool = True,
z_index: int = 10 ** 8,
) -> None:
show_meta = to_filter(show_meta)
extra_filter = to_filter(extra_filter)
# Display filter: show when there are completions but not at the point
# we are returning the input.
full_filter = has_completions & ~is_done & extra_filter
@Condition
def any_completion_has_meta() -> bool:
complete_state = get_app().current_buffer.complete_state
return complete_state is not None and any(
c.display_meta for c in complete_state.completions
)
# Create child windows.
# NOTE: We don't set style='class:completion-menu' to the
# `MultiColumnCompletionMenuControl`, because this is used in a
# Float that is made transparent, and the size of the control
# doesn't always correspond exactly with the size of the
# generated content.
completions_window = ConditionalContainer(
content=Window(
content=MultiColumnCompletionMenuControl(
min_rows=min_rows,
suggested_max_column_width=suggested_max_column_width,
),
width=Dimension(min=8),
height=Dimension(min=1),
),
filter=full_filter,
)
meta_window = ConditionalContainer(
content=Window(content=_SelectedCompletionMetaControl()),
filter=show_meta & full_filter & any_completion_has_meta,
)
# Initialise split.
super().__init__([completions_window, meta_window], z_index=z_index)
class _SelectedCompletionMetaControl(UIControl):
"""
Control that shows the meta information of the selected completion.
"""
def preferred_width(self, max_available_width: int) -> Optional[int]:
"""
Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.)
"""
app = get_app()
if app.current_buffer.complete_state:
state = app.current_buffer.complete_state
return 2 + max(get_cwidth(c.display_meta_text) for c in state.completions)
else:
return 0
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
return 1
def create_content(self, width: int, height: int) -> UIContent:
fragments = self._get_text_fragments()
def get_line(i: int) -> StyleAndTextTuples:
return fragments
return UIContent(get_line=get_line, line_count=1 if fragments else 0)
def _get_text_fragments(self) -> StyleAndTextTuples:
style = "class:completion-menu.multi-column-meta"
state = get_app().current_buffer.complete_state
if (
state
and state.current_completion
and state.current_completion.display_meta_text
):
return to_formatted_text(
cast(StyleAndTextTuples, [("", " ")])
+ state.current_completion.display_meta
+ [("", " ")],
style=style,
)
return []
|
# Copyright (C) 2014-2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014-2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2015 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2015 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from django.core.urlresolvers import reverse
from .. import factories
from taiga.base.utils import json
pytestmark = pytest.mark.django_db
def test_list_userstorage(client):
user1 = factories.UserFactory()
user2 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
factories.StorageEntryFactory(owner=user1)
storage13 = factories.StorageEntryFactory(owner=user1)
factories.StorageEntryFactory(owner=user2)
    # List by anonymous user
response = client.json.get(reverse("user-storage-list"))
assert response.status_code == 200
assert len(response.data) == 0
# List own entries
client.login(username=user1.username, password=user1.username)
response = client.json.get(reverse("user-storage-list"))
assert response.status_code == 200
assert len(response.data) == 3
client.login(username=user2.username, password=user2.username)
response = client.json.get(reverse("user-storage-list"))
assert response.status_code == 200
assert len(response.data) == 1
# Filter results by key
client.login(username=user1.username, password=user1.username)
keys = ",".join([storage11.key, storage13.key])
url = "{}?keys={}".format(reverse("user-storage-list"), keys)
response = client.json.get(url)
assert response.status_code == 200
assert len(response.data) == 2
def test_view_storage_entries(client):
user1 = factories.UserFactory()
user2 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
# Get by anonymous user
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
# Get single entry
client.login(username=user1.username, password=user1.username)
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 200
assert response.data["key"] == storage11.key
assert response.data["value"] == storage11.value
    # Get a nonexistent key
client.login(username=user2.username, password=user2.username)
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
response = client.json.get(reverse("user-storage-detail", args=["foobar"]))
assert response.status_code == 404
def test_create_entries(client):
user1 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
form = {"key": "foo",
"value": {"bar": "bar"}}
form_without_key = {"value": {"bar": "bar"}}
form_without_value = {"key": "foo"}
error_form = {"key": storage11.key,
"value": {"bar": "bar"}}
# Create entry by anonymous user
response = client.json.post(reverse("user-storage-list"), json.dumps(form))
assert response.status_code == 401
# Create by logged user
client.login(username=user1.username, password=user1.username)
response = client.json.post(reverse("user-storage-list"), json.dumps(form))
assert response.status_code == 201
response = client.json.get(reverse("user-storage-detail", args=[form["key"]]))
assert response.status_code == 200
# Wrong data
client.login(username=user1.username, password=user1.username)
response = client.json.post(reverse("user-storage-list"), json.dumps(form_without_key))
assert response.status_code == 400
response = client.json.post(reverse("user-storage-list"), json.dumps(form_without_value))
assert response.status_code == 400
response = client.json.post(reverse("user-storage-list"), json.dumps(error_form))
assert response.status_code == 400
def test_update_entries(client):
user1 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
# Update by anonymous user
form = {"value": "bar", "key": storage11.key}
response = client.json.put(reverse("user-storage-detail", args=[storage11.key]),
json.dumps(form))
assert response.status_code == 401
# Update by logged user
client.login(username=user1.username, password=user1.username)
form = {"value": {"bar": "bar"}, "key": storage11.key}
response = client.json.put(reverse("user-storage-detail", args=[storage11.key]),
json.dumps(form))
assert response.status_code == 200
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 200
assert response.data["value"] == form["value"]
    # Update a nonexistent entry (creates it)
form = {"value": {"bar": "bar"}, "key": "foo"}
response = client.json.get(reverse("user-storage-detail", args=[form["key"]]))
assert response.status_code == 404
response = client.json.put(reverse("user-storage-detail", args=[form["key"]]),
json.dumps(form))
assert response.status_code == 201
response = client.json.get(reverse("user-storage-detail", args=[form["key"]]))
assert response.status_code == 200
assert response.data["value"] == form["value"]
def test_delete_storage_entry(client):
user1 = factories.UserFactory()
user2 = factories.UserFactory()
storage11 = factories.StorageEntryFactory(owner=user1)
    # Delete by anonymous user
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 401
# Delete by logged user
client.login(username=user1.username, password=user1.username)
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 204
response = client.json.get(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
    # Delete a nonexistent entry
response = client.json.delete(reverse("user-storage-detail", args=["foo"]))
assert response.status_code == 404
client.login(username=user2.username, password=user2.username)
response = client.json.delete(reverse("user-storage-detail", args=[storage11.key]))
assert response.status_code == 404
|
"""
Copyright (c) 2017 - Philip Paquette
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Modified from https://raw.githubusercontent.com/Newmu/dcgan_code/master/lib/updates.py
# MIT License
import theano
import theano.tensor as T
from .utils import floatX
from .layers import l2norm
# ------------------------
# Regularization
# ------------------------
def clip_norm(grad, clip, norm):
if clip > 0:
grad = T.switch(T.ge(norm, clip), grad * clip / norm, grad)
return grad
def clip_norms(grads, clip):
norm = T.sqrt(sum([T.sum(grad ** 2) for grad in grads]))
return [clip_norm(grad, clip, norm) for grad in grads]
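# For example, with two gradients of L2 norm 3 and 4, the global norm is
# sqrt(3**2 + 4**2) = 5; with clip=2.5 each gradient is scaled by 2.5/5 = 0.5.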
# Base regularizer
class Regularizer(object):
def __init__(self, l1=0., l2=0., maxnorm=0., l2norm=False, frobnorm=False):
self.__dict__.update(locals())
def max_norm(self, param, maxnorm):
if maxnorm > 0:
norms = T.sqrt(T.sum(T.sqr(param), axis=0))
desired = T.clip(norms, 0, maxnorm)
param = param * (desired / (1e-7 + norms))
return param
def l2_norm(self, param):
return param / l2norm(param, axis=0)
def frob_norm(self, param, nrows):
return (param / T.sqrt(T.sum(T.sqr(param)))) * T.sqrt(nrows)
def gradient_regularize(self, param, grad):
grad += param * self.l2
grad += T.sgn(param) * self.l1
return grad
def weight_regularize(self, param):
param = self.max_norm(param, self.maxnorm)
if self.l2norm:
param = self.l2_norm(param)
if self.frobnorm > 0:
param = self.frob_norm(param, self.frobnorm)
return param
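# Example (a sketch): an L2-penalized, max-norm-constrained regularizer.
#
#     reg = Regularizer(l2=1e-4, maxnorm=3.)
#     grad = reg.gradient_regularize(param, grad)  # adds l2 * param to the gradient
#     param = reg.weight_regularize(param)         # rescales column norms to <= 3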
# ------------------------
# Updates
# ------------------------
class Update(object):
def __init__(self, regularizer=Regularizer(), clipnorm=0.):
self.__dict__.update(locals())
def __call__(self, params, grads):
raise NotImplementedError
# Stochastic Gradient Descent
class SGD(Update):
def __init__(self, lr=0.01, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
updated_param = param - self.lr * grad
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
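# Example (a sketch): building a Theano training function with these updates.
# X, y, params and cost are assumed to come from the surrounding model code.
#
#     updater = SGD(lr=0.01, clipnorm=5., regularizer=Regularizer(l2=1e-4))
#     train_fn = theano.function([X, y], cost, updates=updater(params, cost))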
# SGD with momentum
class Momentum(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = (self.momentum * m) - (self.lr * grad)
updates.append((m, v))
updated_param = param + v
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
# SGD with Nesterov Accelerated Gradient
class Nesterov(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = (self.momentum * m) - (self.lr * grad)
updated_param = param + self.momentum * v - self.lr * grad
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((m, v))
updates.append((param, updated_param))
return updates
# RMS Prop
class RMSprop(Update):
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_new = self.rho * acc + (1 - self.rho) * grad ** 2
updates.append((acc, acc_new))
updated_param = param - self.lr * (grad / T.sqrt(acc_new + self.epsilon))
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
return updates
# Adam
class Adam(Update):
def __init__(self, lr=0.001, b1=0.9, b2=0.999, e=1e-8, l=1 - 1e-8, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
t = theano.shared(floatX(1.))
b1_t = self.b1 * self.l ** (t - 1)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
m = theano.shared(param.get_value() * 0.)
v = theano.shared(param.get_value() * 0.)
m_t = b1_t * m + (1 - b1_t) * grad
v_t = self.b2 * v + (1 - self.b2) * grad ** 2
m_c = m_t / (1 - self.b1 ** t)
v_c = v_t / (1 - self.b2 ** t)
p_t = param - (self.lr * m_c) / (T.sqrt(v_c) + self.e)
p_t = self.regularizer.weight_regularize(p_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((param, p_t))
updates.append((t, t + 1.))
return updates
# AdaGrad
class Adagrad(Update):
def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_t = acc + grad ** 2
updates.append((acc, acc_t))
p_t = param - (self.lr / T.sqrt(acc_t + self.epsilon)) * grad
p_t = self.regularizer.weight_regularize(p_t)
updates.append((param, p_t))
return updates
# AdaDelta
class Adadelta(Update):
def __init__(self, lr=0.5, rho=0.95, epsilon=1e-6, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
grads = T.grad(cost, params)
grads = clip_norms(grads, self.clipnorm)
for param, grad in zip(params, grads):
grad = self.regularizer.gradient_regularize(param, grad)
acc = theano.shared(param.get_value() * 0.)
acc_delta = theano.shared(param.get_value() * 0.)
acc_new = self.rho * acc + (1 - self.rho) * grad ** 2
updates.append((acc, acc_new))
update = grad * T.sqrt(acc_delta + self.epsilon) / T.sqrt(acc_new + self.epsilon)
updated_param = param - self.lr * update
updated_param = self.regularizer.weight_regularize(updated_param)
updates.append((param, updated_param))
acc_delta_new = self.rho * acc_delta + (1 - self.rho) * update ** 2
updates.append((acc_delta, acc_delta_new))
return updates
# No updates
class NoUpdate(Update):
def __init__(self, lr=0.01, momentum=0.9, *args, **kwargs):
Update.__init__(self, *args, **kwargs)
self.__dict__.update(locals())
def __call__(self, params, cost):
updates = []
for param in params:
updates.append((param, param))
return updates
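# Minimal end-to-end sketch (assumes `params` is a list of theano shared
# variables and `cost` a scalar expression built from them; `x` and `y` stand
# in for whatever symbolic inputs the cost depends on):
#
#     updater = Adam(lr=1e-3, regularizer=Regularizer(l2=1e-4), clipnorm=10.)
#     updates = updater(params, cost)
#     train_fn = theano.function([x, y], cost, updates=updates)
#     train_fn(x_batch, y_batch)   # one optimization step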
|
#!/usr/bin/env python
# coding: utf-8
from salada import language
from salada import segmenter
class TestDefault:
def test_segment_text_by_sequence_of_spaces(self):
text = ' foo \n \n\n bar \t\n baz '
expectation = [
language.Segment('', True, False),
language.Segment('foo', False, False),
language.Segment('bar', False, False),
language.Segment('baz', False, False),
language.Segment('', False, True),
]
result = segmenter.Default().segment(text)
assert result == expectation
def test_regard_first_as_headless(self):
text = 'foo \n \n\n bar \t\n baz '
expectation = [
language.Segment('foo', True, False),
language.Segment('bar', False, False),
language.Segment('baz', False, False),
language.Segment('', False, True),
]
result = segmenter.Default().segment(text)
assert result == expectation
def test_regard_last_as_tailless(self):
text = ' foo \n \n\n bar \t\n baz'
expectation = [
language.Segment('', True, False),
language.Segment('foo', False, False),
language.Segment('bar', False, False),
language.Segment('baz', False, True),
]
result = segmenter.Default().segment(text)
assert result == expectation
|
#!/usr/bin/env python3
# Enforce header order in a given file. Only the first sequence of contiguous
# #include statements is reordered, so this will not play well with #ifdef.
#
# This attempts to enforce the guidelines at
# https://google.github.io/styleguide/cppguide.html#Names_and_Order_of_Includes
# with some allowances for Envoy-specific idioms.
#
# There is considerable overlap between what this does and clang-format's IncludeCategories (see
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html), but clang-format doesn't seem smart
# enough to handle block splitting and to correctly detect the main header given the Envoy
# canonical paths.
import argparse
import common
import pathlib
import re
import sys
def reorder_headers(path):
source = pathlib.Path(path).read_text(encoding='utf-8')
all_lines = iter(source.split('\n'))
before_includes_lines = []
includes_lines = []
after_includes_lines = []
# Collect all the lines prior to the first #include in before_includes_lines.
try:
while True:
line = next(all_lines)
if line.startswith('#include'):
includes_lines.append(line)
break
before_includes_lines.append(line)
except StopIteration:
pass
    # Collect the remaining #include lines in includes_lines, skipping blank lines.
try:
while True:
line = next(all_lines)
if not line:
continue
if not line.startswith('#include'):
after_includes_lines.append(line)
break
includes_lines.append(line)
except StopIteration:
pass
# Collect the remaining lines in after_includes_lines.
after_includes_lines += list(all_lines)
# Filter for includes that finds the #include of the header file associated with the source file
# being processed. E.g. if 'path' is source/common/common/hex.cc, this filter matches
# "source/common/common/hex.h".
def file_header_filter():
return lambda f: f.endswith('.h"') and path.endswith(f[1:-3] + '.cc')
def regex_filter(regex):
return lambda f: re.match(regex, f)
# Filters that define the #include blocks
block_filters = [
file_header_filter(),
        regex_filter(r'<.*\.h>'),
regex_filter('<.*>'),
]
for subdir in include_dir_order:
block_filters.append(regex_filter('"' + subdir + '/.*"'))
blocks = []
already_included = set([])
for b in block_filters:
block = []
for line in includes_lines:
header = line[len('#include '):]
if line not in already_included and b(header):
block.append(line)
already_included.add(line)
if len(block) > 0:
blocks.append(block)
# Anything not covered by block_filters gets its own block.
misc_headers = list(set(includes_lines).difference(already_included))
if len(misc_headers) > 0:
blocks.append(misc_headers)
reordered_includes_lines = '\n\n'.join(['\n'.join(sorted(block)) for block in blocks])
if reordered_includes_lines:
reordered_includes_lines += '\n'
return '\n'.join(
filter(
lambda x: x, [
'\n'.join(before_includes_lines),
reordered_includes_lines,
'\n'.join(after_includes_lines),
]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Header reordering.')
parser.add_argument('--path', type=str, help='specify the path to the header file')
parser.add_argument('--rewrite', action='store_true', help='rewrite header file in-place')
parser.add_argument(
'--include_dir_order',
type=str,
default=','.join(common.include_dir_order()),
help='specify the header block include directory order')
args = parser.parse_args()
target_path = args.path
include_dir_order = args.include_dir_order.split(',')
    reordered_source = reorder_headers(target_path)
if args.rewrite:
        pathlib.Path(target_path).write_text(reordered_source, encoding='utf-8')
else:
        sys.stdout.buffer.write(reordered_source.encode('utf-8'))
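# Programmatic usage sketch (illustrative): `include_dir_order` is a module
# global consumed by reorder_headers(), so set it before calling.
#
#     include_dir_order = common.include_dir_order()
#     print(reorder_headers('source/common/common/hex.cc'))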
|
#! /usr/bin/env python
"""
agilent4155_matlab_output_param.py
Parameter extractor for MATLAB-generated .xlsx IdVd files
Created by Jeremy Smith on 2015-10-29
University of California, Berkeley
j-smith@eecs.berkeley.edu
"""
import os
import sys
import xlrd
import numpy as np
import myfunctions as mf
from scipy import stats
__author__ = "Jeremy Smith"
__version__ = "1.0"
data_path = os.path.dirname(__file__) # Path name for location of script
files = os.listdir(data_path) # All files in directory
data_summary = []
summary_list_header = [["filename", "channelL", "channelW"]]
fitrange = 3
def main():
"""Main function"""
print "\nBatch importing .xlsx files..."
print data_path, '\n'
for f in files:
print f
# Loops through all transfer files
if "IDVD.xlsx" in f:
workbook = xlrd.open_workbook(f, logfile=open(os.devnull, 'w'))
for dev in workbook.sheet_names():
if "Sheet" in dev:
continue
print " - device {:s}".format(dev)
datasheet = workbook.sheet_by_name(dev)
run_numbers = [str(int(x)) for x in datasheet.row_values(2) if x]
stepvg = 0
for i, run in enumerate(run_numbers):
print " - run {:s}".format(run)
data = {}
gdlin = []
gdsat = []
vg_list = []
# File name for outputs
outname = f[:-5] + '_' + dev + '_' + run
# Constant parameters taken from header
vgmin = float(datasheet.cell_value(3, (stepvg + 2)*i + 1))
vgmax = float(datasheet.cell_value(4, (stepvg + 2)*i + 1))
stepvg_prev = stepvg
stepvg = int(datasheet.cell_value(5, (stepvg + 2)*i + 1))
chl = float(datasheet.cell_value(1, 1))
chw = float(datasheet.cell_value(0, 1))
tox = float(datasheet.cell_value(1, 3))
kox = float(datasheet.cell_value(0, 3))
ldr = float(datasheet.cell_value(1, 5))
lso = float(datasheet.cell_value(0, 5))
ci = 8.85418782e-7*kox/tox
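                    # Gate capacitance per unit area; with the vacuum
                    # permittivity folded in as 8.854e-7, this yields
                    # F/cm^2 assuming tox is given in nm.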
colheads = ['VDS'] + ["ID{:d}".format(x + 1) for x in range(stepvg)]
for h in colheads:
data[h] = []
for row in range(datasheet.nrows - 11 - stepvg):
for col, h in enumerate(colheads):
                            if datasheet.cell_type(9 + row, (stepvg_prev + 2)*i + col) == 0:
continue
data[h].append(float(datasheet.cell_value(9 + row, (stepvg_prev + 2)*i + col)))
vds = np.array(data['VDS'])
output_list = [vds]
for j in range(stepvg):
ids = np.array(data["ID{:d}".format(j + 1)])
# Fits to first data points (given by fitrange) i.e. linear
slope, intercept, r_value, p_value, std_err = stats.linregress(vds[:fitrange], ids[:fitrange])
gdlin.append(slope)
# Fits to last data points (given by fitrange) i.e. saturation
slope, intercept, r_value, p_value, std_err = stats.linregress(vds[-fitrange:], ids[-fitrange:])
gdsat.append(slope)
# Update lists
output_list.append(ids)
vg_list.append(vgmin + j*(vgmax-vgmin)/(stepvg-1))
# Output data
data_summary.append([outname, chl, chw])
                    # Output files
mf.dataOutputGen(outname+"_output.txt", data_path, map(list, zip(*output_list)))
mf.dataOutputHead(outname+"_gm.txt", data_path, [vg_list, gdlin, gdsat], [['VG', 'GDlin', 'GDsat']],
format_d="%.2f\t %.5e\t %.5e\n",
format_h="%s\t")
mf.dataOutputHead("SUMMARY_OUT.txt", data_path, map(list, zip(*data_summary)), summary_list_header,
format_d="%s\t %.1f\t %.1f\n",
format_h="%s\t")
return
if __name__ == "__main__":
sys.exit(main())
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
maps2WinBUGS
A QGIS plugin a tool to facilitate data processing for Bayesian spatial modeling
-------------------
begin : 2015-07-31
git sha : $Format:%H$
copyright : (C) 2015 by Norbert Solymosi
email : solymosi.norbert@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtWidgets import QDialog
from .xdist_dialog import Ui_xDist
class Dialog(QDialog, Ui_xDist):
def __init__(self):
"""Constructor for the dialog.
"""
QDialog.__init__(self)
self.setupUi(self)
self.pushCancel.clicked.connect(self.reject)
self.pushOK.clicked.connect(self.accept)
|
# -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
""" Tools to discover and modify content.
"""
# Copyright © 2015 1&1 Group <git@1and1.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
import re
import difflib
try:
import html.entities as htmlentitydefs
except ImportError: # Python 2
import htmlentitydefs # pylint: disable=import-error,wrong-import-order
from xml.sax.saxutils import quoteattr # pylint: disable=wrong-import-order
import arrow
from munch import munchify as bunchify
from lxml.etree import fromstring, HTMLParser, XMLParser, XMLSyntaxError # pylint: disable=no-name-in-module
from rudiments.reamed import click
from .._compat import BytesIO
# Mapping of CLI content format names to Confluence API names
CLI_CONTENT_FORMATS = dict(view='view', editor='editor', storage='storage', export='export_view', anon='anonymous_export_view')
# Simple replacement rules, order is important!
TIDY_REGEX_RULES = tuple((_name, re.compile(_rule), _subst) for _name, _rule, _subst in [
("FosWiki: Remove CSS class from section title",
r'<(h[1-5]) class="[^"]*">', r'<\1>'),
("FosWiki: Remove static section numbering",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)[0-9.]+?\s*(?=<span class="tok"> </span>)', r'\1'),
("FosWiki: Empty anchor in headers",
r'(?<=<h.>)<a></a>\s* +', ''),
("FosWiki: 'tok' spans in front of headers",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="tok"> </span>', r'\1'),
("FosWiki: Section edit icons at the end of headers",
r'\s*<a(?: class="[^"]*")? href="[^"]+"(?: title="[^"]*")?>'
r'<ac:image [^>]+><ri:url ri:value="[^"]+/EditChapterPlugin/pencil.png" ?/>'
r'</ac:image></a>(?=</span></h)', ''),
("FosWiki: 'Edit Chapter Plugin' spans (old)",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="ecpHeading">'
r'\s*([^<]+)(?:<br\s*/>)</span>\s*(?=</h.>)', r'\1\2'),
("FosWiki: 'Edit Chapter Plugin' spans (new)",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="ecpHeading">'
r'\s*([^<]+)(?:<br\s*/>)<a class="ecpEdit".+?</a></span>\s*(?=</h.>)', r'\1\2'),
("FosWiki: Residual leading whitespace in headers",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s* +', r'\1'),
("FosWiki: Replace TOC div with macro",
r'(<a name="foswikiTOC" ?/>)?<div class="foswikiToc">.*?</div>', '''
<ac:structured-macro ac:name="panel" ac:schema-version="1">
<ac:parameter ac:name="title">Contents</ac:parameter>
<ac:rich-text-body>
<p>
<ac:structured-macro ac:name="toc" ac:schema-version="1"/>
</p>
</ac:rich-text-body>
</ac:structured-macro>'''),
("FosWiki: Replace TOC in a Twisty with Expand+TOC macro",
r'<div class="twistyPlugin">.+?<big><strong>Table of Contents</strong></big></span></a></span></div>', '''
<ac:structured-macro ac:name="expand" ac:schema-version="1">
<ac:parameter ac:name="title">Table of Contents</ac:parameter>
<ac:rich-text-body>
<p>
<ac:structured-macro ac:name="toc" ac:schema-version="1"/>
</p>
</ac:rich-text-body>
</ac:structured-macro>'''),
("FosWiki: Named anchors (#WikiWords)",
r'(<a name=[^>]+></a><a href=")http[^#]+(#[^"]+" style="[^"]+)(" title="[^"]+"><big>[^<]+</big></a>)',
r'\1\2; float: right;\3'),
("FosWiki: Wrap HTML '<pre>' into 'panel' macro",
r'(?<!<ac:rich-text-body>)(<pre(?: class="[^"]*")?>)',
r'<ac:structured-macro ac:name="panel" ac:schema-version="1">'
r'<ac:parameter ac:name="bgColor">#eeeeee</ac:parameter>'
r'<ac:rich-text-body>'
r'\1'),
("FosWiki: Wrap HTML '</pre>' into 'panel' macro",
r'</pre>(?!</ac:rich-text-body>)', '</pre></ac:rich-text-body></ac:structured-macro>'),
("FosWiki: Embedded CSS - custom list indent",
r'<ul style="margin-left: [.0-9]+em;">', '<ul>'),
("FosWiki: Empty paragraphs",
r'<p> </p>', r''),
("FosWiki: Obsolete CSS classes",
r'(<(?:div|p|span|h[1-5])) class="(foswikiTopic)"', r'\1'),
    ]))
def _apply_tidy_regex_rules(body, log=None):
"""Return tidied body after applying regex rules."""
body = body.replace(u'\u00A0', ' ')
for name, rule, subst in TIDY_REGEX_RULES:
length = len(body)
try:
body, count = rule.subn(subst, body)
except re.error as cause:
raise click.LoggedFailure('Error "{}" in "{}" replacement: {} => {}'.format(
cause, name, rule.pattern, subst,
))
if count and log:
length -= len(body)
            log.info('Replaced %d match(es) of "%s" (%d chars %s)',
                     count, name, abs(length), "added" if length < 0 else "removed")
return body
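# Usage sketch (illustrative):
#
#     body = '<h1 class="foswikiTopicTitle">Intro</h1><p> </p>'
#     tidied = _apply_tidy_regex_rules(body)
#     # -> '<h1>Intro</h1>' (CSS class stripped, empty paragraph removed)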
def _make_etree(body, content_format='storage', attrs=None):
"""Create an ElementTree from a page's body."""
attrs = (attrs or {}).copy()
attrs.update({
'xmlns:ac': 'http://www.atlassian.com/schema/confluence/4/ac/',
'xmlns:ri': 'http://www.atlassian.com/schema/confluence/4/ri/',
})
xml_body = re.sub(r'&(?!(amp|lt|gt|quot|apos))([a-zA-Z0-9]+);',
lambda cref: '&#{};'.format(htmlentitydefs.name2codepoint[cref.group(2)]), body)
#print(body.encode('utf8'))
xmldoc = u'<{root} {attrs}>{body}</{root}>'.format(
root=content_format,
attrs=' '.join('{}={}'.format(k, quoteattr(v)) for k, v in sorted(attrs.items())),
body=xml_body)
parser = (XMLParser if content_format == 'storage' else HTMLParser)(remove_blank_text=True)
try:
return fromstring(xmldoc, parser)
except XMLSyntaxError as cause:
raise click.LoggedFailure('{}\n{}'.format(
cause, '\n'.join(['{:7d} {}'.format(i+1, k) for i, k in enumerate(xmldoc.splitlines())])
))
def _pretty_xml(body, content_format='storage', attrs=None):
"""Pretty-print the given page body and return a list of lines."""
root = _make_etree(body, content_format=content_format, attrs=attrs)
prettyfied = BytesIO()
root.getroottree().write(prettyfied, encoding='utf8', pretty_print=True, xml_declaration=False)
return prettyfied.getvalue().decode('utf8').splitlines()
class ConfluencePage(object):
"""A page that holds enough state so it can be modified."""
DIFF_COLS = {
'+': 'green',
'-': 'red',
'@': 'yellow',
}
def __init__(self, cf, url, markup='storage', expand=None):
""" Load the given page.
"""
if expand and isinstance(expand, str):
expand = expand.split(',')
expand = set(expand or []) | {'space', 'version', 'body.' + markup}
self.cf = cf
self.url = url
self.markup = markup
self._data = cf.get(self.url, expand=','.join(expand))
self.body = self._data.body[self.markup].value
@property
def page_id(self):
"""The numeric page ID."""
return self._data.id
@property
def space_key(self):
"""The space this page belongs to."""
return self._data.space.key
@property
def title(self):
"""The page's title."""
return self._data.title
@property
def json(self):
"""The full JSON response data."""
return self._data
@property
def version(self):
"""The page's version number in history."""
return self._data.version.number
def etree(self):
"""Parse the page's body into an ElementTree."""
attrs = {
'id': 'page-' + self._data.id,
'href': self._data._links.base + (self._data._links.tinyui or ''),
'status': self._data.status,
'title': self._data.title,
}
return _make_etree(self.body, content_format=self.markup, attrs=attrs)
def tidy(self, log=None):
"""Return a tidy copy of this page's body."""
assert self.markup == 'storage', "Can only clean up pages in storage format!"
return _apply_tidy_regex_rules(self.body, log=log)
def update(self, body=None, minor=True):
"""Update a page's content."""
assert self.markup == 'storage', "Cannot update non-storage page markup!"
if body is None:
body = self.body
if body == self._data.body[self.markup].value:
return # No changes
data = {
#'id': self._data.id,
'type': 'page',
'space': {'key': self.space_key},
'title': self.title,
'version': dict(number=self.version + 1, minorEdit=minor),
'body': {
'storage': {
'value': body,
'representation': self.markup,
}
},
'expand': 'version',
}
response = self.cf.session.put(self._data._links.self, json=data)
response.raise_for_status()
##page = response.json(); print(page)
result = bunchify(response.json())
self._data.body[self.markup].value = body
self._data.version = result.version
return result
def dump_diff(self, changed):
"""Dump a diff to terminal between changed and stored body."""
if self.body == changed:
click.secho('=== No changes to "{0}"'.format(self.title), fg='green')
return
diff = difflib.unified_diff(
_pretty_xml(self.body, self.markup),
_pretty_xml(changed, self.markup),
u'v. {0} of "{1}"'.format(self.version, self.title),
u'v. {0} of "{1}"'.format(self.version + 1, self.title),
arrow.get(self._data.version.when).replace(microsecond=0).isoformat(sep=' '),
arrow.now().replace(microsecond=0).isoformat(sep=' '),
lineterm='', n=2)
for line in diff:
click.secho(line, fg=self.DIFF_COLS.get(line and line[0], None))
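# Usage sketch (illustrative; `cf` stands in for the authenticated API wrapper
# providing .get() and .session that the constructor expects, and the URL is
# hypothetical):
#
#     page = ConfluencePage(cf, '/rest/api/content/12345', markup='storage')
#     tidied = page.tidy()
#     page.dump_diff(tidied)   # preview the changes
#     page.update(tidied)      # write them back as a minor edit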
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.compiler import Compiler, UnsupportedCompilerFlag
from spack.version import ver
class Intel(Compiler):
# Subclasses use possible names of C compiler
cc_names = ['icc']
# Subclasses use possible names of C++ compiler
cxx_names = ['icpc']
# Subclasses use possible names of Fortran 77 compiler
f77_names = ['ifort']
# Subclasses use possible names of Fortran 90 compiler
fc_names = ['ifort']
# Named wrapper links within build_env_path
link_paths = {'cc': 'intel/icc',
'cxx': 'intel/icpc',
'f77': 'intel/ifort',
'fc': 'intel/ifort'}
PrgEnv = 'PrgEnv-intel'
PrgEnv_compiler = 'intel'
version_argument = '--version'
version_regex = r'\((?:IFORT|ICC)\) ([^ ]+)'
@property
def verbose_flag(self):
return "-v"
required_libs = ['libirc', 'libifcore', 'libifcoremt', 'libirng']
@property
def debug_flags(self):
return ['-debug', '-g', '-g0', '-g1', '-g2', '-g3']
@property
def opt_flags(self):
return ['-O', '-O0', '-O1', '-O2', '-O3', '-Ofast', '-Os']
@property
def openmp_flag(self):
if self.real_version < ver('16.0'):
return "-openmp"
else:
return "-qopenmp"
@property
def cxx11_flag(self):
if self.real_version < ver('11.1'):
raise UnsupportedCompilerFlag(self,
"the C++11 standard",
"cxx11_flag",
"< 11.1")
elif self.real_version < ver('13'):
return "-std=c++0x"
else:
return "-std=c++11"
@property
def cxx14_flag(self):
# Adapted from CMake's Intel-CXX rules.
if self.real_version < ver('15'):
raise UnsupportedCompilerFlag(self,
"the C++14 standard",
"cxx14_flag",
"< 15")
elif self.real_version < ver('15.0.2'):
return "-std=c++1y"
else:
return "-std=c++14"
@property
def c99_flag(self):
if self.real_version < ver('12'):
raise UnsupportedCompilerFlag(self,
"the C99 standard",
"c99_flag",
"< 12")
else:
return "-std=c99"
@property
def c11_flag(self):
if self.real_version < ver('16'):
raise UnsupportedCompilerFlag(self,
"the C11 standard",
"c11_flag",
"< 16")
else:
return "-std=c1x"
@property
def cc_pic_flag(self):
return "-fPIC"
@property
def cxx_pic_flag(self):
return "-fPIC"
@property
def f77_pic_flag(self):
return "-fPIC"
@property
def fc_pic_flag(self):
return "-fPIC"
@property
def stdcxx_libs(self):
return ('-cxxlib', )
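# Version gating sketch (illustrative, mirroring the properties above): an
# Intel 14.x compiler raises UnsupportedCompilerFlag when cxx14_flag is
# requested, 15.0.0/15.0.1 return "-std=c++1y", and 15.0.2 or newer return
# "-std=c++14".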
|
class ListIterator:
def __init__(self, obj):
self.list = obj
self.index = 0
self.length = len(obj)
def __iter__(self):
return self
def __next__(self):
if self.index == self.length:
raise StopIteration
index = self.index
self.index = self.index + 1
return self.list[index]
class list:
def __init__(self, iterable):
self.jsobject = JSArray()
if jscode('iterable !== undefined'):
for item in iterable:
self.append(item)
def __or__(self, other):
if self:
return self
if other:
return other
return False
def __hash__(self):
raise TypeError("unhashable type: 'list'")
def __repr__(self):
iterable = map(nest_str_with_quotes, self)
return "[" + ", ".join(iterable) + "]"
def __jstype__(self):
out = JSArray()
for item in self:
item = jstype(item)
jscode('out.push(item)')
return out
def append(self, item):
jsobject = self.jsobject
jscode('jsobject.push(item)')
def insert(self, index, item):
self.jsobject.splice(index, 0, item)
def __setitem__(self, index, value):
jsobject = self.jsobject
index = index.jsobject
jscode('jsobject[index] = value')
def __getitem__(self, s):
jsobject = self.jsobject
index = jstype(s)
return jscode('jsobject[index]')
def __len__(self):
jsobject = self.jsobject
length = jscode('jsobject.length')
return int(length)
def __notin__(self, element):
isin = element in self
return not isin
def __iter__(self):
return ListIterator(self)
def __contains__(self, obj):
for item in self:
if obj == item:
return True
return False
def pop(self):
return self.jsobject.pop()
def index(self, obj):
index = 0
for item in self:
if item == obj:
return index
index += 1
raise ValueError
def remove(self, obj):
index = self.index(obj)
jsobject = self.jsobject
jscode('jsobject.splice(jstype(index), 1)')
def __delitem__(self, index):
self.jsobject.splice(jstype(index), 1)
|
from struct import Struct
from mode import *
from ichisize import *
from incomdef import *
class AtData(Struct):
element = None
maxvalence = None
NUM_CHEM_ELEMENTS = 127 # well above number of known chem. elements
AT_ISO_SORT_KEY_MULT = 32 # up to 32 identical hydrogen isotopes
# (similar to T_GROUP_ISOWT_MULT)
# changed from 16 9-12-2003
class AT_STEREO_CARB(Struct):
at_num = None
parity = None
class AT_STEREO_DBLE(Struct):
at_num1 = None
at_num2 = None
parity = None
class AT_ISOTOPIC(Struct):
at_num = None
num_1H = None
num_D = None
num_T = None
iso_atw_diff = None
BYTE_BITS = 8 # number of bits in one byte
BOND_MASK = 0xf # 4 bits
BOND_BITS = 4 # 3 or 4 does not matter; 2 is too small for BOND_TAUTOM
BOND_ADD = (-1 if BOND_BITS==2 else 0) # subtract 1 from bonds stored in CT
class sp_ATOM(Struct):
elname = None
neighbor = None # changed to unsigned 2-2-95. D.Ch.
init_rank = None # also used in remove_terminal_HDT() to save orig. at. number
orig_at_number = None
orig_compt_at_numb = None
# low 3 bits=bond type;
# high 5 bits (in case of cut-vertex atom) = an attached part number
#
bond_type = None
el_number = None # periodic table number = charge of the nucleus = number of the protons
valence = None
chem_bonds_valence = None # 8-24-00 to treat tautomer centerpoints, etc.
num_H = None # first not including D, T; add_DT_to_num_H() includes.
num_iso_H = None # num 1H, 2H(D), 3H(T)
cFlags = None
iso_atw_diff = None # abs(iso_atw_diff) < 127 or 31 - ???
iso_sort_key = None # = num_1H + AT_ISO_SORT_KEY_MULT^1*num_D
# + AT_ISO_SORT_KEY_MULT^2*num_T
# + AT_ISO_SORT_KEY_MULT^3*iso_atw_diff
charge = None
radical = None # 1=>doublet(.), 2=> triplet as singlet (:) ???? why are they same ????
marked = None
endpoint = None # tautomer analysis. If != 0 then the hydrogens & (-)charge are in the tautomer group.
# Pairs stereo_bond_neighbor[] and stereo_bond_neighbor2[], etc
# initially refer to non-isotopic and isotopic cases, respectively.
# To use same stereo processing code these arrays are swapped when
# switching from non-isotopic to isotopic processing and back.
stereo_bond_neighbor = None # Original number of an opposite atom
stereo_bond_neighbor2 = None # (stereo bond neighbor) +1;
stereo_bond_ord = None # Ordering number of a bond/neighbor in the direction to the
stereo_bond_ord2 = None # stereo bond opposite atom (important for cumulenes);
stereo_bond_z_prod = None # Relative atom-neighbors
stereo_bond_z_prod2 = None # double bond planes orientation;
stereo_bond_parity = None # parity + MULT_STEREOBOND*chain_length,
stereo_bond_parity2 = None # where:
#
# parity (Mask 0x07=BITS_PARITY):
#
# 0 = AB_PARITY_NONE = not a stereo bond
# 1/2 = AB_PARITY_ODD/EVEN = bond parity defined from initial ranks
# 3 = AB_PARITY_UNKN = geometry is unknown to the user
# 4 = AB_PARITY_UNDF = not enough geometry info to find the parity
    # 6 = AB_PARITY_CALC = calculate later from the neighbor ranks; some of them can be
# replaced with AB_PARITY_ODD/EVEN after equivalence ranks have been determined
#
# length (Mask 0x38=MASK_CUMULENE_LEN, length=stereo_bond_parity[i]/MULT_STEREOBOND):
#
# 0 => double or alternating stereogenic bond
# 1 => cumulene with 2 double bonds (stereogenic center)
# 2 => cumulene with 3 double bonds (stereogenic bond)
# length <= (MAX_CUMULENE_LEN=2)
# bit KNOWN_PARITIES_EQL = 0x40: all pairs of const. equ. atoms are connected by stereo bonds
# and these bonds have identical parities
#
parity = None # -- Mask 0x07=BITS_PARITY: --
# 0 = AB_PARITY_NONE => no parity; also parity&0x38 = 0
# 1 = AB_PARITY_ODD => odd parity
# 2 = AB_PARITY_EVEN => even parity
# 3 = AB_PARITY_UNKN => user marked as unknown parity
# 4 = AB_PARITY_UNDF => parity cannot be defined because of symmetry or not well defined geometry
parity2 = None # parity including parity due to isotopic terminal H
    # bit masks: 0x07 => known parity (1,2,3,4) or AB_PARITY_CALC=6, AB_PARITY_IISO = 6
# 0x40 => KNOWN_PARITIES_EQL
stereo_atom_parity = None # similar to stereo_bond_parity[]: known in advance AB_PARITY_* value + KNOWN_PARITIES_EQL bit
stereo_atom_parity2 = None
final_parity = None # defined by equivalence ranks
final_parity2 = None # defined by equivalence ranks, incl. due to terminal isotopic H
bAmbiguousStereo = None
bHasStereoOrEquToStereo = None
bHasStereoOrEquToStereo2 = None
if FIND_RING_SYSTEMS:
bCutVertex = None
nRingSystem = None
nNumAtInRingSystem = None
nBlockSystem = None
if FIND_RINS_SYSTEMS_DISTANCES:
nDistanceFromTerminal = None
z_dir = None
BOND_SINGLE = BOND_TYPE_SINGLE # 1
BOND_DOUBLE = BOND_TYPE_DOUBLE # 2
BOND_TRIPLE = BOND_TYPE_TRIPLE # 3
BOND_ALTERN = BOND_TYPE_ALTERN # 4 single/double
BOND_ALT_123 = 5 # single/double/triple
BOND_ALT_13 = 6 # single/triple
BOND_ALT_23 = 7 # double/triple
BOND_TAUTOM = 8
BOND_ALT12NS = 9
BOND_NUMDIF = 9 # number of different kinds of bonds
BOND_TYPE_MASK = 0x0f
BOND_MARK_ALL = 0xf0 # complement to BOND_TYPE_MASK
BOND_MARK_ALT12 = 0x10
BOND_MARK_ALT123 = 0x20
BOND_MARK_ALT13 = 0x30
BOND_MARK_ALT23 = 0x40
BOND_MARK_ALT12NS = 0x50 # 1 or 2, non-stereo
BOND_MARK_MASK = 0x70
def ACTUAL_ORDER(PBNS, IAT, IBOND, BTYPE):
    # For the alternating bond types, read the actual bond order from the
    # network edge flow; otherwise the bond type already is the order.
    if PBNS and PBNS.edge and PBNS.vert and BTYPE in (BOND_ALT_123, BOND_ALT_13, BOND_ALT_23):
        return PBNS.edge[PBNS.vert[IAT].iedge[IBOND]].flow + BOND_TYPE_SINGLE
    return BTYPE
BITS_PARITY = 0x07 # mask to retrieve half-bond parity
MASK_CUMULENE_LEN = 0x38 # mask to retrieve (cumulene chain length - 1)*MULT_STEREOBOND
KNOWN_PARITIES_EQL = 0x40 # parity is same for all pairs of constit. equivalent atoms
MAX_CUMULENE_LEN = 2 # max number of bonds in a cumulene chain - 1
MULT_STEREOBOND = 0x08 # multiplier for cumulene chain length
# odd length => chiral, even length => stereogenic bond
def MAKE_BITS_CUMULENE_LEN(X):
return X * MULT_STEREOBOND
def GET_BITS_CUMULENE_LEN(X):
return X & MASK_CUMULENE_LEN
def BOND_CHAIN_LEN(X):
    return GET_BITS_CUMULENE_LEN(X) // MULT_STEREOBOND # 0 => double bond, 1 => allene, 2 => cumulene,..
def IS_ALLENE_CHAIN(X):
    return (GET_BITS_CUMULENE_LEN(X) // MULT_STEREOBOND) % 2
## atom or bond parity value definitions ##
AB_PARITY_NONE = 0 # 0 => no parity; also parity&0x38 = 0
AB_PARITY_ODD = 1 # 1 => odd parity
AB_PARITY_EVEN = 2 # 2 => even parity
AB_PARITY_UNKN = 3 # 3 => user marked as unknown parity
AB_PARITY_UNDF = 4 # 4 => parity cannot be defined because of symmetry or not well defined geometry
AB_PARITY_IISO = 5 # 5 => no parity because of identical atoms
AB_PARITY_CALC = 6 # 6 => calculate parity later
AB_PARITY_0D = 8 # 8 => bit signifies 0D case -- not used
AB_INV_PARITY_BITS = (AB_PARITY_ODD ^ AB_PARITY_EVEN)
AB_MAX_KNOWN_PARITY = 4 # precalculated from const. equivalence parities
AB_MIN_KNOWN_PARITY = 1
AB_MAX_PART_DEFINED_PARITY = 3 # 1, 2, 3 => defined parities, including 'unknown'
AB_MIN_PART_DEFINED_PARITY = 1 # min(AB_PARITY_ODD, AB_PARITY_EVEN, AB_PARITY_UNKN)
AB_MAX_WELL_DEFINED_PARITY = 2 # 1, 2 => well defined parities, excluding 'unknown'
AB_MIN_WELL_DEFINED_PARITY = 1 # min(AB_PARITY_ODD, AB_PARITY_EVEN)
AB_MIN_ILL_DEFINED_PARITY = 3
AB_MAX_ILL_DEFINED_PARITY = 4
AB_MAX_ANY_PARITY = 4
AB_MIN_ANY_PARITY = 1
AMBIGUOUS_STEREO = 1
AMBIGUOUS_STEREO_ATOM = 2
AMBIGUOUS_STEREO_BOND = 4
AMBIGUOUS_STEREO_ATOM_ISO = 8
AMBIGUOUS_STEREO_BOND_ISO = 16
AMBIGUOUS_STEREO_ERROR = 32
MIN_DOT_PROD = 50 # min value of at->stereo_bond_z_prod[i] to define parity
def ATOM_PARITY_VAL(X):
return X
def ATOM_PARITY_PART_DEF(X):
return AB_MIN_PART_DEFINED_PARITY <= X and X <= AB_MAX_PART_DEFINED_PARITY
def ATOM_PARITY_ILL_DEF(X):
return AB_MIN_ILL_DEFINED_PARITY <= X and X <= AB_MAX_ILL_DEFINED_PARITY
def ATOM_PARITY_KNOWN(X):
return AB_MIN_KNOWN_PARITY <= X and X <= AB_MAX_KNOWN_PARITY
def ATOM_PARITY_WELL_DEF(X):
return AB_MIN_WELL_DEFINED_PARITY <= X and X <= AB_MAX_WELL_DEFINED_PARITY
def ATOM_PARITY_NOT_UNKN(X):
return ATOM_PARITY_KNOWN(X) and X != AB_PARITY_UNKN
def PARITY_VAL(X):
return X & BITS_PARITY
def PARITY_PART_DEF(X):
return AB_MIN_PART_DEFINED_PARITY <= PARITY_VAL(X) and PARITY_VAL(X) <= AB_MAX_PART_DEFINED_PARITY
def PARITY_ILL_DEF(X):
return AB_MIN_ILL_DEFINED_PARITY <= PARITY_VAL(X) and PARITY_VAL(X) <= AB_MAX_ILL_DEFINED_PARITY
def PARITY_KNOWN(X):
return AB_MIN_KNOWN_PARITY <= PARITY_VAL(X) and PARITY_VAL(X) <= AB_MAX_KNOWN_PARITY
def PARITY_WELL_DEF(X):
return AB_MIN_WELL_DEFINED_PARITY <= PARITY_VAL(X) and PARITY_VAL(X) <= AB_MAX_WELL_DEFINED_PARITY
def PARITY_CALCULATE(X):
return AB_PARITY_CALC == PARITY_VAL(X)
def BOND_PARITY_PART_DEFINED(X):
return PARITY_PART_DEF(X) or PARITY_CALCULATE(X)
def BOND_PARITY_PART_KNOWN(X):
return PARITY_KNOWN(X) or PARITY_CALCULATE(X)
def ALL_BUT_PARITY(X):
return X & ~BITS_PARITY
ALWAYS_SET_STEREO_PARITY = 0
NO_ISOLATED_NON_6RING_AROM_BOND = 0 # for Yuri
SAVE_6_AROM_CENTERS = 0 # for Yuri
|
import argparse, json
from boto.mturk.connection import MTurkConnection
from boto.mturk.qualification import *
from jinja2 import Environment, FileSystemLoader
"""
A bunch of free functions that we use in all scripts.
"""
def get_jinja_env(config):
"""
Get a jinja2 Environment object that we can use to find templates.
"""
return Environment(loader=FileSystemLoader(config['template_directories']))
def json_file(filename):
with open(filename, 'r') as f:
return json.load(f)
def get_parent_parser():
"""
Get an argparse parser with arguments that are always needed
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--prod', action='store_false', dest='sandbox',
default=True,
help="Whether to run on the production AMT site.")
parser.add_argument('--hit_ids_file')
parser.add_argument('--reject_file')
parser.add_argument('--reject_only', default=False)
parser.add_argument('--auto_mode', default=False)
parser.add_argument('--config', default='config.json',
type=json_file)
return parser
def get_mturk_connection_from_args(args):
"""
Utility method to get an MTurkConnection from argparse args.
"""
aws_access_key = args.config.get('aws_access_key')
aws_secret_key = args.config.get('aws_secret_key')
return get_mturk_connection(sandbox=args.sandbox,
aws_access_key=aws_access_key,
aws_secret_key=aws_secret_key)
def get_mturk_connection(sandbox=True, aws_access_key=None,
aws_secret_key=None):
"""
Get a boto mturk connection. This is a thin wrapper over the
MTurkConnection constructor; the only difference is a boolean
flag to indicate sandbox or not.
"""
kwargs = {}
if aws_access_key is not None:
kwargs['aws_access_key_id'] = aws_access_key
if aws_secret_key is not None:
kwargs['aws_secret_access_key'] = aws_secret_key
if sandbox:
host = 'mechanicalturk.sandbox.amazonaws.com'
else:
        host = 'mechanicalturk.amazonaws.com'
return MTurkConnection(host=host, **kwargs)
def setup_qualifications(hit_properties):
"""
Replace some of the human-readable keys from the raw HIT properties
JSON data structure with boto-specific objects.
"""
qual = Qualifications()
if 'country' in hit_properties:
qual.add(LocaleRequirement('EqualTo',
hit_properties['country']))
del hit_properties['country']
if 'hits_approved' in hit_properties:
qual.add(NumberHitsApprovedRequirement('GreaterThan',
hit_properties['hits_approved']))
del hit_properties['hits_approved']
if 'percent_approved' in hit_properties:
qual.add(PercentAssignmentsApprovedRequirement('GreaterThan',
hit_properties['percent_approved']))
del hit_properties['percent_approved']
hit_properties['qualifications'] = qual
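# Usage sketch (illustrative): the human-readable keys are consumed and
# replaced by a single boto Qualifications object.
#
#     hit_properties = {'country': 'US', 'hits_approved': 100,
#                       'percent_approved': 95, 'reward': 0.25}
#     setup_qualifications(hit_properties)
#     # now hit_properties == {'reward': 0.25, 'qualifications': <Qualifications>}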
|
#!/usr/bin/python
from xml.dom.minidom import parse
from json import dumps
def item_text(item,field):
ret = item.getElementsByTagName(field)
if ret is None: return "missing"
if len(ret) < 1: return "missing"
ret = ret[0]
if ret is None: return "missing"
ret = ret.childNodes
if ret is None: return "missing"
if len(ret) < 1: return "missing"
ret = ret[0]
ret = ret.wholeText
if ret is None: return "missing"
return ret
dom = parse("PricesFull7290873900009-001-201505140700.xml")
root = dom.childNodes[0]
json_items = []
for item in root.getElementsByTagName('Items')[0].getElementsByTagName('Item'):
json_items.append({'PriceUpdateDate': item_text(item,'PriceUpdateDate')
,'ItemCode': item_text(item,'ItemCode')
,'ItemType': item_text(item,'ItemType')
,"ItemName": item_text(item,'ItemName')
,"ManufacturerName": item_text(item,'ManufacturerName')
,"ManufacturerCountry": item_text(item,'ManufactureCountry')
,"ManufacturerItemDescription": item_text(item, 'ManufacturerItemDescription')
,"Quantity": item_text(item, 'Quantity')
,"UnitOfMeasure": item_text(item, 'UnitOfMeasure')
,"QtyInPackage": item_text(item, 'QtyInPackage')
,"UnitOfMeasurePrice": item_text(item, 'UnitOfMeasurePrice')
,"AllowDiscount": item_text(item, 'AllowDiscount')
,"bIsWeighted": item_text(item, 'bIsWeighted')
,"ItemPrice": item_text(item, 'ItemPrice')
,"ItemStatus": item_text(item, 'ItemStatus')})
json = {'XmlDocVersion': item_text(root,'XmlDocVersion')
,'DllVerNo': "missing"
,'ChainId': item_text(root,'ChainId')
,"SubChainId": item_text(root,'SubChainId')
,"StoreId": item_text(root,'StoreId')
,"BikoretNo": item_text(root,'BikoretNo')
,"Items": json_items}
with open('output', 'w') as out:
    out.write(dumps(json, sort_keys=True, indent=4, separators=(',', ': ')))
|
# Copyright (c) 2014 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneclient.middleware import auth_token
from oslo.config import cfg
import pecan
from climate.api.v2 import hooks
from climate.api.v2 import middleware
from climate.openstack.common.middleware import debug
auth_opts = [
cfg.StrOpt('auth_strategy',
default='keystone',
help='The strategy to use for auth: noauth or keystone.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
CONF.import_opt('log_exchange', 'climate.config')
OPT_GROUP_NAME = 'keystone_authtoken'
def setup_app(pecan_config=None, extra_hooks=None):
app_hooks = [hooks.ConfigHook(),
hooks.DBHook(),
hooks.ContextHook(),
hooks.RPCHook(),
]
# TODO(sbauza): Add stevedore extensions for loading hooks
if extra_hooks:
app_hooks.extend(extra_hooks)
app = pecan.make_app(
pecan_config.app.root,
debug=CONF.debug,
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
guess_content_type_from_ext=False
)
# WSGI middleware for debugging
if CONF.log_exchange:
app = debug.Debug.factory(pecan_config)(app)
# WSGI middleware for Keystone auth
# NOTE(sbauza): ACLs are always active unless for unittesting where
# enable_acl could be set to False
if pecan_config.app.enable_acl:
CONF.register_opts(auth_token.opts, group=OPT_GROUP_NAME)
keystone_config = dict(CONF.get(OPT_GROUP_NAME))
app = auth_token.AuthProtocol(app, conf=keystone_config)
return app
def make_app():
config = {
'app': {
'modules': ['climate.api.v2'],
'root': 'climate.api.root.RootController',
'enable_acl': True,
}
}
    # NOTE(sbauza): pecan.load_app() fills the Pecan config and calls each module's setup_app()
app = pecan.load_app(config)
return app
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2013
# Author: Guewen Baconnier - Camptocamp
# Augustin Cisterne-Kaas - Elico-corp
# David Béal - Akretion
# Sébastien Beau - Akretion
# Chafique Delli - Akretion
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
#from openerp.tools.translate import _
from openerp.osv.osv import except_osv
from openerp.addons.connector.unit.mapper import (
mapping,
#changed_by,
only_create,
ImportMapper,
ExportMapper,)
#from openerp.addons.connector.exception import MappingError
from openerp.addons.magentoerpconnect.backend import magento
from openerp.addons.magentoerpconnect.unit.backend_adapter import GenericAdapter
from openerp.addons.magentoerpconnect.unit.binder import MagentoModelBinder
from openerp.addons.magentoerpconnect.unit.delete_synchronizer import (
MagentoDeleteSynchronizer)
from openerp.addons.magentoerpconnect.unit.export_synchronizer import (
MagentoExporter)
from openerp.addons.magentoerpconnect.unit.import_synchronizer import (
DelayedBatchImport,
MagentoImportSynchronizer,)
from openerp.addons.connector.exception import FailedJobError
@magento(replacing=MagentoModelBinder)
class MagentoAttributeBinder(MagentoModelBinder):
_model_name = [
'magento.product.attribute',
'magento.attribute.option',
'magento.attribute.set',
]
# Attribute Set
class AttributeSet(orm.Model):
_inherit = 'attribute.set'
_columns = {
'magento_bind_ids': fields.one2many(
'magento.attribute.set',
'openerp_id',
string='Magento Bindings',),
}
class MagentoAttributeSet(orm.Model):
_name = 'magento.attribute.set'
_description = "Magento attribute set"
_inherit = 'magento.binding'
_rec_name = 'attribute_set_name'
_columns = {
'openerp_id': fields.many2one(
'attribute.set',
string='Attribute set',
ondelete='cascade'),
'attribute_set_name': fields.char(
'Name',
size=64,
required=True),
'sort_order': fields.integer(
'Sort order',
readonly=True),
}
def name_get(self, cr, uid, ids, context=None):
res = []
for elm in self.read(cr, uid, ids, ['attribute_set_name'],
context=context):
res.append((elm['id'], elm['attribute_set_name']))
return res
_sql_constraints = [
('magento_uniq', 'unique(backend_id, openerp_id)',
"An 'Attribute set' with the same ID on this Magento backend "
"already exists")
]
@magento
class AttributeSetAdapter(GenericAdapter):
_model_name = 'magento.attribute.set'
_magento_default_model = 'product_attribute_set'
_magento_model = 'ol_catalog_product_attributeset'
def create(self, data):
""" Create a record on the external system """
return self._call('%s.create' % self._magento_default_model,
[data['attribute_set_name'], data['skeletonSetId']])
def delete(self, id):
""" Delete a record on the external system """
return self._call('%s.remove' % self._magento_default_model, [str(id)])
def search(self, filters=None):
""" Search records according and returns a list of ids
:rtype: list
"""
return self._call('%s.search' % self._magento_model, [])
def read(self, id, storeview_id=None, attributes=None):
""" Returns the information of a record
:rtype: dict
"""
return self._call('%s.info' % self._magento_model, [int(id)])
def add_attribute(self, id, attribute_id):
""" Add an existing attribute to an attribute set on the external system
:rtype: boolean
"""
return self._call('%s.attributeAdd' % self._magento_default_model,
[str(attribute_id),str(id)])
@magento
class AttributeSetDelayedBatchImport(DelayedBatchImport):
_model_name = ['magento.attribute.set']
@magento
class AttributeSetImport(MagentoImportSynchronizer):
_model_name = ['magento.attribute.set']
@magento
class AttributeSetImportMapper(ImportMapper):
_model_name = 'magento.attribute.set'
direct = [
('attribute_set_name', 'attribute_set_name'),
('attribute_set_id', 'magento_id'),
('sort_order', 'sort_order'), ]
@mapping
def backend_id(self, record):
return {'backend_id': self.backend_record.id}
@magento
class AttributeSetDeleteSynchronizer(MagentoDeleteSynchronizer):
_model_name = ['magento.attribute.set']
@magento
class AttributeSetExport(MagentoExporter):
_model_name = ['magento.attribute.set']
def _should_import(self):
"""Attribute Set are only edited on OpenERP Side"""
return False
@magento
class AttributeSetExportMapper(ExportMapper):
_model_name = 'magento.attribute.set'
direct = [
('attribute_set_name', 'attribute_set_name'),
('sort_order', 'sort_order'),
]
@only_create
@mapping
def skeletonSetId(self, record):
tmpl_set_id = self.backend_record.attribute_set_tpl_id.id
if tmpl_set_id:
binder = self.get_binder_for_model('magento.attribute.set')
magento_tpl_set_id = binder.to_backend(tmpl_set_id)
else:
            raise FailedJobError((
                "The 'Attribute set template' field must be defined on "
                "the backend.\n\n"
                "Resolution: \n"
                "- Go to Connectors > Magento > Backends > '%s'\n"
                "- Fill in the field 'Attribute set template'\n"
                ) % self.backend_record.name)
return {'skeletonSetId': magento_tpl_set_id}
# Attribute
class AttributeAttribute(orm.Model):
_inherit = 'attribute.attribute'
    def _get_model_product(self, cr, uid, ids, context=None):
model, res_id = self.pool['ir.model.data'].get_object_reference(
cr, uid, 'product', 'model_product_product')
return res_id
_columns = {
'magento_bind_ids': fields.one2many(
'magento.product.attribute',
'openerp_id',
string='Magento Bindings',),
}
_defaults = {
'model_id': _get_model_product,
}
class MagentoProductAttribute(orm.Model):
_name = 'magento.product.attribute'
_description = "Magento Product Attribute"
_inherit = 'magento.binding'
_rec_name = 'attribute_code'
MAGENTO_HELP = "This field is a technical / configuration field for " \
"the attribute on Magento. \nPlease refer to the Magento " \
"documentation for details. "
#Automatically create the magento binding for each option
def create(self, cr, uid, vals, context=None):
mag_option_obj = self.pool['magento.attribute.option']
mag_attr_id = super(MagentoProductAttribute, self).\
create(cr, uid, vals, context=None)
mag_attr = self.browse(cr, uid, mag_attr_id, context=context)
for option in mag_attr.openerp_id.option_ids:
mag_option_obj.create(cr, uid, {
'openerp_id': option.id,
'backend_id': mag_attr.backend_id.id,
}, context=context)
return mag_attr_id
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default['attribute_code'] = default.get('attribute_code', '') + 'Copy '
return super(MagentoProductAttribute, self).copy(
cr, uid, id, default, context=context)
def _frontend_input(self, cr, uid, ids, field_names, arg, context=None):
res={}
for elm in self.browse(cr, uid, ids):
field_type = elm.openerp_id.attribute_type
map_type = {
'char': 'text',
'text': 'textarea',
'float': 'price',
'datetime': 'date',
'binary': 'media_image',
}
res[elm.id] = map_type.get(field_type, field_type)
return res
_columns = {
'openerp_id': fields.many2one(
'attribute.attribute',
string='Attribute',
required=True,
ondelete='cascade'),
'attribute_code': fields.char(
'Code',
required=True,
size=200,),
'scope': fields.selection(
[('store', 'store'), ('website', 'website'), ('global', 'global')],
'Scope',
required=True,
help=MAGENTO_HELP),
'apply_to': fields.selection([
('simple', 'simple'),
],
'Apply to',
required=True,
help=MAGENTO_HELP),
'frontend_input': fields.function(
_frontend_input,
method=True,
string='Frontend input',
type='char',
store=False,
help="This field depends on OpenERP attribute 'type' field "
"but used on Magento"),
'frontend_label': fields.char(
'Label', required=True, size=100, help=MAGENTO_HELP),
'position': fields.integer('Position', help=MAGENTO_HELP),
'group_id': fields.integer('Group', help=MAGENTO_HELP) ,
'default_value': fields.char(
'Default Value',
size=10,
help=MAGENTO_HELP),
'note': fields.char(
'Note', size=200, help=MAGENTO_HELP),
'entity_type_id': fields.integer(
'Entity Type', help=MAGENTO_HELP),
# boolean fields
'is_visible_in_advanced_search': fields.boolean(
'Visible in advanced search?', help=MAGENTO_HELP),
'is_visible': fields.boolean('Visible?', help=MAGENTO_HELP),
'is_visible_on_front': fields.boolean(
'Visible (front)?', help=MAGENTO_HELP),
'is_html_allowed_on_front': fields.boolean(
'Html (front)?', help=MAGENTO_HELP),
'is_wysiwyg_enabled': fields.boolean(
'Wysiwyg enabled?', help=MAGENTO_HELP),
'is_global': fields.boolean('Global?', help=MAGENTO_HELP),
'is_unique': fields.boolean('Unique?', help=MAGENTO_HELP),
'is_required': fields.boolean('Required?', help=MAGENTO_HELP),
'is_filterable': fields.boolean('Filterable?', help=MAGENTO_HELP),
'is_comparable': fields.boolean('Comparable?', help=MAGENTO_HELP),
'is_searchable': fields.boolean('Searchable ?', help=MAGENTO_HELP),
'is_configurable': fields.boolean('Configurable?', help=MAGENTO_HELP),
'is_user_defined': fields.boolean('User defined?', help=MAGENTO_HELP),
'used_for_sort_by': fields.boolean('Use for sort?', help=MAGENTO_HELP),
'is_used_for_price_rules': fields.boolean(
'Used for pricing rules?', help=MAGENTO_HELP),
'is_used_for_promo_rules': fields.boolean(
'Use for promo?', help=MAGENTO_HELP),
'used_in_product_listing': fields.boolean(
'In product listing?', help=MAGENTO_HELP),
}
_defaults = {
'scope': 'global',
'apply_to': 'simple',
'is_visible': True,
'is_visible_on_front': True,
'is_visible_in_advanced_search': True,
'is_filterable': True,
'is_searchable': True,
'is_comparable': True,
}
    _sql_constraints = [
        ('magento_uniq', 'unique(attribute_code)',
         "An attribute with the same code already exists: the code must be unique"),
        ('openerp_uniq', 'unique(backend_id, openerp_id)',
         'An attribute cannot be bound to several records on the same backend.'),
    ]
@magento
class ProductAttributeAdapter(GenericAdapter):
_model_name = 'magento.product.attribute'
_magento_model = 'product_attribute'
def delete(self, id):
return self._call('%s.remove'% self._magento_model,[int(id)])
@magento
class ProductAttributeDeleteSynchronizer(MagentoDeleteSynchronizer):
_model_name = ['magento.product.attribute']
@magento
class ProductAttributeExporter(MagentoExporter):
_model_name = ['magento.product.attribute']
def _should_import(self):
"Attributes in magento doesn't retrieve infos on dates"
return False
def _after_export(self):
""" Run the after export"""
sess = self.session
attr_binder = self.get_binder_for_model('magento.product.attribute')
attr_set_binder = self.get_binder_for_model('magento.attribute.set')
attr_set_adapter = self.get_connector_unit_for_model(
GenericAdapter, 'magento.attribute.set')
mag_attr_id = attr_binder.to_backend(self.binding_record.id)
attr_loc_ids = sess.search('attribute.location', [
['attribute_id', '=', self.binding_record.openerp_id.id],
])
for attr_location in sess.browse('attribute.location', attr_loc_ids):
attr_set_id = attr_location.attribute_set_id.id
mag_attr_set_id = attr_set_binder.to_backend(attr_set_id, wrap=True)
if mag_attr_set_id:
attr_set_adapter.add_attribute(mag_attr_set_id, mag_attr_id)
@magento
class ProductAttributeExportMapper(ExportMapper):
_model_name = 'magento.product.attribute'
direct = [
('attribute_code', 'attribute_code'), # required
('frontend_input', 'frontend_input'),
('scope', 'scope'),
('is_global', 'is_global'),
('is_filterable', 'is_filterable'),
('is_comparable', 'is_comparable'),
('is_visible', 'is_visible'),
('is_searchable', 'is_searchable'),
('is_user_defined', 'is_user_defined'),
('is_configurable', 'is_configurable'),
('is_visible_on_front', 'is_visible_on_front'),
('is_used_for_price_rules', 'is_used_for_price_rules'),
('is_unique', 'is_unique'),
('is_required', 'is_required'),
('position', 'position'),
('group_id', 'group_id'),
('default_value', 'default_value'),
('is_visible_in_advanced_search', 'is_visible_in_advanced_search'),
('note', 'note'),
('entity_type_id', 'entity_type_id'),
]
@mapping
def frontend_label(self, record):
#required
return {'frontend_label': [{
'store_id': 0,
'label': record.frontend_label,
}]}
# Attribute option
class AttributeOption(orm.Model):
_inherit = 'attribute.option'
_columns = {
'magento_bind_ids': fields.one2many(
'magento.attribute.option',
'openerp_id',
string='Magento Bindings',),
}
#Automatically create the magento binding for the option created
def create(self, cr, uid, vals, context=None):
option_id = super(AttributeOption, self).\
create(cr, uid, vals, context=None)
attr_obj = self.pool['attribute.attribute']
mag_option_obj = self.pool['magento.attribute.option']
attr = attr_obj.browse(cr, uid, vals['attribute_id'], context=context)
for binding in attr.magento_bind_ids:
mag_option_obj.create(cr, uid, {
'openerp_id': option_id,
'backend_id': binding.backend_id.id,
}, context=context)
return option_id
class MagentoAttributeOption(orm.Model):
_name = 'magento.attribute.option'
_description = ""
_inherit = 'magento.binding'
_columns = {
'openerp_id': fields.many2one(
'attribute.option',
string='Attribute option',
required=True,
ondelete='cascade'),
'magento_name': fields.char(
'Name',
size=64,
translate=True,
help=("Fill thi field if you want to force the name of the option "
"in Magento, if it's empty then the name of the option will "
"be used")
),
'is_default': fields.boolean('Is default'),
}
_defaults = {
'is_default': True,
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
'An attribute option with the same ID on Magento already exists.'),
('openerp_uniq', 'unique(backend_id, openerp_id)',
'An attribute option can not be bound to several records on the same backend.'),
]
@magento
class AttributeOptionAdapter(GenericAdapter):
_model_name = 'magento.attribute.option'
_magento_model = 'oerp_product_attribute'
def create(self, data):
return self._call('%s.addOption'% self._magento_model,
[data.pop('attribute'), data])
def write(self, attribute_id, option_id, data):
return self._call('%s.updateOption'% self._magento_model,
[attribute_id, option_id, data])
@magento
class AttributeOptionDeleteSynchronizer(MagentoDeleteSynchronizer):
_model_name = ['magento.attribute.option']
@magento
class AttributeOptionExporter(MagentoExporter):
_model_name = ['magento.attribute.option']
def _update(self, data):
""" Update an Magento record """
assert self.magento_id
attribute_id = data.pop('attribute')
self.backend_adapter.write(attribute_id, self.magento_id, data)
def _should_import(self):
"Attributes in magento doesn't retrieve infos on dates"
return False
def _export_dependencies(self):
"""Export attribute if necessary"""
self._export_dependency(self.binding_record.openerp_id.attribute_id,
'magento.product.attribute',
exporter_class=ProductAttributeExporter)
@magento
class AttributeOptionExportMapper(ExportMapper):
_model_name = 'magento.attribute.option'
direct = []
@mapping
def label(self, record):
if record._context:
ctx = record._context.copy()
else:
ctx = {}
storeview_ids = self.session.search(
'magento.storeview',
[('backend_id', '=', self.backend_record.id)])
storeviews = self.session.browse('magento.storeview', storeview_ids)
label = []
for storeview in storeviews:
ctx['lang'] = storeview.lang_id.code
record_translated = record.browse(context=ctx)[0]
label.append({
'store_id': [storeview.magento_id],
'value': record_translated.magento_name\
or record_translated.openerp_id.name,
})
return {'label': label}
@mapping
def attribute(self, record):
binder = self.get_binder_for_model('magento.product.attribute')
magento_attribute_id = binder.to_backend(record.openerp_id.attribute_id.id, wrap=True)
return {'attribute': magento_attribute_id}
@mapping
def order(self, record):
#TODO FIXME
return {'order': record.openerp_id.sequence + 1 }
@mapping
def is_default(self, record):
return {'is_default': int(record.is_default)}
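    # Rough shape of the payload this mapper assembles for one option export
    # (illustrative values only; actual IDs come from the binder and the
    # configured storeviews):
    #   {'attribute': 132,
    #    'label': [{'store_id': [1], 'value': 'Red'}],
    #    'order': 3,
    #    'is_default': 1}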
|
import functools
import numpy as np
import pandas as pd
from . import ops
from .combine import concat
from .common import (
ImplementsArrayReduce, ImplementsDatasetReduce, _maybe_promote,
)
from .pycompat import zip
from .utils import peek_at, maybe_wrap_array, safe_cast_to_index
from .variable import as_variable, Variable, Coordinate
def unique_value_groups(ar):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
inverse, values = pd.factorize(ar, sort=True)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
if g >= 0:
# pandas uses -1 to mark NaN, but doesn't include them in values
groups[g].append(n)
return values, groups
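# Illustrative example (values are what pd.factorize produces; this is not a
# doctest executed by the module):
#   unique_value_groups(['b', 'a', 'b', 'c'])
#   -> (array(['a', 'b', 'c'], dtype=object), [[1], [0, 2], [3]])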
def _get_fill_value(dtype):
"""Return a fill value that appropriately promotes types when used with
np.concatenate
"""
dtype, fill_value = _maybe_promote(dtype)
return fill_value
def _dummy_copy(xarray_obj):
from .dataset import Dataset
from .dataarray import DataArray
if isinstance(xarray_obj, Dataset):
res = Dataset(dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.data_vars.items()),
dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
xarray_obj.attrs)
elif isinstance(xarray_obj, DataArray):
res = DataArray(_get_fill_value(xarray_obj.dtype),
dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
name=xarray_obj.name,
attrs=xarray_obj.attrs)
else: # pragma: no cover
raise AssertionError
return res
class GroupBy(object):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
Dataset.groupby
DataArray.groupby
"""
def __init__(self, obj, group, squeeze=False, grouper=None):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group : DataArray or Coordinate
1-dimensional array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
grouper : pd.Grouper, optional
Used for grouping values along the `group` array.
"""
from .dataset import as_dataset
if group.ndim != 1:
# TODO: remove this limitation?
raise ValueError('`group` must be 1 dimensional')
if getattr(group, 'name', None) is None:
raise ValueError('`group` must have a name')
if not hasattr(group, 'dims'):
raise ValueError("`group` must have a 'dims' attribute")
group_dim, = group.dims
try:
expected_size = obj.dims[group_dim]
except TypeError:
expected_size = obj.shape[obj.get_axis_num(group_dim)]
if group.size != expected_size:
raise ValueError('the group variable\'s length does not '
'match the length of this variable along its '
'dimension')
full_index = None
if grouper is not None:
# time-series resampling
index = safe_cast_to_index(group)
if not index.is_monotonic:
# TODO: sort instead of raising an error
raise ValueError('index must be monotonic for resampling')
s = pd.Series(np.arange(index.size), index)
first_items = s.groupby(grouper).first()
if first_items.isnull().any():
full_index = first_items.index
first_items = first_items.dropna()
bins = first_items.values.astype(np.int64)
group_indices = ([slice(i, j) for i, j in zip(bins[:-1], bins[1:])] +
[slice(bins[-1], None)])
unique_coord = Coordinate(group.name, first_items.index)
elif group.name in obj.dims:
# assume that group already has sorted, unique values
if group.dims != (group.name,):
raise ValueError('`group` is required to be a coordinate if '
'`group.name` is a dimension in `obj`')
group_indices = np.arange(group.size)
if not squeeze:
# group_indices = group_indices.reshape(-1, 1)
# use slices to do views instead of fancy indexing
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group
else:
# look through group to find the unique values
unique_values, group_indices = unique_value_groups(group)
unique_coord = Coordinate(group.name, unique_values)
self.obj = obj
self.group = group
self.group_dim = group_dim
self.group_indices = group_indices
self.unique_coord = unique_coord
self._groups = None
self._full_index = full_index
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self.unique_coord.values,
self.group_indices))
return self._groups
def __len__(self):
return self.unique_coord.size
def __iter__(self):
return zip(self.unique_coord.values, self._iter_grouped())
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self.group_indices:
yield self.obj.isel(**{self.group_dim: indices})
def _infer_concat_args(self, applied_example):
if self.group_dim in applied_example.dims:
concat_dim = self.group
positions = self.group_indices
else:
concat_dim = self.unique_coord
positions = None
return concat_dim, positions
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
g = f if not reflexive else lambda x, y: f(y, x)
applied = self._yield_binary_applied(g, other)
combined = self._concat(applied)
return combined
return func
def _yield_binary_applied(self, func, other):
dummy = None
for group_value, obj in self:
try:
other_sel = other.sel(**{self.group.name: group_value})
except AttributeError:
raise TypeError('GroupBy objects only support binary ops '
'when the other argument is a Dataset or '
'DataArray')
except KeyError:
if self.group.name not in other.dims:
raise ValueError('incompatible dimensions for a grouped '
'binary operation: the group variable %r '
'is not a dimension on the other argument'
% self.group.name)
if dummy is None:
dummy = _dummy_copy(other)
other_sel = dummy
result = func(obj, other_sel)
yield result
def _maybe_restore_empty_groups(self, combined):
"""Our index contained empty groups (e.g., from a resampling). If we
reduced on that dimension, we want to restore the full index.
"""
if (self._full_index is not None and self.group.name in combined.dims):
indexers = {self.group.name: self._full_index}
combined = combined.reindex(**indexers)
return combined
def fillna(self, value):
"""Fill missing values in this object by group.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : valid type for the grouped object's fillna method
Used to fill all matching missing values by group.
Returns
-------
same type as the grouped object
See also
--------
Dataset.fillna
DataArray.fillna
"""
return self._fillna(value)
def where(self, cond):
"""Return an object of the same shape with all entries where cond is
True and all other entries masked.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic.
Parameters
----------
cond : DataArray or Dataset
Returns
-------
same type as the grouped object
See also
--------
Dataset.where
"""
return self._where(cond)
def _first_or_last(self, op, skipna, keep_attrs):
if isinstance(self.group_indices[0], (int, np.integer)):
# NB. this is currently only used for reductions along an existing
# dimension
return self.obj
return self.reduce(op, self.group_dim, skipna=skipna,
keep_attrs=keep_attrs, allow_lazy=True)
def first(self, skipna=None, keep_attrs=True):
"""Return the first element of each group along the group dimension
"""
return self._first_or_last(ops.first, skipna, keep_attrs)
def last(self, skipna=None, keep_attrs=True):
"""Return the last element of each group along the group dimension
"""
return self._first_or_last(ops.last, skipna, keep_attrs)
def assign_coords(self, **kwargs):
"""Assign coordinates by group.
See also
--------
Dataset.assign_coords
"""
return self.apply(lambda ds: ds.assign_coords(**kwargs))
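# Minimal usage sketch (hypothetical `da` with a 1-d coordinate 'letters';
# GroupBy objects are normally created via the public DataArray.groupby /
# Dataset.groupby methods rather than instantiated directly):
#   grouped = da.groupby('letters')
#   grouped.groups        # e.g. {'a': [0, 2], 'b': [1, 3]}
#   grouped.mean()        # reduce each group and stack the results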
class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self.obj.variable
for indices in self.group_indices:
yield var[{self.group_dim: indices}]
def _concat_shortcut(self, applied, concat_dim, positions):
# nb. don't worry too much about maintaining this method -- it does
# speed things up, but it's not very interpretable and there are much
# faster alternatives (e.g., doing the grouped aggregation in a
# compiled language)
stacked = Variable.concat(
applied, concat_dim, positions, shortcut=True)
stacked.attrs.update(self.obj.attrs)
result = self.obj._replace_maybe_drop_dims(stacked)
result._coords[concat_dim.name] = as_variable(concat_dim, copy=True)
return result
def _restore_dim_order(self, stacked):
def lookup_order(dimension):
if dimension == self.group.name:
dimension, = self.group.dims
if dimension in self.obj.dims:
axis = self.obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dims, key=lookup_order)
return stacked.transpose(*new_order)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, **kwargs)) for arr in grouped)
combined = self._concat(applied, shortcut=shortcut)
result = self._maybe_restore_empty_groups(combined)
return result
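    # Sketch of the stacking heuristic described above (hypothetical `da`
    # grouped by a non-dimension coordinate 'letters' along dim 'x'):
    #   da.groupby('letters').apply(lambda a: a - a.mean())  # keeps dim 'x'
    #   da.groupby('letters').apply(lambda a: a.sum())       # stacks over 'letters'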
def _concat(self, applied, shortcut=False):
# peek at applied to determine which coordinate to stack over
applied_example, applied = peek_at(applied)
concat_dim, positions = self._infer_concat_args(applied_example)
if shortcut:
combined = self._concat_shortcut(applied, concat_dim, positions)
else:
combined = concat(applied, concat_dim, positions=positions)
if isinstance(combined, type(self.obj)):
combined = self._restore_dim_order(combined)
return combined
def reduce(self, func, dim=None, axis=None, keep_attrs=False,
shortcut=True, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
            and 'axis' arguments can be supplied. If neither is supplied, then
            `func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_array(ar):
return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)
ops.inject_reduce_methods(DataArrayGroupBy)
ops.inject_binary_ops(DataArrayGroupBy)
class DatasetGroupBy(GroupBy, ImplementsDatasetReduce):
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
            Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset
The result of splitting, applying and combining this dataset.
"""
kwargs.pop('shortcut', None) # ignore shortcut if set (for now)
applied = (func(ds, **kwargs) for ds in self._iter_grouped())
combined = self._concat(applied)
result = self._maybe_restore_empty_groups(combined)
return result
def _concat(self, applied):
applied_example, applied = peek_at(applied)
concat_dim, positions = self._infer_concat_args(applied_example)
combined = concat(applied, concat_dim, positions=positions)
return combined
def reduce(self, func, dim=None, keep_attrs=False, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
        dim : str or sequence of str, optional
            Dimension(s) over which to apply `func`. If none are supplied,
            `func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
        reduced : Dataset
            Dataset with summarized data and the indicated dimension(s)
            removed.
"""
def reduce_dataset(ds):
return ds.reduce(func, dim, keep_attrs, **kwargs)
return self.apply(reduce_dataset)
def assign(self, **kwargs):
"""Assign data variables by group.
See also
--------
Dataset.assign
"""
return self.apply(lambda ds: ds.assign(**kwargs))
ops.inject_reduce_methods(DatasetGroupBy)
ops.inject_binary_ops(DatasetGroupBy)
|
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
from Components.Sources.StaticText import StaticText
from Components.ActionMap import ActionMap
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigSubList, getConfigListEntry, KEY_LEFT, KEY_RIGHT, KEY_0, ConfigNothing, ConfigPIN, ConfigYesNo, NoSave
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.SystemInfo import SystemInfo
from enigma import eTimer, eDVBCI_UI, eDVBCIInterfaces
import Screens.Standby
forceNotShowCiMessages = False
def setCIBitrate(configElement):
eDVBCI_UI.getInstance().setClockRate(configElement.slotid, eDVBCI_UI.rateNormal if configElement.value == "no" else eDVBCI_UI.rateHigh)
def setdvbCiDelay(configElement):
open(SystemInfo["CommonInterfaceCIDelay"], "w").write(configElement.value)
configElement.save()
def setRelevantPidsRouting(configElement):
open(SystemInfo["CI%dRelevantPidsRoutingSupport" % configElement.slotid], "w").write("yes" if configElement.value else "no")
def InitCiConfig():
config.ci = ConfigSubList()
config.cimisc = ConfigSubsection()
if SystemInfo["CommonInterface"]:
for slot in range(SystemInfo["CommonInterface"]):
config.ci.append(ConfigSubsection())
config.ci[slot].canDescrambleMultipleServices = ConfigSelection(choices = [("auto", _("auto")), ("no", _("no")), ("yes", _("yes"))], default = "auto")
config.ci[slot].use_static_pin = ConfigYesNo(default = True)
config.ci[slot].static_pin = ConfigPIN(default = 0)
config.ci[slot].show_ci_messages = ConfigYesNo(default = True)
if SystemInfo["CI%dSupportsHighBitrates" % slot]:
config.ci[slot].canHandleHighBitrates = ConfigYesNo(default = True)
config.ci[slot].canHandleHighBitrates.slotid = slot
config.ci[slot].canHandleHighBitrates.addNotifier(setCIBitrate)
if SystemInfo["CI%dRelevantPidsRoutingSupport" % slot]:
config.ci[slot].relevantPidsRouting = ConfigYesNo(default = False)
config.ci[slot].relevantPidsRouting.slotid = slot
config.ci[slot].relevantPidsRouting.addNotifier(setRelevantPidsRouting)
if SystemInfo["CommonInterfaceCIDelay"]:
config.cimisc.dvbCiDelay = ConfigSelection(default = "256", choices = [("16"), ("32"), ("64"), ("128"), ("256")])
config.cimisc.dvbCiDelay.addNotifier(setdvbCiDelay)
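# After InitCiConfig() runs, each detected CI slot carries (illustrative):
#   config.ci[n].canDescrambleMultipleServices    auto / no / yes
#   config.ci[n].use_static_pin, .static_pin, .show_ci_messages
# plus optional high-bitrate, relevant-PIDs-routing and CI-delay entries when
# SystemInfo reports hardware support for them.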
class MMIDialog(Screen):
def __init__(self, session, slotid, action, handler=eDVBCI_UI.getInstance(), wait_text="", screen_data=None):
Screen.__init__(self, session)
print "MMIDialog with action" + str(action)
self.mmiclosed = False
self.tag = None
self.slotid = slotid
self.timer = eTimer()
self.timer.callback.append(self.keyCancel)
        # otherwise the skin fails to load
self["title"] = Label("")
self["subtitle"] = Label("")
self["bottom"] = Label("")
self["entries"] = ConfigList([ ])
self["actions"] = NumberActionMap(["SetupActions", "MenuActions"],
{
"ok": self.okbuttonClick,
"cancel": self.keyCancel,
"menu": self.forceExit,
#for PIN
"left": self.keyLeft,
"right": self.keyRight,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.action = action
self.screen_data = screen_data
self.is_pin_list = -1
self.handler = handler
if wait_text == "":
self.wait_text = _("wait for ci...")
else:
self.wait_text = wait_text
if action == 2: #start MMI
handler.startMMI(self.slotid)
self.showWait()
elif action == 3: #mmi already there (called from infobar)
self.showScreen()
def addEntry(self, list, entry):
if entry[0] == "TEXT": #handle every item (text / pin only?)
list.append( (entry[1], ConfigNothing(), entry[2]) )
if entry[0] == "PIN":
pinlength = entry[1]
if entry[3] == 1:
# masked pins:
x = ConfigPIN(0, len = pinlength, censor = "*")
else:
# unmasked pins:
x = ConfigPIN(0, len = pinlength)
x.addEndNotifier(self.pinEntered)
self["subtitle"].setText(entry[2])
list.append( getConfigListEntry("", x) )
self["bottom"].setText(_("please press OK when ready"))
def pinEntered(self, value):
self.okbuttonClick()
def okbuttonClick(self):
self.timer.stop()
if not self.tag:
return
if self.tag == "WAIT":
print "do nothing - wait"
elif self.tag == "MENU":
print "answer MENU"
cur = self["entries"].getCurrent()
if cur:
self.handler.answerMenu(self.slotid, cur[2])
else:
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "LIST":
print "answer LIST"
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "ENQ":
cur = self["entries"].getCurrent()
answer = str(cur[1].value)
length = len(answer)
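            # left-pad the typed digits with zeros up to the expected PIN
            # length (equivalent to answer.zfill(cur[1].getLength()))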
while length < cur[1].getLength():
answer = '0' + answer
length += 1
self.answer = answer
if config.ci[self.slotid].use_static_pin.value:
                self.session.openWithCallback(self.save_PIN_CB, MessageBox, _("Do you want to save the entered PIN %s permanently?") % self.answer, MessageBox.TYPE_YESNO)
else:
self.save_PIN_CB(False)
def save_PIN_CB(self, ret=None):
if ret:
config.ci[self.slotid].static_pin.value = self.answer
config.ci[self.slotid].static_pin.save()
self.handler.answerEnq(self.slotid, self.answer)
self.showWait()
def closeMmi(self):
self.timer.stop()
self.close(self.slotid)
def forceExit(self):
self.timer.stop()
if self.tag == "WAIT":
self.handler.stopMMI(self.slotid)
global forceNotShowCiMessages
forceNotShowCiMessages = True
self.close(self.slotid)
def keyCancel(self):
self.timer.stop()
if not self.tag or self.mmiclosed:
self.closeMmi()
elif self.tag == "WAIT":
self.handler.stopMMI(self.slotid)
self.closeMmi()
elif self.tag in ( "MENU", "LIST" ):
print "cancel list"
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "ENQ":
print "cancel enq"
self.handler.cancelEnq(self.slotid)
self.showWait()
else:
print "give cancel action to ci"
def keyConfigEntry(self, key):
self.timer.stop()
try:
self["entries"].handleKey(key)
if self.is_pin_list == 4:
self.okbuttonClick()
except:
pass
def keyNumberGlobal(self, number):
self.timer.stop()
if self.is_pin_list > -1:
self.is_pin_list += 1
self.keyConfigEntry(KEY_0 + number)
def keyLeft(self):
self.timer.stop()
if self.is_pin_list > 0:
self.is_pin_list += -1
self.keyConfigEntry(KEY_LEFT)
def keyRight(self):
self.timer.stop()
if self.is_pin_list > -1 and self.is_pin_list < 4:
self.is_pin_list += 1
self.keyConfigEntry(KEY_RIGHT)
def updateList(self, list):
List = self["entries"]
try:
List.instance.moveSelectionTo(0)
except:
pass
List.l.setList(list)
def showWait(self):
self.tag = "WAIT"
self["title"].setText("")
self["subtitle"].setText("")
self["bottom"].setText("")
list = [ ]
list.append( (self.wait_text, ConfigNothing()) )
self.updateList(list)
def showScreen(self):
if self.screen_data is not None:
screen = self.screen_data
self.screen_data = None
else:
screen = self.handler.getMMIScreen(self.slotid)
list = [ ]
self.timer.stop()
if len(screen) > 0 and screen[0][0] == "CLOSE":
timeout = screen[0][1]
self.mmiclosed = True
if timeout > 0:
self.timer.start(timeout*1000, True)
else:
self.keyCancel()
else:
self.mmiclosed = False
self.tag = screen[0][0]
for entry in screen:
if entry[0] == "PIN":
if config.ci[self.slotid].use_static_pin.value and str(config.ci[self.slotid].static_pin.value) != "0":
answer = str(config.ci[self.slotid].static_pin.value)
length = len(answer)
while length < config.ci[self.slotid].static_pin.getLength():
answer = '0' + answer
length += 1
self.handler.answerEnq(self.slotid, answer)
self.showWait()
break
else:
self.is_pin_list = 0
self.addEntry(list, entry)
else:
if entry[0] == "TITLE":
self["title"].setText(entry[1])
elif entry[0] == "SUBTITLE":
self["subtitle"].setText(entry[1])
elif entry[0] == "BOTTOM":
self["bottom"].setText(entry[1])
elif entry[0] == "TEXT":
self.addEntry(list, entry)
self.updateList(list)
def ciStateChanged(self):
do_close = False
if self.action == 0: #reset
do_close = True
if self.action == 1: #init
do_close = True
#module still there ?
if self.handler.getState(self.slotid) != 2:
do_close = True
#mmi session still active ?
if self.handler.getMMIState(self.slotid) != 1:
do_close = True
if do_close:
self.closeMmi()
elif self.action > 1 and self.handler.availableMMI(self.slotid) == 1:
self.showScreen()
#FIXME: check for mmi-session closed
class CiMessageHandler:
def __init__(self):
self.session = None
self.auto_close = False
self.ci = { }
self.dlgs = { }
eDVBCI_UI.getInstance().ciStateChanged.get().append(self.ciStateChanged)
def setSession(self, session):
self.session = session
def ciStateChanged(self, slot):
if slot in self.ci:
self.ci[slot](slot)
else:
handler = eDVBCI_UI.getInstance()
if slot in self.dlgs:
self.dlgs[slot].ciStateChanged()
elif handler.availableMMI(slot) == 1:
if self.session:
show_ui = False
if config.ci[slot].show_ci_messages.value:
show_ui = True
screen_data = handler.getMMIScreen(slot)
if config.ci[slot].use_static_pin.value:
if screen_data is not None and len(screen_data):
ci_tag = screen_data[0][0]
if ci_tag == 'ENQ' and len(screen_data) >= 2 and screen_data[1][0] == 'PIN':
if str(config.ci[slot].static_pin.value) == "0":
show_ui = True
else:
answer = str(config.ci[slot].static_pin.value)
length = len(answer)
while length < config.ci[slot].static_pin.getLength():
answer = '0' + answer
length += 1
handler.answerEnq(slot, answer)
show_ui = False
self.auto_close = True
elif ci_tag == 'CLOSE' and self.auto_close:
show_ui = False
self.auto_close = False
if show_ui and not forceNotShowCiMessages and not Screens.Standby.inStandby:
self.dlgs[slot] = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, 3, screen_data = screen_data)
def dlgClosed(self, slot):
if slot in self.dlgs:
del self.dlgs[slot]
def registerCIMessageHandler(self, slot, func):
self.unregisterCIMessageHandler(slot)
self.ci[slot] = func
def unregisterCIMessageHandler(self, slot):
if slot in self.ci:
del self.ci[slot]
CiHandler = CiMessageHandler()
class CiSelection(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "CiSelectionActions"],
{
"left": self.keyLeft,
"right": self.keyLeft,
"ok": self.okbuttonClick,
"cancel": self.cancel
},-1)
self.dlg = None
self.state = { }
self.list = [ ]
self.slot = 0
for slot in range(SystemInfo["CommonInterface"]):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
self.slot += 1
self.appendEntries(slot, state)
CiHandler.registerCIMessageHandler(slot, self.ciStateChanged)
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["entries"] = menuList
self["entries"].onSelectionChanged.append(self.selectionChanged)
self["text"] = Label(_("Slot %d") % 1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
global forceNotShowCiMessages
forceNotShowCiMessages = False
self.setTitle(_("Common Interface"))
def selectionChanged(self):
if self.slot > 1:
cur = self["entries"].getCurrent()
if cur and len(cur) > 2:
self["text"].setText(cur[0] == "**************************" and " " or cur[0] == _("DVB CI Delay") and _("All slots") or _("Slot %d") % (cur[3] + 1))
def keyConfigEntry(self, key):
try:
self["entries"].handleKey(key)
self["entries"].getCurrent()[1].save()
except:
pass
def keyLeft(self):
self.keyConfigEntry(KEY_LEFT)
def keyRight(self):
self.keyConfigEntry(KEY_RIGHT)
def appendEntries(self, slot, state):
self.state[slot] = state
if self.slot > 1:
self.list.append(("**************************", ConfigNothing(), 3, slot))
self.list.append((_("Reset"), ConfigNothing(), 0, slot))
self.list.append((_("Init"), ConfigNothing(), 1, slot))
if self.state[slot] == 0: #no module
self.list.append((_("no module found"), ConfigNothing(), 2, slot))
elif self.state[slot] == 1: #module in init
self.list.append((_("init module"), ConfigNothing(), 2, slot))
elif self.state[slot] == 2: #module ready
appname = eDVBCI_UI.getInstance().getAppName(slot)
self.list.append((appname, ConfigNothing(), 2, slot))
self.list.append(getConfigListEntry(_("Set pin code persistent"), config.ci[slot].use_static_pin, 3, slot))
self.list.append((_("Enter persistent PIN code"), ConfigNothing(), 5, slot))
self.list.append((_("Reset persistent PIN code"), ConfigNothing(), 6, slot))
self.list.append(getConfigListEntry(_("Show CI messages"), config.ci[slot].show_ci_messages, 3, slot))
self.list.append(getConfigListEntry(_("Multiple service support"), config.ci[slot].canDescrambleMultipleServices, 3, slot))
if SystemInfo["CI%dSupportsHighBitrates" % slot]:
self.list.append(getConfigListEntry(_("High bitrate support"), config.ci[slot].canHandleHighBitrates, 3, slot))
if SystemInfo["CI%dRelevantPidsRoutingSupport" % slot]:
self.list.append(getConfigListEntry(_("Relevant PIDs routing"), config.ci[slot].relevantPidsRouting, 3, slot))
if SystemInfo["CommonInterfaceCIDelay"]:
self.list.append(getConfigListEntry(_("DVB CI Delay"), config.cimisc.dvbCiDelay, 3, slot))
def updateState(self, slot):
state = eDVBCI_UI.getInstance().getState(slot)
self.state[slot] = state
slotidx = 0
while len(self.list[slotidx]) < 3 or self.list[slotidx][3] != slot:
slotidx += 1
slotidx += 1 #do not change Reset
slotidx += 1 #do not change Init
if state == 0: #no module
self.list[slotidx] = (_("no module found"), ConfigNothing(), 2, slot)
elif state == 1: #module in init
self.list[slotidx] = (_("init module"), ConfigNothing(), 2, slot)
elif state == 2: #module ready
appname = eDVBCI_UI.getInstance().getAppName(slot)
self.list[slotidx] = (appname, ConfigNothing(), 2, slot)
lst = self["entries"]
lst.list = self.list
lst.l.setList(self.list)
def ciStateChanged(self, slot):
if self.dlg:
self.dlg.ciStateChanged()
else:
state = eDVBCI_UI.getInstance().getState(slot)
if self.state[slot] != state:
self.state[slot] = state
self.updateState(slot)
def dlgClosed(self, slot):
self.dlg = None
def okbuttonClick(self):
cur = self["entries"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 3:
pass
elif action == 0: #reset
eDVBCI_UI.getInstance().setReset(slot)
elif action == 1: #init
eDVBCI_UI.getInstance().setInit(slot)
elif action == 5:
self.session.openWithCallback(self.cancelCB, PermanentPinEntry, config.ci[slot].static_pin, _("Smartcard PIN"))
elif action == 6:
config.ci[slot].static_pin.value = 0
config.ci[slot].static_pin.save()
self.session.openWithCallback(self.cancelCB, MessageBox, _("The saved PIN was cleared."), MessageBox.TYPE_INFO)
elif self.state[slot] == 2:
self.dlg = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, action)
def cancelCB(self, value):
pass
def cancel(self):
for slot in range(SystemInfo["CommonInterface"]):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
CiHandler.unregisterCIMessageHandler(slot)
self.close()
class PermanentPinEntry(Screen, ConfigListScreen):
def __init__(self, session, pin, pin_slot):
Screen.__init__(self, session)
self.skinName = ["ParentalControlChangePin", "Setup" ]
self.setup_title = _("Enter pin code")
self.onChangedEntry = [ ]
self.slot = pin_slot
self.pin = pin
self.list = []
self.pin1 = ConfigPIN(default = 0, censor = "*")
self.pin2 = ConfigPIN(default = 0, censor = "*")
self.pin1.addEndNotifier(boundFunction(self.valueChanged, 1))
self.pin2.addEndNotifier(boundFunction(self.valueChanged, 2))
self.list.append(getConfigListEntry(_("Enter PIN"), NoSave(self.pin1)))
self.list.append(getConfigListEntry(_("Reenter PIN"), NoSave(self.pin2)))
ConfigListScreen.__init__(self, self.list)
self["actions"] = NumberActionMap(["DirectionActions", "ColorActions", "OkCancelActions"],
{
"cancel": self.cancel,
"red": self.cancel,
"save": self.keyOK,
}, -1)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def valueChanged(self, pin, value):
if pin == 1:
self["config"].setCurrentIndex(1)
elif pin == 2:
self.keyOK()
def keyOK(self):
if self.pin1.value == self.pin2.value:
self.pin.value = self.pin1.value
self.pin.save()
self.session.openWithCallback(self.close, MessageBox, _("The PIN code has been saved successfully."), MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("The PIN codes you entered are different."), MessageBox.TYPE_ERROR)
def cancel(self):
self.close(None)
def keyNumberGlobal(self, number):
ConfigListScreen.keyNumberGlobal(self, number)
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
|
import random
import unittest
from vcii.sheet import *
class TestSheet(unittest.TestCase):
def test_key_to_indices(self):
with self.assertRaises(ValueError):
indices_from_label('1A')
self.assertEqual(indices_from_label('A1'), (0, 0))
self.assertEqual(indices_from_label('zz99'), (26**2 + 25, 98))
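        # Column labels are bijective base-26: 'A' -> 0, 'Z' -> 25, 'AA' -> 26,
        # so 'zz' is (25 + 1) * 26 + 25 = 26**2 + 25 = 701.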
def test_append(self):
sheet = Sheet()
        sheet.cursor = [random.randint(0, 10) for _ in range(2)]  # two independent random coordinates
sheet.append('rain')
self.assertEqual(sheet.active_cell.content, 'rain')
self.assertTrue(sheet.modified)
sheet.modified = False
sheet.append('bow')
self.assertEqual(sheet.active_cell.content, 'rainbow')
self.assertTrue(sheet.modified)
def test_expand(self):
sheet = Sheet()
self.assertEqual(sheet.size, (0, 0))
max_size = 0, 0
for i in range(10):
            coords = [random.randint(0, 10) for _ in range(2)]
sheet.expand(*coords)
max_size = tuple(max(max_size[j], coords[j] + 1) for j in range(2))
self.assertEqual(sheet.size, max_size)
self.assertTrue(sheet.modified)
def test_setitem(self):
sheet = Sheet()
sheet['C2'] = 'testing'
self.assertEqual(sheet.cells[2][1].content, 'testing')
self.assertEqual(sheet.size, (3, 2))
self.assertTrue(sheet.modified)
def test_move_cursor(self):
sheet = Sheet()
sheet.move_cursor(0, 0)
self.assertEqual(sheet.cursor, [0, 0])
sheet.move_cursor(-1, 1)
self.assertEqual(sheet.cursor, [0, 1])
sheet.move_cursor(1, -2)
self.assertEqual(sheet.cursor, [1, 0])
self.assertFalse(sheet.modified)
sheet.expand(0, 0)
sheet.modified = False
sheet.move_cursor(-1, 0)
self.assertEqual(sheet.cursor, [0, 0])
self.assertFalse(sheet.modified)
def test_column_width(self):
sheet = Sheet()
sheet.expand(1, 0)
self.assertEqual(sheet.column_width(1), DEFAULT_COLUMN_WIDTH)
self.assertEqual(sheet.column_width(2), DEFAULT_COLUMN_WIDTH)
sheet.column_widths[1] = 5
self.assertEqual(sheet.column_width(0), DEFAULT_COLUMN_WIDTH)
self.assertEqual(sheet.column_width(1), 5)
def test_resize_column(self):
sheet = Sheet()
sheet.resize_column(0, 0)
self.assertEqual(sheet.size, (1, 1))
self.assertEqual(sheet.column_widths, [2])
|
"""
A subset of the tests in tests/servers/tests exercising
django.contrib.staticfiles.testing.StaticLiveServerTestCase instead of
django.test.LiveServerTestCase.
"""
import os
from urllib.request import urlopen
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
TEST_ROOT = os.path.dirname(__file__)
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'STATIC_URL': '/static/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
}
class LiveServerBase(StaticLiveServerTestCase):
available_apps = []
@classmethod
def setUpClass(cls):
# Override settings
cls.settings_override = override_settings(**TEST_SETTINGS)
cls.settings_override.enable()
super(LiveServerBase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(LiveServerBase, cls).tearDownClass()
# Restore original settings
cls.settings_override.disable()
class StaticLiveServerChecks(LiveServerBase):
@classmethod
def setUpClass(cls):
# If contrib.staticfiles isn't configured properly, the exception
# should bubble up to the main thread.
old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
TEST_SETTINGS['STATIC_URL'] = None
cls.raises_exception()
TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
@classmethod
def tearDownClass(cls):
# skip it, as setUpClass doesn't call its parent either
pass
@classmethod
def raises_exception(cls):
try:
super(StaticLiveServerChecks, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except ImproperlyConfigured:
# This raises ImproperlyConfigured("You're using the staticfiles
# app without having set the required STATIC_URL setting.")
pass
finally:
super(StaticLiveServerChecks, cls).tearDownClass()
def test_test_test(self):
# Intentionally empty method so that the test is picked up by the
# test runner and the overridden setUpClass() method is executed.
pass
class StaticLiveServerView(LiveServerBase):
def urlopen(self, url):
return urlopen(self.live_server_url + url)
# The test is going to access a static file stored in this application.
@modify_settings(INSTALLED_APPS={'append': 'staticfiles_tests.apps.test'})
def test_collectstatic_emulation(self):
"""
        StaticLiveServerTestCase's use of staticfiles' serve() allows it
        to discover each app's static assets without having to run collectstatic first.
"""
with self.urlopen('/static/test/file.txt') as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'In static directory.')
|
import networkx as nx
import random
import scipy as sp
from gat.core.sna import ergm
def resilience(cliques_found, ergm_iters=3000):
scaledResilience = {}
scaledBaseline = {}
toScale = []
baselinesToScale = []
traces = []
formatted_traces = {}
cliques, selected = cliques_found
# Find resilience of subgraphs
for clique in cliques:
initShortestPath = nx.average_shortest_path_length(clique)
baselinesToScale.append(initShortestPath)
        # create a perturbation by removing a random 10% of the nodes, then
        # let the ERGM average the shortest-path statistic over ergm_iters draws
        G = clique.copy()
        # the fraction of nodes removed can be changed here
        rSample = random.sample(list(G.nodes()), int(G.number_of_nodes() * 0.1))
G.remove_nodes_from(rSample)
coefs, new_trace = ergm.resilience(G, ergm_iters, mu=initShortestPath*.2)
toScale.append(coefs["aspl"])
traces.append(new_trace["aspl"].tolist())
    # collect per-clique resilience and express each baseline as a percentile of all baselines
for i in range(len(cliques)):
scaledResilience[selected[i]] = toScale[i]
scaledBaseline[selected[i]] = sp.stats.percentileofscore(baselinesToScale, baselinesToScale[i])
formatted_traces[selected[i]] = traces[i]
return scaledBaseline, scaledResilience, formatted_traces
|
from logging import getLogger
logger = getLogger('update')
import contextlib
import io
import os
import platform
import requests
import tempfile
from wxUI import commonMessageDialogs
import widgetUtils
import webbrowser
try:
import czipfile as zipfile
except ImportError:
import zipfile
from platform_utils import paths
def perform_update(endpoint, current_version, app_name='', password=None, update_available_callback=None, progress_callback=None, update_complete_callback=None):
requests_session = create_requests_session(app_name=app_name, version=current_version)
available_update = find_update(endpoint, requests_session=requests_session)
if not available_update:
logger.debug("No update available")
return False
available_version = float(available_update['current_version'])
    if available_version <= float(current_version) or platform.system()+platform.architecture()[0][:2] not in available_update['downloads']:
logger.debug("No update for this architecture")
return False
available_description = available_update.get('description', None)
    update_url = available_update['downloads'][platform.system()+platform.architecture()[0][:2]]
logger.info("A new update is available. Version %s" % available_version)
donation()
if callable(update_available_callback) and not update_available_callback(version=available_version, description=available_description): #update_available_callback should return a falsy value to stop the process
logger.info("User canceled update.")
return
base_path = tempfile.mkdtemp()
download_path = os.path.join(base_path, 'update.zip')
update_path = os.path.join(base_path, 'update')
downloaded = download_update(update_url, download_path, requests_session=requests_session, progress_callback=progress_callback)
extracted = extract_update(downloaded, update_path, password=password)
bootstrap_path = move_bootstrap(extracted)
execute_bootstrap(bootstrap_path, extracted)
logger.info("Update prepared for installation.")
if callable(update_complete_callback):
update_complete_callback()
def create_requests_session(app_name=None, version=None):
user_agent = ''
session = requests.session()
if app_name:
user_agent = ' %s/%r' % (app_name, version)
session.headers['User-Agent'] = session.headers['User-Agent'] + user_agent
return session
def find_update(endpoint, requests_session):
response = requests_session.get(endpoint)
response.raise_for_status()
content = response.json()
return content
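# The endpoint is expected to return JSON shaped roughly like the sketch
# below (keys inferred from how perform_update consumes the result; URLs and
# values are purely illustrative):
#   {"current_version": "1.2",
#    "description": "What changed in this release...",
#    "downloads": {"Windows32": "https://example.com/update-win32.zip",
#                  "Windows64": "https://example.com/update-win64.zip"}}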
def download_update(update_url, update_destination, requests_session, progress_callback=None, chunk_size=io.DEFAULT_BUFFER_SIZE):
total_downloaded = total_size = 0
with io.open(update_destination, 'w+b') as outfile:
download = requests_session.get(update_url, stream=True)
total_size = int(download.headers.get('content-length', 0))
logger.debug("Total update size: %d" % total_size)
download.raise_for_status()
for chunk in download.iter_content(chunk_size):
outfile.write(chunk)
total_downloaded += len(chunk)
if callable(progress_callback):
call_callback(progress_callback, total_downloaded, total_size)
logger.debug("Update downloaded")
return update_destination
def extract_update(update_archive, destination, password=None):
"""Given an update archive, extracts it. Returns the directory to which it has been extracted"""
with contextlib.closing(zipfile.ZipFile(update_archive)) as archive:
if password:
archive.setpassword(password)
archive.extractall(path=destination)
logger.debug("Update extracted")
return destination
def move_bootstrap(extracted_path):
working_path = os.path.abspath(os.path.join(extracted_path, '..'))
if platform.system() == 'Darwin':
extracted_path = os.path.join(extracted_path, 'Contents', 'Resources')
downloaded_bootstrap = os.path.join(extracted_path, bootstrap_name())
new_bootstrap_path = os.path.join(working_path, bootstrap_name())
os.rename(downloaded_bootstrap, new_bootstrap_path)
return new_bootstrap_path
def execute_bootstrap(bootstrap_path, source_path):
arguments = r'"%s" "%s" "%s" "%s"' % (os.getpid(), source_path, paths.app_path(), paths.get_executable())
if platform.system() == 'Windows':
import win32api
win32api.ShellExecute(0, 'open', bootstrap_path, arguments, '', 5)
else:
import subprocess
make_executable(bootstrap_path)
subprocess.Popen(['%s %s' % (bootstrap_path, arguments)], shell=True)
logger.info("Bootstrap executed")
def bootstrap_name():
if platform.system() == 'Windows': return 'bootstrap.exe'
if platform.system() == 'Darwin': return 'bootstrap-mac.sh'
return 'bootstrap-lin.sh'
def make_executable(path):
import stat
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def call_callback(callback, *args, **kwargs):
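    # NOTE: the exception guard below is commented out, so any error raised
    # by the callback propagates to the caller instead of being logged.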
# try:
callback(*args, **kwargs)
# except:
# logger.exception("Failed calling callback %r with args %r and kwargs %r" % (callback, args, kwargs))
def donation():
dlg = commonMessageDialogs.donation()
if dlg == widgetUtils.YES:
webbrowser.open_new_tab("http://twblue.es/?q=donate")
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Pad on tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds a PAD in all project kanban views.
===================================================
""",
'author': 'OpenERP SA',
'website': 'https://www.tunierp.com/page/project-management',
'depends': ['project', 'pad'],
'data': ['project_task.xml'],
'demo': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# coding: utf-8
import os
PROJECT_DIR = os.path.dirname(__file__)
DATASET_COMPLETO = os.path.join(PROJECT_DIR, 'dataset/completo/youtube_comments_20120117.csv')
DATASET_TREINO = os.path.join(PROJECT_DIR, 'dataset/treino/')
DATASET_PREPARADO = os.path.join(PROJECT_DIR, 'dataset/preparado/')
HOST_MONITOR = 'localhost'
HOST_PORTA = 8124
LOGGING = {
'version': 1,
    'disable_existing_loggers': False,  # keep loggers created before this config is applied
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(message)s'
},
},
'handlers': {
'default': {
'level': 'NOTSET',
'formatter': 'standard',
'class': 'logging.StreamHandler',
},
'arquivo': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.FileHandler',
'filename': "logfile.log",
},
},
'root': {
'handlers': ['default', 'arquivo'],
'level': 'NOTSET',
'propagate': True
}
}
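# To activate this configuration (a minimal sketch using only the standard
# library; the logger name is arbitrary):
#   import logging
#   import logging.config
#   logging.config.dictConfig(LOGGING)
#   logging.getLogger('monitor').info('logging configured')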
|
##
## Parse plotting configuration files for sashimi_plot
##
import sys
import os
import ast
import ConfigParser
import misopy
import misopy.miso_utils as miso_utils
def get_default_settings():
"""
Return default settings for sashimi_plot.
"""
settings = {"intron_scale": 30,
"exon_scale": 1,
"logged": False,
"ymax": None,
"show_posteriors": True,
"number_junctions": True,
"posterior_bins": 40,
"gene_posterior_ratio": 5,
"resolution": .5,
"fig_width": 8.5,
"fig_height": 11,
"bar_posteriors": False,
"junction_log_base": 10.,
"reverse_minus": False,
"bf_dist_bins": 20,
"font_size": 6,
"insert_len_bins": 25,
"bf_thresholds": [0, 1, 2, 5, 10, 20],
"nyticks": 3,
"nxticks": 4,
"show_ylabel": True,
"show_xlabel": True,
"sans_serif": False,
"bar_color": "k"}
return settings
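# A settings file is a plain INI document; option names are matched against
# the *_PARAMS lists of parse_plot_settings below, whatever section they sit
# in. A hypothetical example (paths and values are illustrative):
#   [data]
#   bam_prefix = /data/bams
#   bam_files = ["sample1.bam", "sample2.bam"]
#   [plotting]
#   fig_width = 7
#   fig_height = 5
#   colors = ["#CC0011", "#FF8800"]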
def parse_plot_settings(settings_filename, event=None, chrom=None,
# Float parameters
FLOAT_PARAMS=["intron_scale",
"exon_scale",
"ymax",
"resolution",
"fig_width",
"fig_height",
"font_size",
"junction_log_base"],
# Integer parameters
INT_PARAMS=["posterior_bins",
"gene_posterior_ratio",
"insert_len_bins",
"nyticks",
"nxticks"],
# Boolean parameters
BOOL_PARAMS=["logged",
"show_posteriors",
"number_junctions",
"reverse_minus",
"bar_posteriors",
"show_ylabel",
"show_xlabel",
"sans_serif"],
# Parameters to be interpreted as Python lists or
# data structures
DATA_PARAMS=["miso_files",
"bam_files",
"bf_thresholds",
"bar_color",
"sample_labels",
"additional_tracks"],
no_posteriors=False):
"""
Populate a settings dictionary with the plotting parameters, parsed
as the right datatype.
"""
settings = get_default_settings()
config = ConfigParser.ConfigParser()
print "Reading settings from: %s" %(settings_filename)
config.read(settings_filename)
for section in config.sections():
for option in config.options(section):
print "Parsing %s:%s" %(section, option)
if option in FLOAT_PARAMS:
settings[option] = config.getfloat(section, option)
elif option in INT_PARAMS:
settings[option] = config.getint(section, option)
elif option in BOOL_PARAMS:
settings[option] = config.getboolean(section, option)
elif option in DATA_PARAMS:
settings[option] = ast.literal_eval(config.get(section,
option))
else:
settings[option] = config.get(section, option)
# Ensure that bf_thresholds are integers
settings["bf_thresholds"] = [int(t) for t in settings["bf_thresholds"]]
if "colors" in settings:
colors = ast.literal_eval(settings["colors"])
else:
colors = [None for x in settings["bam_files"]]
settings["colors"] = colors
if "bam_prefix" in settings:
bam_files = [os.path.join(settings["bam_prefix"], x) \
for x in settings["bam_files"]]
else:
bam_files = settings["bam_files"]
settings["bam_files"] = bam_files
# Make the sample labels be the BAM files by default
if "sample_labels" not in settings:
settings["sample_labels"] = [os.path.basename(bfile) \
for bfile in settings["bam_files"]]
num_labels = len(settings["sample_labels"])
num_bams = len(settings["bam_files"])
num_colors = len(settings["colors"])
if not (num_labels == num_bams == num_colors):
print "Error: Must provide sample label and color for each entry in bam_files!"
print " - Provided %d labels, %d BAMs, %d colors" \
%(num_labels, num_bams, num_colors)
sys.exit(1)
if no_posteriors:
settings["show_posteriors"] = False
if ("miso_prefix" in settings) and (event != None and chrom != None) \
and settings["show_posteriors"]:
miso_files = miso_utils.get_miso_output_files(event, chrom, settings)
elif "miso_files" in settings:
miso_files = settings["miso_files"]
else:
miso_files = []
settings["miso_files"] = miso_files
if "coverages" in settings:
coverages = ast.literal_eval(settings["coverages"])
coverages = map(float, coverages)
# Normalize coverages per M
coverages = [x / 1e6 for x in coverages]
else:
coverages = [1 for x in settings["bam_files"]]
settings["coverages"] = coverages
if len(settings["coverages"]) != len(settings["sample_labels"]):
print "Error: Must provide a coverage value for each sample or leave coverages unset."
sys.exit(1)
return settings
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A luigi file system client that wraps around snakebite
Originally written by Alan Brenner <alan@magnetic.com> github.com/alanbbr
"""
from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import error as hdfs_error
from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients
from luigi import six
import luigi.contrib.target
import logging
import datetime
import os
logger = logging.getLogger('luigi-interface')
class SnakebiteHdfsClient(hdfs_hadoopcli_clients.HdfsClient):
"""
    An HDFS client using snakebite. Since snakebite has a native Python API,
    it is roughly 100 times faster than the hadoop CLI client, which shells
    out to a Java program for each file system operation.
"""
def __init__(self):
super(SnakebiteHdfsClient, self).__init__()
self._bite = None
self.pid = -1
@staticmethod
def list_path(path):
        if isinstance(path, (list, tuple)):
            return path
        if isinstance(path, six.string_types):
            return [path, ]
        return [str(path), ]
def get_bite(self):
"""
If Luigi has forked, we have a different PID, and need to reconnect.
"""
config = hdfs_config.hdfs()
if self.pid != os.getpid() or not self._bite:
client_kwargs = dict(filter(
lambda k_v: k_v[1] is not None and k_v[1] != '', six.iteritems({
'hadoop_version': config.client_version,
'effective_user': config.effective_user,
})
))
if config.snakebite_autoconfig:
"""
This is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
This client tries to read ``${HADOOP_PATH}/conf/hdfs-site.xml`` to get the address of the namenode.
The behaviour is the same as Client.
"""
from snakebite.client import AutoConfigClient
self._bite = AutoConfigClient(**client_kwargs)
else:
from snakebite.client import Client
self._bite = Client(config.namenode_host, config.namenode_port, **client_kwargs)
return self._bite
def exists(self, path):
"""
Use snakebite.test to check file existence.
:param path: path to test
:type path: string
:return: boolean, True if path exists in HDFS
"""
try:
return self.get_bite().test(path, exists=True)
except Exception as err: # IGNORE:broad-except
raise hdfs_error.HDFSCliError("snakebite.test", -1, str(err), repr(err))
def rename(self, path, dest):
"""
Use snakebite.rename, if available.
:param path: source file(s)
:type path: either a string or sequence of strings
:param dest: destination file (single input) or directory (multiple)
:type dest: string
:return: list of renamed items
"""
parts = dest.rstrip('/').split('/')
if len(parts) > 1:
dir_path = '/'.join(parts[0:-1])
if not self.exists(dir_path):
self.mkdir(dir_path, parents=True)
return list(self.get_bite().rename(self.list_path(path), dest))
def rename_dont_move(self, path, dest):
"""
Use snakebite.rename_dont_move, if available.
:param path: source path (single input)
:type path: string
:param dest: destination path
:type dest: string
:return: True if succeeded
:raises: snakebite.errors.FileAlreadyExistsException
"""
from snakebite.errors import FileAlreadyExistsException
try:
self.get_bite().rename2(path, dest, overwriteDest=False)
return True
except FileAlreadyExistsException:
return False
def remove(self, path, recursive=True, skip_trash=False):
"""
Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
        :param recursive: delete directory trees like \*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items
"""
return list(self.get_bite().delete(self.list_path(path), recurse=recursive))
def chmod(self, path, permissions, recursive=False):
"""
Use snakebite.chmod, if available.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param permissions: \*nix style permission number
:type permissions: octal
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
if type(permissions) == str:
permissions = int(permissions, 8)
return list(self.get_bite().chmod(self.list_path(path),
permissions, recursive))
def chown(self, path, owner, group, recursive=False):
"""
Use snakebite.chown/chgrp, if available.
One of owner or group must be set. Just setting group calls chgrp.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param owner: new owner, can be blank
:type owner: string
:param group: new group, can be blank
:type group: string
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
bite = self.get_bite()
if owner:
if group:
return all(bite.chown(self.list_path(path), "%s:%s" % (owner, group),
recurse=recursive))
return all(bite.chown(self.list_path(path), owner, recurse=recursive))
return list(bite.chgrp(self.list_path(path), group, recurse=recursive))
def count(self, path):
"""
Use snakebite.count, if available.
:param path: directory to count the contents of
:type path: string
:return: dictionary with content_size, dir_count and file_count keys
"""
try:
            res = next(self.get_bite().count(self.list_path(path)))
dir_count = res['directoryCount']
file_count = res['fileCount']
content_size = res['spaceConsumed']
except StopIteration:
dir_count = file_count = content_size = 0
return {'content_size': content_size, 'dir_count': dir_count,
'file_count': file_count}
def get(self, path, local_destination):
"""
Use snakebite.copyToLocal, if available.
:param path: HDFS file
:type path: string
:param local_destination: path on the system running Luigi
:type local_destination: string
"""
return list(self.get_bite().copyToLocal(self.list_path(path),
local_destination))
def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
"""
Use snakebite.mkdir, if available.
Snakebite's mkdir method allows control over full path creation, so by
default, tell it to build a full path to work like ``hadoop fs -mkdir``.
:param path: HDFS path to create
:type path: string
:param parents: create any missing parent directories
:type parents: boolean, default is True
:param mode: \*nix style owner/group/other permissions
:type mode: octal, default 0755
"""
result = list(self.get_bite().mkdir(self.list_path(path),
create_parent=parents, mode=mode))
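# The substring match below appears deliberate: snakebite reports the
# failure as a free-form message, and matching 'ile exists' covers both
# the 'File exists' and 'file exists' spellings.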
if raise_if_exists and "ile exists" in result[0].get('error', ''):
raise luigi.target.FileAlreadyExists("%s exists" % (path, ))
return result
def listdir(self, path, ignore_directories=False, ignore_files=False,
include_size=False, include_type=False, include_time=False,
recursive=False):
"""
Use snakebite.ls to get the list of items in a directory.
:param path: the directory to list
:type path: string
:param ignore_directories: if True, do not yield directory entries
:type ignore_directories: boolean, default is False
:param ignore_files: if True, do not yield file entries
:type ignore_files: boolean, default is False
:param include_size: include the size in bytes of the current item
:type include_size: boolean, default is False (do not include)
:param include_type: include the type (d or f) of the current item
:type include_type: boolean, default is False (do not include)
:param include_time: include the last modification time of the current item
:type include_time: boolean, default is False (do not include)
:param recursive: list subdirectory contents
:type recursive: boolean, default is False (do not recurse)
:return: yield with a string, or if any of the include_* settings are
true, a tuple starting with the path, and include_* items in order
"""
bite = self.get_bite()
for entry in bite.ls(self.list_path(path), recurse=recursive):
if ignore_directories and entry['file_type'] == 'd':
continue
if ignore_files and entry['file_type'] == 'f':
continue
rval = [entry['path'], ]
if include_size:
rval.append(entry['length'])
if include_type:
rval.append(entry['file_type'])
if include_time:
rval.append(datetime.datetime.fromtimestamp(entry['modification_time'] / 1000))
if len(rval) > 1:
yield tuple(rval)
else:
yield rval[0]
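# Illustrative usage sketch (paths are assumptions): with include_size=True,
# listdir('/tmp') yields tuples such as ('/tmp/some_file', 1234); with all
# include_* flags off it yields bare path strings.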
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
def warn(msg):
print '[powerline-bash] ', msg
class Powerline:
symbols = {
'compatible': {
'lock': 'RO',
'network': 'SSH',
'separator': u'\u25B6',
'separator_thin': u'\u276F'
},
'patched': {
'lock': u'\uE0A2',
'network': u'\uE0A2',
'separator': u'\uE0B0',
'separator_thin': u'\uE0B1'
},
'flat': {
'lock': '',
'network': '',
'separator': '',
'separator_thin': ''
},
}
color_templates = {
'bash': '\\[\\e%s\\]',
'zsh': '%%{%s%%}',
'bare': '%s',
}
def __init__(self, args, cwd):
self.args = args
self.cwd = cwd
mode, shell = args.mode, args.shell
self.color_template = self.color_templates[shell]
self.reset = self.color_template % '[0m'
self.lock = Powerline.symbols[mode]['lock']
self.network = Powerline.symbols[mode]['network']
self.separator = Powerline.symbols[mode]['separator']
self.separator_thin = Powerline.symbols[mode]['separator_thin']
self.segments = []
def color(self, prefix, code):
return self.color_template % ('[%s;5;%sm' % (prefix, code))
def fgcolor(self, code):
return self.color('38', code)
def bgcolor(self, code):
return self.color('48', code)
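# Illustrative sketch (values assumed, not from the original): with
# shell='bash' the template is '\[\e%s\]', so fgcolor(250) renders as
# '\[\e[38;5;250m\]', an xterm-256color foreground escape wrapped in
# bash's non-printing-character markers.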
def append(self, content, fg, bg, separator=None, separator_fg=None):
self.segments.append((content, fg, bg, separator or self.separator,
separator_fg or bg))
def draw(self):
return (''.join(self.draw_segment(i) for i in range(len(self.segments)))
+ self.reset).encode('utf-8')
def draw_segment(self, idx):
segment = self.segments[idx]
next_segment = self.segments[idx + 1] if idx < len(self.segments)-1 else None
return ''.join((
self.fgcolor(segment[1]),
self.bgcolor(segment[2]),
segment[0],
self.bgcolor(next_segment[2]) if next_segment else self.reset,
self.fgcolor(segment[4]),
segment[3]))
def get_valid_cwd():
""" We check if the current working directory is valid or not. Typically
happens when you checkout a different branch on git that doesn't have
this directory.
We return the original cwd because the shell still considers that to be
the working directory, so returning our guess will confuse people
"""
try:
cwd = os.getcwd()
except OSError:
cwd = os.getenv('PWD') # This is where the OS thinks we are
parts = cwd.split(os.sep)
up = cwd
while parts and not os.path.exists(up):
parts.pop()
up = os.sep.join(parts)
try:
os.chdir(up)
except OSError:
warn("Your current directory is invalid.")
sys.exit(1)
warn("Your current directory is invalid. Lowest valid directory: " + up)
return cwd
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--cwd-only', action='store_true',
help='Only show the current directory')
arg_parser.add_argument('--cwd-max-depth', action='store', type=int,
default=5, help='Maximum number of directories to show in path')
arg_parser.add_argument('--colorize-hostname', action='store_true',
help='Colorize the hostname based on a hash of itself.')
arg_parser.add_argument('--mode', action='store', default='patched',
help='The characters used to make separators between segments',
choices=['patched', 'compatible', 'flat'])
arg_parser.add_argument('--shell', action='store', default='bash',
help='Set this to your shell type', choices=['bash', 'zsh', 'bare'])
arg_parser.add_argument('prev_error', nargs='?', type=int, default=0,
help='Error code returned by the last command')
args = arg_parser.parse_args()
powerline = Powerline(args, get_valid_cwd())
class DefaultColor:
"""
This class should have the default colors for every segment.
Please test every new segment with this theme first.
"""
USERNAME_FG = 250
USERNAME_BG = 240
USERNAME_ROOT_BG = 124
HOSTNAME_FG = 250
HOSTNAME_BG = 238
HOME_SPECIAL_DISPLAY = True
HOME_BG = 31 # blueish
HOME_FG = 15 # white
PATH_BG = 237 # dark grey
PATH_FG = 250 # light grey
CWD_FG = 254 # nearly-white grey
SEPARATOR_FG = 244
READONLY_BG = 124
READONLY_FG = 254
SSH_BG = 166 # medium orange
SSH_FG = 254
REPO_CLEAN_BG = 148 # a light green color
REPO_CLEAN_FG = 0 # black
REPO_DIRTY_BG = 161 # pink/red
REPO_DIRTY_FG = 15 # white
JOBS_FG = 39
JOBS_BG = 238
CMD_PASSED_BG = 236
CMD_PASSED_FG = 15
CMD_FAILED_BG = 161
CMD_FAILED_FG = 15
SVN_CHANGES_BG = 148
SVN_CHANGES_FG = 22 # dark green
VIRTUAL_ENV_BG = 35 # a mid-tone green
VIRTUAL_ENV_FG = 00
class Color(DefaultColor):
"""
This subclass is required when the user chooses the 'default' theme,
because the segments require a 'Color' class for every theme.
"""
pass
import os
def add_virtual_env_segment():
env = os.getenv('VIRTUAL_ENV')
if env is None:
return
env_name = os.path.basename(env)
bg = Color.VIRTUAL_ENV_BG
fg = Color.VIRTUAL_ENV_FG
powerline.append(' %s ' % env_name, fg, bg)
add_virtual_env_segment()
def add_username_segment():
import os
if powerline.args.shell == 'bash':
user_prompt = ' \\u '
elif powerline.args.shell == 'zsh':
user_prompt = ' %n '
else:
user_prompt = ' %s ' % os.getenv('USER')
if os.getenv('USER') == 'root':
bgcolor = Color.USERNAME_ROOT_BG
else:
bgcolor = Color.USERNAME_BG
powerline.append(user_prompt, Color.USERNAME_FG, bgcolor)
add_username_segment()
def add_hostname_segment():
if powerline.args.colorize_hostname:
from lib.color_compliment import stringToHashToColorAndOpposite
from lib.colortrans import rgb2short
from socket import gethostname
hostname = gethostname()
FG, BG = stringToHashToColorAndOpposite(hostname)
FG, BG = (rgb2short(*color) for color in [FG, BG])
host_prompt = ' %s ' % hostname.split('.')[0]
powerline.append(host_prompt, FG, BG)
else:
if powerline.args.shell == 'bash':
host_prompt = ' \\h '
elif powerline.args.shell == 'zsh':
host_prompt = ' %m '
else:
import socket
host_prompt = ' %s ' % socket.gethostname().split('.')[0]
powerline.append(host_prompt, Color.HOSTNAME_FG, Color.HOSTNAME_BG)
add_hostname_segment()
import os
def add_ssh_segment():
if os.getenv('SSH_CLIENT'):
powerline.append(' %s ' % powerline.network, Color.SSH_FG, Color.SSH_BG)
add_ssh_segment()
import os
def get_short_path(cwd):
home = os.getenv('HOME')
names = cwd.split(os.sep)
if names[0] == '': names = names[1:]
path = ''
for i in range(len(names)):
path += os.sep + names[i]
if os.path.samefile(path, home):
return ['~'] + names[i+1:]
if not names[0]:
return ['/']
return names
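# Illustrative example (assumes HOME=/home/user and existing paths):
# get_short_path('/home/user/projects/demo') returns ['~', 'projects', 'demo'],
# while a path outside HOME such as '/etc/nginx' returns ['etc', 'nginx'].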
def add_cwd_segment():
cwd = powerline.cwd or os.getenv('PWD')
names = get_short_path(cwd.decode('utf-8'))
max_depth = powerline.args.cwd_max_depth
if len(names) > max_depth:
names = names[:2] + [u'\u2026'] + names[2 - max_depth:]
if not powerline.args.cwd_only:
for n in names[:-1]:
if n == '~' and Color.HOME_SPECIAL_DISPLAY:
powerline.append(' %s ' % n, Color.HOME_FG, Color.HOME_BG)
else:
powerline.append(' %s ' % n, Color.PATH_FG, Color.PATH_BG,
powerline.separator_thin, Color.SEPARATOR_FG)
if names[-1] == '~' and Color.HOME_SPECIAL_DISPLAY:
powerline.append(' %s ' % names[-1], Color.HOME_FG, Color.HOME_BG)
else:
powerline.append(' %s ' % names[-1], Color.CWD_FG, Color.PATH_BG)
add_cwd_segment()
import os
def add_read_only_segment():
cwd = powerline.cwd or os.getenv('PWD')
if not os.access(cwd, os.W_OK):
powerline.append(' %s ' % powerline.lock, Color.READONLY_FG, Color.READONLY_BG)
add_read_only_segment()
import re
import subprocess
def get_git_status():
has_pending_commits = True
has_untracked_files = False
origin_position = ""
output = subprocess.Popen(['git', 'status', '--ignore-submodules'],
env={"LANG": "C", "HOME": os.getenv("HOME")}, stdout=subprocess.PIPE).communicate()[0]
for line in output.split('\n'):
origin_status = re.findall(
r"Your branch is (ahead|behind).*?(\d+) comm", line)
if origin_status:
origin_position = " %d" % int(origin_status[0][1])
if origin_status[0][0] == 'behind':
origin_position += u'\u21E3'
if origin_status[0][0] == 'ahead':
origin_position += u'\u21E1'
if line.find('nothing to commit') >= 0:
has_pending_commits = False
if line.find('Untracked files') >= 0:
has_untracked_files = True
return has_pending_commits, has_untracked_files, origin_position
def add_git_segment():
# See http://git-blame.blogspot.com/2013/06/checking-current-branch-programatically.html
p = subprocess.Popen(['git', 'symbolic-ref', '-q', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if 'Not a git repo' in err:
return
if out:
branch = out[len('refs/heads/'):].rstrip()
else:
branch = '(Detached)'
has_pending_commits, has_untracked_files, origin_position = get_git_status()
branch += origin_position
if has_untracked_files:
branch += ' +'
bg = Color.REPO_CLEAN_BG
fg = Color.REPO_CLEAN_FG
if has_pending_commits:
bg = Color.REPO_DIRTY_BG
fg = Color.REPO_DIRTY_FG
powerline.append(' %s ' % branch, fg, bg)
try:
add_git_segment()
except OSError:
pass
except subprocess.CalledProcessError:
pass
import os
import subprocess
def get_hg_status():
has_modified_files = False
has_untracked_files = False
has_missing_files = False
output = subprocess.Popen(['hg', 'status'],
stdout=subprocess.PIPE).communicate()[0]
for line in output.split('\n'):
if line == '':
continue
elif line[0] == '?':
has_untracked_files = True
elif line[0] == '!':
has_missing_files = True
else:
has_modified_files = True
return has_modified_files, has_untracked_files, has_missing_files
def add_hg_segment():
branch = os.popen('hg branch 2> /dev/null').read().rstrip()
if len(branch) == 0:
return False
bg = Color.REPO_CLEAN_BG
fg = Color.REPO_CLEAN_FG
has_modified_files, has_untracked_files, has_missing_files = get_hg_status()
if has_modified_files or has_untracked_files or has_missing_files:
bg = Color.REPO_DIRTY_BG
fg = Color.REPO_DIRTY_FG
extra = ''
if has_untracked_files:
extra += '+'
if has_missing_files:
extra += '!'
branch += (' ' + extra if extra != '' else '')
return powerline.append(' %s ' % branch, fg, bg)
add_hg_segment()
import subprocess
def add_svn_segment():
is_svn = subprocess.Popen(['svn', 'status'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
is_svn_output = is_svn.communicate()[1].strip()
if len(is_svn_output) != 0:
return
#"svn status | grep -c "^[ACDIMRX\\!\\~]"
p1 = subprocess.Popen(['svn', 'status'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p2 = subprocess.Popen(['grep', '-c', '^[ACDIMR\\!\\~]'],
stdin=p1.stdout, stdout=subprocess.PIPE)
output = p2.communicate()[0].strip()
if len(output) > 0 and int(output) > 0:
changes = output.strip()
powerline.append(' %s ' % changes, Color.SVN_CHANGES_FG, Color.SVN_CHANGES_BG)
try:
add_svn_segment()
except OSError:
pass
except subprocess.CalledProcessError:
pass
import os
import subprocess
def get_fossil_status():
has_modified_files = False
has_untracked_files = False
has_missing_files = False
output = os.popen('fossil changes 2>/dev/null').read().strip()
has_untracked_files = bool(os.popen("fossil extras 2>/dev/null").read().strip())
has_missing_files = 'MISSING' in output
has_modified_files = 'EDITED' in output
return has_modified_files, has_untracked_files, has_missing_files
def add_fossil_segment():
subprocess.Popen(['fossil'], stdout=subprocess.PIPE).communicate()[0]
branch = ''.join([i.replace('*','').strip() for i in os.popen("fossil branch 2> /dev/null").read().strip().split("\n") if i.startswith('*')])
if len(branch) == 0:
return
bg = Color.REPO_CLEAN_BG
fg = Color.REPO_CLEAN_FG
has_modified_files, has_untracked_files, has_missing_files = get_fossil_status()
if has_modified_files or has_untracked_files or has_missing_files:
bg = Color.REPO_DIRTY_BG
fg = Color.REPO_DIRTY_FG
extra = ''
if has_untracked_files:
extra += '+'
if has_missing_files:
extra += '!'
branch += (' ' + extra if extra != '' else '')
powerline.append(' %s ' % branch, fg, bg)
try:
add_fossil_segment()
except OSError:
pass
except subprocess.CalledProcessError:
pass
import os
import re
import subprocess
def add_jobs_segment():
pppid = subprocess.Popen(['ps', '-p', str(os.getppid()), '-oppid='], stdout=subprocess.PIPE).communicate()[0].strip()
output = subprocess.Popen(['ps', '-a', '-o', 'ppid'], stdout=subprocess.PIPE).communicate()[0]
num_jobs = len(re.findall(str(pppid), output)) - 1
if num_jobs > 0:
powerline.append(' %d ' % num_jobs, Color.JOBS_FG, Color.JOBS_BG)
add_jobs_segment()
def add_root_indicator_segment():
root_indicators = {
'bash': ' \\$ ',
'zsh': ' \\$ ',
'bare': ' $ ',
}
bg = Color.CMD_PASSED_BG
fg = Color.CMD_PASSED_FG
if powerline.args.prev_error != 0:
fg = Color.CMD_FAILED_FG
bg = Color.CMD_FAILED_BG
powerline.append(root_indicators[powerline.args.shell], fg, bg)
add_root_indicator_segment()
sys.stdout.write(powerline.draw())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Forsteri documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 24 16:09:15 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Forsteri'
copyright = '2015, Andrew C. Hawkins'
author = 'Andrew C. Hawkins'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Forsteridoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Forsteri.tex', 'Forsteri Documentation',
'Andrew Hawkins', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'forsteri', 'Forsteri Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Forsteri', 'Forsteri Documentation',
author, 'Forsteri', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-26 16:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=1000)),
('image', models.ImageField(null=True, upload_to='')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('likes', models.ManyToManyField(related_name='likes', to=settings.AUTH_USER_MODEL)),
('shares', models.ManyToManyField(related_name='shares', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PostComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(max_length=250)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('commenting_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Post')),
],
),
]
|
#!/usr/bin/env python3
"""
Detect frequently repeating runs in a source file. Useful because some
letter pairs and triplets are well known to occur frequently in English.
"""
################################################################################
from input_handling import read_file
from collections import Counter
def get_runs(source, length):
"""
Return a genexpr of runs in a source string, using simple slicing.
"""
return (source[i:i + length] for i in range(len(source) - length + 1))
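# Illustrative doctest-style example:
#   >>> list(get_runs("banana", 2))
#   ['ba', 'an', 'na', 'an', 'na']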
def run_chart(source, length, maxdisplay, width):
"""
Generate a more visual chart of runs.
"""
count = Counter(get_runs(source, length))
total = sum(count.values())
# unpack the highest frequency in the chart
(_, most), = count.most_common(1)
# determine various padding values
longest_run = max(len("{!r}".format(run)) for run in count)
longest_i = max(len("{}".format(freq)) for freq in count.values())
# various padding referred to
return "\n".join("run {!r:{l}} {:{il}} times ({:6.2%}) {}"
.format(
run, freq, freq / total,
"-" * int(width * freq / most),
# provide padding values
l=longest_run, il=longest_i)
for run, freq in count.most_common(maxdisplay))
# if called directly, show some runs
if __name__ == "__main__":
source = read_file()
print("The 10 most frequent 3-runs are:\n{}\n"
.format(run_chart(source, 3, 10, 50)))
|
import os
import argparse
import logging
import logging.handlers
import urllib.request, urllib.parse, urllib.error
import requests
import datetime
import json
import re
import time
LOG_NAME = "logagg"
APP_LOG = "app"
PAGE_SIZE = 10000  # Number of records to retrieve per request
DEFAULT_CORE = "event_core"  # name of the Solr core to query
MAX_LOGFILE_SIZE = 1073741824  # 1 GB
#========================
#==== Client implementing an iterator for paging over Solr results
SOLR_RESERVED_CHAR_LIST = [
'+', '-', '&', '|', '!', '(', ')', '{', '}', '[', ']', '^', '"', '~', '*',
'?', ':'
]
def escapeSolrQueryTerm(term):
term = term.replace('\\', '\\\\')
for c in SOLR_RESERVED_CHAR_LIST:
term = term.replace(c, '\\{}'.format(c))
return term
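# Illustrative example: escapeSolrQueryTerm('1+1:2') returns the string
# 1\+1\:2 (a single backslash before each reserved character; backslashes
# in the input are escaped first so they are not double-processed).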
class SolrClient(object):
def __init__(self, base_url, core_name, select="/"):
self.base_url = base_url
self.core_name = core_name
self._select = select
self.logger = logging.getLogger(APP_LOG)
self.client = requests.Session()
def doGet(self, params):
params['wt'] = 'json'
url = self.base_url + "/" + self.core_name + self._select
response = self.client.get(url, params=params)
data = json.loads(response.text)
return data
def getFieldValues(self, name,
q='*:*',
fq=None,
maxvalues=-1,
sort=True,
**query_args):
"""Retrieve the unique values for a field, along with their usage counts.
:param sort: Sort the result
:param name: Name of field for which to retrieve values
:type name: string
:param q: Query identifying the records from which values will be retrieved
:type q: string
:param fq: Filter query restricting operation of query
:type fq: string
:param maxvalues: Maximum number of values to retrieve. Default is -1,
which causes retrieval of all values.
:type maxvalues: int
:returns: dict mapping the field name to Solr's facet value/count data, plus a 'numFound' entry
"""
params = {
'q': q,
'rows': '0',
'facet': 'true',
'facet.field': name,
'facet.limit': str(maxvalues),
'facet.zeros': 'false',
'facet.sort': str(sort).lower(),
'fq': fq,
}
resp_dict = self.doGet(params)
result_dict = resp_dict['facet_counts']['facet_fields']
result_dict['numFound'] = resp_dict['response']['numFound']
return result_dict
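# Illustrative usage sketch (host and field name are assumptions):
#   client = SolrClient('http://localhost:8983/solr', DEFAULT_CORE)
#   counts = client.getFieldValues('eventType')
# The result mirrors Solr's facet_counts/facet_fields section with an
# extra 'numFound' entry added.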
class SolrSearchResponseIterator(SolrClient):
"""Performs a search against a Solr index and acts as an iterator to retrieve
all the values."""
def __init__(self, select_url, q, fq=None, fields='*', page_size=PAGE_SIZE, max_records=None, sort=None, **query_args):
# SolrClient.__init__ expects a base URL and core name, but this iterator
# receives the full select URL, so set up the shared logger and HTTP
# session directly instead of calling the parent constructor.
self.logger = logging.getLogger(APP_LOG)
self.client = requests.Session()
self.select_url = select_url
self.q = q
self.fq = fq
self.fields = fields
self.query_args = query_args
if max_records is None:
max_records = 9999999999
self.max_records = max_records
self.sort = sort
self.c_record = 0
self.page_size = page_size
self.res = None
self.done = False
self._num_hits = 0
self._next_page(self.c_record)
if self.res['response']['numFound'] > 1000:
self.logger.warning("Retrieving %d records...", self.res['response']['numFound'])
def _next_page(self, offset):
"""Retrieves the next set of results from the service."""
self.logger.debug("Iterator c_record=%d", self.c_record)
start_time = time.time()
page_size = self.page_size
if (offset + page_size) > self.max_records:
page_size = self.max_records - offset
query_dict = {
'q': self.q,
'start': str(offset),
'rows': str(page_size),
'fl': self.fields,
'wt': 'json',
}
if self.fq is not None:
query_dict['fq'] = self.fq
if self.sort is not None:
query_dict['sort'] = self.sort
params = urllib.parse.urlencode(query_dict) #, quote_via=urllib.parse.quote)
self.logger.debug("request params = %s", str(params))
response = self.client.get(self.select_url, params=params)
self.res = json.loads(response.text)
self._num_hits = int(self.res['response']['numFound'])
end_time = time.time()
self.logger.debug("Page loaded in %.4f seconds.", end_time - start_time)
def __iter__(self):
return self
def process_row(self, row):
"""Override this method in derived classes to reformat the row response."""
return row
def __next__(self):
if self.done:
raise StopIteration()
if self.c_record > self.max_records:
self.done = True
raise StopIteration()
idx = self.c_record - self.res['response']['start']
try:
row = self.res['response']['docs'][idx]
except IndexError:
self._next_page(self.c_record)
idx = self.c_record - self.res['response']['start']
try:
row = self.res['response']['docs'][idx]
except IndexError:
self.done = True
raise StopIteration()
self.c_record = self.c_record + 1
return row
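# Illustrative usage sketch (the select URL is an assumption):
#   results = SolrSearchResponseIterator(
#       'http://localhost:8983/solr/event_core/select',
#       q='*:*', fields='id', page_size=100)
#   for doc in results:
#       process(doc)  # 'process' is a placeholder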
|
"""
Utility methods related to course
"""
import logging
import six
from django.conf import settings
from django.utils.timezone import now
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
COURSE_SHARING_UTM_PARAMETERS = {
'facebook': {
'utm_medium': 'social',
'utm_campaign': 'social-sharing-db',
'utm_source': 'facebook',
},
'twitter': {
'utm_medium': 'social',
'utm_campaign': 'social-sharing-db',
'utm_source': 'twitter',
},
}
def get_encoded_course_sharing_utm_params():
"""
Returns encoded Course Sharing UTM Parameters.
"""
return {
utm_source: six.moves.urllib.parse.urlencode(utm_params)
for utm_source, utm_params in six.iteritems(COURSE_SHARING_UTM_PARAMETERS)
}
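# Illustrative result sketch (query-parameter ordering is not guaranteed):
#   {'facebook': 'utm_medium=social&utm_campaign=social-sharing-db&utm_source=facebook',
#    'twitter': 'utm_medium=social&utm_campaign=social-sharing-db&utm_source=twitter'}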
def get_link_for_about_page(course):
"""
Arguments:
course: This can be either a course overview object or a course descriptor.
Returns the course sharing url, this can be one of course's social sharing url, marketing url, or
lms course about url.
"""
is_social_sharing_enabled = configuration_helpers.get_value(
'SOCIAL_SHARING_SETTINGS',
getattr(settings, 'SOCIAL_SHARING_SETTINGS', {})
).get('CUSTOM_COURSE_URLS')
if is_social_sharing_enabled and course.social_sharing_url:
course_about_url = course.social_sharing_url
elif settings.FEATURES.get('ENABLE_MKTG_SITE') and getattr(course, 'marketing_url', None):
course_about_url = course.marketing_url
else:
about_base = configuration_helpers.get_value_for_org(
course.id.org,
'LMS_ROOT_URL',
settings.LMS_ROOT_URL
)
course_about_url = u'{about_base_url}/courses/{course_key}/about'.format(
about_base_url=about_base,
course_key=six.text_type(course.id),
)
return course_about_url
def has_certificates_enabled(course):
"""
Arguments:
course: This can be either a course overview object or a course descriptor.
Returns a boolean if the course has enabled certificates
"""
if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
return False
return course.cert_html_view_enabled
def should_display_grade(course_overview):
"""
Returns True or False depending upon either certificate available date
or course-end-date
"""
course_end_date = course_overview.end_date
cert_available_date = course_overview.certificate_available_date
current_date = now().replace(hour=0, minute=0, second=0, microsecond=0)
if cert_available_date:
return cert_available_date < current_date
return course_end_date and course_end_date < current_date
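# Illustrative note: when certificate_available_date is set, grades show once
# that date is strictly before today-at-midnight; otherwise the course end
# date is compared the same way.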
|
#!/usr/bin/env python
import json, os, sys
import common
def main(argv):
# Command Line Arguments
args = [{'arg':'--apigateway', 'dest':'apigateway', 'default':None, 'type':'boolean', 'help':'Call API Gateway endpoint'}]
if '--apigateway' in argv:
args.extend([{'arg':'--stack', 'dest':'stack', 'default':None, 'help':'CloudFormation stack name'}])
else:
args.extend([{'arg':'--host', 'dest':'host', 'default':'localhost:2000', 'help':'Blox Scheduler <Host>:<Port>'}])
args.extend([{'arg':'--environment', 'dest':'environment', 'default':None, 'help':'Blox environment name'}])
args.extend([{'arg':'--cluster', 'dest':'cluster', 'default':None, 'help':'ECS cluster name'}])
args.extend([{'arg':'--task-definition', 'dest':'taskDef', 'default':None, 'help':'ECS task definition arn'}])
# Parse Command Line Arguments
params = common.parse_cli_args('Create Blox Environment', args)
if params.apigateway:
run_apigateway(params)
else:
run_local(params)
# Call Blox Scheduler API Gateway Endpoint
def run_apigateway(params):
command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "RestApi"]
restApi = common.run_shell_command(params.region, command)
command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "ApiResource"]
restResource = common.run_shell_command(params.region, command)
body = {'name': params.environment, 'instanceGroup': {'cluster': params.cluster}, 'taskDefinition': params.taskDef}
command = ["apigateway", "test-invoke-method", "--rest-api-id", restApi['StackResourceDetail']['PhysicalResourceId'], "--resource-id", restResource['StackResourceDetail']['PhysicalResourceId'], "--http-method", "POST", "--headers", "{}", "--path-with-query-string", "/v1/environments", "--body", json.dumps(body)]
response = common.run_shell_command(params.region, command)
print "HTTP Response Code: %d" % response['status']
try:
obj = json.loads(response['body'])
print json.dumps(obj, indent=2)
except Exception as e:
print "Error: Could not parse response - %s" % e
print json.dumps(response, indent=2)
sys.exit(1)
# Call Blox Scheduler Local Endpoint
def run_local(params):
api = common.Object()
api.method = 'POST'
api.headers = {}
api.host = params.host
api.uri = '/v1/environments'
api.queryParams = {}
api.data = {'name': params.environment, 'instanceGroup': {'cluster': params.cluster}, 'taskDefinition': params.taskDef}
response = common.call_api(api)
print "HTTP Response Code: %d" % response.status
try:
obj = json.loads(response.body)
print json.dumps(obj, indent=2)
except Exception as e:
print "Error: Could not parse response - %s" % e
print response.body
sys.exit(1)
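# Illustrative invocation sketch (script name and argument values are
# placeholders, not taken from this repository):
#   python environment.py --host localhost:2000 --environment my-env \
#       --cluster my-cluster --task-definition my-task-def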
if __name__ == "__main__":
main(sys.argv[1:])
|
# client.py
#
# Copyright 2002-2007 Wichert Akkerman <wichert@wiggy.net>
__docformat__ = "epytext en"
import select
import socket
import time
import six
from pyrad import host
from pyrad import packet
class Timeout(Exception):
"""Simple exception class which is raised when a timeout occurs
while waiting for a RADIUS server to respond."""
class Client(host.Host):
"""Basic RADIUS client.
This class implements a basic RADIUS client. It can send requests
to a RADIUS server, taking care of timeouts and retries, and
validate its replies.
:ivar retries: number of times to retry sending a RADIUS request
:type retries: integer
:ivar timeout: number of seconds to wait for an answer
:type timeout: integer
"""
def __init__(self, server, authport=1812, acctport=1813,
coaport=3799, discport=1700, secret=six.b(''), dict=None):
"""Constructor.
:param server: hostname or IP address of RADIUS server
:type server: string
:param authport: port to use for authentication packets
:type authport: integer
:param acctport: port to use for accounting packets
:type acctport: integer
:param coaport: port to use for CoA packets
:type coaport: integer
:param discport: port to use for disconnect packets
:type discport: integer
:param secret: RADIUS secret
:type secret: string
:param dict: RADIUS dictionary
:type dict: pyrad.dictionary.Dictionary
"""
host.Host.__init__(self, authport, acctport, coaport, discport, dict)
self.server = server
self.secret = secret
self._socket = None
self.retries = 3
self.timeout = 5
def bind(self, addr):
"""Bind socket to an address.
Binding the socket used for communicating to an address can be
useful when working on a machine with multiple addresses.
:param addr: network address (hostname or IP) and port to bind to
:type addr: host,port tuple
"""
self._CloseSocket()
self._SocketOpen()
self._socket.bind(addr)
def _SocketOpen(self):
if not self._socket:
self._socket = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
def _CloseSocket(self):
if self._socket:
self._socket.close()
self._socket = None
def CreateAuthPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. It initializes the new packet with the
dictionary and secret used by the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateAuthPacket(self, secret=self.secret, **args)
def CreateAcctPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. It initializes the new packet with the
dictionary and secret used by the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateAcctPacket(self, secret=self.secret, **args)
def CreateCoAPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. It initializes the new packet with the
dictionary and secret used by the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateCoAPacket(self, secret=self.secret, **args)
def CreateDiscPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. It initializes the new packet with the
dictionary and secret used by the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateDiscPacket(self, secret=self.secret, **args)
def _SendPacket(self, pkt, port):
"""Send a packet to a RADIUS server.
:param pkt: the packet to send
:type pkt: pyrad.packet.Packet
:param port: UDP port to send packet to
:type port: integer
:return: the reply packet received
:rtype: pyrad.packet.Packet
:raise Timeout: RADIUS server does not reply
"""
self._SocketOpen()
for attempt in range(self.retries):
if attempt and pkt.code == packet.AccountingRequest:
if "Acct-Delay-Time" in pkt:
pkt["Acct-Delay-Time"] = \
pkt["Acct-Delay-Time"][0] + self.timeout
else:
pkt["Acct-Delay-Time"] = self.timeout
self._socket.sendto(pkt.RequestPacket(), (self.server, port))
now = time.time()
waitto = now + self.timeout
while now < waitto:
ready = select.select([self._socket], [], [],
(waitto - now))
if ready[0]:
rawreply = self._socket.recv(4096)
else:
now = time.time()
continue
try:
reply = pkt.CreateReply(packet=rawreply)
if pkt.VerifyReply(reply, rawreply):
return reply
except packet.PacketError:
pass
now = time.time()
raise Timeout
def SendPacket(self, pkt):
"""Send a packet to a RADIUS server.
:param pkt: the packet to send
:type pkt: pyrad.packet.Packet
:return: the reply packet received
:rtype: pyrad.packet.Packet
:raise Timeout: RADIUS server does not reply
"""
if isinstance(pkt, packet.AuthPacket):
return self._SendPacket(pkt, self.authport)
elif isinstance(pkt, packet.CoAPacket):
return self._SendPacket(pkt, self.coaport)
elif isinstance(pkt, packet.DiscPacket):
return self._SendPacket(pkt, self.discport)
else:
return self._SendPacket(pkt, self.acctport)
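# Illustrative usage sketch (server address, secret and dictionary path are
# assumptions, not part of this module):
#   from pyrad.dictionary import Dictionary
#   client = Client('127.0.0.1', secret=six.b('s3cret'),
#                   dict=Dictionary('dictionary'))
#   req = client.CreateAuthPacket(code=packet.AccessRequest, User_Name='bob')
#   reply = client.SendPacket(req)  # raises Timeout if the server is silent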
|
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""cylc [control] broadcast|bcast [OPTIONS] REG
Override [runtime] config in targeted namespaces in a running suite.
Uses for broadcast include making temporary changes to task behaviour,
and task-to-downstream-task communication via environment variables.
A broadcast can target any [runtime] namespace for all cycles or for a
specific cycle. If a task is affected by specific-cycle and all-cycle
broadcasts at once, the specific takes precedence. If a task is affected
by broadcasts to multiple ancestor namespaces, the result is determined
by normal [runtime] inheritance. In other words, it follows this order:
all:root -> all:FAM -> all:task -> tag:root -> tag:FAM -> tag:task
Broadcasts persist, even across suite restarts, until they expire when
their target cycle point is older than the oldest current in the suite,
or until they are explicitly cancelled with this command. All-cycle
broadcasts do not expire.
For each task the final effect of all broadcasts to all namespaces is
computed on the fly just prior to job submission. The --cancel and
--clear options simply cancel (remove) active broadcasts, they do not
act directly on the final task-level result. Consequently, for example,
you cannot broadcast to "all cycles except Tn" with an all-cycle
broadcast followed by a cancel to Tn (there is no direct broadcast to Tn
to cancel); and you cannot broadcast to "all members of FAMILY except
member_n" with a general broadcast to FAMILY followed by a cancel to
member_n (there is no direct broadcast to member_n to cancel).
To broadcast a variable to all tasks (quote items with internal spaces):
% cylc broadcast -s "[environment]VERSE = the quick brown fox" REG
To do the same with a file:
% cat >'broadcast.rc' <<'__RC__'
% [environment]
% VERSE = the quick brown fox
% __RC__
% cylc broadcast -F 'broadcast.rc' REG
To cancel the same broadcast:
% cylc broadcast --cancel "[environment]VERSE" REG
If -F FILE was used, the same file can be used to cancel the broadcast:
% cylc broadcast -G 'broadcast.rc' REG
Use -d/--display to see active broadcasts. Multiple --cancel options or
multiple --set and --set-file options can be used on the same command line.
Multiple --set and --set-file options are cumulative.
The --set-file=FILE option can be used when broadcasting multiple values, or
when the value contains newline or other metacharacters. If FILE is "-", read
from standard input.
Broadcast cannot change [runtime] inheritance.
See also 'cylc reload' - reload a modified suite definition at run time."""
import sys
if '--use-ssh' in sys.argv[1:]:
sys.argv.remove('--use-ssh')
from cylc.flow.remote import remrun
if remrun():
sys.exit(0)
import re
from tempfile import NamedTemporaryFile
from ansimarkup import parse as cparse
from cylc.flow.task_id import TaskID
from cylc.flow.terminal import cli_function
from cylc.flow.exceptions import UserInputError
from cylc.flow.print_tree import print_tree
from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.broadcast_report import (
get_broadcast_bad_options_report, get_broadcast_change_report)
from cylc.flow.cfgspec.suite import SPEC, upg
from cylc.flow.network.client import SuiteRuntimeClient
from cylc.flow.parsec.config import ParsecConfig
from cylc.flow.parsec.validate import cylc_config_validate
REC_ITEM = re.compile(r'^\[([^\]]*)\](.*)$')
def get_padding(settings, level=0, padding=0):
"""Return the left padding for displaying a setting."""
level += 1
for key, val in settings.items():
tmp = level * 2 + len(key)
if tmp > padding:
padding = tmp
if isinstance(val, dict):
padding = get_padding(val, level, padding)
return padding
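# Illustrative example: get_padding({'a': {'bb': 'x'}}) returns 6, i.e. the
# deepest key 'bb' at nesting level 2 needs 2 * 2 + len('bb') columns.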
def get_rdict(left, right=None):
"""Check+transform left=right into a nested dict.
left can be key, [key], [key1]key2, [key1][key2], [key1][key2]key3, etc.
"""
if left == "inherit":
raise UserInputError(
"Inheritance cannot be changed by broadcast")
rdict = {}
cur_dict = rdict
tail = left
while tail:
match = REC_ITEM.match(tail)
if match:
sect, tail = match.groups()
if tail:
# [sect]... = right
cur_dict[sect.strip()] = {}
cur_dict = cur_dict[sect.strip()]
else:
# [sect] = right
cur_dict[sect.strip()] = right
else:
# item = right
cur_dict[tail.strip()] = right
tail = None
upg({'runtime': {'__MANY__': rdict}}, 'test')
cylc_config_validate(rdict, SPEC['runtime']['__MANY__'])
return rdict
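# Illustrative example (validation aside): get_rdict('[environment]VERSE', 'x')
# builds {'environment': {'VERSE': 'x'}} before it is upgraded and validated
# against the [runtime] spec.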
def files_to_settings(settings, setting_files, cancel_mode=False):
"""Parse setting files, and append to settings."""
cfg = ParsecConfig(
SPEC['runtime']['__MANY__'], validator=cylc_config_validate)
for setting_file in setting_files:
if setting_file == '-':
with NamedTemporaryFile() as handle:
handle.write(sys.stdin.read().encode())
handle.seek(0, 0)
cfg.loadcfg(handle.name)
else:
cfg.loadcfg(setting_file)
stack = [([], cfg.get(sparse=True))]
while stack:
keys, item = stack.pop()
if isinstance(item, dict):
for key, value in item.items():
stack.append((keys + [key], value))
else:
settings.append({})
cur_setting = settings[-1]
while keys:
key = keys.pop(0)
if keys:
cur_setting[key] = {}
cur_setting = cur_setting[key]
elif cancel_mode:
cur_setting[key] = None
else:
cur_setting[key] = item
def report_bad_options(bad_options, is_set=False):
bad_opts = get_broadcast_bad_options_report(bad_options, is_set=is_set)
if bad_opts is not None:
return cparse(f'<red>{bad_opts}</red>')
return bad_opts
def get_option_parser():
"""CLI for "cylc broadcast"."""
parser = COP(__doc__, comms=True)
parser.add_option(
"-p", "--point", metavar="CYCLE_POINT",
help="Target cycle point. More than one can be added. "
"Defaults to '*' with --set and --cancel, "
"and nothing with --clear.",
action="append", dest="point_strings", default=[])
parser.add_option(
"-n", "--namespace", metavar="NAME",
help="Target namespace. Defaults to 'root' with "
"--set and --cancel, and nothing with --clear.",
action="append", dest="namespaces", default=[])
parser.add_option(
"-s", "--set", metavar="[SEC]ITEM=VALUE",
help="A [runtime] config item and value to broadcast.",
action="append", dest="settings", default=[])
parser.add_option(
"-F", "--set-file", "--file", metavar="FILE",
help="File with config to broadcast. Can be used multiple times.",
action="append", dest="setting_files", default=[])
parser.add_option(
"-c", "--cancel", metavar="[SEC]ITEM",
help="An item-specific broadcast to cancel.",
action="append", dest="cancel", default=[])
parser.add_option(
"-G", "--cancel-file", metavar="FILE",
help="File with broadcasts to cancel. Can be used multiple times.",
action="append", dest="cancel_files", default=[])
parser.add_option(
"-C", "--clear",
help="Cancel all broadcasts, or with -p/--point, "
"-n/--namespace, cancel all broadcasts to targeted "
"namespaces and/or cycle points. Use \"-C -p '*'\" "
"to cancel all all-cycle broadcasts without canceling "
"all specific-cycle broadcasts.",
action="store_true", dest="clear", default=False)
parser.add_option(
"-e", "--expire", metavar="CYCLE_POINT",
help="Cancel any broadcasts that target cycle "
"points earlier than, but not inclusive of, CYCLE_POINT.",
action="store", default=None, dest="expire")
parser.add_option(
"-d", "--display",
help="Display active broadcasts.",
action="store_true", default=False, dest="show")
parser.add_option(
"-k", "--display-task", metavar="TASKID",
help="Print active broadcasts for a given task "
"(" + TaskID.SYNTAX + ").",
action="store", default=None, dest="showtask")
parser.add_option(
"-b", "--box",
help="Use unicode box characters with -d, -k.",
action="store_true", default=False, dest="unicode")
parser.add_option(
"-r", "--raw",
help="With -d/--display or -k/--display-task, write out "
"the broadcast config structure in raw Python form.",
action="store_true", default=False, dest="raw")
return parser
@cli_function(get_option_parser)
def main(_, options, suite):
"""Implement cylc broadcast."""
pclient = SuiteRuntimeClient(
suite, options.owner, options.host, options.port,
options.comms_timeout)
if options.show or options.showtask:
if options.showtask:
try:
TaskID.split(options.showtask)
except ValueError:
raise UserInputError("TASKID must be " + TaskID.SYNTAX)
settings = pclient('get_broadcast', {'task_id': options.showtask})
padding = get_padding(settings) * ' '
if options.raw:
print(str(settings))
else:
print_tree(settings, padding, options.unicode)
sys.exit(0)
if options.clear:
modified_settings, bad_options = pclient(
'clear_broadcast',
{'point_strings': options.point_strings,
'namespaces': options.namespaces}
)
if modified_settings:
print(get_broadcast_change_report(
modified_settings, is_cancel=True))
sys.exit(report_bad_options(bad_options))
if options.expire:
modified_settings, bad_options = pclient(
'expire_broadcast',
{'cutoff': options.expire}
)
if modified_settings:
print(get_broadcast_change_report(
modified_settings, is_cancel=True))
sys.exit(report_bad_options(bad_options))
# implement namespace and cycle point defaults here
namespaces = options.namespaces
if not namespaces:
namespaces = ["root"]
point_strings = options.point_strings
if not point_strings:
point_strings = ["*"]
if options.cancel or options.cancel_files:
settings = []
for option_item in options.cancel:
if "=" in option_item:
raise UserInputError(
"--cancel=[SEC]ITEM does not take a value")
option_item = option_item.strip()
setting = get_rdict(option_item)
settings.append(setting)
files_to_settings(settings, options.cancel_files, options.cancel)
modified_settings, bad_options = pclient(
'clear_broadcast',
{'point_strings': point_strings,
'namespaces': namespaces,
'cancel_settings': settings}
)
if modified_settings:
print(get_broadcast_change_report(
modified_settings, is_cancel=True))
sys.exit(report_bad_options(bad_options))
if options.settings or options.setting_files:
settings = []
for option_item in options.settings:
if "=" not in option_item:
raise UserInputError(
"--set=[SEC]ITEM=VALUE requires a value")
lhs, rhs = [s.strip() for s in option_item.split("=", 1)]
setting = get_rdict(lhs, rhs)
settings.append(setting)
files_to_settings(settings, options.setting_files)
modified_settings, bad_options = pclient(
'put_broadcast',
{'point_strings': point_strings,
'namespaces': namespaces,
'settings': settings
}
)
print(get_broadcast_change_report(modified_settings))
sys.exit(report_bad_options(bad_options, is_set=True))
if __name__ == "__main__":
main()
|
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
import pkg_resources
from .common import CMakeException, CMakeTarget, TargetOptions
from .client import CMakeClient, RequestCMakeInputs, RequestConfigure, RequestCompute, RequestCodeModel
from .fileapi import CMakeFileAPI
from .executor import CMakeExecutor
from .traceparser import CMakeTraceParser, CMakeGeneratorTarget
from .. import mlog, mesonlib
from ..environment import Environment
from ..mesonlib import MachineChoice, OrderedSet, version_compare
from ..compilers.compilers import lang_suffixes, header_suffixes, obj_suffixes, lib_suffixes, is_header
from enum import Enum
from functools import lru_cache
from pathlib import Path
import typing as T
import os, re
from ..mparser import (
Token,
BaseNode,
CodeBlockNode,
FunctionNode,
ArrayNode,
ArgumentNode,
AssignmentNode,
BooleanNode,
StringNode,
IdNode,
IndexNode,
MethodNode,
NumberNode,
)
if T.TYPE_CHECKING:
from ..build import Build
from ..backend.backends import Backend
# Disable all warnings automatically enabled with --trace and friends
# See https://cmake.org/cmake/help/latest/variable/CMAKE_POLICY_WARNING_CMPNNNN.html
disable_policy_warnings = [
'CMP0025',
'CMP0047',
'CMP0056',
'CMP0060',
'CMP0065',
'CMP0066',
'CMP0067',
'CMP0082',
'CMP0089',
]
backend_generator_map = {
'ninja': 'Ninja',
'xcode': 'Xcode',
'vs2010': 'Visual Studio 10 2010',
'vs2015': 'Visual Studio 15 2017',
'vs2017': 'Visual Studio 15 2017',
'vs2019': 'Visual Studio 16 2019',
}
language_map = {
'c': 'C',
'cpp': 'CXX',
'cuda': 'CUDA',
'objc': 'OBJC',
'objcpp': 'OBJCXX',
'cs': 'CSharp',
'java': 'Java',
'fortran': 'Fortran',
'swift': 'Swift',
}
target_type_map = {
'STATIC_LIBRARY': 'static_library',
'MODULE_LIBRARY': 'shared_module',
'SHARED_LIBRARY': 'shared_library',
'EXECUTABLE': 'executable',
'OBJECT_LIBRARY': 'static_library',
'INTERFACE_LIBRARY': 'header_only'
}
skip_targets = ['UTILITY']
blacklist_compiler_flags = [
'-Wall', '-Wextra', '-Weverything', '-Werror', '-Wpedantic', '-pedantic', '-w',
'/W1', '/W2', '/W3', '/W4', '/Wall', '/WX', '/w',
'/O1', '/O2', '/Ob', '/Od', '/Og', '/Oi', '/Os', '/Ot', '/Ox', '/Oy', '/Ob0',
'/RTC1', '/RTCc', '/RTCs', '/RTCu',
'/Z7', '/Zi', '/ZI',
]
blacklist_link_flags = [
'/machine:x64', '/machine:x86', '/machine:arm', '/machine:ebc',
'/debug', '/debug:fastlink', '/debug:full', '/debug:none',
'/incremental',
]
blacklist_clang_cl_link_flags = ['/GR', '/EHsc', '/MDd', '/Zi', '/RTC1']
blacklist_link_libs = [
'kernel32.lib',
'user32.lib',
'gdi32.lib',
'winspool.lib',
'shell32.lib',
'ole32.lib',
'oleaut32.lib',
'uuid.lib',
'comdlg32.lib',
'advapi32.lib'
]
transfer_dependencies_from = ['header_only']
_cmake_name_regex = re.compile(r'[^_a-zA-Z0-9]')
def _sanitize_cmake_name(name: str) -> str:
name = _cmake_name_regex.sub('_', name)
return 'cm_' + name
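# Illustrative example: _sanitize_cmake_name('foo-bar.1') returns
# 'cm_foo_bar_1'; every character outside [_a-zA-Z0-9] is replaced.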
class OutputTargetMap:
rm_so_version = re.compile(r'(\.[0-9]+)+$')
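    # Illustration (assumption, not upstream): rm_so_version.sub('', 'libfoo.so.1.2.3')
    # strips the trailing '.1.2.3' version suffix and yields 'libfoo.so'.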
def __init__(self, build_dir: str):
self.tgt_map = {}
self.build_dir = build_dir
def add(self, tgt: T.Union['ConverterTarget', 'ConverterCustomTarget']) -> None:
def assign_keys(keys: T.List[str]) -> None:
for i in [x for x in keys if x]:
self.tgt_map[i] = tgt
keys = [self._target_key(tgt.cmake_name)]
if isinstance(tgt, ConverterTarget):
keys += [tgt.full_name]
keys += [self._rel_artifact_key(x) for x in tgt.artifacts]
keys += [self._base_artifact_key(x) for x in tgt.artifacts]
if isinstance(tgt, ConverterCustomTarget):
keys += [self._rel_generated_file_key(x) for x in tgt.original_outputs]
keys += [self._base_generated_file_key(x) for x in tgt.original_outputs]
assign_keys(keys)
def _return_first_valid_key(self, keys: T.List[str]) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
for i in keys:
if i and i in self.tgt_map:
return self.tgt_map[i]
return None
def target(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._target_key(name)])
def executable(self, name: str) -> T.Optional['ConverterTarget']:
tgt = self.target(name)
if tgt is None or not isinstance(tgt, ConverterTarget):
return None
if tgt.meson_func() != 'executable':
return None
return tgt
def artifact(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
keys = []
candidates = [name, OutputTargetMap.rm_so_version.sub('', name)]
for i in lib_suffixes:
if not name.endswith('.' + i):
continue
new_name = name[:-len(i) - 1]
new_name = OutputTargetMap.rm_so_version.sub('', new_name)
candidates += ['{}.{}'.format(new_name, i)]
for i in candidates:
keys += [self._rel_artifact_key(i), os.path.basename(i), self._base_artifact_key(i)]
return self._return_first_valid_key(keys)
def generated(self, name: str) -> T.Optional[T.Union['ConverterTarget', 'ConverterCustomTarget']]:
return self._return_first_valid_key([self._rel_generated_file_key(name), self._base_generated_file_key(name)])
# Utility functions to generate local keys
def _rel_path(self, fname: str) -> T.Optional[str]:
fname = os.path.normpath(os.path.join(self.build_dir, fname))
if os.path.commonpath([self.build_dir, fname]) != self.build_dir:
return None
return os.path.relpath(fname, self.build_dir)
def _target_key(self, tgt_name: str) -> str:
return '__tgt_{}__'.format(tgt_name)
def _rel_generated_file_key(self, fname: str) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relgen_{}__'.format(path) if path else None
def _base_generated_file_key(self, fname: str) -> str:
return '__gen_{}__'.format(os.path.basename(fname))
def _rel_artifact_key(self, fname: str) -> T.Optional[str]:
path = self._rel_path(fname)
return '__relart_{}__'.format(path) if path else None
def _base_artifact_key(self, fname: str) -> str:
return '__art_{}__'.format(os.path.basename(fname))
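    # Hedged illustration of the key scheme above (not upstream code):
    #   _target_key('foo')          -> '__tgt_foo__'
    #   _base_artifact_key('a/b.o') -> '__art_b.o__'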
class ConverterTarget:
def __init__(self, target: CMakeTarget, env: Environment):
self.env = env
self.artifacts = target.artifacts
self.src_dir = target.src_dir
self.build_dir = target.build_dir
self.name = target.name
self.cmake_name = target.name
self.full_name = target.full_name
self.type = target.type
self.install = target.install
self.install_dir = ''
self.link_libraries = target.link_libraries
self.link_flags = target.link_flags + target.link_lang_flags
self.depends_raw = []
self.depends = []
if target.install_paths:
self.install_dir = target.install_paths[0]
self.languages = []
self.sources = []
self.generated = []
self.includes = []
self.sys_includes = []
self.link_with = []
self.object_libs = []
self.compile_opts = {}
self.public_compile_opts = []
self.pie = False
# Project default override options (c_std, cpp_std, etc.)
self.override_options = []
# Convert the target name to a valid meson target name
self.name = _sanitize_cmake_name(self.name)
for i in target.files:
# Determine the meson language
lang_cmake_to_meson = {val.lower(): key for key, val in language_map.items()}
lang = lang_cmake_to_meson.get(i.language.lower(), 'c')
if lang not in self.languages:
self.languages += [lang]
if lang not in self.compile_opts:
self.compile_opts[lang] = []
# Add arguments, but avoid duplicates
args = i.flags
args += ['-D{}'.format(x) for x in i.defines]
self.compile_opts[lang] += [x for x in args if x not in self.compile_opts[lang]]
# Handle include directories
self.includes += [x['path'] for x in i.includes if x not in self.includes and not x['isSystem']]
self.sys_includes += [x['path'] for x in i.includes if x not in self.sys_includes and x['isSystem']]
# Add sources to the right array
if i.is_generated:
self.generated += i.sources
else:
self.sources += i.sources
def __repr__(self) -> str:
return '<{}: {}>'.format(self.__class__.__name__, self.name)
std_regex = re.compile(r'([-]{1,2}std=|/std:v?|[-]{1,2}std:)(.*)')
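    # Illustration (assumption, not upstream): std_regex.match('-std=c++17').group(2)
    # is 'c++17', and std_regex.match('/std:c17').group(2) is 'c17'.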
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: str, subdir: str, install_prefix: str, trace: CMakeTraceParser) -> None:
# Detect setting the C and C++ standard
for i in ['c', 'cpp']:
if i not in self.compile_opts:
continue
temp = []
for j in self.compile_opts[i]:
m = ConverterTarget.std_regex.match(j)
if m:
std = m.group(2)
supported = self._all_lang_stds(i)
if std not in supported:
mlog.warning(
'Unknown {0}_std "{1}" -> Ignoring. Try setting the project-'
'level {0}_std if build errors occur. Known '
'{0}_stds are: {2}'.format(i, std, ' '.join(supported)),
once=True
)
continue
self.override_options += ['{}_std={}'.format(i, std)]
elif j in ['-fPIC', '-fpic', '-fPIE', '-fpie']:
self.pie = True
elif j in blacklist_compiler_flags:
pass
else:
temp += [j]
self.compile_opts[i] = temp
# Make sure to force enable -fPIC for OBJECT libraries
if self.type.upper() == 'OBJECT_LIBRARY':
self.pie = True
# Use the CMake trace, if required
tgt = trace.targets.get(self.cmake_name)
if tgt:
self.depends_raw = trace.targets[self.cmake_name].depends
# TODO refactor this copy paste from CMakeDependency for future releases
reg_is_lib = re.compile(r'^(-l[a-zA-Z0-9_]+|-l?pthread)$')
to_process = [self.cmake_name]
processed = []
while len(to_process) > 0:
curr = to_process.pop(0)
if curr in processed or curr not in trace.targets:
continue
tgt = trace.targets[curr]
cfgs = []
cfg = ''
otherDeps = []
libraries = []
mlog.debug(tgt)
if 'INTERFACE_INCLUDE_DIRECTORIES' in tgt.properties:
self.includes += [x for x in tgt.properties['INTERFACE_INCLUDE_DIRECTORIES'] if x]
if 'INTERFACE_LINK_OPTIONS' in tgt.properties:
self.link_flags += [x for x in tgt.properties['INTERFACE_LINK_OPTIONS'] if x]
if 'INTERFACE_COMPILE_DEFINITIONS' in tgt.properties:
self.public_compile_opts += ['-D' + re.sub('^-D', '', x) for x in tgt.properties['INTERFACE_COMPILE_DEFINITIONS'] if x]
if 'INTERFACE_COMPILE_OPTIONS' in tgt.properties:
self.public_compile_opts += [x for x in tgt.properties['INTERFACE_COMPILE_OPTIONS'] if x]
if 'IMPORTED_CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['IMPORTED_CONFIGURATIONS'] if x]
cfg = cfgs[0]
if 'CONFIGURATIONS' in tgt.properties:
cfgs += [x for x in tgt.properties['CONFIGURATIONS'] if x]
cfg = cfgs[0]
                is_debug = self.env.coredata.get_builtin_option('debug')
if is_debug:
if 'DEBUG' in cfgs:
cfg = 'DEBUG'
elif 'RELEASE' in cfgs:
cfg = 'RELEASE'
else:
if 'RELEASE' in cfgs:
cfg = 'RELEASE'
if 'IMPORTED_IMPLIB_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB_{}'.format(cfg)] if x]
elif 'IMPORTED_IMPLIB' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_IMPLIB'] if x]
elif 'IMPORTED_LOCATION_{}'.format(cfg) in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION_{}'.format(cfg)] if x]
elif 'IMPORTED_LOCATION' in tgt.properties:
libraries += [x for x in tgt.properties['IMPORTED_LOCATION'] if x]
if 'LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['LINK_LIBRARIES'] if x]
if 'INTERFACE_LINK_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['INTERFACE_LINK_LIBRARIES'] if x]
if 'IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg) in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES_{}'.format(cfg)] if x]
elif 'IMPORTED_LINK_DEPENDENT_LIBRARIES' in tgt.properties:
otherDeps += [x for x in tgt.properties['IMPORTED_LINK_DEPENDENT_LIBRARIES'] if x]
for j in otherDeps:
if j in trace.targets:
to_process += [j]
elif reg_is_lib.match(j) or os.path.exists(j):
libraries += [j]
for j in libraries:
if j not in self.link_libraries:
self.link_libraries += [j]
processed += [curr]
elif self.type.upper() not in ['EXECUTABLE', 'OBJECT_LIBRARY']:
mlog.warning('CMake: Target', mlog.bold(self.cmake_name), 'not found in CMake trace. This can lead to build errors')
temp = []
for i in self.link_libraries:
# Let meson handle this arcane magic
if ',-rpath,' in i:
continue
if not os.path.isabs(i):
link_with = output_target_map.artifact(i)
if link_with:
self.link_with += [link_with]
continue
temp += [i]
self.link_libraries = temp
# Filter out files that are not supported by the language
supported = list(header_suffixes) + list(obj_suffixes)
for i in self.languages:
supported += list(lang_suffixes[i])
supported = ['.{}'.format(x) for x in supported]
self.sources = [x for x in self.sources if any([x.endswith(y) for y in supported])]
self.generated = [x for x in self.generated if any([x.endswith(y) for y in supported])]
# Make paths relative
def rel_path(x: str, is_header: bool, is_generated: bool) -> T.Optional[str]:
if not os.path.isabs(x):
x = os.path.normpath(os.path.join(self.src_dir, x))
if not os.path.exists(x) and not any([x.endswith(y) for y in obj_suffixes]) and not is_generated:
mlog.warning('CMake: path', mlog.bold(x), 'does not exist.')
mlog.warning(' --> Ignoring. This can lead to build errors.')
return None
if Path(x) in trace.explicit_headers:
return None
if (
os.path.isabs(x)
and os.path.commonpath([x, self.env.get_source_dir()]) == self.env.get_source_dir()
and not (
os.path.commonpath([x, root_src_dir]) == root_src_dir or
os.path.commonpath([x, self.env.get_build_dir()]) == self.env.get_build_dir()
)
):
mlog.warning('CMake: path', mlog.bold(x), 'is inside the root project but', mlog.bold('not'), 'inside the subproject.')
mlog.warning(' --> Ignoring. This can lead to build errors.')
return None
if os.path.isabs(x) and os.path.commonpath([x, self.env.get_build_dir()]) == self.env.get_build_dir():
if is_header:
return os.path.relpath(x, os.path.join(self.env.get_build_dir(), subdir))
else:
return os.path.relpath(x, root_src_dir)
if os.path.isabs(x) and os.path.commonpath([x, root_src_dir]) == root_src_dir:
return os.path.relpath(x, root_src_dir)
return x
def custom_target(x: str):
ctgt = output_target_map.generated(x)
if ctgt:
assert(isinstance(ctgt, ConverterCustomTarget))
ref = ctgt.get_ref(x)
assert(isinstance(ref, CustomTargetReference) and ref.valid())
return ref
return x
build_dir_rel = os.path.relpath(self.build_dir, os.path.join(self.env.get_build_dir(), subdir))
self.includes = list(OrderedSet([rel_path(x, True, False) for x in OrderedSet(self.includes)] + [build_dir_rel]))
self.sys_includes = list(OrderedSet([rel_path(x, True, False) for x in OrderedSet(self.sys_includes)]))
self.sources = [rel_path(x, False, False) for x in self.sources]
self.generated = [rel_path(x, False, True) for x in self.generated]
# Resolve custom targets
self.generated = [custom_target(x) for x in self.generated]
        # Remove the None entries produced by the path filtering above
self.includes = [x for x in self.includes if x is not None]
self.sys_includes = [x for x in self.sys_includes if x is not None]
self.sources = [x for x in self.sources if x is not None]
self.generated = [x for x in self.generated if x is not None]
# Make sure '.' is always in the include directories
if '.' not in self.includes:
self.includes += ['.']
# make install dir relative to the install prefix
if self.install_dir and os.path.isabs(self.install_dir):
if os.path.commonpath([self.install_dir, install_prefix]) == install_prefix:
self.install_dir = os.path.relpath(self.install_dir, install_prefix)
# Remove blacklisted options and libs
def check_flag(flag: str) -> bool:
if flag.lower() in blacklist_link_flags or flag in blacklist_compiler_flags + blacklist_clang_cl_link_flags:
return False
if flag.startswith('/D'):
return False
return True
self.link_libraries = [x for x in self.link_libraries if x.lower() not in blacklist_link_libs]
self.link_flags = [x for x in self.link_flags if check_flag(x)]
        # Handle explicit CMake add_dependencies() calls
for i in self.depends_raw:
tgt = output_target_map.target(i)
if tgt:
self.depends.append(tgt)
def process_object_libs(self, obj_target_list: T.List['ConverterTarget'], linker_workaround: bool):
# Try to detect the object library(s) from the generated input sources
temp = [x for x in self.generated if isinstance(x, str)]
temp = [os.path.basename(x) for x in temp]
temp = [x for x in temp if any([x.endswith('.' + y) for y in obj_suffixes])]
temp = [os.path.splitext(x)[0] for x in temp]
exts = self._all_source_suffixes()
# Temp now stores the source filenames of the object files
for i in obj_target_list:
source_files = [x for x in i.sources + i.generated if isinstance(x, str)]
source_files = [os.path.basename(x) for x in source_files]
for j in temp:
# On some platforms (specifically looking at you Windows with vs20xy backend) CMake does
                # not produce object files with the format `foo.cpp.obj`, instead it skips the language
# suffix and just produces object files like `foo.obj`. Thus we have to do our best to
# undo this step and guess the correct language suffix of the object file. This is done
# by trying all language suffixes meson knows and checking if one of them fits.
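                # Illustration (assumption, not upstream): for an object file
                # 'foo.obj' the stripped stem j is 'foo', so the candidates
                # become ['foo', 'foo.c', 'foo.cpp', ...] and each one is
                # checked against the target's source file basenames.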
candidates = [j] # type: T.List[str]
if not any([j.endswith('.' + x) for x in exts]):
mlog.warning('Object files do not contain source file extensions, thus falling back to guessing them.', once=True)
candidates += ['{}.{}'.format(j, x) for x in exts]
if any([x in source_files for x in candidates]):
if linker_workaround:
self._append_objlib_sources(i)
else:
self.includes += i.includes
self.includes = list(OrderedSet(self.includes))
self.object_libs += [i]
break
# Filter out object files from the sources
self.generated = [x for x in self.generated if not isinstance(x, str) or not any([x.endswith('.' + y) for y in obj_suffixes])]
def _append_objlib_sources(self, tgt: 'ConverterTarget') -> None:
self.includes += tgt.includes
self.sources += tgt.sources
self.generated += tgt.generated
self.sources = list(OrderedSet(self.sources))
self.generated = list(OrderedSet(self.generated))
self.includes = list(OrderedSet(self.includes))
# Inherit compiler arguments since they may be required for building
for lang, opts in tgt.compile_opts.items():
if lang not in self.compile_opts:
self.compile_opts[lang] = []
self.compile_opts[lang] += [x for x in opts if x not in self.compile_opts[lang]]
@lru_cache(maxsize=None)
def _all_source_suffixes(self) -> T.List[str]:
suffixes = [] # type: T.List[str]
for exts in lang_suffixes.values():
suffixes += [x for x in exts]
return suffixes
@lru_cache(maxsize=None)
def _all_lang_stds(self, lang: str) -> T.List[str]:
lang_opts = self.env.coredata.compiler_options.build.get(lang, None)
if not lang_opts or 'std' not in lang_opts:
return []
return lang_opts['std'].choices
def process_inter_target_dependencies(self):
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(OrderedSet(new_deps))
def cleanup_dependencies(self):
        # Clear the dependencies from targets that were moved from
if self.meson_func() in transfer_dependencies_from:
self.depends = []
def meson_func(self) -> str:
return target_type_map.get(self.type.upper())
def log(self) -> None:
mlog.log('Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- artifacts: ', mlog.bold(str(self.artifacts)))
mlog.log(' -- full_name: ', mlog.bold(self.full_name))
mlog.log(' -- type: ', mlog.bold(self.type))
mlog.log(' -- install: ', mlog.bold('true' if self.install else 'false'))
mlog.log(' -- install_dir: ', mlog.bold(self.install_dir))
mlog.log(' -- link_libraries: ', mlog.bold(str(self.link_libraries)))
mlog.log(' -- link_with: ', mlog.bold(str(self.link_with)))
mlog.log(' -- object_libs: ', mlog.bold(str(self.object_libs)))
mlog.log(' -- link_flags: ', mlog.bold(str(self.link_flags)))
mlog.log(' -- languages: ', mlog.bold(str(self.languages)))
mlog.log(' -- includes: ', mlog.bold(str(self.includes)))
mlog.log(' -- sys_includes: ', mlog.bold(str(self.sys_includes)))
mlog.log(' -- sources: ', mlog.bold(str(self.sources)))
mlog.log(' -- generated: ', mlog.bold(str(self.generated)))
mlog.log(' -- pie: ', mlog.bold('true' if self.pie else 'false'))
mlog.log(' -- override_opts: ', mlog.bold(str(self.override_options)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
mlog.log(' -- options:')
for key, val in self.compile_opts.items():
mlog.log(' -', key, '=', mlog.bold(str(val)))
class CustomTargetReference:
def __init__(self, ctgt: 'ConverterCustomTarget', index: int):
self.ctgt = ctgt # type: ConverterCustomTarget
self.index = index # type: int
def __repr__(self) -> str:
if self.valid():
return '<{}: {} [{}]>'.format(self.__class__.__name__, self.ctgt.name, self.ctgt.outputs[self.index])
else:
return '<{}: INVALID REFERENCE>'.format(self.__class__.__name__)
def valid(self) -> bool:
return self.ctgt is not None and self.index >= 0
def filename(self) -> str:
return self.ctgt.outputs[self.index]
class ConverterCustomTarget:
tgt_counter = 0 # type: int
out_counter = 0 # type: int
def __init__(self, target: CMakeGeneratorTarget):
assert(target.current_bin_dir is not None)
assert(target.current_src_dir is not None)
self.name = target.name
if not self.name:
self.name = 'custom_tgt_{}'.format(ConverterCustomTarget.tgt_counter)
ConverterCustomTarget.tgt_counter += 1
self.cmake_name = str(self.name)
self.original_outputs = list(target.outputs)
self.outputs = [os.path.basename(x) for x in self.original_outputs]
self.conflict_map = {}
self.command = target.command
self.working_dir = target.working_dir
self.depends_raw = target.depends
self.inputs = []
self.depends = []
self.current_bin_dir = Path(target.current_bin_dir)
self.current_src_dir = Path(target.current_src_dir)
# Convert the target name to a valid meson target name
self.name = _sanitize_cmake_name(self.name)
def __repr__(self) -> str:
return '<{}: {} {}>'.format(self.__class__.__name__, self.name, self.outputs)
def postprocess(self, output_target_map: OutputTargetMap, root_src_dir: str, subdir: str, all_outputs: T.List[str]) -> None:
# Default the working directory to ${CMAKE_CURRENT_BINARY_DIR}
if not self.working_dir:
self.working_dir = self.current_bin_dir.as_posix()
# relative paths in the working directory are always relative
# to ${CMAKE_CURRENT_BINARY_DIR}
if not os.path.isabs(self.working_dir):
self.working_dir = (self.current_bin_dir / self.working_dir).as_posix()
# Modify the original outputs if they are relative. Again,
# relative paths are relative to ${CMAKE_CURRENT_BINARY_DIR}
def ensure_absolute(x: Path) -> Path:
if x.is_absolute():
return x
else:
return self.current_bin_dir / x
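        # Illustration (assumption, not upstream): with current_bin_dir
        # '/bld/sub', ensure_absolute(Path('out.c')) yields Path('/bld/sub/out.c'),
        # while absolute paths pass through unchanged.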
self.original_outputs = [ensure_absolute(Path(x)).as_posix() for x in self.original_outputs]
# Ensure that there is no duplicate output in the project so
# that meson can handle cases where the same filename is
# generated in multiple directories
temp_outputs = [] # type: T.List[str]
for i in self.outputs:
if i in all_outputs:
old = str(i)
i = 'c{}_{}'.format(ConverterCustomTarget.out_counter, i)
ConverterCustomTarget.out_counter += 1
self.conflict_map[old] = i
all_outputs += [i]
temp_outputs += [i]
self.outputs = temp_outputs
# Check if the command is a build target
commands = []
for i in self.command:
assert(isinstance(i, list))
cmd = []
for j in i:
if not j:
continue
target = output_target_map.executable(j)
cmd += [target] if target else [j]
commands += [cmd]
self.command = commands
# If the custom target does not declare any output, create a dummy
        # one that can be used as a dependency.
if not self.outputs:
self.outputs = [self.name + '.h']
# Check dependencies and input files
root = Path(root_src_dir)
for i in self.depends_raw:
if not i:
continue
raw = Path(i)
art = output_target_map.artifact(i)
tgt = output_target_map.target(i)
gen = output_target_map.generated(i)
rel_to_root = None
try:
rel_to_root = raw.relative_to(root)
except ValueError:
rel_to_root = None
# First check for existing files. Only then check for existing
# targets, etc. This reduces the chance of misdetecting input files
# as outputs from other targets.
# See https://github.com/mesonbuild/meson/issues/6632
if not raw.is_absolute() and (self.current_src_dir / raw).exists():
self.inputs += [(self.current_src_dir / raw).relative_to(root).as_posix()]
elif raw.is_absolute() and raw.exists() and rel_to_root is not None:
self.inputs += [rel_to_root.as_posix()]
elif art:
self.depends += [art]
elif tgt:
self.depends += [tgt]
elif gen:
self.inputs += [gen.get_ref(i)]
def process_inter_target_dependencies(self):
# Move the dependencies from all transfer_dependencies_from to the target
to_process = list(self.depends)
processed = []
new_deps = []
for i in to_process:
processed += [i]
if isinstance(i, ConverterTarget) and i.meson_func() in transfer_dependencies_from:
to_process += [x for x in i.depends if x not in processed]
else:
new_deps += [i]
self.depends = list(OrderedSet(new_deps))
def get_ref(self, fname: str) -> T.Optional[CustomTargetReference]:
fname = os.path.basename(fname)
try:
if fname in self.conflict_map:
fname = self.conflict_map[fname]
idx = self.outputs.index(fname)
return CustomTargetReference(self, idx)
except ValueError:
return None
def log(self) -> None:
mlog.log('Custom Target', mlog.bold(self.name), '({})'.format(self.cmake_name))
mlog.log(' -- command: ', mlog.bold(str(self.command)))
mlog.log(' -- outputs: ', mlog.bold(str(self.outputs)))
mlog.log(' -- conflict_map: ', mlog.bold(str(self.conflict_map)))
mlog.log(' -- working_dir: ', mlog.bold(str(self.working_dir)))
mlog.log(' -- depends_raw: ', mlog.bold(str(self.depends_raw)))
mlog.log(' -- inputs: ', mlog.bold(str(self.inputs)))
mlog.log(' -- depends: ', mlog.bold(str(self.depends)))
class CMakeAPI(Enum):
SERVER = 1
FILE = 2
class CMakeInterpreter:
def __init__(self, build: 'Build', subdir: str, src_dir: str, install_prefix: str, env: Environment, backend: 'Backend'):
assert(hasattr(backend, 'name'))
self.build = build
self.subdir = subdir
self.src_dir = src_dir
self.build_dir_rel = os.path.join(subdir, '__CMake_build')
self.build_dir = os.path.join(env.get_build_dir(), self.build_dir_rel)
self.install_prefix = install_prefix
self.env = env
self.backend_name = backend.name
self.linkers = set() # type: T.Set[str]
self.cmake_api = CMakeAPI.SERVER
self.client = CMakeClient(self.env)
self.fileapi = CMakeFileAPI(self.build_dir)
# Raw CMake results
self.bs_files = []
self.codemodel_configs = None
self.raw_trace = None
# Analysed data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = [] # type: T.List[ConverterCustomTarget]
self.trace = CMakeTraceParser('', '') # Will be replaced in analyse
self.output_target_map = OutputTargetMap(self.build_dir)
# Generated meson data
self.generated_targets = {}
self.internal_name_map = {}
def configure(self, extra_cmake_options: T.List[str]) -> None:
for_machine = MachineChoice.HOST # TODO make parameter
# Find CMake
cmake_exe = CMakeExecutor(self.env, '>=3.7', for_machine)
if not cmake_exe.found():
raise CMakeException('Unable to find CMake')
self.trace = CMakeTraceParser(cmake_exe.version(), self.build_dir, permissive=True)
preload_file = pkg_resources.resource_filename('mesonbuild', 'cmake/data/preload.cmake')
        # Prefer CMAKE_PROJECT_INCLUDE over CMAKE_TOOLCHAIN_FILE if possible,
# since CMAKE_PROJECT_INCLUDE was actually designed for code injection.
preload_var = 'CMAKE_PROJECT_INCLUDE'
if version_compare(cmake_exe.version(), '<3.15'):
preload_var = 'CMAKE_TOOLCHAIN_FILE'
generator = backend_generator_map[self.backend_name]
cmake_args = []
trace_args = self.trace.trace_args()
cmcmp_args = ['-DCMAKE_POLICY_WARNING_{}=OFF'.format(x) for x in disable_policy_warnings]
pload_args = ['-D{}={}'.format(preload_var, str(preload_file))]
if version_compare(cmake_exe.version(), '>=3.14'):
self.cmake_api = CMakeAPI.FILE
self.fileapi.setup_request()
# Map meson compiler to CMake variables
for lang, comp in self.env.coredata.compilers[for_machine].items():
if lang not in language_map:
continue
self.linkers.add(comp.get_linker_id())
cmake_lang = language_map[lang]
exelist = comp.get_exelist()
if len(exelist) == 1:
cmake_args += ['-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[0])]
elif len(exelist) == 2:
cmake_args += ['-DCMAKE_{}_COMPILER_LAUNCHER={}'.format(cmake_lang, exelist[0]),
'-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[1])]
if hasattr(comp, 'get_linker_exelist') and comp.get_id() == 'clang-cl':
cmake_args += ['-DCMAKE_LINKER={}'.format(comp.get_linker_exelist()[0])]
cmake_args += ['-G', generator]
cmake_args += ['-DCMAKE_INSTALL_PREFIX={}'.format(self.install_prefix)]
cmake_args += extra_cmake_options
# Run CMake
mlog.log()
with mlog.nested():
mlog.log('Configuring the build directory with', mlog.bold('CMake'), 'version', mlog.cyan(cmake_exe.version()))
mlog.log(mlog.bold('Running:'), ' '.join(cmake_args))
mlog.log(mlog.bold(' - build directory: '), self.build_dir)
mlog.log(mlog.bold(' - source directory: '), self.src_dir)
mlog.log(mlog.bold(' - trace args: '), ' '.join(trace_args))
mlog.log(mlog.bold(' - preload file: '), str(preload_file))
mlog.log(mlog.bold(' - disabled policy warnings:'), '[{}]'.format(', '.join(disable_policy_warnings)))
mlog.log()
os.makedirs(self.build_dir, exist_ok=True)
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
final_args = cmake_args + trace_args + cmcmp_args + pload_args + [self.src_dir]
cmake_exe.set_exec_mode(print_cmout=True, always_capture_stderr=self.trace.requires_stderr())
rc, _, self.raw_trace = cmake_exe.call(final_args, self.build_dir, env=os_env, disable_cache=True)
mlog.log()
h = mlog.green('SUCCEEDED') if rc == 0 else mlog.red('FAILED')
mlog.log('CMake configuration:', h)
if rc != 0:
raise CMakeException('Failed to configure the CMake subproject')
def initialise(self, extra_cmake_options: T.List[str]) -> None:
# Run configure the old way because doing it
# with the server doesn't work for some reason
# Additionally, the File API requires a configure anyway
self.configure(extra_cmake_options)
        # Continue with the file API, if supported
if self.cmake_api is CMakeAPI.FILE:
# Parse the result
self.fileapi.load_reply()
# Load the buildsystem file list
cmake_files = self.fileapi.get_cmake_sources()
self.bs_files = [x.file for x in cmake_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(x, self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(OrderedSet(self.bs_files))
# Load the codemodel configurations
self.codemodel_configs = self.fileapi.get_cmake_configurations()
return
with self.client.connect():
generator = backend_generator_map[self.backend_name]
self.client.do_handshake(self.src_dir, self.build_dir, generator, 1)
# Do a second configure to initialise the server
self.client.query_checked(RequestConfigure(), 'CMake server configure')
# Generate the build system files
self.client.query_checked(RequestCompute(), 'Generating build system files')
# Get CMake build system files
bs_reply = self.client.query_checked(RequestCMakeInputs(), 'Querying build system files')
# Now get the CMake code model
cm_reply = self.client.query_checked(RequestCodeModel(), 'Querying the CMake code model')
src_dir = bs_reply.src_dir
self.bs_files = [x.file for x in bs_reply.build_files if not x.is_cmake and not x.is_temp]
self.bs_files = [os.path.relpath(os.path.join(src_dir, x), self.env.get_source_dir()) for x in self.bs_files]
self.bs_files = list(OrderedSet(self.bs_files))
self.codemodel_configs = cm_reply.configs
def analyse(self) -> None:
if self.codemodel_configs is None:
raise CMakeException('CMakeInterpreter was not initialized')
# Clear analyser data
self.project_name = ''
self.languages = []
self.targets = []
self.custom_targets = []
# Parse the trace
self.trace.parse(self.raw_trace)
# Find all targets
added_target_names = [] # type: T.List[str]
for i in self.codemodel_configs:
for j in i.projects:
if not self.project_name:
self.project_name = j.name
for k in j.targets:
# Avoid duplicate targets from different configurations and known
# dummy CMake internal target types
if k.type not in skip_targets and k.name not in added_target_names:
added_target_names += [k.name]
self.targets += [ConverterTarget(k, self.env)]
# Add interface targets from trace, if not already present.
# This step is required because interface targets were removed from
# the CMake file API output.
api_target_name_list = [x.name for x in self.targets]
for i in self.trace.targets.values():
if i.type != 'INTERFACE' or i.name in api_target_name_list or i.imported:
continue
dummy = CMakeTarget({
'name': i.name,
'type': 'INTERFACE_LIBRARY',
'sourceDirectory': self.src_dir,
'buildDirectory': self.build_dir,
})
self.targets += [ConverterTarget(dummy, self.env)]
for i in self.trace.custom_targets:
self.custom_targets += [ConverterCustomTarget(i)]
# generate the output_target_map
for i in [*self.targets, *self.custom_targets]:
self.output_target_map.add(i)
# First pass: Basic target cleanup
object_libs = []
custom_target_outputs = [] # type: T.List[str]
for i in self.custom_targets:
i.postprocess(self.output_target_map, self.src_dir, self.subdir, custom_target_outputs)
for i in self.targets:
i.postprocess(self.output_target_map, self.src_dir, self.subdir, self.install_prefix, self.trace)
if i.type == 'OBJECT_LIBRARY':
object_libs += [i]
self.languages += [x for x in i.languages if x not in self.languages]
# Second pass: Detect object library dependencies
for i in self.targets:
i.process_object_libs(object_libs, self._object_lib_workaround())
# Third pass: Reassign dependencies to avoid some loops
for i in self.targets:
i.process_inter_target_dependencies()
for i in self.custom_targets:
i.process_inter_target_dependencies()
        # Fourth pass: Remove reassigned dependencies
for i in self.targets:
i.cleanup_dependencies()
mlog.log('CMake project', mlog.bold(self.project_name), 'has', mlog.bold(str(len(self.targets) + len(self.custom_targets))), 'build targets.')
def pretend_to_be_meson(self, options: TargetOptions) -> CodeBlockNode:
if not self.project_name:
raise CMakeException('CMakeInterpreter was not analysed')
def token(tid: str = 'string', val='') -> Token:
return Token(tid, self.subdir, 0, 0, 0, None, val)
def string(value: str) -> StringNode:
return StringNode(token(val=value))
def id_node(value: str) -> IdNode:
return IdNode(token(val=value))
def number(value: int) -> NumberNode:
return NumberNode(token(val=value))
def nodeify(value):
if isinstance(value, str):
return string(value)
elif isinstance(value, bool):
return BooleanNode(token(val=value))
elif isinstance(value, int):
return number(value)
elif isinstance(value, list):
return array(value)
return value
def indexed(node: BaseNode, index: int) -> IndexNode:
return IndexNode(node, nodeify(index))
def array(elements) -> ArrayNode:
args = ArgumentNode(token())
if not isinstance(elements, list):
elements = [args]
args.arguments += [nodeify(x) for x in elements if x is not None]
return ArrayNode(args, 0, 0, 0, 0)
def function(name: str, args=None, kwargs=None) -> FunctionNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None}
func_n = FunctionNode(self.subdir, 0, 0, 0, 0, name, args_n)
return func_n
def method(obj: BaseNode, name: str, args=None, kwargs=None) -> MethodNode:
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
args_n = ArgumentNode(token())
if not isinstance(args, list):
args = [args]
args_n.arguments = [nodeify(x) for x in args if x is not None]
args_n.kwargs = {id_node(k): nodeify(v) for k, v in kwargs.items() if v is not None}
return MethodNode(self.subdir, 0, 0, obj, name, args_n)
def assign(var_name: str, value: BaseNode) -> AssignmentNode:
return AssignmentNode(self.subdir, 0, 0, var_name, value)
# Generate the root code block and the project function call
root_cb = CodeBlockNode(token())
root_cb.lines += [function('project', [self.project_name] + self.languages)]
# Add the run script for custom commands
# Add the targets
processing = []
processed = {}
name_map = {}
def extract_tgt(tgt: T.Union[ConverterTarget, ConverterCustomTarget, CustomTargetReference]) -> IdNode:
tgt_name = None
if isinstance(tgt, (ConverterTarget, ConverterCustomTarget)):
tgt_name = tgt.name
elif isinstance(tgt, CustomTargetReference):
tgt_name = tgt.ctgt.name
assert(tgt_name is not None and tgt_name in processed)
res_var = processed[tgt_name]['tgt']
return id_node(res_var) if res_var else None
def detect_cycle(tgt: T.Union[ConverterTarget, ConverterCustomTarget]) -> None:
if tgt.name in processing:
raise CMakeException('Cycle in CMake inputs/dependencies detected')
processing.append(tgt.name)
def resolve_ctgt_ref(ref: CustomTargetReference) -> BaseNode:
tgt_var = extract_tgt(ref)
if len(ref.ctgt.outputs) == 1:
return tgt_var
else:
return indexed(tgt_var, ref.index)
def process_target(tgt: ConverterTarget):
detect_cycle(tgt)
# First handle inter target dependencies
link_with = []
            object_libs = []  # type: T.List[IdNode]
sources = []
generated = []
generated_filenames = []
custom_targets = []
dependencies = []
for i in tgt.link_with:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
link_with += [extract_tgt(i)]
for i in tgt.object_libs:
assert(isinstance(i, ConverterTarget))
if i.name not in processed:
process_target(i)
                object_libs += [extract_tgt(i)]
for i in tgt.depends:
if not isinstance(i, ConverterCustomTarget):
continue
if i.name not in processed:
process_custom_target(i)
dependencies += [extract_tgt(i)]
# Generate the source list and handle generated sources
for i in tgt.sources + tgt.generated:
if isinstance(i, CustomTargetReference):
if i.ctgt.name not in processed:
process_custom_target(i.ctgt)
generated += [resolve_ctgt_ref(i)]
generated_filenames += [i.filename()]
if i.ctgt not in custom_targets:
custom_targets += [i.ctgt]
else:
sources += [i]
# Add all header files from all used custom targets. This
# ensures that all custom targets are built before any
# sources of the current target are compiled and thus all
# header files are present. This step is necessary because
# CMake always ensures that a custom target is executed
# before another target if at least one output is used.
for i in custom_targets:
for j in i.outputs:
if not is_header(j) or j in generated_filenames:
continue
generated += [resolve_ctgt_ref(i.get_ref(j))]
generated_filenames += [j]
# Determine the meson function to use for the build target
tgt_func = tgt.meson_func()
if not tgt_func:
raise CMakeException('Unknown target type "{}"'.format(tgt.type))
# Determine the variable names
inc_var = '{}_inc'.format(tgt.name)
dir_var = '{}_dir'.format(tgt.name)
sys_var = '{}_sys'.format(tgt.name)
src_var = '{}_src'.format(tgt.name)
dep_var = '{}_dep'.format(tgt.name)
tgt_var = tgt.name
install_tgt = options.get_install(tgt.cmake_name, tgt.install)
# Generate target kwargs
tgt_kwargs = {
'build_by_default': install_tgt,
'link_args': options.get_link_args(tgt.cmake_name, tgt.link_flags + tgt.link_libraries),
'link_with': link_with,
'include_directories': id_node(inc_var),
'install': install_tgt,
'override_options': options.get_override_options(tgt.cmake_name, tgt.override_options),
                'objects': [method(x, 'extract_all_objects') for x in object_libs],
}
# Only set if installed and only override if it is set
if install_tgt and tgt.install_dir:
tgt_kwargs['install_dir'] = tgt.install_dir
# Handle compiler args
for key, val in tgt.compile_opts.items():
tgt_kwargs['{}_args'.format(key)] = options.get_compile_args(tgt.cmake_name, key, val)
            # Handle -fPIC, etc
if tgt_func == 'executable':
tgt_kwargs['pie'] = tgt.pie
elif tgt_func == 'static_library':
tgt_kwargs['pic'] = tgt.pie
# declare_dependency kwargs
dep_kwargs = {
'link_args': tgt.link_flags + tgt.link_libraries,
'link_with': id_node(tgt_var),
'compile_args': tgt.public_compile_opts,
'include_directories': id_node(inc_var),
}
if dependencies:
generated += dependencies
# Generate the function nodes
dir_node = assign(dir_var, function('include_directories', tgt.includes))
sys_node = assign(sys_var, function('include_directories', tgt.sys_includes, {'is_system': True}))
inc_node = assign(inc_var, array([id_node(dir_var), id_node(sys_var)]))
node_list = [dir_node, sys_node, inc_node]
if tgt_func == 'header_only':
del dep_kwargs['link_with']
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
src_var = None
tgt_var = None
else:
src_node = assign(src_var, function('files', sources))
tgt_node = assign(tgt_var, function(tgt_func, [tgt_var, [id_node(src_var)] + generated], tgt_kwargs))
node_list += [src_node, tgt_node]
if tgt_func in ['static_library', 'shared_library']:
dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
node_list += [dep_node]
else:
dep_var = None
# Add the nodes to the ast
root_cb.lines += node_list
processed[tgt.name] = {'inc': inc_var, 'src': src_var, 'dep': dep_var, 'tgt': tgt_var, 'func': tgt_func}
name_map[tgt.cmake_name] = tgt.name
def process_custom_target(tgt: ConverterCustomTarget) -> None:
            # CMake allows specifying multiple commands in a custom target.
# To map this to meson, a helper script is used to execute all
# commands in order. This additionally allows setting the working
# directory.
detect_cycle(tgt)
tgt_var = tgt.name # type: str
def resolve_source(x: T.Any) -> T.Any:
if isinstance(x, ConverterTarget):
if x.name not in processed:
process_target(x)
return extract_tgt(x)
if isinstance(x, ConverterCustomTarget):
if x.name not in processed:
process_custom_target(x)
return extract_tgt(x)
elif isinstance(x, CustomTargetReference):
if x.ctgt.name not in processed:
process_custom_target(x.ctgt)
return resolve_ctgt_ref(x)
else:
return x
# Generate the command list
command = []
command += mesonlib.meson_command
command += ['--internal', 'cmake_run_ctgt']
command += ['-o', '@OUTPUT@']
if tgt.original_outputs:
command += ['-O'] + tgt.original_outputs
command += ['-d', tgt.working_dir]
# Generate the commands. Subcommands are separated by ';;;'
for cmd in tgt.command:
command += [resolve_source(x) for x in cmd] + [';;;']
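            # Illustration (assumption, not upstream): the subcommands
            # [['gen', '-o', 'x'], ['cp', 'x', 'y']] flatten to
            # [..., 'gen', '-o', 'x', ';;;', 'cp', 'x', 'y', ';;;'].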
tgt_kwargs = {
'input': [resolve_source(x) for x in tgt.inputs],
'output': tgt.outputs,
'command': command,
'depends': [resolve_source(x) for x in tgt.depends],
}
root_cb.lines += [assign(tgt_var, function('custom_target', [tgt.name], tgt_kwargs))]
processed[tgt.name] = {'inc': None, 'src': None, 'dep': None, 'tgt': tgt_var, 'func': 'custom_target'}
name_map[tgt.cmake_name] = tgt.name
# Now generate the target function calls
for i in self.custom_targets:
if i.name not in processed:
process_custom_target(i)
for i in self.targets:
if i.name not in processed:
process_target(i)
self.generated_targets = processed
self.internal_name_map = name_map
return root_cb
def target_info(self, target: str) -> T.Optional[T.Dict[str, str]]:
# Try resolving the target name
        # start by checking for an exact match (after applying the 'cm_' name prefix)
prx_tgt = _sanitize_cmake_name(target)
if prx_tgt in self.generated_targets:
return self.generated_targets[prx_tgt]
# check if there exists a name mapping
if target in self.internal_name_map:
target = self.internal_name_map[target]
assert(target in self.generated_targets)
return self.generated_targets[target]
return None
def target_list(self) -> T.List[str]:
return list(self.internal_name_map.keys())
def _object_lib_workaround(self) -> bool:
return 'link' in self.linkers and self.backend_name.startswith('vs')
|
# coding: utf-8
"""
CMS-related job helpers.
"""
__all__ = ["CMSJobDashboard"]
import time
import socket
import threading
import six
import law
from law.job.base import BaseJobManager
from law.job.dashboard import BaseJobDashboard
class CMSJobDashboard(BaseJobDashboard):
"""
This CMS job dashboard interface requires ``apmon`` to be installed on your system.
See http://monalisa.caltech.edu/monalisa__Documentation__ApMon_User_Guide__apmon_ug_py.html and
https://twiki.cern.ch/twiki/bin/view/ArdaGrid/CMSJobMonitoringCollector.
"""
PENDING = "pending"
RUNNING = "running"
CANCELLED = "cancelled"
POSTPROC = "postproc"
SUCCESS = "success"
FAILED = "failed"
tracking_url = "http://dashb-cms-job.cern.ch/dashboard/templates/task-analysis/#" + \
"table=Jobs&p=1&activemenu=2&refresh=60&tid={dashboard_task_id}"
persistent_attributes = ["task_id", "cms_user", "voms_user", "init_timestamp"]
def __init__(self, task, cms_user, voms_user, apmon_config=None, log_level="WARNING",
max_rate=20, task_type="analysis", site=None, executable="law", application=None,
application_version=None, submission_tool="law", submission_type="direct",
submission_ui=None, init_timestamp=None):
super(CMSJobDashboard, self).__init__(max_rate=max_rate)
# setup the apmon thread
try:
self.apmon = Apmon(apmon_config, self.max_rate, log_level)
except ImportError as e:
e.message += " (required for {})".format(self.__class__.__name__)
e.args = (e.message,) + e.args[1:]
raise e
# get the task family for use as default application name
task_family = task.get_task_family() if isinstance(task, law.Task) else task
# mandatory (persistent) attributes
self.task_id = task.task_id if isinstance(task, law.Task) else task
self.cms_user = cms_user
self.voms_user = voms_user
self.init_timestamp = init_timestamp or self.create_timestamp()
# optional attributes
self.task_type = task_type
self.site = site
self.executable = executable
self.application = application or task_family
self.application_version = application_version or self.task_id.rsplit("_", 1)[1]
self.submission_tool = submission_tool
self.submission_type = submission_type
self.submission_ui = submission_ui or socket.gethostname()
# start the apmon thread
self.apmon.daemon = True
self.apmon.start()
def __del__(self):
if getattr(self, "apmon", None) and self.apmon.is_alive():
self.apmon.stop()
self.apmon.join()
@classmethod
def create_timestamp(cls):
return time.strftime("%y%m%d_%H%M%S")
@classmethod
def create_dashboard_task_id(cls, task_id, cms_user, timestamp=None):
if not timestamp:
timestamp = cls.create_timestamp()
return "{}:{}_{}".format(timestamp, cms_user, task_id)
@classmethod
def create_dashboard_job_id(cls, job_num, job_id, attempt=0):
return "{}_{}_{}".format(job_num, job_id, attempt)
@classmethod
def params_from_status(cls, dashboard_status, fail_code=1):
if dashboard_status == cls.PENDING:
return {"StatusValue": "pending", "SyncCE": None}
elif dashboard_status == cls.RUNNING:
return {"StatusValue": "running"}
elif dashboard_status == cls.CANCELLED:
return {"StatusValue": "cancelled", "SyncCE": None}
elif dashboard_status == cls.POSTPROC:
return {"StatusValue": "running", "JobExitCode": 0}
elif dashboard_status == cls.SUCCESS:
return {"StatusValue": "success", "JobExitCode": 0}
elif dashboard_status == cls.FAILED:
return {"StatusValue": "failed", "JobExitCode": fail_code}
else:
raise ValueError("invalid dashboard status '{}'".format(dashboard_status))
@classmethod
def map_status(cls, job_status, event):
# when starting with "status.", event must end with the job status
if event.startswith("status.") and event.split(".", 1)[-1] != job_status:
raise ValueError("event '{}' does not match job status '{}'".format(event, job_status))
status = lambda attr: "status.{}".format(getattr(BaseJobManager, attr))
return {
"action.submit": cls.PENDING,
"action.cancel": cls.CANCELLED,
"custom.running": cls.RUNNING,
"custom.postproc": cls.POSTPROC,
"custom.failed": cls.FAILED,
status("FINISHED"): cls.SUCCESS,
}.get(event)
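    # Illustration (assumption, not part of the law source): an "action.submit"
    # event maps to cls.PENDING; unknown events fall through to None via .get().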
def remote_hook_file(self):
return law.util.rel_path(__file__, "scripts", "cmsdashb_hooks.sh")
def remote_hook_data(self, job_num, attempt):
data = [
"task_id='{}'".format(self.task_id),
"cms_user='{}'".format(self.cms_user),
"voms_user='{}'".format(self.voms_user),
"init_timestamp='{}'".format(self.init_timestamp),
"job_num={}".format(job_num),
"attempt={}".format(attempt),
]
if self.site:
data.append("site='{}'".format(self.site))
return data
def create_tracking_url(self):
dashboard_task_id = self.create_dashboard_task_id(self.task_id, self.cms_user,
self.init_timestamp)
return self.tracking_url.format(dashboard_task_id=dashboard_task_id)
def create_message(self, job_data, event, job_num, attempt=0, custom_params=None, **kwargs):
# we need the voms user, which must start with "/CN="
voms_user = self.voms_user
if not voms_user:
return
if not voms_user.startswith("/CN="):
voms_user = "/CN=" + voms_user
        # map the job status to a valid dashboard status
dashboard_status = self.map_status(job_data.get("status"), event)
if not dashboard_status:
return
# build the dashboard task id
dashboard_task_id = self.create_dashboard_task_id(self.task_id, self.cms_user,
self.init_timestamp)
# build the id of the particular job
dashboard_job_id = self.create_dashboard_job_id(job_num, job_data["job_id"],
attempt=attempt)
# build the parameters to send
params = {
"TaskId": dashboard_task_id,
"JobId": dashboard_job_id,
"GridJobId": job_data["job_id"],
"CMSUser": self.cms_user,
"GridName": voms_user,
"JSToolUI": kwargs.get("submission_ui", self.submission_ui),
}
# add optional params
params.update({
"TaskType": kwargs.get("task_type", self.task_type),
"SyncCE": kwargs.get("site", self.site),
"Executable": kwargs.get("executable", self.executable),
"Application": kwargs.get("application", self.application),
"ApplicationVersion": kwargs.get("application_version", self.application_version),
"JSTool": kwargs.get("submission_tool", self.submission_tool),
"SubmissionType": kwargs.get("submission_type", self.submission_type),
})
# add status params
params.update(self.params_from_status(dashboard_status, fail_code=job_data.get("code", 1)))
# add custom params
if custom_params:
params.update(custom_params)
        # finally filter out None values and convert everything to strings
params = {key: str(value) for key, value in six.iteritems(params) if value is not None}
return (dashboard_task_id, dashboard_job_id, params)
@BaseJobDashboard.cache_by_status
def publish(self, *args, **kwargs):
message = self.create_message(*args, **kwargs)
if message:
self.apmon.send(*message)
apmon_lock = threading.Lock()
class Apmon(threading.Thread):
default_config = {
"cms-jobmon.cern.ch:8884": {
"sys_monitoring": 0,
"general_info": 0,
"job_monitoring": 0,
},
}
def __init__(self, config=None, max_rate=20, log_level="INFO"):
super(Apmon, self).__init__()
import apmon
log_level = getattr(apmon.Logger, log_level.upper())
self._apmon = apmon.ApMon(config or self.default_config, log_level)
self._apmon.maxMsgRate = int(max_rate * 1.5)
# hotfix of a bug occurring in apmon for too large pids
for key, value in self._apmon.senderRef.items():
value["INSTANCE_ID"] = value["INSTANCE_ID"] & 0x7fffffff
self._max_rate = max_rate
self._queue = six.moves.queue.Queue()
self._stop_event = threading.Event()
def send(self, *args, **kwargs):
self._queue.put((args, kwargs))
def _send(self, *args, **kwargs):
self._apmon.sendParameters(*args, **kwargs)
def stop(self):
self._stop_event.set()
def run(self):
while True:
# handling stopping
self._stop_event.wait(0.5)
if self._stop_event.is_set():
break
if self._queue.empty():
continue
with apmon_lock:
while not self._queue.empty():
args, kwargs = self._queue.get()
self._send(*args, **kwargs)
time.sleep(1. / self._max_rate)
|
# -*- coding: utf-8 -*-
"""Python API for Payson paymnents provider
Copyright (c) 2012 Tomas Walch
MIT-License, see LICENSE for details
"""
import datetime
import decimal
import logging
import json
import urllib
import urllib2
import urlparse
PAYSON_API_ENDPOINT = "https://api.payson.se"
PAYSON_TEST_API_ENDPOINT = "https://test-api.payson.se"
PAYSON_API_VERSION = "1.0"
PAYSON_API_PAY_ACTION = "Pay/"
PAYSON_API_PAYMENT_DETAILS_ACTION = "PaymentDetails/"
PAYSON_API_PAYMENT_UPDATE_ACTION = "PaymentUpdate/"
PAYSON_API_VALIDATE_ACTION = "Validate/"
PAYSON_WWW_PAY_FORWARD_URL = 'https://www.payson.se/paysecure/?token=%s'
PAYSON_WWW_PAY_FORWARD_TEST_URL = \
'https://test-www.payson.se/paysecure/?token=%s'
PAYSON_TEST_AGENT_ID = ('1', '4')
PAYSON_TEST_AGENT_KEY = ('fddb19ac-7470-42b6-a91d-072cb1495f0a',
'2acab30d-fe50-426f-90d7-8c60a7eb31d4')
log = logging.getLogger('Payson API')
class PaysonApi():
def __init__(self, user_id, user_key):
"""Constructor
:param user_id: Agent ID obtained from Payson
:type user_id: str
:param user_key: Password (MD5 Key) obtained from Payson
:type user_key: str
"""
if (user_id in PAYSON_TEST_AGENT_ID and
user_key in PAYSON_TEST_AGENT_KEY):
endpoint = PAYSON_TEST_API_ENDPOINT
self.forward_pay_url = PAYSON_WWW_PAY_FORWARD_TEST_URL
else:
endpoint = PAYSON_API_ENDPOINT
self.forward_pay_url = PAYSON_WWW_PAY_FORWARD_URL
self.user_id = user_id
self.user_key = user_key
def mkcmd(cmd):
return '/'.join((endpoint, PAYSON_API_VERSION, cmd))
self.pay_cmd = mkcmd(PAYSON_API_PAY_ACTION)
self.get_payment_details_cmd = mkcmd(PAYSON_API_PAYMENT_DETAILS_ACTION)
self.update_payment_details_cmd = \
mkcmd(PAYSON_API_PAYMENT_UPDATE_ACTION)
self.validate_ipn_cmd = mkcmd(PAYSON_API_VALIDATE_ACTION)
self.send_ipn_cmd = mkcmd('SendIPN/')
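        # Illustration (assumption, not upstream): with the production endpoint
        # the resulting self.pay_cmd is 'https://api.payson.se/1.0/Pay/'.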
def pay(self,
returnUrl,
cancelUrl,
memo,
senderEmail,
senderFirstName,
senderLastName,
receiverList,
ipnNotificationUrl=None,
localeCode=None,
currencyCode=None,
fundingList=tuple(),
feesPayer=None,
invoiceFee=None,
custom=None,
trackingId=None,
guaranteeOffered=None,
orderItemList=tuple(),
showReceiptPage=True):
"""The starting point for any kind of payment.
For a longer description, including possible parameter values and
constraints, see https://api.payson.se/#Initializepayment
:type returnUrl: unicode
:type cancelUrl: unicode
:type memo: unicode
:type senderEmail: unicode
:type senderFirstName: unicode
:type senderLastName: unicode
:type receiverList: iterable of Receiver instances
:type ipnNotificationUrl: unicode
:type localeCode: unicode
:type currencyCode: unicode
:type fundingList: iterable with unicode instances
:type feesPayer: unicode
:type invoiceFee: decimal.Decimal
:type custom: any json serializable Python object
:type trackingId: unicode or int
:type guaranteeOffered: unicode
:type orderItemList: iterable of OrderItem instances
:type showReceiptPage: bool
:rtype: PayResponse
"""
pay_request = {'returnUrl': returnUrl,
'cancelUrl': cancelUrl,
'memo': memo.encode('utf-8'),
'senderEmail': senderEmail.encode('utf-8'),
'senderFirstName': senderFirstName.encode('utf-8'),
'senderLastName': senderLastName.encode('utf-8')}
for i, v in enumerate(receiverList):
k = 'receiverList.receiver(%d).%s'
pay_request[k % (i, 'email')] = v.email.encode('utf-8')
pay_request[k % (i, 'amount')] = str(v.amount)
if v.primary is not None:
pay_request[k % (i, 'primary')] = json.dumps(v.primary)
if v.firstName:
pay_request[k % (i, 'firstName')] = v.firstName.encode('utf-8')
if v.lastName:
pay_request[k % (i, 'lastName')] = v.lastName.encode('utf-8')
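        # Illustration (assumption, not upstream): a single
        # Receiver(email=u'a@b.se', amount='10') flattens to
        # {'receiverList.receiver(0).email': 'a@b.se',
        #  'receiverList.receiver(0).amount': '10'} plus any optional fields.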
if ipnNotificationUrl:
pay_request['ipnNotificationUrl'] = ipnNotificationUrl
if localeCode:
pay_request['localeCode'] = localeCode
if currencyCode:
pay_request['currencyCode'] = currencyCode
for i, v in enumerate(fundingList):
pay_request['fundingList.fundingConstraint'
'(%d).constraint' % i] = v
if feesPayer:
pay_request['feesPayer'] = feesPayer
if invoiceFee is not None:
pay_request['invoiceFee'] = str(invoiceFee)
if custom is not None:
pay_request['custom'] = json.dumps(custom)
if trackingId is not None:
pay_request['trackingId'] = trackingId.encode('utf-8')
if guaranteeOffered:
pay_request['guaranteeOffered'] = guaranteeOffered
for i, v in enumerate(orderItemList):
k = 'orderItemList.orderItem(%d).%s'
pay_request[k % (i, 'description')] = v.description.encode('utf-8')
pay_request[k % (i, 'sku')] = str(v.sku)
pay_request[k % (i, 'quantity')] = str(v.quantity)
pay_request[k % (i, 'unitPrice')] = str(v.unitPrice)
pay_request[k % (i, 'taxPercentage')] = str(v.taxPercentage)
if showReceiptPage is False:
pay_request['showReceiptPage'] = json.dumps(showReceiptPage)
response_dict = self._do_request(self.pay_cmd, pay_request)
pay_response = PayResponse(self.forward_pay_url, response_dict)
log.info('PAYSON: %s response: %r' % (self.pay_cmd, response_dict))
return pay_response
def payment_details(self, token):
"""Get details about an existing payment.
For a longer description, including possible parameter values, see
https://api.payson.se/#PaymentDetailsrequest
:type token: unicode
:rtype: PaymentDetailsResponse
"""
response_dict = self._do_request(
self.get_payment_details_cmd,
{'token': token})
payment_details_response = PaymentDetailsResponse(response_dict)
log.info('PAYSON: %s response: %r' % (self.get_payment_details_cmd,
response_dict))
return payment_details_response
def payment_update(self, token, action):
"""Update an existing payment, for instance mark an order as shipped or canceled.
For a longer description, including possible parameter values, see
https://api.payson.se/#PaymentUpdaterequest
:type token: unicode
:type action: unicode
:rtype: ResponseEnvelope
"""
response_dict = self._do_request(
self.update_payment_details_cmd,
{'token': token,
'action': action})
response = ResponseEnvelope(response_dict)
log.info('PAYSON: %s response: %r' % (self.update_payment_details_cmd,
response_dict))
return response.ack == 'SUCCESS'
def validate(self, message):
"""This method is used to validate the content of the IPN message that was sent to your ipnNotificationUrl.
For a longer description, including possible parameter values, see
https://api.payson.se/#Validaterequest
:param message: complete unaltered query string from the IPN request
:type message: str
:returns: True if IPN is verified, otherwise False
:rtype: bool
"""
response = self._send_request(self.validate_ipn_cmd, message)
log.info('PAYSON: %s response: %r' % (self.validate_ipn_cmd,
response))
if response == 'VERIFIED':
return True
elif response == 'INVALID':
return False
else:
raise ValueError('Invalid response for IPN validation.')
def _do_request(self, cmd, data):
query = urllib.urlencode(data)
response_body = self._send_request(cmd, query)
data = urlparse.parse_qs(response_body)
return {k: v[0] for k, v in data.items()}
def _send_request(self, cmd, query):
request = urllib2.Request(cmd, query)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
request.add_header('PAYSON-SECURITY-USERID', self.user_id)
request.add_header('PAYSON-SECURITY-PASSWORD', self.user_key)
log.info('PAYSON: Calling %s with %r' % (cmd, query))
try:
response = urllib2.urlopen(request)
except urllib2.URLError, e:
log.error('Exception when calling {0}: {1}'.format(cmd, e))
raise
return response.read()
class OrderItem(object):
"""Holds Order Item values used in pay operation.
"""
def __init__(self,
description,
sku,
quantity,
unitPrice,
taxPercentage):
"""Constructor.
        Payson API documentation states that some of these values are
        optional; this is incorrect, all of them must be provided.
For possible parameter values and constraints see
https://api.payson.se/#Initializepayment
:param description: Description of this item.
:type description: unicode
:param sku: SKU of this item.
:type sku: unicode or int
:param quantity: Quantity of this item.
:type quantity: decimal.Decimal
:param unitPrice: The unit price of this item not including VAT.
:type unitPrice: decimal.Decimal
:param taxPercentage: Tax percentage for this item.
:type taxPercentage: decimal.Decimal
"""
self.description = description
self.sku = sku
self.quantity = quantity
self.unitPrice = unitPrice
self.taxPercentage = taxPercentage
class Receiver(object):
"""Holds receiver data.
Used both in pay request and in payment details objects.
"""
def __init__(self,
email,
amount,
primary=None,
firstName=None,
lastName=None):
self.email = email
self.amount = decimal.Decimal(amount)
self.primary = primary
self.firstName = firstName
self.lastName = lastName
@classmethod
def from_response_data(cls, data):
receivers = []
i = 0
while 'receiverList.receiver(%d).email' % i in data:
primary = data.get('receiverList.receiver(%d).primary' % i)
primary = json.loads(primary.lower()) if primary else None
receivers.append(
cls(data['receiverList.receiver(%d).email' % i],
data['receiverList.receiver(%d).amount' % i],
primary)
)
i += 1
return receivers
class Error(object):
def __init__(self, errorId, message, parameter=None):
self.errorId = int(errorId)
self.message = message
self.parameter = parameter
@classmethod
def from_response_dict(cls, data):
errors = []
i = 0
while 'errorList.error(%d).errorId' % i in data:
errors.append(
cls(data['errorList.error(%d).errorId' % i],
data['errorList.error(%d).message' % i],
data.get('errorList.error(%d).parameter' % i))
)
i += 1
return errors
class ResponseEnvelope(object):
def __init__(self, data):
self.ack = data['responseEnvelope.ack']
self.timestamp = datetime.datetime.strptime(
data['responseEnvelope.timestamp'], '%Y-%m-%dT%H:%M:%S')
self.correlationId = data['responseEnvelope.correlationId']
self.errorList = Error.from_response_dict(data)
@property
def success(self):
"""True if request succeeded."""
return self.ack == 'SUCCESS'
class PayResponse(object):
"""Holds the returned values from the pay operation.
"""
def __init__(self, forward_pay_url, data):
self.responseEnvelope = ResponseEnvelope(data)
self.token = data.get('TOKEN', '')
self.forward_pay_url = forward_pay_url % self.token if self.token \
else ''
@property
def success(self):
"""True if request (not payment) succeeded."""
return self.responseEnvelope.success
class ShippingAddress(object):
"""Invoice shipping address info.
"""
def __init__(self, data):
self.name = data['shippingAddress.name'].decode('utf-8')
self.streetAddress = data['shippingAddress.streetAddress'].decode('utf-8')
self.postalCode = data['shippingAddress.postalCode'].decode('utf-8')
self.city = data['shippingAddress.city'].decode('utf-8')
self.country = data['shippingAddress.country'].decode('utf-8')
class PaymentDetails(object):
"""Holds the returned values from the payment_details and IPN callback operations.
See https://api.payson.se/#PaymentDetailsrequest for a description of
attributes.
"""
def __init__(self, data):
self.purchaseId = data.get('purchaseId', '')
self.token = data.get('token')
self.senderEmail = data.get('senderEmail', '')
self.status = data['status']
self.type = data['type']
self.guaranteeStatus = data.get('guaranteeStatus')
self.guaranteeDeadlineTimestamp = datetime.datetime.strptime(
data['guaranteeDeadlineTimestamp'], '%Y-%m-%dT%H:%M:%S') \
if 'guaranteeDeadlineTimestamp' in data else None
self.invoiceStatus = data.get('invoiceStatus')
custom = data.get('custom')
self.custom = custom and json.loads(custom)
self.trackingId = data.get('trackingId', '').decode('utf-8')
self.currencyCode = data['currencyCode']
self.receiverFee = decimal.Decimal(data.get('receiverFee', '0'))
self.receiverList = Receiver.from_response_data(data)
if 'shippingAddress.name' in data:
self.shippingAddress = ShippingAddress(data)
self.post_data = data.copy()
@property
def amount(self):
return sum(receiver.amount for receiver in self.receiverList)
class PaymentDetailsResponse(PaymentDetails):
"""Returned from payment_details.
This class contains PaymentDetails with a ResponseEnvelope.
"""
def __init__(self, data):
super(PaymentDetailsResponse, self).__init__(data)
self.responseEnvelope = ResponseEnvelope(data)
@property
def success(self):
"""True if request succeeded."""
return self.responseEnvelope.success
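# Minimal usage sketch (assumption: not part of the original module; `api` is an
# instance of the Payson client class defined above, and the status/action
# values shown are illustrative):
#
#     details = api.payment_details(token)
#     if details.success and details.status == 'COMPLETED':
#         shipped = api.payment_update(token, 'SHIPORDER')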
|
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for evaluations."""
def evaluationRowAdder(evals):
"""Add rows for each evaluation for each entity that is fetched.
Args:
evals: a dictionary containing evaluations.
Returns:
adder function that can be used to add rows to a list of evaluations.
"""
def adder(content_response, entity, *args):
        # get the last failed evaluation for the project so that rows
        # for evaluations that come after it need not be added
failed_eval = None
if entity.failed_evaluations:
failed_grading_record = entity.failed_evaluations[-1]
fgr_ent = args[0].get(failed_grading_record)
failed_eval = fgr_ent.grading_survey_group.grading_survey
        # since evals is an instance of Django's SortedDict, we can be
        # sure that the evaluations are iterated in the order in which
        # they were inserted
for eval_link_id, evaluation in evals.items():
content_response.addRow(entity, eval_link_id, *args)
if (failed_eval and
failed_eval.key().id_or_name() == evaluation.key().id_or_name()):
break
return adder
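# Minimal usage sketch (assumption: not part of the original module; names such
# as `content_response`, `entities`, and `grading_record_map` are illustrative):
#
#     adder = evaluationRowAdder(evals)
#     for entity in entities:
#         adder(content_response, entity, grading_record_map)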
|
import time
from datetime import date
import numpy
from PIL import Image
import zbar
import os,sys
import wx # GUI
# Handle time lapse!
scanner = zbar.ImageScanner()
# configure the reader
scanner.parse_config('enable')
#scanner.set_config(0, zbar.Config.ENABLE, 0)
#scanner.set_config(zbar.Symbol.QRCODE, zbar.Config.ENABLE, 1)
label = ""
# TODO
# Read label better (crop, enhance contrast, etc.)
# copy files
# record previous file
def readQRCODE(ImageFile):
label = ""
pilImage = Image.open(ImageFile)
width, height = pilImage.size
pilImage = pilImage.crop((int(0.18*width), int(0.2*height),int(0.97*width), int(0.95*height)))
pilImage = pilImage.convert('L')
width, height = pilImage.size
raw = pilImage.tostring()
# wrap image data
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
scanner.scan(image)
# extract results
for symbol in image:
label = symbol.data
# clean up
del(image)
return label
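# Minimal usage sketch (assumption: not part of the original script; the file
# name is illustrative):
#
#     label = readQRCODE('photo-QR-.jpg')
#     if label:
#         print(label)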
class MainWindow(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(400,300))
self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE | wx.TE_READONLY)
self.CreateStatusBar() # A Statusbar in the bottom of the window
        # Folders
self.dest_folder = os.path.dirname(sys.argv[0])
self.root_folder = os.path.dirname(sys.argv[0])
# Setting up the menu.
filemenu= wx.Menu()
# wx.ID_ABOUT and wx.ID_EXIT are standard IDs provided by wxWidgets.
filemenu.Append(1, "&Base Folders"," Set folders")
filemenu.Append(2, "&Run"," scan for files")
filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
filemenu.AppendSeparator()
filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
        # Bind menu events to their handlers.
self.Bind(wx.EVT_MENU, self.get_folder, id=1)
self.Bind(wx.EVT_MENU, self.scan_data, id=2)
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
self.Show(True)
def get_folder(self, id):
dlg = wx.DirDialog(self, "Choose Root Folder:")
if dlg.ShowModal() == wx.ID_OK:
self.root_folder = dlg.GetPath()
dlg.Destroy()
def scan_data(self, id):
#################################################################
# Find all suitable files in the current folder
#################################################################
        dir = self.root_folder  # previously: os.path.dirname(sys.argv[0])
sys.path.append(dir)
for f in os.listdir(dir):
file, ext = os.path.splitext(f) # Handles no-extension files, etc.
if ext == '.JPG':
base_row = file.split("-")
base = base_row[0]
if len(base_row) == 1:
if os.path.isfile(dir+ "\\"+ file + "-QR-.jpg"):
genotype = readQRCODE(dir+ "\\"+ file + "-QR-.jpg")
# image properties
file_tmp1 = file.split('_')
file_id = file_tmp1[1]
#os.path.getmtime(dir+ "\\"+ file +ext)
# Image identifiers
identifyer = [None,None,None]
if len(genotype) > 5:
text = "Root directory: " + dir + "\n"
text += "File: " + file + "\n"
text += "Genotype: " + genotype
self.control.SetValue(text)
wx.Yield()
identifyer = genotype.split('_')
else:
pilImage = Image.open(dir+ "\\"+ file + "-QR-.jpg")
width, height = pilImage.size
pilImage = pilImage.crop((int(0.18*width), int(0.3*height),int(0.97*width), int(0.92*height)))
width, height = pilImage.size
sc = 0.6
pilImage = pilImage.resize((int(width*sc),int(height*sc)), Image.ANTIALIAS)
img = wx.EmptyImage( *pilImage.size )
pilImageCopy = pilImage.copy()
pilImageCopyRGB = pilImageCopy.convert( 'RGB' ) # Discard any alpha from the PIL image.
pilImageRgbData =pilImageCopyRGB.tostring()
img.SetData( pilImageRgbData )
identifyer_length = 0
                            while identifyer_length != 3:  # expect "Species Population Id"
dlg = wx.TextEntryDialog(self, 'Type "Species Population Id" with space as separation', 'Could not read bar code', '')
dlg.SetValue("")
self.pnl = MyFrame(dlg, -1, "Label not read", size=(int(width*sc),int(height*sc)), pos = (800,100), style = wx.DEFAULT_FRAME_STYLE, pic = img)
self.pnl.Show(True)
if dlg.ShowModal() == wx.ID_OK:
txtvalue = dlg.GetValue() #genotype.split('_')
identifyer = txtvalue.split(' ')
identifyer_length = len(identifyer)
dlg.Destroy()
                                else:
                                    text = "!!! Could not recover barcode for !!! :\n\n"
                                    text += "Root directory: " + dir + "\n"
                                    text += "File: " + file + "\n"
                                    self.control.SetValue(text)
                                    wx.Yield()
                                    dlg.Destroy()
                                    break
class MyFrame(wx.Frame):
def __init__(
self, parent, ID, title, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE, pic = None
):
wx.Frame.__init__(self, parent, ID, title, pos, size, style)
panel = wx.Panel(self, -1)
wx.StaticBitmap(panel, -1, pic.ConvertToBitmap(), (0, 0))
def OnCloseMe(self, event):
self.Close(True)
def OnCloseWindow(self, event):
self.Destroy()
app = wx.App(False)
frame = MainWindow(None, "Sample editor")
app.MainLoop()
|
# Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines classes that describe C++ classes

This module contains definitions for the following C++ declarations:
- class definition
- class declaration
- small helper class for describing C++ class hierarchy
"""
import warnings
from . import scopedef
from . import declaration_utils
from . import declaration
from . import templates
from . import cpptypes
from .. import utils
class ACCESS_TYPES(object):
"""class that defines "access" constants"""
PUBLIC = "public"
PRIVATE = "private"
PROTECTED = "protected"
ALL = [PUBLIC, PRIVATE, PROTECTED]
class CLASS_TYPES(object):
"""class that defines "class" type constants"""
CLASS = "class"
STRUCT = "struct"
UNION = "union"
ALL = [CLASS, STRUCT, UNION]
def get_partial_name(name):
from . import container_traits # prevent cyclic dependencies
ct = container_traits.find_container_traits(name)
if ct:
return ct.remove_defaults(name)
elif templates.is_instantiation(name):
tmpl_name, args = templates.split(name)
for i, arg_name in enumerate(args):
args[i] = get_partial_name(arg_name.strip())
return templates.join(tmpl_name, args)
else:
return name
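# Minimal usage sketch (assumption: not part of the original module; the exact
# formatting of the result depends on the registered container traits):
#
#     get_partial_name('std::vector<int, std::allocator<int> >')
#     # roughly -> 'std::vector<int>', with the defaulted allocator removed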
class hierarchy_info_t(object):
"""describes class relationship"""
def __init__(self, related_class=None, access=None, is_virtual=False):
"""creates class that contains partial information about class
relationship"""
if related_class:
assert(isinstance(related_class, class_t))
self._related_class = related_class
if access:
assert(access in ACCESS_TYPES.ALL)
self._access = access
self._is_virtual = is_virtual
self._declaration_path = None
self._declaration_path_hash = None
def __eq__(self, other):
if not isinstance(other, hierarchy_info_t):
return False
return (self.declaration_path_hash ==
other.declaration_path_hash) \
and self._declaration_path == other._declaration_path \
and self._access == other._access \
and self._is_virtual == other._is_virtual
def __hash__(self):
return self.declaration_path_hash
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, self.__class__):
return self.__class__.__name__ < other.__class__.__name__
return (self.declaration_path, self.access, self.is_virtual) < \
(other.declaration_path, other.access, other.is_virtual)
@property
def related_class(self):
"""reference to base or derived :class:`class <class_t>`"""
return self._related_class
@related_class.setter
def related_class(self, new_related_class):
if new_related_class:
assert(isinstance(new_related_class, class_t))
self._related_class = new_related_class
self._declaration_path = None
self._declaration_path_hash = None
@property
def access(self):
return self._access
@access.setter
def access(self, new_access):
assert(new_access in ACCESS_TYPES.ALL)
self._access = new_access
# TODO: Why is there an access_type / access which are the same ?
@property
def access_type(self):
"""describes :class:`hierarchy type <ACCESS_TYPES>`"""
return self.access
@access_type.setter
def access_type(self, new_access_type):
self.access = new_access_type
# TODO: check whether GCC XML support this and if so parser this
# information
@property
def is_virtual(self):
"""indicates whether the inheritance is virtual or not"""
return self._is_virtual
@is_virtual.setter
def is_virtual(self, new_is_virtual):
self._is_virtual = new_is_virtual
@property
def declaration_path(self):
if self._declaration_path is None:
self._declaration_path = declaration_utils.declaration_path(
self.related_class)
return self._declaration_path
@property
def declaration_path_hash(self):
if self._declaration_path_hash is None:
self._declaration_path_hash = hash(tuple(self.declaration_path))
return self._declaration_path_hash
class class_declaration_t(declaration.declaration_t):
"""describes class declaration"""
def __init__(self, name=''):
"""creates class that describes C++ class declaration
( and not definition )"""
declaration.declaration_t.__init__(self, name)
self._aliases = []
self._container_traits = None # Deprecated
self._container_traits_set = False # Deprecated
self._container_traits_cache = None
def _get__cmp__items(self):
"""implementation details"""
return []
def i_depend_on_them(self, recursive=True):
return []
@property
def aliases(self):
"""List of :class:`aliases <typedef_t>` to this instance"""
return self._aliases
@aliases.setter
def aliases(self, new_aliases):
self._aliases = new_aliases
@property
def container_traits(self):
"""reference to :class:`container_traits_impl_t` or None"""
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The container_traits attribute is deprecated. \n" +
"Please use the find_container_traits function from the"
"declarations module instead.",
DeprecationWarning)
if self._container_traits_set is False:
from . import container_traits # prevent cyclic dependencies
self._container_traits_set = True
self._container_traits = container_traits.find_container_traits(
self)
self._container_traits_cache = self._container_traits
return self._container_traits
def _get_partial_name_impl(self):
return get_partial_name(self.name)
class class_t(scopedef.scopedef_t):
"""describes class definition"""
# Can be set from outside
USE_DEMANGLED_AS_NAME = True
def __init__(
self,
name='',
class_type=CLASS_TYPES.CLASS,
is_abstract=False):
"""creates class that describes C++ class definition"""
scopedef.scopedef_t.__init__(self, name)
if class_type:
assert(class_type in CLASS_TYPES.ALL)
self._class_type = class_type
self._bases = []
self._derived = []
self._is_abstract = is_abstract
self._public_members = []
self._private_members = []
self._protected_members = []
self._aliases = []
self._byte_size = 0
self._byte_align = 0
self._container_traits_cache = None
self._container_traits = None # Deprecated
self._container_traits_set = False # Deprecated
self._recursive_bases = None
self._recursive_derived = None
self._use_demangled_as_name = False
@property
def use_demangled_as_name(self):
if "GCC" in utils.xml_generator:
return class_t.USE_DEMANGLED_AS_NAME
elif "CastXML" in utils.xml_generator:
return False
@use_demangled_as_name.setter
def use_demangled_as_name(self, use_demangled_as_name):
self._use_demangled_as_name = use_demangled_as_name
def _get_name_impl(self):
if not self._name: # class with empty name
return self._name
elif self.use_demangled_as_name and self.demangled:
if not self.cache.demangled_name:
fname = declaration_utils.full_name(self.parent)
if fname.startswith('::') and \
not self.demangled.startswith('::'):
fname = fname[2:]
if self.demangled.startswith(fname):
tmp = self.demangled[len(fname):] # demangled::name
if tmp.startswith('::'):
tmp = tmp[2:]
if '<' not in tmp and '<' in self._name:
# we have template class, but for some reason demangled
# name doesn't contain any template
# This happens for std::string class, but this breaks
# other cases, because this behaviour is not consistent
self.cache.demangled_name = self._name
return self.cache.demangled_name
else:
self.cache.demangled_name = tmp
return tmp
else:
self.cache.demangled_name = self._name
return self._name
else:
return self.cache.demangled_name
else:
return self._name
def __str__(self):
name = declaration_utils.full_name(self)
if name[:2] == "::":
name = name[2:]
return "%s [%s]" % (name, self.class_type)
def _get__cmp__scope_items(self):
"""implementation details"""
        return [
            self.class_type,
            sorted(declaration_utils.declaration_path(base.related_class)
                   for base in self.bases),
            sorted(declaration_utils.declaration_path(derive.related_class)
                   for derive in self.derived),
            self.is_abstract,
            sorted(self.public_members),
            sorted(self.private_members),
            sorted(self.protected_members)]
    def __eq__(self, other):
        if not scopedef.scopedef_t.__eq__(self, other):
            return False
        return self.class_type == other.class_type \
            and sorted(declaration_utils.declaration_path(base.related_class)
                       for base in self.bases) \
            == sorted(declaration_utils.declaration_path(base.related_class)
                      for base in other.bases) \
            and sorted(declaration_utils.declaration_path(derive.related_class)
                       for derive in self.derived) \
            == sorted(declaration_utils.declaration_path(derive.related_class)
                      for derive in other.derived) \
            and self.is_abstract == other.is_abstract \
            and sorted(self.public_members) \
            == sorted(other.public_members) \
            and sorted(self.private_members) \
            == sorted(other.private_members) \
            and sorted(self.protected_members) \
            == sorted(other.protected_members)
def __hash__(self):
return hash(self.class_type)
@property
def class_type(self):
"""describes class :class:`type <CLASS_TYPES>`"""
return self._class_type
@class_type.setter
def class_type(self, new_class_type):
if new_class_type:
assert(new_class_type in CLASS_TYPES.ALL)
self._class_type = new_class_type
@property
def bases(self):
"""list of :class:`base classes <hierarchy_info_t>`"""
return self._bases
@bases.setter
def bases(self, new_bases):
self._bases = new_bases
@property
def recursive_bases(self):
"""list of all :class:`base classes <hierarchy_info_t>`"""
if self._recursive_bases is None:
to_go = self.bases[:]
all_bases = []
while to_go:
base = to_go.pop()
if base not in all_bases:
all_bases.append(base)
to_go.extend(base.related_class.bases)
self._recursive_bases = all_bases
return self._recursive_bases
@property
def derived(self):
"""list of :class:`derived classes <hierarchy_info_t>`"""
return self._derived
@derived.setter
def derived(self, new_derived):
self._derived = new_derived
@property
def recursive_derived(self):
"""list of all :class:`derive classes <hierarchy_info_t>`"""
if self._recursive_derived is None:
to_go = self.derived[:]
all_derived = []
while to_go:
derive = to_go.pop()
if derive not in all_derived:
all_derived.append(derive)
to_go.extend(derive.related_class.derived)
self._recursive_derived = all_derived
return self._recursive_derived
@property
def is_abstract(self):
"""describes whether class abstract or not"""
return self._is_abstract
@is_abstract.setter
def is_abstract(self, is_abstract):
self._is_abstract = is_abstract
@property
def public_members(self):
"""list of all public :class:`members <declarationt_>`"""
return self._public_members
@public_members.setter
def public_members(self, new_public_members):
self._public_members = new_public_members
@property
def private_members(self):
"""list of all private :class:`members <declarationt_>`"""
return self._private_members
@private_members.setter
def private_members(self, new_private_members):
self._private_members = new_private_members
@property
def protected_members(self):
"""list of all protected :class:`members <declarationt_>`"""
return self._protected_members
@protected_members.setter
def protected_members(self, new_protected_members):
self._protected_members = new_protected_members
@property
def aliases(self):
"""List of :class:`aliases <typedef_t>` to this instance"""
return self._aliases
@aliases.setter
def aliases(self, new_aliases):
self._aliases = new_aliases
@property
def byte_size(self):
"""Size of this class in bytes @type: int"""
return self._byte_size
@byte_size.setter
def byte_size(self, new_byte_size):
self._byte_size = new_byte_size
@property
def byte_align(self):
"""Alignment of this class in bytes @type: int"""
return self._byte_align
@byte_align.setter
def byte_align(self, new_byte_align):
self._byte_align = new_byte_align
def _get_declarations_impl(self):
return self.get_members()
def get_members(self, access=None):
"""
returns list of members according to access type
        If access equals None, then the returned list will contain all members.

        You should not modify the list content; otherwise cached
        optimization data will become stale and may give you wrong results.
:param access: describes desired members
:type access: :class:ACCESS_TYPES
:rtype: [ members ]
"""
if access == ACCESS_TYPES.PUBLIC:
return self.public_members
elif access == ACCESS_TYPES.PROTECTED:
return self.protected_members
elif access == ACCESS_TYPES.PRIVATE:
return self.private_members
else:
all_members = []
all_members.extend(self.public_members)
all_members.extend(self.protected_members)
all_members.extend(self.private_members)
return all_members
def adopt_declaration(self, decl, access):
"""adds new declaration to the class
:param decl: reference to a :class:`declaration_t`
:param access: member access type
:type access: :class:ACCESS_TYPES
"""
if access == ACCESS_TYPES.PUBLIC:
self.public_members.append(decl)
elif access == ACCESS_TYPES.PROTECTED:
self.protected_members.append(decl)
elif access == ACCESS_TYPES.PRIVATE:
self.private_members.append(decl)
else:
raise RuntimeError("Invalid access type: %s." % access)
decl.parent = self
decl.cache.reset()
decl.cache.access_type = access
def remove_declaration(self, decl):
"""
removes decl from members list
:param decl: declaration to be removed
:type decl: :class:`declaration_t`
"""
access_type = self.find_out_member_access_type(decl)
if access_type == ACCESS_TYPES.PUBLIC:
container = self.public_members
elif access_type == ACCESS_TYPES.PROTECTED:
container = self.protected_members
        else:  # decl.cache.access_type == ACCESS_TYPES.PRIVATE
container = self.private_members
del container[container.index(decl)]
decl.cache.reset()
def find_out_member_access_type(self, member):
"""
returns member access type
:param member: member of the class
:type member: :class:`declaration_t`
:rtype: :class:ACCESS_TYPES
"""
assert member.parent is self
if not member.cache.access_type:
if member in self.public_members:
access_type = ACCESS_TYPES.PUBLIC
elif member in self.protected_members:
access_type = ACCESS_TYPES.PROTECTED
elif member in self.private_members:
access_type = ACCESS_TYPES.PRIVATE
else:
raise RuntimeError(
"Unable to find member within internal members list.")
member.cache.access_type = access_type
return access_type
else:
return member.cache.access_type
def __find_out_member_dependencies(self, access_type):
members = self.get_members(access_type)
answer = []
for mem in members:
answer.extend(mem.i_depend_on_them(recursive=True))
member_ids = set([id(m) for m in members])
for dependency in answer:
if id(dependency.declaration) in member_ids:
dependency.access_type = access_type
return answer
def i_depend_on_them(self, recursive=True):
answer = []
for base in self.bases:
answer.append(
dependency_info_t(
self,
base.related_class,
base.access_type,
"base class"))
if recursive:
for access_type in ACCESS_TYPES.ALL:
answer.extend(self.__find_out_member_dependencies(access_type))
return answer
@property
def container_traits(self):
"""reference to :class:`container_traits_impl_t` or None"""
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The container_traits attribute is deprecated. \n" +
"Please use the find_container_traits function from the"
"declarations module instead.",
DeprecationWarning)
if self._container_traits_set is False:
from . import container_traits # prevent cyclic dependencies
self._container_traits_set = True
self._container_traits = container_traits.find_container_traits(
self)
            self._container_traits_cache = self._container_traits
return self._container_traits
def find_copy_constructor(self):
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The find_copy_constructor method is deprecated. \n" +
"Please use the find_copy_constructor function from the"
"declarations module instead.",
DeprecationWarning)
from . import type_traits_classes # prevent cyclic dependencies
return type_traits_classes.find_copy_constructor(self)
def find_trivial_constructor(self):
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The find_trivial_constructor method is deprecated. \n" +
"Please use the find_trivial_constructor function from the"
"declarations module instead.",
DeprecationWarning)
from . import type_traits_classes # prevent cyclic dependencies
return type_traits_classes.find_trivial_constructor(self)
def _get_partial_name_impl(self):
from . import type_traits # prevent cyclic dependencies
if type_traits.is_std_string(self):
return 'string'
elif type_traits.is_std_wstring(self):
return 'wstring'
else:
return get_partial_name(self.name)
def find_noncopyable_vars(self):
"""returns list of all `noncopyable` variables"""
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The find_noncopyable_vars method is deprecated. \n" +
"Please use the find_noncopyable_vars function from the"
"declarations module instead.",
DeprecationWarning)
from . import type_traits_classes # prevent cyclic dependencies
        return type_traits_classes.find_noncopyable_vars(self)
@property
def has_vtable(self):
"""True, if class has virtual table, False otherwise"""
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The has_vtable argument is deprecated. \n" +
"Please use the has_vtable function from the declarations \n" +
"module instead.",
DeprecationWarning)
# prevent cyclic import
from . import type_traits_classes
return type_traits_classes.has_vtable(self)
@property
def top_class(self):
"""reference to a parent class, which contains this class and defined
within a namespace
if this class is defined under a namespace, self will be returned"""
curr = self
parent = self.parent
while isinstance(parent, class_t):
curr = parent
parent = parent.parent
return curr
class_types = (class_t, class_declaration_t)
class impl_details(object):
@staticmethod
def dig_declarations(depend_on_it):
# FIXME: prevent cyclic imports
from . import type_traits
if isinstance(depend_on_it, declaration.declaration_t):
return [depend_on_it]
base_type = type_traits.base_type(
type_traits.remove_alias(depend_on_it))
if isinstance(base_type, cpptypes.declarated_t):
return [base_type.declaration]
elif isinstance(base_type, cpptypes.calldef_type_t):
result = []
result.extend(impl_details.dig_declarations(base_type.return_type))
for argtype in base_type.arguments_types:
result.extend(impl_details.dig_declarations(argtype))
if isinstance(base_type, cpptypes.member_function_type_t):
result.extend(
impl_details.dig_declarations(
base_type.class_inst))
return result
return []
class dependency_info_t(object):
def __init__(self, declaration, depend_on_it, access_type=None, hint=None):
object.__init__(self)
assert isinstance(
depend_on_it,
(class_t,
cpptypes.type_t))
self._declaration = declaration
self._depend_on_it = depend_on_it
self._access_type = access_type
self._hint = hint
@property
def declaration(self):
return self._declaration
# short name
decl = declaration
@property
def depend_on_it(self):
return self._depend_on_it
@property
def access_type(self):
return self._access_type
@access_type.setter
def access_type(self, access_type):
self._access_type = access_type
def __str__(self):
return 'declaration "%s" depends( %s ) on "%s" ' \
% (self.declaration, self.access_type, self.depend_on_it)
@property
def hint(self):
"""The declaration, that report dependency can put some additional
inforamtion about dependency. It can be used later"""
return self._hint
def find_out_depend_on_it_declarations(self):
"""If declaration depends on other declaration and not on some type
this function will return reference to it. Otherwise None will be
returned
"""
return impl_details.dig_declarations(self.depend_on_it)
@staticmethod
def i_depend_on_them(decl):
"""Returns set of declarations. every item in the returned set,
depends on a declaration from the input"""
to_be_included = set()
for dependency_info in decl.i_depend_on_them():
for ddecl in dependency_info.find_out_depend_on_it_declarations():
if ddecl:
to_be_included.add(ddecl)
if isinstance(decl.parent, class_t):
to_be_included.add(decl.parent)
return to_be_included
@staticmethod
def we_depend_on_them(decls):
"""Returns set of declarations. every item in the returned set,
depends on a declaration from the input"""
to_be_included = set()
for decl in decls:
to_be_included.update(dependency_info_t.i_depend_on_them(decl))
return to_be_included
|
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = r'''
---
module: hashivault_pki_cert_sign
version_added: "4.5.0"
short_description: Hashicorp Vault PKI Sign CSR (Certificate / Intermediate / Verbatim)
description:
- This module signs a new certificate based upon the provided CSR and the supplied parameters.
options:
csr:
        required: true
description:
- Specifies the PEM-encoded CSR.
role:
description:
            - Specifies the name of the role to sign the certificate under.
- 'For *verbatim* type if set, the following parameters from the role will have effect: `ttl`, `max_ttl`,
`generate_lease`, and `no_store`.'
common_name:
description:
- Specifies the requested CN for the certificate. If the CN is allowed by role policy, it will be issued.
mount_point:
default: pki
description:
            - Location where the secrets engine is mounted, also known as the path.
type:
type: str
description:
- Sign a new certificate with `certificate` based upon the provided CSR and the supplied parameters, subject
to the restrictions contained in the role named in the endpoint. The issuing CA certificate is returned as
well, so that only the root CA need be in a client's trust store.
- Use `intermediate` to configure CA certificate to issue a certificate with appropriate values for
acting as an intermediate CA. Distribution points use the values set via config/urls. Values set in the
CSR are ignored unless use_csr_values is set to true, in which case the values from the CSR are used
verbatim.
- Use `verbatim` to sign a new certificate based upon the provided CSR. Values are taken verbatim from the
CSR; the only restriction is that this endpoint will refuse to issue an intermediate CA certificate (use
`intermediate` type for that functionality.)
choices: ["certificate", "intermediate", "verbatim"]
default: certificate
extra_params:
description:
Extra parameters depending on the type.
type: dict
extends_documentation_fragment:
- hashivault
'''
EXAMPLES = r'''
---
- hosts: localhost
tasks:
      - hashivault_pki_cert_sign:
          csr: "{{ lookup('file', 'test.example.com.csr') }}"
          role: 'tester'
          common_name: 'test.example.com'
        register: cert
- debug: msg="{{ cert }}"
'''
def main():
argspec = hashivault_argspec()
argspec['csr'] = dict(required=True, type='str')
argspec['role'] = dict(required=False, type='str')
argspec['common_name'] = dict(required=False, type='str')
argspec['extra_params'] = dict(required=False, type='dict', default={})
argspec['mount_point'] = dict(required=False, type='str', default='pki')
argspec['type'] = dict(required=False, type='str', default='certificate', choices=["certificate", "intermediate",
"verbatim"])
module = hashivault_init(argspec)
result = hashivault_pki_cert_sign(module)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
def certificate(params, mount_point, client):
csr = params.get('csr')
common_name = params.get('common_name')
extra_params = params.get('extra_params')
role = params.get('role').strip('/')
# check if role exists
try:
current_state = client.secrets.pki.read_role(name=role, mount_point=mount_point).get('data')
except Exception:
current_state = {}
if not current_state:
return {'failed': True, 'rc': 1, 'msg': 'role not found or permission denied'}
if not common_name:
return {'failed': True, 'rc': 1, 'msg': 'Missing required options: common_name'}
result = {"changed": False, "rc": 0}
try:
result['data'] = client.secrets.pki.sign_certificate(csr=csr, name=role, mount_point=mount_point,
common_name=common_name,
extra_params=extra_params).get('data')
result['changed'] = True
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result
def intermediate(params, mount_point, client):
csr = params.get('csr')
common_name = params.get('common_name')
extra_params = params.get('extra_params')
if not common_name:
return {'failed': True, 'rc': 1, 'msg': 'Missing required options: common_name'}
result = {"changed": False, "rc": 0}
try:
result['data'] = client.secrets.pki.sign_intermediate(csr=csr, common_name=common_name,
extra_params=extra_params,
mount_point=mount_point).get('data')
result['changed'] = True
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result
def verbatim(params, mount_point, client):
csr = params.get('csr')
extra_params = params.get('extra_params')
role = params.get('role').strip('/')
# check if role exists
try:
current_state = client.secrets.pki.read_role(name=role, mount_point=mount_point).get('data')
except Exception:
current_state = {}
if not current_state:
return {'failed': True, 'rc': 1, 'msg': 'role not found or permission denied'}
result = {"changed": False, "rc": 0}
try:
result['data'] = client.secrets.pki.sign_verbatim(csr=csr, name=role, extra_params=extra_params,
mount_point=mount_point).get('data')
result['changed'] = True
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result
@hashiwrapper
def hashivault_pki_cert_sign(module):
supported_types = {
'certificate': certificate,
'intermediate': intermediate,
'verbatim': verbatim
}
params = module.params
client = hashivault_auth_client(params)
mount_point = params.get('mount_point').strip('/')
return supported_types[params.get('type')](params=params, mount_point=mount_point, client=client)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 28 11:36:04 2016
@author: J. C. Vasquez-Correa
"""
import numpy as np
import math
from statsmodels.tsa.tsatools import lagmat
from sklearn.metrics.pairwise import euclidean_distances as dist
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def Dim_Corr(datas, Tao, m, graph=False):
"""
Compute the correlation dimension of a time series with a time-lag Tao and an embedding dimension m
datas--> time series to compute the correlation dimension
Tao--> time lag computed using the first zero crossing of the auto-correlation function (see Tao func)
    m--> embedding dimension of the time-series, computed using the false neighbors method (see fnn func)
graph (optional)--> plot the phase space (attractor) in 3D
"""
x=PhaseSpace(datas, m, Tao, graph)
ED2=dist(x.T)
posD=np.triu_indices_from(ED2, k=1)
ED=ED2[posD]
max_eps=np.max(ED)
min_eps=np.min(ED[np.where(ED>0)])
max_eps=np.exp(math.floor(np.log(max_eps)))
n_div=int(math.floor(np.log(max_eps/min_eps)))
n_eps=n_div+1
eps_vec=range(n_eps)
unos=np.ones([len(eps_vec)])*-1
eps_vec1=max_eps*np.exp(unos*eps_vec-unos)
Npairs=((len(x[1,:]))*((len(x[1,:])-1)))
C_eps=np.zeros(n_eps)
for i in eps_vec:
eps=eps_vec1[i]
N=np.where(((ED<eps) & (ED>0)))
S=len(N[0])
C_eps[i]=float(S)/Npairs
omit_pts=1
k1=omit_pts
k2=n_eps-omit_pts
xd=np.log(eps_vec1)
yd=np.log(C_eps)
xp=xd[k1:k2]
yp=yd[k1:k2]
p = np.polyfit(xp, yp, 1)
return p[0]
def PhaseSpace(data, m, Tao, graph=False):
"""
Compute the phase space (attractor) a time series data with a time-lag Tao and an embedding dimension m
data--> time series
Tao--> time lag computed using the first zero crossing of the auto-correlation function (see Tao func)
    m--> embedding dimension of the time-series, computed using the false neighbors method (see fnn func)
graph (optional)--> plot the phase space (attractor)
"""
ld=len(data)
    yy = abs(ld-(m-1)*Tao)  # take abs; the sign just denotes direction
x = np.zeros([m, yy])
for j in range(m):
l1=(Tao*(j))
l2=(Tao*(j)+len(x[1,:]))
x[j,:]=data[l1:l2]
if graph:
fig = plt.figure()
if m>2:
ax = fig.add_subplot(111, projection='3d')
ax.plot(x[0,:], x[1,:], x[2,:])
else:
ax = fig.add_subplot(111)
ax.plot(x[0,:], x[1,:])
return x
def Tao(data):
"""
Compute the time-lag of a time series data to build the phase space using the first zero crossing rate criterion
data--> time series
"""
#autocorrelate
    corr = np.correlate(data, data, mode="full")  # full auto-correlation
    corr = corr[len(corr)//2:]  # keep only the non-negative lags (second half)
#when does autocorrelation reach 0?
tau = 0
j = 0
while (corr[j]>0):
j=j+1
tau=j
return tau
def fnn(data, maxm):
"""
Compute the embedding dimension of a time series data to build the phase space using the false neighbors criterion
data--> time series
    maxm--> maximum embedding dimension
"""
RT=15.0
AT=2
sigmay=np.std(data, ddof=1)
nyr=len(data)
m=maxm
EM=lagmat(data, maxlag=m-1)
EEM=np.asarray([EM[j,:] for j in range(m-1, EM.shape[0])])
embedm=maxm
for k in range(AT,EEM.shape[1]+1):
fnn1=[]
fnn2=[]
Ma=EEM[:,range(k)]
D=dist(Ma)
for i in range(1,EEM.shape[0]-m-k):
            # print(D.shape)
            # print(D[i, range(i-1)])
d=D[i,:]
pdnz=np.where(d>0)
dnz=d[pdnz]
Rm=np.min(dnz)
l=np.where(d==Rm)
l=l[0]
l=l[len(l)-1]
if l+m+k-1<nyr:
fnn1.append(np.abs(data[i+m+k-1]-data[l+m+k-1])/Rm)
fnn2.append(np.abs(data[i+m+k-1]-data[l+m+k-1])/sigmay)
Ind1=np.where(np.asarray(fnn1)>RT)
Ind2=np.where(np.asarray(fnn2)>AT)
if len(Ind1[0])/float(len(fnn1))<0.1 and len(Ind2[0])/float(len(fnn2))<0.1:
embedm=k
break
return embedm
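# Minimal usage sketch (assumption: not part of the original module): estimate
# the time lag, embedding dimension, and correlation dimension of a noisy sine
# wave; a (noisy) limit cycle should give a correlation dimension near 1.
if __name__ == '__main__':
    np.random.seed(0)
    t = np.linspace(0, 20 * np.pi, 2000)
    signal = np.sin(t) + 0.01 * np.random.randn(len(t))
    tau = Tao(signal)    # time lag from the first zero of the autocorrelation
    m = fnn(signal, 10)  # embedding dimension via false nearest neighbors
    print(tau, m, Dim_Corr(signal, tau, m))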
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START kms_create_key_ring]
def create_key_ring(project_id, location_id, id):
"""
Creates a new key ring in Cloud KMS
Args:
project_id (string): Google Cloud project ID (e.g. 'my-project').
location_id (string): Cloud KMS location (e.g. 'us-east1').
id (string): ID of the key ring to create (e.g. 'my-key-ring').
Returns:
KeyRing: Cloud KMS key ring.
"""
# Import the client library.
from google.cloud import kms
# Create the client.
client = kms.KeyManagementServiceClient()
# Build the parent location name.
location_name = f'projects/{project_id}/locations/{location_id}'
# Build the key ring.
key_ring = {}
# Call the API.
created_key_ring = client.create_key_ring(request={'parent': location_name, 'key_ring_id': id, 'key_ring': key_ring})
print('Created key ring: {}'.format(created_key_ring.name))
return created_key_ring
# [END kms_create_key_ring]
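# Minimal usage sketch (assumption: not part of the original sample; the
# project, location, and key ring IDs are illustrative):
#
#     create_key_ring('my-project', 'us-east1', 'my-key-ring')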
|
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
import array
import itertools
from multiprocess import TimeoutError, cpu_count
from multiprocess.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
if sys.version_info < (3, 0):
    class Condition(threading._Condition):
        # on Python 2, _Condition.notify_all is an unbound method
        notify_all = threading._Condition.notify_all.__func__
else:
    # Python 3 removed threading._Condition; use threading.Condition directly
    Condition = threading.Condition
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocess.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
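# Minimal usage sketch (assumption: not part of the original module): the dummy
# API mirrors multiprocessing, but Process is a thread and Pool is a ThreadPool.
#
#     p = Process(target=print, args=('hello from a worker thread',))
#     p.start()
#     p.join()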
|
import unittest
import os
from src.day8 import create_node_tree
SAMPLE_DATA = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
class TestMemoryManeuver(unittest.TestCase):
def test_sample_input_part_one(self):
tree = create_node_tree(SAMPLE_DATA)
self.assertEqual(str(tree), SAMPLE_DATA)
self.assertEqual(tree.metadata_sum, 138)
# def test_real_input_part_one(self):
# with open(os.path.join(os.path.dirname(__file__), '../input/day7.txt'), 'r') as f:
# lines = f.readlines()
# day7_input = [line.strip() for line in lines]
# result = find_basic_path(day7_input)
# print(f"The correct order of steps for Part One is {result}")
# self.assertEqual(result, "BDHNEGOLQASVWYPXUMZJIKRTFC")
# def test_sample_input_part_two(self):
# result, total_time = find_path_in_parallel(SAMPLE_DATA, 2, 0)
# self.assertEqual(result, "CABFDE")
# self.assertEqual(total_time, 15)
# def test_real_input_part_two(self):
# with open(os.path.join(os.path.dirname(__file__), '../input/day7.txt'), 'r') as f:
# lines = f.readlines()
# day7_input = [line.strip() for line in lines]
# result, total_time = find_path_in_parallel(day7_input, 5, 60)
# print(f"The total time for Part Two is {total_time}s")
# self.assertEqual(total_time, 1107)
if __name__ == '__main__':
unittest.main()
|
"""
Methods related to aligning reads.
This module was created because of a circular import issue with celery.
...
File "/home/glebk/Projects/churchlab/genome-designer-v2/genome_designer/pipeline/read_alignment.py", line 9, in <module>
from main.models import clean_filesystem_location
File "/home/glebk/Projects/churchlab/genome-designer-v2/genome_designer/main/__init__.py", line 1, in <module>
import signals
File "/home/glebk/Projects/churchlab/genome-designer-v2/genome_designer/main/signals.py", line 20, in <module>
from pipeline.read_alignment import ensure_bwa_index
ImportError: cannot import name ensure_bwa_index
"""
import os
import subprocess
from utils.bam_utils import filter_bam_file_by_row
from django.conf import settings
SAMTOOLS_BINARY = settings.SAMTOOLS_BINARY
TOOLS_DIR = settings.TOOLS_DIR
def has_bwa_index(ref_genome_fasta):
return os.path.exists(ref_genome_fasta + '.bwt')
def ensure_bwa_index(ref_genome_fasta, error_output=None):
"""Creates the reference genome index required by bwa, if it doesn't exist
already.
We rely on the convention that the index file location is the fasta
location with the extension '.bwt' appended to it.
"""
if not has_bwa_index(ref_genome_fasta):
build_bwa_index(ref_genome_fasta, error_output)
# Also build the fasta index.
if not os.path.exists(ref_genome_fasta + '.fai'):
subprocess.check_call([
SAMTOOLS_BINARY,
'faidx',
ref_genome_fasta
], stderr=error_output)
def build_bwa_index(ref_genome_fasta, error_output=None):
"""Calls the command that builds the bwa index required for alignment.
This creates a file in the same directory as ref_genome_fasta, appending
the extension '.bwt' to the name of the fasta.
"""
subprocess.check_call([
'%s/bwa/bwa' % TOOLS_DIR,
'index',
'-a',
'is',
ref_genome_fasta
], stderr=error_output)
def index_bam_file(bam_file, error_output=None):
subprocess.check_call([
SAMTOOLS_BINARY,
'index',
bam_file,
], stderr=error_output)
def extract_split_reads(bam_filename, bam_split_filename):
"""
Isolate split reads from a bam file.
This uses a python script supplied with Lumpy that is run as a
separate process.
This is an internal function that works directly with files, and
is called separately by both SV calling and read ref alignment.
NOTE THAT THIS SCRIPT ONLY WORKS WITH BWA MEM.
"""
assert os.path.exists(bam_filename), "BAM file '%s' is missing." % (
bam_filename)
# Use lumpy bwa-mem split read script to pull out split reads.
filter_split_reads = ' | '.join([
'{samtools} view -h {bam_filename}',
'python {lumpy_bwa_mem_sr_script} -i stdin',
'{samtools} view -Sb -']).format(
samtools=settings.SAMTOOLS_BINARY,
bam_filename=bam_filename,
lumpy_bwa_mem_sr_script=
settings.LUMPY_EXTRACT_SPLIT_READS_BWA_MEM)
with open(bam_split_filename, 'w') as fh:
subprocess.check_call(filter_split_reads,
stdout=fh,
shell=True,
executable=settings.BASH_PATH)
# sort the split reads, overwrite the old file
subprocess.check_call([settings.SAMTOOLS_BINARY, 'sort',
bam_split_filename,
os.path.splitext(bam_split_filename)[0]])
_filter_out_interchromosome_reads(bam_split_filename)
def extract_discordant_read_pairs(bam_filename, bam_discordant_filename):
"""Isolate discordant pairs of reads from a sample alignment.
"""
# Use bam read alignment flags to pull out discordant pairs only
filter_discordant = ' | '.join([
'{samtools} view -u -F 0x0002 {bam_filename} ',
'{samtools} view -u -F 0x0100 - ',
'{samtools} view -u -F 0x0004 - ',
'{samtools} view -u -F 0x0008 - ',
'{samtools} view -b -F 0x0400 - ']).format(
samtools=settings.SAMTOOLS_BINARY,
bam_filename=bam_filename)
with open(bam_discordant_filename, 'w') as fh:
subprocess.check_call(filter_discordant,
stdout=fh, shell=True, executable=settings.BASH_PATH)
# sort the discordant reads, overwrite the old file
subprocess.check_call([settings.SAMTOOLS_BINARY, 'sort',
bam_discordant_filename,
os.path.splitext(bam_discordant_filename)[0]])
_filter_out_interchromosome_reads(bam_discordant_filename)
def _filter_out_interchromosome_reads(bam_filename, overwrite_input=True):
"""Filters out read pairs which lie on different chromosomes.
Args:
bam_filename: Path to bam file.
overwrite_input: If True, overwrite the input file.
"""
def is_rnext_same(line):
parts = line.split('\t')
rnext_col = parts[6]
return rnext_col == '='
if overwrite_input:
output_bam_path = bam_filename
else:
output_bam_path = os.path.splitext(bam_filename)[0] + '.nointerchrom.bam'
filter_bam_file_by_row(bam_filename, is_rnext_same, output_bam_path)
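# Minimal usage sketch (assumption: not part of the original module; the paths
# are illustrative, and the samtools/bwa/lumpy binaries must be configured in
# the Django settings):
#
#     ensure_bwa_index('/data/ref_genome.fasta')
#     extract_split_reads('/data/sample.bam', '/data/sample.split.bam')
#     extract_discordant_read_pairs('/data/sample.bam', '/data/sample.disc.bam')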
|
import numpy as np
from scipy.stats import sem
from uncertainties import ufloat
import uncertainties.unumpy as unp
Zeiten = np.genfromtxt("Restdaten.txt", unpack = True)
#print(Zeiten[2:])
Zeiten /= 5
#print(Zeiten[2:])
daten = np.genfromtxt("WRGdyn.txt", unpack = True)
Mittelwerte = np.array([np.mean(row) for row in Zeiten])
Fehler = np.array([np.std(row, ddof = 1) for row in Zeiten])
s = 1/np.sqrt(len(Zeiten[0]))
Fehler = s*Fehler
T = np.array([ufloat(x, Fehler[index]) for index, x in np.ndenumerate(Mittelwerte)])
Wrg = ufloat(daten[0], daten[1])
Id = ufloat(daten[2], daten[3])
# Moment of inertia of the sphere
MasseK = 0.8124
RadiusK = 0.13766 / 2
Ik_theoretisch = 2/5 * MasseK * RadiusK**2
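# Torsion pendulum relation used below (inferred from the code):
# I = T**2 * D / (4 * pi**2) - I_D, where D is the torsional rigidity (Wrg)
# and I_D is the moment of inertia of the holder (Id).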
Ik_praktisch = T[0]**2 * Wrg /(4* (np.pi**2)) - Id
#print("IK: ")
#print(T[0])
print(Ik_theoretisch, Ik_praktisch)
print(Ik_praktisch/Ik_theoretisch)
# Moment of inertia of the cylinder
MasseZ = 1.0058
RadiusZ = 0.08024/2
HöheZ = 0.13990
Iz_theoretisch = 1/2 * MasseZ * RadiusZ**2
Iz_praktisch = T[1]**2 * Wrg / (4* (np.pi**2)) - Id
#print("IZ: ")
#print(T[1])
print(Iz_theoretisch, Iz_praktisch)
print(Iz_praktisch/Iz_theoretisch)
# Moment of inertia at position 1
Ip1 = T[2]**2 * Wrg / (4 * (np.pi**2)) - Id
# Moment of inertia at position 2
Ip2 = T[3]**2 * Wrg / (4 * (np.pi**2)) - Id
#print(T[2], T[3])
print(Ip1, Ip2)
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
import cirq.contrib.graph_device as ccgd
import cirq.contrib.graph_device.graph_device as ccgdgd
def test_fixed_duration_undirected_graph_device_edge_eq():
e = ccgd.FixedDurationUndirectedGraphDeviceEdge(cirq.Duration(picos=4))
f = ccgd.FixedDurationUndirectedGraphDeviceEdge(cirq.Duration(picos=4))
g = ccgd.FixedDurationUndirectedGraphDeviceEdge(cirq.Duration(picos=5))
assert e == f
assert e != g
assert e != 4
def test_unconstrained_undirected_graph_device_edge_eq():
e = ccgdgd._UnconstrainedUndirectedGraphDeviceEdge()
f = ccgd.UnconstrainedUndirectedGraphDeviceEdge
assert e == f
assert e != 3
def test_is_undirected_device_graph():
assert not ccgd.is_undirected_device_graph('abc')
graph = ccgd.UndirectedHypergraph()
assert ccgd.is_undirected_device_graph(graph)
a, b, c, d, e = cirq.LineQubit.range(5)
graph.add_edge((a, b))
assert ccgd.is_undirected_device_graph(graph)
graph.add_edge((b, c), ccgd.UnconstrainedUndirectedGraphDeviceEdge)
assert ccgd.is_undirected_device_graph(graph)
graph.add_edge((d, e), 'abc')
assert not ccgd.is_undirected_device_graph(graph)
graph = ccgd.UndirectedHypergraph(vertices=(0, 1))
assert not ccgd.is_undirected_device_graph(graph)
def test_is_crosstalk_graph():
a, b, c, d, e, f = cirq.LineQubit.range(6)
assert not ccgd.is_crosstalk_graph('abc')
graph = ccgd.UndirectedHypergraph()
graph.add_vertex('abc')
assert not ccgd.is_crosstalk_graph(graph)
graph = ccgd.UndirectedHypergraph()
graph.add_edge((frozenset((a, b)), frozenset((c, d))), 'abc')
assert not ccgd.is_crosstalk_graph(graph)
graph = ccgd.UndirectedHypergraph()
graph.add_edge((frozenset((a, b)), frozenset((c, d))), None)
graph.add_edge((frozenset((e, f)), frozenset((c, d))), lambda _: None)
assert ccgd.is_crosstalk_graph(graph)
graph = ccgd.UndirectedHypergraph()
graph.add_edge((frozenset((a, b)), frozenset((c, d))), 'abc')
assert not ccgd.is_crosstalk_graph(graph)
graph = ccgd.UndirectedHypergraph()
graph.add_edge((frozenset((a, b)),), None)
assert not ccgd.is_crosstalk_graph(graph)
graph = ccgd.UndirectedHypergraph()
graph.add_edge((frozenset((0, 1)), frozenset((2, 3))), None)
assert not ccgd.is_crosstalk_graph(graph)
def test_unconstrained_undirected_graph_device_edge():
edge = ccgd.UnconstrainedUndirectedGraphDeviceEdge
qubits = cirq.LineQubit.range(2)
assert edge.duration_of(cirq.X(qubits[0])) == cirq.Duration(picos=0)
assert edge.duration_of(cirq.CZ(*qubits[:2])) == cirq.Duration(picos=0)
def test_graph_device():
one_qubit_duration = cirq.Duration(picos=10)
two_qubit_duration = cirq.Duration(picos=1)
one_qubit_edge = ccgd.FixedDurationUndirectedGraphDeviceEdge(one_qubit_duration)
two_qubit_edge = ccgd.FixedDurationUndirectedGraphDeviceEdge(two_qubit_duration)
empty_device = ccgd.UndirectedGraphDevice()
assert not empty_device.qubits
assert not empty_device.edges
n_qubits = 4
qubits = cirq.LineQubit.range(n_qubits)
edges = {
(cirq.LineQubit(i), cirq.LineQubit((i + 1) % n_qubits)): two_qubit_edge
for i in range(n_qubits)
}
edges.update({(cirq.LineQubit(i),): one_qubit_edge for i in range(n_qubits)})
device_graph = ccgd.UndirectedHypergraph(labelled_edges=edges)
def not_cnots(first_op, second_op):
if all(
isinstance(op, cirq.GateOperation) and op.gate == cirq.CNOT
for op in (first_op, second_op)
):
raise ValueError('Simultaneous CNOTs')
assert ccgd.is_undirected_device_graph(device_graph)
with pytest.raises(TypeError):
ccgd.UndirectedGraphDevice('abc')
constraint_edges = {
(frozenset(cirq.LineQubit.range(2)), frozenset(cirq.LineQubit.range(2, 4))): None,
(
frozenset(cirq.LineQubit.range(1, 3)),
frozenset((cirq.LineQubit(0), cirq.LineQubit(3))),
): not_cnots,
}
crosstalk_graph = ccgd.UndirectedHypergraph(labelled_edges=constraint_edges)
assert ccgd.is_crosstalk_graph(crosstalk_graph)
with pytest.raises(TypeError):
ccgd.UndirectedGraphDevice(device_graph, crosstalk_graph='abc')
graph_device = ccgd.UndirectedGraphDevice(device_graph)
assert graph_device.crosstalk_graph == ccgd.UndirectedHypergraph()
graph_device = ccgd.UndirectedGraphDevice(device_graph, crosstalk_graph=crosstalk_graph)
assert sorted(graph_device.edges) == sorted(device_graph.edges)
assert graph_device.qubits == tuple(qubits)
assert graph_device.device_graph == device_graph
assert graph_device.labelled_edges == device_graph.labelled_edges
assert graph_device.duration_of(cirq.X(qubits[2])) == one_qubit_duration
assert graph_device.duration_of(cirq.CNOT(*qubits[:2])) == two_qubit_duration
with pytest.raises(KeyError):
graph_device.duration_of(cirq.CNOT(qubits[0], qubits[2]))
with pytest.raises(ValueError):
graph_device.validate_operation(cirq.CNOT(qubits[0], qubits[2]))
with pytest.raises(AttributeError):
graph_device.validate_operation(list((2, 3)))
moment = cirq.Moment([cirq.CNOT(*qubits[:2]), cirq.CNOT(*qubits[2:])])
with pytest.raises(ValueError):
graph_device.validate_moment(moment)
moment = cirq.Moment([cirq.CNOT(qubits[0], qubits[3]), cirq.CZ(qubits[1], qubits[2])])
graph_device.validate_moment(moment)
moment = cirq.Moment([cirq.CNOT(qubits[0], qubits[3]), cirq.CNOT(qubits[1], qubits[2])])
with pytest.raises(ValueError):
graph_device.validate_moment(moment)
def test_graph_device_copy_and_add():
a, b, c, d, e, f = cirq.LineQubit.range(6)
device_graph = ccgd.UndirectedHypergraph(labelled_edges={(a, b): None, (c, d): None})
crosstalk_graph = ccgd.UndirectedHypergraph(
labelled_edges={(frozenset((a, b)), frozenset((c, d))): None}
)
device = ccgd.UndirectedGraphDevice(device_graph=device_graph, crosstalk_graph=crosstalk_graph)
device_graph_addend = ccgd.UndirectedHypergraph(labelled_edges={(a, b): None, (e, f): None})
crosstalk_graph_addend = ccgd.UndirectedHypergraph(
labelled_edges={(frozenset((a, b)), frozenset((e, f))): None}
)
device_addend = ccgd.UndirectedGraphDevice(
device_graph=device_graph_addend, crosstalk_graph=crosstalk_graph_addend
)
device_sum = device + device_addend
device_copy = device.__copy__()
device_copy += device_addend
assert device != device_copy
assert device_copy == device_sum
def test_qubit_set():
a, b, c, d = cirq.LineQubit.range(4)
device_graph = ccgd.UndirectedHypergraph(labelled_edges={(a, b): None, (c, d): None})
device = ccgd.UndirectedGraphDevice(device_graph=device_graph)
assert device.qubit_set() == {a, b, c, d}
def test_qid_pairs():
a, b, c, d = cirq.LineQubit.range(4)
device_graph = ccgd.UndirectedHypergraph(labelled_edges={(a, b): None, (c, d): None})
device = ccgd.UndirectedGraphDevice(device_graph=device_graph)
assert len(device.qid_pairs()) == 2
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import time
import random
from Crypto.PublicKey import RSA
PrivateRing = namedtuple('PrivateRing', 'timestamp key_id pub_key priv_key')
PublicRing = namedtuple('PublicRing', 'timestamp key_id pub_key owner_trust user_name key_legit')
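
# keyring files are named '<type>_keyring.txt'; records are separated by '@'
# and fields within a record by '|'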
def import_keyring(typeOfKeyRing):
ring = list()
try:
with open(typeOfKeyRing + '_keyring.txt', 'r') as r:
data = r.read()
data = data.rstrip().split('@')
for line in data:
if not line:
continue
line = line.rstrip().split('|')
if typeOfKeyRing == 'priv':
ring.append(PrivateRing(*line))
elif typeOfKeyRing == 'pub':
ring.append(PublicRing(*line))
except IOError:
new_file = open(typeOfKeyRing + '_keyring.txt', 'w')
new_file.close()
return ring
def export_keyring(ring, typeOfKeyRing):
    with open(typeOfKeyRing + '_keyring.txt', 'w') as w:
        for key in ring:
            # fields are '|'-separated; each record ends with '@'
            # (str() coerces non-string fields such as numeric timestamps)
            w.write('|'.join(str(attr) for attr in key) + '@')
def add_to_keyring(ring, typeOfKeyRing, attributes):
if typeOfKeyRing == 'priv':
ring.append(PrivateRing(*attributes))
else:
ring.append(PublicRing(*attributes))
return ring
def find_pubkey_in_ring(ring, id=None, whose=None):
if id:
result = [x.pub_key for x in ring if x.key_id == id]
if len(result) == 0:
return None
else:
return RSA.importKey(result[0])
elif whose:
result = [x.pub_key for x in ring if x.user_name == whose]
if len(result) == 0:
return None
else:
            print(len(result))
ind = random.randint(0, len(result) - 1)
            print(ind)
return RSA.importKey(result[ind])
def find_privkey_in_ring(ring, id):
    result = [x.priv_key for x in ring if x.key_id == id]
    if result:
        return RSA.importKey(result[0])
    return None
# randomly choose a key from the private keyring to encrypt with
def choose_randomly_enc_key(ring):
    ind = random.randint(0, len(ring) - 1)
    return RSA.importKey(ring[ind].priv_key), ring[ind].key_id
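
# DB rows are expected as (created_at datetime, key_id, pub_key, user_name, legitimacy);
# owner_trust is defaulted to 0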
def parse_keys_from_db(data):
ring = list()
for i in data:
tmstmp = time.mktime(i[0].timetuple())
id = i[1]
pub_key = str(i[2])
usr_name = i[3]
trust = i[4]
        ring.append(PublicRing(tmstmp, id, pub_key, 0, usr_name, trust))
return ring
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(OakcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0]
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generate(1)
node.generate(101)
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Oakcoins
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
assert_equal(address_group[0][1], 50)
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" account has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
node.sendfrom("", common_address, fee)
accounts = ["a", "b", "c", "d", "e"]
amount_to_send = 1.0
account_addresses = dict()
for account in accounts:
address = node.getaccountaddress(account)
account_addresses[account] = address
node.getnewaddress(account)
assert_equal(node.getaccount(address), account)
assert(address in node.getaddressesbyaccount(account))
node.sendfrom("", address, amount_to_send)
node.generate(1)
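        # pass 1 coin around the ring of accounts: a -> b -> c -> d -> e -> a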
for i in range(len(accounts)):
from_account = accounts[i]
to_account = accounts[(i+1) % len(accounts)]
to_address = account_addresses[to_account]
node.sendfrom(from_account, to_address, amount_to_send)
node.generate(1)
for account in accounts:
address = node.getaccountaddress(account)
assert(address != account_addresses[account])
assert_equal(node.getreceivedbyaccount(account), 2)
node.move(account, "", node.getbalance(account))
node.generate(101)
expected_account_balances = {"": 5200}
for account in accounts:
expected_account_balances[account] = 0
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 5200)
for account in accounts:
address = node.getaccountaddress("")
node.setaccount(address, account)
assert(address in node.getaddressesbyaccount(account))
assert(address not in node.getaddressesbyaccount(""))
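        # create a 5-of-10 multisig address per account and fund it with 50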
for account in accounts:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, account)
node.sendfrom("", multisig_address, 50)
node.generate(101)
for account in accounts:
assert_equal(node.getbalance(account), 50)
if __name__ == '__main__':
WalletAccountsTest().main()
|