text
stringlengths 2
999k
|
|---|
import os
import bpy
import bpy.utils.previews
from bpy.app.handlers import persistent
from . import internals
from . import preferences
from . import qcd_move_widget
from . import qcd_operators
from . import ui
# Keymap bookkeeping: (KeyMap, KeyMapItem) pairs are appended at registration
# time so the exact same items can be removed again on unregistration.
addon_qcd_keymaps = []
addon_qcd_view_hotkey_keymaps = []
addon_qcd_view_edit_mode_hotkey_keymaps = []

# Operator classes (un)registered as a group by register_qcd()/unregister_qcd().
qcd_classes = (
    qcd_move_widget.QCDMoveWidget,
    qcd_operators.MoveToQCDSlot,
    qcd_operators.ViewQCDSlot,
    qcd_operators.ViewMoveQCDSlot,
    qcd_operators.RenumerateQCDSlots,
    )
@persistent
def save_internal_data(dummy):
    """save_pre handler: serialize QCD slot state into the scene before saving."""
    cm = bpy.context.scene.collection_manager
    cm.qcd_slots_blend_data = internals.qcd_slots.get_data_for_blend()
@persistent
def load_internal_data(dummy):
    """load_post handler: restore QCD slot state stored in the scene, if any."""
    cm = bpy.context.scene.collection_manager
    blend_data = cm.qcd_slots_blend_data
    if blend_data:
        internals.qcd_slots.load_blend_data(blend_data)
def register_qcd():
    """Register all QCD functionality: operators, icons, keymaps, handlers, UI."""
    for cls in qcd_classes:
        bpy.utils.register_class(cls)

    # Load the icon previews used to draw the QCD slot buttons.
    pcoll = bpy.utils.previews.new()
    icons_dir = os.path.join(os.path.dirname(__file__), "icons")
    pcoll.load("active_icon_base", os.path.join(icons_dir, "minus.png"), 'IMAGE', True)
    pcoll.load("active_icon_text", os.path.join(icons_dir, "minus.png"), 'IMAGE', True)
    pcoll.load("active_icon_text_sel", os.path.join(icons_dir, "minus.png"), 'IMAGE', True)
    ui.preview_collections["icons"] = pcoll

    # Hotkey that opens the QCD move widget.
    wm = bpy.context.window_manager
    km = wm.keyconfigs.addon.keymaps.new(name='Object Mode')
    kmi = km.keymap_items.new('view3d.qcd_move_widget', 'V', 'PRESS')
    addon_qcd_keymaps.append((km, kmi))

    # Persist/restore QCD slot data across blend-file save/load.
    bpy.app.handlers.save_pre.append(save_internal_data)
    bpy.app.handlers.load_post.append(load_internal_data)

    # Optional hotkey sets, controlled by the add-on preferences.
    prefs = bpy.context.preferences.addons[__package__].preferences
    if prefs.enable_qcd_view_hotkeys:
        register_qcd_view_hotkeys()
    if prefs.enable_qcd_view_edit_mode_hotkeys:
        register_qcd_view_edit_mode_hotkeys()

    bpy.types.VIEW3D_HT_header.append(ui.view3d_header_qcd_slots)
    bpy.types.TOPBAR_HT_upper_bar.append(ui.view_layer_update)
def register_qcd_view_hotkeys():
    """Register number-key hotkeys for viewing QCD slots in object-like modes.

    Slots 1-10 map to the plain number keys and slots 11-20 to Alt+number.
    Shift makes the hotkey toggle the slot instead of switching to it.
    """
    wm = bpy.context.window_manager

    # Build (key, alt, slot) triples: "1"-"10" without Alt, "11"-"20" with Alt.
    number_keys = ("ONE", "TWO", "THREE", "FOUR", "FIVE",
                   "SIX", "SEVEN", "EIGHT", "NINE", "ZERO")
    qcd_hotkeys = []
    for use_alt in (False, True):
        base = 10 if use_alt else 0
        for offset, key_name in enumerate(number_keys, start=1):
            qcd_hotkeys.append((key_name, use_alt, str(base + offset)))

    for key_name, use_alt, slot in qcd_hotkeys:
        for mode in ['Object Mode', 'Pose', 'Weight Paint']:
            # Plain press switches exclusively to the slot.
            km = wm.keyconfigs.addon.keymaps.new(name=mode)
            kmi = km.keymap_items.new('view3d.view_qcd_slot', key_name, 'PRESS', alt=use_alt)
            kmi.properties.slot = slot
            kmi.properties.toggle = False
            addon_qcd_view_hotkey_keymaps.append((km, kmi))

            # Shift+press toggles the slot's visibility.
            km = wm.keyconfigs.addon.keymaps.new(name=mode)
            kmi = km.keymap_items.new('view3d.view_qcd_slot', key_name, 'PRESS', shift=True, alt=use_alt)
            kmi.properties.slot = slot
            kmi.properties.toggle = True
            addon_qcd_view_hotkey_keymaps.append((km, kmi))
def register_qcd_view_edit_mode_hotkeys():
    """Register number-key hotkeys for viewing QCD slots in edit-like modes.

    Slots 1-10 map to the plain number keys and slots 11-20 to Alt+number;
    Shift makes the hotkey toggle the slot. Also re-binds the accent-grave
    mesh select-mode pie menu so it remains reachable.
    """
    wm = bpy.context.window_manager

    # Build (key, alt, slot) triples: "1"-"10" without Alt, "11"-"20" with Alt.
    number_keys = ("ONE", "TWO", "THREE", "FOUR", "FIVE",
                   "SIX", "SEVEN", "EIGHT", "NINE", "ZERO")
    qcd_hotkeys = []
    for use_alt in (False, True):
        base = 10 if use_alt else 0
        for offset, key_name in enumerate(number_keys, start=1):
            qcd_hotkeys.append((key_name, use_alt, str(base + offset)))

    edit_modes = ["Mesh", "Curve", "Armature", "Metaball", "Lattice",
                  "Grease Pencil Stroke Edit Mode"]
    for mode in edit_modes:
        for key_name, use_alt, slot in qcd_hotkeys:
            # Plain press switches exclusively to the slot.
            km = wm.keyconfigs.addon.keymaps.new(name=mode)
            kmi = km.keymap_items.new('view3d.view_qcd_slot', key_name, 'PRESS', alt=use_alt)
            kmi.properties.slot = slot
            kmi.properties.toggle = False
            addon_qcd_view_edit_mode_hotkey_keymaps.append((km, kmi))

            # Shift+press toggles the slot's visibility.
            km = wm.keyconfigs.addon.keymaps.new(name=mode)
            kmi = km.keymap_items.new('view3d.view_qcd_slot', key_name, 'PRESS', shift=True, alt=use_alt)
            kmi.properties.slot = slot
            kmi.properties.toggle = True
            addon_qcd_view_edit_mode_hotkey_keymaps.append((km, kmi))

    # Keep the mesh select-mode pie menu reachable on ACCENT_GRAVE.
    km = wm.keyconfigs.addon.keymaps.new(name="Mesh")
    kmi = km.keymap_items.new('wm.call_menu', 'ACCENT_GRAVE', 'PRESS')
    kmi.properties.name = "VIEW3D_MT_edit_mesh_select_mode"
    addon_qcd_view_edit_mode_hotkey_keymaps.append((km, kmi))
def unregister_qcd():
    """Tear down everything register_qcd() set up."""
    bpy.types.VIEW3D_HT_header.remove(ui.view3d_header_qcd_slots)
    bpy.types.TOPBAR_HT_upper_bar.remove(ui.view_layer_update)

    for cls in qcd_classes:
        bpy.utils.unregister_class(cls)

    bpy.app.handlers.save_pre.remove(save_internal_data)
    bpy.app.handlers.load_post.remove(load_internal_data)

    # Release all icon previews and reset cached theme state in the UI module.
    for pcoll in ui.preview_collections.values():
        bpy.utils.previews.remove(pcoll)
    ui.preview_collections.clear()
    ui.last_icon_theme_text = None
    ui.last_icon_theme_text_sel = None

    # remove keymaps when qcd is deactivated
    for km, kmi in addon_qcd_keymaps:
        km.keymap_items.remove(kmi)
    addon_qcd_keymaps.clear()

    unregister_qcd_view_hotkeys()
    unregister_qcd_view_edit_mode_hotkeys()
def unregister_qcd_view_hotkeys():
    """Remove the keymap items added by register_qcd_view_hotkeys()."""
    for keymap, keymap_item in addon_qcd_view_hotkey_keymaps:
        keymap.keymap_items.remove(keymap_item)
    addon_qcd_view_hotkey_keymaps.clear()
def unregister_qcd_view_edit_mode_hotkeys():
    """Remove the keymap items added by register_qcd_view_edit_mode_hotkeys()."""
    for keymap, keymap_item in addon_qcd_view_edit_mode_hotkey_keymaps:
        keymap.keymap_items.remove(keymap_item)
    addon_qcd_view_edit_mode_hotkey_keymaps.clear()
|
from import_export.admin import ImportExportModelAdmin
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import *
class CustomUserAdmin(UserAdmin):
    """Admin for the custom user model, wired to the custom create/change forms."""
    add_form = CustomUserCreationForm
    form = CustomUserChangeForm
    model = CustomUser
    # Only email and username are shown in the changelist.
    list_display = ['email', 'username',]
class PDFFormAdmin(ImportExportModelAdmin):
    """Import/export-enabled admin for PDFForm with default options."""
    pass
class PDFFormFieldAdmin(ImportExportModelAdmin):
    """Import/export-enabled admin for PDFFormField."""
    model = PDFFormField
    list_display = ["pdf", "field", "field_page_number", "field_index"]
class UserProfileAdmin(ImportExportModelAdmin):
    """Import/export-enabled admin for UserProfile."""
    model = UserProfile
    list_display = ["user", "field", "field_text", "field_date"]
class FieldAdmin(ImportExportModelAdmin):
    """Import/export-enabled admin for Field."""
    model = Field
    list_display = ["id", "field_description", "field_question",
                    "category", "category_order"]
# Model registrations. PDFForm is registered with PDFFormAdmin — the class was
# previously defined but never used, so PDFForm lacked import/export actions.
admin.site.register(CustomUser, CustomUserAdmin)
admin.site.register(PDFForm, PDFFormAdmin)
admin.site.register(Field, FieldAdmin)
admin.site.register(PDFFormField, PDFFormFieldAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(GeneratedPDF)
|
import sys
sys.path.append('/root/csdc3/lib/ablib')
sys.path.append('/root/csdc3/src/logs')
sys.path.append('/root/csdc3/src/logs/config_setup')
from sensor_constants import *
from sensor_manager import SensorManager
import argparse
from time import sleep
def main():
    """Toggle spacecraft power rails via GPIO according to the CLI flags.

    Flags -d/-r/-p default to False (rail off unless requested); -s uses
    ``store_false`` so sensors default ON and passing -s turns them OFF.
    All heater lines are driven OFF unconditionally.
    """
    parser = argparse.ArgumentParser(description="Script to toggle switches", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-d", "--deploy", action="store_true", help="Switch to deploy antenna")
    parser.add_argument("-r", "--radio", action="store_true", help="Turn on radio")
    parser.add_argument("-p", "--payload", action="store_true", help="Turn on payload")
    parser.add_argument("-s", "--sensors", action="store_false", help="Turn off sensors")
    args = parser.parse_args()
    deploy = args.deploy
    radio = args.radio
    payload = args.payload
    sensors = args.sensors

    # Main power rails follow the CLI flags directly.
    SensorManager.gpio_output(RADIO_EN_GPIO, radio)
    SensorManager.gpio_output(SENSORS_EN_GPIO, sensors)
    SensorManager.gpio_output(PAYLOAD_EN_GPIO, payload)

    # Force every heater line off.
    SensorManager.gpio_output(PAYLOAD_HTR_A_GPIO, OFF)
    SensorManager.gpio_output(PAYLOAD_HTR_B_GPIO, OFF)
    SensorManager.gpio_output(PSS_HTR_EN_1_GPIO, OFF)
    SensorManager.gpio_output(PSS_HTR_EN_2_GPIO, OFF)
    SensorManager.gpio_output(PSS_HTR_EN_3_GPIO, OFF)
    SensorManager.gpio_output(PSS_HTR_EN_4_GPIO, OFF)
    SensorManager.gpio_output(PSS_HTR_MUX_SEL_GPIO, OFF)
    # NOTE(review): PSS_HTR_EN_1_GPIO is written OFF a second time here —
    # possibly a copy/paste slip for another heater line; confirm intent.
    SensorManager.gpio_output(PSS_HTR_EN_1_GPIO, OFF)

    if deploy:
        # Pulse deployment switch A for 10 s, then drive both switches off.
        SensorManager.gpio_output(DEPLOYMENT_SW_A_GPIO, deploy)
        sleep(10)
        SensorManager.gpio_output(DEPLOYMENT_SW_A_GPIO, OFF)
        SensorManager.gpio_output(DEPLOYMENT_SW_B_GPIO, OFF)
    # Disabled experiment left for reference (time.sleep here would need
    # ``import time``; only ``from time import sleep`` is imported).
    """
    time.sleep(2)
    SensorManager.gpio_output(SENSORS_EN_GPIO, ON)
    SensorManager.gpio_output(RADIO_EN_GPIO, OFF)
    """


if __name__ == "__main__":
    main()
|
# Copyright(c) 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import shutil
import tempfile
import collections
__work_dir__ = None
def get_work_dir():
    """Return the packager's temporary work directory, creating it lazily.

    The directory is cached in the module-level ``__work_dir__`` so repeated
    calls reuse the same path.
    """
    global __work_dir__
    if not __work_dir__:
        # Created under the current directory so relative paths stay short.
        __work_dir__ = tempfile.mkdtemp(prefix="packager_work_", dir=".")
    return __work_dir__
def delete_work_dir():
    """Delete the temporary work directory, if one was created.

    Also resets the module-level cache so a subsequent get_work_dir() call
    creates a fresh directory instead of returning the now-deleted path.
    """
    global __work_dir__
    if __work_dir__:
        shutil.rmtree(__work_dir__)
        __work_dir__ = None
"""
Utility function to convert input to a native type
"""
def convert_to_native_type(val):
try:
if isinstance(val, str) and val.startswith('0x'):
val = int(val, 16)
else:
val = int(val)
return val
except ValueError:
pass
try:
val = float(val)
return val
except ValueError:
pass
try:
val = str(val)
return val
except ValueError:
raise Exception("Cannot convert passed argument to native type!")
|
# Copyright (c) 2015-2019 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import absolute_import
import atexit
import sys
import ctypes
from time import gmtime
# 2-3 compatibility
try:
import builtins # __builtins__ for python2
except ImportError:
pass
def range(*args):
    """ A Python2 and Python3 Compatible Range Generator """
    # Deliberately shadows the builtin: on Python 2 this returns the lazy
    # xrange; on Python 3, xrange raises NameError and we fall back to the
    # builtin range via the ``builtins`` module imported above.
    try:
        return xrange(*args)
    except NameError:
        return builtins.range(*args)
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # Py2/py3-compatible trick (same shape as six.with_metaclass): a
    # temporary metaclass that, when the subclass statement executes,
    # replaces itself with ``meta(name, bases, d)`` so the real metaclass
    # sees the intended bases rather than the temporary class.
    class metaclass(type):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)

        @classmethod
        def __prepare__(cls, name, this_bases):
            return meta.__prepare__(name, bases)
    return type.__new__(metaclass, 'temporary_class', (), {})
# Python 3 has no ``long``; alias it to int so code can refer to ``long``
# uniformly on both interpreter versions.
try:
    long = long
except NameError:
    long = int
def cstr(arg):
    """Encode a text argument to bytes for the C API.

    bytes and None pass through unchanged; str is encoded with 'charmap'.
    """
    if arg is None or isinstance(arg, bytes):
        return arg
    return arg.encode('charmap')
def pyNativeStr(arg):
    """Return *arg* as a native str, decoding bytes with 'charmap' if needed."""
    return arg if isinstance(arg, str) else arg.decode('charmap')
# Binary Ninja components
import binaryninja._binaryninjacore as core
# __all__ = [
# "enums",
# "databuffer",
# "filemetadata",
# "fileaccessor",
# "binaryview",
# "transform",
# "architecture",
# "basicblock",
# "function",
# "log",
# "lowlevelil",
# "mediumlevelil",
# "types",
# "functionrecognizer",
# "update",
# "plugin",
# "callingconvention",
# "platform",
# "demangle",
# "mainthread",
# "interaction",
# "lineardisassembly",
# "undoaction",
# "highlight",
# "scriptingprovider",
# "pluginmanager",
# "setting",
# "metadata",
# "flowgraph",
# ]
from binaryninja.enums import *
from binaryninja.databuffer import *
from binaryninja.filemetadata import *
from binaryninja.fileaccessor import *
from binaryninja.binaryview import *
from binaryninja.transform import *
from binaryninja.architecture import *
from binaryninja.basicblock import *
from binaryninja.function import *
from binaryninja.log import *
from binaryninja.lowlevelil import *
from binaryninja.mediumlevelil import *
from binaryninja.types import *
from binaryninja.functionrecognizer import *
from binaryninja.update import *
from binaryninja.plugin import *
from binaryninja.callingconvention import *
from binaryninja.platform import *
from binaryninja.demangle import *
from binaryninja.mainthread import *
from binaryninja.interaction import *
from binaryninja.lineardisassembly import *
from binaryninja.undoaction import *
from binaryninja.highlight import *
from binaryninja.scriptingprovider import *
from binaryninja.downloadprovider import *
from binaryninja.pluginmanager import *
from binaryninja.settings import *
from binaryninja.metadata import *
from binaryninja.flowgraph import *
from binaryninja.datarender import *
def shutdown():
    """
    ``shutdown`` cleanly shuts down the core, stopping all workers and closing all log files.
    """
    core.BNShutdown()


# Ensure the core is shut down when the interpreter exits.
atexit.register(shutdown)
def get_unique_identifier():
    """Return a unique identifier string from the core."""
    return core.BNGetUniqueIdentifierString()


def get_install_directory():
    """
    ``get_install_directory`` returns a string pointing to the installed binary currently running

    ..warning:: ONLY for use within the Binary Ninja UI, behavior is undefined and unreliable if run headlessly
    """
    return core.BNGetInstallDirectory()
_plugin_api_name = "python2"
class PluginManagerLoadPluginCallback(object):
    """Callback for BNLoadPluginForApi("python2", ...), dynamically loads python plugins."""

    def __init__(self):
        # ctypes thunk kept alive on the instance so the core can call back
        # into _load_plugin with signature bool(const char*, const char*, void*).
        self.cb = ctypes.CFUNCTYPE(
            ctypes.c_bool,
            ctypes.c_char_p,
            ctypes.c_char_p,
            ctypes.c_void_p)(self._load_plugin)

    def _load_plugin(self, repo_path, plugin_path, ctx):
        """Import the named plugin from a repository; return True on success."""
        try:
            repo = RepositoryManager()[repo_path]
            plugin = repo[plugin_path]
            # Only plugins targeting this API ("python2") may be loaded here.
            if plugin.api != _plugin_api_name:
                raise ValueError("Plugin API name is not " + _plugin_api_name)
            if not plugin.installed:
                plugin.installed = True
            # Make the repository importable, then import the plugin module.
            if repo.full_path not in sys.path:
                sys.path.append(repo.full_path)
            __import__(plugin.path)
            log_info("Successfully loaded plugin: {}/{}: ".format(repo_path, plugin_path))
            return True
        except KeyError:
            log_error("Failed to find python plugin: {}/{}".format(repo_path, plugin_path))
        except ImportError as ie:
            log_error("Failed to import python plugin: {}/{}: {}".format(repo_path, plugin_path, ie))
        return False
load_plugin = PluginManagerLoadPluginCallback()
core.BNRegisterForPluginLoading(_plugin_api_name, load_plugin.cb, 0)
class _DestructionCallbackHandler(object):
    """Registers callbacks so cached Python wrappers are dropped when the
    core destroys the underlying objects."""

    def __init__(self):
        self._cb = core.BNObjectDestructionCallbacks()
        self._cb.context = 0
        # Wrap bound methods in the ctypes function types the struct expects;
        # references are held on self._cb so they are not garbage collected.
        self._cb.destructBinaryView = self._cb.destructBinaryView.__class__(self.destruct_binary_view)
        self._cb.destructFileMetadata = self._cb.destructFileMetadata.__class__(self.destruct_file_metadata)
        self._cb.destructFunction = self._cb.destructFunction.__class__(self.destruct_function)
        core.BNRegisterObjectDestructionCallbacks(self._cb)

    def destruct_binary_view(self, ctxt, view):
        # Drop the cached Python wrapper for a destroyed BinaryView.
        BinaryView._unregister(view)

    def destruct_file_metadata(self, ctxt, f):
        FileMetadata._unregister(f)

    def destruct_function(self, ctxt, func):
        Function._unregister(func)
# Guard so plugin initialization runs at most once per process.
_plugin_init = False


def _init_plugins():
    """Initialize core/user/repo plugins once and validate the license."""
    global _plugin_init
    if not _plugin_init:
        _plugin_init = True
        core.BNInitCorePlugins()
        core.BNInitUserPlugins()
        core.BNInitRepoPlugins()
    # License check runs on every call, not just the first.
    if not core.BNIsLicenseValidated():
        raise RuntimeError("License is not valid. Please supply a valid license.")
_destruct_callbacks = _DestructionCallbackHandler()
def bundled_plugin_path():
    """
    ``bundled_plugin_path`` returns a string containing the current plugin path inside the `install path <https://docs.binary.ninja/getting-started.html#binary-path>`_

    :return: current bundled plugin path
    :rtype: str, or None on failure
    """
    return core.BNGetBundledPluginDirectory()


def user_plugin_path():
    """
    ``user_plugin_path`` returns a string containing the current plugin path inside the `user directory <https://docs.binary.ninja/getting-started.html#user-folder>`_

    :return: current user plugin path
    :rtype: str, or None on failure
    """
    return core.BNGetUserPluginDirectory()


def core_version():
    """
    ``core_version`` returns a string containing the current version

    :return: current version
    :rtype: str, or None on failure
    """
    return core.BNGetVersionString()
def core_build_id():
    """
    ``core_build_id`` returns a string containing the current build id

    :return: current build id
    :rtype: str, or None on failure
    """
    # Bug fix: the result was previously computed but never returned,
    # so this function always returned None.
    return core.BNGetBuildId()
def core_serial():
    """
    ``core_serial`` returns a string containing the current serial number

    :return: current serial
    :rtype: str, or None on failure
    """
    return core.BNGetSerialNumber()


def core_expires():
    '''License Expiration'''
    # Returned as a time.struct_time in UTC.
    return gmtime(core.BNGetLicenseExpirationTime())


def core_product():
    '''Product string from the license file'''
    return core.BNGetProduct()


def core_product_type():
    '''Product type from the license file'''
    return core.BNGetProductType()


def core_license_count():
    '''License count from the license file'''
    return core.BNGetLicenseCount()


def core_ui_enabled():
    '''Indicates that a UI exists and the UI has invoked BNInitUI'''
    return core.BNIsUIEnabled()
|
#!/usr/bin/env python3
import asyncio
import json
import random
# the bulk of our AI
# Movement commands understood by the game server (newline-terminated bytes).
LEFT = b'Left\n'
RIGHT = b'Right\n'
FORWARD = b'Forward\n'


def decision(my_id, world) -> bytes:
    """Pick one of the three movement commands uniformly at random.

    ``my_id`` and ``world`` are accepted for interface compatibility with
    smarter AIs but are ignored by this random strategy.
    """
    options = [LEFT, RIGHT, FORWARD]
    return random.choice(options)
async def next_state(reader: asyncio.StreamReader) -> dict:
    """Read one newline-terminated JSON message from the server and parse it."""
    raw = await reader.readline()
    return json.loads(raw.decode("utf-8"))
async def loop(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, my_id: int):
    """Main game loop: while the game is playing, send one decision per state."""
    state = await next_state(reader)
    while state["state"] == "playing":
        writer.write(decision(my_id, state["map"]))
        await writer.drain()
        state = await next_state(reader)
async def make_connection(host: str, port: int):
    """Connect to the game server, identify ourselves, and play one game."""
    print("Connecting...")
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(b'ai_random.py\n')  # say who we are
    await writer.drain()
    print("Waiting for game to start...")
    data = await next_state(reader)
    my_id = data["id"]
    # The first message should announce the game start and our assigned id.
    assert data["state"] == "start"
    print("Game started!")
    await loop(reader, writer, my_id)
# just keep on doing it forever: reconnect after every game or error.
# Ctrl-C exits; any other failure waits 10 seconds before retrying.
while True:
    try:
        asyncio.run(make_connection('192.168.121.144', 3001))
    except KeyboardInterrupt:
        break
    except Exception:
        # Bug fix: ``asyncio.wait(asyncio.sleep(10))`` never awaited the
        # sleep (asyncio.wait returns a coroutine that was discarded, and it
        # expects tasks, not a bare coroutine), so there was no delay at all.
        # The bare ``except:`` is also narrowed so SystemExit still propagates.
        asyncio.run(asyncio.sleep(10))
|
# Generated by Django 3.1 on 2020-09-25 12:31
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``start`` and ``end`` fields from the workday model."""

    dependencies = [
        ('worker', '0014_auto_20200925_1411'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='workday',
            name='end',
        ),
        migrations.RemoveField(
            model_name='workday',
            name='start',
        ),
    ]
|
#!/usr/bin/env python3
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Float, DateTime, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Order(Base):
    """A locally persisted exchange order."""
    __tablename__ = "orders"

    id = Column(Integer, primary_key=True)
    currency = Column(String)
    price = Column(Float)
    size = Column(Float)
    cbpro_order_id = Column(String)  # presumably the id returned by the Coinbase Pro API — confirm
    created_at = Column(DateTime)
class Withdrawal(Base):
    """A locally persisted crypto withdrawal."""
    __tablename__ = "withdrawals"

    id = Column(Integer, primary_key=True)
    currency = Column(String)
    amount = Column(Float)
    crypto_address = Column(String)  # destination address for the withdrawal
    cbpro_withdrawal_id = Column(String)
class Deposit(Base):
    """A locally persisted fiat/crypto deposit."""
    __tablename__ = "deposits"

    id = Column(Integer, primary_key=True)
    currency = Column(String)
    amount = Column(Float)
    payment_method_id = Column(String)
    payout_at = Column(DateTime)  # presumably when the deposit settles — confirm against API docs
    cbpro_deposit_id = Column(String)
def get_session(engine):
    """Create all tables for ``Base`` and return a fresh SQLAlchemy session.

    ``engine`` is a database URL string (parameter name kept for backward
    compatibility); it is converted to an Engine internally.
    """
    db_engine = create_engine(engine)
    Base.metadata.create_all(db_engine)
    session_factory = sessionmaker(bind=db_engine)
    return session_factory()
|
import pytest
import numpy as np
import pandas as pd
import os
from pathlib import Path
from pandas.testing import assert_series_equal
from obsidiantools.api import Vault
# NOTE: run the tests from the project dir.
WKD = Path(os.getcwd())
@pytest.fixture
def expected_metadata_dict():
    """Expected column data for Vault.get_note_metadata() on the vault stub.

    Nonexistent notes (linked to but having no file) get NaN filepaths and
    NaN wikilink/embedded-file counts.  Uses ``np.nan`` rather than the
    ``np.NaN`` alias, which was removed in NumPy 2.0.
    """
    return {
        'rel_filepath': {'Sussudio': Path('Sussudio.md'),
                         'Brevissimus moenia': Path('lipsum/Brevissimus moenia.md'),
                         'Ne fuit': Path('lipsum/Ne fuit.md'),
                         'Alimenta': Path('lipsum/Alimenta.md'),
                         'Vulnera ubera': Path('lipsum/Vulnera ubera.md'),
                         'Causam mihi': Path('lipsum/Causam mihi.md'),
                         'Tarpeia': np.nan,
                         'Caelum': np.nan,
                         'Vita': np.nan,
                         'Aras Teucras': np.nan,
                         'Manus': np.nan,
                         'Bacchus': np.nan,
                         'Amor': np.nan,
                         'Virtus': np.nan,
                         'Tydides': np.nan,
                         'Dives': np.nan,
                         'Aetna': np.nan},
        # abs_filepath would be here
        'note_exists': {'Sussudio': True,
                        'Brevissimus moenia': True,
                        'Ne fuit': True,
                        'Alimenta': True,
                        'Vulnera ubera': True,
                        'Causam mihi': True,
                        'Tarpeia': False,
                        'Caelum': False,
                        'Vita': False,
                        'Aras Teucras': False,
                        'Manus': False,
                        'Bacchus': False,
                        'Amor': False,
                        'Virtus': False,
                        'Tydides': False,
                        'Dives': False,
                        'Aetna': False},
        'n_backlinks': {'Sussudio': 0,
                        'Brevissimus moenia': 1,
                        'Ne fuit': 2,
                        'Alimenta': 0,
                        'Vulnera ubera': 0,
                        'Causam mihi': 1,
                        'Tarpeia': 3,
                        'Caelum': 3,
                        'Vita': 3,
                        'Aras Teucras': 1,
                        'Manus': 3,
                        'Bacchus': 5,
                        'Amor': 2,
                        'Virtus': 1,
                        'Tydides': 1,
                        'Dives': 1,
                        'Aetna': 1},
        'n_wikilinks': {'Sussudio': 0.0,
                        'Brevissimus moenia': 3.0,
                        'Ne fuit': 6.0,
                        'Alimenta': 12.0,
                        'Vulnera ubera': 3.0,
                        'Causam mihi': 4.0,
                        'Tarpeia': np.nan,
                        'Caelum': np.nan,
                        'Vita': np.nan,
                        'Aras Teucras': np.nan,
                        'Manus': np.nan,
                        'Bacchus': np.nan,
                        'Amor': np.nan,
                        'Virtus': np.nan,
                        'Tydides': np.nan,
                        'Dives': np.nan,
                        'Aetna': np.nan},
        'n_embedded_files': {'Sussudio': 2.0,
                             'Brevissimus moenia': 0.0,
                             'Ne fuit': 0.0,
                             'Alimenta': 0.0,
                             'Vulnera ubera': 0.0,
                             'Causam mihi': 0.0,
                             'Tarpeia': np.nan,
                             'Caelum': np.nan,
                             'Vita': np.nan,
                             'Aras Teucras': np.nan,
                             'Manus': np.nan,
                             'Bacchus': np.nan,
                             'Amor': np.nan,
                             'Virtus': np.nan,
                             'Tydides': np.nan,
                             'Dives': np.nan,
                             'Aetna': np.nan}
    }
@pytest.fixture
def expected_embedded_files_index():
    """Embedded (transcluded) files per existing note in the vault stub."""
    return {'Sussudio': ['Sussudio.mp3', '1999.flac'],
            'Brevissimus moenia': [],
            'Ne fuit': [],
            'Alimenta': [],
            'Vulnera ubera': [],
            'Causam mihi': []}
@pytest.fixture
def expected_front_matter_index():
    """YAML front matter per existing note; notes without front matter map to {}."""
    return {'Sussudio': {'title': 'Sussudio',
                         'artist': 'Phil Collins',
                         'category': 'music',
                         'year': 1985,
                         'url': 'https://www.discogs.com/Phil-Collins-Sussudio/master/106239',
                         'references': [[['American Psycho (film)']], 'Polka Party!'],
                         'chart_peaks': [{'US': 1}, {'UK': 12}]},
            'Brevissimus moenia': {},
            'Ne fuit': {},
            'Alimenta': {},
            'Vulnera ubera': {},
            'Causam mihi': {'title': 'Causam mihi',
                            'author': 'Ovid',
                            'category': 'literature',
                            'year': 8,
                            'language': 'la'}}
@pytest.fixture
def expected_md_links_index():
    """Markdown-style (non-wikilink) URLs per existing note."""
    return {'Sussudio': [],
            'Brevissimus moenia': ['http://www.alii.io/',
                                   'http://fronti.com/tumiseris.html'],
            'Ne fuit': ['http://vires.io/',
                        'http://excoquitprotinus.net/quae.html',
                        'http://medullis-me.net/novat',
                        'http://sedibus.io/levemmonstra'],
            'Alimenta': ['http://fugitaer.net/dignus',
                         'http://www.a-iuveni.com/',
                         'http://cibos.net/venulus-redito.html',
                         'http://et-pronus.com/',
                         'http://iuppiter.net/'],
            'Vulnera ubera': [],
            'Causam mihi': []}


@pytest.fixture
def actual_connected_vault():
    """A Vault built from the on-disk stub, with its link graph connected."""
    return Vault(WKD / 'tests/vault-stub').connect()


@pytest.fixture
def actual_metadata_df(actual_connected_vault):
    """Note metadata DataFrame derived from the connected stub vault."""
    return actual_connected_vault.get_note_metadata()
def test_get_metadata_cols(actual_metadata_df):
    """The metadata table has exactly the expected columns, in order."""
    assert isinstance(actual_metadata_df, pd.DataFrame)
    expected_cols = ['rel_filepath', 'abs_filepath',
                     'note_exists',
                     'n_backlinks', 'n_wikilinks',
                     'n_embedded_files',
                     'modified_time']
    assert actual_metadata_df.columns.tolist() == expected_cols


def test_get_metadata_dtypes(actual_metadata_df):
    """Column dtypes: NaNs for nonexistent notes force float count columns."""
    assert actual_metadata_df['rel_filepath'].dtype == 'object'
    assert actual_metadata_df['abs_filepath'].dtype == 'object'
    assert actual_metadata_df['note_exists'].dtype == 'bool'
    assert actual_metadata_df['n_backlinks'].dtype == 'int'
    assert actual_metadata_df['n_wikilinks'].dtype == 'float'
    assert actual_metadata_df['n_embedded_files'].dtype == 'float'
    assert actual_metadata_df['modified_time'].dtype == 'datetime64[ns]'
def test_get_metadata_rel_filepath(actual_metadata_df,
                                   expected_metadata_dict):
    """rel_filepath column matches the expected per-note paths."""
    TEST_COL = 'rel_filepath'
    actual_series = actual_metadata_df[TEST_COL]
    expected_series = (pd.Series(expected_metadata_dict.get(TEST_COL),
                                 name=TEST_COL)
                       .rename_axis('note'))
    assert_series_equal(actual_series, expected_series)


def test_get_metadata_note_exists(actual_metadata_df,
                                  expected_metadata_dict):
    """note_exists column distinguishes files on disk from link-only notes."""
    TEST_COL = 'note_exists'
    actual_series = actual_metadata_df[TEST_COL]
    expected_series = (pd.Series(expected_metadata_dict.get(TEST_COL),
                                 name=TEST_COL)
                       .rename_axis('note'))
    assert_series_equal(actual_series, expected_series)


def test_get_metadata_wikilinks(actual_metadata_df,
                                expected_metadata_dict):
    """n_wikilinks column matches expected counts (NaN for nonexistent notes)."""
    TEST_COL = 'n_wikilinks'
    actual_series = actual_metadata_df[TEST_COL]
    expected_series = (pd.Series(expected_metadata_dict.get(TEST_COL),
                                 name=TEST_COL)
                       .rename_axis('note'))
    assert_series_equal(actual_series, expected_series)


def test_get_metadata_backlinks(actual_metadata_df,
                                expected_metadata_dict):
    """n_backlinks column matches expected counts for every note."""
    TEST_COL = 'n_backlinks'
    actual_series = actual_metadata_df[TEST_COL]
    expected_series = (pd.Series(expected_metadata_dict.get(TEST_COL),
                                 name=TEST_COL)
                       .rename_axis('note'))
    assert_series_equal(actual_series, expected_series)
def test_backlink_and_wikilink_totals_equal(actual_metadata_df):
    # every wikilink is another note's backlink
    assert (actual_metadata_df['n_backlinks'].sum()
            == actual_metadata_df['n_wikilinks'].sum())


def test_backlink_individual_notes(actual_connected_vault):
    """Spot-check backlinks for a few notes and the index's coverage."""
    actual_bl_ix = actual_connected_vault.backlinks_index
    assert isinstance(actual_bl_ix, dict)
    expected_bl_subset = {
        'Sussudio': [],
        'Alimenta': [],
        'Tarpeia': ['Brevissimus moenia', 'Alimenta', 'Vulnera ubera'],
        'Ne fuit': ['Alimenta', 'Causam mihi']
    }
    # Order-insensitive comparison: backlinks are a set-like relationship.
    for k in list(expected_bl_subset.keys()):
        assert set(expected_bl_subset.get(k)) == set(actual_bl_ix.get(k))
    with pytest.raises(ValueError):
        actual_connected_vault.get_backlinks("Note that isn't in vault at all")
    # check that every note is in the backlinks_index
    graph_nodes = [n for n in actual_connected_vault.graph.nodes]
    assert (len(actual_bl_ix)
            == (actual_connected_vault.graph.number_of_nodes()))
    for k in list(expected_bl_subset.keys()):
        assert k in graph_nodes


def test_backlink_counts(actual_connected_vault):
    """Backlink counts include multiplicity (e.g. Alimenta links Bacchus 4x)."""
    expected_bl_count_subset = {
        'Sussudio': {},
        'Alimenta': {},
        'Tarpeia': {'Brevissimus moenia': 1,
                    'Alimenta': 1,
                    'Vulnera ubera': 1},
        'Ne fuit': {'Alimenta': 1,
                    'Causam mihi': 1},
        'Bacchus': {'Ne fuit': 1,
                    'Alimenta': 4}
    }
    for k in list(expected_bl_count_subset.keys()):
        assert (actual_connected_vault.get_backlink_counts(k)
                == expected_bl_count_subset.get(k))
    with pytest.raises(ValueError):
        actual_connected_vault.get_backlink_counts("Note that isn't in vault at all")
def test_wikilink_individual_notes(actual_connected_vault):
    """Wikilinks keep note order and duplicates; only existing notes have them."""
    actual_wl_ix = actual_connected_vault.wikilinks_index
    assert isinstance(actual_wl_ix, dict)
    # these notes exist
    expected_wl_subset = {
        'Sussudio': [],
        'Alimenta': ['Manus', 'Bacchus', 'Amor', 'Ne fuit', 'Virtus',
                     'Brevissimus moenia', 'Tarpeia', 'Tydides', 'Vita',
                     'Bacchus', 'Bacchus', 'Bacchus'],
        'Ne fuit': ['Aras Teucras', 'Manus', 'Bacchus',
                    'Amor', 'Caelum', 'Causam mihi']
    }
    assert (actual_connected_vault.get_wikilinks('Alimenta')
            == expected_wl_subset.get('Alimenta'))
    for k in list(expected_wl_subset.keys()):
        # list - sequence the links appear in notes
        assert expected_wl_subset.get(k) == actual_wl_ix.get(k)
    # Tarpeia is link-only (no file), so asking for its wikilinks must raise.
    with pytest.raises(ValueError):
        actual_connected_vault.get_wikilinks('Tarpeia')
    # check that every existing note (file) has wikilink info
    assert len(actual_wl_ix) == len(actual_connected_vault.file_index)
    for k in list(actual_wl_ix.keys()):
        assert isinstance(actual_connected_vault.file_index.get(k),
                          Path)


def test_nonexistent_notes(actual_metadata_df and None or actual_connected_vault, actual_metadata_df):
    """Nonexistent notes agree between the vault attribute and the metadata df."""
    expected_non_e_notes = ['Tarpeia', 'Caelum', 'Vita', 'Aras Teucras',
                            'Manus', 'Bacchus', 'Amor', 'Virtus',
                            'Tydides', 'Dives', 'Aetna']
    assert isinstance(actual_connected_vault.nonexistent_notes, list)
    assert (set(actual_connected_vault.nonexistent_notes)
            == set(expected_non_e_notes))
    assert (set(actual_metadata_df.loc[~actual_metadata_df['note_exists'], :]
                .index.tolist())
            == set(expected_non_e_notes))


def test_isolated_notes(actual_connected_vault):
    """Isolated notes have neither backlinks nor wikilinks."""
    expected_isol_notes = ['Sussudio']
    assert isinstance(actual_connected_vault.isolated_notes, list)
    assert (set(actual_connected_vault.isolated_notes)
            == set(expected_isol_notes))
    # isolated notes can't have backlinks
    for n in actual_connected_vault.isolated_notes:
        assert actual_connected_vault.get_backlink_counts(n) == {}
    # isolated notes can't have wikilinks
    for n in actual_connected_vault.isolated_notes:
        assert actual_connected_vault.get_wikilinks(n) == []
def test_front_matter_index(
        actual_connected_vault, expected_front_matter_index):
    """front_matter_index matches the expected per-note front matter."""
    assert isinstance(actual_connected_vault.front_matter_index, dict)
    actual_front_matter_index = actual_connected_vault.front_matter_index
    assert actual_front_matter_index == expected_front_matter_index


def test_front_matter_sussudio(actual_connected_vault):
    """get_front_matter preserves nested lists/dicts from the YAML header."""
    expected_fm = {'title': 'Sussudio',
                   'artist': 'Phil Collins',
                   'category': 'music',
                   'year': 1985,
                   'url': 'https://www.discogs.com/Phil-Collins-Sussudio/master/106239',
                   'references': [[['American Psycho (film)']], 'Polka Party!'],
                   'chart_peaks': [{'US': 1}, {'UK': 12}]}
    actual_fm = actual_connected_vault.get_front_matter('Sussudio')
    assert actual_fm == expected_fm


def test_embedded_files_sussudio(actual_connected_vault):
    """Embedded files are reported in the order they appear in the note."""
    expected_files = ['Sussudio.mp3', '1999.flac']
    actual_files = actual_connected_vault.get_embedded_files('Sussudio')
    assert actual_files == expected_files


def test_nodes_gte_files(actual_connected_vault):
    """Graph nodes (wikilink index) are a superset of on-disk note files."""
    act_f_len = len(actual_connected_vault.file_index)
    act_n_len = len(actual_connected_vault.wikilinks_index)
    assert act_n_len >= act_f_len
def test_embedded_files_index(
actual_connected_vault, expected_embedded_files_index):
actual_files_ix = actual_connected_vault.embedded_files_index
assert actual_files_ix == expected_embedded_files_index
def test_md_links_index(
actual_connected_vault, expected_md_links_index):
actual_md_links_ix = actual_connected_vault.md_links_index
assert actual_md_links_ix == expected_md_links_index
def test_unique_md_links(
        actual_connected_vault, expected_md_links_index):
    """With no duplicate links anywhere in the stub vault, the unique
    index is identical to the full markdown-links index."""
    unique_index = actual_connected_vault._get_unique_md_links_index()
    assert unique_index == expected_md_links_index
def test_md_links_individual_notes(actual_connected_vault):
    """'Ne fuit' contains exactly these markdown links, in document order."""
    expected_md_links = [
        'http://vires.io/',
        'http://excoquitprotinus.net/quae.html',
        'http://medullis-me.net/novat',
        'http://sedibus.io/levemmonstra',
    ]
    assert actual_connected_vault.get_md_links('Ne fuit') == expected_md_links
def test_md_links_not_existing(actual_connected_vault):
    """Requesting links of a note that is not in the vault raises."""
    missing_note = 'Tarpeia'
    with pytest.raises(ValueError):
        actual_connected_vault.get_md_links(missing_note)
def test_front_matter_not_existing(actual_connected_vault):
    """Requesting front matter of an unknown note raises."""
    missing_note = 'Tarpeia'
    with pytest.raises(ValueError):
        actual_connected_vault.get_front_matter(missing_note)
def test_embedded_notes_not_existing(actual_connected_vault):
    """Requesting embedded files of an unknown note raises."""
    missing_note = 'Tarpeia'
    with pytest.raises(ValueError):
        actual_connected_vault.get_embedded_files(missing_note)
|
"""
Use this script to dump the event data out to the terminal. It needs to know
what the sock_dir is.
This script is a generic tool to test event output
"""
import optparse
import os
import pprint
import time
import salt.utils.event
def parse():
    """
    Parse the script command line inputs.

    :return: dict of options with ``sock_dir`` expanded to include the
        node type, and ``id`` resolved from --id, the positional
        argument (minion mode), or the node type itself.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        "-s",
        "--sock-dir",
        dest="sock_dir",
        default="/var/run/salt",
        help=(
            "Statically define the directory holding the salt unix "
            "sockets for communication"
        ),
    )
    parser.add_option(
        "-n",
        "--node",
        dest="node",
        default="master",
        help=(
            "State if this listener will attach to a master or a "
            'minion daemon, pass "master" or "minion"'
        ),
    )
    parser.add_option(
        "-f",
        "--func_count",
        default="",
        help=(
            "Return a count of the number of minions which have "
            "replied to a job with a given func."
        ),
    )
    parser.add_option(
        "-i",
        "--id",
        default="",
        help="If connecting to a live master or minion, pass in the id",
    )
    parser.add_option(
        "-t",
        "--transport",
        default="zeromq",
        # The closing parenthesis was missing from this help text.
        help="Transport to use. (Default: 'zeromq')",
    )
    options, args = parser.parse_args()
    # Keep every option that was actually set (all defaults are non-None).
    opts = {k: v for k, v in vars(options).items() if v is not None}
    # The event sockets live in a per-node-type subdirectory.
    opts["sock_dir"] = os.path.join(opts["sock_dir"], opts["node"])
    if "minion" in options.node:
        # In minion mode the id may be supplied as a positional argument.
        if args:
            opts["id"] = args[0]
        return opts
    if options.id:
        opts["id"] = options.id
    else:
        opts["id"] = options.node
    return opts
def check_access_and_print_warning(sock_dir):
    """
    Check if this user is able to access the socket
    directory and print a warning if not.
    """
    # Read, write and execute (search) access are all required.
    required_modes = (os.R_OK, os.W_OK, os.X_OK)
    if not all(os.access(sock_dir, mode) for mode in required_modes):
        print(
            "WARNING: Events will not be reported"
            " (not able to access {})".format(sock_dir)
        )
def listen(opts):
    """
    Attach to the pub socket and grab messages forever, printing either a
    per-minion reply count (--func_count) or each raw event.
    """
    event = salt.utils.event.get_event(
        opts["node"],
        sock_dir=opts["sock_dir"],
        transport=opts["transport"],
        opts=opts,
        listen=True,
    )
    check_access_and_print_warning(opts["sock_dir"])
    print(event.puburi)
    jid_counter = 0
    found_minions = []
    while True:
        ret = event.get_event(full=True)
        if ret is None:
            continue
        if opts["func_count"]:
            data = ret.get("data", False)
            if data:
                # Count each minion only once per run.
                if "id" in data and data["id"] not in found_minions:
                    # Non-job events may lack "fun"; .get avoids a KeyError.
                    if data.get("fun") == opts["func_count"]:
                        jid_counter += 1
                        found_minions.append(data["id"])
                        print(
                            "Reply received from [{}]. Total replies now: [{}].".format(
                                data["id"], jid_counter
                            )
                        )
            continue
        else:
            print("Event fired at {}".format(time.asctime()))
            print("*" * 25)
            print("Tag: {}".format(ret["tag"]))
            print("Data:")
            pprint.pprint(ret["data"])
if __name__ == "__main__":
opts = parse()
listen(opts)
|
# Basic arithmetic and variable exercises (translated from an R original;
# the R operators/literals are replaced by their Python equivalents).

# An addition
print(6 + 12)
# A subtraction
print(6 - 12)
# A multiplication
print(3 * 5)
# A division
print((5 + 5) / 2)
# Exponentiation: Python uses ** for powers; ^ is bitwise XOR.
print(2 ** 5)
# Modulo
print(28 % 6)
# Assign the value 42 to x
x = 42
# Print out the value of the variable x
print(x)
# Assign the value 5 to the variable my_apples
my_apples = 5
# Print out the value of the variable my_apples
print(my_apples)
# Assign a value to the variables my_apples and my_oranges
my_apples = 5
my_oranges = 6
# Add these two variables together
print(my_oranges + my_apples)
# Create the variable my_fruit
my_fruit = my_oranges + my_apples
# Declare variables of different types
my_numeric = 42
my_character = "universe"
# Python's boolean literal is False (R's FALSE is a NameError here).
my_logical = False
# Check the type of my_numeric (printed; a bare expression shows nothing
# when run as a script)
print(type(my_numeric))
# Check the type of my_character
print(type(my_character))
# Check the type of my_logical
print(type(my_logical))
|
import sqlite3
def cursor():
    """Return a fresh cursor on the module-wide connection."""
    # 'global' is unnecessary for a read-only reference to ``conn``.
    return conn.cursor()
def commit():
    """Commit any pending changes on the module-wide connection."""
    conn.commit()
def insert(table, data):
    """Insert ``data`` (a column -> value mapping) into ``table`` and commit.

    Values are bound as SQL parameters; only the table and column names
    are interpolated into the statement text.
    """
    columns = list(data)
    placeholders = ",".join("?" * len(columns))
    query = "INSERT INTO {} ({}) VALUES ({})".format(
        table, ",".join(columns), placeholders)
    conn.cursor().execute(query, tuple(data[col] for col in columns))
    conn.commit()
def start():
    """Create the tables used by this module if they do not exist yet."""
    schema = (
        "CREATE TABLE IF NOT EXISTS quotes (content TEXT)",
        "CREATE TABLE IF NOT EXISTS alerts (target TEXT, time INTEGER, message TEXT)",
    )
    cur = conn.cursor()
    for statement in schema:
        cur.execute(statement)
    conn.commit()
# Module import side effect: open (or create) the database file and make
# sure the schema exists before anything else uses it.
conn = sqlite3.connect('persist.db')
start()
|
"""This file provide the base for every models"""
from .extensions import DB
class CrudMixin(object):
    """Mixin that adds create/read/update/delete helpers to a model."""
    __table_args__ = {'extend_existing': True}

    @classmethod
    def create(cls, commit=True, **kwargs):
        """Create a new record and save it to the database."""
        instance = cls(**kwargs)
        return instance.save(commit=commit)

    @classmethod
    def get(cls, row_id):
        """Retrieve a row by its id."""
        return cls.query.get(row_id)

    @classmethod
    def get_or_404(cls, row_id):
        """Retrieve a row by its id or abort with 404."""
        return cls.query.get_or_404(row_id)

    def update(self, commit=True, **kwargs):
        """Update the record's attributes and optionally save.

        ``dict.iteritems()`` was Python-2-only and raised AttributeError
        on Python 3; ``items()`` works on both.
        """
        for attr, value in kwargs.items():
            setattr(self, attr, value)
        return commit and self.save() or self

    def save(self, commit=True):
        """Save the record to the database; returns self."""
        DB.session.add(self)
        if commit:
            DB.session.commit()
        return self

    def delete(self, commit=True):
        """Remove the record from the database."""
        DB.session.delete(self)
        # DB.session.commit() returns None, so this evaluates to None
        # when committing and False otherwise (kept for compatibility).
        return commit and DB.session.commit()
class Model(CrudMixin, DB.Model):
    """Declarative base class that every concrete model should extend."""
    # Abstract: SQLAlchemy will not create a table for this class itself.
    __abstract__ = True
|
from radium.equity.equity import Equity
from radium.pair.pair import Pair
|
"""Application routes."""
from flask import render_template
from flask import current_app as app
@app.route("/")
def home():
"""Home page."""
return render_template("home.html")
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page.

    :param e: the exception Flask raised for the missing route.
    """
    # note that we set the 404 status explicitly
    return render_template("404.html"), 404
|
from thrift.protocol.TBinaryProtocol import TBinaryProtocolAccelerated
from thrift.transport import TTransport, TSocket
from thrift.transport.TTransport import TTransportException
from jiffy.directory import directory_service
class Perms:
    """Octal permission-bit constants in the style of POSIX mode bits."""
    def __init__(self):
        pass
    none = 0
    # Owner read/write/execute bits.
    owner_read = 0o400
    owner_write = 0o200
    owner_exec = 0o100
    owner_all = 0o700
    # Group read/write/execute bits.
    group_read = 0o40
    group_write = 0o20
    group_exec = 0o10
    group_all = 0o70
    # Others read/write/execute bits.
    others_read = 0o4
    others_write = 0o2
    others_exec = 0o1
    others_all = 0o7
    all = 0o777
    # Special mode bits.
    set_uid = 0o4000
    set_gid = 0o2000
    sticky_bit = 0o1000
    # All permission and special bits combined.
    mask = 0o7777
class PermOpts:
    """How set_permissions combines the given bits with the existing ones."""
    def __init__(self):
        pass
    replace = 0
    add = 1
    remove = 2
class DirectoryEntry:
    """A (name, status) pair as returned by directory listings."""
    def __init__(self, name, status):
        self.name = name
        self.status = status
    def get_name(self):
        """Return the entry's file or directory name."""
        return self.name
    def get_status(self):
        """Return the entry's status object."""
        return self.status
class FileStatus:
    """Control-plane metadata for a path: type, permissions, mtime."""
    def __init__(self, file_type, permissions, last_write_time):
        self.type = file_type
        self.permissions = permissions
        self.last_write_time = last_write_time
    def get_type(self):
        """Return the file-type code."""
        return self.type
    def get_permissions(self):
        """Return the permission bits (see Perms)."""
        return self.permissions
    def get_last_write_time(self):
        """Return the last-write timestamp."""
        return self.last_write_time
class StorageMode:
    """Codes describing where a block's data currently lives."""
    def __init__(self):
        pass
    in_memory = 0
    in_memory_grace = 1
    flushing = 2
    on_disk = 3
class ReplicaChain:
    """A chain of storage blocks replicating the same data."""

    def __init__(self, block_ids, name, metadata, storage_mode):
        self.block_ids = block_ids
        self.name = name
        self.metadata = metadata
        self.storage_mode = storage_mode

    def __str__(self):
        return "{} : {}, {}, {}".format(
            self.name, self.block_ids, self.metadata, self.storage_mode)

    def __repr__(self):
        return str(self)
class DataStatus:
    """Data-plane status of a file: its replica chains plus metadata."""

    def __init__(self, backing_path, chain_length, data_blocks, flags, tags):
        self.backing_path = backing_path
        self.chain_length = chain_length
        # Re-wrap the thrift structs as local ReplicaChain objects.
        self.data_blocks = []
        for chain in data_blocks:
            self.data_blocks.append(
                ReplicaChain(chain.block_ids, chain.name, chain.metadata,
                             chain.storage_mode))
        self.flags = flags
        self.tags = tags
class Flags:
    """Bit flags attached to a file at creation time."""
    pinned = 0x01
    static_provisioned = 0x02
    mapped = 0x04
class DirectoryClient:
    """Thrift RPC client for the jiffy directory (metadata) service."""
    def __init__(self, host='127.0.0.1', port=9090):
        """Connect to the directory server, retrying up to 3 times.

        :param host: directory server hostname or address.
        :param port: directory server port.
        :raises TTransportException: if all connection attempts fail.
        """
        # Buffered transport over a plain TCP socket, using the
        # C-accelerated binary protocol.
        self.socket_ = TSocket.TSocket(host, port)
        self.transport_ = TTransport.TBufferedTransport(self.socket_)
        self.protocol_ = TBinaryProtocolAccelerated(self.transport_)
        self.client_ = directory_service.Client(self.protocol_)
        ex = None
        # Retry transport errors; re-raise anything else immediately.
        for i in range(3):
            try:
                self.transport_.open()
            except TTransportException as e:
                ex = e
                continue
            except Exception:
                raise
            else:
                break
        else:
            # for/else: all 3 attempts raised TTransportException.
            raise TTransportException(ex.type, "Connection failed {}:{}: {}".format(host, port, ex.message))
    def __del__(self):
        # Best-effort close when the client is garbage collected.
        self.close()
    def close(self):
        """Close the transport if it is still open (idempotent)."""
        if self.transport_.isOpen():
            self.transport_.close()
    def create_directory(self, path):
        """Create a single directory at ``path``."""
        self.client_.create_directory(path)
    def create_directories(self, path):
        """Create ``path`` and any missing parent directories."""
        self.client_.create_directories(path)
    def open(self, path):
        """Open an existing file and return its DataStatus."""
        s = self.client_.open(path)
        return DataStatus(s.backing_path, s.chain_length, s.data_blocks, s.flags, s.tags)
    def create(self, path, ds_type, backing_path, num_blocks=1, chain_length=1, flags=0, permissions=Perms.all,
               block_names=None, block_metadata=None, tags=None):
        """Create a new file and return its DataStatus.

        ``block_names``/``block_metadata``/``tags`` default to a single
        block named "0" with empty metadata and no tags.
        """
        if tags is None:
            tags = {}
        if block_names is None:
            block_names = ["0"]
        if block_metadata is None:
            block_metadata = [""]
        s = self.client_.create(path, ds_type, backing_path, num_blocks, chain_length, flags, permissions, block_names,
                                block_metadata, tags)
        return DataStatus(s.backing_path, s.chain_length, s.data_blocks, s.flags, s.tags)
    def open_or_create(self, path, ds_type, backing_path, num_blocks=1, chain_length=1, flags=0, permissions=Perms.all,
                       block_names=None, block_metadata=None, tags=None):
        """Open ``path`` if it exists, otherwise create it (same defaults
        as :meth:`create`); returns the DataStatus either way.
        """
        if tags is None:
            tags = {}
        if block_names is None:
            block_names = ["0"]
        if block_metadata is None:
            block_metadata = [""]
        s = self.client_.open_or_create(path, ds_type, backing_path, num_blocks, chain_length, flags, permissions,
                                        block_names, block_metadata, tags)
        return DataStatus(s.backing_path, s.chain_length, s.data_blocks, s.flags, s.tags)
    def exists(self, path):
        """Return True if ``path`` exists."""
        return self.client_.exists(path)
    def last_write_time(self, path):
        """Return the last-write timestamp for ``path``."""
        return self.client_.last_write_time(path)
    def set_permissions(self, path, prms, opts):
        """Set permission bits on ``path`` (see Perms and PermOpts)."""
        self.client_.set_permissions(path, prms, opts)
    def get_permissions(self, path):
        """Return the permission bits for ``path``."""
        return self.client_.get_permissions(path)
    def remove(self, path):
        """Remove a single file or empty directory."""
        self.client_.remove(path)
    def remove_all(self, path):
        """Remove ``path`` recursively."""
        self.client_.remove_all(path)
    def sync(self, path, backing_path):
        """Flush ``path``'s data to persistent storage at ``backing_path``."""
        self.client_.sync(path, backing_path)
    def dump(self, path, backing_path):
        """Dump ``path``'s data to ``backing_path`` and evict it."""
        self.client_.dump(path, backing_path)
    def load(self, path, backing_path):
        """Load ``path``'s data back from ``backing_path``."""
        self.client_.load(path, backing_path)
    def status(self, path):
        """Return the control-plane FileStatus for ``path``."""
        s = self.client_.status(path)
        return FileStatus(s.type, s.permissions, s.last_write_time)
    def directory_entries(self, path):
        """List the immediate entries of directory ``path``."""
        entries = self.client_.directory_entries(path)
        return [DirectoryEntry(e.name, e.status) for e in entries]
    def recursive_directory_entries(self, path):
        """List all entries under ``path``, recursively."""
        entries = self.client_.recursive_directory_entries(path)
        return [DirectoryEntry(e.name, e.status) for e in entries]
    def dstatus(self, path):
        """Return the data-plane DataStatus for ``path``."""
        s = self.client_.dstatus(path)
        return DataStatus(s.backing_path, s.chain_length, s.data_blocks, s.flags, s.tags)
    def add_tags(self, path, tags):
        """Attach additional key/value tags to ``path``."""
        self.client_.add_tags(path, tags)
    def is_regular_file(self, path):
        """Return True if ``path`` is a regular file."""
        return self.client_.is_regular_file(path)
    def is_directory(self, path):
        """Return True if ``path`` is a directory."""
        return self.client_.is_directory(path)
    def resolve_failures(self, path, chain):
        """Ask the server to repair a failed replica chain.

        NOTE(review): 'reslove_failures' appears to be the (misspelled)
        name of the generated thrift method — confirm against the IDL
        before "fixing" the spelling here.
        """
        return self.client_.reslove_failures(path, chain)
    def add_replica_to_chain(self, path, chain):
        """Extend ``chain`` for ``path`` with one more replica."""
        return self.client_.add_replica_to_chain(path, chain)
|
import json
import torch
import torch.nn.functional as F
from transformers import BertTokenizer
from .sentiment_classifier import SentimentClassifier
with open("config.json") as json_file:
config = json.load(json_file)
class Model:
    """Wraps a fine-tuned BERT sentiment classifier for inference."""
    def __init__(self):
        # Prefer GPU when available; weights are mapped onto this device.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.tokenizer = BertTokenizer.from_pretrained(config["BERT_MODEL"])
        classifier = SentimentClassifier(len(config["CLASS_NAMES"]))
        classifier.load_state_dict(
            torch.load(config["PRE_TRAINED_MODEL"], map_location=self.device)
        )
        # Inference mode: disables dropout etc.
        classifier = classifier.eval()
        self.classifier = classifier.to(self.device)
    def predict(self, text):
        """Classify ``text``.

        Returns a tuple of (predicted label, confidence, dict mapping each
        class name to its probability).

        NOTE(review): ``confidence`` is returned as a 0-d torch tensor
        while ``predicted_class`` is converted with ``.item()`` — confirm
        callers expect a tensor here.
        """
        encoded_text = self.tokenizer.encode_plus(
            text,
            max_length=config["MAX_SEQUENCE_LEN"],
            add_special_tokens=True,
            return_token_type_ids=False,
            pad_to_max_length=True,
            return_attention_mask=True,
            return_tensors="pt",
        )
        input_ids = encoded_text["input_ids"].to(self.device)
        attention_mask = encoded_text["attention_mask"].to(self.device)
        # No gradients needed for inference.
        with torch.no_grad():
            probabilities = F.softmax(self.classifier(input_ids, attention_mask), dim=1)
        confidence, predicted_class = torch.max(probabilities, dim=1)
        predicted_class = predicted_class.cpu().item()
        probabilities = probabilities.flatten().cpu().numpy().tolist()
        return (
            config["CLASS_NAMES"][predicted_class],
            confidence,
            dict(zip(config["CLASS_NAMES"], probabilities)),
        )
# Load tokenizer and weights once at import time so every caller reuses
# the same instance.
model = Model()
def get_model():
    """Return the module-level singleton ``Model`` instance."""
    return model
if __name__ == "__main__":
    # Running the module directly just exercises model construction.
    get_model()
|
import requests
class ApiHandler:
    """
    Class for making requests to the Twitter API v2
    https://developer.twitter.com/en/docs/twitter-api/early-access
    """
    def __init__(self, path, authentication):
        """
        :param path: Endpoint path, appended to the API v2 base URL.
        :param authentication: Authentication object (see authentication.py).
        """
        base_url = "https://api.twitter.com/2/"
        self.url = base_url + path
        self.auth = authentication.bearer_oauth
        self.headers = {"Content-Type": "application/json"}
    def __call__(self, payload=None):
        """
        Perform a GET request against the configured endpoint.

        :param payload: optional dict of query-string parameters.
        :return: the ``requests.Response`` object.
        """
        return requests.request(
            "GET", url=self.url, auth=self.auth, headers=self.headers, params=payload,
        )
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import contextlib
import copy
import datetime
import hashlib
import inspect
import os
import pprint
import fixtures
import mock
from oslo_log import log
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from oslo_versionedobjects import fixture
import six
from testtools import matchers
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
from nova import utils
# Module-level logger for this test module.
LOG = log.getLogger(__name__)
class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
    """Minimal versioned object used as a relation target in these tests."""
    VERSION = '1.0'
    fields = {'baz': fields.IntegerField()}
class MyObj(base.NovaPersistentObject, base.NovaObject,
            base.NovaObjectDictCompat):
    """Feature-rich fake object exercising most NovaObject machinery."""
    VERSION = '1.6'
    fields = {'foo': fields.IntegerField(default=1),
              'bar': fields.StringField(),
              'missing': fields.StringField(),
              'readonly': fields.IntegerField(read_only=True),
              'rel_object': fields.ObjectField('MyOwnedObject', nullable=True),
              'rel_objects': fields.ListOfObjectsField('MyOwnedObject',
                                                       nullable=True),
              'mutable_default': fields.ListOfStringsField(default=[]),
              }
    @staticmethod
    def _from_db_object(context, obj, db_obj):
        # Hydrate a fresh object from a db row; note the passed-in ``obj``
        # is ignored and a new MyObj is built instead.
        self = MyObj()
        self.foo = db_obj['foo']
        self.bar = db_obj['bar']
        self.missing = db_obj['missing']
        self.readonly = 1
        self._context = context
        return self
    def obj_load_attr(self, attrname):
        # Lazy-load hook: any unset attribute reads as the string 'loaded!'.
        setattr(self, attrname, 'loaded!')
    @base.remotable_classmethod
    def query(cls, context):
        """Return a canned object with no pending changes."""
        obj = cls(context=context, foo=1, bar='bar')
        obj.obj_reset_changes()
        return obj
    @base.remotable
    def marco(self):
        # Simple remotable round-trip used to verify return values.
        return 'polo'
    @base.remotable
    def _update_test(self):
        self.bar = 'updated'
    @base.remotable
    def save(self):
        # Fake save: just clears the changed-fields set.
        self.obj_reset_changes()
    @base.remotable
    def refresh(self):
        # Fake refresh: overwrite fields, then clear changes.
        self.foo = 321
        self.bar = 'refreshed'
        self.obj_reset_changes()
    @base.remotable
    def modify_save_modify(self):
        # Mutates, saves (clearing changes), then mutates again so tests
        # can observe which changes survive the save.
        self.bar = 'meow'
        self.save()
        self.foo = 42
        self.rel_object = MyOwnedObject(baz=42)
    def obj_make_compatible(self, primitive, target_version):
        super(MyObj, self).obj_make_compatible(primitive, target_version)
        # NOTE(danms): Simulate an older version that had a different
        # format for the 'bar' attribute
        if target_version == '1.1' and 'bar' in primitive:
            primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
    """Same object as MyObj but registered at an older version."""
    VERSION = '1.5'
    @classmethod
    def obj_name(cls):
        # Report the same wire name so both versions coexist in the registry.
        return 'MyObj'
class MyObj2(base.NovaObject):
    """Unversioned variant that also reports itself as 'MyObj'."""
    fields = {
        'bar': fields.StringField(),
    }
    @classmethod
    def obj_name(cls):
        return 'MyObj'
    @base.remotable_classmethod
    def query(cls, *args, **kwargs):
        # Intentionally a no-op; only the remote-call plumbing is exercised.
        pass
class RandomMixInWithNoFields(object):
    """Used to test object inheritance using a mixin that has no fields."""
    pass


# register_if(False): keep this test-only subclass out of the real registry.
@base.NovaObjectRegistry.register_if(False)
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
    """MyObj subclass (via a field-less mixin) adding one extra field."""
    fields = {'new_field': fields.StringField()}
class TestObjToPrimitive(test.NoDBTestCase):
    """Tests for base.obj_to_primitive over lists, dicts and nested objects."""
    def test_obj_to_primitive_list(self):
        # A list-of-objects field should primitivize to a list of dicts.
        @base.NovaObjectRegistry.register_if(False)
        class MyObjElement(base.NovaObject):
            fields = {'foo': fields.IntegerField()}
            def __init__(self, foo):
                super(MyObjElement, self).__init__()
                self.foo = foo
        @base.NovaObjectRegistry.register_if(False)
        class MyList(base.ObjectListBase, base.NovaObject):
            fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
        mylist = MyList()
        mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
        self.assertEqual([1, 2, 3],
                         [x['foo'] for x in base.obj_to_primitive(mylist)])
    def test_obj_to_primitive_dict(self):
        # Set fields become plain dict keys in the primitive form.
        base.NovaObjectRegistry.register(MyObj)
        myobj = MyObj(foo=1, bar='foo')
        self.assertEqual({'foo': 1, 'bar': 'foo'},
                         base.obj_to_primitive(myobj))
    def test_obj_to_primitive_recursive(self):
        # Nested objects inside a list are primitivized recursively.
        base.NovaObjectRegistry.register(MyObj)
        class MyList(base.ObjectListBase, base.NovaObject):
            fields = {'objects': fields.ListOfObjectsField('MyObj')}
        mylist = MyList(objects=[MyObj(), MyObj()])
        for i, value in enumerate(mylist):
            value.foo = i
        self.assertEqual([{'foo': 0}, {'foo': 1}],
                         base.obj_to_primitive(mylist))
    def test_obj_to_primitive_with_ip_addr(self):
        # IP address/network field types primitivize to their string forms.
        @base.NovaObjectRegistry.register_if(False)
        class TestObject(base.NovaObject):
            fields = {'addr': fields.IPAddressField(),
                      'cidr': fields.IPNetworkField()}
        obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
        self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
                         base.obj_to_primitive(obj))
class TestObjMakeList(test.NoDBTestCase):
    """Tests for base.obj_make_list."""
    def test_obj_make_list(self):
        # obj_make_list should hydrate one MyObj per db row and attach the
        # given context to the resulting list object.
        class MyList(base.ObjectListBase, base.NovaObject):
            fields = {
                'objects': fields.ListOfObjectsField('MyObj'),
            }
        db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
                   {'foo': 2, 'bar': 'bat', 'missing': 'apple'},
                   ]
        mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
        self.assertEqual(2, len(mylist))
        self.assertEqual('ctxt', mylist._context)
        for index, item in enumerate(mylist):
            self.assertEqual(db_objs[index]['foo'], item.foo)
            self.assertEqual(db_objs[index]['bar'], item.bar)
            self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
                comparators=None):
    """Compare a NovaObject and a dict-like database object.

    This automatically converts TZ-aware datetimes and iterates over
    the fields of the object.

    :param:test: The TestCase doing the comparison
    :param:obj: The NovaObject to examine
    :param:db_obj: The dict-like database object to use as reference
    :param:subs: A dict of objkey=dbkey field substitutions
    :param:allow_missing: A list of fields that may not be in db_obj
    :param:comparators: Map of comparator functions to use for certain fields
    """
    subs = subs or {}
    allow_missing = allow_missing or []
    comparators = comparators or {}
    for field_name in obj.fields:
        if field_name in allow_missing and not obj.obj_attr_is_set(field_name):
            continue
        obj_val = getattr(obj, field_name)
        db_val = db_obj[subs.get(field_name, field_name)]
        if isinstance(obj_val, datetime.datetime):
            # Strip tzinfo so aware and naive datetimes compare equal.
            obj_val = obj_val.replace(tzinfo=None)
        # Use the per-field comparator when given, else plain equality.
        compare = comparators.get(field_name, test.assertEqual)
        compare(db_val, obj_val)
class _BaseTestCase(test.TestCase):
    """Shared setup for the local and remote object-test variants."""
    def setUp(self):
        super(_BaseTestCase, self).setUp()
        self.remote_object_calls = list()
        self.user_id = 'fake-user'
        self.project_id = 'fake-project'
        self.context = context.RequestContext(self.user_id, self.project_id)
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)
        # NOTE(danms): register these here instead of at import time
        # so that they're not always present
        base.NovaObjectRegistry.register(MyObj)
        base.NovaObjectRegistry.register(MyObjDiffVers)
        base.NovaObjectRegistry.register(MyOwnedObject)
    def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
                    comparators=None):
        # Thin wrapper over the module-level compare_obj, with self as the
        # TestCase doing the assertions.
        compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
                    comparators=comparators)
    def str_comparator(self, expected, obj_val):
        """Compare an object field to a string in the db by performing
        a simple coercion on the object field value.
        """
        self.assertEqual(expected, str(obj_val))
    def assertNotIsInstance(self, obj, cls, msg=None):
        """Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
        try:
            f = super(_BaseTestCase, self).assertNotIsInstance
        except AttributeError:
            # Older unittest lacks assertNotIsInstance; emulate via testtools.
            self.assertThat(obj,
                            matchers.Not(matchers.IsInstance(cls)),
                            message=msg or '')
        else:
            f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
    """Runs the shared object tests without any RPC indirection."""
    def setUp(self):
        super(_LocalTest, self).setUp()
        # Just in case
        self.useFixture(nova_fixtures.IndirectionAPIFixture(None))
@contextlib.contextmanager
def things_temporarily_local():
    """Temporarily disable the indirection API so the conductor handles
    requests directly.

    The previous indirection API is restored even if the with-body raises.
    """
    # Temporarily go non-remote so the conductor handles
    # this request directly
    _api = base.NovaObject.indirection_api
    base.NovaObject.indirection_api = None
    try:
        yield
    finally:
        # Without try/finally an exception in the with-body would leave
        # indirection_api permanently disabled for later tests.
        base.NovaObject.indirection_api = _api
# FIXME(danms): We shouldn't be overriding any of this, but need to
# for the moment because of the mocks in the base fixture that don't
# hit our registry subclass.
class FakeIndirectionHack(fixture.FakeIndirectionAPI):
    """Fake RPC indirection that round-trips everything through the
    serializer to simulate going over the wire, then executes locally.
    """
    def object_action(self, context, objinst, objmethod, args, kwargs):
        # Serialize/deserialize the instance and all arguments first.
        objinst = self._ser.deserialize_entity(
            context, self._ser.serialize_entity(
                context, objinst))
        objmethod = six.text_type(objmethod)
        args = self._ser.deserialize_entity(
            None, self._ser.serialize_entity(None, args))
        kwargs = self._ser.deserialize_entity(
            None, self._ser.serialize_entity(None, kwargs))
        original = objinst.obj_clone()
        # Run the method with indirection disabled so it executes locally.
        with mock.patch('nova.objects.base.NovaObject.'
                        'indirection_api', new=None):
            result = getattr(objinst, objmethod)(*args, **kwargs)
        updates = self._get_changes(original, objinst)
        updates['obj_what_changed'] = objinst.obj_what_changed()
        return updates, result
    def object_class_action(self, context, objname, objmethod, objver,
                            args, kwargs):
        # Legacy single-version classmethod dispatch.
        objname = six.text_type(objname)
        objmethod = six.text_type(objmethod)
        objver = six.text_type(objver)
        args = self._ser.deserialize_entity(
            None, self._ser.serialize_entity(None, args))
        kwargs = self._ser.deserialize_entity(
            None, self._ser.serialize_entity(None, kwargs))
        cls = base.NovaObject.obj_class_from_name(objname, objver)
        with mock.patch('nova.objects.base.NovaObject.'
                        'indirection_api', new=None):
            result = getattr(cls, objmethod)(context, *args, **kwargs)
        manifest = ovo_base.obj_tree_get_versions(objname)
        # Backlevel object results to the requested version before returning.
        return (base.NovaObject.obj_from_primitive(
            result.obj_to_primitive(target_version=objver,
                                    version_manifest=manifest),
            context=context)
            if isinstance(result, base.NovaObject) else result)
    def object_class_action_versions(self, context, objname, objmethod,
                                     object_versions, args, kwargs):
        # Newer dispatch: the caller supplies a version map per object name.
        objname = six.text_type(objname)
        objmethod = six.text_type(objmethod)
        object_versions = {six.text_type(o): six.text_type(v)
                           for o, v in object_versions.items()}
        args, kwargs = self._canonicalize_args(context, args, kwargs)
        objver = object_versions[objname]
        cls = base.NovaObject.obj_class_from_name(objname, objver)
        with mock.patch('nova.objects.base.NovaObject.'
                        'indirection_api', new=None):
            result = getattr(cls, objmethod)(context, *args, **kwargs)
        return (base.NovaObject.obj_from_primitive(
            result.obj_to_primitive(target_version=objver),
            context=context)
            if isinstance(result, base.NovaObject) else result)
class IndirectionFixture(fixtures.Fixture):
    """Patch NovaObject.indirection_api to the fake hack for one test."""
    def setUp(self):
        super(IndirectionFixture, self).setUp()
        ser = base.NovaObjectSerializer()
        self.indirection_api = FakeIndirectionHack(serializer=ser)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.base.NovaObject.indirection_api',
            self.indirection_api))
class _RemoteTest(_BaseTestCase):
    """Runs the shared object tests through the fake RPC indirection."""
    def setUp(self):
        super(_RemoteTest, self).setUp()
        self.useFixture(IndirectionFixture())
class _TestObject(object):
def test_object_attrs_in_init(self):
# Spot check a few
objects.Instance
objects.InstanceInfoCache
objects.SecurityGroup
# Now check the test one in this file. Should be newest version
self.assertEqual('1.6', objects.MyObj.VERSION)
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(ovo_exc.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_hydration_additional_unexpected_stuff(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5.1',
'nova_object.data': {
'foo': 1,
'unexpected_thing': 'foobar'}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(1, obj.foo)
self.assertFalse(hasattr(obj, 'unexpected_thing'))
# NOTE(danms): If we call obj_from_primitive() directly
# with a version containing .z, we'll get that version
# in the resulting object. In reality, when using the
# serializer, we'll get that snipped off (tested
# elsewhere)
self.assertEqual('1.5.1', obj.VERSION)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
@base.NovaObjectRegistry.register_if(False)
class Foo(base.NovaObject):
fields = {'foobar': fields.IntegerField()}
obj = Foo()
with self.assertRaisesRegex(NotImplementedError, ".*foobar.*"):
obj.foobar
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.6',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_obj_class_from_name(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
self.assertEqual('1.5', obj.VERSION)
def test_obj_class_from_name_latest_compatible(self):
obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
self.assertEqual('1.6', obj.VERSION)
def test_unknown_objtype(self):
self.assertRaises(ovo_exc.UnsupportedObjectError,
base.NovaObject.obj_class_from_name, 'foo', '1.0')
def test_obj_class_from_name_supported_version(self):
error = None
try:
base.NovaObject.obj_class_from_name('MyObj', '1.25')
except ovo_exc.IncompatibleObjectVersion as ex:
error = ex
self.assertIsNotNone(error)
self.assertEqual('1.6', error.kwargs['supported'])
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(ovo_exc.OrphanedObjectError,
obj._update_test)
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh()
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify()
self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertIsInstance(obj.rel_object, MyOwnedObject)
def test_changed_with_sub_object(self):
@base.NovaObjectRegistry.register_if(False)
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
def test_base_attributes(self):
    """Base persistent fields serialize into the primitive with ISO times."""
    dt = datetime.datetime(1955, 11, 5)
    obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
                deleted=False)
    expected = {'nova_object.name': 'MyObj',
                'nova_object.namespace': 'nova',
                'nova_object.version': '1.6',
                'nova_object.changes':
                    ['deleted', 'created_at', 'deleted_at', 'updated_at'],
                'nova_object.data':
                    {'created_at': timeutils.isotime(dt),
                     'updated_at': timeutils.isotime(dt),
                     'deleted_at': None,
                     'deleted': False,
                     }
                }
    actual = obj.obj_to_primitive()
    self.assertJsonEqual(actual, expected)
def test_contains(self):
    """Membership (`in`) reflects which fields are currently set."""
    instance = MyObj()
    self.assertNotIn('foo', instance)
    instance.foo = 1
    self.assertIn('foo', instance)
    # A name that is not a field is never "in" the object.
    self.assertNotIn('does_not_exist', instance)
def test_obj_attr_is_set(self):
    """obj_attr_is_set() distinguishes set, unset, and unknown fields."""
    instance = MyObj(foo=1)
    self.assertTrue(instance.obj_attr_is_set('foo'))
    self.assertFalse(instance.obj_attr_is_set('bar'))
    # Asking about a non-existent field raises rather than returning False.
    self.assertRaises(AttributeError, instance.obj_attr_is_set, 'bang')
def test_obj_reset_changes_recursive(self):
    """obj_reset_changes(recursive=True) clears sub-object changes too."""
    obj = MyObj(rel_object=MyOwnedObject(baz=123),
                rel_objects=[MyOwnedObject(baz=456)])
    self.assertEqual(set(['rel_object', 'rel_objects']),
                     obj.obj_what_changed())
    obj.obj_reset_changes()
    # Plain reset: the parent still reports 'rel_object' and the
    # sub-objects keep their own pending changes.
    self.assertEqual(set(['rel_object']), obj.obj_what_changed())
    self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
    self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
    # Recursive reset limited to an unrelated field changes nothing.
    obj.obj_reset_changes(recursive=True, fields=['foo'])
    self.assertEqual(set(['rel_object']), obj.obj_what_changed())
    self.assertEqual(set(['baz']), obj.rel_object.obj_what_changed())
    self.assertEqual(set(['baz']), obj.rel_objects[0].obj_what_changed())
    # Full recursive reset clears both parent and sub-object changes.
    obj.obj_reset_changes(recursive=True)
    self.assertEqual(set([]), obj.rel_object.obj_what_changed())
    self.assertEqual(set([]), obj.obj_what_changed())
def test_get(self):
    """get() honors set values, defaults, lazy-loading, and bad names."""
    obj = MyObj(foo=1)
    # Foo has value, should not get the default
    self.assertEqual(obj.get('foo', 2), 1)
    # Foo has value, should return the value without error
    self.assertEqual(obj.get('foo'), 1)
    # Bar is not loaded, so we should get the default
    self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
    # Bar without a default should lazy-load
    self.assertEqual(obj.get('bar'), 'loaded!')
    # Bar now has a default, but loaded value should be returned
    self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
    # Invalid attribute should raise AttributeError
    self.assertRaises(AttributeError, obj.get, 'nothing')
    # ...even with a default
    self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
    """Subclasses carry every parent field plus their own additions."""
    base_fields = list(base.NovaPersistentObject.fields.keys())
    myobj_fields = ['foo', 'bar', 'missing',
                    'readonly', 'rel_object',
                    'rel_objects', 'mutable_default'] + base_fields
    myobj3_fields = ['new_field']
    self.assertTrue(issubclass(TestSubclassedObject, MyObj))
    self.assertEqual(len(myobj_fields), len(MyObj.fields))
    self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
    self.assertEqual(len(myobj_fields) + len(myobj3_fields),
                     len(TestSubclassedObject.fields))
    self.assertEqual(set(myobj_fields) | set(myobj3_fields),
                     set(TestSubclassedObject.fields.keys()))
def test_obj_as_admin(self):
    """Inside obj_as_admin(), the object's context is elevated to admin."""
    obj = MyObj(context=self.context)

    def fake(*args, **kwargs):
        # Runs during save() (via the patched obj_reset_changes), so it
        # checks that the elevation is active at save time.
        self.assertTrue(obj._context.is_admin)

    with mock.patch.object(obj, 'obj_reset_changes') as mock_fn:
        mock_fn.side_effect = fake
        with obj.obj_as_admin():
            obj.save()
        self.assertTrue(mock_fn.called)

    # The elevation is undone when the context manager exits.
    self.assertFalse(obj._context.is_admin)
def test_obj_as_admin_orphaned(self):
    """obj_as_admin() on a context-less object raises OrphanedObjectError."""
    def run_without_context():
        orphan = MyObj()
        with orphan.obj_as_admin():
            pass
    self.assertRaises(exception.OrphanedObjectError, run_without_context)
def test_obj_alternate_context(self):
    """obj_alternate_context() swaps _context only within the block."""
    instance = MyObj(context=self.context)
    with instance.obj_alternate_context(mock.sentinel.alt_ctx):
        self.assertEqual(mock.sentinel.alt_ctx, instance._context)
    # The original context is restored on exit.
    self.assertEqual(self.context, instance._context)
def test_get_changes(self):
    """obj_get_changes() maps each modified field to its new value."""
    instance = MyObj()
    self.assertEqual({}, instance.obj_get_changes())
    instance.foo = 123
    self.assertEqual({'foo': 123}, instance.obj_get_changes())
    instance.bar = 'test'
    self.assertEqual({'foo': 123, 'bar': 'test'},
                     instance.obj_get_changes())
    instance.obj_reset_changes()
    self.assertEqual({}, instance.obj_get_changes())
def test_obj_fields(self):
    """obj_fields includes declared fields plus obj_extra_fields."""
    @base.NovaObjectRegistry.register_if(False)
    class TestObj(base.NovaObject):
        fields = {'foo': fields.IntegerField()}
        obj_extra_fields = ['bar']

        @property
        def bar(self):
            return 'this is bar'

    obj = TestObj()
    self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
    """Keyword-constructed fields are set and marked as changed."""
    instance = MyObj(context=self.context, foo=123, bar='abc')
    self.assertEqual(123, instance.foo)
    self.assertEqual('abc', instance.bar)
    self.assertEqual({'foo', 'bar'}, instance.obj_what_changed())
def test_obj_read_only(self):
    """A read-only field can be set once; a second write raises."""
    instance = MyObj(context=self.context, foo=123, bar='abc')
    instance.readonly = 1
    self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
                      instance, 'readonly', 2)
def test_obj_mutable_default(self):
    """Mutable defaults must not be shared between instances."""
    first = MyObj(context=self.context, foo=123, bar='abc')
    first.mutable_default = None
    first.mutable_default.append('s1')
    self.assertEqual(['s1'], first.mutable_default)

    second = MyObj(context=self.context, foo=123, bar='abc')
    second.mutable_default = None
    second.mutable_default.append('s2')
    self.assertEqual(['s2'], second.mutable_default)
def test_obj_mutable_default_set_default(self):
    """obj_set_defaults() gives each instance its own empty list."""
    first = MyObj(context=self.context, foo=123, bar='abc')
    first.obj_set_defaults('mutable_default')
    self.assertEqual([], first.mutable_default)
    first.mutable_default.append('s1')
    self.assertEqual(['s1'], first.mutable_default)

    second = MyObj(context=self.context, foo=123, bar='abc')
    second.obj_set_defaults('mutable_default')
    self.assertEqual([], second.mutable_default)
    second.mutable_default.append('s2')
    self.assertEqual(['s2'], second.mutable_default)
def test_obj_repr(self):
    """repr() lists every field alphabetically, with <?> for unset ones."""
    obj = MyObj(foo=123)
    self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
                     'deleted_at=<?>,foo=123,missing=<?>,'
                     'mutable_default=<?>,readonly=<?>,rel_object=<?>,'
                     'rel_objects=<?>,updated_at=<?>)',
                     repr(obj))
def test_obj_make_obj_compatible(self):
    """_obj_make_obj_compatible() backlevels a sub-object per the map.

    The relationship map below says the parent carried child 1.1 as of
    parent 1.5 and child 1.2 as of parent 1.7; each target version
    exercised must pick the right child version or drop the child.
    """
    subobj = MyOwnedObject(baz=1)
    subobj.VERSION = '1.2'
    obj = MyObj(rel_object=subobj)
    obj.obj_relationships = {
        'rel_object': [('1.5', '1.1'), ('1.7', '1.2')],
    }
    orig_primitive = obj.obj_to_primitive()['nova_object.data']
    with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
        # Target 1.8: child already at its newest mapped version.
        primitive = copy.deepcopy(orig_primitive)
        obj._obj_make_obj_compatible(primitive, '1.8', 'rel_object')
        self.assertFalse(mock_compat.called)

    with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
        # Target 1.7: child stays at 1.2, but compat is invoked.
        primitive = copy.deepcopy(orig_primitive)
        obj._obj_make_obj_compatible(primitive, '1.7', 'rel_object')
        mock_compat.assert_called_once_with(
            primitive['rel_object']['nova_object.data'], '1.2')

    with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
        # Target 1.6: between map entries; child backlevels to 1.1.
        primitive = copy.deepcopy(orig_primitive)
        obj._obj_make_obj_compatible(primitive, '1.6', 'rel_object')
        mock_compat.assert_called_once_with(
            primitive['rel_object']['nova_object.data'], '1.1')
        self.assertEqual('1.1',
                         primitive['rel_object']['nova_object.version'])

    with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
        # Target 1.5: exact map entry; child backlevels to 1.1.
        primitive = copy.deepcopy(orig_primitive)
        obj._obj_make_obj_compatible(primitive, '1.5', 'rel_object')
        mock_compat.assert_called_once_with(
            primitive['rel_object']['nova_object.data'], '1.1')
        self.assertEqual('1.1',
                         primitive['rel_object']['nova_object.version'])

    with mock.patch.object(subobj, 'obj_make_compatible') as mock_compat:
        # Target 1.4: predates the child field; it is dropped entirely.
        primitive = copy.deepcopy(orig_primitive)
        obj._obj_make_obj_compatible(primitive, '1.4', 'rel_object')
        self.assertFalse(mock_compat.called)
        self.assertNotIn('rel_object', primitive)
def test_obj_make_compatible_hits_sub_objects(self):
    """obj_make_compatible() delegates set sub-objects to the helper."""
    subobj = MyOwnedObject(baz=1)
    obj = MyObj(foo=123, rel_object=subobj)
    obj.obj_relationships = {'rel_object': [('1.0', '1.0')]}
    with mock.patch.object(obj, '_obj_make_obj_compatible') as mock_compat:
        obj.obj_make_compatible({'rel_object': 'foo'}, '1.10')
        mock_compat.assert_called_once_with({'rel_object': 'foo'}, '1.10',
                                            'rel_object')
def test_obj_make_compatible_skips_unset_sub_objects(self):
    """Sub-objects that were never set are not backleveled."""
    instance = MyObj(foo=123)
    instance.obj_relationships = {'rel_object': [('1.0', '1.0')]}
    with mock.patch.object(instance,
                           '_obj_make_obj_compatible') as compat:
        instance.obj_make_compatible({'rel_object': 'foo'}, '1.10')
        self.assertFalse(compat.called)
def test_obj_make_compatible_doesnt_skip_falsey_sub_objects(self):
    """A set-but-falsey sub-object (empty list) must still be serialized."""
    @base.NovaObjectRegistry.register_if(False)
    class MyList(base.ObjectListBase, base.NovaObject):
        VERSION = '1.2'
        fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
        obj_relationships = {
            'objects': [('1.1', '1.1'), ('1.2', '1.2')],
        }

    mylist = MyList(objects=[])

    @base.NovaObjectRegistry.register_if(False)
    class MyOwner(base.NovaObject):
        VERSION = '1.2'
        fields = {'mylist': fields.ObjectField('MyList')}
        obj_relationships = {
            'mylist': [('1.1', '1.1')],
        }

    myowner = MyOwner(mylist=mylist)
    primitive = myowner.obj_to_primitive('1.1')
    # Empty but set: the list must survive the backport.
    self.assertIn('mylist', primitive['nova_object.data'])
def test_obj_make_compatible_handles_list_of_objects(self):
    """Each element of a list-of-objects field is backleveled."""
    subobj = MyOwnedObject(baz=1)
    obj = MyObj(rel_objects=[subobj])
    obj.obj_relationships = {'rel_objects': [('1.0', '1.123')]}

    def fake_make_compat(primitive, version):
        # Invoked per element with the mapped child version.
        self.assertEqual('1.123', version)
        self.assertIn('baz', primitive)

    with mock.patch.object(subobj, 'obj_make_compatible') as mock_mc:
        mock_mc.side_effect = fake_make_compat
        obj.obj_to_primitive('1.0')
        self.assertTrue(mock_mc.called)
def test_delattr(self):
    """Deleting a field unsets it and re-enables lazy-loading."""
    instance = MyObj(bar='foo')
    del instance.bar
    # Should appear unset now
    self.assertFalse(instance.obj_attr_is_set('bar'))
    # Make sure post-delete, references trigger lazy loads
    self.assertEqual('loaded!', instance.bar)
def test_delattr_unset(self):
    """Deleting a field that was never set raises AttributeError."""
    self.assertRaises(AttributeError, delattr, MyObj(), 'bar')
class TestObject(_LocalTest, _TestObject):
    """Runs the shared _TestObject cases locally, plus default handling."""

    def test_set_defaults(self):
        instance = MyObj()
        instance.obj_set_defaults('foo')
        self.assertTrue(instance.obj_attr_is_set('foo'))
        self.assertEqual(1, instance.foo)

    def test_set_defaults_no_default(self):
        # 'bar' declares no default, so asking for one is an error.
        self.assertRaises(ovo_exc.ObjectActionError,
                          MyObj().obj_set_defaults, 'bar')

    def test_set_all_defaults(self):
        instance = MyObj()
        instance.obj_set_defaults()
        self.assertEqual({'deleted', 'foo', 'mutable_default'},
                         instance.obj_what_changed())
        self.assertEqual(1, instance.foo)

    def test_set_defaults_not_overwrite(self):
        # NOTE(danms): deleted defaults to False, so verify that it does
        # not get reset by obj_set_defaults()
        instance = MyObj(deleted=True)
        instance.obj_set_defaults()
        self.assertEqual(1, instance.foo)
        self.assertTrue(instance.deleted)
class TestObjectSerializer(_BaseTestCase):
    """Tests for NovaObjectSerializer round-trips and version backports."""

    def test_serialize_entity_primitive(self):
        # Primitive values pass through serialization unchanged.
        ser = base.NovaObjectSerializer()
        for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
            self.assertEqual(thing, ser.serialize_entity(None, thing))

    def test_deserialize_entity_primitive(self):
        # Primitive values pass through deserialization unchanged.
        ser = base.NovaObjectSerializer()
        for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
            self.assertEqual(thing, ser.deserialize_entity(None, thing))

    def test_serialize_set_to_list(self):
        # Sets have no wire representation, so they serialize as lists.
        ser = base.NovaObjectSerializer()
        self.assertEqual([1, 2], ser.serialize_entity(None, set([1, 2])))

    def _test_deserialize_entity_newer(self, obj_version, backported_to,
                                       my_version='1.6'):
        """Deserialize a primitive stamped newer than the local class.

        :param obj_version: version stamped on the incoming primitive
        :param backported_to: expected backport result, or None when no
            conductor backport should be requested
        :param my_version: version of the locally-registered class
        """
        ser = base.NovaObjectSerializer()
        ser._conductor = mock.Mock()
        ser._conductor.object_backport_versions.return_value = 'backported'

        class MyTestObj(MyObj):
            VERSION = my_version

        base.NovaObjectRegistry.register(MyTestObj)

        obj = MyTestObj()
        obj.VERSION = obj_version
        primitive = obj.obj_to_primitive()
        result = ser.deserialize_entity(self.context, primitive)
        if backported_to is None:
            self.assertFalse(ser._conductor.object_backport_versions.called)
        else:
            self.assertEqual('backported', result)
            versions = ovo_base.obj_tree_get_versions('MyTestObj')
            ser._conductor.object_backport_versions.assert_called_with(
                self.context, primitive, versions)

    def test_deserialize_entity_newer_version_backports(self):
        self._test_deserialize_entity_newer('1.25', '1.6')

    def test_deserialize_entity_newer_revision_does_not_backport_zero(self):
        self._test_deserialize_entity_newer('1.6.0', None)

    def test_deserialize_entity_newer_revision_does_not_backport(self):
        self._test_deserialize_entity_newer('1.6.1', None)

    def test_deserialize_entity_newer_version_passes_revision(self):
        self._test_deserialize_entity_newer('1.7', '1.6.1', '1.6.1')

    def test_deserialize_dot_z_with_extra_stuff(self):
        # Unknown keys in a .z-revision primitive are silently dropped.
        primitive = {'nova_object.name': 'MyObj',
                     'nova_object.namespace': 'nova',
                     'nova_object.version': '1.6.1',
                     'nova_object.data': {
                         'foo': 1,
                         'unexpected_thing': 'foobar'}}
        ser = base.NovaObjectSerializer()
        obj = ser.deserialize_entity(self.context, primitive)
        self.assertEqual(1, obj.foo)
        self.assertFalse(hasattr(obj, 'unexpected_thing'))
        # NOTE(danms): The serializer is where the logic lives that
        # avoids backports for cases where only a .z difference in
        # the received object version is detected. As a result, we
        # end up with a version of what we expected, effectively the
        # .0 of the object.
        self.assertEqual('1.6', obj.VERSION)

    @mock.patch('oslo_versionedobjects.base.obj_tree_get_versions')
    def test_object_tree_backport(self, mock_get_versions):
        # Test the full client backport path all the way from the serializer
        # to the conductor and back.
        self.start_service('conductor',
                           manager='nova.conductor.manager.ConductorManager')

        # NOTE(danms): Actually register a complex set of objects,
        # two versions of the same parent object which contain a
        # child sub object.
        @base.NovaObjectRegistry.register
        class Child(base.NovaObject):
            VERSION = '1.10'

        @base.NovaObjectRegistry.register
        class Parent(base.NovaObject):
            VERSION = '1.0'
            fields = {
                'child': fields.ObjectField('Child'),
            }

        @base.NovaObjectRegistry.register  # noqa
        class Parent(base.NovaObject):
            VERSION = '1.1'
            fields = {
                'child': fields.ObjectField('Child'),
            }

        # NOTE(danms): Since we're on the same node as conductor,
        # return a fake version manifest so that we confirm that it
        # actually honors what the client asked for and not just what
        # it sees in the local machine state.
        mock_get_versions.return_value = {
            'Parent': '1.0',
            'Child': '1.5',
        }
        call_context = {}
        real_ofp = base.NovaObject.obj_from_primitive

        def fake_obj_from_primitive(*a, **k):
            # NOTE(danms): We need the first call to this to report an
            # incompatible object version, but subsequent calls must
            # succeed. Since we're testing the backport path all the
            # way through conductor and RPC, we can't fully break this
            # method, we just need it to fail once to trigger the
            # backport.
            if 'run' in call_context:
                return real_ofp(*a, **k)
            else:
                call_context['run'] = True
                raise ovo_exc.IncompatibleObjectVersion('foo')

        child = Child()
        parent = Parent(child=child)
        prim = parent.obj_to_primitive()
        ser = base.NovaObjectSerializer()

        with mock.patch('nova.objects.base.NovaObject.'
                        'obj_from_primitive') as mock_ofp:
            mock_ofp.side_effect = fake_obj_from_primitive
            result = ser.deserialize_entity(self.context, prim)

        # Our newest version (and what we passed back) of Parent
        # is 1.1, make sure that the manifest version is honored
        self.assertEqual('1.0', result.VERSION)
        # Our newest version (and what we passed back) of Child
        # is 1.10, make sure that the manifest version is honored
        self.assertEqual('1.5', result.child.VERSION)

    def test_object_serialization(self):
        ser = base.NovaObjectSerializer()
        obj = MyObj()
        primitive = ser.serialize_entity(self.context, obj)
        self.assertIn('nova_object.name', primitive)
        obj2 = ser.deserialize_entity(self.context, primitive)
        self.assertIsInstance(obj2, MyObj)
        # Deserialization attaches the caller's context to the object.
        self.assertEqual(self.context, obj2._context)

    def test_object_serialization_iterables(self):
        # Objects nested inside lists/tuples/sets/dicts round-trip too.
        ser = base.NovaObjectSerializer()
        obj = MyObj()
        for iterable in (list, tuple, set):
            thing = iterable([obj])
            primitive = ser.serialize_entity(self.context, thing)
            self.assertEqual(1, len(primitive))
            for item in primitive:
                self.assertNotIsInstance(item, base.NovaObject)
            thing2 = ser.deserialize_entity(self.context, primitive)
            self.assertEqual(1, len(thing2))
            for item in thing2:
                self.assertIsInstance(item, MyObj)
        # dict case
        thing = {'key': obj}
        primitive = ser.serialize_entity(self.context, thing)
        self.assertEqual(1, len(primitive))
        for item in six.itervalues(primitive):
            self.assertNotIsInstance(item, base.NovaObject)
        thing2 = ser.deserialize_entity(self.context, primitive)
        self.assertEqual(1, len(thing2))
        for item in six.itervalues(thing2):
            self.assertIsInstance(item, MyObj)

        # object-action updates dict case
        thing = {'foo': obj.obj_to_primitive()}
        primitive = ser.serialize_entity(self.context, thing)
        self.assertEqual(thing, primitive)
        thing2 = ser.deserialize_entity(self.context, thing)
        self.assertIsInstance(thing2['foo'], base.NovaObject)
class TestArgsSerializer(test.NoDBTestCase):
    """Tests for the @base.serialize_args decorator."""

    def setUp(self):
        super(TestArgsSerializer, self).setUp()
        # A fixed reference time and the string the decorator should
        # turn it into.
        self.now = timeutils.utcnow()
        self.str_now = timeutils.strtime(at=self.now)
        # NOTE(review): appears to be a UTF-8 byte sequence written as
        # unicode codepoints, exercising non-ASCII exception values —
        # confirm intent before changing.
        self.unicode_str = u'\xF0\x9F\x92\xA9'

    @base.serialize_args
    def _test_serialize_args(self, *args, **kwargs):
        # By the time this body runs, the decorator should have
        # stringified every datetime argument.
        expected_args = ('untouched', self.str_now, self.str_now)
        for index, val in enumerate(args):
            self.assertEqual(expected_args[index], val)

        expected_kwargs = {'a': 'untouched', 'b': self.str_now,
                           'c': self.str_now, 'exc_val': self.unicode_str}
        for key, val in six.iteritems(kwargs):
            self.assertEqual(expected_kwargs[key], val)

    def test_serialize_args(self):
        self._test_serialize_args('untouched', self.now, self.now,
                                  a='untouched', b=self.now, c=self.now,
                                  exc_val=self.unicode_str)
class TestRegistry(test.NoDBTestCase):
    """Tests for NovaObjectRegistry's version-aware registration hook."""

    @mock.patch('nova.objects.base.objects')
    def test_hook_chooses_newer_properly(self, mock_objects):
        reg = base.NovaObjectRegistry()
        reg.registration_hook(MyObj, 0)

        class MyNewerObj(object):
            VERSION = '1.123'

            @classmethod
            def obj_name(cls):
                return 'MyObj'

        self.assertEqual(MyObj, mock_objects.MyObj)
        # A newer version under the same name replaces the registration.
        reg.registration_hook(MyNewerObj, 0)
        self.assertEqual(MyNewerObj, mock_objects.MyObj)

    @mock.patch('nova.objects.base.objects')
    def test_hook_keeps_newer_properly(self, mock_objects):
        reg = base.NovaObjectRegistry()
        reg.registration_hook(MyObj, 0)

        class MyOlderObj(object):
            VERSION = '1.1'

            @classmethod
            def obj_name(cls):
                return 'MyObj'

        self.assertEqual(MyObj, mock_objects.MyObj)
        # An older version must not displace the newer registration.
        reg.registration_hook(MyOlderObj, 0)
        self.assertEqual(MyObj, mock_objects.MyObj)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
# Map of object name -> '<VERSION>-<md5 fingerprint>' as computed by
# TestObjectVersions._get_fingerprint(); values are compared verbatim,
# so do not edit a hash without the matching object version bump.
object_data = {
    'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba',
    'AgentList': '1.0-5a7380d02c3aaf2a32fc8115ae7ca98c',
    'Aggregate': '1.1-1ab35c4516f71de0bef7087026ab10d1',
    'AggregateList': '1.2-fb6e19f3c3a3186b04eceb98b5dadbfa',
    'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
    'BandwidthUsageList': '1.2-5fe7475ada6fe62413cbfcc06ec70746',
    'BlockDeviceMapping': '1.15-d44d8d694619e79c172a99b3c1d6261d',
    'BlockDeviceMappingList': '1.16-6fa262c059dad1d519b9fe05b9e4f404',
    'CellMapping': '1.0-7f1a7e85a22bbb7559fc730ab658b9bd',
    'ComputeNode': '1.14-a396975707b66281c5f404a68fccd395',
    'ComputeNodeList': '1.14-3b6f4f5ade621c40e70cb116db237844',
    'DNSDomain': '1.0-7b0b2dab778454b6a7b6c66afe163a1a',
    'DNSDomainList': '1.0-4ee0d9efdfd681fed822da88376e04d2',
    'EC2Ids': '1.0-474ee1094c7ec16f8ce657595d8c49d9',
    'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
    'EC2SnapshotMapping': '1.0-47e7ddabe1af966dce0cfd0ed6cd7cd1',
    'EC2VolumeMapping': '1.0-5b713751d6f97bad620f3378a521020d',
    'FixedIP': '1.14-53e1c10b539f1a82fe83b1af4720efae',
    'FixedIPList': '1.14-87a39361c8f08f059004d6b15103cdfd',
    'Flavor': '1.1-b6bb7a730a79d720344accefafacf7ee',
    'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c',
    'FloatingIP': '1.10-52a67d52d85eb8b3f324a5b7935a335b',
    'FloatingIPList': '1.11-7f2ba670714e1b7bab462ab3290f7159',
    'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
    'HVSpec': '1.1-6b4f7c0f688cbd03e24142a44eb9010d',
    'ImageMeta': '1.7-642d1b2eb3e880a367f37d72dd76162d',
    'ImageMetaProps': '1.7-f12fc4cf3e25d616f69a66fb9d2a7aa6',
    'Instance': '2.0-ff56804dce87d81d9a04834d4bd1e3d2',
    # NOTE(danms): Reviewers: do not approve changes to the Instance1
    # object schema. It is frozen for Liberty and will be removed in
    # Mitaka.
    'Instance1': '1.23-4e68422207667f4abff5fa730a5edc98',
    'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914',
    'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33',
    'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
    'InstanceActionList': '1.0-4a53826625cc280e15fae64a575e0879',
    'InstanceExternalEvent': '1.1-6e446ceaae5f475ead255946dd443417',
    'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
    'InstanceFaultList': '1.1-f8ec07cbe3b60f5f07a8b7a06311ac0d',
    'InstanceGroup': '1.10-1a0c8c7447dc7ecb9da53849430c4a5f',
    'InstanceGroupList': '1.7-be18078220513316abd0ae1b2d916873',
    'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
    'InstanceList': '2.0-6c8ba6147cca3082b1e4643f795068bf',
    # NOTE(danms): Reviewers: do not approve changes to the InstanceList1
    # object schema. It is frozen for Liberty and will be removed in
    # Mitaka.
    'InstanceList1': '1.22-6c8ba6147cca3082b1e4643f795068bf',
    'InstanceMapping': '1.0-47ef26034dfcbea78427565d9177fe50',
    'InstanceMappingList': '1.0-9e982e3de1613b9ada85e35f69b23d47',
    'InstanceNUMACell': '1.2-535ef30e0de2d6a0d26a71bd58ecafc4',
    'InstanceNUMATopology': '1.2-d944a7d6c21e1c773ffdf09c6d025954',
    'InstancePCIRequest': '1.1-b1d75ebc716cb12906d9d513890092bf',
    'InstancePCIRequests': '1.1-65e38083177726d806684cb1cc0136d2',
    'KeyPair': '1.3-bfaa2a8b148cdf11e0c72435d9dd097a',
    'KeyPairList': '1.2-58b94f96e776bedaf1e192ddb2a24c4e',
    'Migration': '1.2-8784125bedcea0a9227318511904e853',
    'MigrationContext': '1.0-d8c2f10069e410f639c49082b5932c92',
    'MigrationList': '1.2-02c0ec0c50b75ca86a2a74c5e8c911cc',
    'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
    'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'NUMACell': '1.2-74fc993ac5c83005e76e34e8487f1c05',
    'NUMAPagesTopology': '1.0-c71d86317283266dc8364c149155e48e',
    'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
    'NUMATopologyLimits': '1.0-9463e0edd40f64765ae518a539b9dfd2',
    'Network': '1.2-a977ab383aa462a479b2fae8211a5dde',
    'NetworkList': '1.2-69eca910d8fa035dfecd8ba10877ee59',
    'NetworkRequest': '1.1-7a3e4ca2ce1e7b62d8400488f2f2b756',
    'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'PciDevice': '1.3-d92e0b17bbed61815b919af6b8d8998e',
    'PciDeviceList': '1.2-3757458c45591cbc92c72ee99e757c98',
    'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
    'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'Quotas': '1.2-1fe4cd50593aaf5d36a6dc5ab3f98fb3',
    'QuotasNoOp': '1.2-e041ddeb7dc8188ca71706f78aad41c1',
    'RequestSpec': '1.4-6922fe208b5d1186bdd825513f677921',
    'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
    'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641',
    'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0',
    'SecurityGroup': '1.1-0e1b9ba42fe85c13c1437f8b74bdb976',
    'SecurityGroupList': '1.0-dc8bbea01ba09a2edb6e5233eae85cbc',
    'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29',
    'SecurityGroupRuleList': '1.1-674b323c9ccea02e93b1b40e7fd2091a',
    'Service': '1.18-f1c6e82b5479f63e35970fe7625c3878',
    'ServiceList': '1.16-b767102cba7cbed290e396114c3f86b3',
    'TaskLog': '1.0-78b0534366f29aa3eebb01860fbe18fe',
    'TaskLogList': '1.0-cc8cce1af8a283b9d28b55fcd682e777',
    'Tag': '1.1-8b8d7d5b48887651a0e01241672e2963',
    'TagList': '1.1-55231bdb671ecf7641d6a2e9109b5d8e',
    'VirtCPUFeature': '1.0-3310718d8c72309259a6e39bdefe83ee',
    'VirtCPUModel': '1.0-6a5cc9f322729fc70ddc6733bacd57d3',
    'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
    'VirtualInterface': '1.0-19921e38cba320f355d56ecbf8f29587',
    'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
    'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
}
class TestObjectVersions(test.NoDBTestCase):
@staticmethod
def _is_method(thing):
    """Return True for plain functions and (bound/unbound) methods.

    NOTE(dims): In Python3, the concept of 'unbound methods' has been
    removed from the language. When referencing a method as a class
    attribute, you now get a plain function object, so both predicates
    are checked.
    """
    return any(check(thing)
               for check in (inspect.isfunction, inspect.ismethod))
def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
    """Follow a chain of remotable things down to the original function."""
    if isinstance(thing, classmethod):
        # Unwrap the classmethod to reach the underlying callable.
        return self._find_remotable_method(cls, thing.__get__(None, cls))
    elif self._is_method(thing) and hasattr(thing, 'remotable'):
        # A remotable wrapper: recurse into the function it wraps.
        return self._find_remotable_method(cls, thing.original_fn,
                                           parent_was_remotable=True)
    elif parent_was_remotable:
        # We must be the first non-remotable thing underneath a stack of
        # remotable things (i.e. the actual implementation method)
        return thing
    else:
        # This means the top-level thing never hit a remotable layer
        return None
def _un_unicodify_enum_valid_values(self, _fields):
    """Coerce Enum valid_values to UTF-8 byte strings (py2 hash stability)."""
    for name, field in _fields:
        if not isinstance(field, (fields.BaseEnumField,
                                  fields.EnumField)):
            continue
        # Rebuild valid_values in its original container type with
        # encoded members so repr() matches across the unicode change.
        orig_type = type(field._type._valid_values)
        field._type._valid_values = orig_type(
            [x.encode('utf-8') for x in
             field._type._valid_values])
def _get_fingerprint(self, obj_class):
    """Return '<VERSION>-<md5>' over the class's fields and remotable
    method signatures; compared against the object_data map above."""
    fields = list(obj_class.fields.items())
    # NOTE(danms): We store valid_values in the enum as strings,
    # but oslo is working to make these coerced to unicode (which
    # is the right thing to do). The functionality will be
    # unchanged, but the repr() result that we use for calculating
    # the hashes will be different. This helper method coerces all
    # Enum valid_values elements to UTF-8 string before we make the
    # repr() call so that it is consistent before and after the
    # unicode change, and on py2 and py3.
    if six.PY2:
        self._un_unicodify_enum_valid_values(fields)
    fields.sort()
    methods = []
    for name in dir(obj_class):
        thing = getattr(obj_class, name)
        if self._is_method(thing) or isinstance(thing, classmethod):
            method = self._find_remotable_method(obj_class, thing)
            if method:
                # NOTE(review): getargspec is deprecated on py3;
                # switching to getfullargspec would change the repr and
                # therefore every stored hash — leave as-is.
                methods.append((name, inspect.getargspec(method)))
    methods.sort()
    # NOTE(danms): Things that need a version bump are any fields
    # and their types, or the signatures of any remotable methods.
    # Of course, these are just the mechanical changes we can detect,
    # but many other things may require a version bump (method behavior
    # and return value changes, for example).
    if hasattr(obj_class, 'child_versions'):
        relevant_data = (fields, methods,
                         OrderedDict(
                             sorted(obj_class.child_versions.items())))
    else:
        relevant_data = (fields, methods)
    relevant_data = repr(relevant_data)
    if six.PY3:
        # md5 requires bytes on py3.
        relevant_data = relevant_data.encode('utf-8')
    fingerprint = '%s-%s' % (
        obj_class.VERSION, hashlib.md5(relevant_data).hexdigest())
    return fingerprint
def test_find_remotable_method(self):
    """The helper unwraps a @base.remotable-decorated method."""
    class MyObject(object):
        @base.remotable
        def my_method(self):
            return 'Hello World!'

    found = self._find_remotable_method(MyObject, MyObject.my_method)
    self.assertIsNotNone(found)
def test_versions(self):
    """Compare every registered object's fingerprint to object_data.

    A mismatch means an object changed mechanically (fields or remotable
    method signatures) without a version bump and hash update. Setting
    GENERATE_HASHES in the environment dumps the computed hashes to
    object_hashes.txt instead of comparing.
    """
    fingerprints = {}
    obj_classes = base.NovaObjectRegistry.obj_classes()
    for obj_name in sorted(obj_classes, key=lambda x: x[0]):
        index = 0
        for version_cls in obj_classes[obj_name]:
            # Additional major versions get a suffixed name, e.g.
            # 'Instance1' for the frozen 1.x Instance.
            if len(obj_classes[obj_name]) > 1 and index != 0:
                name = '%s%s' % (obj_name,
                                 version_cls.VERSION.split('.')[0])
            else:
                name = obj_name
            fingerprints[name] = self._get_fingerprint(version_cls)
            index += 1
    if os.getenv('GENERATE_HASHES'):
        # NOTE: was file('object_hashes.txt', 'w').write(...), which
        # uses the Python-2-only `file` builtin and leaks the handle;
        # open() in a context manager works on both py2 and py3.
        with open('object_hashes.txt', 'w') as hashes_file:
            hashes_file.write(pprint.pformat(fingerprints))
        raise test.TestingException(
            'Generated hashes in object_hashes.txt')
    stored = set(object_data.items())
    computed = set(fingerprints.items())
    changed = stored.symmetric_difference(computed)
    expected = {}
    actual = {}
    for name, hash in changed:
        expected[name] = object_data.get(name)
        actual[name] = fingerprints.get(name)
    self.assertEqual(expected, actual,
                     'Some objects have changed; please make sure the '
                     'versions have been bumped, and then update their '
                     'hashes here.')
def _get_object_field_name(self, field):
    """Return the object class name a field holds, or None for plain fields."""
    if isinstance(field._type, fields.Object):
        # Scalar object field.
        return field._type._obj_name
    if isinstance(field, fields.ListOfObjectsField):
        # List-of-objects field: dig down to the element's object name.
        return field._type._element_type._type._obj_name
    return None
def _get_obj_cls(self, name):
    """Return the class to use for old-style relationship checks."""
    # NOTE(danms): We're moving to using manifest-based backports,
    # which don't depend on relationships. Given that we only had
    # one major version of each object before that change, we can
    # make sure to pull the older version of objects that have
    # a 2.0 version while calculating the old-style relationship
    # mapping. Once we drop all the 1.x versions, we can drop this
    # relationship test altogether.
    new_objects = ['Instance', 'InstanceList']

    versions = base.NovaObjectRegistry.obj_classes()[name]
    if len(versions) > 1 and name in new_objects:
        # Use the older (1.x) registration for the 2.0-era objects.
        return versions[1]
    else:
        return versions[0]
def _build_tree(self, tree, obj_class, get_current_versions=True):
    """Record tree[parent][child] = child-version for obj_class.

    With get_current_versions=True the child's current VERSION is used;
    otherwise the newest child version listed in the parent's
    obj_relationships map is used.
    """
    obj_name = obj_class.obj_name()
    if obj_name in tree:
        # Already visited this class.
        return

    for name, field in obj_class.fields.items():
        sub_obj_name = self._get_object_field_name(field)
        if sub_obj_name:
            sub_obj_class = self._get_obj_cls(sub_obj_name)
            tree.setdefault(obj_name, {})
            if get_current_versions:
                sub_obj_ver = sub_obj_class.VERSION
            else:
                # get the most recent subobject version
                # from obj_relationships
                sub_obj_ver = obj_class.obj_relationships[name][-1][1]
            tree[obj_name][sub_obj_name] = sub_obj_ver
def test_relationships(self):
    # This test asserts that the obj_relationship map of all objects
    # contain the current versions of any subobjects.
    current_versions_tree = {}
    obj_relationships_tree = {}
    obj_classes = base.NovaObjectRegistry.obj_classes()
    for obj_name in obj_classes.keys():
        obj_cls = self._get_obj_cls(obj_name)
        self._build_tree(current_versions_tree, obj_cls)
        self._build_tree(obj_relationships_tree, obj_cls,
                         get_current_versions=False)

    # Compare as (name, stringified-deps) pairs so the diff below only
    # contains entries that disagree.
    stored = set([(x, str(y))
                  for x, y in obj_relationships_tree.items()])
    computed = set([(x, str(y))
                    for x, y in current_versions_tree.items()])
    changed = stored.symmetric_difference(computed)
    expected = {}
    actual = {}
    for name, deps in changed:
        expected[name] = current_versions_tree.get(name)
        actual[name] = obj_relationships_tree.get(name)

    # If this assertion is failing, this means an object is holding a
    # non-current version of another object.
    # Example: if Instance is bumped from version 1.1 to 1.2,
    # and InstanceList is still only has obj_relationships with 1.1,
    # this assertion will fail. InstanceList will need to also be bumped
    # a version, with the relationship to Instance 1.2 added.
    self.assertEqual(expected, actual,
                     'Some objects have changed dependencies. '
                     'Please make sure to bump the versions of '
                     'parent objects and provide a rule in their '
                     'obj_make_compatible() routines to backlevel '
                     'the child object. The expected dict is the '
                     'current versions of all objects held by other '
                     'objects, and the actual dict is what is held '
                     'within obj_relationships on the given objects.')
def test_obj_make_compatible(self):
    # Iterate all object classes and verify that we can run
    # obj_make_compatible with every older version than current.
    # This doesn't actually test the data conversions, but it at least
    # makes sure the method doesn't blow up on something basic like
    # expecting the wrong version format.
    obj_classes = base.NovaObjectRegistry.obj_classes()
    for obj_name in obj_classes:
        obj_class = obj_classes[obj_name][0]
        version = utils.convert_version_to_tuple(obj_class.VERSION)
        for n in range(version[1]):
            test_version = '%d.%d' % (version[0], n)
            # Lazy %-style args: let the logging framework do the
            # interpolation only when the record is actually emitted.
            LOG.info('testing obj: %s version: %s',
                     obj_name, test_version)
            obj_class().obj_to_primitive(target_version=test_version)
def test_list_obj_make_compatible(self):
    """List backports pick the right child version from obj_relationships."""
    @base.NovaObjectRegistry.register_if(False)
    class TestObj(base.NovaObject):
        VERSION = '1.4'
        fields = {'foo': fields.IntegerField()}

    @base.NovaObjectRegistry.register_if(False)
    class TestListObj(base.ObjectListBase, base.NovaObject):
        VERSION = '1.5'
        fields = {'objects': fields.ListOfObjectsField('TestObj')}
        obj_relationships = {
            'objects': [('1.0', '1.1'), ('1.1', '1.2'),
                        ('1.3', '1.3'), ('1.5', '1.4')]
        }

    my_list = TestListObj()
    my_obj = TestObj(foo=1)
    my_list.objects = [my_obj]
    primitive = my_list.obj_to_primitive(target_version='1.5')
    primitive_data = primitive['nova_object.data']
    obj_primitive = my_obj.obj_to_primitive(target_version='1.4')
    obj_primitive_data = obj_primitive['nova_object.data']
    with mock.patch.object(TestObj, 'obj_make_compatible') as comp:
        my_list.obj_make_compatible(primitive_data, '1.1')
        # List 1.1 maps to child 1.2 in the relationship table above.
        comp.assert_called_with(obj_primitive_data,
                                '1.2')
    def test_list_obj_make_compatible_when_no_objects(self):
        # Test to make sure obj_make_compatible works with no 'objects'
        # If a List object ever has a version that did not contain the
        # 'objects' key, we need to make sure converting back to that version
        # doesn't cause backporting problems.
        @base.NovaObjectRegistry.register_if(False)
        class TestObj(base.NovaObject):
            VERSION = '1.1'
            fields = {'foo': fields.IntegerField()}
        @base.NovaObjectRegistry.register_if(False)
        class TestListObj(base.ObjectListBase, base.NovaObject):
            VERSION = '1.1'
            fields = {'objects': fields.ListOfObjectsField('TestObj')}
            # pretend that version 1.0 didn't have 'objects'
            obj_relationships = {
                'objects': [('1.1', '1.1')]
            }
        my_list = TestListObj()
        my_list.objects = [TestObj(foo=1)]
        primitive = my_list.obj_to_primitive(target_version='1.1')
        primitive_data = primitive['nova_object.data']
        # Backporting below the first mapped parent version ('1.1') means
        # the field did not exist yet, so it must be stripped entirely.
        my_list.obj_make_compatible(primitive_data,
                                    target_version='1.0')
        self.assertNotIn('objects', primitive_data,
                         "List was backported to before 'objects' existed."
                         " 'objects' should not be in the primitive.")
    def test_obj_bad_relationships(self):
        # Make sure having an object with bad relationships is caught by
        # _build_tree()
        @base.NovaObjectRegistry.register
        class TestObj(base.NovaObject):
            VERSION = '1.1'
            fields = {'foo': fields.IntegerField()}
        @base.NovaObjectRegistry.register
        class OtherTestObj(base.NovaObject):
            VERSION = '1.2'
            fields = {'test': fields.ObjectField('TestObj')}
            # Maps child at 1.0 even though TestObj is already at 1.1,
            # so the two trees built below must disagree.
            obj_relationships = {'test': [('1.0', '1.0')]}
        current_versions_tree = {}
        obj_relationships_tree = {}
        obj_classes = base.NovaObjectRegistry.obj_classes()
        # Tree built from the children's *current* versions...
        expected_current = {'OtherTestObj': {'TestObj': '1.1'}}
        self._build_tree(current_versions_tree,
                         obj_classes['OtherTestObj'][0])
        # ...versus the tree recorded in obj_relationships.
        expected_obj_relationships = {'OtherTestObj': {'TestObj': '1.0'}}
        self._build_tree(obj_relationships_tree,
                         obj_classes['OtherTestObj'][0],
                         get_current_versions=False)
        self.assertEqual(expected_current, current_versions_tree)
        self.assertEqual(expected_obj_relationships, obj_relationships_tree)
def _get_obj_same_major(self, this_cls, obj_name):
this_major = this_cls.VERSION.split('.')[0]
obj_classes = base.NovaObjectRegistry.obj_classes()
for cls_version in obj_classes[obj_name]:
major = cls_version.VERSION.split('.')[0]
if major == this_major:
return cls_version
def _get_obj_to_test(self, obj_class):
obj = obj_class()
for fname, ftype in obj.fields.items():
if isinstance(ftype, fields.ObjectField):
fobjname = ftype.AUTO_TYPE._obj_name
fobjcls = self._get_obj_same_major(obj_class, fobjname)
setattr(obj, fname, self._get_obj_to_test(fobjcls))
elif isinstance(ftype, fields.ListOfObjectsField):
# FIXME(danms): This will result in no tests for this
# field type...
setattr(obj, fname, [])
return obj
def _find_version_mapping(self, my_ver, versions):
closest = None
my_ver = utils.convert_version_to_tuple(my_ver)
for _my, _child in versions:
_my = utils.convert_version_to_tuple(_my)
_child = utils.convert_version_to_tuple(_child)
if _my == my_ver:
return '%s.%s' % _child
elif _my < my_ver:
closest = _child
if closest:
return '%s.%s' % closest
else:
return None
def _validate_object_fields(self, obj_class, primitive):
for fname, ftype in obj_class.fields.items():
if isinstance(ftype, fields.ObjectField):
exp_vers = obj_class.obj_relationships[fname]
exp_ver = self._find_version_mapping(
primitive['nova_object.version'], exp_vers)
if exp_ver is None:
self.assertNotIn(fname, primitive['nova_object.data'])
else:
child_p = primitive['nova_object.data'][fname]
self.assertEqual(exp_ver,
child_p['nova_object.version'])
    def test_obj_make_compatible_with_data(self):
        # Iterate all object classes (with their ObjectField children
        # populated via _get_obj_to_test) and verify each can be backported
        # to every older minor version; _validate_object_fields() then
        # checks each child landed on the version obj_relationships
        # dictates.
        obj_classes = base.NovaObjectRegistry.obj_classes()
        for obj_name in obj_classes:
            for obj_class in obj_classes[obj_name]:
                if obj_class.VERSION.startswith('2'):
                    # NOTE(danms): Objects with major versions >=2 will
                    # use version_manifest for backports, which is a
                    # different test than this one, so skip.
                    continue
                if 'tests.unit' in obj_class.__module__:
                    # NOTE(danms): Skip test objects. When we move to
                    # oslo.versionedobjects, we won't have to do this
                    continue
                version = utils.convert_version_to_tuple(obj_class.VERSION)
                for n in range(version[1]):
                    test_version = '%d.%d' % (version[0], n)
                    LOG.info('testing obj: %s version: %s' %
                             (obj_name, test_version))
                    test_object = self._get_obj_to_test(obj_class)
                    obj_p = test_object.obj_to_primitive(
                        target_version=test_version)
                    self._validate_object_fields(obj_class, obj_p)
    def test_obj_relationships_in_order(self):
        # Verify that every object's obj_relationships lists its
        # (parent, child) version pairs in order: strictly increasing
        # parent versions with non-decreasing child versions.
        obj_classes = base.NovaObjectRegistry.obj_classes()
        for obj_name in obj_classes:
            obj_class = obj_classes[obj_name][0]
            for field, versions in obj_class.obj_relationships.items():
                last_my_version = (0, 0)
                last_child_version = (0, 0)
                for my_version, child_version in versions:
                    # Compare as tuples so e.g. 1.10 sorts after 1.9.
                    _my_version = utils.convert_version_to_tuple(my_version)
                    _ch_version = utils.convert_version_to_tuple(child_version)
                    self.assertTrue((last_my_version < _my_version
                                    and last_child_version <= _ch_version),
                                    'Object %s relationship '
                                    '%s->%s for field %s is out of order' % (
                                        obj_name, my_version, child_version,
                                        field))
                    last_my_version = _my_version
                    last_child_version = _ch_version
def test_objects_use_obj_relationships(self):
obj_classes = base.NovaObjectRegistry.obj_classes()
for obj_name in obj_classes:
obj_class = obj_classes[obj_name][0]
self.assertFalse((hasattr(obj_class, 'child_versions')
and obj_class.child_versions),
'Object %s should be using obj_relationships, '
'not child_versions.' % obj_name)
    def test_obj_relationships_not_past_current_parent_version(self):
        # Iterate all object classes to verify that all versions of the parent
        # held in obj_relationships are at or before the current version
        obj_classes = base.NovaObjectRegistry.obj_classes()
        for obj_name in obj_classes:
            obj_class = obj_classes[obj_name][0]
            cur_version = utils.convert_version_to_tuple(obj_class.VERSION)
            for field, versions in obj_class.obj_relationships.items():
                for my_version, child_version in versions:
                    # Compare as tuples so e.g. 1.10 sorts after 1.9.
                    tup_version = utils.convert_version_to_tuple(my_version)
                    self.assertTrue(tup_version <= cur_version,
                                    "Field '%(field)s' of %(obj)s contains a "
                                    "relationship that is past the current "
                                    "version. Relationship version is %(ov)s."
                                    " Current version is %(cv)s." %
                                    {'field': field, 'obj': obj_name,
                                     'ov': my_version,
                                     'cv': obj_class.VERSION})
    def test_obj_relationships_not_past_current_child_version(self):
        # Iterate all object classes to verify that all versions of subobjects
        # held in obj_relationships are at or before the current version
        obj_classes = base.NovaObjectRegistry.obj_classes()
        for obj_name in obj_classes:
            obj_class = obj_classes[obj_name][0]
            for field, versions in obj_class.obj_relationships.items():
                obj_field = obj_class.fields[field]
                child_name = self._get_object_field_name(obj_field)
                child_class = obj_classes[child_name][0]
                curr_child_ver = child_class.VERSION
                tup_curr_child_ver = utils.convert_version_to_tuple(
                        curr_child_ver)
                for parent_ver, child_ver in versions:
                    # Compare as tuples so e.g. 1.10 sorts after 1.9.
                    tup_version = utils.convert_version_to_tuple(child_ver)
                    self.assertTrue(tup_version <= tup_curr_child_ver,
                                    "Field '%(field)s' of %(obj)s contains a "
                                    "relationship that is past the current "
                                    "version of %(child_obj)s. Relationship "
                                    "version is %(ov)s. Current version is "
                                    "%(cv)s." %
                                    {'field': field, 'obj': obj_name,
                                     'child_obj': child_name,
                                     'ov': child_ver, 'cv': curr_child_ver})
class TestObjEqualPrims(_BaseTestCase):
    """Tests for base.obj_equal_prims()."""

    @staticmethod
    def _prim_obj(bar):
        # Build a MyObj with no pending changes, so only primitive data
        # (not the changed-fields tracking) differs between test objects.
        obj = MyObj(foo=1, bar=bar)
        obj.obj_reset_changes()
        return obj

    def test_object_equal(self):
        obj1 = self._prim_obj('goodbye')
        obj2 = self._prim_obj('goodbye')
        # Re-assigning the same value marks obj2 as changed without
        # altering its primitive data.
        obj2.bar = 'goodbye'
        self.assertTrue(base.obj_equal_prims(obj1, obj2),
                        "Objects that differ only because one a is marked "
                        "as updated should be equal")

    def test_object_not_equal(self):
        obj1 = self._prim_obj('goodbye')
        obj2 = self._prim_obj('hello')
        self.assertFalse(base.obj_equal_prims(obj1, obj2),
                         "Objects that differ in any field "
                         "should not be equal")

    def test_object_ignore_equal(self):
        obj1 = self._prim_obj('goodbye')
        obj2 = self._prim_obj('hello')
        self.assertTrue(base.obj_equal_prims(obj1, obj2, ['bar']),
                        "Objects that only differ in an ignored field "
                        "should be equal")
class TestObjMethodOverrides(test.NoDBTestCase):
    """Verify subclasses do not change inherited method signatures."""

    def test_obj_reset_changes(self):
        # inspect.getargspec() was deprecated and removed in Python 3.11;
        # getfullargspec() performs the same signature comparison.
        args = inspect.getfullargspec(base.NovaObject.obj_reset_changes)
        obj_classes = base.NovaObjectRegistry.obj_classes()
        for obj_name in obj_classes:
            obj_class = obj_classes[obj_name][0]
            self.assertEqual(
                args, inspect.getfullargspec(obj_class.obj_reset_changes))
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDomainTopUrlVisitRequest(RpcRequest):
    """Request for the CDN ``DescribeDomainTopUrlVisit`` API (2018-05-10).

    Each query parameter is exposed through a getter/setter pair following
    the aliyunsdkcore RpcRequest convention.
    """

    def __init__(self):
        super(DescribeDomainTopUrlVisitRequest, self).__init__(
            'Cdn', '2018-05-10', 'DescribeDomainTopUrlVisit')

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):
        self.add_query_param('SecurityToken', SecurityToken)

    def get_DomainName(self):
        return self.get_query_params().get('DomainName')

    def set_DomainName(self, DomainName):
        self.add_query_param('DomainName', DomainName)

    def get_SortBy(self):
        return self.get_query_params().get('SortBy')

    def set_SortBy(self, SortBy):
        self.add_query_param('SortBy', SortBy)

    def get_StartTime(self):
        return self.get_query_params().get('StartTime')

    def set_StartTime(self, StartTime):
        self.add_query_param('StartTime', StartTime)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import logging
from nose.tools import eq_
import os
import sys
import unittest
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.lib import ofctl_v1_4
from ryu.ofproto import ofproto_parser
from ryu.ofproto.ofproto_protocol import ProtocolDesc
from ryu.tests import test_lib
LOG = logging.getLogger(__name__)
class DummyDatapath(ProtocolDesc):
    """Fake datapath that records the sent message and replays a canned reply."""

    def __init__(self, version):
        super(DummyDatapath, self).__init__(version)
        self.id = 1  # XXX
        self.request_msg = None
        self.reply_msg = None
        self.waiters = None

    @staticmethod
    def set_xid(msg):
        # Pin the xid to 0 so serialized requests are byte-comparable.
        msg.set_xid(0)
        return 0

    def send_msg(self, msg):
        msg.serialize()
        self.request_msg = msg
        if not self.reply_msg:
            return
        # Deliver the canned reply through the waiters mechanism, the same
        # way a real datapath response would arrive.
        lock, msgs = self.waiters[self.id][msg.xid]
        msgs.append(self.reply_msg)
        del self.waiters[self.id][msg.xid]
        lock.set()

    def set_reply(self, msg, waiters):
        self.reply_msg = msg
        self.waiters = waiters
class Test_ofctl(unittest.TestCase):
    """Round-trip tests for the ofctl_v1_* helpers.
    Test methods are attached dynamically by _add_tests() below; _test()
    is the shared body each generated test calls.
    """
    def _test(self, name, dp, method, args, request, reply, expected):
        print('processing %s ...' % name)
        waiters = {}
        dp.set_reply(reply, waiters)
        # Helpers that wait for a reply take a waiters dict; fire-and-forget
        # helpers (mod_*) do not.
        if reply:
            output = method(dp=dp, waiters=waiters, **args)
        else:
            output = method(dp=dp, **args)
        # expected message <--> sent message
        request.serialize()
        try:
            eq_(json.dumps(request.to_jsondict(), sort_keys=True),
                json.dumps(dp.request_msg.to_jsondict(), sort_keys=True))
        except AssertionError as e:
            # For debugging
            json.dump(dp.request_msg.to_jsondict(),
                      open('/tmp/' + name + '_request.json', 'w'),
                      indent=3, sort_keys=True)
            raise e
        # expected output <--> return of ofctl
        def _remove(d, names):
            # Recursively drop keys in 'names' (computed lengths) so they
            # don't cause spurious mismatches in the JSON comparison.
            f = lambda x: _remove(x, names)
            if isinstance(d, list):
                return list(map(f, d))
            if isinstance(d, dict):
                d2 = {}
                for k, v in d.items():
                    if k in names:
                        continue
                    d2[k] = f(v)
                return d2
            return d
        expected = _remove(expected, ['len', 'length'])
        output = _remove(output, ['len', 'length'])
        try:
            eq_(json.dumps(expected, sort_keys=True),
                json.dumps(output, sort_keys=True))
        except AssertionError as e:
            # For debugging
            json.dump(output, open('/tmp/' + name + '_reply.json', 'w'),
                      indent=4)
            raise e
def _add_tests():
    """Generate one Test_ofctl method per (OF version, JSON fixture) case."""
    # Wire protocol version byte for each OpenFlow release under test.
    _ofp_vers = {
        'of10': 0x01,
        'of12': 0x03,
        'of13': 0x04,
        'of14': 0x05,
    }
    # Each entry maps an ofctl helper to its request fixture and, when the
    # call waits for a response, the canned reply fixture ('reply': None
    # marks fire-and-forget mod_*/send_* helpers).
    _test_cases = {
        'of10': [
            {
                'method': ofctl_v1_0.mod_flow_entry,
                'request': '1-2-ofp_flow_mod.packet.json',
                'reply': None
            },
        ],
        'of12': [
            {
                'method': ofctl_v1_2.get_desc_stats,
                'request': '3-24-ofp_desc_stats_request.packet.json',
                'reply': '3-0-ofp_desc_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_queue_stats,
                'request': '3-37-ofp_queue_stats_request.packet.json',
                'reply': '3-38-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_queue_stats,
                'request': 'lib-ofctl-ofp_queue_stats_request.packet1.json',
                'reply': '3-38-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_queue_stats,
                'request': 'lib-ofctl-ofp_queue_stats_request.packet2.json',
                'reply': '3-38-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_queue_stats,
                'request': 'lib-ofctl-ofp_queue_stats_request.packet3.json',
                'reply': '3-38-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_queue_config,
                'request': '3-35-ofp_queue_get_config_request.packet.json',
                'reply': '3-36-ofp_queue_get_config_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_flow_stats,
                'request': '3-11-ofp_flow_stats_request.packet.json',
                'reply': '3-12-ofp_flow_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_aggregate_flow_stats,
                'request': '3-25-ofp_aggregate_stats_request.packet.json',
                'reply': '3-26-ofp_aggregate_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_table_stats,
                'request': '3-27-ofp_table_stats_request.packet.json',
                'reply': '3-28-ofp_table_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_port_stats,
                'request': '3-29-ofp_port_stats_request.packet.json',
                'reply': '3-30-ofp_port_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_port_stats,
                'request': 'lib-ofctl-ofp_port_stats_request.packet.json',
                'reply': '3-30-ofp_port_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_group_stats,
                'request': '3-61-ofp_group_stats_request.packet.json',
                'reply': '3-62-ofp_group_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_group_stats,
                'request': 'lib-ofctl-ofp_group_stats_request.packet.json',
                'reply': '3-62-ofp_group_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_group_features,
                'request': '3-31-ofp_group_features_stats_request.packet.json',
                'reply': '3-32-ofp_group_features_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.get_group_desc,
                'request': '3-33-ofp_group_desc_stats_request.packet.json',
                'reply': '3-34-ofp_group_desc_stats_reply.packet.json'
            },
            # In OpenFlow 1.2, ofp_port_desc is not defined.
            # We use ofp_features_request to get ports description instead.
            {
                'method': ofctl_v1_2.get_port_desc,
                'request': '3-5-ofp_features_request.packet.json',
                'reply': '3-6-ofp_features_reply.packet.json'
            },
            {
                'method': ofctl_v1_2.mod_flow_entry,
                'request': '3-2-ofp_flow_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_2.mod_group_entry,
                'request': '3-21-ofp_group_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_2.mod_port_behavior,
                'request': '3-22-ofp_port_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_2.send_experimenter,
                'request': '3-16-ofp_experimenter.packet.json',
                'reply': None
            },
        ],
        'of13': [
            {
                'method': ofctl_v1_3.get_desc_stats,
                'request': '4-24-ofp_desc_request.packet.json',
                'reply': '4-0-ofp_desc_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_queue_stats,
                'request': '4-37-ofp_queue_stats_request.packet.json',
                'reply': '4-38-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_queue_stats,
                'request': 'lib-ofctl-ofp_queue_stats_request.packet1.json',
                'reply': '4-38-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_queue_stats,
                'request': 'lib-ofctl-ofp_queue_stats_request.packet2.json',
                'reply': '4-38-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_queue_stats,
                'request': 'lib-ofctl-ofp_queue_stats_request.packet3.json',
                'reply': '4-38-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_queue_config,
                'request': '4-35-ofp_queue_get_config_request.packet.json',
                'reply': '4-36-ofp_queue_get_config_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_flow_stats,
                'request': '4-11-ofp_flow_stats_request.packet.json',
                'reply': '4-12-ofp_flow_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_aggregate_flow_stats,
                'request': '4-25-ofp_aggregate_stats_request.packet.json',
                'reply': '4-26-ofp_aggregate_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_table_stats,
                'request': '4-27-ofp_table_stats_request.packet.json',
                'reply': '4-28-ofp_table_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_table_features,
                'request': 'lib-ofctl-ofp_table_features_request.packet.json',
                'reply': '4-56-ofp_table_features_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_port_stats,
                'request': '4-29-ofp_port_stats_request.packet.json',
                'reply': '4-30-ofp_port_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_port_stats,
                'request': 'lib-ofctl-ofp_port_stats_request.packet.json',
                'reply': '4-30-ofp_port_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_meter_stats,
                'request': '4-49-ofp_meter_stats_request.packet.json',
                'reply': '4-50-ofp_meter_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_meter_stats,
                'request': 'lib-ofctl-ofp_meter_stats_request.packet.json',
                'reply': '4-50-ofp_meter_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_meter_features,
                'request': '4-51-ofp_meter_features_request.packet.json',
                'reply': '4-52-ofp_meter_features_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_meter_config,
                'request': '4-47-ofp_meter_config_request.packet.json',
                'reply': '4-48-ofp_meter_config_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_meter_config,
                'request': 'lib-ofctl-ofp_meter_config_request.packet.json',
                'reply': '4-48-ofp_meter_config_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_group_stats,
                'request': '4-57-ofp_group_stats_request.packet.json',
                'reply': '4-58-ofp_group_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_group_stats,
                'request': 'lib-ofctl-ofp_group_stats_request.packet.json',
                'reply': '4-58-ofp_group_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_group_features,
                'request': '4-31-ofp_group_features_request.packet.json',
                'reply': '4-32-ofp_group_features_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_group_desc,
                'request': '4-33-ofp_group_desc_request.packet.json',
                'reply': '4-34-ofp_group_desc_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.get_port_desc,
                'request': '4-53-ofp_port_desc_request.packet.json',
                'reply': '4-54-ofp_port_desc_reply.packet.json'
            },
            {
                'method': ofctl_v1_3.mod_flow_entry,
                'request': '4-2-ofp_flow_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_3.mod_meter_entry,
                'request': '4-45-ofp_meter_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_3.mod_group_entry,
                'request': '4-21-ofp_group_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_3.mod_port_behavior,
                'request': '4-22-ofp_port_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_3.send_experimenter,
                'request': '4-16-ofp_experimenter.packet.json',
                'reply': None
            },
        ],
        'of14': [
            {
                'method': ofctl_v1_4.get_desc_stats,
                'request': '5-24-ofp_desc_request.packet.json',
                'reply': '5-0-ofp_desc_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_queue_stats,
                'request': '5-35-ofp_queue_stats_request.packet.json',
                'reply': '5-36-ofp_queue_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_queue_desc_stats,
                'request': '5-63-ofp_queue_desc_request.packet.json',
                'reply': '5-64-ofp_queue_desc_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_flow_stats,
                'request': '5-11-ofp_flow_stats_request.packet.json',
                'reply': '5-12-ofp_flow_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_aggregate_flow_stats,
                'request': '5-25-ofp_aggregate_stats_request.packet.json',
                'reply': '5-26-ofp_aggregate_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_table_stats,
                'request': '5-27-ofp_table_stats_request.packet.json',
                'reply': '5-28-ofp_table_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_table_features,
                'request': 'lib-ofctl-ofp_table_features_request.packet.json',
                'reply': '5-54-ofp_table_features_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_port_stats,
                'request': '5-29-ofp_port_stats_request.packet.json',
                'reply': '5-30-ofp_port_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_meter_stats,
                'request': '5-47-ofp_meter_stats_request.packet.json',
                'reply': '5-48-ofp_meter_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_meter_features,
                'request': '5-49-ofp_meter_features_request.packet.json',
                'reply': '5-50-ofp_meter_features_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_meter_config,
                'request': '5-45-ofp_meter_config_request.packet.json',
                'reply': '5-46-ofp_meter_config_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_group_stats,
                'request': '5-55-ofp_group_stats_request.packet.json',
                'reply': '5-56-ofp_group_stats_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_group_features,
                'request': '5-31-ofp_group_features_request.packet.json',
                'reply': '5-32-ofp_group_features_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_group_desc,
                'request': '5-33-ofp_group_desc_request.packet.json',
                'reply': '5-34-ofp_group_desc_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.get_port_desc,
                'request': '5-51-ofp_port_desc_request.packet.json',
                'reply': '5-52-ofp_port_desc_reply.packet.json'
            },
            {
                'method': ofctl_v1_4.mod_flow_entry,
                'request': '5-2-ofp_flow_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_4.mod_meter_entry,
                'request': '5-43-ofp_meter_mod.packet.json',  # flow --> meter
                'reply': None
            },
            {
                'method': ofctl_v1_4.mod_group_entry,
                'request': '5-21-ofp_group_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_4.mod_port_behavior,
                'request': '5-22-ofp_port_mod.packet.json',
                'reply': None
            },
            {
                'method': ofctl_v1_4.send_experimenter,
                'request': '5-16-ofp_experimenter.packet.json',
                'reply': None
            },
        ],
    }
    def _jsonfile_to_msg(datapath, jsonfile):
        # Parse a captured-packet JSON fixture into an OFP message object.
        return ofproto_parser.ofp_msg_from_jsondict(
            datapath, json.load(open(jsonfile)))
    this_dir = os.path.dirname(sys.modules[__name__].__file__)
    parser_json_root = os.path.join(this_dir, '../ofproto/json/')
    ofctl_json_root = os.path.join(this_dir, 'ofctl_json/')
    for ofp_ver, tests in _test_cases.items():
        dp = DummyDatapath(_ofp_vers[ofp_ver])
        parser_json_dir = os.path.join(parser_json_root, ofp_ver)
        ofctl_json_dir = os.path.join(ofctl_json_root, ofp_ver)
        for test in tests:
            name = 'test_ofctl_' + ofp_ver + '_' + test['request']
            print('adding %s ...' % name)
            args = {}
            # Optional kwargs for the ofctl call live beside the fixtures.
            args_json_path = os.path.join(ofctl_json_dir, test['request'])
            if os.path.exists(args_json_path):
                args = json.load(open(args_json_path))
            request = _jsonfile_to_msg(
                dp, os.path.join(parser_json_dir, test['request']))
            reply = None
            expected = None
            if test['reply']:
                reply = _jsonfile_to_msg(
                    dp, os.path.join(parser_json_dir, test['reply']))
                expected = json.load(
                    open(os.path.join(ofctl_json_dir, test['reply'])))
            # Bind this case's data into a callable and attach it to the
            # test class under a unique, descriptive method name.
            f = functools.partial(
                Test_ofctl._test, name=name, dp=dp, method=test['method'],
                args=args, request=request, reply=reply, expected=expected)
            test_lib.add_method(Test_ofctl, name, f)
# Generate one test method per (OF version, JSON fixture) pair at import time.
_add_tests()
if __name__ == "__main__":
    unittest.main()
|
import os
import math
import json
import datetime
import copy
import pandas as pd
import numpy as np
from collections import defaultdict
from ray import tune
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
def create_dict(filename):
    """Load a JSON file mapping keys to serialized DataFrames.

    filename: path to a JSON object whose values are DataFrame JSON strings.
    Returns a dict mapping each key to the parsed pandas.DataFrame.
    """
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection.
    with open(filename) as f:
        loaded = json.load(f)
    return {key: pd.read_json(loaded[key]) for key in loaded}
def get_params(trial_path):
    """Return the tune params dict stored in ``trial_path``/params.json."""
    # Context manager closes the file promptly instead of leaking the
    # handle until garbage collection.
    with open(trial_path + "/params.json") as f:
        return json.load(f)
def get_varied_param(trial_dict):
    '''
    Returns a 3-tuple (name, values, None) where name is the parameter that
    was varied via tune.grid_search and values is the set of values it takes.
    If no parameter, or more than one parameter, was varied, returns
    (None, None, None) so callers can fall back to plotting a single line.
    (The trailing None is kept for backward compatibility with callers that
    index the result.)
    '''
    # Gather each parameter's observed values into a set; a parameter was
    # "varied" iff its set has more than one distinct value.
    param_sets = defaultdict(set)  # maps param names to sets of values
    for trial_path in trial_dict.keys():
        params = get_params(trial_path)
        for param in params:
            param_sets[param].add(params[param])
    varied = [param for param in param_sets if len(param_sets[param]) != 1]
    if len(varied) == 1:
        return varied[0], param_sets[varied[0]], None
    if len(varied) > 1:
        print("More than one param has been varied. Plotting all trials as one line.")
    # BUG FIX: the original returned (name, None, None) here because the
    # conditional expression only guarded the second element, which made
    # callers crash on len(param_values); return all-None instead.
    return None, None, None
def get_reward_matrix_and_dict(trial_dict):
    '''
    Given a dictionary mapping trial paths to dataframes, return a 2-D numpy
    array where:
        each row is a trial's full history of mean reward acquired
        each column is one full training iteration
    Also return a dictionary mapping trial paths to each trial's padded
    reward array.
    Trials shorter than the longest one are padded by extending their last
    value (numpy 'edge' mode), which is equivalent to letting a converged
    model keep emitting its final reward.
    '''
    # Find the longest trial first so each array is padded exactly once to
    # the right length (the original padded everything to a hard-coded
    # 999999 and trimmed later, wasting memory and capping trial length).
    max_len = max(len(df["episode_reward_mean"])
                  for df in trial_dict.values())
    all_rewards = dict()
    for trial_path, df in trial_dict.items():
        # mean reward for this agent at every iteration
        cur_reward = df["episode_reward_mean"].to_numpy()
        all_rewards[trial_path] = np.pad(
            cur_reward, (0, max_len - len(cur_reward)), mode='edge')
    reward_matrix = np.vstack(tuple(all_rewards.values()))
    return reward_matrix, all_rewards
def get_ave_reward(reward_matrix):
    """Average each iteration's reward across all trials (column-wise mean)."""
    return reward_matrix.mean(axis=0)
def get_x_array(reward_matrix):
    """Return iteration indices 0..n_iterations-1 for use as a plot x-axis."""
    return list(range(reward_matrix.shape[1]))
def get_std_error(reward_matrix):
    """Per-iteration standard error of the mean across trials (rows)."""
    n_trials = reward_matrix.shape[0]
    # population std over trials, divided by sqrt(number of trials)
    return np.std(reward_matrix, 0) / math.sqrt(n_trials)
def get_reward_matrix_given_param(all_rewards, param_name, param_value):
    """Stack the reward rows of trials whose param_name equals param_value."""
    matching = [rewards for trial_path, rewards in all_rewards.items()
                if get_params(trial_path)[param_name] == param_value]
    return np.vstack(tuple(matching))
def plot_ave_reward(analysis):
    '''
    Takes in a tune analysis object (https://ray.readthedocs.io/en/latest/tune-package-ref.html#ray.tune.Analysis)
    and plots the average reward over all iterations as well as the standard error. If a tune.grid_search
    was performed over exactly one parameter, one line (and its corresponding error bars) is plotted for each
    value of that parameter. The figure is saved under plots/ with a timestamped filename.
    '''
    all_trials = analysis.trial_dataframes
    reward_matrix, all_rewards = get_reward_matrix_and_dict(all_trials)
    # get_varied_param returns a 3-tuple; only the first two slots are used.
    temp = get_varied_param(all_trials)
    varied_param, param_values = temp[0], temp[1]
    plt.xlabel("Iteration Number")
    # NOTE(review): "Averge" is a typo in the rendered label; left as-is
    # here since changing it alters program output.
    plt.ylabel("Averge Reward")
    if varied_param is not None:
        #plot multiple lines, one for each value of varied_param
        plt.title("Average Reward and Std Error Bars, {} samples per line".format(len(all_trials)//len(param_values)))
        x = get_x_array(reward_matrix)
        colors = iter(cm.rainbow(np.linspace(0,1,len(param_values))))
        for value in param_values:
            # average only the trials that ran with this param value
            cur_reward_matrix = get_reward_matrix_given_param(all_rewards, varied_param, value)
            ave_reward = get_ave_reward(cur_reward_matrix)
            std_err = get_std_error(cur_reward_matrix)
            c=next(colors)
            plt.errorbar(x, ave_reward, color=c, yerr=std_err, ecolor='r', label=varied_param + ": {}".format(value))
        plt.legend()
    else:
        #plot one line
        plt.title("Average Reward and Std Error Bars, {} samples per line".format(len(all_trials)))
        x = get_x_array(reward_matrix)
        ave_reward = get_ave_reward(reward_matrix)
        std_err = get_std_error(reward_matrix)
        plt.errorbar(x, ave_reward, yerr=std_err, ecolor='r', label="average")
    # Save under plots/ (created on demand) with a timestamped name so
    # successive runs never overwrite each other.
    if "plots" not in os.listdir():
        os.mkdir("plots")
    timestamp = datetime.datetime.now().strftime("%H-%M-%S-%m-%d-%Y")
    filename = "plots/ave_reward_{}.png".format(timestamp)
    plt.grid()
    plt.savefig(filename, dpi=600)
    print("\nPlot saved to: " + filename + "\n")
    plt.plot()
def eval_unpack(config, to_eval):
    '''
    Recursively eval() every leaf string in to_eval and store the result
    under the same (possibly nested) key in config.
    config: the dictionary receiving the eval'd key/value pairs
    to_eval: the (possibly nested) dictionary of expression strings
    WARNING: eval() executes arbitrary code -- only feed it trusted,
    locally-authored config files, never external input.
    '''
    for key in to_eval:
        value = to_eval[key]
        if not isinstance(value, dict):
            config[key] = eval(value)
        else:
            # descend into the matching nested dict in config
            eval_unpack(config[key], value)
def str_insert(config, to_insert):
    '''
    Debug-printing helper: recursively copy every key/value pair from
    to_insert into config, preserving nesting. Unlike eval_unpack, values
    are stored verbatim (not eval'd).
    '''
    for key in to_insert:
        value = to_insert[key]
        if not isinstance(value, dict):
            config[key] = value
        else:
            # descend into the matching nested dict in config
            str_insert(config[key], value)
def debug_config_print(config):
    '''
    Prints the config as it would be passed to tune.run, for example:
    tune.run( PPO ,
        stop = {'episode_reward_mean': 7.2} ,
        config = {'env': 'swap', 'num_workers': 7, 'num_gpus': 0, 'seed': None} ,
        num_samples = 1 ,
        checkpoint_freq = 10 ,
        checkpoint_at_end = True ,
        max_failures = 5 ,
        trial_name_creator = lambda x : str(x) ,
        )
    '''
    # Work on a deep copy so the caller's config is never mutated, and
    # splice the raw (un-eval'd) strings in so the printout shows exactly
    # what was written in the config file.
    snapshot = copy.deepcopy(config)
    str_insert(snapshot['tune'], snapshot['to_eval'])
    print("tune.run(", snapshot['algorithm'], ',')
    for key, value in snapshot['tune'].items():
        print("\t", key, "=", value, ",")
    print("\t", ")")
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from shared.library.component_b.common import person_message_pb2 as component__a_dot_common_dot_person__message__pb2
from shared.library.component_b.query import person_pb2 as component__a_dot_query_dot_person__pb2
class PersonControllerStub(object):
    """gRPC client stub for the component_b.query.PersonController service."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # server-streaming: one PersonListRequest in, a stream of Person out
        self.List = channel.unary_stream(
                '/component_b.query.PersonController/List',
                request_serializer=component__a_dot_query_dot_person__pb2.PersonListRequest.SerializeToString,
                response_deserializer=component__a_dot_common_dot_person__message__pb2.Person.FromString,
                )
        # unary: one PersonRetrieveRequest in, a single Person out
        self.Retrieve = channel.unary_unary(
                '/component_b.query.PersonController/Retrieve',
                request_serializer=component__a_dot_query_dot_person__pb2.PersonRetrieveRequest.SerializeToString,
                response_deserializer=component__a_dot_common_dot_person__message__pb2.Person.FromString,
                )
class PersonControllerServicer(object):
    """Server-side interface for the PersonController service; subclass and
    override the methods to provide an implementation."""

    def List(self, request, context):
        """Server-streaming List RPC; override to yield Person responses."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Retrieve(self, request, context):
        """Unary Retrieve RPC; override to return a single Person."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_PersonControllerServicer_to_server(servicer, server):
    """Register *servicer*'s List/Retrieve handlers on the given grpc server."""
    rpc_method_handlers = {
        'List': grpc.unary_stream_rpc_method_handler(
                servicer.List,
                request_deserializer=component__a_dot_query_dot_person__pb2.PersonListRequest.FromString,
                response_serializer=component__a_dot_common_dot_person__message__pb2.Person.SerializeToString,
        ),
        'Retrieve': grpc.unary_unary_rpc_method_handler(
                servicer.Retrieve,
                request_deserializer=component__a_dot_query_dot_person__pb2.PersonRetrieveRequest.FromString,
                response_serializer=component__a_dot_common_dot_person__message__pb2.Person.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'component_b.query.PersonController', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PersonController(object):
    """Connectionless one-shot wrappers for calling the
    component_b.query.PersonController service (grpc.experimental API)."""

    @staticmethod
    def List(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the server-streaming List RPC directly against *target*."""
        return grpc.experimental.unary_stream(request, target, '/component_b.query.PersonController/List',
            component__a_dot_query_dot_person__pb2.PersonListRequest.SerializeToString,
            component__a_dot_common_dot_person__message__pb2.Person.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Retrieve(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the unary Retrieve RPC directly against *target*."""
        return grpc.experimental.unary_unary(request, target, '/component_b.query.PersonController/Retrieve',
            component__a_dot_query_dot_person__pb2.PersonRetrieveRequest.SerializeToString,
            component__a_dot_common_dot_person__message__pb2.Person.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
#!/usr/bin/env python
from ansible.module_utils.basic import *
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cisco_ucs_boot_order
short_description: configures boot order on a cisco ucs server
version_added: 0.9.0.0
description:
- configures boot order on a cisco ucs server
options:
name:
version_added: "1.0(1e)"
description: boot policy name
required: true
devices:
description: list of dictionary
{"device_name": "", "device_order": "", "property_name": "property_value"}
required: true
org_dn:
description: org dn
required: false
default: "org-root"
requirements: ['ucsmsdk', 'ucsm_apis']
author: "Cisco Systems Inc(ucs-python@cisco.com)"
'''
EXAMPLES = '''
- name:
cisco_ucs_boot_order:
name: "test_boot_policy"
devices:
- {"device_name": "sdcard", "device_order": "1"}
- {"device_name": "iscsi", "device_order": "2", "vnic_name": "vnic_pri"}
- {"device_name": "efi", "device_order": "3"}
ucs_ip: "192.168.1.1"
ucs_username: "admin"
ucs_password: "password"
'''
def _argument_mo():
return dict(
name=dict(required=True, type='str'),
devices=dict(required=True, type='list'),
org_dn=dict(type='str', default="org-root"),
)
def _argument_connection():
return dict(
# UcsHandle
ucs_server=dict(type='dict'),
# Ucs server credentials
ucs_ip=dict(type='str'),
ucs_username=dict(default="admin", type='str'),
ucs_password=dict(type='str', no_log=True),
ucs_port=dict(default=None),
ucs_secure=dict(default=None),
ucs_proxy=dict(default=None)
)
def _ansible_module_create():
    """Build the AnsibleModule combining connection and boot-policy specs."""
    spec = dict()
    spec.update(_argument_connection())
    spec.update(_argument_mo())
    # check mode is supported: setup_boot_order only reports in that case
    return AnsibleModule(spec,
                         supports_check_mode=True)
def _get_mo_params(params):
    """Extract the boot-policy (managed-object) arguments from the full
    module parameters.

    params: the AnsibleModule.params mapping
    Returns a dict containing only the keys declared in _argument_mo()
    whose values are not None.
    """
    # The original imported UcsConnection here but never used it; the
    # dead import has been removed.
    args = {}
    for key in _argument_mo():
        value = params.get(key)
        if value is not None:
            args[key] = value
    return args
def setup_boot_order(server, module):
    """Apply the requested boot order on the UCS server.

    server: logged-in UCS handle
    module: the AnsibleModule whose params carry the boot-policy inputs
    Returns True when a change was made (or, in check mode, would be made).
    """
    from ucsm_apis.server.boot import boot_policy_order_set
    from ucsm_apis.server.boot import boot_policy_order_exists
    ansible = module.params
    args_mo = _get_mo_params(ansible)
    exists, mo = boot_policy_order_exists(handle=server, **args_mo)
    if module.check_mode or exists:
        # In check mode report whether a change would happen; if the
        # desired order already exists there is nothing to do.
        return not exists
    boot_policy_order_set(handle=server, **args_mo)
    return True
def setup(server, module):
    """Run the boot-order configuration, converting any exception into an
    (result, err) pair instead of letting it propagate.

    Returns (result_dict, err_flag): result_dict always carries 'changed',
    and additionally 'msg' when an error occurred.
    """
    outcome = {}
    failed = False
    try:
        outcome["changed"] = setup_boot_order(server, module)
    except Exception as e:
        failed = True
        outcome["msg"] = "setup error: %s " % str(e)
        outcome["changed"] = False
    return outcome, failed
def main():
    """Entry point: log in to the UCS server, apply the boot order and
    log out, reporting success or failure through the AnsibleModule."""
    from ansible.module_utils.cisco_ucs import UcsConnection
    module = _ansible_module_create()
    connection = UcsConnection(module)
    handle = connection.login()
    result, err = setup(handle, module)
    connection.logout()
    if err:
        module.fail_json(**result)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from hashlib import sha1
import json
import logging
import os
import traceback
from compat import StringIO
from compat.models import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.utils import timezone
from six import python_2_unicode_compatible
from background_task.exceptions import InvalidTaskError
from background_task.settings import app_settings
from background_task.signals import task_failed
from background_task.signals import task_rescheduled
logger = logging.getLogger(__name__)
class TaskQuerySet(models.QuerySet):
    """QuerySet with creator-based filtering for Task objects."""

    def created_by(self, creator):
        """
        :return: A Task queryset filtered by creator
        """
        creator_type = ContentType.objects.get_for_model(creator)
        return self.filter(creator_object_id=creator.id,
                           creator_content_type=creator_type)
class TaskManager(models.Manager):
    """Manager encapsulating the scheduling/locking queries for Task."""

    def get_queryset(self):
        # use the custom queryset so chained calls keep created_by() etc.
        return TaskQuerySet(self.model, using=self._db)

    def created_by(self, creator):
        """:return: A Task queryset filtered by creator"""
        return self.get_queryset().created_by(creator)

    def find_available(self, queue=None):
        """Return tasks that are due now, unlocked and not failed, ordered
        by priority then run_at, optionally restricted to *queue*.

        When running asynchronously the result is truncated to the number
        of free worker threads; locked-but-failed tasks are not counted as
        occupying a thread.
        """
        now = timezone.now()
        qs = self.unlocked(now)
        if queue:
            qs = qs.filter(queue=queue)
        ready = qs.filter(run_at__lte=now, failed_at=None)
        # BACKGROUND_TASK_PRIORITY_ORDERING supplies the ordering prefix
        # (e.g. '' or '-') for the priority column
        _priority_ordering = '{}priority'.format(app_settings.BACKGROUND_TASK_PRIORITY_ORDERING)
        ready = ready.order_by(_priority_ordering, 'run_at')
        if app_settings.BACKGROUND_TASK_RUN_ASYNC:
            currently_failed = self.failed().count()
            currently_locked = self.locked(now).count()
            count = app_settings.BACKGROUND_TASK_ASYNC_THREADS - \
                (currently_locked - currently_failed)
            if count > 0:
                ready = ready[:count]
            else:
                ready = self.none()
        return ready

    def unlocked(self, now):
        """Tasks that carry no lock, or whose lock has expired."""
        max_run_time = app_settings.BACKGROUND_TASK_MAX_RUN_TIME
        qs = self.get_queryset()
        expires_at = now - timedelta(seconds=max_run_time)
        unlocked = Q(locked_by=None) | Q(locked_at__lt=expires_at)
        return qs.filter(unlocked)

    def locked(self, now):
        """Tasks considered locked.

        NOTE(review): this ORs the two conditions instead of negating
        unlocked(), so it is not its exact complement — confirm this is
        intentional before relying on the count.
        """
        max_run_time = app_settings.BACKGROUND_TASK_MAX_RUN_TIME
        qs = self.get_queryset()
        expires_at = now - timedelta(seconds=max_run_time)
        locked = Q(locked_by__isnull=False) | Q(locked_at__gt=expires_at)
        return qs.filter(locked)

    def failed(self):
        """
        `currently_locked - currently_failed` in `find_available` assumes that
        tasks marked as failed are also in processing by the running PID.
        """
        qs = self.get_queryset()
        return qs.filter(failed_at__isnull=False)

    def new_task(self, task_name, args=None, kwargs=None,
                 run_at=None, priority=0, queue=None, verbose_name=None,
                 creator=None, repeat=None, repeat_until=None,
                 remove_existing_tasks=False, force_synchronous_execution=False):
        """
        Build (without saving) a new Task scheduled at *run_at* (default: now).

        If `remove_existing_tasks` is True, all unlocked tasks with the identical task hash will be removed.
        The attributes `repeat` and `repeat_until` are not supported at the moment.
        """
        args = args or ()
        kwargs = kwargs or {}
        if run_at is None:
            run_at = timezone.now()
        # sort_keys keeps the hash stable regardless of dict ordering
        task_params = json.dumps((args, kwargs), sort_keys=True)
        s = "%s%s" % (task_name, task_params)
        task_hash = sha1(s.encode('utf-8')).hexdigest()
        if remove_existing_tasks:
            Task.objects.filter(task_hash=task_hash, locked_at__isnull=True).delete()
        return Task(task_name=task_name,
                    task_params=task_params,
                    task_hash=task_hash,
                    priority=priority,
                    run_at=run_at,
                    queue=queue,
                    verbose_name=verbose_name,
                    creator=creator,
                    repeat=repeat or Task.NEVER,
                    repeat_until=repeat_until,
                    force_synchronous_execution=force_synchronous_execution
                    )

    def get_task(self, task_name, args=None, kwargs=None):
        """Queryset of tasks matching *task_name* with exactly these args."""
        args = args or ()
        kwargs = kwargs or {}
        task_params = json.dumps((args, kwargs), sort_keys=True)
        s = "%s%s" % (task_name, task_params)
        task_hash = sha1(s.encode('utf-8')).hexdigest()
        qs = self.get_queryset()
        return qs.filter(task_hash=task_hash)

    def drop_task(self, task_name, args=None, kwargs=None):
        """Delete every task matching *task_name* with these arguments."""
        return self.get_task(task_name, args, kwargs).delete()
@python_2_unicode_compatible
class Task(models.Model):
    """A scheduled (pending or in-progress) background task."""

    # the "name" of the task/function to be run
    task_name = models.CharField(max_length=190, db_index=True)
    # the json encoded parameters to pass to the task
    task_params = models.TextField()
    # a sha1 hash of the name and params, to lookup already scheduled tasks
    task_hash = models.CharField(max_length=40, db_index=True)
    # optional human-readable name, used by __str__
    verbose_name = models.CharField(max_length=255, null=True, blank=True)
    # what priority the task has
    priority = models.IntegerField(default=0, db_index=True)
    # when the task should be run
    run_at = models.DateTimeField(db_index=True)

    # Repeat choices are encoded as number of seconds
    # The repeat implementation is based on this encoding
    HOURLY = 3600
    DAILY = 24 * HOURLY
    WEEKLY = 7 * DAILY
    EVERY_2_WEEKS = 2 * WEEKLY
    EVERY_4_WEEKS = 4 * WEEKLY
    NEVER = 0
    REPEAT_CHOICES = (
        (HOURLY, 'hourly'),
        (DAILY, 'daily'),
        (WEEKLY, 'weekly'),
        (EVERY_2_WEEKS, 'every 2 weeks'),
        (EVERY_4_WEEKS, 'every 4 weeks'),
        (NEVER, 'never'),
    )
    repeat = models.BigIntegerField(choices=REPEAT_CHOICES, default=NEVER)
    repeat_until = models.DateTimeField(null=True, blank=True)

    # the "name" of the queue this is to be run on
    queue = models.CharField(max_length=190, db_index=True,
                             null=True, blank=True)
    # Overrides the BACKGROUND_TASK_RUN_ASYNC setting for this particular task
    # NOTE(review): BooleanField without an explicit default — confirm the
    # intended default value (newer Django versions warn about this).
    force_synchronous_execution = models.BooleanField()
    # how many times the task has been tried
    attempts = models.IntegerField(default=0, db_index=True)
    # when the task last failed
    failed_at = models.DateTimeField(db_index=True, null=True, blank=True)
    # details of the error that occurred
    last_error = models.TextField(blank=True)
    # details of who's trying to run the task at the moment
    locked_by = models.CharField(max_length=64, db_index=True,
                                 null=True, blank=True)
    locked_at = models.DateTimeField(db_index=True, null=True, blank=True)
    # optional generic FK to the object that created this task
    creator_content_type = models.ForeignKey(
        ContentType, null=True, blank=True,
        related_name='background_task', on_delete=models.CASCADE
    )
    creator_object_id = models.PositiveIntegerField(null=True, blank=True)
    creator = GenericForeignKey('creator_content_type', 'creator_object_id')
    objects = TaskManager()

    def locked_by_pid_running(self):
        """
        Check if the locked_by process is still running.
        Returns True/False, or None when no lock is held.
        """
        if self.locked_by:
            try:
                # won't kill the process. kill is a bad named system call
                os.kill(int(self.locked_by), 0)
                return True
            except:
                # NOTE(review): bare except also swallows non-OS errors
                # (e.g. a non-numeric locked_by); treated as "not running"
                return False
        else:
            return None
    locked_by_pid_running.boolean = True

    def has_error(self):
        """
        Check if the last_error field is empty.
        """
        return bool(self.last_error)
    has_error.boolean = True

    def params(self):
        """Decode task_params back into the (args, kwargs) pair."""
        args, kwargs = json.loads(self.task_params)
        # need to coerce kwargs keys to str
        kwargs = dict((str(k), v) for k, v in kwargs.items())
        return args, kwargs

    def lock(self, locked_by):
        """Atomically claim this task for *locked_by*: returns the refreshed
        Task on success, or None when another runner holds the lock."""
        now = timezone.now()
        unlocked = Task.objects.unlocked(now).filter(pk=self.pk)
        updated = unlocked.update(locked_by=locked_by, locked_at=now)
        if updated:
            return Task.objects.get(pk=self.pk)
        return None

    def unlock(self):
        """Release the lock and reschedule this task so it runs just before
        the earliest other task of the same name (or immediately)."""
        self.locked_by = None
        self.locked_at = None
        next_task = Task.objects.exclude(pk=self.pk).filter(
            task_name=self.task_name,
            run_at__gte=self.run_at
        )
        if next_task.exists():
            self.run_at = next_task.earliest('run_at').run_at - timedelta(milliseconds=1)
        else:
            self.run_at = timezone.now()
        self.save()

    def _extract_error(self, type, err, tb):
        # render the exception + traceback to a string for last_error
        file = StringIO()
        traceback.print_exception(type, err, tb, None, file)
        return file.getvalue()

    def increment_attempts(self):
        # bump and persist the retry counter
        self.attempts += 1
        self.save()

    def has_reached_max_attempts(self):
        max_attempts = app_settings.BACKGROUND_TASK_MAX_ATTEMPTS
        return self.attempts >= max_attempts

    def is_repeating_task(self):
        return self.repeat > self.NEVER

    def runs_async(self):
        # the per-task override wins over the global RUN_ASYNC setting
        return not self.force_synchronous_execution and app_settings.BACKGROUND_TASK_RUN_ASYNC

    def reschedule(self, type, err, traceback):
        '''
        Set a new time to run the task in future, or create a CompletedTask and delete the Task
        if it has reached the maximum of allowed attempts
        '''
        self.last_error = self._extract_error(type, err, traceback)
        self.increment_attempts()
        if self.has_reached_max_attempts() or isinstance(err, InvalidTaskError):
            self.failed_at = timezone.now()
            logger.warning('Marking task %s as failed', self)
            completed = self.create_completed_task()
            task_failed.send(sender=self.__class__, task_id=self.id, completed_task=completed)
            self.delete()
        else:
            # quartic backoff: attempts**4 + 5 seconds between retries
            backoff = timedelta(seconds=(self.attempts ** 4) + 5)
            self.run_at = timezone.now() + backoff
            logger.warning('Rescheduling task %s for %s later at %s', self,
                           backoff, self.run_at)
            task_rescheduled.send(sender=self.__class__, task=self)
            self.locked_by = None
            self.locked_at = None
            self.save()

    def create_completed_task(self):
        '''
        Returns a new CompletedTask instance with the same values
        '''
        completed_task = CompletedTask(
            task_name=self.task_name,
            task_params=self.task_params,
            task_hash=self.task_hash,
            priority=self.priority,
            run_at=timezone.now(),
            queue=self.queue,
            attempts=self.attempts,
            failed_at=self.failed_at,
            last_error=self.last_error,
            locked_by=self.locked_by,
            locked_at=self.locked_at,
            verbose_name=self.verbose_name,
            creator=self.creator,
            repeat=self.repeat,
            repeat_until=self.repeat_until,
        )
        completed_task.save()
        return completed_task

    def create_repetition(self):
        """
        :return: A new Task with an offset of self.repeat, or None if the self.repeat_until is reached
        """
        if not self.is_repeating_task():
            return None
        if self.repeat_until and self.repeat_until <= timezone.now():
            # Repeat chain completed
            return None
        args, kwargs = self.params()
        new_run_at = self.run_at + timedelta(seconds=self.repeat)
        # skip repetitions that would already be in the past
        while new_run_at < timezone.now():
            new_run_at += timedelta(seconds=self.repeat)
        new_task = TaskManager().new_task(
            task_name=self.task_name,
            args=args,
            kwargs=kwargs,
            run_at=new_run_at,
            priority=self.priority,
            queue=self.queue,
            verbose_name=self.verbose_name,
            creator=self.creator,
            repeat=self.repeat,
            repeat_until=self.repeat_until,
        )
        new_task.save()
        return new_task

    def save(self, *arg, **kw):
        # force NULL rather than empty string
        self.locked_by = self.locked_by or None
        return super(Task, self).save(*arg, **kw)

    def __str__(self):
        return u'{}'.format(self.verbose_name or self.task_name)

    class Meta:
        db_table = 'background_task'
class CompletedTaskQuerySet(models.QuerySet):
    """QuerySet helpers for querying finished (succeeded/failed) tasks."""

    def created_by(self, creator):
        """
        :return: A CompletedTask queryset filtered by creator
        """
        content_type = ContentType.objects.get_for_model(creator)
        return self.filter(
            creator_content_type=content_type,
            creator_object_id=creator.id,
        )

    def failed(self, within=None):
        """
        :param within: A timedelta object
        :return: A queryset of CompletedTasks that failed within the given timeframe (e.g. less than 1h ago)
        """
        qs = self.filter(
            failed_at__isnull=False,
        )
        if within:
            time_limit = timezone.now() - within
            qs = qs.filter(failed_at__gt=time_limit)
        return qs

    def succeeded(self, within=None):
        """
        :param within: A timedelta object
        :return: A queryset of CompletedTasks that completed successfully within the given timeframe
        (e.g. less than 1h ago)
        """
        qs = self.filter(
            failed_at__isnull=True,
        )
        if within:
            time_limit = timezone.now() - within
            # successful tasks have no failed_at, so filter on run_at
            qs = qs.filter(run_at__gt=time_limit)
        return qs
@python_2_unicode_compatible
class CompletedTask(models.Model):
    """Archive record of a task that finished, successfully or failed."""

    # the "name" of the task/function to be run
    task_name = models.CharField(max_length=190, db_index=True)
    # the json encoded parameters to pass to the task
    task_params = models.TextField()
    # a sha1 hash of the name and params, to lookup already scheduled tasks
    task_hash = models.CharField(max_length=40, db_index=True)
    # optional human-readable name, used by __str__
    verbose_name = models.CharField(max_length=255, null=True, blank=True)
    # what priority the task has
    priority = models.IntegerField(default=0, db_index=True)
    # when the task should be run
    run_at = models.DateTimeField(db_index=True)
    repeat = models.BigIntegerField(choices=Task.REPEAT_CHOICES, default=Task.NEVER)
    repeat_until = models.DateTimeField(null=True, blank=True)
    # the "name" of the queue this is to be run on
    queue = models.CharField(max_length=190, db_index=True,
                             null=True, blank=True)
    # how many times the task has been tried
    attempts = models.IntegerField(default=0, db_index=True)
    # when the task last failed
    failed_at = models.DateTimeField(db_index=True, null=True, blank=True)
    # details of the error that occurred
    last_error = models.TextField(blank=True)
    # details of who's trying to run the task at the moment
    locked_by = models.CharField(max_length=64, db_index=True,
                                 null=True, blank=True)
    locked_at = models.DateTimeField(db_index=True, null=True, blank=True)
    creator_content_type = models.ForeignKey(
        ContentType, null=True, blank=True,
        related_name='completed_background_task', on_delete=models.CASCADE
    )
    creator_object_id = models.PositiveIntegerField(null=True, blank=True)
    creator = GenericForeignKey('creator_content_type', 'creator_object_id')
    objects = CompletedTaskQuerySet.as_manager()

    def locked_by_pid_running(self):
        """
        Check if the locked_by process is still running.
        Returns True/False, or None when no lock was held.
        """
        if self.locked_by:
            try:
                # won't kill the process. kill is a bad named system call
                os.kill(int(self.locked_by), 0)
                return True
            except:
                # NOTE(review): bare except — any failure means "not running"
                return False
        else:
            return None
    locked_by_pid_running.boolean = True

    def has_error(self):
        """
        Check if the last_error field is empty.
        """
        return bool(self.last_error)
    has_error.boolean = True

    def __str__(self):
        return u'{} - {}'.format(
            self.verbose_name or self.task_name,
            self.run_at,
        )
|
# Copyright (C) 2021 Dino Bollinger, ETH Zürich, Information Security Group
# Released under the MIT License
"""
Using a database of collected cookie + label data, determine potential GDPR violations by checking whether
Google Analytics cookie variants (or another known cookie, can be specified) were misclassified.
----------------------------------
Required arguments:
<db_path> Path to database to analyze.
Optional arguments:
<name_pattern>: Specifies the regex pattern for the cookie name.
<domain_pattern>: Specifies the regex pattern for the cookie domain.
<expected_label>: Expected label for the cookie.
--out_path <out_path>: Directory to store the results.
Usage:
method1_wrong_label.py <db_path> [<name_pattern> <domain_pattern> <expected_label> --out_path <out_path>]
"""
from docopt import docopt
import os
import sqlite3
import re
import logging
from utils import (setupLogger, CONSENTDATA_QUERY, write_json,
get_violation_details_consent_table, write_vdomains)
logger = logging.getLogger("vd")
def main():
    """
    Try to detect potential violations by analyzing the category of specific cookies.
    @return: exit code, 0 for success
    """
    argv = None
    cargs = docopt(__doc__, argv=argv)
    setupLogger(".", logging.INFO)
    logger.info("Running method 01: Wrong Label for Known Cookie")
    # Specify name, domain pattern and expected label by input, or
    # fall back to the built-in Google Analytics check below.
    if cargs["<name_pattern>"]:
        name_pattern = re.compile(cargs["<name_pattern>"])
        domain_pattern = re.compile(cargs["<domain_pattern>"])
        expected_label = int(cargs["<expected_label>"])
    else:
        logger.info("Using default GA check:")
        name_pattern = re.compile("(^_ga$|^_gat$|^_gid$|^_gat_gtag_UA_[0-9]+_[0-9]+|^_gat_UA-[0-9]+-[0-9]+)")
        domain_pattern = re.compile(".*")
        expected_label = 2
    # Verify that database exists
    database_path = cargs["<db_path>"]
    if not os.path.exists(database_path):
        logger.error("Database file does not exist.")
        return 1
    logger.info(f"Database used: {database_path}")
    # enable dictionary access by column name, access database
    conn = sqlite3.connect(database_path)
    conn.row_factory = sqlite3.Row
    # some variables to collect violation details with
    violation_details = dict()
    violation_domains = set()
    # one counter per category id (id 99 is folded into index 5 below)
    violation_counts = [0, 0, 0, 0, 0, 0, 0]
    total_domains = set()
    total_matching_cookies = 0
    logger.info("Extracting info from database...")
    with conn:
        cur = conn.cursor()
        cur.execute(CONSENTDATA_QUERY)
        for row in cur:
            # Duplicate check, not necessary anymore
            #transform = {**row}
            #if transform.values() in duplicate_reject:
            #    logger.info("Skipped exact duplicate entry")
            #    continue
            #duplicate_reject.add(transform.values())
            if name_pattern.match(row["consent_name"]) and domain_pattern.search(row["consent_domain"]):
                total_domains.add(row["site_url"])
                total_matching_cookies += 1
                # cat_id of -1 is excluded (presumably "uncategorized" —
                # confirm); only a differing known label counts
                if row["cat_id"] != expected_label and row["cat_id"] != -1:
                    #logger.info(f"Potential Violation on website: {row['site_url']} for cookie entry: {row['consent_name']};{row['consent_domain']}")
                    #logger.info(f"Entry matches pattern, but given label was {row['cat_id']}")
                    cat_id = row["cat_id"]
                    if cat_id == 99:
                        cat_id = 5
                    vdomain = row["site_url"]
                    violation_domains.add(vdomain)
                    violation_counts[cat_id] += 1
                    if vdomain not in violation_details:
                        violation_details[vdomain] = list()
                    violation_details[vdomain].append(get_violation_details_consent_table(row))
    conn.close()
    logger.info(f"Total matching cookies found: {total_matching_cookies}")
    logger.info(f"Number of potential violations: {violation_counts}")
    logger.info(f"Number of sites that have the cookie in total: {len(total_domains)}")
    logger.info(f"Number of sites with potential violations: {len(violation_domains)}")
    # tally the potential violations per CMP (consent-management platform)
    v_per_cmp = [0, 0, 0]
    for url, violating_cookies in violation_details.items():
        for c in violating_cookies:
            assert (c["cmp_type"] >= 0)
            v_per_cmp[c["cmp_type"]] += 1
    logger.info(f"Potential Violations per CMP Type: {v_per_cmp}")
    if cargs["--out_path"]:
        out_path = cargs["--out_path"]
    else:
        out_path = "./violation_stats/"
    write_json(violation_details, "method1_cookies.json", out_path)
    write_vdomains(violation_domains, "method1_domains.txt", out_path)
    return 0


if __name__ == '__main__':
    exit(main())
|
import datetime
from google.cloud import firestore
urls_collection = firestore.Client(project="tldr-278619").collection(u"urls")
def get_answer_from(text, answer_space, default):
    """Prompt with *text* until the user enters an answer contained in
    *answer_space* (case-insensitive); an empty reply returns *default*.

    text: the prompt passed to input()
    answer_space: string/container of accepted (lower-case) answers
    default: value returned on an empty reply

    Originally implemented via unbounded recursion, which could hit the
    recursion limit on repeated invalid input; rewritten as a loop with
    identical behavior.
    """
    while True:
        answer = input(text)
        if not answer:
            return default
        answer = answer.lower()
        if answer in answer_space:
            return answer
def ask_to_approve(title, url, top_image, text):
    """Print the article details and summary, then ask the reviewer to
    approve it. Returns True iff the answer is 'y'.

    The original had an implicit `None` fall-through when the answer was
    neither 'y' nor 'n' (unreachable given get_answer_from's contract);
    the explicit boolean return removes that dead path.
    """
    print("title: {}".format(title))
    print("URL: {}".format(url))
    print("top_image url: {}".format(top_image))
    print("################# SUMMARY ###############")
    print(text)
    print("################### END #################")
    answer = get_answer_from("Approve? (y/N)", "yn", "n")
    return answer == "y"
def main():
    """Interactively review Firestore URL docs that are new and not yet
    published, marking each as approved or skipped (with a reason)."""
    # only docs with publish=False, published=False and new=True are reviewed
    docs_stream = urls_collection.where(u"publish", u"==", False).where(u"published", u"==", False).where(u"new",
                                                                                                          u"==",
                                                                                                          True).stream()
    # Stream is timing out so we need to convert lazy stream to a normal list
    # this is ok since we never will have huge list for a review (or this is a bug that needs to be fixed)
    docs = [doc for doc in docs_stream]
    for doc in docs:
        title = doc.get("title")
        url = doc.get("url")
        top_image = doc.get("top_image")
        text = doc.get("summary")
        if ask_to_approve(title, url, top_image, text):
            publish = True
            print("approved")
            skip_reason = None
        else:
            publish = False
            skip_reason = get_answer_from("Reason? (B)ad news/already (p)ublished/news is good bud (s)ummary is bad",
                                          "bps", "b")
            print("skipped with the reason: {}".format(skip_reason))
        # merge=True keeps the document's other fields intact
        updated_doc_data = {
            "publish": publish,
            "new": False,
            "skip_reason": skip_reason,
            "date": datetime.datetime.now()
        }
        urls_collection.document(doc.id).set(updated_doc_data, merge=True)


if "__main__" == __name__:
    main()
|
#!/usr/bin/env python
#Reference: the baxter_stocking_stuffer project by students in Northwestern's MSR program - Josh Marino, Sabeen Admani, Andrew Turchina and Chu-Chu Igbokwe
#Service provided - ObjLocation service - contains x,y,z coordinates of object in baxter's stationary body frame, whether it is ok to grasp and if objects were found in the current frame.
import rospy
import numpy as np
import cv2
import cv_bridge
import baxter_interface
import math
from std_msgs.msg import String, Int32
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point
from cv_bridge import CvBridge, CvBridgeError
from jp_baxtertry1.srv import *
def getKey(item):
    """Sort key helper: the element at index 1 of *item*."""
    key = item[1]
    return key
def callback(message):
    """Camera image callback: convert the ROS image to OpenCV BGR, read the
    HSV calibration trackbars for each colour, build the colour masks and
    display the annotated frame and masks for visual tuning."""
    global Hmin_verde,Hmax_verde,Vmin_verde,Vmax_verde,Smin_verde,Smax_verde,Hum_verde,Hmin_negro,Hmax_negro,Vmin_negro,Vmax_negro,Smin_negro,Smax_negro,Hum_negro,Hmin_rojo1,Hmax_rojo1,Vmin_rojo1,Vmax_rojo1,Smin_rojo1,Smax_rojo1,Hum_rojo1,Hmin_rojo2,Hmax_rojo2,Vmin_rojo2,Vmax_rojo2,Smin_rojo2,Smax_rojo2,Hum_rojo2
    global act,xmin,xmax,ymin,ymax
    # 0/1 'act' flag from the 'enviar' window
    act=cv2.getTrackbarPos('act','enviar')
    bridge = CvBridge()
    frame = bridge.imgmsg_to_cv2(message, "bgr8")
    #frame=cv2.imread('calibrar_guardado.jpg')
    # read the slider values per colour (red uses two hue ranges)
    Hmin_verde,Hmax_verde,Vmin_verde,Vmax_verde,Smin_verde,Smax_verde,Hum_verde=asig_Color('verde')
    Hmin_negro,Hmax_negro,Vmin_negro,Vmax_negro,Smin_negro,Smax_negro,Hum_negro=asig_Color('negro')
    Hmin_rojo1,Hmax_rojo1,Vmin_rojo1,Vmax_rojo1,Smin_rojo1,Smax_rojo1,Hum_rojo1=asig_Color('rojo1')
    Hmin_rojo2,Hmax_rojo2,Vmin_rojo2,Vmax_rojo2,Smin_rojo2,Smax_rojo2,Hum_rojo2=asig_Color('rojo2')
    lower_verde,upper_verde=def_color(Hmin_verde,Vmin_verde,Smin_verde,Hmax_verde,Vmax_verde,Smax_verde)
    lower_negro,upper_negro=def_color(Hmin_negro,Vmin_negro,Smin_negro,Hmax_negro,Vmax_negro,Smax_negro)
    lower_rojo1,upper_rojo1=def_color(Hmin_rojo1,Vmin_rojo1,Smin_rojo1,Hmax_rojo1,Vmax_rojo1,Smax_rojo1)
    lower_rojo2,upper_rojo2=def_color(Hmin_rojo2,Vmin_rojo2,Smin_rojo2,Hmax_rojo2,Vmax_rojo2,Smax_rojo2)
    kernel=np.ones((7,7),np.uint8)
    hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
    mask_verde=mask_imagen(hsv,lower_verde,upper_verde)
    mask_negro=mask_imagen(hsv,lower_negro,upper_negro)
    #global arprom_verde,arprom_rojo,arprom_negro
    mask_rojo=mask_imagen_rojo(hsv,lower_rojo1,upper_rojo1,lower_rojo2,upper_rojo2)
    contour_verde,arprom_verde=filtro(mask_verde,kernel,frame)
    contour_rojo,arprom_rojo=filtro(mask_rojo,kernel,frame)
    #global xmin1,xmax1,ymin1,ymax1,xmin2,xmax2,ymin2,ymax2
    contour_negro,arprom_negro=filtro(mask_negro,kernel,frame)
    # corner-detection experiment on the black region, currently disabled
    #rango_negro=sorted(rango_negro, key=getKey)
    #n=len(rango_negro)
    #maximo= max(rango_negro, key=getKey)
    #minimo= min(rango_negro, key=getKey)
    #xmin1=1000
    #xmin2=1000
    #xmax1=0
    #xmax2=0
    #for x in range(n):
    #if rango_negro[x][1]>=maximo[1]-7 and rango_negro[x][0]<xmin1:
    #    xmin1=rango_negro[x][0]
    #    ymax1=rango_negro[x][1]
    #    centro1=xmin1,ymax1
    #    if rango_negro[x][1]<=minimo[1]+7 and rango_negro[x][0]<xmin2:
    #        ymin1=rango_negro[x][1]
    #        centro2=xmin2,ymin1
    #    if rango_negro[x][1]<=minimo[1]+7 and rango_negro[x][0]>xmax1:
    #        xmax1=rango_negro[x][0]
    #        ymin2=rango_negro[x][1]
    #        centro3=xmax1,ymin2
    #    if rango_negro[x][1]>=maximo[1]-7 and rango_negro[x][0]>xmax2:
    #        xmax2=rango_negro[x][0]
    #        ymax2=rango_negro[x][1]
    #        centro4=xmax2,ymax2
    #    cv2.circle(frame, centro1, 5, (0, 255, 255), -1)
    #    cv2.circle(frame, centro2, 5, (0, 255, 255), -1)
    #    cv2.circle(frame, centro3, 5, (0, 255, 255), -1)
    #cv2.circle(frame, centro4, 5, (0, 255, 255), -1)
    cv2.imshow('window',frame)
    cv2.imshow('negro',mask_negro)
    cv2.imshow('rojo',mask_rojo)
    cv2.waitKey(27)
def mask_imagen(hsv, min_value, max_value):
    """Binary mask of the pixels of *hsv* within [min_value, max_value]."""
    return cv2.inRange(hsv, min_value, max_value)
def def_color(Hmin, Vmin, Smin, Hmax, Vmax, Smax):
    """Build the (lower, upper) HSV bound arrays for cv2.inRange."""
    lower = np.array((Hmin, Vmin, Smin))
    upper = np.array((Hmax, Vmax, Smax))
    return lower, upper
def mask_imagen_rojo(hsv, min_value1, max_value1, min_value2, max_value2):
    """Combined mask for red, which wraps around the hue axis and therefore
    needs the union of two HSV ranges."""
    first = cv2.inRange(hsv, min_value1, max_value1)
    second = cv2.inRange(hsv, min_value2, max_value2)
    return cv2.add(first, second)
def asig_Color(color):
    """Read the HSV range trackbars plus 'Hum' from the window *color*.

    Returns (Hmin, Hmax, Vmin, Vmax, Smin, Smax, Hum).
    """
    names = ('Hmin', 'Hmax', 'Vmin', 'Vmax', 'Smin', 'Smax', 'Hum')
    return tuple(cv2.getTrackbarPos(name, color) for name in names)
def filtro(mask, kernel, frame):
    """Clean up *mask* (threshold + morphological open/close), find the
    external contours, and draw approximated polygons plus centroid dots
    onto *frame* for sufficiently large, vertically in-range contours.

    Returns (contours, arprom). NOTE(review): arprom is always 0 here —
    the area-averaging code below is commented out.
    """
    ret,mask = cv2.threshold(mask,157,255,0)
    mask=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernel)
    mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
    # [-2] picks the contour list regardless of the OpenCV version's
    # findContours return-tuple layout
    contours=cv2.findContours(mask,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    arprom=0
    if len(contours)>0:
        n_1=len(contours)
        cont=0
        xmin=1000
        ymin=1000
        xmax=0
        ymax=0
        beta=0
        ar=0
        #for x_1 in range(n_1):
        #    c=contours[x_1]
        #    area=cv2.contourArea(c)
        #    M=cv2.moments(c)
        #    y=int(M["m01"] / M["m00"])
        #    x=int(M["m10"] / M["m00"])
        #    if area>250 and y<420 and y>30:
        #        ar=ar+area
        #        beta=beta+1
        #arprom=ar/beta
        for x_1 in range(n_1):
            c=contours[x_1]
            area=cv2.contourArea(c)
            M=cv2.moments(c)
            # centroid from the image moments
            y=int(M["m01"] / M["m00"])
            x=int(M["m10"] / M["m00"])
            if area>250 and y<420 and y>30:
                accuracy=0.06*cv2.arcLength(c,True)
                approx=cv2.approxPolyDP(c,accuracy,True)
                cv2.drawContours(frame,[approx],0,(0,255,0),2)
                w,h=frame.shape[:2]
                center=x,y
                #rango[cont]=center
                cont=cont+1
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
    #rango=rango[0:cont]
    return contours,arprom
def nothing(x):
    """No-op trackbar callback required by cv2.createTrackbar."""
    return None
def crearslider(color, Hmin, Hmax, Smin, Smax, Vmin, Vmax):
    """Create a window named *color* with HSV min/max trackbars plus a
    0-100 'Hum' trackbar (same creation order as the original)."""
    cv2.namedWindow(color)
    for name, initial in (('Hmin', Hmin), ('Hmax', Hmax),
                          ('Vmin', Vmin), ('Vmax', Vmax),
                          ('Smin', Smin), ('Smax', Smax)):
        cv2.createTrackbar(name, color, initial, 255, nothing)
    cv2.createTrackbar('Hum', color, 0, 100, nothing)
def get_obj_calibrate(request):
    """Service handler: after a short settle delay, return the current
    calibration state (all HSV trackbar values, the 'act' flag, the
    detected bounding coordinates and the average areas)."""
    global Hmin_verde,Hmax_verde,Vmin_verde,Vmax_verde,Smin_verde,Smax_verde,Hum_verde,Hmin_negro,Hmax_negro,Vmin_negro,Vmax_negro,Smin_negro,Smax_negro,Hum_negro,Hmin_rojo1,Hmax_rojo1,Vmin_rojo1,Vmax_rojo1,Smin_rojo1,Smax_rojo1,Hum_rojo1,Hmin_rojo2,Hmax_rojo2,Vmin_rojo2,Vmax_rojo2,Smin_rojo2,Smax_rojo2,Hum_rojo2,act,xmin1,xmax1,ymin1,ymax1,xmin2,xmax2,ymin2,ymax2,arprom_verde,arprom_rojo,arprom_negro
    # give the image callback time to refresh the globals first
    rospy.sleep(0.5)
    return CalibradoResponse(Hmin_verde,Hmax_verde,Vmin_verde,Vmax_verde,Smin_verde,Smax_verde,Hum_verde,Hmin_negro,Hmax_negro,Vmin_negro,Vmax_negro,Smin_negro,Smax_negro,Hum_negro,Hmin_rojo1,Hmax_rojo1,Vmin_rojo1,Vmax_rojo1,Smin_rojo1,Smax_rojo1,Hum_rojo1,Hmin_rojo2,Hmax_rojo2,Vmin_rojo2,Vmax_rojo2,Smin_rojo2,Smax_rojo2,Hum_rojo2,act,xmin1,xmax1,ymin1,ymax1,xmin2,xmax2,ymin2,ymax2,arprom_verde,arprom_rojo,arprom_negro)
def user():
    """Open the preview window, subscribe to the RGB camera topic, then
    block in rospy.spin() processing frames via callback()."""
    cv2.namedWindow("window", 1)
    # Subscribe to the camera image stream
    rospy.Subscriber("/camera/rgb/image_color", Image, callback)
    #Calibrado_srv = rospy.Service("Calibrado_service", Calibrado, get_obj_calibrate)
    rospy.spin()
def main():
    """Initialise the 'calibrate' ROS node, build one HSV slider window per
    tracked colour plus an 'enviar' window with an act toggle, then hand
    control to user()."""
    #Initiate left hand camera object detection node
    rospy.init_node('calibrate')
    crearslider('verde',30,59,97,255,109,255)
    crearslider('rojo1',0,12,115,255,104,255)
    crearslider('rojo2',148,255,92,255,138,255)
    crearslider('negro',0,255,0,82,0,83)
    cv2.namedWindow('enviar')
    # 0/1 toggle; presumably gates whether calibration values are sent -- TODO confirm.
    cv2.createTrackbar('act','enviar',0,1,nothing)
    user()
# Script entry point: start the calibration node.
if __name__ == '__main__':
    main()
|
import time
import unittest
from theano.compile.pfunc import pfunc
from theano import tensor
import numpy
import theano
import theano.tensor as T
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available == False:
raise SkipTest('Optional package cuda disabled')
from theano.gof.python25 import any
import theano.sandbox.cuda as tcn
import theano.sandbox.cuda as cuda
import theano.sandbox.cuda.basic_ops as B
from theano.tensor.basic import _allclose
from theano.tests import unittest_tools as utt
# Build the GPU / non-GPU compilation modes used by every test below.
# Under FAST_COMPILE we force FAST_RUN so the GPU optimizations are applied.
if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
    mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
    mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def rand_cuda_ndarray(shape):
    """Return a CudaNdarray of *shape* filled with uniform random float32."""
    host_values = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    return cuda_ndarray.CudaNdarray(host_values)
#intentionally disabled
def tes_use():
    # Named "tes_" (not "test_") on purpose so the test runner skips it.
    tcn.use()
def tensor_pattern_to_gpu_pattern(shape, pattern):
    """Return a 0/1 tuple with one flag per axis of *shape*, where the axes
    listed in *pattern* (the reduced axes) are marked with 1."""
    flags = [0] * len(shape)
    for axis in pattern:
        flags[axis] = 1
    return tuple(flags)
def test_careduce():
    """
    test sum pattern 1, 11, 10, 01, 001, 010, 100, 110, 011, 111,
      0011, 0101, 0111, 1011, 1111
    test sum pattern implemented with reshape:
    1000, 0100, 0010, 0001, 11111
    others implemented by reshape that are not tested
    0011,0101,0110,1001,1010,1100
    1110,1101,1011
    TODO: test with broadcast
    """
    # For each reduction scalar op, compile the same graph with and without
    # the GPU optimizations and compare the two results over many
    # shape/axis combinations (including empty and >4096-sized dimensions).
    for scalar_op, careduce_op in [
            (theano.scalar.mul, tensor.elemwise.CAReduceDtype),
            (theano.scalar.add, tensor.elemwise.CAReduceDtype),
            (theano.scalar.maximum, tensor.CAReduce),
            (theano.scalar.minimum, tensor.CAReduce)
            #The following 2 cases could work if the scalar_op.c_code work with float* dtype.
            #Currently we have this error:
            #error: invalid operands of types 'npy_float32' and 'npy_float32' to binary 'operator&'
            #(theano.scalar.and_, tensor.elemwise.CAReduce),
            #(theano.scalar.or_, tensor.elemwise.CAReduce),
            ]:
        for shape, pattern in [((1,1),(1,)),
                               ((1,0),(1,)),
                               ((0,1),(1,)),
                               ((0,0),(1,)),
                               ((0,0,0),(1,2)),
                               ((0,0,0,0),(1,2,3)),
                               ((2,1),(1,)),
                               ((1,2),(1,)),
                               ((100,3,1300),[1]),
                               ((0,),[0]),((5,),[0]),
                               ((0,0),[0,1]),((1,0),[0,1]),((5,4),[0,1]),((33,31),[0,1]),((5,4),[1]),((5,4),[0]),#need something bigger then 32 for some opt test.
                               ((5,4,3),[0]),((5,4,3),[1]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[1,2]),((5,4,3),[0,1,2]),
                               ((0,0,0,0),[0,1,2,3]),
                               ((5,4,3,20),[2,3]), ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),((5,4,3,2),[1,2,3]),
                               ((5,4,3,10,11),[1,2]),
                               ((5,4,3,20),[2,3]), ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),((5,4,3,2),[1,2,3]),
                               #test shape bigger then 4096 on each dimension to make sure that we work correctly when we don't have enough thread/block in each dimensions
                               ((4100,3),[0]),((3,4101),[0]),#10
                               ((1024,33),[0]),((33,1024),[0]),#10
                               ((1025,33),[0]),((33,1025),[0]),#10
                               ((4100,3),[1]),((3,4101),[1]),#01
                               ((1024,33),[1]),((33,1024),[1]),#01
                               ((1025,33),[1]),((33,1025),[1]),#01
                               ((4100,3),[0,1]),((3,4101),[0,1]),#11
                               ((1024,33),[0,1]),((33,1024),[0,1]),#01
                               ((1025,33),[0,1]),((33,1025),[0,1]),#01
                               ((4100,4,3),[0]),((5,4100,3),[0]),((5,4,4100),[0]), ((3,65536,1), [0]),#100
                               ((4100,4,3),[1]),((5,4100,3),[1]),((5,4,4100),[1]),#010
                               ((4100,4,3),[2]),((5,4100,3),[2]),((5,4,4100),[2]),#001
                               ((4100,4,3),[0,1]),((5,4100,3),[0,1]),((5,4,4100),[0,1]),#110
                               ((4100,4,3),[1,2]),((5,4100,3),[1,2]),((5,4,4100),[1,2]),#011
                               #((4100,4,3),[0,2]),((5,4100,3),[0,2]),((5,4,4100),[0,2]),#101 ##not implemented
                               ((4100,4,3),[0,1,2]),((5,4100,3),[0,1,2]),((5,4,4100),[0,1,2]),#111
                               ((65,4,3),[0,1,2]),((5,65,3),[0,1,2]),((5,4,65),[0,1,2]),#111
                               ((4100,4,3,2),[2,3]),((4,4100,3,2),[2,3]),((4,3,4100,2),[2,3]),((4,3,2,4100),[2,3]),#0011
                               ((4100,4,3,2),[1,3]),((4,4100,3,2),[1,3]),((4,3,4100,2),[1,3]),((4,3,2,4100),[1,3]),#0101
                               ((4100,4,3,2),[0,2,3]),((4,4100,3,2),[0,2,3]),((4,3,4100,2),[0,2,3]),#((4,3,2,4100),[0,2,3]),#1011
                               ((4100,4,3,2),[1,2,3]),((4,4100,3,2),[1,2,3]),((4,3,4100,2),[1,2,3]),((4,3,2,4100),[1,2,3]),#0111
                               ((65,4,3,2),[1,2,3]),((4,65,3,2),[1,2,3]),((4,3,65,2),[1,2,3]),((4,3,2,65),[1,2,3]),#0111
                               ((4100,2,3,4),[0,1,2,3]),((2,4100,3,4),[0,1,2,3]),((2,3,4100,4),[0,1,2,3]),((2,3,4,4100),[0,1,2,3]),((128,1,3,3), [0,1,2,3]),#1111
                               #test pattern implemented by reshape
                               ((4100,4,3,2),[0]),((4,4100,3,2),[0]),((4,3,4100,2),[0]),((4,3,2,4100),[0]),#1000
                               ((4100,4,3,2),[1]),((4,4100,3,2),[1]),((4,3,4100,2),[1]),((4,3,2,4100),[1]),#0100
                               ((4100,4,3,2),[2]),((4,4100,3,2),[2]),((4,3,4100,2),[2]),((4,3,2,4100),[2]),#0010
                               ((4100,4,3,2),[3]),((4,4100,3,2),[3]),((4,3,4100,2),[3]),((4,3,2,4100),[3]),#0001
                               ((1100,2,3,4,5),[0,1,2,3,4]),((2,1100,3,4,5),[0,1,2,3,4]),((2,3,1100,4,5),[0,1,2,3,4]),((2,3,4,1100,5),[0,1,2,3,4]),((2,3,4,5,1100),[0,1,2,3,4]),#11111
                               ]:
            op = careduce_op(scalar_op, axis=pattern)
            pat = tensor_pattern_to_gpu_pattern(shape, pattern)
            a = tensor.TensorType('float32', (False,) * len(shape))()
            b = op(a)
            val = numpy.random.rand(numpy.prod(shape)).reshape(shape)
            # val = numpy.ones(shape)
            # val = numpy.arange(numpy.prod(shape)).reshape(shape)
            val = theano._asarray(val, dtype='float32')
            f = theano.function([a], b, mode=mode_with_gpu)
            f2 = theano.function([a], b, mode=mode_without_gpu)
            # The GPU graph must contain GpuCAReduce; the CPU graph must keep
            # the original reduce op.
            assert tcn.GpuCAReduce in [x.op.__class__
                                       for x in f.maker.fgraph.toposort()], (
                                           scalar_op, shape, pattern)
            assert op.__class__ in [x.op.__class__
                                    for x in f2.maker.fgraph.toposort()], (
                                        scalar_op, shape, pattern)
            # Both versions must agree on whether the input is rejected.
            f_caused_value_error = False
            try:
                f_out = f(val)
            except ValueError, e:
                exc = e
                f_caused_value_error = True
            except NotImplementedError:
                if (numpy.prod(shape) == 0 and
                    getattr(scalar_op, 'identity', None) != 0):
                    continue
                raise
            f2_caused_value_error = False
            try:
                f2_out = f2(val)
            except ValueError, e:
                exc2 = e
                f2_caused_value_error = True
            if f_caused_value_error != f2_caused_value_error:
                if f_caused_value_error:
                    print 'f caused this value error:'
                    print exc
                else:
                    print 'f did not raise a value error, but should have'
                if f2_caused_value_error:
                    print 'f2 caused this value error:'
                    print exc2
                else:
                    print 'f should not have raised a value error'
                print 'shape was: ', shape
                print 'pattern was: ', pattern
                assert False
            try:
                #We raise the error threashold as we sum big matrix
                #and this cause small rounding difference with some seed
                #example in debug mode with unittests.rseed=9275
                orig_rtol = theano.tensor.basic.float32_rtol
                theano.tensor.basic.float32_rtol = 2e-5
                assert _allclose(f_out, f2_out), ('shape', shape,
                                                  'pattern', pattern,
                                                  scalar_op,
                                                  sum([shape[i] for i in pattern]),
                                                  f2(val), f(val), val)
            finally:
                theano.tensor.basic.float32_rtol = orig_rtol
    #test with dimshuffle
    #we shuffle the 2 outer dims.
    # NOTE(review): these two follow-up loops sit outside the scalar_op loop,
    # so they only use the last (scalar_op, careduce_op) pair -- confirm intended.
    for shape, pattern in [#((5,),[0]),
                           ((5,4),[0,1]),((5,4),[0]),
                           ((5,4,3),[0]),((5,4,3),[0,1]),((5,4,3),[2]),((5,4,3),[0,1,2]),
                           ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),
                           ((128,1,3,3),[0,1,2,3]),
                           ]:
        op = careduce_op(scalar_op, axis=pattern)
        pat = tensor_pattern_to_gpu_pattern(shape, pattern)
        a = tensor.TensorType('float32', (False,) * len(shape))()
        dim_pattern = range(len(shape))
        dim_pattern[0] = 1
        dim_pattern[1] = 0
        a = a.dimshuffle(dim_pattern)
        b = op(a)
        val = numpy.random.rand(numpy.prod(shape)).reshape(shape)
        # val = numpy.ones(shape)
        # val = numpy.arange(numpy.prod(shape)).reshape(shape)
        val = theano._asarray(val, dtype='float32')
        f = theano.function([a], b, mode=mode_with_gpu)
        f2 = theano.function([a], b, mode=mode_without_gpu)
        assert tcn.GpuCAReduce in [x.op.__class__
                                   for x in f.maker.fgraph.toposort()], (
                                       scalar_op, shape, pattern)
        assert op.__class__ in [x.op.__class__
                                for x in f2.maker.fgraph.toposort()], (
                                    scalar_op, shape, pattern)
        assert _allclose(f2(val), f(val)), ('shape', shape,
                                            'pattern', pattern,
                                            scalar_op,
                                            sum([shape[i] for i in pattern]))
    #test with broadcast
    for shape, pattern in [((5,),[0]),
                           ((5,4),[0,1]),((5,4),[0]),
                           ((5,4,3),[0]),((5,4,3),[0,1]),
                           ((5,4,3),[2]),((5,4,3),[0,1,2]),
                           ((5,4,3,2),[0,1,2,3]), ((5,4,3,2),[0,2,3]),
                           ((128,1,3,3),[0,1,2,3]),
                           ]:
        op = careduce_op(scalar_op, axis=pattern)
        pat = tensor_pattern_to_gpu_pattern(shape, pattern)
        # Double the shape, then slice every other element below so the
        # reduced inputs are strided.
        shape = numpy.asarray(shape) * 2
        a = tensor.TensorType('float32', (False,) * len(shape))()
        a2 = tcn.CudaNdarrayType((False,) * len(shape))()
        b = op(a)
        b2 = op(a2)
        val = numpy.random.rand(numpy.prod(shape)).reshape(shape)
        # val = numpy.ones(shape)
        # val = numpy.arange(numpy.prod(shape)).reshape(shape)
        val = theano._asarray(val, dtype='float32')
        val2 = cuda.CudaNdarray(val)
        if len(shape) == 1:
            val = val[::2]
            val2 = val2[::2]
        elif len(shape) == 2:
            val = val[::2, ::2]
            val2 = val2[::2, ::2]
        elif len(shape) == 3:
            val = val[::2, ::2, ::2]
            val2 = val2[::2, ::2, ::2]
        elif len(shape) == 4:
            val = val[::2, ::2, ::2, ::2]
            val2 = val2[::2, ::2, ::2, ::2]
        f = theano.function([a], b, mode=mode_without_gpu)
        f2 = theano.function([a2], b2, mode=mode_with_gpu)
        assert tcn.GpuCAReduce in [x.op.__class__
                                   for x in f2.maker.fgraph.toposort()], (
                                       scalar_op, shape, pattern)
        assert op.__class__ in [x.op.__class__
                                for x in f.maker.fgraph.toposort()], (
                                    scalar_op, shape, pattern)
        assert _allclose(f2(val2), f(val)), ('shape', shape,
                                             'pattern', pattern,
                                             sum([shape[i] for i in pattern]))
def test_flatten():
    """Flattening a GPU matrix must produce a one-dimensional result."""
    mat = cuda.fmatrix('x')
    flatten_fn = theano.function([mat], mat.flatten())
    out = flatten_fn([[0., 0.], [0., 0.]])
    assert len(out.shape) == 1
def test_reshape():
    """Check GpuReshape: the GPU op appears in the graph, values are correct,
    and the input is left untouched with and without inplace optimizations;
    finally verify the gradient of Reshape."""
    a = tcn.CudaNdarrayType((False,))()
    b = tcn.CudaNdarrayType((False, False))()
    c = T.reshape(a, [2, 3])
    #basic
    f = theano.function([a], c, mode=mode_with_gpu)
    fv = f(cuda_ndarray.CudaNdarray(theano._asarray([0, 1, 2, 3, 4, 5],
                                                    dtype='float32')))
    topo = f.maker.fgraph.toposort()
    assert any([isinstance(node.op, B.GpuReshape) for node in topo])
    assert numpy.all(fv == numpy.asarray([[0, 1, 2], [3, 4, 5]]))
    #test that it works without inplace operations
    a_val = cuda_ndarray.CudaNdarray(theano._asarray([0, 1, 2, 3, 4, 5],
                                                     dtype='float32'))
    a_val_copy = cuda_ndarray.CudaNdarray(theano._asarray([0, 1, 2, 3, 4, 5],
                                                          dtype='float32'))
    b_val = cuda_ndarray.CudaNdarray(theano._asarray([[0, 1, 2], [3, 4, 5]],
                                                     dtype='float32'))
    f_sub = theano.function([a, b], c - b, mode=mode_with_gpu)
    topo = f_sub.maker.fgraph.toposort()
    assert any([isinstance(node.op, B.GpuReshape) for node in topo])
    assert numpy.all(f_sub(a_val, b_val) == 0.0)
    # The input value must be unchanged by the call.
    assert numpy.all(numpy.asarray(a_val) == numpy.asarray(a_val_copy))
    #test that it works with inplace operations
    a_val = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float32')
    a_val_copy = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float32')
    b_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
    f_sub = theano.function([a, b], c - b, mode=mode_with_gpu)
    topo = f_sub.maker.fgraph.toposort()
    assert any([isinstance(node.op, B.GpuReshape) for node in topo])
    assert numpy.all(f_sub(a_val, b_val) == 0.0)
    assert numpy.all(numpy.asarray(a_val) == numpy.asarray(a_val_copy))
    # verify gradient
    def just_vals(v):
        # Wrap Reshape so verify_grad only differentiates w.r.t. v.
        return T.Reshape(2)(v, theano._asarray([2, 3], dtype='int32'))
    utt.verify_grad(just_vals, [a_val])
def test_elemwise_empty():
    """An elemwise update must compile and run on a 0-element shared value."""
    empty = theano._asarray(numpy.random.rand(0, 0), dtype='float32')
    shared_var = tcn.shared_constructor(empty, 'a')
    inc = tensor.fmatrix()
    step_gpu = pfunc([inc], [], updates=[(shared_var, shared_var + inc)],
                     mode=mode_with_gpu)
    step_cpu = pfunc([inc], [], updates=[(shared_var, shared_var + inc)],
                     mode=mode_without_gpu)
    before = shared_var.get_value() * 1.0
    step_gpu(numpy.ones((0, 0), dtype='float32'))
    assert numpy.all(before + 1.0 == shared_var.get_value())
def test_elemwise0():
    """An elemwise update on a GPU shared variable runs inplace and
    computes a + b correctly."""
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(4, 4),
                                               dtype='float32'), 'a')
    b = tensor.fmatrix()
    f = pfunc([b], [], updates=[(a, a + b)], mode=mode_with_gpu)
    #check that we work inplace.
    assert f.maker.fgraph.toposort()[1].op.destroy_map.items() == [(0, [0])]
    a0 = a.get_value() * 1.0
    f(numpy.ones((4, 4), dtype='float32'))
    assert numpy.all(a0 + 1.0 == a.get_value())
def test_elemwise_bad_broadcast():
    """Multiplying GPU matrices with incompatible shapes must raise
    ValueError at call time."""
    x = cuda.fmatrix('x')
    y = cuda.fmatrix('y')
    f = theano.function([x, y], x * y, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2
    assert isinstance(topo[0].op, cuda.GpuElemwise)
    assert topo[1].op == cuda.host_from_gpu
    raised = False
    try:
        f(rand_cuda_ndarray((10, 3)), rand_cuda_ndarray((10, 1)))
    except ValueError:
        raised = True
    if not raised:
        raise Exception("Theano should have raised an error")
def test_elemwise1():
    """ Several kinds of elemwise expressions with no broadcasting,
    non power-of-two shape """
    shape = (3, 4)
    # Values are shifted away from 0 (+0.5 / +0.3), presumably to keep
    # b ** a and exp well-behaved -- TODO confirm.
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32') + 0.5, 'a')
    b = tensor.fmatrix()
    #let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
    #let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
    #let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],
              mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
def test_elemwise2():
    """ Several kinds of elemwise expressions with dimension permutations """
    rng = numpy.random.RandomState(int(time.time()))
    shape = (3, 5)
    for pattern in [(0, 1), (1, 0)]:
        a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                                   dtype='float32'), name=None)
        b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
        f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],
                  mode=mode_with_gpu)
        has_elemwise = False
        for i, node in enumerate(f.maker.fgraph.toposort()):
            has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
        # No CPU Elemwise may remain: the whole update must run on the GPU.
        assert not has_elemwise
        #let debugmode catch errors
        f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)
    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
    f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *
                                 tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    #let debugmode catch errors
    f(theano._asarray(rng.rand(*shape), dtype='float32'))
def test_elemwise3():
    """ Several kinds of elemwise expressions with dimension
    permutations and broadcasting"""
    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    new_val = (a + b).dimshuffle([2, 0, 3, 1])
    new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
    f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    # The whole expression must have been moved to GPU elemwise ops.
    assert not has_elemwise
    #let debugmode catch errors
    f(theano._asarray(numpy.random.rand(6), dtype='float32'))
def test_elemwise4():
    """ Test that two vectors can be broadcast to form an outer
    product (by performing rank-1 matrix update"""
    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    c = tensor.fvector()
    f = pfunc([b, c], [],
              updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],
              mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    # The broadcast outer-product update must run as GPU elemwise only.
    assert not has_elemwise
    #let debugmode catch errors
    f(theano._asarray(numpy.random.rand(4), dtype='float32'),
      theano._asarray(numpy.random.rand(3), dtype='float32'))
def test_elemwise_comparaison_cast():
    """
    test if an elemwise comparaison followed by a cast to float32 are
    pushed to gpu.
    """
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')
    # Each comparison op is paired with its expected numpy result.
    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:
        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)
        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
def test_elemwise_composite_float64():
    # test that we don't fuse composite elemwise with float64 somewhere inside
    # nvcc by default downcast them to float32. We would need to tell him not
    # to do so, but that possible only on some device.
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')
    def get_all_basic_scalar(composite_op):
        # Recursively flatten nested Composite scalar graphs into a flat list.
        l = []
        for i in composite_op.fgraph.toposort():
            if isinstance(i, theano.scalar.Composite):
                l += get_all_basic_scalar(i)
            else:
                l.append(i)
        return l
    # Exercise the check with several fusion-optimization configurations.
    for mode in [mode_with_gpu, mode_with_gpu.excluding('gpu_after_fusion'),
                 mode_with_gpu.excluding('elemwise_fusion')]:
        f = pfunc([a, b],
                  tensor.cast(tensor.lt(tensor.cast(a, 'float64') ** 2,
                                        b),
                              'float32'), mode=mode)
        out = f(av, bv)
        assert numpy.all(out == ((av ** 2) < bv))
        for node in f.maker.fgraph.toposort():
            if isinstance(node.op, cuda.GpuElemwise):
                if isinstance(node.op.scalar_op, theano.scalar.Composite):
                    scals = get_all_basic_scalar(node.op.scalar_op)
                    for s in scals:
                        # No float64 may survive inside a fused GPU composite.
                        assert not any([i.type.dtype == 'float64'
                                        for i in s.inputs + s.outputs])
def test_elemwise_composite_support_code():
    """
    This was generating an error at compile time.
    Commit 3d1690fa346103594356ecaeceeb2c6757b45d2b fixed that.
    """
    X = tcn.shared_constructor(value=numpy.zeros((100, 10), dtype="float32"),
                               name='X')
    W = tcn.shared_constructor(value=numpy.zeros((10, 1), dtype="float32"),
                               name='W')
    U = T.dot(X, W)
    Y = tcn.shared_constructor(value=numpy.zeros((100, 1), dtype="float32"),
                               name='Y')
    P = T.exp(-(Y - U) ** 2)
    epsilon = numpy.asarray(0.001, dtype="float32")
    NLL = -T.mean(T.log(P + epsilon)) # SupportCodeError
    G = theano.gradient.grad(NLL, wrt=[W])
    # Temporarily silence the 1+exp warning while compiling the gradient,
    # restoring the original setting afterwards.
    backup = theano.config.warn.identify_1pexp_bug
    theano.config.warn.identify_1pexp_bug = False
    try:
        f_grad = theano.function(inputs=[], outputs=G, mode=mode_with_gpu)
    finally:
        theano.config.warn.identify_1pexp_bug = backup
    f_grad()
    topo = f_grad.maker.fgraph.toposort()
    assert sum([isinstance(node.op, T.Elemwise) for node in topo]) == 1
    #I suspect this was failing in the original branch too
    assert sum([isinstance(node.op, tcn.GpuElemwise) for node in topo]) == 1
def speed_elemwise_collapse():
    """ used to time if the collapse of ccontiguous dims are useful """
    shape = (30, 40, 50, 600)
    # NOTE(review): this first CudaNdarray is immediately overwritten by the
    # next assignment; it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, ::2, :, :]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, ::2, :, :]
    v = cuda_ndarray.CudaNdarray(v)
    t1 = time.time()
    for i in range(100):
        #let debugmode catch errors
        f(v)
    t2 = time.time()
    # NOTE(review): t1/t2 are measured but never reported or returned --
    # presumably timings are read with an external profiler; confirm.
def speed_elemwise_collapse2():
    """ used to test the speed up of the generalised collapse of
    ccontiguous dims"""
    shape = (30, 40, 50, 600)
    # NOTE(review): this first CudaNdarray is immediately overwritten by the
    # next assignment; it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, :, :, ::2]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, :, :, ::2]
    v = cuda_ndarray.CudaNdarray(v)
    t1 = time.time()
    for i in range(100):
        #let debugmode catch errors
        f(v)
    t2 = time.time()
    # NOTE(review): t1/t2 are measured but never reported or returned --
    # presumably timings are read with an external profiler; confirm.
def test_elemwise_collapse():
    """ Test when all inputs have one(and the same) broadcastable dimension """
    shape = (4, 5, 60)
    # NOTE(review): this first CudaNdarray is immediately overwritten below;
    # it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, True, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(shape[0], 1, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    #let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    #print "Expected collapse of all dimensions"
def test_elemwise_collapse2():
    """ Test when only one inputs have one broadcastable dimension """
    shape = (4, 5, 9)
    # NOTE(review): this first CudaNdarray is immediately overwritten below;
    # it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    #let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    #print "Expected collapse to 3 dimensions"
def test_elemwise_collapse3():
    """ Test when only one inputs have two broadcastable dimension at each ends """
    shape = (4, 5)
    # NOTE(review): this first CudaNdarray is immediately overwritten below;
    # it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape),
                        dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    #let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
    #print "Expected collapse to 3 dimensions"
def test_elemwise_collapse4():
    """ Test when only one inputs have two broadcastable dimension at
    each ends and we add a scalar"""
    shape = (4, 5)
    # NOTE(review): this first CudaNdarray is immediately overwritten below;
    # it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    #let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
    #print "Expected collapse to 3 dimensions"
def test_elemwise_collapse5():
    """ Test when only one inputs have two broadcastable dimension at
    the beginning and we add a scalar"""
    shape = (4, 5)
    # NOTE(review): this first CudaNdarray is immediately overwritten below;
    # it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(5, 4, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    #let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v + 2)
    #print "Expected collapse to 2 dimensions"
def test_elemwise_collapse6():
    """ Test when all inputs have two broadcastable dimension at the
    beginning"""
    shape = (4, 5)
    # NOTE(review): this first CudaNdarray is immediately overwritten below;
    # it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((True, True, False, False))()
    f = pfunc([b], [a3 + b], mode=mode_with_gpu)
    v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    #let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
    #print "Expected collapse to c contiguous"
def test_elemwise_collapse7(atol=1e-6):
    """ Test when one input have one broadcastable dimension and the
    other is a scalar"""
    shape = (5, 4, 1)
    # NOTE(review): this first CudaNdarray is immediately overwritten below;
    # it looks like leftover dead code.
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a.copy(), 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    f = pfunc([], [a3 + 2], mode=mode_with_gpu)
    #let debugmode catch errors
    out = f()[0]
    ans = (a + 2).reshape(shape[0], 1, shape[1], shape[2])
    assert numpy.allclose(out, ans, atol=atol)
    #print "Expected collapse to c contiguous"
def test_hostfromgpu_shape_i():
    """
    Test that the shape is lifted over hostfromgpu
    """
    # Fix: removed a stray no-op `pass` statement that sat between the
    # docstring and the body (dead leftover from a stub); it had no effect.
    m = mode_with_gpu.including('local_dot_to_dot22',
                                'local_dot22_to_dot22scalar', 'specialize')
    a = T.fmatrix('a')
    ca = theano.sandbox.cuda.var.CudaNdarrayType((False, False))()
    av = numpy.asarray(numpy.random.rand(5, 4), dtype='float32')
    cv = cuda.CudaNdarray(numpy.asarray(numpy.random.rand(5, 4),
                                        dtype='float32'))
    # The plain host->GPU transfer stays in the graph...
    f = theano.function([a], cuda.basic_ops.gpu_from_host(a), mode=m)
    assert cuda.basic_ops.gpu_from_host in [x.op
                                            for x in f.maker.fgraph.toposort()]
    # ...but asking only for the shape must optimize the transfer away,
    # leaving pure Shape_i / MakeVector nodes.
    f = theano.function([a], cuda.basic_ops.gpu_from_host(a).shape, mode=m)
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[0].op, T.opt.Shape_i)
    assert isinstance(topo[1].op, T.opt.Shape_i)
    assert isinstance(topo[2].op, T.opt.MakeVector)
    assert tuple(f(av)) == (5, 4)
    # Same two checks in the GPU->host direction.
    f = theano.function([ca], cuda.basic_ops.host_from_gpu(ca), mode=m)
    assert cuda.basic_ops.host_from_gpu in [x.op
                                            for x in f.maker.fgraph.toposort()]
    f = theano.function([ca], cuda.basic_ops.host_from_gpu(ca).shape, mode=m)
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[0].op, T.opt.Shape_i)
    assert isinstance(topo[1].op, T.opt.Shape_i)
    assert isinstance(topo[2].op, T.opt.MakeVector)
    assert tuple(f(cv)) == (5, 4)
# -----------------------------------------------------------------------
import theano.sandbox.cuda as cuda_ndarray
def test_gpujoin_assert_cndas():
    """gpu_join must reject inputs that are not CudaNdarray-typed."""
    # this will end up being an ndarray, as it's float64
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float64')
    a = theano.shared(_a)
    try:
        c = cuda.basic_ops.gpu_join(1, a)
        # can't "assert False" here, as we want the assertion
        # error from gpu_join
    except AssertionError:
        assert True
        return
    # Reached only if gpu_join accepted the float64 input: fail the test.
    assert False
def test_gpujoin_no_rebroadcast():
    """A join on a GPU shared variable must not insert Rebroadcast nodes."""
    values = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    shared_var = tcn.shared_constructor(values)
    join_fn = theano.function([], T.join(1, shared_var))
    topo = join_fn.maker.fgraph.toposort()
    assert all(not isinstance(node.op, T.Rebroadcast) for node in topo)
def test_gpualloc_input_on_gpu():
    """ones_like on a GPU shared input must compile to gpu_alloc on the GPU
    graph (and plain alloc on CPU), with matching numeric results."""
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)
    b = T.fscalar()
    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], T.ones_like(a) + b, mode=mode_with_gpu)
    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5))
def test_gpujoin_gpualloc():
    """join of zeros_like/ones_like must compile to gpu_join + GpuAlloc on
    the GPU graphs (alloc + join on CPU) and give matching results."""
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')
    f = theano.function([a, b], T.join(0, T.zeros_like(a),T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
                                            T.ones_like(b)) + 4,
                             mode=mode_with_gpu)
    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpualloc_output_to_gpu():
    """Explicitly transferring ones_like to the GPU must still use gpu_alloc
    and agree numerically with the CPU version."""
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)
    b = T.fscalar()
    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], B.gpu_from_host(T.ones_like(a)) + b,
                            mode=mode_with_gpu)
    f(2)
    f_gpu(2)
    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5))
import theano.tensor.tests.test_basic
class TestAlloc(theano.tensor.tests.test_basic.TestAlloc):
    """Re-run the generic Alloc test suite against the CUDA backend."""
    dtype = "float32"
    mode = mode_with_gpu
    shared = staticmethod(cuda.shared_constructor)
    # Op classes the base test suite should find in the compiled graphs.
    allocs = [B.GpuAlloc, B.GpuAlloc, tensor.Alloc]
class T_Join_and_Split(theano.tensor.tests.test_basic.T_Join_and_Split):
    """Re-run the generic join/split test suite with GPU ops substituted."""
    def setUp(self):
        utt.seed_rng()
        self.mode = mode_with_gpu.excluding('constant_folding')
        self.join_op = cuda.GpuJoin
        # No gpu split.
        self.split_op = tensor.Split
        # No Make vector on the gpu, Join used instead
        self.make_vector_op = cuda.GpuJoin
        self.floatX = "float32"
        # In FAST_COMPILE mode, we force the FAST_RUN mode for optimization.
        self.hide_error = theano.config.mode not in ['DebugMode', 'DEBUG_MODE']
        self.shared = cuda.shared_constructor
import theano.tensor.tests.test_subtensor
# Subclass the generic subtensor tests instead of duplicating them here.
class T_subtensor(theano.tensor.tests.test_subtensor.T_subtensor):
    """Re-run the generic subtensor test suite with the CUDA subtensor ops,
    plus a GPU-specific check of the CudaNdarrayTakeFrom fast path."""
    # This prevents nose from printing method docstrings instead of method
    # names
    def shortDescription(self):
        return None
    shared = staticmethod(cuda.shared_constructor)
    sub = cuda.GpuSubtensor
    inc_sub = cuda.GpuIncSubtensor
    adv_sub1 = cuda.GpuAdvancedSubtensor1
    adv_incsub1 = cuda.GpuAdvancedIncSubtensor1
    mode = mode_with_gpu
    dtype = 'float32'
    # Transfer/copy ops to ignore when the base tests count graph nodes.
    ignore_topo = (B.HostFromGpu, B.GpuFromHost, theano.compile.DeepCopyOp)
    fast_compile = False
    ops = (cuda.GpuSubtensor, cuda.GpuIncSubtensor,
           cuda.GpuAdvancedSubtensor1, cuda.GpuAdvancedIncSubtensor1)
    def __init__(self, name):
        return super(theano.tensor.tests.test_subtensor.T_subtensor,
                     self).__init__(name)
    def test_adv_sub1_fast(self):
        """We check that the special cases of advanced indexing that
        use CudaNdarrayTakeFrom are handled correctly
        """
        rand = numpy.random.rand
        # The variable fast is used to set the member perform_using_take of
        # the Op. It is only useful for testing that we use the fast
        # version when we should. Users should not use it.
        for shape, idx, fast in [((70000,), range(70000), True),
                                 ((70000, 5), range(70000), True),
                                 ((70000, 2, 3), range(70000), True),
                                 ((1025, 1025), [5, 10], True),
                                 ((3, 1025, 1026), [1, 2], True),
                                 ((1025, 67000), [5, 10], True),
                                 ((3, 10, 68000), [1, 2], True),
                                 ((3, 69000, 11), [1, 2], True),
                                 # much memory, will be disabled if needed
                                 ((2*10e7,), [-1, 199999999], True),
                                 ((4, 5), [2, 3], True),
                                 ((4, 2, 3), [0, 3], True),
                                 ((4, 2, 3), [3, 3, 1, 1, 2,
                                              2, 0, 0], True),
                                 ((4, 2, 3), [3, 3, 1, 1, 2, 2, 0,
                                              0, -1, -2, -3, -4], True),
                                 # Test 4 dims as gpu. code use another algo
                                 # in that case. This new algo is not as much
                                 # optimized for that case.
                                 ((4, 4, 2, 3), [3, 3, 1, 1, 2, 2, 0, 0,
                                                 -1, -2, -3, -4], False),
                                 ]:
            # If there is not enough memory on the GPU, skip the test
            size_needed = numpy.prod(shape) * (4 + 1)
            if isinstance(theano.compile.get_default_mode(),
                          theano.compile.DebugMode):
                size_needed = numpy.prod(shape) * 4 * 4
            if size_needed >= theano.sandbox.cuda.mem_info()[0]:
                #print "skip", shape
                continue
            data = rand(*shape)
            data = numpy.asarray(data, dtype=self.dtype)
            n = self.shared(data, borrow=True)
            # Test with c_contiguous input
            t = self.adv_sub1()(n, idx)
            t.owner.op.perform_using_take = True # input c_contiguous, so we reshape
            val = self.eval_output_and_check(t, list=True)
            val = numpy.asarray(val)
            good = data[idx]
            self.assertTrue(val.ndim == data.ndim)
            self.assertTrue(numpy.allclose(val, good), (val, good))
            # Test with input strided
            t = self.adv_sub1()(n[::-1], idx)
            #DebugMode does a copy of the input, so we lose the strides.
            if not isinstance(theano.compile.get_default_mode(),
                              theano.compile.DebugMode):
                t.owner.op.perform_using_take = fast
            val = theano.function([], t, mode=self.mode)()
            val = numpy.asarray(val)
            good = data[::-1][idx]
            self.assertTrue(val.ndim == data.ndim)
            self.assertTrue(numpy.allclose(val, good), (val, good))
def test_advinc_subtensor1():
    """Check the second case of the opt local_gpu_advanced_incsubtensor1."""
    for shape in [(3, 3), (3, 3, 3)]:
        make_shared = cuda.shared_constructor
        x_np = numpy.arange(numpy.prod(shape), dtype='float32').reshape(shape) + 1
        y_np = numpy.empty((2,) + shape[1:], dtype='float32')
        y_np.fill(10)
        x_sh = make_shared(x_np, name='x')
        y_sym = T.tensor(dtype='float32',
                         broadcastable=(False,) * len(shape),
                         name='y')
        graph = T.advanced_inc_subtensor1(x_sh, y_sym, [0, 2])
        fn = theano.function([y_sym], graph, mode=mode_with_gpu)
        # Exactly one GPU advanced-inc-subtensor node must be present.
        gpu_nodes = [node for node in fn.maker.fgraph.toposort()
                     if isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)]
        assert len(gpu_nodes) == 1
        result = fn(y_np)
        expected = x_np.copy()
        expected[[0, 2]] += y_np
        assert numpy.allclose(result, expected)
def test_inc_subtensor():
    """Check that inc_subtensor is moved to the GPU and computes correctly.

    Verifies that the compiled graph contains exactly one GpuSubtensor and
    exactly one GpuIncSubtensor in increment mode, and that the numerical
    result matches the expected values.
    """
    # Removed an unused local (`shared = cuda.shared_constructor`) left over
    # from a copy-paste; nothing in this test uses a shared variable.
    x, y = T.fmatrices('x', 'y')
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                         dtype='float32')
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10], [10, 10, 10]],
                         dtype='float32')
    expr = T.inc_subtensor(x[:, 1:3], y[:, 1:3])
    f = theano.function([x, y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuSubtensor)
                for node in f.maker.fgraph.toposort()]) == 1
    # PEP 8: test the flag directly instead of comparing to False with `==`.
    assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
                not node.op.set_instead_of_inc
                for node in f.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(xval, yval), [[1., 12., 13.],
                                          [4., 15., 16.], [7., 18., 19.]])
def test_set_subtensor():
    """Check that set_subtensor is moved to the GPU and computes correctly.

    Verifies that the compiled graph contains exactly one GpuSubtensor and
    exactly one GpuIncSubtensor in set mode, and (unlike the original,
    which discarded the result) that the output values are correct.
    """
    # Removed an unused local (`shared = cuda.shared_constructor`) left over
    # from a copy-paste; nothing in this test uses a shared variable.
    x, y = T.fmatrices('x', 'y')
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                         dtype='float32')
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10], [10, 10, 10]],
                         dtype='float32')
    expr = T.set_subtensor(x[:, 1:3], y[:, 1:3])
    f = theano.function([x, y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuSubtensor)
                for node in f.maker.fgraph.toposort()]) == 1
    # PEP 8: test the flag directly instead of comparing to True with `==`.
    assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
                node.op.set_instead_of_inc
                for node in f.maker.fgraph.toposort()]) == 1
    # BUG FIX: the original called f(xval, yval) and ignored the result;
    # now assert the columns were actually overwritten.
    assert numpy.allclose(f(xval, yval), [[1., 10., 10.],
                                          [4., 10., 10.], [7., 10., 10.]])
def test_many_arg_elemwise():
    """Check that the + and * elemwise ops can handle extremely large
    numbers of arguments on the GPU.

    This exercises the optimization
    theano/sandbox/cuda/opt.py:local_gpu_huge_add_or_mul, and compares
    GPU results against CPU results for the same inputs.
    """
    rng = numpy.random.RandomState([1, 2, 3])
    for num_args in [25]:
        for op_to_test in [theano.tensor.add, theano.tensor.mul]:
            for nb_dim in [2, 3, 4, 5]:
                shapes = [rng.randint(1, 5) for i in range(nb_dim)]
                # xrange -> range: behaves identically here (small counts)
                # and keeps the file runnable under Python 3 as well.
                args = [numpy.cast['float32'](rng.randn(*shapes))
                        for _ in range(0, num_args)]
                symb_args = [theano.tensor.TensorType('float32',
                                                      (False,)*nb_dim)()
                             for _ in range(0, num_args)]
                outputs = []
                for mode in [mode_with_gpu, mode_without_gpu]:
                    # test the optimization local_gpu_elemwise_0
                    f = theano.function(
                        symb_args, op_to_test(*symb_args),
                        mode=mode.excluding("local_gpu_elemwise_1"))
                    outputs.append(f(*args))
                    # assert that the test was done on the gpu.
                    if mode is mode_with_gpu:
                        assert any([isinstance(node.op, cuda.GpuElemwise)
                                    for node in f.maker.fgraph.apply_nodes])
                    # test the optimization local_gpu_elemwise_1
                    f = theano.function(
                        symb_args,
                        cuda.gpu_from_host(op_to_test(*symb_args)),
                        mode=mode.excluding("local_gpu_elemwise_0"))
                    out = f(*args)
                    # assert that the test was done on the gpu.
                    if mode is mode_with_gpu:
                        assert any([isinstance(node.op, cuda.GpuElemwise)
                                    for node in f.maker.fgraph.apply_nodes])
                    # Both optimizations must agree with each other...
                    assert numpy.allclose(out, outputs[-1])
                # ...and GPU must agree with CPU.
                results_gpu, results_cpu = outputs
                assert numpy.allclose(results_gpu, results_cpu)
def test_duplicate_arg_elemwise():
    """Elemwise with the same variable appearing twice (A + A) on the GPU."""
    A = theano.tensor.fmatrix()
    f = theano.function([A], A + A, mode=mode_with_gpu)
    rng = numpy.random.RandomState([1, 2, 3])
    Aval = rng.randn(5, 5).astype('float32')
    assert numpy.allclose(Aval + Aval, f(Aval))
def test_shared_float32():
    '''Test use of cuda.shared_constructor through theano.shared'''
    # Register cuda.shared_constructor in theano.shared
    theano.shared.constructors.append(cuda.shared_constructor)
    try:
        a = theano.shared(numpy.ones((2, 3), dtype='float32'))
        assert isinstance(a.type, tcn.CudaNdarrayType)
    finally:
        # BUG FIX: unregister in a finally block, so a failed assertion
        # does not leak the CUDA constructor into unrelated tests.
        del theano.shared.constructors[-1]
def test_shared_cudandarray():
    """A CudaNdarray input must yield a CudaNdarraySharedVariable."""
    shared_var = cuda.shared_constructor(cuda.CudaNdarray.zeros((2, 3)))
    assert isinstance(shared_var.type, tcn.CudaNdarrayType)
def test_gpueye():
    """Generator test: T.eye compiled for the GPU must use GpuEye and
    match numpy.eye for square and rectangular shapes."""
    def check(dtype, N, M_=None):
        # Theano does not accept None as a tensor value (and DebugMode
        # also rejects None inputs), so substitute a concrete M.
        M = N if M_ is None else M_
        N_symb = T.iscalar()
        M_symb = T.iscalar()
        k_symb = numpy.asarray(0)
        out = T.eye(N_symb, M_symb, k_symb, dtype=dtype)
        f = theano.function([N_symb, M_symb],
                            B.as_cuda_ndarray_variable(out),
                            mode=mode_with_gpu)
        result = numpy.asarray(f(N, M))
        assert numpy.allclose(result, numpy.eye(N, M_, dtype=dtype))
        assert result.dtype == numpy.dtype(dtype)
        # The graph must actually contain the GPU op.
        assert any([isinstance(node.op, B.GpuEye)
                    for node in f.maker.fgraph.toposort()])
    for dtype in ['float32']:
        yield check, dtype, 3
        # M != N, k = 0
        yield check, dtype, 3, 5
        yield check, dtype, 5, 3
class test_size(unittest.TestCase):
    """
    Ensure the `size` attribute of CUDA tensors behaves as in numpy.
    """
    def _check(self, symbolic, value):
        # Compile a function returning `size` and compare against numpy's.
        assert value.size == theano.function([symbolic], symbolic.size)(value)

    def test_matrix(self):
        self._check(cuda.fmatrix(), numpy.zeros((5, 7), dtype='float32'))

    def test_vector(self):
        self._check(cuda.fvector(), numpy.zeros(7, dtype='float32'))

    def test_scalar(self):
        self._check(cuda.fscalar(), numpy.array(7, dtype='float32'))

    def test_shared(self):
        # NB: this also exercises a higher-order (4-d) tensor.
        y = cuda.CudaNdarray.zeros((1, 2, 3, 4))
        x = cuda.shared_constructor(y)
        assert y.size == theano.function([], x.size)()
import theano.tensor.tests.test_sharedvar

# This tests the case when the shared constructor is given a CudaNdarray as
# input (get_value/set_value may then alias the underlying GPU storage).
test_shared_options = theano.tensor.tests.test_sharedvar.makeSharedTester(
    shared_constructor_=tcn.shared_constructor,
    dtype_='float32',
    get_value_borrow_true_alias_=True,
    shared_borrow_true_alias_=True,#True when the original value is already a CudaNdarray!
    set_value_borrow_true_alias_=True,
    set_value_inplace_=True,
    set_cast_value_inplace_=False,
    shared_constructor_accept_ndarray_=True,
    internal_type_=cuda_ndarray.CudaNdarray,
    test_internal_type_=lambda a: isinstance(a, cuda_ndarray.CudaNdarray),
    theano_fct_=theano.tensor.exp,
    ref_fct_=numpy.exp,
    cast_value_=cuda.as_cuda_array,
    op_by_matrix_=True,
    name='test_shared_options')

# This tests the case when the shared constructor is given a numpy ndarray as
# input (the value must be copied to the GPU, so no aliasing is possible).
test_shared_options2 = theano.tensor.tests.test_sharedvar.makeSharedTester(
    shared_constructor_=tcn.shared_constructor,
    dtype_='float32',
    get_value_borrow_true_alias_=False,
    shared_borrow_true_alias_=False,
    set_value_borrow_true_alias_=False,
    set_value_inplace_=True,
    set_cast_value_inplace_=True,
    shared_constructor_accept_ndarray_=True,
    internal_type_=cuda_ndarray.CudaNdarray,
    test_internal_type_=lambda a: isinstance(a, cuda_ndarray.CudaNdarray),
    theano_fct_=theano.tensor.exp,
    ref_fct_=numpy.exp,
    cast_value_=numpy.asarray,
    op_by_matrix_=True,
    # BUG FIX: this second tester was also named 'test_shared_options',
    # shadowing the first one in test reports; give it a distinct name.
    name='test_shared_options2')
def speed_adv_sub1():
    # Benchmark helper (not a unit test): profiles GpuAdvancedSubtensor1
    # throughput for growing batch sizes using ProfileMode.
    data = numpy.random.rand(50000, 21).astype("float32")
    var = tcn.shared_constructor(data)
    vec = tensor.lvector()
    for batch_size in [100, 1000, 10000, 100000]:
        idx = numpy.random.randint(0, 50000, batch_size)
        # A fresh ProfileMode per batch size so each printed summary only
        # covers that configuration. NOTE: this local name shadows the
        # module-level mode_with_gpu inside this function.
        mode_with_gpu = theano.compile.ProfileMode().including('gpu')
        f = theano.function([vec], var[vec], mode=mode_with_gpu)
        for i in range(100):
            f(idx)
        print "ProfileMode with batch size", batch_size
        mode_with_gpu.print_summary()
if __name__ == '__main__':
    # Ad-hoc manual entry point: runs only a small subset of the tests.
    # NOTE(review): test_gpujoin_assert_cndas is not defined in this part of
    # the file -- presumably defined earlier; confirm it still exists.
    test_many_arg_elemwise()
    test_gpujoin_assert_cndas()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Ion Torrent Systems, Inc. All Rights Reserved
import sys
import os
import subprocess
import time
import json
import numpy
import csv
from optparse import OptionParser
def utf8_decoder(s):
    """Best-effort decode of *s* from UTF-8; return *s* unchanged on failure.

    Byte strings that are not valid UTF-8, and objects without a .decode
    method (e.g. already-decoded text), are returned as-is instead of
    raising.
    """
    try:
        return s.decode('utf-8')
    except (UnicodeError, AttributeError):
        # Narrowed from a bare except: only decode failures and non-bytes
        # inputs are expected here; anything else should surface.
        return s
def utf8_encoder(s):
    """Best-effort encode of *s* to UTF-8; return *s* unchanged on failure.

    Text that cannot be encoded, and objects without an .encode method
    (e.g. already-encoded byte strings), are returned as-is instead of
    raising.
    """
    try:
        return s.encode('utf-8')
    except (UnicodeError, AttributeError):
        # Narrowed from a bare except: only encode failures and non-text
        # inputs are expected here; anything else should surface.
        return s
def printtime(message, *args):
    # Log a timestamped message to stdout. Extra *args are applied
    # printf-style ("%s"-formatting) to the message.
    message = utf8_encoder(message)
    if args:
        message = message % args
    print "[ " + time.strftime('%a %Y-%m-%d %X %Z') + " ] " + message
    # Flush both streams so output interleaves correctly with the
    # subprocesses launched by RunCommand.
    sys.stdout.flush()
    sys.stderr.flush()
def RunCommand(command, description):
    """Echo *description* and *command*, run it through the shell, and
    terminate the whole script with exit status 1 if the command fails."""
    printtime(' ')
    printtime('Task : ' + description)
    printtime('Command : ' + command)
    printtime(' ')
    exit_status = subprocess.call(command, shell=True)
    if exit_status != 0:
        printtime('ERROR: command failed with status %d' % exit_status)
        sys.exit(1)
def execute_output(cmd):
    """Run *cmd* through the shell and return its combined stdout/stderr.

    Returns an empty string if launching the process fails; the exception
    traceback is printed for diagnosis.
    """
    try:
        process = subprocess.Popen(cmd, stderr=subprocess.STDOUT,
                                   stdout=subprocess.PIPE, shell=True)
        return process.communicate()[0]
    except Exception:
        # BUG FIX: `traceback` was used here without ever being imported,
        # so the original exception was masked by a NameError. Import it
        # locally; also narrowed the bare except.
        import traceback
        traceback.print_exc()
        return ''
# -------------------------------------------------------------------------
# splits argument strings in the torrent suite format into (arg_name, arg_value) tuples
def ion_argstr_to_tuples(arg_str):
    """Split a torrent-suite style argument string into a list of
    (arg_name, arg_value) tuples.

    Recognized forms:
      * bare word (e.g. 'mapall', 'stage1')         -> (word, '')
      * '--name=value' (value may itself contain =) -> (name, value)
      * '--name value' separated by a space         -> (name, value)

    IR-29686: a file path may contain an escaped space '\ '. It is
    temporarily replaced by the NUL character (assumed never to appear in
    a real tmap command line) so split() keeps the path intact, and
    restored before returning.
    """
    tuple_list = []
    try:
        arg_words = arg_str.replace('\ ', '\0').split()
        index = 0
        while index < len(arg_words):
            # stand alone command like mapall, or stage1
            if not arg_words[index].startswith('-'):
                tuple_list.append((arg_words[index], ''))
            # argument name, value pair - could be split by space or '='
            else:
                argval = arg_words[index].split('=')
                if len(argval) > 1:
                    # BUG FIX: was '='.join(argval[1]), which iterated the
                    # *characters* of the value ('value' -> 'v=a=l=u=e').
                    # Join the remaining pieces instead, so '--k=a=b'
                    # correctly yields ('--k', 'a=b').
                    tuple_list.append((argval[0], '='.join(argval[1:])))
                elif index < len(arg_words) - 1 and not arg_words[index + 1].startswith('-'):
                    tuple_list.append((arg_words[index], arg_words[index + 1]))
                    index += 1  # also consume the value word
                else:
                    tuple_list.append((arg_words[index], ''))
            index += 1
    except Exception:
        # Narrowed from a bare except; malformed input yields whatever was
        # parsed so far.
        pass
    # IR-29686: restore the escaped spaces ('\0' -> '\ ').
    return [(my_item[0].replace('\0', '\ '), my_item[1].replace('\0', '\ '))
            for my_item in tuple_list]
# -------------------------------------------------------------------------
# tmap call has the structure:
# [path to executable] [mapping algorithm] [global options] [flowspace options] [stage[0-9]+ [stage options] [algorithm [algorithm options]]+]+
def get_consensus_tmap_command(options, parameters, input_bam):
    # Build the tmap command line used to realign consensus reads.
    # The original tmap invocation is read back from consensus.json; its
    # input/output-related options are stripped or replaced below, and the
    # remaining options are kept in their original order.
    # 1) Get path to tmap executable
    path_to_tmap = 'tmap'
    if options.path_to_tmap:
        path_to_tmap = options.path_to_tmap
    else:
        json_tmap_args = parameters.get('meta',{}).get('tmapargs','tmap').split()
        if len(json_tmap_args)>0:
            path_to_tmap = json_tmap_args[0]
    # 2) Get command line arguments from consensus.json file (does not include executable call)
    with open(os.path.join(options.outdir,'consensus.json'), 'r') as cj_in:
        consensus_json = json.load(cj_in)
    tmap_arg_tuples = ion_argstr_to_tuples(consensus_json.get('tmap',{}).get('command_line',{}))
    # A valid tmap call has at least executable, algorithm and a couple of
    # options; fewer than 4 tuples means the json was unusable.
    if len(tmap_arg_tuples) < 4:
        printtime('WARNING: Not able to read valid tmap command line from consensus.json: ' + os.path.join(options.outdir,'consensus.json'))
        sys.exit(1)
    # executable path plus mapping algorithm plus global options input bam, numthreads
    tmap_command = path_to_tmap + ' ' + tmap_arg_tuples[0][0]
    tmap_command += ' -f "' + options.reference + '"'
    tmap_command += ' -r "' + input_bam + '" -n ' + str(options.numthreads)
    tmap_command += ' -o 2'
    # Strip and update tmap arguments regarding input files, etc.
    for tp in tmap_arg_tuples[1:]:
        # Remove some input global options from command string (they were
        # replaced above with this run's own input/output settings).
        if tp[0] in ['-r','--fn-reads',
                     '-s','--fn-sam',
                     '-n','--num-threads',
                     '-f','--fn-fasta',
                     '-k','--shared-memory-key',
                     '--bam-start-vfo', '--bam-end-vfo',
                     '-o', '--output-type']:
            continue
        # Change path to bed file in place, if applicable. NOTE: if neither
        # ptrim_bed nor bedfile is set, the original --bed-file option is
        # silently dropped.
        elif tp[0] in ['--bed-file']:
            if options.ptrim_bed:
                tmap_command += ' --bed-file "' + options.ptrim_bed + '"'
            elif options.bedfile:
                tmap_command += ' --bed-file "' + options.bedfile + '"'
            continue
        # And all other options in their original order
        tmap_command += ' ' + ' '.join(tp).rstrip()
    return tmap_command
# -------------------------------------------------------------------------
# The call to the consensus exectable creates two unsorted bam files, one in need for realignment
# name extensions are hardcoded inside executable
# 1) "consensus_bam_name".aln_not_needed.bam
# 2) "consensus_bam_name".aln_needed.bam
def consensus_alignment_pipeline(options, parameters, consensus_bam_name, remove_tmp_files):
    # Sort/realign the two partial consensus BAMs, merge them into
    # "<consensus_bam_name>.bam", index it, and (optionally) delete the
    # intermediate files. Exits the script via RunCommand on any failure.
    # NOTE: options.numthreads is concatenated directly, so it is assumed
    # to be a string (optparse default '12') -- confirm if callers change.
    # 1) Sort "consensus_bam_name".aln_not_needed.bam
    # In fact, the file is supposed to be sorted. Use samtools sort to guarantee sortedness.
    command = 'samtools sort -m 2000M -l0 -@' + options.numthreads + ' -T "' + consensus_bam_name + '.sort.tmp"'
    command += ' -o "' + consensus_bam_name + '.aln_not_needed.sorted.bam" '
    command += '"' + consensus_bam_name + '.aln_not_needed.bam"'
    RunCommand(command,"Sorting first partial consensus BAM.")
    if remove_tmp_files:
        try:
            os.remove(consensus_bam_name + '.aln_not_needed.bam')
        except:
            print('WARNING: Unable to delete file %s' % (consensus_bam_name + '.aln_not_needed.bam'))
    # 2) Align and sort "consensus_bam_name".aln_needed.bam
    # tmap writes SAM to stdout (-o 2 in get_consensus_tmap_command), which
    # is piped straight into samtools sort.
    command = get_consensus_tmap_command(options, parameters, consensus_bam_name + '.aln_needed.bam')
    command += ' | samtools sort -m 2000M -l0 -@' + options.numthreads + ' -T "' + consensus_bam_name + '.sort.tmp"'
    command += ' -o "' + consensus_bam_name + '.aligned.sorted.bam" '
    RunCommand(command,"Aligning and sorting second partial consensus BAM.")
    if remove_tmp_files:
        try:
            os.remove(consensus_bam_name + '.aln_needed.bam')
        except:
            print('WARNING: Unable to delete file %s' % (consensus_bam_name + '.aln_needed.bam'))
    # 3) Merging the partial BAM files into one
    final_consensus_bam = consensus_bam_name + '.bam'
    command = 'samtools merge -l1 -@' + options.numthreads + ' -c -p -f "' + final_consensus_bam + '"'
    # Note that the order of the two BAM files to be merged matters because I use the "-p" option. The first BAM file must be aligned.sorted.bam
    command += ' "' + consensus_bam_name + '.aligned.sorted.bam" "' + consensus_bam_name + '.aln_not_needed.sorted.bam"'
    RunCommand(command,"Merging aligning partial consensus BAM files.")
    # And finally indexing consensus bam
    RunCommand('samtools index "'+final_consensus_bam+'"','Indexing merged consensus bam')
    if remove_tmp_files:
        try:
            os.remove(consensus_bam_name + ".aln_not_needed.sorted.bam")
            os.remove(consensus_bam_name + ".aligned.sorted.bam")
        except:
            print('WARNING: Unable to delete files %s' % (consensus_bam_name + "*.sorted.bam"))
# -------------------------------------------------------------------------
# write tvc consensus coverage metrics into text files
# TODO: use json files instead of text files
def create_consensus_metrics(options, parameters):
    """Summarize per-target read depth, molecular (family) depth and limit
    of detection (LOD), and write the metrics to
    <outdir>/consensus_metrics.txt.

    Reads <outdir>/targets_depth.txt (tab-separated, with 'read_depth' and
    'family_depth' columns) and uses the project LodManager to convert
    family depths into LOD values.
    """
    # Import LodManager (project-local module, imported lazily so the rest
    # of the script works without it).
    from lod import LodManager
    lod_manager = LodManager()
    # Default LOD parameters, overridden by the tvc parameter json when the
    # corresponding keys are present.
    param_dict = {'min_var_coverage': 2, 'min_variant_score': 3, 'min_callable_prob': 0.98, 'min_allele_freq': 0.0005}
    tvc_param_type_dict = {'min_var_coverage': ('hotspot_min_var_coverage', int), 'min_variant_score': ('hotspot_min_variant_score', float), 'min_callable_prob': ('min_callable_prob', float), 'min_allele_freq': ('hotspot_min_allele_freq', float)}
    # FIX: .items() instead of the Python-2-only .iteritems(); behavior is
    # identical under Python 2 and it keeps the function Python-3 ready.
    for key, type_tuple in tvc_param_type_dict.items():
        param_dict[key] = type_tuple[1](parameters.get('torrent_variant_caller', {}).get(type_tuple[0], param_dict[key]))
    lod_manager.set_parameters(param_dict)
    # Open targets_depth
    targets_depth_path = os.path.join(options.outdir, 'targets_depth.txt')
    with open(targets_depth_path, 'r') as f_target_depth:
        read_depth_list, family_depth_list = zip(*[(int(region_dict['read_depth']), int(region_dict['family_depth'])) for region_dict in csv.DictReader(f_target_depth, delimiter='\t')])
    # A LOD of None means "not callable"; report it as 1.0 (i.e. 100%).
    lod_list = [1.0 if lod is None else lod for lod in map(lod_manager.calculate_lod, family_depth_list)]
    # Get stats (guard against empty target lists)
    read_depth_median = numpy.median(read_depth_list) if read_depth_list else 0
    read_depth_20_quantile = numpy.percentile(read_depth_list, 20) if read_depth_list else 0
    family_depth_median = numpy.median(family_depth_list) if family_depth_list else 0
    family_depth_20_quantile = numpy.percentile(family_depth_list, 20) if family_depth_list else 0
    lod_median = numpy.median(lod_list) if lod_list else 1.0
    lod_80_quantile = numpy.percentile(lod_list, 80) if lod_list else 1.0
    consensus_metrics_path = os.path.join(options.outdir, 'consensus_metrics.txt')
    # LOD of exactly 1.0 is the "not callable" sentinel -> report 'N/A'.
    lines_to_write = ["Median read coverage:\t%d" %int(read_depth_median),
                      "Median molecular coverage:\t%d" %int(family_depth_median),
                      "20th percentile read coverage:\t%d" %int(read_depth_20_quantile),
                      "20th percentile molecular coverage:\t%d" %int(family_depth_20_quantile),
                      "Median LOD percent:\t%s" %('N/A' if lod_median == 1.0 else '%2.4f'%(lod_median * 100.0)),
                      "80th percentile LOD percent:\t%s" %('N/A' if lod_80_quantile == 1.0 else '%2.4f'%(lod_80_quantile * 100.0)),
                      ]
    with open(consensus_metrics_path, 'w') as outFileFW:
        outFileFW.write('\n'.join(lines_to_write))
# -------------------------------------------------------------------------
# Have the pipeline create a samtools depth file in case tvc does not do it
def create_depth_txt(options, out_file):
    """Generate a `samtools depth` coverage file at *out_file*.

    Prefers the post-processed (primer-trimmed) BAM when available,
    otherwise uses the (possibly comma-separated) input BAM list, and
    restricts to the target bed file when one was supplied.
    """
    cmdstr = 'samtools depth'
    # PEP 8: compare against None with `is not None`, not `!=`.
    if options.bedfile is not None:
        cmdstr += ' -b "' + options.bedfile + '"'
    if options.postprocessed_bam:
        cmdstr += ' "' + options.postprocessed_bam + '"'
    else:
        cmdstr += ' ' + ' '.join(options.bamfile.split(','))
    cmdstr += ' > "' + out_file + '"'
    RunCommand(cmdstr,'Generating samtools depth file')
# -------------------------------------------------------------------------
# Run "tcvutils unify_vcf" to merge and post process vcf records
def run_tvcutils_unify(options, parameters):
    # Assemble and run the "tvcutils unify_vcf" command that merges the
    # novel tvc calls, indel assembly results and hotspot annotations into
    # the final TSVC_variants.vcf. Exits the script via RunCommand on error.
    # Get json specified args and replace executable with command line specified executable path
    json_unify_args = parameters.get('meta',{}).get('unifyargs', '').split()
    # Fall back to the default invocation if the json did not specify a
    # well-formed "<exe> unify_vcf" command.
    if len(json_unify_args)<2 or json_unify_args[1] != 'unify_vcf':
        json_unify_args = ['tvcutils', 'unify_vcf']
    if options.path_to_tvcutils:
        json_unify_args[0] = options.path_to_tvcutils
    unify_command = ' '.join(json_unify_args)
    unify_command += ' --novel-tvc-vcf "%s/small_variants.vcf"' % options.outdir
    unify_command += ' --output-vcf "%s/TSVC_variants.vcf"' % options.outdir
    unify_command += ' --reference-fasta "%s"' % options.reference
    # Optional inputs: only pass files that actually exist / were supplied.
    if os.path.isfile("%s/indel_assembly.vcf" % options.outdir):
        unify_command += ' --novel-assembly-vcf "%s/indel_assembly.vcf"' % options.outdir
    if options.hotspot_vcf:
        unify_command += ' --hotspot-annotation-vcf "%s"' % options.hotspot_vcf
    if os.path.isfile(options.outdir + '/tvc_metrics.json'):
        unify_command += ' --tvc-metrics "%s/tvc_metrics.json"' % options.outdir
    if options.ptrim_bed:
        # TS-17940: We use ptrim_bed. Otherwise HS_ONLY tag doesn't work.
        unify_command += ' --target-file "%s"' % options.ptrim_bed
    elif options.bedfile:
        unify_command += ' --target-file "%s"' % options.bedfile
    if options.generate_gvcf == "on":
        unify_command += ' --input-depth "%s/depth.txt"' % options.outdir
        if parameters and 'gen_min_coverage' in parameters.get('freebayes', {}):
            unify_command +=' --min-depth ' + str(parameters['freebayes']['gen_min_coverage'])
    RunCommand(unify_command, 'Unify variants and annotations from all sources (tvc,IndelAssembly,hotpots)')
# -------------------------------------------------------------------------
# Straighten out which executables to use. The order of precedence is
# 1) path explicity supplied through script options (regardless whether executable exists)*
# 2) non-standard executable^ supplied through parameter json (regardless whether executable exists)*
# 3) executable residing in "bin-dir" (if it exists)
# 4) system installed executable
# *) We want to hard error out if an explicitly supplied executable does mnot exists.
# ^) A non standard executable is, e.g., '/path/to/my_tmap', as opposed to system 'tmap'
def get_path_to_executable(parameters, system_exec, json_args, bin_dir):
    """Resolve which executable to run, per the precedence rules above.

    Case 1) (explicit script option) is handled by the caller; this helper
    covers cases 2) through 4).
    """
    meta_cmd = parameters.get('meta', {}).get(json_args, '').split()
    if meta_cmd and meta_cmd[0] != system_exec:
        # 2) non-standard executable supplied through the parameter json
        return meta_cmd[0]
    local_candidate = os.path.join(bin_dir, system_exec) if bin_dir else ''
    if local_candidate and os.path.isfile(local_candidate):
        # 3) executable residing in bin_dir
        return local_candidate
    # 4) system installed executable
    return system_exec
# -------------------------------------------------------------------------
def print_bin_versions(options):
    """Print the detected versions of tvc, tvcutils and tmap, one per line."""
    for version_string in (get_tvc_ver(options.path_to_tvc),
                           get_tvcutils_ver(options.path_to_tvcutils),
                           get_tmap_ver(options.path_to_tmap)):
        print(version_string)
def get_tvc_ver(path_to_tvc):
    """Return a short 'tvc <version>' string parsed from `tvc -v` output."""
    raw = execute_output('%s -v' % path_to_tvc).strip(' \n').replace('\t', ' ')
    suffix = ' - Torrent Variant Caller'
    for line in raw.split('\n'):
        # Expected line format:
        # tvc <MAJOR>.<MINOR>-<BUILD> (<GITHASH>) - Torrent Variant Caller
        if line.startswith('tvc') and suffix in line:
            return line[:line.index(suffix)]
    # Unexpected format: return the whole text, prefixed if necessary.
    return raw if raw.startswith('tvc ') else 'tvc %s' % raw
def get_tvcutils_ver(path_to_tvcutils):
    """Return a short 'tvcutils <version>' string parsed from `tvcutils` output."""
    tvcutils_output_text = execute_output('%s' %path_to_tvcutils).strip('\n').replace('\t', ' ')
    for line in tvcutils_output_text.split('\n'):
        # Assumption:
        # "tvcutils" outputs the text line of the format: tvcutils <MAJOR>.<MINOR>-<BUILD> (<GITHASH>) - Miscellaneous tools used by Torrent Variant Caller plugin and workflow.
        try:
            my_idx = line.index(' - Miscellaneous')
        except ValueError:
            my_idx = None
        if line.startswith('tvcutils') and my_idx is not None:
            return line[:my_idx]
    # Unexpected format: return the whole text.
    # BUG FIX: the fallback previously referenced `tvc_v_output_text`, a
    # variable that only exists in get_tvc_ver, raising NameError whenever
    # the output had an unexpected format.
    return tvcutils_output_text if tvcutils_output_text.startswith('tvcutils ') else 'tvcutils %s' % tvcutils_output_text
def get_tmap_ver(path_to_tmap):
    """Return a short 'tmap <version>' string parsed from `tmap -v` output."""
    import re
    # tmap -v wraps its output in ANSI colour escape codes; strip them out.
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    raw = execute_output('%s -v' % path_to_tmap).strip('\n').replace('\t', ' ')
    raw = ansi_escape.sub('', raw)
    prefix = 'Version: '
    for line in raw.split('\n'):
        # Expected line format:
        # Version: <MAJOR>.<MINOR>.<BUILD> (<GITHASH>) (<TIMESTAMP>)
        if line.startswith(prefix):
            return 'tmap %s' % line[len(prefix):]
    # Unexpected format: return the whole text, prefixed if necessary.
    return raw if raw.startswith('tmap ') else 'tmap %s' % raw
# ===============================================================================
def main():
# Parse and set options
parser = OptionParser()
parser.add_option('-b', '--region-bed', help='BED file specifing regions over which variant calls will be limited or filtered to', dest='bedfile')
parser.add_option('-s', '--hotspot-vcf', help='VCF file specifying exact hotspot positions. TVC will force an evaluation for the alleles specified at the hotspot positions in this VCF file. For details, please visit Hotspot Variants (optional)', dest='hotspot_vcf')
parser.add_option('-i', '--input-bam', help='Input BAM file(s) containing aligned reads. Multiple file names must be concatenated with commas (required)', dest='bamfile')
parser.add_option('-n', '--normal-bam', help='BAM file(s) containing aligned reads for normal (reference) sample (IR only, optional)', dest='normbamfile')
parser.add_option('-g', '--sample-name', help='Sample Name for Test Sample (IR only, optional)', dest='testsamplename')
parser.add_option('-r', '--reference-fasta', help='FASTA file containing reference genome (required)', dest='reference')
parser.add_option('-o', '--output-dir', help='Output directory (default: current)', dest='outdir', default='.')
parser.add_option('-p', '--parameters-file', help='JSON file containing variant calling parameters. This file can be obtained from https://ampliseq.com for your panel. If not provided, default params will be used. for more information about parameters, please visit TVC 4.x Parameters Description (optional, recommended)', dest='paramfile')
parser.add_option( '--error-motifs-dir', help='Directory for error-motifs files', dest='errormotifsdir')
parser.add_option('-m', '--error-motifs', help='System dependent motifs file helps improve variant calling accuracy. For Hi-Q chemistry use $TVC_ROOT_DIR/share/TVC/sse/ampliseqexome_germline_p1_hiq_motifset.txt else use $TVC_ROOT_DIR/share/TVC/sse/motifset.txt (optional)', dest='errormotifsfile')
parser.add_option('-e', '--sse-vcf', help='strand-specific systematic error vcf (optional)', dest='sse_vcf')
parser.add_option('-N', '--num-threads', help='Set TVC number of threads (default: 12)', dest='numthreads',default='12')
parser.add_option('-G', '--generate-gvcf', help='Request generation of gvcf file in addition to vcf (on/off, default off)', dest='generate_gvcf', default='off')
parser.add_option( '--primer-trim-bed', help='Perform primer trimming using provided unmerged BED file. (optional, recommended for ampliseq)', dest='ptrim_bed')
parser.add_option( '--postprocessed-bam',help='If provided, a primer trimmed BAM file will be produced for IGV viewing. This file does not contain the flow space data and should not be used as an input to TVC. Use of this option may increase TVC run time (optional, not recommended)', dest='postprocessed_bam')
parser.add_option( '--run-consensus', help='Run consensus to compress the bam file for molecular tagging (on/off, default off).', dest='run_consensus', default='off')
# Paths to Executables - new options in TS 5.4
parser.add_option( '--bin-tvc', help='Path to tvc executable. Defaults to the system tvc.', dest='path_to_tvc')
parser.add_option( '--bin-tvcutils', help='Path to tvcutils executable. Defaults to the system tvcutils.', dest='path_to_tvcutils')
parser.add_option( '--bin-tmap', help='Path to tmap executable. Defaults to the system tmap.', dest='path_to_tmap')
parser.add_option('-v', '--bin-version', help='Print the versions of tvc, tvcutils, tmap being used in the pipeline.', dest='bin_ver', action='store_true')
# We seem to still need this in TS 5.4 because i can't detangle the plugin code
parser.add_option('-B', '--bin-dir', help='DEPRECATED: Directory path to location of variant caller programs. Defaults to the directory this script is located', dest='bindir')
parser.add_option('-t', '--tvc-root-dir', help='DEPRECATED: Directory path to TVC root directory', dest='tvcrootdir')
(options, args) = parser.parse_args()
# -----------------------------------------------------------------------------------------
# Load parameters json
parameters = {}
if options.paramfile:
try:
with open(options.paramfile, 'r') as json_file:
parameters = json.load(json_file)
if 'pluginconfig' in parameters:
parameters = parameters['pluginconfig']
except:
printtime('ERROR: No parameter file found at: ' + options.paramfile)
sys.exit(1)
# -----------------------------------------------------------------------------------------
# Straighten out which executables to use. The order of precedence is
# 1) path explicity supplied through script options (regardless whether executable exists)*
# 2) non-standard executable^ supplied through parameter json (regardless whether executable exists)*
# 3) executable residing in "bin-dir" (if it exists)
# 4) system installed executable
# *) We want to hard error out if an explicitly supplied executable does mnot exists.
# ^) A non standard executable is, e.g., '/path/to/my_tmap', as opposed to system 'tmap'
# Get executable directory
bin_dir = os.path.dirname(os.path.realpath(__file__)) # TS-14950
if options.tvcrootdir:
#printtime('WARNING: Option --tvc-root-dir is DEPRECATED and will be removed in a future release.')
bin_dir = os.path.join(options.tvcrootdir, 'bin')
elif options.bindir:
#printtime('WARNING: Option --bin-dir is DEPRECATED and will be removed in a future release.')
bin_dir = options.bindir
bin_dir = os.path.normpath(bin_dir)
# Get path to executables
if not options.path_to_tvc: # 1)
options.path_to_tvc = get_path_to_executable(parameters, 'tvc', 'tvcargs', bin_dir)
if not options.path_to_tvcutils:
options.path_to_tvcutils = get_path_to_executable(parameters, 'tvcutils', 'unifyargs', bin_dir)
if not options.path_to_tmap:
options.path_to_tmap = get_path_to_executable(parameters, 'tmap', 'tmapargs', bin_dir)
# -----------------------------------------------------------------------------------------
# Print version and exit:
if options.bin_ver:
print_bin_versions(options)
sys.exit(0)
# -----------------------------------------------------------------------------------------
# And give some feedback about executables being used
printtime('Using tvc binary: ' + options.path_to_tvc)
printtime('Using tvcutils binary: ' + options.path_to_tvcutils)
printtime('Using tmap binary: ' + options.path_to_tmap)
# -----------------------------------------------------------------------------------------
# Verify that all pre-conditions are met
if not options.bamfile or not options.reference:
parser.print_help()
sys.exit(1)
multisample = (options.bamfile.find(",") != -1)
if options.run_consensus.lower() == 'on' and multisample:
printtime('ERROR: consensus currently does not support multisample runs.')
sys.exit(1) # No need to waste time
if not os.path.exists(options.outdir):
os.makedirs(options.outdir)
if not os.path.isdir(options.outdir):
printtime('ERROR: No output directory found at: ' + options.outdir)
sys.exit(1)
if not os.path.exists(options.reference):
printtime('ERROR: No reference file found at: ' + options.reference)
sys.exit(1)
if not os.path.exists(options.reference+'.fai'):
printtime('ERROR: No reference index file found at: ' + options.reference + '.fai')
sys.exit(1)
for bam_filename in options.bamfile.split(','):
if (not os.path.exists(bam_filename)) or (not bam_filename.endswith('.bam')):
printtime('ERROR: No bam file found at: ' + bam_filename)
sys.exit(1)
# If there is no index we try to simply index the existing bam before the workflow start
if not os.path.exists(bam_filename+'.bai'):
bamindex_command = 'samtools index "%s"' % bam_filename
RunCommand(bamindex_command,('Index input bam '+bam_filename))
if options.hotspot_vcf:
if not os.path.exists(options.hotspot_vcf):
printtime('ERROR: No hotspots vcf file found at: ' + options.hotspot_vcf)
sys.exit(1)
if options.ptrim_bed:
if not os.path.exists(options.ptrim_bed):
printtime('ERROR: No primer trim bed file found at: ' + options.ptrim_bed)
sys.exit(1)
if options.bedfile:
if not os.path.exists(options.bedfile):
printtime('ERROR: No target regions bed file found at: ' + options.bedfile)
sys.exit(1)
# write effective_regions.bed file
if options.ptrim_bed and not options.bedfile:
options.bedfile = options.ptrim_bed
if options.ptrim_bed:
tvcutils_command = options.path_to_tvcutils + " validate_bed"
tvcutils_command += ' --reference "%s"' % options.reference
tvcutils_command += ' --target-regions-bed "%s"' % options.ptrim_bed
tvcutils_command += ' --effective-bed "%s"' % os.path.join(options.outdir,'effective_regions.bed')
RunCommand(tvcutils_command,'Write effective bed')
# -----------------------------------------------------------------------------------------
# tvc consensus call and related pipeline operations
tvc_input_bam = options.bamfile
if options.run_consensus.lower() == 'on':
printtime('Generating consensus bam file using consensus ...')
bam_basename = os.path.basename(options.bamfile)
consensus_bam = bam_basename[0:-4] + '_consensus'
tvc_input_bam = os.path.join(options.outdir, consensus_bam+'.bam')
# Get json specified args and replace executable with command line specified executable path
json_consensus_args = parameters.get('meta',{}).get('consensusargs','').split()
if len(json_consensus_args)<2 or json_consensus_args[1] != 'consensus':
json_consensus_args = ['tvc', 'consensus']
if options.path_to_tvc and json_consensus_args[0] == 'tvc':
json_consensus_args[0] = options.path_to_tvc
consensus_command = ' '.join(json_consensus_args)
consensus_command += ' --output-dir %s' % options.outdir
consensus_command += ' --reference "%s"' % options.reference
consensus_command += ' --num-threads 4'
consensus_command += ' --input-bam "%s"' % (options.bamfile)
consensus_command += ' --consensus-bam "%s"' % (consensus_bam)
if options.ptrim_bed:
consensus_command += ' --target-file "%s"' % options.ptrim_bed
elif options.bedfile:
consensus_command += ' --target-file "%s"' % options.bedfile
if options.paramfile:
consensus_command += ' --parameters-file "%s"' % options.paramfile
RunCommand(consensus_command, 'Generate consensus bam file')
# Alignment and merging of the two bam files produced by consensus
consensus_alignment_pipeline(options, parameters, os.path.join(options.outdir, consensus_bam), True)
#Generate Coverage Metrics for QC
printtime('Generating coverage metrics') # TODO: Write json files rather than txt files
create_consensus_metrics(options, parameters)
# -----------------------------------------------------------------------------------------
# TVC call and related pipeline operations
printtime('Calling small INDELs and SNPs using tvc ...')
# Get json specified args and replace executable with command line specified executable path
json_tvc_args = parameters.get('meta',{}).get('tvcargs','').split()
if len(json_tvc_args)<1:
json_tvc_args = ['tvc']
if options.path_to_tvc:
json_tvc_args[0] = options.path_to_tvc
tvc_command = ' '.join(json_tvc_args)
# Concatenate other command line args (json args take precedence)
tvc_command += ' --output-dir %s' % options.outdir
tvc_command += ' --reference "%s"' % options.reference
if options.normbamfile:
tvc_command += ' --input-bam "%s","%s"' % ((options.bamfile, options.normbamfile))
tvc_command += ' --sample-name "%s"' % (options.testsamplename)
else:
tvc_command += ' --input-bam "%s"' % tvc_input_bam
if options.numthreads:
tvc_command += ' --num-threads %s' % options.numthreads
if options.ptrim_bed:
tvc_command += ' --target-file "%s"' % options.ptrim_bed
tvc_command += ' --trim-ampliseq-primers on'
elif options.bedfile:
tvc_command += ' --target-file "%s"' % options.bedfile
if options.postprocessed_bam:
tvc_command += ' --postprocessed-bam "%s"' % options.postprocessed_bam
if options.paramfile:
tvc_command += ' --parameters-file "%s"' % options.paramfile
if options.errormotifsdir:
tvc_command += ' --error-motifs-dir "%s"' % options.errormotifsdir
if options.errormotifsfile:
tvc_command += ' --error-motifs "%s"' % options.errormotifsfile
tvc_command += ' --output-vcf "small_variants.vcf"'
if options.hotspot_vcf:
tvc_command += ' --input-vcf "%s"' % options.hotspot_vcf
if options.sse_vcf:
tvc_command += ' --sse-vcf "%s"' % options.sse_vcf
if multisample:
tvc_command += ' --heal-snps false'
# --------------------------------------------------------
# After creating the command line, we actually can run tvc now
if parameters and parameters['torrent_variant_caller'].get('process_input_positions_only','0') == '1' and parameters['freebayes'].get('use_input_allele_only','0') == '1':
tvc_command += ' --process-input-positions-only on'
tvc_command += ' --use-input-allele-only on'
RunCommand(tvc_command, 'Call Hotspots Only')
else:
RunCommand(tvc_command,'Call small indels and SNPs')
# Long indel assembly is done within tvc and needs not be called in a pipeline operation
# -----------------------------------------------------------------------------------------
# index a post processed bam file - no need to sort, we used an ordered BAM writer
if options.postprocessed_bam:
#bamsort_command = 'samtools sort -m 2G -l1 -@6 %s %s' % (postprocessed_bam_tmp, options.postprocessed_bam[:-4])
#RunCommand(bamsort_command,'Sort postprocessed bam')
bamindex_command = 'samtools index "%s"' % options.postprocessed_bam
RunCommand(bamindex_command,'Index postprocessed bam')
#RunCommand('rm -f ' + postprocessed_bam_tmp, 'Remove unsorted postprocessed bam')
# -----------------------------------------------------------------------------------------
# run tvcutils to merge and post process vcf records
# tvcutils unify_vcf generates both, a compressed and a uncompressed vcf
# Post processing settings can be adjusted through parameter json["meta"]["unifyargs"]
if options.generate_gvcf == "on" and not os.path.isfile(options.outdir + '/depth.txt'):
create_depth_txt(options, options.outdir + '/depth.txt')
run_tvcutils_unify(options, parameters)
# Merge small_variants_filtered.vcf and black_listed.vcf if needed
from merge_and_sort_vcf import merge_and_sort
small_v = os.path.join(options.outdir, 'small_variants_filtered.vcf')
black_v = os.path.join(options.outdir, 'black_listed.vcf')
# merge black_listed.vcf into small_variants_filtered.vcf
if os.path.exists(black_v):
# Don't bother to merge the VCF files if there is no blacklisted alleles, e.g. AmplisSeq Exome.
has_black_listed_allele = False
with open(black_v, 'r') as f_blk:
for line in f_blk:
if line in ['', '\n'] or line.startswith('#'):
continue
has_black_listed_allele = True
break
if has_black_listed_allele:
merge_and_sort([small_v, black_v], '%s.fai' %options.reference, small_v)
try:
os.remove(black_v)
except:
print('WARNING: Unable to delete file %s' % black_v)
# =======================================================================================
# Script entry point: decode argv to unicode, then run the pipeline.
if __name__ == '__main__':
    # list(...) is required on Python 3, where map() returns a lazy iterator;
    # downstream option parsing expects sys.argv to be a real list.
    sys.argv = list(map(utf8_decoder, sys.argv))
    main()
|
from unittest import TestCase
from unittest.mock import Mock
from dataclay.heap.ClientHeapManager import ClientHeapManager
import uuid
class TestClientHeapManager(TestCase):
    """Unit tests for ClientHeapManager add/retrieve behaviour."""

    def test_add_to_heap(self):
        # A mocked runtime suffices: the heap manager only stores objects.
        mock_runtime = Mock()
        mock_object = Mock()
        new_id = uuid.uuid4()
        mock_object.get_object_id.return_value = new_id

        manager = ClientHeapManager(mock_runtime)
        manager.add_to_heap(mock_object)

        # The object must be retrievable under the id it reported.
        self.assertEqual(manager.get_from_heap(new_id), mock_object)

    def test_flush_all(self):
        # Not implemented yet.
        pass

    def test_run_task(self):
        # Not implemented yet.
        pass
|
#
# Copyright (c) 2017 Joy Diamond. All rights reserved.
#
@gem('Sapphire.DualTwig')
def gem():
    # Gem module body: executed by the gem framework to define the
    # 'Sapphire.DualTwig' module -- a tree node ("twig") holding two children.
    require_gem('Sapphire.Tree')

    # Cache of conjured dual twigs, used by the conjure factory below.
    dual_twig_cache = {}
    lookup_dual_twig = dual_twig_cache.get
    store_dual_twig = dual_twig_cache.__setitem__

    @share
    def construct__ab(t, a, b):
        # Shared constructor: store the two children on slots 'a' and 'b'.
        t.a = a
        t.b = b

    @share
    def portray__ab(t):
        # Shared __repr__: '<ClassName <repr of a> <repr of b>>'.
        return arrange('<%s %r %r>', t.__class__.__name__, t.a, t.b)

    @share
    def count_newlines__ab(t):
        # Total newlines contributed by both children.
        return t.a.count_newlines() + t.b.count_newlines()

    @share
    def display_token__ab(t):
        # Human-readable token form: '<display_name <token of a> <token of b>>'.
        return arrange('<%s %s %s>', t.display_name, t.a.display_token(), t.b.display_token())

    @share
    class DualTwig(SapphireTrunk):
        # A trunk node with exactly two children.
        __slots__ = ((
            'a',                        #   Any
            'b',                        #   Any
        ))

        __init__ = construct__ab
        __repr__ = portray__ab

        count_newlines = count_newlines__ab
        display_token = display_token__ab

        def dump_token(t, f, newline = true):
            # Emit '<display_name ' then both children; the second child is
            # dumped without a trailing newline so token_result can decide.
            f.partial('<%s ', t.display_name)
            t.a.dump_token(f)
            r = t.b.dump_token(f, false)

            return f.token_result(r, newline)

        # NOTE(review): order__ab is presumably shared from 'Sapphire.Tree'
        # (required above) -- confirm it is in scope here.
        order = order__ab

        def write(t, w):
            # Write both children, in order, to writer `w`.
            t.a.write(w)
            t.b.write(w)

    # Generic child accessors k1/k2 alias the concrete slots a/b.
    DualTwig.k1 = DualTwig.a
    DualTwig.k2 = DualTwig.b

    @share
    def produce_conjure_dual_twig(name, Meta):
        # Factory producing a cached two-argument conjure function backed
        # by dual_twig_cache.
        return produce_conjure_dual__21(name, Meta, dual_twig_cache, lookup_dual_twig, store_dual_twig)

    append_cache('dual-twig', dual_twig_cache)
|
import time
import numpy as np
from config import Configuration as Cfg
from utils.monitoring import performance, ae_performance
from utils.visualization.diagnostics_plot import plot_diagnostics
def train_network(nnet):
    """
    Train `nnet`.

    Dispatches to plain autoencoder training when a reconstruction loss is
    configured; otherwise runs the main training loop (Deep SVDD or
    classification) with optional per-batch diagnostics in the first epoch,
    learning-rate decay/drop, radius/center updates for SVDD, per-epoch
    logging and periodic checkpointing. Final weights are dumped at the end.
    """

    if Cfg.reconstruction_loss:
        # Reconstruction objective: train as an autoencoder instead.
        nnet.ae_n_epochs = nnet.n_epochs
        train_autoencoder(nnet)
        return

    print("Using %s solver" % nnet.sgd_solver)
    print("Training settings:")
    print("Hard margin: %r\nCenter fixed: %r\nBlock coordinate: %r"%(Cfg.hard_margin, Cfg.center_fixed, Cfg.block_coordinate))

    # Resume from checkpoint epoch (0 when starting fresh).
    epoch = nnet.checkpoint_epoch

    # save initial network parameters for diagnostics
    nnet.save_initial_parameters()

    # NOTE(review): `&` is a bitwise AND of these config flags; equivalent to
    # `and` only if both are plain bools -- presumably they are; confirm.
    if Cfg.nnet_diagnostics & Cfg.e1_diagnostics:
        # initialize diagnostics for first epoch (detailed diagnostics per batch)
        nnet.initialize_diagnostics(Cfg.n_batches + 1)
    else:
        nnet.initialize_diagnostics(nnet.n_epochs)

    # initialize c from mean of network feature representations in deep SVDD if specified
    if Cfg.svdd_loss and Cfg.c_mean_init and nnet.cvar is None:
        initialize_c_as_mean(nnet, Cfg.c_mean_init_n_batches)

    if epoch == 0:
        print("No earlier checkpoint, starting training from initialization")
    else:
        print("Starting training from checkpoint at epoch %d"%nnet.checkpoint_epoch)

    while epoch < nnet.n_epochs:

        # get copy of current network parameters to track differences between epochs
        nnet.copy_parameters()

        # In each epoch, we do a full pass over the training data:
        start_time = time.time()

        # learning rate decay
        if Cfg.lr_decay:
            decay_learning_rate(nnet, epoch)

        if Cfg.lr_drop and (epoch == Cfg.lr_drop_in_epoch):
            # Drop the learning rate in epoch specified in Cfg.lr_drop_after_epoch by factor Cfg.lr_drop_factor
            # Thus, a simple separation of learning into a "region search" and "finetuning" stage.
            lr_new = Cfg.floatX((1.0 / Cfg.lr_drop_factor) * Cfg.learning_rate.get_value())
            print("")
            print("Learning rate drop in epoch {} from {:.6f} to {:.6f}".format(
                epoch, Cfg.floatX(Cfg.learning_rate.get_value()), lr_new))
            print("")
            Cfg.learning_rate.set_value(lr_new)

        # train on epoch
        i_batch = 0
        for batch in nnet.data.get_epoch_train():

            if Cfg.nnet_diagnostics & Cfg.e1_diagnostics:
                # Evaluation before training: performance at initialization.
                if (epoch == 0) and (i_batch == 0):
                    _, _ , _ = performance(nnet, which_set='train', epoch=i_batch)
                    if nnet.data.n_val > 0:
                        _, _ , _ = performance(nnet, which_set='val', epoch=i_batch)
                    _, _ , _ = performance(nnet, which_set='test', epoch=i_batch)

            # train
            inputs, targets, _ = batch
            if Cfg.svdd_loss:
                if Cfg.block_coordinate:
                    # R (and c) are updated in closed form elsewhere, so R is
                    # excluded from gradient-based updates here.
                    _, _ = nnet.backprop_without_R(inputs, targets)
                elif Cfg.hard_margin:
                    _, _ = nnet.backprop_ball(inputs, targets)
                else:
                    _, _ = nnet.backprop(inputs, targets)
            else:
                _, _ = nnet.backprop(inputs, targets)

            if Cfg.nnet_diagnostics & Cfg.e1_diagnostics:
                # Get detailed diagnostics (per batch) for the first epoch
                if epoch == 0:
                    _, _ , _ = performance(nnet, which_set='train', epoch=i_batch+1)
                    if nnet.data.n_val > 0:
                        _, _ , _ = performance(nnet, which_set='val', epoch=i_batch + 1)
                    _, _ , _ = performance(nnet, which_set='test', epoch=i_batch+1)
                    nnet.copy_parameters()

            i_batch += 1

        if (epoch == 0) & Cfg.nnet_diagnostics & Cfg.e1_diagnostics:
            # Plot diagnostics for first epoch
            plot_diagnostics(nnet, Cfg.xp_path, Cfg.title_suffix, xlabel="Batches", file_prefix="e1_")

            # Re-initialize diagnostics on epoch level
            nnet.initialize_diagnostics(nnet.n_epochs)
            nnet.copy_initial_parameters_to_cache()

        # Performance on training set (use forward pass with deterministic=True) to get the exact training objective
        train_objective, train_accuracy , _ = performance(nnet, which_set='train', epoch=epoch, print_=True)

        # Adjust radius R for the SVDD hard-margin objective
        if Cfg.svdd_loss and (Cfg.hard_margin or (Cfg.block_coordinate and (epoch < Cfg.warm_up_n_epochs))):
            # set R to be the (1-nu)-th quantile of distances
            out_idx = int(np.floor(nnet.data.n_train * Cfg.nu.get_value()))
            sort_idx = nnet.diag['train']['scores'][:, epoch].argsort()
            # scores are stored relative to the current R, so R is added back.
            R_new = nnet.diag['train']['scores'][sort_idx, epoch][-out_idx] + nnet.Rvar.get_value()
            nnet.Rvar.set_value(Cfg.floatX(R_new))

        # Update radius R and center c if block coordinate optimization is chosen
        if Cfg.block_coordinate and (epoch >= Cfg.warm_up_n_epochs) and ((epoch % Cfg.k_update_epochs) == 0):
            if Cfg.center_fixed:
                nnet.update_R()
            else:
                nnet.update_R_c()

        if Cfg.nnet_diagnostics:
            # Performance on validation and test set
            if nnet.data.n_val > 0:
                val_objective, val_accuracy , _ = performance(nnet, which_set='val', epoch=epoch, print_=True)
            test_objective, test_accuracy , _ = performance(nnet, which_set='test', epoch=epoch, print_=True)

            # log performance
            nnet.log['train_objective'].append(train_objective)
            nnet.log['train_accuracy'].append(train_accuracy)
            if nnet.data.n_val > 0:
                nnet.log['val_objective'].append(val_objective)
                nnet.log['val_accuracy'].append(val_accuracy)
            nnet.log['test_objective'].append(test_objective)
            nnet.log['test_accuracy'].append(test_accuracy)
            nnet.log['time_stamp'].append(time.time() - nnet.clock)

        print("Epoch {} of {} took {:.3f}s".format(epoch + 1, nnet.n_epochs, time.time() - start_time))
        print('')

        # # save model as required
        # if epoch + 1 == nnet.save_at:
        #     nnet.dump_weights(nnet.save_to)

        epoch += 1

        # Save checkpoint
        if Cfg.use_checkpoint and epoch % Cfg.checkpoint_interval == 0:
            nnet.save_checkpoint(epoch)

    # save train time
    nnet.train_time = time.time() - nnet.clock

    # Get final performance in last epoch if no running diagnostics are taken
    if not Cfg.nnet_diagnostics:
        nnet.initialize_diagnostics(1)
        nnet.copy_parameters()

        # perform forward passes on train, val, and test set
        print("Get final performance...")

        train_objective, train_accuracy , _ = performance(nnet, which_set='train', epoch=0, print_=True)
        if nnet.data.n_val > 0:
            val_objective, val_accuracy , _ = performance(nnet, which_set='val', epoch=0, print_=True)
        test_objective, test_accuracy , _ = performance(nnet, which_set='test', epoch=0, print_=True)

        print("Evaluation completed.")

        # log performance
        nnet.log['train_objective'].append(train_objective)
        nnet.log['train_accuracy'].append(train_accuracy)
        if nnet.data.n_val > 0:
            nnet.log['val_objective'].append(val_objective)
            nnet.log['val_accuracy'].append(val_accuracy)
        nnet.log['test_objective'].append(test_objective)
        nnet.log['test_accuracy'].append(test_accuracy)
        nnet.log['time_stamp'].append(time.time() - nnet.clock)

    nnet.stop_clock()
    # Everything after training counts towards test time.
    nnet.test_time = time.time() - (nnet.train_time + nnet.clock)

    # save final weights (and best weights in case of two-class dataset)
    nnet.dump_weights("{}/weights_final.p".format(Cfg.xp_path))
    if nnet.data.n_classes == 2:
        nnet.dump_best_weights("{}/weights_best_ep.p".format(Cfg.xp_path))
def test_network(nnet):  # untested. TODO: remove if not used
    """Evaluate the network once on every split and record the outcome."""
    nnet.initialize_diagnostics(1)

    # Single deterministic forward pass per split.
    print("Get final performance...")
    obj = {}
    acc = {}
    obj['train'], acc['train'], _ = performance(nnet, which_set='train', epoch=0, print_=True)
    if nnet.data.n_val > 0:
        obj['val'], acc['val'], _ = performance(nnet, which_set='val', epoch=0, print_=True)
    obj['test'], acc['test'], _ = performance(nnet, which_set='test', epoch=0, print_=True)
    print("Evaluation completed.")

    # Append results to the network log: train first, optional val, then test.
    for split in ('train', 'val', 'test'):
        if split == 'val' and nnet.data.n_val == 0:
            continue
        nnet.log['%s_objective' % split].append(obj[split])
        nnet.log['%s_accuracy' % split].append(acc[split])
    nnet.log['time_stamp'].append(time.time() - nnet.clock)
def decay_learning_rate(nnet, epoch):
    """
    Decay the learning rate once the epoch given in Cfg.lr_decay_after_epoch
    has been reached (1/epoch schedule scaled to start at that epoch).
    """
    # Decay only makes sense for non-adaptive solvers.
    assert nnet.solver in ("sgd", "momentum", "adam")

    if epoch < Cfg.lr_decay_after_epoch:
        return

    scale = Cfg.lr_decay_after_epoch / Cfg.floatX(epoch)
    Cfg.learning_rate.set_value(Cfg.floatX(scale * nnet.learning_rate_init))
def initialize_c_as_mean(nnet, n_batches, eps=0.1):
    """
    Initialize center c as the mean of the final-layer representations of the
    samples propagated in the first `n_batches` batches, then initialize
    radius R at the (1-nu)-th quantile of the resulting squared distances.

    Parameters
    ----------
    nnet : network wrapper exposing `data`, `forward`, `cvar` and `Rvar`.
    n_batches : int or "all"
        Number of batches to draw representations from; "all" (or any value
        larger than Cfg.n_batches) uses every batch.
    eps : float
        Components of c closer to zero than this are pushed out to +-eps.
    """
    print("Initializing c...")

    # Number of batches (and thereby samples) to initialize from.
    # Direct equality comparison replaces the Python-2-only `basestring`
    # check (NameError on Python 3) and behaves identically for str/unicode.
    # Short-circuit keeps the str-vs-int comparison from ever happening.
    if n_batches == "all" or n_batches > Cfg.n_batches:
        n_batches = Cfg.n_batches

    rep_list = list()

    i_batch = 0
    for batch in nnet.data.get_epoch_train():
        inputs, targets, _ = batch
        if i_batch == n_batches:
            break
        # b_rep is the batch's final-layer representation (6th output).
        _, _, _, _, _, b_rep, _, _, _, _ = nnet.forward(inputs, targets)
        rep_list.append(b_rep)
        i_batch += 1

    reps = np.concatenate(rep_list, axis=0)
    c = np.mean(reps, axis=0)

    # If c_i is too close to 0 in dimension i, set to +-eps.
    # Reason: a zero unit can be trivially matched with zero weights.
    c[(abs(c) < eps) & (c < 0)] = -eps
    c[(abs(c) < eps) & (c > 0)] = eps
    nnet.cvar.set_value(c)

    # Initialize R at the (1-nu)-th quantile of squared distances to c.
    dist_init = np.sum((reps - c) ** 2, axis=1)
    out_idx = int(np.floor(len(reps) * Cfg.nu.get_value()))
    sort_idx = dist_init.argsort()
    nnet.Rvar.set_value(Cfg.floatX(dist_init[sort_idx][-out_idx]))

    print("c initialized.")
def train_autoencoder(nnet):
    """
    Train `nnet` with the autoencoder (reconstruction) objective.

    Runs the epoch loop with optional learning-rate drop, tracks per-sample
    reconstruction scores, evaluates on val/test when diagnostics are on,
    checkpoints periodically, dumps the learned weights, and optionally plots
    reconstructions/diagnostics/filters.
    """

    print("Training autoencoder with %s solver" % nnet.sgd_solver)

    # Resume from checkpoint epoch (0 when starting fresh).
    epoch = nnet.ae_checkpoint_epoch

    if Cfg.ae_diagnostics:
        nnet.initialize_ae_diagnostics(nnet.ae_n_epochs)

    if epoch == 0:
        print("No ae checkpoint, starting from initialization")
    else:
        print("Starting training from checkpoint at epoch %d"%(epoch+1))

    while epoch < nnet.ae_n_epochs:

        start_time = time.time()

        if Cfg.ae_lr_drop and (epoch == Cfg.ae_lr_drop_in_epoch):
            # Drop the learning rate in epoch specified in Cfg.ae_lr_drop_after_epoch by factor Cfg.ae_lr_drop_factor
            # Thus, a simple separation of learning into a "region search" and "finetuning" stage.
            lr_new = Cfg.floatX((1.0 / Cfg.ae_lr_drop_factor) * Cfg.learning_rate.get_value())
            print("")
            print("Learning rate drop in epoch {} from {:.6f} to {:.6f}".format(
                epoch, Cfg.floatX(Cfg.learning_rate.get_value()), lr_new))
            print("")
            Cfg.learning_rate.set_value(lr_new)

        # In each epoch, we do a full pass over the training data:
        l2 = 0
        batches = 0
        train_err = 0
        train_scores = np.empty(nnet.data.n_train)

        for batch in nnet.data.get_epoch_train():
            inputs, _, batch_idx = batch
            # Per-sample scores are written back into their global positions.
            start_idx = batch_idx * Cfg.batch_size
            stop_idx = min(nnet.data.n_train, start_idx + Cfg.batch_size)

            err, l2, b_scores = nnet.ae_backprop(inputs)

            # err is a batch mean; weight by batch size before averaging below.
            train_err += err * inputs.shape[0]
            train_scores[start_idx:stop_idx] = b_scores.flatten()
            batches += 1

        train_err /= nnet.data.n_train

        # save train diagnostics and test performance on val and test data if specified
        if Cfg.ae_diagnostics:
            nnet.save_ae_diagnostics('train', epoch, train_err, train_scores, l2)

            # Performance on validation and test set
            if nnet.data.n_val > 0:
                val_err, _ = ae_performance(nnet, which_set='val', epoch=epoch)
            test_err, _ = ae_performance(nnet, which_set='test', epoch=epoch)

        # print results for epoch
        print("{:32} {:.5f}".format("Train error:", train_err))
        if Cfg.ae_diagnostics:
            if nnet.data.n_val > 0:
                print("{:32} {:.5f}".format("Val error:", val_err))
            print("{:32} {:.5f}".format("Test error:", test_err))
        print("Epoch {} of {} took {:.3f}s".format(epoch + 1, nnet.ae_n_epochs, time.time() - start_time))
        print("")

        epoch += 1

        # Save checkpoint
        if Cfg.use_checkpoint and epoch % Cfg.checkpoint_interval == 0:
            nnet.save_ae_checkpoint(epoch)

    # Get final performance in last epoch if no running diagnostics are taken
    if not Cfg.ae_diagnostics:
        nnet.initialize_ae_diagnostics(1)

        # perform forward passes on train, val, and test set
        print("Get final performance...")

        _, _ = ae_performance(nnet, which_set='train', epoch=0)
        if nnet.data.n_val > 0:
            _, _ = ae_performance(nnet, which_set='val', epoch=0)
        _, _ = ae_performance(nnet, which_set='test', epoch=0)

        print("Evaluation completed.")

    # save weights
    if Cfg.pretrain:
        nnet.dump_weights("{}/ae_pretrained_weights.p".format(Cfg.xp_path), pretrain=True)
    else:
        nnet.dump_weights("{}/weights_final.p".format(Cfg.xp_path))

    # if image data plot some random reconstructions
    if nnet.data._X_train.ndim == 4:
        from utils.visualization.mosaic_plot import plot_mosaic
        n_img = 32
        random_idx = np.random.choice(nnet.data.n_train, n_img, replace=False)
        _, _, _, reps = nnet.ae_forward(nnet.data._X_train[random_idx, ...])
        title = str(n_img) + " random autoencoder reconstructions"
        plot_mosaic(reps, title=title, export_pdf=(Cfg.xp_path + "/ae_reconstructions"))
        title = str(n_img) + " random autoencoder inputs"
        plot_mosaic(nnet.data._X_train[random_idx, ...],title=title, export_pdf=(Cfg.xp_path + "/ae_inputs"))

    # plot diagnostics if specified
    # NOTE(review): `&` is a bitwise AND of these config flags; equivalent to
    # `and` only if both are plain bools -- presumably they are; confirm.
    if Cfg.ae_diagnostics & Cfg.pretrain:
        from utils.visualization.diagnostics_plot import plot_ae_diagnostics
        from utils.visualization.filters_plot import plot_filters

        # common suffix for plot titles
        str_lr = "lr = " + str(nnet.ae_learning_rate)
        C = int(Cfg.C.get_value())
        if not Cfg.weight_decay:
            C = None
        str_C = "C = " + str(C)
        title_suffix = "(" + nnet.ae_solver + ", " + str_C + ", " + str_lr + ")"

        # plot diagnostics
        plot_ae_diagnostics(nnet, Cfg.xp_path, title_suffix)

        # plot filters
        plot_filters(nnet, Cfg.xp_path, title_suffix, file_prefix="ae_", pretrain=True)
|
# -*- coding: utf-8 -*-
import unittest
from src.pyutil.datetime_jp import (
now,
today,
isPast,
isFuture,
changeTimezone,
futureDate,
pastDate,
)
from datetime import datetime, timedelta, date
import time
import pytz
# reference : pytz timezone list
# https://gist.github.com/heyalexej/8bf688fd67d7199be4a1682b3eec7568
class UtDatetimeJp(unittest.TestCase):
    """Unit tests for the JST-aware datetime helpers in datetime_jp."""

    def test_now(self):
        jst = pytz.timezone("Asia/Tokyo")
        expected: datetime = datetime.now(tz=jst)
        actual = now()
        # type test
        self.assertIs(type(actual), datetime)
        # value test (allowed an error of 1 second)
        self.assertAlmostEqual(expected, actual, delta=timedelta(seconds=1))

    def test_today(self):
        jst = pytz.timezone("Asia/Tokyo")
        expected: date = datetime.fromtimestamp(time.time(), tz=jst).date()
        actual = today()
        # type test
        self.assertIs(type(actual), date)
        # value test
        self.assertEqual(actual, expected)

    def test_isPast_case1(self):
        ut_arg: datetime = now() + timedelta(seconds=-1)
        expected: bool = True
        actual = isPast(ut_arg)
        # type test
        self.assertIs(type(actual), bool)
        # value test
        self.assertEqual(actual, expected)

    def test_isPast_case2(self):
        ut_arg: date = today() + timedelta(days=-1)
        expected: bool = True
        actual = isPast(ut_arg)
        # type test
        self.assertIs(type(actual), bool)
        # value test
        self.assertEqual(actual, expected)

    def test_isPast_case3(self):
        ut_arg: datetime = now() + timedelta(seconds=1)
        expected: bool = False
        actual = isPast(ut_arg)
        # type test
        self.assertIs(type(actual), bool)
        # value test
        self.assertEqual(actual, expected)

    def test_isPast_case4(self):
        ut_arg: date = today() + timedelta(days=1)
        expected: bool = False
        actual = isPast(ut_arg)
        # type test
        self.assertIs(type(actual), bool)
        # value test
        self.assertEqual(actual, expected)

    def test_isFuture_case1(self):
        ut_arg: datetime = now() + timedelta(seconds=1)
        expected: bool = True
        actual = isFuture(ut_arg)
        # type test
        self.assertIs(type(actual), bool)
        # value test
        self.assertEqual(actual, expected)

    def test_isFuture_case2(self):
        ut_arg: date = today() + timedelta(days=1)
        expected: bool = True
        actual = isFuture(ut_arg)
        # type test
        self.assertIs(type(actual), bool)
        # value test
        self.assertEqual(actual, expected)

    def test_isFuture_case3(self):
        ut_arg: datetime = now() + timedelta(seconds=-1)
        expected: bool = False
        actual = isFuture(ut_arg)
        # type test
        self.assertIs(type(actual), bool)
        # value test
        self.assertEqual(actual, expected)

    def test_isFuture_case4(self):
        ut_arg: date = today() + timedelta(days=-1)
        expected: bool = False
        actual = isFuture(ut_arg)
        # type test
        self.assertIs(type(actual), bool)
        # value test
        self.assertEqual(actual, expected)

    def test_changeTimezone(self):
        hawaii = pytz.timezone("US/Hawaii")
        ut_arg: datetime = now()  # tokyo datetime
        ut_arg2 = hawaii
        expected: datetime = datetime.now(tz=hawaii)  # hawaii datetime
        # change timezone to hawaii from tokyo.
        actual = changeTimezone(ut_arg, ut_arg2)
        # type test
        self.assertIs(type(actual), datetime)
        # value test (allowed an error of 1 second)
        self.assertAlmostEqual(expected, actual, delta=timedelta(seconds=1))

    def test_futureDate_case1(self):
        ut_arg: date = date(2020, 1, 1)
        ut_arg2: int = 30
        expected: date = date(2020, 1, 31)
        actual = futureDate(ut_arg, ut_arg2)
        # type test
        self.assertIs(type(actual), date)
        # value test
        self.assertEqual(actual, expected)

    def test_futureDate_case2(self):
        ut_arg: date = date(2020, 1, 1)
        ut_arg2: int = 1
        expected: date = date(2020, 2, 1)
        actual = futureDate(ut_arg, months=ut_arg2)
        # type test
        self.assertIs(type(actual), date)
        # value test
        self.assertEqual(actual, expected)

    def test_futureDate_case3(self):
        ut_arg: date = date(2020, 1, 1)
        expected = None
        actual = futureDate(ut_arg)
        # value test
        self.assertEqual(actual, expected)

    def test_pastDate_case1(self):
        ut_arg: date = date(2020, 1, 1)
        ut_arg2: int = 30
        expected: date = date(2019, 12, 2)
        actual = pastDate(ut_arg, ut_arg2)
        # type test
        self.assertIs(type(actual), date)
        # value test
        self.assertEqual(actual, expected)

    def test_pastDate_case2(self):
        ut_arg: date = date(2020, 1, 1)
        ut_arg2: int = 1
        expected: date = date(2019, 12, 1)
        actual = pastDate(ut_arg, months=ut_arg2)
        # type test
        self.assertIs(type(actual), date)
        # value test
        self.assertEqual(actual, expected)

    def test_pastDate_case3(self):
        ut_arg: date = date(2020, 1, 1)
        expected = None
        # Fixed copy-paste bug: this case previously called futureDate, so
        # pastDate's no-argument behaviour was never exercised.
        actual = pastDate(ut_arg)
        # value test
        self.assertEqual(actual, expected)

    def test_args(self):
        with self.assertRaises(TypeError):
            isPast("2021/01/01")
        with self.assertRaises(TypeError):
            isFuture("2021/01/01")
        with self.assertRaises(TypeError):
            changeTimezone(datetime.now())
        with self.assertRaises(TypeError):
            changeTimezone(datetime.now(), "US/Hawaii")
|
import json
import os.path
import pathlib
import shutil
import time
import typing
from mitmproxy import contentviews
from mitmproxy import ctx
from mitmproxy import flowfilter
from mitmproxy import io, flow
from mitmproxy import version
from mitmproxy.tools.web.app import flow_to_json
web_dir = pathlib.Path(__file__).absolute().parent
def save_static(path: pathlib.Path) -> None:
    """
    Save the files for the static web view.
    """
    static_dst = path / "static"
    # We want to overwrite the static files to keep track of the update.
    if static_dst.exists():
        shutil.rmtree(str(static_dst))
    shutil.copytree(str(web_dir / "static"), str(static_dst))
    shutil.copyfile(str(web_dir / 'templates' / 'index.html'), str(path / "index.html"))
    # Flag the bundled JS so the frontend knows it is running statically.
    with open(str(static_dst / "static.js"), "w") as f:
        f.write("MITMWEB_STATIC = true;")
def save_filter_help(path: pathlib.Path) -> None:
    """Dump the flow-filter help text as JSON for the static frontend."""
    help_file = path / 'filter-help.json'
    with open(str(help_file), 'w') as fp:
        json.dump({"commands": flowfilter.help}, fp)
def save_settings(path: pathlib.Path) -> None:
    """Write the mitmproxy version as settings.json for the static frontend."""
    settings_file = path / 'settings.json'
    with open(str(settings_file), 'w') as fp:
        json.dump({"version": version.VERSION}, fp)
def save_flows(path: pathlib.Path, flows: typing.Iterable[flow.Flow]) -> None:
    """Serialize all flows to flows.json in the static-viewer format."""
    serialized = [flow_to_json(f) for f in flows]
    with open(str(path / 'flows.json'), 'w') as fp:
        json.dump(serialized, fp)
def save_flows_content(path: pathlib.Path, flows: typing.Iterable[flow.Flow]) -> None:
    """
    Dump raw content and the rendered 'Auto' content view for every
    request/response of every flow, under path/flows/<flow id>/<message>/.
    """
    for f in flows:
        for m in ('request', 'response'):
            message = getattr(f, m)
            message_path = path / "flows" / f.id / m
            os.makedirs(str(message_path / "content"), exist_ok=True)

            # Raw message body.
            with open(str(message_path / 'content.data'), 'wb') as content_file:
                # don't use raw_content here as this is served with a default content type
                if message:
                    content_file.write(message.content)
                else:
                    content_file.write(b'No content.')

            # content_view: render with the 'Auto' content view and time it.
            t = time.time()
            if message:
                description, lines, error = contentviews.get_message_content_view(
                    'Auto', message
                )
            else:
                description, lines = 'No content.', []
            if time.time() - t > 0.1:
                # Rendering took noticeably long; surface it in the event log.
                ctx.log(
                    "Slow content view: {} took {}s".format(
                        description.strip(),
                        round(time.time() - t, 1)
                    ),
                    "info"
                )

            # Pre-rendered view, served to the static frontend as JSON.
            with open(str(message_path / "content" / "Auto.json"), "w") as content_view_file:
                json.dump(
                    dict(lines=list(lines), description=description),
                    content_view_file
                )
class StaticViewer:
    # TODO: make this a command at some point.

    def load(self, loader):
        loader.add_option(
            "web_static_viewer", typing.Optional[str], "",
            "The path to output a static viewer."
        )

    def configure(self, updated):
        # Export when the option is set/changed and a capture file is given.
        if "web_static_viewer" in updated and ctx.options.web_static_viewer:
            captured = io.read_flows_from_paths([ctx.options.rfile])
            target = pathlib.Path(ctx.options.web_static_viewer).expanduser()
            self.export(target, captured)

    def export(self, path: pathlib.Path, flows: typing.Iterable[flow.Flow]) -> None:
        """Write a complete, self-contained static viewer to `path`."""
        save_static(path)
        save_filter_help(path)
        save_flows(path, flows)
        save_flows_content(path, flows)
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.version import LooseVersion
import numpy as np
from nvtabular.dispatch import DataFrameType, _array, annotate
from ..tags import Tags
from .operator import ColumnSelector, Operator
class Bucketize(Operator):
    """This operation transforms continuous features into categorical features
    with bins based on the provided bin boundaries.

    Example usage::

        #
        cont_names = ['cont1', 'cont2']
        boundaries = {
            'cont1': [-50, 0, 50],
            'cont2': [0, 25, 50, 75, 100]
        }
        bucketize_op = cont_names >> ops.Bucketize(boundaries)
        processor = nvt.Workflow(bucketize_op)

    Parameters
    ----------
    boundaries : int, dict or callable
        Defines how to transform the continous values into bins
    """

    def __init__(self, boundaries):
        # cupy.digitize is only available from cupy 8.0.0 onwards.
        try:
            import cupy

            self.use_digitize = LooseVersion(cupy.__version__) >= "8.0.0"
        except ImportError:
            # No cupy installed -> cpu-backed data; numpy always has digitize.
            self.use_digitize = True

        # Normalize `boundaries` into a callable: column name -> bin edges.
        if callable(boundaries):
            self.boundaries = boundaries
        elif isinstance(boundaries, dict):
            self.boundaries = lambda col: boundaries[col]
        elif isinstance(boundaries, (list, tuple)):
            self.boundaries = lambda col: boundaries
        else:
            raise TypeError(
                "`boundaries` must be dict, callable, or list, got type {}".format(type(boundaries))
            )
        super().__init__()

    @annotate("Bucketize_op", color="darkgreen", domain="nvt_python")
    def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
        new_df = type(df)()
        for name in col_selector.names:
            edges = self.boundaries(name)
            if self.use_digitize:
                new_df[name] = np.digitize(
                    df[name].values,
                    _array(edges, like_df=df),
                    right=False,
                )
            else:
                # TODO: Remove use_digitize=False code path
                # once cupy>=8.0.0 is required.
                # Each boundary crossed adds one to the bucket index.
                bucket = 0
                for edge in edges:
                    bucket += (df[name] >= edge).astype("int")
                new_df[name] = bucket
        return new_df

    def output_tags(self):
        return [Tags.CATEGORICAL]

    def _get_dtypes(self):
        return np.int64

    transform.__doc__ = Operator.transform.__doc__
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
get_and_dump_features,
)
def get_parser():
    """Build the CLI parser for acoustic feature extraction and dumping."""
    parser = argparse.ArgumentParser(
        description="Compute and dump log mel fbank features."
    )
    parser.add_argument(
        "--feature_type",
        type=str,
        choices=["logmel", "hubert", "w2v2", "cpc"],
        default=None,
        help="Acoustic feature type",
    )
    parser.add_argument(
        "--manifest_path",
        type=str,
        default=None,
        help="Manifest file containing the root dir and file names",
    )
    # NOTE: --out_features_path was previously registered twice, which makes
    # argparse raise ArgumentError when the parser is built; register it once.
    parser.add_argument(
        "--out_features_path",
        type=str,
        default=None,
        help="Features file path to write to",
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        help="Pretrained acoustic model checkpoint",
    )
    parser.add_argument(
        "--layer",
        type=int,
        help="The layer of the pretrained model to extract features from",
        default=-1,
    )
    parser.add_argument(
        "--sample_pct",
        type=float,
        help="Percent data to use for K-means training",
        default=0.1,
    )
    return parser
def get_logger():
    """Configure root logging (once) and return this module's logger."""
    fmt = "[%(asctime)s] [%(levelname)s]: %(message)s"
    logging.basicConfig(format=fmt, level=logging.INFO)
    return logging.getLogger(__name__)
if __name__ == "__main__":
"""
Example command:
python ~/speechbot/clustering/dump_logmelfank_feats.py \
--manifest_path /checkpoint/kushall/data/LJSpeech-1.1/asr_input_wavs_16k/train.tsv
--out_features_path /checkpoint/kushall/experiments/speechbot/logmelfbank/features/ljspeech/train.npy
"""
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
logger.info(f"Extracting {args.feature_type} acoustic features...")
get_and_dump_features(
feature_type=args.feature_type,
checkpoint_path=args.checkpoint_path,
layer=args.layer,
manifest_path=args.manifest_path,
sample_pct=args.sample_pct,
flatten=True,
out_features_path=args.out_features_path,
)
logger.info(f"Saved extracted features at {args.out_features_path}")
|
"""
Support for Modbus.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/modbus/
"""
import logging
import threading
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
CONF_HOST, CONF_METHOD, CONF_PORT)
DOMAIN = 'modbus'

REQUIREMENTS = ['pymodbus==1.3.0rc1']

# Type of network
CONF_BAUDRATE = 'baudrate'
CONF_BYTESIZE = 'bytesize'
CONF_STOPBITS = 'stopbits'
CONF_TYPE = 'type'
CONF_PARITY = 'parity'

# Configuration schema for a serial (RTU/ASCII) Modbus connection.
SERIAL_SCHEMA = {
    vol.Required(CONF_BAUDRATE): cv.positive_int,
    vol.Required(CONF_BYTESIZE): vol.Any(5, 6, 7, 8),
    vol.Required(CONF_METHOD): vol.Any('rtu', 'ascii'),
    vol.Required(CONF_PORT): cv.string,
    vol.Required(CONF_PARITY): vol.Any('E', 'O', 'N'),
    vol.Required(CONF_STOPBITS): vol.Any(1, 2),
    vol.Required(CONF_TYPE): 'serial',
}

# Configuration schema for a TCP or UDP Modbus connection.
ETHERNET_SCHEMA = {
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_PORT): cv.positive_int,
    vol.Required(CONF_TYPE): vol.Any('tcp', 'udp'),
}

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Any(SERIAL_SCHEMA, ETHERNET_SCHEMA)
}, extra=vol.ALLOW_EXTRA)

_LOGGER = logging.getLogger(__name__)

SERVICE_WRITE_REGISTER = 'write_register'

# Service-call attributes for the write_register service.
ATTR_ADDRESS = 'address'
ATTR_UNIT = 'unit'
ATTR_VALUE = 'value'

SERVICE_WRITE_REGISTER_SCHEMA = vol.Schema({
    vol.Required(ATTR_UNIT): cv.positive_int,
    vol.Required(ATTR_ADDRESS): cv.positive_int,
    vol.Required(ATTR_VALUE): vol.All(cv.ensure_list, [cv.positive_int])
})

# Module-level hub instance; assigned in setup().
HUB = None
def setup(hass, config):
    """Set up Modbus component.

    Builds the appropriate pymodbus client from the validated config,
    wraps it in a module-level ModbusHub, and defers connecting (and
    service registration) until Home Assistant has started.
    Returns False for an unknown connection type.
    """
    # Modbus connection type
    # pylint: disable=global-statement, import-error
    client_type = config[DOMAIN][CONF_TYPE]

    # Connect to Modbus network
    # pylint: disable=global-statement, import-error
    # pymodbus is imported lazily so the dependency is only needed once the
    # component is actually configured.
    if client_type == 'serial':
        from pymodbus.client.sync import ModbusSerialClient as ModbusClient
        client = ModbusClient(method=config[DOMAIN][CONF_METHOD],
                              port=config[DOMAIN][CONF_PORT],
                              baudrate=config[DOMAIN][CONF_BAUDRATE],
                              stopbits=config[DOMAIN][CONF_STOPBITS],
                              bytesize=config[DOMAIN][CONF_BYTESIZE],
                              parity=config[DOMAIN][CONF_PARITY])
    elif client_type == 'tcp':
        from pymodbus.client.sync import ModbusTcpClient as ModbusClient
        client = ModbusClient(host=config[DOMAIN][CONF_HOST],
                              port=config[DOMAIN][CONF_PORT])
    elif client_type == 'udp':
        from pymodbus.client.sync import ModbusUdpClient as ModbusClient
        client = ModbusClient(host=config[DOMAIN][CONF_HOST],
                              port=config[DOMAIN][CONF_PORT])
    else:
        # Unknown connection type: component setup fails.
        return False

    global HUB
    HUB = ModbusHub(client)

    def stop_modbus(event):
        """Stop Modbus service."""
        HUB.close()

    def start_modbus(event):
        """Start Modbus service."""
        HUB.connect()
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_modbus)

        # Register services for modbus
        hass.services.register(DOMAIN, SERVICE_WRITE_REGISTER, write_register,
                               schema=SERVICE_WRITE_REGISTER_SCHEMA)

    def write_register(service):
        """Write modbus registers."""
        # Service data arrives as strings; int(float(...)) accepts "1.0" etc.
        unit = int(float(service.data.get(ATTR_UNIT)))
        address = int(float(service.data.get(ATTR_ADDRESS)))
        value = service.data.get(ATTR_VALUE)
        if isinstance(value, list):
            HUB.write_registers(
                unit,
                address,
                [int(float(i)) for i in value])
        else:
            HUB.write_register(
                unit,
                address,
                int(float(value)))

    # Connect only once Home Assistant has fully started.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_modbus)

    return True
class ModbusHub(object):
    """Thread-safe wrapper around a pymodbus client.

    Every call is serialized through a single lock so that concurrent
    callers can safely share one Modbus connection.
    """

    def __init__(self, modbus_client):
        """Store the wrapped pymodbus client and create the lock."""
        self._client = modbus_client
        self._lock = threading.Lock()

    @staticmethod
    def _unit_kwargs(unit):
        # A falsy unit (0/None) means "no unit id": pass no keyword at all.
        return {'unit': unit} if unit else {}

    def close(self):
        """Disconnect client."""
        with self._lock:
            self._client.close()

    def connect(self):
        """Connect client."""
        with self._lock:
            self._client.connect()

    def read_coils(self, unit, address, count):
        """Read coils."""
        with self._lock:
            return self._client.read_coils(
                address, count, **self._unit_kwargs(unit))

    def read_input_registers(self, unit, address, count):
        """Read input registers."""
        with self._lock:
            return self._client.read_input_registers(
                address, count, **self._unit_kwargs(unit))

    def read_holding_registers(self, unit, address, count):
        """Read holding registers."""
        with self._lock:
            return self._client.read_holding_registers(
                address, count, **self._unit_kwargs(unit))

    def write_coil(self, unit, address, value):
        """Write coil."""
        with self._lock:
            self._client.write_coil(
                address, value, **self._unit_kwargs(unit))

    def write_register(self, unit, address, value):
        """Write register."""
        with self._lock:
            self._client.write_register(
                address, value, **self._unit_kwargs(unit))

    def write_registers(self, unit, address, values):
        """Write registers."""
        with self._lock:
            self._client.write_registers(
                address, values, **self._unit_kwargs(unit))
|
from django.db import models
from users.models import User
class Ticket(models.Model):
    """A user-submitted ticket with a free-form message."""

    # Author of the ticket; kept (as NULL) if the user account is deleted.
    user = models.ForeignKey(
        User,
        related_name="tickets",
        null=True,
        on_delete=models.SET_NULL,
    )
    # Set once when the row is first inserted.
    created = models.DateTimeField(auto_now_add=True)
    message = models.TextField()
    # Optional structured context attached to the ticket.
    additional_data = models.JSONField(blank=True, null=True)

    def __str__(self):
        """Human-readable label: author name plus a message preview (first 100 chars)."""
        return (
            (self.user.full_name if self.user is not None else "Anonymous")
            + " - "
            + self.message[:100]
        )
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from base.base_net import BaseNet
class Wheel_LeNet(BaseNet):
    """LeNet-style encoder: six conv/pool stages, then a linear map to rep_dim.

    Input is a single-channel image. The fc1 in_features (256 * 4 * 4) implies
    the spatial size is 4x4 after six 2x2 poolings — presumably a 256x256
    input; confirm against the data loader.
    """

    def __init__(self):
        super().__init__()
        self.rep_dim = 32  # dimensionality of the output representation
        self.pool = nn.MaxPool2d(2, 2)
        # Conv stack: channels 1 -> 16 -> 32 -> 64 -> 64 -> 128 -> 256.
        # All BatchNorm layers are non-affine (no learned scale/shift).
        self.conv1 = nn.Conv2d(1, 16, 5, bias=False, padding=2)
        self.bn1 = nn.BatchNorm2d(16, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(16, 32, 5, bias=False, padding=2)
        self.bn2 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.conv3 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
        self.bn3 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv4 = nn.Conv2d(64, 64, 5, bias=False, padding=2)
        self.bn4 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv5 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
        self.bn5 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.conv6 = nn.Conv2d(128, 256, 5, bias=False, padding=2)
        self.bn6 = nn.BatchNorm2d(256, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(256 * 4 * 4, self.rep_dim, bias=False)

    def forward(self, x):
        """Encode a batch of images into rep_dim-dimensional codes."""
        stages = (
            (self.conv1, self.bn1),
            (self.conv2, self.bn2),
            (self.conv3, self.bn3),
            (self.conv4, self.bn4),
            (self.conv5, self.bn5),
            (self.conv6, self.bn6),
        )
        # Each stage: conv -> batchnorm -> leaky ReLU -> 2x2 max-pool.
        for conv, bn in stages:
            x = self.pool(F.leaky_relu(bn(conv(x))))
        return self.fc1(x.view(x.size(0), -1))
class Wheel_LeNet_Autoencoder(BaseNet):
    """Autoencoder whose encoder mirrors Wheel_LeNet layer-for-layer."""

    def __init__(self):
        super().__init__()
        self.rep_dim = 32
        self.pool = nn.MaxPool2d(2, 2)

        # Encoder (must match the Deep SVDD network above)
        self.conv1 = nn.Conv2d(1, 16, 5, bias=False, padding=2)
        self.bn1 = nn.BatchNorm2d(16, eps=1e-04, affine=False)
        self.conv2 = nn.Conv2d(16, 32, 5, bias=False, padding=2)
        self.bn2 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.conv3 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
        self.bn3 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv4 = nn.Conv2d(64, 64, 5, bias=False, padding=2)
        self.bn4 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.conv5 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
        self.bn5 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.conv6 = nn.Conv2d(128, 256, 5, bias=False, padding=2)
        self.bn6 = nn.BatchNorm2d(256, eps=1e-04, affine=False)
        self.fc1 = nn.Linear(256 * 4 * 4, self.rep_dim, bias=False)

        # Decoder: the first deconv consumes the code reshaped to
        # (batch, rep_dim / 16 = 2, 4, 4) — hence in_channels=2.
        self.deconv1 = nn.ConvTranspose2d(2, 128, 5, bias=False, padding=2)
        self.dbn1 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
        self.dbn2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(64, 64, 5, bias=False, padding=2)
        self.dbn3 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.deconv4 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
        self.dbn4 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.deconv5 = nn.ConvTranspose2d(32, 16, 5, bias=False, padding=2)
        self.dbn5 = nn.BatchNorm2d(16, eps=1e-04, affine=False)
        self.deconv6 = nn.ConvTranspose2d(16, 1, 5, bias=False, padding=2)

    def forward(self, x):
        """Encode the input, then reconstruct it; output is sigmoid-squashed."""
        # --- encoder: conv -> bn -> leaky ReLU -> 2x2 max-pool, six times ---
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2),
                         (self.conv3, self.bn3), (self.conv4, self.bn4),
                         (self.conv5, self.bn5), (self.conv6, self.bn6)):
            x = self.pool(F.leaky_relu(bn(conv(x))))
        x = self.fc1(x.view(x.size(0), -1))

        # --- decoder: reshape the code to a (rep_dim/16, 4, 4) map, then
        # alternate 2x upsampling with transposed convolutions ---
        x = x.view(x.size(0), int(self.rep_dim / 16), 4, 4)
        x = F.interpolate(F.leaky_relu(x), scale_factor=2)
        for deconv, dbn in ((self.deconv1, self.dbn1), (self.deconv2, self.dbn2),
                            (self.deconv3, self.dbn3), (self.deconv4, self.dbn4),
                            (self.deconv5, self.dbn5)):
            x = F.interpolate(F.leaky_relu(dbn(deconv(x))), scale_factor=2)
        return torch.sigmoid(self.deconv6(x))
|
"""Tests uti.nodes functions."""
from textwrap import dedent
from typing import Any
import pytest
from docutils import frontend, nodes
from docutils.parsers import rst
from docutils.utils import new_document
from sphinx.transforms import ApplySourceWorkaround
from sphinx.util.nodes import (NodeMatcher, clean_astext, extract_messages, make_id,
split_explicit_title)
def _transform(doctree):
    """Apply Sphinx's source-workaround transform to *doctree* in place."""
    ApplySourceWorkaround(doctree).apply()
def create_new_document():
    """Return an empty docutils document wired up for the RST parser.

    The 'id' prefix makes auto-generated ids deterministic ('id0', 'id1', ...).
    """
    settings = frontend.OptionParser(
        components=(rst.Parser,)).get_default_values()
    settings.id_prefix = 'id'
    document = new_document('dummy.txt', settings)
    return document
def _get_doctree(text):
    """Parse *text* as RST and return the transformed doctree."""
    document = create_new_document()
    rst.Parser().parse(text, document)
    _transform(document)
    return document
def assert_node_count(messages, node_type, expect_count):
    """Assert exactly *expect_count* nodes of *node_type* appear in *messages*.

    Args:
        messages: iterable of ``(node, message)`` pairs, as produced by
            ``extract_messages``.
        node_type: node class (or tuple of classes) matched via ``isinstance``.
        expect_count: expected number of matching nodes.

    Raises:
        AssertionError: if the count differs from *expect_count*.
    """
    # Materialize first: *messages* may be a generator, and the node list is
    # also needed for the failure message.
    node_list = [node for node, msg in messages]
    count = sum(1 for node in node_list if isinstance(node, node_type))
    assert count == expect_count, (
        "Count of %r in the %r is %d instead of %d"
        % (node_type, node_list, count, expect_count))
def test_NodeMatcher():
    """NodeMatcher selects nodes by class and/or attributes via findall()."""
    doctree = nodes.document(None, None)
    doctree += nodes.paragraph('', 'Hello')
    doctree += nodes.paragraph('', 'Sphinx', block=1)
    doctree += nodes.paragraph('', 'World', block=2)
    doctree += nodes.literal_block('', 'blah blah blah', block=3)

    # search by node class
    matcher = NodeMatcher(nodes.paragraph)
    assert len(list(doctree.findall(matcher))) == 3

    # search by multiple node classes
    matcher = NodeMatcher(nodes.paragraph, nodes.literal_block)
    assert len(list(doctree.findall(matcher))) == 4

    # search by node attribute
    matcher = NodeMatcher(block=1)
    assert len(list(doctree.findall(matcher))) == 1

    # search by node attribute (Any)
    matcher = NodeMatcher(block=Any)
    assert len(list(doctree.findall(matcher))) == 3

    # search by both class and attribute
    matcher = NodeMatcher(nodes.paragraph, block=Any)
    assert len(list(doctree.findall(matcher))) == 2

    # mismatched
    matcher = NodeMatcher(nodes.title)
    assert len(list(doctree.findall(matcher))) == 0

    # search with Any does not match to Text node
    matcher = NodeMatcher(blah=Any)
    assert len(list(doctree.findall(matcher))) == 0
# NOTE(review): the indentation and blank lines inside these RST fixture
# strings were lost in this copy of the file; they are reconstructed here to
# match the documented node/count expectations — confirm against upstream.
@pytest.mark.parametrize(
    'rst,node_cls,count',
    [
        (
            """
            .. admonition:: admonition title

               admonition body
            """,
            nodes.title, 1
        ),
        (
            """
            .. figure:: foo.jpg

               this is title
            """,
            nodes.caption, 1,
        ),
        (
            """
            .. rubric:: spam
            """,
            nodes.rubric, 1,
        ),
        (
            """
            | spam
            | egg
            """,
            nodes.line, 2,
        ),
        (
            """
            section
            =======

            +----------------+
            | | **Title 1**  |
            | | Message 1    |
            +----------------+
            """,
            nodes.line, 2,
        ),
        (
            """
            * | **Title 1**
              | Message 1
            """,
            nodes.line, 2,
        ),
    ]
)
def test_extract_messages(rst, node_cls, count):
    """Each RST snippet yields *count* translatable nodes of class *node_cls*."""
    msg = extract_messages(_get_doctree(dedent(rst)))
    assert_node_count(msg, node_cls, count)
def test_extract_messages_without_rawsource():
    """
    Check node.rawsource is fall-backed by using node.astext() value.

    `extract_message` which is used from Sphinx i18n feature drop ``not node.rawsource``
    nodes. So, all nodes which want to translate must have ``rawsource`` value.
    However, sometimes node.rawsource is not set.

    For example: recommonmark-0.2.0 doesn't set rawsource to `paragraph` node.

    refs #1994: Fall back to node's astext() during i18n message extraction.
    """
    p = nodes.paragraph()
    p.append(nodes.Text('test'))
    p.append(nodes.Text('sentence'))
    assert not p.rawsource  # target node must not have rawsource value
    document = create_new_document()
    document.append(p)
    _transform(document)
    assert_node_count(extract_messages(document), nodes.TextElement, 1)
    # NOTE(review): the comma makes 'text sentence' the *assert message*, so
    # this line only checks that the first extracted message is truthy. An
    # equality comparison was probably intended — confirm expected value
    # before changing.
    assert [m for n, m in extract_messages(document)][0], 'text sentence'
def test_clean_astext():
    """clean_astext() returns plain text, ignoring image alt text and raw nodes."""
    node = nodes.paragraph(text='hello world')
    assert 'hello world' == clean_astext(node)

    # an image's alt text does not appear in the cleaned text
    node = nodes.image(alt='hello world')
    assert '' == clean_astext(node)

    # raw nodes are stripped from the output
    node = nodes.paragraph(text='hello world')
    node += nodes.raw('', 'raw text', format='html')
    assert 'hello world' == clean_astext(node)
@pytest.mark.parametrize(
    'prefix, term, expected',
    [
        ('', '', 'id0'),
        ('term', '', 'term-0'),
        ('term', 'Sphinx', 'term-Sphinx'),
        ('', 'io.StringIO', 'io.StringIO'),  # contains a dot
        ('', 'sphinx.setup_command', 'sphinx.setup_command'),  # contains a dot & underscore
        ('', '_io.StringIO', 'io.StringIO'),  # starts with underscore
        ('', 'sphinx', 'sphinx'),  # alphabets in unicode fullwidth characters
        ('', '悠好', 'id0'),  # multibytes text (in Chinese)
        ('', 'Hello=悠好=こんにちは', 'Hello'),  # alphabets and multibytes text
        ('', 'fünf', 'funf'),  # latin1 (umlaut)
        ('', '0sphinx', 'sphinx'),  # starts with number
        ('', 'sphinx-', 'sphinx'),  # ends with hyphen
    ])
def test_make_id(app, prefix, term, expected):
    """make_id() slugifies *term* (with optional *prefix*) into a document id.

    NOTE(review): the "fullwidth characters" case appears to have lost its
    fullwidth glyphs in this copy (input and output both read 'sphinx') —
    confirm against upstream before relying on it.
    """
    document = create_new_document()
    assert make_id(app.env, document, prefix, term) == expected
def test_make_id_already_registered(app):
    """make_id() falls back to a sequence id when the term-based id is taken."""
    document = create_new_document()
    document.ids['term-Sphinx'] = True  # register "term-Sphinx" manually
    assert make_id(app.env, document, 'term', 'Sphinx') == 'term-0'
def test_make_id_sequential(app):
    """make_id() skips sequence ids that are already registered."""
    document = create_new_document()
    document.ids['term-0'] = True
    assert make_id(app.env, document, 'term') == 'term-1'
@pytest.mark.parametrize(
    'title, expected',
    [
        # implicit
        ('hello', (False, 'hello', 'hello')),
        # explicit
        ('hello <world>', (True, 'hello', 'world')),
        # explicit (title having angle brackets)
        ('hello <world> <sphinx>', (True, 'hello <world>', 'sphinx')),
    ]
)
def test_split_explicit_target(title, expected):
    """split_explicit_title() splits 'title <target>' into (explicit, title, target)."""
    assert expected == split_explicit_title(title)
|
import os
import re
import shutil
from typing import Union
from zipfile import ZipFile
import pytest
from _pytest.fixtures import FixtureRequest
from _pytest.tmpdir import TempPathFactory, _mk_tmp
from mock import patch
from demisto_sdk.commands.common.constants import LAYOUT, LAYOUTS_CONTAINER
from demisto_sdk.commands.common.git_util import GitUtil
from demisto_sdk.commands.common.handlers import JSON_Handler
from demisto_sdk.commands.init.contribution_converter import \
ContributionConverter
from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN
from TestSuite.contribution import Contribution
from TestSuite.repo import Repo
# NOTE(review): this deliberately shadows the stdlib ``json`` name with the
# SDK's handler facade; all json.* calls below go through JSON_Handler.
json = JSON_Handler()

# Release-notes fixture paths used by the formatting tests.
RELEASE_NOTES_COPY = "demisto_sdk/commands/init/tests/RN/1_0_1-formatted.md"
SOURCE_RELEASE_NOTES_FILE = "demisto_sdk/commands/init/tests/RN/1_0_1.md"
EXPECTED_RELEASE_NOTES = "demisto_sdk/commands/init/tests/RN/1_0_1_expected.md"

# (input, expected) pairs exercising pack-name reformatting rules.
name_reformatting_test_examples = [
    ('PACKYAYOK', 'PACKYAYOK'),
    ('PackYayOK', 'PackYayOK'),
    ('pack yay ok!', 'PackYayOk'),
    ('PackYayOK', 'PackYayOK'),
    ('-pack-yay-ok--', 'Pack-Yay-Ok'),
    ('PackYayOK', 'PackYayOK'),
    ('The quick brown fox, jumps over the lazy dog!', 'TheQuickBrownFox_JumpsOverTheLazyDog'),
    ('The quick`*+.brown fox, ;jumps over @@the lazy dog!', 'TheQuick_BrownFox_JumpsOver_TheLazyDog'),
    ('ThE quIck`*+.brown fox, ;jumps ovER @@the lazy dog!', 'ThEQuIck_BrownFox_JumpsOvER_TheLazyDog')
]
def util_open_file(path):
    """Return the full text contents of the file at *path*."""
    with open(path) as file_handle:
        return file_handle.read()
@pytest.fixture
def contrib_converter():
    """Fixture: a ContributionConverter built with an empty contribution name."""
    return ContributionConverter('')
def create_contribution_converter(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> ContributionConverter:
    """Build a ContributionConverter named after the fixture param, rooted in a fresh tmp dir."""
    tmp_dir = _mk_tmp(request, tmp_path_factory)
    return ContributionConverter(name=request.param, base_dir=str(tmp_dir))
@pytest.fixture
def contribution_converter(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> ContributionConverter:
    """Fixture wrapping create_contribution_converter with pytest's tmp-path machinery."""
    return create_contribution_converter(request, tmp_path_factory)
def rename_file_in_zip(path_to_zip: Union[os.PathLike, str], original_file_name: str, updated_file_name: str):
    """Utility to rename a file in a zip file

    Useful for renaming files in an example contribution zip file to test specific cases.
    If the zipped file includes directories, make sure the filenames take that into account.

    Args:
        path_to_zip (Union[os.PathLike, str]): The zip file containing a file which needs renaming
        original_file_name (str): The file which will be renamed
        updated_file_name (str): The name the original file will be renamed to
    """
    modded_zip_file = os.path.join(os.path.dirname(path_to_zip), 'Edit' + os.path.basename(path_to_zip))
    # BUGFIX: the temporary archive must be CLOSED before os.replace() —
    # ZipFile only writes the central directory on close. The previous code
    # relied on CPython refcount finalization (and failed outright on
    # Windows, where replacing an open file raises PermissionError).
    with ZipFile(path_to_zip, 'r') as zf, ZipFile(modded_zip_file, 'w') as tmp_zf:
        for item in zf.infolist():
            if item.filename == original_file_name:
                # Write under the new name; metadata of the old entry is dropped.
                with tmp_zf.open(updated_file_name, 'w') as out_file:
                    out_file.write(zf.read(item.filename))
            else:
                # Preserve the original ZipInfo (timestamps, permissions).
                tmp_zf.writestr(item, zf.read(item.filename))
    # Both archives are fully written and closed here; atomically swap in place.
    os.replace(modded_zip_file, path_to_zip)
@patch('demisto_sdk.commands.split.ymlsplitter.get_python_version')
@patch('demisto_sdk.commands.init.contribution_converter.get_content_path')
def test_convert_contribution_zip_updated_pack(get_content_path_mock, get_python_version_mock, tmp_path, mocker):
    """
    Create a fake contribution zip file and test that it is converted to a Pack correctly.
    The pack already exists, checking the update flow.

    Args:
        get_content_path_mock (MagicMock): Patch of the 'get_content_path' function to return the fake repo directory
            used in the test
        get_python_version_mock (MagicMock): Patch of the 'get_python_version' function to return the "3.7"
        tmp_path (fixture): Temporary Path used for the unit test and cleaned up afterwards

    Scenario: Simulate converting a contribution zip file.

    Given
    - A contribution zip file
    - The zipfile contains a unified integration file
    When
    - Converting the zipfile to a valid Pack structure
    - The contribution is an update to an existing pack
    Then
    - Ensure integration are componentized and in valid directory structure
    - Ensure that readme file has not been changed.
    """
    # Git operations are stubbed so no real repository is needed.
    mocker.patch.object(GitUtil, '__init__', return_value=None)
    mocker.patch.object(GitUtil, 'added_files', return_value=set())
    mocker.patch.object(GitUtil, 'modified_files', return_value=set())
    # Create all Necessary Temporary directories
    # create temp directory for the repo
    repo_dir = tmp_path / 'content_repo'
    repo_dir.mkdir()
    get_content_path_mock.return_value = repo_dir
    get_python_version_mock.return_value = 3.7
    # create temp target dir in which we will create all the TestSuite content items to use in the contribution zip and
    # that will be deleted after
    target_dir = repo_dir / 'target_dir'
    target_dir.mkdir()
    # create temp directory in which the contribution zip will reside
    contribution_zip_dir = tmp_path / 'contrib_zip'
    contribution_zip_dir.mkdir()
    # Create fake content repo and contribution zip
    repo = Repo(repo_dir)
    pack = repo.create_pack('TestPack')
    integration = pack.create_integration('integration0')
    integration.create_default_integration()
    contrib_zip = Contribution(target_dir, 'ContribTestPack', repo)
    contrib_zip.create_zip(contribution_zip_dir)
    # target_dir should have been deleted after creation of the zip file
    assert not target_dir.exists()
    name = 'Test Pack'
    contribution_path = contrib_zip.created_zip_filepath
    description = 'test pack description here'
    author = 'Octocat Smith'
    # create_new=False exercises the "update an existing pack" flow.
    contrib_converter_inst = ContributionConverter(
        name=name, contribution=contribution_path, description=description, author=author, create_new=False,
        no_pipenv=True)
    contrib_converter_inst.convert_contribution_to_pack()
    converted_pack_path = repo_dir / 'Packs' / 'TestPack'
    assert converted_pack_path.exists()
    integrations_path = converted_pack_path / 'Integrations'
    sample_integration_path = integrations_path / 'integration0'
    integration_yml = sample_integration_path / 'integration0.yml'
    integration_py = sample_integration_path / 'integration0.py'
    integration_description = sample_integration_path / 'integration0_description.md'
    integration_image = sample_integration_path / 'integration0_image.png'
    integration_readme_md = sample_integration_path / 'README.md'
    unified_yml = integrations_path / 'integration-integration0.yml'
    unified_yml_in_sample = sample_integration_path / 'integration-integration0.yml'
    integration_files = [integration_yml, integration_py, integration_description, integration_image,
                         integration_readme_md]
    for integration_file in integration_files:
        assert integration_file.exists()
    # In a new pack that part will exist.
    assert not unified_yml.exists()
    assert not unified_yml_in_sample.exists()
@patch('demisto_sdk.commands.split.ymlsplitter.get_python_version')
@patch('demisto_sdk.commands.init.contribution_converter.get_content_path')
def test_convert_contribution_zip_outputs_structure(get_content_path_mock, get_python_version_mock, tmp_path, mocker):
    """Create a fake contribution zip file and test that it is converted to a Pack correctly

    Args:
        get_content_path_mock (MagicMock): Patch of the 'get_content_path' function to return the fake repo directory
            used in the test
        get_python_version_mock (MagicMock): Patch of the 'get_python_version' function to return the "3.7"
        tmp_path (fixture): Temporary Path used for the unit test and cleaned up afterwards

    Scenario: Simulate converting a contribution zip file

    Given
    - A contribution zip file
    - The zipfile contains a unified script file
    - The zipfile contains a unified integration file
    When
    - Converting the zipfile to a valid Pack structure
    Then
    - Ensure the unified yaml files of the integration and script have been removed from the output created by
      converting the contribution zip file
    """
    mocker.patch.object(GitUtil, '__init__', return_value=None)
    mocker.patch.object(GitUtil, 'added_files', return_value=set())
    mocker.patch.object(GitUtil, 'modified_files', return_value=set())
    # ### SETUP ### #
    # Create all Necessary Temporary directories
    # create temp directory for the repo
    repo_dir = tmp_path / 'content_repo'
    repo_dir.mkdir()
    get_content_path_mock.return_value = repo_dir
    get_python_version_mock.return_value = 3.7
    # create temp target dir in which we will create all the TestSuite content items to use in the contribution zip and
    # that will be deleted after
    target_dir = repo_dir / 'target_dir'
    target_dir.mkdir()
    # create temp directory in which the contribution zip will reside
    contribution_zip_dir = tmp_path / 'contrib_zip'
    contribution_zip_dir.mkdir()
    # Create fake content repo and contribution zip
    repo = Repo(repo_dir)
    contrib_zip = Contribution(target_dir, 'ContribTestPack', repo)
    contrib_zip.create_zip(contribution_zip_dir)
    # rename script-script0.yml unified to automation-script0.yml
    # this naming is aligned to how the server exports scripts in contribution zips
    rename_file_in_zip(
        contrib_zip.created_zip_filepath, 'automation/script-script0.yml', 'automation/automation-script0.yml'
    )
    # Convert Zip
    name = 'Contrib Test Pack'
    contribution_path = contrib_zip.created_zip_filepath
    description = 'test pack description here'
    author = 'Octocat Smith'
    contrib_converter_inst = ContributionConverter(
        name=name, contribution=contribution_path, description=description, author=author, no_pipenv=True)
    contrib_converter_inst.convert_contribution_to_pack()
    # Ensure directory/file structure output by conversion meets expectations
    # target_dir should have been deleted after creation of the zip file
    assert not target_dir.exists()
    converted_pack_path = repo_dir / 'Packs' / 'ContribTestPack'
    assert converted_pack_path.exists()
    scripts_path = converted_pack_path / 'Scripts'
    sample_script_path = scripts_path / 'SampleScript'
    script_yml = sample_script_path / 'SampleScript.yml'
    script_py = sample_script_path / 'SampleScript.py'
    script_readme_md = sample_script_path / 'README.md'
    unified_script_in_sample = sample_script_path / 'automation-script0.yml'
    unified_script = scripts_path / 'automation-script0.yml'
    assert scripts_path.exists()
    assert sample_script_path.exists()
    assert script_yml.exists()
    assert script_py.exists()
    assert script_readme_md.exists()
    # generated script readme should not be empty
    script_statinfo = os.stat(script_readme_md)
    assert script_statinfo and script_statinfo.st_size > 0
    # unified yaml of the script should have been deleted
    assert not unified_script_in_sample.exists()
    assert not unified_script.exists()
    integrations_path = converted_pack_path / 'Integrations'
    sample_integration_path = integrations_path / 'Sample'
    integration_yml = sample_integration_path / 'Sample.yml'
    integration_py = sample_integration_path / 'Sample.py'
    integration_description = sample_integration_path / 'Sample_description.md'
    integration_image = sample_integration_path / 'Sample_image.png'
    integration_readme_md = sample_integration_path / 'README.md'
    unified_yml = integrations_path / 'integration-integration0.yml'
    unified_yml_in_sample = sample_integration_path / 'integration-integration0.yml'
    integration_files = [integration_yml, integration_py, integration_description, integration_image,
                         integration_readme_md]
    for integration_file in integration_files:
        assert integration_file.exists()
    # generated integration readme should not be empty
    statinfo = os.stat(integration_readme_md)
    assert statinfo and statinfo.st_size > 0
    # unified yaml of the integration should have been deleted
    assert not unified_yml.exists()
    assert not unified_yml_in_sample.exists()
@patch('demisto_sdk.commands.split.ymlsplitter.get_python_version')
@patch('demisto_sdk.commands.init.contribution_converter.get_content_path')
def test_convert_contribution_zip(get_content_path_mock, get_python_version_mock, tmp_path, mocker):
    """Create a fake contribution zip file and test that it is converted to a Pack correctly

    Args:
        get_content_path_mock (MagicMock): Patch of the 'get_content_path' function to return the fake repo directory
            used in the test
        get_python_version_mock (MagicMock): Patch of the 'get_python_version' function to return the "3.7"
        tmp_path (fixture): Temporary Path used for the unit test and cleaned up afterwards

    Scenario: Simulate converting a contribution zip file

    Given
    - A contribution zip file
    - The zipfile contains a unified script file
    - The zipfile contains a unified integration file
    When
    - Converting the zipfile to a valid Pack structure
    Then
    - Ensure script and integration are componentized and in valid directory structure
    - Ensure readme_files is not empty and the generated docs exists.
    """
    mocker.patch.object(GitUtil, '__init__', return_value=None)
    mocker.patch.object(GitUtil, 'added_files', return_value=set())
    mocker.patch.object(GitUtil, 'modified_files', return_value=set())
    # Create all Necessary Temporary directories
    # create temp directory for the repo
    repo_dir = tmp_path / 'content_repo'
    repo_dir.mkdir()
    get_content_path_mock.return_value = repo_dir
    get_python_version_mock.return_value = 3.7
    # create temp target dir in which we will create all the TestSuite content items to use in the contribution zip and
    # that will be deleted after
    target_dir = repo_dir / 'target_dir'
    target_dir.mkdir()
    # create temp directory in which the contribution zip will reside
    contribution_zip_dir = tmp_path / 'contrib_zip'
    contribution_zip_dir.mkdir()
    # Create fake content repo and contribution zip
    repo = Repo(repo_dir)
    contrib_zip = Contribution(target_dir, 'ContribTestPack', repo)
    contrib_zip.create_zip(contribution_zip_dir)
    # target_dir should have been deleted after creation of the zip file
    assert not target_dir.exists()
    # rename script-script0.yml unified to automation-script0.yml
    # this naming is aligned to how the server exports scripts in contribution zips
    rename_file_in_zip(
        contrib_zip.created_zip_filepath, 'automation/script-script0.yml', 'automation/automation-script0.yml'
    )
    name = 'Contrib Test Pack'
    contribution_path = contrib_zip.created_zip_filepath
    description = 'test pack description here'
    author = 'Octocat Smith'
    contrib_converter_inst = ContributionConverter(
        name=name, contribution=contribution_path, description=description, author=author, no_pipenv=True)
    contrib_converter_inst.convert_contribution_to_pack()
    converted_pack_path = repo_dir / 'Packs' / 'ContribTestPack'
    assert converted_pack_path.exists()
    # Script should be componentized under Scripts/SampleScript.
    scripts_path = converted_pack_path / 'Scripts'
    sample_script_path = scripts_path / 'SampleScript'
    script_yml = sample_script_path / 'SampleScript.yml'
    script_py = sample_script_path / 'SampleScript.py'
    script_readme_md = sample_script_path / 'README.md'
    unified_script_in_sample = sample_script_path / 'automation-script0.yml'
    unified_script = scripts_path / 'automation-script0.yml'
    assert scripts_path.exists()
    assert sample_script_path.exists()
    assert script_yml.exists()
    assert script_py.exists()
    assert script_readme_md.exists()
    assert not unified_script_in_sample.exists()
    assert not unified_script.exists()
    # Integration should be componentized under Integrations/Sample.
    integrations_path = converted_pack_path / 'Integrations'
    sample_integration_path = integrations_path / 'Sample'
    integration_yml = sample_integration_path / 'Sample.yml'
    integration_py = sample_integration_path / 'Sample.py'
    integration_description = sample_integration_path / 'Sample_description.md'
    integration_image = sample_integration_path / 'Sample_image.png'
    integration_readme_md = sample_integration_path / 'README.md'
    unified_yml = integrations_path / 'integration-integration0.yml'
    unified_yml_in_sample = sample_integration_path / 'integration-integration0.yml'
    integration_files = [integration_yml, integration_py, integration_description, integration_image,
                         integration_readme_md]
    for integration_file in integration_files:
        assert integration_file.exists()
    assert not unified_yml.exists()
    assert not unified_yml_in_sample.exists()
    # Playbook and its generated README should exist.
    playbooks_path = converted_pack_path / 'Playbooks'
    playbook_yml = playbooks_path / 'playbook-SamplePlaybook.yml'
    playbook_readme_md = playbooks_path / 'playbook-SamplePlaybook_README.md'
    assert playbooks_path.exists()
    assert playbook_yml.exists()
    assert playbook_readme_md.exists()
    # Both layout flavors (layoutscontainer and legacy layout) should exist.
    layouts_path = converted_pack_path / 'Layouts'
    sample_layoutscontainer = layouts_path / f'{LAYOUTS_CONTAINER}-fakelayoutscontainer.json'
    sample_layout = layouts_path / f'{LAYOUT}-fakelayout.json'
    assert layouts_path.exists()
    assert sample_layoutscontainer.exists()
    assert sample_layout.exists()
    # readme_files should record exactly the three generated README docs.
    assert set(contrib_converter_inst.readme_files) == {str(playbook_readme_md), str(integration_readme_md),
                                                        str(script_readme_md)}
@patch('demisto_sdk.commands.split.ymlsplitter.get_python_version')
@patch('demisto_sdk.commands.init.contribution_converter.get_content_path')
def test_convert_contribution_zip_with_args(get_content_path_mock, get_python_version_mock, tmp_path, mocker):
    """Convert a contribution zip to a pack and test that the converted pack's 'pack_metadata.json' is correct
    Args:
        get_content_path_mock (MagicMock): Patch of the 'get_content_path' function to return the fake repo directory
            used in the test
        get_python_version_mock (MagicMock): Patch of the 'get_python_version' function to return the "3.7"
        tmp_path (fixture): Temporary Path used for the unit test and cleaned up afterwards
        mocker (fixture): pytest-mock fixture used to stub out GitUtil so no real git repository is needed
    Scenario: Simulate converting a contribution zip file
    Given
    - A contribution zip file
    When
    - The contrib_converter class instance is instantiated with the 'name' argument of 'Test Pack'
    - The contrib_converter class instance is instantiated with the 'description' argument
      of 'test pack description here'
    - The contrib_converter class instance is instantiated with the 'author' argument of 'Octocat Smith'
    - The contrib_converter class instance is instantiated with the 'gh_user' argument of 'octocat'
    Then
    - Ensure pack with directory name of 'TestPack' is created
    - Ensure that the pack's 'pack_metadata.json' file's 'name' field is 'Test Pack'
    - Ensure that the pack's 'pack_metadata.json' file's 'description' field is 'test pack description here'
    - Ensure that the pack's 'pack_metadata.json' file's 'author' field is 'Octocat Smith'
    - Ensure that the pack's 'pack_metadata.json' file's 'githubUser' field a list containing only 'octocat'
    - Ensure that the pack's 'pack_metadata.json' file's 'email' field is the empty string
    """
    # Neutralize GitUtil so the converter does not require an actual git checkout.
    mocker.patch.object(GitUtil, '__init__', return_value=None)
    mocker.patch.object(GitUtil, 'added_files', return_value=set())
    mocker.patch.object(GitUtil, 'modified_files', return_value=set())
    # Create all Necessary Temporary directories
    # create temp directory for the repo
    repo_dir = tmp_path / 'content_repo'
    repo_dir.mkdir()
    get_content_path_mock.return_value = repo_dir
    get_python_version_mock.return_value = 3.7
    # create temp target dir in which we will create all the TestSuite content items to use in the contribution zip and
    # that will be deleted after
    target_dir = repo_dir / 'target_dir'
    target_dir.mkdir()
    # create temp directory in which the contribution zip will reside
    contribution_zip_dir = tmp_path / 'contrib_zip'
    contribution_zip_dir.mkdir()
    # Create fake content repo and contribution zip
    repo = Repo(repo_dir)
    contrib_zip = Contribution(target_dir, 'ContribTestPack', repo)
    contrib_zip.create_zip(contribution_zip_dir)
    # target_dir should have been deleted after creation of the zip file
    assert not target_dir.exists()
    name = 'Test Pack'
    contribution_path = contrib_zip.created_zip_filepath
    description = 'test pack description here'
    author = 'Octocat Smith'
    gh_user = 'octocat'
    contrib_converter_inst = ContributionConverter(
        name=name, contribution=contribution_path, description=description, author=author, gh_user=gh_user,
        no_pipenv=True)
    contrib_converter_inst.convert_contribution_to_pack()
    # The converted pack directory name is the reformatted (whitespace-stripped) pack name.
    converted_pack_path = repo_dir / 'Packs' / 'TestPack'
    assert converted_pack_path.exists()
    pack_metadata_path = converted_pack_path / 'pack_metadata.json'
    assert pack_metadata_path.exists()
    with open(pack_metadata_path, 'r') as pack_metadata:
        metadata = json.load(pack_metadata)
    assert metadata.get('name', '') == name
    assert metadata.get('description', '') == description
    assert metadata.get('author', '') == author
    assert metadata.get('githubUser', []) == [gh_user]
    assert metadata.get('marketplaces', []) == ['xsoar', 'marketplacev2']
    assert not metadata.get('email')
@pytest.mark.parametrize('input_name,expected_output_name', name_reformatting_test_examples)
def test_format_pack_dir_name(contrib_converter, input_name, expected_output_name):
    """Test the 'format_pack_dir_name' method with various inputs
    Args:
        contrib_converter (fixture): An instance of the ContributionConverter class
        input_name (str): A 'name' argument value to test
        expected_output_name (str): The value expected to be returned by passing 'input_name'
            to the 'format_pack_dir_name' method
    Scenario: Creating a new pack from a contribution zip file
    Given
    - A pack name
    When
    - The pack name is passed to the 'format_pack_dir_name' method
    Then
    - Ensure the reformatted pack name returned by the method matches the expected output
    - Ensure the reformatted pack name returned by the method contains only valid characters
        (alphanumeric, underscore, and dash with no whitespace)
    """
    output_name = contrib_converter.format_pack_dir_name(input_name)
    assert output_name == expected_output_name
    assert not re.search(
        r'\s', output_name), 'Whitespace was found in the returned value from executing "format_pack_dir_name"'
    err_msg = 'Characters other than alphanumeric, underscore, and dash were found in the output'
    # Generator expression (not a list) lets `all` short-circuit on the first
    # invalid character instead of materializing the whole list first.
    assert all(char.isalnum() or char in {'_', '-'} for char in output_name), err_msg
    if len(output_name) > 1:
        first_char = output_name[0]
        if first_char.isalpha():
            assert first_char.isupper(), 'The output\'s first character should be capitalized'
        assert not output_name.startswith(('-', '_')), 'The output\'s first character must be alphanumeric'
        assert not output_name.endswith(('-', '_')), 'The output\'s last character must be alphanumeric'
def test_convert_contribution_dir_to_pack_contents(tmp_path):
    """
    Scenario: convert a directory which was unarchived from a contribution zip into the content
        pack directory into which the contribution is intended to update, and the contribution
        includes a file that already exists in the pack

    Given a pack that already contains
        IncidentFields/incidentfield-SomeIncidentField.json
    When the unarchived contribution adds a sibling directory
        incidentfield/incidentfield-SomeIncidentField.json
    Then the incoming file must be moved into 'IncidentFields/', overwriting the
        pre-existing file, and the temporary 'incidentfield' directory must be removed.
    """
    # Pre-existing pack content.
    incident_fields_dir = tmp_path / 'IncidentFields'
    incident_fields_dir.mkdir()
    existing_file = incident_fields_dir / 'incidentfield-SomeIncidentField.json'
    existing_file.write_text(json.dumps({"field": "old_value"}))
    # Unarchived contribution content that should replace the file above.
    extracted_dir = tmp_path / 'incidentfield'
    extracted_dir.mkdir()
    incoming_file = extracted_dir / 'incidentfield-SomeIncidentField.json'
    incoming_content = {"field": "new_value"}
    incoming_file.write_text(json.dumps(incoming_content))
    converter = ContributionConverter()
    converter.pack_dir_path = tmp_path
    converter.convert_contribution_dir_to_pack_contents(extracted_dir)
    # The pack file now holds the contributed content and the temp dir is gone.
    assert json.loads(existing_file.read_text()) == incoming_content
    assert not extracted_dir.exists()
@pytest.mark.parametrize('contribution_converter', ['TestPack'], indirect=True)
class TestEnsureUniquePackDirName:
    """Tests for ContributionConverter.ensure_unique_pack_dir_name and format_converted_pack."""
    def test_ensure_unique_pack_dir_name_no_conflict(self, contribution_converter):
        """Test the 'ensure_unique_pack_dir_name' method
        Args:
            contribution_converter (fixture): An instance of the ContributionConverter class
        Scenario: Creating a new pack from a contribution zip file
        Given
        - A pack's directory name
        When
        - The pack's proposed directory name is passed to the 'ensure_unique_pack_dir_name' method
        - There does not already exist a pack directory with the proposed name
        Then
        - Ensure the pack directory name returned by the method matches the expected output - should be unchanged
        """
        pack_name = 'TestPack'
        crb_crvrt = contribution_converter
        assert crb_crvrt.name == pack_name
        assert crb_crvrt.dir_name == pack_name
        assert os.path.isdir(crb_crvrt.pack_dir_path)
    def test_ensure_unique_pack_dir_name_with_conflict(self, contribution_converter):
        """Test the 'ensure_unique_pack_dir_name' method
        Args:
            contribution_converter (fixture): An instance of the ContributionConverter class
        Scenario: Creating a new pack from a contribution zip file
        Given
        - A pack's directory name
        When
        - The pack's proposed directory name is passed to the 'ensure_unique_pack_dir_name' method
        - There already exists a pack directory with the proposed name
        Then
        - Ensure the pack directory name returned by the method matches the expected output, which is that a
          version number should have been added
        """
        pack_name = 'TestPack'
        crb_crvrt = contribution_converter
        assert crb_crvrt.name == pack_name
        assert crb_crvrt.dir_name == pack_name
        assert os.path.isdir(crb_crvrt.pack_dir_path)
        new_pack_dir_name = crb_crvrt.ensure_unique_pack_dir_name(pack_name)
        assert new_pack_dir_name != pack_name
        assert new_pack_dir_name == pack_name + 'V2'
    def mock_format_manager(*args):
        # Stand-in for 'format_manager' that simply records the call args.
        return args
    @pytest.mark.parametrize('new_pack', [True, False])
    def test_format_converted_pack(self, contribution_converter, mocker, new_pack):
        """Test the 'format_converted_pack' method
        Args:
            contribution_converter (fixture): An instance of the ContributionConverter class
        Scenario: Formatting the added/modified files by including the untracked files in a non-interactive mode
        Given
        - ContributionConverter class
        When
        - Running the format_converted_pack method to format the files
        Then
        - Ensure that we format the untracked files as well and the interactive flag is set to false
        """
        contribution_converter.create_new = new_pack
        # BUGFIX: this kwarg was previously misspelled 'side_efect' (so the mock
        # silently ignored it) and the stub was *called* instead of being passed
        # as the callable. Pass the bound method itself as 'side_effect'.
        result = mocker.patch('demisto_sdk.commands.init.contribution_converter.format_manager',
                              side_effect=self.mock_format_manager)
        contribution_converter.format_converted_pack()
        assert result.call_args[1].get('include_untracked')
        assert result.call_args[1].get('interactive') is False
    def test_ensure_unique_pack_dir_name_with_conflict_and_version_suffix(self, contribution_converter):
        """Test the 'ensure_unique_pack_dir_name' method
        Args:
            contribution_converter (fixture): An instance of the ContributionConverter class
        Scenario: Creating a new pack from a contribution zip file
        Given
        - A pack's directory name
        When
        - The pack's proposed directory name is passed to the 'ensure_unique_pack_dir_name' method
        - There already exists a pack directory with the proposed name
        - The proposed name ends with a version suffix, e.g. 'V2'
        Then
        - Ensure the pack directory name returned by the method matches the expected output, which is that the
          version number should have been incremented
        """
        pack_name = 'TestPack'
        crb_crvrt = contribution_converter
        assert crb_crvrt.name == pack_name
        assert crb_crvrt.dir_name == pack_name
        assert os.path.isdir(crb_crvrt.pack_dir_path)
        new_pack_dir_name = crb_crvrt.ensure_unique_pack_dir_name(pack_name)
        assert new_pack_dir_name != pack_name
        assert new_pack_dir_name == pack_name + 'V2'
        os.makedirs(os.path.join(crb_crvrt.packs_dir_path, new_pack_dir_name))
        incremented_new_pack_dir_name = crb_crvrt.ensure_unique_pack_dir_name(new_pack_dir_name)
        assert incremented_new_pack_dir_name == pack_name + 'V3'
class TestReleaseNotes:
    """Tests for release-note templating performed by the ContributionConverter."""
    @pytest.fixture(autouse=True)
    def rn_file_copy(self):
        # Work on a throwaway copy of the RN file so tests never mutate the source.
        yield shutil.copyfile(SOURCE_RELEASE_NOTES_FILE, RELEASE_NOTES_COPY)
        if os.path.exists(RELEASE_NOTES_COPY):
            os.remove(RELEASE_NOTES_COPY)
    def test_replace_RN_template_with_value(self, mocker, contrib_converter, rn_file_copy):
        """Test the 'replace_RN_template_with_value' method
        Scenario:
            Adding the user's release note text to the rn file that was generated by the UpdateRN class.
            Detected content item has less object than git detected.
        Given
        - A pack's release note file path
        When
        - The contribution was made to an existing pack.
        Then
        - Ensure the RN file template text was modified with the user's input
        """
        contrib_converter.release_notes = "#### Integrations\n##### CrowdStrikeMalquery\n- release note entry number " \
                                          "#1\n- release note entry number #2\n\n#### Playbooks\n##### " \
                                          "CrowdStrikeMalquery - Multidownload and Fetch\n- changed this playbook\n- " \
                                          "Updated another thing\n\n"
        contrib_converter.detected_content_items = [
            {
                "id": "CrowdStrikeMalquery_copy",
                "name": "CrowdStrikeMalquery_copy",
                "source_id": "CrowdStrikeMalquery",
                "source_name": "CrowdStrikeMalquery",
                "source_file_name": "Packs/CrowdStrikeMalquery/Integrations/CrowdStrikeMalquery/CrowdStrikeMalquery.yml"
            }
        ]
        mocker.patch.object(UpdateRN, 'get_display_name', return_value='CrowdStrike Malquery')
        contrib_converter.replace_RN_template_with_value(RELEASE_NOTES_COPY)
        # Redundant trailing 'assert True' removed: it asserted nothing.
        assert util_open_file(RELEASE_NOTES_COPY) == util_open_file(EXPECTED_RELEASE_NOTES)
    def test_format_user_input(self, mocker, contrib_converter, rn_file_copy):
        """Test the 'format_user_input' method
        Given
        - A pack's release note file path
        When
        - The contribution was made to an existing pack.
        Then
        - Ensure the dictionary being built contains the relevant data with the content item display name if exists.
        """
        contrib_converter.release_notes = "#### Integrations\n##### CrowdStrikeMalquery\n- release note entry number " \
                                          "#1\n- release note entry number #2\n\n#### Playbooks\n##### " \
                                          "CrowdStrikeMalquery - Multidownload and Fetch\n- changed this playbook\n- " \
                                          "Updated another thing\n\n"
        contrib_converter.detected_content_items = [
            {"id": "a8026480-a286-46c7-8c44-b5161a37009d",
             "name": "CrowdStrikeMalquery - Multidownload and Fetch_copy",
             "source_id": "CrowdStrikeMalquery - Multidownload and Fetch",
             "source_name": "CrowdStrikeMalquery - Multidownload and Fetch",
             "source_file_name": "Packs/CrowdStrikeMalquery/Playbooks/CrowdStrikeMalquery_-_GenericPolling_"
                                 "-_Multidownload_and_Fetch.yml"},
            {"id": "CrowdStrikeMalquery_copy",
             "name": "CrowdStrikeMalquery_copy",
             "source_id": "CrowdStrikeMalquery",
             "source_name": "CrowdStrikeMalquery",
             "source_file_name": "Packs/CrowdStrikeMalquery/Integrations/CrowdStrikeMalquery/CrowdStrikeMalquery.yml"}]
        expected_rn_per_content_item = {'CrowdStrike Malquery':
                                        '- release note entry number #1\n- release note entry number #2\n',
                                        'CrowdStrikeMalquery - Multidownload and Fetch':
                                        '- changed this playbook\n- Updated another thing\n'}
        mocker.patch.object(
            UpdateRN, 'get_display_name',
            side_effect=['CrowdStrike Malquery', 'CrowdStrikeMalquery - Multidownload and Fetch'])
        rn_per_content_item = contrib_converter.format_user_input()
        assert expected_rn_per_content_item == rn_per_content_item
|
# Package entry point: delegate to the command-line interface defined in .main.
from .main import cli
if __name__ == '__main__':
    cli()
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from polyaxon import types
from polycommon.conf.exceptions import ConfException
from polycommon.conf.handlers.env_handler import EnvConfHandler
from polycommon.options.option import Option, OptionScope, OptionStores
class DummyEnvOption(Option):
    """Optional INT option backed by the env store, with no default value."""
    key = "FOO_BAR1"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = True
    is_list = False
    store = OptionStores.ENV
    typing = types.INT
    default = None
    options = None
class DummyOptionalDefaultEnvOption(Option):
    """Optional STR option backed by the env store, with default 'default_env'."""
    key = "FOO_BAR2"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = True
    is_list = False
    store = OptionStores.ENV
    typing = types.STR
    default = "default_env"
    options = None
class DummyNonOptionalEnvOption(Option):
    """Required (non-optional) INT option backed by the env store."""
    key = "FOO_BAR3"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = False
    is_list = False
    store = OptionStores.ENV
    typing = types.INT
    default = None
    options = None
class DummySecretEnvOption(Option):
    """Required secret INT option backed by the env store."""
    key = "FOO_BAR4"
    scope = OptionScope.GLOBAL
    is_secret = True
    is_optional = False
    is_list = False
    store = OptionStores.ENV
    typing = types.INT
    default = None
    options = None
class TestClusterOptionsHandler(TestCase):
    """Exercises EnvConfHandler get/set/delete against the dummy options above."""
    def setUp(self):
        super().setUp()
        self.env_options_handler = EnvConfHandler()
    def test_get_default_value(self):
        """Unset options: optional ones yield their default; required and
        secret ones raise ConfException."""
        assert self.env_options_handler.get(DummyEnvOption) is None
        assert (
            self.env_options_handler.get(DummyOptionalDefaultEnvOption) == "default_env"
        )
        with self.assertRaises(ConfException):
            self.env_options_handler.get(DummyNonOptionalEnvOption)
        with self.assertRaises(ConfException):
            self.env_options_handler.get(DummySecretEnvOption)
    def test_set_get_delete_value(self):
        """set() then get() round-trips values (STR-typed options come back as
        strings), and delete() restores the unset behavior of each option."""
        self.env_options_handler.set(DummyEnvOption, 123)
        self.env_options_handler.set(DummyOptionalDefaultEnvOption, 123)
        self.env_options_handler.set(DummyNonOptionalEnvOption, 123)
        self.env_options_handler.set(DummySecretEnvOption, 123)
        assert self.env_options_handler.get(DummyEnvOption) == 123
        # STR typing: the int written to the environment is read back as "123".
        assert self.env_options_handler.get(DummyOptionalDefaultEnvOption) == "123"
        assert self.env_options_handler.get(DummyNonOptionalEnvOption) == 123
        assert self.env_options_handler.get(DummySecretEnvOption) == 123
        self.env_options_handler.delete(DummyEnvOption)
        self.env_options_handler.delete(DummyOptionalDefaultEnvOption)
        self.env_options_handler.delete(DummyNonOptionalEnvOption)
        self.env_options_handler.delete(DummySecretEnvOption)
        assert self.env_options_handler.get(DummyEnvOption) is None
        assert (
            self.env_options_handler.get(DummyOptionalDefaultEnvOption) == "default_env"
        )
        with self.assertRaises(ConfException):
            self.env_options_handler.get(DummyNonOptionalEnvOption)
        with self.assertRaises(ConfException):
            self.env_options_handler.get(DummySecretEnvOption)
|
import vistrails.core.db.action
import vistrails.db.versions
import vistrails.core.modules.module_registry
import vistrails.core.modules.utils
from vistrails.core.modules.vistrails_module import Module, ModuleError, \
ModuleConnector, InvalidOutput
from vistrails.core.modules.basic_modules import NotCacheable, Constant
from vistrails.core.vistrail.pipeline import Pipeline
from vistrails.core.vistrail.annotation import Annotation
from vistrails.core.vistrail.group import Group
from vistrails.core.vistrail.module_function import ModuleFunction
from vistrails.core.vistrail.module_param import ModuleParam
from vistrails.core.vistrail.vistrail import Vistrail
from vistrails.core.db.locator import XMLFileLocator
from vistrails.core.vistrail.controller import VistrailController
from vistrails.core.interpreter.default import get_default_interpreter
from vistrails.core.db.io import serialize, unserialize
from vistrails.core.log.module_exec import ModuleExec
from vistrails.core.log.group_exec import GroupExec
from vistrails.core.log.machine import Machine
from vistrails.core.utils import xor, long2bytes
from vistrails.db.domain import IdScope
from base64 import b16encode, b16decode
import copy
import inspect
from itertools import izip
import os
import re
import sys
import tempfile
from IPython.parallel.error import CompositeError
from .api import get_client
# Prefer hashlib (Python >= 2.5); fall back to the deprecated 'sha'
# module on very old interpreters.
try:
    import hashlib
    sha1_hash = hashlib.sha1
except ImportError:
    import sha
    sha1_hash = sha.new
###############################################################################
# This function is sent to the engines which execute it
#
# It receives the workflow, and the list of targeted output ports
#
# It returns the corresponding computed outputs and the execution log
#
def execute_wf(wf, output_port):
    """Execute a serialized single-pipeline workflow on an IPython engine.

    NOTE: legacy Python 2 code (uses the 0L long literal below).

    Args:
        wf: the serialized pipeline (string), as produced by serialize()
        output_port: name of the output port whose value should be returned

    Returns:
        dict with an 'errors' list; on success it also carries 'output',
        'serializable' (module descriptor sigstring or None) and the
        serialized 'xml_log' / 'machine_log' entries.
    """
    # Save the workflow in a temporary file
    temp_wf_fd, temp_wf = tempfile.mkstemp()
    try:
        f = open(temp_wf, 'w')
        f.write(wf)
        f.close()
        os.close(temp_wf_fd)
        # Clean the cache
        interpreter = get_default_interpreter()
        interpreter.flush()
        # Load the Pipeline from the temporary file
        vistrail = Vistrail()
        locator = XMLFileLocator(temp_wf)
        workflow = locator.load(Pipeline)
        # Build a Vistrail from this single Pipeline
        action_list = []
        for module in workflow.module_list:
            action_list.append(('add', module))
        for connection in workflow.connection_list:
            action_list.append(('add', connection))
        action = vistrails.core.db.action.create_action(action_list)
        vistrail.add_action(action, 0L)
        vistrail.update_id_scope()
        tag = 'parallel flow'
        vistrail.addTag(tag, action.id)
        # Build a controller and execute
        controller = VistrailController()
        controller.set_vistrail(vistrail, None)
        controller.change_selected_version(vistrail.get_version_number(tag))
        execution = controller.execute_current_workflow(
                custom_aliases=None,
                custom_params=None,
                extra_info=None,
                reason='API Pipeline Execution')
        # Build a list of errors
        errors = []
        pipeline = vistrail.getPipeline(tag)
        execution_errors = execution[0][0].errors
        if execution_errors:
            for key in execution_errors:
                module = pipeline.modules[key]
                msg = '%s: %s' %(module.name, execution_errors[key])
                errors.append(msg)
        # Get the execution log from the controller
        try:
            module_log = controller.log.workflow_execs[0].item_execs[0]
        except IndexError:
            errors.append("Module log not found")
            return dict(errors=errors)
        else:
            machine = controller.log.workflow_execs[0].machines[
                    module_log.machine_id]
            xml_log = serialize(module_log)
            machine_log = serialize(machine)
        # Get the output value
        output = None
        serializable = None
        if not execution_errors:
            executed_module, = execution[0][0].executed
            executed_module = execution[0][0].objects[executed_module]
            try:
                output = executed_module.get_output(output_port)
            except ModuleError:
                errors.append("Output port not found: %s" % output_port)
                return dict(errors=errors)
            reg = vistrails.core.modules.module_registry.get_module_registry()
            base_classes = inspect.getmro(type(output))
            if Module in base_classes:
                # Module outputs can't cross process boundaries directly:
                # send the descriptor sigstring plus a serialized payload.
                serializable = reg.get_descriptor(type(output)).sigstring
                output = output.serialize()
        # Return the dictionary, that will be sent back to the client
        return dict(errors=errors,
                    output=output,
                    serializable=serializable,
                    xml_log=xml_log,
                    machine_log=machine_log)
    finally:
        os.unlink(temp_wf)
###############################################################################
# One ANSI escape sequence: ESC followed either by a CSI sequence
# ("[" + parameter bytes + a final letter) or by any single non-"[" byte.
_ansi_code = re.compile('\x1B' + r'(?:(?:\[[^A-Za-z]*[A-Za-z])|[^\[])')
def strip_ansi_codes(s):
    """Return *s* with every ANSI terminal escape sequence removed."""
    return _ansi_code.sub('', s)
###############################################################################
# Map Operator
#
class Map(Module):
    """The Map Module executes a map operator in parallel on IPython engines.
    The FunctionPort should be connected to the 'self' output of the module you
    want to execute.
    The InputList is the list of values to be scattered on the engines.

    NOTE: legacy Python 2 code (iteritems, izip, 0L literals,
    'except X, e' syntax); do not run under Python 3 without porting.
    """
    def __init__(self):
        Module.__init__(self)
    def update_upstream(self):
        """A modified version of the update_upstream method."""
        # everything is the same except that we don't update anything
        # upstream of FunctionPort
        for port_name, connector_list in self.inputPorts.iteritems():
            if port_name == 'FunctionPort':
                for connector in connector_list:
                    connector.obj.update_upstream()
            else:
                for connector in connector_list:
                    connector.obj.update()
        # iterate over a copy since remove_input_connector mutates inputPorts
        for port_name, connectorList in copy.copy(self.inputPorts.items()):
            if port_name != 'FunctionPort':
                for connector in connectorList:
                    if connector.obj.get_output(connector.port) is \
                            InvalidOutput:
                        self.remove_input_connector(port_name, connector)
    @staticmethod
    def print_compositeerror(e):
        """Write each engine's formatted traceback from a CompositeError to stderr."""
        sys.stderr.write("Got %d exceptions from IPython engines:\n" %
                         len(e.elist))
        for e_type, e_msg, formatted_tb, infos in e.elist:
            sys.stderr.write("Error from engine %d (%r):\n" % (
                             infos['engine_id'], infos['engine_uuid']))
            sys.stderr.write("%s\n" % strip_ansi_codes(formatted_tb))
    @staticmethod
    def list_exceptions(e):
        """Return a one-line-per-engine summary string for a CompositeError."""
        return '\n'.join(
                "% 3d: %s: %s" % (infos['engine_id'],
                                  e_type,
                                  e_msg)
                for e_type, e_msg, tb, infos in e.elist)
    def updateFunctionPort(self):
        """
        Function to be used inside the updateUsptream method of the Map module. It
        updates the module connected to the FunctionPort port, executing it in
        parallel.
        """
        nameInput = self.get_input('InputPort')
        nameOutput = self.get_input('OutputPort')
        rawInputList = self.get_input('InputList')
        # Create inputList to always have iterable elements
        # to simplify code
        if len(nameInput) == 1:
            element_is_iter = False
            inputList = [[element] for element in rawInputList]
        else:
            element_is_iter = True
            inputList = rawInputList
        workflows = []
        module = None
        vtType = None
        # iterating through the connectors
        for connector in self.inputPorts.get('FunctionPort'):
            module = connector.obj
            # pipeline
            original_pipeline = connector.obj.moduleInfo['pipeline']
            # module
            module_id = connector.obj.moduleInfo['moduleId']
            vtType = original_pipeline.modules[module_id].vtType
            # serialize the module for each value in the list
            for i, element in enumerate(inputList):
                if element_is_iter:
                    self.element = element
                else:
                    self.element = element[0]
                # checking type and setting input in the module
                self.typeChecking(connector.obj, nameInput, inputList)
                self.setInputValues(connector.obj, nameInput, element, i)
                pipeline_db_module = original_pipeline.modules[module_id].do_copy()
                # transforming a subworkflow in a group
                # TODO: should we also transform inner subworkflows?
                if pipeline_db_module.is_abstraction():
                    group = Group(id=pipeline_db_module.id,
                                  cache=pipeline_db_module.cache,
                                  location=pipeline_db_module.location,
                                  functions=pipeline_db_module.functions,
                                  annotations=pipeline_db_module.annotations)
                    source_port_specs = pipeline_db_module.sourcePorts()
                    dest_port_specs = pipeline_db_module.destinationPorts()
                    for source_port_spec in source_port_specs:
                        group.add_port_spec(source_port_spec)
                    for dest_port_spec in dest_port_specs:
                        group.add_port_spec(dest_port_spec)
                    group.pipeline = pipeline_db_module.pipeline
                    pipeline_db_module = group
                # getting highest id between functions to guarantee unique ids
                # TODO: can get current IdScope here?
                if pipeline_db_module.functions:
                    high_id = max(function.db_id
                                  for function in pipeline_db_module.functions)
                else:
                    high_id = 0
                # adding function and parameter to module in pipeline
                # TODO: 'pos' should not be always 0 here
                id_scope = IdScope(beginId=long(high_id+1))
                for elementValue, inputPort in izip(element, nameInput):
                    p_spec = pipeline_db_module.get_port_spec(inputPort, 'input')
                    descrs = p_spec.descriptors()
                    if len(descrs) != 1:
                        raise ModuleError(
                                self,
                                "Tuple input ports are not supported")
                    if not issubclass(descrs[0].module, Constant):
                        raise ModuleError(
                                self,
                                "Module inputs should be Constant types")
                    # NOTE(review): 'type' shadows the builtin for the rest of
                    # this loop body; kept as-is to avoid behavior drift.
                    type = p_spec.sigstring[1:-1]
                    mod_function = ModuleFunction(id=id_scope.getNewId(ModuleFunction.vtType),
                                                  pos=0,
                                                  name=inputPort)
                    mod_param = ModuleParam(id=0L,
                                            pos=0,
                                            type=type,
                                            val=elementValue)
                    mod_function.add_parameter(mod_param)
                    pipeline_db_module.add_function(mod_function)
                # serializing module
                wf = self.serialize_module(pipeline_db_module)
                workflows.append(wf)
            # getting first connector, ignoring the rest
            break
        # IPython stuff
        try:
            rc = get_client()
        except Exception, error:
            raise ModuleError(self, "Exception while loading IPython: "
                              "%s" % error)
        if rc is None:
            raise ModuleError(self, "Couldn't get an IPython connection")
        engines = rc.ids
        if not engines:
            raise ModuleError(
                    self,
                    "Exception while loading IPython: No IPython engines "
                    "detected!")
        # initializes each engine
        # importing modules and initializing the VisTrails application
        # in the engines *only* in the first execution on this engine
        uninitialized = []
        for eng in engines:
            try:
                rc[eng]['init']
            except Exception:
                uninitialized.append(eng)
        if uninitialized:
            init_view = rc[uninitialized]
            with init_view.sync_imports():
                import tempfile
                import inspect
                # VisTrails API
                import vistrails
                import vistrails.core
                import vistrails.core.db.action
                import vistrails.core.application
                import vistrails.core.modules.module_registry
                from vistrails.core.db.io import serialize
                from vistrails.core.vistrail.vistrail import Vistrail
                from vistrails.core.vistrail.pipeline import Pipeline
                from vistrails.core.db.locator import XMLFileLocator
                from vistrails.core.vistrail.controller import VistrailController
                from vistrails.core.interpreter.default import get_default_interpreter
            # initializing a VisTrails application
            try:
                init_view.execute(
                        'app = vistrails.core.application.init('
                        '        {"spawned": True},'
                        '        args=[])',
                        block=True)
            except CompositeError, e:
                self.print_compositeerror(e)
                raise ModuleError(self, "Error initializing application on "
                                  "IPython engines:\n"
                                  "%s" % self.list_exceptions(e))
            init_view['init'] = True
        # setting computing color
        module.logging.set_computing(module)
        # executing function in engines
        # each map returns a dictionary
        try:
            ldview = rc.load_balanced_view()
            map_result = ldview.map_sync(execute_wf, workflows, [nameOutput]*len(workflows))
        except CompositeError, e:
            self.print_compositeerror(e)
            raise ModuleError(self, "Error from IPython engines:\n"
                              "%s" % self.list_exceptions(e))
        # verifying errors
        errors = []
        for engine in range(len(map_result)):
            if map_result[engine]['errors']:
                msg = "ModuleError in engine %d: '%s'" % (
                        engine,
                        ', '.join(map_result[engine]['errors']))
                errors.append(msg)
        if errors:
            raise ModuleError(self, '\n'.join(errors))
        # setting success color
        module.logging.signalSuccess(module)
        import vistrails.core.modules.module_registry
        reg = vistrails.core.modules.module_registry.get_module_registry()
        self.result = []
        for map_execution in map_result:
            serializable = map_execution['serializable']
            output = None
            if not serializable:
                output = map_execution['output']
            else:
                # Module outputs were serialized on the engine: look up the
                # descriptor by sigstring and deserialize the payload.
                d_tuple = vistrails.core.modules.utils.parse_descriptor_string(serializable)
                d = reg.get_descriptor_by_name(*d_tuple)
                module_klass = d.module
                output = module_klass().deserialize(map_execution['output'])
            self.result.append(output)
        # including execution logs
        for engine in range(len(map_result)):
            log = map_result[engine]['xml_log']
            exec_ = None
            if (vtType == 'abstraction') or (vtType == 'group'):
                exec_ = unserialize(log, GroupExec)
            elif (vtType == 'module'):
                exec_ = unserialize(log, ModuleExec)
            else:
                # something is wrong...
                continue
            # assigning new ids to existing annotations
            exec_annotations = exec_.annotations
            for i in range(len(exec_annotations)):
                exec_annotations[i].id = self.logging.log.log.id_scope.getNewId(Annotation.vtType)
            parallel_annotation = Annotation(key='parallel_execution', value=True)
            parallel_annotation.id = self.logging.log.log.id_scope.getNewId(Annotation.vtType)
            annotations = [parallel_annotation] + exec_annotations
            exec_.annotations = annotations
            # before adding the execution log, we need to get the machine information
            machine = unserialize(map_result[engine]['machine_log'], Machine)
            machine_id = self.logging.add_machine(machine)
            # recursively add machine information to execution items
            def add_machine_recursive(exec_):
                for item in exec_.item_execs:
                    if hasattr(item, 'machine_id'):
                        item.machine_id = machine_id
                    if item.vtType in ('abstraction', 'group'):
                        add_machine_recursive(item)
                exec_.machine_id = machine_id
            if (vtType == 'abstraction') or (vtType == 'group'):
                add_machine_recursive(exec_)
            self.logging.add_exec(exec_)
    def serialize_module(self, module):
        """
        Serializes a module to be executed in parallel.
        """
        # Clear pipeline ids on nested groups so the serialized copy is
        # self-contained.
        def process_group(group):
            group.pipeline.id = None
            for module in group.pipeline.module_list:
                if module.is_group():
                    process_group(module)
        pipeline = Pipeline(version=vistrails.db.versions.currentVersion)
        if module.is_group():
            process_group(module)
        module = module.do_copy()
        pipeline.add_module(module)
        return serialize(pipeline)
    def compute(self):
        """The compute method for Map."""
        self.result = None
        self.updateFunctionPort()
        self.set_output('Result', self.result)
###############################################################################
class NewConstant(Constant):
    """
    A new Constant module to be used inside the Map module.
    """
    def setValue(self, v):
        # Push the value straight to the output port and mark the module as
        # up-to-date so no compute pass is needed.
        self.set_output("value", v)
        self.upToDate = True
def create_constant(value):
    """Wrap *value* in a NewConstant module, ready to feed a ModuleConnector."""
    module = NewConstant()
    module.setValue(value)
    return module
def get_module(value, signature):
    """
    Creates a module for value, in order to do the type checking.

    Returns the basic-module class matching the Python type of `value`
    (a tuple of classes for tuple values), or None when unrecognized.
    """
    from vistrails.core.modules.basic_modules import Boolean, String, Integer, Float, List
    if isinstance(value, Constant):
        return type(value)
    # bool must be tested before int: bool is a subclass of int
    elif isinstance(value, bool):
        return Boolean
    elif isinstance(value, str):
        return String
    elif isinstance(value, int):
        return Integer
    elif isinstance(value, float):
        return Float
    elif isinstance(value, list):
        return List
    elif isinstance(value, tuple):
        v_modules = ()
        for element in xrange(len(value)):
            # BUGFIX: the recursive result must be wrapped in a 1-tuple.
            # Without the trailing comma this was `tuple += class`, which
            # raised TypeError instead of accumulating the classes.
            v_modules += (get_module(value[element], signature[element]),)
        return v_modules
    else:
        from vistrails.core import debug
        debug.warning("Could not identify the type of the list element.")
        debug.warning("Type checking is not going to be done inside Map module.")
        return None
|
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the custom user model manager."""

    def test_create_user_with_email_successful(self):
        """Test creating a new user with an email is successful"""
        address = 'test@netholdings.com.au'
        secret = 'good4now'
        created = get_user_model().objects.create_user(
            email=address,
            password=secret,
        )
        self.assertEqual(created.email, address)
        self.assertTrue(created.check_password(secret))

    def test_new_user_email_normalised(self):
        """Test the email for a new user normalised"""
        address = 'test@LONDONAPPDEV.COM'
        created = get_user_model().objects.create_user(address, 'test123')
        self.assertEqual(created.email, address.lower())

    def test_new_user_invalid_email(self):
        """Test creating user with no email raises error"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_new_superuser(self):
        """Test creating a new superuser"""
        admin = get_user_model().objects.create_superuser(
            'test@netholdings.com.au',
            'password123',
        )
        self.assertTrue(admin.is_superuser)
        self.assertTrue(admin.is_staff)
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from events.models import EventPage, EventSignup
class DiscordUserSerialiser(serializers.Serializer):
    """Read-only serialiser exposing a Discord username string."""
    discord_user = serializers.CharField(read_only=True)

    def create(self, validated_data):
        # Read-only: creation is intentionally a no-op.
        pass

    def update(self, instance, validated_data):
        # Read-only: updates are intentionally a no-op.
        pass
class UserSerialiser(serializers.ModelSerializer):
    """Serialises the basic identity fields of Django's User model."""
    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email']
class GroupSerialiser(serializers.ModelSerializer):
    """Serialises a Django auth Group by name only."""
    class Meta:
        model = Group
        fields = ['name']
class UserGroupSerialiser(serializers.ModelSerializer):
    """Serialises a user's permission-related fields with nested groups."""
    # Nested read of all groups the user belongs to.
    groups = GroupSerialiser(many=True)

    class Meta:
        model = User
        fields = ['groups', 'is_staff', 'is_superuser']
class RolesProfileSerialiser(serializers.Serializer):
    """Read-only profile view focused on roles/permissions."""
    nickname = serializers.CharField(read_only=True)
    user = UserGroupSerialiser()

    def create(self, validated_data):
        # Read-only: creation is intentionally a no-op.
        pass

    def update(self, instance, validated_data):
        # Read-only: updates are intentionally a no-op.
        pass
class ProfileSerialiser(serializers.Serializer):
    """Read-only profile view with the user's identity fields."""
    nickname = serializers.CharField(read_only=True)
    user = UserSerialiser()

    def create(self, validated_data):
        # Read-only: creation is intentionally a no-op.
        pass

    def update(self, instance, validated_data):
        # Read-only: updates are intentionally a no-op.
        pass
class EventSignupSerialiser(serializers.ModelSerializer):
    """Serialises a single event signup record."""
    class Meta:
        model = EventSignup
        fields = ('event', 'member', 'signup_created', 'comment')
class EventSerialiser(serializers.ModelSerializer):
    """Serialises an EventPage including its signup window metadata."""
    class Meta:
        model = EventPage
        fields = (
            'title', 'id', 'location', 'start', 'finish', 'description', 'cancelled', 'signup_limit', 'signup_open',
            'signup_close', 'signup_freshers_open', 'signup_count')
|
import itertools
import random
import networkx as nx
import sys
import pandas as pd
sys.setrecursionlimit(2000)
def recursion(a, b, n, m):
    """Return the Levenshtein edit distance between a[:n] and b[:m].

    The original implementation recursed on every branch without
    memoisation (O(3^(n+m)) and easily blew the recursion limit); this
    bottom-up DP computes the identical value in O(n*m) time and O(m)
    space while keeping the public signature unchanged.
    """
    # dp[j] = distance between the current prefix of a and b[:j].
    dp = list(range(m + 1))
    for i in range(1, n + 1):
        prev_diag = dp[0]  # dp[j-1] from the previous row
        dp[0] = i
        for j in range(1, m + 1):
            cur = dp[j]
            if a[i - 1] == b[j - 1]:
                # Matching characters cost nothing: take the diagonal.
                dp[j] = prev_diag
            else:
                # 1 + min(substitute, delete, insert)
                dp[j] = 1 + min(prev_diag, dp[j], dp[j - 1])
            prev_diag = cur
    return dp[m]
def EditDistance(str1, str2):
    """Return the edit distance between the full strings *str1* and *str2*."""
    return recursion(str1, str2, len(str1), len(str2))
def printRecursive(str1, str2, path, i, j):
    """Reconstruct the alignment of str1[:i] and str2[:j] from the
    back-pointer matrix *path* (1 = diagonal/match-substitute,
    2 = up/deletion, 3 = left/insertion).

    Returns a pair of equal-length strings where '-' marks a gap.
    """
    if i == 0 and j == 0:
        return ('', '')
    move = path[i][j]
    if move == 1:
        left, right = printRecursive(str1, str2, path, i - 1, j - 1)
        return (left + str1[i - 1], right + str2[j - 1])
    if move == 2:
        left, right = printRecursive(str1, str2, path, i - 1, j)
        return (left + str1[i - 1], right + '-')
    if move == 3:
        left, right = printRecursive(str1, str2, path, i, j - 1)
        return (left + '-', right + str2[j - 1])
    # No recorded move: treat as the start of the alignment.
    return ('', '')
def main(infile, outfile):
    """Read two strings (one per line) from *infile*, compute their edit
    distance and write it to *outfile* as a decimal string."""
    lines = [raw.rstrip('\n') for raw in infile]
    print(lines)
    distance = str(EditDistance(lines[0], lines[1]))
    print(distance)
    outfile.write(distance)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import collections
import logging
from django.conf import settings
from ralph.util import plugin
from ralph_pricing.models import UsageType, Venture, DailyUsage
from ralph_pricing.openstack import OpenStack
logger = logging.getLogger(__name__)
def set_usages(venture_symbol, data, date):
    """Persist the OpenStack usage metrics in *data* as DailyUsage rows
    for the venture identified by *venture_symbol* on *date*.

    Unknown ventures are logged and skipped.  Each metric is stored under
    its own UsageType (created on first use); *multiplier* converts the
    reported unit down (e.g. MiB-hours / 1024 -> GiB-hours).
    """
    try:
        venture = Venture.objects.get(symbol=venture_symbol)
    except Venture.DoesNotExist:
        logger.error('Venture: %s does not exist' % venture_symbol)
        return

    def set_usage(name, key, venture, multiplier):
        # Metrics absent from this tenant's report are simply skipped.
        if key not in data:
            return
        usage_type, created = UsageType.objects.get_or_create(name=name)
        # One DailyUsage row per (date, type, venture); re-running the
        # plugin updates the value in place.
        usage, created = DailyUsage.objects.get_or_create(
            date=date,
            type=usage_type,
            pricing_venture=venture,
        )
        usage.value = data[key] / multiplier
        usage.save()

    if venture:
        set_usage(
            'OpenStack 10000 Memory GiB Hours',
            'total_memory_mb_usage',
            venture,
            1024,
        )
        set_usage(
            'OpenStack 10000 CPU Hours',
            'total_vcpus_usage',
            venture,
            1,
        )
        set_usage(
            'OpenStack 10000 Disk GiB Hours',
            'total_local_gb_usage',
            venture,
            1,
        )
        set_usage(
            'OpenStack 10000 Volume GiB Hours',
            'total_volume_gb_usage',
            venture,
            1,
        )
        set_usage(
            'OpenStack 10000 Images GiB Hours',
            'total_images_gb_usage',
            venture,
            1,
        )
@plugin.register(chain='pricing', requires=['ventures'])
def openstack(**kwargs):
    """Updates OpenStack usage per Venture"""
    # Bail out early when OpenStack integration is not configured.
    if settings.OPENSTACK_URL is None:
        return False, 'Not configured.', kwargs
    # tenants[tenant_id][region_or_url] -> merged usage dict for that source.
    tenants = collections.defaultdict(lambda: collections.defaultdict(dict))
    date = kwargs['today']
    # Collect usage for the 24 hours preceding the run date.
    end = date
    start = end - datetime.timedelta(days=1)
    ventures = {}
    for region in getattr(settings, 'OPENSTACK_REGIONS', ['']):
        stack = OpenStack(
            settings.OPENSTACK_URL,
            settings.OPENSTACK_USER,
            settings.OPENSTACK_PASS,
            region=region,
        )
        # tenant_id -> venture symbol; later regions override earlier ones.
        ventures.update(stack.get_ventures())
        for data in stack.simple_tenant_usage(start, end):
            tenants[data['tenant_id']][region].update(data)
        # Optional extra endpoints, keyed by their URL instead of a region.
        for url, query in getattr(settings, 'OPENSTACK_EXTRA_QUERIES', []):
            for data in stack.query(
                query,
                url=url,
                start=start.strftime('%Y-%m-%dT%H:%M:%S'),
                end=end.strftime('%Y-%m-%dT%H:%M:%S'),
            ):
                tenants[data['tenant_id']][url].update(data)
    # NOTE: iteritems() -- this module targets Python 2.
    for tenant_id, regions in tenants.iteritems():
        for region, data in regions.iteritems():
            venture_symbol = ventures.get(data['tenant_id'])
            if venture_symbol:
                set_usages(venture_symbol, data, date)
    return True, 'Openstack usages were saved', kwargs
|
import os
import glob
import torch
import pickle
from os import listdir
from os.path import isfile, join
import numpy as np
import librosa
import librosa.display
import matplotlib.pyplot as plt
from scipy.fftpack import dct
from torch.utils.data import random_split, Dataset, DataLoader
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
def display_spectrogram(spectrogram):
    """Plot a (frames, mels) log-mel spectrogram with librosa and show it.

    The array is transposed so mel bins run along the y axis.
    """
    librosa.display.specshow(spectrogram.transpose(), hop_length=220.5, y_axis='mel', fmax=8000, x_axis='s')
    # getting 7 second in time axis, it should be 3, why???
    # NOTE(review): specshow assumes its default sr (22050); the fractional
    # hop_length may explain the stretched time axis -- TODO confirm.
    plt.title('Mel Spectrogram')
    plt.colorbar(format='%+2.0f dB')
    plt.show()
def logmel_filterbanks(filename, pre_emphasis=0.97, frame_size=0.025, frame_stride=0.01, nfilt=40, normalize=True):
    """Compute a (num_frames, nfilt) log-mel filter-bank matrix for an
    audio file.

    The clip is loaded at librosa's default sample rate, truncated to 3 s
    and looped/padded up to exactly 66150 samples, then run through the
    classic pipeline: pre-emphasis, framing, Hamming window, power FFT,
    triangular mel filter bank, dB scaling and optional min-max/mean
    normalisation.
    """
    # 3 seconds at librosa's default 22050 Hz.
    target_len = 66150
    signal, sample_rate = librosa.load(filename, duration=3)
    # Loop the clip onto itself until it is exactly target_len samples.
    while(signal.shape[0] != target_len):
        signal = np.append(signal, signal[:target_len - signal.shape[0]])
    # Pre-Emphasis step
    # NOTE(review): this np.empty buffer is dead -- it is overwritten by the
    # next assignment and could be deleted.
    emphasized_signal = np.empty(shape=len(signal)+1)
    emphasized_signal = np.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])
    # Framing
    frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate  # Convert from seconds to samples
    signal_length = len(emphasized_signal)
    frame_length = int(round(frame_length))
    frame_step = int(round(frame_step))
    num_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step)) + 1  # Make sure that we have at least 1 frame
    pad_signal_length = num_frames * frame_step + frame_length
    z = np.zeros((pad_signal_length - signal_length))
    pad_signal = np.append(emphasized_signal, z)  # Pad Signal to make sure that all frames have equal number of samples without truncating any samples from the original signal
    # Index matrix: row i selects the samples of frame i (overlapping frames).
    indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T
    frames = pad_signal[indices.astype(np.int32, copy=False)]
    # Hamming-Window
    frames *= np.hamming(frame_length)
    # FFT
    NFFT = 512
    mag_frames = np.absolute(np.fft.rfft(frames, NFFT))  # Magnitude of the FFT
    pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2))
    # Filter-Bank: triangular filters spaced evenly on the mel scale.
    low_freq_mel = 0
    high_freq_mel = (2595 * np.log10(1 + (sample_rate / 2) / 700))  # Convert Hz to Mel
    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)  # Equally spaced in Mel scale
    hz_points = (700 * (10**(mel_points / 2595) - 1))  # Convert Mel to Hz
    bin = np.floor((NFFT + 1) * hz_points / sample_rate)
    fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))
    for m in range(1, nfilt + 1):
        f_m_minus = int(bin[m - 1])   # left
        f_m = int(bin[m])             # center
        f_m_plus = int(bin[m + 1])    # right
        for k in range(f_m_minus, f_m):
            fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])
        for k in range(f_m, f_m_plus):
            fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])
    filter_banks = np.dot(pow_frames, fbank.T)
    filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)  # Numerical Stability
    filter_banks = 20 * np.log10(filter_banks)  # dB
    if normalize == True:
        # Subtract the global mean and scale by the dynamic range.
        filter_banks = (filter_banks - filter_banks.mean()) / (filter_banks.max() - filter_banks.min())
    return filter_banks
def mfcc(filter_banks, num_ceps=13):
    """Return the first *num_ceps* MFCCs per frame (c0 is discarded)
    from a matrix of log filter-bank energies."""
    cepstra = dct(filter_banks, type=2, axis=1, norm='ortho')
    return cepstra[:, 1:(num_ceps + 1)]
# Index the VCTK corpus: one class per speaker directory.
dataset_dir = '/home/bbekci/datasets/vctk/wav48_silence_trimmed'
data = []          # (file_path, speaker_id) pairs
c2i, i2c = {}, {}  # speaker-id <-> class-index lookup tables
for indx, cla in enumerate(os.listdir(dataset_dir)):
    main_path = dataset_dir + '/' + cla + '/*.flac'
    for file_path in glob.glob(main_path):
        data.append((file_path, cla))
    c2i[cla] = indx
    i2c[indx] = cla

# Precompute log-mel features for every clip and cache them in one pickle.
with open('preprocessed_vctk.pkl', 'wb') as pickle_file:
    result = []
    for i in range(0, len(data)):
        sound_path, class_name = data[i]
        sound_data = logmel_filterbanks(sound_path)
        label = c2i[class_name]
        sample = [label, sound_data]
        result.append(sample)
    pickle.dump(result, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)
# Bug fix: the original called f.close() here, but no name `f` exists in
# this script (NameError); the `with` block already closes pickle_file.
class PreprocessedDataset(Dataset):
    """Torch Dataset backed by a pickled list of [label, spectrogram] pairs."""

    def __init__(self, file_dir):
        # file_dir: path of the pickle produced by the preprocessing script.
        self.file_dir = file_dir
        self.lst = 0
        with open(file_dir, 'rb') as pickle_load:
            self.lst = pickle.load(pickle_load)

    def __len__(self):
        return len(self.lst)

    def n_class(self):
        # NOTE(review): returns the label of the *last* sample.  This equals
        # the number of distinct classes only if labels run 0..K in file
        # order -- confirm against the preprocessing script's enumeration.
        return self.lst[-1][0]

    def __getitem__(self, idx):
        # Accept tensor indices as DataLoader samplers may provide them.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sound_data = self.lst[idx][1]
        label = self.lst[idx][0]
        sample = (sound_data, label)
        return sample
# Training configuration and data loaders over the cached features.
dataset_dir = '/home/bbekci/inzpeech/preprocessed_vctk.pkl'
offset_dict = {}
max_epochs = 25
batch_size = 256
sound_data = PreprocessedDataset(file_dir=dataset_dir)
n_classes = sound_data.n_class()
# 80/20 train/test split; the second size is the remainder so the two
# splits always sum to len(sound_data).
train_data, test_data = random_split(sound_data,
                                     [int(len(sound_data) * 0.8),
                                      len(sound_data) - int(len(sound_data) * 0.8)]
                                     )
train_dataset_loader = torch.utils.data.DataLoader(train_data,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=4)
test_dataset_loader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
|
# Copyright (c) 2020 FSMLP Authors. All Rights Reserved.
import argparse
import os, sys
import logging
import cv2
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import math
import unidecode
import onnxruntime
from paddle.fluid.core import PaddleTensor
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor
import multiprocessing
def initial_logger():
    """Configure basic INFO-level logging and return this module's logger."""
    log_format = '%(asctime)s-%(levelname)s: %(message)s'
    logging.basicConfig(format=log_format, level=logging.INFO)
    return logging.getLogger(__name__)
logger = initial_logger()
def parse_args():
    """Build and parse the command-line arguments for the OCR pipeline:
    engine settings, CRAFT text detection, box-orientation classification,
    CRNN recognition (with optional beam search and spell checking) and
    box-merging post-processing."""
    def str2bool(v):
        # Accept "true"/"t"/"1" (any case) as True; everything else is False.
        return v.lower() in ("true", "t", "1")
    parser = argparse.ArgumentParser()
    # params for prediction engine
    parser.add_argument("--use_gpu", type=str2bool, default=True)
    parser.add_argument("--ir_optim", type=str2bool, default=True)
    parser.add_argument("--use_tensorrt", type=str2bool, default=False)
    parser.add_argument("--gpu_mem", type=int, default=8000)
    parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
    parser.add_argument("--use_zero_copy_run", type=str2bool, default=False)
    parser.add_argument("--use_pdserving", type=str2bool, default=False)
    parser.add_argument("--print_to_excel", type=str2bool, default=False)
    parser.add_argument("--checkbox_dir", default="", type=str)
    # params for text detector
    parser.add_argument("--image_dir", default="./input/", type=str)
    parser.add_argument("--output_dir", default='./output/', type=str)
    parser.add_argument("--det_algorithm", type=str, default='CRAFT')
    parser.add_argument("--det_model_dir", type=str, default="./weights/multi_det_craft_infer")
    parser.add_argument("--det_max_side_len", type=float, default=960)
    parser.add_argument("--det_render", type=str2bool, default=True)
    # CRAFT params
    """ Data PreProcessing """
    parser.add_argument(
        '--canvas_size', default=1920,
        type=int, help='image size for inference') #BH: 1280
    parser.add_argument(
        '--mag_ratio', default=1.5,
        type=float, help='image magnification ratio')
    """ Detection Model Specifications """
    parser.add_argument(
        '--text_threshold',
        default=0.7, type=float, #BH: 0.5
        help='text confidence threshold') #This threshold is not used in our case.
    parser.add_argument(
        '--low_text',
        default=0.35, type=float,
        help='text low-bound score') #0.003 was used with 0.2-tt and 0.1-lt #BH: 0.36
    parser.add_argument(
        '--link_threshold',
        default=0.1, type=float, help='link confidence threshold')
    parser.add_argument(
        '--rotated_box',
        type=str2bool, default=True,
        help='use this to get rotated rectangles (bounding box)') # Currently not handling for rotated boxes
    parser.add_argument(
        '--is_dilate',
        type=str2bool, default=False,
        help='use this to specify x_dilation and y_dilation')
    parser.add_argument('--x_dilate', default=3, type=int, help='left x-padding during post processing')
    parser.add_argument('--y_dilate', default=3, type=int, help='up y-padding during post processing')
    # params for text classifier
    parser.add_argument("--use_box_orientation", type=str2bool, default=False)
    parser.add_argument("--bor_model_dir", type=str, default="./weights/bor_infer/")
    parser.add_argument("--bor_image_shape", type=str, default="3, 48, 192")
    parser.add_argument("--label_list", type=list, default=['0', '180'])
    parser.add_argument("--bor_batch_num", type=int, default=30)
    parser.add_argument("--bor_thresh", type=float, default=0.9)
    # params for text recognizer
    parser.add_argument("--rec_algorithm", type=str, default='CRNN')
    parser.add_argument("--rec_model_dir", type=str, default="./weights/multi_rec_crnn_infer/")
    parser.add_argument("--rec_render", type=str2bool, default=True)
    parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
    parser.add_argument("--rec_char_type", type=str, default='ch')
    parser.add_argument("--rec_whitelist", type=str, default='') #whitelist characters while prediction
    parser.add_argument("--rec_blacklist", type=str, default='') #blacklist characters while prediction
    parser.add_argument("--rec_batch_num", type=int, default=6)
    parser.add_argument("--max_text_length", type=int, default=25)
    parser.add_argument(
        "--rec_char_dict_path",
        type=str,
        default="./src/utils/phonemes/ppocr_keys_v1.txt")
    parser.add_argument("--use_space_char", type=str2bool, default=True)
    parser.add_argument(
        "--vis_font_path", type=str, default="./src/utils/fonts/simfang.ttf")
    # params for beam-search decoder in text recognizer
    parser.add_argument("--use_beam_search", type=str2bool, default=False)
    parser.add_argument("--beam_width", type=int, default=100)
    parser.add_argument("--beam_lm_dir", type=str, default='')
    parser.add_argument("--beam_alpha", type=float, default=0)
    parser.add_argument("--beam_beta", type=float, default=0)
    parser.add_argument("--beam_cutoff_top", type=int, default=40)
    parser.add_argument("--beam_cutoff_prob", type=float, default=1.0)
    # params for spell-checker in text recognizer
    parser.add_argument("--use_spell_check", type=str2bool, default=False)
    parser.add_argument("--spell_language", type=str, default="en")
    parser.add_argument("--spell_case_sensitive", type=str2bool, default=False)
    parser.add_argument("--spell_tokenizer", type=str, default='NLTK')
    parser.add_argument("--spell_word_freq", type=str, default='')
    parser.add_argument("--spell_text_corpus", type=str, default='')
    # params for merging resulting values
    parser.add_argument("--merge_boxes", type=str2bool, default=False) # BH: True
    parser.add_argument("--merge_slope_thresh", type=float, default=0.1)
    parser.add_argument("--merge_ycenter_thresh", type=float, default=0.5) # BH: 0.8
    parser.add_argument("--merge_height_thresh", type=float, default=0.5) # BH: 0.6
    parser.add_argument("--merge_width_thresh", type=float, default=1.0) # BH: 2.0
    parser.add_argument("--merge_add_margin", type=float, default=0.05)
    return parser.parse_args()
def create_predictor_onnx(args, mode):
    """Create an onnxruntime InferenceSession for the given *mode*.

    Only 'det' currently maps to a model directory; missing paths are
    logged and terminate the process.
    """
    if mode == 'det':
        model_dir = args.det_model_dir
    if model_dir is None:
        logger.info("not find {} model file path {}".format(mode, model_dir))
        sys.exit(0)
    model_file_path = model_dir + "/model.onnx"
    if not os.path.exists(model_file_path):
        logger.info("not find model file path {}".format(model_file_path))
        sys.exit(0)
    return onnxruntime.InferenceSession(model_file_path)
def create_predictor(args, mode):
    """Build a Paddle AnalysisConfig predictor for 'det', 'bor' or 'rec'.

    Returns (predictor, input_tensor, output_tensors).  Missing model or
    params files are logged and terminate the process.
    """
    if mode == "det":
        model_dir = args.det_model_dir
    elif mode == 'bor':
        model_dir = args.bor_model_dir
    else:
        # Any other mode falls through to the recognizer model.
        model_dir = args.rec_model_dir
    if model_dir is None:
        logger.info("not find {} model file path {}".format(mode, model_dir))
        sys.exit(0)
    model_file_path = model_dir + "/model"
    params_file_path = model_dir + "/params"
    if not os.path.exists(model_file_path):
        logger.info("not find model file path {}".format(model_file_path))
        sys.exit(0)
    if not os.path.exists(params_file_path):
        logger.info("not find params file path {}".format(params_file_path))
        sys.exit(0)
    config = AnalysisConfig(model_file_path, params_file_path)
    if args.use_gpu:
        config.enable_use_gpu(args.gpu_mem, 0)
    else:
        config.disable_gpu()
        config.set_cpu_math_library_num_threads(6)
        if args.enable_mkldnn:
            # cache 10 different shapes for mkldnn to avoid memory leak
            config.set_mkldnn_cache_capacity(10)
            config.enable_mkldnn()
    # config.enable_memory_optim()
    config.disable_glog_info()
    if args.use_zero_copy_run:
        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
        config.switch_use_feed_fetch_ops(False)
    else:
        config.switch_use_feed_fetch_ops(True)
    predictor = create_paddle_predictor(config)
    input_names = predictor.get_input_names()
    # NOTE(review): only the tensor for the *last* input name survives this
    # loop; presumably these models have a single input -- confirm.
    for name in input_names:
        input_tensor = predictor.get_input_tensor(name)
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_tensor(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
def draw_text_det_res(dt_boxes, img_path):
    """Load the image at *img_path* and outline each detected text box
    (a quadrilateral of points) in cyan; return the annotated image."""
    image = cv2.imread(img_path)
    for quad in dt_boxes:
        pts = np.array(quad).astype(np.int32).reshape(-1, 2)
        cv2.polylines(image, [pts], True, color=(255, 255, 0), thickness=2)
    return image
# TODO: Cut this and other sections related to rendering and paste into src/utils/output_utility.py
# TODO: Handle exception when drawing boxes it might go out of Image
def draw_ocr_box_txt(image,
                     boxes,
                     txts,
                     scores=None,
                     drop_score=0.5,
                     font_path="./doc/simfang.ttf"):
    """Render OCR results side by side: the input image with colored box
    outlines on the left, and the recognized (ASCII-transliterated) text
    laid out at the corresponding positions on the right.

    Boxes whose score is below *drop_score* are skipped.  Returns the
    combined picture as a numpy array of width 2*w.
    """
    h, w = image.height, image.width
    img_left = image.copy()
    img_right = Image.new('RGB', (w, h), (255, 255, 255))
    import random
    # Fixed seed so box colors are deterministic across calls.
    random.seed(0)
    draw_left = ImageDraw.Draw(img_left)
    draw_right = ImageDraw.Draw(img_right)
    for idx, (box, txt) in enumerate(zip(boxes, txts)):
        if scores is not None and scores[idx] < drop_score:
            continue
        color = (random.randint(0, 200), random.randint(0, 200),
                 random.randint(0, 200))
        # Outline the detection on the left-hand copy.
        draw_left.line(
            [
                (box[0][0], box[0][1]), (box[1][0] - 3, box[1][1]), (box[2][0] - 3,
                box[2][1]), (box[3][0], box[3][1]), (box[0][0], box[0][1])
            ],
            fill=color, width=4)
        # Euclidean side lengths of the (possibly rotated) quadrilateral.
        box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][
            1])**2)
        box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][
            1])**2)
        if box_height > 2 * box_width:
            # Tall/vertical text: render one character per line downward.
            font_size = max(int(box_width * 0.9), 10)
            font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
            cur_y = box[0][1]
            for c in txt:
                char_size = font.getsize(c)
                draw_right.text(
                    (box[0][0] + 3, cur_y), unidecode.unidecode(c), fill=(0, 0, 0), font=font)
                cur_y += char_size[1]
            draw_right.polygon(
                [
                    box[0][0], box[0][1], box[1][0] - 3, box[1][1], box[2][0] - 3,
                    box[2][1], box[3][0], box[3][1]
                ],
                outline=color)
        else:
            # Horizontal text: shrink the font until the line fits the box.
            img_fraction = 1.05
            # font_size=1
            font_change = False
            font_size = max(int(box_height * 0.8), 10)
            font = ImageFont.truetype(font_path, font_size)
            while font.getsize(txt)[0] > img_fraction * box_width:
                # iterate until the text size is just larger than the criteria
                font_change = True
                font_size -= 1
                font = ImageFont.truetype(font_path, font_size)
            if font_change:
                # Step back up one size: the loop overshot by one.
                font_size += 1
                font = ImageFont.truetype(font_path, font_size)
            draw_right.text(
                [box[0][0] + 3, box[0][1] + 3], unidecode.unidecode(txt), fill=(0, 0, 0), font=font)
            wid, het = font.getsize(txt)
            draw_right.polygon(
                [
                    box[0][0], box[0][1], box[0][0] + wid + 6, box[0][1], box[0][0] + wid + 6,
                    box[0][1] + het + 6, box[0][0], box[0][1] + het + 6
                ],
                outline=color)
    # Blend the outlines over the original and paste both halves side by side.
    img_left = Image.blend(image, img_left, 0.5)
    img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
    img_show.paste(img_left, (0, 0, w, h))
    img_show.paste(img_right, (w, 0, w * 2, h))
    return np.array(img_show)
|
from . import * # noqa
from ..app import app
def seed_db(db):
    """Add seed entities to the database.

    Seeds the default Provinces and the three Involvements lookup tables,
    then commits once.  Runs inside the Flask application context so the
    models can resolve the configured database.
    """
    # NOTE: a long list of historical seeders (AnalysisNature, Country,
    # User, Medium, Gender, Race, ..., Role) used to live here as
    # commented-out code; it was removed for readability -- recover it
    # from version control if those defaults need re-seeding.
    with app.app_context():
        for x in Provinces.create_defaults():
            db.session.add(x)

        for x in Involvements1.create_defaults():
            db.session.add(x)

        for x in Involvements2.create_defaults():
            db.session.add(x)

        for x in Involvements3.create_defaults():
            db.session.add(x)

        db.session.commit()
|
#!/usr/bin/env python
"""
Network VLAN Helper Classes
"""
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.comm import Client, Server, ApplicationServiceElement, bind
from bacpypes.pdu import Address, PDU
from bacpypes.npdu import npdu_types, NPDU
from bacpypes.vlan import Node
from bacpypes.app import DeviceInfoCache, Application
from bacpypes.appservice import StateMachineAccessPoint, ApplicationServiceAccessPoint
from bacpypes.netservice import NetworkServiceAccessPoint, NetworkServiceElement
from bacpypes.object import register_object_type
from bacpypes.local.device import LocalDeviceObject
from bacpypes.service.device import WhoIsIAmServices
from bacpypes.service.object import ReadWritePropertyServices
from ..state_machine import StateMachine, ClientStateMachine
# some debugging
_debug = 0
_log = ModuleLogger(globals())
class _NetworkServiceElement(NetworkServiceElement):
    """
    This class turns off the deferred startup function call that broadcasts
    I-Am-Router-To-Network and Network-Number-Is messages.
    """

    # Checked by the base class before scheduling its startup broadcasts.
    _startup_disabled = True
@bacpypes_debugging
class NPDUCodec(Client, Server):
    """Encode NPDUs downstream into plain PDUs and decode upstream PDUs
    back into typed network-layer NPDUs (application traffic is dropped)."""

    def __init__(self):
        if _debug: NPDUCodec._debug("__init__")
        Client.__init__(self)
        Server.__init__(self)

    def indication(self, npdu):
        """Downstream: flatten a typed NPDU into a vanilla PDU."""
        if _debug: NPDUCodec._debug("indication %r", npdu)
        # first as a generic NPDU
        xpdu = NPDU()
        npdu.encode(xpdu)
        # now as a vanilla PDU
        ypdu = PDU()
        xpdu.encode(ypdu)
        if _debug: NPDUCodec._debug("    - encoded: %r", ypdu)
        # send it downstream
        self.request(ypdu)

    def confirmation(self, pdu):
        """Upstream: decode a PDU into the specific network message type."""
        if _debug: NPDUCodec._debug("confirmation %r", pdu)
        # decode as a generic NPDU
        xpdu = NPDU()
        xpdu.decode(pdu)
        # drop application layer messages
        if xpdu.npduNetMessage is None:
            return
        # do a deeper decode of the NPDU using the message-type table
        ypdu = npdu_types[xpdu.npduNetMessage]()
        ypdu.decode(xpdu)
        # send it upstream
        self.response(ypdu)
#
# SnifferStateMachine
#
@bacpypes_debugging
class SnifferStateMachine(ClientStateMachine):
    """Client state machine bound to a promiscuous VLAN node so it can
    observe all traffic on the network."""

    def __init__(self, address, vlan):
        if _debug: SnifferStateMachine._debug("__init__ %r %r", address, vlan)
        ClientStateMachine.__init__(self)

        # save the name and address
        self.name = address
        self.address = Address(address)

        # create a promiscuous node, added to the network
        self.node = Node(self.address, vlan, promiscuous=True)
        if _debug: SnifferStateMachine._debug("    - node: %r", self.node)

        # bind this to the node
        bind(self, self.node)
#
# NetworkLayerStateMachine
#
@bacpypes_debugging
class NetworkLayerStateMachine(ClientStateMachine):
    """Client state machine that speaks network-layer NPDUs on a VLAN
    node via an NPDUCodec."""

    def __init__(self, address, vlan):
        if _debug: NetworkLayerStateMachine._debug("__init__ %r %r", address, vlan)
        ClientStateMachine.__init__(self)

        # save the name and address
        self.name = address
        self.address = Address(address)

        # create a network layer encoder/decoder
        self.codec = NPDUCodec()
        if _debug: NetworkLayerStateMachine._debug("    - codec: %r", self.codec)

        # create a node, added to the network
        self.node = Node(self.address, vlan)
        if _debug: NetworkLayerStateMachine._debug("    - node: %r", self.node)

        # bind this to the node through the codec (machine -> codec -> node)
        bind(self, self.codec, self.node)
#
# RouterNode
#
@bacpypes_debugging
class RouterNode:
    """A network-layer router that can join multiple VLANs via
    add_network(), with startup broadcasts disabled."""

    def __init__(self):
        if _debug: RouterNode._debug("__init__")

        # a network service access point will be needed
        self.nsap = NetworkServiceAccessPoint()

        # give the NSAP a generic network layer service element
        self.nse = _NetworkServiceElement()
        bind(self.nse, self.nsap)

    def add_network(self, address, vlan, net):
        """Attach this router to *vlan* as network number *net* at *address*."""
        if _debug: RouterNode._debug("add_network %r %r %r", address, vlan, net)

        # convert the address to an Address
        address = Address(address)

        # create a node, added to the network
        node = Node(address, vlan)
        if _debug: RouterNode._debug("    - node: %r", node)

        # bind the BIP stack to the local network
        self.nsap.bind(node, net, address)
#
# RouterStateMachine
#
@bacpypes_debugging
class RouterStateMachine(RouterNode, StateMachine):
    """A RouterNode that is also a StateMachine, for scripted router tests."""

    def __init__(self):
        if _debug: RouterStateMachine._debug("__init__")
        RouterNode.__init__(self)
        StateMachine.__init__(self)
#
# TestDeviceObject
#
# Local device object registered under the test vendor id 999.
@register_object_type(vendor_id=999)
class TestDeviceObject(LocalDeviceObject):
    pass
#
# ApplicationLayerStateMachine
#
@bacpypes_debugging
class ApplicationLayerStateMachine(ApplicationServiceElement, ClientStateMachine):
    """Client state machine with a full application stack (ASAP/SMAP/NSAP)
    attached to a VLAN node; received APDUs feed the state machine."""

    def __init__(self, address, vlan):
        if _debug: ApplicationLayerStateMachine._debug("__init__ %r %r", address, vlan)

        # build a name, save the address
        self.name = "app @ %s" % (address,)
        self.address = Address(address)

        # build a local device object
        local_device = TestDeviceObject(
            objectName=self.name,
            objectIdentifier=('device', int(address)),
            vendorIdentifier=999,
            )

        # build an address and save it
        # NOTE(review): duplicates the assignment above -- harmless.
        self.address = Address(address)
        if _debug: ApplicationLayerStateMachine._debug("    - address: %r", self.address)

        # continue with initialization
        ApplicationServiceElement.__init__(self)
        ClientStateMachine.__init__(self, name=local_device.objectName)

        # include a application decoder
        self.asap = ApplicationServiceAccessPoint()

        # pass the device object to the state machine access point so it
        # can know if it should support segmentation
        self.smap = StateMachineAccessPoint(local_device)

        # the segmentation state machines need access to some device
        # information cache, usually shared with the application
        self.smap.deviceInfoCache = DeviceInfoCache()

        # a network service access point will be needed
        self.nsap = NetworkServiceAccessPoint()

        # give the NSAP a generic network layer service element
        self.nse = _NetworkServiceElement()
        bind(self.nse, self.nsap)

        # bind the top layers
        bind(self, self.asap, self.smap, self.nsap)

        # create a node, added to the network
        self.node = Node(self.address, vlan)
        if _debug: ApplicationLayerStateMachine._debug("    - node: %r", self.node)

        # bind the stack to the local network
        self.nsap.bind(self.node)

    def indication(self, apdu):
        """Feed incoming request APDUs into the state machine."""
        if _debug: ApplicationLayerStateMachine._debug("indication %r", apdu)
        self.receive(apdu)

    def confirmation(self, apdu):
        """Feed incoming response APDUs into the state machine."""
        if _debug: ApplicationLayerStateMachine._debug("confirmation %r", apdu)
        self.receive(apdu)
#
# ApplicationNode
#
class ApplicationNode(Application, WhoIsIAmServices, ReadWritePropertyServices):
    """A full BACpypes application (Who-Is/I-Am and Read/Write property
    services) attached to a VLAN node, with startup broadcasts disabled."""

    # suppress the deferred startup broadcasts (see _NetworkServiceElement)
    _startup_disabled = True

    def __init__(self, address, vlan):
        if _debug: ApplicationNode._debug("__init__ %r %r", address, vlan)

        # build a name, save the address
        self.name = "app @ %s" % (address,)
        self.address = Address(address)

        # build a local device object
        local_device = TestDeviceObject(
            objectName=self.name,
            objectIdentifier=('device', int(address)),
            vendorIdentifier=999,
            )

        # build an address and save it
        # NOTE(review): duplicates the assignment above -- harmless.
        self.address = Address(address)
        if _debug: ApplicationNode._debug("    - address: %r", self.address)

        # continue with initialization
        Application.__init__(self, local_device)

        # include a application decoder
        self.asap = ApplicationServiceAccessPoint()

        # pass the device object to the state machine access point so it
        # can know if it should support segmentation
        self.smap = StateMachineAccessPoint(local_device)

        # the segmentation state machines need access to the same device
        # information cache as the application
        self.smap.deviceInfoCache = self.deviceInfoCache

        # a network service access point will be needed
        self.nsap = NetworkServiceAccessPoint()

        # give the NSAP a generic network layer service element
        self.nse = _NetworkServiceElement()
        bind(self.nse, self.nsap)

        # bind the top layers
        bind(self, self.asap, self.smap, self.nsap)

        # create a node, added to the network
        self.node = Node(self.address, vlan)
        if _debug: ApplicationNode._debug("    - node: %r", self.node)

        # bind the stack to the local network
        self.nsap.bind(self.node)
|
from flask import url_for
from flask_wtf import FlaskForm
from wtforms import ValidationError
from wtforms.fields import (BooleanField, PasswordField, StringField,
SubmitField, RadioField)
from wtforms.fields.html5 import EmailField
from wtforms.validators import Email, EqualTo, InputRequired, Length
from ..models import User
class LoginForm(FlaskForm):
    """Email/password login form with an optional persistent session."""
    email = EmailField(
        'Email', validators=[InputRequired(),
                             Length(1, 64),
                             Email()])
    password = PasswordField('Password', validators=[InputRequired()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log in')
class InviteUserForm(FlaskForm):
    """Form for inviting a new user by name and email address."""

    name = StringField('Name', validators=[InputRequired(), Length(1, 64)])
    email = EmailField('Email',
                       validators=[InputRequired(), Length(1, 64), Email()])
    submit = SubmitField('Invite')

    def validate_email(self, field):
        """Reject email addresses that already belong to an account."""
        existing = User.query.filter_by(email=field.data).first()
        if existing:
            raise ValidationError("Email already registered.")

    def validate_name(self, field):
        """Reject names that are already taken."""
        existing = User.query.filter_by(name=field.data).first()
        if existing:
            raise ValidationError("Name already exists.")
class RequestResetPasswordForm(FlaskForm):
    """Form asking for the email address to send a password reset to."""

    email = EmailField('Email',
                       validators=[InputRequired(), Length(1, 64), Email()])
    submit = SubmitField('Reset password')

    # We don't validate the email address so we don't confirm to attackers
    # that an account with the given email exists.
class ResetPasswordForm(FlaskForm):
    """Form for choosing a new password via an emailed reset link."""

    email = EmailField('Email',
                       validators=[InputRequired(), Length(1, 64), Email()])
    new_password = PasswordField(
        'New password',
        validators=[InputRequired(),
                    EqualTo('new_password2', 'Passwords must match.')])
    new_password2 = PasswordField('Confirm new password',
                                  validators=[InputRequired()])
    submit = SubmitField('Reset password')

    def validate_email(self, field):
        """Reject reset attempts for accounts that do not exist."""
        if User.query.filter_by(email=field.data).first() is None:
            raise ValidationError('Unknown email address.')
class CreatePasswordForm(FlaskForm):
    """Form for setting the initial password of an invited account."""

    password = PasswordField(
        'Password',
        validators=[InputRequired(),
                    EqualTo('password2', 'Passwords must match.')])
    password2 = PasswordField('Confirm new password',
                              validators=[InputRequired()])
    submit = SubmitField('Set password')
class ChangePasswordForm(FlaskForm):
    """Form for rotating the password of a logged-in user."""

    old_password = PasswordField('Old password', validators=[InputRequired()])
    new_password = PasswordField(
        'New password',
        validators=[InputRequired(),
                    EqualTo('new_password2', 'Passwords must match.')])
    new_password2 = PasswordField('Confirm new password',
                                  validators=[InputRequired()])
    submit = SubmitField('Update password')
class ChangeEmailForm(FlaskForm):
    """Form for updating the account email, confirmed with the password."""

    email = EmailField('New email',
                       validators=[InputRequired(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[InputRequired()])
    submit = SubmitField('Update email')

    def validate_email(self, field):
        """Reject addresses that are already attached to an account."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')
class DeleteForm(FlaskForm):
    """Confirmation form guarding permanent deletion."""

    delete = BooleanField('Delete forever', validators=[InputRequired()])
    submit = SubmitField('Delete')
class RegistrationForm(FlaskForm):
    """Self-service account registration form."""

    name = StringField('Name', validators=[InputRequired(), Length(1, 64)])
    email = EmailField('Email',
                       validators=[InputRequired(), Length(1, 64), Email()])
    password = PasswordField(
        'Password',
        validators=[InputRequired(),
                    EqualTo('password2', 'Passwords must match')])
    password2 = PasswordField('Confirm password', validators=[InputRequired()])
    submit = SubmitField('Register')

    def validate_email(self, field):
        """Reject registered addresses, hinting at the login page instead."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered. (Did you mean to '
                '<a href="{}">log in</a> instead?)'.format(url_for('account.login')))
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: privacy.proto
import sys

# On Python 2 ``_b`` is the identity; on Python 3 it encodes str to
# latin-1 bytes (used for the serialized descriptor blobs below).
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))

from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

# Fix: compare the full (major, minor) version tuple. The original
# ``sys.version_info.minor >= 8`` looked only at the minor version, which
# would misclassify any future major release (e.g. 4.0 has minor == 0).
if sys.version_info >= (3, 8):
    import google.protobuf.descriptor_pb2 as protobuf__descriptor__pb2
else:
    import scripts.artifacts.usagestats_pb.protobuf_descriptor_pb2 as protobuf__descriptor__pb2
# File descriptor for privacy.proto (generated by protoc; do not edit).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='privacy.proto',
  package='android',
  syntax='proto2',
  serialized_options=_b('\n\024com.android.incidentP\001'),
  serialized_pb=_b('\n\rprivacy.proto\x12\x07\x61ndroid\x1a\x19protobuf_descriptor.proto\"P\n\x0cPrivacyFlags\x12.\n\x04\x64\x65st\x18\x01 \x01(\x0e\x32\x14.android.Destination:\nDEST_UNSET\x12\x10\n\x08patterns\x18\x02 \x03(\t*V\n\x0b\x44\x65stination\x12\x0e\n\nDEST_LOCAL\x10\x00\x12\x11\n\rDEST_EXPLICIT\x10\x64\x12\x13\n\x0e\x44\x45ST_AUTOMATIC\x10\xc8\x01\x12\x0f\n\nDEST_UNSET\x10\xff\x01:H\n\x07privacy\x12\x1d.google.protobuf.FieldOptions\x18\xf3\xd3\xfa\x30 \x01(\x0b\x32\x15.android.PrivacyFlags:N\n\x0bmsg_privacy\x12\x1f.google.protobuf.MessageOptions\x18\xf3\xd3\xfa\x30 \x01(\x0b\x32\x15.android.PrivacyFlagsB\x18\n\x14\x63om.android.incidentP\x01')
  ,
  dependencies=[protobuf__descriptor__pb2.DESCRIPTOR,])
# EnumDescriptor for android.Destination (generated by protoc; do not edit).
_DESTINATION = _descriptor.EnumDescriptor(
  name='Destination',
  full_name='android.Destination',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='DEST_LOCAL', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DEST_EXPLICIT', index=1, number=100,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DEST_AUTOMATIC', index=2, number=200,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DEST_UNSET', index=3, number=255,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=135,
  serialized_end=221,
)
_sym_db.RegisterEnumDescriptor(_DESTINATION)

# Public enum wrapper plus module-level aliases for each enum value.
Destination = enum_type_wrapper.EnumTypeWrapper(_DESTINATION)
DEST_LOCAL = 0
DEST_EXPLICIT = 100
DEST_AUTOMATIC = 200
DEST_UNSET = 255
# Extension field descriptors (generated): both attach PrivacyFlags under
# the same field number -- ``privacy`` to FieldOptions and ``msg_privacy``
# to MessageOptions.
PRIVACY_FIELD_NUMBER = 102672883
privacy = _descriptor.FieldDescriptor(
  name='privacy', full_name='android.privacy', index=0,
  number=102672883, type=11, cpp_type=10, label=1,
  has_default_value=False, default_value=None,
  message_type=None, enum_type=None, containing_type=None,
  is_extension=True, extension_scope=None,
  serialized_options=None, file=DESCRIPTOR)
MSG_PRIVACY_FIELD_NUMBER = 102672883
msg_privacy = _descriptor.FieldDescriptor(
  name='msg_privacy', full_name='android.msg_privacy', index=1,
  number=102672883, type=11, cpp_type=10, label=1,
  has_default_value=False, default_value=None,
  message_type=None, enum_type=None, containing_type=None,
  is_extension=True, extension_scope=None,
  serialized_options=None, file=DESCRIPTOR)
# Descriptor for the PrivacyFlags message (generated by protoc; do not edit).
_PRIVACYFLAGS = _descriptor.Descriptor(
  name='PrivacyFlags',
  full_name='android.PrivacyFlags',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dest', full_name='android.PrivacyFlags.dest', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=True, default_value=255,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='patterns', full_name='android.PrivacyFlags.patterns', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=53,
  serialized_end=133,
)
# Wire up cross-references and register everything with the symbol database.
_PRIVACYFLAGS.fields_by_name['dest'].enum_type = _DESTINATION
DESCRIPTOR.message_types_by_name['PrivacyFlags'] = _PRIVACYFLAGS
DESCRIPTOR.enum_types_by_name['Destination'] = _DESTINATION
DESCRIPTOR.extensions_by_name['privacy'] = privacy
DESCRIPTOR.extensions_by_name['msg_privacy'] = msg_privacy
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class created from the descriptor via reflection.
PrivacyFlags = _reflection.GeneratedProtocolMessageType('PrivacyFlags', (_message.Message,), dict(
  DESCRIPTOR = _PRIVACYFLAGS,
  __module__ = 'privacy_pb2'
  # @@protoc_insertion_point(class_scope:android.PrivacyFlags)
  ))
_sym_db.RegisterMessage(PrivacyFlags)
# Attach the extensions to the descriptor option types they extend.
privacy.message_type = _PRIVACYFLAGS
protobuf__descriptor__pb2.FieldOptions.RegisterExtension(privacy)
msg_privacy.message_type = _PRIVACYFLAGS
protobuf__descriptor__pb2.MessageOptions.RegisterExtension(msg_privacy)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
from .GradientDescent import GradientDescent
class NAG(GradientDescent):
    """Nesterov Accelerated Gradient (NAG) optimizer.

    NAG evaluates the function derivative at the look-ahead position of
    the parameter. Looking ahead helps NAG correct its course quicker
    than momentum-based gradient descent.

    Attributes:
        f (function): function for optimization
        df (function): first derivative of the function
        x_t (float): starting variable for analysis
        learning_rate (float): learning rate
        tolerance (int): tolerance for the distance between two
            consecutive estimates in a subsequence that converges
        max_iterations (int): maximum number of iterations
        convergence_points (list): history of points visited during
            optimization
        n_iterations (int): number of iterations until convergence
        gamma (float): fraction of the past update vector kept as an
            exponentially decaying average of past gradients
        u_t (float): current NAG update vector
        u_t_1 (float): next NAG update vector
    """

    def __init__(self, f, df, x_t, learning_rate=1e-3, tolerance=1e-6,
                 max_iterations=1000, n_history_points=1000, gamma=0.9):
        """Initialize the optimizer.

        Args:
            f (function): function for optimization
            df (function): first derivative of the function
            x_t (float): starting variable for analysis
            learning_rate (float, optional): learning rate
            tolerance (int, optional): tolerance for the distance between
                two consecutive estimates in a subsequence that converges
            max_iterations (int, optional): maximum number of iterations
            n_history_points (int, optional): total amount of history
                points saved during optimization
            gamma (float, optional): fraction of the past update vector
                kept as an exponentially decaying average of past
                gradients

        Returns:
            None
        """
        super(NAG, self).__init__(f, df, x_t, learning_rate, tolerance,
                                  max_iterations, n_history_points)
        self.name = 'NAG'
        self.gamma = gamma
        # Update vectors for the current and the next step.
        self.__u_t = 0
        self.__u_t_1 = 0

    def _update_parameter(self, x_t):
        """Compute the current update vector for Nesterov accelerated
        gradient.

        Args:
            x_t (float): point for calculation

        Returns:
            (float): update amount
        """
        # Gradient is taken at the look-ahead position x_t - gamma * u_t.
        look_ahead = x_t - self.gamma * self.__u_t
        self.__u_t_1 = (self.gamma * self.__u_t
                        + self.learning_rate * self.df(look_ahead))
        self.__u_t = self.__u_t_1
        return self.__u_t_1
|
import time
from compas.rpc.services.default import start_service
try:
from xmlrpclib import ServerProxy
except ImportError:
from xmlrpc.client import ServerProxy
def start(port, **kwargs):
    # Run the default RPC service on *port*; extra argparse attributes
    # arrive via **kwargs and are deliberately ignored.
    start_service(port)
def stop(port, **kwargs):
    """Ask a local RPC server on *port* to shut itself down.

    Pings the server up to five times (0.1 s apart); on the first
    successful ping, requests a remote shutdown.
    """
    print('Trying to stop remote RPC proxy...')
    server = ServerProxy('http://127.0.0.1:{}'.format(port))
    responded = False
    for remaining in reversed(range(5)):
        try:
            server.ping()
        except Exception:
            # Server unreachable; wait briefly before the next attempt.
            time.sleep(0.1)
            print("    {} attempts left.".format(remaining))
        else:
            responded = True
            break
    if not responded:
        print('RPC server did not respond. Maybe already stopped.')
    else:
        server.remote_shutdown()
        print('RPC server stopped')
def main():
    """Entry point of the COMPAS RPC command-line utility.

    Supports two sub-commands, each with a ``--port`` option
    (default 1753): ``start`` runs an RPC server in the foreground,
    ``stop`` asks a running server to shut down. With no sub-command,
    usage help is printed.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description='COMPAS RPC command-line utility')  # fix: was "COMPAS RCP"
    commands = parser.add_subparsers(help='Valid RPC commands')

    # Command: start
    start_command = commands.add_parser('start', help='Start RPC server')
    start_command.add_argument(
        '--port', '-p', action='store', default=1753, type=int, help='RPC port number')
    start_command.set_defaults(func=start)

    # Command: stop
    stop_command = commands.add_parser(
        'stop', help='Try to stop a remote RPC server')
    stop_command.add_argument(
        '--port', '-p', action='store', default=1753, type=int, help='RPC port number')
    stop_command.set_defaults(func=stop)

    # Invoke the selected sub-command, or print usage when none was given.
    args = parser.parse_args()
    if hasattr(args, 'func'):
        args.func(**vars(args))
    else:
        parser.print_help()


if __name__ == '__main__':
    main()
|
from functions import check_hadoop_services, check_local_path
from .exceptions import CustomRequestFailed
from rest_framework import permissions
# Hadoop management actions accepted by the API.
actions = ['start', 'stop', 'restart', 'upload', 'run', 'check']
class HadoopPermission(permissions.BasePermission):
    """
    Validity check for hadoop actions.

    Rejects requests whose ``action`` query parameter is missing or
    unknown, whose action conflicts with the current state of the hadoop
    services, or whose POST payload lacks required parameters.
    """
    def has_permission(self, request, view):
        """Return True for valid requests; raise CustomRequestFailed otherwise."""
        # Check validity of action parameter
        action = request.GET.get('action')
        if action is None:
            raise CustomRequestFailed('No action parameter provided.')
        if action not in actions:
            # Fix: ``actions`` is a plain list; the original
            # ``actions["actions"].keys()`` raised TypeError instead of
            # producing the intended error message.
            keys = ",".join(actions)
            message = "The provided action " + action + ", is not in the list of accepted actions: [" + keys + "]."
            raise CustomRequestFailed(message)
        running = check_hadoop_services()
        if action == "start" and running:
            raise CustomRequestFailed("All of the hadoop services are running.")
        elif action not in ("stop", "start", "check") and not running:
            # Fix: added the missing space after the period in the message.
            raise CustomRequestFailed("Not all of the hadoop services are running. Start them and try again.")
        # Check validity of request parameters per request method
        method = request.META['REQUEST_METHOD']
        if method == 'POST':
            if action == 'upload':
                path = request.data.get("path")
                dest = request.data.get("dest")
                if path is None or dest is None:
                    raise CustomRequestFailed("No path or dest values provided.")
                check_local_path(path)
            elif action == 'run':
                path = request.data.get("path")
                args = request.data.get("args")
                if path is None or args is None:
                    raise CustomRequestFailed("No path or args values provided.")
                check_local_path(path)
        return True
|
from datetime import (
date,
datetime,
timedelta,
)
from functools import partial
from io import BytesIO
import os
import re
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
get_option,
set_option,
)
import pandas._testing as tm
from pandas.io.excel import (
ExcelFile,
ExcelWriter,
_OpenpyxlWriter,
_XlsxWriter,
_XlwtWriter,
register_writer,
)
@pytest.fixture
def path(ext):
    """
    Fixture yielding a temporary file path with the requested extension,
    cleaned up after the test.
    """
    with tm.ensure_clean(ext) as tmp_file_path:
        yield tmp_file_path
@pytest.fixture
def set_engine(engine, ext):
    """
    Fixture routing Excel writing through the requested engine.

    Rather than requiring `engine=...` to be provided explicitly as an
    argument in each test, this temporarily flips the global
    ``io.excel.<ext>.writer`` option and restores the previous value
    once the test has finished.
    """
    option_name = f"io.excel.{ext.strip('.')}.writer"
    previous = get_option(option_name)
    set_option(option_name, engine)
    yield
    # Roll back the option change after the test.
    set_option(option_name, previous)
@pytest.mark.parametrize(
    "ext",
    [
        pytest.param(".xlsx", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]),
        pytest.param(".xlsm", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]),
        pytest.param(".xls", marks=[td.skip_if_no("xlwt"), td.skip_if_no("xlrd")]),
        pytest.param(
            ".xlsx", marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")]
        ),
        pytest.param(".ods", marks=td.skip_if_no("odf")),
    ],
)
class TestRoundTrip:
    """Excel write/read round-trip tests, parametrized over file formats."""
    @pytest.mark.parametrize(
        "header,expected",
        [(None, DataFrame([np.nan] * 4)), (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))],
    )
    def test_read_one_empty_col_no_header(self, ext, header, expected):
        # xref gh-12292
        filename = "no_header"
        df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]])
        with tm.ensure_clean(ext) as path:
            df.to_excel(path, filename, index=False, header=False)
            result = pd.read_excel(
                path, sheet_name=filename, usecols=[0], header=header
            )
            tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "header,expected",
        [(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))],
    )
    def test_read_one_empty_col_with_header(self, ext, header, expected):
        filename = "with_header"
        df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]])
        with tm.ensure_clean(ext) as path:
            df.to_excel(path, "with_header", index=False, header=True)
            result = pd.read_excel(
                path, sheet_name=filename, usecols=[0], header=header
            )
            tm.assert_frame_equal(result, expected)
    def test_set_column_names_in_parameter(self, ext):
        # GH 12870 : pass down column names associated with
        # keyword argument names
        refdf = DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"])
        with tm.ensure_clean(ext) as pth:
            with ExcelWriter(pth) as writer:
                refdf.to_excel(writer, "Data_no_head", header=False, index=False)
                refdf.to_excel(writer, "Data_with_head", index=False)
            refdf.columns = ["A", "B"]
            with ExcelFile(pth) as reader:
                xlsdf_no_head = pd.read_excel(
                    reader, sheet_name="Data_no_head", header=None, names=["A", "B"]
                )
                xlsdf_with_head = pd.read_excel(
                    reader,
                    sheet_name="Data_with_head",
                    index_col=None,
                    names=["A", "B"],
                )
                tm.assert_frame_equal(xlsdf_no_head, refdf)
                tm.assert_frame_equal(xlsdf_with_head, refdf)
    def test_creating_and_reading_multiple_sheets(self, ext):
        # see gh-9450
        #
        # Test reading multiple sheets, from a runtime
        # created Excel file with multiple sheets.
        def tdf(col_sheet_name):
            d, i = [11, 22, 33], [1, 2, 3]
            return DataFrame(d, i, columns=[col_sheet_name])
        sheets = ["AAA", "BBB", "CCC"]
        dfs = [tdf(s) for s in sheets]
        dfs = dict(zip(sheets, dfs))
        with tm.ensure_clean(ext) as pth:
            with ExcelWriter(pth) as ew:
                for sheetname, df in dfs.items():
                    df.to_excel(ew, sheetname)
            dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0)
            for s in sheets:
                tm.assert_frame_equal(dfs[s], dfs_returned[s])
    def test_read_excel_multiindex_empty_level(self, ext):
        # see gh-12453
        with tm.ensure_clean(ext) as path:
            df = DataFrame(
                {
                    ("One", "x"): {0: 1},
                    ("Two", "X"): {0: 3},
                    ("Two", "Y"): {0: 7},
                    ("Zero", ""): {0: 0},
                }
            )
            expected = DataFrame(
                {
                    ("One", "x"): {0: 1},
                    ("Two", "X"): {0: 3},
                    ("Two", "Y"): {0: 7},
                    ("Zero", "Unnamed: 4_level_1"): {0: 0},
                }
            )
            df.to_excel(path)
            actual = pd.read_excel(path, header=[0, 1], index_col=0)
            tm.assert_frame_equal(actual, expected)
            df = DataFrame(
                {
                    ("Beg", ""): {0: 0},
                    ("Middle", "x"): {0: 1},
                    ("Tail", "X"): {0: 3},
                    ("Tail", "Y"): {0: 7},
                }
            )
            expected = DataFrame(
                {
                    ("Beg", "Unnamed: 1_level_1"): {0: 0},
                    ("Middle", "x"): {0: 1},
                    ("Tail", "X"): {0: 3},
                    ("Tail", "Y"): {0: 7},
                }
            )
            df.to_excel(path)
            actual = pd.read_excel(path, header=[0, 1], index_col=0)
            tm.assert_frame_equal(actual, expected)
    @pytest.mark.parametrize("c_idx_names", [True, False])
    @pytest.mark.parametrize("r_idx_names", [True, False])
    @pytest.mark.parametrize("c_idx_levels", [1, 3])
    @pytest.mark.parametrize("r_idx_levels", [1, 3])
    def test_excel_multindex_roundtrip(
        self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels, request
    ):
        # see gh-4679
        with tm.ensure_clean(ext) as pth:
            if (c_idx_levels == 1 and c_idx_names) and not (
                r_idx_levels == 3 and not r_idx_names
            ):
                mark = pytest.mark.xfail(
                    reason="Column index name cannot be serialized unless "
                    "it's a MultiIndex"
                )
                request.node.add_marker(mark)
            # Empty name case current read in as
            # unnamed levels, not Nones.
            check_names = r_idx_names or r_idx_levels <= 1
            df = tm.makeCustomDataframe(
                5, 5, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels
            )
            df.to_excel(pth)
            act = pd.read_excel(
                pth,
                index_col=list(range(r_idx_levels)),
                header=list(range(c_idx_levels)),
            )
            tm.assert_frame_equal(df, act, check_names=check_names)
            df.iloc[0, :] = np.nan
            df.to_excel(pth)
            act = pd.read_excel(
                pth,
                index_col=list(range(r_idx_levels)),
                header=list(range(c_idx_levels)),
            )
            tm.assert_frame_equal(df, act, check_names=check_names)
            df.iloc[-1, :] = np.nan
            df.to_excel(pth)
            act = pd.read_excel(
                pth,
                index_col=list(range(r_idx_levels)),
                header=list(range(c_idx_levels)),
            )
            tm.assert_frame_equal(df, act, check_names=check_names)
    def test_read_excel_parse_dates(self, ext):
        # see gh-11544, gh-12051
        df = DataFrame(
            {"col": [1, 2, 3], "date_strings": pd.date_range("2012-01-01", periods=3)}
        )
        df2 = df.copy()
        df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
        with tm.ensure_clean(ext) as pth:
            df2.to_excel(pth)
            res = pd.read_excel(pth, index_col=0)
            tm.assert_frame_equal(df2, res)
            res = pd.read_excel(pth, parse_dates=["date_strings"], index_col=0)
            tm.assert_frame_equal(df, res)
            date_parser = lambda x: datetime.strptime(x, "%m/%d/%Y")
            res = pd.read_excel(
                pth, parse_dates=["date_strings"], date_parser=date_parser, index_col=0
            )
            tm.assert_frame_equal(df, res)
    def test_multiindex_interval_datetimes(self, ext):
        # GH 30986
        midx = MultiIndex.from_arrays(
            [
                range(4),
                pd.interval_range(
                    start=pd.Timestamp("2020-01-01"), periods=4, freq="6M"
                ),
            ]
        )
        df = DataFrame(range(4), index=midx)
        with tm.ensure_clean(ext) as pth:
            df.to_excel(pth)
            result = pd.read_excel(pth, index_col=[0, 1])
            expected = DataFrame(
                range(4),
                MultiIndex.from_arrays(
                    [
                        range(4),
                        [
                            "(2020-01-31, 2020-07-31]",
                            "(2020-07-31, 2021-01-31]",
                            "(2021-01-31, 2021-07-31]",
                            "(2021-07-31, 2022-01-31]",
                        ],
                    ]
                ),
            )
            tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"engine,ext",
[
pytest.param(
"openpyxl",
".xlsx",
marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")],
),
pytest.param(
"openpyxl",
".xlsm",
marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")],
),
pytest.param(
"xlwt", ".xls", marks=[td.skip_if_no("xlwt"), td.skip_if_no("xlrd")]
),
pytest.param(
"xlsxwriter",
".xlsx",
marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")],
),
pytest.param("odf", ".ods", marks=td.skip_if_no("odf")),
],
)
@pytest.mark.usefixtures("set_engine")
class TestExcelWriter:
def test_excel_sheet_size(self, path):
# GH 26080
breaking_row_count = 2 ** 20 + 1
breaking_col_count = 2 ** 14 + 1
# purposely using two arrays to prevent memory issues while testing
row_arr = np.zeros(shape=(breaking_row_count, 1))
col_arr = np.zeros(shape=(1, breaking_col_count))
row_df = DataFrame(row_arr)
col_df = DataFrame(col_arr)
msg = "sheet is too large"
with pytest.raises(ValueError, match=msg):
row_df.to_excel(path)
with pytest.raises(ValueError, match=msg):
col_df.to_excel(path)
def test_excel_sheet_by_name_raise(self, path, engine):
gt = DataFrame(np.random.randn(10, 2))
gt.to_excel(path)
with ExcelFile(path) as xl:
df = pd.read_excel(xl, sheet_name=0, index_col=0)
tm.assert_frame_equal(gt, df)
msg = "Worksheet named '0' not found"
with pytest.raises(ValueError, match=msg):
pd.read_excel(xl, "0")
def test_excel_writer_context_manager(self, frame, path):
with ExcelWriter(path) as writer:
frame.to_excel(writer, "Data1")
frame2 = frame.copy()
frame2.columns = frame.columns[::-1]
frame2.to_excel(writer, "Data2")
with ExcelFile(path) as reader:
found_df = pd.read_excel(reader, sheet_name="Data1", index_col=0)
found_df2 = pd.read_excel(reader, sheet_name="Data2", index_col=0)
tm.assert_frame_equal(found_df, frame)
tm.assert_frame_equal(found_df2, frame2)
def test_roundtrip(self, frame, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
# test roundtrip
frame.to_excel(path, "test1")
recons = pd.read_excel(path, sheet_name="test1", index_col=0)
tm.assert_frame_equal(frame, recons)
frame.to_excel(path, "test1", index=False)
recons = pd.read_excel(path, sheet_name="test1", index_col=None)
recons.index = frame.index
tm.assert_frame_equal(frame, recons)
frame.to_excel(path, "test1", na_rep="NA")
recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["NA"])
tm.assert_frame_equal(frame, recons)
# GH 3611
frame.to_excel(path, "test1", na_rep="88")
recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["88"])
tm.assert_frame_equal(frame, recons)
frame.to_excel(path, "test1", na_rep="88")
recons = pd.read_excel(
path, sheet_name="test1", index_col=0, na_values=[88, 88.0]
)
tm.assert_frame_equal(frame, recons)
# GH 6573
frame.to_excel(path, "Sheet1")
recons = pd.read_excel(path, index_col=0)
tm.assert_frame_equal(frame, recons)
frame.to_excel(path, "0")
recons = pd.read_excel(path, index_col=0)
tm.assert_frame_equal(frame, recons)
# GH 8825 Pandas Series should provide to_excel method
s = frame["A"]
s.to_excel(path)
recons = pd.read_excel(path, index_col=0)
tm.assert_frame_equal(s.to_frame(), recons)
def test_mixed(self, frame, path):
mixed_frame = frame.copy()
mixed_frame["foo"] = "bar"
mixed_frame.to_excel(path, "test1")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
tm.assert_frame_equal(mixed_frame, recons)
def test_ts_frame(self, tsframe, path):
df = tsframe
# freq doesn't round-trip
index = pd.DatetimeIndex(np.asarray(df.index), freq=None)
df.index = index
df.to_excel(path, "test1")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self, frame, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
@pytest.mark.parametrize("np_type", [np.int8, np.int16, np.int32, np.int64])
def test_int_types(self, np_type, path):
# Test np.int values read come back as int
# (rather than float which is Excel's format).
df = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np_type)
df.to_excel(path, "test1")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
int_frame = df.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = pd.read_excel(path, sheet_name="test1", index_col=0)
tm.assert_frame_equal(int_frame, recons2)
# Test with convert_float=False comes back as float.
float_frame = df.astype(float)
float_frame.columns = float_frame.columns.astype(float)
float_frame.index = float_frame.index.astype(float)
with tm.assert_produces_warning(
FutureWarning, match="convert_float is deprecated"
):
recons = pd.read_excel(
path, sheet_name="test1", convert_float=False, index_col=0
)
tm.assert_frame_equal(recons, float_frame)
@pytest.mark.parametrize("np_type", [np.float16, np.float32, np.float64])
def test_float_types(self, np_type, path):
# Test np.float values read come back as float.
df = DataFrame(np.random.random_sample(10), dtype=np_type)
df.to_excel(path, "test1")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
np_type
)
tm.assert_frame_equal(df, recons)
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, np_type, path):
# Test np.bool8 and np.bool_ values read come back as float.
df = DataFrame([1, 0, True, False], dtype=np_type)
df.to_excel(path, "test1")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
np_type
)
tm.assert_frame_equal(df, recons)
def test_inf_roundtrip(self, path):
df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
df.to_excel(path, "test1")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
tm.assert_frame_equal(df, recons)
def test_sheets(self, frame, tsframe, path):
# freq doesn't round-trip
index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
tsframe.index = index
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
# Test writing to separate sheets
with ExcelWriter(path) as writer:
frame.to_excel(writer, "test1")
tsframe.to_excel(writer, "test2")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
tm.assert_frame_equal(frame, recons)
recons = pd.read_excel(reader, sheet_name="test2", index_col=0)
tm.assert_frame_equal(tsframe, recons)
assert 2 == len(reader.sheet_names)
assert "test1" == reader.sheet_names[0]
assert "test2" == reader.sheet_names[1]
def test_colaliases(self, frame, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
frame.to_excel(path, "test1", header=col_aliases)
with ExcelFile(path) as reader:
rs = pd.read_excel(reader, sheet_name="test1", index_col=0)
xp = frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_roundtrip_indexlabels(self, merge_cells, frame, path):
frame = frame.copy()
frame["A"][:5] = np.nan
frame.to_excel(path, "test1")
frame.to_excel(path, "test1", columns=["A", "B"])
frame.to_excel(path, "test1", header=False)
frame.to_excel(path, "test1", index=False)
# test index_label
df = DataFrame(np.random.randn(10, 2)) >= 0
df.to_excel(path, "test1", index_label=["test"], merge_cells=merge_cells)
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
np.int64
)
df.index.names = ["test"]
assert df.index.names == recons.index.names
df = DataFrame(np.random.randn(10, 2)) >= 0
df.to_excel(
path,
"test1",
index_label=["test", "dummy", "dummy2"],
merge_cells=merge_cells,
)
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
np.int64
)
df.index.names = ["test"]
assert df.index.names == recons.index.names
df = DataFrame(np.random.randn(10, 2)) >= 0
df.to_excel(path, "test1", index_label="test", merge_cells=merge_cells)
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
np.int64
)
df.index.names = ["test"]
tm.assert_frame_equal(df, recons.astype(bool))
frame.to_excel(
path,
"test1",
columns=["A", "B", "C", "D"],
index=False,
merge_cells=merge_cells,
)
# take 'A' and 'B' as indexes (same row as cols 'C', 'D')
df = frame.copy()
df = df.set_index(["A", "B"])
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])
tm.assert_frame_equal(df, recons)
def test_excel_roundtrip_indexname(self, merge_cells, path):
df = DataFrame(np.random.randn(10, 4))
df.index.name = "foo"
df.to_excel(path, merge_cells=merge_cells)
with ExcelFile(path) as xf:
result = pd.read_excel(xf, sheet_name=xf.sheet_names[0], index_col=0)
tm.assert_frame_equal(result, df)
assert result.index.name == "foo"
def test_excel_roundtrip_datetime(self, merge_cells, tsframe, path):
# datetime.date, not sure what to test here exactly
# freq does not round-trip
index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
tsframe.index = index
tsf = tsframe.copy()
tsf.index = [x.date() for x in tsframe.index]
tsf.to_excel(path, "test1", merge_cells=merge_cells)
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
tm.assert_frame_equal(tsframe, recons)
def test_excel_date_datetime_format(self, engine, ext, path):
# see gh-4133
#
# Excel output format strings
df = DataFrame(
[
[date(2014, 1, 31), date(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
],
index=["DATE", "DATETIME"],
columns=["X", "Y"],
)
df_expected = DataFrame(
[
[datetime(2014, 1, 31), datetime(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
],
index=["DATE", "DATETIME"],
columns=["X", "Y"],
)
with tm.ensure_clean(ext) as filename2:
with ExcelWriter(path) as writer1:
df.to_excel(writer1, "test1")
with ExcelWriter(
filename2,
date_format="DD.MM.YYYY",
datetime_format="DD.MM.YYYY HH-MM-SS",
) as writer2:
df.to_excel(writer2, "test1")
with ExcelFile(path) as reader1:
rs1 = pd.read_excel(reader1, sheet_name="test1", index_col=0)
with ExcelFile(filename2) as reader2:
rs2 = pd.read_excel(reader2, sheet_name="test1", index_col=0)
tm.assert_frame_equal(rs1, rs2)
# Since the reader returns a datetime object for dates,
# we need to use df_expected to check the result.
tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_interval_no_labels(self, path):
# see gh-19242
#
# Test writing Interval without labels.
df = DataFrame(np.random.randint(-10, 10, size=(20, 1)), dtype=np.int64)
expected = df.copy()
df["new"] = pd.cut(df[0], 10)
expected["new"] = pd.cut(expected[0], 10).astype(str)
df.to_excel(path, "test1")
with ExcelFile(path) as reader:
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_interval_labels(self, path):
    """Labeled Interval data round-trips as the label values."""
    # see gh-19242
    #
    # Test writing Interval with labels.
    df = DataFrame(np.random.randint(-10, 10, size=(20, 1)), dtype=np.int64)
    expected = df.copy()
    intervals = pd.cut(
        df[0], 10, labels=["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"]
    )
    df["new"] = intervals
    # reading back yields plain object labels, not Categorical
    expected["new"] = pd.Series(list(intervals))

    df.to_excel(path, "test1")
    with ExcelFile(path) as reader:
        recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
    tm.assert_frame_equal(expected, recons)
def test_to_excel_timedelta(self, path):
    """Timedeltas are written as fractional days (Excel numeric time)."""
    # see gh-19242, gh-9155
    #
    # Test writing timedelta to xls.
    df = DataFrame(
        np.random.randint(-10, 10, size=(20, 1)), columns=["A"], dtype=np.int64
    )
    expected = df.copy()

    df["new"] = df["A"].apply(lambda x: timedelta(seconds=x))
    # 86400 seconds per day: Excel stores durations as day fractions
    expected["new"] = expected["A"].apply(
        lambda x: timedelta(seconds=x).total_seconds() / 86400
    )

    df.to_excel(path, "test1")
    with ExcelFile(path) as reader:
        recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
    tm.assert_frame_equal(expected, recons)
def test_to_excel_periodindex(self, tsframe, path):
    """PeriodIndex frames round-trip once converted back with to_period."""
    xp = tsframe.resample("M", kind="period").mean()

    xp.to_excel(path, "sht1")

    with ExcelFile(path) as reader:
        rs = pd.read_excel(reader, sheet_name="sht1", index_col=0)
    # reader yields timestamps; convert back to compare periods
    tm.assert_frame_equal(xp, rs.to_period("M"))
def test_to_excel_multiindex(self, merge_cells, frame, path):
    """A two-level row MultiIndex round-trips via index_col=[0, 1]."""
    arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
    new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
    frame.index = new_index

    # earlier writes are overwritten; only the last one is read back
    frame.to_excel(path, "test1", header=False)
    frame.to_excel(path, "test1", columns=["A", "B"])

    # round trip
    frame.to_excel(path, "test1", merge_cells=merge_cells)
    with ExcelFile(path) as reader:
        df = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])
    tm.assert_frame_equal(frame, df)

# GH13511
def test_to_excel_multiindex_nan_label(self, merge_cells, path):
    """NaN labels in a MultiIndex survive the Excel round-trip (GH13511)."""
    df = DataFrame({"A": [None, 2, 3], "B": [10, 20, 30], "C": np.random.sample(3)})
    df = df.set_index(["A", "B"])

    df.to_excel(path, merge_cells=merge_cells)
    df1 = pd.read_excel(path, index_col=[0, 1])
    tm.assert_frame_equal(df, df1)

# Test for Issue 11328. If column indices are integers, make
# sure they are handled correctly for either setting of
# merge_cells
def test_to_excel_multiindex_cols(self, merge_cells, frame, path):
    """Integer MultiIndex columns round-trip for both merge_cells settings (GH11328)."""
    arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
    new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
    frame.index = new_index

    new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)])
    frame.columns = new_cols_index
    header = [0, 1]
    if not merge_cells:
        # without merged cells the header collapses to one row
        header = 0

    # round trip
    frame.to_excel(path, "test1", merge_cells=merge_cells)
    with ExcelFile(path) as reader:
        df = pd.read_excel(
            reader, sheet_name="test1", header=header, index_col=[0, 1]
        )
    if not merge_cells:
        # flatten expected columns to the dotted "40.1" style names
        fm = frame.columns.format(sparsify=False, adjoin=False, names=False)
        frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
    tm.assert_frame_equal(frame, df)
def test_to_excel_multiindex_dates(self, merge_cells, tsframe, path):
    """A (datetime, int) MultiIndex round-trips with its level names."""
    # try multiindex with dates
    new_index = [tsframe.index, np.arange(len(tsframe.index))]
    tsframe.index = MultiIndex.from_arrays(new_index)

    tsframe.index.names = ["time", "foo"]
    tsframe.to_excel(path, "test1", merge_cells=merge_cells)
    with ExcelFile(path) as reader:
        recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])

    tm.assert_frame_equal(tsframe, recons)
    assert recons.index.names == ("time", "foo")
def test_to_excel_multiindex_no_write_index(self, path):
    """index=False drops a MultiIndex entirely on write (GH 5616)."""
    # Test writing and re-reading a MI without the index. GH 5616.

    # Initial non-MI frame.
    frame1 = DataFrame({"a": [10, 20], "b": [30, 40], "c": [50, 60]})

    # Add a MI.
    frame2 = frame1.copy()
    multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
    frame2.index = multi_index

    # Write out to Excel without the index.
    frame2.to_excel(path, "test1", index=False)

    # Read it back in.
    with ExcelFile(path) as reader:
        frame3 = pd.read_excel(reader, sheet_name="test1")

    # Test that it is the same as the initial frame.
    tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self, path):
    """float_format is applied to the stored values, not just the display."""
    df = DataFrame(
        [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
        index=["A", "B"],
        columns=["X", "Y", "Z"],
    )
    df.to_excel(path, "test1", float_format="%.2f")

    with ExcelFile(path) as reader:
        result = pd.read_excel(reader, sheet_name="test1", index_col=0)

    # values come back rounded to two decimal places
    expected = DataFrame(
        [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
        index=["A", "B"],
        columns=["X", "Y", "Z"],
    )
    tm.assert_frame_equal(result, expected)
def test_to_excel_output_encoding(self, ext):
    """Non-ASCII cell values, index and column labels round-trip intact."""
    # Avoid mixed inferred_type.
    df = DataFrame(
        [["\u0192", "\u0193", "\u0194"], ["\u0195", "\u0196", "\u0197"]],
        index=["A\u0192", "B"],
        columns=["X\u0193", "Y", "Z"],
    )

    with tm.ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
        df.to_excel(filename, sheet_name="TestSheet", encoding="utf8")
        result = pd.read_excel(filename, sheet_name="TestSheet", index_col=0)
        tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self, ext, path):
    """Writing to a unicode filename works where the OS supports it.

    Skips when the filesystem cannot encode the name.
    """
    with tm.ensure_clean("\u0192u." + ext) as filename:
        # Probe whether the OS can create a file with this name.
        # NOTE: the original used try/finally, which raised NameError on
        # `f.close()` when open() itself failed (f was never bound) and
        # thereby masked the pytest.skip.  Close only on success.
        try:
            f = open(filename, "wb")
        except UnicodeEncodeError:
            pytest.skip("No unicode file names on this system")
        else:
            f.close()

        df = DataFrame(
            [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            index=["A", "B"],
            columns=["X", "Y", "Z"],
        )
        df.to_excel(filename, "test1", float_format="%.2f")

        with ExcelFile(filename) as reader:
            result = pd.read_excel(reader, sheet_name="test1", index_col=0)

        expected = DataFrame(
            [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
            index=["A", "B"],
            columns=["X", "Y", "Z"],
        )
        tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("use_headers", [True, False])
@pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
@pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
def test_excel_010_hemstring(
    self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path
):
    """Round-trip shapes for every header / multi-level index combination."""

    def roundtrip(data, header=True, parser_hdr=0, index=True):
        # write then immediately re-read the first sheet
        data.to_excel(path, header=header, merge_cells=merge_cells, index=index)

        with ExcelFile(path) as xf:
            return pd.read_excel(
                xf, sheet_name=xf.sheet_names[0], header=parser_hdr
            )

    # Basic test.
    parser_header = 0 if use_headers else None
    res = roundtrip(DataFrame([0]), use_headers, parser_header)

    assert res.shape == (1, 2)
    assert res.iloc[0, 0] is not np.nan

    # More complex tests with multi-index.
    nrows = 5
    ncols = 3

    # ensure limited functionality in 0.10
    # override of gh-2370 until sorted out in 0.11

    df = tm.makeCustomDataframe(
        nrows, ncols, r_idx_nlevels=r_idx_nlevels, c_idx_nlevels=c_idx_nlevels
    )

    # This if will be removed once multi-column Excel writing
    # is implemented. For now fixing gh-9794.
    if c_idx_nlevels > 1:
        msg = (
            "Writing to Excel with MultiIndex columns and no index "
            "\\('index'=False\\) is not yet implemented."
        )
        with pytest.raises(NotImplementedError, match=msg):
            roundtrip(df, use_headers, index=False)
    else:
        res = roundtrip(df, use_headers)

        if use_headers:
            assert res.shape == (nrows, ncols + r_idx_nlevels)
        else:
            # First row taken as columns.
            assert res.shape == (nrows - 1, ncols + r_idx_nlevels)

        # No NaNs.
        for r in range(len(res.index)):
            for c in range(len(res.columns)):
                assert res.iloc[r, c] is not np.nan
def test_duplicated_columns(self, path):
    """Duplicate column names are mangled with .1 suffixes on read (GH5235)."""
    # see gh-5235
    df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B"])
    df.to_excel(path, "test1")
    expected = DataFrame(
        [[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B.1"]
    )

    # By default, we mangle.
    result = pd.read_excel(path, sheet_name="test1", index_col=0)
    tm.assert_frame_equal(result, expected)

    # Explicitly, we pass in the parameter.
    result = pd.read_excel(
        path, sheet_name="test1", index_col=0, mangle_dupe_cols=True
    )
    tm.assert_frame_equal(result, expected)

    # see gh-11007, gh-10970
    df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"])
    df.to_excel(path, "test1")

    result = pd.read_excel(path, sheet_name="test1", index_col=0)
    expected = DataFrame(
        [[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A.1", "B.1"]
    )
    tm.assert_frame_equal(result, expected)

    # see gh-10982
    df.to_excel(path, "test1", index=False, header=False)
    result = pd.read_excel(path, sheet_name="test1", header=None)

    expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
    tm.assert_frame_equal(result, expected)

    # disabling the mangling is unsupported and must raise
    msg = "Setting mangle_dupe_cols=False is not supported yet"
    with pytest.raises(ValueError, match=msg):
        pd.read_excel(path, sheet_name="test1", header=None, mangle_dupe_cols=False)
def test_swapped_columns(self, path):
    """Reordering columns on write keeps values attached to their names (GH5427)."""
    # Test for issue #5427.
    write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})
    write_frame.to_excel(path, "test1", columns=["B", "A"])

    read_frame = pd.read_excel(path, sheet_name="test1", header=0)

    tm.assert_series_equal(write_frame["A"], read_frame["A"])
    tm.assert_series_equal(write_frame["B"], read_frame["B"])
def test_invalid_columns(self, path):
    """Requesting unknown columns on write raises KeyError (GH10982)."""
    # see gh-10982
    write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})

    # partially-missing column set
    with pytest.raises(KeyError, match="Not all names specified"):
        write_frame.to_excel(path, "test1", columns=["B", "C"])

    # fully-missing column set
    with pytest.raises(
        KeyError, match="'passes columns are not ALL present dataframe'"
    ):
        write_frame.to_excel(path, "test1", columns=["C", "D"])
@pytest.mark.parametrize(
    "to_excel_index,read_excel_index_col",
    [
        (True, 0),  # Include index in write to file
        (False, None),  # Dont include index in write to file
    ],
)
def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col):
    """Writing a column subset round-trips, with or without the index (GH31677)."""
    # GH 31677
    write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]})
    write_frame.to_excel(
        path, "col_subset_bug", columns=["A", "B"], index=to_excel_index
    )

    expected = write_frame[["A", "B"]]
    read_frame = pd.read_excel(
        path, sheet_name="col_subset_bug", index_col=read_excel_index_col
    )

    tm.assert_frame_equal(expected, read_frame)
def test_comment_arg(self, path):
    """comment='#' blanks out cells starting with the marker (GH18735)."""
    # see gh-18735
    #
    # Test the comment argument functionality to pd.read_excel.

    # Create file to read in.
    df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
    df.to_excel(path, "test_c")

    # Read file without comment arg, then null out the commented cells
    # (and everything to their right) by hand for comparison.
    result1 = pd.read_excel(path, sheet_name="test_c", index_col=0)

    result1.iloc[1, 0] = None
    result1.iloc[1, 1] = None
    result1.iloc[2, 1] = None

    result2 = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0)
    tm.assert_frame_equal(result1, result2)
def test_comment_default(self, path):
    """Default comment handling equals an explicit comment=None (GH18735)."""
    # Re issue #18735
    # Test the comment argument default to pd.read_excel

    # Create file to read in
    df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
    df.to_excel(path, "test_c")

    # Read file with default and explicit comment=None
    result1 = pd.read_excel(path, sheet_name="test_c")
    result2 = pd.read_excel(path, sheet_name="test_c", comment=None)
    tm.assert_frame_equal(result1, result2)
def test_comment_used(self, path):
    """comment='#' produces exactly the manually-constructed expected frame."""
    # see gh-18735
    #
    # Test the comment argument is working as expected when used.

    # Create file to read in.
    df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
    df.to_excel(path, "test_c")

    # Test read_frame_comment against manually produced expected output.
    expected = DataFrame({"A": ["one", None, "one"], "B": ["two", None, None]})
    result = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0)
    tm.assert_frame_equal(result, expected)
def test_comment_empty_line(self, path):
    """Rows that are entirely comments at end of file are dropped (GH18735)."""
    # Re issue #18735
    # Test that pd.read_excel ignores commented lines at the end of file

    df = DataFrame({"a": ["1", "#2"], "b": ["2", "3"]})
    df.to_excel(path, index=False)

    # Test that all-comment lines at EoF are ignored
    expected = DataFrame({"a": [1], "b": [2]})
    result = pd.read_excel(path, comment="#")
    tm.assert_frame_equal(result, expected)
def test_datetimes(self, path):
    """Second-resolution datetimes round-trip through Excel (GH9139/GH9185)."""
    # Test writing and reading datetimes. For issue #9139. (xref #9185)
    datetimes = [
        datetime(2013, 1, 13, 1, 2, 3),
        datetime(2013, 1, 13, 2, 45, 56),
        datetime(2013, 1, 13, 4, 29, 49),
        datetime(2013, 1, 13, 6, 13, 42),
        datetime(2013, 1, 13, 7, 57, 35),
        datetime(2013, 1, 13, 9, 41, 28),
        datetime(2013, 1, 13, 11, 25, 21),
        datetime(2013, 1, 13, 13, 9, 14),
        datetime(2013, 1, 13, 14, 53, 7),
        datetime(2013, 1, 13, 16, 37, 0),
        datetime(2013, 1, 13, 18, 20, 52),
    ]

    write_frame = DataFrame({"A": datetimes})
    write_frame.to_excel(path, "Sheet1")
    # skip AFTER writing: the write itself works, reading is what fails
    if path.endswith("xlsx") or path.endswith("xlsm"):
        pytest.skip(
            "Defaults to openpyxl and fails with floating point error on "
            "datetimes; may be fixed on newer versions of openpyxl - GH #38644"
        )
    read_frame = pd.read_excel(path, sheet_name="Sheet1", header=0)

    tm.assert_series_equal(write_frame["A"], read_frame["A"])
def test_bytes_io(self, engine):
    """Writing to and reading from an in-memory BytesIO buffer works (GH7074)."""
    # see gh-7074
    with BytesIO() as bio:
        df = DataFrame(np.random.randn(10, 2))

        # Pass engine explicitly, as there is no file path to infer from.
        with ExcelWriter(bio, engine=engine) as writer:
            df.to_excel(writer)

        # rewind before re-reading the buffer
        bio.seek(0)
        reread_df = pd.read_excel(bio, index_col=0)
        tm.assert_frame_equal(df, reread_df)
def test_write_lists_dict(self, path):
    """List/dict cell values are stringified on write (GH8188)."""
    # see gh-8188.
    df = DataFrame(
        {
            "mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
            "numeric": [1, 2, 3.0],
            "str": ["apple", "banana", "cherry"],
        }
    )
    df.to_excel(path, "Sheet1")
    read = pd.read_excel(path, sheet_name="Sheet1", header=0, index_col=0)

    expected = df.copy()
    # containers come back as their repr strings; whole numbers as int64
    expected.mixed = expected.mixed.apply(str)
    expected.numeric = expected.numeric.astype("int64")

    tm.assert_frame_equal(read, expected)
def test_render_as_column_name(self, path):
    """A column literally named 'render' round-trips unchanged (GH34331)."""
    # see gh-34331
    df = DataFrame({"render": [1, 2], "data": [3, 4]})
    df.to_excel(path, "Sheet1")
    read = pd.read_excel(path, "Sheet1", index_col=0)
    expected = df
    tm.assert_frame_equal(read, expected)
def test_true_and_false_value_options(self, path):
    """true_values/false_values map matching strings to booleans (GH13347)."""
    # see gh-13347
    df = DataFrame([["foo", "bar"]], columns=["col1", "col2"])
    expected = df.replace({"foo": True, "bar": False})

    df.to_excel(path)
    read_frame = pd.read_excel(
        path, true_values=["foo"], false_values=["bar"], index_col=0
    )
    tm.assert_frame_equal(read_frame, expected)
def test_freeze_panes(self, path):
    """freeze_panes is accepted on write and does not alter the data (GH15160)."""
    # see gh-15160
    expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
    expected.to_excel(path, "Sheet1", freeze_panes=(1, 1))

    result = pd.read_excel(path, index_col=0)
    tm.assert_frame_equal(result, expected)
def test_path_path_lib(self, engine, ext):
    """pathlib.Path targets are accepted by both writer and reader."""
    df = tm.makeDataFrame()
    writer = partial(df.to_excel, engine=engine)

    reader = partial(pd.read_excel, index_col=0)
    result = tm.round_trip_pathlib(writer, reader, path=f"foo{ext}")
    tm.assert_frame_equal(result, df)
def test_path_local_path(self, engine, ext):
    """py.path.local targets are accepted by both writer and reader."""
    df = tm.makeDataFrame()
    writer = partial(df.to_excel, engine=engine)

    reader = partial(pd.read_excel, index_col=0)
    result = tm.round_trip_localpath(writer, reader, path=f"foo{ext}")
    tm.assert_frame_equal(result, df)
def test_merged_cell_custom_objects(self, merge_cells, path):
    """Period MultiIndex columns survive writing with merged cells (GH27006)."""
    # see GH-27006
    mi = MultiIndex.from_tuples(
        [
            (pd.Period("2018"), pd.Period("2018Q1")),
            (pd.Period("2018"), pd.Period("2018Q2")),
        ]
    )
    expected = DataFrame(np.ones((2, 2)), columns=mi)
    expected.to_excel(path)
    # convert_float=False is deprecated; assert the warning fires
    with tm.assert_produces_warning(
        FutureWarning, match="convert_float is deprecated"
    ):
        result = pd.read_excel(
            path, header=[0, 1], index_col=0, convert_float=False
        )
    # need to convert PeriodIndexes to standard Indexes for assert equal
    expected.columns = expected.columns.set_levels(
        [[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]],
        level=[0, 1],
    )
    # with convert_float=False the integer index reads back as float
    expected.index = expected.index.astype(np.float64)
    tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("dtype", [None, object])
def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path):
    """tz-aware values cannot be written to Excel and must raise (GH27008/GH7056)."""
    # GH 27008, GH 7056
    tz = tz_aware_fixture
    data = pd.Timestamp("2019", tz=tz)
    df = DataFrame([data], dtype=dtype)
    with pytest.raises(ValueError, match="Excel does not support"):
        df.to_excel(path)

    # same failure for a tz-aware stdlib datetime
    data = data.to_pydatetime()
    df = DataFrame([data], dtype=dtype)
    with pytest.raises(ValueError, match="Excel does not support"):
        df.to_excel(path)
def test_excel_duplicate_columns_with_names(self, path):
    """Duplicate names in the columns= argument are written and mangled (GH39695)."""
    # GH#39695
    df = DataFrame({"A": [0, 1], "B": [10, 11]})
    df.to_excel(path, columns=["A", "B", "A"], index=False)

    result = pd.read_excel(path)
    expected = DataFrame([[0, 10, 0], [1, 11, 1]], columns=["A", "B", "A.1"])
    tm.assert_frame_equal(result, expected)
def test_if_sheet_exists_raises(self, ext):
    """if_sheet_exists outside append mode raises ValueError (GH40230)."""
    # GH 40230
    msg = "if_sheet_exists is only valid in append mode (mode='a')"

    with tm.ensure_clean(ext) as f:
        with pytest.raises(ValueError, match=re.escape(msg)):
            ExcelWriter(f, if_sheet_exists="replace")
class TestExcelWriterEngineTests:
    """Engine selection, dispatch and custom-writer registration tests."""

    @pytest.mark.parametrize(
        "klass,ext",
        [
            pytest.param(_XlsxWriter, ".xlsx", marks=td.skip_if_no("xlsxwriter")),
            pytest.param(_OpenpyxlWriter, ".xlsx", marks=td.skip_if_no("openpyxl")),
            pytest.param(_XlwtWriter, ".xls", marks=td.skip_if_no("xlwt")),
        ],
    )
    def test_ExcelWriter_dispatch(self, klass, ext):
        """The extension selects the expected writer class."""
        with tm.ensure_clean(ext) as path:
            with ExcelWriter(path) as writer:
                if ext == ".xlsx" and td.safe_import("xlsxwriter"):
                    # xlsxwriter has preference over openpyxl if both installed
                    assert isinstance(writer, _XlsxWriter)
                else:
                    assert isinstance(writer, klass)

    def test_ExcelWriter_dispatch_raises(self):
        """An unrecognizable target raises a 'No engine' ValueError."""
        with pytest.raises(ValueError, match="No engine"):
            ExcelWriter("nothing")

    def test_register_writer(self):
        """A registered custom writer is dispatched and its hooks invoked."""
        # some awkward mocking to test out dispatch and such actually works
        called_save = []
        called_write_cells = []

        class DummyClass(ExcelWriter):
            # instrumented no-op writer used to observe dispatch
            called_save = False
            called_write_cells = False
            supported_extensions = ["xlsx", "xls"]
            engine = "dummy"

            def save(self):
                called_save.append(True)

            def write_cells(self, *args, **kwargs):
                called_write_cells.append(True)

        def check_called(func):
            # run func and assert both writer hooks fired, then reset
            func()
            assert len(called_save) >= 1
            assert len(called_write_cells) >= 1
            del called_save[:]
            del called_write_cells[:]

        with pd.option_context("io.excel.xlsx.writer", "dummy"):
            path = "something.xlsx"
            with tm.ensure_clean(path) as filepath:
                register_writer(DummyClass)
                with ExcelWriter(filepath) as writer:
                    assert isinstance(writer, DummyClass)
                df = tm.makeCustomDataframe(1, 1)
                check_called(lambda: df.to_excel(filepath))

        # explicit engine= also routes through the dummy writer
        with tm.ensure_clean("something.xls") as filepath:
            check_called(lambda: df.to_excel(filepath, engine="dummy"))

    @pytest.mark.parametrize(
        "ext",
        [
            pytest.param(".xlsx", marks=td.skip_if_no("xlsxwriter")),
            pytest.param(".xlsx", marks=td.skip_if_no("openpyxl")),
            pytest.param(".ods", marks=td.skip_if_no("odf")),
        ],
    )
    def test_engine_kwargs_and_kwargs_raises(self, ext):
        """Passing both engine_kwargs and **kwargs is rejected (GH40430)."""
        # GH 40430
        msg = re.escape("Cannot use both engine_kwargs and **kwargs")
        with pytest.raises(ValueError, match=msg):
            with ExcelWriter("", engine_kwargs={"a": 1}, b=2):
                pass
@td.skip_if_no("xlrd")
@td.skip_if_no("openpyxl")
class TestFSPath:
    """os.fspath() support on ExcelFile and ExcelWriter objects."""

    def test_excelfile_fspath(self):
        """ExcelFile exposes its backing path via the fspath protocol."""
        with tm.ensure_clean("foo.xlsx") as path:
            df = DataFrame({"A": [1, 2]})
            df.to_excel(path)
            with ExcelFile(path) as xl:
                result = os.fspath(xl)
            assert result == path

    def test_excelwriter_fspath(self):
        """ExcelWriter exposes its target path via the fspath protocol."""
        with tm.ensure_clean("foo.xlsx") as path:
            with ExcelWriter(path) as writer:
                assert os.fspath(writer) == str(path)
|
from mp_api.routes.synthesis.models.core import (
SynthesisRecipe,
SynthesisTypeEnum,
SynthesisSearchResultModel,
)
from mp_api.routes.synthesis.models.materials import (
Component,
ExtractedMaterial,
)
from mp_api.routes.synthesis.models.operations import (
Value,
Conditions,
Operation,
OperationTypeEnum,
)
from mp_api.routes.synthesis.models.reaction import (
FormulaPart,
ReactionFormula,
)
|
"""Control application"""
import time
class Control:
    """Capture/dither control state machine.

    Polls the camera (``webapp.dslr``) and guider (``webapp.guider``) from a
    periodic loop.  Status moves IDLE -> CAPTURING -> (DITHERING) -> STOPPING
    -> IDLE as images are taken.
    """

    # Control status modes
    STATUS_IDLE = 0
    STATUS_CAPTURING = 1
    STATUS_DITHERING = 2
    STATUS_STOPPING = 3

    # Loop delay in seconds
    LOOP_DELAY = 0.5

    # Camera info cached at capture_start() time
    cached_camera_list = None
    cached_camera_config = None

    def __init__(self, webapp):
        self.webapp = webapp
        self.current_status = self.STATUS_IDLE
        self.current_capture = 0
        self.last_image = None
        self.last_capture = 0
        self.capture_parms = None
        self.dither_status = None

    def run(self):
        """Run the control loop forever, one iteration every LOOP_DELAY."""
        while True:
            self.loop_iteration()
            time.sleep(self.LOOP_DELAY)

    def loop_iteration(self):
        """Execute one step of the capture/dither state machine."""
        self.webapp.logger.debug("Control: Looping")

        # Control loop
        if self.current_status == self.STATUS_CAPTURING:
            if self.current_capture == self.capture_parms["captures"]:
                print("Finished capturing process")
                self.current_status = self.STATUS_STOPPING
            else:
                self.current_capture += 1
                # BUGFIX: message previously read "Stared capturing image"
                print(
                    "Started capturing image {}/{}".format(
                        self.current_capture, self.capture_parms["captures"]
                    )
                )

                # Capture image (bulb exposure, then wait it out)
                self.last_image = self.webapp.dslr.capture_image_bulb(
                    self.capture_parms["exposure"]
                )
                self.last_capture = self.current_capture
                time.sleep(self.capture_parms["exposure"])
                print(
                    "Finished capturing image {}/{}".format(
                        self.current_capture, self.capture_parms["captures"]
                    )
                )

                # Check dithering: every dither_n-th capture (except the last)
                # hands control to the guider until it settles.
                if self.current_capture < self.capture_parms["captures"]:
                    if self.current_capture % self.capture_parms["dither_n"] == 0:
                        self.current_status = self.STATUS_DITHERING
                        try:
                            self.webapp.guider.start_dither(
                                dither_px=self.capture_parms["dither_px"],
                                settle_px=self.capture_parms["settle_px"],
                                settle_time=self.capture_parms["settle_time"],
                                settle_timeout=self.capture_parms["settle_timeout"],
                            )
                        except Exception as e:
                            print("Error starting dithering: ", e)

        if self.current_status == self.STATUS_DITHERING:
            print("Dithering")
            try:
                settled, settling = self.webapp.guider.check_settled()
                if settled:
                    # Continue capturing
                    self.dither_status = None
                    self.current_status = self.STATUS_CAPTURING
                else:
                    # expose guider settling progress for the UI
                    self.dither_status = {
                        "dist": settling.Distance,
                        "px": settling.SettlePx,
                        "time": settling.Time,
                        "settle_time": settling.SettleTime,
                    }
                    print("Dithering status: %s" % self.dither_status)
            except Exception as e:
                # TODO: Status error and error messages
                print("Dithering error: ", e)

        if self.current_status == self.STATUS_STOPPING:
            print("Stopping captures")
            self.current_status = self.STATUS_IDLE
            print("Stopped captures")

    def process_message(self, message):
        """Log an incoming message (no other handling yet)."""
        self.webapp.logger.debug("Control: Message received: %s", message)

    def capture_start(
        self,
        exposure,
        captures,
        dither,
        dither_n,
        dither_px,
        settle_px,
        settle_time,
        settle_timeout,
    ):
        """Configure a capture run and switch to CAPTURING."""
        # Cache camera list and current camera config
        self.cached_camera_list = self.webapp.dslr.get_camera_list()
        self.cached_camera_config = self.webapp.dslr.get_config()

        # Initialize capture configuration parameters
        self.capture_parms = {
            "exposure": exposure,
            "captures": captures,
            "dither": dither,
            "dither_n": dither_n,
            "dither_px": dither_px,
            "settle_px": settle_px,
            "settle_time": settle_time,
            "settle_timeout": settle_timeout,
        }

        # Initialize capture status parameters
        self.current_capture = 0
        self.last_image = None
        self.last_capture = 0
        self.current_status = self.STATUS_CAPTURING

    def capture_stop(self):
        """Request that capturing stop after the current iteration."""
        self.current_status = self.STATUS_STOPPING

    def get_capture_status(self):
        """Return a status snapshot dict for the web UI."""
        return {
            "current_status": self.current_status,
            "current_capture": self.current_capture,
            "last_capture": self.last_capture,
            "capture_parms": self.capture_parms,
            "dither_status": self.dither_status,
        }

    def get_capture_image(self):
        """Return the most recently captured image (or None)."""
        return self.last_image
|
from .mesonet import MesoInception, MesoNet
|
import unittest
import numpy
import numpy as np
from pyscf.pbc import gto as pgto
import pyscf.pbc.dft as pdft
from pyscf.pbc.df import fft, aft, mdf
##################################################
#
# port from ao2mo/eris.py
#
##################################################
from pyscf import lib
from pyscf.pbc import lib as pbclib
from pyscf.pbc.dft.gen_grid import gen_uniform_grids
from pyscf.pbc.dft.numint import eval_ao
from pyscf.pbc import tools
einsum = np.einsum
"""
(ij|kl) = \int dr1 dr2 i*(r1) j(r1) v(r12) k*(r2) l(r2)
= (ij|G) v(G) (G|kl)
i*(r) j(r) = 1/N \sum_G e^{iGr} (G|ij)
= 1/N \sum_G e^{-iGr} (ij|G)
"forward" FFT:
(G|ij) = \sum_r e^{-iGr} i*(r) j(r) = fft[ i*(r) j(r) ]
"inverse" FFT:
(ij|G) = \sum_r e^{iGr} i*(r) j(r) = N * ifft[ i*(r) j(r) ]
= conj[ \sum_r e^{-iGr} j*(r) i(r) ]
"""
def general(cell, mo_coeffs, kpts=None, compact=0):
    '''pyscf-style wrapper to get MO 2-el integrals.'''
    # exactly four MO coefficient sets (and, if given, four k-points)
    if len(mo_coeffs) != 4:
        raise AssertionError
    if kpts is not None and len(kpts) != 4:
        raise AssertionError
    return get_mo_eri(cell, mo_coeffs, kpts)
def get_mo_eri(cell, mo_coeffs, kpts=None):
    '''Convenience function to return MO 2-el integrals.'''
    # split coefficient (and k-point) lists into bra and ket pairs
    bra_coeffs, ket_coeffs = mo_coeffs[:2], mo_coeffs[2:]
    if kpts is None:
        bra_kpts = ket_kpts = None
        q = np.zeros(3)
    else:
        bra_kpts = kpts[:2]
        ket_kpts = kpts[2:]
        # momentum transfer between the bra k-points
        q = bra_kpts[0] - bra_kpts[1]
        #q = kpts34[1] - kpts34[0]
    pairs_bra_G = get_mo_pairs_G(cell, bra_coeffs, bra_kpts)
    pairs_ket_invG = get_mo_pairs_invG(cell, ket_coeffs, ket_kpts, q)
    return assemble_eri(cell, pairs_bra_G, pairs_ket_invG, q)
def get_mo_pairs_G(cell, mo_coeffs, kpts=None, q=None):
    '''Calculate forward (G|ij) FFT of all MO pairs.

    TODO: - Implement simplifications for real orbitals.

    Args:
        mo_coeff: length-2 list of (nao,nmo) ndarrays
            The two sets of MO coefficients to use in calculating the
            product |ij).

    Returns:
        mo_pairs_G : (ngs, nmoi*nmoj) ndarray
            The FFT of the real-space MO pairs.
    '''
    coords = gen_uniform_grids(cell)
    if kpts is None:
        q = np.zeros(3)
        aoR = eval_ao(cell, coords)
        ngs = aoR.shape[0]

        # identical coefficient sets: build the MO grid once
        if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
            nmoi = nmoj = mo_coeffs[0].shape[1]
            moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
        else:
            nmoi = mo_coeffs[0].shape[1]
            nmoj = mo_coeffs[1].shape[1]
            moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
            mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
    else:
        if q is None:
            q = kpts[1]-kpts[0]
        aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
        aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
        ngs = aoR_ki.shape[0]

        nmoi = mo_coeffs[0].shape[1]
        nmoj = mo_coeffs[1].shape[1]
        moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
        mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])

    #mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)
    mo_pairs_G = np.zeros([ngs,nmoi*nmoj], np.complex128)

    fac = np.exp(-1j*np.dot(coords, q))
    # BUGFIX: use range, not Python-2-only xrange (NameError on Python 3)
    for i in range(nmoi):
        for j in range(nmoj):
            mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]
            mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R_ij, cell.gs, fac)

    return mo_pairs_G
def get_mo_pairs_invG(cell, mo_coeffs, kpts=None, q=None):
    '''Calculate "inverse" (ij|G) FFT of all MO pairs.

    TODO: - Implement simplifications for real orbitals.

    Args:
        mo_coeff: length-2 list of (nao,nmo) ndarrays
            The two sets of MO coefficients to use in calculating the
            product |ij).

    Returns:
        mo_pairs_invG : (ngs, nmoi*nmoj) ndarray
            The inverse FFTs of the real-space MO pairs.
    '''
    coords = gen_uniform_grids(cell)
    if kpts is None:
        q = np.zeros(3)
        aoR = eval_ao(cell, coords)
        ngs = aoR.shape[0]

        # identical coefficient sets: build the MO grid once
        if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
            nmoi = nmoj = mo_coeffs[0].shape[1]
            moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
        else:
            nmoi = mo_coeffs[0].shape[1]
            nmoj = mo_coeffs[1].shape[1]
            moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
            mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
    else:
        if q is None:
            q = kpts[1]-kpts[0]
        aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
        aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
        ngs = aoR_ki.shape[0]

        nmoi = mo_coeffs[0].shape[1]
        nmoj = mo_coeffs[1].shape[1]
        moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
        mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])

    #mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)
    mo_pairs_invG = np.zeros([ngs,nmoi*nmoj], np.complex128)

    fac = np.exp(1j*np.dot(coords, q))
    # BUGFIX: use range, not Python-2-only xrange (NameError on Python 3)
    for i in range(nmoi):
        for j in range(nmoj):
            mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]
            # (ij|G) = conj(fftk(conj(i* j))) -- see module docstring
            mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R_ij), cell.gs, fac))

    return mo_pairs_invG
def get_mo_pairs_G_old(cell, mo_coeffs, kpts=None, q=None):
    '''Calculate forward (G|ij) and "inverse" (ij|G) FFT of all MO pairs.

    TODO: - Implement simplifications for real orbitals.

    Args:
        mo_coeff: length-2 list of (nao,nmo) ndarrays
            The two sets of MO coefficients to use in calculating the
            product |ij).

    Returns:
        mo_pairs_G, mo_pairs_invG : (ngs, nmoi*nmoj) ndarray
            The FFTs of the real-space MO pairs.
    '''
    coords = gen_uniform_grids(cell)
    if kpts is None:
        q = np.zeros(3)
        aoR = eval_ao(cell, coords)
        ngs = aoR.shape[0]

        # identical coefficient sets: build the MO grid once
        if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
            nmoi = nmoj = mo_coeffs[0].shape[1]
            moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
        else:
            nmoi = mo_coeffs[0].shape[1]
            nmoj = mo_coeffs[1].shape[1]
            moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
            mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
    else:
        if q is None:
            q = kpts[1]-kpts[0]
        aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
        aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
        ngs = aoR_ki.shape[0]

        nmoi = mo_coeffs[0].shape[1]
        nmoj = mo_coeffs[1].shape[1]
        moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
        mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])

    # full (ngs, nmoi, nmoj) product grid, unlike the memory-lean new version
    mo_pairs_R = np.einsum('ri,rj->rij', np.conj(moiR), mojR)
    mo_pairs_G = np.zeros([ngs,nmoi*nmoj], np.complex128)
    mo_pairs_invG = np.zeros([ngs,nmoi*nmoj], np.complex128)

    fac = np.exp(-1j*np.dot(coords, q))
    # BUGFIX: use range, not Python-2-only xrange (NameError on Python 3)
    for i in range(nmoi):
        for j in range(nmoj):
            mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R[:,i,j], cell.gs, fac)
            mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R[:,i,j]), cell.gs,
                                                           fac.conj()))
    return mo_pairs_G, mo_pairs_invG
def assemble_eri(cell, orb_pair_invG1, orb_pair_G2, q=None):
    '''Assemble 4-index electron repulsion integrals.

    Returns:
        (nmo1*nmo2, nmo3*nmo4) ndarray
    '''
    if q is None:
        q = np.zeros(3)

    # Coulomb kernel at -q, then contract the two pair densities in G space
    vq_G = tools.get_coulG(cell, -1.0*q)
    ngrid = orb_pair_invG1.shape[0]
    norm = cell.vol / ngrid**2
    weighted_G2 = np.einsum('g,gn->gn', vq_G, orb_pair_G2) * norm
    return np.dot(orb_pair_invG1.T, weighted_G2)
def get_ao_pairs_G(cell, kpt=np.zeros(3)):
    '''Calculate forward (G|ij) and "inverse" (ij|G) FFT of all AO pairs.

    Args:
        cell : instance of :class:`Cell`

    Returns:
        ao_pairs_G, ao_pairs_invG : (ngs, nao*(nao+1)/2) ndarray
            The FFTs of the real-space AO pairs.
    '''
    coords = gen_uniform_grids(cell)
    aoR = eval_ao(cell, coords, kpt) # shape = (coords, nao)
    ngs, nao = aoR.shape
    gamma_point = abs(kpt).sum() < 1e-9
    if gamma_point:
        # real orbitals: only the lower triangle of pairs is needed,
        # and (ij|G) is just the complex conjugate of (G|ij)
        npair = nao*(nao+1)//2
        ao_pairs_G = np.empty([ngs, npair], np.complex128)

        ij = 0
        for i in range(nao):
            for j in range(i+1):
                ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]
                ao_pairs_G[:,ij] = tools.fft(ao_ij_R, cell.gs)
                #ao_pairs_invG[:,ij] = ngs*tools.ifft(ao_ij_R, cell.gs)
                ij += 1
        ao_pairs_invG = ao_pairs_G.conj()
    else:
        # general k-point: full nao x nao pair matrix;
        # (ij|G) follows from the transposed conjugate of (G|ij)
        ao_pairs_G = np.zeros([ngs, nao,nao], np.complex128)
        for i in range(nao):
            for j in range(nao):
                ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]
                ao_pairs_G[:,i,j] = tools.fft(ao_ij_R, cell.gs)
        ao_pairs_invG = ao_pairs_G.transpose(0,2,1).conj().reshape(-1,nao**2)
        ao_pairs_G = ao_pairs_G.reshape(-1,nao**2)
    return ao_pairs_G, ao_pairs_invG
def get_ao_eri(cell, kpt=np.zeros(3)):
    '''Convenience function to return AO 2-el integrals.'''
    pairs_G, pairs_invG = get_ao_pairs_G(cell, kpt)
    eri = assemble_eri(cell, pairs_invG, pairs_G)
    # at the gamma point the integrals are real; drop the zero imaginary part
    gamma_point = abs(kpt).sum() < 1e-9
    return eri.real if gamma_point else eri
##################################################
#
# ao2mo/eris.py end
#
##################################################
# --- module-level fixtures shared by the test cases below ---

# He/C cell with a pseudopotential on carbon
cell = pgto.Cell()
cell.atom = 'He 1. .5 .5; C .1 1.3 2.1'
cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))],
              'C' :'gth-szv',}
cell.pseudo = {'C':'gth-pade'}
cell.a = np.eye(3) * 2.5
cell.gs = [10] * 3
cell.build()

# four reproducible k-points satisfying momentum conservation
# (k3 = k0 - k1 + k2)
np.random.seed(1)
kpts = np.random.random((4,3))
kpts[3] = kpts[0]-kpts[1]+kpts[2]
kpt0 = np.zeros(3)

# all-He cell (no pseudopotential) for the MDF reference calculations
cell1 = pgto.Cell()
cell1.atom = 'He 1. .5 .5; He .1 1.3 2.1'
cell1.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}
cell1.a = np.eye(3) * 2.5
cell1.gs = [10] * 3
cell1.build()
kdf0 = mdf.MDF(cell1)
kdf0.kpts = kpts
def finger(a):
    """Deterministic scalar fingerprint of an array (cosine-weighted sum)."""
    flat = np.asarray(a).ravel()
    weights = np.cos(np.arange(flat.size))
    return np.dot(weights, flat)
class KnowValues(unittest.TestCase):
def test_aft_get_nuc(self):
    """AFT nuclear-attraction matrix fingerprint at the first k-point."""
    df = aft.AFTDF(cell)
    v1 = df.get_nuc(kpts[0])
    self.assertAlmostEqual(finger(v1), (-6.0893491060887159+0.19823828749533859j), 8)
def test_aft_get_pp(self):
    """AFT pseudopotential matrices match the reference and fingerprints."""
    # reference from the direct pseudo implementation at kpts[0]
    v0 = pgto.pseudo.get_pp(cell, kpts[0])
    v1 = aft.AFTDF(cell).get_pp(kpts)
    self.assertTrue(np.allclose(v0, v1[0], atol=1e-5, rtol=1e-5))
    self.assertAlmostEqual(finger(v1[0]), (-5.6240305085898807+0.22094834207603817j), 8)

    v0 = pgto.pseudo.get_pp(cell, kpts[1])
    self.assertTrue(np.allclose(v0, v1[1], atol=1e-5, rtol=1e-5))
    self.assertAlmostEqual(finger(v1[1]), (-5.53877585793+1.043933371359j) ,8)
    # remaining k-points checked by fingerprint only
    self.assertAlmostEqual(finger(v1[2]), (-6.05309558678+0.281728966073j), 8)
    self.assertAlmostEqual(finger(v1[3]), (-5.60115995450+0.275973062529j), 8)
def test_aft_get_ao_eri(self):
df0 = fft.FFTDF(cell)
df = aft.AFTDF(cell)
eri0 = df0.get_ao_eri(compact=True)
eri1 = df.get_ao_eri(compact=True)
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(finger(eri1), 0.80425361966560172, 8)
eri0 = df0.get_ao_eri(kpts[0])
eri1 = df.get_ao_eri(kpts[0])
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(finger(eri1), (2.9346374476387949-0.20479054936779137j), 8)
eri0 = df0.get_ao_eri(kpts)
eri1 = df.get_ao_eri(kpts)
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(finger(eri1), (0.33709287302019619-0.94185725020966538j), 8)
def test_get_eri_gamma(self):
odf0 = mdf.MDF(cell1)
odf = aft.AFTDF(cell1)
ref = odf0.get_eri()
eri0000 = odf.get_eri(compact=True)
self.assertTrue(eri0000.dtype == numpy.double)
self.assertTrue(np.allclose(eri0000, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(finger(eri0000), 0.23714016293926865, 9)
ref = kdf0.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))
eri1111 = odf.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))
self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(finger(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)
eri1111 = odf.get_eri((kpts[0]+1e-8,kpts[0]+1e-8,kpts[0],kpts[0]))
self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(finger(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)
def test_get_eri_0011(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
self.assertTrue(np.allclose(eri0011, ref, atol=1e-3, rtol=1e-3))
self.assertAlmostEqual(finger(eri0011), (1.2410162858084512+0.00074485383749912936j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[0],kpts[1],kpts[1]))
eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
self.assertTrue(np.allclose(eri0011, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(finger(eri0011), (1.2410162860852818+0.00074485383748954838j), 9)
def test_get_eri_0110(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))
eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[1],kpts[1],kpts[0]))
eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
def test_get_eri_0123(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri(kpts)
eri1111 = odf.get_eri(kpts)
self.assertTrue(np.allclose(eri1111, ref, atol=1e-8, rtol=1e-8))
self.assertAlmostEqual(finger(eri1111), (1.2917759427391706-0.013340252488069412j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, kpts)
self.assertTrue(np.allclose(eri1111, ref, atol=1e-8, rtol=1e-8))
def test_get_mo_eri(self):
df0 = fft.FFTDF(cell)
odf = aft.AFTDF(cell)
nao = cell.nao_nr()
numpy.random.seed(5)
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri_mo0 = df0.get_mo_eri((mo,)*4, kpts)
eri_mo1 = odf.get_mo_eri((mo,)*4, kpts)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
kpts_t = (kpts[2],kpts[3],kpts[0],kpts[1])
eri_mo2 = df0.get_mo_eri((mo,)*4, kpts_t)
eri_mo2 = eri_mo2.reshape((nao,)*4).transpose(2,3,0,1).reshape(nao**2,-1)
self.assertTrue(np.allclose(eri_mo2, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],)*4)
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],)*4)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
mo1 = mo[:,:nao//2+1]
eri_mo0 = df0.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)
eri_mo1 = odf.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
# Allow running this test module directly.
if __name__ == '__main__':
    print("Full Tests for aft")
    unittest.main()
|
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import copy
import json
import re
from django.core.urlresolvers import reverse
from django.core import validators as django_validator
from django import forms
from django.forms import widgets
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms as hz_forms
from horizon import messages
from openstack_dashboard.api import cinder
from openstack_dashboard.api import glance
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from oslo_log import log as logging
from oslo_log import versionutils
import six
from yaql import legacy
from muranodashboard.api import packages as pkg_api
from muranodashboard.common import net
from muranodashboard.environments import api as env_api
LOG = logging.getLogger(__name__)
def with_request(func):
    """Injects request into func

    The decorator is meant to be used together with `UpdatableFieldsForm':
    apply it to the `update' method of fields inside that form.
    """
    def update(self, initial, request=None, **kwargs):
        initial_request = initial.get('request')
        # Forward every initial entry (except 'request') that the caller
        # did not already supply explicitly.
        extra = {key: value for key, value in six.iteritems(initial)
                 if key != 'request' and key not in kwargs}
        kwargs.update(extra)
        if initial_request:
            LOG.debug("Using 'request' value from initial dictionary")
            chosen = initial_request
        elif request:
            LOG.debug("Using direct 'request' value")
            chosen = request
        else:
            LOG.error("No 'request' value passed neither via initial "
                      "dictionary, nor directly")
            raise forms.ValidationError("Can't get a request information")
        func(self, chosen, **kwargs)
    return update
def make_yaql_validator(validator_property):
    """Field-level validator uses field's value as its '$' root object."""
    yaql_expr = validator_property['expr'].spec
    error_message = validator_property.get('message', '')

    def _check(value):
        ctx = legacy.create_context()
        ctx['$'] = value
        # A falsy evaluation result means the value failed validation.
        if not yaql_expr.evaluate(context=ctx):
            raise forms.ValidationError(error_message)
    return _check
def get_regex_validator(expr):
    """Return the first validator of *expr* if it is a RegexValidator."""
    try:
        first = expr['validators'][0]
    except (TypeError, KeyError, IndexError):
        # Not a mapping, no 'validators' key, or an empty list.
        return None
    if isinstance(first, django_validator.RegexValidator):
        return first
    return None
# This function is needed if we don't want to change existing services
# regexpValidators
def wrap_regex_validator(validator, message):
    """Wrap *validator* so that failures raise with *message* instead of
    the validator's own error text."""
    def _validator(value):
        try:
            validator(value)
        except forms.ValidationError:
            # provide our own message
            raise forms.ValidationError(message)
    return _validator
def get_murano_images(request):
    """Return glance images that carry parseable murano metadata.

    Fetches the image list from glance, drops snapshot-type images, and
    keeps only images whose 'murano_image_info' property is valid JSON.
    The parsed metadata dict is attached to each image as
    `image.murano_property`.
    """
    images = []
    try:
        # https://bugs.launchpad.net/murano/+bug/1339261 - glance
        # client version change alters the API. Other tuple values
        # are _more and _prev (in recent glance client)
        images = glance.image_list_detailed(request)[0]
    except Exception:
        LOG.error("Error to request image list from glance ")
        exceptions.handle(request, _("Unable to retrieve public images."))
    murano_images = []
    # filter out the snapshot image type
    # NOTE(review): on Python 3 this is a lazy iterator; it is consumed
    # exactly once by the loop below, so that is fine.
    images = filter(
        lambda x: x.properties.get("image_type", '') != 'snapshot', images)
    for image in images:
        # Additional properties, whose value is always a string data type, are
        # only included in the response if they have a value.
        murano_property = getattr(image, 'murano_image_info', None)
        if murano_property:
            try:
                murano_metadata = json.loads(murano_property)
            except ValueError:
                LOG.warning("JSON in image metadata is not valid. "
                            "Check it in glance.")
                messages.error(request, _("Invalid murano image metadata"))
            else:
                image.murano_property = murano_metadata
                murano_images.append(image)
    return murano_images
class RawProperty(object):
    """A deferred service property that `finalize` turns into a real
    Python property backed by the service's form data."""

    def __init__(self, key, spec):
        self.key = key
        self.spec = spec
        self.value = None
        self.value_evaluated = False

    def finalize(self, form_name, service, cls):
        """Build a property(): reads fall back to the service data until
        an explicit (non-None) value has been assigned."""
        def getter(field):
            if self.value_evaluated:
                return self.value
            return service.get_data(form_name, self.spec)

        def setter(field, value):
            self.value = value
            self.value_evaluated = value is not None
            # Chain to a same-named property on the base class, if any.
            if hasattr(cls, self.key):
                getattr(cls, self.key).fset(field, value)

        def deleter(field):
            # Deleting reverts to the lazily-evaluated service value.
            setter(field, None)

        return property(getter, setter, deleter)
# Field-constructor kwargs whose values are HTML-escaped before rendering.
FIELD_ARGS_TO_ESCAPE = ['help_text', 'initial', 'description', 'label']
class CustomPropertiesField(forms.Field):
    """Base mixin adding murano-specific behaviour to django form fields.

    Responsibilities: HTML-escape displayable kwargs, split the supplied
    validators into server-side callables and client-side (JS) regex
    descriptors, skip validation for disabled fields, and finalize
    deferred `RawProperty` kwargs into real properties.
    """
    # JSON blob consumed by client-side validation; False disables it.
    js_validation = False

    def __init__(self, description=None, description_title=None,
                 *args, **kwargs):
        self.description = description
        self.description_title = (description_title or
                                  force_text(kwargs.get('label', '')))
        # Escape user-visible kwargs to prevent HTML injection.
        for arg in FIELD_ARGS_TO_ESCAPE:
            if kwargs.get(arg):
                kwargs[arg] = html.escape(force_text(kwargs[arg]))
        validators = []
        validators_js = []
        for validator in kwargs.get('validators', []):
            if hasattr(validator, '__call__'):  # single regexpValidator
                validators.append(validator)
                if hasattr(validator, 'regex'):
                    regex_message = ''
                    error_messages = kwargs.get('error_messages', {})
                    # Prefer a message keyed by the validator's error code.
                    if hasattr(validator, 'code') and \
                            validator.code in error_messages:
                        regex_message = force_text(
                            error_messages[validator.code]
                        )
                    validators_js. \
                        append({'regex': force_text(validator.regex.pattern),
                                'message': regex_message})
            else:  # mixed list of regexpValidator and YAQL validators
                expr = validator.get('expr')
                regex_validator = get_regex_validator(expr)
                regex_message = validator.get('message', '')
                if regex_validator:
                    validators.append(wrap_regex_validator(
                        regex_validator, regex_message))
                elif isinstance(expr, RawProperty):
                    # YAQL validator: resolved later, keep the raw dict.
                    validators.append(validator)
                if hasattr(regex_validator, 'regex'):
                    validators_js.\
                        append({'regex': regex_validator.regex.pattern,
                                'message': regex_message})
        kwargs['validators'] = validators
        if validators_js:
            self.js_validation = json.dumps(validators_js)
        super(CustomPropertiesField, self).__init__(*args, **kwargs)

    def widget_attrs(self, widget):
        # Expose the JS validators to the widget via a data- attribute.
        attrs = super(CustomPropertiesField, self).widget_attrs(widget)
        if self.js_validation:
            attrs['data-validators'] = self.js_validation
        return attrs

    def clean(self, value):
        """Skip all validators if field is disabled."""
        # form is assigned in ServiceConfigurationForm.finalize_fields()
        form = self.form
        # the only place to ensure that Service object has up-to-date
        # cleaned_data
        form.service.update_cleaned_data(form.cleaned_data, form=form)
        if getattr(self, 'enabled', True):
            return super(CustomPropertiesField, self).clean(value)
        else:
            return super(CustomPropertiesField, self).to_python(value)

    @classmethod
    def finalize_properties(cls, kwargs, form_name, service):
        """Replace RawProperty kwargs with real properties on a dynamic
        subclass; mutates *kwargs* in place by removing those keys."""
        props = {}
        kwargs_ = copy.copy(kwargs)
        for key, value in kwargs_.items():
            if isinstance(value, RawProperty):
                props[key] = value.finalize(form_name, service, cls)
                del kwargs[key]
        if props:
            return type(cls.__name__, (cls,), props)
        else:
            return cls
class CharField(forms.CharField, CustomPropertiesField):
    """Plain text input with murano property support."""
    pass
class PasswordField(CharField):
    """Password input with confirmation support and complexity validation.

    By default the value must contain at least one lowercase letter, one
    uppercase letter, one digit and one of `special_characters`.  A
    hidden confirmation clone can be produced with `clone_field()`;
    `compare()` checks that both inputs match.
    """
    # BUG FIX: these literals were plain strings containing '\/' and '\d',
    # which are invalid escape sequences (SyntaxWarning on modern Python).
    # Raw strings keep the character values byte-identical.
    special_characters = r'!@#$%^&*()_+|\/.,~?><:{}-'
    password_re = re.compile(r'^.*(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[%s]).*$'
                             % special_characters)
    has_clone = False
    original = True
    attrs = {'data-type': 'password'}
    validate_password = django_validator.RegexValidator(
        password_re, _('The password must contain at least one letter, one \
                       number and one special character'), 'invalid')

    @staticmethod
    def get_clone_name(name):
        # Field name used for the confirmation input.
        return name + '-clone'

    def compare(self, name, form_data):
        """Raise ValidationError if the value and its clone differ."""
        if self.original and self.required:
            # run compare only for original fields
            # do not run compare for hidden fields (they are not required)
            if form_data.get(name) != form_data.get(self.get_clone_name(name)):
                raise forms.ValidationError(_(u"{0}{1} don't match").format(
                    self.label, defaultfilters.pluralize(2)))

    def __init__(self, label, *args, **kwargs):
        self.confirm_input = kwargs.pop('confirm_input', True)
        kwargs.update({'label': label,
                       'error_messages': kwargs.get('error_messages', {}),
                       'widget': forms.PasswordInput(attrs=self.attrs,
                                                     render_value=True)})
        validators = kwargs.get('validators')
        help_text = kwargs.get('help_text')
        if not validators:
            # No custom validators, using default validator
            validators = [self.validate_password]
            if not help_text:
                help_text = _(
                    'Enter a complex password with at least one letter, '
                    'one number and one special character')
            kwargs['error_messages'].setdefault(
                'invalid', self.validate_password.message)
            kwargs['min_length'] = kwargs.get('min_length', 7)
            kwargs['max_length'] = kwargs.get('max_length', 255)
            kwargs['widget'] = forms.PasswordInput(attrs=self.attrs,
                                                   render_value=True)
        else:
            if not help_text:
                # NOTE(kzaitsev) There are custom validators for password,
                # but no help text let's leave only a generic message,
                # since we do not know exact constraints
                help_text = _('Enter a password')
        kwargs.update({'validators': validators,
                       'help_text': help_text})
        super(PasswordField, self).__init__(*args, **kwargs)

    def __deepcopy__(self, memo):
        # Give each copy its own error_messages dict so clones can
        # override messages independently.
        result = super(PasswordField, self).__deepcopy__(memo)
        result.error_messages = copy.deepcopy(self.error_messages)
        return result

    def clone_field(self):
        """Return a 'Confirm password' copy of this field."""
        self.has_clone = True
        field = copy.deepcopy(self)
        field.original = False
        field.label = _('Confirm password')
        field.error_messages['required'] = _('Please confirm your password')
        field.help_text = _('Retype your password')
        return field
class IntegerField(forms.IntegerField, CustomPropertiesField):
    """Integer input with murano property support."""
    pass
def _get_title(data):
    """Select-widget transform: unwrap `Choice` objects to their title."""
    return data.title if isinstance(data, Choice) else data
def _disable_non_ready(data):
if getattr(data, 'enabled', True):
return {}
else:
return {'disabled': 'disabled'}
class ChoiceField(forms.ChoiceField, CustomPropertiesField):
    """Themable select field accepting either a dict or a list of choices."""
    def __init__(self, **kwargs):
        choices = kwargs.get('choices') or getattr(self, 'choices', None)
        if choices:
            # Allow choices to be declared as a mapping.
            if isinstance(choices, dict):
                choices = list(choices.items())
            kwargs['choices'] = choices
        kwargs['widget'] = hz_forms.ThemableSelectWidget(transform=_get_title)
        super(ChoiceField, self).__init__(**kwargs)
class DynamicChoiceField(hz_forms.ThemableDynamicChoiceField,
                         CustomPropertiesField):
    """Dynamic (add-new-capable) choice field with murano property support."""
    pass
class FlavorChoiceField(ChoiceField):
    """Drop-down of nova flavors, optionally filtered by requirements.

    Supported requirement keys: min_vcpus, min_disk, min_memory_mb,
    max_vcpus, max_disk, max_memory_mb.
    """
    def __init__(self, *args, **kwargs):
        if 'requirements' in kwargs:
            self.requirements = kwargs.pop('requirements')
        super(FlavorChoiceField, self).__init__(*args, **kwargs)

    @with_request
    def update(self, request, **kwargs):
        """Populate choices from nova and pre-select a default flavor."""
        choices = []
        flavors = nova.novaclient(request).flavors.list()
        # If no requirements are present, return all the flavors.
        if not hasattr(self, 'requirements'):
            choices = [(flavor.name, flavor.name) for flavor in flavors]
        else:
            for flavor in flavors:
                # If a flavor doesn't meet a minimum requirement,
                # do not add it to the options list and skip to the
                # next flavor.
                if flavor.vcpus < self.requirements.get('min_vcpus', 0):
                    continue
                if flavor.disk < self.requirements.get('min_disk', 0):
                    continue
                if flavor.ram < self.requirements.get('min_memory_mb', 0):
                    continue
                # Likewise for maximum requirements, when present.
                if 'max_vcpus' in self.requirements:
                    if flavor.vcpus > self.requirements['max_vcpus']:
                        continue
                if 'max_disk' in self.requirements:
                    if flavor.disk > self.requirements['max_disk']:
                        continue
                if 'max_memory_mb' in self.requirements:
                    if flavor.ram > self.requirements['max_memory_mb']:
                        continue
                choices.append((flavor.name, flavor.name))
        choices.sort(key=lambda e: e[1])
        self.choices = choices
        if kwargs.get('form'):
            kwargs_form_flavor = kwargs["form"].fields.get('flavor')
        else:
            kwargs_form_flavor = None
        if kwargs_form_flavor:
            self.initial = kwargs["form"]["flavor"].value()
        else:
            # Default to the first flavor whose name contains 'medium'.
            # BUG FIX: the original unpacked into the same name twice
            # ("for flavor_name, flavor_name in ..."), which only worked
            # by accident because both tuple items are identical.
            for _value, flavor_name in self.choices:
                if 'medium' in flavor_name:
                    self.initial = flavor_name
                    break
class KeyPairChoiceField(DynamicChoiceField):
    """This widget allows to select keypair for VMs"""
    @with_request
    def update(self, request, **kwargs):
        # Empty choice first, then all keypairs sorted by name.
        self.choices = [('', _('No keypair'))]
        for keypair in sorted(
                nova.novaclient(request).keypairs.list(),
                key=lambda e: e.name):
            self.choices.append((keypair.name, keypair.name))
class SecurityGroupChoiceField(DynamicChoiceField):
    """This widget allows to select a security group for VMs"""
    @with_request
    def update(self, request, **kwargs):
        self.choices = [('', _('Application default security group'))]
        # TODO(pbourke): remove sorted when supported natively in Horizon
        # (https://bugs.launchpad.net/horizon/+bug/1692972)
        for secgroup in sorted(
                neutron.security_group_list(request),
                key=lambda e: e.name_or_id):
            # Hide murano's own internal security groups.
            if not secgroup.name_or_id.startswith('murano--'):
                self.choices.append((secgroup.name_or_id, secgroup.name_or_id))
# NOTE(kzaitsev): for transform to work correctly on horizon SelectWidget
# Choice has to be non-string
class Choice(object):
    """A choice that allows disabling specific choices in a SelectWidget."""
    def __init__(self, title, enabled):
        self.title = title      # display text for the option
        self.enabled = enabled  # False renders the option as disabled
class ImageChoiceField(ChoiceField):
    """Drop-down of glance images carrying murano metadata.

    Images that are not 'active' are listed but disabled.  When
    *image_type* is given, only images whose murano ``type`` matches it
    exactly or starts with '<image_type>.' are listed.
    """
    widget = hz_forms.ThemableSelectWidget(
        transform=_get_title,
        transform_html_attrs=_disable_non_ready)

    def __init__(self, *args, **kwargs):
        self.image_type = kwargs.pop('image_type', None)
        super(ImageChoiceField, self).__init__(*args, **kwargs)

    @with_request
    def update(self, request, **kwargs):
        image_map, image_choices = {}, []
        murano_images = get_murano_images(request)
        for image in murano_images:
            murano_data = image.murano_property
            title = murano_data.get('title', image.name)
            if image.status == 'active':
                title = Choice(title, enabled=True)
            else:
                # Non-active images are visible but cannot be selected.
                title = Choice("{} ({})".format(title, image.status),
                               enabled=False)
            if self.image_type is not None:
                itype = murano_data.get('type')
                # BUG FIX: an image without a murano 'type' used to crash
                # with AttributeError (None.startswith) whenever a
                # non-empty image_type filter was set; skip such images.
                if itype is None:
                    continue
                prefix = '{type}.'.format(type=self.image_type)
                if (not itype.startswith(prefix) and
                        not self.image_type == itype):
                    continue
            image_map[image.id] = title
        # Sort by display title for a stable, readable drop-down.
        for id_, title in sorted(six.iteritems(image_map),
                                 key=lambda e: e[1].title):
            image_choices.append((id_, title))
        if image_choices:
            image_choices.insert(0, ("", _("Select Image")))
        else:
            image_choices.insert(0, ("", _("No images available")))
        self.choices = image_choices
class NetworkChoiceField(ChoiceField):
    """Drop-down of available neutron networks.

    Choice values are (net_id, subnet_id) tuples; the widget serializes
    them to strings and `to_python` parses them back.
    """
    def __init__(self,
                 filter=None,
                 murano_networks=None,
                 allow_auto=True,
                 *args,
                 **kwargs):
        # NOTE(review): 'filter' shadows the builtin, but renaming it
        # would break callers that pass it by keyword.
        self.filter = filter
        if murano_networks:
            if murano_networks.lower() not in ["exclude", "translate"]:
                raise ValueError(_("Invalid value of 'murano_nets' option"))
        self.murano_networks = murano_networks
        self.allow_auto = allow_auto
        super(NetworkChoiceField, self).__init__(*args,
                                                 **kwargs)

    @with_request
    def update(self, request, **kwargs):
        """Populates available networks in the control
        This method is called automatically when the form which contains it is
        rendered
        """
        network_choices = net.get_available_networks(request,
                                                     self.filter,
                                                     self.murano_networks)
        if self.allow_auto:
            network_choices.insert(0, ((None, None), _('Auto')))
        self.choices = network_choices or []

    def to_python(self, value):
        """Converts string representation of widget to tuple value
        Is called implicitly during form cleanup phase
        """
        # literal_eval only accepts Python literals, so parsing the
        # widget-supplied string here is safe.
        if value:
            return ast.literal_eval(value)
        else:  # may happen if no networks are available and "Auto" is disabled
            return None, None
class AZoneChoiceField(ChoiceField):
    """Drop-down of nova availability zones."""
    @with_request
    def update(self, request, **kwargs):
        try:
            availability_zones = nova.novaclient(
                request).availability_zones.list(detailed=False)
        except Exception:
            availability_zones = []
            exceptions.handle(request,
                              _("Unable to retrieve availability zones."))
        # Keep only zones reported as available (truthy zoneState).
        az_choices = [(az.zoneName, az.zoneName)
                      for az in availability_zones if az.zoneState]
        if not az_choices:
            az_choices.insert(0, ("", _("No availability zones available")))
        az_choices.sort(key=lambda e: e[1])
        self.choices = az_choices
class VolumeChoiceField(ChoiceField):
    """Drop-down of available cinder volumes and, optionally, snapshots."""
    def __init__(self,
                 include_snapshots=True,
                 *args,
                 **kwargs):
        self.include_snapshots = include_snapshots
        super(VolumeChoiceField, self).__init__(*args, **kwargs)

    @with_request
    def update(self, request, **kwargs):
        """This widget allows selection of Volumes and Volume Snapshots"""
        # Only list volumes/snapshots in the 'available' state.
        available = {'status': cinder.VOLUME_STATE_AVAILABLE}
        try:
            choices = [(volume.id, volume.name)
                       for volume in cinder.volume_list(request,
                                                        search_opts=available)]
        except Exception:
            choices = []
            exceptions.handle(request,
                              _("Unable to retrieve volume list."))
        if self.include_snapshots:
            try:
                choices.extend((snap.id, snap.name)
                               for snap in cinder.volume_snapshot_list(
                                   request, search_opts=available))
            except Exception:
                exceptions.handle(request,
                                  _("Unable to retrieve snapshot list."))
        if choices:
            choices.sort(key=lambda e: e[1])
            choices.insert(0, ("", _("Select volume")))
        else:
            choices.insert(0, ("", _("No volumes available")))
        self.choices = choices
class BooleanField(forms.BooleanField, CustomPropertiesField):
    """Checkbox field; never required (unchecked boxes post nothing)."""
    def __init__(self, *args, **kwargs):
        if 'widget' in kwargs:
            widget = kwargs['widget']
            # A widget passed as a class is instantiated with our css class.
            if isinstance(widget, type):
                widget = widget(attrs={'class': 'checkbox'})
        else:
            widget = forms.CheckboxInput(attrs={'class': 'checkbox'})
        kwargs['widget'] = widget
        # Force optional: an unchecked checkbox is simply absent from POST.
        kwargs['required'] = False
        super(BooleanField, self).__init__(*args, **kwargs)
# Deprecated alias kept only so old packages referencing it still load.
@versionutils.deprecated(
    as_of=versionutils.deprecated.JUNO,
    in_favor_of='type boolean (regular BooleanField)',
    remove_in=1)
class FloatingIpBooleanField(BooleanField):
    """Deprecated: use a plain BooleanField instead."""
    pass
class ClusterIPField(forms.GenericIPAddressField, CustomPropertiesField):
    """IPv4-only address input with murano property support."""
    def __init__(self, *args, **kwargs):
        super(ClusterIPField, self).__init__(protocol='ipv4', *args, **kwargs)
class DatabaseListField(CharField):
    """Comma-separated list of MS SQL database names."""
    validate_mssql_identifier = django_validator.RegexValidator(
        # BUG FIX: the original pattern used the character range 'A-z',
        # which also matches '[', '\', ']', '^' and '`'; 'A-Z' matches
        # what the error message actually promises.
        re.compile(r'^[a-zA-Z_][a-zA-Z0-9_$#@]*$'),
        _(u'First symbol should be latin letter or underscore. Subsequent '
          u'symbols can be latin letter, numeric, underscore, at sign, '
          u'number sign or dollar sign'))
    default_error_messages = {'invalid': validate_mssql_identifier.message}

    def to_python(self, value):
        """Normalize data to a list of strings."""
        if not value:
            return []
        return [name.strip() for name in value.split(',')]

    def validate(self, value):
        """Check if value consists only of valid names."""
        super(DatabaseListField, self).validate(value)
        for db_name in value:
            self.validate_mssql_identifier(db_name)
class ErrorWidget(widgets.Widget):
    """Fallback widget rendered when a field failed to initialise."""

    def __init__(self, *args, **kwargs):
        default_msg = _("There was an error initialising this field.")
        self.message = kwargs.pop('message', default_msg)
        super(ErrorWidget, self).__init__(*args, **kwargs)

    def render(self, name, value, attrs=None):
        # Show the stored error message instead of an input control.
        return "<div name={name}>{message}</div>".format(
            name=name, message=self.message)
class MuranoTypeWidget(hz_forms.fields.DynamicSelectWidget):
    """Dynamic select widget carrying the murano 'add new' css hook."""
    def __init__(self, attrs=None, **kwargs):
        # Ensure the murano_add_select css class is always present.
        if attrs is None:
            attrs = {'class': 'murano_add_select'}
        else:
            attrs.setdefault('class', '')
            attrs['class'] += ' murano_add_select'
        super(MuranoTypeWidget, self).__init__(attrs=attrs, **kwargs)

    class Media(object):
        # JS implementing the add-new-item drop-down behaviour.
        js = ('muranodashboard/js/add-select.js',)
def make_select_cls(fqns):
    """Build a DynamicSelect field class bound to the given class FQN(s).

    The returned field lists environment services whose type matches (or
    inherits from) one of *fqns* and wires up an 'add application' link.
    """
    if not isinstance(fqns, (tuple, list)):
        fqns = (fqns,)

    class DynamicSelect(hz_forms.DynamicChoiceField, CustomPropertiesField):
        widget = MuranoTypeWidget

        def __init__(self, empty_value_message=None, *args, **kwargs):
            super(DynamicSelect, self).__init__(*args, **kwargs)
            if empty_value_message is not None:
                self.empty_value_message = empty_value_message
            else:
                self.empty_value_message = _('Select Application')

        @with_request
        def update(self, request, environment_id, **kwargs):
            matching_classes = []
            fqns_seen = set()
            # NOTE(kzaitsev): it's possible to have a private
            # and public apps with the same fqn, however the engine would
            # currently favor private package. Therefore we should squash
            # these until we devise a better way to work with this
            # situation and versioning
            for class_fqn in fqns:
                app_found = pkg_api.app_by_fqn(request, class_fqn)
                if app_found:
                    fqns_seen.add(app_found.fully_qualified_name)
                    matching_classes.append(app_found)
                # Also include packages inheriting from the requested FQN.
                apps_found = pkg_api.apps_that_inherit(request, class_fqn)
                for app in apps_found:
                    if app.fully_qualified_name in fqns_seen:
                        continue
                    fqns_seen.add(app.fully_qualified_name)
                    matching_classes.append(app)
            if not matching_classes:
                msg = _(
                    "Couldn't find any apps, required for this field.\n"
                    "Tried: {fqns}").format(fqns=', '.join(fqns))
                self.widget = ErrorWidget(message=msg)

            # NOTE(kzaitsev): this closure is needed to allow us have custom
            # logic when clicking add button
            def _make_link():
                ns_url = 'horizon:app-catalog:catalog:add'
                ns_url_args = (environment_id, False, True)
                # This will prevent horizon from adding an extra '+' button
                if not matching_classes:
                    return ''
                return json.dumps([
                    (app.name, reverse(ns_url, args=((app.id,) + ns_url_args)))
                    for app in matching_classes])

            self.widget.add_item_link = _make_link
            apps = env_api.service_list_by_fqns(
                request, environment_id,
                [app.fully_qualified_name for app in matching_classes])
            choices = [('', self.empty_value_message)]
            choices.extend([(app['?']['id'],
                             html.escape(app.name)) for app in apps])
            self.choices = choices
            # NOTE(tsufiev): streamline the drop-down UX: auto-select the
            # single available option in a drop-down
            if len(choices) == 2:
                self.initial = choices[1][0]

        def clean(self, value):
            # Treat the empty sentinel as "no application selected".
            value = super(DynamicSelect, self).clean(value)
            return None if value == '' else value

    return DynamicSelect
# Deprecated: kept only so old packages referring to this field still load.
@versionutils.deprecated(
    as_of=versionutils.deprecated.JUNO,
    in_favor_of='type io.murano.windows.ActiveDirectory with a custom '
                'emptyValueMessage attribute',
    remove_in=1)
class DomainChoiceField(make_select_cls('io.murano.windows.ActiveDirectory')):
    """Deprecated ActiveDirectory selector with a 'Not in domain' choice."""
    def __init__(self, *args, **kwargs):
        super(DomainChoiceField, self).__init__(*args, **kwargs)
        # Prepend the "not in domain" choice.
        self.choices = [('', _('Not in domain'))]
|
"""
This module implements the Prefect context that is available when tasks run.
Tasks can import prefect.context and access attributes that will be overwritten
when the task is run.
Example:
```python
import prefect.context
with prefect.context(a=1, b=2):
print(prefect.context.a) # 1
print(prefect.context.a) # undefined
```
Prefect provides various key / value pairs in context that are always available during task runs:
| Variable | Description |
| :--- | --- |
| `date` | an actual datetime object representing the current time |
| `today` | the current date formatted as `YYYY-MM-DD`|
| `today_nodash` | the current date formatted as `YYYYMMDD`|
| `yesterday` | yesterday's date formatted as `YYYY-MM-DD`|
| `yesterday_nodash` | yesterday's date formatted as `YYYYMMDD`|
| `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|
| `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|
| `logger` | the logger for the current task |
| `config` | the complete [Prefect configuration]\
(https://docs.prefect.io/core/concepts/configuration.html) object that is being used \
during this run |
| `flow_name` | the name of the current flow |
| `scheduled_start_time` | \
a datetime object representing the scheduled start time for the flow run; \
falls back to `now` for unscheduled runs |
| `parameters` | a dictionary of parameter values for the current flow run |
| `map_index` | the map index of the current task (if mapped, otherwise `None`) |
| `task_name` | the name of the current task |
| `task_full_name` | the name of the current task, including map index |
| `task_slug` | the slug of the current task |
| `task_tags` | the tags on the current task |
| `task_run_count` | the run count of the task run - typically only interesting for retrying tasks |
| `task_loop_count` | if the Task utilizes looping, the loop count of the task run |
| `task_run_name` | the run name of the current task (if provided, otherwise `None`) |
| `task_loop_result` | if the Task is looping, the current loop result |
In addition, Prefect Cloud supplies some additional context variables:
| Variable | Description |
| :--- | --- |
| `flow_id` | the id of the current flow |
| `flow_run_id` | the id of the current flow run |
| `flow_run_version` | the state version of the current flow run |
| `flow_run_name` | the name of the current flow run |
| `task_id` | the id of the current task |
| `task_run_id` | the id of the current task run |
| `task_run_version` | the state version of the current task run |
| `resume` | boolean showing if the current task run was manually restarted |
Users can also provide values to context at runtime. For more information, see
the [Context concept
doc](https://docs.prefect.io/core/concepts/execution.html#context).
"""
import contextlib
import threading
from typing import Any, Iterator, MutableMapping
from prefect.configuration import config
from prefect.utilities.collections import DotDict, merge_dicts
class Context(DotDict, threading.local):
    """
    A thread safe context store for Prefect data.
    The `Context` is a `DotDict` subclass, and can be instantiated the same way.
    Args:
        - *args (Any): arguments to provide to the `DotDict` constructor (e.g.,
            an initial dictionary)
        - **kwargs (Any): any key / value pairs to initialize this context with
    """
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        init = {}
        # Initialize with config context
        init.update(config.get("context", {}))
        # Overwrite with explicit args
        init.update(dict(*args, **kwargs))
        # Merge in config (with explicit args overwriting)
        init["config"] = merge_dicts(config, init.get("config", {}))
        super().__init__(init)

    def __getstate__(self) -> None:
        """
        Because we dynamically update context during runs, we don't ever want to pickle
        or "freeze" the contents of context. Consequently it should always be accessed
        as an attribute of the prefect module.
        """
        raise TypeError(
            "Pickling context objects is explicitly not supported. You should always "
            "access context as an attribute of the `prefect` module, as in `prefect.context`"
        )

    def __repr__(self) -> str:
        return "<Context>"

    @contextlib.contextmanager
    def __call__(self, *args: MutableMapping, **kwargs: Any) -> Iterator["Context"]:
        """
        A context manager for setting / resetting the Prefect context
        Example:
            import prefect.context
            with prefect.context(dict(a=1, b=2), c=3):
                print(prefect.context.a) # 1
        """
        # Avoid creating new `Context` object, copy as `dict` instead.
        previous_context = self.__dict__.copy()
        try:
            new_context = dict(*args, **kwargs)
            if "config" in new_context:
                # Deep-merge nested config instead of replacing it wholesale.
                new_context["config"] = merge_dicts(
                    self.get("config", {}), new_context["config"]
                )
            self.update(new_context)  # type: ignore
            yield self
        finally:
            # Restore the snapshot even if the body raised.
            self.clear()
            self.update(previous_context)


# Module-level singleton, importable as `prefect.context`.
context = Context()
|
# Finds the appropriate MPF branch to go with this mpf-mc branch
import git
import os
import sys
# Make the repository root importable so we can read mpf-mc's version.
parent_directory = (os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)))
sys.path.insert(1, parent_directory)
# pylint: disable-msg=wrong-import-position
from mpfmc._version import __short_version__

# AppVeyor-specific checkout location and branch environment variable.
mpf_repo = git.Repo('c:\\projects\\mpf')
this_mpf_mc_branch = os.environ['APPVEYOR_REPO_BRANCH']

# Prefer an MPF branch matching this mpf-mc branch, then one matching the
# short version, then fall back to 'dev'.
# NOTE(review): membership of an 'origin/<name>' string in repo.refs relies
# on GitPython's IterableList name lookup — confirm against the installed
# GitPython version.
if 'origin/{}'.format(this_mpf_mc_branch) in mpf_repo.refs:
    mpf_branch = this_mpf_mc_branch
elif 'origin/{}'.format(__short_version__) in mpf_repo.refs:
    mpf_branch = __short_version__
else:
    mpf_branch = 'dev'

# Emit a batch file that checks out the chosen MPF branch.
with open('checkout_mpf_branch.bat', 'w') as f:
    f.write('cd \\projects\\mpf\n')
    f.write('git checkout {}\n'.format(mpf_branch))
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Importer for Epping Forest (E07000070), parl.2019-12-12."""

    council_id = "E07000070"
    addresses_name = "parl.2019-12-12/Version 2/merged.tsv"
    stations_name = "parl.2019-12-12/Version 2/merged.tsv"
    elections = ["parl.2019-12-12"]
    csv_delimiter = "\t"
    csv_encoding = "windows-1252"
    allow_station_point_from_postcode = False

    def address_record_to_dict(self, record):
        """Apply per-UPRN postcode/suggestion overrides on top of the base dict."""
        rec = super().address_record_to_dict(record)
        uprn = record.property_urn.strip().lstrip("0")

        # Known-bad postcodes, keyed by UPRN.
        postcode_overrides = {
            "100091430409": "CM111HZ",
            "200004627070": "CM6 3XD",
            "100091237355": "SS11 7DS",
        }
        if uprn in postcode_overrides:
            rec["postcode"] = postcode_overrides[uprn]
        if uprn == "100091430409":
            rec["accept_suggestion"] = False

        # Drop records for this postcode entirely.
        if record.addressline6.strip() == "CM1 1PJ":
            return None

        suggestion_overrides = {
            # CM11LA -> CM111LA : 2 The Paddocks, Layland Farm, Sudbury Road, Downham, Billericay, Essex
            "10091492809": True,
            # CM11AG -> CM35QY : 99 Watson Heights, Chelmsford, Essex
            "100091234373": False,
        }
        if uprn in suggestion_overrides:
            rec["accept_suggestion"] = suggestion_overrides[uprn]

        return rec
|
import json
import os
from errata_tool import ErrataConnector, Erratum
from errata_tool.build import Build
from errata_tool.products import ProductList
from errata_tool.product import Product
from errata_tool.product_version import ProductVersion
from errata_tool.release import Release
from errata_tool.variant import Variant
import requests
import pytest
# Directory of this test module; recorded HTTP fixtures live beside it.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.join(TESTS_DIR, 'fixtures')
class MockResponse(object):
    """Stand-in for a requests.Response, backed by on-disk fixture files."""

    status_code = 200
    encoding = 'utf-8'
    headers = {'content-type': 'application/json; charset=utf-8'}

    def raise_for_status(self):
        """No-op: fixture responses always represent a successful request."""
        pass

    @property
    def _fixture(self):
        """Return path to our static fixture file. """
        base = os.path.join(FIXTURES_DIR, 'errata.devel.redhat.com/')
        fixture_path = self.url.replace('https://errata.devel.redhat.com/', base)
        # If we need to represent this API endpoint as both a directory and a
        # file, check for a ".body" file.
        if os.path.isdir(fixture_path):
            fixture_path += '.body'
        return fixture_path

    def json(self):
        """Parse and return the fixture file as JSON."""
        try:
            with open(self._fixture) as fp:
                return json.load(fp)
        except IOError:
            print('Try ./new-fixture.sh %s' % self.url)
            raise

    @property
    def text(self):
        """Return contents of our static fixture file. """
        try:
            with open(self._fixture) as fp:
                return fp.read()
        except IOError:
            print('Try ./new-fixture.sh %s' % self.url)
            raise
class RequestRecorder(object):
    """Record args to requests.get() or requests.post() """

    def __call__(self, url, **kwargs):
        """mocking requests.get() or requests.post() """
        recorded = MockResponse()
        recorded.url = url
        # Keep what was requested around for later assertions.
        self.response = recorded
        self.kwargs = kwargs
        return recorded
@pytest.fixture
def mock_get():
    # Fresh recorder per test so recorded url/kwargs don't leak between tests.
    return RequestRecorder()


@pytest.fixture
def mock_post():
    # Separate recorder for POST calls.
    return RequestRecorder()


@pytest.fixture
def mock_put():
    # Separate recorder for PUT calls.
    return RequestRecorder()
def _mock_errata_env(monkeypatch, mock_get):
    """Route all errata_tool HTTP traffic to the on-disk fixtures.

    Deletes the real Session.request so any unmocked call fails loudly,
    stubs out authentication, and answers requests.get() from fixtures.
    (Previously this 4-line setup was duplicated in every fixture below.)
    """
    monkeypatch.delattr('requests.sessions.Session.request')
    monkeypatch.setattr(ErrataConnector, '_auth', None)
    monkeypatch.setattr(ErrataConnector, '_username', 'test')
    monkeypatch.setattr(requests, 'get', mock_get)


@pytest.fixture
def advisory(monkeypatch, mock_get):
    """A plain advisory, loaded from fixtures."""
    _mock_errata_env(monkeypatch, mock_get)
    return Erratum(errata_id=33840)


@pytest.fixture
def advisory_none_ship(monkeypatch, mock_get):
    """An advisory with no ship date."""
    _mock_errata_env(monkeypatch, mock_get)
    return Erratum(errata_id=43686)


@pytest.fixture
def advisory_with_batch(monkeypatch, mock_get):
    """An advisory that belongs to a batch."""
    _mock_errata_env(monkeypatch, mock_get)
    return Erratum(errata_id=46563)


@pytest.fixture
def rhsa(monkeypatch, mock_get):
    """Like the advisory() fixture above, but an RHSA. """
    _mock_errata_env(monkeypatch, mock_get)
    return Erratum(errata_id=36762)


@pytest.fixture
def productlist(monkeypatch, mock_get):
    """The full list of products, loaded from fixtures."""
    _mock_errata_env(monkeypatch, mock_get)
    return ProductList()


@pytest.fixture
def product(monkeypatch, mock_get):
    """The RHCEPH product, loaded from fixtures."""
    _mock_errata_env(monkeypatch, mock_get)
    return Product('RHCEPH')


@pytest.fixture
def product_version(monkeypatch, mock_get):
    """A RHCEPH product version, loaded from fixtures."""
    _mock_errata_env(monkeypatch, mock_get)
    return ProductVersion('RHEL-7-RHCEPH-3.1')


@pytest.fixture
def release(monkeypatch, mock_get):
    """The rhceph-3.1 release, loaded from fixtures."""
    _mock_errata_env(monkeypatch, mock_get)
    return Release(name='rhceph-3.1')


@pytest.fixture
def build(monkeypatch, mock_get):
    """A ceph brew build, loaded from fixtures."""
    _mock_errata_env(monkeypatch, mock_get)
    return Build('ceph-12.2.5-42.el7cp')


@pytest.fixture
def variant(monkeypatch, mock_get):
    """A RHCEPH variant, loaded from fixtures."""
    _mock_errata_env(monkeypatch, mock_get)
    return Variant(name='8Base-RHCEPH-5.0-MON')
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
#
# Copyright (C) 2021 Graz University of Technology.
#
# Invenio-Records-Marc21 is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Pytest configuration.
See https://pytest-invenio.readthedocs.io/ for documentation on which test
fixtures are available.
"""
from copy import deepcopy
import pytest
@pytest.fixture(scope="function")
def marc21_metadata():
    """Record UI metadata.

    Returns a dict with a single "json" key holding a MARC21 record:
    leader, title statement, subjects, summary, publication notice and
    main entry, with "__order__" lists preserving field ordering.
    """
    return {
        "json": {
            "leader": {
                "undefined": 0,
                "record_length": 0,
                "record_status": "new",
                "encoding_level": "not_applicable",
                "type_of_record": "language_material",
                "indicator_count": 2,
                "bibliographic_level": "monograph_item",
                "subfield_code_count": 2,
                "base_address_of_data": 0,
                "character_coding_scheme": "ucs_unicode",
                # NOTE(review): "omitteed" looks misspelled — confirm against the
                # schema's controlled vocabulary before changing this value.
                "descriptive_cataloging_form": "isbd_punctuation_omitteed",
                "multipart_resource_record_level": "set",
                "length_of_the_length_of_field_portion": 4,
                "length_of_the_implementation_defined_portion": 0,
                "length_of_the_starting_character_position_portion": 5,
            },
            "summary": [
                {
                    "summary": "A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls like mine. I am so happy, my dear friend, so absorbed in the exquisite sense of mere tranquil existence, that I neglect my talents. I should be incapable of drawing a single stroke at the present moment; and yet I feel that I never was a greater artist than now. When, while the lovely valley teems with vapour around me, and the meridian sun strikes the upper surface of the impenetrable foliage of my trees, and but a few stray gleams steal into the inner sanctuary, I throw myself down among the tall grass by the trickling stream; and, as I lie close to the earth, a thousand unknown plants are noticed by me: when I hear the buzz of the little world among the stalks, and grow familiar with the countless indescribable forms of the insects and flies, then I feel the presence of the Almighty, who formed us in his own image, and the breath ",
                    "__order__": ["summary", "display_constant_controller"],
                    "display_constant_controller": "Summary",
                }
            ],
            "__order__": [
                "leader",
                "title_statement",
                "subject_added_entry_topical_term",
                "subject_added_entry_topical_term",
                "subject_added_entry_topical_term",
                "subject_added_entry_topical_term",
                "summary",
                "production_publication_distribution_manufacture_and_copyright_notice",
                "main_entry_personal_name",
            ],
            "title_statement": {
                "title": "Proceedings of the 3rd International Brain-Computer Interface Workshop and Training Course",
                "__order__": [
                    "title",
                    "remainder_of_title",
                    "statement_of_responsibility",
                    "title_added_entry",
                    "nonfiling_characters",
                ],
                "title_added_entry": "No added entry",
                "remainder_of_title": "Subtitle field.",
                "nonfiling_characters": "0",
                "statement_of_responsibility": "hrsg. von Josef Frank",
            },
            "main_entry_personal_name": {
                "__order__": ["affiliation", "type_of_personal_name_entry_element"],
                "affiliation": "Institute of Solid State Physics (5130)",
                "type_of_personal_name_entry_element": "Surname",
            },
            # Four repeated subject fields, matching the "__order__" list above.
            "subject_added_entry_topical_term": [
                {
                    "__order__": [
                        "miscellaneous_information",
                        "level_of_subject",
                        "thesaurus",
                    ],
                    "thesaurus": "Source not specified",
                    "level_of_subject": "No information provided",
                    "miscellaneous_information": ["Test"],
                },
                {
                    "__order__": [
                        "miscellaneous_information",
                        "level_of_subject",
                        "thesaurus",
                    ],
                    "thesaurus": "Source not specified",
                    "level_of_subject": "No information provided",
                    "miscellaneous_information": ["Invenio"],
                },
                {
                    "__order__": [
                        "miscellaneous_information",
                        "level_of_subject",
                        "thesaurus",
                    ],
                    "thesaurus": "Source not specified",
                    "level_of_subject": "No information provided",
                    "miscellaneous_information": ["TUGraz"],
                },
                {
                    "__order__": [
                        "topical_term_or_geographic_name_entry_element",
                        "level_of_subject",
                        "thesaurus",
                    ],
                    "thesaurus": "Source not specified",
                    "level_of_subject": "No information provided",
                    "topical_term_or_geographic_name_entry_element": "Marc21",
                },
            ],
            "production_publication_distribution_manufacture_and_copyright_notice": [
                {
                    "__order__": [
                        "place_of_production_publication_distribution_manufacture",
                        "name_of_producer_publisher_distributor_manufacturer",
                        "name_of_producer_publisher_distributor_manufacturer",
                        "name_of_producer_publisher_distributor_manufacturer",
                        "name_of_producer_publisher_distributor_manufacturer",
                        "date_of_production_publication_distribution_manufacture_or_copyright_notice",
                        "sequence_of_statements",
                    ],
                    "sequence_of_statements": "Not applicable/No information provided/Earliest",
                    "name_of_producer_publisher_distributor_manufacturer": [
                        "Hulk",
                        "Thor",
                        "Captain",
                        "Black Widow",
                    ],
                    "place_of_production_publication_distribution_manufacture": [
                        "Tu Graz"
                    ],
                    "date_of_production_publication_distribution_manufacture_or_copyright_notice": [
                        "2004"
                    ],
                }
            ],
        }
    }
@pytest.fixture(scope="function")
def full_record(marc21_record, marc21_metadata):
    """Full record as is expected by the UI serializer.

    Mutates the base `marc21_record` fixture in place: fills in id, pid,
    files/access flags, the metadata payload, and version info.
    (A stray no-op expression statement `marc21_record` was removed here.)
    """
    marc21_record["id"] = "9jkx5-hx115"
    marc21_record["pid"] = {
        "pk": 58,
        "status": "R",
        "obj_type": "rec",
        "pid_type": "marcid",
    }
    marc21_record["files"] = {"enabled": True}
    marc21_record["access"] = {
        "files": "restricted",
        "embargo": {"until": None, "active": False, "reason": None},
        "metadata": "restricted",
        "status": "restricted",
    }
    marc21_record["metadata"] = marc21_metadata
    marc21_record["versions"] = {"index": 1, "is_latest": True, "is_latest_draft": True}
    return marc21_record
@pytest.fixture(scope="function")
def list_records(full_record):
    """Fixture list of records."""
    # Two independent deep copies so tests can mutate one hit freely.
    hits = [deepcopy(full_record) for _ in range(2)]
    return {"hits": {"hits": hits}}
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2015, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import shutil
from ..compat import is_darwin, FileExistsError
from .api import EXE, COLLECT
from .datastruct import Target, TOC, logger, _check_guts_eq
from .utils import _check_path_overlap, _rmtree, add_suffix_to_extensions, checkCache
class BUNDLE(Target):
    """Build a Mac OS X .app bundle from EXE/COLLECT/TOC inputs.

    Creates the Contents/{MacOS,Resources,Frameworks} layout, writes a
    minimal Info.plist, copies binaries into Contents/MacOS and places data
    files in Contents/Resources with symlinks back into MacOS (required for
    code signing). A no-op on non-darwin platforms.
    """

    def __init__(self, *args, **kws):
        from ..config import CONF
        # BUNDLE only has a sense under Mac OS X, it's a noop on other platforms
        if not is_darwin:
            return

        # .icns icon for app bundle.
        # Use icon supplied by user or just use the default one from PyInstaller.
        self.icon = kws.get('icon')
        if not self.icon:
            self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                     'bootloader', 'images', 'icon-windowed.icns')
        # Ensure icon path is absolute.
        self.icon = os.path.abspath(self.icon)
        Target.__init__(self)

        # .app bundle is created in DISTPATH.
        self.name = kws.get('name', None)
        base_name = os.path.basename(self.name)
        self.name = os.path.join(CONF['distpath'], base_name)
        self.appname = os.path.splitext(base_name)[0]
        self.version = kws.get("version", "0.0.0")
        self.toc = TOC()
        self.strip = False
        self.upx = False

        # .app bundle identifier for Code Signing
        self.bundle_identifier = kws.get('bundle_identifier')
        if not self.bundle_identifier:
            # Fallback to appname.
            self.bundle_identifier = self.appname

        self.info_plist = kws.get('info_plist', None)

        for arg in args:
            if isinstance(arg, EXE):
                self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
                self.toc.extend(arg.dependencies)
                self.strip = arg.strip
                self.upx = arg.upx
            elif isinstance(arg, TOC):
                self.toc.extend(arg)
                # TOC doesn't have a strip or upx attribute, so there is no way for us to
                # tell which cache we should draw from.
            elif isinstance(arg, COLLECT):
                self.toc.extend(arg.toc)
                self.strip = arg.strip_binaries
                self.upx = arg.upx_binaries
            else:
                logger.info("unsupported entry %s", arg.__class__.__name__)
        # Now, find values for app filepath (name), app name (appname), and name
        # of the actual executable (exename) from the first EXECUTABLE item in
        # toc, which might have come from a COLLECT too (not from an EXE).
        # NOTE(review): `self.name` can't be None at this point (the
        # os.path.basename() call above would already have raised), so the
        # None branch below looks unreachable — kept as-is; confirm before
        # removing.
        for inm, name, typ in self.toc:
            if typ == "EXECUTABLE":
                self.exename = name
                if self.name is None:
                    self.appname = "Mac%s" % (os.path.splitext(inm)[0],)
                    self.name = os.path.join(CONF['specpath'], self.appname + ".app")
                else:
                    self.name = os.path.join(CONF['specpath'], self.name)
                break
        self.__postinit__()

    _GUTS = (
        # BUNDLE always builds, just want the toc to be written out
        ('toc', None),
    )

    def _check_guts(self, data, last_build):
        # BUNDLE always needs to be executed, since it will clean the output
        # directory anyway to make sure there is no existing cruft accumulating
        return 1

    def assemble(self):
        """Create the .app directory tree, Info.plist and bundle contents."""
        if _check_path_overlap(self.name) and os.path.isdir(self.name):
            _rmtree(self.name)
        logger.info("Building BUNDLE %s", self.tocbasename)

        # Create a minimal Mac bundle structure
        os.makedirs(os.path.join(self.name, "Contents", "MacOS"))
        os.makedirs(os.path.join(self.name, "Contents", "Resources"))
        os.makedirs(os.path.join(self.name, "Contents", "Frameworks"))

        # Copy icns icon to Resources directory.
        if os.path.exists(self.icon):
            shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))
        else:
            # logger.warn() is deprecated; use warning() with lazy %-args.
            logger.warning("icon not found %s", self.icon)

        # Key/values for a minimal Info.plist file
        info_plist_dict = {"CFBundleDisplayName": self.appname,
                           "CFBundleName": self.appname,

                           # Required by 'codesign' utility.
                           # The value for CFBundleIdentifier is used as the default unique
                           # name of your program for Code Signing purposes.
                           # It even identifies the APP for access to restricted OS X areas
                           # like Keychain.
                           #
                           # The identifier used for signing must be globally unique. The usal
                           # form for this identifier is a hierarchical name in reverse DNS
                           # notation, starting with the toplevel domain, followed by the
                           # company name, followed by the department within the company, and
                           # ending with the product name. Usually in the form:
                           #   com.mycompany.department.appname
                           # Cli option --osx-bundle-identifier sets this value.
                           "CFBundleIdentifier": self.bundle_identifier,

                           # Fix for #156 - 'MacOS' must be in the name - not sure why
                           "CFBundleExecutable": 'MacOS/%s' % os.path.basename(self.exename),
                           "CFBundleIconFile": os.path.basename(self.icon),
                           "CFBundleInfoDictionaryVersion": "6.0",
                           "CFBundlePackageType": "APPL",
                           "CFBundleShortVersionString": self.version,

                           # Setting this to 1 will cause Mac OS X *not* to show
                           # a dock icon for the PyInstaller process which
                           # decompresses the real executable's contents. As a
                           # side effect, the main application doesn't get one
                           # as well, but at startup time the loader will take
                           # care of transforming the process type.
                           "LSBackgroundOnly": "0",
                           }

        # Merge info_plist settings from spec file
        if isinstance(self.info_plist, dict) and self.info_plist:
            info_plist_dict.update(self.info_plist)

        info_plist = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>"""
        for k, v in info_plist_dict.items():
            info_plist += "<key>%s</key>\n<string>%s</string>\n" % (k, v)
        info_plist += """</dict>
</plist>"""
        # Use a context manager so the handle is closed even if write() fails
        # (the original open()/close() pair leaked the handle on error).
        with open(os.path.join(self.name, "Contents", "Info.plist"), "w") as f:
            f.write(info_plist)

        links = []
        toc = add_suffix_to_extensions(self.toc)
        for inm, fnm, typ in toc:
            # Copy files from cache. This ensures that are used files with relative
            # paths to dynamic library dependencies (@executable_path)
            if typ in ('EXTENSION', 'BINARY'):
                fnm = checkCache(fnm, strip=self.strip, upx=self.upx, dist_nm=inm)
            if typ == 'DATA':  # add all data files to a list for symlinking later
                links.append((inm, fnm))
            else:
                tofnm = os.path.join(self.name, "Contents", "MacOS", inm)
                todir = os.path.dirname(tofnm)
                if not os.path.exists(todir):
                    os.makedirs(todir)
                shutil.copy2(fnm, tofnm)

        logger.info('moving BUNDLE data files to Resource directory')

        # Mac OS X Code Signing does not work when .app bundle contains
        # data files in dir ./Contents/MacOS.
        #
        # Put all data files in ./Resources and create symlinks in ./MacOS.
        bin_dir = os.path.join(self.name, 'Contents', 'MacOS')
        res_dir = os.path.join(self.name, 'Contents', 'Resources')
        for inm, fnm in links:
            if inm != 'base_library.zip':  # Don't symlink the base_library.zip for python 3
                tofnm = os.path.join(res_dir, inm)
                todir = os.path.dirname(tofnm)
                if not os.path.exists(todir):
                    os.makedirs(todir)
                shutil.copy2(fnm, tofnm)
                base_path = os.path.split(inm)[0]
                if base_path:
                    if not os.path.exists(os.path.join(bin_dir, inm)):
                        path = ''
                        for part in iter(base_path.split(os.path.sep)):
                            # Build path from previous path and the next part of the base path
                            path = os.path.join(path, part)
                            try:
                                relative_source_path = os.path.relpath(os.path.join(res_dir, path),
                                                                       os.path.split(os.path.join(bin_dir, path))[0])
                                dest_path = os.path.join(bin_dir, path)
                                os.symlink(relative_source_path, dest_path)
                                break
                            except FileExistsError:
                                pass
                        if not os.path.exists(os.path.join(bin_dir, inm)):
                            relative_source_path = os.path.relpath(os.path.join(res_dir, inm),
                                                                   os.path.split(os.path.join(bin_dir, inm))[0])
                            dest_path = os.path.join(bin_dir, inm)
                            os.symlink(relative_source_path, dest_path)
                else:  # If path is empty, e.g., a top level file, try to just symlink the file
                    os.symlink(os.path.relpath(os.path.join(res_dir, inm),
                                               os.path.split(os.path.join(bin_dir, inm))[0]),
                               os.path.join(bin_dir, inm))
            else:
                shutil.copy2(fnm, bin_dir)
|
from temboo.Library._23andMe.Ancestry import Ancestry, AncestryInputSet, AncestryResultSet, AncestryChoreographyExecution
from temboo.Library._23andMe.Genomes import Genomes, GenomesInputSet, GenomesResultSet, GenomesChoreographyExecution
from temboo.Library._23andMe.Genotype import Genotype, GenotypeInputSet, GenotypeResultSet, GenotypeChoreographyExecution
from temboo.Library._23andMe.Haplogroups import Haplogroups, HaplogroupsInputSet, HaplogroupsResultSet, HaplogroupsChoreographyExecution
from temboo.Library._23andMe.Names import Names, NamesInputSet, NamesResultSet, NamesChoreographyExecution
from temboo.Library._23andMe.User import User, UserInputSet, UserResultSet, UserChoreographyExecution
|
from collections import Counter, defaultdict
from itertools import chain
from six import iteritems, itervalues, string_types
from . import builtin
from .file_types import generated_file
from .install import can_install
from .. import options as opts, path
from ..build_inputs import build_input
from ..file_types import *
from ..iterutils import flatten, iterate, uniques, isiterable, recursive_walk
from ..objutils import objectify
from ..packages import CommonPackage
from ..safe_str import literal, shell_literal
from ..shell import posix as pshell
from ..shell.syntax import Syntax, Writer
from ..tools.pkg_config import PkgConfigPackage
from ..versioning import simplify_specifiers, Specifier, SpecifierSet
build_input('pkg_config')(lambda build_inputs, env: [])
class Requirement(object):
    """A pkg-config requirement: a package name plus a version SpecifierSet."""

    def __init__(self, name, version=None):
        self.name = name
        self.version = objectify(version or '', SpecifierSet)

    def __and__(self, rhs):
        """Return a new Requirement combining both version specs."""
        combined = Requirement(self.name, self.version)
        combined &= rhs
        return combined

    def __iand__(self, rhs):
        """Intersect another requirement's version spec into this one."""
        if self.name != rhs.name:
            raise ValueError('requirement names do not match')
        self.version = self.version & rhs.version
        return self

    def __eq__(self, rhs):
        if type(self) != type(rhs):
            return False
        return self.name == rhs.name and self.version == rhs.version

    def __ne__(self, rhs):
        return not (self == rhs)

    def split(self, single=False):
        """Expand into SimpleRequirements, one per simplified specifier."""
        specs = simplify_specifiers(self.version)
        if not specs:
            return [SimpleRequirement(self.name)]
        if single and len(specs) > 1:
            raise ValueError(
                ("multiple specifiers ({}) used in pkg-config requirement " +
                 "for '{}'").format(self.version, self.name)
            )
        return [SimpleRequirement(self.name, spec) for spec in specs]

    def __hash__(self):
        return hash((self.name, self.version))

    def __repr__(self):
        return '<Requirement({!r})>'.format(self._string())

    def _string(self):  # pragma: no cover
        return self.name + str(self.version)
class SimpleRequirement(object):
    """A pkg-config requirement limited to at most one version specifier."""

    def __init__(self, name, version=None):
        self.name = name
        self.version = (objectify(version, Specifier)
                        if version is not None else None)

    def __eq__(self, rhs):
        if type(self) != type(rhs):
            return False
        return self.name == rhs.name and self.version == rhs.version

    def __ne__(self, rhs):
        return not (self == rhs)

    def _safe_str(self):
        """Render as pkg-config syntax; pkg-config spells equality '=' not '=='."""
        if not self.version:
            return shell_literal(self.name)
        op = '=' if self.version.operator == '==' else self.version.operator
        return shell_literal('{name} {op} {version}'.format(
            name=self.name, op=op, version=self.version.version
        ))

    def __hash__(self):
        return hash((self.name, self.version))

    def __repr__(self):
        return '<SimpleRequirement({!r})>'.format(self._string())

    def _string(self):  # pragma: no cover
        return self.name + str(self.version)
class RequirementSet(object):
    """A collection of Requirements, deduplicated (ANDed together) by name."""

    def __init__(self, iterable=None):
        self._reqs = {}
        for req in iterable or ():
            self.add(req)

    def add(self, item):
        """Add a requirement, intersecting with any same-named one."""
        if item.name in self._reqs:
            self._reqs[item.name] &= item
        else:
            self._reqs[item.name] = item

    def remove(self, name):
        del self._reqs[name]

    def update(self, other):
        for req in other:
            self.add(req)

    def merge_from(self, other):
        """Pull any same-named requirements out of `other`, ANDing them in."""
        for req in list(other):
            if req.name in self._reqs:
                self._reqs[req.name] &= req
                other.remove(req.name)

    def split(self, single=False):
        """Expand every requirement and return them sorted by name."""
        return sorted(flatten(req.split(single) for req in self),
                      key=lambda r: r.name)

    def __iter__(self):
        return itervalues(self._reqs)

    def __repr__(self):
        return '<RequirementSet({!r})>'.format(
            [req._string() for req in iter(self)]
        )
class PkgConfigInfo(object):
    """Collected metadata for one generated pkg-config (.pc) file.

    Fields left as None are auto-filled with project defaults by
    finalize_pkg_config when `auto_fill` is true.
    """

    # All .pc files are generated into this subdirectory of the build.
    directory = path.Path('pkgconfig')

    class _simple_property(object):
        # Descriptor that runs `fn` on assignment and stores the result under
        # '_<fn name>'; reading returns the stored value unchanged.
        def __init__(self, fn):
            self.fn = fn

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            return getattr(obj, '_' + self.fn.__name__)

        def __set__(self, obj, value):
            setattr(obj, '_' + self.fn.__name__, self.fn(obj, value))

    def __init__(self, builtins, name=None, desc_name=None, desc=None,
                 url=None, version=None, requires=None, requires_private=None,
                 conflicts=None, includes=None, libs=None, libs_private=None,
                 options=None, link_options=None, link_options_private=None,
                 lang='c', auto_fill=True):
        self._builtins = builtins
        self.auto_fill = auto_fill
        self.name = name
        self.desc_name = desc_name
        self.desc = desc
        self.url = url
        self.version = version
        self.lang = lang
        # These assignments go through the _simple_property descriptors below,
        # normalizing/filtering the raw values as they're stored.
        self.requires = requires
        self.requires_private = requires_private
        self.conflicts = conflicts
        self.includes = includes
        self.libs = libs
        self.libs_private = libs_private
        self.options = pshell.listify(options, type=opts.option_list)
        self.link_options = pshell.listify(link_options, type=opts.option_list)
        self.link_options_private = pshell.listify(link_options_private,
                                                   type=opts.option_list)

    @property
    def output(self):
        # The .pc file node this info will eventually be written to.
        return PkgConfigPcFile(self.directory.append(self.name + '.pc'))

    @_simple_property
    def includes(self, value):
        return uniques(self._builtins['header_directory'](i)
                       for i in iterate(value)) if value is not None else None

    @_simple_property
    def libs(self, value):
        return (uniques(self._library(i) for i in iterate(value))
                if value is not None else None)

    @_simple_property
    def libs_private(self, value):
        return (uniques(self._library(i) for i in iterate(value))
                if value is not None else None)

    @_simple_property
    def requires(self, value):
        # Stored as a (RequirementSet, [system packages]) pair.
        return (self._filter_packages(iterate(value))
                if value is not None else None)

    @_simple_property
    def requires_private(self, value):
        return (self._filter_packages(iterate(value))
                if value is not None else None)

    @_simple_property
    def conflicts(self, value):
        # Only the pkg-config half of the pair makes sense for conflicts.
        return (self._filter_packages(iterate(value))[0]
                if value is not None else None)

    def _library(self, lib):
        # DualUseLibrary passes through; anything else is resolved via the
        # 'library' builtin.
        if isinstance(lib, DualUseLibrary):
            return lib
        return self._builtins['library'](lib)

    def _write_variable(self, out, name, value):
        # Emit a `name=value` pkg-config variable line.
        out.write(name, Syntax.variable)
        out.write_literal('=')
        out.write(value, Syntax.variable)
        out.write_literal('\n')

    def _write_field(self, out, name, value, syntax=Syntax.variable, **kwargs):
        # Emit a `Name: value ...` line; empty fields are skipped entirely.
        if value:
            out.write(name, Syntax.variable)
            out.write_literal(': ')
            out.write_each(iterate(value), syntax, **kwargs)
            out.write_literal('\n')

    def write(self, out, env):
        """Write the complete .pc file contents to `out`."""
        out = Writer(out)
        data = self._process_inputs(env)

        # Install-root variables first; bindir is irrelevant to consumers.
        for i in path.InstallRoot:
            if i != path.InstallRoot.bindir:
                self._write_variable(out, i.name, env.install_dirs[i])
        out.write_literal('\n')

        self._write_field(out, 'Name', data['desc_name'])
        self._write_field(out, 'Description', data['desc'])
        self._write_field(out, 'URL', data['url'])
        self._write_field(out, 'Version', data['version'])
        self._write_field(out, 'Requires', data['requires'], Syntax.shell,
                          delim=literal(', '))
        self._write_field(out, 'Requires.private', data['requires_private'],
                          Syntax.shell, delim=literal(', '))
        self._write_field(out, 'Conflicts', data['conflicts'],
                          Syntax.shell, delim=literal(', '))
        self._write_field(out, 'Cflags', data['cflags'], Syntax.shell)
        self._write_field(out, 'Libs', data['ldflags'], Syntax.shell)
        self._write_field(out, 'Libs.private', data['ldflags_private'],
                          Syntax.shell)

    def _process_inputs(self, env):
        """Resolve all inputs into the final strings/flags the .pc file needs."""
        desc_name = self.desc_name or self.name
        includes = self.includes or []
        libs = self.libs or []
        libs_private = self.libs_private or []
        requires, extra = self.requires or [RequirementSet(), []]
        requires_private, extra_private = (self.requires_private or
                                           [RequirementSet(), []])
        conflicts = self.conflicts or RequirementSet()

        fwd_ldflags = opts.option_list(
            getattr(i, 'forward_opts', {}).get('link_options', [])
            for i in chain(libs, libs_private)
        )

        # Add all the (unique) dependent libs to libs_private, unless they're
        # already in libs.
        fwd = chain.from_iterable(
            i.forward_opts['libs'] if hasattr(i, 'forward_opts') else []
            for i in chain(libs, libs_private)
        )
        libs_private = uniques(chain(
            (i for i in fwd if i not in libs), libs_private
        ))

        # Get the package dependencies for all the libs (public and private)
        # that were passed in.
        auto_requires, auto_extra = self._filter_packages(chain.from_iterable(
            recursive_walk(i, 'package_deps', 'install_deps')
            for i in chain(libs, libs_private)
        ))
        requires_private.update(auto_requires)
        requires.merge_from(requires_private)

        # Get the compiler and linker to use for generating flags.
        builder = env.builder(self.lang)
        compiler = builder.compiler
        linker = builder.linker('executable')

        compile_options = opts.option_list(
            (opts.include_dir(installify(i, cross=env)) for i in includes),
            self.options
        )
        link_options = opts.option_list(
            (opts.lib(installify(i.all[0], cross=env)) for i in libs),
            self.link_options
        )
        link_options_private = opts.option_list(
            (opts.lib(installify(i.all[0], cross=env)) for i in libs_private),
            fwd_ldflags, self.link_options_private
        )

        # Add the options from each of the system packages.
        for pkg in extra:
            compile_options.extend(pkg.compile_options(compiler, None))
            link_options.extend(pkg.link_options(linker, None))
        for pkg in chain(extra_private, auto_extra):
            compile_options.extend(pkg.compile_options(compiler, None))
            link_options_private.extend(pkg.link_options(linker, None))

        return {
            'desc_name': desc_name,
            'desc': self.desc or '{} library'.format(desc_name),
            'url': self.url,
            'version': self.version,
            'requires': requires.split(single=True),
            'requires_private': requires_private.split(single=True),
            'conflicts': conflicts.split(),
            'cflags': compiler.flags(compile_options, mode='pkg-config'),
            'ldflags': (linker.flags(link_options, mode='pkg-config') +
                        linker.lib_flags(link_options, mode='pkg-config')),
            'ldflags_private': (
                linker.flags(link_options_private, mode='pkg-config') +
                linker.lib_flags(link_options_private, mode='pkg-config')
            )
        }

    @staticmethod
    def _filter_packages(packages):
        """Split `packages` into (pkg-config RequirementSet, [system packages])."""
        pkg_config = RequirementSet()
        system = []
        for i in packages:
            if isinstance(i, string_types):
                pkg_config.add(Requirement(i))
            elif isinstance(i, (tuple, list)):
                pkg_config.add(Requirement(*i))
            elif isinstance(i, PkgConfigPackage):
                pkg_config.add(Requirement(i.name, i.specifier))
            elif isinstance(i, CommonPackage):
                system.append(i)
            else:
                raise TypeError('unsupported package type: {}'.format(type(i)))
        return pkg_config, uniques(system)
@builtin.function('builtins', 'build_inputs', 'env')
def pkg_config(builtins, build, env, name=None, **kwargs):
    """Queue a pkg-config file for generation (no-op if nothing is installable)."""
    if not can_install(env):
        return
    info = PkgConfigInfo(builtins, name, **kwargs)
    build['pkg_config'].append(info)
@builtin.post('builtins', 'build_inputs', 'env')
def finalize_pkg_config(builtins, build, env):
    """Fill unset PkgConfigInfo fields from project defaults and emit .pc files."""
    install = build['install']
    project = build['project']
    defaults = {
        'name': project.name,
        'version': project.version or '0.0',
        'includes': [i for i in install
                     if isinstance(i, (HeaderFile, HeaderDirectory))],
        # Get all the explicitly-installed libraries, fetching the
        # DualUseLibrary (i.e. the `parent`) if applicable.
        'libs': uniques(getattr(i, 'parent', i) for i in install.explicit
                        if isinstance(i, Library)),
    }

    for info in build['pkg_config']:
        if info.auto_fill:
            for key, value in iteritems(defaults):
                if getattr(info, key) is None:
                    setattr(info, key, value)

    # Make sure we don't have any duplicate pkg-config packages.
    counts = Counter(i.name for i in build['pkg_config'])
    for pkg_name, count in iteritems(counts):
        if count > 1:
            raise ValueError("duplicate pkg-config package '{}'".format(pkg_name))

    for info in build['pkg_config']:
        with generated_file(build, env, info.output) as out:
            info.write(out, env)
        builtins['install'](info.output)
|
"""SchemaModel components"""
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from .checks import Check
from .errors import SchemaInitError
from .schema_components import (
Column,
Index,
PandasDtypeInputTypes,
SeriesSchemaBase,
)
# Signature type for check functions and model-level hooks.
AnyCallable = Callable[..., Any]
# Bound TypeVar so field helpers can return the concrete component subtype.
SchemaComponent = TypeVar("SchemaComponent", bound=SeriesSchemaBase)
# Attribute names under which @check / @dataframe_check stash their metadata.
CHECK_KEY = "__check_config__"
DATAFRAME_CHECK_KEY = "__dataframe_check_config__"
# A single Check or a list of them, as accepted by Field()/FieldInfo.
_CheckList = Union[Check, List[Check]]
def _to_checklist(checks: Optional[_CheckList]) -> List[Check]:
    """Normalize *checks* to a list; ``None`` becomes an empty list."""
    if checks is None:
        return []
    if isinstance(checks, Check):  # pragma: no cover
        return [checks]
    return checks
class FieldInfo:
    """Captures extra information about a field.

    *new in 0.5.0*
    """

    # Slotted to keep per-field instances lightweight; one slot per option.
    __slots__ = (
        "checks",
        "nullable",
        "allow_duplicates",
        "coerce",
        "regex",
        "check_name",
        "alias",
        "original_name",
        "dtype_kwargs",
    )

    def __init__(
        self,
        checks: Optional[_CheckList] = None,
        nullable: bool = False,
        allow_duplicates: bool = True,
        coerce: bool = False,
        regex: bool = False,
        alias: Optional[str] = None,
        check_name: Optional[bool] = None,
        dtype_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.checks = _to_checklist(checks)
        self.nullable = nullable
        self.allow_duplicates = allow_duplicates
        self.coerce = coerce
        self.regex = regex
        self.alias = alias
        self.check_name = check_name
        self.original_name = cast(str, None)  # always set by SchemaModel
        self.dtype_kwargs = dtype_kwargs

    @property
    def name(self) -> str:
        """Return the name of the field used in the DataFrame"""
        # The alias, when given, overrides the declared attribute name.
        if self.alias is not None:
            return self.alias
        return self.original_name

    def __set_name__(self, owner: Type, name: str) -> None:
        # Descriptor hook: records the attribute name the field was
        # declared under on the model class.
        self.original_name = name

    def __get__(self, instance: Any, owner: Type) -> str:
        # Accessing the field attribute yields the effective column name.
        return self.name

    def __set__(self, instance: Any, value: Any) -> None:  # pragma: no cover
        # Fields are read-only descriptors.
        raise AttributeError(f"Can't set the {self.original_name} field.")

    def _to_schema_component(
        self,
        pandas_dtype: PandasDtypeInputTypes,
        component: Type[SchemaComponent],
        checks: Optional[_CheckList] = None,
        **kwargs: Any,
    ) -> SchemaComponent:
        """Build a schema component of the given type from this field."""
        # Instantiate a parametrized dtype when dtype_kwargs were supplied.
        if self.dtype_kwargs:
            pandas_dtype = pandas_dtype(**self.dtype_kwargs)  # type: ignore
        # Field-level checks come first, then any extra checks passed in.
        checks = self.checks + _to_checklist(checks)
        return component(pandas_dtype, checks=checks, **kwargs)  # type: ignore

    def to_column(
        self,
        pandas_dtype: PandasDtypeInputTypes,
        checks: Optional[_CheckList] = None,
        required: bool = True,
        name: Optional[str] = None,
    ) -> Column:
        """Create a schema_components.Column from a field."""
        return self._to_schema_component(
            pandas_dtype,
            Column,
            nullable=self.nullable,
            allow_duplicates=self.allow_duplicates,
            coerce=self.coerce,
            regex=self.regex,
            required=required,
            name=name,
            checks=checks,
        )

    def to_index(
        self,
        pandas_dtype: PandasDtypeInputTypes,
        checks: Optional[_CheckList] = None,
        name: Optional[str] = None,
    ) -> Index:
        """Create a schema_components.Index from a field."""
        return self._to_schema_component(
            pandas_dtype,
            Index,
            nullable=self.nullable,
            allow_duplicates=self.allow_duplicates,
            coerce=self.coerce,
            name=name,
            checks=checks,
        )
def Field(
    *,
    eq: Any = None,
    ne: Any = None,
    gt: Any = None,
    ge: Any = None,
    lt: Any = None,
    le: Any = None,
    in_range: Optional[Dict[str, Any]] = None,
    isin: Optional[Iterable] = None,
    notin: Optional[Iterable] = None,
    str_contains: Optional[str] = None,
    str_endswith: Optional[str] = None,
    str_length: Optional[Dict[str, Any]] = None,
    str_matches: Optional[str] = None,
    str_startswith: Optional[str] = None,
    nullable: bool = False,
    allow_duplicates: bool = True,
    coerce: bool = False,
    regex: bool = False,
    ignore_na: bool = True,
    raise_warning: bool = False,
    n_failure_cases: int = 10,
    alias: Optional[str] = None,
    check_name: Optional[bool] = None,
    dtype_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs,
) -> Any:
    """Used to provide extra information about a field of a SchemaModel.

    *new in 0.5.0*

    Some arguments apply only to numeric dtypes and some apply only to ``str``.
    See the :ref:`User Guide <schema_models>` for more information.

    The keyword-only arguments from ``eq`` to ``str_startswith`` are dispatched
    to the built-in `~pandera.checks.Check` methods.

    :param nullable: whether or not the column/index is nullable.
    :param allow_duplicates: whether or not to accept duplicate values.
    :param coerce: coerces the data type if ``True``.
    :param regex: whether or not the field name or alias is a regex pattern.
    :param ignore_na: whether or not to ignore null values in the checks.
    :param raise_warning: raise a warning instead of an Exception.
    :param n_failure_cases: report the first n unique failure cases. If None,
        report all failure cases.
    :param alias: The public name of the column/index.
    :param check_name: Whether to check the name of the column/index during
        validation. `None` is the default behavior, which translates to `True`
        for columns and multi-index, and to `False` for a single index.
    :param dtype_kwargs: The parameters to be forwarded to the type of the field.
    :param kwargs: Specify custom checks that have been registered with the
        :class:`~pandera.extensions.register_check_method` decorator.
    """
    # pylint:disable=C0103,W0613,R0914
    # Settings applied uniformly to every Check built below.
    check_kwargs = {
        "ignore_na": ignore_na,
        "raise_warning": raise_warning,
        "n_failure_cases": n_failure_cases,
    }
    # Snapshot all arguments (and locals defined so far) by name so the
    # dispatch loop can look up builtin check arguments generically.
    args = locals()
    checks = []
    check_dispatch = _check_dispatch()
    # Reject unknown custom-check names early with an actionable message.
    for key in kwargs:
        if key not in check_dispatch:
            raise SchemaInitError(
                f"custom check '{key}' is not available. Make sure you use "
                "pandera.extensions.register_check_method decorator to "
                "register your custom check method."
            )
    for arg_name, check_constructor in check_dispatch.items():
        # Builtin check args come from the signature; custom ones from kwargs.
        arg_value = args.get(arg_name, kwargs.get(arg_name))
        if arg_value is None:
            continue
        if isinstance(arg_value, dict):
            # Dict values are spread as keyword arguments (e.g. in_range).
            check_ = check_constructor(**arg_value, **check_kwargs)
        else:
            check_ = check_constructor(arg_value, **check_kwargs)
        checks.append(check_)

    return FieldInfo(
        checks=checks or None,
        nullable=nullable,
        allow_duplicates=allow_duplicates,
        coerce=coerce,
        regex=regex,
        check_name=check_name,
        alias=alias,
        dtype_kwargs=dtype_kwargs,
    )
def _check_dispatch():
    """Map Field() keyword names to their Check constructor counterparts."""
    return {
        "eq": Check.equal_to,
        "ne": Check.not_equal_to,
        "gt": Check.greater_than,
        "ge": Check.greater_than_or_equal_to,
        "lt": Check.less_than,
        "le": Check.less_than_or_equal_to,
        "in_range": Check.in_range,
        "isin": Check.isin,
        "notin": Check.notin,
        "str_contains": Check.str_contains,
        "str_endswith": Check.str_endswith,
        "str_matches": Check.str_matches,
        "str_length": Check.str_length,
        "str_startswith": Check.str_startswith,
        # Custom checks registered via pandera.extensions are dispatchable
        # under their registered names.
        **Check.REGISTERED_CUSTOM_CHECKS,
    }
class CheckInfo:  # pylint:disable=too-few-public-methods
    """Captures extra information about a Check."""

    def __init__(
        self,
        check_fn: AnyCallable,
        **check_kwargs: Any,
    ) -> None:
        self.check_fn = check_fn
        self.check_kwargs = check_kwargs

    def to_check(self, model_cls: Type) -> Check:
        """Create a Check from metadata.

        :param model_cls: The SchemaModel class the check is bound to; it is
            passed as the first argument to the wrapped check function.
        """
        # Work on a copy: the previous implementation popped "name" directly
        # from self.check_kwargs, so a second call would silently lose the
        # check's name.
        kwargs = dict(self.check_kwargs)
        name = kwargs.pop("name", None)
        if not name:
            # Fall back to the function's name (or its class name for
            # non-function callables).
            name = getattr(
                self.check_fn, "__name__", self.check_fn.__class__.__name__
            )

        def _adapter(arg: Any) -> Union[bool, Iterable[bool]]:
            # Bind the model class so the check function sees (cls, arg).
            return self.check_fn(model_cls, arg)

        return Check(_adapter, name=name, **kwargs)
class FieldCheckInfo(CheckInfo):  # pylint:disable=too-few-public-methods
    """Captures extra information about a Check assigned to a field."""

    def __init__(
        self,
        fields: Set[Union[str, FieldInfo]],
        check_fn: AnyCallable,
        regex: bool = False,
        **check_kwargs: Any,
    ) -> None:
        super().__init__(check_fn, **check_kwargs)
        # Fields (by name or FieldInfo) that this check applies to.
        self.fields = fields
        # Whether the field identifiers should be treated as regex patterns.
        self.regex = regex
def _to_function_and_classmethod(
    fn: Union[AnyCallable, classmethod]
) -> Tuple[AnyCallable, classmethod]:
    """Return *fn* as a (plain function, classmethod) pair, wrapping or
    unwrapping as needed."""
    if not isinstance(fn, classmethod):
        return fn, classmethod(fn)
    return fn.__func__, cast(classmethod, fn)
# Type of the decorator returned by check()/dataframe_check(): takes a
# method (or classmethod) and returns a classmethod.
ClassCheck = Callable[[Union[classmethod, AnyCallable]], classmethod]


def check(*fields, regex: bool = False, **check_kwargs) -> ClassCheck:
    """Decorator to make SchemaModel method a column/index check function.

    *new in 0.5.0*

    This indicates that the decorated method should be used to validate a field
    (column or index). The method will be converted to a classmethod. Therefore
    its signature must start with `cls` followed by regular check arguments.

    See the :ref:`User Guide <schema_model_custom_check>` for more.

    :param fields: Names (or FieldInfo objects) of the fields to validate.
    :param regex: Whether the given field names are regex patterns.
    :param check_kwargs: Keywords arguments forwarded to Check.
    """

    def _wrapper(fn: Union[classmethod, AnyCallable]) -> classmethod:
        check_fn, check_method = _to_function_and_classmethod(fn)
        # Stash check metadata on the classmethod; the model collects it
        # later via the CHECK_KEY attribute.
        setattr(
            check_method,
            CHECK_KEY,
            FieldCheckInfo(set(fields), check_fn, regex, **check_kwargs),
        )
        return check_method

    return _wrapper
def dataframe_check(_fn=None, **check_kwargs) -> ClassCheck:
    """Decorator to make SchemaModel method a dataframe-wide check function.

    *new in 0.5.0*

    Decorate a method on the SchemaModel indicating that it should be used to
    validate the DataFrame. The method will be converted to a classmethod.
    Therefore its signature must start with `cls` followed by regular check
    arguments.

    See the :ref:`User Guide <schema_model_dataframe_check>` for more.

    :param _fn: The method being decorated when the decorator is used bare
        (``@dataframe_check``); None when used with arguments.
    :param check_kwargs: Keywords arguments forwarded to Check.
    """

    def _wrapper(fn: Union[classmethod, AnyCallable]) -> classmethod:
        check_fn, check_method = _to_function_and_classmethod(fn)
        setattr(
            check_method,
            DATAFRAME_CHECK_KEY,
            CheckInfo(check_fn, **check_kwargs),
        )
        return check_method

    # Support both bare (@dataframe_check) and parameterized
    # (@dataframe_check(...)) decorator usage.
    if _fn:
        return _wrapper(_fn)  # type: ignore
    return _wrapper
|
# Partition the numbers 1..10 into: evens, odd multiples of 3, and the rest.
# First with explicit loops, then again with list comprehensions.
print('C L A S S I C S O L U T I O N')

range_of_numbers = []
div_by_2 = []
div_by_3 = []
other_numbers = []

for i in range(1, 11):
    range_of_numbers.append(i)
    if i % 2 == 0:
        div_by_2.append(i)
    elif i % 3 == 0:
        div_by_3.append(i)
    else:
        other_numbers.append(i)

print('The range of numbers is: ', range_of_numbers)
print('')
print('The even numbers that are divisible by 2: ', div_by_2)
print('The odd numbers, which are divisible by 3: ', div_by_3)
print('The numbers, that are not divisible by 2 and 3: ', other_numbers)
print('')
print('')

print('L I S T C O M P R E H E N S I O N')

range_of_numbers = [i for i in range(1, 11)]
div_by_2 = [i for i in range(1, 11) if i % 2 == 0]
div_by_3 = [i for i in range(1, 11) if i % 2 == 1 and i % 3 == 0]
# BUG FIX: this previously used "i % 3 == 1", which wrongly dropped 5
# (5 % 3 == 2). The complement of the two groups above among 1..10 is
# "odd and not divisible by 3", matching the classic solution's else branch.
other_numbers = [i for i in range(1, 11) if i % 2 == 1 and i % 3 != 0]

print('The range of numbers is: ', range_of_numbers)
print('')
print('The even numbers that are divisible by 2: ', div_by_2)
print('The odd numbers, which are divisible by 3: ', div_by_3)
print('The numbers, that are not divisible by 2 and 3: ', other_numbers)
|
import csv
import errno
import os
import numpy
import re
import tempfile
import threading
import time
from smqtk.utils import SmqtkObject
def safe_create_dir(d):
    """
    Recursively create the given directory, ignoring the already-exists
    error if thrown.

    :param d: Directory filepath to create
    :type d: str

    :return: The directory that was created, i.e. the directory that was passed
        (in absolute form).
    :rtype: str

    """
    target = os.path.abspath(os.path.expanduser(d))
    try:
        os.makedirs(target)
    except OSError as ex:
        # Only swallow the error when the directory is genuinely present.
        if not (ex.errno == errno.EEXIST and os.path.exists(target)):
            raise
    return target
def safe_file_write(path, b, tmp_dir=None):
    """
    Safely write to a file in such a way that the target file is never
    incompletely written to due to error or multiple agents attempting
    writes.

    We leverage that most OSs have an atomic move/rename operation by first
    writing bytes to a separate temporary file first, then renaming the
    temporary file to the final destination path when write is complete.
    Temporary files are written to the same directory as the target file unless
    otherwise specified.

    **NOTE:** *Windows does not have an atomic file rename and this function
    currently does not do anything special to ensure atomic rename on Windows.*

    :param path: Path to the file to write to.
    :type path: str

    :param b: Byte iterable to write to file.
    :type b: str | bytes

    :param tmp_dir: Optional custom directory to write the intermediate
        temporary file to. This directory must already exist.
    :type tmp_dir: None | str

    """
    file_dir = os.path.dirname(path)
    file_name = os.path.basename(path)
    file_base, file_ext = os.path.splitext(file_name)
    # Make sure containing directory exists
    safe_create_dir(file_dir)
    # Write to a temporary file first, then OS move the temp file to the final
    # destination. This is due to, on most OSs, a file rename/move being atomic.
    # TODO(paul.tunison): Do something else on windows since moves there are not
    #   guaranteed atomic.
    tmp_dir = file_dir if tmp_dir is None else tmp_dir
    fd, fp = tempfile.mkstemp(suffix=file_ext, prefix=file_base + '.',
                              dir=tmp_dir)
    try:
        c = os.write(fd, b)
        # os.write may legally write fewer bytes than requested; treat a
        # short write as failure rather than silently truncating.
        if c != len(b):
            raise RuntimeError("Failed to write all bytes to file.")
    except:
        # Remove temporary file if something bad happens.
        os.remove(fp)
        raise
    finally:
        # Close the descriptor on all paths. On the error path the re-raise
        # above propagates after this close, skipping the rename below.
        os.close(fd)
    os.rename(fp, path)
def make_tempfile(suffix="", prefix="tmp", dir=None, text=False):
    """
    Wrapper for ``tempfile.mkstemp`` that closes/discards the file descriptor
    returned from the method. Arguments/keywords passed are the same as, and
    passed directly to ``tempfile.mkstemp``.

    :return: Path to a new user-owned temporary file.
    :rtype: str

    """
    handle, path = tempfile.mkstemp(suffix, prefix, dir, text)
    os.close(handle)
    return path
def iter_directory_files(d, recurse=True):
    """
    Iterates through files in the structure under the given directory.

    :param d: base directory path
    :type d: str

    :param recurse: If true, we recursively descend into all directories under
        the given directory. If false, we only return the files in the given
        directory and not the sub-directories in the given directory. If this
        is an integer (positive), only recurse that many sub-directories.
    :type recurse: bool | int

    :return: Generator expression yielding absolute file paths under the given
        directory.
    :rtype: collections.Iterable[str]

    """
    d = os.path.abspath(d)
    for dirpath, dirnames, filenames in os.walk(d):
        for fname in filenames:
            yield os.path.join(dirpath, fname)
        if not recurse:
            # Non-recursive: stop after the top-level directory's files.
            break
        elif recurse is not True and dirpath != d:
            # must be an integer: compute this directory's depth below ``d``.
            level = len(os.path.relpath(dirpath, d).split(os.sep))
            if level == recurse:
                # Clearing dirnames in-place prunes os.walk from descending
                # any deeper below this directory.
                del dirnames[:]
            # else recurse fully
def touch(fname):
    """
    Touch a file, creating it if it doesn't exist, setting its updated time to
    now.

    :param fname: File path to touch.
    :type fname: str

    """
    # Opening in append mode creates the file without clobbering contents.
    open(fname, 'a').close()
    os.utime(fname, None)
def exclusive_touch(file_path):
    """
    Attempt to touch a file. If that file already exists, we return False.
    If the file was touched and created, we return True. Other OSErrors
    thrown beside the expected "file already exists" error are passed
    upwards.

    :param file_path: Path to the file to touch.
    :type file_path: str

    :return: True if we touched/created the file, false if we couldn't
    :rtype: bool

    """
    try:
        # O_EXCL makes creation fail if the path already exists, giving an
        # atomic create-if-absent primitive.
        fd = os.open(file_path, os.O_CREAT | os.O_EXCL)
        os.close(fd)
        return True
    except OSError as ex:
        # Use the symbolic errno.EEXIST constant (the module is already
        # imported here) instead of the magic number 17.
        if ex.errno == errno.EEXIST:  # File exists, could not touch.
            return False
        else:
            raise
def iter_svm_file(filepath, width):
    """
    Iterate parsed vectors in a parsed "*.svm" file that encodes a sparse
    matrix, where each line consists of multiple "index:value" pairs in index
    order. Multiple lines construct a matrix.

    :param filepath: Path to the SVM file encoding an array per line
    :type filepath: str

    :param width: The known number of columns in the sparse vectors.
    :type width: int

    :return: Generator yielding ndarray vectors
    :rtype: collections.Iterable[numpy.core.multiarray.ndarray]

    """
    # Raw string literal: "\." in a non-raw string is an invalid escape
    # sequence (DeprecationWarning today, a SyntaxError in future Python).
    idx_val_re = re.compile(r"([0-9]+):([-+]?[0-9]*\.?[0-9]*)")
    with open(filepath, 'r') as infile:
        for line in infile:
            # Start from a dense zero vector and fill in the sparse entries.
            v = numpy.zeros(width, dtype=float)
            for seg in line.strip().split(' '):
                m = idx_val_re.match(seg)
                assert m is not None, \
                    "Invalid index:value match for segment '%s'" % seg
                idx, val = int(m.group(1)), float(m.group(2))
                v[idx] = val
            yield v
def iter_csv_file(filepath):
    """
    Iterate parsed vectors in a "*.csv" file that encodes descriptor output
    where each line is a descriptor vector. Multiple lines construct a matrix.

    :param filepath: Path to the CSV file encoding an array per line.
    :type filepath: str

    :return: Generator yielding ndarray vectors
    :rtype: collections.Iterable[numpy.core.multiarray.ndarray]

    """
    with open(filepath) as csv_in:
        for row in csv.reader(csv_in):
            yield numpy.array(row, dtype=float)
def file_mimetype_filemagic(filepath):
    """
    Determine file mimetype using the file-magic module.

    The file the given path refers to must exist.

    :raises ImportError: ``magic`` python module not available.
    :raises IOError: ``filepath`` did not refer to an existing file.

    :param filepath: Path to the (existing) file to determine the mimetype of.
    :type filepath: str

    :return: MIMETYPE string identifier.
    :rtype: str

    """
    import magic
    # Guard clauses mirror the original checks: directories and missing
    # paths raise IOError with the conventional errno values.
    if os.path.isdir(filepath):
        raise IOError(21, "Is a directory: '%s'" % filepath)
    if not os.path.isfile(filepath):
        raise IOError(2, "No such file or directory: '%s'" % filepath)
    return magic.detect_from_filename(filepath).mime_type
def file_mimetype_tika(filepath):
    """
    Determine file mimetype using ``tika`` module.

    The file the given path refers to must exist. This function may fail under
    multiprocessing situations.

    :raises ImportError: ``tika`` python module not available.
    :raises IOError: ``filepath`` did not refer to an existing file.

    :param filepath: Path to the (existing) file to determine the mimetype of.
    :type filepath: str

    :return: MIMETYPE string identifier.
    :rtype: str

    """
    # Imported lazily so the module loads even when tika is not installed.
    import tika.detector
    return tika.detector.from_file(filepath)
class FileModificationMonitor (SmqtkObject, threading.Thread):
    """
    Utility object for triggering a callback function when an observed file
    changes based on file modification times observed.

    Runs as a daemon thread that polls the file's mtime and drives a
    three-state machine: WAITING (unchanged) -> WATCHING (recently
    modified) -> SETTLED (quiet for the settle window, fire callback).
    """

    STATE_WAITING = 0   # Waiting for file to be modified
    STATE_WATCHING = 1  # Waiting for file to settle
    STATE_SETTLED = 2   # File has stopped being modified for settle period

    def __init__(self, filepath, monitor_interval, settle_window, callback):
        """
        On a separate thread, monitor the modification time of the file at the
        given file path. When the file is updated (after the file has stopped
        changing), trigger the provided callback function, given the monitored
        file path and the file stat event.

        We initially set ourselves as a daemon as that is the most probable
        usage of our functionality.

        :param filepath: Path to the file to monitor
        :type filepath: str

        :param monitor_interval: Frequency in seconds at which we check file
            modification times. This must be >= 0.
        :type monitor_interval: float

        :param settle_window: If a recently modified file is not modified again
            for this many seconds in a row, we consider the file done being
            modified and trigger the ``callback``. This must be >= 0
            and should be >= the ``monitor_interval``.
        :type settle_window: float

        :param callback: Callback function that will be triggered every time
            the provided file has been updated and the settle time has safely
            expired.
        :type callback: (str) -> None

        :raises ValueError: The given filepath did not point to an existing,
            valid file, or a negative time parameter was given.

        """
        SmqtkObject.__init__(self)
        threading.Thread.__init__(self, name=self.__class__.__name__)
        self.daemon = True

        self.filepath = filepath
        self.monitor_interval = monitor_interval
        self.settle_window = settle_window
        self.callback = callback

        # Set means "stop requested"; initialized set so stopped() reads
        # True until start() clears it.
        self.event_stop = threading.Event()
        self.event_stop.set()  # make sure false

        self.state = self.STATE_WAITING

        if not os.path.isfile(self.filepath):
            raise ValueError("Provided filepath did not point to an existing, "
                             "valid file.")
        if monitor_interval < 0 or settle_window < 0:
            raise ValueError("Monitor and settle times must be >= 0")

    def stop(self):
        # Request the polling loop in run() to exit.
        self._log.debug("stopped externally")
        self.event_stop.set()

    def stopped(self):
        # True when a stop was requested (or the thread was never started).
        return self.event_stop.is_set()

    def start(self):
        # Clear stop flag
        self.event_stop.clear()
        super(FileModificationMonitor, self).start()

    def run(self):
        # Polling loop implementing the WAITING -> WATCHING -> SETTLED cycle.
        # self._log.debug("starting run method")
        # mtime baseline
        last_mtime = os.path.getmtime(self.filepath)
        try:
            while not self.stopped():
                mtime = os.path.getmtime(self.filepath)
                # file has been updated
                if self.state == self.STATE_WAITING and last_mtime != mtime:
                    self.state = self.STATE_WATCHING
                    self._log.debug('change detected '
                                    '(mtime: %f -> %f, diff=%f) '
                                    ':: state(WAITING -> WATCHING)',
                                    last_mtime, mtime, mtime - last_mtime)
                # Wait until file is not being modified any more
                elif self.state == self.STATE_WATCHING:
                    t = time.time()
                    if t - mtime >= self.settle_window:
                        self.state = self.STATE_SETTLED
                        self._log.debug('file settled '
                                        '(mtime=%f, t=%f, diff=%f) '
                                        ':: state(WATCHING -> SETTLED)',
                                        mtime, t, t - mtime)
                    else:
                        self._log.debug('waiting for settle '
                                        '(mtime=%f, t=%f, diff=%f)...',
                                        mtime, t, t - mtime)
                    time.sleep(self.monitor_interval)
                elif self.state == self.STATE_SETTLED:
                    # Fire the callback, then go back to waiting for the
                    # next modification burst.
                    self.callback(self.filepath)
                    self.state = self.STATE_WAITING
                    self._log.debug('calling callback '
                                    ':: state(SETTLED -> WAITING)')
                # waiting for modification
                else:
                    # self._log.debug("waiting...")
                    time.sleep(self.monitor_interval)
                last_mtime = mtime
        finally:
            # Ensure stopped() reflects reality however the loop exits.
            self.event_stop.set()
            self._log.debug('exiting')
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
import data
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer import MCMC, NUTS
logging.basicConfig(format="%(message)s", level=logging.INFO)
pyro.set_rng_seed(0)
def model(sigma):
    """Eight-schools hierarchical model, non-centered: theta = mu + tau * eta
    with eta ~ Normal(0, 1) per school."""
    eta = pyro.sample("eta", dist.Normal(torch.zeros(data.J), torch.ones(data.J)))
    mu = pyro.sample("mu", dist.Normal(torch.zeros(1), 10 * torch.ones(1)))
    tau = pyro.sample("tau", dist.HalfCauchy(scale=25 * torch.ones(1)))

    # Per-school treatment effects derived from the shared mean and scale.
    theta = mu + tau * eta

    return pyro.sample("obs", dist.Normal(theta, sigma))
def conditioned_model(model, sigma, y):
    """Condition *model* on the observed effects *y* and execute it."""
    return poutine.condition(model, data={"obs": y})(sigma)
def main(args):
    """Run NUTS MCMC over the conditioned model and print a summary."""
    nuts_kernel = NUTS(conditioned_model, jit_compile=args.jit)
    mcmc = MCMC(
        nuts_kernel,
        num_samples=args.num_samples,
        warmup_steps=args.warmup_steps,
        num_chains=args.num_chains,
    )
    mcmc.run(model, data.sigma, data.y)
    # Summarize posterior with 50% credible intervals.
    mcmc.summary(prob=0.5)
if __name__ == "__main__":
    # Script is pinned to the pyro release it was written against.
    assert pyro.__version__.startswith("1.8.0")
    parser = argparse.ArgumentParser(description="Eight Schools MCMC")
    parser.add_argument(
        "--num-samples",
        type=int,
        default=1000,
        help="number of MCMC samples (default: 1000)",
    )
    parser.add_argument(
        "--num-chains",
        type=int,
        default=1,
        help="number of parallel MCMC chains (default: 1)",
    )
    parser.add_argument(
        "--warmup-steps",
        type=int,
        default=1000,
        help="number of MCMC samples for warmup (default: 1000)",
    )
    # Optionally JIT-compile the model through the NUTS kernel.
    parser.add_argument("--jit", action="store_true", default=False)
    args = parser.parse_args()

    main(args)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import setup, find_packages

# Long description shown on PyPI: README followed by the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Runtime dependencies.
requirements = [
    'Click>=6.0',
    'PyYAML>=3.12',
    'Jinja2>=2.10'
]

setup_requirements = [
    # TODO(msmart): put setup requirements (distutils extensions, etc.) here
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='hreports',
    version='0.2.0',
    description="A simple wrapper to create and manage reports based on hledger queries.",
    long_description=readme + '\n\n' + history,
    author="Michael Martinides",
    author_email='msmart@posteo.de',
    url='https://github.com/msmart/hreports',
    packages=find_packages(include=['hreports']),
    # Console entry point: `hreports` runs the Click CLI.
    entry_points={
        'console_scripts': [
            'hreports=hreports.cli:main'
        ]
    },
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='hreports',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
|
import FWCore.ParameterSet.Config as cms

# cmsRun configuration: generate single muons with a flat-pt particle gun,
# run GEANT4 simulation with HCAL test numbering, and run the HCAL
# geometry DetId tester on the result.
process = cms.Process("PROD")
process.load("SimG4CMS.Calo.pythiapdt_cfi")
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
#process.load("Configuration.Geometry.GeometryExtended2018Reco_cff")
process.load("Configuration.Geometry.GeometryExtended2021Reco_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load('Configuration.StandardSequences.Generator_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load("SimG4CMS.Calo.hcalGeometryDetIdTester_cfi")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")

# Conditions: automatically resolved Run-2 MC global tag.
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['run2_mc']

if 'MessageLogger' in process.__dict__:
    process.MessageLogger.categories.append('G4cerr')

# Fixed random seeds for reproducibility.
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)

process.source = cms.Source("EmptySource",
    firstRun = cms.untracked.uint32(1),
    firstEvent = cms.untracked.uint32(1)
)

# Particle gun: muons (PDG 13) plus antiparticles, fixed pt = 100 GeV,
# 1.22 < eta < 1.70, flat in phi.
process.generator = cms.EDProducer("FlatRandomPtGunProducer",
    PGunParameters = cms.PSet(
        PartID = cms.vint32(13),
        MinEta = cms.double(1.22),
        MaxEta = cms.double(1.70),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        MinPt = cms.double(100.),
        MaxPt = cms.double(100.)
    ),
    Verbosity = cms.untracked.int32(0),
    AddAntiParticle = cms.bool(True)
)

process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.analysis_step = cms.Path(process.hcalGeometryDetIdTester)

process.g4SimHits.Physics.type = 'SimG4Core/Physics/FTFP_BERT_EMM'
process.g4SimHits.HCalSD.TestNumberingScheme = True
process.hcalGeometryDetIdTester.testNumbering = True
process.hcalGeometryDetIdTester.dumpHits = True

# Schedule definition
process.schedule = cms.Schedule(process.generation_step,
                                process.simulation_step,
                                process.analysis_step,
                                )

# filter all path with the production filter sequence
for path in process.paths:
    getattr(process,path)._seq = process.generator * getattr(process,path)._seq
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
These tests check the database is functioning properly,
both in memory and in its file
"""
import datetime
import functools
import json
import os
import shutil
import sys
import pytest
try:
import uuid
_use_uuid = True
except ImportError:
_use_uuid = False
pass
from jsonschema import validate
import llnl.util.lock as lk
from llnl.util.tty.colify import colify
import spack.database
import spack.package
import spack.repo
import spack.spec
import spack.store
from spack.schema.database_index import schema
from spack.util.executable import Executable
from spack.util.mock_package import MockPackageMultiRepo
# Convenience flag for Windows-specific behavior in these tests.
is_windows = sys.platform == 'win32'

# Mark every test in this module as a database test.
pytestmark = pytest.mark.db
@pytest.fixture()
def upstream_and_downstream_db(tmpdir_factory, gen_mock_layout):
    """Yield paired upstream/downstream databases and their mock layouts.

    The upstream root gets two handles: a plain writable Database and a
    read-only one (is_upstream=True), since upstream DBs cannot be
    written through directly.
    """
    mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
    upstream_write_db = spack.database.Database(mock_db_root)
    upstream_db = spack.database.Database(mock_db_root, is_upstream=True)
    # Generate initial DB file to avoid reindex
    with open(upstream_write_db._index_path, 'w') as db_file:
        upstream_write_db._write_to_file(db_file)
    upstream_layout = gen_mock_layout('/a/')

    downstream_db_root = str(
        tmpdir_factory.mktemp('mock_downstream_db_root'))
    downstream_db = spack.database.Database(
        downstream_db_root, upstream_dbs=[upstream_db])
    with open(downstream_db._index_path, 'w') as db_file:
        downstream_db._write_to_file(db_file)
    downstream_layout = gen_mock_layout('/b/')

    yield upstream_write_db, upstream_db, upstream_layout,\
        downstream_db, downstream_layout
@pytest.mark.skipif(sys.platform == 'win32',
                    reason="Upstreams currently unsupported on Windows")
def test_spec_installed_upstream(upstream_and_downstream_db, config, monkeypatch):
    """Test whether Spec.installed_upstream() works."""
    upstream_write_db, upstream_db, upstream_layout, \
        downstream_db, downstream_layout = upstream_and_downstream_db

    # a known installed spec should say that it's installed
    mock_repo = MockPackageMultiRepo()
    mock_repo.add_package('x', [], [])

    with spack.repo.use_repositories(mock_repo):
        spec = spack.spec.Spec("x").concretized()
        assert not spec.installed
        assert not spec.installed_upstream

        upstream_write_db.add(spec, upstream_layout)
        # Re-read so the read-only upstream handle sees the new install.
        upstream_db._read()

        monkeypatch.setattr(spack.store, "db", downstream_db)
        assert spec.installed
        assert spec.installed_upstream
        assert spec.copy().installed

    # an abstract spec should say it's not installed
    spec = spack.spec.Spec("not-a-real-package")
    assert not spec.installed
    assert not spec.installed_upstream
@pytest.mark.skipif(sys.platform == 'win32',
                    reason="Upstreams currently unsupported on Windows")
@pytest.mark.usefixtures('config')
def test_installed_upstream(upstream_and_downstream_db):
    """Dependencies installed upstream should be visible (read-only) through
    a downstream database layered on top of it."""
    upstream_write_db, upstream_db, upstream_layout,\
        downstream_db, downstream_layout = (upstream_and_downstream_db)

    default = ('build', 'link')
    mock_repo = MockPackageMultiRepo()
    x = mock_repo.add_package('x', [], [])
    z = mock_repo.add_package('z', [], [])
    y = mock_repo.add_package('y', [z], [default])
    mock_repo.add_package('w', [x, y], [default, default])

    with spack.repo.use_repositories(mock_repo):
        spec = spack.spec.Spec('w')
        spec.concretize()

        # Install all of w's dependencies upstream only.
        for dep in spec.traverse(root=False):
            upstream_write_db.add(dep, upstream_layout)
        upstream_db._read()

        for dep in spec.traverse(root=False):
            # Deps are visible through the downstream DB...
            record = downstream_db.get_by_hash(dep.dag_hash())
            assert record is not None
            # ...but touching the upstream DB directly requires a lock it
            # is forbidden from taking.
            with pytest.raises(spack.database.ForbiddenLockError):
                record = upstream_db.get_by_hash(dep.dag_hash())

        new_spec = spack.spec.Spec('w')
        new_spec.concretize()
        downstream_db.add(new_spec, downstream_layout)
        for dep in new_spec.traverse(root=False):
            upstream, record = downstream_db.query_by_spec_hash(
                dep.dag_hash())
            # Dependencies resolve to their upstream records/paths.
            assert upstream
            assert record.path == upstream_layout.path_for_spec(dep)
        upstream, record = downstream_db.query_by_spec_hash(
            new_spec.dag_hash())
        # The root itself was installed downstream.
        assert not upstream
        assert record.installed

    upstream_db._check_ref_counts()
    downstream_db._check_ref_counts()
@pytest.mark.skipif(sys.platform == 'win32',
                    reason="Upstreams currently unsupported on Windows")
@pytest.mark.usefixtures('config')
def test_removed_upstream_dep(upstream_and_downstream_db):
    """A downstream DB whose spec depends on a since-removed upstream package
    should fail to read when missing deps are treated as errors."""
    upstream_write_db, upstream_db, upstream_layout,\
        downstream_db, downstream_layout = (upstream_and_downstream_db)

    default = ('build', 'link')
    mock_repo = MockPackageMultiRepo()
    z = mock_repo.add_package('z', [], [])
    mock_repo.add_package('y', [z], [default])

    with spack.repo.use_repositories(mock_repo):
        spec = spack.spec.Spec('y')
        spec.concretize()

        # Dependency z lives upstream; dependent y is installed downstream.
        upstream_write_db.add(spec['z'], upstream_layout)
        upstream_db._read()

        new_spec = spack.spec.Spec('y')
        new_spec.concretize()
        downstream_db.add(new_spec, downstream_layout)

        # Remove the upstream dependency out from under the downstream DB.
        upstream_write_db.remove(new_spec['z'])
        upstream_db._read()

        new_downstream = spack.database.Database(
            downstream_db.root, upstream_dbs=[upstream_db])
        new_downstream._fail_when_missing_deps = True
        with pytest.raises(spack.database.MissingDependenciesError):
            new_downstream._read()
@pytest.mark.skipif(sys.platform == 'win32',
                    reason="Upstreams currently unsupported on Windows")
@pytest.mark.usefixtures('config')
def test_add_to_upstream_after_downstream(upstream_and_downstream_db):
    """An upstream DB can add a package after it is installed in the downstream
    DB. When a package is recorded as installed in both, the results should
    refer to the downstream DB.
    """
    upstream_write_db, upstream_db, upstream_layout,\
        downstream_db, downstream_layout = (upstream_and_downstream_db)

    mock_repo = MockPackageMultiRepo()
    mock_repo.add_package('x', [], [])

    with spack.repo.use_repositories(mock_repo):
        spec = spack.spec.Spec('x')
        spec.concretize()

        # Install downstream first, then record the same spec upstream.
        downstream_db.add(spec, downstream_layout)
        upstream_write_db.add(spec, upstream_layout)
        upstream_db._read()

        upstream, record = downstream_db.query_by_spec_hash(spec.dag_hash())
        # Even though the package is recorded as installed in the upstream DB,
        # we prefer the locally-installed instance
        assert not upstream

        qresults = downstream_db.query('x')
        assert len(qresults) == 1
        queried_spec, = qresults

        # BUG FIX: capture the original store DB *before* the try block.
        # Previously this assignment was the first statement inside the try,
        # so a failure there would have raised NameError on ``orig_db`` in
        # the finally clause, masking the real error.
        orig_db = spack.store.db
        spack.store.db = downstream_db
        try:
            assert queried_spec.prefix == downstream_layout.path_for_spec(spec)
        finally:
            spack.store.db = orig_db
@pytest.mark.skipif(sys.platform == 'win32',
                    reason="Upstreams currently unsupported on Windows")
@pytest.mark.usefixtures('config', 'temporary_store')
def test_cannot_write_upstream(tmpdir_factory, gen_mock_layout):
    """Adding a spec to an upstream database must be forbidden."""
    install_roots = [str(tmpdir_factory.mktemp(name)) for name in ['a', 'b']]
    mock_layouts = [gen_mock_layout(root) for root in ['/ra/', '/rb/']]

    repo = MockPackageMultiRepo()
    repo.add_package('x', [], [])

    # Instantiate the database that will be used as the upstream DB and make
    # sure it has an index file
    standalone_upstream = spack.database.Database(install_roots[1])
    with standalone_upstream.write_transaction():
        pass

    upstream_dbs = spack.store._construct_upstream_dbs_from_install_roots(
        [install_roots[1]], _test=True)

    with spack.repo.use_repositories(repo):
        spec = spack.spec.Spec('x')
        spec.concretize()

        # Writing through the upstream handle must raise.
        with pytest.raises(spack.database.ForbiddenLockError):
            upstream_dbs[0].add(spec, mock_layouts[1])
@pytest.mark.skipif(sys.platform == 'win32',
                    reason="Upstreams currently unsupported on Windows")
@pytest.mark.usefixtures('config', 'temporary_store')
def test_recursive_upstream_dbs(tmpdir_factory, gen_mock_layout):
    """A chain of databases (a -> b -> c) must attribute each spec to the
    database that actually holds it, both for the directly-constructed chain
    and for one reconstructed from the install roots.
    """
    roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b', 'c']]
    layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/', '/rc/']]

    # x -> y -> z, each link being a build+link dependency
    default = ('build', 'link')
    mock_repo = MockPackageMultiRepo()
    z = mock_repo.add_package('z', [], [])
    y = mock_repo.add_package('y', [z], [default])
    mock_repo.add_package('x', [y], [default])

    with spack.repo.use_repositories(mock_repo):
        spec = spack.spec.Spec('x')
        spec.concretize()

        # Install one level of the dependency chain per database.
        db_c = spack.database.Database(roots[2])
        db_c.add(spec['z'], layouts[2])

        db_b = spack.database.Database(roots[1], upstream_dbs=[db_c])
        db_b.add(spec['y'], layouts[1])

        db_a = spack.database.Database(roots[0], upstream_dbs=[db_b, db_c])
        db_a.add(spec['x'], layouts[0])

        # Rebuild the upstream chain from nothing but the install roots.
        upstream_dbs_from_scratch = (
            spack.store._construct_upstream_dbs_from_install_roots(
                [roots[1], roots[2]], _test=True))
        db_a_from_scratch = spack.database.Database(
            roots[0], upstream_dbs=upstream_dbs_from_scratch)

        # Each spec resolves to the database that installed it.
        assert db_a_from_scratch.db_for_spec_hash(spec.dag_hash()) == (
            db_a_from_scratch)
        assert db_a_from_scratch.db_for_spec_hash(spec['y'].dag_hash()) == (
            upstream_dbs_from_scratch[0])
        assert db_a_from_scratch.db_for_spec_hash(spec['z'].dag_hash()) == (
            upstream_dbs_from_scratch[1])

        db_a_from_scratch._check_ref_counts()
        upstream_dbs_from_scratch[0]._check_ref_counts()
        upstream_dbs_from_scratch[1]._check_ref_counts()

        # Relatives are found across database boundaries in both directions.
        assert (db_a_from_scratch.installed_relatives(spec) ==
                set(spec.traverse(root=False)))
        assert (db_a_from_scratch.installed_relatives(
            spec['z'], direction='parents') == set([spec, spec['y']]))
@pytest.fixture()
def usr_folder_exists(monkeypatch):
    """The ``/usr`` folder is assumed to be existing in some tests. This
    fixture makes it such that its existence is mocked, so we have no
    requirements on the system running tests.
    """
    real_isdir = os.path.isdir

    @functools.wraps(os.path.isdir)
    def mock_isdir(path):
        # Report '/usr' as present; defer every other path to the real check.
        return True if path == '/usr' else real_isdir(path)

    monkeypatch.setattr(os.path, 'isdir', mock_isdir)
def _print_ref_counts():
    """Print out all ref counts for the graph used here, for debugging"""
    rows = []

    def record(spec):
        # Show a trailing '-' for specs that are not in the DB at all.
        matches = spack.store.db.query(spec, installed=any)
        if not matches:
            rows.append("[ %-7s ] %-20s-" % ('', spec))
        else:
            key = matches[0].dag_hash()
            rec = spack.store.db.get_record(matches[0])
            rows.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))

    with spack.store.db.read_transaction():
        for query in ('mpileaks ^mpich', 'callpath ^mpich', 'mpich',
                      'mpileaks ^mpich2', 'callpath ^mpich2', 'mpich2',
                      'mpileaks ^zmpi', 'callpath ^zmpi', 'zmpi',
                      'fake', 'dyninst', 'libdwarf', 'libelf'):
            record(query)

    colify(rows, cols=3)
def _check_merkleiness():
    """Ensure the spack database is a valid merkle graph."""
    seen = {}
    for spec in spack.store.db.query(installed=any):
        for dep in spec.dependencies():
            # Every dag hash must map to exactly one in-memory spec object.
            assert seen.setdefault(dep.dag_hash(), id(dep)) == id(dep)
def _check_db_sanity(database):
    """Utility function to check db against install layout."""
    expected = sorted(spack.store.layout.all_specs())
    actual = sorted(database.query())

    # External specs live only in the DB, never in the install layout.
    externals = sorted(s for s in actual if s.external)
    assert len(expected) + len(externals) == len(actual)

    non_external = sorted(s for s in actual if not s.external)
    for layout_spec, db_spec in zip(expected, non_external):
        assert layout_spec == db_spec

    _check_merkleiness()
def _check_remove_and_add_package(database, spec):
    """Remove a spec from the DB, then add it and make sure everything's
    still ok once it is added. This checks that it was
    removed, that it's back when added again, and that ref
    counts are consistent.
    """
    original = database.query()
    database._check_ref_counts()

    # Remove spec
    concrete_spec = database.remove(spec)
    database._check_ref_counts()
    remaining = database.query()

    # ensure spec we removed is gone
    assert len(original) - 1 == len(remaining)
    assert all(s in original for s in remaining)
    assert concrete_spec not in remaining

    # add it back and make sure everything is ok.
    database.add(concrete_spec, spack.store.layout)
    installed = database.query()
    assert concrete_spec in installed
    assert installed == original

    # sanity check against directory layout and check ref counts.
    _check_db_sanity(database)
    database._check_ref_counts()
def _mock_install(spec):
    """Concretize *spec* (a spec string) and fake-install its package."""
    concrete = spack.spec.Spec(spec)
    concrete.concretize()
    package = spack.repo.get(concrete)
    package.do_install(fake=True)
def _mock_remove(spec):
    """Uninstall the single installed spec matching the *spec* query."""
    matches = spack.store.db.query(spec)
    assert len(matches) == 1
    target = matches[0]
    target.package.do_uninstall(target)
def test_default_queries(database):
    """`libs`, `headers` and `command` default queries on a spec.

    A package whose name doesn't start with 'lib' ('zmpi') gets 'lib'
    prepended to its library name; one that already starts with 'lib'
    ('libelf') must not end up with a double 'lib' prefix.
    """
    for name, library_name in (('zmpi', 'zmpi'), ('libelf', 'elf')):
        spec = database.get_record(name).spec

        libraries = spec[name].libs
        assert len(libraries) == 1
        assert libraries.names[0] == library_name

        headers = spec[name].headers
        assert len(headers) == 1
        assert headers.names[0] == name

        command = spec[name].command
        assert isinstance(command, Executable)
        assert command.name == name
        assert os.path.exists(command.path)
def test_005_db_exists(database):
    """Make sure db cache file exists after creating."""
    db_dir = os.path.join(database.root, '.spack-db')
    index_file = os.path.join(db_dir, 'index.json')
    lock_file = os.path.join(db_dir, 'lock')

    assert os.path.exists(str(index_file))
    # Lockfiles not currently supported on Windows
    if not is_windows:
        assert os.path.exists(str(lock_file))

    # The on-disk index must validate against the database schema.
    with open(index_file) as fd:
        validate(json.load(fd), schema)
def test_010_all_install_sanity(database):
    """Ensure that the install layout reflects what we think it does."""
    all_specs = spack.store.layout.all_specs()
    assert len(all_specs) == 15

    def count(query):
        # Number of layout specs satisfying an abstract query string.
        return len([s for s in all_specs if s.satisfies(query)])

    # Query specs with multiple configurations
    assert count('mpileaks') == 3
    assert count('callpath') == 3
    assert count('mpi') == 3

    # Query specs with single configurations
    assert count('dyninst') == 1
    assert count('libdwarf') == 1
    assert count('libelf') == 1

    # Query by dependency
    assert count('mpileaks ^mpich') == 1
    assert count('mpileaks ^mpich2') == 1
    assert count('mpileaks ^zmpi') == 1
def test_015_write_and_read(mutable_database):
    """Records survive a write transaction round-trip unchanged."""
    # Snapshot every record inside a write transaction, ...
    with spack.store.db.write_transaction():
        specs = spack.store.db.query()
        snapshots = [spack.store.db.get_record(s) for s in specs]

    # ... then re-fetch and compare field by field.
    for spec, before in zip(specs, snapshots):
        after = spack.store.db.get_record(spec)
        assert after.ref_count == before.ref_count
        assert after.spec == before.spec
        assert after.path == before.path
        assert after.installed == before.installed
def test_017_write_and_read_without_uuid(mutable_database, monkeypatch):
    """Same round-trip as test_015, but with index verification disabled."""
    monkeypatch.setattr(spack.database, '_use_uuid', False)

    # Snapshot every record inside a write transaction, ...
    with spack.store.db.write_transaction():
        specs = spack.store.db.query()
        snapshots = [spack.store.db.get_record(s) for s in specs]

    # ... then re-fetch and compare field by field.
    for spec, before in zip(specs, snapshots):
        after = spack.store.db.get_record(spec)
        assert after.ref_count == before.ref_count
        assert after.spec == before.spec
        assert after.path == before.path
        assert after.installed == before.installed
def test_020_db_sanity(database):
    """Make sure query() returns what's actually in the db."""
    _check_db_sanity(database)
def test_025_reindex(mutable_database):
    """Make sure reindex works and ref counts are valid."""
    spack.store.store.reindex()
    _check_db_sanity(mutable_database)
def test_026_reindex_after_deprecate(mutable_database):
    """Make sure reindex works and ref counts are valid after deprecation."""
    deprecated = mutable_database.query_one('mpich')
    deprecator = mutable_database.query_one('zmpi')
    mutable_database.deprecate(deprecated, deprecator)

    spack.store.store.reindex()
    _check_db_sanity(mutable_database)
class ReadModify(object):
    """Provide a function which can execute in a separate process that removes
    a spec from the database.
    """
    def __call__(self):
        # check that other process can read DB
        _check_db_sanity(spack.store.db)
        # then remove a spec from it under a write lock
        with spack.store.db.write_transaction():
            _mock_remove('mpileaks ^zmpi')
def test_030_db_sanity_from_another_process(mutable_database):
    """A separate process can read and modify the DB; its change must be
    visible in this process afterwards.
    """
    child = spack.subprocess_context.SpackTestProcess(ReadModify()).create()
    child.start()
    child.join()

    # ensure child process change is visible in parent process
    with mutable_database.read_transaction():
        assert len(mutable_database.query('mpileaks ^zmpi')) == 0
def test_040_ref_counts(database):
    """Ensure that we got ref counts right when we read the DB."""
    database._check_ref_counts()
def test_041_ref_counts_deprecate(mutable_database):
    """Ensure that we have appropriate ref counts after deprecating"""
    deprecated = mutable_database.query_one('mpich')
    deprecator = mutable_database.query_one('zmpi')
    mutable_database.deprecate(deprecated, deprecator)
    mutable_database._check_ref_counts()
def test_050_basic_query(database):
    """Ensure querying database is consistent with what is installed."""
    # query everything
    total_specs = len(spack.store.db.query())
    assert total_specs == 17

    def count(query):
        # Number of database records matching an abstract query string.
        return len(database.query(query))

    # query specs with multiple configurations
    assert count('mpileaks') == 3
    assert count('callpath') == 3
    assert count('mpi') == 3

    # query specs with single configurations
    assert count('dyninst') == 1
    assert count('libdwarf') == 1
    assert count('libelf') == 1

    # Query by dependency
    assert count('mpileaks ^mpich') == 1
    assert count('mpileaks ^mpich2') == 1
    assert count('mpileaks ^zmpi') == 1

    # Query by date: min/max datetimes bracket everything / nothing.
    assert len(database.query(start_date=datetime.datetime.min)) == total_specs
    assert len(database.query(start_date=datetime.datetime.max)) == 0
    assert len(database.query(end_date=datetime.datetime.min)) == 0
    assert len(database.query(end_date=datetime.datetime.max)) == total_specs
def test_060_remove_and_add_root_package(mutable_database):
    # Removing and re-adding a root spec must leave the DB consistent.
    _check_remove_and_add_package(mutable_database, 'mpileaks ^mpich')
def test_070_remove_and_add_dependency_package(mutable_database):
    # Same as test_060, but for a spec that is itself a dependency.
    _check_remove_and_add_package(mutable_database, 'dyninst')
def test_080_root_ref_counts(mutable_database):
    """Removing and re-adding a root spec must decrement and then restore the
    reference counts of its dependencies.
    """
    rec = mutable_database.get_record('mpileaks ^mpich')

    # Remove a top-level spec from the DB
    mutable_database.remove('mpileaks ^mpich')

    # record no longer in DB
    assert mutable_database.query('mpileaks ^mpich', installed=any) == []

    # record's deps have updated ref_counts
    assert mutable_database.get_record('callpath ^mpich').ref_count == 0
    assert mutable_database.get_record('mpich').ref_count == 1

    # Put the spec back
    mutable_database.add(rec.spec, spack.store.layout)

    # record is present again
    assert len(mutable_database.query('mpileaks ^mpich', installed=any)) == 1

    # dependencies have ref counts updated
    assert mutable_database.get_record('callpath ^mpich').ref_count == 1
    assert mutable_database.get_record('mpich').ref_count == 2
def test_090_non_root_ref_counts(mutable_database):
    """Removing a non-root spec keeps it in the DB (uninstalled) until its
    last dependent is removed, at which point it disappears entirely.
    """
    mutable_database.get_record('mpileaks ^mpich')
    mutable_database.get_record('callpath ^mpich')

    # "force remove" a non-root spec from the DB
    mutable_database.remove('callpath ^mpich')

    # record still in DB but marked uninstalled
    assert mutable_database.query('callpath ^mpich', installed=True) == []
    assert len(mutable_database.query('callpath ^mpich', installed=any)) == 1

    # record and its deps have same ref_counts
    assert mutable_database.get_record(
        'callpath ^mpich', installed=any
    ).ref_count == 1
    assert mutable_database.get_record('mpich').ref_count == 2

    # remove only dependent of uninstalled callpath record
    mutable_database.remove('mpileaks ^mpich')

    # record and parent are completely gone.
    assert mutable_database.query('mpileaks ^mpich', installed=any) == []
    assert mutable_database.query('callpath ^mpich', installed=any) == []

    # mpich ref count updated properly.
    mpich_rec = mutable_database.get_record('mpich')
    assert mpich_rec.ref_count == 0
def test_100_no_write_with_exception_on_remove(database):
    """A write transaction that raises must roll back: the removal attempted
    inside it should not be visible afterwards.
    """
    def remove_and_fail():
        with database.write_transaction():
            _mock_remove('mpileaks ^zmpi')
            raise Exception()

    with database.read_transaction():
        assert len(database.query('mpileaks ^zmpi', installed=any)) == 1

    with pytest.raises(Exception):
        remove_and_fail()

    # reload DB and make sure zmpi is still there.
    with database.read_transaction():
        assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
def test_110_no_write_with_exception_on_install(database):
    """A write transaction that raises must roll back: the install attempted
    inside it should not be visible afterwards.
    """
    def install_and_fail():
        with database.write_transaction():
            _mock_install('cmake')
            raise Exception()

    with database.read_transaction():
        assert database.query('cmake', installed=any) == []

    with pytest.raises(Exception):
        install_and_fail()

    # reload DB and make sure cmake was not written.
    with database.read_transaction():
        assert database.query('cmake', installed=any) == []
def test_115_reindex_with_packages_not_in_repo(mutable_database):
    """Reindexing must work even when installed packages are not in a repo."""
    # Dont add any package definitions to this repository, the idea is that
    # packages should not have to be defined in the repository once they
    # are installed
    with spack.repo.use_repositories(MockPackageMultiRepo()):
        spack.store.store.reindex()
        _check_db_sanity(mutable_database)
def test_external_entries_in_db(mutable_database):
    """External specs keep their external_path in the DB, and installing them
    flips the `explicit` flag without clobbering the external metadata.
    """
    # A non-external spec has no external metadata at all.
    rec = mutable_database.get_record('mpileaks ^zmpi')
    assert rec.spec.external_path is None
    assert not rec.spec.external_modules

    rec = mutable_database.get_record('externaltool')
    assert rec.spec.external_path == os.sep + \
        os.path.join('path', 'to', 'external_tool')
    assert not rec.spec.external_modules
    assert rec.explicit is False

    # Re-install explicitly; only the `explicit` flag should change.
    rec.spec.package.do_install(fake=True, explicit=True)
    rec = mutable_database.get_record('externaltool')
    assert rec.spec.external_path == os.sep + \
        os.path.join('path', 'to', 'external_tool')
    assert not rec.spec.external_modules
    assert rec.explicit is True
@pytest.mark.regression('8036')
def test_regression_issue_8036(mutable_database, usr_folder_exists):
    # The test ensures that the external package prefix is treated as
    # existing. Even when the package prefix exists, the package should
    # not be considered installed until it is added to the database with
    # do_install.
    spec = spack.spec.Spec('externaltool@0.9')
    spec.concretize()
    assert not spec.installed

    # Now install the external package and check again the `installed` property
    spec.package.do_install(fake=True)
    assert spec.installed
@pytest.mark.regression('11118')
def test_old_external_entries_prefix(mutable_database):
    """Simulate an old-style index entry whose external install path was
    serialized as the string 'None' and check that the spec's prefix falls
    back to its external_path.
    """
    with open(spack.store.db._index_path, 'r') as f:
        db_obj = json.loads(f.read())

    validate(db_obj, schema)

    s = spack.spec.Spec('externaltool')
    s.concretize()

    # Rewrite the installed path the way older versions of spack stored it.
    db_obj['database']['installs'][s.dag_hash()]['path'] = 'None'

    with open(spack.store.db._index_path, 'w') as f:
        f.write(json.dumps(db_obj))
    if _use_uuid:
        # Bump the verifier token so the rewritten index is re-read.
        with open(spack.store.db._verifier_path, 'w') as f:
            f.write(str(uuid.uuid4()))

    record = spack.store.db.get_record(s)

    assert record.path is None
    assert record.spec._prefix is None
    assert record.spec.prefix == record.spec.external_path
def test_uninstall_by_spec(mutable_database):
    """Every record can be removed: installed specs via a real uninstall,
    uninstalled ones by dropping them straight from the DB.
    """
    with mutable_database.write_transaction():
        for spec in mutable_database.query():
            if not spec.installed:
                mutable_database.remove(spec)
            else:
                spack.package.PackageBase.uninstall_by_spec(spec, force=True)
    assert len(mutable_database.query()) == 0
def test_query_unused_specs(mutable_database):
    """A build-only dependency shows up as unused once its dependent is
    installed.
    """
    # This spec installs a fake cmake as a build only dependency
    spec = spack.spec.Spec('simple-inheritance')
    spec.concretize()
    spec.package.do_install(fake=True, explicit=True)

    unused = spack.store.db.unused_specs
    assert [u.name for u in unused] == ['cmake']
@pytest.mark.regression('10019')
def test_query_spec_with_conditional_dependency(mutable_database):
    # The issue is triggered by having dependencies that are
    # conditional on a Boolean variant
    spec = spack.spec.Spec('hdf5~mpi')
    spec.concretize()
    spec.package.do_install(fake=True, explicit=True)

    # The ~mpi variant disables the mpich dependency, so nothing matches.
    assert not spack.store.db.query_local('hdf5 ^mpich')
@pytest.mark.regression('10019')
def test_query_spec_with_non_conditional_virtual_dependency(database):
    # Ensure the same issue doesn't come up for virtual
    # dependency that are not conditional on variants
    results = spack.store.db.query_local('mpileaks ^mpich')
    assert len(results) == 1
def test_failed_spec_path_error(database):
    """Ensure spec not concrete check is covered."""
    s = spack.spec.Spec('a')
    # _failed_spec_path needs a concrete spec to compute the failure mark.
    with pytest.raises(ValueError, match='Concrete spec required'):
        spack.store.db._failed_spec_path(s)
@pytest.mark.db
def test_clear_failure_keep(mutable_database, monkeypatch, capfd):
    """Add test coverage for clear_failure operation when to be retained."""
    def _always_locked(db, spec):
        return True

    # Pretend the spec has been failure locked
    monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked',
                        _always_locked)

    spec = spack.spec.Spec('a')
    spack.store.db.clear_failure(spec)
    stdout = capfd.readouterr()[0]
    assert 'Retaining failure marking' in stdout
@pytest.mark.db
def test_clear_failure_forced(mutable_database, monkeypatch, capfd):
    """Add test coverage for clear_failure operation when force."""
    def _always_true(db, spec):
        return True

    # Pretend the spec has been failure locked
    monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked',
                        _always_true)
    # Make the marking look present so removing the (non-existent) mark
    # file raises OSError
    monkeypatch.setattr(spack.database.Database, 'prefix_failure_marked',
                        _always_true)

    spec = spack.spec.Spec('a').concretized()
    spack.store.db.clear_failure(spec, force=True)
    stderr = capfd.readouterr()[1]
    assert 'Removing failure marking despite lock' in stderr
    assert 'Unable to remove failure marking' in stderr
@pytest.mark.db
def test_mark_failed(mutable_database, monkeypatch, tmpdir, capsys):
    """Add coverage to mark_failed."""
    def _raise_exc(lock):
        raise lk.LockTimeoutError('Mock acquire_write failure')

    # Ensure attempt to acquire write lock on the mark raises the exception
    monkeypatch.setattr(lk.Lock, 'acquire_write', _raise_exc)

    with tmpdir.as_cwd():
        s = spack.spec.Spec('a').concretized()
        spack.store.db.mark_failed(s)

        out = str(capsys.readouterr()[1])
        assert 'Unable to mark a as failed' in out

        # Clean up the failure mark to ensure it does not interfere with other
        # tests using the same spec.
        del spack.store.db._prefix_failures[s.prefix]
@pytest.mark.db
def test_prefix_failed(mutable_database, monkeypatch):
    """Add coverage to prefix_failed operation."""
    def _is(db, spec):
        return True

    s = spack.spec.Spec('a').concretized()

    # Confirm the spec is not already marked as failed
    assert not spack.store.db.prefix_failed(s)

    # Check that a failure entry is sufficient
    spack.store.db._prefix_failures[s.prefix] = None
    assert spack.store.db.prefix_failed(s)

    # Remove the entry and check again
    del spack.store.db._prefix_failures[s.prefix]
    assert not spack.store.db.prefix_failed(s)

    # Now pretend that the prefix failure is locked
    monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
    assert spack.store.db.prefix_failed(s)
def test_prefix_read_lock_error(mutable_database, monkeypatch):
    """Cover the prefix read lock exception."""
    def _fail(db, spec):
        raise lk.LockError('Mock lock error')

    spec = spack.spec.Spec('a').concretized()

    # Ensure subsequent lock operations fail
    monkeypatch.setattr(lk.Lock, 'acquire_read', _fail)

    with pytest.raises(Exception):
        with spack.store.db.prefix_read_lock(spec):
            assert False
def test_prefix_write_lock_error(mutable_database, monkeypatch):
    """Cover the prefix write lock exception."""
    def _fail(db, spec):
        raise lk.LockError('Mock lock error')

    spec = spack.spec.Spec('a').concretized()

    # Ensure subsequent lock operations fail
    monkeypatch.setattr(lk.Lock, 'acquire_write', _fail)

    with pytest.raises(Exception):
        with spack.store.db.prefix_write_lock(spec):
            assert False
@pytest.mark.regression('26600')
def test_database_works_with_empty_dir(tmpdir):
    """A read-only, empty store directory can be queried without an index
    file being created as a side effect.
    """
    # Create the lockfile and failures directory otherwise
    # we'll get a permission error on Database creation
    db_dir = tmpdir.ensure_dir('.spack-db')
    db_dir.ensure('lock')
    db_dir.ensure_dir('failures')
    tmpdir.chmod(mode=0o555, rec=1)
    db = spack.database.Database(str(tmpdir))
    with db.read_transaction():
        db.query()
    # Check that reading an empty directory didn't create a new index.json
    assert not os.path.exists(db._index_path)
@pytest.mark.parametrize('query_arg,exc_type,msg_str', [
    (['callpath'], spack.store.MatchError, 'matches multiple packages'),
    (['tensorflow'], spack.store.MatchError, 'does not match any')
])
def test_store_find_failures(database, query_arg, exc_type, msg_str):
    # find(multiple=False) must raise with a descriptive message when the
    # query is ambiguous or matches nothing.
    with pytest.raises(exc_type) as exc_info:
        spack.store.find(query_arg, multiple=False)
    assert msg_str in str(exc_info.value)
def test_store_find_accept_string(database):
    # find() accepts a bare query string, not only a list of constraints.
    result = spack.store.find('callpath', multiple=True)
    assert len(result) == 3
def test_reindex_removed_prefix_is_not_installed(mutable_database, mock_store, capfd):
    """When a prefix of a dependency is removed and the database is reindexed,
    the spec should still be added through the dependent, but should be listed as
    not installed."""

    # Remove libelf from the filesystem
    prefix = mutable_database.query_one('libelf').prefix
    assert prefix.startswith(str(mock_store))
    shutil.rmtree(prefix)

    # Reindex should pick up libelf as a dependency of libdwarf
    spack.store.store.reindex()

    # Reindexing should warn about libelf not being found on the filesystem
    err = capfd.readouterr()[1]
    assert 'this directory does not contain an installation of the spec' in err

    # And we should still have libelf in the database, but not installed.
    assert not mutable_database.query_one('libelf', installed=True)
    assert mutable_database.query_one('libelf', installed=False)
def test_reindex_when_all_prefixes_are_removed(mutable_database, mock_store):
    """Reindexing after every prefix was deleted keeps the explicit specs in
    the database, marked uninstalled, and they can then be removed.
    """
    # Remove all non-external installations from the filesystem
    for spec in spack.store.db.query_local():
        if not spec.external:
            assert spec.prefix.startswith(str(mock_store))
            shutil.rmtree(spec.prefix)

    # Make sure we have some explicitly installed specs
    num = len(mutable_database.query_local(installed=True, explicit=True))
    assert num > 0

    # Reindex uses the current index to repopulate itself
    spack.store.store.reindex()

    # Make sure all explicit specs are still there, but are now uninstalled.
    specs = mutable_database.query_local(installed=False, explicit=True)
    assert len(specs) == num

    # And make sure they can be removed from the database (covers the case where
    # `ref_count == 0 and not installed`, which hits some obscure branches.
    for s in specs:
        mutable_database.remove(s)

    assert len(mutable_database.query_local(installed=False, explicit=True)) == 0
@pytest.mark.parametrize('spec_str,parent_name,expected_nparents', [
    ('dyninst', 'callpath', 3),
    ('libelf', 'dyninst', 1),
    ('libelf', 'libdwarf', 1)
])
@pytest.mark.regression('11983')
def test_check_parents(spec_str, parent_name, expected_nparents, database):
    """Check that a spec returns the correct number of parents."""
    spec = database.query_one(spec_str)

    # Both the dependents view and the raw edge list must agree.
    assert len(spec.dependents(name=parent_name)) == expected_nparents
    assert len(spec.edges_from_dependents(name=parent_name)) == (
        expected_nparents)
def test_consistency_of_dependents_upon_remove(mutable_database):
    """Removing dependents must be reflected by dependents() queries."""
    # Check the initial state
    s = mutable_database.query_one('dyninst')
    parents = s.dependents(name='callpath')
    assert len(parents) == 3

    # Remove a dependent (and all its dependents)
    mutable_database.remove('mpileaks ^callpath ^mpich2')
    mutable_database.remove('callpath ^mpich2')

    # Check the final state
    s = mutable_database.query_one('dyninst')
    parents = s.dependents(name='callpath')
    assert len(parents) == 2
@pytest.mark.regression('30187')
def test_query_installed_when_package_unknown(database):
    """Test that we can query the installation status of a spec
    when we don't know its package.py
    """
    with spack.repo.use_repositories(MockPackageMultiRepo()):
        specs = database.query('mpileaks')
        for s in specs:
            # Assert that we can query the installation methods even though we
            # don't have the package.py available
            assert s.installed
            assert not s.installed_upstream
            # Accessing the package itself must still fail
            with pytest.raises(spack.repo.UnknownNamespaceError):
                s.package
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import base64
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitfinex(Exchange):
    def describe(self):
        """Return the static exchange metadata for Bitfinex v1.

        Extends the base Exchange description with identifiers,
        capability flags, timeframes, REST endpoint tables, fee
        schedules, currency-code aliases and error-message mappings.
        """
        return self.deep_extend(super(bitfinex, self).describe(), {
            'id': 'bitfinex',
            'name': 'Bitfinex',
            'countries': ['VG'],
            'version': 'v1',
            'rateLimit': 1500,
            'certified': True,
            # new metainfo interface
            'has': {
                'CORS': False,
                'cancelAllOrders': True,
                'createDepositAddress': True,
                'deposit': True,
                'fetchClosedOrders': True,
                'fetchDepositAddress': True,
                'fetchTradingFee': True,
                'fetchTradingFees': True,
                'fetchFundingFees': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchTickers': True,
                'fetchTransactions': True,
                'fetchDeposits': False,
                'fetchWithdrawals': False,
                'withdraw': True,
            },
            'timeframes': {
                '1m': '1m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '3h': '3h',
                '6h': '6h',
                '12h': '12h',
                '1d': '1D',
                '1w': '7D',
                '2w': '14D',
                '1M': '1M',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg',
                'api': {
                    'v2': 'https://api-pub.bitfinex.com',  # https://github.com/ccxt/ccxt/issues/5109
                    'public': 'https://api.bitfinex.com',
                    'private': 'https://api.bitfinex.com',
                },
                'www': 'https://www.bitfinex.com',
                'referral': 'https://www.bitfinex.com/?refcode=P61eYxFL',
                'doc': [
                    'https://docs.bitfinex.com/v1/docs',
                    'https://github.com/bitfinexcom/bitfinex-api-node',
                ],
            },
            'api': {
                # v2 symbol ids require a 't' prefix
                # just the public part of it(use bitfinex2 for everything else)
                'v2': {
                    'get': [
                        'platform/status',
                        'tickers',
                        'ticker/{symbol}',
                        'trades/{symbol}/hist',
                        'book/{symbol}/{precision}',
                        'book/{symbol}/P0',
                        'book/{symbol}/P1',
                        'book/{symbol}/P2',
                        'book/{symbol}/P3',
                        'book/{symbol}/R0',
                        'stats1/{key}:{size}:{symbol}:{side}/{section}',
                        'stats1/{key}:{size}:{symbol}/{section}',
                        'stats1/{key}:{size}:{symbol}:long/last',
                        'stats1/{key}:{size}:{symbol}:long/hist',
                        'stats1/{key}:{size}:{symbol}:short/last',
                        'stats1/{key}:{size}:{symbol}:short/hist',
                        'candles/trade:{timeframe}:{symbol}/{section}',
                        'candles/trade:{timeframe}:{symbol}/last',
                        'candles/trade:{timeframe}:{symbol}/hist',
                    ],
                },
                'public': {
                    'get': [
                        'book/{symbol}',
                        # 'candles/{symbol}',
                        'lendbook/{currency}',
                        'lends/{currency}',
                        'pubticker/{symbol}',
                        'stats/{symbol}',
                        'symbols',
                        'symbols_details',
                        'tickers',
                        'trades/{symbol}',
                    ],
                },
                'private': {
                    'post': [
                        'account_fees',
                        'account_infos',
                        'balances',
                        'basket_manage',
                        'credits',
                        'deposit/new',
                        'funding/close',
                        'history',
                        'history/movements',
                        'key_info',
                        'margin_infos',
                        'mytrades',
                        'mytrades_funding',
                        'offer/cancel',
                        'offer/new',
                        'offer/status',
                        'offers',
                        'offers/hist',
                        'order/cancel',
                        'order/cancel/all',
                        'order/cancel/multi',
                        'order/cancel/replace',
                        'order/new',
                        'order/new/multi',
                        'order/status',
                        'orders',
                        'orders/hist',
                        'position/claim',
                        'position/close',
                        'positions',
                        'summary',
                        'taken_funds',
                        'total_taken_funds',
                        'transfer',
                        'unused_taken_funds',
                        'withdraw',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'maker': 0.1 / 100,
                    'taker': 0.2 / 100,
                    # 30-day volume tiers: [threshold in USD, fee rate]
                    'tiers': {
                        'taker': [
                            [0, 0.2 / 100],
                            [500000, 0.2 / 100],
                            [1000000, 0.2 / 100],
                            [2500000, 0.2 / 100],
                            [5000000, 0.2 / 100],
                            [7500000, 0.2 / 100],
                            [10000000, 0.18 / 100],
                            [15000000, 0.16 / 100],
                            [20000000, 0.14 / 100],
                            [25000000, 0.12 / 100],
                            [30000000, 0.1 / 100],
                        ],
                        'maker': [
                            [0, 0.1 / 100],
                            [500000, 0.08 / 100],
                            [1000000, 0.06 / 100],
                            [2500000, 0.04 / 100],
                            [5000000, 0.02 / 100],
                            [7500000, 0],
                            [10000000, 0],
                            [15000000, 0],
                            [20000000, 0],
                            [25000000, 0],
                            [30000000, 0],
                        ],
                    },
                },
                'funding': {
                    'tierBased': False,  # True for tier-based/progressive
                    'percentage': False,  # fixed commission
                    # Actually deposit fees are free for larger deposits(> $1000 USD equivalent)
                    # these values below are deprecated, we should not hardcode fees and limits anymore
                    # to be reimplemented with bitfinex funding fees from their API or web endpoints
                    'deposit': {
                        'BTC': 0.0004,
                        'IOTA': 0.5,
                        'ETH': 0.0027,
                        'BCH': 0.0001,
                        'LTC': 0.001,
                        'EOS': 0.24279,
                        'XMR': 0.04,
                        'SAN': 0.99269,
                        'DASH': 0.01,
                        'ETC': 0.01,
                        'XRP': 0.02,
                        'YYW': 16.915,
                        'NEO': 0,
                        'ZEC': 0.001,
                        'BTG': 0,
                        'OMG': 0.14026,
                        'DATA': 20.773,
                        'QASH': 1.9858,
                        'ETP': 0.01,
                        'QTUM': 0.01,
                        'EDO': 0.95001,
                        'AVT': 1.3045,
                        'USDT': 0,
                        'TRX': 28.184,
                        'ZRX': 1.9947,
                        'RCN': 10.793,
                        'TNB': 31.915,
                        'SNT': 14.976,
                        'RLC': 1.414,
                        'GNT': 5.8952,
                        'SPK': 10.893,
                        'REP': 0.041168,
                        'BAT': 6.1546,
                        'ELF': 1.8753,
                        'FUN': 32.336,
                        'SNG': 18.622,
                        'AID': 8.08,
                        'MNA': 16.617,
                        'NEC': 1.6504,
                        'XTZ': 0.2,
                    },
                    'withdraw': {
                        'BTC': 0.0004,
                        'IOTA': 0.5,
                        'ETH': 0.0027,
                        'BCH': 0.0001,
                        'LTC': 0.001,
                        'EOS': 0.24279,
                        'XMR': 0.04,
                        'SAN': 0.99269,
                        'DASH': 0.01,
                        'ETC': 0.01,
                        'XRP': 0.02,
                        'YYW': 16.915,
                        'NEO': 0,
                        'ZEC': 0.001,
                        'BTG': 0,
                        'OMG': 0.14026,
                        'DATA': 20.773,
                        'QASH': 1.9858,
                        'ETP': 0.01,
                        'QTUM': 0.01,
                        'EDO': 0.95001,
                        'AVT': 1.3045,
                        'USDT': 20,
                        'TRX': 28.184,
                        'ZRX': 1.9947,
                        'RCN': 10.793,
                        'TNB': 31.915,
                        'SNT': 14.976,
                        'RLC': 1.414,
                        'GNT': 5.8952,
                        'SPK': 10.893,
                        'REP': 0.041168,
                        'BAT': 6.1546,
                        'ELF': 1.8753,
                        'FUN': 32.336,
                        'SNG': 18.622,
                        'AID': 8.08,
                        'MNA': 16.617,
                        'NEC': 1.6504,
                        'XTZ': 0.2,
                    },
                },
            },
            # Bitfinex ticker symbol -> unified ccxt currency code
            'commonCurrencies': {
                'ABS': 'ABYSS',
                'AIO': 'AION',
                'ALG': 'ALGO',  # https://github.com/ccxt/ccxt/issues/6034
                'AMP': 'AMPL',
                'ATM': 'ATMI',
                'ATO': 'ATOM',  # https://github.com/ccxt/ccxt/issues/5118
                'BAB': 'BCH',
                'CTX': 'CTXC',
                'DAD': 'DADI',
                'DAT': 'DATA',
                'DSH': 'DASH',
                'DRK': 'DRK',
                'GSD': 'GUSD',
                'HOT': 'Hydro Protocol',
                'IOS': 'IOST',
                'IOT': 'IOTA',
                'IQX': 'IQ',
                'MIT': 'MITH',
                'MNA': 'MANA',
                'NCA': 'NCASH',
                'ORS': 'ORS Group',  # conflict with Origin Sport #3230
                'POY': 'POLY',
                'QSH': 'QASH',
                'QTM': 'QTUM',
                'SEE': 'SEER',
                'SNG': 'SNGLS',
                'SPK': 'SPANK',
                'STJ': 'STORJ',
                'TSD': 'TUSD',
                'YYW': 'YOYOW',
                'UDC': 'USDC',
                'UST': 'USDT',
                'UTN': 'UTNP',
                'VSY': 'VSYS',
                'XCH': 'XCHF',
            },
            # API error message -> ccxt exception class
            'exceptions': {
                'exact': {
                    'temporarily_unavailable': ExchangeNotAvailable,  # Sorry, the service is temporarily unavailable. See https://www.bitfinex.com/ for more info.
                    'Order could not be cancelled.': OrderNotFound,  # non-existent order
                    'No such order found.': OrderNotFound,  # ?
                    'Order price must be positive.': InvalidOrder,  # on price <= 0
                    'Could not find a key matching the given X-BFX-APIKEY.': AuthenticationError,
                    'Key price should be a decimal number, e.g. "123.456"': InvalidOrder,  # on isNaN(price)
                    'Key amount should be a decimal number, e.g. "123.456"': InvalidOrder,  # on isNaN(amount)
                    'ERR_RATE_LIMIT': RateLimitExceeded,
                    'Ratelimit': RateLimitExceeded,
                    'Nonce is too small.': InvalidNonce,
                    'No summary found.': ExchangeError,  # fetchTradingFees(summary) endpoint can give self vague error message
                    'Cannot evaluate your available balance, please try again': ExchangeNotAvailable,
                },
                'broad': {
                    'This API key does not have permission': PermissionDenied,  # authenticated but not authorized
                    'not enough exchange balance for ': InsufficientFunds,  # when buying cost is greater than the available quote currency
                    'minimum size for ': InvalidOrder,  # when amount below limits.amount.min
                    'Invalid order': InvalidOrder,  # ?
                    'The available balance is only': InsufficientFunds,  # {"status":"error","message":"Cannot withdraw 1.0027 ETH from your exchange wallet. The available balance is only 0.0 ETH. If you have limit orders, open positions, unused or active margin funding, self will decrease your available balance. To increase it, you can cancel limit orders or reduce/close your positions.","withdrawal_id":0,"fees":"0.0027"}
                },
            },
            'precisionMode': SIGNIFICANT_DIGITS,
            'options': {
                # unified code -> bitfinex withdrawal "method" name
                'currencyNames': {
                    'AGI': 'agi',
                    'AID': 'aid',
                    'AIO': 'aio',
                    'ANT': 'ant',
                    'AVT': 'aventus',  # #1811
                    'BAT': 'bat',
                    # https://github.com/ccxt/ccxt/issues/5833
                    'BCH': 'bab',  # undocumented
                    # 'BCH': 'bcash',  # undocumented
                    'BCI': 'bci',
                    'BFT': 'bft',
                    'BTC': 'bitcoin',
                    'BTG': 'bgold',
                    'CFI': 'cfi',
                    'DAI': 'dai',
                    'DADI': 'dad',
                    'DASH': 'dash',
                    'DATA': 'datacoin',
                    'DTH': 'dth',
                    'EDO': 'eidoo',  # #1811
                    'ELF': 'elf',
                    'EOS': 'eos',
                    'ETC': 'ethereumc',
                    'ETH': 'ethereum',
                    'ETP': 'metaverse',
                    'FUN': 'fun',
                    'GNT': 'golem',
                    'IOST': 'ios',
                    'IOTA': 'iota',
                    # https://github.com/ccxt/ccxt/issues/5833
                    'LEO': 'let',  # ETH chain
                    # 'LEO': 'les',  # EOS chain
                    'LRC': 'lrc',
                    'LTC': 'litecoin',
                    'LYM': 'lym',
                    'MANA': 'mna',
                    'MIT': 'mit',
                    'MKR': 'mkr',
                    'MTN': 'mtn',
                    'NEO': 'neo',
                    'ODE': 'ode',
                    'OMG': 'omisego',
                    'OMNI': 'mastercoin',
                    'QASH': 'qash',
                    'QTUM': 'qtum',  # #1811
                    'RCN': 'rcn',
                    'RDN': 'rdn',
                    'REP': 'rep',
                    'REQ': 'req',
                    'RLC': 'rlc',
                    'SAN': 'santiment',
                    'SNGLS': 'sng',
                    'SNT': 'status',
                    'SPANK': 'spk',
                    'STORJ': 'stj',
                    'TNB': 'tnb',
                    'TRX': 'trx',
                    'USD': 'wire',
                    'USDC': 'udc',  # https://github.com/ccxt/ccxt/issues/5833
                    'UTK': 'utk',
                    'USDT': 'tetheruso',  # Tether on Omni
                    # 'USDT': 'tetheruse',  # Tether on ERC20
                    # 'USDT': 'tetherusl',  # Tether on Liquid
                    # 'USDT': 'tetherusx',  # Tether on Tron
                    # 'USDT': 'tetheruss',  # Tether on EOS
                    'VEE': 'vee',
                    'WAX': 'wax',
                    'XLM': 'xlm',
                    'XMR': 'monero',
                    'XRP': 'ripple',
                    'XVG': 'xvg',
                    'YOYOW': 'yoyow',
                    'ZEC': 'zcash',
                    'ZRX': 'zrx',
                    'XTZ': 'tezos',
                },
                'orderTypes': {
                    'limit': 'exchange limit',
                    'market': 'exchange market',
                },
            },
        })
async def fetch_funding_fees(self, params={}):
    """Fetch per-currency withdrawal fees from the private account-fees endpoint."""
    await self.load_markets()
    response = await self.privatePostAccountFees(params)
    fees = response['withdraw']
    withdraw = {}
    for currencyId in list(fees.keys()):
        unifiedCode = self.safe_currency_code(currencyId)
        withdraw[unifiedCode] = self.safe_float(fees, currencyId)
    return {
        'info': response,
        'withdraw': withdraw,
        'deposit': withdraw,  # only for deposits of less than $1000
    }
async def fetch_trading_fees(self, params={}):
    """Fetch the account's maker/taker fee rates from the private summary endpoint."""
    await self.load_markets()
    summary = await self.privatePostSummary(params)
    # The response carries many fields; only the fee rates are used here, e.g.:
    #
    #     {
    #         time: '2019-02-20T15:50:19.152000Z',
    #         trade_vol_30d: [{curr: 'Total(USD)', vol: 0, ...}],
    #         fees_funding_30d: {},
    #         fees_funding_total_30d: 0,
    #         fees_trading_30d: {},
    #         fees_trading_total_30d: 0,
    #         maker_fee: 0.001,
    #         taker_fee: 0.002
    #     }
    #
    maker = self.safe_float(summary, 'maker_fee')
    taker = self.safe_float(summary, 'taker_fee')
    return {
        'info': summary,
        'maker': maker,
        'taker': taker,
    }
async def fetch_markets(self, params={}):
    """Build the unified market list from the public symbols + details endpoints.

    Only pairs present in both endpoints are returned. Market ids either
    contain an explicit ':' separator or follow the legacy 3+3 character
    base/quote layout.
    """
    ids = await self.publicGetSymbols()
    details = await self.publicGetSymbolsDetails()
    result = []
    for i in range(0, len(details)):
        market = details[i]
        id = self.safe_string(market, 'pair')
        if not self.in_array(id, ids):
            continue
        id = id.upper()
        baseId = None
        quoteId = None
        if id.find(':') >= 0:
            parts = id.split(':')
            baseId = parts[0]
            quoteId = parts[1]
        else:
            baseId = id[0:3]
            quoteId = id[3:6]
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        symbol = base + '/' + quote
        precision = {
            'price': market['price_precision'],
            'amount': None,
        }
        minAmount = self.safe_float(market, 'minimum_order_size')
        minPrice = math.pow(10, -precision['price'])
        limits = {
            'amount': {
                'min': minAmount,
                'max': self.safe_float(market, 'maximum_order_size'),
            },
            'price': {
                'min': minPrice,
                'max': math.pow(10, precision['price']),
            },
        }
        # BUGFIX: minimum_order_size may be absent, in which case safe_float
        # returns None and the old unconditional multiplication raised TypeError
        minCost = (minAmount * minPrice) if (minAmount is not None) else None
        limits['cost'] = {
            'min': minCost,
            'max': None,
        }
        result.append({
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'active': True,
            'precision': precision,
            'limits': limits,
            'info': market,
        })
    return result
def amount_to_precision(self, symbol, amount):
    """The exchange accepts arbitrary amount precision, so only stringify the value."""
    return self.number_to_string(amount)
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
    """Compute the trading fee for an order.

    Sells are charged in the quote currency (amount * rate * price),
    buys in the base currency (amount * rate). The cost is rounded to
    the fee currency's precision when that precision is known.
    """
    market = self.markets[symbol]
    rate = market[takerOrMaker]
    if side == 'sell':
        key = 'quote'
        cost = amount * rate * price
    else:
        key = 'base'
        cost = amount * rate
    code = market[key]
    currency = self.safe_value(self.currencies, code)
    if currency is not None:
        precision = self.safe_integer(currency, 'precision')
        if precision is not None:
            cost = float(self.currency_to_precision(code, cost))
    return {
        'type': takerOrMaker,
        'currency': market[key],
        'rate': rate,
        'cost': cost,
    }
async def fetch_balance(self, params={}):
    """Fetch balances of the requested wallet type ('exchange' by default)."""
    await self.load_markets()
    balanceType = self.safe_string(params, 'type', 'exchange')
    query = self.omit(params, 'type')
    response = await self.privatePostBalances(query)
    result = {'info': response}
    for balance in response:
        if balance['type'] != balanceType:
            continue
        currencyId = self.safe_string(balance, 'currency')
        code = self.safe_currency_code(currencyId)
        # Keep only the first occurrence per code: bitfinex had BCH previously,
        # now it's BAB, and the old BCH entry must not override the new BAB
        # balance (BAB is unified to BCH).
        # https://github.com/ccxt/ccxt/issues/4989
        if code in result:
            continue
        account = self.account()
        account['free'] = self.safe_float(balance, 'available')
        account['total'] = self.safe_float(balance, 'amount')
        result[code] = account
    return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the order book; `limit` bounds both the bid and ask sides."""
    await self.load_markets()
    request = {'symbol': self.market_id(symbol)}
    if limit is not None:
        request['limit_bids'] = limit
        request['limit_asks'] = limit
    response = await self.publicGetBookSymbol(self.extend(request, params))
    return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'amount')
async def fetch_tickers(self, symbols=None, params={}):
    """Fetch all tickers, keyed by unified symbol."""
    await self.load_markets()
    response = await self.publicGetTickers(params)
    parsed = [self.parse_ticker(raw) for raw in response]
    return {ticker['symbol']: ticker for ticker in parsed}
async def fetch_ticker(self, symbol, params={}):
    """Fetch a single ticker for the given unified symbol."""
    await self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    raw = await self.publicGetPubtickerSymbol(self.extend(request, params))
    return self.parse_ticker(raw, market)
def parse_ticker(self, ticker, market=None):
    """Convert a raw bitfinex ticker dict into the unified ccxt ticker structure.

    The symbol is resolved from `market` when given, otherwise from the
    ticker's 'pair' field, falling back to the legacy 3+3 character
    base/quote layout for unknown market ids.
    """
    timestamp = self.safe_float(ticker, 'timestamp')
    if timestamp is not None:
        timestamp *= 1000  # exchange reports seconds, unified structure uses milliseconds
    symbol = None
    if market is not None:
        symbol = market['symbol']
    elif 'pair' in ticker:
        marketId = self.safe_string(ticker, 'pair')
        if marketId is not None:
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
                symbol = market['symbol']
            else:
                # unknown market id: assume the legacy 3+3 character layout
                baseId = marketId[0:3]
                quoteId = marketId[3:6]
                base = self.safe_currency_code(baseId)
                quote = self.safe_currency_code(quoteId)
                symbol = base + '/' + quote
    last = self.safe_float(ticker, 'last_price')
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': self.safe_float(ticker, 'high'),
        'low': self.safe_float(ticker, 'low'),
        'bid': self.safe_float(ticker, 'bid'),
        'bidVolume': None,
        'ask': self.safe_float(ticker, 'ask'),
        'askVolume': None,
        'vwap': None,
        'open': None,
        'close': last,
        'last': last,
        'previousClose': None,
        'change': None,
        'percentage': None,
        'average': self.safe_float(ticker, 'mid'),
        'baseVolume': self.safe_float(ticker, 'volume'),
        'quoteVolume': None,
        'info': ticker,
    }
def parse_trade(self, trade, market):
    """Convert a raw trade dict into the unified ccxt trade structure.

    `market` is required here (unlike most parsers) because the symbol is
    taken directly from it.
    """
    id = self.safe_string(trade, 'tid')
    timestamp = self.safe_float(trade, 'timestamp')
    if timestamp is not None:
        timestamp = int(timestamp) * 1000  # seconds -> milliseconds
    type = None
    side = self.safe_string_lower(trade, 'type')
    orderId = self.safe_string(trade, 'order_id')
    price = self.safe_float(trade, 'price')
    amount = self.safe_float(trade, 'amount')
    cost = None
    if price is not None:
        if amount is not None:
            cost = price * amount
    fee = None
    if 'fee_amount' in trade:
        # fee_amount is negated here -- presumably the exchange reports
        # charges as negative values; confirm against the v1 API docs
        feeCost = -self.safe_float(trade, 'fee_amount')
        feeCurrencyId = self.safe_string(trade, 'fee_currency')
        feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
        fee = {
            'cost': feeCost,
            'currency': feeCurrencyCode,
        }
    return {
        'id': id,
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': market['symbol'],
        'type': type,
        'order': orderId,
        'side': side,
        'takerOrMaker': None,
        'price': price,
        'amount': amount,
        'cost': cost,
        'fee': fee,
    }
async def fetch_trades(self, symbol, since=None, limit=50, params={}):
    """Fetch public trades for a symbol (at most `limit`, default 50)."""
    await self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
        'limit_trades': limit,
    }
    if since is not None:
        request['timestamp'] = int(since / 1000)  # exchange expects seconds
    response = await self.publicGetTradesSymbol(self.extend(request, params))
    return self.parse_trades(response, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's own trades; a symbol is mandatory on this endpoint."""
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchMyTrades requires a `symbol` argument')
    await self.load_markets()
    market = self.market(symbol)
    request = {'symbol': market['id']}
    if limit is not None:
        request['limit_trades'] = limit
    if since is not None:
        request['timestamp'] = int(since / 1000)  # exchange expects seconds
    response = await self.privatePostMytrades(self.extend(request, params))
    return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Place an order on the v1 endpoint.

    Order types are mapped through self.options['orderTypes'] (e.g.
    'limit' -> 'exchange limit'). OCO fields are sent disabled.
    """
    await self.load_markets()
    request = {
        'symbol': self.market_id(symbol),
        'side': side,
        'amount': self.amount_to_precision(symbol, amount),
        'type': self.safe_string(self.options['orderTypes'], type, type),
        'ocoorder': False,
        'buy_price_oco': 0,
        'sell_price_oco': 0,
    }
    if type == 'market':
        # NOTE(review): a throwaway unique value is sent as the price for
        # market orders -- presumably because the endpoint requires a price
        # field regardless; verify against the v1 API docs
        request['price'] = str(self.nonce())
    else:
        request['price'] = self.price_to_precision(symbol, price)
    response = await self.privatePostOrderNew(self.extend(request, params))
    return self.parse_order(response)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
    """Cancel-and-replace an existing order; only non-None fields are sent."""
    await self.load_markets()
    request = {'order_id': id}
    if price is not None:
        request['price'] = self.price_to_precision(symbol, price)
    if amount is not None:
        request['amount'] = self.number_to_string(amount)
    if symbol is not None:
        request['symbol'] = self.market_id(symbol)
    if side is not None:
        request['side'] = side
    if type is not None:
        request['type'] = self.safe_string(self.options['orderTypes'], type, type)
    response = await self.privatePostOrderCancelReplace(self.extend(request, params))
    return self.parse_order(response)
async def cancel_order(self, id, symbol=None, params={}):
    """Cancel an order by its numeric id; returns the raw exchange response."""
    await self.load_markets()
    request = {'order_id': int(id)}
    return await self.privatePostOrderCancel(self.extend(request, params))
async def cancel_all_orders(self, symbol=None, params={}):
    """Cancel all open orders; the `symbol` argument is accepted but not used."""
    return await self.privatePostOrderCancelAll(params)
def parse_order(self, order, market=None):
    """Convert a raw order dict into the unified ccxt order structure.

    Status is derived from the is_live/is_cancelled flags; an
    'exchange ...' type prefix is stripped off before returning.
    """
    side = self.safe_string(order, 'side')
    open = self.safe_value(order, 'is_live')
    canceled = self.safe_value(order, 'is_cancelled')
    status = None
    if open:
        status = 'open'
    elif canceled:
        status = 'canceled'
    else:
        status = 'closed'
    symbol = None
    if market is None:
        # raw symbols are lowercase; upper-case before the markets_by_id lookup
        marketId = self.safe_string_upper(order, 'symbol')
        if marketId is not None:
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
    if market is not None:
        symbol = market['symbol']
    orderType = order['type']
    # strip the wallet prefix: 'exchange limit' -> 'limit'
    exchange = orderType.find('exchange ') >= 0
    if exchange:
        parts = order['type'].split(' ')
        orderType = parts[1]
    timestamp = self.safe_float(order, 'timestamp')
    if timestamp is not None:
        timestamp = int(timestamp) * 1000  # seconds -> milliseconds
    id = self.safe_string(order, 'id')
    return {
        'info': order,
        'id': id,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': None,
        'symbol': symbol,
        'type': orderType,
        'side': side,
        'price': self.safe_float(order, 'price'),
        'average': self.safe_float(order, 'avg_execution_price'),
        'amount': self.safe_float(order, 'original_amount'),
        'remaining': self.safe_float(order, 'remaining_amount'),
        'filled': self.safe_float(order, 'executed_amount'),
        'status': status,
        'fee': None,
    }
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """List open orders; filtering by symbol happens client-side."""
    await self.load_markets()
    if (symbol is not None) and (symbol not in self.markets):
        raise ExchangeError(self.id + ' has no symbol ' + symbol)
    response = await self.privatePostOrders(params)
    orders = self.parse_orders(response, None, since, limit)
    if symbol is not None:
        orders = self.filter_by(orders, 'symbol', symbol)
    return orders
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch historical orders, keeping only closed/canceled ones."""
    await self.load_markets()
    request = {}
    if limit is not None:
        request['limit'] = limit
    response = await self.privatePostOrdersHist(self.extend(request, params))
    orders = self.parse_orders(response, None, since, limit)
    if symbol is not None:
        orders = self.filter_by(orders, 'symbol', symbol)
    return self.filter_by_array(orders, 'status', ['closed', 'canceled'], False)
async def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by its numeric id."""
    await self.load_markets()
    request = {'order_id': int(id)}
    response = await self.privatePostOrderStatus(self.extend(request, params))
    return self.parse_order(response)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
    """Reorder a raw v2 candle into the unified OHLCV layout.

    Input indices map as: 0 -> timestamp, 1 -> open, 3 -> high,
    4 -> low, 2 -> close, 5 -> volume.
    """
    ts, o, c, h, l, v = ohlcv[0], ohlcv[1], ohlcv[2], ohlcv[3], ohlcv[4], ohlcv[5]
    return [ts, o, h, l, c, v]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """Fetch candles from the v2 endpoint (v2 market ids carry a 't' prefix)."""
    await self.load_markets()
    if limit is None:
        limit = 100
    market = self.market(symbol)
    request = {
        'symbol': 't' + market['id'],
        'timeframe': self.timeframes[timeframe],
        'sort': 1,
        'limit': limit,
    }
    if since is not None:
        request['start'] = since
    response = await self.v2GetCandlesTradeTimeframeSymbolHist(self.extend(request, params))
    return self.parse_ohlcvs(response, market, timeframe, since, limit)
def get_currency_name(self, code):
    """Map a unified currency code to the exchange's withdrawal method name."""
    names = self.options['currencyNames']
    if code not in names:
        raise NotSupported(self.id + ' ' + code + ' not supported for withdrawal')
    return names[code]
async def create_deposit_address(self, code, params={}):
    """Force-generate a fresh deposit address by requesting a renewal."""
    await self.load_markets()
    response = await self.fetch_deposit_address(code, self.extend({'renew': 1}, params))
    address = self.safe_string(response, 'address')
    self.check_address(address)
    return {
        'info': response['info'],
        'currency': code,
        'address': address,
        'tag': None,
    }
async def fetch_deposit_address(self, code, params={}):
    """Fetch (or reuse) the exchange-wallet deposit address for a currency."""
    await self.load_markets()
    request = {
        'method': self.get_currency_name(code),
        'wallet_name': 'exchange',
        'renew': 0,  # a value of 1 will generate a new address
    }
    response = await self.privatePostDepositNew(self.extend(request, params))
    address = self.safe_value(response, 'address')
    tag = None
    if 'address_pool' in response:
        # pooled currencies return the memo/tag in 'address' and the real
        # on-chain address in 'address_pool'
        tag = address
        address = response['address_pool']
    self.check_address(address)
    return {
        'currency': code,
        'address': address,
        'tag': tag,
        'info': response,
    }
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
    """Fetch deposit/withdrawal history; a currency code is mandatory."""
    if code is None:
        raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency `code` argument')
    await self.load_markets()
    currency = self.currency(code)
    request = {'currency': currency['id']}
    if since is not None:
        request['since'] = int(since / 1000)  # exchange expects seconds
    response = await self.privatePostHistoryMovements(self.extend(request, params))
    # The response is a list of movement dicts, e.g.:
    #
    #     {
    #         "id":581183,
    #         "txid": 123456,
    #         "currency":"BTC",
    #         "method":"BITCOIN",
    #         "type":"WITHDRAWAL",
    #         "amount":".01",
    #         "address":"3QXYWgRGX2BPYBpUDBssGbeWEa5zq6snBZ",
    #         "status":"COMPLETED",
    #         "timestamp":"1443833327.0",
    #         "timestamp_created": "1443833327.1",
    #         "fee": 0.1,
    #     }
    #
    return self.parse_transactions(response, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
    """Convert a raw movement dict into the unified ccxt transaction structure.

    'timestamp_created' becomes the creation timestamp and 'timestamp' the
    last-update timestamp; both are converted from seconds to milliseconds.
    The fee is reported negative by the exchange and is returned as an
    absolute value here.
    """
    #
    # crypto
    #
    #     {
    #         "id": 12042490,
    #         "fee": "-0.02",
    #         "txid": "EA5B5A66000B66855865EFF2494D7C8D1921FCBE996482157EBD749F2C85E13D",
    #         "type": "DEPOSIT",
    #         "amount": "2099.849999",
    #         "method": "RIPPLE",
    #         "status": "COMPLETED",
    #         "address": "2505189261",
    #         "currency": "XRP",
    #         "timestamp": "1551730524.0",
    #         "description": "EA5B5A66000B66855865EFF2494D7C8D1921FCBE996482157EBD749F2C85E13D",
    #         "timestamp_created": "1551730523.0"
    #     }
    #
    # fiat
    #
    #     {
    #         "id": 12725095,
    #         "fee": "-60.0",
    #         "txid": null,
    #         "type": "WITHDRAWAL",
    #         "amount": "9943.0",
    #         "method": "WIRE",
    #         "status": "SENDING",
    #         "address": null,
    #         "currency": "EUR",
    #         "timestamp": "1561802484.0",
    #         "description": "Name: bob, AccountAddress: some address, Account: someaccountno, Bank: bank address, SWIFT: foo, Country: UK, Details of Payment: withdrawal name, Intermediary Bank Name: , Intermediary Bank Address: , Intermediary Bank City: , Intermediary Bank Country: , Intermediary Bank Account: , Intermediary Bank SWIFT: , Fee: -60.0",
    #         "timestamp_created": "1561716066.0"
    #     }
    #
    timestamp = self.safe_float(transaction, 'timestamp_created')
    if timestamp is not None:
        timestamp = int(timestamp * 1000)  # seconds -> milliseconds
    updated = self.safe_float(transaction, 'timestamp')
    if updated is not None:
        updated = int(updated * 1000)  # seconds -> milliseconds
    currencyId = self.safe_string(transaction, 'currency')
    code = self.safe_currency_code(currencyId, currency)
    type = self.safe_string_lower(transaction, 'type')  # DEPOSIT or WITHDRAWAL
    status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
    feeCost = self.safe_float(transaction, 'fee')
    if feeCost is not None:
        feeCost = abs(feeCost)
    return {
        'info': transaction,
        'id': self.safe_string(transaction, 'id'),
        'txid': self.safe_string(transaction, 'txid'),
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'address': self.safe_string(transaction, 'address'),  # todo: self is actually the tag for XRP transfers(the address is missing)
        'tag': None,  # refix it properly for the tag from description
        'type': type,
        'amount': self.safe_float(transaction, 'amount'),
        'currency': code,
        'status': status,
        'updated': updated,
        'fee': {
            'currency': code,
            'cost': feeCost,
            'rate': None,
        },
    }
def parse_transaction_status(self, status):
    """Translate an exchange transaction status into the unified vocabulary;
    unknown values are passed through unchanged."""
    statuses = {
        'SENDING': 'pending',
        'CANCELED': 'canceled',
        # ZEROCONFIRMED happens e.g. in a double-spend attempt
        'ZEROCONFIRMED': 'failed',
        'COMPLETED': 'ok',
    }
    return self.safe_string(statuses, status, status)
async def withdraw(self, code, amount, address, tag=None, params={}):
    """Withdraw funds from the exchange wallet.

    Raises a mapped exception (or ExchangeError) when the exchange reports a
    zero withdrawal id, which signals a failed withdrawal.
    """
    self.check_address(address)
    name = self.get_currency_name(code)
    request = {
        'withdraw_type': name,
        'walletselected': 'exchange',
        'amount': str(amount),
        'address': address,
    }
    if tag is not None:
        request['payment_id'] = tag
    responses = await self.privatePostWithdraw(self.extend(request, params))
    response = responses[0]
    id = self.safe_string(response, 'withdrawal_id')
    message = self.safe_string(response, 'message')
    errorMessage = self.find_broadly_matched_key(self.exceptions['broad'], message)
    # BUGFIX: id is a string here (safe_string), so the old `id == 0`
    # comparison was always False and failed withdrawals were silently
    # reported as successful
    if id == '0':
        if errorMessage is not None:
            ExceptionClass = self.exceptions['broad'][errorMessage]
            raise ExceptionClass(self.id + ' ' + message)
        raise ExchangeError(self.id + ' withdraw returned an id of zero: ' + self.json(response))
    return {
        'info': response,
        'id': id,
    }
def nonce(self):
    """Use the current millisecond timestamp as a monotonically increasing nonce."""
    return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Build the signed request for the given API section.

    Public endpoints (and '/hist' paths) put the query in the URL; private
    endpoints JSON-encode nonce + request + params, base64 it, and sign the
    payload with HMAC-SHA384 via the X-BFX-* headers.
    """
    request = '/' + self.implode_params(path, params)
    if api == 'v2':
        request = '/' + api + request
    else:
        request = '/' + self.version + request
    query = self.omit(params, self.extract_params(path))
    url = self.urls['api'][api] + request
    if (api == 'public') or (path.find('/hist') >= 0):
        if query:
            suffix = '?' + self.urlencode(query)
            url += suffix
            # the signed request string must match the URL, suffix included
            request += suffix
    if api == 'private':
        self.check_required_credentials()
        nonce = self.nonce()
        query = self.extend({
            'nonce': str(nonce),
            'request': request,
        }, query)
        body = self.json(query)
        query = self.encode(body)
        payload = base64.b64encode(query)
        secret = self.encode(self.secret)
        signature = self.hmac(payload, secret, hashlib.sha384)
        headers = {
            'X-BFX-APIKEY': self.apiKey,
            'X-BFX-PAYLOAD': self.decode(payload),
            'X-BFX-SIGNATURE': signature,
        }
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
    """Map HTTP >= 400 JSON error responses onto ccxt exceptions.

    Non-JSON bodies and successful responses fall through without raising.
    """
    if response is None:
        return
    if code >= 400:
        # BUGFIX: guard against an empty body -- the old unconditional
        # body[0] lookup raised IndexError on empty error responses
        if body and body[0] == '{':
            feedback = self.id + ' ' + body
            message = self.safe_string_2(response, 'message', 'error')
            self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
            self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
            raise ExchangeError(feedback)  # unknown message
|
from django.http import HttpResponseRedirect
from django.urls import path, reverse
from django.utils.translation import pgettext_lazy
from .views import show_jurisdiction
# URL routes for the jurisdiction views.
# NOTE(review): both redirect lambdas take a `slug` kwarg, but these routes
# declare no <slug:slug> converter -- presumably this urlconf is included
# under a prefix that captures `slug`; verify against the including urlconf.
urlpatterns = [
    path("", show_jurisdiction, name="publicbody-show_jurisdiction"),
    # Legacy "entity/" route: redirect to the public-body list filtered by
    # jurisdiction (the route text is translatable).
    path(
        pgettext_lazy("url part", "entity/"),
        lambda r, slug: HttpResponseRedirect(
            reverse("publicbody-list", kwargs={"jurisdiction": slug})
        ),
    ),
    # Same redirect under the plural "entities/" spelling.
    path(
        pgettext_lazy("url part", "entities/"),
        lambda r, slug: HttpResponseRedirect(
            reverse("publicbody-list", kwargs={"jurisdiction": slug})
        ),
    ),
]
|
import urwid
class TableCell(urwid.Text):
    """A fixed-width text cell that clips long content with a '»' marker.

    When `separator` is enabled, one character of the width is reserved for
    a trailing '|' divider.
    """

    def __init__(self, content, width=10, separator=True, align='left'):
        self.separator = separator
        self._content = content
        self._align = align
        self._width = width
        self._content = self._render_content()
        super().__init__(self._content, align, 'clip')

    def _render_content(self):
        """Return the cell text truncated/padded to the configured width."""
        text = str(self._content)
        usable = (self._width - 1) if self.separator else len(text)
        if len(text) > usable:
            text = text[:usable - 1] + chr(187)  # '»' marks truncation
        pad = str.ljust if self._align == 'left' else str.rjust
        text = pad(text, usable)
        if self.separator:
            text += chr(124)  # trailing '|' divider
        return text

    def resize(self, width):
        """Change the cell width and refresh the rendered text."""
        self._width = width
        self.set_text(self._render_content())
class TableRow(urwid.Columns):
    """One table row, rendered as a set of TableCell columns."""

    def __init__(self, data, selectable=True):
        # data: iterable of cell values; selectable: whether the row may take focus
        self.data = data
        self._selectable = selectable
        columns = self._create_columns(data)
        super().__init__(columns, dividechars=1)

    def selectable(self):
        # BUGFIX: honour the `selectable` constructor flag -- it was stored
        # but ignored, so non-selectable rows (e.g. headers) were focusable
        return self._selectable

    def _create_columns(self, data):
        """Wrap every value in a TableCell."""
        return [TableCell(str(value)) for value in data]
class TableHeader(TableRow):
    """A non-selectable TableRow used as the table's column header."""

    def __init__(self, headers):
        super().__init__(headers, False)
class TableView(urwid.ListBox):
    """Scrollable list of table rows."""

    def __init__(self, rows=None):
        # BUGFIX: avoid a mutable default argument -- the shared `rows=[]`
        # list leaked rows between TableView instances
        if rows is None:
            rows = []
        super().__init__(urwid.SimpleFocusListWalker(rows))

    def __iter__(self):
        for row in self.body:
            yield row

    def keypress(self, size, key):
        # let the listbox scroll on up/down, but still propagate the key
        if key in ('up', 'down'):
            super().keypress(size, key)
        return key

    @property
    def focused_row(self):
        """The TableRow wrapped by the currently focused AttrMap."""
        row, index = self.body.get_focus()
        return row.original_widget

    def add_row(self, data):
        """Append a row built from a mapping's values."""
        row = urwid.AttrMap(TableRow(data.values()), None, focus_map='heading inactive')
        self.body.append(row)
class Table(urwid.Frame):
    """Frame combining a header row with a TableView body.

    Emits 'item_selected' (with the focused row as a dict) on enter, and
    'keypress' for every key.
    """

    def __init__(self, result):
        self._result = result
        self._rows = len(self._result)
        self.headers = []
        if self._rows > 0:
            # column names come from the keys of the first result row
            self.headers = self._result[0].keys()
        self._header = TableHeader(self.headers)
        self._body = TableView()
        self._build_rows(self._result)
        urwid.register_signal(self.__class__, ['keypress', 'item_selected'])
        super().__init__(self._body, header=urwid.AttrMap(self._header, 'heading'), focus_part='body')

    def _build_rows(self, data):
        """Append one body row per result entry."""
        for entry in data:
            self._body.add_row(entry)

    def keypress(self, size, key):
        if key == 'enter':
            selected = dict(zip(self.headers, self.focus.focused_row.data))
            urwid.emit_signal(self, 'item_selected', self, selected)
        urwid.emit_signal(self, 'keypress', self, key)
        return super().keypress(size, key)
|
# Matthieu Brucher
# Last Change : 2007-08-28 00:36
"""
A simple line search, in fact no searches at all
"""
class SimpleLineSearch(object):
    """A degenerate line search: take one step along the search direction.

    The candidate is ``origin + alpha * state['direction']`` where alpha is
    ``state['initial_alpha_step']`` when present, otherwise the constructor's
    ``alpha_step`` factor. The step actually used is recorded in
    ``state['alpha_step']``.
    """

    def __init__(self, alpha_step=1., **kwargs):
        """
        Needs to have:
          - nothing
        Can have:
          - a step modifier, a factor to modulate the step (alpha_step = 1.)
        """
        self.stepSize = alpha_step

    def __call__(self, origin, state, **kwargs):
        """Return the candidate point for this iteration.

        Parameters:
          - origin is the origin of the search
          - state is the state of the optimizer
        """
        direction = state['direction']
        alpha = state['initial_alpha_step'] if 'initial_alpha_step' in state else self.stepSize
        state['alpha_step'] = alpha
        return origin + alpha * direction
|
"""
Helper modules.
These should be stand-alone modules that could reasonably be their own
PyPI package. This comes with two benefits:
1. The library is void of any business data, which makes it easier to
understand.
2. It means that it is decoupled making it easy to reuse the code in
different sections of the code. An example is the
:mod:`stack_exchange_graph_data.helpers.progress` module. Which is
easily used in both :func:`stack_exchange_graph_data.helpers.curl.curl`
and :func:`stack_exchange_graph_data.driver.load_xml_stream`. Since
    it wraps a stream, it is easily transferable to any Python loop, and
    because it contains no business logic, no monkey patching is required.
"""
|
# from collections import defaultdict
import os, fileinput, subprocess, sys
import zlib, gzip
import tools, variables
import create_SQL_tables_snakemake as cst
import obo_parser
import random, multiprocessing
from collections import deque
PLATFORM = sys.platform
# def unzip_file(fn_in, fn_out, number_of_processes=4):
# if PLATFORM == "linux": # Debian: "Linux"
# fn_bash_script = "bash_script_pigz.sh"
# with open(fn_bash_script, "w") as fh:
# fh.write("#!/usr/bin/env bash\n")
# shellcmd_1 = "pigz -c -d -p {} {} > {}".format(number_of_processes, fn_in, fn_out)
# fh.write(shellcmd_1 + "\n")
# subprocess.call("chmod 744 ./{}".format(fn_bash_script), shell=True)
# subprocess.call("./{}".format(fn_bash_script), shell=True)
# os.remove(fn_bash_script)
# else:
# tools.gunzip_file(fn_in, fn_out=fn_out)
#
# def split_file_into_chunks_using_delimiter(fn_in, dir_out, num_chunks, recstart, recend):
# if not os.path.exists(dir_out):
# os.makedirs(dir_out)
# size = os.path.getsize(fn_in)
# positions = sorted([random.randint(0, size) for _ in range(num_chunks)])
# # for
from multiprocessing import JoinableQueue
from multiprocessing.context import Process
class Renderer:
    """Fan work items out to a pool of worker processes via a JoinableQueue."""

    queue = None  # kept for backward compatibility; shadowed per instance

    def __init__(self, nb_workers=2):
        self.queue = JoinableQueue()
        self.processes = [Process(target=self.upload) for _ in range(nb_workers)]
        for p in self.processes:
            p.start()

    def render(self, item):
        """Enqueue one item for processing by a worker."""
        self.queue.put(item)

    def upload(self):
        """Worker loop: process queued items until a None sentinel arrives."""
        while True:
            item = self.queue.get()
            if item is None:
                break
            # process your item here
            self.queue.task_done()

    def terminate(self):
        """Wait until the queue is drained, then shut the workers down."""
        self.queue.join()
        # BUGFIX: the workers wait for a None sentinel that was never sent;
        # deliver one per worker and join the processes instead of killing
        # them with terminate(), which could interrupt a worker mid-item
        for _ in self.processes:
            self.queue.put(None)
        for p in self.processes:
            p.join()
# r = Renderer()
# r.render(item1)
# r.render(item2)
# r.terminate()
def Protein_2_Function_table_UniProtDump_UPS(fn_in_Functions_table_UPK, fn_in_obo_GO, fn_in_obo_UPK, fn_in_list_uniprot_dumps, fn_in_interpro_parent_2_child_tree, fn_in_hierarchy_reactome, fn_out_Protein_2_Function_table_UniProt_dump, fn_out_UniProtID_2_ENSPs_2_KEGGs_mapping, fn_out_UniProt_AC_2_ID_2_Taxid, verbose=True):
    """Parse UniProt flat-file dumps into protein-to-function tables.

    Loads the GO and UniProt-keyword OBO DAGs plus the InterPro and Reactome
    hierarchies, then batches dump entries for parallel processing.

    NOTE(review): this looks like unfinished work-in-progress:
      - several fn_in_* parameters are immediately overwritten below with
        hard-coded paths from `variables`, so the caller's arguments for
        those are ignored;
      - `queue.join(bubu, stuff)` is invalid -- JoinableQueue.join() takes
        no arguments (the multiprocessing fan-out appears half-migrated from
        the commented-out Pool code);
      - `stuff` is rebuilt each loop iteration but only the last value would
        ever be used. Confirm the intended dispatch before relying on this.
    """
    # fn_in_list_uniprot_dumps_temp = []
    # for fn_in in fn_in_list_uniprot_dumps:
    #     fn_out = fn_in.replace("gz", "") + ".temp"
    #     fn_in_list_uniprot_dumps_temp.append(fn_out)
    #     unzip_file(fn_in, fn_out, number_of_processes=4)
    # NOTE(review): these assignments clobber the same-named parameters
    fn_in_Functions_table_UPK = os.path.join(variables.TABLES_DIR, "Functions_table_UPK.txt")
    fn_in_obo_GO = os.path.join(variables.DOWNLOADS_DIR, "go-basic.obo")
    fn_in_obo_UPK = os.path.join(variables.DOWNLOADS_DIR, "keywords-all.obo")
    fn_in_interpro_parent_2_child_tree = os.path.join(variables.DOWNLOADS_DIR, "interpro_parent_2_child_tree.txt")
    fn_in_hierarchy_reactome = os.path.join(variables.DOWNLOADS_DIR, "RCTM_hierarchy.tsv")
    # entity-type codes used to tag each output row
    etype_UniProtKeywords = variables.id_2_entityTypeNumber_dict["UniProtKeywords"]
    etype_GOMF = variables.id_2_entityTypeNumber_dict['GO:0003674']
    etype_GOCC = variables.id_2_entityTypeNumber_dict['GO:0005575']
    etype_GOBP = variables.id_2_entityTypeNumber_dict['GO:0008150']
    etype_interpro = variables.id_2_entityTypeNumber_dict['INTERPRO']
    etype_pfam = variables.id_2_entityTypeNumber_dict['PFAM']
    etype_reactome = variables.id_2_entityTypeNumber_dict['Reactome']
    GO_dag = obo_parser.GODag(obo_file=fn_in_obo_GO, upk=False)
    UPK_dag = obo_parser.GODag(obo_file=fn_in_obo_UPK, upk=True)
    UPK_Name_2_AN_dict = cst.get_keyword_2_upkan_dict(fn_in_Functions_table_UPK)
    # UPKs_not_in_obo_list, GOterms_not_in_obo_temp = [], []
    child_2_parent_dict_interpro, _ = cst.get_child_2_direct_parents_and_term_2_level_dict_interpro(fn_in_interpro_parent_2_child_tree)
    lineage_dict_interpro = cst.get_lineage_from_child_2_direct_parent_dict(child_2_parent_dict_interpro)
    child_2_parent_dict_reactome = cst.get_child_2_direct_parent_dict_RCTM(fn_in_hierarchy_reactome)
    counter = 0
    num_entries = 1000
    num_workers = 10
    # pool = multiprocessing.Pool(num_workers)
    queue = JoinableQueue()
    entries_2_work = deque()
    # entries_2_work.append()
    for uniprot_dump in fn_in_list_uniprot_dumps:
        for entries in yield_entry_UniProt_dat_dump_parallel(uniprot_dump, num_entries):
            entries_2_work.append(entries)
            stuff = entries, UPK_Name_2_AN_dict, UPK_dag, lineage_dict_interpro, child_2_parent_dict_reactome, GO_dag, etype_UniProtKeywords, etype_GOMF, etype_GOCC, etype_GOBP, etype_interpro, etype_pfam, etype_reactome
    # pool.map(bubu, stuff)
    # NOTE(review): invalid call -- JoinableQueue.join() accepts no arguments
    queue.join(bubu, stuff)
def yield_entry_UniProt_dat_dump_parallel(fn_in, num_entries=100):
    """Yield batches of up to `num_entries` UniProt dump entries from fn_in.

    BUGFIX: the trailing batch is only yielded when non-empty; previously an
    empty list was yielded whenever the total entry count was an exact
    multiple of `num_entries` (or the file had no entries at all).
    """
    entries = []
    for entry in cst.yield_entry_UniProt_dat_dump(fn_in):
        entries.append(entry)
        if len(entries) == num_entries:
            yield entries
            entries = []
    if entries:
        yield entries
def bubu(entries, UPK_Name_2_AN_dict, UPK_dag, lineage_dict_interpro, child_2_parent_dict_reactome, GO_dag, etype_UniProtKeywords, etype_GOMF, etype_GOCC, etype_GOBP, etype_interpro, etype_pfam, etype_reactome):
    """Worker: annotate one batch of parsed UniProt entries and print the rows.

    For every entry, backtracks keywords/GO/InterPro/Reactome terms to their
    ancestors and emits one tab-separated row per function category, tagged
    with its entity-type code and taxid.

    NOTE(review): every fh_out.write(...) is commented out and replaced by
    print(...), so this currently writes to stdout only -- presumably a
    debugging state; confirm the intended output files before production use.
    """
    for UniProtID, UniProtAC_list, NCBI_Taxid, functions_2_return in parse_uniprot_dat_dump_yield_entry_v2_parallel(entries):
        Keywords_list, GOterm_list, InterPro, Pfam, KEGG, Reactome, STRING, *Proteomes = functions_2_return
        # ['Complete proteome', 'Reference proteome', 'Transcription', 'Activator', 'Transcription regulation', ['GO:0046782'], ['IPR007031'], ['PF04947'], ['vg:2947773'], [], [], ['UP000008770']]
        # for UniProtAN in UniProtAC_and_ID_list:
        if len(Keywords_list) > 0:
            # map keyword names to accession numbers, then backtrack the DAG
            UPK_ANs, UPKs_not_in_obo_temp = cst.map_keyword_name_2_AN(UPK_Name_2_AN_dict, Keywords_list)
            # UPKs_not_in_obo_list += UPKs_not_in_obo_temp
            UPK_ANs, UPKs_not_in_obo_temp = cst.get_all_parent_terms(UPK_ANs, UPK_dag)
            # UPKs_not_in_obo_list += UPKs_not_in_obo_temp
            if len(UPK_ANs) > 0:
                # fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(UPK_ANs)) + "\t" + etype_UniProtKeywords + "\t" + NCBI_Taxid + "\n")
                print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(UPK_ANs)) + "\t" + etype_UniProtKeywords + "\t" + NCBI_Taxid + "\n")
        if len(GOterm_list) > 0:  # do backtracking, split GO into 3 categories and add etype
            GOterm_list, not_in_obo_GO = cst.get_all_parent_terms(GOterm_list, GO_dag)
            # GOterms_not_in_obo_temp += not_in_obo_GO
            MFs, CPs, BPs, not_in_obo_GO = cst.divide_into_categories(GOterm_list, GO_dag, [], [], [], [])
            # GOterms_not_in_obo_temp += not_in_obo_GO
            if MFs:
                # fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(MFs)) + "\t" + etype_GOMF + "\t" + NCBI_Taxid + "\n") # 'Molecular Function', -23
                print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(MFs)) + "\t" + etype_GOMF + "\t" + NCBI_Taxid + "\n")  # 'Molecular Function', -23
            if CPs:
                # fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(CPs)) + "\t" + etype_GOCC + "\t" + NCBI_Taxid + "\n") # 'Cellular Component', -22
                print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(CPs)) + "\t" + etype_GOCC + "\t" + NCBI_Taxid + "\n")  # 'Cellular Component', -22
            if BPs:
                # fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(BPs)) + "\t" + etype_GOBP + "\t" + NCBI_Taxid + "\n") # 'Biological Process', -21
                print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(BPs)) + "\t" + etype_GOBP + "\t" + NCBI_Taxid + "\n")  # 'Biological Process', -21
        if len(InterPro) > 0:
            # expand each InterPro id with its full ancestor lineage
            InterPro_set = set(InterPro)
            for id_ in InterPro:
                InterPro_set.update(lineage_dict_interpro[id_])
            # fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(InterPro_set)) + "\t" + etype_interpro + "\t" + NCBI_Taxid + "\n")
            print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(InterPro_set)) + "\t" + etype_interpro + "\t" + NCBI_Taxid + "\n")
        if len(Pfam) > 0:
            # fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(Pfam)) + "\t" + etype_pfam + "\t" + NCBI_Taxid + "\n")
            print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(Pfam)) + "\t" + etype_pfam + "\t" + NCBI_Taxid + "\n")
        if len(Reactome) > 0:
            # expand each Reactome term with all of its parents
            reactome_list = Reactome.copy()
            for term in reactome_list:
                reactome_list += list(cst.get_parents_iterative(term, child_2_parent_dict_reactome))
            # fh_out.write(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(set(reactome_list))) + "\t" + etype_reactome + "\t" + NCBI_Taxid + "\n")
            print(UniProtID + "\t" + cst.format_list_of_string_2_postgres_array(sorted(set(reactome_list))) + "\t" + etype_reactome + "\t" + NCBI_Taxid + "\n")
        # translation needed from KEGG identifier to pathway, ID vs AC can be easily distinguished via "_"
        if len(KEGG) > 0:
            # fh_out_UniProtID_2_ENSPs_2_KEGGs_mapping.write(UniProtID + "\t" + ";".join(STRING) + "\t" + ";".join(sorted(set(KEGG))) + "\t" + NCBI_Taxid + "\n")
            print("222_UniProtID_2_ENSPs_2_KEGGs_2_Taxid " + UniProtID + "\t" + ";".join(STRING) + "\t" + ";".join(sorted(set(KEGG))) + "\t" + NCBI_Taxid + "\n")
        for AC in UniProtAC_list:
            # fh_out_UniProt_AC_2_ID_2_Taxid.write("{}\t{}\t{}\n".format(AC, UniProtID, NCBI_Taxid))
            print("111_UniProt_AC_2_ID_2_Taxid {}\t{}\t{}\n".format(AC, UniProtID, NCBI_Taxid))
def parse_uniprot_dat_dump_yield_entry_v2_parallel(entries):
    """
    Yield one (UniProtID, UniProtAC_list, NCBI_Taxid, functions) tuple per
    pre-split UniProt .dat entry in *entries*.

    Extracted annotation categories:
    UniProtKeywords
    GO
    InterPro
    Pfam
    KEGG
    Reactome

    @KEGG : I have a mapping from UniProt accession (e.g. "P31946") to KEGG entry (e.g. "hsa:7529")
    what I'm missing is from KEGG entry to KEGG pathway (e.g.
    hsa:7529    path:hsa04110
    hsa:7529    path:hsa04114
    hsa:7529    path:hsa04722)
    """
    for entry in entries:
        accessions = []
        keyword_text = ""
        dr_payloads = []  # raw "DR" cross-reference lines, parsed later by cst helper
        uniprot_id = "-1"
        taxid = "-1"
        for raw_line in entry:
            parts = raw_line.split(maxsplit=1)
            if len(parts) != 2:
                # line without a payload (or blank) -- nothing to extract
                continue
            code, payload = parts
            if code == "ID":
                uniprot_id = payload.split()[0]
            elif code == "AC":
                # accessions are ";"-separated; drop empty fragments before stripping
                accessions.extend(ac.strip() for ac in payload.split(";") if len(ac) > 0)
            elif code == "KW":
                keyword_text += payload
            elif code == "DR":
                dr_payloads.append(payload)
            elif code == "OX" and payload.startswith("NCBI_TaxID="):
                # e.g. "NCBI_TaxID=654924;" or "NCBI_TaxID=418404 {ECO:...};"
                taxid = payload.replace("NCBI_TaxID=", "").split(";")[0].split()[0]
        # deduplicate + sort keywords, dropping empty strings
        keywords = [cst.cleanup_Keyword(kw) for kw in sorted(set(keyword_text.split(";"))) if len(kw) > 0]
        # order: Keywords_list, then GO, InterPro, Pfam, KEGG, Reactome, STRING, Proteomes
        functions = [keywords]
        functions += cst.helper_parse_UniProt_dump_other_functions(dr_payloads)
        yield uniprot_id, accessions, taxid, functions
if __name__ == "__main__":
    # Script entry point: build the UniProt-derived Protein_2_Function table.
    # The callable is defined elsewhere in this module.
    Protein_2_Function_table_UniProtDump_UPS()
|
from aiohttp import web, WSMsgType
from aiohttp_security import authorized_userid
from players import AlreadyRegistered
from protocol import handle_command, handle_error, send_command
from commands import ErrorCommand
import json
import logging
from global_defs import global_playground, registry
async def websocket_handler(request):
    """
    aiohttp WebSocket endpoint.

    Authenticates the requester, registers the user in the global playground
    (rejecting a second connection for an already-connected user id), then
    dispatches incoming TEXT frames as commands until the socket closes.
    Always returns the WebSocketResponse.
    """
    ws = web.WebSocketResponse()
    try:
        await ws.prepare(request)
    except web.HTTPException as e:
        logging.info('Failed to open WebSocket')
    else:
        user_id = await authorized_userid(request)
        logging.info("websocket connection opened with user_id {}".format(user_id))
        # if already connected, not permit connection
        try:
            global_playground.register(user_id)
        except AlreadyRegistered:
            logging.info("Deliberately closed connection with already connected user_id {}!".format(user_id))
            await send_command(ErrorCommand(user_id, msg=f'User id {user_id} already in use'))
            await ws.close()
        else:
            registry.add_socket(user_id, ws)
            # Receive loop: only TEXT frames carry game commands; every other
            # frame type is logged and ignored.
            # NOTE(review): json.loads on untrusted payloads can raise
            # JSONDecodeError and would abort this loop -- confirm upstream
            # clients always send valid JSON.
            async for msg in ws:
                if msg.type == WSMsgType.TEXT:
                    await handle_command(json.loads(msg.data), user_id)
                elif msg.type == WSMsgType.ERROR:
                    logging.info('connection closed with exception {} with user_id {}'.format(ws.exception(), user_id))
                    await handle_error(user_id)
                elif msg.type == WSMsgType.BINARY:
                    logging.info('Received BINARY type message')
                elif msg.type == WSMsgType.CLOSE:
                    logging.info('Received CLOSE type message')
                elif msg.type == WSMsgType.CLOSED:
                    logging.info('Received CLOSED type message')
                elif msg.type == WSMsgType.CLOSING:
                    logging.info('Received CLOSING type message')
                elif msg.type == WSMsgType.CONTINUATION:
                    logging.info('Received CONTINUATION type message')
                elif msg.type == WSMsgType.PING:
                    logging.info('Received PING type message')
                elif msg.type == WSMsgType.PONG:
                    logging.info('Received PONG type message')
            # loop ended => peer disconnected; clean up the user's game state
            logging.info('websocket connection closed with user_id {}'.format(user_id))
            await handle_error(user_id)
    return ws
|
#coding:utf-8
#
# id: bugs.core_1690
# title: arithmetic exception, numeric overflow, or string truncation in utf8 tables
# decription:
# tracker_id: CORE-1690
# min_versions: []
# versions: 2.1.3
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1.3
# resources: None
# No output substitutions needed for this test.
substitutions_1 = []
# DDL executed once when the test database is created.
init_script_1 = """create table A (C1 INTEGER PRIMARY KEY);
"""
# Database is created with UTF8 charset: the original bug (CORE-1690) raised
# "arithmetic exception, numeric overflow, or string truncation" on utf8 tables.
db_1 = db_factory(page_size=4096, charset='UTF8', sql_dialect=3, init=init_script_1)
# The repro is simply SHOW TABLE against the UTF8 database.
test_script_1 = """show table A;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
# Expected isql output for SHOW TABLE when the bug is fixed.
expected_stdout_1 = """C1 INTEGER Not Null
CONSTRAINT INTEG_2:
Primary key (C1)
"""
@pytest.mark.version('>=2.1.3')
def test_1(act_1: Action):
    """Run SHOW TABLE on the UTF8 database and compare against expected output."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
|
from django.db import models
from django.urls import reverse
from django_quill.fields import QuillField
from user.models import User
class Article(models.Model):
    """An article with a rich-text (Quill) body, cover image and author."""
    # verbose_name values are Russian admin labels ("заглавие" = title,
    # "обложка" = cover, "создано" = created, "автор" = author).
    title = models.CharField(max_length=128, blank=False, verbose_name="заглавие")
    cover = models.ImageField(upload_to="cover_images/%Y/%m/%d", blank=True, verbose_name="обложка")
    created_at = models.DateTimeField(auto_now_add=True, verbose_name="создано")
    # PROTECT: a user who has authored articles cannot be deleted.
    author = models.ForeignKey(User, on_delete=models.PROTECT, verbose_name="автор")
    content = QuillField()
    def get_absolute_url(self):
        """Return the canonical detail-page URL for this article."""
        return reverse("article-detail", kwargs={"pk": self.pk})
    def __str__(self):
        return self.title
    class Meta:
        # Russian verbose names: "Статья" = article, "Статьи" = articles.
        verbose_name = "Статья"
        verbose_name_plural = "Статьи"
class Comment(models.Model):
    """A user comment attached to an Article (reachable as article.comments)."""
    comment = models.TextField(max_length=1024, blank=False)
    # CASCADE: deleting the author or the article removes the comment too.
    author = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="автор комментария")
    article = models.ForeignKey(Article, on_delete=models.CASCADE, related_name="comments")
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # NOTE(review): returns the full comment text -- may be long in admin lists.
        return self.comment
    class Meta:
        # Russian verbose names: "Комментарий" = comment, "Комментарии" = comments.
        verbose_name = "Комментарий"
        verbose_name_plural = "Комментарии"
|
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import os
# Location of the shared PID-controller sources in the firmware tree.
pid_dir = os.path.join(os.path.dirname(__file__), "../firmware/lib/pid")

# Build the "cpid" extension from the Cython wrapper plus the C implementation,
# compiling against the firmware's public headers.
cpid_ext = Extension(
    "cpid",
    ["cpid.pyx", os.path.join(pid_dir, "src/pid.c")],
    include_dirs=[os.path.join(pid_dir, "inc")],
)

setup(ext_modules=cythonize([cpid_ext]))
|
# coding=utf-8
"""
Tests for deepreg/dataset/loader/interface.py
"""
from test.unit.util import is_equal_np
import numpy as np
import pytest
from deepreg.dataset.loader.interface import (
AbstractPairedDataLoader,
AbstractUnpairedDataLoader,
DataLoader,
FileLoader,
GeneratorDataLoader,
)
from deepreg.dataset.loader.nifti_loader import NiftiFileLoader
from deepreg.dataset.loader.paired_loader import PairedDataLoader
from deepreg.dataset.loader.util import normalize_array
class TestDataLoader:
    """Unit tests for the abstract DataLoader interface and dataset preprocessing."""

    @pytest.mark.parametrize(
        "labeled,num_indices,sample_label,seed",
        [
            (True, 1, "all", 0),
            (False, 1, "all", 0),
            (None, 1, "all", 0),
            (True, 1, "sample", 0),
            (True, 1, "all", 0),
            (True, 1, None, 0),
            (True, 1, "sample", None),
        ],
    )
    def test_init(self, labeled, num_indices, sample_label, seed):
        """
        Test init function of DataLoader class

        :param labeled: bool
        :param num_indices: int
        :param sample_label: str
        :param seed: float/int/None
        :return:
        """
        # construction must succeed for every parameter combination
        # (a redundant duplicate construction was removed here)
        data_loader = DataLoader(
            labeled=labeled,
            num_indices=num_indices,
            sample_label=sample_label,
            seed=seed,
        )
        # abstract properties / methods must raise until a subclass overrides them
        with pytest.raises(NotImplementedError):
            data_loader.moving_image_shape
        with pytest.raises(NotImplementedError):
            data_loader.fixed_image_shape
        with pytest.raises(NotImplementedError):
            data_loader.num_samples
        with pytest.raises(NotImplementedError):
            data_loader.get_dataset()
        data_loader.close()

    @pytest.mark.parametrize(
        "labeled,moving_shape,fixed_shape,batch_size,data_augmentation",
        [
            (True, (9, 9, 9), (9, 9, 9), 1, {}),
            (
                True,
                (9, 9, 9),
                (15, 15, 15),
                1,
                {"data_augmentation": {"name": "affine"}},
            ),
            (
                True,
                (9, 9, 9),
                (15, 15, 15),
                1,
                {
                    "data_augmentation": [
                        {"name": "affine"},
                        {
                            "name": "ddf",
                            "field_strength": 1,
                            "low_res_size": (3, 3, 3),
                        },
                    ],
                },
            ),
        ],
    )
    def test_get_dataset_and_preprocess(
        self, labeled, moving_shape, fixed_shape, batch_size, data_augmentation
    ):
        """
        Test get_transforms() function. For that, an Abstract Data Loader is created
        only to set the moving and fixed shapes that are used in get_transforms().
        Here we test that the get_transform() returns a function and the shape of
        the output of this function. See test_preprocess.py for more testing regarding
        the concrete params.

        :param labeled: bool
        :param moving_shape: tuple
        :param fixed_shape: tuple
        :param batch_size: int
        :param data_augmentation: dict
        :return:
        """
        data_dir_path = [
            "data/test/nifti/paired/train",
            "data/test/nifti/paired/test",
        ]
        common_args = dict(
            file_loader=NiftiFileLoader,
            # bug fix: this was hard-coded to labeled=True, silently ignoring
            # the parametrized `labeled` argument
            labeled=labeled,
            sample_label="all",
            seed=None,
        )
        data_loader = PairedDataLoader(
            data_dir_paths=data_dir_path,
            fixed_image_shape=fixed_shape,
            moving_image_shape=moving_shape,
            **common_args,
        )
        dataset = data_loader.get_dataset_and_preprocess(
            training=True,
            batch_size=batch_size,
            repeat=True,
            shuffle_buffer_num_batch=1,
            **data_augmentation,
        )
        # every batch must carry the loader's configured (resampled) shapes
        for outputs in dataset.take(1):
            assert (
                outputs["moving_image"].shape
                == (batch_size,) + data_loader.moving_image_shape
            )
            assert (
                outputs["fixed_image"].shape
                == (batch_size,) + data_loader.fixed_image_shape
            )
            assert (
                outputs["moving_label"].shape
                == (batch_size,) + data_loader.moving_image_shape
            )
            assert (
                outputs["fixed_label"].shape
                == (batch_size,) + data_loader.fixed_image_shape
            )
def test_abstract_paired_data_loader():
    """Exercise AbstractPairedDataLoader construction and its properties."""
    moving_shape = (8, 8, 4)
    fixed_shape = (6, 6, 4)

    # 2D shapes are rejected: both shapes must have exactly three dimensions
    with pytest.raises(ValueError) as err_info:
        AbstractPairedDataLoader(
            moving_image_shape=(2, 2),
            fixed_image_shape=(3, 3),
            labeled=True,
            sample_label="sample",
        )
    assert "moving_image_shape and fixed_image_shape have length of three" in str(
        err_info.value
    )

    # valid 3D shapes construct without error
    loader = AbstractPairedDataLoader(
        moving_image_shape=moving_shape,
        fixed_image_shape=fixed_shape,
        labeled=True,
        sample_label="sample",
    )

    # paired loaders index (moving, fixed); shapes are echoed back verbatim
    assert loader.num_indices == 2
    assert loader.moving_image_shape == moving_shape
    assert loader.fixed_image_shape == fixed_shape
    assert loader.num_samples is None
def test_abstract_unpaired_data_loader():
    """Exercise AbstractUnpairedDataLoader construction and its properties."""
    shape = (8, 8, 4)

    # a 2D image shape must be rejected
    with pytest.raises(ValueError) as err_info:
        AbstractUnpairedDataLoader(
            image_shape=(2, 2), labeled=True, sample_label="sample"
        )
    assert "image_shape has to be length of three" in str(err_info.value)

    # a 3D shape constructs without error
    loader = AbstractUnpairedDataLoader(
        image_shape=shape, labeled=True, sample_label="sample"
    )

    # unpaired loaders use three indices; moving and fixed shapes coincide
    assert loader.num_indices == 3
    assert loader.moving_image_shape == shape
    assert loader.fixed_image_shape == shape
    assert loader.num_samples is None
def test_generator_data_loader(caplog):
    """
    Test the functions in GeneratorDataLoader
    :param caplog: used to check warning message.
    """
    generator = GeneratorDataLoader(labeled=True, num_indices=1, sample_label="all")
    # test properties
    # bug fix: this previously asserted loader_moving_image four times
    # (copy-paste); check each of the four loaders once instead
    assert generator.loader_moving_image is None
    assert generator.loader_fixed_image is None
    assert generator.loader_moving_label is None
    assert generator.loader_fixed_label is None
    # not implemented properties / functions
    with pytest.raises(NotImplementedError):
        generator.sample_index_generator()
    # implemented functions
    # test get_dataset
    dummy_array = np.random.random(size=(100, 100, 100)).astype(np.float32)
    # for labeled data (comment fix: this block feeds labels)
    # mock generator
    sequence = [
        dict(
            moving_image=dummy_array,
            fixed_image=dummy_array,
            moving_label=dummy_array,
            fixed_label=dummy_array,
            indices=[1],
        )
        for i in range(3)
    ]

    def mock_generator():
        for el in sequence:
            yield el

    # inputs, no error means passed
    generator.data_generator = mock_generator
    dataset = generator.get_dataset()
    # check dataset output
    expected = dict(
        moving_image=dummy_array,
        fixed_image=dummy_array,
        moving_label=dummy_array,
        fixed_label=dummy_array,
        indices=[1],
    )
    for got in list(dataset.as_numpy_iterator()):
        assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
    # for unlabeled data
    generator_unlabeled = GeneratorDataLoader(
        labeled=False, num_indices=1, sample_label="all"
    )
    sequence = [
        dict(moving_image=dummy_array, fixed_image=dummy_array, indices=[1])
        for i in range(3)
    ]
    # inputs, no error means passed
    generator_unlabeled.data_generator = mock_generator
    dataset = generator_unlabeled.get_dataset()
    # check dataset output
    expected = dict(moving_image=dummy_array, fixed_image=dummy_array, indices=[1])
    for got in list(dataset.as_numpy_iterator()):
        assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
    # test data_generator
    # create mock data loader and sample index generator

    class MockDataLoader:
        def __init__(self, **kwargs):
            super().__init__(**kwargs)

        # NOTE(review): intentionally no `self` -- works whether invoked on the
        # class attribute or a bound instance (instance is then passed as index)
        def get_data(index):
            return dummy_array

    def mock_sample_index_generator():
        return [[[1], [1], [1]]]

    generator = GeneratorDataLoader(labeled=True, num_indices=1, sample_label="all")
    generator.sample_index_generator = mock_sample_index_generator
    generator.loader_moving_image = MockDataLoader
    generator.loader_fixed_image = MockDataLoader
    generator.loader_moving_label = MockDataLoader
    generator.loader_fixed_label = MockDataLoader
    # check data generator output: images are normalized, labels are not
    got = next(generator.data_generator())
    expected = dict(
        moving_image=normalize_array(dummy_array),
        fixed_image=normalize_array(dummy_array),
        moving_label=dummy_array,
        fixed_label=dummy_array,
        indices=np.asarray([1] + [0], dtype=np.float32),
    )
    assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
    # test validate_images_and_labels
    with pytest.raises(ValueError) as err_info:
        generator.validate_images_and_labels(
            fixed_image=None,
            moving_image=dummy_array,
            moving_label=None,
            fixed_label=None,
            image_indices=[1],
        )
    assert "moving image and fixed image must not be None" in str(err_info.value)
    with pytest.raises(ValueError) as err_info:
        generator.validate_images_and_labels(
            fixed_image=dummy_array,
            moving_image=dummy_array,
            moving_label=dummy_array,
            fixed_label=None,
            image_indices=[1],
        )
    assert "moving label and fixed label must be both None or non-None" in str(
        err_info.value
    )
    with pytest.raises(ValueError) as err_info:
        generator.validate_images_and_labels(
            fixed_image=dummy_array,
            moving_image=dummy_array + 1.0,
            moving_label=None,
            fixed_label=None,
            image_indices=[1],
        )
    assert "Sample [1]'s moving_image's values are not between [0, 1]" in str(
        err_info.value
    )
    with pytest.raises(ValueError) as err_info:
        generator.validate_images_and_labels(
            fixed_image=dummy_array,
            moving_image=np.random.random(size=(100, 100)),
            moving_label=None,
            fixed_label=None,
            image_indices=[1],
        )
    assert "Sample [1]'s moving_image' shape should be 3D. " in str(err_info.value)
    with pytest.raises(ValueError) as err_info:
        generator.validate_images_and_labels(
            fixed_image=dummy_array,
            moving_image=dummy_array,
            moving_label=np.random.random(size=(100, 100)),
            fixed_label=dummy_array,
            image_indices=[1],
        )
    assert "Sample [1]'s moving_label' shape should be 3D or 4D. " in str(
        err_info.value
    )
    with pytest.raises(ValueError) as err_info:
        generator.validate_images_and_labels(
            fixed_image=dummy_array,
            moving_image=dummy_array,
            moving_label=np.random.random(size=(100, 100, 100, 3)),
            fixed_label=np.random.random(size=(100, 100, 100, 4)),
            image_indices=[1],
        )
    assert (
        "Sample [1]'s moving image and fixed image have different numbers of labels."
        in str(err_info.value)
    )
    # shape mismatches between image and label only warn, they do not raise
    caplog.clear()  # clear previous log
    generator.validate_images_and_labels(
        fixed_image=dummy_array,
        moving_image=dummy_array,
        moving_label=np.random.random(size=(100, 100, 90)),
        fixed_label=dummy_array,
        image_indices=[1],
    )
    assert "Sample [1]'s moving image and label have different shapes. " in caplog.text
    caplog.clear()  # clear previous log
    generator.validate_images_and_labels(
        fixed_image=dummy_array,
        moving_image=dummy_array,
        moving_label=dummy_array,
        fixed_label=np.random.random(size=(100, 100, 90)),
        image_indices=[1],
    )
    assert "Sample [1]'s fixed image and label have different shapes. " in caplog.text
    # test sample_image_label method
    # for unlabeled input data: label index is -1
    got = next(
        generator.sample_image_label(
            fixed_image=dummy_array,
            moving_image=dummy_array,
            moving_label=None,
            fixed_label=None,
            image_indices=[1],
        )
    )
    expected = dict(
        moving_image=dummy_array,
        fixed_image=dummy_array,
        indices=np.asarray([1] + [-1], dtype=np.float32),
    )
    assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
    # for data with one label: label index is 0
    got = next(
        generator.sample_image_label(
            fixed_image=dummy_array,
            moving_image=dummy_array,
            moving_label=dummy_array,
            fixed_label=dummy_array,
            image_indices=[1],
        )
    )
    expected = dict(
        moving_image=dummy_array,
        fixed_image=dummy_array,
        moving_label=dummy_array,
        fixed_label=dummy_array,
        indices=np.asarray([1] + [0], dtype=np.float32),
    )
    assert all(is_equal_np(got[key], expected[key]) for key in expected.keys())
    # for data with multiple labels: one sample yielded per label channel
    dummy_labels = np.random.random(size=(100, 100, 100, 3))
    got = generator.sample_image_label(
        fixed_image=dummy_array,
        moving_image=dummy_array,
        moving_label=dummy_labels,
        fixed_label=dummy_labels,
        image_indices=[1],
    )
    for label_index in range(dummy_labels.shape[3]):
        got_iter = next(got)
        expected = dict(
            moving_image=dummy_array,
            fixed_image=dummy_array,
            moving_label=dummy_labels[..., label_index],
            fixed_label=dummy_labels[..., label_index],
            indices=np.asarray([1] + [label_index], dtype=np.float32),
        )
        assert all(is_equal_np(got_iter[key], expected[key]) for key in expected.keys())
def test_file_loader():
    """Exercise the FileLoader base class: init checks, abstract hooks, grouping."""
    # construction succeeds for both grouped and ungrouped loaders
    grouped = FileLoader(
        dir_paths=["/path/grouped_loader/"], name="grouped_loader", grouped=True
    )
    ungrouped = FileLoader(
        dir_paths=["/path/ungrouped_loader/"], name="ungrouped_loader", grouped=False
    )

    # duplicate directory paths are rejected at construction time
    with pytest.raises(ValueError) as err_info:
        FileLoader(
            dir_paths=["/path/ungrouped_loader/", "/path/ungrouped_loader/"],
            name="ungrouped_loader",
            grouped=False,
        )
    assert "dir_paths have repeated elements" in str(err_info.value)

    # every abstract hook must raise until a subclass overrides it
    with pytest.raises(NotImplementedError):
        grouped.set_data_structure()
    with pytest.raises(NotImplementedError):
        grouped.set_group_structure()
    with pytest.raises(NotImplementedError):
        grouped.get_data(1)
    with pytest.raises(NotImplementedError):
        grouped.get_data_ids()
    with pytest.raises(NotImplementedError):
        grouped.get_num_images()
    with pytest.raises(NotImplementedError):
        grouped.close()

    # grouped loader: group structure starts unset
    assert grouped.group_struct is None
    # inject a mock nested-list group structure and query it
    grouped.group_struct = [[1, 2], [3, 4], [5, 6]]
    assert grouped.get_num_groups() == 3
    assert grouped.get_num_images_per_group() == [2, 2, 2]
    # an empty group triggers a ValueError naming the offending group ids
    with pytest.raises(ValueError) as err_info:
        grouped.group_struct = [[], [3, 4], [5, 6]]
        grouped.get_num_images_per_group()
    assert "Groups of ID [0, 2, 2] are empty." in str(err_info.value)

    # ungrouped loader: group queries fail their assertions
    assert ungrouped.group_struct is None
    with pytest.raises(AssertionError):
        ungrouped.get_num_groups()
    with pytest.raises(AssertionError):
        ungrouped.get_num_images_per_group()
|
# qubit number=5
# total number=47
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """
    Build the phase oracle O_f^± as a circuit named "Zf".

    For every bitstring on which f evaluates to "1", the qubits whose bit is
    "0" are X-conjugated around a multi-controlled phase flip, so that exactly
    the marked basis states acquire a -1 phase.
    NOTE: uses U1 (P) with lambda = pi, i.e. a controlled-Z
    (or multi_control_Z_gate, see issue #127).
    """
    qr = QuantumRegister(n, "ofc")
    circ = QuantumCircuit(qr, name="Zf")
    for state in range(2 ** n):
        bits = np.binary_repr(state, n)
        if f(bits) != "1":
            continue
        # qubits to conjugate with X so this state maps to |1...1>
        flips = [qr[j] for j in range(n) if bits[j] == "0"]
        for q in flips:
            circ.x(q)
        if n >= 2:
            circ.mcu1(pi, qr[1:], qr[0])
        for q in flips:
            circ.x(q)
    return circ
def make_circuit(n:int,f) -> QuantumCircuit:
    """
    Build the (auto-generated) Grover-style circuit: Hadamard preparation,
    then floor(sqrt(2^n)*pi/4) iterations of the Zf oracle followed by a
    diffusion-like block, and finally measure every qubit.
    The "# number=X" tags are generation bookkeeping -- left untouched.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # uniform superposition (plus one generated rx perturbation)
    prog.h(input_qubit[0]) # number=3
    prog.rx(-1.3603096190043806,input_qubit[2]) # number=28
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)

    # optimal Grover iteration count for a single marked state
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.h(input_qubit[3]) # number=34
        prog.cz(input_qubit[4],input_qubit[3]) # number=35
        prog.h(input_qubit[3]) # number=36
        prog.h(input_qubit[0]) # number=38
        prog.cz(input_qubit[1],input_qubit[0]) # number=39
        prog.h(input_qubit[0]) # number=40
        prog.x(input_qubit[0]) # number=32
        prog.cx(input_qubit[1],input_qubit[0]) # number=33
        prog.cx(input_qubit[0],input_qubit[1]) # number=24
        prog.x(input_qubit[1]) # number=25
        prog.x(input_qubit[1]) # number=41
        prog.cx(input_qubit[0],input_qubit[1]) # number=26
        prog.x(input_qubit[2]) # number=11
        prog.cx(input_qubit[2],input_qubit[3]) # number=30
        prog.x(input_qubit[3]) # number=12
        prog.h(input_qubit[2]) # number=42
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.x(input_qubit[0]) # number=13
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[4]) # number=46
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.cx(input_qubit[0],input_qubit[2]) # number=43
        prog.x(input_qubit[2]) # number=44
        prog.cx(input_qubit[0],input_qubit[2]) # number=45
        prog.rx(-1.9697785938008003,input_qubit[1]) # number=37
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.x(input_qubit[1]) # number=23
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # Grover search for the all-zeros key on 5 qubits.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # run on the least-busy real IBMQ device with enough qubits
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # transpile against a mock device only to report depth / layout
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # bug fix: use a context manager so the file is closed even if a print fails
    with open("../data/startQiskit_QC1249.csv","w") as writefile:
        print(info,file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(),file=writefile)
        print(circuit1,file=writefile)
|
from __future__ import print_function, absolute_import
import os
import sys
cmd = sys.modules["pymol.cmd"]
from pymol import _cmd
import threading
import traceback
if sys.version_info[0] == 2:
import thread
import urllib2
else:
import _thread as thread
import urllib.request as urllib2
import re
import time
import pymol
import chempy.io
from .cmd import DEFAULT_ERROR, DEFAULT_SUCCESS, loadable, _load2str, Shortcut, \
is_string, is_ok
# cache management:
def _cache_validate(_self=cmd):
    '''
    Ensure the session cache attributes (_cache list, _cache_memory counter)
    exist on the pymol instance, creating them if missing.
    '''
    r = DEFAULT_SUCCESS
    try:
        _self.lock_data(_self)
        _pymol = _self._pymol
        if not hasattr(_pymol,"_cache"):
            _pymol._cache = []
        if not hasattr(_pymol,"_cache_memory"):
            _pymol._cache_memory = 0
    finally:
        _self.unlock_data(_self)
    # bug fix: previously fell off the end returning None although r was
    # computed; every sibling cache helper returns its status
    return r
def _cache_clear(_self=cmd):
    '''Drop every session-cache entry and reset the accounted memory to zero.'''
    status = DEFAULT_SUCCESS
    try:
        # lock acquired inside try so unlock always runs, matching siblings
        _self.lock_data(_self)
        pm = _self._pymol
        pm._cache = []
        pm._cache_memory = 0
    finally:
        _self.unlock_data(_self)
    return status
def _cache_mark(_self=cmd):
    '''Zero the timestamp field (entry[5]) of every cache entry, marking all
    entries as unused so a later usage-based purge can drop them.'''
    status = DEFAULT_SUCCESS
    try:
        _self.lock_data(_self)
        _cache_validate(_self)
        for cache_entry in _self._pymol._cache:
            cache_entry[5] = 0.0
    finally:
        _self.unlock_data(_self)
    return status
def _cache_purge(max_size, _self=cmd):
    '''
    Purge the session cache and return the resulting cache memory usage.

    max_size >= 0: evict oldest / least-used entries until the accounted
    memory fits within max_size (always keeping at least one entry).
    max_size < 0: drop every entry whose timestamp is 0.0 (i.e. unused
    since the last _cache_mark).
    '''
    try:
        _self.lock_data(_self)
        _pymol = _self._pymol
        _cache_validate(_self)
        if len(_pymol._cache):
            cur_size = sum(x[0] for x in _pymol._cache)
            if max_size>=0: # purge to reduce size
                now = time.time()
                # score = age / access count: oldest, least-used entries sort last
                # NOTE(review): assumes entry[4] (access count) is >= 1,
                # which _cache_set/_cache_get guarantee
                new_cache = [[(now-x[5])/x[4],x] for x in _pymol._cache]
                new_cache.sort()
                new_cache = [x[1] for x in new_cache]
                # remove oldest entries one by one until size requirement is met
                while (cur_size>max_size) and (len(new_cache)>1):
                    entry = new_cache.pop()
                    cur_size = cur_size - entry[0]
                _pymol._cache = new_cache
                _pymol._cache_memory = cur_size
            else: # purge to eliminate unused entries
                new_cache = []
                for entry in _pymol._cache:
                    if entry[5] == 0.0:
                        cur_size = cur_size - entry[0]
                    else:
                        new_cache.append(entry)
                _pymol._cache = new_cache
                _pymol._cache_memory = cur_size
        # bug fix: assign result unconditionally -- previously it was only set
        # when the cache was non-empty, raising NameError at return otherwise
        result = _pymol._cache_memory
    finally:
        _self.unlock_data(_self)
    return result
def _cache_get(target, hash_size = None, _self=cmd):
    '''
    Look up *target* in the session cache and return its payload (entry[3]),
    or None when absent. A hit matches the first hash_size characters of the
    entry hash (entry[1]) plus the exact settings field (entry[2]); on a hit
    the access count (entry[4]) and timestamp (entry[5]) are refreshed.
    '''
    result = None
    try:
        _self.lock_data(_self)
        try:
            if hash_size is None:  # idiom fix: was "== None"
                hash_size = len(target[1])
            key = target[1][0:hash_size]
            # should optimize this with a dictionary lookup, key -> index in _cache
            for entry in _self._pymol._cache:
                if entry[1][0:hash_size] == key:
                    if entry[2] == target[2]:
                        # older/short entries may lack the bookkeeping fields
                        while len(entry)<6:
                            entry.append(0)
                        entry[4] = entry[4] + 1 # access count
                        entry[5] = time.time() # timestamp
                        result = entry[3]
                        break
        except:
            # deliberate best-effort: a malformed entry must not break lookup
            traceback.print_exc()
    finally:
        _self.unlock_data(_self)
    return result
def _cache_set(new_entry, max_size, _self=cmd):
    '''
    Insert *new_entry* into the session cache, or update the payload of an
    existing entry with the same hash and settings. When the accounted memory
    exceeds *max_size* (> 0), trigger a purge. Returns DEFAULT_SUCCESS.
    '''
    r = DEFAULT_SUCCESS
    try:
        _self.lock_data(_self)
        _pymol = _self._pymol
        _cache_validate(_self)
        try:
            hash_size = len(new_entry[1])
            key = new_entry[1][0:hash_size]
            found = 0
            new_entry[4] = new_entry[4] + 1 # incr access count
            new_entry[5] = time.time() # timestamp
            # (unused "count" local removed)
            for entry in _pymol._cache:
                if entry[1][0:hash_size] == key:
                    if entry[2] == new_entry[2]: # dupe (shouldn't happen)
                        entry[3] = new_entry[3]
                        found = 1
                        break
            if not found:
                _pymol._cache.append(new_entry)
                _pymol._cache_memory = _pymol._cache_memory + new_entry[0]
                if max_size > 0:
                    if _pymol._cache_memory > max_size:
                        _cache_purge(max_size, _self)
        except:
            # deliberate best-effort: caching must never break the caller
            traceback.print_exc()
    finally:
        _self.unlock_data(_self)
    return r
# ray tracing threads
def _ray_anti_spawn(thread_info,_self=cmd):
    # WARNING: internal routine, subject to change
    '''
    Multithreaded antialiasing: spawn one worker per extra slice in
    thread_info, process the first slice on the calling thread, then join.
    '''
    thread_list = []
    for a in thread_info[1:]:
        t = threading.Thread(target=_cmd.ray_anti_thread,
                             args=(_self._COb,a))
        t.daemon = True  # modernized: Thread.setDaemon() is deprecated
        thread_list.append(t)
    for t in thread_list:
        t.start()
    # calling thread handles the first slice while workers run
    _cmd.ray_anti_thread(_self._COb,thread_info[0])
    for t in thread_list:
        t.join()
def _ray_hash_spawn(thread_info,_self=cmd):
    # WARNING: internal routine, subject to change
    '''
    Multithreaded ray hashing: spawn one worker per non-None extra slice,
    process the first slice (if any) on the calling thread, then join.
    '''
    thread_list = []
    for a in thread_info[1:]:
        if a is not None:  # idiom fix: was "!= None"
            t = threading.Thread(target=_cmd.ray_hash_thread,
                                 args=(_self._COb,a))
            t.daemon = True  # modernized: Thread.setDaemon() is deprecated
            thread_list.append(t)
    for t in thread_list:
        t.start()
    if thread_info[0] is not None:
        _cmd.ray_hash_thread(_self._COb,thread_info[0])
    for t in thread_list:
        t.join()
def _ray_spawn(thread_info,_self=cmd):
    # WARNING: internal routine, subject to change
    '''
    Multithreaded ray tracing: spawn one worker per extra slice in
    thread_info, trace the first slice on the calling thread, then join.
    '''
    thread_list = []
    for a in thread_info[1:]:
        t = threading.Thread(target=_cmd.ray_trace_thread,
                             args=(_self._COb,a))
        t.daemon = True  # modernized: Thread.setDaemon() is deprecated
        thread_list.append(t)
    for t in thread_list:
        t.start()
    _cmd.ray_trace_thread(_self._COb,thread_info[0])
    for t in thread_list:
        t.join()
def _coordset_update_thread(list_lock,thread_info,_self=cmd):
    # WARNING: internal routine, subject to change
    # Worker loop: repeatedly pull one work item off the shared list under
    # the lock, process it outside the lock, stop when the list is empty.
    while True:
        list_lock.acquire()
        try:
            if not thread_info:
                break
            work_item = thread_info.pop(0)
        finally:
            list_lock.release()
        _cmd.coordset_update_thread(_self._COb,work_item)
def _coordset_update_spawn(thread_info,n_thread,_self=cmd):
    # WARNING: internal routine, subject to change
    '''Distribute the coordinate-set updates in thread_info over n_thread
    threads (the calling thread participates as one of the workers).'''
    if len(thread_info):
        list_lock = threading.Lock() # mutex for list
        thread_list = []
        for a in range(1,n_thread):
            t = threading.Thread(target=_coordset_update_thread,
                                 args=(list_lock,thread_info))
            t.daemon = True  # modernized: Thread.setDaemon() is deprecated
            thread_list.append(t)
        for t in thread_list:
            t.start()
        _coordset_update_thread(list_lock,thread_info)
        for t in thread_list:
            t.join()
def _object_update_thread(list_lock,thread_info,_self=cmd):
    # WARNING: internal routine, subject to change
    # Worker loop: repeatedly pull one work item off the shared list under
    # the lock, process it outside the lock, stop when the list is empty.
    while True:
        list_lock.acquire()
        try:
            if not thread_info:
                break
            work_item = thread_info.pop(0)
        finally:
            list_lock.release()
        _cmd.object_update_thread(_self._COb,work_item)
def _object_update_spawn(thread_info,n_thread,_self=cmd):
    # WARNING: internal routine, subject to change
    '''Distribute the object updates in thread_info over n_thread threads
    (the calling thread participates as one of the workers).'''
    if len(thread_info):
        list_lock = threading.Lock() # mutex for list
        thread_list = []
        for a in range(1,n_thread):
            t = threading.Thread(target=_object_update_thread,
                                 args=(list_lock,thread_info))
            t.daemon = True  # modernized: Thread.setDaemon() is deprecated
            thread_list.append(t)
        for t in thread_list:
            t.start()
        _object_update_thread(list_lock,thread_info)
        for t in thread_list:
            t.join()
# status reporting
# do command (while API already locked)
def _do(cmmd,log=0,echo=1,_self=cmd):
    '''Execute a PyMOL command string while the API lock is already held.'''
    return _cmd.do(_self._COb,cmmd,log,echo)
# movie rendering
def _mpng(prefix, first=-1, last=-1, preserve=0, modal=0,
          format=-1, mode=-1, quiet=1,
          width=0, height=0,
          _self=cmd): # INTERNAL
    '''
    Render a movie to numbered image files.

    WARNING: internal routine, subject to change.
    The output format defaults to PNG (format=0) unless the prefix ends in
    ".ppm", which selects PPM (format=1); any trailing numbering/extension
    on the prefix is stripped first.
    '''
    format = int(format)
    try:
        _self.lock(_self)
        fname = prefix
        # bug fix: raw strings -- "\." in a non-raw literal is an invalid
        # escape sequence (DeprecationWarning, later SyntaxWarning)
        if re.search(r"[0-9]*\.png$",fname): # remove numbering, etc.
            fname = re.sub(r"[0-9]*\.png$","",fname)
        if re.search(r"[0-9]*\.ppm$",fname):
            if format<0:
                format = 1 # PPM
            fname = re.sub(r"[0-9]*\.ppm$","",fname)
        if format<0:
            format = 0 # default = PNG
        # bug fix: use the bound instance, not the global cmd module,
        # consistent with every other _self.* call in this routine
        fname = _self.exp_path(fname)
        r = _cmd.mpng_(_self._COb,str(fname),int(first),
                       int(last),int(preserve),int(modal),
                       format,int(mode),int(quiet),
                       int(width), int(height))
    finally:
        _self.unlock(-1,_self)
    return r
# copy image
def _copy_image(_self=cmd,quiet=1):
    '''Copy the current rendered image to the clipboard via the C core.'''
    result = DEFAULT_ERROR
    try:
        _self.lock(_self)
        result = _cmd.copy_image(_self._COb,int(quiet))
    finally:
        _self.unlock(result,_self)
    return result
# loading
def file_read(finfo, _self=cmd):
    '''
    Read a file, possibly gzipped or bzipped, and return the
    uncompressed file contents as a string.

    finfo may be a filename, URL or open file handle.
    '''
    try:
        if not is_string(finfo):
            # already an open file-like object
            handle = finfo
        elif '://' in finfo:
            request = urllib2.Request(finfo,
                headers={'User-Agent': 'PyMOL/' + _self.get_version()[0]})
            handle = urllib2.urlopen(request)
        else:
            handle = open(finfo, 'rb')
        contents = handle.read()
        handle.close()
    except IOError:
        raise pymol.CmdException('failed to open file "%s"' % finfo)

    # transparently decompress based on the magic bytes
    if contents[0:2] == b'\x1f\x8b': # gzip magic number
        import io, gzip
        return gzip.GzipFile(fileobj=io.BytesIO(contents)).read()
    if contents[0:2] == b'BZ' and contents[4:10] == b'1AY&SY': # bzip magic
        import bz2
        return bz2.decompress(contents)
    return contents
def download_chem_comp(resn, quiet=1, _self=cmd):
    '''
    WARNING: internal routine, subject to change

    Download the chemical components CIF for the given residue name
    and return its local filename, or an empty string on failure.
    Already-downloaded files under fetch_path are reused.
    '''
    filename = os.path.join(_self.get('fetch_path'), resn + ".cif")
    if os.path.exists(filename):
        return filename
    # RCSB is the source; a stale EBI-mirror URL that was assigned here and
    # immediately overwritten (dead code) has been removed
    url = "http://files.rcsb.org/ligands/download/" + resn + ".cif"
    if not quiet:
        print(' Downloading ' + url)
    try:
        contents = _self.file_read(url)
        if not contents:
            # explicit exception instead of a bare "raise" with no context
            raise RuntimeError('empty download')
    except Exception:
        # narrowed from a bare except: (keyboard interrupts now propagate)
        print(' Error: Download failed')
        return ''
    try:
        with open(filename, 'wb') as handle:
            handle.write(contents)
    except IOError as e:
        print(e)
        print('Your "fetch_path" setting might point to a read-only directory')
        return ''
    if not quiet:
        print(' ->' + filename)
    return filename
def _load(oname,finfo,state,ftype,finish,discrete,
          quiet=1,multiplex=0,zoom=-1,mimic=1,
          plugin='',
          object_props=None,
          atom_props=None, _self=cmd):
    # WARNING: internal routine, subject to change
    # caller must already hold API lock
    # NOTE: state index assumes 1-based state
    #
    # Dispatch loading of `finfo` into object `oname`:
    #   * chempy model / brick types go through _cmd.load_object (pickle path)
    #   * everything else goes through _cmd.load, with string-loadable types
    #     pre-read into memory via file_read.
    # Returns the C-level result code (DEFAULT_ERROR on failure).
    r = DEFAULT_ERROR
    size = 0
    if ftype not in (loadable.model,loadable.brick):
        if True:
            if ftype in _load2str:
                # this format is loaded from an in-memory string; read the
                # (possibly compressed / remote) source now
                finfo = _self.file_read(finfo)
                ftype = _load2str[ftype]
            # state is converted to 0-based for the C layer
            r = _cmd.load(_self._COb,str(oname),finfo,int(state)-1,int(ftype),
                          int(finish),int(discrete),int(quiet),
                          int(multiplex),int(zoom), plugin,
                          object_props, atom_props, int(mimic))
    else:
        try:
            # pickled chempy object(s) on disk
            x = chempy.io.pkl.fromFile(finfo)
            if isinstance(x, (list, tuple)):
                # multiple objects: load each into consecutive states
                for a in x:
                    r = _cmd.load_object(_self._COb,str(oname),a,int(state)-1,
                                         int(ftype),0,int(discrete),int(quiet),
                                         int(zoom))
                    if(state>0):
                        state = state + 1
                _cmd.finish_object(_self._COb,str(oname))
            else:
                r = _cmd.load_object(_self._COb,str(oname),x,
                                     int(state)-1,int(ftype),
                                     int(finish),int(discrete),
                                     int(quiet),int(zoom))
        except:
            # traceback.print_exc()
            print("Load-Error: Unable to load file '%s'." % finfo)
    return r
# function keys and other specials

# Modifier-name prefixes, indexed by the numeric modifier code passed to
# _special() (0 = none). Combined as e.g. 'CTRL-F1'.
modifier_keys = [
    '',
    'SHFT',
    'CTRL',
    'CTSH',
    'ALT',
]

# Map GLUT special-key codes to PyMOL key names (see glutSpecialFunc).
special_key_codes = {
    # GLUT special key codes (see glutSpecialFunc)
    1 : 'F1',
    2 : 'F2',
    3 : 'F3',
    4 : 'F4',
    5 : 'F5',
    6 : 'F6',
    7 : 'F7',
    8 : 'F8',
    9 : 'F9',
    10 : 'F10',
    11 : 'F11',
    12 : 'F12',
    100 : 'left',
    101 : 'up',
    102 : 'right',
    103 : 'down',
    104 : 'pgup',
    105 : 'pgdn',
    106 : 'home',
    107 : 'end',
    108 : 'insert',
}

# Set of all bare special-key names (no modifier prefix).
special_key_names = set(special_key_codes.values())
def _invoke_key(key, quiet=0, _self=cmd):
    '''Invoke a function that was mapped with cmd.set_key()'''
    # missing or falsy mapping -> nothing to invoke
    mapping = _self.key_mappings.get(key)
    if not mapping:
        if not quiet:
            print(" No key mapping for '%s'" % (key))
        return False
    if is_string(mapping):
        # string mappings are executed as PyMOL command text
        _self.do(mapping)
    else:
        # callable mappings are stored as (function, args, kwargs)
        fn, args, kwargs = mapping
        fn(*args, **kwargs)
    return True
def _special(k,x,y,m=0,_self=cmd): # INTERNAL (invoked when special key is pressed)
    pymol=_self._pymol
    # WARNING: internal routine, subject to change
    #
    # Handle a GLUT special-key event: translate (code, modifier) into a key
    # name, then try in order: explicit key mapping, scene shortcut, view
    # shortcut. Returns True if something handled the key, False otherwise.
    k=int(k)
    m=int(m)

    # convert numeric codes to string key
    try:
        key = special_key_codes[k]
        if m:
            # prefix with modifier name, e.g. 'CTRL-F1'
            key = modifier_keys[m] + '-' + key
    except KeyError:
        # unknown key code (or modifier index) -> not handled
        return False

    # check for explicit mapping
    if _invoke_key(key, 1, _self):
        return True

    # check for scenes and views
    for (fn, sc) in [
            (_self.scene, pymol._scene_dict_sc),
            (_self.view, pymol._view_dict_sc),
            ]:
        if key in sc.keywords:
            fn(key)
            return True
        # allow unambiguous prefix matches via shortcut autocompletion
        autocomp = sc.interpret(key + '-')
        if is_string(autocomp):
            fn(autocomp)
            return True

    print(" No key mapping and no scene or view for '%s'" % (key))
    return False
# control keys
def _ctrl(k,_self=cmd):
    # WARNING: internal routine, subject to change
    # Dispatch a CTRL-modified key press through the key-mapping table.
    _invoke_key('CTRL-' + k, 0, _self)
# alt keys
def _alt(k,_self=cmd):
    # WARNING: internal routine, subject to change
    # Dispatch an ALT-modified key press; key letters are stored upper-case.
    _invoke_key('ALT-' + k.upper(), 0, _self)
# command (apple) keys
def _cmmd(k,_self=cmd):
    # WARNING: internal routine, subject to change
    # command-key on macs
    # Look up the binding for this key; entries are (callable, args, kwargs)
    # triples — a None callable means "bound but disabled".
    ak = _self.cmmd.get(k)
    if ak is not None and ak[0] is not None:  # was "ak[0]!=None"
        ak[0](*ak[1], **ak[2])
    return None
def _ctsh(k,_self=cmd):
    # WARNING: internal routine, subject to change
    # Dispatch a CTRL+SHIFT-modified key press through the key-mapping table.
    _invoke_key('CTSH-' + k, 0, _self)
# writing PNG files (thread-unsafe)
def _png(a,width=0,height=0,dpi=-1.0,ray=0,quiet=1,prior=0,format=-1,_self=cmd):
    # INTERNAL - can only be safely called by GLUT thread (unless prior == 1)
    # WARNING: internal routine, subject to change
    #
    # Write the current image to `a` (filename, or a chr(1)-prefixed encoded
    # file descriptor). Infers the format (PNG/PPM) from the extension when
    # `format` < 0 and appends ".png" to extension-less filenames.
    try:
        _self.lock(_self)
        fname = a
        # raw strings: "\.ppm$" was an invalid escape sequence (warns on
        # modern Python); r"..." is the correct regex spelling
        if re.search(r"\.ppm$",fname):
            if format<0:
                format = 1 # PPM
        elif not re.search(r"\.png$",fname):
            if a[0:1] != chr(1): # not an encoded file descriptor (integer)
                fname = fname +".png"
            if format<0:
                format = 0 # PNG
        # was cmd.exp_path: use the bound instance for consistency with the
        # rest of this routine
        fname = _self.exp_path(fname)
        r = _cmd.png(_self._COb,str(fname),int(width),int(height),
                     float(dpi),int(ray),int(quiet),int(prior),int(format))
    finally:
        _self.unlock(-1,_self)
    return r
# quitting (thread-specific)
def _quit(code=0, _self=cmd):
    pymol=_self._pymol
    # WARNING: internal routine, subject to change
    #
    # Shut PyMOL down with exit status `code`. Flushes/closes the log file
    # and joins the reaper thread first; each step is best-effort so a
    # failure in one cannot block the actual quit.
    try:
        _self.lock(_self)
        try: # flush and close log if possible to avoid threading exception
            if pymol._log_file!=None:
                try:
                    pymol._log_file.flush()
                except:
                    pass
                pymol._log_file.close()
                del pymol._log_file
        except:
            pass
        if _self.reaper!=None:
            try:
                # wait for the child-process reaper thread to finish
                _self.reaper.join()
            except:
                pass
        r = _cmd.quit(_self._COb, int(code))
    finally:
        _self.unlock(-1,_self)
    return r
# screen redraws (thread-specific)
def _refresh(swap_buffers=1,_self=cmd): # Only call with GLUT thread!
    # WARNING: internal routine, subject to change
    #
    # Redraw the scene. Only the GLUT thread may draw immediately; any other
    # thread (or a session without a GLUT thread) just schedules a redraw.
    r = None
    try:
        _self.lock(_self)
        if hasattr(_self._pymol,'glutThread'):
            if thread.get_ident() == _self._pymol.glutThread:
                if swap_buffers:
                    # draw now and swap front/back buffers
                    r = _cmd.refresh_now(_self._COb)
                else:
                    r = _cmd.refresh(_self._COb)
            else:
                # not the GLUT thread: defer drawing to it
                r = _cmd.refresh_later(_self._COb)
        else:
            r = _cmd.refresh_later(_self._COb)
    finally:
        _self.unlock(-1,_self)
    return r
# stereo (platform dependent )
def _sgi_stereo(flag): # SGI-SPECIFIC - bad bad bad
    # WARNING: internal routine, subject to change
    # Toggle the SGI/IRIX monitor between a stereo and a mono video mode by
    # shelling out to /usr/gfx/setmon. No-op on every other platform.
    if sys.platform[0:4] != 'irix':
        return
    if not os.path.exists("/usr/gfx/setmon"):
        return
    if flag:
        env_var, fallback = 'PYMOL_SGI_STEREO', '1024x768_96s'
    else:
        env_var, fallback = 'PYMOL_SGI_MONO', '72hz'
    mode = os.environ.get(env_var, fallback)
    os.system("/usr/gfx/setmon -n " + mode)
# color alias interpretation
def _interpret_color(_self,color):
    # WARNING: internal routine, subject to change
    #
    # Resolve a (possibly abbreviated) color name via the color shortcut
    # table. Returns the resolved name for an unambiguous match, the input
    # unchanged when nothing matches, and reports an error through
    # auto_err for ambiguous matches (falling through with None).
    _validate_color_sc(_self)
    new_color = _self.color_sc.interpret(color)
    if new_color:
        if is_string(new_color):
            # unique match
            return new_color
        else:
            # multiple candidates: let the shortcut machinery report it
            _self.color_sc.auto_err(color,'color')
    else:
        # no shortcut match: pass the original through unchanged
        return color
def _validate_color_sc(_self=cmd):
    # WARNING: internal routine, subject to change
    # (Re)build the color-name Shortcut table if it was invalidated.
    # Includes all registered color indices, the special pseudo-colors,
    # and every ramp object.
    if _self.color_sc is None: # was "== None"; identity test is the idiom
        lst = _self.get_color_indices()
        names = [x[0] for x in lst]
        names.extend(['default', 'auto', 'current', 'atomic'])
        names.extend(_self.get_names_of_type('object:ramp'))
        _self.color_sc = Shortcut(names)
def _invalidate_color_sc(_self=cmd):
    # WARNING: internal routine, subject to change
    # Drop the cached color shortcut table; it is lazily rebuilt by
    # _validate_color_sc on next use.
    _self.color_sc = None
def _get_color_sc(_self=cmd):
    # WARNING: internal routine, subject to change
    # Return the color shortcut table, rebuilding it first if necessary.
    _validate_color_sc(_self=_self)
    return _self.color_sc
def _get_feedback(_self=cmd): # INTERNAL
    # WARNING: internal routine, subject to change
    #
    # Drain all pending feedback messages from the C core. Uses a
    # non-blocking lock attempt so it is safe to call from contexts that
    # must not stall; returns None when the lock could not be acquired,
    # otherwise a (possibly empty) list of messages.
    l = []
    if _self.lock_attempt(_self):
        try:
            r = _cmd.get_feedback(_self._COb)
            while r:
                l.append(r)
                r = _cmd.get_feedback(_self._COb)
        finally:
            _self.unlock(-1,_self)
    else:
        l = None
    return l

# legacy public alias for _get_feedback
get_feedback = _get_feedback # for legacy compatibility
def _fake_drag(_self=cmd): # internal
    # Inject a synthetic drag event into the C core (used to keep
    # drag-mode interactions alive). Always returns 1.
    _self.lock(_self)
    try:
        _cmd.fake_drag(_self._COb)
    finally:
        _self.unlock(-1,_self)
    return 1
def _sdof(tx,ty,tz,rx,ry,rz,_self=cmd):
    # Forward a six-degree-of-freedom motion event (translation tx/ty/tz,
    # rotation rx/ry/rz — presumably from a 3D input device; confirm against
    # the C API) straight to the core without locking.
    _cmd._sdof(_self._COb,tx,ty,tz,rx,ry,rz)
# testing tools

# for comparing floating point numbers calculated using
# different FPUs and which may show some wobble...

def _dump_floats(lst,format="%7.3f",cnt=9):
    # WARNING: internal routine, subject to change
    # Print the values of `lst` using `format`, `cnt` per line, ending with
    # a newline only if the final line is partially filled.
    emitted = 0
    for value in lst:
        print(format % value, end=' ')
        emitted += 1
        if emitted % cnt == 0:
            print()
    if emitted % cnt != 0:
        print()
def _dump_ufloats(lst,format="%7.3f",cnt=9):
    # WARNING: internal routine, subject to change
    # Like _dump_floats, but prints absolute values (sign-insensitive
    # comparison across FPUs).
    emitted = 0
    for value in lst:
        print(format % abs(value), end=' ')
        emitted += 1
        if emitted % cnt == 0:
            print()
    if emitted % cnt != 0:
        print()
# HUH?
def _adjust_coord(a,i,x):
    # Shift coordinate component `i` of atom-like object `a` by `x`.
    a.coord[i] += x
    return None
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetAccountResult:
    """
    A collection of values returned by getAccount.
    """
    def __init__(__self__, id=None, name=None, primary_access_key=None, resource_group_name=None, secondary_access_key=None, sku_name=None, tags=None, x_ms_client_id=None):
        # Each argument is validated for type and stored as a public
        # attribute; the bare strings below are attribute docstrings kept
        # for documentation tooling (this file is tfgen-generated).
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        __self__.id = id
        """
        The provider-assigned unique ID for this managed resource.
        """
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        __self__.name = name
        if primary_access_key and not isinstance(primary_access_key, str):
            raise TypeError("Expected argument 'primary_access_key' to be a str")
        __self__.primary_access_key = primary_access_key
        """
        The primary key used to authenticate and authorize access to the Maps REST APIs.
        """
        if resource_group_name and not isinstance(resource_group_name, str):
            raise TypeError("Expected argument 'resource_group_name' to be a str")
        __self__.resource_group_name = resource_group_name
        if secondary_access_key and not isinstance(secondary_access_key, str):
            raise TypeError("Expected argument 'secondary_access_key' to be a str")
        __self__.secondary_access_key = secondary_access_key
        """
        The secondary key used to authenticate and authorize access to the Maps REST APIs. The second key is given to provide seamless key regeneration.
        """
        if sku_name and not isinstance(sku_name, str):
            raise TypeError("Expected argument 'sku_name' to be a str")
        __self__.sku_name = sku_name
        """
        The sku of the Azure Maps Account.
        """
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        __self__.tags = tags
        if x_ms_client_id and not isinstance(x_ms_client_id, str):
            raise TypeError("Expected argument 'x_ms_client_id' to be a str")
        __self__.x_ms_client_id = x_ms_client_id
        """
        A unique identifier for the Maps Account.
        """
class AwaitableGetAccountResult(GetAccountResult):
    """Awaitable wrapper so the result works in both sync and async code.

    The ``if False: yield`` makes ``__await__`` a generator without ever
    yielding, so ``await``-ing the result completes immediately with a
    plain GetAccountResult copy.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetAccountResult(
            id=self.id,
            name=self.name,
            primary_access_key=self.primary_access_key,
            resource_group_name=self.resource_group_name,
            secondary_access_key=self.secondary_access_key,
            sku_name=self.sku_name,
            tags=self.tags,
            x_ms_client_id=self.x_ms_client_id)
def get_account(name=None,resource_group_name=None,tags=None,opts=None):
    """
    Use this data source to access information about an existing Azure Maps Account.


    :param str name: Specifies the name of the Maps Account.
    :param str resource_group_name: Specifies the name of the Resource Group in which the Maps Account is located.
    :param dict tags: A mapping of tags assigned to the Maps Account.
    :param opts: Optional pulumi.InvokeOptions controlling the invocation.
    """
    # marshal the arguments into the provider's camelCase wire format
    __args__ = dict()

    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['tags'] = tags
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # default to this SDK's pinned provider version
        opts.version = utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure:maps/getAccount:getAccount', __args__, opts=opts).value

    return AwaitableGetAccountResult(
        id=__ret__.get('id'),
        name=__ret__.get('name'),
        primary_access_key=__ret__.get('primaryAccessKey'),
        resource_group_name=__ret__.get('resourceGroupName'),
        secondary_access_key=__ret__.get('secondaryAccessKey'),
        sku_name=__ret__.get('skuName'),
        tags=__ret__.get('tags'),
        x_ms_client_id=__ret__.get('xMsClientId'))
|
import os
import unittest
from conans.model.ref import ConanFileReference
from conans.paths import CONANFILE
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.test.utils.tools import TestClient, TestServer
from conans.util.files import load
class OnlySourceTest(unittest.TestCase):
    """Integration tests for Conan's --build policies and build_policy
    attribute (missing / always / pattern-based source builds)."""

    def setUp(self):
        test_server = TestServer()
        self.servers = {"default": test_server}

    def _create(self, client, number, version, deps=None, export=True):
        # Helper: write a hello-world package recipe (no build/config files)
        # into the client workspace and optionally export it.
        files = cpp_hello_conan_files(number, version, deps, build=False, config=False)
        client.save(files, clean_first=True)
        if export:
            client.run("export . lasote/stable")

    def conan_test_test(self):
        '''Checks --build in test command'''
        client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        # Hello2 -> Hello1 -> Hello0 dependency chain
        self._create(client, "Hello0", "0.0")
        self._create(client, "Hello1", "1.1", ["Hello0/0.0@lasote/stable"])

        # Now test out Hello2
        self._create(client, "Hello2", "2.2", ["Hello1/1.1@lasote/stable"], export=True)
        hello2conanfile = load(os.path.join(client.current_folder, CONANFILE))
        client.save({CONANFILE: hello2conanfile})

        test_conanfile = '''
from conans.model.conan_file import ConanFile

class DefaultNameConan(ConanFile):
    settings = "os", "compiler", "arch"
    requires = "Hello2/2.2@lasote/stable"
    generators = "cmake"

    def test(self):
        pass
'''
        client.save({"test/%s" % CONANFILE: test_conanfile})

        # Should recognize the hello package
        # Will Fail because Hello0/0.0 and Hello1/1.1 has not built packages
        # and by default no packages are built
        client.run("create . lasote/stable", assert_error=True)
        self.assertIn('Try to build it from sources with "--build Hello0"', client.out)

        # We generate the package for Hello0/0.0
        client.run("install Hello0/0.0@lasote/stable --build Hello0")

        # Still missing Hello1/1.1
        client.run("create . lasote/stable", assert_error=True)
        self.assertIn('Try to build it from sources with "--build Hello1"', client.out)

        # We generate the package for Hello1/1.1
        client.run("install Hello1/1.1@lasote/stable --build Hello1")

        # Now Hello2 should be built and not fail
        client.run("create . lasote/stable")
        self.assertNotIn("Can't find a 'Hello2/2.2@lasote/stable' package", client.out)
        self.assertIn('Hello2/2.2@lasote/stable: Forced build from source',
                      client.out)

        # Now package is generated but should be built again
        client.run("create . lasote/stable")
        self.assertIn('Hello2/2.2@lasote/stable: Forced build from source',
                      client.out)

    def build_policies_update_test(self):
        # build_policy='always' forces a source build on every install
        client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        conanfile = """
from conans import ConanFile

class MyPackage(ConanFile):
    name = "test"
    version = "1.9"
    build_policy = 'always'

    def source(self):
        self.output.info("Getting sources")

    def build(self):
        self.output.info("Building sources")

    def package(self):
        self.output.info("Packaging this test package")
"""
        files = {CONANFILE: conanfile}
        client.save(files, clean_first=True)
        client.run("export . lasote/stable")
        client.run("install test/1.9@lasote/stable")
        self.assertIn("Getting sources", client.out)
        self.assertIn("Building sources", client.out)
        self.assertIn("Packaging this test package", client.out)
        self.assertIn("Building package from source as defined by build_policy='always'",
                      client.out)
        client.run("upload test/1.9@lasote/stable")

    def build_policies_in_conanfile_test(self):
        client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        files = cpp_hello_conan_files("Hello0", "1.0", [], config=False, build=False)

        # --- Build policy to missing ---
        files[CONANFILE] = files[CONANFILE].replace("exports = '*'",
                                                    "exports = '*'\n    build_policy = 'missing'")
        client.save(files, clean_first=True)
        client.run("export . lasote/stable")

        # Install, it will build automatically if missing (without the --build missing option)
        client.run("install Hello0/1.0@lasote/stable")
        self.assertIn("Building", client.out)
        self.assertNotIn("Generator txt created conanbuildinfo.txt", client.out)

        # Try to do it again, now we have the package, so no build is done
        client.run("install Hello0/1.0@lasote/stable")
        self.assertNotIn("Building", client.out)
        self.assertNotIn("Generator txt created conanbuildinfo.txt", client.out)

        # Try now to upload all packages, should not crash because of the "missing" build policy
        client.run("upload Hello0/1.0@lasote/stable --all")

        # --- Build policy to always ---
        files[CONANFILE] = files[CONANFILE].replace("build_policy = 'missing'",
                                                    "build_policy = 'always'")
        client.save(files, clean_first=True)
        client.run("export . lasote/stable")

        # Install, it will build automatically if missing (without the --build missing option)
        client.run("install Hello0/1.0@lasote/stable")
        self.assertIn("Detected build_policy 'always', trying to remove source folder",
                      client.out)
        self.assertIn("Building", client.out)
        self.assertNotIn("Generator txt created conanbuildinfo.txt", client.out)

        # Try to do it again, now we have the package, but we build again
        client.run("install Hello0/1.0@lasote/stable")
        self.assertIn("Building", client.out)
        self.assertIn("Detected build_policy 'always', trying to remove source folder",
                      client.out)
        self.assertNotIn("Generator txt created conanbuildinfo.txt", client.out)

        # Try now to upload all packages, should crash because of the "always" build policy
        client.run("upload Hello0/1.0@lasote/stable --all", assert_error=True)
        self.assertIn("no packages can be uploaded", client.out)

    def reuse_test(self):
        # Exercises --build missing / --build / --build <pattern> behavior
        # across "different computers" (fresh clients against one server).
        client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
        files = cpp_hello_conan_files("Hello0", "0.1")
        files[CONANFILE] = files[CONANFILE].replace("build", "build2")

        client.save(files)
        client.run("export . lasote/stable")
        client.run("install %s --build missing" % str(ref))
        self.assertTrue(os.path.exists(client.cache.package_layout(ref).builds()))
        self.assertTrue(os.path.exists(client.cache.package_layout(ref).packages()))

        # Upload
        client.run("upload %s --all" % str(ref))

        # Now from other "computer" install the uploaded conans with same options (nothing)
        other_client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        other_client.run("install %s --build missing" % str(ref))
        self.assertFalse(os.path.exists(other_client.cache.package_layout(ref).builds()))
        self.assertTrue(os.path.exists(other_client.cache.package_layout(ref).packages()))

        # Now from other "computer" install the uploaded conans with same options (nothing)
        other_client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        other_client.run("install %s --build" % str(ref))
        self.assertTrue(os.path.exists(other_client.cache.package_layout(ref).builds()))
        self.assertTrue(os.path.exists(other_client.cache.package_layout(ref).packages()))

        # Use an invalid pattern and check that its not builded from source
        other_client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        other_client.run("install %s --build HelloInvalid" % str(ref))
        self.assertIn("No package matching 'HelloInvalid' pattern", other_client.out)
        self.assertFalse(os.path.exists(other_client.cache.package_layout(ref).builds()))
        # self.assertFalse(os.path.exists(other_client.cache.package_layout(ref).packages()))

        # Use another valid pattern and check that its not builded from source
        other_client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
        other_client.run("install %s --build HelloInvalid -b Hello" % str(ref))
        self.assertIn("No package matching 'HelloInvalid' pattern", other_client.out)
        # self.assertFalse(os.path.exists(other_client.cache.package_layout(ref).builds()))
        # self.assertFalse(os.path.exists(other_client.cache.package_layout(ref).packages()))

        # Now even if the package is in local store, check that's rebuilded
        other_client.run("install %s -b Hello*" % str(ref))
        self.assertIn("Copying sources to build folder", other_client.out)

        other_client.run("install %s" % str(ref))
        self.assertNotIn("Copying sources to build folder", other_client.out)
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 16_servers_started_via_docker.ipynb (unless otherwise specified).
__all__ = ['BaseServer', 'FastAPIUvicornServer', 'DjangoGunicornWSGIServer', 'NginxDockerServer']
# Cell
import time
import subprocess
from pydantic import BaseModel
from .files import BenchmarkFile
from .registry import register_model
@register_model
class BaseServer(BaseModel):
    """Common interface for benchmark servers.

    The base implementation is inert: start/stop do nothing and the server
    reports itself as not running. Subclasses override the lifecycle hooks.
    """

    protocol: str = "http"
    name: str = "base_server"
    host: str = "localhost"
    port: int = 8000
    view: str = ""

    def start(self):
        """Lifecycle hook: launch the server (no-op in the base class)."""
        pass

    def stop(self):
        """Lifecycle hook: shut the server down (no-op in the base class)."""
        pass

    def is_running(self):
        """The base server never runs."""
        return False

    def file_to_url(self, file: BenchmarkFile):
        """Return the URL under which *file* is served by this server."""
        prefix = f"{self.protocol}://{self.host}:{self.port}/"
        if self.view:
            return prefix + f"{self.view}/{file.path}"
        return prefix + f"{file.path}"

    def params(self):
        """Serializable description of this server: class name + field values."""
        return {
            "class_name": type(self).__name__,
            "parameters": self.dict(),
        }
# Cell
@register_model
class FastAPIUvicornServer(BaseServer):
    """Serve benchmark files from a FastAPI app run under uvicorn,
    managed as an external OS process located via `ps aux`."""

    name: str = "fastAPI/uvicorn"

    def get_pid(self):
        # Grep the process table for the uvicorn app; returns the PID string
        # of the first match, or None (implicitly) when not running.
        kwargs = {"shell": True, "capture_output": True, "text": True}
        output = subprocess.run(
            f"ps aux | grep will_it_saturate.fastapi.main:app", **kwargs
        )
        # drop empty lines and the grep process itself
        lines = [l for l in output.stdout.split("\n") if len(l) > 0 and "grep" not in l]
        if len(lines) > 0:
            pid = lines[0].split()[1]
            return pid

    @property
    def started(self):
        # running iff a matching process exists
        return self.get_pid() is not None

    def start_server(self):
        # launch uvicorn detached; access log disabled so logging does not
        # skew the benchmark
        subprocess.Popen(
            [
                "uvicorn",
                "--host",
                str(self.host),
                "--port",
                str(self.port),
                "--no-access-log",
                "will_it_saturate.fastapi.main:app",
            ]
        )
        # subprocess.Popen(["uvicorn", "will_it_saturate.fastapi.main:app"])

    def stop_server(self):
        subprocess.check_output(["kill", self.get_pid()])
        time.sleep(1)  # dunno why this is necessary

    def start(self):
        # idempotent: only start when not already running
        if not self.started:
            self.start_server()

    def stop(self):
        # idempotent: only stop when running
        if self.started:
            self.stop_server()
# Cell
@register_model
class DjangoGunicornWSGIServer(BaseServer):
    """Serve benchmark files from a Django app run under gunicorn (WSGI),
    managed as external OS processes located via `ps aux`."""

    name: str = "django/gunicorn/wsgi"

    def get_pids(self):
        """Return the PID strings of all gunicorn master/worker processes."""
        kwargs = {"shell": True, "capture_output": True, "text": True}
        # was an f-string without placeholders
        output = subprocess.run("ps aux | grep will_it_saturate.django.wsgi", **kwargs)
        # drop empty lines and the grep process itself
        lines = [l for l in output.stdout.split("\n") if len(l) > 0 and "grep" not in l]
        # (removed stray debug print of the line count)
        return [line.split()[1] for line in lines]

    @property
    def started(self):
        return len(self.get_pids()) > 0

    def start_server(self):
        # Launch gunicorn detached with 8 workers and a large backlog.
        # Bug fix: "-b" and the bind address were accidentally one implicitly
        # concatenated string ("-b:8000"); pass them as two proper arguments.
        subprocess.Popen(
            [
                "gunicorn",
                "--backlog",
                "10000",
                "-w",
                "8",
                "-b",
                f":{self.port}",
                "will_it_saturate.django.wsgi",
            ]
        )
        time.sleep(2)

    def stop_server(self):
        kill_command = ["kill"]
        kill_command.extend(self.get_pids())
        subprocess.check_output(kill_command)
        time.sleep(1)  # dunno why this is necessary

    def start(self):
        # idempotent: only start when not already running
        if not self.started:
            self.start_server()

    def stop(self):
        # idempotent: only stop when running
        if self.started:
            self.stop_server()
# Cell
from pathlib import Path
from .registry import register_model
@register_model
class NginxDockerServer(BaseServer):
    """Serve benchmark files from nginx inside a Docker container built
    on the fly (data directory baked into the image)."""

    name: str = "nginx/docker"
    docker_name: str = "wis-nginx"
    port: int = 8000
    data_root: str = "data"
    # NOTE(review): unannotated, so pydantic treats this as a plain class
    # attribute rather than a model field — confirm that is intended.
    subprocess_kwargs = {"shell": True, "capture_output": True, "text": True}

    def write_dockerfile(self):
        # Generate a minimal Dockerfile that copies the data directory into
        # nginx's document root.
        dockerfile = f"""
FROM nginx

COPY {self.data_root} /usr/share/nginx/html/{self.data_root}
"""
        with Path("Dockerfile.nginx").open("w") as f:
            f.write(dockerfile)

    @property
    def docker_id(self):
        # First column of `docker ps` for our container, or None (implicitly)
        # when the container is not running.
        output = subprocess.run(
            f"docker ps | grep {self.docker_name}", **self.subprocess_kwargs
        )
        if len(output.stdout) > 0:
            return output.stdout.split()[0]

    @property
    def started(self):
        return self.docker_id is not None

    def stop_container(self, docker_id):
        output = subprocess.run(f"docker kill {docker_id}", **self.subprocess_kwargs)
        print(output.stdout)

    def remove_container(self):
        output = subprocess.run(
            f"docker rm {self.docker_name}", **self.subprocess_kwargs
        )
        print(output.stdout)

    def build_container(self):
        output = subprocess.run(
            f"docker build -f Dockerfile.nginx -t {self.docker_name} .",
            **self.subprocess_kwargs,
        )
        print(output.stdout)

    def start_container(self):
        # map container port 80 to the configured host port
        output = subprocess.run(
            f"docker run --name {self.docker_name} -d -p {self.port}:80 {self.docker_name}",
            **self.subprocess_kwargs,
        )
        print(output.stdout)

    def start_server(self):
        # full lifecycle: regenerate Dockerfile, drop any stale container,
        # rebuild the image, then run it
        self.write_dockerfile()
        self.remove_container()
        self.build_container()
        self.start_container()

    def stop_server(self):
        if self.started:
            self.stop_container(self.docker_id)
        self.remove_container()
        time.sleep(1)  # dunno why this is necessary

    def start(self):
        # idempotent: only start when not already running
        if not self.started:
            self.start_server()

    def stop(self):
        # idempotent: only stop when running
        if self.started:
            self.stop_server()
|
import os
import torch
import random
import numpy as np
from functools import reduce
from visdom import Visdom
from torchvision.transforms import ToPILImage, ToTensor
from shutil import copyfile
import torch.nn.functional as F
def add_prefix(path, pref):
    """
    Add a prefix to the final component of a file path.

    Args:
        path: path to file
        pref: prefix to prepend to the filename
    Returns:
        path with the last component prefixed, joined with '/'
        (e.g. add_prefix('a/b/c.txt', 'p_') -> 'a/b/p_c.txt')
    """
    head, tail = os.path.split(path)
    # join with '/' (not os.path.join) to preserve the historical output
    # format of the reduce-based implementation, including the leading '/'
    # produced for bare filenames: add_prefix('c.txt', 'p_') -> '/p_c.txt'
    return '/'.join((head, pref + tail))
class AbstractCallback(object):
    """Interface for training callbacks.

    Every hook raises RuntimeError until a subclass overrides it, so a
    forgotten override fails loudly instead of silently doing nothing.
    """

    def per_batch(self, args):
        """Hook invoked after every batch."""
        raise RuntimeError("Don\'t implement batch callback method")

    def per_epoch(self, args):
        """Hook invoked after every epoch."""
        raise RuntimeError("Don\'t implement epoch callback method")

    def early_stopping(self, args):
        """Hook invoked when training stops early."""
        raise RuntimeError("Don\'t implement early stopping callback method")
class SaveModelPerEpoch(AbstractCallback):
def __init__(self, path, save_step=1):
self.path = path
self.step=save_step
if not os.path.isdir(path):
os.makedirs(path)
def per_batch(self, args):
pass
def per_epoch(self, args):
if args['n'] % self.step == 0:
args['model'].save(
os.path.join(self.path, 'model-{}.trh'.format(args['n']))
)
def early_stopping(self, args):
args['model'].save(
os.path.join(self.path, 'early_model-{}.trh'.format(args['n']))
)
class SaveOptimizerPerEpoch(AbstractCallback):
def __init__(self, path, save_step=1):
self.path = path
self.step=save_step
if not os.path.isdir(path):
os.makedirs(path)
def per_batch(self, args):
pass
def per_epoch(self, args):
if args['n'] % self.step == 0:
torch.save(args['optimize_state'], (
os.path.join(
self.path,
'optimize_state-{}.trh'.format(args['n'])
)
))
def early_stopping(self, args):
torch.save(args['optimize_state'], (
os.path.join(
self.path,
'early_optimize_state-{}.trh'.format(args['n'])
)
))
class VisPlot(AbstractCallback):
    """Stream training curves to a Visdom server.

    Plots are registered by name; the name encodes WHEN and WHAT to plot
    ('per_batch'/'per_epoch' plus substrings like 'train', 'validation',
    'acc', 'loss', 'roc'), and per_batch/per_epoch dispatch on those
    substrings.
    """

    def __init__(self, title, server='http://localhost', port=8080,
                 logname=None):
        self.viz = Visdom(server=server, port=port, log_to_filename=logname)
        # name -> [visdom window handle (None until first draw), options dict]
        self.windows = {}
        self.title = title

    def register_scatterplot(self, name, xlabel, ylabel, legend=None):
        # Pre-declare a plot window; the window itself is created lazily on
        # the first update (handle stays None until then).
        options = dict(title=self.title, markersize=5,
                       xlabel=xlabel, ylabel=ylabel) if legend is None \
            else dict(title=self.title, markersize=5,
                      xlabel=xlabel, ylabel=ylabel,
                      legend=legend)
        self.windows[name] = [None, options]

    def update_scatterplot(self, name, x, y1, y2=None, window_size=100):
        """
        Update plot
        Args:
            name: name of updating plot
            x: x values for plotting
            y1: y values for plotting
            y2: plot can contains two graphs
            window_size: window size for plot smoothing (by mean in window)
                (currently unused)

        Returns:

        """
        # 'append' only works on an existing window; first call creates it
        if y2 is None:
            self.windows[name][0] = self.viz.line(
                np.array([y1], dtype=np.float32),
                np.array([x], dtype=np.float32),
                win=self.windows[name][0],
                opts=self.windows[name][1],
                update='append' if self.windows[name][0] is not None else None
            )
        else:
            # two curves plotted against the same x
            self.windows[name][0] = self.viz.line(
                np.array([[y1, y2]], dtype=np.float32),
                np.array([x], dtype=np.float32),
                win=self.windows[name][0],
                opts=self.windows[name][1],
                update='append' if self.windows[name][0] is not None else None
            )

    def per_batch(self, args, keyward='per_batch'):
        # route batch-level metrics to every registered 'per_batch' window,
        # picking the metric(s) from the window name
        for win in self.windows.keys():
            if keyward in win:
                if 'train' in win and 'acc' not in win:
                    self.update_scatterplot(
                        win,
                        args['n'],
                        args['loss']
                    )
                if 'train' in win and 'acc' in win and 'loss' not in win:
                    self.update_scatterplot(
                        win,
                        args['n'],
                        args['acc']
                    )
                if 'train' in win and 'acc' in win and 'loss' in win:
                    # combined plot: loss and accuracy together
                    self.update_scatterplot(
                        win,
                        args['n'],
                        args['loss'],
                        args['acc']
                    )

    def per_epoch(self, args, keyward='per_epoch'):
        # route epoch-level train/validation metrics analogously
        for win in self.windows.keys():
            if keyward in win:
                if 'train' in win and 'validation' in win and 'acc' not in win:
                    self.update_scatterplot(
                        win,
                        args['n'],
                        args['loss'],
                        args['val loss']
                    )
                if 'train' in win and 'validation' in win and 'acc' in win:
                    self.update_scatterplot(
                        win,
                        args['n'],
                        args['acc'],
                        args['val acc']
                    )
                if 'validation' in win and 'roc' in win:
                    self.update_scatterplot(
                        win,
                        args['n'],
                        args['val roc']
                    )
                if 'validation' in win and 'acc' in win and 'train' not in win:
                    self.update_scatterplot(
                        win,
                        args['n'],
                        args['val acc'],
                    )

    def early_stopping(self, args):
        # nothing to plot on early stop
        pass
class VisImagesGrid(AbstractCallback):
    """Periodically render a square grid of batch images to Visdom.

    Every `vis_step`-th per_batch call tiles up to grid_size x grid_size
    images from the batch, upscales the tile by `scale`, and pushes it to
    a Visdom image window.
    """

    def __init__(self, title, server='http://localhost', port=8080,
                 vis_step=1, scale=10, grid_size=8):
        self.viz = Visdom(server=server, port=port)
        self.title = title + 'Image'
        # label -> visdom window handle (None until first draw)
        self.windows = {1: None}
        # call counter used to honor vis_step
        self.n = 0
        self.step = vis_step
        self.scale = scale
        self.to_image = ToPILImage()
        self.to_tensor = ToTensor()
        self.grid_size = grid_size
        random.seed()

    def per_batch(self, args, label=1):
        """
        Per batch visualization
        Args:
            args: input tensor in [0, 1] values format (under key 'img';
                assumed NCHW layout — TODO confirm against callers)
            label: 1

        Returns:

        """
        if self.n % self.step == 0:
            # i = random.randint(0, args['img'].size(0) - 1)
            # i = (args['img'].size(0) - 1) // 2
            for win in self.windows.keys():
                if win == label:
                    # shrink the grid if the batch is smaller than requested
                    _grid_size = int(np.sqrt(args['img'].size(0)))
                    grid_size = _grid_size \
                        if _grid_size < self.grid_size else self.grid_size

                    imgs = args['img'].to('cpu')
                    # concatenate each row of images along width (dim=2),
                    # then stack rows along height (dim=1)
                    grid_lines = []
                    for line in range(grid_size):
                        grid_lines.append(
                            torch.cat(
                                tuple(imgs[line*grid_size:(line+1)*grid_size]),
                                dim=2
                            )
                        )
                    grid = torch.cat(tuple(grid_lines), dim=1)

                    self.windows[win] = self.viz.image(
                        F.interpolate(
                            grid.unsqueeze(0),
                            scale_factor=(self.scale, self.scale)
                        ).squeeze(0),
                        win=self.windows[win],
                        opts=dict(title=self.title)
                    )
        self.n += 1
        # guard against (theoretical) unbounded counter growth
        if self.n >= 1000000000:
            self.n = 0

    def per_epoch(self, args):
        pass

    def early_stopping(self, args):
        pass

    def add_window(self, label):
        # register an additional image window under `label`
        self.windows[label] = None
|
from phoney import __version__
def test_version():
    """The package must report the version pinned for this release."""
    expected = '0.1.0'
    assert __version__ == expected
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train-atari.py
# Author: Yuxin Wu
import argparse
import cv2
import gym
import multiprocessing as mp
import numpy as np
import pickle
import os
import six
import sys
import uuid
import tensorflow as tf
from six.moves import queue
from tensorpack import *
from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient
from tensorpack.utils.concurrency import ensure_proc_terminate, start_proc_mask_signal
from tensorpack.utils.gpu import get_num_gpu
from tensorpack.utils.serialize import dumps
from atari_wrapper import FireResetEnv, FrameStack, LimitLength, MapState
from common import Evaluator, eval_model_multithread, play_n_episodes
from simulator import SimulatorMaster, SimulatorProcess, TransitionExperience
# concurrent.futures only exists on Python 3; on Python 2 fall back to the
# catch-all Exception so "except CancelledError" clauses stay valid.
if six.PY3:
    from concurrent import futures
    CancelledError = futures.CancelledError
else:
    CancelledError = Exception
# --- environment / observation configuration ---
IMAGE_SIZE = (84, 84)        # resize every Atari frame to 84x84
FRAME_HISTORY = 4            # number of stacked past frames per state
GAMMA = 0.99                 # reward discount factor
STATE_SHAPE = IMAGE_SIZE + (3, )  # H, W, RGB channels

# --- training schedule ---
LOCAL_TIME_MAX = 5
STEPS_PER_EPOCH = 6000
EVAL_EPISODE = 50
BATCH_SIZE = 128
PREDICT_BATCH_SIZE = 16     # batch for efficient forward

# --- parallelism ---
SIMULATOR_PROC = mp.cpu_count() * 2
PREDICTOR_THREAD_PER_GPU = 4

# filled in at startup from command-line arguments
NUM_ACTIONS = None
ENV_NAME = None
LOG_DIRNAME = None
MODEL_DIRNAME = None
EPOCHES = 100

# output directory for this experiment's checkpoints/logs
dirname = '/mnt/research/judy/reward_shaping/sanity_qfunc_learn/'
def process_rewards(rewards, episodes=10):
    """Compute per-step discounted returns for the first `episodes` reward
    sequences.

    Each element of `rewards` is a 1-D array of per-step rewards; rewards
    are clipped to [-1, 1] before discounting with GAMMA. Returns a list of
    (T, 1) arrays of discounted returns.
    """
    discounted_rewards = []
    for idx in range(episodes):
        clipped = np.clip(rewards[idx], -1, 1)
        discounted = np.zeros((clipped.shape[0], 1))
        # accumulate the return backwards through time
        running = 0
        for t in reversed(range(len(clipped))):
            running = running * GAMMA + clipped[t]
            discounted[t] = running
        discounted_rewards.append(discounted)
    return discounted_rewards
def find_available_data(pathdir="/mnt/research/judy/reward_shaping/expert_data/"):
    """Return the id portion of every '<prefix>_<id>.npz' file in `pathdir`.

    Bug fix: the original used file.strip(".npz"), which strips the
    CHARACTERS '.', 'n', 'p', 'z' from both ends of the name (mangling ids
    that start or end with those letters), not the '.npz' suffix.
    """
    file_ids = []
    for file in os.listdir(pathdir):
        if file.endswith(".npz"):
            # drop only the extension, then take the text after the '_'
            stem = os.path.splitext(file)[0]
            file_id = stem.split("_")[1]
            file_ids.append(file_id)
    return file_ids
def get_player(train=False, dumpdir=None):
    """Build the Atari gym environment named by the global ENV_NAME,
    wrapped for training/evaluation.

    Args:
        train: if True, cap episodes at 60000 steps.
        dumpdir: if set, record every episode as video into this directory.
    Returns:
        the wrapped gym environment.
    """
    env = gym.make(ENV_NAME)
    if dumpdir:
        # record all episodes, not just the Monitor default subset
        env = gym.wrappers.Monitor(env, dumpdir, video_callable=lambda _: True)

    # wrapper order matters: fire-to-start, then resize, then frame-stack
    env = FireResetEnv(env)
    env = MapState(env, lambda im: cv2.resize(im, IMAGE_SIZE))
    env = FrameStack(env, 4)
    if train:
        env = LimitLength(env, 60000)
    return env
class SupervisedModel(ModelDesc):
    def inputs(self):
        """Declare the model's input tensors: stacked uint8 frames, the
        action taken, and the observed discounted future reward.
        NUM_ACTIONS must have been set from the environment first."""
        assert NUM_ACTIONS is not None
        return [tf.TensorSpec((None,) + STATE_SHAPE + (FRAME_HISTORY, ), tf.uint8, 'state'),
                tf.TensorSpec((None,), tf.int64, 'action'),
                tf.TensorSpec((None,), tf.float32, 'futurereward'),
                ]
    def _get_NN_prediction(self, state):
        """Shared conv trunk with two heads.

        Args:
            state: uint8 tensor of shape (B, H, W, C, FRAME_HISTORY).
        Returns:
            (logits, q_value): unnormalized policy logits over NUM_ACTIONS,
            and a scalar value head.
        """
        assert state.shape.rank == 5  # Batch, H, W, Channel, History
        state = tf.transpose(state, [0, 1, 2, 4, 3])  # swap channel & history, to be compatible with old models
        # fold history into the channel axis: (B, H, W, C * FRAME_HISTORY)
        image = tf.reshape(state, [-1] + list(STATE_SHAPE[:2]) + [STATE_SHAPE[2] * FRAME_HISTORY])
        # normalize pixel values to [0, 1]
        image = tf.cast(image, tf.float32) / 255.0
        with argscope(Conv2D, activation=tf.nn.relu):
            l = Conv2D('conv0', image, 32, 5)
            l = MaxPooling('pool0', l, 2)
            l = Conv2D('conv1', l, 32, 5)
            l = MaxPooling('pool1', l, 2)
            l = Conv2D('conv2', l, 64, 4)
            l = MaxPooling('pool2', l, 2)
            l = Conv2D('conv3', l, 64, 3)

        l = FullyConnected('fc0', l, 512)
        l = PReLU('prelu', l)
        logits = FullyConnected('fc-pi', l, NUM_ACTIONS)    # unnormalized policy
        q_value = FullyConnected('fc-v', l, 1)
        return logits, q_value
def build_graph(self, resume=False):
## create graph, session
tf.reset_default_graph()
sess = tf.Session()
action = tf.placeholder(dtype=tf.int64, shape=(None,1))
state = tf.placeholder(dtype=tf.uint8, shape= (None,) + STATE_SHAPE + (FRAME_HISTORY, ) )
futurereward = tf.placeholder(dtype=tf.float32, shape=(None,1))
logits, q_value = self._get_NN_prediction(state)
policy = tf.nn.softmax(logits, name='policy')
log_probs = tf.log(policy + 1e-6)
one_hot_actions = tf.one_hot(action, NUM_ACTIONS)
one_hot_actions = tf.reshape(one_hot_actions, [-1, NUM_ACTIONS])
xentropy_loss = tf.losses.softmax_cross_entropy(
one_hot_actions, # one-hot-labels
logits, # logits
)
value_loss = tf.nn.l2_loss(q_value - futurereward, name='q_value_loss')
entropy_beta = tf.get_variable(
'entropy_beta',
shape=[],
initializer=tf.constant_initializer(0.01),
trainable=False
)
cost = tf.add_n([xentropy_loss * entropy_beta, value_loss])
confience_a_given_s = tf.reduce_mean(
tf.reduce_sum(
policy * one_hot_actions, 1)
)
lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
optimizer_op = tf.train.AdamOptimizer(lr, epsilon=1e-3).minimize(cost)
########### Add gradient clipping #########
# opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)#.minimize(cost)
# gvs = opt.compute_gradients(cost)
# capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
# tf.clip_by_norm(grad, 0.1 * tf.cast(tf.size(grad), tf.float32))
# optimizer_op = opt.apply_gradients(capped_gvs)
########### Add gradient clipping #########
# Create a summary to monitor cost tensors
tf.summary.scalar("loss", cost)
tf.summary.scalar("cross_entropy_loss", xentropy_loss)
tf.summary.scalar("q_value_loss", value_loss)
# Create a summary to monitor confidence tensor
tf.summary.scalar("mean_pi_a_given_s", confience_a_given_s)
# Merge all summaries into a single op
merged = tf.summary.merge_all()
# Create a summary to log real future rewards
tf.summary.scalar("futurereward", futurereward)
## TBD load parameter, or init parameter
saver = tf.compat.v1.train.Saver()
if resume:
print('loading and building pretrained policy')
saver.restore(sess, tf.train.latest_checkpoint(MODEL_DIRNAME))
print('loaded and built successfully')
else:
init = tf.global_variables_initializer()
sess.run(init)
print('model initialized successfully')
writer = tf.compat.v1.summary.FileWriter(LOG_DIRNAME, sess.graph)
results = {}
results["cost"] = cost
results["policy"] = policy
results["logits"] = logits
results["merged"] = merged
results["writer"] = writer
results["actions_ph"] = action
results["futurereward_ph"] = futurereward
results["states_ph"] = state
results["optimizer"] = optimizer_op
results["saver"] = saver
results["q_value"] = q_value
results["futurereward"] = futurereward
#self.writer = writer
#self.optimizer = opt
#self.actions_ph = action
#self.futurereward_ph = futurereward
#self.states_ph = state
self.handler = results
self.sess = sess
def train(self, file_ids, epoches=1, initial_episode=0):
episode_index = initial_episode
for epoch in range(epoches):
for file_id in file_ids:
states, actions, rewards = self.load_data(file_id=file_id)
episodes = len(rewards) # how many episodes are in this file
rewards = process_rewards(rewards, episodes=episodes) # get discounted rewards
## start training
##### DEBUG ######
#weights = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if v.name == 'fc-pi/b:0']
##### DEBUG ######
for e in range(episodes):
episode_index += 1
# get each episode
print("File id = {}, Episode id ={}".format(file_id, episode_index))
e_state, e_action, e_reward = states[e], actions[e], rewards[e]
# state steps should be 1 more than action/reward steps
stride = BATCH_SIZE
pos, frame_size = 0, len(e_action)
while True:
end = frame_size if pos+stride>=frame_size else pos+stride
batch_x = np.reshape(e_state[pos:end], (-1,) + STATE_SHAPE + (FRAME_HISTORY,) )
batch_y = np.reshape(e_action[pos:end], (-1, 1))
batch_r = np.reshape(e_reward[pos:end], (-1,1))
_, loss_val, tf_summary = self.sess.run(
[
self.handler["optimizer"],
self.handler["cost"],
self.handler["merged"],
# weights[0], # DEBUG
],
feed_dict={
self.handler["states_ph"]:batch_x,
self.handler["futurereward_ph"]:batch_r,
self.handler["actions_ph"]:batch_y
}
)
pos = end
## release memory space for each mini-batch
del batch_x, batch_y, batch_r
if pos >= frame_size:
# end of pisode
break
## print("Weight value: ", weight)
information = "Update Episode {:2d}, Episode Length {:5d}, Running Loss {:.4f}".format(episode_index, frame_size, loss_val)
logger.info(information)
self.handler["writer"].add_summary(tf_summary, episode_index)
## save session and Episode index
self.handler["saver"].save(self.sess, os.path.join(MODEL_DIRNAME, "checkpoint.ckpt") )
fp = open(os.path.join(MODEL_DIRNAME, "step.p"), "wb")
pickle.dump(episode_index, fp)
fp.close()
del states, actions, rewards
#loss_summary = tf.Summary(value=[tf.Summary.Value(tag="running_loss", simple_value=loss_val)])
#writer.add_summary(loss_summary, global_step=episode_number)
def load_data(self, pathdir="/mnt/research/judy/reward_shaping/expert_data/", file_id=1):
path = os.path.join(pathdir, "batch_{}.npz".format(file_id))
data = np.load(path, allow_pickle=True)
states = data["observations"]
actions = data["actions"]
rewards = data["rewards"]
return states, actions, rewards
def train(args):
    """Entry point: build the supervised model and fit it on expert data.

    Resumes from the pickled episode counter in MODEL_DIRNAME/step.p
    when ``args.resume`` is set and the file exists.
    """
    assert tf.test.is_gpu_available(), "Training requires GPUs!"
    logger.set_logger_dir(LOG_DIRNAME)

    # bail out early when no GPU is visible
    if get_num_gpu() == 0:
        logger.warn("Training without GPU !")
        exit()

    # build the model graph (optionally restoring a checkpoint)
    model = SupervisedModel()
    model.build_graph(resume=args.resume)

    # gather the available expert data files
    file_ids = sorted(find_available_data())

    # resume the global episode counter when possible
    step_file = os.path.join(MODEL_DIRNAME, "step.p")
    initial_episode = 0
    if args.resume and os.path.exists(step_file):
        with open(step_file, 'rb') as f:
            initial_episode = pickle.load(f)

    model.train(file_ids, epoches=EPOCHES, initial_episode=initial_episode)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model', default="/mnt/research/judy/reward_shaping/Pong-v0.npz", type=str)
    parser.add_argument('--env', help='env', default="Pong-v0", type=str)
    parser.add_argument('--task', help='task to perform',
                        choices=['play', 'eval', 'train', 'dump_video'], default='train')
    parser.add_argument('--output', help='output directory for submission', default='output_dir')
    parser.add_argument('--episode', help='number of episode to eval', default=1, type=int)
    # NOTE(review): type=bool is an argparse footgun -- any non-empty string
    # (including "False") parses as True; consider action='store_true'.
    parser.add_argument('--render', help='If render the environment', default=False, type=bool)
    parser.add_argument('--save', help='If save episodes', default=False, type=bool)
    parser.add_argument('--save_id', help='Index of Batches to be collected', default=1, type=int)
    # NOTE(review): default=True with no type means '--resume False' yields
    # the truthy string "False"; resume can only be disabled by editing this.
    parser.add_argument('--resume', help='Resume Model', default=True)
    args = parser.parse_args()

    # module-level globals consumed by get_player(), SupervisedModel and train()
    ENV_NAME = args.env
    NUM_ACTIONS = get_player().action_space.n  # probe the env once for the action count
    LOG_DIRNAME = os.path.join(dirname, 'supervised-atari-{}'.format(ENV_NAME))
    MODEL_DIRNAME = os.path.join(dirname, "model_checkpoint")
    logger.info("Environment: {}, number of actions: {}".format(ENV_NAME, NUM_ACTIONS))
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    train(args)
|
import signal
import requests
import docker
import json
import time
import sys
def signal_handler(signum, frame):
    """SIGINT handler: stop the running Grafana container, then exit.

    Args:
        signum: signal number delivered by the OS (unused).
        frame: current stack frame at interrupt time (unused).
    """
    print("Stopping Grafana...")
    # Use the docker module directly instead of the module-level `client`
    # global: the old `client.from_env()` raised NameError whenever the
    # handler fired before (or without) the __main__ block defining it.
    docker_client = docker.from_env()
    try:
        # find the first running container created from the grafana image
        grafana = next(
            c for c in docker_client.containers.list()
            if c.attrs['Config']['Image'] == "grafana/grafana"
        )
        grafana.stop()
    except StopIteration:
        # no grafana container is running -- nothing to stop
        pass
    except Exception as e:
        # best-effort shutdown: report the problem but never block exit
        print("Could not stop Grafana container: {}".format(e))
    sys.exit(0)
if __name__ == '__main__':
    # stop the container gracefully on Ctrl-C
    signal.signal(signal.SIGINT, signal_handler)

    print("(1/3) Initializing Grafana")
    client = docker.from_env()
    container = client.containers.run(
        "grafana/grafana", ports={'3000/tcp': 3000}, detach=True)
    print("(2/3) Grafana Initialized")

    # give Grafana a moment to start serving its HTTP API
    time.sleep(3)

    with open('Clipper_DataSource.json', 'r') as f:
        datasource = json.load(f)

    # BUG FIX: Grafana's datasource API expects a JSON request body.
    # Passing the dict via ``data=`` form-encodes it (mangling nested
    # fields); ``json=`` serializes it and sets the Content-Type header.
    requests.post(
        'http://admin:admin@localhost:3000/api/datasources', json=datasource)
    print('(3/3) Clipper Data Source Added')
    print(
        'Please login to http://localhost:3000 using username and password "admin"'
    )
    print('''
    After Login, Click "Home" -> "Import Dashboard" -> "Upload json File" -> "Clipper_Dashboard.json"
    ''')

    # keep the process alive so the SIGINT handler can run the cleanup
    while True:
        time.sleep(1)
|
"""
This module implements a remote pool to use with AMP.
"""
from twisted.protocols import amp
class AMPProxy(amp.AMP):
    """
    A Proxy AMP protocol that forwards calls to a wrapped
    callRemote-like callable.

    The proxy answers StartTLS itself; every other command understood by
    the child protocol class is serialized and forwarded via ``wrapped``.
    """

    def __init__(self, wrapped, child):
        """
        @param wrapped: A callRemote-like callable that takes an
                        L{amp.Command} as first argument and other
                        optional keyword arguments afterwards.
        @type wrapped: L{callable}.

        @param child: The protocol class of the process pool children.
                      Used to forward only the methods that are actually
                      understood correctly by them.
        @type child: L{amp.AMP}
        """
        amp.AMP.__init__(self)
        self.wrapped = wrapped
        self.child = child
        # Sanity check: the child must not redefine any command this proxy
        # responds to itself; the only command they may share is StartTLS.
        localCd = set(self._commandDispatch.keys())
        childCd = set(self.child._commandDispatch.keys())
        assert localCd.intersection(childCd) == set([b"StartTLS"]), \
            "Illegal method overriding in Proxy"

    def locateResponder(self, name):
        """
        This is a custom locator to forward calls to the children
        processes while keeping the ProcessPool a transparent MITM.

        This way of working has a few limitations, the first of which
        is the fact that children won't be able to take advantage of
        any dynamic locator except for the default L{CommandLocator}
        that is based on the _commandDispatch attribute added by the
        metaclass. This limitation might be lifted in the future.
        """
        # NOTE(review): the dispatch keys asserted in __init__ are bytes
        # (b"StartTLS") while `name` is compared against the text string
        # "StartTLS" here -- confirm which type AMP passes on this
        # Python/Twisted version.
        if name == "StartTLS":
            # This is a special case where the proxy takes precedence
            return amp.AMP.locateResponder(self, "StartTLS")
        # Get the dict of commands from the child AMP implementation.
        cd = self.child._commandDispatch
        if name in cd:
            # If the command is there, then we forward stuff to it.
            commandClass, _responderFunc = cd[name]
            # We need to wrap the doWork function because the wrapping
            # call doesn't pass the command as first argument since it
            # thinks that we are the actual receivers and callable is
            # already the responder while it isn't.
            doWork = lambda **kw: self.wrapped(commandClass, **kw)
            # Now let's call the right function and wrap the result
            # dictionary.
            return self._wrapWithSerialization(doWork, commandClass)
        # of course if the name of the command is not in the child it
        # means that it might be in this class, so fallback to the
        # default behavior of this module.
        return amp.AMP.locateResponder(self, name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.