hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71ff5e2374591301bbf01a9ea272bd250502167 | 7,448 | py | Python | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/instruction.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | [
"MIT"
] | null | null | null | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/instruction.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | [
"MIT"
] | null | null | null | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/instruction.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | [
"MIT"
] | null | null | null |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Instruction(Base):
    """The Instruction class encapsulates a user managed instruction node in the ixnetwork hierarchy.
    An instance of the class can be obtained by accessing the Instruction property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
    The internal properties list can be managed by the user by using the add and remove methods.
    """
    # Name of this node in the server's session data model (SDM) hierarchy.
    _SDM_NAME = 'instruction'
    def __init__(self, parent):
        super(Instruction, self).__init__(parent)
    @property
    def Actions(self):
        """An instance of the Actions class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.actions.Actions)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Local import avoids a circular dependency between generated modules.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.actions import Actions
        return Actions(self)
    @property
    def Field(self):
        """An instance of the Field class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.field.Field)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Local import avoids a circular dependency between generated modules.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.field import Field
        return Field(self)
    @property
    def Count(self):
        """Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
        Returns:
            number
        """
        return self._get_attribute('count')
    @property
    def Description(self):
        """Description of the field.
        Returns:
            str
        """
        return self._get_attribute('description')
    @Description.setter
    def Description(self, value):
        # Pushes the new value to the server-side attribute.
        self._set_attribute('description', value)
    @property
    def DisplayName(self):
        """Display name used by GUI.
        Returns:
            str
        """
        return self._get_attribute('displayName')
    @property
    def IsEditable(self):
        """Information on the requirement of the field.
        Returns:
            bool
        """
        return self._get_attribute('isEditable')
    @IsEditable.setter
    def IsEditable(self, value):
        self._set_attribute('isEditable', value)
    @property
    def IsEnabled(self):
        """Enables or disables the field.
        Returns:
            bool
        """
        return self._get_attribute('isEnabled')
    @IsEnabled.setter
    def IsEnabled(self, value):
        self._set_attribute('isEnabled', value)
    @property
    def IsRequired(self):
        """Information on the requirement of the field.
        Returns:
            bool
        """
        return self._get_attribute('isRequired')
    @IsRequired.setter
    def IsRequired(self, value):
        self._set_attribute('isRequired', value)
    @property
    def Name(self):
        """Name of packet field
        Returns:
            str
        """
        return self._get_attribute('name')
    @Name.setter
    def Name(self, value):
        self._set_attribute('name', value)
    def add(self, Description=None, IsEditable=None, IsEnabled=None, IsRequired=None, Name=None):
        """Adds a new instruction node on the server and retrieves it in this instance.
        Args:
            Description (str): Description of the field.
            IsEditable (bool): Information on the requirement of the field.
            IsEnabled (bool): Enables or disables the field.
            IsRequired (bool): Information on the requirement of the field.
            Name (str): Name of packet field
        Returns:
            self: This instance with all currently retrieved instruction data using find and the newly added instruction data available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() packages the keyword arguments as the REST create payload.
        return self._create(locals())
    def remove(self):
        """Deletes all the instruction data in this instance from server.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self, Count=None, Description=None, DisplayName=None, IsEditable=None, IsEnabled=None, IsRequired=None, Name=None):
        """Finds and retrieves instruction data from the server.
        All named parameters support regex and can be used to selectively retrieve instruction data from the server.
        By default the find method takes no parameters and will retrieve all instruction data from the server.
        Args:
            Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group
            Description (str): Description of the field.
            DisplayName (str): Display name used by GUI.
            IsEditable (bool): Information on the requirement of the field.
            IsEnabled (bool): Enables or disables the field.
            IsRequired (bool): Information on the requirement of the field.
            Name (str): Name of packet field
        Returns:
            self: This instance with matching instruction data retrieved from the server available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() carries the filter criteria to the REST select call.
        return self._select(locals())
    def read(self, href):
        """Retrieves a single instance of instruction data from the server.
        Args:
            href (str): An href to the instance to be retrieved
        Returns:
            self: This instance with the instruction data from the server available through an iterator or index
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
    def AddAction(self, Arg2):
        """Executes the addAction operation on the server.
        Adds an Action item.
        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/topology)): The method internally sets Arg1 to the current href for this instance
            Arg2 (str):
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Arg1 must be this node's own href; it rides along via locals().
        Arg1 = self.href
        return self._execute('AddAction', payload=locals(), response_object=None)
| 33.102222 | 156 | 0.733217 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Instruction(Base):
_SDM_NAME = 'instruction'
def __init__(self, parent):
super(Instruction, self).__init__(parent)
@property
def Actions(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.actions import Actions
return Actions(self)
@property
def Field(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.field import Field
return Field(self)
@property
def Count(self):
return self._get_attribute('count')
@property
def Description(self):
return self._get_attribute('description')
@Description.setter
def Description(self, value):
self._set_attribute('description', value)
@property
def DisplayName(self):
return self._get_attribute('displayName')
@property
def IsEditable(self):
return self._get_attribute('isEditable')
@IsEditable.setter
def IsEditable(self, value):
self._set_attribute('isEditable', value)
@property
def IsEnabled(self):
return self._get_attribute('isEnabled')
@IsEnabled.setter
def IsEnabled(self, value):
self._set_attribute('isEnabled', value)
@property
def IsRequired(self):
return self._get_attribute('isRequired')
@IsRequired.setter
def IsRequired(self, value):
self._set_attribute('isRequired', value)
@property
def Name(self):
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
def add(self, Description=None, IsEditable=None, IsEnabled=None, IsRequired=None, Name=None):
return self._create(locals())
def remove(self):
self._delete()
def find(self, Count=None, Description=None, DisplayName=None, IsEditable=None, IsEnabled=None, IsRequired=None, Name=None):
return self._select(locals())
def read(self, href):
return self._read(href)
def AddAction(self, Arg2):
Arg1 = self.href
return self._execute('AddAction', payload=locals(), response_object=None)
| true | true |
f71ff7f5175b2552338f6b41f9f1520efde5ebe9 | 1,203 | py | Python | pytorch_basic_template/model/model_entry.py | ldylab/deep_learning_with_pytorch | c86a2e24ee94ade1a78b66f10eb69b6e1fdd4463 | [
"MIT"
] | null | null | null | pytorch_basic_template/model/model_entry.py | ldylab/deep_learning_with_pytorch | c86a2e24ee94ade1a78b66f10eb69b6e1fdd4463 | [
"MIT"
] | null | null | null | pytorch_basic_template/model/model_entry.py | ldylab/deep_learning_with_pytorch | c86a2e24ee94ade1a78b66f10eb69b6e1fdd4463 | [
"MIT"
] | null | null | null | # from model.base.fcn import CustomFcn
# from model.best.fcn import DeepLabv3Fcn
# from model.better.fcn import Resnet101Fcn
# from model.sota.fcn import LightFcn
from model.alexnet.alexnet_model import AlexNet
from model.lenet5.lenet_5_model import LeNet5
from model.vggnet.vggnet16 import VGG16
from model.densenet.densenet_model import DenseNet121
from model.resnet.resnet34_model import resnet34
from model.resnet.resnet101_model import resnet101, resnet50
from model.cotnet.cotnet_model import cotnet50
import torch.nn as nn
def select_model(args):
    """Instantiate and return the network named by ``args.model_type``.

    Args:
        args: Parsed arguments; must provide ``model_type`` and
            ``classes_num`` (and whatever the chosen model's ctor reads).

    Returns:
        nn.Module: The requested model instance.

    Raises:
        KeyError: If ``args.model_type`` is not a known model name
            (unchanged from the original lookup semantics).

    FIX: the original table eagerly constructed *every* model just to
    select one, wasting memory and start-up time; the table now holds
    lazy factories and only the requested model is built.
    """
    type2factory = {
        'alexnet_fcn': lambda: AlexNet(args),
        'lenet5_fcn': lambda: LeNet5(args),
        'vggnet16_fcn': lambda: VGG16(args),
        'densenet121_fcn': lambda: DenseNet121(num_classes=args.classes_num, grayscale=False),
        'resnet34_fcn': lambda: resnet34(num_classes=args.classes_num),
        'resnet101_fcn': lambda: resnet101(num_classes=args.classes_num),
        'resnet50_fcn': lambda: resnet50(num_classes=args.classes_num),
        'cotnet50_fcn': lambda: cotnet50(num_classes=args.classes_num),
    }
    return type2factory[args.model_type]()
def equip_multi_gpu(model, args):
    """Wrap ``model`` in ``nn.DataParallel`` over the devices in ``args.gpus``."""
    return nn.DataParallel(model, device_ids=args.gpus)
| 36.454545 | 86 | 0.758936 |
from model.alexnet.alexnet_model import AlexNet
from model.lenet5.lenet_5_model import LeNet5
from model.vggnet.vggnet16 import VGG16
from model.densenet.densenet_model import DenseNet121
from model.resnet.resnet34_model import resnet34
from model.resnet.resnet101_model import resnet101, resnet50
from model.cotnet.cotnet_model import cotnet50
import torch.nn as nn
def select_model(args):
type2model = {
'alexnet_fcn': AlexNet(args),
'lenet5_fcn': LeNet5(args),
'vggnet16_fcn': VGG16(args),
'densenet121_fcn': DenseNet121(num_classes=args.classes_num, grayscale=False),
'resnet34_fcn': resnet34(num_classes=args.classes_num),
'resnet101_fcn': resnet101(num_classes=args.classes_num),
'resnet50_fcn': resnet50(num_classes=args.classes_num),
'cotnet50_fcn': cotnet50(num_classes=args.classes_num)
}
model = type2model[args.model_type]
return model
def equip_multi_gpu(model, args):
model = nn.DataParallel(model, device_ids=args.gpus)
return model
| true | true |
f71ff8d04b827e68cb215f95a82095cadf50e4ca | 1,094 | py | Python | data/p4VQE/R4/benchmark/startPyquil330.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startPyquil330.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startPyquil330.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
    """Build the fixed 4-qubit benchmark circuit (gates applied in order)."""
    prog = Program()
    for gate in (H(1), H(2), H(3), Y(3), SWAP(1, 0), Y(2), Y(2)):
        prog += gate
    return prog
def summrise_results(bitstrings) -> dict:
    """Count how many times each measured bitstring occurs.

    Args:
        bitstrings: Iterable of hashable measurement outcomes (strings).

    Returns:
        dict: Mapping of bitstring -> occurrence count.

    Note: the misspelled name ('summrise') is kept for caller compatibility.
    IDIOM: replaces a hand-rolled tally loop with collections.Counter.
    """
    from collections import Counter
    return dict(Counter(bitstrings))
if __name__ == '__main__':
    # Build the circuit and run it 1024 times on a 4-qubit simulated QVM.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # results maps qubit index -> per-shot outcomes; stack and transpose so
    # each row is one shot, then join the bits of each shot into a string.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # Persist the histogram of outcomes for later analysis.
    writefile = open("../data/startPyquil330.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| 22.326531 | 64 | 0.608775 |
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program()
prog += H(1)
prog += H(2)
prog += H(3)
prog += Y(3)
prog += SWAP(1,0)
prog += Y(2)
prog += Y(2)
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil330.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| true | true |
f71ff98e011cc1d66aab1506e6db05f161b7b1cb | 150 | py | Python | src/blog/views.py | master-stm/blog-dhango-ar | dd904e2af9bc6b7f85da6063f2abcaf12d572b47 | [
"bzip2-1.0.6"
] | null | null | null | src/blog/views.py | master-stm/blog-dhango-ar | dd904e2af9bc6b7f85da6063f2abcaf12d572b47 | [
"bzip2-1.0.6"
] | null | null | null | src/blog/views.py | master-stm/blog-dhango-ar | dd904e2af9bc6b7f85da6063f2abcaf12d572b47 | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render
# Create your views here.
def home(request):
    # Render the blog landing page with its title in the template context.
    context = {'title': 'Home'}
    return render(request, 'blog/index.html', context)
| 16.666667 | 64 | 0.7 | from django.shortcuts import render
def home(request):
return render(request, 'blog/index.html', {'title': 'Home'})
| true | true |
f71ffa32ecb22bbfb515cf38fa8b15f86b7fb720 | 5,517 | py | Python | atron_cli/atron.py | atron-cc/atron | 49244fbd5ca3d372f6e3e74cda388ddea3acf00e | [
"MIT"
] | 4 | 2019-05-11T01:21:15.000Z | 2020-02-08T18:00:39.000Z | atron_cli/atron.py | atron-cc/atron | 49244fbd5ca3d372f6e3e74cda388ddea3acf00e | [
"MIT"
] | null | null | null | atron_cli/atron.py | atron-cc/atron | 49244fbd5ca3d372f6e3e74cda388ddea3acf00e | [
"MIT"
] | null | null | null | import click
import time
import platform
import os
from minifier import minify
from .board import Board, BoardException, DirectoryExistsError
from .board import PyboardError
_board = None
@click.group()
@click.option(
"--port",
"-p",
envvar="ATRON_PORT",
default="",
type=click.STRING,
help="Name of serial port for connected board. Can optionally specify with ATRON_PORT environment variable.",
metavar="PORT",
)
@click.option(
"--baud",
"-b",
envvar="ATRON_BAUD",
default=115200,
type=click.INT,
help="Baud rate for the serial connection (default 115200). Can optionally specify with ATRON_BAUD environment variable.",
metavar="BAUD",
)
@click.version_option()
def cli(port, baud):
global _board
if platform.system() == "Windows":
if port == '':
click.secho('you have to choose a COM port.', bold=True, fg='red')
return
if not re.match("^COM(\d+)$", port):
click.secho('invalid port {}'.format(port), fg='red')
return
else:
if port == '':
port = '/dev/ttyUSB0'
seconds = 1
while True:
try:
_board = Board(port, baud)
break
except BoardException as error:
click.secho(str(error), bold=True, fg='yellow')
click.secho(
'reonnecting to board after {} seconds. press ctrl+c to cancel'.format(seconds), fg='green')
time.sleep(seconds)
seconds *= 2
@cli.command()
@click.option(
    "-h",
    "--hard",
    "hard",
    is_flag=True,
    default=False,
    help="Perform a hard reboot, including running init.py",
)
def reset(hard):
    # A hard reboot is still unimplemented (TODO), so it is a no-op.
    if hard:
        return
    _board.soft_reset()
@cli.command()
def raw_command():
    """Interactive raw-REPL loop: read a line, run it on the board, print output.

    BUG FIX: the original called ``raw_input``, which does not exist in
    Python 3; the builtin ``input`` is used instead.
    """
    click.secho(
        'the raw-command is under construction and may have some bugs.', fg='yellow')
    click.secho('entering raw-command mode ...', fg='green')
    _board.soft_reset()
    time.sleep(1)
    _board.board.enter_raw_repl()
    try:
        while True:
            command = input(">>> ")
            result = _board.board.exec_raw(command)
            if result[0]:
                print(result[0])
    finally:
        # Always leave raw-REPL mode and reset, even on Ctrl+C.
        _board.board.exit_raw_repl()
        _board.soft_reset()
@cli.command()
@click.argument("remote_folder")
def rmdir(remote_folder):
    # Delete the given directory on the board's filesystem.
    _board.files.rmdir(remote_folder)
@cli.command()
@click.argument(
    "local",
    default="main.py",
)
@click.argument(
    "remote",
    default="main.py",
)
def upload(local, remote):
    # Minify the local source and write it to the board under `remote`.
    # (`remote` has a default, so the None fallback is defensive only.)
    target = remote if remote is not None else os.path.basename(os.path.abspath(local))
    _board.files.put(target, minify(local))
@cli.command()
@click.argument(
    "local",
    default="main.py",
)
@click.argument(
    "remote",
    required=False,
)
def put(local, remote):
    """Copy a local file or directory tree onto the board.

    BUG FIX: the directory branch used ``posixpath`` but the module never
    imported it, so uploading a directory raised NameError.
    """
    import posixpath  # remote (board) paths are always POSIX-style

    if remote is None:
        remote = os.path.basename(os.path.abspath(local))
    if os.path.isdir(local):
        board_files = _board.files
        for parent, child_dirs, child_files in os.walk(local):
            # Mirror the local directory layout under the remote root.
            remote_parent = posixpath.normpath(
                posixpath.join(remote, os.path.relpath(parent, local))
            )
            try:
                board_files.mkdir(remote_parent)
                for filename in child_files:
                    with open(os.path.join(parent, filename), "rb") as infile:
                        remote_filename = posixpath.join(
                            remote_parent, filename)
                        board_files.put(remote_filename, infile.read())
            # NOTE(review): when the remote directory already exists, this
            # also skips copying its files — possibly unintended; confirm
            # before changing (behavior kept as-is here).
            except DirectoryExistsError:
                pass
    else:
        with open(local, "rb") as infile:
            _board.files.put(remote, infile.read())
@cli.command()
@click.argument("remote_file")
def rm(remote_file):
    # Delete the given file on the board's filesystem.
    _board.files.rm(remote_file)
@cli.command()
@click.argument("local_file")
@click.option(
    "--no-output",
    "-n",
    is_flag=True,
    help="Run the code without waiting for it to finish and print output. Use this when running code with main loops that never return.",
)
def run(local_file, no_output):
    # Execute a local script on the board; stream its output unless -n.
    try:
        board_output = _board.files.run(local_file, not no_output)
        if board_output is not None:
            click.secho(board_output.decode("utf-8"))
    except IOError:
        click.echo(
            "Failed to find or read input file: {0}".format(local_file), err=True
        )
@cli.command()
@click.argument("directory", default="/")
@click.option(
    "--long_format",
    "-l",
    is_flag=True,
    help="Print long format info including size of files. Note the size of directories is not supported and will show 0 values.",
)
@click.option(
    "--recursive",
    "-r",
    is_flag=True,
    help="recursively list all files and (empty) directories.",
)
def ls(directory, long_format, recursive):
    # List the board's files; in short format, highlight .py entries in green.
    try:
        entries = _board.files.ls(directory, long_format=long_format, recursive=recursive)
    except PyboardError as err:
        click.secho('PyBoard Exception.', fg='red')
        click.secho(str(err), fg='yellow')
        return
    for entry in entries:
        if long_format:
            click.echo(entry)
        else:
            is_python = os.path.splitext(entry)[1].lower() == '.py'
            click.secho(entry, fg='green' if is_python else 'white')
if __name__ == '__main__':
    try:
        cli()
    finally:
        # Best-effort cleanup of the serial connection on the way out.
        if _board is not None:
            try:
                _board.close()
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # raised during cleanup are no longer swallowed.
            except Exception:
                pass
| 25.780374 | 138 | 0.589813 | import click
import time
import platform
import os
from minifier import minify
from .board import Board, BoardException, DirectoryExistsError
from .board import PyboardError
_board = None
@click.group()
@click.option(
"--port",
"-p",
envvar="ATRON_PORT",
default="",
type=click.STRING,
help="Name of serial port for connected board. Can optionally specify with ATRON_PORT environment variable.",
metavar="PORT",
)
@click.option(
"--baud",
"-b",
envvar="ATRON_BAUD",
default=115200,
type=click.INT,
help="Baud rate for the serial connection (default 115200). Can optionally specify with ATRON_BAUD environment variable.",
metavar="BAUD",
)
@click.version_option()
def cli(port, baud):
global _board
if platform.system() == "Windows":
if port == '':
click.secho('you have to choose a COM port.', bold=True, fg='red')
return
if not re.match("^COM(\d+)$", port):
click.secho('invalid port {}'.format(port), fg='red')
return
else:
if port == '':
port = '/dev/ttyUSB0'
seconds = 1
while True:
try:
_board = Board(port, baud)
break
except BoardException as error:
click.secho(str(error), bold=True, fg='yellow')
click.secho(
'reonnecting to board after {} seconds. press ctrl+c to cancel'.format(seconds), fg='green')
time.sleep(seconds)
seconds *= 2
@cli.command()
@click.option(
"-h",
"--hard",
"hard",
is_flag=True,
default=False,
help="Perform a hard reboot, including running init.py",
)
def reset(hard):
if not hard:
_board.soft_reset()
return
@cli.command()
def raw_command():
click.secho(
'the raw-command is under construction and may have some bugs.', fg='yellow')
click.secho('entering raw-command mode ...', fg='green')
_board.soft_reset()
time.sleep(1)
_board.board.enter_raw_repl()
try:
while True:
command = raw_input(">>> ")
result = _board.board.exec_raw(command)
if result[0]:
print(result[0])
finally:
_board.board.exit_raw_repl()
_board.soft_reset()
@cli.command()
@click.argument("remote_folder")
def rmdir(remote_folder):
_board.files.rmdir(remote_folder)
@cli.command()
@click.argument(
"local",
default="main.py",
)
@click.argument(
"remote",
default="main.py",
)
def upload(local, remote):
if remote is None:
remote = os.path.basename(os.path.abspath(local))
_board.files.put(remote, minify(local))
@cli.command()
@click.argument(
"local",
default="main.py",
)
@click.argument(
"remote",
required=False,
)
def put(local, remote):
if remote is None:
remote = os.path.basename(os.path.abspath(local))
if os.path.isdir(local):
board_files = _board.files
for parent, child_dirs, child_files in os.walk(local):
remote_parent = posixpath.normpath(
posixpath.join(remote, os.path.relpath(parent, local))
)
try:
board_files.mkdir(remote_parent)
for filename in child_files:
with open(os.path.join(parent, filename), "rb") as infile:
remote_filename = posixpath.join(
remote_parent, filename)
board_files.put(remote_filename, infile.read())
except DirectoryExistsError:
pass
else:
with open(local, "rb") as infile:
_board.files.put(remote, infile.read())
@cli.command()
@click.argument("remote_file")
def rm(remote_file):
_board.files.rm(remote_file)
@cli.command()
@click.argument("local_file")
@click.option(
"--no-output",
"-n",
is_flag=True,
help="Run the code without waiting for it to finish and print output. Use this when running code with main loops that never return.",
)
def run(local_file, no_output):
try:
output = _board.files.run(local_file, not no_output)
if output is not None:
click.secho(output.decode("utf-8"))
except IOError:
click.echo(
"Failed to find or read input file: {0}".format(local_file), err=True
)
@cli.command()
@click.argument("directory", default="/")
@click.option(
"--long_format",
"-l",
is_flag=True,
help="Print long format info including size of files. Note the size of directories is not supported and will show 0 values.",
)
@click.option(
"--recursive",
"-r",
is_flag=True,
help="recursively list all files and (empty) directories.",
)
def ls(directory, long_format, recursive):
try:
files = _board.files.ls(directory, long_format=long_format, recursive=recursive)
except PyboardError as err:
click.secho('PyBoard Exception.', fg='red')
click.secho(str(err), fg='yellow')
return
for f in files:
if not long_format:
click.secho(
f,
fg='green' if os.path.splitext(f)[1].lower() == '.py' else 'white',
)
else:
click.echo(f)
if __name__ == '__main__':
try:
cli()
finally:
if _board is not None:
try:
_board.close()
except:
pass
| true | true |
f71ffa36ec9cfc1d58a5f56d219d392194ca7a79 | 331 | py | Python | Projetos/surf05.py | anderson-br-ti/python | d65d851f0934267dff9256dfdac09b100efb3b45 | [
"MIT"
] | null | null | null | Projetos/surf05.py | anderson-br-ti/python | d65d851f0934267dff9256dfdac09b100efb3b45 | [
"MIT"
] | null | null | null | Projetos/surf05.py | anderson-br-ti/python | d65d851f0934267dff9256dfdac09b100efb3b45 | [
"MIT"
] | null | null | null | f = open('surf.txt')
# BUG FIX: the original sorted `notas` (scores) and `nomes` (names) as two
# independent lists, which destroyed the pairing between each surfer and
# their score before printing the top three. Sort (score, name) pairs
# together instead. (`f` is the file handle opened just above.)
placar = []
for linha in f:
    nome, pontos = linha.split()
    placar.append((float(pontos), nome))
f.close()
# Descending by score; ties fall back to reverse-alphabetical name order.
placar.sort(reverse=True)
for pontos, nome in placar[:3]:
    print('%s %4.2f' % (nome, pontos))
| 22.066667 | 40 | 0.601208 | f = open('surf.txt')
notas = []
nomes = []
for linha in f:
nome, pontos = linha.split()
notas.append(float(pontos))
nomes.append(nome)
f.close()
notas.sort(reverse=True)
nomes.sort(reverse=True)
print ('%s %4.2f' %(nomes[0], notas[0]))
print ('%s %4.2f' %(nomes[1], notas[1]))
print ('%s %4.2f' %(nomes[2], notas[2]))
| true | true |
f71ffa96a121e4b599332fda00ecec1c3e395215 | 381 | py | Python | sayhello/__init__.py | IshunChin/cfn-tutorial | 0a282fe3a2affa60c5c46702206128bb19e60869 | [
"MIT"
] | null | null | null | sayhello/__init__.py | IshunChin/cfn-tutorial | 0a282fe3a2affa60c5c46702206128bb19e60869 | [
"MIT"
] | null | null | null | sayhello/__init__.py | IshunChin/cfn-tutorial | 0a282fe3a2affa60c5c46702206128bb19e60869 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
app = Flask('sayhello')
app.config.from_pyfile('settings.py')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
db = SQLAlchemy(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
from sayhello import views, errors, commands
| 23.8125 | 44 | 0.808399 | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
app = Flask('sayhello')
app.config.from_pyfile('settings.py')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
db = SQLAlchemy(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
from sayhello import views, errors, commands
| true | true |
f71ffb327a8627122eb965383d8d9493e4611a68 | 23,768 | py | Python | examples/sentence_similarity/gensen_train.py | gohanlon/nlp | 7b07109a2066eb2152c370ef38600230668a9c8d | [
"MIT"
] | 4,407 | 2019-10-29T21:35:19.000Z | 2022-03-31T13:56:37.000Z | examples/sentence_similarity/gensen_train.py | shubham9g17/nlp-recipes | a5cd2303187239799ae0b1597a7c16eb99a97108 | [
"MIT"
] | 134 | 2019-10-30T23:38:59.000Z | 2022-03-01T11:42:53.000Z | examples/sentence_similarity/gensen_train.py | shubham9g17/nlp-recipes | a5cd2303187239799ae0b1597a7c16eb99a97108 | [
"MIT"
] | 726 | 2019-10-31T15:21:52.000Z | 2022-03-31T10:18:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
The GenSen training process follows the steps:
1. Create or load the dataset vocabulary
2. Train on the training dataset for each batch epoch (batch size = 48 updates)
3. Evaluate on the validation dataset for every 10 epoches
4. Find the local minimum point on validation loss
5. Save the best model and stop the training process
AzureML provides AI Compute to train the model and track the performance.
This training process is based on GPU only.
"""
import argparse
import json
import logging
import os
import time
import horovod.torch as hvd
import mlflow
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
from utils_nlp.models.gensen.multi_task_model import MultitaskModel
from utils_nlp.models.gensen.utils import (
BufferedDataIterator,
NLIIterator,
compute_validation_loss,
)
# Let cuDNN pick the fastest convolution algorithms for fixed-size inputs.
cudnn.benchmark = True
logger = logging.getLogger(__name__)
# Initialize Horovod before any collective ops are used.
hvd.init()
if torch.cuda.is_available():
    # Horovod: pin GPU to local rank.
    torch.cuda.set_device(hvd.local_rank())
def metric_average(value, name):
    """
    Average a scalar metric across all Horovod workers.

    Args:
        value (float): Local metric value on this worker.
        name (str): Name used to key the Horovod allreduce operation.

    Returns:
        float: The metric averaged over all workers.
    """
    tensor = torch.tensor(value)
    avg_tensor = hvd.allreduce(tensor, name=name)
    return avg_tensor.item()
def setup_horovod(model, learning_rate):
    """ Setup for Horovod usage.

    Args:
        model(MultitaskModel): The MultitaskModel object.
        learning_rate(float): Base learning rate; scaled by the worker count.

    Returns: hvd.DistributedOptimizer: Optimizer to use for computing
    gradients and applying updates.
    """
    # Horovod: scale learning rate by the number of GPUs.
    scaled_lr = learning_rate * hvd.size()
    base_optimizer = optim.Adam(model.parameters(), lr=scaled_lr)
    # Horovod: broadcast initial parameters & optimizer state from rank 0
    # so every worker starts from the same weights.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(base_optimizer, root_rank=0)
    # Horovod: wrap with DistributedOptimizer using fp16 gradient compression.
    return hvd.DistributedOptimizer(
        base_optimizer,
        named_parameters=model.named_parameters(),
        compression=hvd.Compression.fp16,
    )
def setup_logging(config):
    """Configure root logging: a per-task log file plus a console echo."""
    log_format = "%(asctime)s - %(levelname)s - %(message)s"
    # File handler via basicConfig (truncates any previous run's log).
    logging.basicConfig(
        level=logging.INFO,
        format=log_format,
        filename="log/%s" % (config["data"]["task"]),
        filemode="w",
    )
    # Mirror INFO-level records to the console as well.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(logging.Formatter(log_format))
    logging.getLogger("").addHandler(stream_handler)
def log_config(config):
    """Write the run's model and training hyper-parameters to the log."""
    model_cfg = config["model"]
    train_cfg = config["training"]
    logging.info("Model Parameters : ")
    logging.info("Task : %s " % (config["data"]["task"]))
    logging.info(
        "Source Word Embedding Dim : %s" % (model_cfg["dim_word_src"])
    )
    logging.info(
        "Target Word Embedding Dim : %s" % (model_cfg["dim_word_trg"])
    )
    logging.info("Source RNN Hidden Dim : %s" % (model_cfg["dim_src"]))
    logging.info("Target RNN Hidden Dim : %s" % (model_cfg["dim_trg"]))
    logging.info(
        "Source RNN Bidirectional : %s" % (model_cfg["bidirectional"])
    )
    logging.info("Batch Size : %d " % (train_cfg["batch_size"]))
    logging.info("Optimizer : %s " % (train_cfg["optimizer"]))
    logging.info("Learning Rate : %f " % (train_cfg["lrate"]))
def evaluate(
    config,
    train_iterator,
    model,
    loss_criterion,
    monitor_epoch,
    min_val_loss,
    min_val_loss_epoch,
    save_dir,
    starting_time,
    model_state,
    max_epoch,
):
    """ Function to validate the model.
    Args:
        max_epoch(int): Limit training to specified number of epochs.
        model_state(dict): Saved model weights.
        config(dict): Config object.
        train_iterator(BufferedDataIterator): BufferedDataIterator object.
        model(MultitaskModel): The MultitaskModel object.
        loss_criterion(nn.CrossEntropyLoss): Cross entropy loss.
        monitor_epoch(int): Current epoch count.
        min_val_loss(float): Minimum validation loss
        min_val_loss_epoch(int): Epoch where the minimum validation
            loss was seen.
        save_dir(str): Directory path to save the model dictionary.
        starting_time(time.Time): Starting time of the training.
    Returns:
        tuple: (stop_training(bool), min_val_loss_epoch(int),
            min_val_loss(float), model_state(dict)).
    """
    break_flag = 0
    for task_idx, task in enumerate(train_iterator.tasknames):
        # Skip-thought tasks have no validation split to score here.
        if "skipthought" in task:
            continue
        validation_loss = compute_validation_loss(
            config,
            model,
            train_iterator,
            loss_criterion,
            task_idx,
            lowercase=True,
        )
        # Average the local loss across all Horovod workers.
        validation_loss = metric_average(validation_loss, "val_loss")
        logging.info("%s Validation Loss : %.3f" % (task, validation_loss))
        # Horovod: print output only on first rank.
        if hvd.rank() == 0:
            # log the best val accuracy to AML run.
            # np.float was a deprecated alias of the builtin float and was
            # removed in NumPy 1.24; the builtin is the drop-in replacement.
            logging.info(
                "Best Validation Loss: {}".format(float(validation_loss))
            )
        # If the validation loss is small enough, and it starts to go up.
        # Should stop training.
        # Small is defined by the number of epochs it lasts.
        if validation_loss < min_val_loss:
            min_val_loss = validation_loss
            min_val_loss_epoch = monitor_epoch
            model_state = model.state_dict()
        logging.info(
            "Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: "
            "%d Loss : %.3f "
            % (
                monitor_epoch,
                validation_loss,
                min_val_loss_epoch,
                min_val_loss,
            )
        )
        if (monitor_epoch - min_val_loss_epoch) > config["training"][
            "stop_patience"
        ] or (max_epoch is not None and monitor_epoch >= max_epoch):
            logging.info("Saving model ...")
            # Use a context manager so the checkpoint file handle is
            # closed deterministically (the original leaked the handle
            # returned by open()).
            with open(
                os.path.join(save_dir, "best_model.model"), "wb"
            ) as checkpoint:
                torch.save(model_state, checkpoint)
            # Let the training end.
            break_flag = 1
            break
    if break_flag == 1:
        logging.info("##### Training stopped at ##### %f" % min_val_loss)
        logging.info(
            "##### Training Time ##### %f seconds"
            % (time.time() - starting_time)
        )
        return True, min_val_loss_epoch, min_val_loss, model_state
    else:
        return False, min_val_loss_epoch, min_val_loss, model_state
def _nli_split_accuracy(nli_iterator, model, batch_size, n_gpus, split, n_lines):
    """Return the classification accuracy of ``model`` on one NLI split.

    Args:
        nli_iterator(NLIIterator): NLIIterator object.
        model(MultitaskModel): Multitask model object.
        batch_size(int): Batch size.
        n_gpus(int): Number of gpus.
        split(str): Which split to score ("dev" or "test").
        n_lines(int): Number of examples in that split.

    Returns:
        float: Fraction of correctly classified examples.
    """
    n_correct = 0.0
    n_wrong = 0.0
    for j in range(0, n_lines, batch_size * n_gpus):
        minibatch = nli_iterator.get_parallel_minibatch(
            j, batch_size * n_gpus, split
        )
        class_logits = model(
            minibatch, -1, return_hidden=False, paired_trg=None
        )
        # argmax over softmax probabilities equals argmax over logits; the
        # softmax call is kept for exact parity with the original code.
        class_preds = (
            f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1)
        )
        labels = minibatch["labels"].data.cpu().numpy()
        for pred, label in zip(class_preds, labels):
            if pred == label:
                n_correct += 1.0
            else:
                n_wrong += 1.0
    return n_correct / (n_correct + n_wrong)
def evaluate_nli(nli_iterator, model, batch_size, n_gpus):
    """Log the model's accuracy on the NLI dev and test splits.

    The original duplicated the scoring loop for each split verbatim;
    both splits now share ``_nli_split_accuracy``.

    Args:
        nli_iterator(NLIIterator): NLIIterator object.
        model(MultitaskModel): Multitask model object.
        batch_size(int): Batch size.
        n_gpus(int): Number of gpus
    """
    dev_acc = _nli_split_accuracy(
        nli_iterator, model, batch_size, n_gpus, "dev",
        len(nli_iterator.dev_lines),
    )
    logging.info("NLI Dev Acc : %.5f" % dev_acc)
    test_acc = _nli_split_accuracy(
        nli_iterator, model, batch_size, n_gpus, "test",
        len(nli_iterator.test_lines),
    )
    logging.info("NLI Test Acc : %.5f" % test_acc)
    logging.info("******************************************************")
def train(config, data_folder, learning_rate=0.0001, max_epoch=None):
    """ Train the Gensen model.
    Args:
        max_epoch(int): Limit training to specified number of epochs.
        config(dict): Loaded json file as a python object.
        data_folder(str): Path to the folder containing the data.
        learning_rate(float): Learning rate for the model.
    """
    owd = os.getcwd()
    os.chdir(data_folder)
    try:
        with mlflow.start_run():
            save_dir = config["data"]["save_dir"]
            if not os.path.exists("./log"):
                os.makedirs("./log")
            os.makedirs(save_dir, exist_ok=True)
            setup_logging(config)
            batch_size = config["training"]["batch_size"]
            src_vocab_size = config["model"]["n_words_src"]
            trg_vocab_size = config["model"]["n_words_trg"]
            max_len_src = config["data"]["max_src_length"]
            max_len_trg = config["data"]["max_trg_length"]
            model_state = {}
            train_src = [item["train_src"] for item in config["data"]["paths"]]
            train_trg = [item["train_trg"] for item in config["data"]["paths"]]
            tasknames = [item["taskname"] for item in config["data"]["paths"]]
            # Keep track of indices to train forward and backward jointly
            if (
                "skipthought_next" in tasknames
                and "skipthought_previous" in tasknames
            ):
                skipthought_idx = tasknames.index("skipthought_next")
                skipthought_backward_idx = tasknames.index(
                    "skipthought_previous"
                )
                paired_tasks = {
                    skipthought_idx: skipthought_backward_idx,
                    skipthought_backward_idx: skipthought_idx,
                }
            else:
                paired_tasks = None
                skipthought_idx = None
                skipthought_backward_idx = None
            train_iterator = BufferedDataIterator(
                train_src,
                train_trg,
                src_vocab_size,
                trg_vocab_size,
                tasknames,
                save_dir,
                buffer_size=1e6,
                lowercase=True,
                seed=(hvd.rank() + 1) * 12345,
            )
            nli_iterator = NLIIterator(
                train=config["data"]["nli_train"],
                dev=config["data"]["nli_dev"],
                test=config["data"]["nli_test"],
                vocab_size=-1,
                vocab=os.path.join(save_dir, "src_vocab.pkl"),
                seed=(hvd.rank() + 1) * 12345,
            )
            src_vocab_size = len(train_iterator.src[0]["word2id"])
            trg_vocab_size = len(train_iterator.trg[0]["word2id"])
            # Logging set up.
            logging.info("Finished creating iterator ...")
            log_config(config)
            logging.info(
                "Found %d words in source : "
                % (len(train_iterator.src[0]["id2word"]))
            )
            for idx, taskname in enumerate(tasknames):
                logging.info(
                    "Found %d target words in task %s "
                    % (len(train_iterator.trg[idx]["id2word"]), taskname)
                )
            logging.info("Found %d words in src " % src_vocab_size)
            logging.info("Found %d words in trg " % trg_vocab_size)
            # Mask the <pad> token out of the seq2seq cross-entropy loss.
            weight_mask = torch.ones(trg_vocab_size).cuda()
            weight_mask[train_iterator.trg[0]["word2id"]["<pad>"]] = 0
            loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda()
            nli_criterion = nn.CrossEntropyLoss().cuda()
            model = MultitaskModel(
                src_emb_dim=config["model"]["dim_word_src"],
                trg_emb_dim=config["model"]["dim_word_trg"],
                src_vocab_size=src_vocab_size,
                trg_vocab_size=trg_vocab_size,
                src_hidden_dim=config["model"]["dim_src"],
                trg_hidden_dim=config["model"]["dim_trg"],
                bidirectional=config["model"]["bidirectional"],
                pad_token_src=train_iterator.src[0]["word2id"]["<pad>"],
                pad_token_trg=train_iterator.trg[0]["word2id"]["<pad>"],
                nlayers_src=config["model"]["n_layers_src"],
                dropout=config["model"]["dropout"],
                num_tasks=len(train_iterator.src),
                paired_tasks=paired_tasks,
            ).cuda()
            optimizer = setup_horovod(model, learning_rate=learning_rate)
            logging.info(model)
            n_gpus = config["training"]["n_gpus"]
            model = torch.nn.DataParallel(model, device_ids=range(n_gpus))
            task_losses = [[] for _ in tasknames]
            task_idxs = [0 for _ in tasknames]
            nli_losses = []
            updates = 0
            nli_ctr = 0
            nli_epoch = 0
            monitor_epoch = 0
            nli_mbatch_ctr = 0
            mbatch_times = []
            min_val_loss = 10000000
            min_val_loss_epoch = -1
            # Paired skip-thought tasks are sampled together, so sample
            # from one fewer task index when they are both present.
            rng_num_tasks = (
                len(tasknames) - 1 if paired_tasks else len(tasknames)
            )
            logging.info("OS Environ: \n {} \n\n".format(os.environ))
            mlflow.log_param("learning_rate", learning_rate)
            logging.info("Commencing Training ...")
            start = time.time()
            while True:
                batch_start_time = time.time()
                # Train NLI once every 10 minibatches of other tasks
                if nli_ctr % 10 == 0:
                    minibatch = nli_iterator.get_parallel_minibatch(
                        nli_mbatch_ctr, batch_size * n_gpus
                    )
                    optimizer.zero_grad()
                    class_logits = model(
                        minibatch, -1, return_hidden=False, paired_trg=None
                    )
                    loss = nli_criterion(
                        class_logits.contiguous().view(
                            -1, class_logits.size(1)
                        ),
                        minibatch["labels"].contiguous().view(-1),
                    )
                    nli_losses.append(loss.item())
                    loss.backward()
                    # clip_grad_norm was renamed clip_grad_norm_ in
                    # PyTorch 0.4; the in-place variant is the supported one.
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                    optimizer.step()
                    nli_mbatch_ctr += batch_size * n_gpus
                    if nli_mbatch_ctr >= len(nli_iterator.train_lines):
                        nli_mbatch_ctr = 0
                        nli_epoch += 1
                else:
                    # Sample a random task
                    task_idx = np.random.randint(low=0, high=rng_num_tasks)
                    # Get a minibatch corresponding to the sampled task
                    minibatch = train_iterator.get_parallel_minibatch(
                        task_idx,
                        task_idxs[task_idx],
                        batch_size * n_gpus,
                        max_len_src,
                        max_len_trg,
                    )
                    # Increment pointer into task and if current buffer is
                    # exhausted, fetch new buffer.
                    task_idxs[task_idx] += batch_size * n_gpus
                    if task_idxs[task_idx] >= train_iterator.buffer_size:
                        train_iterator.fetch_buffer(task_idx)
                        task_idxs[task_idx] = 0
                    if task_idx == skipthought_idx:
                        # Train the paired backward skip-thought task on a
                        # minibatch of its own in the same step.
                        minibatch_back = train_iterator.get_parallel_minibatch(
                            skipthought_backward_idx,
                            task_idxs[skipthought_backward_idx],
                            batch_size * n_gpus,
                            max_len_src,
                            max_len_trg,
                        )
                        task_idxs[skipthought_backward_idx] += (
                            batch_size * n_gpus
                        )
                        if (
                            task_idxs[skipthought_backward_idx]
                            >= train_iterator.buffer_size
                        ):
                            train_iterator.fetch_buffer(
                                skipthought_backward_idx
                            )
                            task_idxs[skipthought_backward_idx] = 0
                        optimizer.zero_grad()
                        decoder_logit, decoder_logit_2 = model(
                            minibatch,
                            task_idx,
                            paired_trg=minibatch_back["input_trg"],
                        )
                        loss_f = loss_criterion(
                            decoder_logit.contiguous().view(
                                -1, decoder_logit.size(2)
                            ),
                            minibatch["output_trg"].contiguous().view(-1),
                        )
                        loss_b = loss_criterion(
                            decoder_logit_2.contiguous().view(
                                -1, decoder_logit_2.size(2)
                            ),
                            minibatch_back["output_trg"].contiguous().view(-1),
                        )
                        # .item() replaces the pre-0.4 tensor.data[0]
                        # indexing, which raises IndexError on 0-dim loss
                        # tensors in modern PyTorch and was inconsistent
                        # with the other loss bookkeeping in this function.
                        task_losses[task_idx].append(loss_f.item())
                        task_losses[skipthought_backward_idx].append(
                            loss_b.item()
                        )
                        loss = loss_f + loss_b
                    else:
                        optimizer.zero_grad()
                        decoder_logit = model(minibatch, task_idx)
                        loss = loss_criterion(
                            decoder_logit.contiguous().view(
                                -1, decoder_logit.size(2)
                            ),
                            minibatch["output_trg"].contiguous().view(-1),
                        )
                        task_losses[task_idx].append(loss.item())
                    loss.backward()
                    # For distributed optimizer need to sync before gradient
                    # clipping.
                    optimizer.synchronize()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                    optimizer.step()
                end = time.time()
                mbatch_times.append(end - batch_start_time)
                # Validations
                if (
                    updates % config["management"]["monitor_loss"] == 0
                    and updates != 0
                ):
                    monitor_epoch += 1
                    for idx, task in enumerate(tasknames):
                        logging.info(
                            "Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s "
                            "minibatches : %d"
                            % (
                                updates,
                                task,
                                np.mean(task_losses[idx]),
                                task,
                                len(task_losses[idx]),
                            )
                        )
                        mlflow.log_metric(
                            "validation_loss",
                            np.mean(task_losses[idx]),
                            step=monitor_epoch,
                        )
                    logging.info(
                        "Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI "
                        "Loss : %.5f "
                        % (
                            nli_ctr,
                            nli_epoch,
                            nli_mbatch_ctr,
                            np.mean(nli_losses),
                        )
                    )
                    mlflow.log_metric(
                        "nli_loss", np.mean(nli_losses), step=nli_epoch
                    )
                    logging.info(
                        "Average time per minibatch : %.5f"
                        % (np.mean(mbatch_times))
                    )
                    mlflow.log_metric(
                        "minibatch_avg_duration", np.mean(mbatch_times)
                    )
                    # Reset the rolling statistics for the next window.
                    task_losses = [[] for _ in tasknames]
                    mbatch_times = []
                    nli_losses = []
                    # For validate and break if done.
                    logging.info("############################")
                    logging.info("##### Evaluating model #####")
                    logging.info("############################")
                    training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate(
                        config=config,
                        train_iterator=train_iterator,
                        model=model,
                        loss_criterion=loss_criterion,
                        monitor_epoch=monitor_epoch,
                        min_val_loss=min_val_loss,
                        min_val_loss_epoch=min_val_loss_epoch,
                        save_dir=save_dir,
                        starting_time=start,
                        model_state=model_state,
                        max_epoch=max_epoch,
                    )
                    if training_complete:
                        mlflow.log_metric("min_val_loss", float(min_val_loss))
                        mlflow.log_metric("learning_rate", learning_rate)
                        break
                    logging.info("Evaluating on NLI")
                    evaluate_nli(
                        nli_iterator=nli_iterator,
                        model=model,
                        n_gpus=n_gpus,
                        batch_size=batch_size,
                    )
                updates += batch_size * n_gpus
                nli_ctr += 1
                logging.info("Updates: %d" % updates)
    finally:
        # Always restore the original working directory.
        os.chdir(owd)
def read_config(json_file):
    """Read JSON config.

    Args:
        json_file(str): Path to a UTF-8 encoded JSON file.

    Returns:
        dict: The parsed configuration object.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original leaked the handle returned by open()).
    with open(json_file, "r", encoding="utf-8") as fp:
        return json.load(fp)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and launch training.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", help="path to json config", required=True)
    parser.add_argument("--data_folder", type=str, help="data folder")
    # Add learning rate to tune model.
    parser.add_argument(
        "--learning_rate", type=float, default=0.0001, help="learning rate"
    )
    parser.add_argument(
        "--max_epoch",
        type=int,
        default=None,
        help="Limit training to specified number of epochs.",
    )
    args = parser.parse_args()
    data_path = args.data_folder
    lr = args.learning_rate
    config_file_path = args.config
    max_epoch = args.max_epoch
    # Load the JSON configuration and start the training loop.
    config_obj = read_config(config_file_path)
    train(config_obj, data_path, lr, max_epoch)
| 36.849612 | 96 | 0.51847 |
import argparse
import json
import logging
import os
import time
import horovod.torch as hvd
import mlflow
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
from utils_nlp.models.gensen.multi_task_model import MultitaskModel
from utils_nlp.models.gensen.utils import (
BufferedDataIterator,
NLIIterator,
compute_validation_loss,
)
cudnn.benchmark = True
logger = logging.getLogger(__name__)
hvd.init()
if torch.cuda.is_available():
torch.cuda.set_device(hvd.local_rank())
def metric_average(value, name):
    """Return ``value`` reduced across all Horovod workers.

    Args:
        value: Local scalar metric on this worker.
        name(str): Name tag identifying the Horovod allreduce operation.

    Returns:
        float: The allreduced value (mean over ranks, per Horovod's
            default reduction — as the function name implies).
    """
    tensor = torch.tensor(value)
    avg_tensor = hvd.allreduce(tensor, name=name)
    return avg_tensor.item()
def setup_horovod(model, learning_rate):
optimizer = optim.Adam(model.parameters(), lr=learning_rate * hvd.size())
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
compression = hvd.Compression.fp16
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
)
return optimizer
def setup_logging(config):
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
filename="log/%s" % (config["data"]["task"]),
filemode="w",
)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
def log_config(config):
logging.info("Model Parameters : ")
logging.info("Task : %s " % (config["data"]["task"]))
logging.info(
"Source Word Embedding Dim : %s" % (config["model"]["dim_word_src"])
)
logging.info(
"Target Word Embedding Dim : %s" % (config["model"]["dim_word_trg"])
)
logging.info("Source RNN Hidden Dim : %s" % (config["model"]["dim_src"]))
logging.info("Target RNN Hidden Dim : %s" % (config["model"]["dim_trg"]))
logging.info(
"Source RNN Bidirectional : %s" % (config["model"]["bidirectional"])
)
logging.info("Batch Size : %d " % (config["training"]["batch_size"]))
logging.info("Optimizer : %s " % (config["training"]["optimizer"]))
logging.info("Learning Rate : %f " % (config["training"]["lrate"]))
def evaluate(
config,
train_iterator,
model,
loss_criterion,
monitor_epoch,
min_val_loss,
min_val_loss_epoch,
save_dir,
starting_time,
model_state,
max_epoch,
):
break_flag = 0
for task_idx, task in enumerate(train_iterator.tasknames):
if "skipthought" in task:
continue
validation_loss = compute_validation_loss(
config,
model,
train_iterator,
loss_criterion,
task_idx,
lowercase=True,
)
validation_loss = metric_average(validation_loss, "val_loss")
logging.info("%s Validation Loss : %.3f" % (task, validation_loss))
if hvd.rank() == 0:
logging.info(
"Best Validation Loss: {}".format(np.float(validation_loss))
)
if validation_loss < min_val_loss:
min_val_loss = validation_loss
min_val_loss_epoch = monitor_epoch
model_state = model.state_dict()
logging.info(
"Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: "
"%d Loss : %.3f "
% (
monitor_epoch,
validation_loss,
min_val_loss_epoch,
min_val_loss,
)
)
if (monitor_epoch - min_val_loss_epoch) > config["training"][
"stop_patience"
] or (max_epoch is not None and monitor_epoch >= max_epoch):
logging.info("Saving model ...")
torch.save(
model_state,
open(os.path.join(save_dir, "best_model.model"), "wb"),
)
break_flag = 1
break
if break_flag == 1:
logging.info("##### Training stopped at ##### %f" % min_val_loss)
logging.info(
"##### Training Time ##### %f seconds"
% (time.time() - starting_time)
)
return True, min_val_loss_epoch, min_val_loss, model_state
else:
return False, min_val_loss_epoch, min_val_loss, model_state
def evaluate_nli(nli_iterator, model, batch_size, n_gpus):
n_correct = 0.0
n_wrong = 0.0
for j in range(0, len(nli_iterator.dev_lines), batch_size * n_gpus):
minibatch = nli_iterator.get_parallel_minibatch(
j, batch_size * n_gpus, "dev"
)
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
class_preds = (
f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1)
)
labels = minibatch["labels"].data.cpu().numpy()
for pred, label in zip(class_preds, labels):
if pred == label:
n_correct += 1.0
else:
n_wrong += 1.0
logging.info("NLI Dev Acc : %.5f" % (n_correct / (n_correct + n_wrong)))
n_correct = 0.0
n_wrong = 0.0
for j in range(0, len(nli_iterator.test_lines), batch_size * n_gpus):
minibatch = nli_iterator.get_parallel_minibatch(
j, batch_size * n_gpus, "test"
)
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
class_preds = (
f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1)
)
labels = minibatch["labels"].data.cpu().numpy()
for pred, label in zip(class_preds, labels):
if pred == label:
n_correct += 1.0
else:
n_wrong += 1.0
logging.info("NLI Test Acc : %.5f" % (n_correct / (n_correct + n_wrong)))
logging.info("******************************************************")
def train(config, data_folder, learning_rate=0.0001, max_epoch=None):
owd = os.getcwd()
os.chdir(data_folder)
try:
with mlflow.start_run():
save_dir = config["data"]["save_dir"]
if not os.path.exists("./log"):
os.makedirs("./log")
os.makedirs(save_dir, exist_ok=True)
setup_logging(config)
batch_size = config["training"]["batch_size"]
src_vocab_size = config["model"]["n_words_src"]
trg_vocab_size = config["model"]["n_words_trg"]
max_len_src = config["data"]["max_src_length"]
max_len_trg = config["data"]["max_trg_length"]
model_state = {}
train_src = [item["train_src"] for item in config["data"]["paths"]]
train_trg = [item["train_trg"] for item in config["data"]["paths"]]
tasknames = [item["taskname"] for item in config["data"]["paths"]]
if (
"skipthought_next" in tasknames
and "skipthought_previous" in tasknames
):
skipthought_idx = tasknames.index("skipthought_next")
skipthought_backward_idx = tasknames.index(
"skipthought_previous"
)
paired_tasks = {
skipthought_idx: skipthought_backward_idx,
skipthought_backward_idx: skipthought_idx,
}
else:
paired_tasks = None
skipthought_idx = None
skipthought_backward_idx = None
train_iterator = BufferedDataIterator(
train_src,
train_trg,
src_vocab_size,
trg_vocab_size,
tasknames,
save_dir,
buffer_size=1e6,
lowercase=True,
seed=(hvd.rank() + 1) * 12345,
)
nli_iterator = NLIIterator(
train=config["data"]["nli_train"],
dev=config["data"]["nli_dev"],
test=config["data"]["nli_test"],
vocab_size=-1,
vocab=os.path.join(save_dir, "src_vocab.pkl"),
seed=(hvd.rank() + 1) * 12345,
)
src_vocab_size = len(train_iterator.src[0]["word2id"])
trg_vocab_size = len(train_iterator.trg[0]["word2id"])
logging.info("Finished creating iterator ...")
log_config(config)
logging.info(
"Found %d words in source : "
% (len(train_iterator.src[0]["id2word"]))
)
for idx, taskname in enumerate(tasknames):
logging.info(
"Found %d target words in task %s "
% (len(train_iterator.trg[idx]["id2word"]), taskname)
)
logging.info("Found %d words in src " % src_vocab_size)
logging.info("Found %d words in trg " % trg_vocab_size)
weight_mask = torch.ones(trg_vocab_size).cuda()
weight_mask[train_iterator.trg[0]["word2id"]["<pad>"]] = 0
loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda()
nli_criterion = nn.CrossEntropyLoss().cuda()
model = MultitaskModel(
src_emb_dim=config["model"]["dim_word_src"],
trg_emb_dim=config["model"]["dim_word_trg"],
src_vocab_size=src_vocab_size,
trg_vocab_size=trg_vocab_size,
src_hidden_dim=config["model"]["dim_src"],
trg_hidden_dim=config["model"]["dim_trg"],
bidirectional=config["model"]["bidirectional"],
pad_token_src=train_iterator.src[0]["word2id"]["<pad>"],
pad_token_trg=train_iterator.trg[0]["word2id"]["<pad>"],
nlayers_src=config["model"]["n_layers_src"],
dropout=config["model"]["dropout"],
num_tasks=len(train_iterator.src),
paired_tasks=paired_tasks,
).cuda()
optimizer = setup_horovod(model, learning_rate=learning_rate)
logging.info(model)
n_gpus = config["training"]["n_gpus"]
model = torch.nn.DataParallel(model, device_ids=range(n_gpus))
task_losses = [[] for _ in tasknames]
task_idxs = [0 for _ in tasknames]
nli_losses = []
updates = 0
nli_ctr = 0
nli_epoch = 0
monitor_epoch = 0
nli_mbatch_ctr = 0
mbatch_times = []
min_val_loss = 10000000
min_val_loss_epoch = -1
rng_num_tasks = (
len(tasknames) - 1 if paired_tasks else len(tasknames)
)
logging.info("OS Environ: \n {} \n\n".format(os.environ))
mlflow.log_param("learning_rate", learning_rate)
logging.info("Commencing Training ...")
start = time.time()
while True:
batch_start_time = time.time()
if nli_ctr % 10 == 0:
minibatch = nli_iterator.get_parallel_minibatch(
nli_mbatch_ctr, batch_size * n_gpus
)
optimizer.zero_grad()
class_logits = model(
minibatch, -1, return_hidden=False, paired_trg=None
)
loss = nli_criterion(
class_logits.contiguous().view(
-1, class_logits.size(1)
),
minibatch["labels"].contiguous().view(-1),
)
nli_losses.append(loss.item())
loss.backward()
torch.nn.utils.clip_grad_norm(model.parameters(), 1.0)
optimizer.step()
nli_mbatch_ctr += batch_size * n_gpus
if nli_mbatch_ctr >= len(nli_iterator.train_lines):
nli_mbatch_ctr = 0
nli_epoch += 1
else:
task_idx = np.random.randint(low=0, high=rng_num_tasks)
minibatch = train_iterator.get_parallel_minibatch(
task_idx,
task_idxs[task_idx],
batch_size * n_gpus,
max_len_src,
max_len_trg,
)
"""Increment pointer into task and if current buffer is
exhausted, fetch new buffer. """
task_idxs[task_idx] += batch_size * n_gpus
if task_idxs[task_idx] >= train_iterator.buffer_size:
train_iterator.fetch_buffer(task_idx)
task_idxs[task_idx] = 0
if task_idx == skipthought_idx:
minibatch_back = train_iterator.get_parallel_minibatch(
skipthought_backward_idx,
task_idxs[skipthought_backward_idx],
batch_size * n_gpus,
max_len_src,
max_len_trg,
)
task_idxs[skipthought_backward_idx] += (
batch_size * n_gpus
)
if (
task_idxs[skipthought_backward_idx]
>= train_iterator.buffer_size
):
train_iterator.fetch_buffer(
skipthought_backward_idx
)
task_idxs[skipthought_backward_idx] = 0
optimizer.zero_grad()
decoder_logit, decoder_logit_2 = model(
minibatch,
task_idx,
paired_trg=minibatch_back["input_trg"],
)
loss_f = loss_criterion(
decoder_logit.contiguous().view(
-1, decoder_logit.size(2)
),
minibatch["output_trg"].contiguous().view(-1),
)
loss_b = loss_criterion(
decoder_logit_2.contiguous().view(
-1, decoder_logit_2.size(2)
),
minibatch_back["output_trg"].contiguous().view(-1),
)
task_losses[task_idx].append(loss_f.data[0])
task_losses[skipthought_backward_idx].append(
loss_b.data[0]
)
loss = loss_f + loss_b
else:
optimizer.zero_grad()
decoder_logit = model(minibatch, task_idx)
loss = loss_criterion(
decoder_logit.contiguous().view(
-1, decoder_logit.size(2)
),
minibatch["output_trg"].contiguous().view(-1),
)
task_losses[task_idx].append(loss.item())
loss.backward()
optimizer.synchronize()
torch.nn.utils.clip_grad_norm(model.parameters(), 1.0)
optimizer.step()
end = time.time()
mbatch_times.append(end - batch_start_time)
if (
updates % config["management"]["monitor_loss"] == 0
and updates != 0
):
monitor_epoch += 1
for idx, task in enumerate(tasknames):
logging.info(
"Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s "
"minibatches : %d"
% (
updates,
task,
np.mean(task_losses[idx]),
task,
len(task_losses[idx]),
)
)
mlflow.log_metric(
"validation_loss",
np.mean(task_losses[idx]),
step=monitor_epoch,
)
logging.info(
"Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI "
"Loss : %.5f "
% (
nli_ctr,
nli_epoch,
nli_mbatch_ctr,
np.mean(nli_losses),
)
)
mlflow.log_metric(
"nli_loss", np.mean(nli_losses), step=nli_epoch
)
logging.info(
"Average time per minibatch : %.5f"
% (np.mean(mbatch_times))
)
mlflow.log_metric(
"minibatch_avg_duration", np.mean(mbatch_times)
)
task_losses = [[] for _ in tasknames]
mbatch_times = []
nli_losses = []
logging.info("############################")
logging.info("##### Evaluating model #####")
logging.info("############################")
training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate(
config=config,
train_iterator=train_iterator,
model=model,
loss_criterion=loss_criterion,
monitor_epoch=monitor_epoch,
min_val_loss=min_val_loss,
min_val_loss_epoch=min_val_loss_epoch,
save_dir=save_dir,
starting_time=start,
model_state=model_state,
max_epoch=max_epoch,
)
if training_complete:
mlflow.log_metric("min_val_loss", float(min_val_loss))
mlflow.log_metric("learning_rate", learning_rate)
break
logging.info("Evaluating on NLI")
evaluate_nli(
nli_iterator=nli_iterator,
model=model,
n_gpus=n_gpus,
batch_size=batch_size,
)
updates += batch_size * n_gpus
nli_ctr += 1
logging.info("Updates: %d" % updates)
finally:
os.chdir(owd)
def read_config(json_file):
    """Read a JSON config file and return it as a dict."""
    # Context manager closes the file handle promptly (the original
    # leaked the handle returned by open()).
    with open(json_file, "r", encoding="utf-8") as fp:
        return json.load(fp)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="path to json config", required=True)
parser.add_argument("--data_folder", type=str, help="data folder")
parser.add_argument(
"--learning_rate", type=float, default=0.0001, help="learning rate"
)
parser.add_argument(
"--max_epoch",
type=int,
default=None,
help="Limit training to specified number of epochs.",
)
args = parser.parse_args()
data_path = args.data_folder
lr = args.learning_rate
config_file_path = args.config
max_epoch = args.max_epoch
config_obj = read_config(config_file_path)
train(config_obj, data_path, lr, max_epoch)
| true | true |
f71ffb8011fd1e0f44abc963af4799e254cbef2d | 6,006 | py | Python | rb_missions/scripts/acoustic_docking.py | vanttec/vanttec_usv | 5c7b45a61728404b4c957028eac7bc361f1b2077 | [
"MIT"
] | 13 | 2020-08-18T18:47:11.000Z | 2022-03-30T08:07:25.000Z | rb_missions/scripts/acoustic_docking.py | vanttec/vanttec_usv | 5c7b45a61728404b4c957028eac7bc361f1b2077 | [
"MIT"
] | 2 | 2021-05-07T03:56:11.000Z | 2021-08-10T04:18:21.000Z | rb_missions/scripts/acoustic_docking.py | vanttec/vanttec_usv | 5c7b45a61728404b4c957028eac7bc361f1b2077 | [
"MIT"
] | 5 | 2020-12-21T17:29:29.000Z | 2022-02-15T07:51:07.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
----------------------------------------------------------
@file: acoustic_docking.py
@date: Wed Jun 3, 2020
@author: Alejandro Gonzalez Garcia
@e-mail: alexglzg97@gmail.com
@brief: Motion planning. ROS node to follow an acoustic
signal for autonomous docking.
@version: 1.0
Open source
---------------------------------------------------------
'''
import math
import time
import os
import numpy as np
import rospy
from geometry_msgs.msg import Pose, Pose2D, PoseArray
from std_msgs.msg import Int32, Float32MultiArray, Float64, String
from visualization_msgs.msg import Marker, MarkerArray
class AcousticDocking:
    """ROS node logic for autonomous docking guided by an acoustic pinger.

    Fuses the hydrophone bearing with the two dock endpoints reported by
    the lidar detector, and publishes a two-waypoint approach path.
    """
    def __init__(self):
        # Latest NED position and heading of the USV (set by INS callback).
        self.ned_x = 0
        self.ned_y = 0
        self.yaw = 0
        # Mission active flag; the main loop runs while this is True.
        self.activated = True
        # Distance from the body-frame origin to the dock midpoint.
        self.distance = 0
        # Bearing of the acoustic signal (set by the hydrophone callback).
        self.signal_angle = 0
        # Endpoints of the dock segment reported by the lidar detector.
        self.x1 = 0
        self.y1 = 0
        self.x2 = 0
        self.y2 = 0
        # Origin of the body frame used in the intersection computation.
        self.x_body_origin = 0
        self.y_body_origin = 0
        # Stand-off distance (m) for the alignment waypoint before docking.
        self.correction_distance = 2
        # ROS Subscribers
        rospy.Subscriber("/vectornav/ins_2d/NED_pose", Pose2D, self.ins_pose_callback)
        rospy.Subscriber("/usv_perception/hydrophones/acoustic_signal", Float64, self.signal_callback)
        rospy.Subscriber("/usv_perception/lidar_detector/dock", PoseArray, self.dock_callback)
        # ROS Publishers
        self.path_pub = rospy.Publisher("/mission/waypoints", Float32MultiArray, queue_size=10)
        self.status_pub = rospy.Publisher("/mission/status", Int32, queue_size=10)
        self.test = rospy.Publisher("/mission/state", Int32, queue_size=10)
    def ins_pose_callback(self,pose):
        """Cache the latest INS 2D pose (NED x, y and heading)."""
        self.ned_x = pose.x
        self.ned_y = pose.y
        self.yaw = pose.theta
    def signal_callback(self, signal):
        """Cache the latest acoustic-signal bearing from the hydrophones."""
        self.signal_angle = signal.data
    def dock_callback(self, dock):
        """Cache the two dock endpoints detected by the lidar."""
        self.x1 = dock.poses[0].position.x
        self.y1 = dock.poses[0].position.y
        self.x2 = dock.poses[1].position.x
        self.y2 = dock.poses[1].position.y
    def calculate_distance_to_dock(self):
        '''
        @name: calculate_distance_to_dock
        @brief: Calculates the distance between the USV and the dock.
        @param: --
        @return: --
        '''
        # Midpoint of the dock segment: smaller coordinate plus half span.
        xc = min([self.x1,self.x2]) + abs(self.x1 - self.x2)/2
        yc = min([self.y1,self.y2]) + abs(self.y1 - self.y2)/2
        # Euclidean distance from the body-frame origin to that midpoint.
        self.distance = math.pow(xc*xc + yc*yc, 0.5)
    def dock(self):
        '''
        @name: dock
        @brief: Calculates the intersection point between the USV and the pinger
        location at the dock. Returns two waypoints as desired positions. The first
        waypoint is perpendicularly in front of the pinger to straighten the path.
        the second waypoint is the location of the pinger in the dock, for docking.
        @param: --
        @return: --
        '''
        # Order the endpoints so (xl, yl) has the smaller y coordinate.
        if self.y1 < self.y2:
            yl = self.y1
            xl = self.x1
            yr = self.y2
            xr = self.x2
        else:
            yl = self.y2
            xl = self.x2
            yr = self.y1
            xr = self.x1
        yd = yl - yr
        xd = xl - xr
        # Direction perpendicular to the dock segment, wrapped to [-pi, pi].
        alpha = math.atan2(yd,xd) + math.pi/2
        if (abs(alpha) > (math.pi)):
            alpha = (alpha/abs(alpha))*(abs(alpha) - 2*math.pi)
        # Unit step from the body origin along the acoustic bearing.
        x_beta, y_beta = self.aux_to_body(1,0,self.signal_angle,self.x_body_origin,self.y_body_origin)
        # Line-line intersection of the dock segment with the acoustic
        # bearing ray yields the pinger position.
        common_denominator = (xl - xr)*(self.y_body_origin - y_beta) - (yl - yr)*(self.x_body_origin - x_beta)
        x_pinger = ((xl*yr-yl*xr)*(self.x_body_origin-x_beta)-(xl-xr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator
        y_pinger = ((xl*yr-yl*xr)*(self.y_body_origin-y_beta)-(yl-yr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator
        # Alignment waypoint: correction_distance ahead of the pinger,
        # perpendicular to the dock face.
        x_aux, y_aux = self.aux_to_body(-self.correction_distance,0,alpha,x_pinger,y_pinger)
        path_array = Float32MultiArray()
        path_array.layout.data_offset = 5
        # Payload: [alignment x, y, pinger x, y, trailing constant 2].
        path_array.data = [x_aux, y_aux, x_pinger, y_pinger, 2]
        self.desired(path_array)
    def aux_to_body(self, aux_x2, aux_y2, alpha, body_x1, body_y1):
        '''
        @name: aux_to_body
        @brief: Coordinate transformation between auxiliary and body reference frames.
        @param: aux_x2: target x coordinate in aux reference frame
                aux_y2: target y coordinate in aux reference frame
                alpha: angle between aux and body reference frames
                body_x1: aux x coordinate in body reference frame
                body_y1: aux y coordinate in body reference frame
        @return: body_x2: target x coordinate in body reference frame
                 body_y2: target y coordinate in body reference frame
        '''
        # NOTE(review): the rotated components come back as 1-element
        # numpy arrays, so the return values are arrays, not scalars.
        p = np.array([[aux_x2],[aux_y2]])
        J = self.rotation_matrix(alpha)
        n = J.dot(p)
        body_x2 = n[0] + body_x1
        body_y2 = n[1] + body_y1
        return (body_x2, body_y2)
    def rotation_matrix(self, angle):
        '''
        @name: rotation_matrix
        @brief: Transformation matrix template.
        @param: angle: angle of rotation
        @return: J: transformation matrix
        '''
        # Standard 2D counter-clockwise rotation matrix.
        J = np.array([[math.cos(angle), -1*math.sin(angle)],
                      [math.sin(angle), math.cos(angle)]])
        return (J)
    def desired(self, path):
        """Publish the computed waypoint path to the mission topic."""
        self.path_pub.publish(path)
def main():
    """Run the acoustic docking node.

    Publishes docking waypoints while the dock is 5 m or farther away;
    once closer, publishes mission status 1 instead.
    """
    rospy.init_node("acoustic_docking", anonymous=False)
    rate = rospy.Rate(20)
    acoustic_docking = AcousticDocking()
    # Removed the unused `last_detection` local from the original.
    while not rospy.is_shutdown() and acoustic_docking.activated:
        acoustic_docking.calculate_distance_to_dock()
        if acoustic_docking.distance >= 5:
            acoustic_docking.dock()
        else:
            # Close enough to the dock: report docking status instead of
            # publishing further waypoints.
            acoustic_docking.status_pub.publish(1)
        rate.sleep()
    rospy.spin()
if __name__ == "__main__":
    try:
        main()
    except rospy.ROSInterruptException:
        # Exit quietly when ROS shuts the node down.
        pass
| 33.741573 | 145 | 0.604729 |
import math
import time
import os
import numpy as np
import rospy
from geometry_msgs.msg import Pose, Pose2D, PoseArray
from std_msgs.msg import Int32, Float32MultiArray, Float64, String
from visualization_msgs.msg import Marker, MarkerArray
class AcousticDocking:
def __init__(self):
self.ned_x = 0
self.ned_y = 0
self.yaw = 0
self.activated = True
self.distance = 0
self.signal_angle = 0
self.x1 = 0
self.y1 = 0
self.x2 = 0
self.y2 = 0
self.x_body_origin = 0
self.y_body_origin = 0
self.correction_distance = 2
rospy.Subscriber("/vectornav/ins_2d/NED_pose", Pose2D, self.ins_pose_callback)
rospy.Subscriber("/usv_perception/hydrophones/acoustic_signal", Float64, self.signal_callback)
rospy.Subscriber("/usv_perception/lidar_detector/dock", PoseArray, self.dock_callback)
self.path_pub = rospy.Publisher("/mission/waypoints", Float32MultiArray, queue_size=10)
self.status_pub = rospy.Publisher("/mission/status", Int32, queue_size=10)
self.test = rospy.Publisher("/mission/state", Int32, queue_size=10)
def ins_pose_callback(self,pose):
self.ned_x = pose.x
self.ned_y = pose.y
self.yaw = pose.theta
def signal_callback(self, signal):
self.signal_angle = signal.data
def dock_callback(self, dock):
self.x1 = dock.poses[0].position.x
self.y1 = dock.poses[0].position.y
self.x2 = dock.poses[1].position.x
self.y2 = dock.poses[1].position.y
def calculate_distance_to_dock(self):
xc = min([self.x1,self.x2]) + abs(self.x1 - self.x2)/2
yc = min([self.y1,self.y2]) + abs(self.y1 - self.y2)/2
self.distance = math.pow(xc*xc + yc*yc, 0.5)
def dock(self):
if self.y1 < self.y2:
yl = self.y1
xl = self.x1
yr = self.y2
xr = self.x2
else:
yl = self.y2
xl = self.x2
yr = self.y1
xr = self.x1
yd = yl - yr
xd = xl - xr
alpha = math.atan2(yd,xd) + math.pi/2
if (abs(alpha) > (math.pi)):
alpha = (alpha/abs(alpha))*(abs(alpha) - 2*math.pi)
x_beta, y_beta = self.aux_to_body(1,0,self.signal_angle,self.x_body_origin,self.y_body_origin)
common_denominator = (xl - xr)*(self.y_body_origin - y_beta) - (yl - yr)*(self.x_body_origin - x_beta)
x_pinger = ((xl*yr-yl*xr)*(self.x_body_origin-x_beta)-(xl-xr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator
y_pinger = ((xl*yr-yl*xr)*(self.y_body_origin-y_beta)-(yl-yr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator
x_aux, y_aux = self.aux_to_body(-self.correction_distance,0,alpha,x_pinger,y_pinger)
path_array = Float32MultiArray()
path_array.layout.data_offset = 5
path_array.data = [x_aux, y_aux, x_pinger, y_pinger, 2]
self.desired(path_array)
def aux_to_body(self, aux_x2, aux_y2, alpha, body_x1, body_y1):
p = np.array([[aux_x2],[aux_y2]])
J = self.rotation_matrix(alpha)
n = J.dot(p)
body_x2 = n[0] + body_x1
body_y2 = n[1] + body_y1
return (body_x2, body_y2)
def rotation_matrix(self, angle):
J = np.array([[math.cos(angle), -1*math.sin(angle)],
[math.sin(angle), math.cos(angle)]])
return (J)
def desired(self, path):
self.path_pub.publish(path)
def main():
rospy.init_node("acoustic_docking", anonymous=False)
rate = rospy.Rate(20)
acousticDocking = AcousticDocking()
last_detection = []
while not rospy.is_shutdown() and acousticDocking.activated:
acousticDocking.calculate_distance_to_dock()
if (acousticDocking.distance >= 5):
acousticDocking.dock()
else:
acousticDocking.status_pub.publish(1)
rate.sleep()
rospy.spin()
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException:
pass
| true | true |
f71ffbe737f15846e14c72d6820690ac7cf93d67 | 4,873 | py | Python | lula/util.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | [
"MIT"
] | 15 | 2021-06-07T14:25:35.000Z | 2021-12-26T16:41:01.000Z | lula/util.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | [
"MIT"
] | 1 | 2022-03-11T01:03:12.000Z | 2022-03-11T01:03:12.000Z | lula/util.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | [
"MIT"
] | 2 | 2021-06-19T05:41:05.000Z | 2022-03-23T11:51:06.000Z | import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class MaskedLinear(nn.Module):
def __init__(self, base_layer, m_in, m_out):
"""
The standard nn.Linear layer, but with gradient masking to enforce the LULA construction.
"""
super(MaskedLinear, self).__init__()
# Extend the weight matrix
W_base = base_layer.weight.data.clone() # (n_out, n_in)
n_out, n_in = W_base.shape
W = torch.randn(n_out+m_out, n_in+m_in)
W[0:n_out, 0:n_in] = W_base.clone()
W[0:n_out, n_in:] = 0 # Upper-right quadrant
self.weight = nn.Parameter(W)
# Extend the bias vector
if base_layer.bias is not None:
b_base = base_layer.bias.data.clone()
b = torch.randn(n_out+m_out)
b[:n_out] = b_base.clone()
self.bias = nn.Parameter(b)
else:
self.bias = None
# Apply gradient mask to the weight and bias
self.mask_w = torch.zeros(n_out+m_out, n_in+m_in)
self.mask_w[n_out:, :] = 1 # Lower half
self.mask_b = torch.zeros(n_out+m_out)
self.mask_b[n_out:] = 1
self.switch_grad_mask(True)
# For safekeeping
self.W_base, self.b_base = W_base, b_base
self.n_out, self.n_in = n_out, n_in
self.m_out, self.m_in = m_out, m_in
def forward(self, x):
return F.linear(x, self.weight, self.bias)
def switch_grad_mask(self, on=True):
if on:
self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))
self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))
else:
self.grad_handle_w.remove()
self.grad_handle_b.remove()
def to_gpu(self):
self.mask_w = self.mask_w.cuda()
self.mask_b = self.mask_b.cuda()
def to_unmasked(self):
lin = nn.Linear(self.n_in+self.m_in, self.n_out+self.m_out)
lin.weight = self.weight
lin.bias = self.bias
return lin
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None
)
class MaskedConv2d(nn.Module):
    def __init__(self, base_layer, m_in, m_out):
        """
        The standard nn.Conv2d layer, but with gradient masking to enforce the LULA construction.

        Extends `base_layer` by `m_in` extra input channels and `m_out`
        extra output channels. The base filters are frozen via gradient
        masks; only the newly added output channels are trainable.
        """
        super(MaskedConv2d, self).__init__()
        # Convolution hyper-parameters are inherited unchanged.
        self.kernel_size = base_layer.kernel_size
        self.stride = base_layer.stride
        self.padding = base_layer.padding
        self.dilation = base_layer.dilation
        self.groups = base_layer.groups

        # Extend the weight tensor: base filters in the upper-left block,
        # zeros in the upper-right so original outputs ignore new input
        # channels, random trainable filters in the new output channels.
        W_base = base_layer.weight.data.clone()  # (n_out, n_in, k, k)
        n_out, n_in, k, _ = W_base.shape  # Num of channels
        W = torch.randn(n_out+m_out, n_in+m_in, k, k)
        W[0:n_out, 0:n_in, :, :] = W_base.clone()
        W[0:n_out, n_in:, :, :] = 0  # Upper-right quadrant
        self.weight = nn.Parameter(W)

        # Extend the bias vector, if the base layer has one
        if base_layer.bias is not None:
            b_base = base_layer.bias.data.clone()
            b = torch.randn(n_out+m_out)
            b[:n_out] = b_base.clone()
            self.bias = nn.Parameter(b)
        else:
            # BUGFIX: b_base was previously left undefined on this path,
            # crashing with NameError at the "safekeeping" assignment below.
            b_base = None
            self.bias = None

        # Gradient masks: 1 on the new output channels, 0 on the base
        self.mask_w = torch.zeros(n_out+m_out, n_in+m_in, k, k)
        self.mask_w[n_out:, :, :, :] = 1  # Lower half
        self.mask_b = torch.zeros(n_out+m_out)
        self.mask_b[n_out:] = 1
        self.switch_grad_mask(True)

        # For safekeeping
        self.W_base, self.b_base = W_base, b_base
        self.n_out, self.n_in = n_out, n_in
        self.m_out, self.m_in = m_out, m_in

    def forward(self, x):
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def switch_grad_mask(self, on=True):
        """Attach (on=True) or remove (on=False) the grad-masking hooks."""
        if on:
            self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))
            # BUGFIX: only hook the bias when the layer actually has one
            # (previously this called None.register_hook for bias-free bases).
            if self.bias is not None:
                self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))
        else:
            self.grad_handle_w.remove()
            if self.bias is not None:
                self.grad_handle_b.remove()

    def to_gpu(self):
        """Move the masks to the GPU; they are plain tensors, so they do
        not follow the module's .cuda()/.to() calls automatically."""
        self.mask_w = self.mask_w.cuda()
        self.mask_b = self.mask_b.cuda()

    def to_unmasked(self):
        """Return a plain nn.Conv2d that shares this layer's parameters."""
        # BUGFIX: honor a missing bias instead of always creating one.
        conv = nn.Conv2d(self.n_in+self.m_in, self.n_out+self.m_out, self.kernel_size,
                         self.stride, self.padding, self.dilation, self.groups,
                         bias=self.bias is not None)
        conv.weight = self.weight
        conv.bias = self.bias
        return conv

    def extra_repr(self):
        return 'in_channels={}, out_channels={}, bias={}'.format(
            self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None
        )
| 32.059211 | 141 | 0.60353 | import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class MaskedLinear(nn.Module):
def __init__(self, base_layer, m_in, m_out):
super(MaskedLinear, self).__init__()
W_base = base_layer.weight.data.clone()
n_out, n_in = W_base.shape
W = torch.randn(n_out+m_out, n_in+m_in)
W[0:n_out, 0:n_in] = W_base.clone()
W[0:n_out, n_in:] = 0
self.weight = nn.Parameter(W)
if base_layer.bias is not None:
b_base = base_layer.bias.data.clone()
b = torch.randn(n_out+m_out)
b[:n_out] = b_base.clone()
self.bias = nn.Parameter(b)
else:
self.bias = None
self.mask_w = torch.zeros(n_out+m_out, n_in+m_in)
self.mask_w[n_out:, :] = 1
self.mask_b = torch.zeros(n_out+m_out)
self.mask_b[n_out:] = 1
self.switch_grad_mask(True)
self.W_base, self.b_base = W_base, b_base
self.n_out, self.n_in = n_out, n_in
self.m_out, self.m_in = m_out, m_in
def forward(self, x):
return F.linear(x, self.weight, self.bias)
def switch_grad_mask(self, on=True):
if on:
self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))
self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))
else:
self.grad_handle_w.remove()
self.grad_handle_b.remove()
def to_gpu(self):
self.mask_w = self.mask_w.cuda()
self.mask_b = self.mask_b.cuda()
def to_unmasked(self):
lin = nn.Linear(self.n_in+self.m_in, self.n_out+self.m_out)
lin.weight = self.weight
lin.bias = self.bias
return lin
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None
)
class MaskedConv2d(nn.Module):
def __init__(self, base_layer, m_in, m_out):
super(MaskedConv2d, self).__init__()
self.kernel_size = base_layer.kernel_size
self.stride = base_layer.stride
self.padding = base_layer.padding
self.dilation = base_layer.dilation
self.groups = base_layer.groups
W_base = base_layer.weight.data.clone()
n_out, n_in, k, _ = W_base.shape
W = torch.randn(n_out+m_out, n_in+m_in, k, k)
W[0:n_out, 0:n_in, :, :] = W_base.clone()
W[0:n_out, n_in:, :, :] = 0
self.weight = nn.Parameter(W)
if base_layer.bias is not None:
b_base = base_layer.bias.data.clone()
b = torch.randn(n_out+m_out)
b[:n_out] = b_base.clone()
self.bias = nn.Parameter(b)
else:
self.bias = None
self.mask_w = torch.zeros(n_out+m_out, n_in+m_in, k, k)
self.mask_w[n_out:, :, :, :] = 1
self.mask_b = torch.zeros(n_out+m_out)
self.mask_b[n_out:] = 1
self.switch_grad_mask(True)
self.W_base, self.b_base = W_base, b_base
self.n_out, self.n_in = n_out, n_in
self.m_out, self.m_in = m_out, m_in
def forward(self, x):
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def switch_grad_mask(self, on=True):
if on:
self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))
self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))
else:
self.grad_handle_w.remove()
self.grad_handle_b.remove()
def to_gpu(self):
self.mask_w = self.mask_w.cuda()
self.mask_b = self.mask_b.cuda()
def to_unmasked(self):
conv = nn.Conv2d(self.n_in+self.m_in, self.n_out+self.m_out, self.kernel_size, self.stride, self.padding, self.dilation, self.groups)
conv.weight = self.weight
conv.bias = self.bias
return conv
def extra_repr(self):
return 'in_channels={}, out_channels={}, bias={}'.format(
self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None
)
| true | true |
f71ffe7210fe58d0bbb802af2106a7f260b2e296 | 5,184 | py | Python | line6.py | ChrBarth/pypod | 4dccf6e5f5f3584672e2bab5281220a15ee51de5 | [
"MIT"
] | 4 | 2021-04-26T07:24:27.000Z | 2022-01-17T23:10:47.000Z | line6.py | ChrBarth/pypod | 4dccf6e5f5f3584672e2bab5281220a15ee51de5 | [
"MIT"
] | null | null | null | line6.py | ChrBarth/pypod | 4dccf6e5f5f3584672e2bab5281220a15ee51de5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Some useful POD-Variables
# The Program names:
# Generated instead of hand-listed: banks 1-9 x channels A-D gives the
# 36 user program slots "1A", "1B", ..., "9D" (same order as before).
PROGRAMS = ["%d%s" % (bank, channel)
            for bank in range(1, 10)
            for channel in "ABCD"]
# The Amp Models:
# Index in this list = the value sent on CC 12 (AmpModel) to select it.
# NOTE(review): cc_commands documents AmpModel as 0-32 (33 values) but only
# 32 names are listed here -- confirm against the POD manual.
amp_names = [
'Tube Preamp',
'POD Clean Line 6',
'POD Crunch Line 6',
'POD Drive Line 6',
'POD Layer Line 6',
'Small Tweed',
'Tweed Blues',
'Black Panel',
'Modern Class A',
'Brit Class A',
'Brit Blues',
'Brit Classic',
'Brit Hi Gain',
'Rectified ’94',
'Modern Hi Gain',
'Fuzz Box',
'Jazz Clean',
'Boutique #1',
'Boutique #2',
'Brit Class A #2',
'Brit Class A #3',
'Small Tweed #2',
'Black Panel #2',
'Boutique #3',
'California Crunch #1',
'California Crunch #2',
'Rectified #2',
'Modern Hi Gain #2',
'Line 6 Twang',
'Line 6 Crunch #2',
'Line 6 Blues',
'Line 6 Insane' ]
# The Cab names:
# Index in this list = the value sent on CC 71 (CabinetType, 0-15).
cab_names = [
"1x 8 ’60 Fender Tweed Champ",
"1x12 ’52 Fender Tweed Deluxe",
"1x12 ’60 Vox AC15",
"1x12 ’64 Fender Blackface Deluxe",
"1x12 ’98 Line 6 Flextone",
"2x12 ’65 Fender Blackface Twin",
"2x12 ’67 VOX AC30",
"2x12 ’95 Matchless Chieftain",
"2x12 ’98 Pod custom 2x12",
"4x10 ’59 Fender Bassman",
"4x10 ’98 Pod custom 4x10 cab",
"4x12 ’96 Marshall with V30s",
"4x12 ’78 Marshall with 70s",
"4x12 ’97 Marshall off axis",
"4x12 ’98 Pod custom 4x12",
"No Cabinet" ]
# The effect types:
# Index in this list = the value sent on CC 19 (Effect, 0-15).
fx_names = [
"Chorus2",
"Flanger1",
"Rotary",
"Flanger2",
"Delay/Chorus1",
"Delay/Tremolo",
"Delay",
"Delay/Comp",
"Chorus1",
"Tremolo",
"Bypass",
"Compressor",
"Delay/Chorus2",
"Delay/Flanger1",
"Delay/Swell",
"Delay/Flanger2" ]
# MIDI Continuous Controller (CC) numbers the POD responds to.
# Key: human-readable parameter name (value semantics noted where
# non-obvious); value: the CC number to send. The trailing comments give
# the valid CC data range (0-127 unless stated otherwise).
cc_commands = {
"AmpModel (0-32)": 12, # 0-32 (0=Tube Preamp,...)
"Drive": 13, # 0-127
"Bass": 14, # 0-127
"Mid": 15, # 0-127
"Treble": 16, # 0-127
"BrightSwitch (0-63: OFF, 64-127: ON)": 73, # 0-63: OFF, 64-127: ON
"Channel Vol": 17, # 0-127
"Presence": 21, # 0-127
"Noise Gate (0-63: OFF, 64-127: ON)": 22, # 0-63: OFF, 64-127: ON
"GateThreshhold": 23, # 0-127
"GateDecay": 24, # 0-127
"Effect": 19, # 0-15 (0=Bypass,...)
"EffectTweak": 1, # 0-127
"Distortion (0-63: OFF, 64-127: ON)": 25, # 0-63: OFF, 64-127: ON
"DriveBoost (0-63: OFF, 64-127: ON)": 26, # 0-63: OFF, 64-127: ON
"Presence (0-63: OFF, 64-127: ON)": 27, # 0-63: OFF, 64-127: ON
"Delay (0-63: OFF, 64-127: ON)": 28, # 0-63: OFF, 64-127: ON
"DelayTime": 30, # 0-127 = 0-3150ms
"DelayTime2": 62, # 0-127 (Extra precision (???))
"DelayRepeats": 32, # 0-127
"DelayLevel": 34, # 0-127
"Reverb (0-63: OFF, 64-127: ON)": 36, # 0-63: OFF; 64-127: ON
"ReverbType (0-63: Spring, 64-127: Hall)": 37, # 0-63: SPRING, 64-127: HALL
"ReverbDecay": 38, # 0-127
"ReverbTone": 39, # 0-127
"ReverbDiffusion": 40, # 0-127
"ReverbDensity": 41, # 0-127
"ReverbLevel": 18, # 0-127
"CompressionRatio": 42, # 0-21=OFF, 22-44=1.4:1, 45-67=2:1, 68-90=3:1, 91-113=6:1, 114-127=INF
"Wah (0-63: OFF, 64-127: ON)": 43, # 0-63: OFF, 64-127: ON
"WahPedal": 4, # 0-127 (Pedal Position)
"WahBottom": 44, # 0-127 (Bottom frequency)
"WahTop": 45, # 0-127 (Top frequency)
"Volume": 7, # 0-127 (Volume Pedal)
"VolumeMin": 46, # 0-127 ???
"VolumePrePost (0-63: Pre Tube, 64-127: Post Tube)": 47, # 0-63: PRE TUBE, 64-127: POST TUBE
"VolSwell (0-63: OFF, 64-127: ON)": 48, # 0-63: OFF, 64-127: ON
"VolSwellRamp": 49, # 0-127
#"TapTempo": 64, # 64-127 = A TAP (=sending 2 in a second sets to 120bpm?)
"Modulation (0-63: OFF, 64-127: ON)": 50, # 0-63: OFF, 64-127: ON (Chorus/Rotary/Tremolo)
"Speed": 51, # 0-127 (Chorus/Flanger)
"Depth": 52, # 0-127 (Chorus/Flanger)
"Feedback": 53, # 0-63: NEGATIVE: 64-127: POSITIVE
"ChorusPreDelay": 54, # 0-127
"RotarySpeed": 55, # 0-127
"RotaryMaxSpeed": 56, # 0-127
"RotaryMinSpeed": 57, # 0-127
"TremoloSpeed": 58, # 0-127
"TremoloDepth": 59, # 0-127
"CabinetType (0-15)": 71, # 0-15 (0=No Cab, ...)
"AIRAmbienceLevel": 72 # 0-127
}
# CC 42 (CompressionRatio): [first-CC-value, label] breakpoints -- each
# ratio applies from that CC data value up to the next breakpoint.
compression_values = [ [ 0, "Off" ],
                       [ 22, "1.4:1" ],
                       [ 45, "2:1" ],
                       [ 68, "3:1" ],
                       [ 91, "6:1" ],
                       [ 114, "INF" ] ]
# CC 37 (ReverbType): 0-63 selects Spring, 64-127 selects Hall.
reverb_types = [ [ 0, "Spring" ], [ 64, "Hall" ] ]
# CC 47 (VolumePrePost): 0-63 = pre-tube, 64-127 = post-tube.
volume_pos = [ [ 0, "Pre-Tube" ], [ 64, "Post-Tube" ] ]
| 33.882353 | 103 | 0.470486 |
PROGRAMS = [ "1A", "1B", "1C", "1D",
"2A", "2B", "2C", "2D",
"3A", "3B", "3C", "3D",
"4A", "4B", "4C", "4D",
"5A", "5B", "5C", "5D",
"6A", "6B", "6C", "6D",
"7A", "7B", "7C", "7D",
"8A", "8B", "8C", "8D",
"9A", "9B", "9C", "9D" ]
amp_names = [
'Tube Preamp',
'POD Clean Line 6',
'POD Crunch Line 6',
'POD Drive Line 6',
'POD Layer Line 6',
'Small Tweed',
'Tweed Blues',
'Black Panel',
'Modern Class A',
'Brit Class A',
'Brit Blues',
'Brit Classic',
'Brit Hi Gain',
'Rectified ’94',
'Modern Hi Gain',
'Fuzz Box',
'Jazz Clean',
'Boutique #1',
'Boutique #2',
'Brit Class A #2',
'Brit Class A #3',
'Small Tweed #2',
'Black Panel #2',
'Boutique #3',
'California Crunch #1',
'California Crunch #2',
'Rectified #2',
'Modern Hi Gain #2',
'Line 6 Twang',
'Line 6 Crunch #2',
'Line 6 Blues',
'Line 6 Insane' ]
cab_names = [
"1x 8 ’60 Fender Tweed Champ",
"1x12 ’52 Fender Tweed Deluxe",
"1x12 ’60 Vox AC15",
"1x12 ’64 Fender Blackface Deluxe",
"1x12 ’98 Line 6 Flextone",
"2x12 ’65 Fender Blackface Twin",
"2x12 ’67 VOX AC30",
"2x12 ’95 Matchless Chieftain",
"2x12 ’98 Pod custom 2x12",
"4x10 ’59 Fender Bassman",
"4x10 ’98 Pod custom 4x10 cab",
"4x12 ’96 Marshall with V30s",
"4x12 ’78 Marshall with 70s",
"4x12 ’97 Marshall off axis",
"4x12 ’98 Pod custom 4x12",
"No Cabinet" ]
fx_names = [
"Chorus2",
"Flanger1",
"Rotary",
"Flanger2",
"Delay/Chorus1",
"Delay/Tremolo",
"Delay",
"Delay/Comp",
"Chorus1",
"Tremolo",
"Bypass",
"Compressor",
"Delay/Chorus2",
"Delay/Flanger1",
"Delay/Swell",
"Delay/Flanger2" ]
cc_commands = {
"AmpModel (0-32)": 12,
"Drive": 13,
"Bass": 14,
"Mid": 15,
"Treble": 16,
"BrightSwitch (0-63: OFF, 64-127: ON)": 73,
"Channel Vol": 17,
"Presence": 21,
"Noise Gate (0-63: OFF, 64-127: ON)": 22,
"GateThreshhold": 23,
"GateDecay": 24,
"Effect": 19,
"EffectTweak": 1,
"Distortion (0-63: OFF, 64-127: ON)": 25,
"DriveBoost (0-63: OFF, 64-127: ON)": 26,
"Presence (0-63: OFF, 64-127: ON)": 27,
"Delay (0-63: OFF, 64-127: ON)": 28,
"DelayTime": 30,
"DelayTime2": 62,
"DelayRepeats": 32,
"DelayLevel": 34,
"Reverb (0-63: OFF, 64-127: ON)": 36,
"ReverbType (0-63: Spring, 64-127: Hall)": 37,
"ReverbDecay": 38,
"ReverbTone": 39,
"ReverbDiffusion": 40,
"ReverbDensity": 41,
"ReverbLevel": 18,
"CompressionRatio": 42,
"Wah (0-63: OFF, 64-127: ON)": 43,
"WahPedal": 4,
"WahBottom": 44,
"WahTop": 45,
"Volume": 7,
"VolumeMin": 46,
"VolumePrePost (0-63: Pre Tube, 64-127: Post Tube)": 47,
"VolSwell (0-63: OFF, 64-127: ON)": 48,
"VolSwellRamp": 49,
"Speed": 51,
"Depth": 52,
"Feedback": 53,
"ChorusPreDelay": 54,
"RotarySpeed": 55,
"RotaryMaxSpeed": 56,
"RotaryMinSpeed": 57,
"TremoloSpeed": 58,
"TremoloDepth": 59,
"CabinetType (0-15)": 71,
"AIRAmbienceLevel": 72
}
compression_values = [ [ 0, "Off" ],
[ 22, "1.4:1" ],
[ 45, "2:1" ],
[ 68, "3:1" ],
[ 91, "6:1" ],
[ 114, "INF" ] ]
reverb_types = [ [ 0, "Spring" ], [ 64, "Hall" ] ]
volume_pos = [ [ 0, "Pre-Tube" ], [ 64, "Post-Tube" ] ]
| true | true |
f71fff32cfd2c5d668696fc8401b13ffda826fad | 118 | py | Python | commands/enlist.py | SirChopwood/Arma-3-Bot | 5aa751beb6a362af1fcefe3c8b1d2572b3ffc76f | [
"MIT"
] | 1 | 2020-10-30T18:37:39.000Z | 2020-10-30T18:37:39.000Z | commands/enlist.py | SirChopwood/Requisitions-Officer-Bot | 5aa751beb6a362af1fcefe3c8b1d2572b3ffc76f | [
"MIT"
] | null | null | null | commands/enlist.py | SirChopwood/Requisitions-Officer-Bot | 5aa751beb6a362af1fcefe3c8b1d2572b3ffc76f | [
"MIT"
] | null | null | null | async def Main(self, message, command, arguments):
await self.run_file("section_slot_assign", message, arguments)
| 39.333333 | 66 | 0.771186 | async def Main(self, message, command, arguments):
await self.run_file("section_slot_assign", message, arguments)
| true | true |
f71fffe42a961cd5e75a8c0d5975af883c4d0f2f | 2,763 | py | Python | _preflight_hook_experiment.py | BapeHiks/pythonista_startup | 060c355e9ecefa069227ae80c061cf532f9148e1 | [
"MIT"
] | 22 | 2016-04-05T14:56:11.000Z | 2022-02-03T02:52:23.000Z | _preflight_hook_experiment.py | BapeHiks/pythonista_startup | 060c355e9ecefa069227ae80c061cf532f9148e1 | [
"MIT"
] | 2 | 2016-04-28T08:45:16.000Z | 2017-04-24T21:55:37.000Z | _preflight_hook_experiment.py | BapeHiks/pythonista_startup | 060c355e9ecefa069227ae80c061cf532f9148e1 | [
"MIT"
] | 3 | 2017-04-23T16:47:33.000Z | 2020-08-05T16:14:49.000Z | """Highly unreliable way to register "preflight hooks", which are run every time you run a script (but not an editor action)."""
from __future__ import absolute_import, division, print_function
def run():
    """Install the preflight hooks by monkey-patching __import__.

    Every function appended to the (closed-over) ``preflight_hooks`` list
    is executed whenever ``pythonista_startup`` is imported by Pythonista's
    own preflight code -- i.e. right before every script run.
    """
    print(u"Installing preflight hooks...")
    # There's no official way to add hooks that run before every script run.
    # However Pythonista's preflight code imports pythonista_startup once to check what names it contains.
    # So we hack __import__ to run all functions in preflight_hooks whenever pythonista_startup is imported by specific bytecodes.
    try:
        import builtins
    except ImportError:
        # Python 2 fallback (Pythonista 2 ships Python 2.7).
        import __builtin__ as builtins
    preflight_hooks = []
    def _make_new_import():
        import sys
        _real_import = builtins.__import__
        def __import__(name, *args, **kwargs):
            if name == "pythonista_startup":
                try:
                    # Inspect the *caller's* frame to identify who imports us.
                    f = sys._getframe(1)
                except ValueError:
                    pass
                else:
                    # These blobs are the bytecodes of the main function of Pythonista's preflight code (from Pythonista 2 and 3 respectively), which is run once before every script run.
                    # NOTE(review): extremely fragile -- any Pythonista
                    # update that recompiles the preflight code breaks this.
                    if f.f_code.co_code in (
                        b'y\x0e\x00d\x00\x00d\x01\x00l\x00\x00TWn\x07\x00\x01\x01\x01n\x01\x00Xd\x02\x00S',
                        ##b'y\x1c\x00d\x01\x00d\x00\x00l\x00\x00}\x00\x00t\x01\x00|\x00\x00\x83\x01\x00}\x01\x00Wn\x0e\x00\x01\x01\x01g\x00\x00}\x01\x00Yn\x01\x00Xy\x15\x00t\x02\x00\x83\x00\x00\x01t\x03\x00|\x01\x00\x83\x01\x00\x01Wn\x08\x00\x01\x01\x01Yn\x01\x00Xd\x00\x00S',
                        b'y\x1c\x00d\x01\x00d\x00\x00l\x00\x00}\x00\x00t\x01\x00|\x00\x00\x83\x01\x00}\x01\x00Wn\x0e\x00\x01\x01\x01g\x00\x00}\x01\x00Yn\x01\x00Xyy\x00d\x01\x00d\x00\x00l\x02\x00}\x02\x00d\x01\x00d\x00\x00l\x03\x00}\x03\x00d\x01\x00d\x00\x00l\x04\x00}\x04\x00d\x01\x00d\x00\x00l\x05\x00}\x05\x00|\x02\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x03\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x04\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x05\x00j\x06\x00d\x00\x00\x83\x01\x00\x01t\x07\x00\x83\x00\x00\x01t\x08\x00|\x01\x00\x83\x01\x00\x01Wn\x08\x00\x01\x01\x01Yn\x01\x00Xd\x00\x00S',
                    ):
                        for hook in preflight_hooks:
                            hook()
            # Always delegate to the real import afterwards.
            return _real_import(name, *args, **kwargs)
        # Marker so a second run() call does not wrap __import__ twice.
        __import__.patched = True
        return __import__
    if not getattr(builtins.__import__, "patched", False):
        builtins.__import__ = _make_new_import()
    # Drop local references; the replacement __import__ keeps what it needs
    # alive through its closure.
    del builtins
    del _make_new_import
    print(u"Done installing preflight hooks.")
# Install the hooks immediately when executed as a script.
if __name__ == "__main__":
    run()
| 49.339286 | 587 | 0.631198 |
from __future__ import absolute_import, division, print_function
def run():
print(u"Installing preflight hooks...")
# However Pythonista's preflight code imports pythonista_startup once to check what names it contains.
try:
import builtins
except ImportError:
import __builtin__ as builtins
preflight_hooks = []
def _make_new_import():
import sys
_real_import = builtins.__import__
def __import__(name, *args, **kwargs):
if name == "pythonista_startup":
try:
f = sys._getframe(1)
except ValueError:
pass
else:
if f.f_code.co_code in (
b'y\x0e\x00d\x00\x00d\x01\x00l\x00\x00TWn\x07\x00\x01\x01\x01n\x01\x00Xd\x02\x00S',
##b'y\x1c\x00d\x01\x00d\x00\x00l\x00\x00}\x00\x00t\x01\x00|\x00\x00\x83\x01\x00}\x01\x00Wn\x0e\x00\x01\x01\x01g\x00\x00}\x01\x00Yn\x01\x00Xy\x15\x00t\x02\x00\x83\x00\x00\x01t\x03\x00|\x01\x00\x83\x01\x00\x01Wn\x08\x00\x01\x01\x01Yn\x01\x00Xd\x00\x00S',
b'y\x1c\x00d\x01\x00d\x00\x00l\x00\x00}\x00\x00t\x01\x00|\x00\x00\x83\x01\x00}\x01\x00Wn\x0e\x00\x01\x01\x01g\x00\x00}\x01\x00Yn\x01\x00Xyy\x00d\x01\x00d\x00\x00l\x02\x00}\x02\x00d\x01\x00d\x00\x00l\x03\x00}\x03\x00d\x01\x00d\x00\x00l\x04\x00}\x04\x00d\x01\x00d\x00\x00l\x05\x00}\x05\x00|\x02\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x03\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x04\x00j\x06\x00d\x00\x00\x83\x01\x00\x01|\x05\x00j\x06\x00d\x00\x00\x83\x01\x00\x01t\x07\x00\x83\x00\x00\x01t\x08\x00|\x01\x00\x83\x01\x00\x01Wn\x08\x00\x01\x01\x01Yn\x01\x00Xd\x00\x00S',
):
for hook in preflight_hooks:
hook()
return _real_import(name, *args, **kwargs)
__import__.patched = True
return __import__
if not getattr(builtins.__import__, "patched", False):
builtins.__import__ = _make_new_import()
del builtins
del _make_new_import
print(u"Done installing preflight hooks.")
if __name__ == "__main__":
run()
| true | true |
f7200016b3e4bb76f1473df3974edfb197cc475a | 2,321 | py | Python | tests/hazmat/backends/test_commoncrypto.py | balabit-deps/balabit-os-6-python-cryptography | c31d184a56a18bad89a6444313367be71b5b0877 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2015-09-25T16:03:32.000Z | 2015-09-25T16:03:32.000Z | tests/hazmat/backends/test_commoncrypto.py | balabit-deps/balabit-os-6-python-cryptography | c31d184a56a18bad89a6444313367be71b5b0877 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/hazmat/backends/test_commoncrypto.py | balabit-deps/balabit-os-6-python-cryptography | c31d184a56a18bad89a6444313367be71b5b0877 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-07-17T12:26:45.000Z | 2020-07-17T12:26:45.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import pytest
from cryptography import utils
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends import _available_backends
from cryptography.hazmat.primitives.ciphers import Cipher, CipherAlgorithm
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, GCM
from ...utils import raises_unsupported_algorithm
@utils.register_interface(CipherAlgorithm)
class DummyCipher(object):
    # Minimal stand-in cipher used only to drive the backend's
    # unsupported-algorithm error path; it is never actually executed,
    # so block_size/key_size can stay None.
    name = "dummy-cipher"
    block_size = None
    key_size = None
@pytest.mark.skipif("commoncrypto" not in
                    [i.name for i in _available_backends()],
                    reason="CommonCrypto not available")
class TestCommonCrypto(object):
    # Tests for the (macOS-only) CommonCrypto backend; the whole class is
    # skipped on platforms where that backend is unavailable.
    def test_supports_cipher(self):
        """cipher_supported must reject unknown algorithm/mode pairs."""
        from cryptography.hazmat.backends.commoncrypto.backend import backend
        assert backend.cipher_supported(None, None) is False
    def test_register_duplicate_cipher_adapter(self):
        """Registering an already-registered cipher adapter must fail."""
        from cryptography.hazmat.backends.commoncrypto.backend import backend
        with pytest.raises(ValueError):
            backend._register_cipher_adapter(
                AES, backend._lib.kCCAlgorithmAES128,
                CBC, backend._lib.kCCModeCBC
            )
    def test_handle_response(self):
        """CommonCrypto status codes must map to the right exceptions."""
        from cryptography.hazmat.backends.commoncrypto.backend import backend
        with pytest.raises(ValueError):
            backend._check_cipher_response(backend._lib.kCCAlignmentError)
        with pytest.raises(InternalError):
            backend._check_cipher_response(backend._lib.kCCMemoryFailure)
        with pytest.raises(InternalError):
            backend._check_cipher_response(backend._lib.kCCDecodeError)
    def test_nonexistent_aead_cipher(self):
        """An unknown cipher in an AEAD mode must raise UnsupportedAlgorithm."""
        from cryptography.hazmat.backends.commoncrypto.backend import Backend
        b = Backend()
        cipher = Cipher(
            DummyCipher(), GCM(b"fake_iv_here"), backend=b,
        )
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
            cipher.encryptor()
| 37.435484 | 79 | 0.734166 |
from __future__ import absolute_import, division, print_function
import pytest
from cryptography import utils
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends import _available_backends
from cryptography.hazmat.primitives.ciphers import Cipher, CipherAlgorithm
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, GCM
from ...utils import raises_unsupported_algorithm
@utils.register_interface(CipherAlgorithm)
class DummyCipher(object):
name = "dummy-cipher"
block_size = None
key_size = None
@pytest.mark.skipif("commoncrypto" not in
[i.name for i in _available_backends()],
reason="CommonCrypto not available")
class TestCommonCrypto(object):
def test_supports_cipher(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
assert backend.cipher_supported(None, None) is False
def test_register_duplicate_cipher_adapter(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
with pytest.raises(ValueError):
backend._register_cipher_adapter(
AES, backend._lib.kCCAlgorithmAES128,
CBC, backend._lib.kCCModeCBC
)
def test_handle_response(self):
from cryptography.hazmat.backends.commoncrypto.backend import backend
with pytest.raises(ValueError):
backend._check_cipher_response(backend._lib.kCCAlignmentError)
with pytest.raises(InternalError):
backend._check_cipher_response(backend._lib.kCCMemoryFailure)
with pytest.raises(InternalError):
backend._check_cipher_response(backend._lib.kCCDecodeError)
def test_nonexistent_aead_cipher(self):
from cryptography.hazmat.backends.commoncrypto.backend import Backend
b = Backend()
cipher = Cipher(
DummyCipher(), GCM(b"fake_iv_here"), backend=b,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
| true | true |
f720016578b11272f4a943f87114d1bf4673f739 | 15,319 | py | Python | sdk/python/pulumi_azure_nextgen/storagesync/latest/get_registered_server.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/storagesync/latest/get_registered_server.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/storagesync/latest/get_registered_server.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRegisteredServerResult',
'AwaitableGetRegisteredServerResult',
'get_registered_server',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:storagesync:getRegisteredServer'.""", DeprecationWarning)
@pulumi.output_type
class GetRegisteredServerResult:
"""
Registered Server resource.
"""
def __init__(__self__, agent_version=None, cluster_id=None, cluster_name=None, discovery_endpoint_uri=None, friendly_name=None, id=None, last_heart_beat=None, last_operation_name=None, last_workflow_id=None, management_endpoint_uri=None, monitoring_configuration=None, monitoring_endpoint_uri=None, name=None, provisioning_state=None, resource_location=None, server_certificate=None, server_id=None, server_management_error_code=None, server_os_version=None, server_role=None, service_location=None, storage_sync_service_uid=None, type=None):
if agent_version and not isinstance(agent_version, str):
raise TypeError("Expected argument 'agent_version' to be a str")
pulumi.set(__self__, "agent_version", agent_version)
if cluster_id and not isinstance(cluster_id, str):
raise TypeError("Expected argument 'cluster_id' to be a str")
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_name and not isinstance(cluster_name, str):
raise TypeError("Expected argument 'cluster_name' to be a str")
pulumi.set(__self__, "cluster_name", cluster_name)
if discovery_endpoint_uri and not isinstance(discovery_endpoint_uri, str):
raise TypeError("Expected argument 'discovery_endpoint_uri' to be a str")
pulumi.set(__self__, "discovery_endpoint_uri", discovery_endpoint_uri)
if friendly_name and not isinstance(friendly_name, str):
raise TypeError("Expected argument 'friendly_name' to be a str")
pulumi.set(__self__, "friendly_name", friendly_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_heart_beat and not isinstance(last_heart_beat, str):
raise TypeError("Expected argument 'last_heart_beat' to be a str")
pulumi.set(__self__, "last_heart_beat", last_heart_beat)
if last_operation_name and not isinstance(last_operation_name, str):
raise TypeError("Expected argument 'last_operation_name' to be a str")
pulumi.set(__self__, "last_operation_name", last_operation_name)
if last_workflow_id and not isinstance(last_workflow_id, str):
raise TypeError("Expected argument 'last_workflow_id' to be a str")
pulumi.set(__self__, "last_workflow_id", last_workflow_id)
if management_endpoint_uri and not isinstance(management_endpoint_uri, str):
raise TypeError("Expected argument 'management_endpoint_uri' to be a str")
pulumi.set(__self__, "management_endpoint_uri", management_endpoint_uri)
if monitoring_configuration and not isinstance(monitoring_configuration, str):
raise TypeError("Expected argument 'monitoring_configuration' to be a str")
pulumi.set(__self__, "monitoring_configuration", monitoring_configuration)
if monitoring_endpoint_uri and not isinstance(monitoring_endpoint_uri, str):
raise TypeError("Expected argument 'monitoring_endpoint_uri' to be a str")
pulumi.set(__self__, "monitoring_endpoint_uri", monitoring_endpoint_uri)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_location and not isinstance(resource_location, str):
raise TypeError("Expected argument 'resource_location' to be a str")
pulumi.set(__self__, "resource_location", resource_location)
if server_certificate and not isinstance(server_certificate, str):
raise TypeError("Expected argument 'server_certificate' to be a str")
pulumi.set(__self__, "server_certificate", server_certificate)
if server_id and not isinstance(server_id, str):
raise TypeError("Expected argument 'server_id' to be a str")
pulumi.set(__self__, "server_id", server_id)
if server_management_error_code and not isinstance(server_management_error_code, int):
raise TypeError("Expected argument 'server_management_error_code' to be a int")
pulumi.set(__self__, "server_management_error_code", server_management_error_code)
if server_os_version and not isinstance(server_os_version, str):
raise TypeError("Expected argument 'server_os_version' to be a str")
pulumi.set(__self__, "server_os_version", server_os_version)
if server_role and not isinstance(server_role, str):
raise TypeError("Expected argument 'server_role' to be a str")
pulumi.set(__self__, "server_role", server_role)
if service_location and not isinstance(service_location, str):
raise TypeError("Expected argument 'service_location' to be a str")
pulumi.set(__self__, "service_location", service_location)
if storage_sync_service_uid and not isinstance(storage_sync_service_uid, str):
raise TypeError("Expected argument 'storage_sync_service_uid' to be a str")
pulumi.set(__self__, "storage_sync_service_uid", storage_sync_service_uid)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[str]:
"""
Registered Server Agent Version
"""
return pulumi.get(self, "agent_version")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[str]:
"""
Registered Server clusterId
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[str]:
"""
Registered Server clusterName
"""
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="discoveryEndpointUri")
def discovery_endpoint_uri(self) -> Optional[str]:
"""
Resource discoveryEndpointUri
"""
return pulumi.get(self, "discovery_endpoint_uri")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
"""
Friendly Name
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastHeartBeat")
def last_heart_beat(self) -> Optional[str]:
"""
Registered Server last heart beat
"""
return pulumi.get(self, "last_heart_beat")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> Optional[str]:
"""
Resource Last Operation Name
"""
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> Optional[str]:
"""
Registered Server lastWorkflowId
"""
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter(name="managementEndpointUri")
def management_endpoint_uri(self) -> Optional[str]:
"""
Management Endpoint Uri
"""
return pulumi.get(self, "management_endpoint_uri")
@property
@pulumi.getter(name="monitoringConfiguration")
def monitoring_configuration(self) -> Optional[str]:
"""
Monitoring Configuration
"""
return pulumi.get(self, "monitoring_configuration")
@property
@pulumi.getter(name="monitoringEndpointUri")
def monitoring_endpoint_uri(self) -> Optional[str]:
"""
Telemetry Endpoint Uri
"""
return pulumi.get(self, "monitoring_endpoint_uri")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Registered Server Provisioning State
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[str]:
"""
Resource Location
"""
return pulumi.get(self, "resource_location")
@property
@pulumi.getter(name="serverCertificate")
def server_certificate(self) -> Optional[str]:
"""
Registered Server Certificate
"""
return pulumi.get(self, "server_certificate")
@property
@pulumi.getter(name="serverId")
def server_id(self) -> Optional[str]:
"""
Registered Server serverId
"""
return pulumi.get(self, "server_id")
@property
@pulumi.getter(name="serverManagementErrorCode")
def server_management_error_code(self) -> Optional[int]:
"""
Registered Server Management Error Code
"""
return pulumi.get(self, "server_management_error_code")
@property
@pulumi.getter(name="serverOSVersion")
def server_os_version(self) -> Optional[str]:
"""
Registered Server OS Version
"""
return pulumi.get(self, "server_os_version")
@property
@pulumi.getter(name="serverRole")
def server_role(self) -> Optional[str]:
"""
Registered Server serverRole
"""
return pulumi.get(self, "server_role")
@property
@pulumi.getter(name="serviceLocation")
def service_location(self) -> Optional[str]:
"""
Service Location
"""
return pulumi.get(self, "service_location")
@property
@pulumi.getter(name="storageSyncServiceUid")
def storage_sync_service_uid(self) -> Optional[str]:
"""
Registered Server storageSyncServiceUid
"""
return pulumi.get(self, "storage_sync_service_uid")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetRegisteredServerResult(GetRegisteredServerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegisteredServerResult(
agent_version=self.agent_version,
cluster_id=self.cluster_id,
cluster_name=self.cluster_name,
discovery_endpoint_uri=self.discovery_endpoint_uri,
friendly_name=self.friendly_name,
id=self.id,
last_heart_beat=self.last_heart_beat,
last_operation_name=self.last_operation_name,
last_workflow_id=self.last_workflow_id,
management_endpoint_uri=self.management_endpoint_uri,
monitoring_configuration=self.monitoring_configuration,
monitoring_endpoint_uri=self.monitoring_endpoint_uri,
name=self.name,
provisioning_state=self.provisioning_state,
resource_location=self.resource_location,
server_certificate=self.server_certificate,
server_id=self.server_id,
server_management_error_code=self.server_management_error_code,
server_os_version=self.server_os_version,
server_role=self.server_role,
service_location=self.service_location,
storage_sync_service_uid=self.storage_sync_service_uid,
type=self.type)
def get_registered_server(resource_group_name: Optional[str] = None,
server_id: Optional[str] = None,
storage_sync_service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegisteredServerResult:
"""
Registered Server resource.
Latest API Version: 2020-03-01.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str server_id: GUID identifying the on-premises server.
:param str storage_sync_service_name: Name of Storage Sync Service resource.
"""
pulumi.log.warn("get_registered_server is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:storagesync:getRegisteredServer'.")
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serverId'] = server_id
__args__['storageSyncServiceName'] = storage_sync_service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:storagesync/latest:getRegisteredServer', __args__, opts=opts, typ=GetRegisteredServerResult).value
return AwaitableGetRegisteredServerResult(
agent_version=__ret__.agent_version,
cluster_id=__ret__.cluster_id,
cluster_name=__ret__.cluster_name,
discovery_endpoint_uri=__ret__.discovery_endpoint_uri,
friendly_name=__ret__.friendly_name,
id=__ret__.id,
last_heart_beat=__ret__.last_heart_beat,
last_operation_name=__ret__.last_operation_name,
last_workflow_id=__ret__.last_workflow_id,
management_endpoint_uri=__ret__.management_endpoint_uri,
monitoring_configuration=__ret__.monitoring_configuration,
monitoring_endpoint_uri=__ret__.monitoring_endpoint_uri,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_location=__ret__.resource_location,
server_certificate=__ret__.server_certificate,
server_id=__ret__.server_id,
server_management_error_code=__ret__.server_management_error_code,
server_os_version=__ret__.server_os_version,
server_role=__ret__.server_role,
service_location=__ret__.service_location,
storage_sync_service_uid=__ret__.storage_sync_service_uid,
type=__ret__.type)
| 42.671309 | 546 | 0.687447 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRegisteredServerResult',
'AwaitableGetRegisteredServerResult',
'get_registered_server',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:storagesync:getRegisteredServer'.""", DeprecationWarning)
@pulumi.output_type
class GetRegisteredServerResult:
def __init__(__self__, agent_version=None, cluster_id=None, cluster_name=None, discovery_endpoint_uri=None, friendly_name=None, id=None, last_heart_beat=None, last_operation_name=None, last_workflow_id=None, management_endpoint_uri=None, monitoring_configuration=None, monitoring_endpoint_uri=None, name=None, provisioning_state=None, resource_location=None, server_certificate=None, server_id=None, server_management_error_code=None, server_os_version=None, server_role=None, service_location=None, storage_sync_service_uid=None, type=None):
if agent_version and not isinstance(agent_version, str):
raise TypeError("Expected argument 'agent_version' to be a str")
pulumi.set(__self__, "agent_version", agent_version)
if cluster_id and not isinstance(cluster_id, str):
raise TypeError("Expected argument 'cluster_id' to be a str")
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_name and not isinstance(cluster_name, str):
raise TypeError("Expected argument 'cluster_name' to be a str")
pulumi.set(__self__, "cluster_name", cluster_name)
if discovery_endpoint_uri and not isinstance(discovery_endpoint_uri, str):
raise TypeError("Expected argument 'discovery_endpoint_uri' to be a str")
pulumi.set(__self__, "discovery_endpoint_uri", discovery_endpoint_uri)
if friendly_name and not isinstance(friendly_name, str):
raise TypeError("Expected argument 'friendly_name' to be a str")
pulumi.set(__self__, "friendly_name", friendly_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_heart_beat and not isinstance(last_heart_beat, str):
raise TypeError("Expected argument 'last_heart_beat' to be a str")
pulumi.set(__self__, "last_heart_beat", last_heart_beat)
if last_operation_name and not isinstance(last_operation_name, str):
raise TypeError("Expected argument 'last_operation_name' to be a str")
pulumi.set(__self__, "last_operation_name", last_operation_name)
if last_workflow_id and not isinstance(last_workflow_id, str):
raise TypeError("Expected argument 'last_workflow_id' to be a str")
pulumi.set(__self__, "last_workflow_id", last_workflow_id)
if management_endpoint_uri and not isinstance(management_endpoint_uri, str):
raise TypeError("Expected argument 'management_endpoint_uri' to be a str")
pulumi.set(__self__, "management_endpoint_uri", management_endpoint_uri)
if monitoring_configuration and not isinstance(monitoring_configuration, str):
raise TypeError("Expected argument 'monitoring_configuration' to be a str")
pulumi.set(__self__, "monitoring_configuration", monitoring_configuration)
if monitoring_endpoint_uri and not isinstance(monitoring_endpoint_uri, str):
raise TypeError("Expected argument 'monitoring_endpoint_uri' to be a str")
pulumi.set(__self__, "monitoring_endpoint_uri", monitoring_endpoint_uri)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_location and not isinstance(resource_location, str):
raise TypeError("Expected argument 'resource_location' to be a str")
pulumi.set(__self__, "resource_location", resource_location)
if server_certificate and not isinstance(server_certificate, str):
raise TypeError("Expected argument 'server_certificate' to be a str")
pulumi.set(__self__, "server_certificate", server_certificate)
if server_id and not isinstance(server_id, str):
raise TypeError("Expected argument 'server_id' to be a str")
pulumi.set(__self__, "server_id", server_id)
if server_management_error_code and not isinstance(server_management_error_code, int):
raise TypeError("Expected argument 'server_management_error_code' to be a int")
pulumi.set(__self__, "server_management_error_code", server_management_error_code)
if server_os_version and not isinstance(server_os_version, str):
raise TypeError("Expected argument 'server_os_version' to be a str")
pulumi.set(__self__, "server_os_version", server_os_version)
if server_role and not isinstance(server_role, str):
raise TypeError("Expected argument 'server_role' to be a str")
pulumi.set(__self__, "server_role", server_role)
if service_location and not isinstance(service_location, str):
raise TypeError("Expected argument 'service_location' to be a str")
pulumi.set(__self__, "service_location", service_location)
if storage_sync_service_uid and not isinstance(storage_sync_service_uid, str):
raise TypeError("Expected argument 'storage_sync_service_uid' to be a str")
pulumi.set(__self__, "storage_sync_service_uid", storage_sync_service_uid)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[str]:
return pulumi.get(self, "agent_version")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[str]:
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> Optional[str]:
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="discoveryEndpointUri")
def discovery_endpoint_uri(self) -> Optional[str]:
return pulumi.get(self, "discovery_endpoint_uri")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastHeartBeat")
def last_heart_beat(self) -> Optional[str]:
return pulumi.get(self, "last_heart_beat")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> Optional[str]:
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> Optional[str]:
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter(name="managementEndpointUri")
def management_endpoint_uri(self) -> Optional[str]:
return pulumi.get(self, "management_endpoint_uri")
@property
@pulumi.getter(name="monitoringConfiguration")
def monitoring_configuration(self) -> Optional[str]:
return pulumi.get(self, "monitoring_configuration")
@property
@pulumi.getter(name="monitoringEndpointUri")
def monitoring_endpoint_uri(self) -> Optional[str]:
return pulumi.get(self, "monitoring_endpoint_uri")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[str]:
return pulumi.get(self, "resource_location")
@property
@pulumi.getter(name="serverCertificate")
def server_certificate(self) -> Optional[str]:
return pulumi.get(self, "server_certificate")
@property
@pulumi.getter(name="serverId")
def server_id(self) -> Optional[str]:
return pulumi.get(self, "server_id")
@property
@pulumi.getter(name="serverManagementErrorCode")
def server_management_error_code(self) -> Optional[int]:
return pulumi.get(self, "server_management_error_code")
@property
@pulumi.getter(name="serverOSVersion")
def server_os_version(self) -> Optional[str]:
return pulumi.get(self, "server_os_version")
@property
@pulumi.getter(name="serverRole")
def server_role(self) -> Optional[str]:
return pulumi.get(self, "server_role")
@property
@pulumi.getter(name="serviceLocation")
def service_location(self) -> Optional[str]:
return pulumi.get(self, "service_location")
@property
@pulumi.getter(name="storageSyncServiceUid")
def storage_sync_service_uid(self) -> Optional[str]:
return pulumi.get(self, "storage_sync_service_uid")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetRegisteredServerResult(GetRegisteredServerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegisteredServerResult(
agent_version=self.agent_version,
cluster_id=self.cluster_id,
cluster_name=self.cluster_name,
discovery_endpoint_uri=self.discovery_endpoint_uri,
friendly_name=self.friendly_name,
id=self.id,
last_heart_beat=self.last_heart_beat,
last_operation_name=self.last_operation_name,
last_workflow_id=self.last_workflow_id,
management_endpoint_uri=self.management_endpoint_uri,
monitoring_configuration=self.monitoring_configuration,
monitoring_endpoint_uri=self.monitoring_endpoint_uri,
name=self.name,
provisioning_state=self.provisioning_state,
resource_location=self.resource_location,
server_certificate=self.server_certificate,
server_id=self.server_id,
server_management_error_code=self.server_management_error_code,
server_os_version=self.server_os_version,
server_role=self.server_role,
service_location=self.service_location,
storage_sync_service_uid=self.storage_sync_service_uid,
type=self.type)
def get_registered_server(resource_group_name: Optional[str] = None,
server_id: Optional[str] = None,
storage_sync_service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegisteredServerResult:
pulumi.log.warn("get_registered_server is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:storagesync:getRegisteredServer'.")
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serverId'] = server_id
__args__['storageSyncServiceName'] = storage_sync_service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:storagesync/latest:getRegisteredServer', __args__, opts=opts, typ=GetRegisteredServerResult).value
return AwaitableGetRegisteredServerResult(
agent_version=__ret__.agent_version,
cluster_id=__ret__.cluster_id,
cluster_name=__ret__.cluster_name,
discovery_endpoint_uri=__ret__.discovery_endpoint_uri,
friendly_name=__ret__.friendly_name,
id=__ret__.id,
last_heart_beat=__ret__.last_heart_beat,
last_operation_name=__ret__.last_operation_name,
last_workflow_id=__ret__.last_workflow_id,
management_endpoint_uri=__ret__.management_endpoint_uri,
monitoring_configuration=__ret__.monitoring_configuration,
monitoring_endpoint_uri=__ret__.monitoring_endpoint_uri,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_location=__ret__.resource_location,
server_certificate=__ret__.server_certificate,
server_id=__ret__.server_id,
server_management_error_code=__ret__.server_management_error_code,
server_os_version=__ret__.server_os_version,
server_role=__ret__.server_role,
service_location=__ret__.service_location,
storage_sync_service_uid=__ret__.storage_sync_service_uid,
type=__ret__.type)
| true | true |
f72001cd4e99d72b538406d817ae842ce6ada978 | 37,141 | py | Python | test/functional/rpc_psbt.py | GumFruit/cpuchain | c2fb213eb1e376a7457a8eecf907eca719eb4c99 | [
"MIT"
] | null | null | null | test/functional/rpc_psbt.py | GumFruit/cpuchain | c2fb213eb1e376a7457a8eecf907eca719eb4c99 | [
"MIT"
] | null | null | null | test/functional/rpc_psbt.py | GumFruit/cpuchain | c2fb213eb1e376a7457a8eecf907eca719eb4c99 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from decimal import Decimal
from itertools import product
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
find_output,
)
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [
["-walletrbf=1"],
["-walletrbf=0", "-changetype=legacy"],
[]
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# TODO: Re-enable this test with segwit v1
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
# Topology of test network is linear, so this one call is enough
self.disconnect_nodes(0, 1)
# Create watchonly on online_node
online_node.createwallet(wallet_name='wonline', disable_private_keys=True)
wonline = online_node.get_wallet_rpc('wonline')
w2 = online_node.get_wallet_rpc('')
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = w2.getnewaddress(address_type="p2sh-segwit")
wonline.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
mining_node.generate(nblocks=1)
self.sync_blocks([mining_node, online_node])
# Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO)
utxos = wonline.listunspent(addresses=[offline_addr])
raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert "witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
mining_node.generate(1)
self.sync_blocks([mining_node, online_node])
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
wonline.unloadwallet()
# Reconnect
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
def assert_change_type(self, psbtx, expected_type):
"""Assert that the given PSBT has a change output with the given type."""
# The decodepsbt RPC is stateless and independent of any settings, we can always just call it on the first node
decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"])
changepos = psbtx["changepos"]
assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type)
def run_test(self):
    """Exercise the PSBT (BIP 174) RPC workflow end to end.

    Covers walletcreatefundedpsbt (input selection, input locking, fee-rate
    and estimate-mode argument validation), walletprocesspsbt signing,
    combinepsbt/joinpsbts/finalizepsbt, utxoupdatepsbt, the BIP 174 test
    vectors from data/rpc_psbt.json, and analyzepsbt state reporting.
    """
    # Create and fund a raw tx for sending 10 CPU
    psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
    # If inputs are specified, do not automatically add more:
    utxo1 = self.nodes[0].listunspent()[0]
    assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90})
    psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt']
    assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2)
    # Inputs argument can be null
    self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10})
    # Node 1 should not be able to add anything to it but still return the psbtx same as before
    psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
    assert_equal(psbtx1, psbtx)
    # Sign the transaction and send
    signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
    final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
    self.nodes[0].sendrawtransaction(final_tx)
    # Manually selected inputs can be locked:
    assert_equal(len(self.nodes[0].listlockunspent()), 0)
    utxo1 = self.nodes[0].listunspent()[0]
    psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"]
    assert_equal(len(self.nodes[0].listlockunspent()), 1)
    # Locks are ignored for manually selected inputs
    self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0)
    # Create p2sh, p2wpkh, and p2wsh addresses
    pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
    pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
    pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
    # Setup watchonly wallets
    self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
    wmulti = self.nodes[2].get_wallet_rpc('wmulti')
    # Create all the addresses
    p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
    p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
    p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
    # Legacy (non-descriptor) watch-only wallets must import the multisig
    # addresses explicitly before they can see funds sent to them.
    if not self.options.descriptors:
        wmulti.importaddress(p2sh)
        wmulti.importaddress(p2wsh)
        wmulti.importaddress(p2sh_p2wsh)
    p2wpkh = self.nodes[1].getnewaddress("", "bech32")
    p2pkh = self.nodes[1].getnewaddress("", "legacy")
    p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
    # fund those addresses
    rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
    rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
    signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
    txid = self.nodes[0].sendrawtransaction(signed_tx)
    self.nodes[0].generate(6)
    self.sync_all()
    # Find the output pos
    p2sh_pos = -1
    p2wsh_pos = -1
    p2wpkh_pos = -1
    p2pkh_pos = -1
    p2sh_p2wsh_pos = -1
    p2sh_p2wpkh_pos = -1
    decoded = self.nodes[0].decoderawtransaction(signed_tx)
    for out in decoded['vout']:
        if out['scriptPubKey']['address'] == p2sh:
            p2sh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2wsh:
            p2wsh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2wpkh:
            p2wpkh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2sh_p2wsh:
            p2sh_p2wsh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2sh_p2wpkh:
            p2sh_p2wpkh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2pkh:
            p2pkh_pos = out['n']
    # inputs/outputs are reused by the fee-rate argument tests below
    inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}]
    outputs = [{self.nodes[1].getnewaddress(): 29.99}]
    # spend single key from node 1
    created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs)
    walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt'])
    # Make sure it has both types of UTXOs
    decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt'])
    assert 'non_witness_utxo' in decoded['inputs'][0]
    assert 'witness_utxo' in decoded['inputs'][0]
    # Check decodepsbt fee calculation (input values shall only be counted once per UTXO)
    assert_equal(decoded['fee'], created_psbt['fee'])
    assert_equal(walletprocesspsbt_out['complete'], True)
    self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
    self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 CPU/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
    res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True})
    assert_approx(res1["fee"], 0.055, 0.005)
    res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True})
    assert_approx(res2["fee"], 0.055, 0.005)
    self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed")
    res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.999", "add_inputs": True})
    assert_approx(res3["fee"], 0.00000381, 0.0000001)
    res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True})
    assert_approx(res4["fee"], 0.00000381, 0.0000001)
    self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid")
    for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]):
        assert_equal(0, self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: zero_value, "add_inputs": True})["fee"])
    self.log.info("Test invalid fee rate settings")
    for param, value in {("fee_rate", 100000), ("feeRate", 1)}:
        assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True})
        assert_raises_rpc_error(-3, "Amount out of range",
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True})
        assert_raises_rpc_error(-3, "Amount is not a number or string",
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True})
        # Test fee rate values that don't pass fixed-point parsing checks.
        for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
            assert_raises_rpc_error(-3, "Invalid amount",
                self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: invalid_value, "add_inputs": True})
    # Test fee_rate values that cannot be represented in sat/vB.
    for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
        assert_raises_rpc_error(-3, "Invalid amount",
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": invalid_value, "add_inputs": True})
    self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
    assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (CPU/kvB)",
        self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True})
    self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
    assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
        self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True})
    for param in ["feeRate", "fee_rate"]:
        self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
        assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
            "target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
            self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True})
    self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
    assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
        self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True})
    self.log.info("- raises RPC error with invalid estimate_mode settings")
    for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
        assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True})
    for mode in ["", "foo", Decimal("3.141592")]:
        assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True})
    self.log.info("- raises RPC error with invalid conf_target settings")
    for mode in ["unset", "economical", "conservative"]:
        self.log.debug("{}".format(mode))
        for k, v in {"string": "", "object": {"foo": "bar"}}.items():
            assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
                self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True})
        for n in [-1, 0, 1009]:
            assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
                self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True})
    self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error")
    # previously this was silently capped at -maxtxfee
    for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items():
        msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
        assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add})
        assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add})
    self.log.info("Test various PSBT operations")
    # partially sign multisig things with node 1
    psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, options={'changeAddress': self.nodes[1].getrawchangeaddress()})['psbt']
    walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
    psbtx = walletprocesspsbt_out['psbt']
    assert_equal(walletprocesspsbt_out['complete'], False)
    # Unload wmulti, we don't need it anymore
    wmulti.unloadwallet()
    # partially sign with node 2. This should be complete and sendable
    walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
    assert_equal(walletprocesspsbt_out['complete'], True)
    self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
    # check that walletprocesspsbt fails to decode a non-psbt
    rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
    assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
    # Convert a non-psbt to psbt and make sure we can decode it
    rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
    rawtx = self.nodes[0].fundrawtransaction(rawtx)
    new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
    self.nodes[0].decodepsbt(new_psbt)
    # Make sure that a non-psbt with signatures cannot be converted
    # Error could be either "TX decode failed" (segwit inputs causes parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
    # We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
    signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
    assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
    assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
    # Unless we allow it to convert and strip signatures
    self.nodes[0].converttopsbt(signedtx['hex'], True)
    # Explicitly allow converting non-empty txs
    new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
    self.nodes[0].decodepsbt(new_psbt)
    # Create outputs to nodes 1 and 2
    node1_addr = self.nodes[1].getnewaddress()
    node2_addr = self.nodes[2].getnewaddress()
    txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
    txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
    blockhash = self.nodes[0].generate(6)[0]
    self.sync_all()
    vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
    vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
    # Create a psbt spending outputs from nodes 1 and 2
    psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
    # Update psbts, should only have data for one input and not the other
    psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
    psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
    assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
    # Check that BIP32 path was added
    assert "bip32_derivs" in psbt1_decoded['inputs'][0]
    psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
    psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
    assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
    # Check that BIP32 paths were not added
    assert "bip32_derivs" not in psbt2_decoded['inputs'][1]
    # Sign PSBTs (workaround issue #18039)
    psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
    psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
    # Combine, finalize, and send the psbts
    combined = self.nodes[0].combinepsbt([psbt1, psbt2])
    finalized = self.nodes[0].finalizepsbt(combined)['hex']
    self.nodes[0].sendrawtransaction(finalized)
    self.nodes[0].generate(6)
    self.sync_all()
    # Test additional args in walletcreatepsbt
    # Make sure both pre-included and funded inputs
    # have the correct sequence numbers based on
    # replaceable arg
    block_height = self.nodes[0].getblockcount()
    unspent = self.nodes[0].listunspent()[0]
    psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False)
    decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
    for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
        assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        assert "bip32_derivs" not in psbt_in
    assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
    # Same construction with only locktime set and RBF explicitly enabled
    psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True)
    decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
    for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
        assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        assert "bip32_derivs" in psbt_in
    assert_equal(decoded_psbt["tx"]["locktime"], block_height)
    # Same construction without optional arguments
    psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
    decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
    for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
        assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        assert "bip32_derivs" in psbt_in
    assert_equal(decoded_psbt["tx"]["locktime"], 0)
    # Same construction without optional arguments, for a node with -walletrbf=0
    unspent1 = self.nodes[1].listunspent()[0]
    psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True})
    decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
    for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
        assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        assert "bip32_derivs" in psbt_in
    # Make sure change address wallet does not have P2SH innerscript access to results in success
    # when attempting BnB coin selection
    self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
    # Make sure the wallet's change type is respected by default
    small_output = {self.nodes[0].getnewaddress():0.1}
    psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output])
    self.assert_change_type(psbtx_native, "witness_v0_keyhash")
    psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output])
    self.assert_change_type(psbtx_legacy, "pubkeyhash")
    # Make sure the change type of the wallet can also be overwritten
    psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"})
    self.assert_change_type(psbtx_np2wkh, "scripthash")
    # Make sure the change type cannot be specified if a change address is given
    invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()}
    assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options)
    # Regression test for 14473 (mishandling of already-signed witness transaction):
    psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True})
    complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
    double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
    assert_equal(complete_psbt, double_processed_psbt)
    # We don't care about the decode result, but decoding must succeed.
    self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
    # Make sure unsafe inputs are included if specified
    self.nodes[2].createwallet(wallet_name="unsafe")
    wunsafe = self.nodes[2].get_wallet_rpc("unsafe")
    self.nodes[0].sendtoaddress(wunsafe.getnewaddress(), 2)
    self.sync_mempools()
    assert_raises_rpc_error(-4, "Insufficient funds", wunsafe.walletcreatefundedpsbt, [], [{self.nodes[0].getnewaddress(): 1}])
    wunsafe.walletcreatefundedpsbt([], [{self.nodes[0].getnewaddress(): 1}], 0, {"include_unsafe": True})
    # BIP 174 Test Vectors
    # Check that unknown values are just passed through
    unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
    unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
    assert_equal(unknown_psbt, unknown_out)
    # Open the data file
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
        d = json.load(f)
        invalids = d['invalid']
        valids = d['valid']
        creators = d['creator']
        signers = d['signer']
        combiners = d['combiner']
        finalizers = d['finalizer']
        extractors = d['extractor']
    # Invalid PSBTs
    for invalid in invalids:
        assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
    # Valid PSBTs
    for valid in valids:
        self.nodes[0].decodepsbt(valid)
    # Creator Tests
    for creator in creators:
        created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
        assert_equal(created_tx, creator['result'])
    # Signer tests
    for i, signer in enumerate(signers):
        self.nodes[2].createwallet(wallet_name="wallet{}".format(i))
        wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
        for key in signer['privkeys']:
            wrpc.importprivkey(key)
        signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
        assert_equal(signed_tx, signer['result'])
    # Combiner test
    for combiner in combiners:
        combined = self.nodes[2].combinepsbt(combiner['combine'])
        assert_equal(combined, combiner['result'])
    # Empty combiner test
    assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
    # Finalizer test
    for finalizer in finalizers:
        finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
        assert_equal(finalized, finalizer['result'])
    # Extractor test
    for extractor in extractors:
        extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
        assert_equal(extracted, extractor['result'])
    # Unload extra wallets
    for i, signer in enumerate(signers):
        self.nodes[2].unloadwallet("wallet{}".format(i))
    # TODO: Re-enable this for segwit v1
    # self.test_utxo_conversion()
    # Test that psbts with p2pkh outputs are created properly
    p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
    psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
    self.nodes[0].decodepsbt(psbt['psbt'])
    # Test decoding error: invalid base64
    assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
    # Send to all types of addresses
    addr1 = self.nodes[1].getnewaddress("", "bech32")
    txid1 = self.nodes[0].sendtoaddress(addr1, 11)
    vout1 = find_output(self.nodes[0], txid1, 11)
    addr2 = self.nodes[1].getnewaddress("", "legacy")
    txid2 = self.nodes[0].sendtoaddress(addr2, 11)
    vout2 = find_output(self.nodes[0], txid2, 11)
    addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
    txid3 = self.nodes[0].sendtoaddress(addr3, 11)
    vout3 = find_output(self.nodes[0], txid3, 11)
    self.sync_all()
    def test_psbt_input_keys(psbt_input, keys):
        """Check that the psbt input has only the expected keys."""
        assert_equal(set(keys), set(psbt_input.keys()))
    # Create a PSBT. None of the inputs are filled initially
    psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
    decoded = self.nodes[1].decodepsbt(psbt)
    test_psbt_input_keys(decoded['inputs'][0], [])
    test_psbt_input_keys(decoded['inputs'][1], [])
    test_psbt_input_keys(decoded['inputs'][2], [])
    # Update a PSBT with UTXOs from the node
    # Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
    updated = self.nodes[1].utxoupdatepsbt(psbt)
    decoded = self.nodes[1].decodepsbt(updated)
    test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo'])
    test_psbt_input_keys(decoded['inputs'][1], [])
    test_psbt_input_keys(decoded['inputs'][2], [])
    # Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in
    descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]]
    updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs)
    decoded = self.nodes[1].decodepsbt(updated)
    test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'bip32_derivs'])
    test_psbt_input_keys(decoded['inputs'][1], [])
    test_psbt_input_keys(decoded['inputs'][2], ['witness_utxo', 'bip32_derivs', 'redeem_script'])
    # Two PSBTs with a common input should not be joinable
    psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
    assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
    # Join two distinct PSBTs
    addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
    txid4 = self.nodes[0].sendtoaddress(addr4, 5)
    vout4 = find_output(self.nodes[0], txid4, 5)
    self.nodes[0].generate(6)
    self.sync_all()
    psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
    psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
    psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
    assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
    joined = self.nodes[0].joinpsbts([psbt, psbt2])
    joined_decoded = self.nodes[0].decodepsbt(joined)
    assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
    # Check that joining shuffles the inputs and outputs
    # 10 attempts should be enough to get a shuffled join
    shuffled = False
    for _ in range(10):
        shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
        shuffled |= joined != shuffled_joined
        if shuffled:
            break
    assert shuffled
    # Newly created PSBT needs UTXOs and updating
    addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
    txid = self.nodes[0].sendtoaddress(addr, 7)
    addrinfo = self.nodes[1].getaddressinfo(addr)
    blockhash = self.nodes[0].generate(6)[0]
    self.sync_all()
    vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
    psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
    analyzed = self.nodes[0].analyzepsbt(psbt)
    assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'
    # After update with wallet, only needs signing
    updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
    analyzed = self.nodes[0].analyzepsbt(updated)
    assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']
    # Check fee and size things
    assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')
    # After signing and finalizing, needs extracting
    signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
    analyzed = self.nodes[0].analyzepsbt(signed)
    assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'
    self.log.info("PSBT spending unspendable outputs should have error message and Creator as next")
    analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA')
    assert_equal(analysis['next'], 'creator')
    assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')
    self.log.info("PSBT with invalid values should have error message and Creator as next")
    analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
    assert_equal(analysis['next'], 'creator')
    assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')
    self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
    analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
    assert_equal(analysis['next'], 'finalizer')
    analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
    assert_equal(analysis['next'], 'creator')
    assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')
    analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
    assert_equal(analysis['next'], 'creator')
    assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')
    assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
# Entry point: run the PSBT functional test when invoked directly as a script.
if __name__ == '__main__':
    PSBTTest().main()
| 61.593698 | 584 | 0.675991 |
from decimal import Decimal
from itertools import product
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
find_output,
)
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
    """Run a three-node network with differing wallet RBF/change settings."""
    self.num_nodes = 3
    self.supports_cli = False
    self.extra_args = [
        ["-walletrbf=1"],                        # node0: opt-in RBF enabled
        ["-walletrbf=0", "-changetype=legacy"],  # node1: RBF off, legacy change
        [],                                      # node2: default settings
    ]
def skip_test_if_missing_module(self):
    """Skip the whole test when the node was built without wallet support."""
    self.skip_if_no_wallet()
def test_utxo_conversion(self):
    """Check that signing a PSBT upgrades a non-witness UTXO to a witness UTXO.

    Uses an offline signer (node 0), an online watch-only wallet (node 1),
    and a miner (node 2).
    """
    mining_node = self.nodes[2]
    offline_node = self.nodes[0]
    online_node = self.nodes[1]
    # Topology of test network is linear, so this one call is enough to
    # isolate the offline node from the online node
    self.disconnect_nodes(0, 1)
    # Create a watch-only wallet on the online node
    online_node.createwallet(wallet_name='wonline', disable_private_keys=True)
    wonline = online_node.get_wallet_rpc('wonline')
    w2 = online_node.get_wallet_rpc('')
    # Mine a transaction that credits the offline address
    offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
    online_addr = w2.getnewaddress(address_type="p2sh-segwit")
    wonline.importaddress(offline_addr, "", False)
    mining_node.sendtoaddress(address=offline_addr, amount=1.0)
    mining_node.generate(nblocks=1)
    self.sync_blocks([mining_node, online_node])
    # Construct an unsigned PSBT on the online node, which doesn't know the
    # output is segwit and therefore includes a non-witness UTXO
    utxos = wonline.listunspent(addresses=[offline_addr])
    raw = wonline.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
    psbt = wonline.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
    assert "non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]
    # Have the offline node sign the PSBT (which will update the UTXO to segwit)
    signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
    assert "witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]
    # Make sure we can mine the resulting transaction
    txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
    mining_node.generate(1)
    self.sync_blocks([mining_node, online_node])
    assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
    wonline.unloadwallet()
    # Reconnect
    self.connect_nodes(0, 1)
    self.connect_nodes(0, 2)
def assert_change_type(self, psbtx, expected_type):
    """Assert that the given PSBT has a change output with the given type.

    psbtx is a walletcreatefundedpsbt result dict with "psbt" and "changepos".
    """
    # The decodepsbt RPC is stateless and independent of any settings, we can always just call it on the first node
    decoded_psbt = self.nodes[0].decodepsbt(psbtx["psbt"])
    changepos = psbtx["changepos"]
    assert_equal(decoded_psbt["tx"]["vout"][changepos]["scriptPubKey"]["type"], expected_type)
def run_test(self):
    """Exercise the PSBT RPC surface end to end.

    Covers: creating and funding PSBTs (input selection, locking, change
    type), fee-rate/estimate-mode option validation, multisig signing and
    combining, BIP 174 test vectors, utxoupdatepsbt, joinpsbts, and
    analyzepsbt error reporting.
    """
    # Create and fund a raw tx for sending 10 CPU
    psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']

    # If inputs are specified, do not automatically add more:
    utxo1 = self.nodes[0].listunspent()[0]
    assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[0].walletcreatefundedpsbt, [{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90})
    psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():90}, 0, {"add_inputs": True})['psbt']
    assert_equal(len(self.nodes[0].decodepsbt(psbtx1)['tx']['vin']), 2)

    # Inputs argument can be null
    self.nodes[0].walletcreatefundedpsbt(None, {self.nodes[2].getnewaddress():10})

    # Node 1 should not be able to add anything to it but still return the psbtx same as before
    psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
    assert_equal(psbtx1, psbtx)

    # Sign the transaction and send
    signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
    final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
    self.nodes[0].sendrawtransaction(final_tx)

    # Manually selected inputs can be locked:
    assert_equal(len(self.nodes[0].listlockunspent()), 0)
    utxo1 = self.nodes[0].listunspent()[0]
    psbtx1 = self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0,{"lockUnspents": True})["psbt"]
    assert_equal(len(self.nodes[0].listlockunspent()), 1)

    # Locks are ignored for manually selected inputs
    self.nodes[0].walletcreatefundedpsbt([{"txid": utxo1['txid'], "vout": utxo1['vout']}], {self.nodes[2].getnewaddress():1}, 0)

    # Create p2sh, p2wpkh, and p2wsh addresses
    pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
    pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
    pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']

    # Setup watchonly wallets
    self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
    wmulti = self.nodes[2].get_wallet_rpc('wmulti')

    # Create all the addresses
    p2sh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
    p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
    p2sh_p2wsh = wmulti.addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
    if not self.options.descriptors:
        # Legacy wallets need an explicit importaddress to watch the scripts
        wmulti.importaddress(p2sh)
        wmulti.importaddress(p2wsh)
        wmulti.importaddress(p2sh_p2wsh)
    p2wpkh = self.nodes[1].getnewaddress("", "bech32")
    p2pkh = self.nodes[1].getnewaddress("", "legacy")
    p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")

    # fund those addresses
    rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
    rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
    signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
    txid = self.nodes[0].sendrawtransaction(signed_tx)
    self.nodes[0].generate(6)
    self.sync_all()

    # Find the output pos
    p2sh_pos = -1
    p2wsh_pos = -1
    p2wpkh_pos = -1
    p2pkh_pos = -1
    p2sh_p2wsh_pos = -1
    p2sh_p2wpkh_pos = -1
    decoded = self.nodes[0].decoderawtransaction(signed_tx)
    for out in decoded['vout']:
        if out['scriptPubKey']['address'] == p2sh:
            p2sh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2wsh:
            p2wsh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2wpkh:
            p2wpkh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2sh_p2wsh:
            p2sh_p2wsh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2sh_p2wpkh:
            p2sh_p2wpkh_pos = out['n']
        elif out['scriptPubKey']['address'] == p2pkh:
            p2pkh_pos = out['n']

    inputs = [{"txid": txid, "vout": p2wpkh_pos}, {"txid": txid, "vout": p2sh_p2wpkh_pos}, {"txid": txid, "vout": p2pkh_pos}]
    outputs = [{self.nodes[1].getnewaddress(): 29.99}]

    # spend single key from node 1
    created_psbt = self.nodes[1].walletcreatefundedpsbt(inputs, outputs)
    walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(created_psbt['psbt'])
    # Make sure it has both types of UTXOs
    decoded = self.nodes[1].decodepsbt(walletprocesspsbt_out['psbt'])
    assert 'non_witness_utxo' in decoded['inputs'][0]
    assert 'witness_utxo' in decoded['inputs'][0]
    # Check decodepsbt fee calculation (input values shall only be counted once per UTXO)
    assert_equal(decoded['fee'], created_psbt['fee'])
    assert_equal(walletprocesspsbt_out['complete'], True)
    self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])

    self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 CPU/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
    res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True})
    assert_approx(res1["fee"], 0.055, 0.005)
    res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True})
    assert_approx(res2["fee"], 0.055, 0.005)

    self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed")
    res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.999", "add_inputs": True})
    assert_approx(res3["fee"], 0.00000381, 0.0000001)
    res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True})
    assert_approx(res4["fee"], 0.00000381, 0.0000001)

    self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed and that funding non-standard 'zero-fee' transactions is valid")
    for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]):
        assert_equal(0, self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {param: zero_value, "add_inputs": True})["fee"])

    self.log.info("Test invalid fee rate settings")
    for param, value in {("fee_rate", 100000), ("feeRate", 1)}:
        assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: value, "add_inputs": True})
        assert_raises_rpc_error(-3, "Amount out of range",
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: -1, "add_inputs": True})
        assert_raises_rpc_error(-3, "Amount is not a number or string",
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: {"foo": "bar"}, "add_inputs": True})
        # Test fee rate values that don't pass fixed-point parsing checks.
        for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
            assert_raises_rpc_error(-3, "Invalid amount",
                self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {param: invalid_value, "add_inputs": True})
    # fee_rate (sat/vB) additionally rejects values with sub-satoshi precision
    for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
        assert_raises_rpc_error(-3, "Invalid amount",
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": invalid_value, "add_inputs": True})

    self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
    assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (CPU/kvB)",
        self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True})

    self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
    assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
        self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True})

    for param in ["feeRate", "fee_rate"]:
        self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
        assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
            "target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
            self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {param: 1, "conf_target": 1, "add_inputs": True})

    self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
    assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
        self.nodes[1].walletcreatefundedpsbt ,inputs, outputs, 0, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True})

    self.log.info("- raises RPC error with invalid estimate_mode settings")
    for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
        assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True})
    for mode in ["", "foo", Decimal("3.141592")]:
        assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
            self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True})

    self.log.info("- raises RPC error with invalid conf_target settings")
    for mode in ["unset", "economical", "conservative"]:
        self.log.debug("{}".format(mode))
        for k, v in {"string": "", "object": {"foo": "bar"}}.items():
            assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
                self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": v, "add_inputs": True})
        for n in [-1, 0, 1009]:
            assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008",
                self.nodes[1].walletcreatefundedpsbt, inputs, outputs, 0, {"estimate_mode": mode, "conf_target": n, "add_inputs": True})

    self.log.info("Test walletcreatefundedpsbt with too-high fee rate produces total fee well above -maxtxfee and raises RPC error")
    for bool_add, outputs_array in {True: outputs, False: [{self.nodes[1].getnewaddress(): 1}]}.items():
        msg = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
        assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"fee_rate": 1000000, "add_inputs": bool_add})
        assert_raises_rpc_error(-4, msg, self.nodes[1].walletcreatefundedpsbt, inputs, outputs_array, 0, {"feeRate": 1, "add_inputs": bool_add})

    self.log.info("Test various PSBT operations")
    # Fund a 2-of-3 multisig spend from the watch-only wallet; node 1 cannot complete it alone
    psbtx = wmulti.walletcreatefundedpsbt(inputs=[{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], outputs={self.nodes[1].getnewaddress():29.99}, options={'changeAddress': self.nodes[1].getrawchangeaddress()})['psbt']
    walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
    psbtx = walletprocesspsbt_out['psbt']
    assert_equal(walletprocesspsbt_out['complete'], False)

    wmulti.unloadwallet()

    # partially sign with node 2. This should be complete and sendable
    walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
    assert_equal(walletprocesspsbt_out['complete'], True)
    self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])

    # check that walletprocesspsbt fails to decode a non-psbt
    rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
    assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)

    # Convert a non-psbt to psbt and make sure we can decode it
    rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
    rawtx = self.nodes[0].fundrawtransaction(rawtx)
    new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
    self.nodes[0].decodepsbt(new_psbt)

    # Make sure that a non-psbt with signatures cannot be converted
    # Error could be either "TX decode failed" (segwit inputs causes parsing to fail) or "Inputs must not have scriptSigs and scriptWitnesses"
    # We must set iswitness=True because the serialized transaction has inputs and is therefore a witness transaction
    signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
    assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], iswitness=True)
    assert_raises_rpc_error(-22, "", self.nodes[0].converttopsbt, hexstring=signedtx['hex'], permitsigdata=False, iswitness=True)
    # Unless we allow it to convert and strip signatures
    self.nodes[0].converttopsbt(signedtx['hex'], True)

    # Explicitly allow converting non-empty txs
    new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
    self.nodes[0].decodepsbt(new_psbt)

    # Create outputs to nodes 1 and 2
    node1_addr = self.nodes[1].getnewaddress()
    node2_addr = self.nodes[2].getnewaddress()
    txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
    txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
    blockhash = self.nodes[0].generate(6)[0]
    self.sync_all()
    vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
    vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)

    # Create a psbt spending outputs from nodes 1 and 2
    psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})

    # Update psbts, should only have data for one input and not the other
    psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig, False, "ALL")['psbt']
    psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
    assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
    # Check that BIP32 path was added
    assert "bip32_derivs" in psbt1_decoded['inputs'][0]
    psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig, False, "ALL", False)['psbt']
    psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
    assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
    # Check that BIP32 paths were not added
    assert "bip32_derivs" not in psbt2_decoded['inputs'][1]

    # Sign PSBTs (workaround issue #18039)
    psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
    psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']

    # Combine, finalize, and send the psbts
    combined = self.nodes[0].combinepsbt([psbt1, psbt2])
    finalized = self.nodes[0].finalizepsbt(combined)['hex']
    self.nodes[0].sendrawtransaction(finalized)
    self.nodes[0].generate(6)
    self.sync_all()

    # Test additional args in walletcreatepsbt
    # Make sure both pre-included and funded inputs
    # have the correct sequence numbers based on
    # replaceable arg
    block_height = self.nodes[0].getblockcount()
    unspent = self.nodes[0].listunspent()[0]
    psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable": False, "add_inputs": True}, False)
    decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
    for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
        assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        assert "bip32_derivs" not in psbt_in
    assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)

    # Same construction with only locktime set and RBF explicitly enabled
    psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {"replaceable": True, "add_inputs": True}, True)
    decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
    for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
        assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        assert "bip32_derivs" in psbt_in
    assert_equal(decoded_psbt["tx"]["locktime"], block_height)

    # Same construction without optional arguments
    psbtx_info = self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
    decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
    for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
        assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        assert "bip32_derivs" in psbt_in
    assert_equal(decoded_psbt["tx"]["locktime"], 0)

    # Same construction without optional arguments, for a node with -walletrbf=0
    unspent1 = self.nodes[1].listunspent()[0]
    psbtx_info = self.nodes[1].walletcreatefundedpsbt([{"txid":unspent1["txid"], "vout":unspent1["vout"]}], [{self.nodes[2].getnewaddress():unspent1["amount"]+1}], block_height, {"add_inputs": True})
    decoded_psbt = self.nodes[1].decodepsbt(psbtx_info["psbt"])
    for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
        assert_greater_than(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
        assert "bip32_derivs" in psbt_in

    # Make sure change address wallet does not have P2SH innerscript access to results in success
    # when attempting BnB coin selection
    self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)

    # Make sure the wallet's change type is respected by default
    small_output = {self.nodes[0].getnewaddress():0.1}
    psbtx_native = self.nodes[0].walletcreatefundedpsbt([], [small_output])
    self.assert_change_type(psbtx_native, "witness_v0_keyhash")
    psbtx_legacy = self.nodes[1].walletcreatefundedpsbt([], [small_output])
    self.assert_change_type(psbtx_legacy, "pubkeyhash")

    # Make sure the change type of the wallet can also be overwritten
    psbtx_np2wkh = self.nodes[1].walletcreatefundedpsbt([], [small_output], 0, {"change_type":"p2sh-segwit"})
    self.assert_change_type(psbtx_np2wkh, "scripthash")

    # Make sure the change type cannot be specified if a change address is given
    invalid_options = {"change_type":"legacy","changeAddress":self.nodes[0].getnewaddress()}
    assert_raises_rpc_error(-8, "both change address and address type options", self.nodes[0].walletcreatefundedpsbt, [], [small_output], 0, invalid_options)

    # Regression test for 14473 (mishandling of already-signed PSBT):
    psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], 0, {"add_inputs": True})
    complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
    double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
    assert_equal(complete_psbt, double_processed_psbt)
    self.nodes[0].decodepsbt(double_processed_psbt["psbt"])

    # Make sure unsafe inputs are included if specified
    self.nodes[2].createwallet(wallet_name="unsafe")
    wunsafe = self.nodes[2].get_wallet_rpc("unsafe")
    self.nodes[0].sendtoaddress(wunsafe.getnewaddress(), 2)
    self.sync_mempools()
    assert_raises_rpc_error(-4, "Insufficient funds", wunsafe.walletcreatefundedpsbt, [], [{self.nodes[0].getnewaddress(): 1}])
    wunsafe.walletcreatefundedpsbt([], [{self.nodes[0].getnewaddress(): 1}], 0, {"include_unsafe": True})

    # BIP 174 Test Vectors

    # Check that unknown values are just passed through
    unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
    unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
    assert_equal(unknown_psbt, unknown_out)

    # Open the data file
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
        d = json.load(f)
        invalids = d['invalid']
        valids = d['valid']
        creators = d['creator']
        signers = d['signer']
        combiners = d['combiner']
        finalizers = d['finalizer']
        extractors = d['extractor']

    # Invalid PSBTs
    for invalid in invalids:
        assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)

    # Valid PSBTs
    for valid in valids:
        self.nodes[0].decodepsbt(valid)

    # Creator Tests
    for creator in creators:
        created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
        assert_equal(created_tx, creator['result'])

    # Signer tests
    for i, signer in enumerate(signers):
        self.nodes[2].createwallet(wallet_name="wallet{}".format(i))
        wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
        for key in signer['privkeys']:
            wrpc.importprivkey(key)
        signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
        assert_equal(signed_tx, signer['result'])

    # Combiner test
    for combiner in combiners:
        combined = self.nodes[2].combinepsbt(combiner['combine'])
        assert_equal(combined, combiner['result'])

    # Empty combiner test
    assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])

    # Finalizer test
    for finalizer in finalizers:
        finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
        assert_equal(finalized, finalizer['result'])

    # Extractor test
    for extractor in extractors:
        extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
        assert_equal(extracted, extractor['result'])

    # Unload extra wallets
    for i, signer in enumerate(signers):
        self.nodes[2].unloadwallet("wallet{}".format(i))

    # TODO: Re-enable this for segwit v1
    # self.test_utxo_conversion()

    # Test that psbts with p2pkh outputs are created properly
    p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
    psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
    self.nodes[0].decodepsbt(psbt['psbt'])

    # Test decoding error: invalid base64
    assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")

    # Send to all types of addresses
    addr1 = self.nodes[1].getnewaddress("", "bech32")
    txid1 = self.nodes[0].sendtoaddress(addr1, 11)
    vout1 = find_output(self.nodes[0], txid1, 11)
    addr2 = self.nodes[1].getnewaddress("", "legacy")
    txid2 = self.nodes[0].sendtoaddress(addr2, 11)
    vout2 = find_output(self.nodes[0], txid2, 11)
    addr3 = self.nodes[1].getnewaddress("", "p2sh-segwit")
    txid3 = self.nodes[0].sendtoaddress(addr3, 11)
    vout3 = find_output(self.nodes[0], txid3, 11)
    self.sync_all()

    def test_psbt_input_keys(psbt_input, keys):
        """Check that the psbt input has only the expected keys."""
        assert_equal(set(keys), set(psbt_input.keys()))

    # Create a PSBT. None of the inputs are filled initially
    psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
    decoded = self.nodes[1].decodepsbt(psbt)
    test_psbt_input_keys(decoded['inputs'][0], [])
    test_psbt_input_keys(decoded['inputs'][1], [])
    test_psbt_input_keys(decoded['inputs'][2], [])

    # Update a PSBT with UTXOs from the node
    # Bech32 inputs should be filled with witness UTXO. Other inputs should not be filled because they are non-witness
    updated = self.nodes[1].utxoupdatepsbt(psbt)
    decoded = self.nodes[1].decodepsbt(updated)
    test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo'])
    test_psbt_input_keys(decoded['inputs'][1], [])
    test_psbt_input_keys(decoded['inputs'][2], [])

    # Try again, now while providing descriptors, making P2SH-segwit work, and causing bip32_derivs and redeem_script to be filled in
    descs = [self.nodes[1].getaddressinfo(addr)['desc'] for addr in [addr1,addr2,addr3]]
    updated = self.nodes[1].utxoupdatepsbt(psbt=psbt, descriptors=descs)
    decoded = self.nodes[1].decodepsbt(updated)
    test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'bip32_derivs'])
    test_psbt_input_keys(decoded['inputs'][1], [])
    test_psbt_input_keys(decoded['inputs'][2], ['witness_utxo', 'bip32_derivs', 'redeem_script'])

    # Two PSBTs with a common input should not be joinable
    psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
    assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])

    # Join two distinct PSBTs
    addr4 = self.nodes[1].getnewaddress("", "p2sh-segwit")
    txid4 = self.nodes[0].sendtoaddress(addr4, 5)
    vout4 = find_output(self.nodes[0], txid4, 5)
    self.nodes[0].generate(6)
    self.sync_all()
    psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
    psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
    psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
    assert "final_scriptwitness" in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
    joined = self.nodes[0].joinpsbts([psbt, psbt2])
    joined_decoded = self.nodes[0].decodepsbt(joined)
    assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]

    # Check that joining shuffles the inputs and outputs
    # 10 attempts should be enough to get a shuffled join
    shuffled = False
    for _ in range(10):
        shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
        shuffled |= joined != shuffled_joined
        if shuffled:
            break
    assert shuffled

    # Newly created PSBT needs UTXOs and updating
    addr = self.nodes[1].getnewaddress("", "p2sh-segwit")
    txid = self.nodes[0].sendtoaddress(addr, 7)
    addrinfo = self.nodes[1].getaddressinfo(addr)
    blockhash = self.nodes[0].generate(6)[0]
    self.sync_all()
    vout = find_output(self.nodes[0], txid, 7, blockhash=blockhash)
    psbt = self.nodes[1].createpsbt([{"txid":txid, "vout":vout}], {self.nodes[0].getnewaddress("", "p2sh-segwit"):Decimal('6.999')})
    analyzed = self.nodes[0].analyzepsbt(psbt)
    assert not analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'updater' and analyzed['next'] == 'updater'

    # After update with wallet, only needs signing
    updated = self.nodes[1].walletprocesspsbt(psbt, False, 'ALL', True)['psbt']
    analyzed = self.nodes[0].analyzepsbt(updated)
    assert analyzed['inputs'][0]['has_utxo'] and not analyzed['inputs'][0]['is_final'] and analyzed['inputs'][0]['next'] == 'signer' and analyzed['next'] == 'signer' and analyzed['inputs'][0]['missing']['signatures'][0] == addrinfo['embedded']['witness_program']

    # Check fee and size things
    assert analyzed['fee'] == Decimal('0.001') and analyzed['estimated_vsize'] == 134 and analyzed['estimated_feerate'] == Decimal('0.00746268')

    # After signing and finalizing, needs extracting
    signed = self.nodes[1].walletprocesspsbt(updated)['psbt']
    analyzed = self.nodes[0].analyzepsbt(signed)
    assert analyzed['inputs'][0]['has_utxo'] and analyzed['inputs'][0]['is_final'] and analyzed['next'] == 'extractor'

    self.log.info("PSBT spending unspendable outputs should have error message and Creator as next")
    analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAljoeiG1ba8MI76OcHBFbDNvfLqlyHV5JPVFiHuyq911AAAAAAD/////g40EJ9DsZQpoqka7CwmK6kQiwHGyyng1Kgd5WdB86h0BAAAAAP////8CcKrwCAAAAAAWAEHYXCtx0AYLCcmIauuBXlCZHdoSTQDh9QUAAAAAFv8/wADXYP/7//////8JxOh0LR2HAI8AAAAAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHEAABAACAAAEBIADC6wsAAAAAF2oUt/X69ELjeX2nTof+fZ10l+OyAokDAQcJAwEHENkMak8AAAAA')
    assert_equal(analysis['next'], 'creator')
    assert_equal(analysis['error'], 'PSBT is not valid. Input 0 spends unspendable output')

    self.log.info("PSBT with invalid values should have error message and Creator as next")
    analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8AgIFq49AHABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
    assert_equal(analysis['next'], 'creator')
    assert_equal(analysis['error'], 'PSBT is not valid. Input 0 has invalid value')

    self.log.info("PSBT with signed, but not finalized, inputs should have Finalizer as next")
    analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAZYezcxdnbXoQCmrD79t/LzDgtUo9ERqixk8wgioAobrAAAAAAD9////AlDDAAAAAAAAFgAUy/UxxZuzZswcmFnN/E9DGSiHLUsuGPUFAAAAABYAFLsH5o0R38wXx+X2cCosTMCZnQ4baAAAAAABAR8A4fUFAAAAABYAFOBI2h5thf3+Lflb2LGCsVSZwsltIgIC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnJHMEQCIGx7zKcMIGr7cEES9BR4Kdt/pzPTK3fKWcGyCJXb7MVnAiALOBgqlMH4GbC1HDh/HmylmO54fyEy4lKde7/BT/PWxwEBAwQBAAAAIgYC/i4dtVARCRWtROG0HHoGcaVklzJUcwo5homgGkSNAnIYDwVpQ1QAAIABAACAAAAAgAAAAAAAAAAAAAAiAgL+CIiB59NSCssOJRGiMYQK1chahgAaaJpIXE41Cyir+xgPBWlDVAAAgAEAAIAAAACAAQAAAAAAAAAA')
    assert_equal(analysis['next'], 'finalizer')

    analysis = self.nodes[0].analyzepsbt('cHNidP8BAHECAAAAAfA00BFgAm6tp86RowwH6BMImQNL5zXUcTT97XoLGz0BAAAAAAD/////AgCAgWrj0AcAFgAUKNw0x8HRctAgmvoevm4u1SbN7XL87QKVAAAAABYAFPck4gF7iL4NL4wtfRAKgQbghiTUAAAAAAABAR8A8gUqAQAAABYAFJUDtxf2PHo641HEOBOAIvFMNTr2AAAA')
    assert_equal(analysis['next'], 'creator')
    assert_equal(analysis['error'], 'PSBT is not valid. Output amount invalid')

    analysis = self.nodes[0].analyzepsbt('cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
    assert_equal(analysis['next'], 'creator')
    assert_equal(analysis['error'], 'PSBT is not valid. Input 0 specifies invalid prevout')

    assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
# Run the PSBT functional test when this file is executed directly.
if __name__ == '__main__':
    PSBTTest().main()
| true | true |
f72001fbbd35b8a678f898e1db551767c3665048 | 30,425 | py | Python | src/waldur_azure/migrations/0003_redesign.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | 2 | 2017-01-20T15:26:25.000Z | 2017-08-03T04:38:08.000Z | src/waldur_azure/migrations/0003_redesign.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | null | null | null | src/waldur_azure/migrations/0003_redesign.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | null | null | null | # Generated by Django 1.11.18 on 2019-01-28 14:05
import re
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import django_fsm
import model_utils.fields
from django.db import migrations, models
import waldur_azure.validators
import waldur_core.core.fields
import waldur_core.core.models
import waldur_core.core.shims
import waldur_core.core.validators
import waldur_core.logging.loggers
class Migration(migrations.Migration):
dependencies = [
('structure', '0005_customer_domain'),
('core', '0003_enlarge_username'),
('waldur_azure', '0002_immutable_default_json'),
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('latitude', models.FloatField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, null=True)),
('backend_id', models.CharField(db_index=True, max_length=255)),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
options={
'abstract': False,
},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='Network',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=64,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('cidr', models.CharField(max_length=32)),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='NetworkInterface',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=80,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('config_name', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='ResourceGroup',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=90,
validators=[
django.core.validators.RegexValidator(
message='The name can include alphanumeric, underscore, parentheses, hyphen, period (except at end), and Unicode characters that match the allowed characters.',
regex=re.compile('^[-\\w._()]+$'),
)
],
),
),
(
'location',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Location',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='Size',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(db_index=True, max_length=255)),
('max_data_disk_count', models.PositiveIntegerField()),
('memory_in_mb', models.PositiveIntegerField()),
('number_of_cores', models.PositiveIntegerField()),
('os_disk_size_in_mb', models.PositiveIntegerField()),
('resource_disk_size_in_mb', models.PositiveIntegerField()),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
options={
'abstract': False,
},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='SQLDatabase',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
('charset', models.CharField(blank=True, max_length=255)),
('collation', models.CharField(blank=True, max_length=255)),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='SQLServer',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[
django.core.validators.RegexValidator(
message='The name can only be made up of lowercase letters "a"-"z", the numbers 0-9 and the hyphen. The hyphen may not lead or trail in the name.',
regex=re.compile('[a-z0-9][a-z0-9-]+[a-z0-9]$'),
)
],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
('username', models.CharField(max_length=50)),
('password', models.CharField(max_length=50)),
('storage_mb', models.PositiveIntegerField(null=True)),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='SubNet',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=80,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('cidr', models.CharField(max_length=32)),
(
'network',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Network',
),
),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.RemoveField(
model_name='instanceendpoint',
name='instance',
),
migrations.RemoveField(
model_name='virtualmachine',
name='private_ips',
),
migrations.RemoveField(
model_name='virtualmachine',
name='public_ips',
),
migrations.RemoveField(
model_name='virtualmachine',
name='user_password',
),
migrations.RemoveField(
model_name='virtualmachine',
name='user_username',
),
migrations.AddField(
model_name='image',
name='offer',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='publisher',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='settings',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='sku',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='version',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='image',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Image',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='password',
field=models.CharField(
default=None,
max_length=72,
validators=[
django.core.validators.MinLengthValidator(6),
django.core.validators.MaxLengthValidator(72),
waldur_azure.validators.validate_password,
],
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='ssh_key',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to='core.SshPublicKey',
),
),
migrations.AddField(
model_name='virtualmachine',
name='username',
field=models.CharField(
default=None,
max_length=32,
validators=[waldur_azure.validators.VirtualMachineUsernameValidator],
),
preserve_default=False,
),
migrations.AlterField(
model_name='image',
name='backend_id',
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name='virtualmachine',
name='name',
field=models.CharField(
max_length=15,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, and hyphens. The name must be shorter than 15 characters and start with a letter and must end with a letter or a number.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9-]{0,13}[a-zA-Z0-9]$'),
)
],
),
),
migrations.AlterUniqueTogether(
name='image',
unique_together=set([('settings', 'backend_id')]),
),
migrations.DeleteModel(
name='InstanceEndpoint',
),
migrations.AddField(
model_name='sqldatabase',
name='server',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='waldur_azure.SQLServer'
),
),
migrations.AddField(
model_name='networkinterface',
name='resource_group',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
migrations.AddField(
model_name='networkinterface',
name='subnet',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='waldur_azure.SubNet'
),
),
migrations.AddField(
model_name='network',
name='resource_group',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
migrations.AddField(
model_name='virtualmachine',
name='network_interface',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.NetworkInterface',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='resource_group',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='size',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Size',
),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='size',
unique_together=set([('settings', 'backend_id')]),
),
]
| 36.745169 | 199 | 0.396647 |
import re
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import django_fsm
import model_utils.fields
from django.db import migrations, models
import waldur_azure.validators
import waldur_core.core.fields
import waldur_core.core.models
import waldur_core.core.shims
import waldur_core.core.validators
import waldur_core.logging.loggers
class Migration(migrations.Migration):
dependencies = [
('structure', '0005_customer_domain'),
('core', '0003_enlarge_username'),
('waldur_azure', '0002_immutable_default_json'),
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('latitude', models.FloatField(blank=True, null=True)),
('longitude', models.FloatField(blank=True, null=True)),
('backend_id', models.CharField(db_index=True, max_length=255)),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
options={
'abstract': False,
},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='Network',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=64,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('cidr', models.CharField(max_length=32)),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='NetworkInterface',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=80,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('config_name', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='ResourceGroup',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=90,
validators=[
django.core.validators.RegexValidator(
message='The name can include alphanumeric, underscore, parentheses, hyphen, period (except at end), and Unicode characters that match the allowed characters.',
regex=re.compile('^[-\\w._()]+$'),
)
],
),
),
(
'location',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Location',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='Size',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('backend_id', models.CharField(db_index=True, max_length=255)),
('max_data_disk_count', models.PositiveIntegerField()),
('memory_in_mb', models.PositiveIntegerField()),
('number_of_cores', models.PositiveIntegerField()),
('os_disk_size_in_mb', models.PositiveIntegerField()),
('resource_disk_size_in_mb', models.PositiveIntegerField()),
(
'settings',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
),
],
options={
'abstract': False,
},
bases=(waldur_core.core.models.BackendModelMixin, models.Model),
),
migrations.CreateModel(
name='SQLDatabase',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[waldur_core.core.validators.validate_name],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
('charset', models.CharField(blank=True, max_length=255)),
('collation', models.CharField(blank=True, max_length=255)),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='SQLServer',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
(
'name',
models.CharField(
max_length=150,
validators=[
django.core.validators.RegexValidator(
message='The name can only be made up of lowercase letters "a"-"z", the numbers 0-9 and the hyphen. The hyphen may not lead or trail in the name.',
regex=re.compile('[a-z0-9][a-z0-9-]+[a-z0-9]$'),
)
],
verbose_name='name',
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
('username', models.CharField(max_length=50)),
('password', models.CharField(max_length=50)),
('storage_mb', models.PositiveIntegerField(null=True)),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.CreateModel(
name='SubNet',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
(
'created',
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='created',
),
),
(
'modified',
model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now,
editable=False,
verbose_name='modified',
),
),
(
'description',
models.CharField(
blank=True, max_length=500, verbose_name='description'
),
),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
(
'runtime_state',
models.CharField(
blank=True, max_length=150, verbose_name='runtime state'
),
),
(
'state',
django_fsm.FSMIntegerField(
choices=[
(5, 'Creation Scheduled'),
(6, 'Creating'),
(1, 'Update Scheduled'),
(2, 'Updating'),
(7, 'Deletion Scheduled'),
(8, 'Deleting'),
(3, 'OK'),
(4, 'Erred'),
],
default=5,
),
),
('backend_id', models.CharField(blank=True, max_length=255)),
(
'name',
models.CharField(
max_length=80,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, underscore, period and hyphens.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9._-]+$'),
)
],
),
),
('cidr', models.CharField(max_length=32)),
(
'network',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Network',
),
),
(
'resource_group',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
],
options={
'abstract': False,
},
bases=(
waldur_core.core.models.DescendantMixin,
waldur_core.core.models.BackendModelMixin,
waldur_core.logging.loggers.LoggableMixin,
models.Model,
),
),
migrations.RemoveField(
model_name='instanceendpoint',
name='instance',
),
migrations.RemoveField(
model_name='virtualmachine',
name='private_ips',
),
migrations.RemoveField(
model_name='virtualmachine',
name='public_ips',
),
migrations.RemoveField(
model_name='virtualmachine',
name='user_password',
),
migrations.RemoveField(
model_name='virtualmachine',
name='user_username',
),
migrations.AddField(
model_name='image',
name='offer',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='publisher',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='settings',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
related_name='+',
to='structure.ServiceSettings',
),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='sku',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='version',
field=models.CharField(default=None, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='image',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Image',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='password',
field=models.CharField(
default=None,
max_length=72,
validators=[
django.core.validators.MinLengthValidator(6),
django.core.validators.MaxLengthValidator(72),
waldur_azure.validators.validate_password,
],
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='ssh_key',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to='core.SshPublicKey',
),
),
migrations.AddField(
model_name='virtualmachine',
name='username',
field=models.CharField(
default=None,
max_length=32,
validators=[waldur_azure.validators.VirtualMachineUsernameValidator],
),
preserve_default=False,
),
migrations.AlterField(
model_name='image',
name='backend_id',
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name='virtualmachine',
name='name',
field=models.CharField(
max_length=15,
validators=[
django.core.validators.RegexValidator(
message='The name can contain only letters, numbers, and hyphens. The name must be shorter than 15 characters and start with a letter and must end with a letter or a number.',
regex=re.compile('[a-zA-Z][a-zA-Z0-9-]{0,13}[a-zA-Z0-9]$'),
)
],
),
),
migrations.AlterUniqueTogether(
name='image',
unique_together=set([('settings', 'backend_id')]),
),
migrations.DeleteModel(
name='InstanceEndpoint',
),
migrations.AddField(
model_name='sqldatabase',
name='server',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='waldur_azure.SQLServer'
),
),
migrations.AddField(
model_name='networkinterface',
name='resource_group',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
migrations.AddField(
model_name='networkinterface',
name='subnet',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='waldur_azure.SubNet'
),
),
migrations.AddField(
model_name='network',
name='resource_group',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
),
migrations.AddField(
model_name='virtualmachine',
name='network_interface',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.NetworkInterface',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='resource_group',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.ResourceGroup',
),
preserve_default=False,
),
migrations.AddField(
model_name='virtualmachine',
name='size',
field=models.ForeignKey(
default=None,
on_delete=django.db.models.deletion.CASCADE,
to='waldur_azure.Size',
),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='size',
unique_together=set([('settings', 'backend_id')]),
),
]
| true | true |
f7200243f3b4d289ac50951e7f5c03cf8e464b4c | 7,970 | py | Python | src/pyrad_proc/pyrad/EGG-INFO/scripts/rewrite_monitoring.py | jfigui/pyrad | 7811d593bb09a7f8a621c0e8ae3f32c2b85a0254 | [
"BSD-3-Clause"
] | 41 | 2016-12-01T08:46:06.000Z | 2021-06-24T21:14:33.000Z | src/pyrad_proc/pyrad/EGG-INFO/scripts/rewrite_monitoring.py | jfigui/pyrad | 7811d593bb09a7f8a621c0e8ae3f32c2b85a0254 | [
"BSD-3-Clause"
] | 42 | 2017-02-23T14:52:49.000Z | 2021-02-01T10:43:52.000Z | src/pyrad_proc/pyrad/EGG-INFO/scripts/rewrite_monitoring.py | jfigui/pyrad | 7811d593bb09a7f8a621c0e8ae3f32c2b85a0254 | [
"BSD-3-Clause"
] | 21 | 2016-08-25T15:02:12.000Z | 2021-05-27T04:09:40.000Z | #!/home/daniel/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
================================================
rewrite_monitoring
================================================
This program rewrites a monitoring time series files into the correct
time order
"""
# Author: fvj
# License: BSD 3 clause
import datetime
import atexit
import numpy as np
import os
from pyrad.io import read_monitoring_ts, write_monitoring_ts
from pyrad.graph import plot_monitoring_ts
from pyrad.io import generate_field_name_str, get_fieldname_pyart
# Echo the module docstring when the script starts.
print(__doc__)
def main():
    """Rewrite rad4alp monitoring time series in chronological order.

    For every configured radar, variable and year, the corresponding
    monitoring CSV is read (sorted by date), rewritten to disk in time
    order and, if ``plot_data`` is set, plotted as a time series figure.
    """
    input_base = '/store/msrad/radar/pyrad_products/'
    output_base = '/store/msrad/radar/pyrad_products/'
    # Radars, variables and years to process.
    rad_vec = ['D']
    var_vec = ['PhiDP0', 'RhoHV_rain', 'ZDR_prec', 'ZDR_snow', 'dBZ_bias']
    year_vec = [datetime.datetime(2018, 1, 1)]
    plot_data = True

    print("====== Monitoring rewriting started: %s" %
          datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
    atexit.register(_print_end_msg,
                    "====== Monitoring rewriting finished: ")

    for rad in rad_vec:
        print('Processing Radar '+rad)
        for var in var_vec:
            basedir, dsdir, mon_type, quantiles = _dataset_config(var, rad)
            input_path = input_base+basedir+'/'+dsdir+'/VOL_TS/'
            output_path = output_base+basedir+'/'+dsdir+'/VOL_TS/'
            if not os.path.isdir(output_path):
                os.makedirs(output_path)

            print('- Processing Variable '+var)
            for year in year_vec:
                print('-- Processing Year '+year.strftime('%Y'))
                fname_base = (
                    year.strftime('%Y')+'_'+rad+'_ts_'+mon_type+'_'+var)
                fname_input = input_path+fname_base+'.csv'
                fname_output = output_path+fname_base+'.csv'
                figfname = [output_path+fname_base+'.png']

                # Read the yearly series already sorted by date; skip years
                # for which no data could be read.
                date, np_t_vec, cquant_vec, lquant_vec, hquant_vec = (
                    read_monitoring_ts(fname_input, sort_by_date=True))
                if date is None:
                    continue

                # Rewrite the file with the rows in chronological order.
                val_vec = np.ma.asarray(
                    [lquant_vec, cquant_vec, hquant_vec]).T
                fname = write_monitoring_ts(
                    date, np_t_vec, val_vec, quantiles, var,
                    fname_output, rewrite=True)
                print('written file '+fname)

                if not plot_data:
                    continue

                titldate = (date[0].strftime('%Y%m%d')+'-' +
                            date[-1].strftime('%Y%m%d'))
                titl = rad+' Monitoring '+titldate
                labely = generate_field_name_str(var)
                ref_value, vmin, vmax, np_min = _plot_config(var, rad)
                fname = plot_monitoring_ts(
                    date, np_t_vec, cquant_vec, lquant_vec, hquant_vec,
                    get_fieldname_pyart(var), figfname,
                    ref_value=ref_value, vmin=vmin, vmax=vmax, np_min=np_min,
                    labelx='Time UTC', labely=labely, titl=titl)
                print('plotted file '+' '.join(fname))


def _dataset_config(var, rad):
    """Return (basedir, dsdir, mon_type, quantiles) for a monitored variable.

    Raises KeyError for unknown variables instead of silently reusing the
    values left over from the previous loop iteration (the original
    if/elif chain left the names unbound/stale for unhandled variables).
    """
    if var in ('dBZ', 'dBZv'):
        dsdir = 'monitoring_clt_Zh' if var == 'dBZ' else 'monitoring_clt_Zv'
        return ('rad4alp_gc_PH'+rad, dsdir, 'GC_MONITORING', [50., 95., 99.])
    dsdirs = {
        'RhoHV_rain': 'monitoring_RhoHV',
        'PhiDP0': 'monitoring_PhiDP0',
        'ZDR_prec': 'monitoring_ZDR',
        'ZDR_snow': 'monitoring_ZDR_snow',
        'dBZ_bias': 'monitoring_Zh_bias'}
    # RhoHV in rain uses higher quantiles; all others use the quartiles.
    quantiles = [65., 80., 95.] if var == 'RhoHV_rain' else [25., 50., 75.]
    return ('rad4alp_dataquality_PL'+rad, dsdirs[var], 'MONITORING', quantiles)


def _plot_config(var, rad):
    """Return (ref_value, vmin, vmax, np_min) used to plot var on radar rad."""
    if var in ('dBZ', 'dBZv'):
        # Clutter reflectivity references are radar dependent; the plotting
        # range is always the reference value +/- 5 dB.
        ref_np = {
            'dBZ': {'A': (49.5, 100000), 'D': (48.5, 20000),
                    'L': (67., 100000), 'P': (69., 100000),
                    'W': (27.5, 100000)},
            'dBZv': {'A': (51.5, 100000), 'D': (50.5, 20000),
                     'L': (69.5, 100000), 'P': (68.5, 100000),
                     'W': (26.5, 100000)}}
        ref_value, np_min = ref_np[var][rad]
        return (ref_value, ref_value - 5., ref_value + 5., np_min)
    fixed = {
        'RhoHV_rain': (0.99, 0.95, 1.01, 5000),
        'PhiDP0': (0., -20., 20., 500000),
        'ZDR_prec': (0.2, -2., 2., 5000),
        'ZDR_snow': (0.2, -2., 2., 5000),
        'dBZ_bias': (0., -30., 30., 100)}
    return fixed[var]
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Script entry point: run main() only when executed directly
# (not when this module is imported).
# ---------------------------------------------------------
if __name__ == "__main__":
    main()
| 33.628692 | 77 | 0.408281 |
import datetime
import atexit
import numpy as np
import os
from pyrad.io import read_monitoring_ts, write_monitoring_ts
from pyrad.graph import plot_monitoring_ts
from pyrad.io import generate_field_name_str, get_fieldname_pyart
# Echo the module docstring when the script starts.
print(__doc__)
def main():
    """Rewrite rad4alp monitoring time series in chronological order.

    For every configured radar, variable and year, the corresponding
    monitoring CSV is read (sorted by date), rewritten to disk in time
    order and, if ``plot_data`` is set, plotted as a time series figure.
    """
    input_base = '/store/msrad/radar/pyrad_products/'
    output_base = '/store/msrad/radar/pyrad_products/'
    # Radars, variables and years to process.
    rad_vec = ['D']
    var_vec = ['PhiDP0', 'RhoHV_rain', 'ZDR_prec', 'ZDR_snow', 'dBZ_bias']
    year_vec = [datetime.datetime(2018, 1, 1)]
    plot_data = True

    print("====== Monitoring rewriting started: %s" %
          datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
    atexit.register(_print_end_msg,
                    "====== Monitoring rewriting finished: ")

    for rad in rad_vec:
        print('Processing Radar '+rad)
        for var in var_vec:
            basedir, dsdir, mon_type, quantiles = _dataset_config(var, rad)
            input_path = input_base+basedir+'/'+dsdir+'/VOL_TS/'
            output_path = output_base+basedir+'/'+dsdir+'/VOL_TS/'
            if not os.path.isdir(output_path):
                os.makedirs(output_path)

            print('- Processing Variable '+var)
            for year in year_vec:
                print('-- Processing Year '+year.strftime('%Y'))
                fname_base = (
                    year.strftime('%Y')+'_'+rad+'_ts_'+mon_type+'_'+var)
                fname_input = input_path+fname_base+'.csv'
                fname_output = output_path+fname_base+'.csv'
                figfname = [output_path+fname_base+'.png']

                # Read the yearly series already sorted by date; skip years
                # for which no data could be read.
                date, np_t_vec, cquant_vec, lquant_vec, hquant_vec = (
                    read_monitoring_ts(fname_input, sort_by_date=True))
                if date is None:
                    continue

                # Rewrite the file with the rows in chronological order.
                val_vec = np.ma.asarray(
                    [lquant_vec, cquant_vec, hquant_vec]).T
                fname = write_monitoring_ts(
                    date, np_t_vec, val_vec, quantiles, var,
                    fname_output, rewrite=True)
                print('written file '+fname)

                if not plot_data:
                    continue

                titldate = (date[0].strftime('%Y%m%d')+'-' +
                            date[-1].strftime('%Y%m%d'))
                titl = rad+' Monitoring '+titldate
                labely = generate_field_name_str(var)
                ref_value, vmin, vmax, np_min = _plot_config(var, rad)
                fname = plot_monitoring_ts(
                    date, np_t_vec, cquant_vec, lquant_vec, hquant_vec,
                    get_fieldname_pyart(var), figfname,
                    ref_value=ref_value, vmin=vmin, vmax=vmax, np_min=np_min,
                    labelx='Time UTC', labely=labely, titl=titl)
                print('plotted file '+' '.join(fname))


def _dataset_config(var, rad):
    """Return (basedir, dsdir, mon_type, quantiles) for a monitored variable.

    Raises KeyError for unknown variables instead of silently reusing the
    values left over from the previous loop iteration (the original
    if/elif chain left the names unbound/stale for unhandled variables).
    """
    if var in ('dBZ', 'dBZv'):
        dsdir = 'monitoring_clt_Zh' if var == 'dBZ' else 'monitoring_clt_Zv'
        return ('rad4alp_gc_PH'+rad, dsdir, 'GC_MONITORING', [50., 95., 99.])
    dsdirs = {
        'RhoHV_rain': 'monitoring_RhoHV',
        'PhiDP0': 'monitoring_PhiDP0',
        'ZDR_prec': 'monitoring_ZDR',
        'ZDR_snow': 'monitoring_ZDR_snow',
        'dBZ_bias': 'monitoring_Zh_bias'}
    # RhoHV in rain uses higher quantiles; all others use the quartiles.
    quantiles = [65., 80., 95.] if var == 'RhoHV_rain' else [25., 50., 75.]
    return ('rad4alp_dataquality_PL'+rad, dsdirs[var], 'MONITORING', quantiles)


def _plot_config(var, rad):
    """Return (ref_value, vmin, vmax, np_min) used to plot var on radar rad."""
    if var in ('dBZ', 'dBZv'):
        # Clutter reflectivity references are radar dependent; the plotting
        # range is always the reference value +/- 5 dB.
        ref_np = {
            'dBZ': {'A': (49.5, 100000), 'D': (48.5, 20000),
                    'L': (67., 100000), 'P': (69., 100000),
                    'W': (27.5, 100000)},
            'dBZv': {'A': (51.5, 100000), 'D': (50.5, 20000),
                     'L': (69.5, 100000), 'P': (68.5, 100000),
                     'W': (26.5, 100000)}}
        ref_value, np_min = ref_np[var][rad]
        return (ref_value, ref_value - 5., ref_value + 5., np_min)
    fixed = {
        'RhoHV_rain': (0.99, 0.95, 1.01, 5000),
        'PhiDP0': (0., -20., 20., 500000),
        'ZDR_prec': (0.2, -2., 2., 5000),
        'ZDR_snow': (0.2, -2., 2., 5000),
        'dBZ_bias': (0., -30., 30., 100)}
    return fixed[var]
def _print_end_msg(text):
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
| true | true |
f72002f8e1ad1752270b6c4051b237ce04dec27e | 13,299 | py | Python | Scripts/simulation/interactions/jog_interaction.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/interactions/jog_interaction.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/interactions/jog_interaction.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\jog_interaction.py
# Compiled at: 2020-07-22 05:56:20
# Size of source mod 2**32: 16676 bytes
from _math import Vector3
import itertools, random
from balloon.tunable_balloon import TunableBalloon
from element_utils import do_all
from event_testing.results import TestResult
from interactions import TargetType
from interactions.base.super_interaction import SuperInteraction
from interactions.constraints import Circle, ANYWHERE
from interactions.utils.routing import FollowPath, PlanRoute, get_route_element_for_path
from routing.walkstyle.walkstyle_request import WalkStyleRequest
from routing.waypoints.waypoint_generator_variant import TunableWaypointGeneratorVariant
from routing.waypoints.waypoint_stitching import WaypointStitchingVariant
from sims4 import random
from sims4.tuning.tunable import TunableRange, Tunable, OptionalTunable
from sims4.tuning.tunable_base import GroupNames
from sims4.utils import flexmethod
import element_utils, routing, sims4.log
# Module-level logger shared by the waypoint interaction classes below.
logger = sims4.log.Logger('WaypointInteraction')
class _WaypointGeneratorRallyable:
def __init__(self, waypoint_info):
self._original_generator = waypoint_info
def get_start_constraint(self):
return self._original_generator.get_start_constraint()
def get_waypoint_constraints_gen(self, routing_agent, waypoint_count):
yield from self._original_generator.get_waypoint_constraints_gen(routing_agent, waypoint_count)
if False:
yield None
class WaypointInteraction(SuperInteraction):
    """Super interaction that routes its Sim(s) through generated waypoints.

    Waypoint constraints come from the tuned ``waypoint_constraint``
    generator; routes between them are planned per Sim (or per vehicle the
    Sim is seated in) and then run with the tuned walk style and stitching
    behavior.

    NOTE(review): this file is decompiler output (see uncompyle6 banner at
    the top); several methods below contain control flow that appears
    mangled by the decompiler -- flagged inline with NOTE(review). Confirm
    against the original source before any behavioral change.
    """
    # Designer-facing tuning: how waypoints are generated and visited, and
    # how route-planning failure is surfaced to the player.
    INSTANCE_TUNABLES = {'waypoint_constraint':TunableWaypointGeneratorVariant(tuning_group=GroupNames.ROUTING),
     'waypoint_count':TunableRange(description='\n                The number of waypoints to select, from spawn points in the zone, to\n                visit for a Jog prior to returning to the original location.\n                ',
       tunable_type=int,
       default=2,
       minimum=2,
       tuning_group=GroupNames.ROUTING),
     'waypoint_walk_style':WalkStyleRequest.TunableFactory(description='\n                The walkstyle to use when routing between waypoints.\n                ',
       tuning_group=GroupNames.ROUTING),
     'waypoint_stitching':WaypointStitchingVariant(tuning_group=GroupNames.ROUTING),
     'waypoint_randomize_orientation':Tunable(description='\n                Make Waypoint orientation random. Default is velocity aligned.\n                ',
       tunable_type=bool,
       default=False,
       tuning_group=GroupNames.ROUTING),
     'waypoint_clear_locomotion_mask':Tunable(description='\n                If enabled, override the locomotion queue mask. This mask controls\n                which Animation Requests and XEvents get blocked during locomotion.\n                By default, the mask blocks everything. If cleared, it blocks\n                nothing. It also lowers the animation track used by locomotion to \n                9,999 from the default of 10,000. Use with care, ask your GPE.\n                ',
       tunable_type=bool,
       default=False,
       tuning_group=GroupNames.ROUTING),
     'waypoint_override_agent_radius':OptionalTunable(description='\n                If enabled, use the specified value as the agent radius when\n                generating goals for the waypoints. The agent radius is restored\n                for the actual route.\n                ',
       tunable=TunableRange(description='\n                    The value to use as the agent radius when generating goals. \n                    ',
       tunable_type=float,
       minimum=0,
       maximum=1.0,
       default=0.123),
       tuning_group=GroupNames.ROUTING),
     'waypoint_route_fail_balloon':OptionalTunable(description='\n                Tuning for balloon to show when failing to plan a aroute for this waypoint interaction. \n                ',
       tunable=TunableBalloon(locked_args={'balloon_delay':0,
       'balloon_delay_random_offset':0,
       'balloon_chance':100}),
       tuning_group=GroupNames.ROUTING)}

    def __init__(self, aop, *args, waypoint_generator=None, **kwargs):
        """Create the interaction and pick its waypoint generator.

        ``waypoint_info`` in **kwargs comes from rallying: rallied Sims wrap
        the rally source's generator so everyone shares the same waypoints.
        """
        (super().__init__)(aop, *args, **kwargs)
        waypoint_info = kwargs.get('waypoint_info')
        if waypoint_info is not None:
            self._waypoint_generator = _WaypointGeneratorRallyable(waypoint_info)
        else:
            # NOTE(review): decompiled branch structure looks damaged here --
            # 'target' is assigned only when aop.target is None but read in
            # the elif branch, and no generator is assigned at all when
            # aop.target is None. Confirm against original source.
            if aop.target is None:
                if self.target_type is TargetType.ACTOR:
                    target = self.sim
                else:
                    target = aop.target
            elif waypoint_generator is None:
                self._waypoint_generator = self.waypoint_constraint(self.context, target)
            else:
                self._waypoint_generator = waypoint_generator
        self._routing_infos = None
        self._goal_size = 0.0
        # Release generator resources when the interaction finishes.
        self.register_on_finishing_callback(self._clean_up_waypoint_generator)

    @classmethod
    def _test(cls, target, context, **interaction_parameters):
        """Disallow waypoint interactions for Sims following a routing master."""
        sim = context.sim
        routing_master = sim.routing_master
        if routing_master is not None:
            if sim.parent is not routing_master:
                return TestResult(False, '{} cannot run Waypoint interactions because they are following {}', sim, routing_master)
        return (super()._test)(target, context, **interaction_parameters)

    def _get_starting_constraint(self, *args, **kwargs):
        """Build the constraint the Sim must satisfy before waypoints start."""
        constraint = ANYWHERE
        target = self.target
        if self._waypoint_generator.is_for_vehicle and target is not None and target.vehicle_component is not None:
            # NOTE(review): 'x() or Circle(...)' reads like a decompiled
            # conditional; presumably the circle constraint applies only when
            # the vehicle is not in an inventory -- confirm against original.
            constraint = target.is_in_inventory() or Circle((target.position), (target.vehicle_component.minimum_route_distance), routing_surface=(target.routing_surface))
            constraint = constraint.intersect(self._waypoint_generator.get_water_constraint())
        else:
            constraint = self._waypoint_generator.get_start_constraint()
        posture_constraint = self._waypoint_generator.get_posture_constraint()
        if posture_constraint is not None:
            constraint = constraint.intersect(posture_constraint)
        return constraint

    @flexmethod
    def _constraint_gen(cls, inst, *args, **kwargs):
        """Yield the instance's starting constraint plus the base constraints."""
        inst_or_cls = inst if inst is not None else cls
        if inst is not None:
            constraint = (inst._get_starting_constraint)(*args, **kwargs)
            yield constraint
        yield from (super(__class__, inst_or_cls)._constraint_gen)(*args, **kwargs)

    def cancel(self, *args, **kwargs):
        """Detach any in-flight FollowPath primitives, then cancel normally."""
        for sim_primitive in list(self.sim.primitives):
            if isinstance(sim_primitive, FollowPath):
                sim_primitive.detach()
        return (super().cancel)(*args, **kwargs)

    def _clean_up_waypoint_generator(self, _):
        # On-finishing callback registered in __init__.
        self._waypoint_generator.clean_up()

    def _get_goals_for_constraint(self, constraint, routing_agent):
        """Collect routing goals from every connectivity handle of constraint."""
        goals = []
        handles = constraint.get_connectivity_handles(routing_agent)
        for handle in handles:
            goals.extend(handle.get_goals(always_reject_invalid_goals=True))
        return goals

    def _show_route_fail_balloon(self):
        """Show the tuned thought balloon when route planning fails."""
        balloon_tuning = self.waypoint_route_fail_balloon
        if balloon_tuning is None:
            return
        else:
            # NOTE(review): this unconditional return makes the balloon code
            # below unreachable; likely a mangled guard such as
            # 'if not self.is_user_directed: return'. Confirm against original.
            return self.is_user_directed or None
        balloon_requests = balloon_tuning(self)
        if balloon_requests:
            chosen_balloon = random.random.choice(balloon_requests)
            if chosen_balloon is not None:
                chosen_balloon.distribute()

    def _run_interaction_gen(self, timeline):
        """Generate waypoints and route all required Sims through them."""
        all_sims = self.required_sims()
        if not all_sims:
            return
        # Collect one (agent, routing context) pair per Sim; Sims seated in
        # a vehicle route as the vehicle instead.
        self._routing_infos = []
        routing_agent = self.sim
        for sim in all_sims:
            routing_context = sim.routing_context
            routing_agent = sim
            vehicle = None if not sim.posture.is_vehicle else sim.parent
            if vehicle is not None:
                if vehicle.vehicle_component is not None:
                    routing_agent = vehicle
                    routing_context = vehicle.routing_component.pathplan_context
            self._routing_infos.append((routing_agent, routing_context))
        waypoints = []
        default_agent_radius = None
        # NOTE(review): the if/else around the agent-radius override and the
        # 'return waypoints or False' below look decompiler-damaged: as
        # written, waypoints are only generated when no radius override is
        # tuned, and the method returns before ever routing. Confirm against
        # original source.
        if self.waypoint_override_agent_radius is not None:
            if routing_agent.routing_component is not None:
                default_agent_radius = routing_agent.routing_component._pathplan_context.agent_radius
                routing_agent.routing_component._pathplan_context.agent_radius = self.waypoint_override_agent_radius
        else:
            try:
                for constraint in self._waypoint_generator.get_waypoint_constraints_gen(routing_agent, self.waypoint_count):
                    goals = self._get_goals_for_constraint(constraint, routing_agent)
                    if not goals:
                        continue
                    if self.waypoint_randomize_orientation:
                        for goal in goals:
                            goal.orientation = sims4.math.angle_to_yaw_quaternion(random.uniform(0.0, sims4.math.TWO_PI))
                    waypoints.append(goals)
            finally:
                if default_agent_radius is not None:
                    routing_agent.routing_component._pathplan_context.agent_radius = default_agent_radius
            return waypoints or False
        # Square the largest goal radius once; compared against squared
        # distances in _do_route_to_constraint_gen.
        self._goal_size = max((info[0].routing_component.get_routing_context().agent_goal_radius for info in self._routing_infos))
        self._goal_size *= self._goal_size
        if self.staging:
            # Staging interactions loop over the stitched waypoints until
            # canceled; non-staging interactions run the route once.
            for route_waypoints in itertools.cycle(self.waypoint_stitching(waypoints, self._waypoint_generator.loops)):
                result = yield from self._do_route_to_constraint_gen(route_waypoints, timeline)
                if not result:
                    return result
        else:
            for route_waypoints in self.waypoint_stitching(waypoints, self._waypoint_generator.loops):
                result = yield from self._do_route_to_constraint_gen(route_waypoints, timeline)
                return result
        return True
        # Decompiler artifact: unreachable generator tail.
        if False:
            yield None

    def _do_route_to_constraint_gen(self, waypoints, timeline):
        """Plan a route through waypoints for every routing agent and run it."""
        if self.is_finishing:
            return False
        plan_primitives = []
        for i, routing_info in enumerate(self._routing_infos):
            routing_agent = routing_info[0]
            routing_context = routing_info[1]
            route = routing.Route((routing_agent.routing_location), (waypoints[(-1)]), waypoints=(waypoints[:-1]), routing_context=routing_context)
            plan_primitive = PlanRoute(route, routing_agent, interaction=self)
            result = yield from element_utils.run_child(timeline, plan_primitive)
            if not result:
                self._show_route_fail_balloon()
                return False
            # NOTE(review): the next two lines make the remainder of this
            # loop (and method) unreachable and are inconsistently indented;
            # almost certainly decompiler damage around a guard like
            # 'if not plan_success: show balloon; return False'.
            plan_primitive.path.nodes and plan_primitive.path.nodes.plan_success or self._show_route_fail_balloon()
                return False
            plan_primitive.path.blended_orientation = self.waypoint_randomize_orientation
            plan_primitives.append(plan_primitive)
            if i == len(self._routing_infos) - 1:
                continue
            # Raise the cost of goals close to this agent's planned path so
            # later agents plan around them.
            for node in plan_primitive.path.nodes:
                position = Vector3(*node.position)
                for goal in itertools.chain.from_iterable(waypoints):
                    if goal.routing_surface_id != node.routing_surface_id:
                        continue
                    dist_sq = (Vector3(*goal.position) - position).magnitude_2d_squared()
                    if dist_sq < self._goal_size:
                        goal.cost = routing.get_default_obstacle_cost()
        route_primitives = []
        track_override = None
        mask_override = None
        # Optionally clear the locomotion queue mask (see tunable description).
        if self.waypoint_clear_locomotion_mask:
            mask_override = 0
            track_override = 9999
        for plan_primitive in plan_primitives:
            sequence = get_route_element_for_path((plan_primitive.sim), (plan_primitive.path), interaction=self,
              force_follow_path=True,
              track_override=track_override,
              mask_override=mask_override)
            walkstyle_request = self.waypoint_walk_style(plan_primitive.sim)
            sequence = walkstyle_request(sequence=sequence)
            route_primitives.append(sequence)
        # Run all agents' routes in parallel.
        result = yield from element_utils.run_child(timeline, do_all(*route_primitives))
        return result
        # Decompiler artifact: unreachable generator tail.
        if False:
            yield None

    @classmethod
    def get_rallyable_aops_gen(cls, target, context, **kwargs):
        """Yield rallyable affordance ops that share one waypoint generator."""
        key = 'waypoint_info'
        if key not in kwargs:
            waypoint_generator = cls.waypoint_constraint(context, target)
            kwargs[key] = waypoint_generator
        # NOTE(review): 'waypoint_generator' is unbound when 'waypoint_info'
        # was already present in kwargs -- probably decompiler damage around
        # an else-branch reading kwargs[key]. Confirm against original.
        yield from (super().get_rallyable_aops_gen)(target, context, rally_constraint=waypoint_generator.get_start_constraint(), **kwargs)
        if False:
            yield None
from _math import Vector3
import itertools, random
from balloon.tunable_balloon import TunableBalloon
from element_utils import do_all
from event_testing.results import TestResult
from interactions import TargetType
from interactions.base.super_interaction import SuperInteraction
from interactions.constraints import Circle, ANYWHERE
from interactions.utils.routing import FollowPath, PlanRoute, get_route_element_for_path
from routing.walkstyle.walkstyle_request import WalkStyleRequest
from routing.waypoints.waypoint_generator_variant import TunableWaypointGeneratorVariant
from routing.waypoints.waypoint_stitching import WaypointStitchingVariant
from sims4 import random
from sims4.tuning.tunable import TunableRange, Tunable, OptionalTunable
from sims4.tuning.tunable_base import GroupNames
from sims4.utils import flexmethod
import element_utils, routing, sims4.log
# Module-level logger shared by the waypoint interaction classes below.
logger = sims4.log.Logger('WaypointInteraction')
class _WaypointGeneratorRallyable:
def __init__(self, waypoint_info):
self._original_generator = waypoint_info
def get_start_constraint(self):
return self._original_generator.get_start_constraint()
def get_waypoint_constraints_gen(self, routing_agent, waypoint_count):
yield from self._original_generator.get_waypoint_constraints_gen(routing_agent, waypoint_count)
if False:
yield None
class WaypointInteraction(SuperInteraction):
    """Super interaction that routes its Sim(s) through generated waypoints.

    Waypoint constraints come from the tuned ``waypoint_constraint``
    generator; routes between them are planned per Sim (or per vehicle the
    Sim is seated in) and then run with the tuned walk style and stitching
    behavior.

    NOTE(review): this file is decompiler output (see uncompyle6 banner at
    the top); several methods below contain control flow that appears
    mangled by the decompiler -- flagged inline with NOTE(review). Confirm
    against the original source before any behavioral change.
    """
    # Designer-facing tuning: how waypoints are generated and visited, and
    # how route-planning failure is surfaced to the player.
    INSTANCE_TUNABLES = {'waypoint_constraint':TunableWaypointGeneratorVariant(tuning_group=GroupNames.ROUTING),
     'waypoint_count':TunableRange(description='\n                The number of waypoints to select, from spawn points in the zone, to\n                visit for a Jog prior to returning to the original location.\n                ',
       tunable_type=int,
       default=2,
       minimum=2,
       tuning_group=GroupNames.ROUTING),
     'waypoint_walk_style':WalkStyleRequest.TunableFactory(description='\n                The walkstyle to use when routing between waypoints.\n                ',
       tuning_group=GroupNames.ROUTING),
     'waypoint_stitching':WaypointStitchingVariant(tuning_group=GroupNames.ROUTING),
     'waypoint_randomize_orientation':Tunable(description='\n                Make Waypoint orientation random. Default is velocity aligned.\n                ',
       tunable_type=bool,
       default=False,
       tuning_group=GroupNames.ROUTING),
     'waypoint_clear_locomotion_mask':Tunable(description='\n                If enabled, override the locomotion queue mask. This mask controls\n                which Animation Requests and XEvents get blocked during locomotion.\n                By default, the mask blocks everything. If cleared, it blocks\n                nothing. It also lowers the animation track used by locomotion to \n                9,999 from the default of 10,000. Use with care, ask your GPE.\n                ',
       tunable_type=bool,
       default=False,
       tuning_group=GroupNames.ROUTING),
     'waypoint_override_agent_radius':OptionalTunable(description='\n                If enabled, use the specified value as the agent radius when\n                generating goals for the waypoints. The agent radius is restored\n                for the actual route.\n                ',
       tunable=TunableRange(description='\n                    The value to use as the agent radius when generating goals. \n                    ',
       tunable_type=float,
       minimum=0,
       maximum=1.0,
       default=0.123),
       tuning_group=GroupNames.ROUTING),
     'waypoint_route_fail_balloon':OptionalTunable(description='\n                Tuning for balloon to show when failing to plan a aroute for this waypoint interaction. \n                ',
       tunable=TunableBalloon(locked_args={'balloon_delay':0,
       'balloon_delay_random_offset':0,
       'balloon_chance':100}),
       tuning_group=GroupNames.ROUTING)}

    def __init__(self, aop, *args, waypoint_generator=None, **kwargs):
        """Create the interaction and pick its waypoint generator.

        ``waypoint_info`` in **kwargs comes from rallying: rallied Sims wrap
        the rally source's generator so everyone shares the same waypoints.
        """
        (super().__init__)(aop, *args, **kwargs)
        waypoint_info = kwargs.get('waypoint_info')
        if waypoint_info is not None:
            self._waypoint_generator = _WaypointGeneratorRallyable(waypoint_info)
        else:
            # NOTE(review): decompiled branch structure looks damaged here --
            # 'target' is assigned only when aop.target is None but read in
            # the elif branch, and no generator is assigned at all when
            # aop.target is None. Confirm against original source.
            if aop.target is None:
                if self.target_type is TargetType.ACTOR:
                    target = self.sim
                else:
                    target = aop.target
            elif waypoint_generator is None:
                self._waypoint_generator = self.waypoint_constraint(self.context, target)
            else:
                self._waypoint_generator = waypoint_generator
        self._routing_infos = None
        self._goal_size = 0.0
        # Release generator resources when the interaction finishes.
        self.register_on_finishing_callback(self._clean_up_waypoint_generator)

    @classmethod
    def _test(cls, target, context, **interaction_parameters):
        """Disallow waypoint interactions for Sims following a routing master."""
        sim = context.sim
        routing_master = sim.routing_master
        if routing_master is not None:
            if sim.parent is not routing_master:
                return TestResult(False, '{} cannot run Waypoint interactions because they are following {}', sim, routing_master)
        return (super()._test)(target, context, **interaction_parameters)

    def _get_starting_constraint(self, *args, **kwargs):
        """Build the constraint the Sim must satisfy before waypoints start."""
        constraint = ANYWHERE
        target = self.target
        if self._waypoint_generator.is_for_vehicle and target is not None and target.vehicle_component is not None:
            # NOTE(review): 'x() or Circle(...)' reads like a decompiled
            # conditional; presumably the circle constraint applies only when
            # the vehicle is not in an inventory -- confirm against original.
            constraint = target.is_in_inventory() or Circle((target.position), (target.vehicle_component.minimum_route_distance), routing_surface=(target.routing_surface))
            constraint = constraint.intersect(self._waypoint_generator.get_water_constraint())
        else:
            constraint = self._waypoint_generator.get_start_constraint()
        posture_constraint = self._waypoint_generator.get_posture_constraint()
        if posture_constraint is not None:
            constraint = constraint.intersect(posture_constraint)
        return constraint

    @flexmethod
    def _constraint_gen(cls, inst, *args, **kwargs):
        """Yield the instance's starting constraint plus the base constraints."""
        inst_or_cls = inst if inst is not None else cls
        if inst is not None:
            constraint = (inst._get_starting_constraint)(*args, **kwargs)
            yield constraint
        yield from (super(__class__, inst_or_cls)._constraint_gen)(*args, **kwargs)

    def cancel(self, *args, **kwargs):
        """Detach any in-flight FollowPath primitives, then cancel normally."""
        for sim_primitive in list(self.sim.primitives):
            if isinstance(sim_primitive, FollowPath):
                sim_primitive.detach()
        return (super().cancel)(*args, **kwargs)

    def _clean_up_waypoint_generator(self, _):
        # On-finishing callback registered in __init__.
        self._waypoint_generator.clean_up()

    def _get_goals_for_constraint(self, constraint, routing_agent):
        """Collect routing goals from every connectivity handle of constraint."""
        goals = []
        handles = constraint.get_connectivity_handles(routing_agent)
        for handle in handles:
            goals.extend(handle.get_goals(always_reject_invalid_goals=True))
        return goals

    def _show_route_fail_balloon(self):
        """Show the tuned thought balloon when route planning fails."""
        balloon_tuning = self.waypoint_route_fail_balloon
        if balloon_tuning is None:
            return
        else:
            # NOTE(review): this unconditional return makes the balloon code
            # below unreachable; likely a mangled guard such as
            # 'if not self.is_user_directed: return'. Confirm against original.
            return self.is_user_directed or None
        balloon_requests = balloon_tuning(self)
        if balloon_requests:
            chosen_balloon = random.random.choice(balloon_requests)
            if chosen_balloon is not None:
                chosen_balloon.distribute()

    def _run_interaction_gen(self, timeline):
        """Generate waypoints and route all required Sims through them."""
        all_sims = self.required_sims()
        if not all_sims:
            return
        # Collect one (agent, routing context) pair per Sim; Sims seated in
        # a vehicle route as the vehicle instead.
        self._routing_infos = []
        routing_agent = self.sim
        for sim in all_sims:
            routing_context = sim.routing_context
            routing_agent = sim
            vehicle = None if not sim.posture.is_vehicle else sim.parent
            if vehicle is not None:
                if vehicle.vehicle_component is not None:
                    routing_agent = vehicle
                    routing_context = vehicle.routing_component.pathplan_context
            self._routing_infos.append((routing_agent, routing_context))
        waypoints = []
        default_agent_radius = None
        # NOTE(review): the if/else around the agent-radius override and the
        # 'return waypoints or False' below look decompiler-damaged: as
        # written, waypoints are only generated when no radius override is
        # tuned, and the method returns before ever routing. Confirm against
        # original source.
        if self.waypoint_override_agent_radius is not None:
            if routing_agent.routing_component is not None:
                default_agent_radius = routing_agent.routing_component._pathplan_context.agent_radius
                routing_agent.routing_component._pathplan_context.agent_radius = self.waypoint_override_agent_radius
        else:
            try:
                for constraint in self._waypoint_generator.get_waypoint_constraints_gen(routing_agent, self.waypoint_count):
                    goals = self._get_goals_for_constraint(constraint, routing_agent)
                    if not goals:
                        continue
                    if self.waypoint_randomize_orientation:
                        for goal in goals:
                            goal.orientation = sims4.math.angle_to_yaw_quaternion(random.uniform(0.0, sims4.math.TWO_PI))
                    waypoints.append(goals)
            finally:
                if default_agent_radius is not None:
                    routing_agent.routing_component._pathplan_context.agent_radius = default_agent_radius
            return waypoints or False
        # Square the largest goal radius once; compared against squared
        # distances in _do_route_to_constraint_gen.
        self._goal_size = max((info[0].routing_component.get_routing_context().agent_goal_radius for info in self._routing_infos))
        self._goal_size *= self._goal_size
        if self.staging:
            # Staging interactions loop over the stitched waypoints until
            # canceled; non-staging interactions run the route once.
            for route_waypoints in itertools.cycle(self.waypoint_stitching(waypoints, self._waypoint_generator.loops)):
                result = yield from self._do_route_to_constraint_gen(route_waypoints, timeline)
                if not result:
                    return result
        else:
            for route_waypoints in self.waypoint_stitching(waypoints, self._waypoint_generator.loops):
                result = yield from self._do_route_to_constraint_gen(route_waypoints, timeline)
                return result
        return True
        # Decompiler artifact: unreachable generator tail.
        if False:
            yield None

    def _do_route_to_constraint_gen(self, waypoints, timeline):
        """Plan a route through waypoints for every routing agent and run it."""
        if self.is_finishing:
            return False
        plan_primitives = []
        for i, routing_info in enumerate(self._routing_infos):
            routing_agent = routing_info[0]
            routing_context = routing_info[1]
            route = routing.Route((routing_agent.routing_location), (waypoints[(-1)]), waypoints=(waypoints[:-1]), routing_context=routing_context)
            plan_primitive = PlanRoute(route, routing_agent, interaction=self)
            result = yield from element_utils.run_child(timeline, plan_primitive)
            if not result:
                self._show_route_fail_balloon()
                return False
            # NOTE(review): the next two lines make the remainder of this
            # loop (and method) unreachable and are inconsistently indented;
            # almost certainly decompiler damage around a guard like
            # 'if not plan_success: show balloon; return False'.
            plan_primitive.path.nodes and plan_primitive.path.nodes.plan_success or self._show_route_fail_balloon()
                return False
            plan_primitive.path.blended_orientation = self.waypoint_randomize_orientation
            plan_primitives.append(plan_primitive)
            if i == len(self._routing_infos) - 1:
                continue
            # Raise the cost of goals close to this agent's planned path so
            # later agents plan around them.
            for node in plan_primitive.path.nodes:
                position = Vector3(*node.position)
                for goal in itertools.chain.from_iterable(waypoints):
                    if goal.routing_surface_id != node.routing_surface_id:
                        continue
                    dist_sq = (Vector3(*goal.position) - position).magnitude_2d_squared()
                    if dist_sq < self._goal_size:
                        goal.cost = routing.get_default_obstacle_cost()
        route_primitives = []
        track_override = None
        mask_override = None
        # Optionally clear the locomotion queue mask (see tunable description).
        if self.waypoint_clear_locomotion_mask:
            mask_override = 0
            track_override = 9999
        for plan_primitive in plan_primitives:
            sequence = get_route_element_for_path((plan_primitive.sim), (plan_primitive.path), interaction=self,
              force_follow_path=True,
              track_override=track_override,
              mask_override=mask_override)
            walkstyle_request = self.waypoint_walk_style(plan_primitive.sim)
            sequence = walkstyle_request(sequence=sequence)
            route_primitives.append(sequence)
        # Run all agents' routes in parallel.
        result = yield from element_utils.run_child(timeline, do_all(*route_primitives))
        return result
        # Decompiler artifact: unreachable generator tail.
        if False:
            yield None

    @classmethod
    def get_rallyable_aops_gen(cls, target, context, **kwargs):
        """Yield rallyable affordance ops that share one waypoint generator."""
        key = 'waypoint_info'
        if key not in kwargs:
            waypoint_generator = cls.waypoint_constraint(context, target)
            kwargs[key] = waypoint_generator
        # NOTE(review): 'waypoint_generator' is unbound when 'waypoint_info'
        # was already present in kwargs -- probably decompiler damage around
        # an else-branch reading kwargs[key]. Confirm against original.
        yield from (super().get_rallyable_aops_gen)(target, context, rally_constraint=waypoint_generator.get_start_constraint(), **kwargs)
        if False:
            yield None
f72003c0391c7dabf487c7375b1a310ce99ae57b | 2,718 | py | Python | test/test_mct.py | pistoia/qiskit-aqua | c7900ffdabc1499145739bfab29a392709bee1a0 | [
"Apache-2.0"
] | null | null | null | test/test_mct.py | pistoia/qiskit-aqua | c7900ffdabc1499145739bfab29a392709bee1a0 | [
"Apache-2.0"
] | null | null | null | test/test_mct.py | pistoia/qiskit-aqua | c7900ffdabc1499145739bfab29a392709bee1a0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
import itertools
import numpy as np
from parameterized import parameterized
from qiskit import QuantumCircuit, QuantumRegister
from qiskit import execute as q_execute
from qiskit.quantum_info import state_fidelity
from qiskit.aqua import get_aer_backend
from test.common import QiskitAquaTestCase
num_controls = [i + 1 for i in range(7)]
modes = ['basic', 'advanced', 'noancilla']
class TestMCT(QiskitAquaTestCase):
    """Exhaustive tests for the multi-controlled Toffoli (``mct``) gate."""

    @parameterized.expand(
        itertools.product(num_controls, modes)
    )
    def test_mct(self, num_controls, mode):
        """Check that ``mct`` flips the target iff every control is |1>.

        Runs once per (control count, construction mode) pair.  For each
        prefix-subset of controls prepared in |1>, the resulting
        statevector must have fidelity ~1 with the expected basis state.
        """
        c = QuantumRegister(num_controls, name='c')
        o = QuantumRegister(1, name='o')
        # One subset per prefix length: (), (0,), (0, 1), ..., all controls.
        subsets = [tuple(range(i)) for i in range(num_controls + 1)]
        for subset in subsets:
            qc = QuantumCircuit(o, c)
            # The number of ancilla qubits depends on the decomposition mode.
            if mode == 'basic':
                if num_controls <= 2:
                    num_ancillae = 0
                else:
                    num_ancillae = num_controls - 2
            elif mode == 'noancilla':
                num_ancillae = 0
            else:
                # 'advanced' mode: a single ancilla once there are >= 5 controls.
                if num_controls <= 4:
                    num_ancillae = 0
                else:
                    num_ancillae = 1
            if num_ancillae > 0:
                a = QuantumRegister(num_ancillae, name='a')
                qc.add_register(a)
            # Prepare the chosen controls in |1>.
            for idx in subset:
                qc.x(c[idx])
            qc.mct(
                [c[i] for i in range(num_controls)],
                o[0],
                [a[i] for i in range(num_ancillae)],
                mode=mode
            )
            # Undo the preparation so only the target may differ from |0...0>.
            for idx in subset:
                qc.x(c[idx])
            vec = np.asarray(q_execute(qc, get_aer_backend(
                'statevector_simulator')).result().get_statevector(qc, decimals=16))
            # Expected target amplitude: |1> iff all controls were set.
            vec_o = [0, 1] if len(subset) == num_controls else [1, 0]
            f = state_fidelity(vec, np.array(vec_o + [0] * (2 ** (num_controls + num_ancillae + 1) - 2)))
            self.assertAlmostEqual(f, 1)
if __name__ == '__main__':
unittest.main()
| 34.405063 | 105 | 0.577999 |
import unittest
import itertools
import numpy as np
from parameterized import parameterized
from qiskit import QuantumCircuit, QuantumRegister
from qiskit import execute as q_execute
from qiskit.quantum_info import state_fidelity
from qiskit.aqua import get_aer_backend
from test.common import QiskitAquaTestCase
num_controls = [i + 1 for i in range(7)]
modes = ['basic', 'advanced', 'noancilla']
class TestMCT(QiskitAquaTestCase):
@parameterized.expand(
itertools.product(num_controls, modes)
)
def test_mct(self, num_controls, mode):
c = QuantumRegister(num_controls, name='c')
o = QuantumRegister(1, name='o')
subsets = [tuple(range(i)) for i in range(num_controls + 1)]
for subset in subsets:
qc = QuantumCircuit(o, c)
if mode == 'basic':
if num_controls <= 2:
num_ancillae = 0
else:
num_ancillae = num_controls - 2
elif mode == 'noancilla':
num_ancillae = 0
else:
if num_controls <= 4:
num_ancillae = 0
else:
num_ancillae = 1
if num_ancillae > 0:
a = QuantumRegister(num_ancillae, name='a')
qc.add_register(a)
for idx in subset:
qc.x(c[idx])
qc.mct(
[c[i] for i in range(num_controls)],
o[0],
[a[i] for i in range(num_ancillae)],
mode=mode
)
for idx in subset:
qc.x(c[idx])
vec = np.asarray(q_execute(qc, get_aer_backend(
'statevector_simulator')).result().get_statevector(qc, decimals=16))
vec_o = [0, 1] if len(subset) == num_controls else [1, 0]
f = state_fidelity(vec, np.array(vec_o + [0] * (2 ** (num_controls + num_ancillae + 1) - 2)))
self.assertAlmostEqual(f, 1)
if __name__ == '__main__':
unittest.main()
| true | true |
f72005233f11455f1e95662ff8e8514dc68a23af | 3,738 | py | Python | letsencrypt/configuration.py | meehow/letsencrypt | 64073b234a6b87a574d873599a8d4dbf11729d5c | [
"Apache-2.0"
] | 1 | 2021-09-05T14:18:00.000Z | 2021-09-05T14:18:00.000Z | letsencrypt/configuration.py | meehow/letsencrypt | 64073b234a6b87a574d873599a8d4dbf11729d5c | [
"Apache-2.0"
] | null | null | null | letsencrypt/configuration.py | meehow/letsencrypt | 64073b234a6b87a574d873599a8d4dbf11729d5c | [
"Apache-2.0"
] | null | null | null | """Let's Encrypt user-supplied configuration."""
import os
import urlparse
import zope.interface
from acme import challenges
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import interfaces
class NamespaceConfig(object):
    """Configuration wrapper around :class:`argparse.Namespace`.

    For more documentation, including available attributes, please see
    :class:`letsencrypt.interfaces.IConfig`. However, note that
    the following attributes are dynamically resolved using
    :attr:`~letsencrypt.interfaces.IConfig.work_dir` and relative
    paths defined in :py:mod:`letsencrypt.constants`:

      - `accounts_dir`
      - `csr_dir`
      - `in_progress_dir`
      - `key_dir`
      - `renewer_config_file`
      - `temp_checkpoint_dir`

    :ivar namespace: Namespace typically produced by
        :meth:`argparse.ArgumentParser.parse_args`.
    :type namespace: :class:`argparse.Namespace`

    """
    zope.interface.implements(interfaces.IConfig)

    def __init__(self, namespace):
        self.namespace = namespace
        # Both challenges cannot share one port; fail fast at construction.
        if self.simple_http_port == self.dvsni_port:
            raise errors.Error(
                "Trying to run SimpleHTTP and DVSNI "
                "on the same port ({0})".format(self.dvsni_port))

    def __getattr__(self, name):
        # Any attribute not defined on the wrapper falls through to the
        # underlying argparse namespace.
        return getattr(self.namespace, name)

    @property
    def server_path(self):
        """File path based on ``server``."""
        parsed = urlparse.urlparse(self.namespace.server)
        # netloc + path, with '/' rewritten to the OS path separator.
        return (parsed.netloc + parsed.path).replace('/', os.path.sep)

    # The directories below are derived from config_dir/work_dir plus the
    # relative names defined in letsencrypt.constants.
    @property
    def accounts_dir(self): # pylint: disable=missing-docstring
        return os.path.join(
            self.namespace.config_dir, constants.ACCOUNTS_DIR, self.server_path)

    @property
    def backup_dir(self): # pylint: disable=missing-docstring
        return os.path.join(self.namespace.work_dir, constants.BACKUP_DIR)

    @property
    def csr_dir(self): # pylint: disable=missing-docstring
        return os.path.join(self.namespace.config_dir, constants.CSR_DIR)

    @property
    def in_progress_dir(self): # pylint: disable=missing-docstring
        return os.path.join(self.namespace.work_dir, constants.IN_PROGRESS_DIR)

    @property
    def key_dir(self): # pylint: disable=missing-docstring
        return os.path.join(self.namespace.config_dir, constants.KEY_DIR)

    @property
    def temp_checkpoint_dir(self): # pylint: disable=missing-docstring
        return os.path.join(
            self.namespace.work_dir, constants.TEMP_CHECKPOINT_DIR)

    @property
    def simple_http_port(self): # pylint: disable=missing-docstring
        # Fall back to the challenge's default port when none was supplied.
        if self.namespace.simple_http_port is not None:
            return self.namespace.simple_http_port
        else:
            return challenges.SimpleHTTPResponse.PORT
class RenewerConfiguration(object):
    """Configuration wrapper for renewer."""

    def __init__(self, namespace):
        self.namespace = namespace

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped argparse namespace.
        return getattr(self.namespace, name)

    # Paths below are config_dir joined with relative names from
    # letsencrypt.constants.
    @property
    def archive_dir(self): # pylint: disable=missing-docstring
        return os.path.join(self.namespace.config_dir, constants.ARCHIVE_DIR)

    @property
    def live_dir(self): # pylint: disable=missing-docstring
        return os.path.join(self.namespace.config_dir, constants.LIVE_DIR)

    @property
    def renewal_configs_dir(self): # pylint: disable=missing-docstring
        return os.path.join(
            self.namespace.config_dir, constants.RENEWAL_CONFIGS_DIR)

    @property
    def renewer_config_file(self): # pylint: disable=missing-docstring
        return os.path.join(
            self.namespace.config_dir, constants.RENEWER_CONFIG_FILENAME)
| 32.789474 | 80 | 0.697164 | import os
import urlparse
import zope.interface
from acme import challenges
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import interfaces
class NamespaceConfig(object):
zope.interface.implements(interfaces.IConfig)
def __init__(self, namespace):
self.namespace = namespace
if self.simple_http_port == self.dvsni_port:
raise errors.Error(
"Trying to run SimpleHTTP and DVSNI "
"on the same port ({0})".format(self.dvsni_port))
def __getattr__(self, name):
return getattr(self.namespace, name)
@property
def server_path(self):
parsed = urlparse.urlparse(self.namespace.server)
return (parsed.netloc + parsed.path).replace('/', os.path.sep)
@property
def accounts_dir(self):
return os.path.join(
self.namespace.config_dir, constants.ACCOUNTS_DIR, self.server_path)
@property
def backup_dir(self):
return os.path.join(self.namespace.work_dir, constants.BACKUP_DIR)
@property
def csr_dir(self):
return os.path.join(self.namespace.config_dir, constants.CSR_DIR)
@property
def in_progress_dir(self):
return os.path.join(self.namespace.work_dir, constants.IN_PROGRESS_DIR)
@property
def key_dir(self):
return os.path.join(self.namespace.config_dir, constants.KEY_DIR)
@property
def temp_checkpoint_dir(self):
return os.path.join(
self.namespace.work_dir, constants.TEMP_CHECKPOINT_DIR)
@property
def simple_http_port(self):
if self.namespace.simple_http_port is not None:
return self.namespace.simple_http_port
else:
return challenges.SimpleHTTPResponse.PORT
class RenewerConfiguration(object):
def __init__(self, namespace):
self.namespace = namespace
def __getattr__(self, name):
return getattr(self.namespace, name)
@property
def archive_dir(self):
return os.path.join(self.namespace.config_dir, constants.ARCHIVE_DIR)
@property
def live_dir(self):
return os.path.join(self.namespace.config_dir, constants.LIVE_DIR)
@property
def renewal_configs_dir(self):
return os.path.join(
self.namespace.config_dir, constants.RENEWAL_CONFIGS_DIR)
@property
def renewer_config_file(self):
return os.path.join(
self.namespace.config_dir, constants.RENEWER_CONFIG_FILENAME)
| true | true |
f72005518b34101337fb593f9f38ae1ba0642602 | 8,708 | py | Python | src/modules/display_tickets.py | dat-adi/eisen-tickets | bedd6786da5c49d0021ca97e6e4f33b7a07f5be4 | [
"MIT"
] | null | null | null | src/modules/display_tickets.py | dat-adi/eisen-tickets | bedd6786da5c49d0021ca97e6e4f33b7a07f5be4 | [
"MIT"
] | 11 | 2020-07-31T05:48:51.000Z | 2022-01-16T08:03:28.000Z | src/modules/display_tickets.py | dat-adi/eisen-tickets | bedd6786da5c49d0021ca97e6e4f33b7a07f5be4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GUI import
import tkinter as tk
# Styling the GUI
from tkinter import ttk
# Database connection
from modules.create_db_components import create_connection
# Deletes the ticket from the database
from modules.removing_tickets import delete_ticket
"""This module is used to display all the tickets present in the
Database."""
# Owned
__author__ = "Datta Adithya"
__credits__ = ["Datta Adithya"]
__license__ = "MIT"
__maintainer__ = "Datta Adithya"
__email__ = "dat.adithya@gmail.com"
# fonts for the project
text_font = ("Helvetica", 12)
# functions to retrieve all of the records from the database
def _tickets_in_category(conn, category):
    """Return every row of ``tickets`` whose category equals *category*.

    Shared implementation for the four category fetchers below.  The
    category value is passed as a bound DB-API parameter instead of being
    embedded in the SQL string, and the pointless ``conn.commit()`` after
    a read-only SELECT has been dropped.
    """
    cur = conn.cursor()
    cur.execute('SELECT * FROM tickets WHERE category = ?', (category,))
    return cur.fetchall()


def do_cat(conn):
    """Return all ticket rows in the 'DO' category."""
    return _tickets_in_category(conn, 'DO')


def dec_cat(conn):
    """Return all ticket rows in the 'DEC' (decide) category."""
    return _tickets_in_category(conn, 'DEC')


def dlg_cat(conn):
    """Return all ticket rows in the 'DLG' (delegate) category."""
    return _tickets_in_category(conn, 'DLG')


def del_cat(conn):
    """Return all ticket rows in the 'DEL' (delete) category."""
    return _tickets_in_category(conn, 'DEL')
# GUI for the project
class windows(tk.Tk):
    """Root application window.

    Builds one frame per page up front, stacks them all in the same grid
    cell, and raises one at a time via :meth:`show_frame`.
    """

    def __init__(self, conn, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        self.wm_title("Eisen's Tickets")
        self.iconbitmap(self, default="../../assets/logo.ico")
        # Open database connection shared with every page frame.
        self.conn = conn
        container = tk.Frame(self, height=400, width=600)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        # Page class -> instantiated frame; all frames occupy cell (0, 0)
        # so tkraise() controls which one is visible.
        self.frames = {}
        for F in (MainPage, EisenDisplay, DoPage, DecPage, DlgPage, DelPage):
            frame = F(container, self, self.conn)
            self.frames[F] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(MainPage)

    def show_frame(self, cont):
        """Raise the frame registered for page class *cont*."""
        frame = self.frames[cont]
        frame.tkraise()

    def ticket_display(self, ticket):
        """Open a popup showing one ticket row plus a delete button.

        *ticket* is a row tuple: (id, timestamp, category, task, more_info).
        """
        new_window = tk.Toplevel(self)
        ticket_id = ticket[0]
        timestamp = ticket[1]
        category = ticket[2]
        task = ticket[3]
        more_info = ticket[4]
        fields = ["Ticket ID", "Timestamp", "Category", "Task", "More Info"]
        details = [ticket_id, timestamp, category, task, more_info]
        # One label row per field/value pair.
        r = 0
        for field in fields:
            tk.Label(new_window, text=field, relief=tk.RIDGE, width=15).grid(
                row=r, column=0
            )
            tk.Label(new_window, text=details[r], relief=tk.SUNKEN, width=100).grid(
                row=r, column=1
            )
            r += 1
        # Final row: remove this ticket from the database.
        tk.Button(
            new_window,
            relief=tk.RIDGE,
            text="Delete Ticket",
            background="#FF3333",
            command=lambda: delete_ticket(self.conn, ticket_id),
        ).grid(row=r, column=0, columnspan=2, sticky="ew")
# Pages made for navigation through the different categories
class MainPage(tk.Frame):
    """Landing page; only offers navigation into the category selection."""

    def __init__(self, parent, controller, conn):
        tk.Frame.__init__(self, parent)
        self.conn = conn
        label = tk.Label(self, text="Start Page", font=text_font)
        label.pack(padx=10, pady=10)
        # Jump to the frame listing the four Eisenhower categories.
        eisen_display_button = ttk.Button(
            self,
            text="Display Selection",
            command=lambda: controller.show_frame(EisenDisplay),
        )
        eisen_display_button.pack(side="bottom", fill=tk.X)
class EisenDisplay(tk.Frame):
    """Menu page with one navigation button per category page."""

    def __init__(self, parent, controller, conn):
        tk.Frame.__init__(self, parent)
        self.conn = conn
        label = tk.Label(self, text="Eisen Display", font=text_font)
        label.pack(padx=10, pady=10)
        # Buttons are packed with side="bottom", so the first one packed
        # (main) ends up lowest and the last one (Do) highest.
        main_button = ttk.Button(
            self,
            text="Return to main page",
            command=lambda: controller.show_frame(MainPage),
        )
        main_button.pack(side="bottom", fill=tk.X)
        del_button = ttk.Button(
            self, text="Eisen Delete", command=lambda: controller.show_frame(DelPage)
        )
        del_button.pack(side="bottom", fill=tk.X)
        dlg_button = ttk.Button(
            self, text="Eisen Delegate", command=lambda: controller.show_frame(DlgPage)
        )
        dlg_button.pack(side="bottom", fill=tk.X)
        dec_button = ttk.Button(
            self, text="Eisen Decide", command=lambda: controller.show_frame(DecPage)
        )
        dec_button.pack(side="bottom", fill=tk.X)
        do_button = ttk.Button(
            self, text="Eisen Do", command=lambda: controller.show_frame(DoPage)
        )
        do_button.pack(side="bottom", fill=tk.X)
class DoPage(tk.Frame):
    """Lists tickets in the 'DO' category; each opens a detail popup."""

    def __init__(self, parent, controller, conn):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="Eisen's Do Page", font=text_font)
        label.pack(padx=10, pady=10)
        do_rows = do_cat(conn)
        # One button per ticket; element[3] is the task text.  The default
        # argument binds the current row (avoids the late-binding closure
        # pitfall).
        for element in do_rows:
            tk.Button(
                self,
                text=element[3],
                fg="black",
                command=lambda ele=element: controller.ticket_display(ele),
            ).pack(fill=tk.X)
        eisen_display_button = ttk.Button(
            self,
            text="Display Selection",
            command=lambda: controller.show_frame(EisenDisplay),
        )
        eisen_display_button.pack(side="bottom", fill=tk.X)
        # Shortcut to the next category page.
        dec_button = ttk.Button(
            self, text="Eisen Decide", command=lambda: controller.show_frame(DecPage)
        )
        dec_button.pack(side="bottom", fill=tk.X)
class DecPage(tk.Frame):
    """Lists tickets in the 'DEC' category; each opens a detail popup."""

    def __init__(self, parent, controller, conn):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="Eisen's Decide Page", font=text_font)
        label.pack(padx=10, pady=10)
        dec_rows = dec_cat(conn)
        # One button per ticket; element[3] is the task text.  Default
        # argument binds the current row (late-binding closure pitfall).
        for element in dec_rows:
            tk.Button(
                self,
                text=element[3],
                fg="black",
                command=lambda ele=element: controller.ticket_display(ele),
            ).pack(fill=tk.X)
        eisen_display_button = ttk.Button(
            self,
            text="Display Selection",
            command=lambda: controller.show_frame(EisenDisplay),
        )
        eisen_display_button.pack(side="bottom", fill=tk.X)
        # Shortcut to the next category page.
        dlg_button = ttk.Button(
            self, text="Eisen Delegate", command=lambda: controller.show_frame(DlgPage)
        )
        dlg_button.pack(side="bottom", fill=tk.X)
class DlgPage(tk.Frame):
    """Lists tickets in the 'DLG' category; each opens a detail popup."""

    def __init__(self, parent, controller, conn):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="Eisen's Delegate Page", font=text_font)
        label.pack(padx=10, pady=10)
        dlg_rows = dlg_cat(conn)
        # One button per ticket; element[3] is the task text.  Default
        # argument binds the current row (late-binding closure pitfall).
        for element in dlg_rows:
            tk.Button(
                self,
                text=element[3],
                fg="black",
                command=lambda ele=element: controller.ticket_display(ele),
            ).pack(fill=tk.X)
        eisen_display_button = ttk.Button(
            self,
            text="Display Selection",
            command=lambda: controller.show_frame(EisenDisplay),
        )
        eisen_display_button.pack(side="bottom", fill=tk.X)
        # Shortcut to the next category page.
        del_button = ttk.Button(
            self, text="Eisen Delete", command=lambda: controller.show_frame(DelPage)
        )
        del_button.pack(side="bottom", fill=tk.X)
class DelPage(tk.Frame):
    """Lists tickets in the 'DEL' category; each opens a detail popup."""

    def __init__(self, parent, controller, conn):
        tk.Frame.__init__(self, parent)
        label = tk.Label(self, text="Eisen's Delete Page", font=text_font)
        label.pack(padx=10, pady=10)
        del_rows = del_cat(conn)
        # One button per ticket; element[3] is the task text.  Default
        # argument binds the current row (late-binding closure pitfall).
        for element in del_rows:
            tk.Button(
                self,
                text=element[3],
                fg="black",
                command=lambda ele=element: controller.ticket_display(ele),
            ).pack(fill=tk.X)
        eisen_display_button = ttk.Button(
            self,
            text="Display Selection",
            command=lambda: controller.show_frame(EisenDisplay),
        )
        eisen_display_button.pack(side="bottom", fill=tk.X)
        # Cycle back around to the first category page.
        do_button = ttk.Button(
            self, text="Eisen Do", command=lambda: controller.show_frame(DoPage)
        )
        do_button.pack(side="bottom", fill=tk.X)
if __name__ == "__main__":
connection = create_connection(r"D:\eisen-tickets\assets\tickets.db")
four_windows = windows(connection)
four_windows.mainloop()
| 30.989324 | 87 | 0.598071 |
import tkinter as tk
from tkinter import ttk
from modules.create_db_components import create_connection
from modules.removing_tickets import delete_ticket
__author__ = "Datta Adithya"
__credits__ = ["Datta Adithya"]
__license__ = "MIT"
__maintainer__ = "Datta Adithya"
__email__ = "dat.adithya@gmail.com"
text_font = ("Helvetica", 12)
def do_cat(conn):
cur = conn.cursor()
cur.execute('SELECT * FROM tickets WHERE category = "DO"')
conn.commit()
rows = cur.fetchall()
return rows
def dec_cat(conn):
cur = conn.cursor()
cur.execute('SELECT * FROM tickets WHERE category = "DEC"')
conn.commit()
rows = cur.fetchall()
return rows
def dlg_cat(conn):
cur = conn.cursor()
cur.execute('SELECT * FROM tickets WHERE category = "DLG"')
conn.commit()
rows = cur.fetchall()
return rows
def del_cat(conn):
cur = conn.cursor()
cur.execute('SELECT * FROM tickets WHERE category = "DEL"')
conn.commit()
rows = cur.fetchall()
return rows
class windows(tk.Tk):
def __init__(self, conn, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.wm_title("Eisen's Tickets")
self.iconbitmap(self, default="../../assets/logo.ico")
self.conn = conn
container = tk.Frame(self, height=400, width=600)
container.pack(side="top", fill="both", expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
for F in (MainPage, EisenDisplay, DoPage, DecPage, DlgPage, DelPage):
frame = F(container, self, self.conn)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(MainPage)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
def ticket_display(self, ticket):
new_window = tk.Toplevel(self)
ticket_id = ticket[0]
timestamp = ticket[1]
category = ticket[2]
task = ticket[3]
more_info = ticket[4]
fields = ["Ticket ID", "Timestamp", "Category", "Task", "More Info"]
details = [ticket_id, timestamp, category, task, more_info]
r = 0
for field in fields:
tk.Label(new_window, text=field, relief=tk.RIDGE, width=15).grid(
row=r, column=0
)
tk.Label(new_window, text=details[r], relief=tk.SUNKEN, width=100).grid(
row=r, column=1
)
r += 1
tk.Button(
new_window,
relief=tk.RIDGE,
text="Delete Ticket",
background="#FF3333",
command=lambda: delete_ticket(self.conn, ticket_id),
).grid(row=r, column=0, columnspan=2, sticky="ew")
# Pages made for navigation through the different categories
class MainPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
self.conn = conn
label = tk.Label(self, text="Start Page", font=text_font)
label.pack(padx=10, pady=10)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
class EisenDisplay(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
self.conn = conn
label = tk.Label(self, text="Eisen Display", font=text_font)
label.pack(padx=10, pady=10)
main_button = ttk.Button(
self,
text="Return to main page",
command=lambda: controller.show_frame(MainPage),
)
main_button.pack(side="bottom", fill=tk.X)
del_button = ttk.Button(
self, text="Eisen Delete", command=lambda: controller.show_frame(DelPage)
)
del_button.pack(side="bottom", fill=tk.X)
dlg_button = ttk.Button(
self, text="Eisen Delegate", command=lambda: controller.show_frame(DlgPage)
)
dlg_button.pack(side="bottom", fill=tk.X)
dec_button = ttk.Button(
self, text="Eisen Decide", command=lambda: controller.show_frame(DecPage)
)
dec_button.pack(side="bottom", fill=tk.X)
do_button = ttk.Button(
self, text="Eisen Do", command=lambda: controller.show_frame(DoPage)
)
do_button.pack(side="bottom", fill=tk.X)
class DoPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Eisen's Do Page", font=text_font)
label.pack(padx=10, pady=10)
do_rows = do_cat(conn)
for element in do_rows:
tk.Button(
self,
text=element[3],
fg="black",
command=lambda ele=element: controller.ticket_display(ele),
).pack(fill=tk.X)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
dec_button = ttk.Button(
self, text="Eisen Decide", command=lambda: controller.show_frame(DecPage)
)
dec_button.pack(side="bottom", fill=tk.X)
class DecPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Eisen's Decide Page", font=text_font)
label.pack(padx=10, pady=10)
dec_rows = dec_cat(conn)
for element in dec_rows:
tk.Button(
self,
text=element[3],
fg="black",
command=lambda ele=element: controller.ticket_display(ele),
).pack(fill=tk.X)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
dlg_button = ttk.Button(
self, text="Eisen Delegate", command=lambda: controller.show_frame(DlgPage)
)
dlg_button.pack(side="bottom", fill=tk.X)
class DlgPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Eisen's Delegate Page", font=text_font)
label.pack(padx=10, pady=10)
dlg_rows = dlg_cat(conn)
for element in dlg_rows:
tk.Button(
self,
text=element[3],
fg="black",
command=lambda ele=element: controller.ticket_display(ele),
).pack(fill=tk.X)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
del_button = ttk.Button(
self, text="Eisen Delete", command=lambda: controller.show_frame(DelPage)
)
del_button.pack(side="bottom", fill=tk.X)
class DelPage(tk.Frame):
def __init__(self, parent, controller, conn):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Eisen's Delete Page", font=text_font)
label.pack(padx=10, pady=10)
del_rows = del_cat(conn)
for element in del_rows:
tk.Button(
self,
text=element[3],
fg="black",
command=lambda ele=element: controller.ticket_display(ele),
).pack(fill=tk.X)
eisen_display_button = ttk.Button(
self,
text="Display Selection",
command=lambda: controller.show_frame(EisenDisplay),
)
eisen_display_button.pack(side="bottom", fill=tk.X)
do_button = ttk.Button(
self, text="Eisen Do", command=lambda: controller.show_frame(DoPage)
)
do_button.pack(side="bottom", fill=tk.X)
if __name__ == "__main__":
connection = create_connection(r"D:\eisen-tickets\assets\tickets.db")
four_windows = windows(connection)
four_windows.mainloop()
| true | true |
f720055934c6413aad908cb155527edb52b40062 | 1,116 | py | Python | root/scripts/includes/python_logger.py | DragonCrafted87/docker-alpine-base | 033199c1d7d6d57271f16841b132469c78658dcf | [
"MIT"
] | null | null | null | root/scripts/includes/python_logger.py | DragonCrafted87/docker-alpine-base | 033199c1d7d6d57271f16841b132469c78658dcf | [
"MIT"
] | null | null | null | root/scripts/includes/python_logger.py | DragonCrafted87/docker-alpine-base | 033199c1d7d6d57271f16841b132469c78658dcf | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from logging import DEBUG
from logging import INFO
from logging import Formatter
from logging import StreamHandler
from logging import getLogger
from sys import stderr
from sys import stdout
class LogLevelFilter:
    """Logging filter that passes only records of one exact level.

    Unlike a handler's level threshold (which admits that level and
    everything above), this admits only records whose ``levelno``
    matches exactly.
    """

    def __init__(self, level):
        self.__level = level

    def filter(self, log_record):
        """Return True iff the record's level equals the configured one."""
        return log_record.levelno == self.__level


def create_logger(name=None):
    """Create (or fetch) a logger that splits output by level.

    INFO and above go to stdout; DEBUG records (exactly) go to stderr.
    The logger itself is set to DEBUG so both handlers see every record.

    Bug fix: calling this twice with the same name used to attach a
    second pair of handlers (every message then printed once per call);
    an already-configured logger is now returned unchanged.
    """
    log = getLogger(name)
    if log.handlers:
        # Already configured by a previous call -- do not duplicate handlers.
        return log
    log.setLevel(DEBUG)

    # One shared format for both destinations.
    log_format = Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    # stdout handler: INFO and above.
    info_handler = StreamHandler(stdout)
    info_handler.setLevel(INFO)
    info_handler.setFormatter(log_format)
    log.addHandler(info_handler)

    # stderr handler: DEBUG records only (the filter rejects everything else).
    debug_handler = StreamHandler(stderr)
    debug_handler.setLevel(DEBUG)
    debug_handler.setFormatter(log_format)
    debug_handler.addFilter(LogLevelFilter(DEBUG))
    log.addHandler(debug_handler)

    return log
| 25.953488 | 82 | 0.728495 |
from logging import DEBUG
from logging import INFO
from logging import Formatter
from logging import StreamHandler
from logging import getLogger
from sys import stderr
from sys import stdout
class LogLevelFilter:
def __init__(self, level):
self.__level = level
def filter(self, log_record):
return log_record.levelno == self.__level
def create_logger(name=None):
log = getLogger(name)
log.setLevel(DEBUG)
log_format = Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
info_handler = StreamHandler(stdout)
info_handler.setLevel(INFO)
info_handler.setFormatter(log_format)
log.addHandler(info_handler)
debug_handler = StreamHandler(stderr)
debug_handler.setLevel(DEBUG)
debug_handler.setFormatter(log_format)
debug_handler.addFilter(LogLevelFilter(DEBUG))
log.addHandler(debug_handler)
return log
| true | true |
f720066904260a4d87d72e6f5790ddf77d44d217 | 101 | py | Python | codes_auto/1603.running-sum-of-1d-array.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/1603.running-sum-of-1d-array.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/1603.running-sum-of-1d-array.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=1603 lang=python3
#
# [1603] running-sum-of-1d-array
#
None
# @lc code=end | 14.428571 | 42 | 0.673267 |
None
| true | true |
f72006ab8586955d57cb0232db8f0b952693aca0 | 1,522 | py | Python | setup.py | Rishk/alpha_vantage | 1cb28a98cb0c8526b85d163be96a37fd2d16ff95 | [
"MIT"
] | 1 | 2019-12-27T17:50:59.000Z | 2019-12-27T17:50:59.000Z | setup.py | Rishk/alpha_vantage | 1cb28a98cb0c8526b85d163be96a37fd2d16ff95 | [
"MIT"
] | null | null | null | setup.py | Rishk/alpha_vantage | 1cb28a98cb0c8526b85d163be96a37fd2d16ff95 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Use the README as the long description when it is available (source
# checkout); fall back to a short blurb otherwise (e.g. sdist without it).
try:
    with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
        long_description = f.read()
except IOError:
    long_description = 'Python module to get stock data from the Alpha Vantage Api'

setup(
    name='alpha_vantage',
    version='2.0.0',
    author='Romel J. Torres',
    author_email='romel.torres@gmail.com',
    license='MIT',
    description='Python module to get stock data from the Alpha Vantage Api',
    long_description=long_description,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Office/Business :: Financial :: Investment',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    url='https://github.com/RomelTorres/alpha_vantage',
    install_requires=[
        'requests',
        'simplejson'
    ],
    # Fixed keyword names: setuptools expects 'tests_require' and
    # 'extras_require'; the original 'test_requires'/'extras_requires'
    # spellings were unknown kwargs and silently ignored.
    tests_require=[
        'nose',
        'requests_mock'
    ],
    extras_require={
        'pandas': ['pandas'],
    },
    keywords=['stocks', 'market', 'finance', 'alpha_vantage', 'quotes',
              'shares'],
    packages=find_packages(
        exclude=['helpers', 'test_alpha_vantage', 'images']),
    package_data={
        'alpha_vantage': [],
    }
)
| 30.44 | 83 | 0.61498 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
try:
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
except IOError:
long_description = 'Python module to get stock data from the Alpha Vantage Api'
setup(
name='alpha_vantage',
version='2.0.0',
author='Romel J. Torres',
author_email='romel.torres@gmail.com',
license='MIT',
description='Python module to get stock data from the Alpha Vantage Api',
long_description=long_description,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
url='https://github.com/RomelTorres/alpha_vantage',
install_requires=[
'requests',
'simplejson'
],
test_requires=[
'nose',
'requests_mock'
],
extras_requires={
'pandas': ['pandas'],
},
keywords=['stocks', 'market', 'finance', 'alpha_vantage', 'quotes',
'shares'],
packages=find_packages(
exclude=['helpers', 'test_alpha_vantage', 'images']),
package_data={
'alpha_vantage': [],
}
)
| true | true |
f72007805c2d3ca886d768516835db709c0dc08b | 1,117 | py | Python | python files/area_calcs.py | dhbesson/abc_visualization | b024bf551e0e331e3f7bd9d63dbe1437a3c25aa7 | [
"MIT"
] | null | null | null | python files/area_calcs.py | dhbesson/abc_visualization | b024bf551e0e331e3f7bd9d63dbe1437a3c25aa7 | [
"MIT"
] | null | null | null | python files/area_calcs.py | dhbesson/abc_visualization | b024bf551e0e331e3f7bd9d63dbe1437a3c25aa7 | [
"MIT"
] | null | null | null | import requests, csv, sys, os, time, json, codecs
server = "https://cloudrf.com"
# dir = "calculations/antennas_1W_2m"
# Open CSV file
import codecs
# csvfile = csv.reader(codecs.open('antennas.csv', 'rU', 'utf-16'))
uid = 'YOUR CLOUDRF UID HERE'
key = 'YOUR CLOUDRF KEY HERE'
def calc_area(dir,csvfile_loc):
    """POST each row of a CSV to the CloudRF /API/area endpoint and save KMZs.

    Every row of *csvfile_loc* is sent as form data; the JSON response is
    expected to contain a 'kmz' URL which is downloaded into *dir* as
    "<row['nam']>.kmz".  NOTE(review): the uid/key credentials defined at
    module level are presumably expected inside each CSV row -- confirm the
    CSV column layout against the CloudRF API documentation.
    """
    n = 0
    csvfile = csv.DictReader(open(csvfile_loc))
    if not os.path.exists(dir):
        os.makedirs(dir)
    for row in csvfile:
        # Pause between requests: rapid-fire calls get banned by the server.
        time.sleep(1)
        start_time = time.time() # Stopwatch start
        r = requests.post(server + "/API/area", data=row)
        print(r.text)
        j = json.loads(r.text)
        # Fetch the generated coverage archive and write it to disk.
        r = requests.get(j['kmz'])
        fn = dir + os.sep + str(row['nam']) + ".kmz"
        file = open(fn, "wb")
        file.write(r.content)
        file.close()
        print("Saved to %s" % fn)
        elapsed = round(time.time() - start_time, 1) # Stopwatch
        print("Elapsed: " + str(elapsed) + "s")
        n = n + 1
n = n + 1 | 27.243902 | 68 | 0.554163 | import requests, csv, sys, os, time, json, codecs
server = "https://cloudrf.com"
import codecs
uid = 'YOUR CLOUDRF UID HERE'
key = 'YOUR CLOUDRF KEY HERE'
def calc_area(dir,csvfile_loc):
n = 0
csvfile = csv.DictReader(open(csvfile_loc))
if not os.path.exists(dir):
os.makedirs(dir)
for row in csvfile:
time.sleep(1)
start_time = time.time()
r = requests.post(server + "/API/area", data=row)
print(r.text)
j = json.loads(r.text)
r = requests.get(j['kmz'])
fn = dir + os.sep + str(row['nam']) + ".kmz"
file = open(fn, "wb")
file.write(r.content)
file.close()
print("Saved to %s" % fn)
elapsed = round(time.time() - start_time, 1)
print("Elapsed: " + str(elapsed) + "s")
n = n + 1 | true | true |
f72008a164fc940a1cd12de39700a25410e41ad5 | 3,485 | py | Python | src/the_tale/the_tale/game/bills/models.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/bills/models.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/bills/models.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
class Bill(django_models.Model):
    # Bounds for the user-supplied caption; the column is sized to the max.
    CAPTION_MIN_LENGTH = 6
    CAPTION_MAX_LENGTH = 256
    created_at = django_models.DateTimeField(auto_now_add=True, null=False)
    updated_at = django_models.DateTimeField(auto_now_add=True, null=False) # MUST be set by hand (auto_now_add only fills it on creation)
    voting_end_at = django_models.DateTimeField(null=True, blank=True)
    created_at_turn = django_models.IntegerField(null=False)
    applyed_at_turn = django_models.IntegerField(null=True, blank=True)
    ended_at = django_models.DateTimeField(null=True, blank=True)
    # Author account; kept nullable so the bill survives account deletion.
    owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
    caption = django_models.CharField(max_length=CAPTION_MAX_LENGTH)
    type = rels_django.RelationIntegerField(relation=relations.BILL_TYPE, db_index=True)
    state = rels_django.RelationIntegerField(relation=relations.BILL_STATE, db_index=True)
    approved_by_moderator = django_models.BooleanField(default=False, db_index=True)
    remove_initiator = django_models.ForeignKey('accounts.Account', null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
    # NOTE(review): a dict default on a TextField looks wrong -- TextField
    # defaults should be strings (e.g. '{}'); confirm the intended
    # serialized form before changing, as it affects migrations.
    technical_data = django_models.TextField(null=False, blank=True, default={})
    chronicle_on_accepted = django_models.TextField(null=False, blank=True, default='')
    # we should not remove bill when ocasionally remove forum thread
    forum_thread = django_models.ForeignKey(forum_models.Thread, null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
    # Vote tallies.
    votes_for = django_models.IntegerField(default=0)
    votes_against = django_models.IntegerField(default=0)
    votes_refrained = django_models.IntegerField(default=0)
    # fields to store config values after processing state (since they can be changed in future)
    min_votes_percents_required = django_models.FloatField(default=0.0)
    is_declined = django_models.BooleanField(blank=True, default=False)
    # Self-references: the bill that declined this one / a prerequisite bill.
    declined_by = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
    depends_on = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)

    def __str__(self):
        """Human-readable identifier: '<id>-<caption>'."""
        return '{}-{}'.format(self.id, self.caption)

    class Meta:
        permissions = (("moderate_bill", "Может администрировать записи в Книге Судеб"), )
class Actor(django_models.Model):
    """Links a bill to the in-game entity (currently a place) it concerns."""

    # ATTENTION: if buildings are ever made actors, remember that a recreated
    # building (same person, after the previous building was destroyed) is
    # first fully removed from the database (the previous building) and only
    # then created anew.
    created_at = django_models.DateTimeField(auto_now_add=True, null=False)

    bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)

    # CASCADE: an actor row is meaningless without its place.
    place = django_models.ForeignKey('places.Place', null=True, related_name='+', on_delete=django_models.CASCADE)
class Vote(django_models.Model):
    """A single account's vote (for/against/refrained) on a bill."""

    created_at = django_models.DateTimeField(auto_now_add=True, null=False)

    # SET_NULL: the vote record outlives a deleted account.
    owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)

    bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)

    type = rels_django.RelationIntegerField(relation=relations.VOTE_TYPE, db_index=True)

    class Meta:
        # Each account may vote at most once per bill.
        unique_together = (('owner', 'bill'),)
| 42.5 | 145 | 0.766428 |
import smart_imports
smart_imports.all()
class Bill(django_models.Model):
CAPTION_MIN_LENGTH = 6
CAPTION_MAX_LENGTH = 256
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
updated_at = django_models.DateTimeField(auto_now_add=True, null=False)
voting_end_at = django_models.DateTimeField(null=True, blank=True)
created_at_turn = django_models.IntegerField(null=False)
applyed_at_turn = django_models.IntegerField(null=True, blank=True)
ended_at = django_models.DateTimeField(null=True, blank=True)
owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
caption = django_models.CharField(max_length=CAPTION_MAX_LENGTH)
type = rels_django.RelationIntegerField(relation=relations.BILL_TYPE, db_index=True)
state = rels_django.RelationIntegerField(relation=relations.BILL_STATE, db_index=True)
approved_by_moderator = django_models.BooleanField(default=False, db_index=True)
remove_initiator = django_models.ForeignKey('accounts.Account', null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
technical_data = django_models.TextField(null=False, blank=True, default={})
chronicle_on_accepted = django_models.TextField(null=False, blank=True, default='')
forum_thread = django_models.ForeignKey(forum_models.Thread, null=True, blank=True, related_name='+', on_delete=django_models.SET_NULL)
votes_for = django_models.IntegerField(default=0)
votes_against = django_models.IntegerField(default=0)
votes_refrained = django_models.IntegerField(default=0)
min_votes_percents_required = django_models.FloatField(default=0.0)
is_declined = django_models.BooleanField(blank=True, default=False)
declined_by = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
depends_on = django_models.ForeignKey('bills.Bill', null=True, default=None, related_name='+', blank=True, on_delete=django_models.SET_NULL)
def __str__(self):
return '{}-{}'.format(self.id, self.caption)
class Meta:
permissions = (("moderate_bill", "Может администрировать записи в Книге Судеб"), )
class Actor(django_models.Model):
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)
place = django_models.ForeignKey('places.Place', null=True, related_name='+', on_delete=django_models.CASCADE)
class Vote(django_models.Model):
created_at = django_models.DateTimeField(auto_now_add=True, null=False)
owner = django_models.ForeignKey('accounts.Account', null=True, related_name='+', on_delete=django_models.SET_NULL)
bill = django_models.ForeignKey(Bill, null=False, on_delete=django_models.CASCADE)
type = rels_django.RelationIntegerField(relation=relations.VOTE_TYPE, db_index=True)
class Meta:
unique_together = (('owner', 'bill'),)
| true | true |
f72008f42f54ea078b631fc42689eec8279d667b | 486 | py | Python | April 2021/Furthest Building You Can Reach.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | April 2021/Furthest Building You Can Reach.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | April 2021/Furthest Building You Can Reach.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | class Solution:
def furthestBuilding(self, H, bricks, ladders):
jumps_pq = []
for i in range(len(H) - 1):
jump_height = H[i + 1] - H[i]
if jump_height <= 0: continue
heappush(jumps_pq, jump_height)
if len(jumps_pq) > ladders:
bricks -= heappop(jumps_pq)
if(bricks < 0) : return i
return len(H) - 1
# Quick manual check of the solver on the sample input.
a = Solution()
print(a.furthestBuilding([4,12,2,7,3,18,20,3,19], 10, 2))
| 28.588235 | 57 | 0.522634 | class Solution:
def furthestBuilding(self, H, bricks, ladders):
jumps_pq = []
for i in range(len(H) - 1):
jump_height = H[i + 1] - H[i]
if jump_height <= 0: continue
heappush(jumps_pq, jump_height)
if len(jumps_pq) > ladders:
bricks -= heappop(jumps_pq)
if(bricks < 0) : return i
return len(H) - 1
a = Solution()
print(a.furthestBuilding([4,12,2,7,3,18,20,3,19], 10, 2))
| true | true |
f72008f569b6e13c90d5063ad392e029122f1919 | 228 | py | Python | Itertools/itertools.product.py | AndreasGeiger/hackerrank-python | a436c207e62b32f70a6b4279bb641a3c4d90e112 | [
"MIT"
] | null | null | null | Itertools/itertools.product.py | AndreasGeiger/hackerrank-python | a436c207e62b32f70a6b4279bb641a3c4d90e112 | [
"MIT"
] | null | null | null | Itertools/itertools.product.py | AndreasGeiger/hackerrank-python | a436c207e62b32f70a6b4279bb641a3c4d90e112 | [
"MIT"
] | null | null | null | from itertools import product
listA = list(map(int, input().split()))
listB = list(map(int, input().split()))
productLists = list(product(listA, listB))
for i in range(len(productLists)):
print(productLists[i], end=" ")
| 20.727273 | 42 | 0.684211 | from itertools import product
listA = list(map(int, input().split()))
listB = list(map(int, input().split()))
productLists = list(product(listA, listB))
for i in range(len(productLists)):
print(productLists[i], end=" ")
| true | true |
f7200945eace3e7c67af32832e8436d62e73a7ee | 3,102 | py | Python | nativepython/type_wrappers/range_wrapper.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | nativepython/type_wrappers/range_wrapper.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | nativepython/type_wrappers/range_wrapper.py | szymonlipinski/nativepython | 5f0bcc709b99a43681488f2753eccc2ac37a0334 | [
"Apache-2.0"
] | null | null | null | # Coyright 2017-2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nativepython.type_wrappers.wrapper import Wrapper
import nativepython.native_ast as native_ast
class RangeWrapper(Wrapper):
    """Typed-expression wrapper for the `range` builtin itself.

    Calling it (``range(stop)``) yields a value typed by
    ``_RangeInstanceWrapper``.
    """

    # Plain-old-data flags: no destructor/refcount handling is required.
    is_pod = True
    is_empty = False
    is_pass_by_ref = False

    def __init__(self):
        super().__init__((range, "type"))

    def getNativeLayoutType(self):
        # The range *type* object carries no runtime data of its own.
        return native_ast.Type.Void()

    def convert_call(self, context, expr, args, kwargs):
        # Only the single-positional-argument form range(stop) is handled
        # here; anything else falls through to the base implementation.
        if len(args) == 1 and not kwargs:
            arg = args[0].toInt64()
            if not arg:
                # toInt64() produced no value (conversion failed) — abort.
                return None
            return context.pushPod(
                _RangeInstanceWrapper,
                arg.nonref_expr
            )
        return super().convert_call(context, expr, args, kwargs)
class RangeInstanceWrapper(Wrapper):
    """Wrapper for a concrete ``range(stop)`` value.

    The instance is represented natively as a single int64 holding the
    stop value.
    """

    is_pod = True
    is_empty = False
    is_pass_by_ref = False

    def __init__(self):
        super().__init__((range, "instance"))

    def getNativeLayoutType(self):
        # One int64: the range's stop value (see convert_call above).
        return native_ast.Int64

    def convert_method_call(self, context, expr, methodname, args, kwargs):
        if methodname == "__iter__" and not args and not kwargs:
            # Build the iterator struct: field 0 ('count') starts at -1
            # because convert_next pre-increments it; field 1 ('len') holds
            # this range's stop value.
            return context.push(
                _RangeIteratorWrapper,
                lambda instance:
                instance.expr.ElementPtrIntegers(0, 0).store(-1) >>
                instance.expr.ElementPtrIntegers(0, 1).store(expr.nonref_expr)
            )
        return super().convert_method_call(context, expr, methodname, args, kwargs)
class RangeIteratorWrapper(Wrapper):
    """Wrapper for the iterator produced by ``range(...).__iter__()``.

    Native layout is a two-int64 struct: 'count' (current value, initialized
    to -1 and pre-incremented on each step) and 'len' (exclusive stop).
    """

    is_pod = True
    is_empty = False
    is_pass_by_ref = True  # the iterator struct is accessed through a pointer

    def __init__(self):
        super().__init__((range, "iterator"))

    def getNativeLayoutType(self):
        return native_ast.Type.Struct(
            element_types=(("count", native_ast.Int64), ("len", native_ast.Int64)),
            name="range_storage"
        )

    def convert_next(self, context, expr):
        """Emit one iteration step; returns (next_value_ref, can_continue)."""
        # count += 1 (pre-increment; count was initialized to -1).
        context.pushEffect(
            expr.expr.ElementPtrIntegers(0, 0).store(
                expr.expr.ElementPtrIntegers(0, 0).load().add(1)
            )
        )
        # canContinue = count < len
        canContinue = context.pushPod(
            bool,
            expr.expr.ElementPtrIntegers(0, 0).load().lt(
                expr.expr.ElementPtrIntegers(0, 1).load()
            )
        )
        # The produced value is a reference to the just-incremented counter.
        nextExpr = context.pushReference(int, expr.expr.ElementPtrIntegers(0, 0))
        return nextExpr, canContinue
# Shared singleton instances of the wrappers defined above.
_RangeWrapper = RangeWrapper()
_RangeInstanceWrapper = RangeInstanceWrapper()
_RangeIteratorWrapper = RangeIteratorWrapper()
| 31.333333 | 83 | 0.640554 |
from nativepython.type_wrappers.wrapper import Wrapper
import nativepython.native_ast as native_ast
class RangeWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__((range, "type"))
def getNativeLayoutType(self):
return native_ast.Type.Void()
def convert_call(self, context, expr, args, kwargs):
if len(args) == 1 and not kwargs:
arg = args[0].toInt64()
if not arg:
return None
return context.pushPod(
_RangeInstanceWrapper,
arg.nonref_expr
)
return super().convert_call(context, expr, args, kwargs)
class RangeInstanceWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__((range, "instance"))
def getNativeLayoutType(self):
return native_ast.Int64
def convert_method_call(self, context, expr, methodname, args, kwargs):
if methodname == "__iter__" and not args and not kwargs:
return context.push(
_RangeIteratorWrapper,
lambda instance:
instance.expr.ElementPtrIntegers(0, 0).store(-1) >>
instance.expr.ElementPtrIntegers(0, 1).store(expr.nonref_expr)
)
return super().convert_method_call(context, expr, methodname, args, kwargs)
class RangeIteratorWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = True
def __init__(self):
super().__init__((range, "iterator"))
def getNativeLayoutType(self):
return native_ast.Type.Struct(
element_types=(("count", native_ast.Int64), ("len", native_ast.Int64)),
name="range_storage"
)
def convert_next(self, context, expr):
context.pushEffect(
expr.expr.ElementPtrIntegers(0, 0).store(
expr.expr.ElementPtrIntegers(0, 0).load().add(1)
)
)
canContinue = context.pushPod(
bool,
expr.expr.ElementPtrIntegers(0, 0).load().lt(
expr.expr.ElementPtrIntegers(0, 1).load()
)
)
nextExpr = context.pushReference(int, expr.expr.ElementPtrIntegers(0, 0))
return nextExpr, canContinue
_RangeWrapper = RangeWrapper()
_RangeInstanceWrapper = RangeInstanceWrapper()
_RangeIteratorWrapper = RangeIteratorWrapper()
| true | true |
f72009f2a950749e199cacf800fa7cbce9a95e33 | 1,856 | py | Python | ml-agents-envs/mlagents_envs/registry/base_registry_entry.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 13,653 | 2017-09-19T15:56:02.000Z | 2022-03-31T18:55:07.000Z | ml-agents-envs/mlagents_envs/registry/base_registry_entry.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 3,623 | 2017-09-20T02:50:20.000Z | 2022-03-31T06:37:25.000Z | ml-agents-envs/mlagents_envs/registry/base_registry_entry.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 4,130 | 2017-09-19T17:36:34.000Z | 2022-03-31T12:54:55.000Z | from abc import abstractmethod
from typing import Any, Optional
from mlagents_envs.base_env import BaseEnv
class BaseRegistryEntry:
    def __init__(
        self,
        identifier: str,
        expected_reward: Optional[float],
        description: Optional[str],
    ):
        """Describe a launchable Unity Environment.

        A registry entry knows how to start its environment through `make`.

        :param identifier: Unique name of the Unity Environment.
        :param expected_reward: Cumulative reward an Agent must collect for
            the task to count as solved.
        :param description: Human readable notes about the environment —
            special arguments `make` accepts, plus information on the
            observations, rewards, actions, behaviors and agent count.
        """
        self._identifier = identifier
        self._expected_reward = expected_reward
        self._description = description

    @property
    def identifier(self) -> str:
        """Unique name identifying this entry."""
        return self._identifier

    @property
    def expected_reward(self) -> Optional[float]:
        """Cumulative reward threshold at which the task counts as solved."""
        return self._expected_reward

    @property
    def description(self) -> Optional[str]:
        """Human readable description of the environment this entry makes."""
        return self._description

    @abstractmethod
    def make(self, **kwargs: Any) -> BaseEnv:
        """Create and return the Unity BaseEnv (usually a UnityEnvironment)."""
        raise NotImplementedError(
            f"The make() method not implemented for entry {self.identifier}"
        )
| 32.561404 | 87 | 0.649246 | from abc import abstractmethod
from typing import Any, Optional
from mlagents_envs.base_env import BaseEnv
class BaseRegistryEntry:
def __init__(
self,
identifier: str,
expected_reward: Optional[float],
description: Optional[str],
):
self._identifier = identifier
self._expected_reward = expected_reward
self._description = description
@property
def identifier(self) -> str:
return self._identifier
@property
def expected_reward(self) -> Optional[float]:
return self._expected_reward
@property
def description(self) -> Optional[str]:
return self._description
@abstractmethod
def make(self, **kwargs: Any) -> BaseEnv:
raise NotImplementedError(
f"The make() method not implemented for entry {self.identifier}"
)
| true | true |
f7200a2703afba3d344293f747d73f0e8c66c472 | 14,266 | py | Python | sdk/python/pulumi_azure_native/storagesync/v20180701/get_registered_server.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storagesync/v20180701/get_registered_server.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storagesync/v20180701/get_registered_server.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRegisteredServerResult',
'AwaitableGetRegisteredServerResult',
'get_registered_server',
]
@pulumi.output_type
class GetRegisteredServerResult:
    """
    Registered Server resource.
    """
    # NOTE: generated code (see the file header) — each constructor argument
    # is type-checked (only when truthy) and attached to the output object
    # with pulumi.set; the @pulumi.getter properties below read the values
    # back with pulumi.get.
    def __init__(__self__, agent_version=None, cluster_id=None, cluster_name=None, discovery_endpoint_uri=None, friendly_name=None, id=None, last_heart_beat=None, last_operation_name=None, last_workflow_id=None, management_endpoint_uri=None, monitoring_configuration=None, name=None, provisioning_state=None, resource_location=None, server_certificate=None, server_id=None, server_managementt_error_code=None, server_os_version=None, server_role=None, service_location=None, storage_sync_service_uid=None, type=None):
        if agent_version and not isinstance(agent_version, str):
            raise TypeError("Expected argument 'agent_version' to be a str")
        pulumi.set(__self__, "agent_version", agent_version)
        if cluster_id and not isinstance(cluster_id, str):
            raise TypeError("Expected argument 'cluster_id' to be a str")
        pulumi.set(__self__, "cluster_id", cluster_id)
        if cluster_name and not isinstance(cluster_name, str):
            raise TypeError("Expected argument 'cluster_name' to be a str")
        pulumi.set(__self__, "cluster_name", cluster_name)
        if discovery_endpoint_uri and not isinstance(discovery_endpoint_uri, str):
            raise TypeError("Expected argument 'discovery_endpoint_uri' to be a str")
        pulumi.set(__self__, "discovery_endpoint_uri", discovery_endpoint_uri)
        if friendly_name and not isinstance(friendly_name, str):
            raise TypeError("Expected argument 'friendly_name' to be a str")
        pulumi.set(__self__, "friendly_name", friendly_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if last_heart_beat and not isinstance(last_heart_beat, str):
            raise TypeError("Expected argument 'last_heart_beat' to be a str")
        pulumi.set(__self__, "last_heart_beat", last_heart_beat)
        if last_operation_name and not isinstance(last_operation_name, str):
            raise TypeError("Expected argument 'last_operation_name' to be a str")
        pulumi.set(__self__, "last_operation_name", last_operation_name)
        if last_workflow_id and not isinstance(last_workflow_id, str):
            raise TypeError("Expected argument 'last_workflow_id' to be a str")
        pulumi.set(__self__, "last_workflow_id", last_workflow_id)
        if management_endpoint_uri and not isinstance(management_endpoint_uri, str):
            raise TypeError("Expected argument 'management_endpoint_uri' to be a str")
        pulumi.set(__self__, "management_endpoint_uri", management_endpoint_uri)
        if monitoring_configuration and not isinstance(monitoring_configuration, str):
            raise TypeError("Expected argument 'monitoring_configuration' to be a str")
        pulumi.set(__self__, "monitoring_configuration", monitoring_configuration)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resource_location and not isinstance(resource_location, str):
            raise TypeError("Expected argument 'resource_location' to be a str")
        pulumi.set(__self__, "resource_location", resource_location)
        if server_certificate and not isinstance(server_certificate, str):
            raise TypeError("Expected argument 'server_certificate' to be a str")
        pulumi.set(__self__, "server_certificate", server_certificate)
        if server_id and not isinstance(server_id, str):
            raise TypeError("Expected argument 'server_id' to be a str")
        pulumi.set(__self__, "server_id", server_id)
        if server_managementt_error_code and not isinstance(server_managementt_error_code, int):
            raise TypeError("Expected argument 'server_managementt_error_code' to be a int")
        pulumi.set(__self__, "server_managementt_error_code", server_managementt_error_code)
        if server_os_version and not isinstance(server_os_version, str):
            raise TypeError("Expected argument 'server_os_version' to be a str")
        pulumi.set(__self__, "server_os_version", server_os_version)
        if server_role and not isinstance(server_role, str):
            raise TypeError("Expected argument 'server_role' to be a str")
        pulumi.set(__self__, "server_role", server_role)
        if service_location and not isinstance(service_location, str):
            raise TypeError("Expected argument 'service_location' to be a str")
        pulumi.set(__self__, "service_location", service_location)
        if storage_sync_service_uid and not isinstance(storage_sync_service_uid, str):
            raise TypeError("Expected argument 'storage_sync_service_uid' to be a str")
        pulumi.set(__self__, "storage_sync_service_uid", storage_sync_service_uid)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="agentVersion")
    def agent_version(self) -> Optional[str]:
        """
        Registered Server Agent Version
        """
        return pulumi.get(self, "agent_version")

    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[str]:
        """
        Registered Server clusterId
        """
        return pulumi.get(self, "cluster_id")

    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> Optional[str]:
        """
        Registered Server clusterName
        """
        return pulumi.get(self, "cluster_name")

    @property
    @pulumi.getter(name="discoveryEndpointUri")
    def discovery_endpoint_uri(self) -> Optional[str]:
        """
        Resource discoveryEndpointUri
        """
        return pulumi.get(self, "discovery_endpoint_uri")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        Friendly Name
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastHeartBeat")
    def last_heart_beat(self) -> Optional[str]:
        """
        Registered Server last heart beat
        """
        return pulumi.get(self, "last_heart_beat")

    @property
    @pulumi.getter(name="lastOperationName")
    def last_operation_name(self) -> Optional[str]:
        """
        Resource Last Operation Name
        """
        return pulumi.get(self, "last_operation_name")

    @property
    @pulumi.getter(name="lastWorkflowId")
    def last_workflow_id(self) -> Optional[str]:
        """
        Registered Server lastWorkflowId
        """
        return pulumi.get(self, "last_workflow_id")

    @property
    @pulumi.getter(name="managementEndpointUri")
    def management_endpoint_uri(self) -> Optional[str]:
        """
        Management Endpoint Uri
        """
        return pulumi.get(self, "management_endpoint_uri")

    @property
    @pulumi.getter(name="monitoringConfiguration")
    def monitoring_configuration(self) -> Optional[str]:
        """
        Monitoring Configuration
        """
        return pulumi.get(self, "monitoring_configuration")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """
        Registered Server Provisioning State
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceLocation")
    def resource_location(self) -> Optional[str]:
        """
        Resource Location
        """
        return pulumi.get(self, "resource_location")

    @property
    @pulumi.getter(name="serverCertificate")
    def server_certificate(self) -> Optional[str]:
        """
        Registered Server Certificate
        """
        return pulumi.get(self, "server_certificate")

    @property
    @pulumi.getter(name="serverId")
    def server_id(self) -> Optional[str]:
        """
        Registered Server serverId
        """
        return pulumi.get(self, "server_id")

    @property
    @pulumi.getter(name="serverManagementtErrorCode")
    def server_managementt_error_code(self) -> Optional[int]:
        """
        Registered Server Management Error Code
        """
        return pulumi.get(self, "server_managementt_error_code")

    @property
    @pulumi.getter(name="serverOSVersion")
    def server_os_version(self) -> Optional[str]:
        """
        Registered Server OS Version
        """
        return pulumi.get(self, "server_os_version")

    @property
    @pulumi.getter(name="serverRole")
    def server_role(self) -> Optional[str]:
        """
        Registered Server serverRole
        """
        return pulumi.get(self, "server_role")

    @property
    @pulumi.getter(name="serviceLocation")
    def service_location(self) -> Optional[str]:
        """
        Service Location
        """
        return pulumi.get(self, "service_location")

    @property
    @pulumi.getter(name="storageSyncServiceUid")
    def storage_sync_service_uid(self) -> Optional[str]:
        """
        Registered Server storageSyncServiceUid
        """
        return pulumi.get(self, "storage_sync_service_uid")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetRegisteredServerResult(GetRegisteredServerResult):
    """Awaitable variant of GetRegisteredServerResult.

    ``await``-ing an instance yields a plain GetRegisteredServerResult
    carrying the same field values.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this into a generator function,
        # which is what the `await` protocol requires; the plain result is
        # then returned immediately.
        if False:
            yield self
        return GetRegisteredServerResult(
            agent_version=self.agent_version,
            cluster_id=self.cluster_id,
            cluster_name=self.cluster_name,
            discovery_endpoint_uri=self.discovery_endpoint_uri,
            friendly_name=self.friendly_name,
            id=self.id,
            last_heart_beat=self.last_heart_beat,
            last_operation_name=self.last_operation_name,
            last_workflow_id=self.last_workflow_id,
            management_endpoint_uri=self.management_endpoint_uri,
            monitoring_configuration=self.monitoring_configuration,
            name=self.name,
            provisioning_state=self.provisioning_state,
            resource_location=self.resource_location,
            server_certificate=self.server_certificate,
            server_id=self.server_id,
            server_managementt_error_code=self.server_managementt_error_code,
            server_os_version=self.server_os_version,
            server_role=self.server_role,
            service_location=self.service_location,
            storage_sync_service_uid=self.storage_sync_service_uid,
            type=self.type)
def get_registered_server(resource_group_name: Optional[str] = None,
                          server_id: Optional[str] = None,
                          storage_sync_service_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegisteredServerResult:
    """
    Registered Server resource.

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str server_id: GUID identifying the on-premises server.
    :param str storage_sync_service_name: Name of Storage Sync Service resource.
    :param pulumi.InvokeOptions opts: Options controlling the invoke call.
    """
    # Arguments are passed to the provider with their ARM (camelCase) names.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverId'] = server_id
    __args__['storageSyncServiceName'] = storage_sync_service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Perform the provider invoke; the typed result is unwrapped from .value.
    __ret__ = pulumi.runtime.invoke('azure-native:storagesync/v20180701:getRegisteredServer', __args__, opts=opts, typ=GetRegisteredServerResult).value

    # Repackage as the awaitable wrapper so callers may `await` the result.
    return AwaitableGetRegisteredServerResult(
        agent_version=__ret__.agent_version,
        cluster_id=__ret__.cluster_id,
        cluster_name=__ret__.cluster_name,
        discovery_endpoint_uri=__ret__.discovery_endpoint_uri,
        friendly_name=__ret__.friendly_name,
        id=__ret__.id,
        last_heart_beat=__ret__.last_heart_beat,
        last_operation_name=__ret__.last_operation_name,
        last_workflow_id=__ret__.last_workflow_id,
        management_endpoint_uri=__ret__.management_endpoint_uri,
        monitoring_configuration=__ret__.monitoring_configuration,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        resource_location=__ret__.resource_location,
        server_certificate=__ret__.server_certificate,
        server_id=__ret__.server_id,
        server_managementt_error_code=__ret__.server_managementt_error_code,
        server_os_version=__ret__.server_os_version,
        server_role=__ret__.server_role,
        service_location=__ret__.service_location,
        storage_sync_service_uid=__ret__.storage_sync_service_uid,
        type=__ret__.type)
| 41.71345 | 517 | 0.683233 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRegisteredServerResult',
'AwaitableGetRegisteredServerResult',
'get_registered_server',
]
@pulumi.output_type
class GetRegisteredServerResult:
def __init__(__self__, agent_version=None, cluster_id=None, cluster_name=None, discovery_endpoint_uri=None, friendly_name=None, id=None, last_heart_beat=None, last_operation_name=None, last_workflow_id=None, management_endpoint_uri=None, monitoring_configuration=None, name=None, provisioning_state=None, resource_location=None, server_certificate=None, server_id=None, server_managementt_error_code=None, server_os_version=None, server_role=None, service_location=None, storage_sync_service_uid=None, type=None):
if agent_version and not isinstance(agent_version, str):
raise TypeError("Expected argument 'agent_version' to be a str")
pulumi.set(__self__, "agent_version", agent_version)
if cluster_id and not isinstance(cluster_id, str):
raise TypeError("Expected argument 'cluster_id' to be a str")
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_name and not isinstance(cluster_name, str):
raise TypeError("Expected argument 'cluster_name' to be a str")
pulumi.set(__self__, "cluster_name", cluster_name)
if discovery_endpoint_uri and not isinstance(discovery_endpoint_uri, str):
raise TypeError("Expected argument 'discovery_endpoint_uri' to be a str")
pulumi.set(__self__, "discovery_endpoint_uri", discovery_endpoint_uri)
if friendly_name and not isinstance(friendly_name, str):
raise TypeError("Expected argument 'friendly_name' to be a str")
pulumi.set(__self__, "friendly_name", friendly_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_heart_beat and not isinstance(last_heart_beat, str):
raise TypeError("Expected argument 'last_heart_beat' to be a str")
pulumi.set(__self__, "last_heart_beat", last_heart_beat)
if last_operation_name and not isinstance(last_operation_name, str):
raise TypeError("Expected argument 'last_operation_name' to be a str")
pulumi.set(__self__, "last_operation_name", last_operation_name)
if last_workflow_id and not isinstance(last_workflow_id, str):
raise TypeError("Expected argument 'last_workflow_id' to be a str")
pulumi.set(__self__, "last_workflow_id", last_workflow_id)
if management_endpoint_uri and not isinstance(management_endpoint_uri, str):
raise TypeError("Expected argument 'management_endpoint_uri' to be a str")
pulumi.set(__self__, "management_endpoint_uri", management_endpoint_uri)
if monitoring_configuration and not isinstance(monitoring_configuration, str):
raise TypeError("Expected argument 'monitoring_configuration' to be a str")
pulumi.set(__self__, "monitoring_configuration", monitoring_configuration)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_location and not isinstance(resource_location, str):
raise TypeError("Expected argument 'resource_location' to be a str")
pulumi.set(__self__, "resource_location", resource_location)
if server_certificate and not isinstance(server_certificate, str):
raise TypeError("Expected argument 'server_certificate' to be a str")
pulumi.set(__self__, "server_certificate", server_certificate)
if server_id and not isinstance(server_id, str):
raise TypeError("Expected argument 'server_id' to be a str")
pulumi.set(__self__, "server_id", server_id)
if server_managementt_error_code and not isinstance(server_managementt_error_code, int):
raise TypeError("Expected argument 'server_managementt_error_code' to be a int")
pulumi.set(__self__, "server_managementt_error_code", server_managementt_error_code)
if server_os_version and not isinstance(server_os_version, str):
raise TypeError("Expected argument 'server_os_version' to be a str")
pulumi.set(__self__, "server_os_version", server_os_version)
if server_role and not isinstance(server_role, str):
raise TypeError("Expected argument 'server_role' to be a str")
pulumi.set(__self__, "server_role", server_role)
if service_location and not isinstance(service_location, str):
raise TypeError("Expected argument 'service_location' to be a str")
pulumi.set(__self__, "service_location", service_location)
if storage_sync_service_uid and not isinstance(storage_sync_service_uid, str):
raise TypeError("Expected argument 'storage_sync_service_uid' to be a str")
pulumi.set(__self__, "storage_sync_service_uid", storage_sync_service_uid)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="agentVersion")
    def agent_version(self) -> Optional[str]:
        """Value of the ``agent_version`` output property, if set."""
        return pulumi.get(self, "agent_version")
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[str]:
        """Value of the ``cluster_id`` output property, if set."""
        return pulumi.get(self, "cluster_id")
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> Optional[str]:
        """Value of the ``cluster_name`` output property, if set."""
        return pulumi.get(self, "cluster_name")
    @property
    @pulumi.getter(name="discoveryEndpointUri")
    def discovery_endpoint_uri(self) -> Optional[str]:
        """Value of the ``discovery_endpoint_uri`` output property, if set."""
        return pulumi.get(self, "discovery_endpoint_uri")
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Value of the ``friendly_name`` output property, if set."""
        return pulumi.get(self, "friendly_name")
    @property
    @pulumi.getter
    def id(self) -> str:
        """Value of the ``id`` output property."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="lastHeartBeat")
    def last_heart_beat(self) -> Optional[str]:
        """Value of the ``last_heart_beat`` output property, if set."""
        return pulumi.get(self, "last_heart_beat")
    @property
    @pulumi.getter(name="lastOperationName")
    def last_operation_name(self) -> Optional[str]:
        """Value of the ``last_operation_name`` output property, if set."""
        return pulumi.get(self, "last_operation_name")
    @property
    @pulumi.getter(name="lastWorkflowId")
    def last_workflow_id(self) -> Optional[str]:
        """Value of the ``last_workflow_id`` output property, if set."""
        return pulumi.get(self, "last_workflow_id")
    @property
    @pulumi.getter(name="managementEndpointUri")
    def management_endpoint_uri(self) -> Optional[str]:
        """Value of the ``management_endpoint_uri`` output property, if set."""
        return pulumi.get(self, "management_endpoint_uri")
    @property
    @pulumi.getter(name="monitoringConfiguration")
    def monitoring_configuration(self) -> Optional[str]:
        """Value of the ``monitoring_configuration`` output property, if set."""
        return pulumi.get(self, "monitoring_configuration")
    @property
    @pulumi.getter
    def name(self) -> str:
        """Value of the ``name`` output property."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[str]:
        """Value of the ``provisioning_state`` output property, if set."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="resourceLocation")
    def resource_location(self) -> Optional[str]:
        """Value of the ``resource_location`` output property, if set."""
        return pulumi.get(self, "resource_location")
    @property
    @pulumi.getter(name="serverCertificate")
    def server_certificate(self) -> Optional[str]:
        """Value of the ``server_certificate`` output property, if set."""
        return pulumi.get(self, "server_certificate")
    @property
    @pulumi.getter(name="serverId")
    def server_id(self) -> Optional[str]:
        """Value of the ``server_id`` output property, if set."""
        return pulumi.get(self, "server_id")
    @property
    @pulumi.getter(name="serverManagementtErrorCode")
    def server_managementt_error_code(self) -> Optional[int]:
        """Value of the ``server_managementt_error_code`` output property, if set.

        NOTE(review): the doubled 't' mirrors the upstream wire name
        ``serverManagementtErrorCode``; renaming it here would break the API.
        """
        return pulumi.get(self, "server_managementt_error_code")
    @property
    @pulumi.getter(name="serverOSVersion")
    def server_os_version(self) -> Optional[str]:
        """Value of the ``server_os_version`` output property, if set."""
        return pulumi.get(self, "server_os_version")
    @property
    @pulumi.getter(name="serverRole")
    def server_role(self) -> Optional[str]:
        """Value of the ``server_role`` output property, if set."""
        return pulumi.get(self, "server_role")
    @property
    @pulumi.getter(name="serviceLocation")
    def service_location(self) -> Optional[str]:
        """Value of the ``service_location`` output property, if set."""
        return pulumi.get(self, "service_location")
    @property
    @pulumi.getter(name="storageSyncServiceUid")
    def storage_sync_service_uid(self) -> Optional[str]:
        """Value of the ``storage_sync_service_uid`` output property, if set."""
        return pulumi.get(self, "storage_sync_service_uid")
    @property
    @pulumi.getter
    def type(self) -> str:
        """Value of the ``type`` output property."""
        return pulumi.get(self, "type")
class AwaitableGetRegisteredServerResult(GetRegisteredServerResult):
    """Awaitable wrapper around :class:`GetRegisteredServerResult`.

    Lets the same result object be used both synchronously and with
    ``await`` in async code.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator,
        # which is what the ``await`` protocol requires; it completes
        # immediately with a plain result object.
        if False:
            yield self
        return GetRegisteredServerResult(
            agent_version=self.agent_version,
            cluster_id=self.cluster_id,
            cluster_name=self.cluster_name,
            discovery_endpoint_uri=self.discovery_endpoint_uri,
            friendly_name=self.friendly_name,
            id=self.id,
            last_heart_beat=self.last_heart_beat,
            last_operation_name=self.last_operation_name,
            last_workflow_id=self.last_workflow_id,
            management_endpoint_uri=self.management_endpoint_uri,
            monitoring_configuration=self.monitoring_configuration,
            name=self.name,
            provisioning_state=self.provisioning_state,
            resource_location=self.resource_location,
            server_certificate=self.server_certificate,
            server_id=self.server_id,
            server_managementt_error_code=self.server_managementt_error_code,
            server_os_version=self.server_os_version,
            server_role=self.server_role,
            service_location=self.service_location,
            storage_sync_service_uid=self.storage_sync_service_uid,
            type=self.type)
def get_registered_server(resource_group_name: Optional[str] = None,
                          server_id: Optional[str] = None,
                          storage_sync_service_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegisteredServerResult:
    """Fetch a registered server via the
    ``azure-native:storagesync/v20180701:getRegisteredServer`` invoke.

    :param resource_group_name: name of the resource group, forwarded as
        ``resourceGroupName``
    :param server_id: GUID of the server, forwarded as ``serverId``
    :param storage_sync_service_name: name of the Storage Sync Service,
        forwarded as ``storageSyncServiceName``
    :param opts: invoke options; created when omitted
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverId'] = server_id
    __args__['storageSyncServiceName'] = storage_sync_service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # NOTE(review): this mutates a caller-supplied ``opts`` object;
        # confirm that is intended (it matches this file's codegen style).
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:storagesync/v20180701:getRegisteredServer', __args__, opts=opts, typ=GetRegisteredServerResult).value
    return AwaitableGetRegisteredServerResult(
        agent_version=__ret__.agent_version,
        cluster_id=__ret__.cluster_id,
        cluster_name=__ret__.cluster_name,
        discovery_endpoint_uri=__ret__.discovery_endpoint_uri,
        friendly_name=__ret__.friendly_name,
        id=__ret__.id,
        last_heart_beat=__ret__.last_heart_beat,
        last_operation_name=__ret__.last_operation_name,
        last_workflow_id=__ret__.last_workflow_id,
        management_endpoint_uri=__ret__.management_endpoint_uri,
        monitoring_configuration=__ret__.monitoring_configuration,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        resource_location=__ret__.resource_location,
        server_certificate=__ret__.server_certificate,
        server_id=__ret__.server_id,
        server_managementt_error_code=__ret__.server_managementt_error_code,
        server_os_version=__ret__.server_os_version,
        server_role=__ret__.server_role,
        service_location=__ret__.service_location,
        storage_sync_service_uid=__ret__.storage_sync_service_uid,
        type=__ret__.type)
| true | true |
f7200af076a3e5be760cbc8be2e5197296b523de | 428 | py | Python | exams/forms.py | WillOnGit/exam-timetable | fec170025c39144299d61ea323eed3a000b61cf9 | [
"MIT"
] | null | null | null | exams/forms.py | WillOnGit/exam-timetable | fec170025c39144299d61ea323eed3a000b61cf9 | [
"MIT"
] | null | null | null | exams/forms.py | WillOnGit/exam-timetable | fec170025c39144299d61ea323eed3a000b61cf9 | [
"MIT"
] | null | null | null | from .models import ExamVenue
from django.forms import ModelForm
class RestrictedResponseForm(ModelForm):
    """ModelForm that restricts the ``assigned_venue`` choices to the venues
    belonging to the form instance's exam."""

    def __init__(self, *args, **kwargs):
        super(RestrictedResponseForm, self).__init__(*args, **kwargs)
        # Bug fix: a bare ``except:`` also swallows SystemExit and
        # KeyboardInterrupt; catch Exception instead.  The fallback covers
        # instances without a related exam (e.g. unsaved instances).
        try:
            self.fields['assigned_venue'].queryset = ExamVenue.objects.filter(exam=self.instance.exam)
        except Exception:
            self.fields['assigned_venue'].queryset = ExamVenue.objects.none()
| 35.666667 | 102 | 0.700935 | from .models import ExamVenue
from django.forms import ModelForm
class RestrictedResponseForm(ModelForm):
    """ModelForm whose ``assigned_venue`` field only offers venues of the
    instance's exam."""

    def __init__(self, *args, **kwargs):
        super(RestrictedResponseForm, self).__init__(*args, **kwargs)
        # Bug fix: replace the bare ``except:`` (which would also swallow
        # SystemExit/KeyboardInterrupt) with ``except Exception``.
        try:
            self.fields['assigned_venue'].queryset = ExamVenue.objects.filter(exam=self.instance.exam)
        except Exception:
            self.fields['assigned_venue'].queryset = ExamVenue.objects.none()
| true | true |
f7200b2aa450884bed39014c6f0f6fc44dd2a5aa | 4,253 | py | Python | src/Infraestructura/ccutils/databases/configuration.py | lbarriosh/cygnus-cloud | 1a17fbb55de69adba2ec42db4c9a063865af4fbd | [
"Apache-2.0"
] | 3 | 2017-09-03T22:01:35.000Z | 2019-01-10T05:40:44.000Z | src/web/CygnusCloud/modules/ccutils/databases/configuration.py | lbarriosh/cygnus-cloud | 1a17fbb55de69adba2ec42db4c9a063865af4fbd | [
"Apache-2.0"
] | null | null | null | src/web/CygnusCloud/modules/ccutils/databases/configuration.py | lbarriosh/cygnus-cloud | 1a17fbb55de69adba2ec42db4c9a063865af4fbd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
'''
========================================================================
CygnusCloud
========================================================================
File: configuration.py
Version: 3.0
Description: Database configurator definitions
Copyright 2012-13 Luis Barrios Hernández, Adrián Fernández Hernández,
Samuel Guayerbas Martín
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import MySQLdb
import os.path
from ccutils.processes.childProcessManager import ChildProcessManager
class DBConfigurator(object):
    """
    Provides methods to create/drop MySQL databases, register users and run
    SQL scripts using the MySQL root account.
    """

    def __init__(self, rootPassword):
        """
        Initializes the configurator's state.

        Args:
            rootPassword: root's password
        """
        self.__rootPassword = rootPassword

    def addUser(self, user, password, databaseName, allPrivileges=True):
        """
        Adds a new MySQL user.

        Args:
            user: the new user's name
            password: the new user's password
            databaseName: the database's name
            allPrivileges: if True, the new user will be able to do everything
                with the database. If False, the new user will only be able to
                execute queries in the database.

        Returns:
            Nothing
        """
        # NOTE(security): the statement is built by string concatenation, so
        # user, password and databaseName must come from trusted
        # configuration — MySQL identifiers cannot be bound as parameters.
        conn = MySQLdb.Connection(host="localhost", user="root", passwd=self.__rootPassword)
        try:
            cursor = conn.cursor()
            try:
                privileges = "ALL" if allPrivileges else "SELECT"
                cursor.execute("GRANT " + privileges + " ON " + databaseName + ".* TO '" + user + "'@'" + "localhost" + "' IDENTIFIED BY '" + password + "';")
            finally:
                # always release the cursor, even when execute() raises
                cursor.close()
        finally:
            conn.close()

    def createDatabase(self, databaseName):
        """
        Creates a MySQL database.

        Args:
            databaseName: the new database's name

        Returns:
            Nothing
        """
        db = MySQLdb.connect(host='localhost', user="root", passwd=self.__rootPassword)
        try:
            cursor = db.cursor()
            try:
                # NOTE(security): identifiers cannot be parameterized;
                # databaseName must be trusted.
                cursor.execute("CREATE DATABASE " + databaseName + ";")
            finally:
                cursor.close()
        finally:
            db.close()

    def dropDatabase(self, databaseName):
        """
        Deletes a MySQL database.

        Args:
            databaseName: the database to delete's name

        Returns:
            Nothing
        """
        db = MySQLdb.connect(host='localhost', user="root", passwd=self.__rootPassword)
        try:
            cursor = db.cursor()
            try:
                # NOTE(security): identifiers cannot be parameterized;
                # databaseName must be trusted.
                cursor.execute("DROP DATABASE " + databaseName + ";")
            finally:
                cursor.close()
        finally:
            db.close()

    def runSQLScript(self, database, sqlFilePath, username="root", password=None):
        """
        Runs a SQL script through the mysql command-line client.

        Args:
            database: unused; kept for backward compatibility
            sqlFilePath: the SQL script path
            username: a MySQL user name (default "root")
            password: the user's password
        """
        passwordCommand = ""
        if (password != None and len(password) != 0):
            passwordCommand = "-p" + str(password)
        filePath = os.path.abspath(sqlFilePath)
        # Bug fix: the username argument was previously ignored — the command
        # always ran as root. The default keeps the old behavior.
        command = "mysql -u{0} {1} -e \"source {2}\"".format(username, passwordCommand, filePath)
        ChildProcessManager.runCommandInForeground(command, Exception)

    @staticmethod
    def __isEmpty__(string):
        """
        Returns True iff the string contains only whitespace characters
        (spaces, newlines, carriage returns and tabs).
        """
        # Bug fix: the original compared against the letter 't' instead of
        # the tab character '\t', so "t" was treated as empty.
        for c in string:
            if c not in " \n\r\t":
                return False
        return True
import MySQLdb
import os.path
from ccutils.processes.childProcessManager import ChildProcessManager
class DBConfigurator(object):
    """Utility to configure MySQL databases, users and scripts as root."""

    def __init__(self, rootPassword):
        # rootPassword: the MySQL root password
        self.__rootPassword = rootPassword

    def addUser(self, user, password, databaseName, allPrivileges=True):
        """Grant ALL (or SELECT-only) privileges on databaseName to a new user."""
        # NOTE(security): statement is string-concatenated; identifiers cannot
        # be bound as parameters, so inputs must come from trusted config.
        conn = MySQLdb.Connection(host="localhost", user="root", passwd=self.__rootPassword)
        try:
            cursor = conn.cursor()
            try:
                privileges = "ALL" if allPrivileges else "SELECT"
                cursor.execute("GRANT " + privileges + " ON " + databaseName + ".* TO '" + user + "'@'" + "localhost" + "' IDENTIFIED BY '" + password + "';")
            finally:
                cursor.close()
        finally:
            conn.close()

    def createDatabase(self, databaseName):
        """Create the given MySQL database."""
        db = MySQLdb.connect(host='localhost', user="root", passwd=self.__rootPassword)
        try:
            cursor = db.cursor()
            try:
                cursor.execute("CREATE DATABASE " + databaseName + ";")
            finally:
                cursor.close()
        finally:
            db.close()

    def dropDatabase(self, databaseName):
        """Drop the given MySQL database."""
        db = MySQLdb.connect(host='localhost', user="root", passwd=self.__rootPassword)
        try:
            cursor = db.cursor()
            try:
                cursor.execute("DROP DATABASE " + databaseName + ";")
            finally:
                cursor.close()
        finally:
            db.close()

    def runSQLScript(self, database, sqlFilePath, username="root", password=None):
        """Run a SQL script through the mysql command-line client."""
        passwordCommand = ""
        if (password != None and len(password) != 0):
            passwordCommand = "-p" + str(password)
        filePath = os.path.abspath(sqlFilePath)
        # Bug fix: username was ignored before (always ran as root);
        # the default keeps the old behavior.
        command = "mysql -u{0} {1} -e \"source {2}\"".format(username, passwordCommand, filePath)
        ChildProcessManager.runCommandInForeground(command, Exception)

    @staticmethod
    def __isEmpty__(string):
        """True iff string holds only spaces, newlines, CRs and tabs."""
        # Bug fix: original tested the letter 't' instead of '\t'.
        for c in string:
            if c not in " \n\r\t":
                return False
        return True
f7200b3bdc7b35e8600608ed9b126abac2ec17a1 | 557 | py | Python | shop/migrations/0024_auto_20200311_0154.py | manson800819/test | 6df7d92eababe76a54585cb8102a00a6d79ca467 | [
"MIT"
] | null | null | null | shop/migrations/0024_auto_20200311_0154.py | manson800819/test | 6df7d92eababe76a54585cb8102a00a6d79ca467 | [
"MIT"
] | null | null | null | shop/migrations/0024_auto_20200311_0154.py | manson800819/test | 6df7d92eababe76a54585cb8102a00a6d79ca467 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-03-11 01:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: alters the ``type1`` FK on ``product``."""

    dependencies = [
        ('shop', '0023_auto_20200311_0137'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='type1',
            # CASCADE on delete; reverse accessor named ``products_t``
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products_t', to='shop.Type1'),
        ),
    ]
| 25.318182 | 125 | 0.653501 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration altering ``product.type1`` (FK to shop.Type1)."""

    dependencies = [
        ('shop', '0023_auto_20200311_0137'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='type1',
            # CASCADE on delete; reverse accessor named ``products_t``
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products_t', to='shop.Type1'),
        ),
    ]
| true | true |
f7200b4756a033cb419019d6f292992490dafe65 | 28,992 | py | Python | geoopt/manifolds/base.py | grapefroot/geoopt | 8f219a820e24b87ac68136ff66af11b25d5c04c5 | [
"Apache-2.0"
] | 4 | 2020-01-27T15:37:19.000Z | 2020-12-06T02:51:03.000Z | geoopt/manifolds/base.py | grapefroot/geoopt | 8f219a820e24b87ac68136ff66af11b25d5c04c5 | [
"Apache-2.0"
] | null | null | null | geoopt/manifolds/base.py | grapefroot/geoopt | 8f219a820e24b87ac68136ff66af11b25d5c04c5 | [
"Apache-2.0"
] | 1 | 2021-05-07T22:01:41.000Z | 2021-05-07T22:01:41.000Z | import abc
import torch.nn
import itertools
from typing import Optional, Tuple, Union
__all__ = ["Manifold", "ScalingInfo"]
class ScalingInfo(object):
    """
    Declares how a method's inputs and outputs rescale with the manifold scale.

    The rule is ``scaled_value = value * scaling ** power``; a power of zero
    means the value is left untouched. Outputs of a function are referenced
    positionally (``*results``); inputs are referenced by keyword
    (``**kwargs``), which is sufficient as long as overriding methods keep
    the original signature.
    """

    # sentinel marking a method as incompatible with the Scaled wrapper
    NotCompatible = object()

    __slots__ = ["kwargs", "results"]

    def __init__(self, *results: float, **kwargs: float):
        self.kwargs = kwargs
        self.results = results
class ScalingStorage(dict):
    """
    Registry mapping method names to their :class:`ScalingInfo`.

    It is a plain ``dict`` whose ``__call__`` acts as a decorator factory, so
    rescaling rules can be declared right next to the method definition::

        class PoincareBall(Manifold):
            __scaling__ = Manifold.__scaling__.copy()

            @__scaling__(ScalingInfo(1))          # output * scaling ** 1
            def dist0(self, x, *, dim=-1, keepdim=False):
                ...

            @__scaling__(ScalingInfo(u=-1))       # input u * scaling ** -1
            def expmap0(self, u, *, dim=-1, project=True):
                ...

    Methods that cannot be rescaled at all are registered with
    ``ScalingInfo.NotCompatible``::

            @__scaling__(ScalingInfo.NotCompatible)
            def mobius_fn_apply(self, fn, x, *args, **kwargs):
                ...
    """

    def __call__(self, scaling_info: ScalingInfo):
        # decorator factory: store the info under the method's name and
        # return the method unchanged
        def decorator(method):
            self[method.__name__] = scaling_info
            return method

        return decorator

    def copy(self):
        # preserve the subclass type (plain dict.copy would return dict)
        return type(self)(self)
class Manifold(torch.nn.Module, metaclass=abc.ABCMeta):
    """
    Abstract base class for Riemannian manifolds.

    Inherits :class:`torch.nn.Module` so manifolds can carry buffers and
    parameters, and uses :class:`abc.ABCMeta` to declare the required
    geometry operations (``retr``, ``expmap``, ``inner``, ...) as abstract.
    """

    # registry of per-method rescaling rules, populated by the
    # ``@__scaling__(...)`` decorators on the methods below
    __scaling__ = ScalingStorage()  # will be filled along with implementation below
    # class attributes to be set by concrete subclasses:
    name = None  # human readable manifold name
    ndim = None  # number of trailing dimensions treated as manifold dims
    reversible = None  # NOTE(review): semantics defined by subclasses — confirm
    # Manifold is not a computational Module; plain forward is disabled
    forward = NotImplemented

    def __init__(self, **kwargs):
        # **kwargs are accepted but not used here — NOTE(review): presumably
        # to ease cooperative subclass __init__ chains; confirm
        super().__init__()
@property
def device(self) -> Optional[torch.device]:
"""
Manifold device.
Returns
-------
Optional[torch.device]
"""
p = next(itertools.chain(self.buffers(), self.parameters()), None)
if p is not None:
return p.device
else:
return None
@property
def dtype(self) -> Optional[torch.dtype]:
"""
Manifold dtype.
Returns
-------
Optional[torch.dtype]
"""
p = next(itertools.chain(self.buffers(), self.parameters()), None)
if p is not None:
return p.dtype
else:
return None
def check_point(
self, x: torch.Tensor, *, explain=False
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
Check if point is valid to be used with the manifold.
Parameters
----------
x : torch.Tensor
point on the manifold
explain: bool
return an additional information on check
Returns
-------
bool
boolean indicating if tensor is valid and reason of failure if False
Notes
-----
This check is compatible to what optimizer expects, last dimensions are treated as manifold dimensions
"""
ok, reason = self._check_shape(x.shape, "x")
if explain:
return ok, reason
else:
return ok
def assert_check_point(self, x: torch.Tensor):
"""
Check if point is valid to be used with the manifold and raise an error with informative message on failure.
Parameters
----------
x : torch.Tensor
point on the manifold
Notes
-----
This check is compatible to what optimizer expects, last dimensions are treated as manifold dimensions
"""
ok, reason = self._check_shape(x.shape, "x")
if not ok:
raise ValueError(
"`x` seems to be not valid "
"tensor for {} manifold.\nerror: {}".format(self.name, reason)
)
def check_vector(self, u: torch.Tensor, *, explain=False):
"""
Check if vector is valid to be used with the manifold.
Parameters
----------
u : torch.Tensor
vector on the tangent plane
explain: bool
return an additional information on check
Returns
-------
bool
boolean indicating if tensor is valid and reason of failure if False
Notes
-----
This check is compatible to what optimizer expects, last dimensions are treated as manifold dimensions
"""
ok, reason = self._check_shape(u.shape, "u")
if explain:
return ok, reason
else:
return ok
def assert_check_vector(self, u: torch.Tensor):
"""
Check if vector is valid to be used with the manifold and raise an error with informative message on failure.
Parameters
----------
u : torch.Tensor
vector on the tangent plane
Notes
-----
This check is compatible to what optimizer expects, last dimensions are treated as manifold dimensions
"""
ok, reason = self._check_shape(u.shape, "u")
if not ok:
raise ValueError(
"`u` seems to be not valid "
"tensor for {} manifold.\nerror: {}".format(self.name, reason)
)
def check_point_on_manifold(
self, x: torch.Tensor, *, explain=False, atol=1e-5, rtol=1e-5
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
Check if point :math:`x` is lying on the manifold.
Parameters
----------
x : torch.Tensor
point on the manifold
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
explain: bool
return an additional information on check
Returns
-------
bool
boolean indicating if tensor is valid and reason of failure if False
Notes
-----
This check is compatible to what optimizer expects, last dimensions are treated as manifold dimensions
"""
ok, reason = self._check_shape(x.shape, "x")
if ok:
ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
if explain:
return ok, reason
else:
return ok
def assert_check_point_on_manifold(self, x: torch.Tensor, *, atol=1e-5, rtol=1e-5):
"""
Check if point :math`x` is lying on the manifold and raise an error with informative message on failure.
Parameters
----------
x : torch.Tensor
point on the manifold
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
"""
self.assert_check_point(x)
ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
if not ok:
raise ValueError(
"`x` seems to be a tensor "
"not lying on {} manifold.\nerror: {}".format(self.name, reason)
)
def check_vector_on_tangent(
self,
x: torch.Tensor,
u: torch.Tensor,
*,
ok_point=False,
explain=False,
atol=1e-5,
rtol=1e-5
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
Check if :math:`u` is lying on the tangent space to x.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
vector on the tangent space to :math:`x`
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
explain: bool
return an additional information on check
ok_point: bool
is a check for point required?
Returns
-------
bool
boolean indicating if tensor is valid and reason of failure if False
"""
if not ok_point:
ok, reason = self._check_shape(x.shape, "x")
if ok:
ok, reason = self._check_shape(u.shape, "u")
if ok:
ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
else:
ok = True
reason = None
if ok:
ok, reason = self._check_vector_on_tangent(x, u, atol=atol, rtol=rtol)
if explain:
return ok, reason
else:
return ok
def assert_check_vector_on_tangent(
self, x: torch.Tensor, u: torch.Tensor, *, ok_point=False, atol=1e-5, rtol=1e-5
):
"""
Check if u :math:`u` is lying on the tangent space to x and raise an error on fail.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
vector on the tangent space to :math:`x`
atol: float
absolute tolerance as in :func:`numpy.allclose`
rtol: float
relative tolerance as in :func:`numpy.allclose`
ok_point: bool
is a check for point required?
"""
if not ok_point:
ok, reason = self._check_shape(x.shape, "x")
if ok:
ok, reason = self._check_shape(u.shape, "u")
if ok:
ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
else:
ok = True
reason = None
if ok:
ok, reason = self._check_vector_on_tangent(x, u, atol=atol, rtol=rtol)
if not ok:
raise ValueError(
"`u` seems to be a tensor "
"not lying on tangent space to `x` for {} manifold.\nerror: {}".format(
self.name, reason
)
)
    @__scaling__(ScalingInfo(1))  # result rescales as distance * scaling ** 1
    def dist(self, x: torch.Tensor, y: torch.Tensor, *, keepdim: bool = False) -> torch.Tensor:
        """
        Compute distance between 2 points on the manifold that is the shortest path along geodesics.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        y : torch.Tensor
            point on the manifold
        keepdim : bool
            keep the last dim?

        Returns
        -------
        torch.Tensor
            distance between two points

        Raises
        ------
        NotImplementedError
            the base class provides no default implementation
        """
        raise NotImplementedError
@__scaling__(ScalingInfo(2))
def dist2(self, x: torch.Tensor, y: torch.Tensor, *, keepdim=False) -> torch.Tensor:
"""
Compute squared distance between 2 points on the manifold that is the shortest path along geodesics.
Parameters
----------
x : torch.Tensor
point on the manifold
y : torch.Tensor
point on the manifold
keepdim : bool
keep the last dim?
Returns
-------
torch.Tensor
squared distance between two points
"""
return self.dist(x, y, keepdim=keepdim) ** 2
    @abc.abstractmethod
    @__scaling__(ScalingInfo(u=-1))  # input u rescales as u * scaling ** -1
    def retr(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """
        Perform a retraction from point :math:`x` with given direction :math:`u`.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            tangent vector at point :math:`x`

        Returns
        -------
        torch.Tensor
            transported point

        Raises
        ------
        NotImplementedError
            abstract method; every concrete manifold must implement it
        """
        raise NotImplementedError
    @abc.abstractmethod
    @__scaling__(ScalingInfo(u=-1))  # input u rescales as u * scaling ** -1
    def expmap(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        r"""
        Perform an exponential map :math:`\operatorname{Exp}_x(u)`.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            tangent vector at point :math:`x`

        Returns
        -------
        torch.Tensor
            transported point

        Raises
        ------
        NotImplementedError
            abstract method; every concrete manifold must implement it
        """
        raise NotImplementedError
    # NOTE: unlike retr/expmap this is not marked @abc.abstractmethod, so
    # subclasses are not forced to provide it
    @__scaling__(ScalingInfo(1))
    def logmap(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        r"""
        Perform an logarithmic map :math:`\operatorname{Log}_{x}(y)`.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        y : torch.Tensor
            point on the manifold

        Returns
        -------
        torch.Tensor
            tangent vector

        Raises
        ------
        NotImplementedError
            the base class provides no default implementation
        """
        raise NotImplementedError
@__scaling__(ScalingInfo(u=-1))
def expmap_transp(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Perform an exponential map and vector transport from point :math:`x` with given direction :math:`u`.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : torch.Tensor
tangent vector at point :math:`x` to be transported
Returns
-------
torch.Tensor
transported point
"""
y = self.expmap(x, u)
v_transp = self.transp(x, y, v)
return y, v_transp
@__scaling__(ScalingInfo(u=-1))
def retr_transp(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Perform a retraction + vector transport at once.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : torch.Tensor
tangent vector at point :math:`x` to be transported
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
transported point and vectors
Notes
-----
Sometimes this is a far more optimal way to preform retraction + vector transport
"""
y = self.retr(x, u)
v_transp = self.transp(x, y, v)
return y, v_transp
@__scaling__(ScalingInfo(u=-1))
def transp_follow_retr(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> torch.Tensor:
r"""
Perform vector transport following :math:`u`: :math:`\mathfrak{T}_{x\to\operatorname{retr}(x, u)}(v)`.
This operation is sometimes is much more simpler and can be optimized.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : torch.Tensor
tangent vector at point :math:`x` to be transported
Returns
-------
torch.Tensor
transported tensor
"""
y = self.retr(x, u)
return self.transp(x, y, v)
@__scaling__(ScalingInfo(u=-1))
def transp_follow_expmap(
self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> torch.Tensor:
r"""
Perform vector transport following :math:`u`: :math:`\mathfrak{T}_{x\to\operatorname{Exp}(x, u)}(v)`.
Here, :math:`\operatorname{Exp}` is the best possible approximation of the true exponential map.
There are cases when the exact variant is hard or impossible implement, therefore a
fallback, non-exact, implementation is used.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
v : torch.Tensor
tangent vector at point :math:`x` to be transported
Returns
-------
torch.Tensor
transported tensor
"""
y = self.expmap(x, u)
return self.transp(x, y, v)
    # NOTE: not marked @abc.abstractmethod — subclasses that need transport
    # must override it, otherwise calling it raises NotImplementedError
    def transp(self, x: torch.Tensor, y: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        r"""
        Perform vector transport :math:`\mathfrak{T}_{x\to y}(v)`.

        Parameters
        ----------
        x : torch.Tensor
            start point on the manifold
        y : torch.Tensor
            target point on the manifold
        v : torch.Tensor
            tangent vector at point :math:`x`

        Returns
        -------
        torch.Tensor
            transported tensor

        Raises
        ------
        NotImplementedError
            the base class provides no default implementation
        """
        raise NotImplementedError
    @abc.abstractmethod
    def inner(
        self, x: torch.Tensor, u: torch.Tensor, v: Optional[torch.Tensor] = None, *, keepdim: bool = False
    ) -> torch.Tensor:
        """
        Inner product for tangent vectors at point :math:`x`.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            tangent vector at point :math:`x`
        v : Optional[torch.Tensor]
            tangent vector at point :math:`x`; when omitted, implementations
            compute the product of ``u`` with itself
        keepdim : bool
            keep the last dim?

        Returns
        -------
        torch.Tensor
            inner product (broadcasted)

        Raises
        ------
        NotImplementedError
            abstract method; every concrete manifold must implement it
        """
        raise NotImplementedError
    def component_inner(
        self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor = None
    ) -> torch.Tensor:
        """
        Inner product for tangent vectors at point :math:`x`, computed per
        manifold component.

        For all manifolds except ProductManifold the result is the same as
        ``inner`` with ``keepdim=True``. ProductManifold overrides this to
        compute the inner product for each component separately and then
        build the output with the correct tiling and reshaping.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            tangent vector at point :math:`x`
        v : Optional[torch.Tensor]
            tangent vector at point :math:`x`

        Returns
        -------
        torch.Tensor
            inner product component wise (broadcasted)

        Notes
        -----
        The purpose of this method is better adaptive properties in
        optimization, since ProductManifold hides its structure behind the
        public API.
        """
        # default: delegate to the regular inner product, keeping the dim
        return self.inner(x, u, v, keepdim=True)
def norm(self, x: torch.Tensor, u: torch.Tensor, *, keepdim=False) -> torch.Tensor:
"""
Norm of a tangent vector at point :math:`x`.
Parameters
----------
x : torch.Tensor
point on the manifold
u : torch.Tensor
tangent vector at point :math:`x`
keepdim : bool
keep the last dim?
Returns
-------
torch.Tensor
inner product (broadcasted)
"""
return self.inner(x, u, keepdim=keepdim) ** 0.5
    @abc.abstractmethod
    def proju(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """
        Project vector :math:`u` on a tangent space for :math:`x`, usually is the same as :meth:`egrad2rgrad`.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            vector to be projected

        Returns
        -------
        torch.Tensor
            projected vector

        Raises
        ------
        NotImplementedError
            abstract method; every concrete manifold must implement it
        """
        raise NotImplementedError
    @abc.abstractmethod
    def egrad2rgrad(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """
        Transform gradient computed using autodiff to the correct Riemannian gradient for the point :math:`x`.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            gradient to be projected

        Returns
        -------
        torch.Tensor
            grad vector in the Riemannian manifold

        Raises
        ------
        NotImplementedError
            abstract method; every concrete manifold must implement it
        """
        raise NotImplementedError
    @abc.abstractmethod
    def projx(self, x: torch.Tensor) -> torch.Tensor:
        """
        Project point :math:`x` on the manifold.

        Parameters
        ----------
        x : torch.Tensor
            point to be projected

        Returns
        -------
        torch.Tensor
            projected point

        Raises
        ------
        NotImplementedError
            abstract method; every concrete manifold must implement it
        """
        raise NotImplementedError
def _check_shape(
self, shape: Tuple[int], name: str
) -> Union[Tuple[bool, Optional[str]], bool]:
"""
Util to check shape.
Exhaustive implementation for checking if
a given point has valid dimension size,
shape, etc. It should return boolean and
a reason of failure if check is not passed
Parameters
----------
shape : Tuple[int]
shape of point on the manifold
name : str
name to be present in errors
Returns
-------
bool, str or None
check result and the reason of fail if any
"""
ok = len(shape) >= self.ndim
if not ok:
reason = "'{}' on the {} requires more than {} dim".format(
name, self, self.ndim
)
else:
reason = None
return ok, reason
def _assert_check_shape(self, shape: Tuple[int], name: str):
    """
    Validate ``shape`` via :meth:`_check_shape`, raising on failure.

    Parameters
    ----------
    shape : tuple
        shape of point on the manifold
    name : str
        name to be present in errors

    Raises
    ------
    ValueError
        if the shape check does not pass
    """
    ok, reason = self._check_shape(shape, name)
    if ok:
        return
    raise ValueError(reason)
@abc.abstractmethod
def _check_point_on_manifold(
    self, x: torch.Tensor, *, atol=1e-5, rtol=1e-5
) -> Union[Tuple[bool, Optional[str]], bool]:
    """
    Numerically verify that ``x`` lies on the manifold.

    Implementations may assume the shape check has already passed and
    should return a boolean plus a failure reason (or ``None``).

    Parameters
    ----------
    x : torch.Tensor
        point on the manifold
    atol : float
        absolute tolerance as in :func:`numpy.allclose`
    rtol : float
        relative tolerance as in :func:`numpy.allclose`

    Returns
    -------
    bool, str or None
        check result and the reason of fail if any
    """
    raise NotImplementedError
@abc.abstractmethod
def _check_vector_on_tangent(
    self, x: torch.Tensor, u: torch.Tensor, *, atol=1e-5, rtol=1e-5
) -> Union[Tuple[bool, Optional[str]], bool]:
    """
    Numerically verify that ``u`` belongs to the tangent space at ``x``.

    Implementations may assume the point check has already passed and
    should return a boolean plus a failure reason (or ``None``).

    Parameters
    ----------
    x : torch.Tensor
    u : torch.Tensor
    atol : float
        absolute tolerance
    rtol : float
        relative tolerance

    Returns
    -------
    bool, str or None
        check result and the reason of fail if any
    """
    raise NotImplementedError
def extra_repr(self):
    """Return extra text for ``__repr__``; empty by default, override to extend."""
    return ""
def __repr__(self):
    """Render as ``Name(extra) manifold`` or ``Name manifold`` when no extra info."""
    extra = self.extra_repr()
    suffix = "({}) manifold".format(extra) if extra else " manifold"
    return self.name + suffix
def unpack_tensor(self, tensor: torch.Tensor) -> torch.Tensor:
    """
    Convert the packed tensor representation of a point into its working form.

    Simple manifolds store points as-is, so this is the identity; product or
    compound manifolds may store a different internal layout and override it.

    Parameters
    ----------
    tensor : torch.Tensor

    Returns
    -------
    torch.Tensor
    """
    return tensor
def pack_point(self, *tensors: torch.Tensor) -> torch.Tensor:
    """
    Construct a single-tensor representation of a manifold point.

    Regular manifolds accept exactly one tensor and return it unchanged;
    product manifolds override this to pack non-batch dimensions.

    Parameters
    ----------
    tensors : Tuple[torch.Tensor]

    Returns
    -------
    torch.Tensor
    """
    if len(tensors) == 1:
        return tensors[0]
    raise ValueError("1 tensor expected, got {}".format(len(tensors)))
def random(self, *size, dtype=None, device=None, **kwargs) -> torch.Tensor:
    """
    Draw a random point on the manifold.

    The exact distribution is manifold-specific and generally does not
    follow a uniform measure; subclasses provide the implementation.
    """
    raise NotImplementedError
def origin(
    self,
    *size: Union[int, Tuple[int]],
    dtype=None,
    device=None,
    seed: Optional[int] = 42
) -> torch.Tensor:
    """
    Create a reasonable point on the manifold in a deterministic way.

    When no analytic "zero point" exists, a deterministic sample is drawn
    with :meth:`random` under a fixed seed.

    Parameters
    ----------
    size : Union[int, Tuple[int]]
        the desired shape
    device : torch.device
        the desired device
    dtype : torch.dtype
        the desired dtype
    seed : Optional[int]
        controls deterministic randomness for manifolds that only provide
        ``.random``; ``None`` disables seeding (default: 42)

    Returns
    -------
    torch.Tensor
    """
    if seed is None:
        return self.random(*size, dtype=dtype, device=device)
    # Pseudorandom but reproducible: seed the global torch RNG, then restore
    # the previous state so callers are unaffected.
    saved_state = torch.random.get_rng_state()
    torch.random.manual_seed(seed)
    try:
        return self.random(*size, dtype=dtype, device=device)
    finally:
        torch.random.set_rng_state(saved_state)
| 30.485804 | 117 | 0.559051 | import abc
import torch.nn
import itertools
from typing import Optional, Tuple, Union
__all__ = ["Manifold", "ScalingInfo"]
class ScalingInfo(object):
    """
    Container of numeric scaling powers for a manifold method.

    ``results`` holds powers for positional return values and ``kwargs``
    powers for same-named keyword arguments; presumably consumed by
    scaled/product manifold wrappers — confirm against geoopt docs.
    """

    # Sentinel marking a method that cannot be rescaled at all.
    NotCompatible = object()
    __slots__ = ["kwargs", "results"]

    def __init__(self, *results: float, **kwargs: float):
        self.results = results
        self.kwargs = kwargs
class ScalingStorage(dict):
    """Mapping of method name -> ScalingInfo, populated via decorator use."""

    def __call__(self, scaling_info: ScalingInfo):
        # Using the storage instance as a decorator records ``scaling_info``
        # under the decorated function's name and returns the function as-is.
        def register(fn):
            self[fn.__name__] = scaling_info
            return fn
        return register

    def copy(self):
        # Preserve the subclass type; plain dict.copy() would return a dict.
        return self.__class__(self)
class Manifold(torch.nn.Module, metaclass=abc.ABCMeta):
    """
    Abstract base class for Riemannian manifolds.

    Subclasses implement the core operations (``retr``, ``expmap``,
    ``inner``, ``proju``, ``egrad2rgrad``, ``projx`` and the internal
    point/vector checks); the remaining API is derived from them here.
    """

    # Registry of per-method ScalingInfo, filled by the @__scaling__ decorator.
    __scaling__ = ScalingStorage()
    # Class attributes overridden by concrete subclasses:
    name = None        # human-readable manifold name used in repr/error text
    ndim = None        # minimum number of dims a point tensor must have
    reversible = None  # whether retraction + transport is exactly reversible
    # nn.Module.forward is intentionally left unimplemented for manifolds.
    forward = NotImplemented

    def __init__(self, **kwargs):
        super().__init__()

    @property
    def device(self) -> Optional[torch.device]:
        """Device of the first buffer/parameter, or None if there are none."""
        p = next(itertools.chain(self.buffers(), self.parameters()), None)
        if p is not None:
            return p.device
        else:
            return None

    @property
    def dtype(self) -> Optional[torch.dtype]:
        """Dtype of the first buffer/parameter, or None if there are none."""
        p = next(itertools.chain(self.buffers(), self.parameters()), None)
        if p is not None:
            return p.dtype
        else:
            return None

    def check_point(
        self, x: torch.Tensor, *, explain=False
    ) -> Union[Tuple[bool, Optional[str]], bool]:
        """Check that ``x`` has a valid shape; ``explain`` also returns the reason."""
        ok, reason = self._check_shape(x.shape, "x")
        if explain:
            return ok, reason
        else:
            return ok

    def assert_check_point(self, x: torch.Tensor):
        """Raise ValueError if ``x`` does not have a valid shape."""
        ok, reason = self._check_shape(x.shape, "x")
        if not ok:
            raise ValueError(
                "`x` seems to be not valid "
                "tensor for {} manifold.\nerror: {}".format(self.name, reason)
            )

    def check_vector(self, u: torch.Tensor, *, explain=False):
        """Check that ``u`` has a valid shape for a tangent vector."""
        ok, reason = self._check_shape(u.shape, "u")
        if explain:
            return ok, reason
        else:
            return ok

    def assert_check_vector(self, u: torch.Tensor):
        """Raise ValueError if ``u`` does not have a valid shape."""
        ok, reason = self._check_shape(u.shape, "u")
        if not ok:
            raise ValueError(
                "`u` seems to be not valid "
                "tensor for {} manifold.\nerror: {}".format(self.name, reason)
            )

    def check_point_on_manifold(
        self, x: torch.Tensor, *, explain=False, atol=1e-5, rtol=1e-5
    ) -> Union[Tuple[bool, Optional[str]], bool]:
        """Check shape validity plus numerical membership of ``x``."""
        ok, reason = self._check_shape(x.shape, "x")
        if ok:
            ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
        if explain:
            return ok, reason
        else:
            return ok

    def assert_check_point_on_manifold(self, x: torch.Tensor, *, atol=1e-5, rtol=1e-5):
        """Raise ValueError unless ``x`` lies on the manifold within tolerance."""
        self.assert_check_point(x)
        ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
        if not ok:
            raise ValueError(
                "`x` seems to be a tensor "
                "not lying on {} manifold.\nerror: {}".format(self.name, reason)
            )

    def check_vector_on_tangent(
        self,
        x: torch.Tensor,
        u: torch.Tensor,
        *,
        ok_point=False,
        explain=False,
        atol=1e-5,
        rtol=1e-5
    ) -> Union[Tuple[bool, Optional[str]], bool]:
        """
        Check that ``u`` lies in the tangent space at ``x``.

        ``ok_point=True`` skips re-validating ``x`` itself.
        """
        if not ok_point:
            ok, reason = self._check_shape(x.shape, "x")
            if ok:
                ok, reason = self._check_shape(u.shape, "u")
            if ok:
                ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
        else:
            ok = True
            reason = None
        if ok:
            ok, reason = self._check_vector_on_tangent(x, u, atol=atol, rtol=rtol)
        if explain:
            return ok, reason
        else:
            return ok

    def assert_check_vector_on_tangent(
        self, x: torch.Tensor, u: torch.Tensor, *, ok_point=False, atol=1e-5, rtol=1e-5
    ):
        """Raise ValueError unless ``u`` is in the tangent space at ``x``."""
        if not ok_point:
            ok, reason = self._check_shape(x.shape, "x")
            if ok:
                ok, reason = self._check_shape(u.shape, "u")
            if ok:
                ok, reason = self._check_point_on_manifold(x, atol=atol, rtol=rtol)
        else:
            ok = True
            reason = None
        if ok:
            ok, reason = self._check_vector_on_tangent(x, u, atol=atol, rtol=rtol)
        if not ok:
            raise ValueError(
                "`u` seems to be a tensor "
                "not lying on tangent space to `x` for {} manifold.\nerror: {}".format(
                    self.name, reason
                )
            )

    @__scaling__(ScalingInfo(1))
    def dist(self, x: torch.Tensor, y: torch.Tensor, *, keepdim=False) -> torch.Tensor:
        """Distance between ``x`` and ``y`` (optional for subclasses)."""
        raise NotImplementedError

    @__scaling__(ScalingInfo(2))
    def dist2(self, x: torch.Tensor, y: torch.Tensor, *, keepdim=False) -> torch.Tensor:
        """Squared distance between ``x`` and ``y``."""
        return self.dist(x, y, keepdim=keepdim) ** 2

    @abc.abstractmethod
    @__scaling__(ScalingInfo(u=-1))
    def retr(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """Retraction of tangent vector ``u`` at point ``x``."""
        raise NotImplementedError

    @abc.abstractmethod
    @__scaling__(ScalingInfo(u=-1))
    def expmap(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """Exponential map of tangent vector ``u`` at point ``x``."""
        raise NotImplementedError

    @__scaling__(ScalingInfo(1))
    def logmap(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Logarithmic map: tangent vector at ``x`` towards ``y`` (optional)."""
        raise NotImplementedError

    @__scaling__(ScalingInfo(u=-1))
    def expmap_transp(
        self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Exponential map of ``u`` at ``x`` plus transport of ``v`` to the result."""
        y = self.expmap(x, u)
        v_transp = self.transp(x, y, v)
        return y, v_transp

    @__scaling__(ScalingInfo(u=-1))
    def retr_transp(
        self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Retraction of ``u`` at ``x`` plus transport of ``v`` to the result."""
        y = self.retr(x, u)
        v_transp = self.transp(x, y, v)
        return y, v_transp

    @__scaling__(ScalingInfo(u=-1))
    def transp_follow_retr(
        self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
    ) -> torch.Tensor:
        """Transport ``v`` along the retraction of ``u`` at ``x``."""
        y = self.retr(x, u)
        return self.transp(x, y, v)

    @__scaling__(ScalingInfo(u=-1))
    def transp_follow_expmap(
        self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor
    ) -> torch.Tensor:
        """Transport ``v`` along the exponential map of ``u`` at ``x``."""
        y = self.expmap(x, u)
        return self.transp(x, y, v)

    def transp(self, x: torch.Tensor, y: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        """Vector transport of ``v`` from ``x`` to ``y`` (optional for subclasses)."""
        raise NotImplementedError

    @abc.abstractmethod
    def inner(
        self, x: torch.Tensor, u: torch.Tensor, v=None, *, keepdim=False
    ) -> torch.Tensor:
        """Inner product of tangent vectors ``u`` and ``v`` at ``x``."""
        raise NotImplementedError

    def component_inner(
        self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor = None
    ) -> torch.Tensor:
        """Component-wise inner product; defaults to ``inner`` with kept dims."""
        return self.inner(x, u, v, keepdim=True)

    def norm(self, x: torch.Tensor, u: torch.Tensor, *, keepdim=False) -> torch.Tensor:
        """Norm of tangent vector ``u`` at ``x``, sqrt of the inner product."""
        return self.inner(x, u, keepdim=keepdim) ** 0.5

    @abc.abstractmethod
    def proju(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """Project ``u`` onto the tangent space at ``x``."""
        raise NotImplementedError

    @abc.abstractmethod
    def egrad2rgrad(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """Convert a Euclidean gradient into the Riemannian gradient at ``x``."""
        raise NotImplementedError

    @abc.abstractmethod
    def projx(self, x: torch.Tensor) -> torch.Tensor:
        """Project an ambient point ``x`` onto the manifold."""
        raise NotImplementedError

    def _check_shape(
        self, shape: Tuple[int], name: str
    ) -> Union[Tuple[bool, Optional[str]], bool]:
        """Shape validity check; returns (ok, reason-or-None)."""
        ok = len(shape) >= self.ndim
        if not ok:
            reason = "'{}' on the {} requires more than {} dim".format(
                name, self, self.ndim
            )
        else:
            reason = None
        return ok, reason

    def _assert_check_shape(self, shape: Tuple[int], name: str):
        """Raise ValueError if the shape check fails."""
        ok, reason = self._check_shape(shape, name)
        if not ok:
            raise ValueError(reason)

    @abc.abstractmethod
    def _check_point_on_manifold(
        self, x: torch.Tensor, *, atol=1e-5, rtol=1e-5
    ) -> Union[Tuple[bool, Optional[str]], bool]:
        """Numerical membership test for ``x``; returns (ok, reason-or-None)."""
        raise NotImplementedError

    @abc.abstractmethod
    def _check_vector_on_tangent(
        self, x: torch.Tensor, u: torch.Tensor, *, atol=1e-5, rtol=1e-5
    ) -> Union[Tuple[bool, Optional[str]], bool]:
        """Numerical tangency test for ``u`` at ``x``; returns (ok, reason-or-None)."""
        raise NotImplementedError

    def extra_repr(self):
        """Extra text for ``__repr__``; empty by default."""
        return ""

    def __repr__(self):
        extra = self.extra_repr()
        if extra:
            return self.name + "({}) manifold".format(extra)
        else:
            return self.name + " manifold"

    def unpack_tensor(self, tensor: torch.Tensor) -> torch.Tensor:
        """Unpack the stored tensor representation; identity for simple manifolds."""
        return tensor

    def pack_point(self, *tensors: torch.Tensor) -> torch.Tensor:
        """Pack point tensors into one tensor; simple manifolds accept exactly one."""
        if len(tensors) != 1:
            raise ValueError("1 tensor expected, got {}".format(len(tensors)))
        return tensors[0]

    def random(self, *size, dtype=None, device=None, **kwargs) -> torch.Tensor:
        """Draw a random point on the manifold (optional for subclasses)."""
        raise NotImplementedError

    def origin(
        self,
        *size: Union[int, Tuple[int]],
        dtype=None,
        device=None,
        seed: Optional[int] = 42
    ) -> torch.Tensor:
        """Deterministic reference point; falls back to seeded ``random``."""
        if seed is not None:
            # Pseudorandom but reproducible: do not disturb the global seed.
            state = torch.random.get_rng_state()
            torch.random.manual_seed(seed)
            try:
                return self.random(*size, dtype=dtype, device=device)
            finally:
                torch.random.set_rng_state(state)
        else:
            return self.random(*size, dtype=dtype, device=device)
| true | true |
f7200b476d9f3051e09445e9614d3e89cf93fb90 | 256 | py | Python | app/recipe/urls.py | rahulsudhakar10/receipe-api-project | 29f205607905bbee347ea9ca505751f4d4cd508a | [
"MIT"
] | null | null | null | app/recipe/urls.py | rahulsudhakar10/receipe-api-project | 29f205607905bbee347ea9ca505751f4d4cd508a | [
"MIT"
] | null | null | null | app/recipe/urls.py | rahulsudhakar10/receipe-api-project | 29f205607905bbee347ea9ca505751f4d4cd508a | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
| 17.066667 | 48 | 0.734375 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register('tags', views.TagViewSet)
app_name = 'recipe'
urlpatterns = [
path('', include(router.urls))
]
| true | true |
f7200bcd5453259f229956e2f1faa902e06be465 | 6,058 | py | Python | selfdrive/controls/lib/longitudinal_planner.py | mogorman/openpilot-1 | 1d19166992149a7dea3536644d67e9e0e2e385fd | [
"MIT"
] | 1 | 2021-06-10T18:00:03.000Z | 2021-06-10T18:00:03.000Z | selfdrive/controls/lib/longitudinal_planner.py | mogorman/openpilot-1 | 1d19166992149a7dea3536644d67e9e0e2e385fd | [
"MIT"
] | 1 | 2021-05-29T00:57:16.000Z | 2021-05-29T00:57:16.000Z | selfdrive/controls/lib/longitudinal_planner.py | mogorman/openpilot-1 | 1d19166992149a7dea3536644d67e9e0e2e385fd | [
"MIT"
] | 2 | 2021-11-16T01:49:54.000Z | 2022-01-14T04:03:23.000Z | #!/usr/bin/env python3
import math
import numpy as np
from common.numpy_fast import interp
from common.cached_params import CachedParams
import cereal.messaging as messaging
from common.realtime import DT_MDL
from selfdrive.modeld.constants import T_IDXS
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.longcontrol import LongCtrlState
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import T_IDXS as T_IDXS_MPC
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N
from selfdrive.swaglog import cloudlog
LON_MPC_STEP = 0.2  # first step is 0.2s
AWARENESS_DECEL = -0.2  # car smoothly decel at .2m/s^2 when user is distracted
A_CRUISE_MIN = -1.2
# Speed-indexed max-accel table consumed by get_max_accel via interp()
# (breakpoints presumably in m/s — confirm units).
A_CRUISE_MAX_VALS = [1.2, 1.2, 0.8, 0.6]
A_CRUISE_MAX_BP = [0., 15., 25., 40.]
# Lookup table for turns
_A_TOTAL_MAX_V = [1.7, 3.2]
_A_TOTAL_MAX_BP = [20., 40.]
def get_max_accel(v_ego):
  """Return the speed-dependent maximum longitudinal acceleration limit."""
  return interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS)
def limit_accel_in_turns(v_ego, angle_steers, a_target, CP):
  """
  Return accel limits whose upper bound is reduced so the combined
  lateral + longitudinal acceleration stays inside a total budget;
  this avoids accelerating hard while the target is lost in turns.
  """
  total_budget = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
  # Lateral acceleration estimate from speed and steering angle.
  lateral_accel = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)
  remaining = total_budget**2 - lateral_accel**2
  longitudinal_allowed = math.sqrt(max(remaining, 0.))
  return [a_target[0], min(a_target[1], longitudinal_allowed)]
class Planner():
  """Longitudinal planner: drives the longitudinal MPC and publishes the plan."""

  def __init__(self, CP, init_v=0.0, init_a=0.0):
    self.CP = CP
    self.mpc = LongitudinalMpc()
    self.fcw = False
    self.cachedParams = CachedParams()
    self.v_desired = init_v
    self.a_desired = init_a
    # Smoothing factor for blending measured speed into the desired speed.
    self.alpha = np.exp(-DT_MDL/2.0)
    self.v_desired_trajectory = np.zeros(CONTROL_N)
    self.a_desired_trajectory = np.zeros(CONTROL_N)
    self.j_desired_trajectory = np.zeros(CONTROL_N)

  def update(self, sm, CP, lateral_planner):
    """Run one planning step from the latest SubMaster state."""
    v_ego = sm['carState'].vEgo
    a_ego = sm['carState'].aEgo
    v_cruise_kph = sm['controlsState'].vCruise
    v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)
    v_cruise = v_cruise_kph * CV.KPH_TO_MS
    long_control_state = sm['controlsState'].longControlState
    force_slow_decel = sm['controlsState'].forceDecel
    enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)
    # While long control is inactive (or gas pressed), track the measured state.
    if not enabled or sm['carState'].gasPressed:
      self.v_desired = v_ego
      self.a_desired = a_ego
    # Prevent divergence, smooth in current v_ego
    self.v_desired = self.alpha * self.v_desired + (1 - self.alpha) * v_ego
    self.v_desired = max(0.0, self.v_desired)
    accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]
    if not self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == "1":
      accel_limits = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)
    if force_slow_decel:
      # if required so, force a smooth deceleration
      accel_limits[1] = min(accel_limits[1], AWARENESS_DECEL)
      accel_limits[0] = min(accel_limits[0], accel_limits[1])
    # clip limits, cannot init MPC outside of bounds
    accel_limits[0] = min(accel_limits[0], self.a_desired + 0.05)
    accel_limits[1] = max(accel_limits[1], self.a_desired - 0.05)
    self.mpc.set_accel_limits(accel_limits[0], accel_limits[1])
    self.mpc.set_cur_state(self.v_desired, self.a_desired)
    self.mpc.update(sm['carState'], sm['radarState'], v_cruise)
    # Resample the MPC solution onto the controller's time grid.
    self.v_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.v_solution)
    self.a_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.a_solution)
    self.j_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC[:-1], self.mpc.j_solution)
    #TODO counter is only needed because radar is glitchy, remove once radar is gone
    self.fcw = self.mpc.crash_cnt > 5
    if self.fcw:
      cloudlog.info("FCW triggered")
    # Interpolate 0.05 seconds and save as starting point for next iteration
    a_prev = self.a_desired
    self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))
    self.v_desired = self.v_desired + DT_MDL * (self.a_desired + a_prev)/2.0
    if lateral_planner.lateralPlan and self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == "1":
      curvs = list(lateral_planner.lateralPlan.curvatures)
      if len(curvs):
        # find the largest curvature in the solution and use that.
        curv = abs(curvs[-1])
        if curv != 0:
          self.v_desired = float(min(self.v_desired, self.limit_speed_in_curv(sm, curv)))

  def publish(self, sm, pm):
    """Publish the current plan on the 'longitudinalPlan' messaging channel."""
    plan_send = messaging.new_message('longitudinalPlan')
    plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState'])
    longitudinalPlan = plan_send.longitudinalPlan
    longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']
    longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']
    longitudinalPlan.speeds = [float(x) for x in self.v_desired_trajectory]
    longitudinalPlan.accels = [float(x) for x in self.a_desired_trajectory]
    longitudinalPlan.jerks = [float(x) for x in self.j_desired_trajectory]
    longitudinalPlan.hasLead = sm['radarState'].leadOne.status
    longitudinalPlan.longitudinalPlanSource = self.mpc.source
    longitudinalPlan.fcw = self.fcw
    pm.send('longitudinalPlan', plan_send)

  def limit_speed_in_curv(self, sm, curv):
    """Return a curvature-limited speed from the allowed lateral-accel budget."""
    v_ego = sm['carState'].vEgo
    a_y_max = 2.975 - v_ego * 0.0375  # ~1.85 @ 75mph, ~2.6 @ 25mph
    # drop off
    drop_off = self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedDropOff', 5000)
    if drop_off != 2 and a_y_max > 0:
      a_y_max = np.sqrt(a_y_max) ** drop_off
    # v = sqrt(a_y / curvature); curvature floored to avoid division by zero.
    v_curvature = np.sqrt(a_y_max / np.clip(curv, 1e-4, None))
    model_speed = np.min(v_curvature)
    return model_speed * self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedRatio', 5000)
import math
import numpy as np
from common.numpy_fast import interp
from common.cached_params import CachedParams
import cereal.messaging as messaging
from common.realtime import DT_MDL
from selfdrive.modeld.constants import T_IDXS
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.longcontrol import LongCtrlState
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import T_IDXS as T_IDXS_MPC
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N
from selfdrive.swaglog import cloudlog
LON_MPC_STEP = 0.2  # MPC first-step duration, seconds
AWARENESS_DECEL = -0.2  # gentle decel (m/s^2) applied when the driver is distracted
A_CRUISE_MIN = -1.2
# Speed-indexed max-accel table consumed by get_max_accel via interp().
A_CRUISE_MAX_VALS = [1.2, 1.2, 0.8, 0.6]
A_CRUISE_MAX_BP = [0., 15., 25., 40.]
# Total-acceleration budget table used by limit_accel_in_turns.
_A_TOTAL_MAX_V = [1.7, 3.2]
_A_TOTAL_MAX_BP = [20., 40.]
def get_max_accel(v_ego):
  """Return the speed-dependent maximum longitudinal acceleration limit."""
  return interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS)
def limit_accel_in_turns(v_ego, angle_steers, a_target, CP):
  """
  Return accel limits whose upper bound is reduced so the combined
  lateral + longitudinal acceleration stays inside a total budget,
  avoiding hard acceleration while turning.
  """
  a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
  # Lateral acceleration estimate from speed and steering angle.
  a_y = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)
  a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))
  return [a_target[0], min(a_target[1], a_x_allowed)]
class Planner():
  """Longitudinal planner: drives the longitudinal MPC and publishes the plan."""

  def __init__(self, CP, init_v=0.0, init_a=0.0):
    self.CP = CP
    self.mpc = LongitudinalMpc()
    self.fcw = False
    self.cachedParams = CachedParams()
    self.v_desired = init_v
    self.a_desired = init_a
    # Smoothing factor for blending measured speed into the desired speed.
    self.alpha = np.exp(-DT_MDL/2.0)
    self.v_desired_trajectory = np.zeros(CONTROL_N)
    self.a_desired_trajectory = np.zeros(CONTROL_N)
    self.j_desired_trajectory = np.zeros(CONTROL_N)

  def update(self, sm, CP, lateral_planner):
    """Run one planning step from the latest SubMaster state."""
    v_ego = sm['carState'].vEgo
    a_ego = sm['carState'].aEgo
    v_cruise_kph = sm['controlsState'].vCruise
    v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)
    v_cruise = v_cruise_kph * CV.KPH_TO_MS
    long_control_state = sm['controlsState'].longControlState
    force_slow_decel = sm['controlsState'].forceDecel
    enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)
    # While long control is inactive (or gas pressed), track the measured state.
    if not enabled or sm['carState'].gasPressed:
      self.v_desired = v_ego
      self.a_desired = a_ego
    # Smooth in the measured speed to prevent divergence.
    self.v_desired = self.alpha * self.v_desired + (1 - self.alpha) * v_ego
    self.v_desired = max(0.0, self.v_desired)
    accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]
    if not self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == "1":
      accel_limits = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)
    if force_slow_decel:
      # Force a smooth deceleration when requested (e.g. driver distraction).
      accel_limits[1] = min(accel_limits[1], AWARENESS_DECEL)
      accel_limits[0] = min(accel_limits[0], accel_limits[1])
    # Clip limits: the MPC cannot be initialized outside of its bounds.
    accel_limits[0] = min(accel_limits[0], self.a_desired + 0.05)
    accel_limits[1] = max(accel_limits[1], self.a_desired - 0.05)
    self.mpc.set_accel_limits(accel_limits[0], accel_limits[1])
    self.mpc.set_cur_state(self.v_desired, self.a_desired)
    self.mpc.update(sm['carState'], sm['radarState'], v_cruise)
    # Resample the MPC solution onto the controller's time grid.
    self.v_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.v_solution)
    self.a_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.a_solution)
    self.j_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC[:-1], self.mpc.j_solution)
    # Forward collision warning when the MPC reports sustained crash risk.
    self.fcw = self.mpc.crash_cnt > 5
    if self.fcw:
      cloudlog.info("FCW triggered")
    # Integrate one model step ahead as the starting point for next iteration.
    a_prev = self.a_desired
    self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))
    self.v_desired = self.v_desired + DT_MDL * (self.a_desired + a_prev)/2.0
    if lateral_planner.lateralPlan and self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == "1":
      curvs = list(lateral_planner.lateralPlan.curvatures)
      if len(curvs):
        # Use the final curvature of the lateral solution.
        curv = abs(curvs[-1])
        if curv != 0:
          self.v_desired = float(min(self.v_desired, self.limit_speed_in_curv(sm, curv)))

  def publish(self, sm, pm):
    """Publish the current plan on the 'longitudinalPlan' messaging channel."""
    plan_send = messaging.new_message('longitudinalPlan')
    plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState'])
    longitudinalPlan = plan_send.longitudinalPlan
    longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']
    longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']
    longitudinalPlan.speeds = [float(x) for x in self.v_desired_trajectory]
    longitudinalPlan.accels = [float(x) for x in self.a_desired_trajectory]
    longitudinalPlan.jerks = [float(x) for x in self.j_desired_trajectory]
    longitudinalPlan.hasLead = sm['radarState'].leadOne.status
    longitudinalPlan.longitudinalPlanSource = self.mpc.source
    longitudinalPlan.fcw = self.fcw
    pm.send('longitudinalPlan', plan_send)

  def limit_speed_in_curv(self, sm, curv):
    """Return a curvature-limited speed from the allowed lateral-accel budget."""
    v_ego = sm['carState'].vEgo
    a_y_max = 2.975 - v_ego * 0.0375  # speed-dependent lateral accel budget
    drop_off = self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedDropOff', 5000)
    if drop_off != 2 and a_y_max > 0:
      a_y_max = np.sqrt(a_y_max) ** drop_off
    # v = sqrt(a_y / curvature); curvature floored to avoid division by zero.
    v_curvature = np.sqrt(a_y_max / np.clip(curv, 1e-4, None))
    model_speed = np.min(v_curvature)
    return model_speed * self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedRatio', 5000)
f7200bced83e516dd159f591c79e95855c52a38f | 23,009 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_application_gateway_private_endpoint_connections_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_application_gateway_private_endpoint_connections_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_application_gateway_private_endpoint_connections_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional response hook: (pipeline_response, deserialized_body, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewayPrivateEndpointConnectionsOperations:
"""ApplicationGatewayPrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Wired up by the generated service client; not meant to be
        # instantiated directly (see the class docstring).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the raw DELETE request for a private endpoint connection.

        Internal helper: ``begin_delete`` wraps this single HTTP call in a
        long-running-operation poller.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/not-found/conflict statuses to specific azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Accepted status codes for delete; anything else maps to an ARM error.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified private endpoint connection on application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param connection_name: The name of the application gateway private endpoint connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # No continuation token: kick off a fresh operation with the initial DELETE.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                connection_name=connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke a custom deserializer if given.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # 'location' final-state: the LRO result is fetched from the Location header URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
        **kwargs: Any
    ) -> Optional["_models.ApplicationGatewayPrivateEndpointConnection"]:
        """Issue the initial PUT of the update long-running operation.

        Called by :meth:`begin_update`; not part of the public surface.
        Returns the deserialized connection when the service answers 200,
        or ``None`` on 202 (operation accepted and still in progress — the
        poller built by ``begin_update`` drives it to completion).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ApplicationGatewayPrivateEndpointConnection"]]
        # Map auth/not-found/conflict statuses to typed azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL: fill the template stored on this method's metadata.
        url = self._update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and send the PUT through the pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ApplicationGatewayPrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 202 carries no body worth deserializing; only 200 returns the model.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
    async def begin_update(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ApplicationGatewayPrivateEndpointConnection"]:
        """Updates the specified private endpoint connection on application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param connection_name: The name of the application gateway private endpoint connection.
        :type connection_name: str
        :param parameters: Parameters supplied to update application gateway private endpoint
         connection operation.
        :type parameters: ~azure.mgmt.network.v2020_08_01.models.ApplicationGatewayPrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ApplicationGatewayPrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_08_01.models.ApplicationGatewayPrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Fire the initial PUT only when starting fresh; when resuming from a
        # continuation token the poller reconstructs the operation state itself.
        # cls is overridden so the raw pipeline response reaches the poller.
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                connection_name=connection_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call and must not leak
        # into the polling requests below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response of the LRO (closure over cls).
            deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling may be True (default ARM polling), False (no polling), or a
        # user-supplied AsyncPollingMethod instance.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ApplicationGatewayPrivateEndpointConnection":
"""Gets the specified private endpoint connection on application gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_gateway_name: The name of the application gateway.
:type application_gateway_name: str
:param connection_name: The name of the application gateway private endpoint connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationGatewayPrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.ApplicationGatewayPrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]:
        """Lists all private endpoint connections on an application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationGatewayPrivateEndpointConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.ApplicationGatewayPrivateEndpointConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated URL,
            # subsequent pages use the server-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items) as
            # required by the AsyncItemPaged protocol.
            deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page and raise on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections'}  # type: ignore
| 53.509302 | 241 | 0.690556 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewayPrivateEndpointConnectionsOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
async def begin_delete(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
async def _update_initial(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
**kwargs: Any
) -> Optional["_models.ApplicationGatewayPrivateEndpointConnection"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._update_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'ApplicationGatewayPrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
async def begin_update(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
parameters: "_models.ApplicationGatewayPrivateEndpointConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.ApplicationGatewayPrivateEndpointConnection"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
application_gateway_name=application_gateway_name,
connection_name=connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
async def get(
self,
resource_group_name: str,
application_gateway_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ApplicationGatewayPrivateEndpointConnection":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}
def list(
self,
resource_group_name: str,
application_gateway_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ApplicationGatewayPrivateEndpointConnectionListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections'}
| true | true |
f7200c78df71a4b408145caba6e03ce542b4d9df | 3,753 | py | Python | tests/system/action/motion_comment_section/test_sort.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | tests/system/action/motion_comment_section/test_sort.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 19 | 2021-11-22T16:25:54.000Z | 2021-11-25T13:38:13.000Z | tests/system/action/motion_comment_section/test_sort.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class MotionCommentSectionSortActionTest(BaseActionTestCase):
    """System tests for the ``motion_comment_section.sort`` action."""

    def setUp(self) -> None:
        super().setUp()
        # Fixture consumed by base_permission_test: two comment sections in
        # meeting 1 whose order the sort request below rearranges.
        self.permission_test_model = {
            "motion_comment_section/31": {
                "meeting_id": 1,
                "name": "name_loisueb",
            },
            "motion_comment_section/32": {
                "meeting_id": 1,
                "name": "name_blanumop",
            },
        }

    def test_sort_correct_1(self) -> None:
        """Sorting assigns weights according to the order of the given ids."""
        self.set_models(
            {
                "meeting/222": {
                    "name": "name_SNLGsvIV",
                    "is_active_in_organization_id": 1,
                },
                "motion_comment_section/31": {
                    "meeting_id": 222,
                    "name": "name_loisueb",
                },
                "motion_comment_section/32": {
                    "meeting_id": 222,
                    "name": "name_blanumop",
                },
            }
        )
        response = self.request(
            "motion_comment_section.sort",
            {"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
        )
        self.assert_status_code(response, 200)
        # Section 32 was listed first, so it receives the lower weight.
        model_31 = self.get_model("motion_comment_section/31")
        assert model_31.get("weight") == 2
        model_32 = self.get_model("motion_comment_section/32")
        assert model_32.get("weight") == 1

    def test_sort_missing_model(self) -> None:
        """Referencing an id that does not exist in the datastore fails."""
        self.set_models(
            {
                "meeting/222": {
                    "name": "name_SNLGsvIV",
                    "is_active_in_organization_id": 1,
                },
                "motion_comment_section/31": {
                    "meeting_id": 222,
                    "name": "name_loisueb",
                },
            }
        )
        response = self.request(
            "motion_comment_section.sort",
            {"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
        )
        self.assert_status_code(response, 400)
        assert "Id 32 not in db_instances." in response.json["message"]

    def test_sort_another_section_db(self) -> None:
        """Omitting an existing section of the meeting from the payload fails."""
        self.set_models(
            {
                "meeting/222": {
                    "name": "name_SNLGsvIV",
                    "is_active_in_organization_id": 1,
                },
                "motion_comment_section/31": {
                    "meeting_id": 222,
                    "name": "name_loisueb",
                },
                "motion_comment_section/32": {
                    "meeting_id": 222,
                    "name": "name_blanumop",
                },
                # Section 33 exists in the meeting but is not listed in the
                # request, which the action must reject.
                "motion_comment_section/33": {
                    "meeting_id": 222,
                    "name": "name_polusiem",
                },
            }
        )
        response = self.request(
            "motion_comment_section.sort",
            {"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
        )
        self.assert_status_code(response, 400)
        assert "Additional db_instances found." in response.json["message"]

    def test_sort_no_permissions(self) -> None:
        """Without any permission the action is denied."""
        self.base_permission_test(
            self.permission_test_model,
            "motion_comment_section.sort",
            {"meeting_id": 1, "motion_comment_section_ids": [32, 31]},
        )

    def test_sort_permissions(self) -> None:
        """With motion.can_manage the action is allowed."""
        self.base_permission_test(
            self.permission_test_model,
            "motion_comment_section.sort",
            {"meeting_id": 1, "motion_comment_section_ids": [32, 31]},
            Permissions.Motion.CAN_MANAGE,
        )
| 34.75 | 75 | 0.501998 | from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class MotionCommentSectionSortActionTest(BaseActionTestCase):
def setUp(self) -> None:
super().setUp()
self.permission_test_model = {
"motion_comment_section/31": {
"meeting_id": 1,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 1,
"name": "name_blanumop",
},
}
def test_sort_correct_1(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 222,
"name": "name_blanumop",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 200)
model_31 = self.get_model("motion_comment_section/31")
assert model_31.get("weight") == 2
model_32 = self.get_model("motion_comment_section/32")
assert model_32.get("weight") == 1
def test_sort_missing_model(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 400)
assert "Id 32 not in db_instances." in response.json["message"]
def test_sort_another_section_db(self) -> None:
self.set_models(
{
"meeting/222": {
"name": "name_SNLGsvIV",
"is_active_in_organization_id": 1,
},
"motion_comment_section/31": {
"meeting_id": 222,
"name": "name_loisueb",
},
"motion_comment_section/32": {
"meeting_id": 222,
"name": "name_blanumop",
},
"motion_comment_section/33": {
"meeting_id": 222,
"name": "name_polusiem",
},
}
)
response = self.request(
"motion_comment_section.sort",
{"meeting_id": 222, "motion_comment_section_ids": [32, 31]},
)
self.assert_status_code(response, 400)
assert "Additional db_instances found." in response.json["message"]
def test_sort_no_permissions(self) -> None:
self.base_permission_test(
self.permission_test_model,
"motion_comment_section.sort",
{"meeting_id": 1, "motion_comment_section_ids": [32, 31]},
)
def test_sort_permissions(self) -> None:
    """With motion.can_manage the sort action must be allowed."""
    payload = {"meeting_id": 1, "motion_comment_section_ids": [32, 31]}
    self.base_permission_test(
        self.permission_test_model,
        "motion_comment_section.sort",
        payload,
        Permissions.Motion.CAN_MANAGE,
    )
| true | true |
f7200c97efca195813bca2dc2b1dffe77ce84bea | 4,104 | py | Python | test/functional/feature_includeconf.py | kaboela/litecoinz | b793b04a717416726a7b1013b21b07fb35dbc4a2 | [
"MIT"
] | 8 | 2020-06-05T16:30:36.000Z | 2021-09-28T08:39:52.000Z | test/functional/feature_includeconf.py | kaboela/litecoinz | b793b04a717416726a7b1013b21b07fb35dbc4a2 | [
"MIT"
] | 8 | 2020-04-04T11:24:26.000Z | 2021-05-09T18:53:53.000Z | test/functional/feature_includeconf.py | kaboela/litecoinz | b793b04a717416726a7b1013b21b07fb35dbc4a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
    """Exercise -includeconf handling through the node's configuration file."""

    def set_test_params(self):
        # One node; the cached chain is sufficient for config-file tests.
        self.setup_clean_chain = False
        self.num_nodes = 1

    def _node0_file(self, name):
        """Absolute path of *name* inside the first node's datadir."""
        return os.path.join(self.options.tmpdir, "node0", name)

    def _write_node0_file(self, name, text, mode="w"):
        """Write (or, with mode='a', append) *text* to a file in node0's datadir."""
        with open(self._node0_file(name), mode, encoding="utf8") as f:
            f.write(text)

    def setup_chain(self):
        super().setup_chain()
        # Two extra config fragments, plus an includeconf line in the main file
        # so that relative.conf is pulled in on startup.
        self._write_node0_file("relative.conf", "uacomment=relative\n")
        self._write_node0_file("relative2.conf", "uacomment=relative2\n")
        self._write_node0_file(
            "litecoinz.conf", "uacomment=main\nincludeconf=relative.conf\n", mode="a"
        )

    def run_test(self):
        self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
        assert self.nodes[0].getnetworkinfo()["subversion"].endswith("main; relative)/")

        self.log.info("-includeconf cannot be used as command-line arg")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(
            extra_args=["-includeconf=relative2.conf"],
            expected_msg="Error: Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf",
        )

        self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
        self._write_node0_file("relative.conf", "includeconf=relative2.conf\n", mode="a")
        self.start_node(0)
        assert self.nodes[0].getnetworkinfo()["subversion"].endswith("main; relative)/")
        self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")

        self.log.info("-includeconf cannot contain invalid arg")
        # Disabled while invalid arguments in configuration files are ignored:
        # self._write_node0_file("relative.conf", "foo=bar\n")
        # self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Invalid configuration value foo")

        self.log.info("-includeconf cannot be invalid path")
        os.remove(self._node0_file("relative.conf"))
        self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Failed to include configuration file relative.conf")

        self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
        # Restore the original relative.conf contents, then include both files.
        self._write_node0_file("relative.conf", "uacomment=relative\n")
        self._write_node0_file("litecoinz.conf", "includeconf=relative2.conf\n", mode="a")
        self.start_node(0)
        assert self.nodes[0].getnetworkinfo()["subversion"].endswith("main; relative; relative2)/")
# Script entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    IncludeConfTest().main()
| 49.445783 | 231 | 0.694932 |
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
    """Functional test of the -includeconf configuration option."""

    def set_test_params(self):
        # One node; the cached chain is sufficient for config-file tests.
        self.setup_clean_chain = False
        self.num_nodes = 1

    def setup_chain(self):
        super().setup_chain()
        # Create two extra config fragments in node0's datadir ...
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative\n")
        with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative2\n")
        # ... and pull relative.conf in from the main configuration file.
        with open(os.path.join(self.options.tmpdir, "node0", "litecoinz.conf"), "a", encoding='utf8') as f:
            f.write("uacomment=main\nincludeconf=relative.conf\n")

    def run_test(self):
        # 1. includeconf from the config file is honored, in order.
        self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        # 2. includeconf is rejected on the command line.
        self.log.info("-includeconf cannot be used as command-line arg")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error: Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
        # 3. includeconf inside an included file is ignored with a warning.
        self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
        # (Invalid-argument check is currently a no-op upstream.)
        self.log.info("-includeconf cannot contain invalid arg")
        # 4. A missing included file is a startup error.
        self.log.info("-includeconf cannot be invalid path")
        os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
        self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Failed to include configuration file relative.conf")
        # 5. Multiple includeconf lines in the base config are all applied.
        self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            # Restore initial file contents before re-including it.
            f.write("uacomment=relative\n")
        with open(os.path.join(self.options.tmpdir, "node0", "litecoinz.conf"), "a", encoding='utf8') as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative; relative2)/")
# Script entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    IncludeConfTest().main()
| true | true |
f72010304316b8649123167ea9d4b94f50b6f1f1 | 25,588 | py | Python | plaidcloud/utilities/tests/test_remote_dimension.py | PlaidCloud/public-utilities | 663e94f2657a02a4249177945e0880bb968c3439 | [
"Apache-2.0"
] | null | null | null | plaidcloud/utilities/tests/test_remote_dimension.py | PlaidCloud/public-utilities | 663e94f2657a02a4249177945e0880bb968c3439 | [
"Apache-2.0"
] | 48 | 2020-10-30T10:15:39.000Z | 2022-03-25T17:23:57.000Z | plaidcloud/utilities/tests/test_remote_dimension.py | PlaidCloud/plaid-utilities | 1031cb87580bbe110f56455925e483a0ae177fe1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import filecmp
import os
import unittest
from unittest import TestCase
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from plaidcloud.utilities.connect import create_connection
from plaidcloud.utilities.remote.dimension import Dimensions
from plaidcloud.utilities.remote.dimension import MAIN
from plaidcloud.utilities.remote.dimension import ROOT
__author__ = 'Dave Parsons'
__copyright__ = 'Copyright 2010-2020, Tartan Solutions, Inc'
__credits__ = ['Dave Parsons']
__license__ = 'Proprietary'
__maintainer__ = 'Dave Parsons'
__email__ = 'dave.parsons@tartansolutions.com'
# Folders holding the expected (baseline) CSVs and the freshly generated ones.
BASELINE = './dim_baseline/'
FOLDER = './dim_current/'
# Shared PlaidCloud connection used by every test in this module.
# NOTE(review): verify_ssl=False disables certificate verification — assumed
# acceptable for test tooling only; confirm this never targets production.
conn = create_connection(verify_ssl=False)
class TestDimension(TestCase):
"""Test Redis Dimension code"""
def assertFileEqual(self, file1, file2, **kwargs):
return self.assertTrue(filecmp.cmp(file1, file2, shallow=False))
def assertFrameEqual(self, df1, df2, **kwargs):
return assert_frame_equal(df1, df2, check_names=True, check_like=True, **kwargs)
def setUp(self):
    """Create the baseline folder and (re)attach to the periods test dimension."""
    # Baseline folder must exist so comparison reads don't fail outright.
    if not os.path.exists(BASELINE):
        os.makedirs(BASELINE)
    self.periods = 'periods_rpc_test'
    # NOTE(review): replace=False reuses the server-side dimension between
    # tests, so the numbered tests appear to depend on running in order —
    # confirm the runner executes them sorted by name.
    self.dims = Dimensions(conn=conn)
    self.dim = self.dims.get_dimension(name=self.periods, replace=False)
    return
def test_001_load_hierarchy_main(self):
df_main = pd.DataFrame(
[
[ROOT, 'Year'],
['Year', 'Q1'],
['Year', 'Q2'],
['Year', 'Q3'],
['Year', 'Q4'],
['Q1', 'January'],
['Q1', 'February'],
['Q1', 'March'],
['Q2', 'April'],
['Q2', 'May'],
['Q2', 'June'],
['Q3', 'July'],
['Q3', 'August'],
['Q3', 'September'],
['Q4', 'October'],
['Q4', 'November'],
['Q4', 'December'],
],
columns=['ParentName', 'ChildName']
)
# Clear down the dimension and reload
self.dim.clear()
# main hierarchy
df_results = self.dim.load_hierarchy_from_dataframe(df_main, 'ParentName', 'ChildName')
df_results.to_csv(f'{FOLDER}df_main_load.csv', index=False)
# Create a backup file to allow reloading in tests
data = self.dims.backup(self.periods)
with open(f'{FOLDER}periods.yaml', 'w') as file:
file.write(data)
self.assertFileEqual(f'{FOLDER}df_main_load.csv', f'{BASELINE}df_main_load.csv')
return
def test_002_save_hierarchy_main(self):
    """Exported main hierarchy must match the baseline CSV."""
    csv_name = 'df_main_hierarchy.csv'
    exported = self.dim.save_hierarchy_to_dataframe(MAIN)
    exported.drop(labels='index', axis=1, inplace=True)
    exported.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_003_load_hierarchy_halves(self):
df_halves = pd.DataFrame(
[
[ROOT, 'H1', '~', 'halves'],
[ROOT, 'H2', '~', 'halves'],
['H1', 'Q1', '+', 'halves'],
['H1', 'Q2', '+', 'halves'],
['H2', 'Q3', '+', 'halves'],
['H2', 'Q4', '+', 'halves'],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
# halves hierarchy
df_results = self.dim.load_hierarchy_from_dataframe(df_halves, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_halves_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_halves_load.csv', f'{BASELINE}df_halves_load.csv')
return
def test_004_save_hierarchy_halves(self):
    """Exported halves hierarchy must match the baseline CSV."""
    csv_name = 'df_halves_hierarchy.csv'
    exported = self.dim.save_hierarchy_to_dataframe('halves')
    exported.drop(labels='index', axis=1, inplace=True)
    exported.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_005_load_hierarchy_financial(self):
df_financial = pd.DataFrame(
[
[ROOT, 'YTD', '+', 'financial'],
[ROOT, 'YTG', '+', 'financial'],
['YTD', 'January', '+', 'financial'],
['YTD', 'February', '+', 'financial'],
['YTD', 'March', '+', 'financial'],
['YTD', 'April', '+', 'financial'],
['YTG', 'May', '-', 'financial'],
['YTG', 'June', '-', 'financial'],
['YTG', 'July', '-', 'financial'],
['YTG', 'August', '-', 'financial'],
['YTG', 'September', '-', 'financial'],
['YTG', 'October', '-', 'financial'],
['YTG', 'November', '-', 'financial'],
['YTG', 'December', '-', 'financial'],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
# financial hierarchy
df_results = self.dim.load_hierarchy_from_dataframe(df_financial, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_financial_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_financial_load.csv', f'{BASELINE}df_financial_load.csv')
return
def test_006_save_hierarchy_financial(self):
    """Exported financial hierarchy must match the baseline CSV."""
    csv_name = 'df_financial_hierarchy.csv'
    exported = self.dim.save_hierarchy_to_dataframe('financial')
    exported.drop(labels='index', axis=1, inplace=True)
    exported.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_007_load_hierarchy_errors(self):
# This dataframe includes specific errors so check out the results dataframe
df_test = pd.DataFrame(
[
['', '', '+', 'main'],
[' ', ' ', '+', 'main'],
['Q5', '', '+', 'main'],
[np.NaN, np.NaN, '+', 'main'],
[None, None, '+', 'main'],
['None', 'None', '+', 'main'],
['Q5', 'Q5', '+', 'main'],
['Q5', ROOT, '+', 'main'],
['Q5', 'Donk:tober', '+', 'main'],
['Donk:tober', 'Janusday', '+', 'main'],
['Year', 'Q5', '+', 'main'],
['Year', 'Q5', '+', 'main'],
['Q4', 'Badtober', '+', 'halves'],
['Q6', 'Craptober', '+', ''],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
df_results = self.dim.load_hierarchy_from_dataframe(df_test, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_complex_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_complex_load.csv', f'{BASELINE}df_complex_load.csv')
return
def test_008_load_save_aliases(self):
df_aliases = pd.DataFrame(
[
['Trimestre 1', 'French', 'Q1'],
['Trimestre 2', 'French', 'Q2'],
['Trimestre 3', 'French', 'Q3'],
['Trimestre 4', 'French', 'Q4'],
['Janvier', 'French', 'January'],
['Fevier', 'French', 'February'],
['Mars', 'French', 'March'],
['Avril', 'French', 'April'],
['Mai', 'French', 'May'],
['Juin', 'French', 'June'],
['Julliet', 'French', 'July'],
['Aout', 'French', 'August'],
['Septembre', 'French', 'September'],
['Octobre', 'French', 'October'],
['Novembre', 'French', 'November'],
['Decembre', 'French', 'December'],
['Haneri 1', 'Welsh', 'H1'],
['Haneri 2', 'Welsh', 'H2'],
['Ionawr', 'Welsh', 'January'],
['Chwefror', 'Welsh', 'February'],
['Mawrth', 'Welsh', 'March'],
['Ebrill', 'Welsh', 'April'],
['Mai', 'Welsh', 'May'],
['Mehefin', 'Welsh', 'June'],
['Gorffennaf', 'Welsh', 'July'],
['Awst', 'Welsh', 'August'],
['Medi', 'Welsh', 'September'],
['Hydref', 'Welsh', 'October'],
['Tachwedd', 'Welsh', 'November'],
['Rhagfyr', 'Welsh', 'December'],
['Январь', 'Russian', 'January'],
['Февраль', 'Russian', 'February'],
['Март', 'Russian', 'March'],
['Апрель', 'Russian', 'April'],
['Май', 'Russian', 'May'],
['Июнь', 'Russian', 'June'],
['Июль', 'Russian', 'July'],
['Август', 'Russian', 'August'],
['Сентябрь', 'Russian', 'September'],
['Октябрь', 'Russian', 'October'],
['Ноябрь', 'Russian', 'November'],
['Декабрь', 'Russian', 'December'],
['일월', 'Korean', 'January'],
['이월', 'Korean', 'February'],
['삼월', 'Korean', 'March'],
['사월', 'Korean', 'April'],
['오월', 'Korean', 'May'],
['유월', 'Korean', 'June'],
['칠월', 'Korean', 'July'],
['팔월', 'Korean', 'August'],
['구월', 'Korean', 'September'],
['시월', 'Korean', 'October'],
['십일월', 'Korean', 'November'],
['십이월', 'Korean', 'December'],
['☃️', 'Emoji', 'January'],
['💘', 'Emoji', 'February'],
['☘️', 'Emoji', 'March'],
['☔', 'Emoji', 'April'],
['🌺', 'Emoji', 'May'],
['🌞', 'Emoji', 'June'],
['🍦', 'Emoji', 'July'],
['🏖️', 'Emoji', 'August'],
['🍎', 'Emoji', 'September'],
['🎃', 'Emoji', 'October'],
['🍂', 'Emoji', 'November'],
['🎅', 'Emoji', 'December'],
],
columns=['AliasValue', 'AliasName', 'NodeName']
)
# Aliases
self.dim.load_aliases_from_dataframe(df_aliases, 'NodeName', 'AliasName', 'AliasValue')
df = self.dim.save_aliases_to_dataframe(None)
df.drop(labels='index', axis=1, inplace=True)
df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_aliases.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_aliases.csv', f'{BASELINE}df_aliases.csv')
return
def test_009_load_save_properties(self):
df_properties = pd.DataFrame(
[
['Magenta', 'Colour', ROOT],
['Purple', 'Colour', 'Year'],
['Red', 'Colour', 'Q1'],
['Orange', 'Colour', 'Q2'],
['Green', 'Colour', 'April'],
['Green', 'Colour', 'May'],
['Blue', 'Colour', 'July'],
['Blue', 'Colour', 'August'],
['Blue', 'Colour', 'September'],
['White', 'Colour', 'Q4'],
['Red', 'Colour', 'October'],
['Green', 'Colour', 'November'],
['Red', 'Colour', 'December'],
['Winter', 'Season', 'Q1'],
['Spring', 'Season', 'Q2'],
['Summer', 'Season', 'Q3'],
['Autumn', 'Season', 'Q4'],
],
columns=['PropertyValue', 'PropertyName', 'NodeName']
)
# Properties
self.dim.load_properties_from_dataframe(df_properties, 'NodeName', 'PropertyName', 'PropertyValue')
df = self.dim.save_properties_to_dataframe(None)
df.drop(labels='index', axis=1, inplace=True)
df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_properties.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_properties.csv', f'{BASELINE}df_properties.csv')
return
def test_010_load_save_values(self):
df_values = pd.DataFrame(
[
[-10.0, 'Costs', 'January'],
[-100.0, 'Costs', 'February'],
[-1000.0, 'Costs', 'March'],
[-20.0, 'Costs', 'April'],
[-200.0, 'Costs', 'May'],
[-2000.0, 'Costs', 'June'],
[-30.0, 'Costs', 'July'],
[-300.0, 'Costs', 'August'],
[-3000.0, 'Costs', 'September'],
[-40.0, 'Costs', 'October'],
[-400.0, 'Costs', 'November'],
[-4000.0, 'Costs', 'December'],
[10.0, 'Profit', 'January'],
[100.0, 'Profit', 'February'],
[1000.0, 'Profit', 'March'],
[20.0, 'Profit', 'April'],
[200.0, 'Profit', 'May'],
[2000.0, 'Profit', 'June'],
[30.0, 'Profit', 'July'],
[300.0, 'Profit', 'August'],
[3000.0, 'Profit', 'September'],
[40.0, 'Profit', 'October'],
[400.0, 'Profit', 'November'],
[4000.0, 'Profit', 'December'],
],
columns=['Value', 'ValueName', 'NodeName']
)
# Values
self.dim.load_values_from_dataframe(df_values, 'NodeName', 'ValueName', 'Value')
df = self.dim.save_values_to_dataframe(None)
df.drop(labels='index', axis=1, inplace=True)
df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_values.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_values.csv', f'{BASELINE}df_values.csv')
return
def test_011_get_hierarchy_dataframe(self):
    """Main hierarchy dataframe must match the baseline CSV."""
    csv_name = 'df_get_hierarchy_main.csv'
    frame = self.dim.get_hierarchy_dataframe(hierarchy=MAIN)
    frame = frame.reindex(columns=sorted(frame.columns))
    frame.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_012_get_aliases_dataframe(self):
    """Alias dataframe (columns and rows sorted) must match the baseline CSV."""
    csv_name = 'df_get_aliases.csv'
    frame = self.dim.get_aliases_dataframe()
    frame = frame.reindex(columns=sorted(frame.columns))
    frame.sort_values(by=list(frame.columns), axis=0, inplace=True)
    frame.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_013_get_attributes_dataframe(self):
    """Attribute dataframe must match the baseline CSV."""
    csv_name = 'df_get_attributes.csv'
    frame = self.dim.get_attributes_dataframe()
    # Drop the synthetic index column before normalizing the layout.
    frame.drop(labels='index', axis=1, inplace=True)
    frame = frame.reindex(columns=sorted(frame.columns))
    frame.sort_values(by=list(frame.columns), axis=0, inplace=True)
    frame.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_014_get_consolidation_dataframe(self):
    """Consolidation of 'Costs' over the main hierarchy matches the baseline."""
    csv_name = 'df_get_consolidation_costs_main.csv'
    frame = self.dim.get_consolidation_dataframe('Costs', hierarchy=MAIN)
    frame.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_015_get_properties_dataframe(self):
    """Property dataframe must match the baseline CSV."""
    csv_name = 'df_get_properties.csv'
    frame = self.dim.get_properties_dataframe()
    # Drop the synthetic index column before normalizing the layout.
    frame.drop(labels='index', axis=1, inplace=True)
    frame = frame.reindex(columns=sorted(frame.columns))
    frame.sort_values(by=list(frame.columns), axis=0, inplace=True)
    frame.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_016_get_values_dataframe(self):
    """Value dataframe (columns and rows sorted) must match the baseline CSV."""
    csv_name = 'df_get_values.csv'
    frame = self.dim.get_values_dataframe()
    frame = frame.reindex(columns=sorted(frame.columns))
    frame.sort_values(by=list(frame.columns), axis=0, inplace=True)
    frame.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_017_get_hierarchy_table(self):
    """Flattened main hierarchy table must match the baseline CSV."""
    csv_name = 'df_get_hierarchy_table_main.csv'
    frame = self.dim.hierarchy_table(hierarchy=MAIN)
    frame = frame.reindex(columns=sorted(frame.columns))
    frame.sort_values(by=list(frame.columns), axis=0, inplace=True)
    frame.to_csv(f'{FOLDER}{csv_name}', index=False)
    self.assertFileEqual(f'{FOLDER}{csv_name}', f'{BASELINE}{csv_name}')
    return
def test_018_get_all_leaves(self):
    """Every leaf node of the main hierarchy, compared in sorted order."""
    expected = ['April', 'August', 'December', 'February', 'January',
                'Janusday', 'July', 'June', 'March', 'May', 'November',
                'October', 'September']
    actual = sorted(self.dim.get_all_leaves(hierarchy=MAIN))
    return self.assertListEqual(expected, actual)
def test_019_get_all_nodes(self):
    """Every node (leaves and parents) of the main hierarchy."""
    expected = ['!!root!!', 'April', 'August', 'December', 'Donk-tober',
                'February', 'January', 'Janusday', 'July', 'June', 'March',
                'May', 'November', 'October', 'Q1', 'Q2', 'Q3', 'Q4', 'Q5',
                'September', 'Year']
    actual = sorted(self.dim.get_all_nodes(hierarchy=MAIN))
    return self.assertListEqual(expected, actual)
def test_020_get_all_parents(self):
    """All parent (non-leaf) nodes of the main hierarchy."""
    actual = sorted(self.dim.get_all_parents(hierarchy=MAIN))
    return self.assertListEqual(
        ['!!root!!', 'Donk-tober', 'Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Year'], actual
    )
def test_021_get_ancestors(self):
    """Ancestor chain of February, nearest first, with generation offsets."""
    chain = self.dim.get_ancestors('February', hierarchy=MAIN)
    return self.assertListEqual(
        [[0, 'February'], [1, 'Q1'], [2, 'Year'], [3, '!!root!!']], chain
    )
def test_022_get_ancestor_at_generation(self):
    """Ancestor of February at generation 1 is Year."""
    actual = self.dim.get_ancestor_at_generation('February', 1, hierarchy=MAIN)
    return self.assertEqual('Year', actual)
def test_023_get_ancestor_at_level(self):
    """Ancestor of February at level 2 is Year."""
    actual = self.dim.get_ancestor_at_level('February', 2, hierarchy=MAIN)
    return self.assertEqual('Year', actual)
def test_024_get_bottom(self):
    """Last child of Q1 is March."""
    actual = self.dim.get_bottom('Q1', hierarchy=MAIN)
    return self.assertEqual('March', actual)
def test_025_get_top(self):
    """First child of Q1 is January."""
    actual = self.dim.get_top('Q1', hierarchy=MAIN)
    return self.assertEqual('January', actual)
def test_026_get_down(self):
    """Sibling after February under Q1 is March."""
    actual = self.dim.get_down('Q1', 'February', hierarchy=MAIN)
    return self.assertEqual('March', actual)
def test_027_get_up(self):
    """Sibling before February under Q1 is January."""
    actual = self.dim.get_up('Q1', 'February', hierarchy=MAIN)
    return self.assertEqual('January', actual)
def test_028_get_children(self):
    """Children of Q1 in hierarchy order."""
    actual = self.dim.get_children('Q1', hierarchy=MAIN)
    return self.assertListEqual(['January', 'February', 'March'], actual)
def test_029_get_children_count(self):
    """Q1 has exactly three children."""
    actual = self.dim.get_children_count('Q1', hierarchy=MAIN)
    return self.assertEqual(3, actual)
def test_030_get_generation(self):
    """Q1 sits at generation 2 of the main hierarchy."""
    actual = self.dim.get_generation('Q1', hierarchy=MAIN)
    return self.assertEqual(2, actual)
def test_031_get_grandparent(self):
    """Grandparent of February is Year."""
    actual = self.dim.get_grandparent('February', hierarchy=MAIN)
    return self.assertEqual('Year', actual)
def test_032_get_leaves(self):
    """Leaves under Year: all twelve months at generation 2 plus Janusday at 3."""
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    expected = [[2, month] for month in months] + [[3, 'Janusday']]
    actual = self.dim.get_leaves('Year', hierarchy=MAIN)
    return self.assertEqual(expected, actual)
def test_033_get_leaves_at_generation(self):
    """Leaves under Year restricted to generation 2: the twelve months."""
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    expected = [[2, month] for month in months]
    actual = self.dim.get_leaves_at_generation('Year', 2, hierarchy=MAIN)
    return self.assertEqual(expected, actual)
def test_034_get_leaves_at_level(self):
    """Leaves at level 0 relative to February: the twelve months at level 3."""
    months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    expected = [[3, month] for month in months]
    actual = self.dim.get_leaves_at_level('February', 0, hierarchy=MAIN)
    return self.assertEqual(expected, actual)
def test_035_get_parent(self):
    """Parent of February in the main hierarchy is Q1."""
    actual = self.dim.get_parent('February', hierarchy=MAIN)
    return self.assertEqual('Q1', actual)
def test_036_get_parents(self):
    """Parents of February across all hierarchies (names, then parents)."""
    actual = self.dim.get_parents('February')
    return self.assertEqual([['financial', 'halves', 'main'], ['YTD', 'Q1', 'Q1']], actual)
def test_037_get_siblings(self):
    """Siblings of February (including itself) in the main hierarchy."""
    actual = self.dim.get_siblings('February', hierarchy=MAIN)
    return self.assertEqual(['January', 'February', 'March'], actual)
def test_038_get_difference(self):
    """Nodes present in main but absent from the halves hierarchy."""
    actual = sorted(self.dim.get_difference(['halves']))
    return self.assertEqual(['Donk-tober', 'Janusday', 'Q5', 'Year'], actual)
def test_039_get_intersection(self):
    """Nodes shared by the main and halves hierarchies."""
    expected = ['!!root!!', 'April', 'August', 'December', 'February',
                'January', 'July', 'June', 'March', 'May', 'November',
                'October', 'Q1', 'Q2', 'Q3', 'Q4', 'September']
    actual = sorted(self.dim.get_intersection(['halves']))
    return self.assertEqual(expected, actual)
def test_040_get_union(self):
    """Union of the nodes in the main and halves hierarchies."""
    expected = ['!!root!!', 'April', 'August', 'December', 'Donk-tober',
                'February', 'H1', 'H2', 'January', 'Janusday', 'July', 'June',
                'March', 'May', 'November', 'October', 'Q1', 'Q2', 'Q3', 'Q4',
                'Q5', 'September', 'Year']
    actual = sorted(self.dim.get_union(['halves']))
    return self.assertEqual(expected, actual)
def test_041_add_node_to_alt(self):
    """Adding Q5 under H2 in the halves hierarchy re-parents it there."""
    self.dim.add_node('H2', 'Q5', '+', hierarchy='halves', after='Q4')
    parent = self.dim.get_parent('Q5', hierarchy='halves')
    return self.assertEqual('H2', parent)
def test_042_move_node_in_alt(self):
    """Moving Q5 before Q2 under H1 updates its parent in halves."""
    self.dim.move_node('Q5', 'H1', hierarchy='halves', before='Q2')
    parent = self.dim.get_parent('Q5', hierarchy='halves')
    return self.assertEqual('H1', parent)
def test_043_rename_node(self):
    """Renaming Donk-tober keeps it attached to its parent Q5."""
    self.dim.rename_node('Donk-tober', 'Davetober')
    parent = self.dim.get_parent('Davetober', hierarchy=MAIN)
    return self.assertEqual('Q5', parent)
def test_044_delete_node(self):
    """Deleting Q5 from under Year removes the node entirely."""
    self.dim.delete_node('Year', 'Q5', hierarchy=MAIN)
    return self.assertFalse(self.dim.node_exists('Q5'))
def test_045_default_alias_dataframe(self):
    """Setting default aliases must be reflected in the exported alias CSV.

    Fix: the method ended with a stray ``pass`` (dead code) and, unlike every
    sibling test in this class, had no ``return``; normalized to ``return``.
    """
    self.dim.set_default_aliases(primary='Welsh', secondary='French')
    df = self.dim.get_aliases_dataframe()
    df = df.reindex(columns=sorted(df.columns))
    df.sort_values(by=list(df.columns), axis=0, inplace=True)
    df.to_csv(f'{FOLDER}df_get_default_aliases.csv', index=False)
    self.assertFileEqual(f'{FOLDER}df_get_default_aliases.csv', f'{BASELINE}df_get_default_aliases.csv')
    return
def tearDown(self):
self.dim = None
self.dims = None
| 40.745223 | 126 | 0.523409 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import filecmp
import os
import unittest
from unittest import TestCase
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from plaidcloud.utilities.connect import create_connection
from plaidcloud.utilities.remote.dimension import Dimensions
from plaidcloud.utilities.remote.dimension import MAIN
from plaidcloud.utilities.remote.dimension import ROOT
__author__ = 'Dave Parsons'
__copyright__ = 'Copyright 2010-2020, Tartan Solutions, Inc'
__credits__ = ['Dave Parsons']
__license__ = 'Proprietary'
__maintainer__ = 'Dave Parsons'
__email__ = 'dave.parsons@tartansolutions.com'
BASELINE = './dim_baseline/'
FOLDER = './dim_current/'
conn = create_connection(verify_ssl=False)
class TestDimension(TestCase):
def assertFileEqual(self, file1, file2, **kwargs):
return self.assertTrue(filecmp.cmp(file1, file2, shallow=False))
def assertFrameEqual(self, df1, df2, **kwargs):
return assert_frame_equal(df1, df2, check_names=True, check_like=True, **kwargs)
def setUp(self):
if not os.path.exists(BASELINE):
os.makedirs(BASELINE)
self.periods = 'periods_rpc_test'
self.dims = Dimensions(conn=conn)
self.dim = self.dims.get_dimension(name=self.periods, replace=False)
return
def test_001_load_hierarchy_main(self):
df_main = pd.DataFrame(
[
[ROOT, 'Year'],
['Year', 'Q1'],
['Year', 'Q2'],
['Year', 'Q3'],
['Year', 'Q4'],
['Q1', 'January'],
['Q1', 'February'],
['Q1', 'March'],
['Q2', 'April'],
['Q2', 'May'],
['Q2', 'June'],
['Q3', 'July'],
['Q3', 'August'],
['Q3', 'September'],
['Q4', 'October'],
['Q4', 'November'],
['Q4', 'December'],
],
columns=['ParentName', 'ChildName']
)
self.dim.clear()
df_results = self.dim.load_hierarchy_from_dataframe(df_main, 'ParentName', 'ChildName')
df_results.to_csv(f'{FOLDER}df_main_load.csv', index=False)
data = self.dims.backup(self.periods)
with open(f'{FOLDER}periods.yaml', 'w') as file:
file.write(data)
self.assertFileEqual(f'{FOLDER}df_main_load.csv', f'{BASELINE}df_main_load.csv')
return
def test_002_save_hierarchy_main(self):
df = self.dim.save_hierarchy_to_dataframe(MAIN)
df.drop(labels='index', axis=1, inplace=True)
df.to_csv(f'{FOLDER}df_main_hierarchy.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_main_hierarchy.csv', f'{BASELINE}df_main_hierarchy.csv')
return
def test_003_load_hierarchy_halves(self):
df_halves = pd.DataFrame(
[
[ROOT, 'H1', '~', 'halves'],
[ROOT, 'H2', '~', 'halves'],
['H1', 'Q1', '+', 'halves'],
['H1', 'Q2', '+', 'halves'],
['H2', 'Q3', '+', 'halves'],
['H2', 'Q4', '+', 'halves'],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
df_results = self.dim.load_hierarchy_from_dataframe(df_halves, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_halves_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_halves_load.csv', f'{BASELINE}df_halves_load.csv')
return
def test_004_save_hierarchy_halves(self):
df = self.dim.save_hierarchy_to_dataframe('halves')
df.drop(labels='index', axis=1, inplace=True)
df.to_csv(f'{FOLDER}df_halves_hierarchy.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_halves_hierarchy.csv', f'{BASELINE}df_halves_hierarchy.csv')
return
def test_005_load_hierarchy_financial(self):
df_financial = pd.DataFrame(
[
[ROOT, 'YTD', '+', 'financial'],
[ROOT, 'YTG', '+', 'financial'],
['YTD', 'January', '+', 'financial'],
['YTD', 'February', '+', 'financial'],
['YTD', 'March', '+', 'financial'],
['YTD', 'April', '+', 'financial'],
['YTG', 'May', '-', 'financial'],
['YTG', 'June', '-', 'financial'],
['YTG', 'July', '-', 'financial'],
['YTG', 'August', '-', 'financial'],
['YTG', 'September', '-', 'financial'],
['YTG', 'October', '-', 'financial'],
['YTG', 'November', '-', 'financial'],
['YTG', 'December', '-', 'financial'],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
df_results = self.dim.load_hierarchy_from_dataframe(df_financial, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_financial_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_financial_load.csv', f'{BASELINE}df_financial_load.csv')
return
def test_006_save_hierarchy_financial(self):
df = self.dim.save_hierarchy_to_dataframe('financial')
df.drop(labels='index', axis=1, inplace=True)
df.to_csv(f'{FOLDER}df_financial_hierarchy.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_financial_hierarchy.csv', f'{BASELINE}df_financial_hierarchy.csv')
return
def test_007_load_hierarchy_errors(self):
df_test = pd.DataFrame(
[
['', '', '+', 'main'],
[' ', ' ', '+', 'main'],
['Q5', '', '+', 'main'],
[np.NaN, np.NaN, '+', 'main'],
[None, None, '+', 'main'],
['None', 'None', '+', 'main'],
['Q5', 'Q5', '+', 'main'],
['Q5', ROOT, '+', 'main'],
['Q5', 'Donk:tober', '+', 'main'],
['Donk:tober', 'Janusday', '+', 'main'],
['Year', 'Q5', '+', 'main'],
['Year', 'Q5', '+', 'main'],
['Q4', 'Badtober', '+', 'halves'],
['Q6', 'Craptober', '+', ''],
],
columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']
)
df_results = self.dim.load_hierarchy_from_dataframe(df_test, 'ParentName', 'ChildName',
'ConsolidationType', hierarchy='Hierarchy')
df_results.to_csv(f'{FOLDER}df_complex_load.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_complex_load.csv', f'{BASELINE}df_complex_load.csv')
return
    def test_008_load_save_aliases(self):
        """Round-trip node aliases through load/save and compare to baseline.

        The alias table deliberately spans several scripts (Latin, Cyrillic,
        Hangul, emoji) to exercise Unicode handling, and includes one value
        ('Mai') repeated across two alias sets.
        """
        df_aliases = pd.DataFrame(
            [
                ['Trimestre 1', 'French', 'Q1'],
                ['Trimestre 2', 'French', 'Q2'],
                ['Trimestre 3', 'French', 'Q3'],
                ['Trimestre 4', 'French', 'Q4'],
                ['Janvier', 'French', 'January'],
                ['Fevier', 'French', 'February'],
                ['Mars', 'French', 'March'],
                ['Avril', 'French', 'April'],
                ['Mai', 'French', 'May'],
                ['Juin', 'French', 'June'],
                ['Julliet', 'French', 'July'],
                ['Aout', 'French', 'August'],
                ['Septembre', 'French', 'September'],
                ['Octobre', 'French', 'October'],
                ['Novembre', 'French', 'November'],
                ['Decembre', 'French', 'December'],
                ['Haneri 1', 'Welsh', 'H1'],
                ['Haneri 2', 'Welsh', 'H2'],
                ['Ionawr', 'Welsh', 'January'],
                ['Chwefror', 'Welsh', 'February'],
                ['Mawrth', 'Welsh', 'March'],
                ['Ebrill', 'Welsh', 'April'],
                ['Mai', 'Welsh', 'May'],
                ['Mehefin', 'Welsh', 'June'],
                ['Gorffennaf', 'Welsh', 'July'],
                ['Awst', 'Welsh', 'August'],
                ['Medi', 'Welsh', 'September'],
                ['Hydref', 'Welsh', 'October'],
                ['Tachwedd', 'Welsh', 'November'],
                ['Rhagfyr', 'Welsh', 'December'],
                ['Январь', 'Russian', 'January'],
                ['Февраль', 'Russian', 'February'],
                ['Март', 'Russian', 'March'],
                ['Апрель', 'Russian', 'April'],
                ['Май', 'Russian', 'May'],
                ['Июнь', 'Russian', 'June'],
                ['Июль', 'Russian', 'July'],
                ['Август', 'Russian', 'August'],
                ['Сентябрь', 'Russian', 'September'],
                ['Октябрь', 'Russian', 'October'],
                ['Ноябрь', 'Russian', 'November'],
                ['Декабрь', 'Russian', 'December'],
                ['일월', 'Korean', 'January'],
                ['이월', 'Korean', 'February'],
                ['삼월', 'Korean', 'March'],
                ['사월', 'Korean', 'April'],
                ['오월', 'Korean', 'May'],
                ['유월', 'Korean', 'June'],
                ['칠월', 'Korean', 'July'],
                ['팔월', 'Korean', 'August'],
                ['구월', 'Korean', 'September'],
                ['시월', 'Korean', 'October'],
                ['십일월', 'Korean', 'November'],
                ['십이월', 'Korean', 'December'],
                ['☃️', 'Emoji', 'January'],
                ['💘', 'Emoji', 'February'],
                ['☘️', 'Emoji', 'March'],
                ['☔', 'Emoji', 'April'],
                ['🌺', 'Emoji', 'May'],
                ['🌞', 'Emoji', 'June'],
                ['🍦', 'Emoji', 'July'],
                ['🏖️', 'Emoji', 'August'],
                ['🍎', 'Emoji', 'September'],
                ['🎃', 'Emoji', 'October'],
                ['🍂', 'Emoji', 'November'],
                ['🎅', 'Emoji', 'December'],
            ],
            columns=['AliasValue', 'AliasName', 'NodeName']
        )
        self.dim.load_aliases_from_dataframe(df_aliases, 'NodeName', 'AliasName', 'AliasValue')
        df = self.dim.save_aliases_to_dataframe(None)
        df.drop(labels='index', axis=1, inplace=True)
        # Sort for a deterministic comparison against the baseline file.
        df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
        df.to_csv(f'{FOLDER}df_aliases.csv', index=False)
        self.assertFileEqual(f'{FOLDER}df_aliases.csv', f'{BASELINE}df_aliases.csv')
        return
    def test_009_load_save_properties(self):
        """Round-trip node properties through load/save and compare to baseline.

        Properties are attached at several depths (root, Year, quarters,
        months) and some values repeat across nodes.
        """
        df_properties = pd.DataFrame(
            [
                ['Magenta', 'Colour', ROOT],
                ['Purple', 'Colour', 'Year'],
                ['Red', 'Colour', 'Q1'],
                ['Orange', 'Colour', 'Q2'],
                ['Green', 'Colour', 'April'],
                ['Green', 'Colour', 'May'],
                ['Blue', 'Colour', 'July'],
                ['Blue', 'Colour', 'August'],
                ['Blue', 'Colour', 'September'],
                ['White', 'Colour', 'Q4'],
                ['Red', 'Colour', 'October'],
                ['Green', 'Colour', 'November'],
                ['Red', 'Colour', 'December'],
                ['Winter', 'Season', 'Q1'],
                ['Spring', 'Season', 'Q2'],
                ['Summer', 'Season', 'Q3'],
                ['Autumn', 'Season', 'Q4'],
            ],
            columns=['PropertyValue', 'PropertyName', 'NodeName']
        )
        self.dim.load_properties_from_dataframe(df_properties, 'NodeName', 'PropertyName', 'PropertyValue')
        df = self.dim.save_properties_to_dataframe(None)
        df.drop(labels='index', axis=1, inplace=True)
        # Sort for a deterministic comparison against the baseline file.
        df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
        df.to_csv(f'{FOLDER}df_properties.csv', index=False)
        self.assertFileEqual(f'{FOLDER}df_properties.csv', f'{BASELINE}df_properties.csv')
        return
    def test_010_load_save_values(self):
        """Round-trip numeric node values through load/save and compare to
        baseline. 'Costs' are negative and 'Profit' positive, one value per
        month.
        """
        df_values = pd.DataFrame(
            [
                [-10.0, 'Costs', 'January'],
                [-100.0, 'Costs', 'February'],
                [-1000.0, 'Costs', 'March'],
                [-20.0, 'Costs', 'April'],
                [-200.0, 'Costs', 'May'],
                [-2000.0, 'Costs', 'June'],
                [-30.0, 'Costs', 'July'],
                [-300.0, 'Costs', 'August'],
                [-3000.0, 'Costs', 'September'],
                [-40.0, 'Costs', 'October'],
                [-400.0, 'Costs', 'November'],
                [-4000.0, 'Costs', 'December'],
                [10.0, 'Profit', 'January'],
                [100.0, 'Profit', 'February'],
                [1000.0, 'Profit', 'March'],
                [20.0, 'Profit', 'April'],
                [200.0, 'Profit', 'May'],
                [2000.0, 'Profit', 'June'],
                [30.0, 'Profit', 'July'],
                [300.0, 'Profit', 'August'],
                [3000.0, 'Profit', 'September'],
                [40.0, 'Profit', 'October'],
                [400.0, 'Profit', 'November'],
                [4000.0, 'Profit', 'December'],
            ],
            columns=['Value', 'ValueName', 'NodeName']
        )
        self.dim.load_values_from_dataframe(df_values, 'NodeName', 'ValueName', 'Value')
        df = self.dim.save_values_to_dataframe(None)
        df.drop(labels='index', axis=1, inplace=True)
        # Sort for a deterministic comparison against the baseline file.
        df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)
        df.to_csv(f'{FOLDER}df_values.csv', index=False)
        self.assertFileEqual(f'{FOLDER}df_values.csv', f'{BASELINE}df_values.csv')
        return
def test_011_get_hierarchy_dataframe(self):
df = self.dim.get_hierarchy_dataframe(hierarchy=MAIN)
df = df.reindex(columns=sorted(df.columns))
df.to_csv(f'{FOLDER}df_get_hierarchy_main.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_hierarchy_main.csv', f'{BASELINE}df_get_hierarchy_main.csv')
return
def test_012_get_aliases_dataframe(self):
df = self.dim.get_aliases_dataframe()
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_aliases.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_aliases.csv', f'{BASELINE}df_get_aliases.csv')
return
def test_013_get_attributes_dataframe(self):
df = self.dim.get_attributes_dataframe()
df.drop(labels='index', axis=1, inplace=True)
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_attributes.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_attributes.csv', f'{BASELINE}df_get_attributes.csv')
return
def test_014_get_consolidation_dataframe(self):
df = self.dim.get_consolidation_dataframe('Costs', hierarchy=MAIN)
df.to_csv(f'{FOLDER}df_get_consolidation_costs_main.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_consolidation_costs_main.csv', f'{BASELINE}df_get_consolidation_costs_main.csv')
return
def test_015_get_properties_dataframe(self):
df = self.dim.get_properties_dataframe()
df.drop(labels='index', axis=1, inplace=True)
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_properties.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_properties.csv', f'{BASELINE}df_get_properties.csv')
return
def test_016_get_values_dataframe(self):
df = self.dim.get_values_dataframe()
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_values.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_values.csv', f'{BASELINE}df_get_values.csv')
return
def test_017_get_hierarchy_table(self):
df = self.dim.hierarchy_table(hierarchy=MAIN)
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_hierarchy_table_main.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_hierarchy_table_main.csv', f'{BASELINE}df_get_hierarchy_table_main.csv')
return
    def test_018_get_all_leaves(self):
        """All leaf nodes of the MAIN hierarchy; sorted so the comparison is
        order-insensitive. 'Janusday' is the only non-month leaf."""
        expected = ['April',
                    'August',
                    'December',
                    'February',
                    'January',
                    'Janusday',
                    'July',
                    'June',
                    'March',
                    'May',
                    'November',
                    'October',
                    'September']
        nodes = sorted(self.dim.get_all_leaves(hierarchy=MAIN))
        return self.assertListEqual(expected, nodes)
    def test_019_get_all_nodes(self):
        """Every node of the MAIN hierarchy, including the '!!root!!' sentinel
        and the consolidated nodes; sorted so the comparison is
        order-insensitive."""
        expected = ['!!root!!',
                    'April',
                    'August',
                    'December',
                    'Donk-tober',
                    'February',
                    'January',
                    'Janusday',
                    'July',
                    'June',
                    'March',
                    'May',
                    'November',
                    'October',
                    'Q1',
                    'Q2',
                    'Q3',
                    'Q4',
                    'Q5',
                    'September',
                    'Year']
        nodes = sorted(self.dim.get_all_nodes(hierarchy=MAIN))
        return self.assertListEqual(expected, nodes)
def test_020_get_all_parents(self):
expected = ['!!root!!', 'Donk-tober', 'Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Year']
nodes = sorted(self.dim.get_all_parents(hierarchy=MAIN))
return self.assertListEqual(expected, nodes)
def test_021_get_ancestors(self):
expected = [[0, 'February'], [1, 'Q1'], [2, 'Year'], [3, '!!root!!']]
nodes = self.dim.get_ancestors('February', hierarchy=MAIN)
return self.assertListEqual(expected, nodes)
def test_022_get_ancestor_at_generation(self):
expected = 'Year'
node = self.dim.get_ancestor_at_generation('February', 1, hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_023_get_ancestor_at_level(self):
expected = 'Year'
node = self.dim.get_ancestor_at_level('February', 2, hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_024_get_bottom(self):
expected = 'March'
node = self.dim.get_bottom('Q1', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_025_get_top(self):
expected = 'January'
node = self.dim.get_top('Q1', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_026_get_down(self):
expected = 'March'
node = self.dim.get_down('Q1', 'February', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_027_get_up(self):
expected = 'January'
node = self.dim.get_up('Q1', 'February', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_028_get_children(self):
expected = ['January', 'February', 'March']
nodes = self.dim.get_children('Q1', hierarchy=MAIN)
return self.assertListEqual(expected, nodes)
def test_029_get_children_count(self):
expected = 3
count = self.dim.get_children_count('Q1', hierarchy=MAIN)
return self.assertEqual(expected, count)
def test_030_get_generation(self):
expected = 2
count = self.dim.get_generation('Q1', hierarchy=MAIN)
return self.assertEqual(expected, count)
def test_031_get_grandparent(self):
expected = 'Year'
node = self.dim.get_grandparent('February', hierarchy=MAIN)
return self.assertEqual(expected, node)
    def test_032_get_leaves(self):
        """Leaves under 'Year' as [generation, name] pairs in hierarchy order.
        'Janusday' appears at generation 3 (it hangs one level deeper than
        the months — presumably under Donk-tober; see the hierarchy setup)."""
        expected = [[2, 'January'],
                    [2, 'February'],
                    [2, 'March'],
                    [2, 'April'],
                    [2, 'May'],
                    [2, 'June'],
                    [2, 'July'],
                    [2, 'August'],
                    [2, 'September'],
                    [2, 'October'],
                    [2, 'November'],
                    [2, 'December'],
                    [3, 'Janusday']]
        nodes = self.dim.get_leaves('Year', hierarchy=MAIN)
        return self.assertEqual(expected, nodes)
    def test_033_get_leaves_at_generation(self):
        """Leaves under 'Year' restricted to generation 2 only — the deeper
        'Janusday' leaf (generation 3) is excluded."""
        expected = [[2, 'January'],
                    [2, 'February'],
                    [2, 'March'],
                    [2, 'April'],
                    [2, 'May'],
                    [2, 'June'],
                    [2, 'July'],
                    [2, 'August'],
                    [2, 'September'],
                    [2, 'October'],
                    [2, 'November'],
                    [2, 'December']]
        nodes = self.dim.get_leaves_at_generation('Year', 2, hierarchy=MAIN)
        return self.assertEqual(expected, nodes)
    def test_034_get_leaves_at_level(self):
        """Leaves at level 0 relative to 'February', reported as [3, name]
        pairs. NOTE(review): the numbers here (3) differ from the generation
        numbers in test_033 (2) — levels appear to be counted on a different
        basis than generations; confirm against the API documentation."""
        expected = [[3, 'January'],
                    [3, 'February'],
                    [3, 'March'],
                    [3, 'April'],
                    [3, 'May'],
                    [3, 'June'],
                    [3, 'July'],
                    [3, 'August'],
                    [3, 'September'],
                    [3, 'October'],
                    [3, 'November'],
                    [3, 'December']]
        nodes = self.dim.get_leaves_at_level('February', 0, hierarchy=MAIN)
        return self.assertEqual(expected, nodes)
def test_035_get_parent(self):
expected = 'Q1'
nodes = self.dim.get_parent('February', hierarchy=MAIN)
return self.assertEqual(expected, nodes)
def test_036_get_parents(self):
expected = [['financial', 'halves', 'main'], ['YTD', 'Q1', 'Q1']]
nodes = self.dim.get_parents('February')
return self.assertEqual(expected, nodes)
def test_037_get_siblings(self):
expected = ['January', 'February', 'March']
nodes = self.dim.get_siblings('February', hierarchy=MAIN)
return self.assertEqual(expected, nodes)
def test_038_get_difference(self):
expected = sorted(['Janusday', 'Year', 'Q5', 'Donk-tober'])
nodes = sorted(self.dim.get_difference(['halves']))
return self.assertEqual(expected, nodes)
def test_039_get_intersection(self):
expected = sorted(['!!root!!', 'April', 'August', 'December', 'February', 'January', 'July', 'June', 'March',
'May', 'November', 'October', 'Q1', 'Q2', 'Q3', 'Q4', 'September'])
nodes = sorted(self.dim.get_intersection(['halves']))
return self.assertEqual(expected, nodes)
def test_040_get_union(self):
expected = sorted(['!!root!!', 'April', 'August', 'December', 'Donk-tober', 'February', 'H1', 'H2', 'January',
'Janusday', 'July', 'June', 'March', 'May', 'November', 'October', 'Q1', 'Q2', 'Q3', 'Q4',
'Q5', 'September', 'Year'])
nodes = sorted(self.dim.get_union(['halves']))
return self.assertEqual(expected, nodes)
def test_041_add_node_to_alt(self):
expected = 'H2'
self.dim.add_node('H2', 'Q5', '+', hierarchy='halves', after='Q4')
node = self.dim.get_parent('Q5', hierarchy='halves')
return self.assertEqual(expected, node)
def test_042_move_node_in_alt(self):
expected = 'H1'
self.dim.move_node('Q5', 'H1', hierarchy='halves', before='Q2')
node = self.dim.get_parent('Q5', hierarchy='halves')
return self.assertEqual(expected, node)
def test_043_rename_node(self):
expected = 'Q5'
self.dim.rename_node('Donk-tober', 'Davetober')
node = self.dim.get_parent('Davetober', hierarchy=MAIN)
return self.assertEqual(expected, node)
def test_044_delete_node(self):
self.dim.delete_node('Year', 'Q5', hierarchy=MAIN)
node = self.dim.node_exists('Q5')
return self.assertFalse(node)
def test_045_default_alias_dataframe(self):
self.dim.set_default_aliases(primary='Welsh', secondary='French')
df = self.dim.get_aliases_dataframe()
df = df.reindex(columns=sorted(df.columns))
df.sort_values(by=list(df.columns), axis=0, inplace=True)
df.to_csv(f'{FOLDER}df_get_default_aliases.csv', index=False)
self.assertFileEqual(f'{FOLDER}df_get_default_aliases.csv', f'{BASELINE}df_get_default_aliases.csv')
pass
def tearDown(self):
self.dim = None
self.dims = None
| true | true |
f7201125ce532819474be57b6c62cb7fcba4cd59 | 33,749 | py | Python | sdks/python/apache_beam/dataframe/pandas_doctests_test.py | psobot/beam | d9da8a4dc818b01a86d2dce2e78c0d78b47038bb | [
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2019-07-27T11:54:33.000Z | 2021-06-06T11:53:36.000Z | sdks/python/apache_beam/dataframe/pandas_doctests_test.py | psobot/beam | d9da8a4dc818b01a86d2dce2e78c0d78b47038bb | [
"Apache-2.0",
"BSD-3-Clause"
] | 12 | 2019-04-15T15:27:23.000Z | 2019-07-01T18:13:10.000Z | sdks/python/apache_beam/dataframe/pandas_doctests_test.py | psobot/beam | d9da8a4dc818b01a86d2dce2e78c0d78b47038bb | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-06-03T19:54:48.000Z | 2021-06-03T19:54:48.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import pandas as pd
from apache_beam.dataframe import doctests
from apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function
@unittest.skipIf(sys.platform == 'win32', '[BEAM-10626]')
class DoctestTest(unittest.TestCase):
  def test_ndframe_tests(self):
    """Run the pandas NDFrame doctests against the Beam dataframe layer.

    wont_implement_ok lists doctest examples Beam deliberately does not
    support (typically order-sensitive operations); not_implemented_ok
    lists known gaps; skip lists examples that cannot run in this
    harness at all (reasons inline). The run must finish with zero
    failures.
    """
    # IO methods are tested in io_test.py
    skip_writes = {
        f'pandas.core.generic.NDFrame.{name}': ['*']
        for name in dir(pd.core.generic.NDFrame) if name.startswith('to_')
    }
    result = doctests.testmod(
        pd.core.generic,
        use_beam=False,
        report=True,
        wont_implement_ok={
            'pandas.core.generic.NDFrame.first': ['*'],
            'pandas.core.generic.NDFrame.head': ['*'],
            'pandas.core.generic.NDFrame.last': ['*'],
            'pandas.core.generic.NDFrame.shift': ['*'],
            'pandas.core.generic.NDFrame.tail': ['*'],
            'pandas.core.generic.NDFrame.take': ['*'],
            'pandas.core.generic.NDFrame.values': ['*'],
            'pandas.core.generic.NDFrame.tz_localize': [
                "s.tz_localize('CET', ambiguous='infer')",
                # np.array is not a deferred object. This use-case is possible
                # with a deferred Series though, which is tested in
                # frames_test.py
                "s.tz_localize('CET', ambiguous=np.array([True, True, False]))",
            ],
            'pandas.core.generic.NDFrame.truncate': [
                # These inputs rely on tail (wont implement, order
                # sensitive) for verification
                "df.tail()",
                "df.loc['2016-01-05':'2016-01-10', :].tail()",
            ],
            'pandas.core.generic.NDFrame.replace': [
                "s.replace([1, 2], method='bfill')",
                # Relies on method='pad'
                "s.replace('a', None)",
            ],
            'pandas.core.generic.NDFrame.fillna': [
                "df.fillna(method='ffill')",
                'df.fillna(value=values, limit=1)',
            ],
            'pandas.core.generic.NDFrame.sort_values': ['*'],
            'pandas.core.generic.NDFrame.mask': [
                'df.where(m, -df) == np.where(m, df, -df)'
            ],
            'pandas.core.generic.NDFrame.where': [
                'df.where(m, -df) == np.where(m, df, -df)'
            ],
            'pandas.core.generic.NDFrame.interpolate': ['*'],
        },
        not_implemented_ok={
            'pandas.core.generic.NDFrame.asof': ['*'],
            'pandas.core.generic.NDFrame.at_time': ['*'],
            'pandas.core.generic.NDFrame.between_time': ['*'],
            'pandas.core.generic.NDFrame.describe': ['*'],
            'pandas.core.generic.NDFrame.ewm': ['*'],
            'pandas.core.generic.NDFrame.expanding': ['*'],
            'pandas.core.generic.NDFrame.flags': ['*'],
            'pandas.core.generic.NDFrame.pct_change': ['*'],
            'pandas.core.generic.NDFrame.rank': ['*'],
            'pandas.core.generic.NDFrame.reindex': ['*'],
            'pandas.core.generic.NDFrame.reindex_like': ['*'],
            'pandas.core.generic.NDFrame.replace': ['*'],
            'pandas.core.generic.NDFrame.resample': ['*'],
            'pandas.core.generic.NDFrame.rolling': ['*'],
            'pandas.core.generic.NDFrame.sample': ['*'],
            'pandas.core.generic.NDFrame.set_flags': ['*'],
            'pandas.core.generic.NDFrame.squeeze': ['*'],
            'pandas.core.generic.NDFrame.transform': ['*'],
            'pandas.core.generic.NDFrame.truncate': ['*'],
            'pandas.core.generic.NDFrame.xs': ['*'],
            # argsort unimplemented
            'pandas.core.generic.NDFrame.abs': [
                'df.loc[(df.c - 43).abs().argsort()]',
            ],
        },
        skip={
            # Internal test
            'pandas.core.generic.NDFrame._set_axis_name': ['*'],
            # Fails to construct test series. asfreq is not implemented anyway.
            'pandas.core.generic.NDFrame.asfreq': ['*'],
            'pandas.core.generic.NDFrame.astype': ['*'],
            'pandas.core.generic.NDFrame.convert_dtypes': ['*'],
            'pandas.core.generic.NDFrame.copy': ['*'],
            'pandas.core.generic.NDFrame.droplevel': ['*'],
            'pandas.core.generic.NDFrame.infer_objects': ['*'],
            'pandas.core.generic.NDFrame.rank': [
                # Modified dataframe
                'df'
            ],
            'pandas.core.generic.NDFrame.rename': [
                # Seems to be an upstream bug. The actual error has a different
                # message:
                #   TypeError: Index(...) must be called with a collection of
                #   some kind, 2 was passed
                # pandas doctests only verify the type of exception
                'df.rename(2)'
            ],
            # Tests rely on setting index
            'pandas.core.generic.NDFrame.rename_axis': ['*'],
            # Raises right exception, but testing framework has matching issues.
            'pandas.core.generic.NDFrame.replace': [
                "df.replace({'a string': 'new value', True: False})  # raises"
            ],
            'pandas.core.generic.NDFrame.squeeze': ['*'],
            # NameError
            'pandas.core.generic.NDFrame.resample': ['df'],
            # Skipped so we don't need to install natsort
            'pandas.core.generic.NDFrame.sort_values': [
                'from natsort import index_natsorted',
                'df.sort_values(\n'
                '   by="time",\n'
                '   key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
                ')'
            ],
            **skip_writes
        })
    self.assertEqual(result.failed, 0)
  def test_dataframe_tests(self):
    """Run the pandas DataFrame doctests against the Beam dataframe layer.

    wont_implement_ok lists examples Beam deliberately does not support
    (typically order-sensitive); not_implemented_ok lists known gaps;
    skip lists examples that cannot run in this harness (reasons
    inline). The run must finish with zero failures.
    """
    result = doctests.testmod(
        pd.core.frame,
        use_beam=False,
        report=True,
        wont_implement_ok={
            'pandas.core.frame.DataFrame.T': ['*'],
            'pandas.core.frame.DataFrame.cummax': ['*'],
            'pandas.core.frame.DataFrame.cummin': ['*'],
            'pandas.core.frame.DataFrame.cumsum': ['*'],
            'pandas.core.frame.DataFrame.cumprod': ['*'],
            'pandas.core.frame.DataFrame.diff': ['*'],
            'pandas.core.frame.DataFrame.fillna': [
                "df.fillna(method='ffill')",
                'df.fillna(value=values, limit=1)',
            ],
            'pandas.core.frame.DataFrame.items': ['*'],
            'pandas.core.frame.DataFrame.itertuples': ['*'],
            'pandas.core.frame.DataFrame.iterrows': ['*'],
            'pandas.core.frame.DataFrame.iteritems': ['*'],
            # default keep is 'first'
            'pandas.core.frame.DataFrame.nlargest': [
                "df.nlargest(3, 'population')",
                "df.nlargest(3, ['population', 'GDP'])",
                "df.nlargest(3, 'population', keep='last')"
            ],
            'pandas.core.frame.DataFrame.nsmallest': [
                "df.nsmallest(3, 'population')",
                "df.nsmallest(3, ['population', 'GDP'])",
                "df.nsmallest(3, 'population', keep='last')",
            ],
            'pandas.core.frame.DataFrame.replace': [
                "s.replace([1, 2], method='bfill')",
                # Relies on method='pad'
                "s.replace('a', None)",
            ],
            'pandas.core.frame.DataFrame.to_records': ['*'],
            'pandas.core.frame.DataFrame.to_dict': ['*'],
            'pandas.core.frame.DataFrame.to_numpy': ['*'],
            'pandas.core.frame.DataFrame.to_string': ['*'],
            'pandas.core.frame.DataFrame.transpose': ['*'],
            'pandas.core.frame.DataFrame.shape': ['*'],
            'pandas.core.frame.DataFrame.shift': [
                'df.shift(periods=3, freq="D")',
                'df.shift(periods=3, freq="infer")'
            ],
            'pandas.core.frame.DataFrame.unstack': ['*'],
            'pandas.core.frame.DataFrame.memory_usage': ['*'],
            'pandas.core.frame.DataFrame.info': ['*'],
            # Not equal to df.agg('mode', axis='columns', numeric_only=True)
            # because there can be multiple columns if a row has more than one
            # mode
            'pandas.core.frame.DataFrame.mode': [
                "df.mode(axis='columns', numeric_only=True)"
            ],
            'pandas.core.frame.DataFrame.append': [
                'df.append(df2, ignore_index=True)',
                "for i in range(5):\n" +
                "    df = df.append({'A': i}, ignore_index=True)",
            ],
            'pandas.core.frame.DataFrame.sort_index': ['*'],
            'pandas.core.frame.DataFrame.sort_values': ['*'],
            'pandas.core.frame.DataFrame.melt': [
                "df.melt(id_vars=['A'], value_vars=['B'])",
                "df.melt(id_vars=['A'], value_vars=['B', 'C'])",
                "df.melt(col_level=0, id_vars=['A'], value_vars=['B'])",
                "df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
                "df.melt(id_vars=['A'], value_vars=['B'],\n" +
                "        var_name='myVarname', value_name='myValname')"
            ]
        },
        not_implemented_ok={
            'pandas.core.frame.DataFrame.transform': ['*'],
            'pandas.core.frame.DataFrame.reindex': ['*'],
            'pandas.core.frame.DataFrame.reindex_axis': ['*'],
            'pandas.core.frame.DataFrame.round': [
                'df.round(decimals)',
            ],
            # We should be able to support pivot and pivot_table for categorical
            # columns
            'pandas.core.frame.DataFrame.pivot': ['*'],
            # We can implement this as a zipping operator, but it won't have the
            # same capability. The doctest includes an example that branches on
            # a deferred result.
            'pandas.core.frame.DataFrame.combine': ['*'],
            # Can be implemented as a zipping operator
            'pandas.core.frame.DataFrame.combine_first': ['*'],
            # Difficult to parallelize but should be possible?
            'pandas.core.frame.DataFrame.dot': [
                # reindex not supported
                's2 = s.reindex([1, 0, 2, 3])',
                'df.dot(s2)',
            ],
            # Trivially elementwise for axis=columns. Relies on global indexing
            # for axis=rows.
            # Difficult to determine proxy, need to inspect function
            'pandas.core.frame.DataFrame.apply': ['*'],
            # Cross-join not implemented
            'pandas.core.frame.DataFrame.merge': [
                "df1.merge(df2, how='cross')"
            ],
            # TODO(BEAM-11711)
            'pandas.core.frame.DataFrame.set_index': [
                "df.set_index([s, s**2])",
            ],
        },
        skip={
            # Throws NotImplementedError when modifying df
            'pandas.core.frame.DataFrame.transform': ['df'],
            'pandas.core.frame.DataFrame.axes': [
                # Returns deferred index.
                'df.axes',
            ],
            'pandas.core.frame.DataFrame.compare': ['*'],
            'pandas.core.frame.DataFrame.cov': [
                # Relies on setting entries ahead of time.
                "df.loc[df.index[:5], 'a'] = np.nan",
                "df.loc[df.index[5:10], 'b'] = np.nan",
                'df.cov(min_periods=12)',
            ],
            'pandas.core.frame.DataFrame.drop_duplicates': ['*'],
            'pandas.core.frame.DataFrame.duplicated': ['*'],
            'pandas.core.frame.DataFrame.idxmax': ['*'],
            'pandas.core.frame.DataFrame.idxmin': ['*'],
            'pandas.core.frame.DataFrame.rename': [
                # Returns deferred index.
                'df.index',
                'df.rename(index=str).index',
            ],
            'pandas.core.frame.DataFrame.set_index': [
                # TODO(BEAM-11711): This could pass in the index as
                # a DeferredIndex, and we should fail it as order-sensitive.
                "df.set_index([pd.Index([1, 2, 3, 4]), 'year'])",
            ],
            'pandas.core.frame.DataFrame.set_axis': ['*'],
            'pandas.core.frame.DataFrame.to_markdown': ['*'],
            'pandas.core.frame.DataFrame.to_parquet': ['*'],
            'pandas.core.frame.DataFrame.value_counts': ['*'],
            'pandas.core.frame.DataFrame.to_records': [
                'df.index = df.index.rename("I")',
                'index_dtypes = f"<S{df.index.str.len().max()}"',  # 1.x
                'index_dtypes = "<S{}".format(df.index.str.len().max())',  #0.x
                'df.to_records(index_dtypes=index_dtypes)',
            ],
            # These tests use the static method pd.pivot_table, which doesn't
            # actually raise NotImplementedError
            'pandas.core.frame.DataFrame.pivot_table': ['*'],
            # Expected to raise a ValueError, but we raise NotImplementedError
            'pandas.core.frame.DataFrame.pivot': [
                "df.pivot(index='foo', columns='bar', values='baz')"
            ],
            'pandas.core.frame.DataFrame.append': [
                'df',
                # pylint: disable=line-too-long
                "pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n"
                "          ignore_index=True)"
            ],
            'pandas.core.frame.DataFrame.eval': ['df'],
            'pandas.core.frame.DataFrame.melt': [
                "df.columns = [list('ABC'), list('DEF')]", "df"
            ],
            'pandas.core.frame.DataFrame.merge': [
                # Order-sensitive index, checked in frames_test.py.
                "df1.merge(df2, left_on='lkey', right_on='rkey')",
                "df1.merge(df2, left_on='lkey', right_on='rkey',\n"
                "          suffixes=('_left', '_right'))",
                "df1.merge(df2, how='left', on='a')",
            ],
            # Raises right exception, but testing framework has matching issues.
            'pandas.core.frame.DataFrame.replace': [
                "df.replace({'a string': 'new value', True: False})  # raises"
            ],
            'pandas.core.frame.DataFrame.to_sparse': ['type(df)'],
            # Skipped because "seen_wont_implement" is reset before getting to
            # these calls, so the NameError they raise is not ignored.
            'pandas.core.frame.DataFrame.T': [
                'df1_transposed.dtypes', 'df2_transposed.dtypes'
            ],
            'pandas.core.frame.DataFrame.transpose': [
                'df1_transposed.dtypes', 'df2_transposed.dtypes'
            ],
            # Skipped because the relies on iloc to set a cell to NA. Test is
            # replicated in frames_test::DeferredFrameTest::test_applymap.
            'pandas.core.frame.DataFrame.applymap': [
                'df_copy.iloc[0, 0] = pd.NA',
                "df_copy.applymap(lambda x: len(str(x)), na_action='ignore')",
            ],
            # Skipped so we don't need to install natsort
            'pandas.core.frame.DataFrame.sort_values': [
                'from natsort import index_natsorted',
                'df.sort_values(\n'
                '   by="time",\n'
                '   key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
                ')'
            ],
            # Mode that we don't yet support, documentation added in pandas
            # 1.2.0 (https://github.com/pandas-dev/pandas/issues/35912)
            'pandas.core.frame.DataFrame.aggregate': [
                "df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))"
            ],
        })
    self.assertEqual(result.failed, 0)
  def test_series_tests(self):
    """Run the pandas Series doctests against the Beam dataframe layer.

    wont_implement_ok lists examples Beam deliberately does not support
    (typically order-sensitive); not_implemented_ok lists known gaps;
    skip lists examples that cannot run in this harness (reasons
    inline). The run must finish with zero failures.
    """
    result = doctests.testmod(
        pd.core.series,
        use_beam=False,
        report=True,
        wont_implement_ok={
            'pandas.core.series.Series.__array__': ['*'],
            'pandas.core.series.Series.array': ['*'],
            'pandas.core.series.Series.cummax': ['*'],
            'pandas.core.series.Series.cummin': ['*'],
            'pandas.core.series.Series.cumsum': ['*'],
            'pandas.core.series.Series.cumprod': ['*'],
            'pandas.core.series.Series.diff': ['*'],
            'pandas.core.series.Series.dot': [
                's.dot(arr)',  # non-deferred result
            ],
            'pandas.core.series.Series.fillna': [
                "df.fillna(method='ffill')",
                'df.fillna(value=values, limit=1)',
            ],
            'pandas.core.series.Series.items': ['*'],
            'pandas.core.series.Series.iteritems': ['*'],
            # default keep is 'first'
            'pandas.core.series.Series.nlargest': [
                "s.nlargest()",
                "s.nlargest(3)",
                "s.nlargest(3, keep='last')",
            ],
            'pandas.core.series.Series.memory_usage': ['*'],
            'pandas.core.series.Series.nsmallest': [
                "s.nsmallest()",
                "s.nsmallest(3)",
                "s.nsmallest(3, keep='last')",
            ],
            'pandas.core.series.Series.pop': ['*'],
            'pandas.core.series.Series.searchsorted': ['*'],
            'pandas.core.series.Series.shift': ['*'],
            'pandas.core.series.Series.take': ['*'],
            'pandas.core.series.Series.to_dict': ['*'],
            'pandas.core.series.Series.unique': ['*'],
            'pandas.core.series.Series.unstack': ['*'],
            'pandas.core.series.Series.values': ['*'],
            'pandas.core.series.Series.view': ['*'],
            'pandas.core.series.Series.append': [
                's1.append(s2, ignore_index=True)',
            ],
            'pandas.core.series.Series.sort_index': ['*'],
            'pandas.core.series.Series.sort_values': ['*'],
            'pandas.core.series.Series.argmax': ['*'],
            'pandas.core.series.Series.argmin': ['*'],
        },
        not_implemented_ok={
            'pandas.core.series.Series.transform': ['*'],
            'pandas.core.series.Series.groupby': [
                'ser.groupby(["a", "b", "a", "b"]).mean()',
                'ser.groupby(["a", "b", "a", np.nan]).mean()',
                'ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()',
                # Grouping by a series is not supported
                'ser.groupby(ser > 100).mean()',
            ],
            'pandas.core.series.Series.reindex': ['*'],
        },
        skip={
            # error formatting
            'pandas.core.series.Series.append': [
                's1.append(s2, verify_integrity=True)',
            ],
            # Throws NotImplementedError when modifying df
            'pandas.core.series.Series.transform': ['df'],
            'pandas.core.series.Series.autocorr': ['*'],
            'pandas.core.series.Series.combine': ['*'],
            'pandas.core.series.Series.combine_first': ['*'],
            'pandas.core.series.Series.compare': ['*'],
            'pandas.core.series.Series.cov': [
                # Differs in LSB on jenkins.
                "s1.cov(s2)",
            ],
            'pandas.core.series.Series.drop_duplicates': ['*'],
            'pandas.core.series.Series.duplicated': ['*'],
            'pandas.core.series.Series.explode': ['*'],
            'pandas.core.series.Series.idxmax': ['*'],
            'pandas.core.series.Series.idxmin': ['*'],
            'pandas.core.series.Series.nonzero': ['*'],
            'pandas.core.series.Series.quantile': ['*'],
            'pandas.core.series.Series.pop': ['ser'],  # testing side effect
            'pandas.core.series.Series.repeat': ['*'],
            'pandas.core.series.Series.replace': ['*'],
            'pandas.core.series.Series.reset_index': ['*'],
            'pandas.core.series.Series.searchsorted': [
                # This doctest seems to be incorrectly parsed.
                "x = pd.Categorical(['apple', 'bread', 'bread',"
            ],
            'pandas.core.series.Series.set_axis': ['*'],
            'pandas.core.series.Series.to_csv': ['*'],
            'pandas.core.series.Series.to_markdown': ['*'],
            'pandas.core.series.Series.update': ['*'],
            'pandas.core.series.Series.view': [
                # Inspection after modification.
                's'
            ],
        })
    self.assertEqual(result.failed, 0)
def test_string_tests(self):
PD_VERSION = tuple(int(v) for v in pd.__version__.split('.'))
if PD_VERSION < (1, 2, 0):
module = pd.core.strings
else:
# Definitions were moved to accessor in pandas 1.2.0
module = pd.core.strings.accessor
module_name = module.__name__
result = doctests.testmod(
module,
use_beam=False,
wont_implement_ok={
# These methods can accept deferred series objects, but not lists
f'{module_name}.StringMethods.cat': [
"s.str.cat(['A', 'B', 'C', 'D'], sep=',')",
"s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')",
"s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')"
],
f'{module_name}.StringMethods.repeat': [
's.str.repeat(repeats=[1, 2, 3])'
],
f'{module_name}.str_repeat': ['s.str.repeat(repeats=[1, 2, 3])'],
f'{module_name}.StringMethods.get_dummies': ['*'],
f'{module_name}.str_get_dummies': ['*'],
},
skip={
# count() on Series with a NaN produces mismatched type if we
# have a NaN-only partition.
f'{module_name}.StringMethods.count': ["s.str.count('a')"],
f'{module_name}.str_count': ["s.str.count('a')"],
# Produce None instead of NaN, see
# frames_test.py::DeferredFrameTest::test_str_split
f'{module_name}.StringMethods.rsplit': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
f'{module_name}.StringMethods.split': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
# Bad test strings in pandas 1.1.x
f'{module_name}.str_replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
f'{module_name}.StringMethods.replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
# output has incorrect formatting in 1.2.x
f'{module_name}.StringMethods.extractall': ['*']
})
self.assertEqual(result.failed, 0)
def test_datetime_tests(self):
# TODO(BEAM-10721)
datetimelike_result = doctests.testmod(
pd.core.arrays.datetimelike,
use_beam=False,
skip={
'pandas.core.arrays.datetimelike.AttributesMixin._unbox_scalar': [
'*'
],
'pandas.core.arrays.datetimelike.TimelikeOps.ceil': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.floor': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.round': ['*'],
})
datetime_result = doctests.testmod(
pd.core.arrays.datetimes,
use_beam=False,
skip={
'pandas.core.arrays.datetimes.DatetimeArray.day': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.hour': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.microsecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.minute': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.month': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.nanosecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.second': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_leap_year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_start': [
'*'
],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.to_period': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.tz_localize': ['*'],
})
self.assertEqual(datetimelike_result.failed, 0)
self.assertEqual(datetime_result.failed, 0)
def test_indexing_tests(self):
result = doctests.testmod(
pd.core.indexing,
use_beam=False,
skip={
'pandas.core.indexing._IndexSlice': ['*'],
'pandas.core.indexing.IndexingMixin.at': ['*'],
'pandas.core.indexing.IndexingMixin.iat': ['*'],
'pandas.core.indexing.IndexingMixin.iloc': ['*'],
'pandas.core.indexing.IndexingMixin.loc': ['*'],
'pandas.core.indexing._AtIndexer': ['*'],
'pandas.core.indexing._LocIndexer': ['*'],
'pandas.core.indexing._iAtIndexer': ['*'],
'pandas.core.indexing._iLocIndexer': ['*'],
})
self.assertEqual(result.failed, 0)
def test_groupby_tests(self):
result = doctests.testmod(
pd.core.groupby.groupby,
use_beam=False,
wont_implement_ok={
'pandas.core.groupby.groupby.GroupBy.head': ['*'],
'pandas.core.groupby.groupby.GroupBy.tail': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': ['*'],
'pandas.core.groupby.groupby.GroupBy.cumcount': ['*'],
},
not_implemented_ok={
'pandas.core.groupby.groupby.GroupBy.describe': ['*'],
'pandas.core.groupby.groupby.GroupBy.ngroup': ['*'],
'pandas.core.groupby.groupby.GroupBy.resample': ['*'],
'pandas.core.groupby.groupby.GroupBy.sample': ['*'],
'pandas.core.groupby.groupby.GroupBy.quantile': ['*'],
'pandas.core.groupby.groupby.BaseGroupBy.pipe': ['*'],
# pipe tests are in a different location in pandas 1.1.x
'pandas.core.groupby.groupby._GroupBy.pipe': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': [
"df.groupby('A', as_index=False).nth(1)",
],
},
skip={
# Uses iloc to mutate a DataFrame
'pandas.core.groupby.groupby.GroupBy.resample': [
'df.iloc[2, 0] = 5',
'df',
],
# TODO: Raise wont implement for list passed as a grouping column
# Currently raises unhashable type: list
'pandas.core.groupby.groupby.GroupBy.ngroup': [
'df.groupby(["A", [1,1,2,3,2,1]]).ngroup()'
],
})
self.assertEqual(result.failed, 0)
result = doctests.testmod(
pd.core.groupby.generic,
use_beam=False,
wont_implement_ok={
# Returns an array by default, not a Series. WontImplement
# (non-deferred)
'pandas.core.groupby.generic.SeriesGroupBy.unique': ['*'],
# TODO: Is take actually deprecated?
'pandas.core.groupby.generic.DataFrameGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.nsmallest': [
"s.nsmallest(3, keep='last')",
"s.nsmallest(3)",
"s.nsmallest()",
],
'pandas.core.groupby.generic.SeriesGroupBy.nlargest': [
"s.nlargest(3, keep='last')",
"s.nlargest(3)",
"s.nlargest()",
],
'pandas.core.groupby.generic.DataFrameGroupBy.diff': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.diff': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.hist': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.groupby.generic.SeriesGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
},
not_implemented_ok={
'pandas.core.groupby.generic.DataFrameGroupBy.transform': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.filter': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.nunique': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.transform': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.filter': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.describe': ['*'],
},
skip={
'pandas.core.groupby.generic.SeriesGroupBy.cov': [
# Floating point comparison fails
's1.cov(s2)',
],
'pandas.core.groupby.generic.DataFrameGroupBy.cov': [
# Mutates input DataFrame with loc
# TODO: Replicate in frames_test.py
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
"df.cov(min_periods=12)",
],
# These examples rely on grouping by a list
'pandas.core.groupby.generic.SeriesGroupBy.aggregate': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.aggregate': ['*'],
})
self.assertEqual(result.failed, 0)
def test_top_level(self):
tests = {
name: func.__doc__
for (name, func) in pd.__dict__.items()
if _is_top_level_function(func) and getattr(func, '__doc__', None)
}
# IO methods are tested in io_test.py
skip_reads = {name: ['*'] for name in dir(pd) if name.startswith('read_')}
result = doctests.teststrings(
tests,
use_beam=False,
report=True,
not_implemented_ok={
'concat': ['pd.concat([s1, s2], ignore_index=True)'],
'crosstab': ['*'],
'cut': ['*'],
'eval': ['*'],
'factorize': ['*'],
'get_dummies': ['*'],
'infer_freq': ['*'],
'lreshape': ['*'],
'melt': ['*'],
'merge': ["df1.merge(df2, how='cross')"],
'merge_asof': ['*'],
'pivot': ['*'],
'pivot_table': ['*'],
'qcut': ['*'],
'reset_option': ['*'],
'set_eng_float_format': ['*'],
'set_option': ['*'],
'to_numeric': ['*'],
'to_timedelta': ['*'],
'unique': ['*'],
'value_counts': ['*'],
'wide_to_long': ['*'],
},
wont_implement_ok={
'to_datetime': ['s.head()'],
'to_pickle': ['*'],
'melt': [
"pd.melt(df, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])",
"pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"pd.melt(df, id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
],
},
skip={
# error formatting
'concat': ['pd.concat([df5, df6], verify_integrity=True)'],
# doctest DeprecationWarning
'melt': ['df'],
# Order-sensitive re-indexing.
'merge': [
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Not an actual test.
'option_context': ['*'],
'factorize': ['codes', 'uniques'],
# Bad top-level use of un-imported function.
'merge_ordered': [
'merge_ordered(df1, df2, fill_method="ffill", left_by="group")'
],
# Expected error.
'pivot': ["df.pivot(index='foo', columns='bar', values='baz')"],
# Never written.
'to_pickle': ['os.remove("./dummy.pkl")'],
**skip_reads
})
self.assertEqual(result.failed, 0)
if __name__ == '__main__':
unittest.main()
| 44.406579 | 83 | 0.518919 |
import sys
import unittest
import pandas as pd
from apache_beam.dataframe import doctests
from apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function
@unittest.skipIf(sys.platform == 'win32', '[BEAM-10626]')
class DoctestTest(unittest.TestCase):
def test_ndframe_tests(self):
skip_writes = {
f'pandas.core.generic.NDFrame.{name}': ['*']
for name in dir(pd.core.generic.NDFrame) if name.startswith('to_')
}
result = doctests.testmod(
pd.core.generic,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.generic.NDFrame.first': ['*'],
'pandas.core.generic.NDFrame.head': ['*'],
'pandas.core.generic.NDFrame.last': ['*'],
'pandas.core.generic.NDFrame.shift': ['*'],
'pandas.core.generic.NDFrame.tail': ['*'],
'pandas.core.generic.NDFrame.take': ['*'],
'pandas.core.generic.NDFrame.values': ['*'],
'pandas.core.generic.NDFrame.tz_localize': [
"s.tz_localize('CET', ambiguous='infer')",
"s.tz_localize('CET', ambiguous=np.array([True, True, False]))",
],
'pandas.core.generic.NDFrame.truncate': [
"df.tail()",
"df.loc['2016-01-05':'2016-01-10', :].tail()",
],
'pandas.core.generic.NDFrame.replace': [
"s.replace([1, 2], method='bfill')",
"s.replace('a', None)",
],
'pandas.core.generic.NDFrame.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.generic.NDFrame.sort_values': ['*'],
'pandas.core.generic.NDFrame.mask': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.where': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.interpolate': ['*'],
},
not_implemented_ok={
'pandas.core.generic.NDFrame.asof': ['*'],
'pandas.core.generic.NDFrame.at_time': ['*'],
'pandas.core.generic.NDFrame.between_time': ['*'],
'pandas.core.generic.NDFrame.describe': ['*'],
'pandas.core.generic.NDFrame.ewm': ['*'],
'pandas.core.generic.NDFrame.expanding': ['*'],
'pandas.core.generic.NDFrame.flags': ['*'],
'pandas.core.generic.NDFrame.pct_change': ['*'],
'pandas.core.generic.NDFrame.rank': ['*'],
'pandas.core.generic.NDFrame.reindex': ['*'],
'pandas.core.generic.NDFrame.reindex_like': ['*'],
'pandas.core.generic.NDFrame.replace': ['*'],
'pandas.core.generic.NDFrame.resample': ['*'],
'pandas.core.generic.NDFrame.rolling': ['*'],
'pandas.core.generic.NDFrame.sample': ['*'],
'pandas.core.generic.NDFrame.set_flags': ['*'],
'pandas.core.generic.NDFrame.squeeze': ['*'],
'pandas.core.generic.NDFrame.transform': ['*'],
'pandas.core.generic.NDFrame.truncate': ['*'],
'pandas.core.generic.NDFrame.xs': ['*'],
'pandas.core.generic.NDFrame.abs': [
'df.loc[(df.c - 43).abs().argsort()]',
],
},
skip={
'pandas.core.generic.NDFrame._set_axis_name': ['*'],
'pandas.core.generic.NDFrame.asfreq': ['*'],
'pandas.core.generic.NDFrame.astype': ['*'],
'pandas.core.generic.NDFrame.convert_dtypes': ['*'],
'pandas.core.generic.NDFrame.copy': ['*'],
'pandas.core.generic.NDFrame.droplevel': ['*'],
'pandas.core.generic.NDFrame.infer_objects': ['*'],
'pandas.core.generic.NDFrame.rank': [
'df'
],
'pandas.core.generic.NDFrame.rename': [
'df.rename(2)'
],
'pandas.core.generic.NDFrame.rename_axis': ['*'],
'pandas.core.generic.NDFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.generic.NDFrame.squeeze': ['*'],
'pandas.core.generic.NDFrame.resample': ['df'],
'pandas.core.generic.NDFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
**skip_writes
})
self.assertEqual(result.failed, 0)
def test_dataframe_tests(self):
result = doctests.testmod(
pd.core.frame,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.frame.DataFrame.T': ['*'],
'pandas.core.frame.DataFrame.cummax': ['*'],
'pandas.core.frame.DataFrame.cummin': ['*'],
'pandas.core.frame.DataFrame.cumsum': ['*'],
'pandas.core.frame.DataFrame.cumprod': ['*'],
'pandas.core.frame.DataFrame.diff': ['*'],
'pandas.core.frame.DataFrame.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.frame.DataFrame.items': ['*'],
'pandas.core.frame.DataFrame.itertuples': ['*'],
'pandas.core.frame.DataFrame.iterrows': ['*'],
'pandas.core.frame.DataFrame.iteritems': ['*'],
# default keep is 'first'
'pandas.core.frame.DataFrame.nlargest': [
"df.nlargest(3, 'population')",
"df.nlargest(3, ['population', 'GDP'])",
"df.nlargest(3, 'population', keep='last')"
],
'pandas.core.frame.DataFrame.nsmallest': [
"df.nsmallest(3, 'population')",
"df.nsmallest(3, ['population', 'GDP'])",
"df.nsmallest(3, 'population', keep='last')",
],
'pandas.core.frame.DataFrame.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a', None)",
],
'pandas.core.frame.DataFrame.to_records': ['*'],
'pandas.core.frame.DataFrame.to_dict': ['*'],
'pandas.core.frame.DataFrame.to_numpy': ['*'],
'pandas.core.frame.DataFrame.to_string': ['*'],
'pandas.core.frame.DataFrame.transpose': ['*'],
'pandas.core.frame.DataFrame.shape': ['*'],
'pandas.core.frame.DataFrame.shift': [
'df.shift(periods=3, freq="D")',
'df.shift(periods=3, freq="infer")'
],
'pandas.core.frame.DataFrame.unstack': ['*'],
'pandas.core.frame.DataFrame.memory_usage': ['*'],
'pandas.core.frame.DataFrame.info': ['*'],
# Not equal to df.agg('mode', axis='columns', numeric_only=True)
# because there can be multiple columns if a row has more than one
# mode
'pandas.core.frame.DataFrame.mode': [
"df.mode(axis='columns', numeric_only=True)"
],
'pandas.core.frame.DataFrame.append': [
'df.append(df2, ignore_index=True)',
"for i in range(5):\n" +
" df = df.append({'A': i}, ignore_index=True)",
],
'pandas.core.frame.DataFrame.sort_index': ['*'],
'pandas.core.frame.DataFrame.sort_values': ['*'],
'pandas.core.frame.DataFrame.melt': [
"df.melt(id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=['A'], value_vars=['B', 'C'])",
"df.melt(col_level=0, id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"df.melt(id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
]
},
not_implemented_ok={
'pandas.core.frame.DataFrame.transform': ['*'],
'pandas.core.frame.DataFrame.reindex': ['*'],
'pandas.core.frame.DataFrame.reindex_axis': ['*'],
'pandas.core.frame.DataFrame.round': [
'df.round(decimals)',
],
# We should be able to support pivot and pivot_table for categorical
# columns
'pandas.core.frame.DataFrame.pivot': ['*'],
# We can implement this as a zipping operator, but it won't have the
'pandas.core.frame.DataFrame.combine': ['*'],
'pandas.core.frame.DataFrame.combine_first': ['*'],
'pandas.core.frame.DataFrame.dot': [
's2 = s.reindex([1, 0, 2, 3])',
'df.dot(s2)',
],
'pandas.core.frame.DataFrame.apply': ['*'],
'pandas.core.frame.DataFrame.merge': [
"df1.merge(df2, how='cross')"
],
'pandas.core.frame.DataFrame.set_index': [
"df.set_index([s, s**2])",
],
},
skip={
'pandas.core.frame.DataFrame.transform': ['df'],
'pandas.core.frame.DataFrame.axes': [
'df.axes',
],
'pandas.core.frame.DataFrame.compare': ['*'],
'pandas.core.frame.DataFrame.cov': [
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
'df.cov(min_periods=12)',
],
'pandas.core.frame.DataFrame.drop_duplicates': ['*'],
'pandas.core.frame.DataFrame.duplicated': ['*'],
'pandas.core.frame.DataFrame.idxmax': ['*'],
'pandas.core.frame.DataFrame.idxmin': ['*'],
'pandas.core.frame.DataFrame.rename': [
'df.index',
'df.rename(index=str).index',
],
'pandas.core.frame.DataFrame.set_index': [
"df.set_index([pd.Index([1, 2, 3, 4]), 'year'])",
],
'pandas.core.frame.DataFrame.set_axis': ['*'],
'pandas.core.frame.DataFrame.to_markdown': ['*'],
'pandas.core.frame.DataFrame.to_parquet': ['*'],
'pandas.core.frame.DataFrame.value_counts': ['*'],
'pandas.core.frame.DataFrame.to_records': [
'df.index = df.index.rename("I")',
'index_dtypes = f"<S{df.index.str.len().max()}"',
'index_dtypes = "<S{}".format(df.index.str.len().max())',
'df.to_records(index_dtypes=index_dtypes)',
],
# actually raise NotImplementedError
'pandas.core.frame.DataFrame.pivot_table': ['*'],
# Expected to raise a ValueError, but we raise NotImplementedError
'pandas.core.frame.DataFrame.pivot': [
"df.pivot(index='foo', columns='bar', values='baz')"
],
'pandas.core.frame.DataFrame.append': [
'df',
# pylint: disable=line-too-long
"pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n"
" ignore_index=True)"
],
'pandas.core.frame.DataFrame.eval': ['df'],
'pandas.core.frame.DataFrame.melt': [
"df.columns = [list('ABC'), list('DEF')]", "df"
],
'pandas.core.frame.DataFrame.merge': [
# Order-sensitive index, checked in frames_test.py.
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Raises right exception, but testing framework has matching issues.
'pandas.core.frame.DataFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.frame.DataFrame.to_sparse': ['type(df)'],
# Skipped because "seen_wont_implement" is reset before getting to
# these calls, so the NameError they raise is not ignored.
'pandas.core.frame.DataFrame.T': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
'pandas.core.frame.DataFrame.transpose': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
# Skipped because the relies on iloc to set a cell to NA. Test is
# replicated in frames_test::DeferredFrameTest::test_applymap.
'pandas.core.frame.DataFrame.applymap': [
'df_copy.iloc[0, 0] = pd.NA',
"df_copy.applymap(lambda x: len(str(x)), na_action='ignore')",
],
# Skipped so we don't need to install natsort
'pandas.core.frame.DataFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
# 1.2.0 (https://github.com/pandas-dev/pandas/issues/35912)
'pandas.core.frame.DataFrame.aggregate': [
"df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))"
],
})
self.assertEqual(result.failed, 0)
def test_series_tests(self):
result = doctests.testmod(
pd.core.series,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.series.Series.__array__': ['*'],
'pandas.core.series.Series.array': ['*'],
'pandas.core.series.Series.cummax': ['*'],
'pandas.core.series.Series.cummin': ['*'],
'pandas.core.series.Series.cumsum': ['*'],
'pandas.core.series.Series.cumprod': ['*'],
'pandas.core.series.Series.diff': ['*'],
'pandas.core.series.Series.dot': [
's.dot(arr)', # non-deferred result
],
'pandas.core.series.Series.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.series.Series.items': ['*'],
'pandas.core.series.Series.iteritems': ['*'],
# default keep is 'first'
'pandas.core.series.Series.nlargest': [
"s.nlargest()",
"s.nlargest(3)",
"s.nlargest(3, keep='last')",
],
'pandas.core.series.Series.memory_usage': ['*'],
'pandas.core.series.Series.nsmallest': [
"s.nsmallest()",
"s.nsmallest(3)",
"s.nsmallest(3, keep='last')",
],
'pandas.core.series.Series.pop': ['*'],
'pandas.core.series.Series.searchsorted': ['*'],
'pandas.core.series.Series.shift': ['*'],
'pandas.core.series.Series.take': ['*'],
'pandas.core.series.Series.to_dict': ['*'],
'pandas.core.series.Series.unique': ['*'],
'pandas.core.series.Series.unstack': ['*'],
'pandas.core.series.Series.values': ['*'],
'pandas.core.series.Series.view': ['*'],
'pandas.core.series.Series.append': [
's1.append(s2, ignore_index=True)',
],
'pandas.core.series.Series.sort_index': ['*'],
'pandas.core.series.Series.sort_values': ['*'],
'pandas.core.series.Series.argmax': ['*'],
'pandas.core.series.Series.argmin': ['*'],
},
not_implemented_ok={
'pandas.core.series.Series.transform': ['*'],
'pandas.core.series.Series.groupby': [
'ser.groupby(["a", "b", "a", "b"]).mean()',
'ser.groupby(["a", "b", "a", np.nan]).mean()',
'ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()',
# Grouping by a series is not supported
'ser.groupby(ser > 100).mean()',
],
'pandas.core.series.Series.reindex': ['*'],
},
skip={
# error formatting
'pandas.core.series.Series.append': [
's1.append(s2, verify_integrity=True)',
],
# Throws NotImplementedError when modifying df
'pandas.core.series.Series.transform': ['df'],
'pandas.core.series.Series.autocorr': ['*'],
'pandas.core.series.Series.combine': ['*'],
'pandas.core.series.Series.combine_first': ['*'],
'pandas.core.series.Series.compare': ['*'],
'pandas.core.series.Series.cov': [
# Differs in LSB on jenkins.
"s1.cov(s2)",
],
'pandas.core.series.Series.drop_duplicates': ['*'],
'pandas.core.series.Series.duplicated': ['*'],
'pandas.core.series.Series.explode': ['*'],
'pandas.core.series.Series.idxmax': ['*'],
'pandas.core.series.Series.idxmin': ['*'],
'pandas.core.series.Series.nonzero': ['*'],
'pandas.core.series.Series.quantile': ['*'],
'pandas.core.series.Series.pop': ['ser'], # testing side effect
'pandas.core.series.Series.repeat': ['*'],
'pandas.core.series.Series.replace': ['*'],
'pandas.core.series.Series.reset_index': ['*'],
'pandas.core.series.Series.searchsorted': [
# This doctest seems to be incorrectly parsed.
"x = pd.Categorical(['apple', 'bread', 'bread',"
],
'pandas.core.series.Series.set_axis': ['*'],
'pandas.core.series.Series.to_csv': ['*'],
'pandas.core.series.Series.to_markdown': ['*'],
'pandas.core.series.Series.update': ['*'],
'pandas.core.series.Series.view': [
# Inspection after modification.
's'
],
})
self.assertEqual(result.failed, 0)
def test_string_tests(self):
PD_VERSION = tuple(int(v) for v in pd.__version__.split('.'))
if PD_VERSION < (1, 2, 0):
module = pd.core.strings
else:
# Definitions were moved to accessor in pandas 1.2.0
module = pd.core.strings.accessor
module_name = module.__name__
result = doctests.testmod(
module,
use_beam=False,
wont_implement_ok={
# These methods can accept deferred series objects, but not lists
f'{module_name}.StringMethods.cat': [
"s.str.cat(['A', 'B', 'C', 'D'], sep=',')",
"s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')",
"s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')"
],
f'{module_name}.StringMethods.repeat': [
's.str.repeat(repeats=[1, 2, 3])'
],
f'{module_name}.str_repeat': ['s.str.repeat(repeats=[1, 2, 3])'],
f'{module_name}.StringMethods.get_dummies': ['*'],
f'{module_name}.str_get_dummies': ['*'],
},
skip={
# count() on Series with a NaN produces mismatched type if we
# have a NaN-only partition.
f'{module_name}.StringMethods.count': ["s.str.count('a')"],
f'{module_name}.str_count': ["s.str.count('a')"],
# Produce None instead of NaN, see
# frames_test.py::DeferredFrameTest::test_str_split
f'{module_name}.StringMethods.rsplit': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
f'{module_name}.StringMethods.split': [
's.str.split(expand=True)',
's.str.rsplit("/", n=1, expand=True)',
],
# Bad test strings in pandas 1.1.x
f'{module_name}.str_replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
f'{module_name}.StringMethods.replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
# output has incorrect formatting in 1.2.x
f'{module_name}.StringMethods.extractall': ['*']
})
self.assertEqual(result.failed, 0)
def test_datetime_tests(self):
# TODO(BEAM-10721)
datetimelike_result = doctests.testmod(
pd.core.arrays.datetimelike,
use_beam=False,
skip={
'pandas.core.arrays.datetimelike.AttributesMixin._unbox_scalar': [
'*'
],
'pandas.core.arrays.datetimelike.TimelikeOps.ceil': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.floor': ['*'],
'pandas.core.arrays.datetimelike.TimelikeOps.round': ['*'],
})
datetime_result = doctests.testmod(
pd.core.arrays.datetimes,
use_beam=False,
skip={
'pandas.core.arrays.datetimes.DatetimeArray.day': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.hour': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.microsecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.minute': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.month': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.nanosecond': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.second': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_leap_year': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_month_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_start': [
'*'
],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_end': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.is_year_start': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.to_period': ['*'],
'pandas.core.arrays.datetimes.DatetimeArray.tz_localize': ['*'],
})
self.assertEqual(datetimelike_result.failed, 0)
self.assertEqual(datetime_result.failed, 0)
def test_indexing_tests(self):
result = doctests.testmod(
pd.core.indexing,
use_beam=False,
skip={
'pandas.core.indexing._IndexSlice': ['*'],
'pandas.core.indexing.IndexingMixin.at': ['*'],
'pandas.core.indexing.IndexingMixin.iat': ['*'],
'pandas.core.indexing.IndexingMixin.iloc': ['*'],
'pandas.core.indexing.IndexingMixin.loc': ['*'],
'pandas.core.indexing._AtIndexer': ['*'],
'pandas.core.indexing._LocIndexer': ['*'],
'pandas.core.indexing._iAtIndexer': ['*'],
'pandas.core.indexing._iLocIndexer': ['*'],
})
self.assertEqual(result.failed, 0)
def test_groupby_tests(self):
result = doctests.testmod(
pd.core.groupby.groupby,
use_beam=False,
wont_implement_ok={
'pandas.core.groupby.groupby.GroupBy.head': ['*'],
'pandas.core.groupby.groupby.GroupBy.tail': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': ['*'],
'pandas.core.groupby.groupby.GroupBy.cumcount': ['*'],
},
not_implemented_ok={
'pandas.core.groupby.groupby.GroupBy.describe': ['*'],
'pandas.core.groupby.groupby.GroupBy.ngroup': ['*'],
'pandas.core.groupby.groupby.GroupBy.resample': ['*'],
'pandas.core.groupby.groupby.GroupBy.sample': ['*'],
'pandas.core.groupby.groupby.GroupBy.quantile': ['*'],
'pandas.core.groupby.groupby.BaseGroupBy.pipe': ['*'],
# pipe tests are in a different location in pandas 1.1.x
'pandas.core.groupby.groupby._GroupBy.pipe': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': [
"df.groupby('A', as_index=False).nth(1)",
],
},
skip={
# Uses iloc to mutate a DataFrame
'pandas.core.groupby.groupby.GroupBy.resample': [
'df.iloc[2, 0] = 5',
'df',
],
# TODO: Raise wont implement for list passed as a grouping column
# Currently raises unhashable type: list
'pandas.core.groupby.groupby.GroupBy.ngroup': [
'df.groupby(["A", [1,1,2,3,2,1]]).ngroup()'
],
})
self.assertEqual(result.failed, 0)
result = doctests.testmod(
pd.core.groupby.generic,
use_beam=False,
wont_implement_ok={
# Returns an array by default, not a Series. WontImplement
# (non-deferred)
'pandas.core.groupby.generic.SeriesGroupBy.unique': ['*'],
# TODO: Is take actually deprecated?
'pandas.core.groupby.generic.DataFrameGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.nsmallest': [
"s.nsmallest(3, keep='last')",
"s.nsmallest(3)",
"s.nsmallest()",
],
'pandas.core.groupby.generic.SeriesGroupBy.nlargest': [
"s.nlargest(3, keep='last')",
"s.nlargest(3)",
"s.nlargest()",
],
'pandas.core.groupby.generic.DataFrameGroupBy.diff': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.diff': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.hist': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
'pandas.core.groupby.generic.SeriesGroupBy.fillna': [
"df.fillna(method='ffill')",
'df.fillna(value=values, limit=1)',
],
},
not_implemented_ok={
'pandas.core.groupby.generic.DataFrameGroupBy.transform': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.filter': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.nunique': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.transform': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.filter': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.describe': ['*'],
},
skip={
'pandas.core.groupby.generic.SeriesGroupBy.cov': [
# Floating point comparison fails
's1.cov(s2)',
],
'pandas.core.groupby.generic.DataFrameGroupBy.cov': [
# Mutates input DataFrame with loc
# TODO: Replicate in frames_test.py
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
"df.cov(min_periods=12)",
],
# These examples rely on grouping by a list
'pandas.core.groupby.generic.SeriesGroupBy.aggregate': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.aggregate': ['*'],
})
self.assertEqual(result.failed, 0)
def test_top_level(self):
tests = {
name: func.__doc__
for (name, func) in pd.__dict__.items()
if _is_top_level_function(func) and getattr(func, '__doc__', None)
}
# IO methods are tested in io_test.py
skip_reads = {name: ['*'] for name in dir(pd) if name.startswith('read_')}
result = doctests.teststrings(
tests,
use_beam=False,
report=True,
not_implemented_ok={
'concat': ['pd.concat([s1, s2], ignore_index=True)'],
'crosstab': ['*'],
'cut': ['*'],
'eval': ['*'],
'factorize': ['*'],
'get_dummies': ['*'],
'infer_freq': ['*'],
'lreshape': ['*'],
'melt': ['*'],
'merge': ["df1.merge(df2, how='cross')"],
'merge_asof': ['*'],
'pivot': ['*'],
'pivot_table': ['*'],
'qcut': ['*'],
'reset_option': ['*'],
'set_eng_float_format': ['*'],
'set_option': ['*'],
'to_numeric': ['*'],
'to_timedelta': ['*'],
'unique': ['*'],
'value_counts': ['*'],
'wide_to_long': ['*'],
},
wont_implement_ok={
'to_datetime': ['s.head()'],
'to_pickle': ['*'],
'melt': [
"pd.melt(df, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])",
"pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"pd.melt(df, id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
],
},
skip={
# error formatting
'concat': ['pd.concat([df5, df6], verify_integrity=True)'],
# doctest DeprecationWarning
'melt': ['df'],
# Order-sensitive re-indexing.
'merge': [
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Not an actual test.
'option_context': ['*'],
'factorize': ['codes', 'uniques'],
# Bad top-level use of un-imported function.
'merge_ordered': [
'merge_ordered(df1, df2, fill_method="ffill", left_by="group")'
],
# Expected error.
'pivot': ["df.pivot(index='foo', columns='bar', values='baz')"],
# Never written.
'to_pickle': ['os.remove("./dummy.pkl")'],
**skip_reads
})
self.assertEqual(result.failed, 0)
if __name__ == '__main__':
unittest.main()
| true | true |
f720120c884a1396999a7662659cbb6bb8cb01bb | 811 | py | Python | manage.py | oguzhanunlu/validate_json | 79cda734934195bd59055d7f04288a7b538f9542 | [
"Apache-2.0"
] | null | null | null | manage.py | oguzhanunlu/validate_json | 79cda734934195bd59055d7f04288a7b538f9542 | [
"Apache-2.0"
] | null | null | null | manage.py | oguzhanunlu/validate_json | 79cda734934195bd59055d7f04288a7b538f9542 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys


def main():
    """Django's command-line entry point for administrative tasks."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "validate_json.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The import may fail for reasons other than Django being absent.
        # Probe for Django itself so we only blame a missing install when
        # that is really the cause (relevant on Python 2, which swallows
        # the original traceback otherwise).
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
f72012960cef127dbd4634d4f311534488584e40 | 27,649 | py | Python | salt/modules/snapper.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | 12 | 2015-01-21T00:18:25.000Z | 2021-07-11T07:35:26.000Z | salt/modules/snapper.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | 1 | 2015-10-05T22:03:10.000Z | 2015-10-05T22:03:10.000Z | salt/modules/snapper.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | 12 | 2015-01-05T09:50:42.000Z | 2019-08-19T01:43:40.000Z | # -*- coding: utf-8 -*-
'''
Module to manage filesystem snapshots with snapper
.. versionadded:: 2016.11.0
:codeauthor: Duncan Mac-Vicar P. <dmacvicar@suse.de>
:codeauthor: Pablo Suárez Hernández <psuarezhernandez@suse.de>
:depends: ``dbus`` Python module.
:depends: ``snapper`` http://snapper.io, available in most distros
:maturity: new
:platform: Linux
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
import os
import time
import difflib
try:
from pwd import getpwuid
HAS_PWD = True
except ImportError:
HAS_PWD = False
from salt.exceptions import CommandExecutionError
import salt.utils.files
# import 3rd party libs
from salt.ext import six
try:
import dbus # pylint: disable=wrong-import-order
HAS_DBUS = True
except ImportError:
HAS_DBUS = False
# Mapping from the bit flags snapper reports over D-Bus for a changed file
# to human-readable descriptions; consumed by status_to_string().
DBUS_STATUS_MAP = {
    1: "created",
    2: "deleted",
    4: "type changed",
    8: "modified",
    16: "permission changed",
    32: "owner changed",
    64: "group changed",
    128: "extended attributes changed",
    256: "ACL info changed",
}

# D-Bus coordinates of the snapperd service.
SNAPPER_DBUS_OBJECT = 'org.opensuse.Snapper'
SNAPPER_DBUS_PATH = '/org/opensuse/Snapper'
SNAPPER_DBUS_INTERFACE = 'org.opensuse.Snapper'

# pylint: disable=invalid-name
log = logging.getLogger(__name__)

# Module-level D-Bus state, populated once at import time.  When a step
# fails, the matching *_error variable records why, so that __virtual__()
# can report a meaningful load-failure reason instead of crashing.
bus = None
system_bus_error = None
snapper = None
snapper_error = None

if HAS_DBUS:
    try:
        bus = dbus.SystemBus()
    except dbus.DBusException as exc:
        log.warning(exc)
        system_bus_error = exc
    else:
        # Only build the proxy if snapperd is actually activatable on the bus.
        if SNAPPER_DBUS_OBJECT in bus.list_activatable_names():
            try:
                snapper = dbus.Interface(bus.get_object(SNAPPER_DBUS_OBJECT,
                                                        SNAPPER_DBUS_PATH),
                                         dbus_interface=SNAPPER_DBUS_INTERFACE)
            except (dbus.DBusException, ValueError) as exc:
                log.warning(exc)
                snapper_error = exc
        else:
            snapper_error = 'snapper is missing'
# pylint: enable=invalid-name
def __virtual__():
    '''
    Load the module only when python-dbus, the snapper D-Bus service, the
    system bus and the ``pwd`` module are all available.
    '''
    error_msg = 'The snapper module cannot be loaded: {0}'
    if not HAS_DBUS:
        return False, error_msg.format('missing python dbus module')
    if not snapper:
        return False, error_msg.format(snapper_error)
    if not bus:
        return False, error_msg.format(system_bus_error)
    if not HAS_PWD:
        return False, error_msg.format('pwd module not available')
    return 'snapper'
def _snapshot_to_data(snapshot):
'''
Returns snapshot data from a D-Bus response.
A snapshot D-Bus response is a dbus.Struct containing the
information related to a snapshot:
[id, type, pre_snapshot, timestamp, user, description,
cleanup_algorithm, userdata]
id: dbus.UInt32
type: dbus.UInt16
pre_snapshot: dbus.UInt32
timestamp: dbus.Int64
user: dbus.UInt32
description: dbus.String
cleaup_algorithm: dbus.String
userdata: dbus.Dictionary
'''
data = {}
data['id'] = snapshot[0]
data['type'] = ['single', 'pre', 'post'][snapshot[1]]
if data['type'] == 'post':
data['pre'] = snapshot[2]
if snapshot[3] != -1:
data['timestamp'] = snapshot[3]
else:
data['timestamp'] = int(time.time())
data['user'] = getpwuid(snapshot[4])[0]
data['description'] = snapshot[5]
data['cleanup'] = snapshot[6]
data['userdata'] = {}
for key, value in snapshot[7].items():
data['userdata'][key] = value
return data
def _dbus_exception_to_reason(exc, args):
'''
Returns a error message from a snapper DBusException
'''
error = exc.get_dbus_name()
if error == 'error.unknown_config':
return "Unknown configuration '{0}'".format(args['config'])
elif error == 'error.illegal_snapshot':
return 'Invalid snapshot'
else:
return exc.get_dbus_name()
def list_snapshots(config='root'):
    '''
    List the available snapshots of a configuration.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.list_snapshots config=myconfig
    '''
    try:
        return [_snapshot_to_data(snap)
                for snap in snapper.ListSnapshots(config)]
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing snapshots: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def get_snapshot(number=0, config='root'):
    '''
    Return detailed information about one snapshot.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.get_snapshot 1
    '''
    try:
        raw = snapper.GetSnapshot(config, int(number))
        return _snapshot_to_data(raw)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while retrieving snapshot: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def list_configs():
    '''
    List all available snapper configurations, keyed by name.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.list_configs
    '''
    try:
        # Each config struct is (name, subvolume, attribute-dict); expose
        # the attribute dict under the config name.
        return dict((cfg[0], cfg[2]) for cfg in snapper.ListConfigs())
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing configurations: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def _config_filter(value):
if isinstance(value, bool):
return 'yes' if value else 'no'
return value
def set_config(name='root', **kwargs):
    '''
    Set configuration values on a snapper configuration.

    Keys are case insensitive: they are uppercased before being sent to
    snapper, so ``sync_acl=True`` and ``SYNC_ACL=True`` are equivalent.
    Boolean values become ``yes``/``no``.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.set_config SYNC_ACL=True
    '''
    try:
        # Drop Salt-internal dunder kwargs (e.g. __pub_jid) and normalize
        # the remaining key/value pairs for snapper.
        payload = dict((key.upper(), _config_filter(value))
                       for key, value in kwargs.items()
                       if not key.startswith('__'))
        snapper.SetConfig(name, payload)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while setting configuration {0}: {1}'
            .format(name, _dbus_exception_to_reason(exc, locals()))
        )
    return True
def _get_last_snapshot(config='root'):
    '''
    Return the most recently created snapshot (highest id) of *config*.
    '''
    by_id = sorted(list_snapshots(config), key=lambda snap: snap['id'])
    return by_id[-1]
def status_to_string(dbus_status):
    '''
    Convert a numeric dbus snapper status bitmask into a list of
    human readable change descriptions.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.status_to_string <dbus_status>
    '''
    # Walk the known status bits in ascending order and collect the set ones;
    # DBUS_STATUS_MAP keys are exactly the bits 1, 2, 4, ... 256.
    return [DBUS_STATUS_MAP[bit]
            for bit in sorted(DBUS_STATUS_MAP)
            if dbus_status & bit]
def get_config(name='root'):
    '''
    Retrieve all values of a snapper configuration.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.get_config
    '''
    try:
        return snapper.GetConfig(name)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while retrieving configuration: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def create_config(name=None,
                  subvolume=None,
                  fstype=None,
                  template=None,
                  extra_opts=None):
    '''
    Create a new Snapper configuration.

    name
        Name of the new Snapper configuration.
    subvolume
        Path to the related subvolume.
    fstype
        Filesystem type of the subvolume.
    template
        Configuration template to use. (Default: default)
    extra_opts
        Extra Snapper configuration opts dictionary. Overrides values
        provided by the given template (if any).

    CLI example:

    .. code-block:: bash

        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs
        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs template="default"
        salt '*' snapper.create_config name=myconfig subvolume=/foo/bar/ fstype=btrfs extra_opts='{"NUMBER_CLEANUP": False}'
    '''
    # Validate mandatory arguments up front with a uniform error message.
    for argname, argvalue in (('name', name),
                              ('subvolume', subvolume),
                              ('fstype', fstype)):
        if not argvalue:
            raise CommandExecutionError(
                'You must provide a "{0}" for the new configuration'.format(argname)
            )
    try:
        # An empty template string makes snapper fall back to its default.
        snapper.CreateConfig(name, subvolume, fstype, template or "")
        if extra_opts:
            set_config(name, **extra_opts)
        return get_config(name)
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while creating the new configuration: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def create_snapshot(config='root', snapshot_type='single', pre_number=None,
                    description=None, cleanup_algorithm='number', userdata=None,
                    **kwargs):
    '''
    Create a snapshot and return its number.

    config
        Configuration name.

    snapshot_type
        Type of the new snapshot: ``single``, ``pre`` or ``post``.

    pre_number
        For ``post`` snapshots, the number of the matching ``pre``
        snapshot (mandatory in that case).

    description
        Description for the snapshot. If not given, the salt job id is used.

    cleanup_algorithm
        Cleanup algorithm for the snapshot: ``number`` (delete old
        snapshots past a count), ``timeline`` (keep hourly/daily/weekly/
        monthly/yearly sets) or ``empty-pre-post`` (drop pre/post pairs
        with empty diffs).

    userdata
        Userdata (key-value pairs) to attach to the snapshot.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.create_snapshot
    '''
    # Copy instead of mutating the caller's dict when adding salt_jid below.
    userdata = dict(userdata) if userdata else {}

    jid = kwargs.get('__pub_jid')
    if description is None and jid is not None:
        description = 'salt job {0}'.format(jid)

    # Record the originating salt job so run()/undo_jid() can find the pair.
    if jid is not None:
        userdata['salt_jid'] = jid

    new_nr = None
    try:
        if snapshot_type == 'single':
            new_nr = snapper.CreateSingleSnapshot(config, description,
                                                  cleanup_algorithm, userdata)
        elif snapshot_type == 'pre':
            new_nr = snapper.CreatePreSnapshot(config, description,
                                               cleanup_algorithm, userdata)
        elif snapshot_type == 'post':
            if pre_number is None:
                # FIX: adjacent string literals previously lacked a space
                # ("to bespecified").
                raise CommandExecutionError(
                    "pre snapshot number 'pre_number' needs to be "
                    "specified for snapshots of the 'post' type")
            new_nr = snapper.CreatePostSnapshot(config, pre_number, description,
                                                cleanup_algorithm, userdata)
        else:
            # FIX: the original passed ``format(snapshot_type)`` as a second
            # positional argument instead of calling str.format, so the
            # placeholder was never filled in.
            raise CommandExecutionError(
                "Invalid snapshot type '{0}'".format(snapshot_type))
    except dbus.DBusException as exc:
        # FIX: message previously said "listing changed files" (copy-paste).
        raise CommandExecutionError(
            'Error encountered while creating the snapshot: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
    return new_nr
def delete_snapshot(snapshots_ids=None, config="root"):
    '''
    Delete one or more snapshots.

    config
        Configuration name. (Default: root)

    snapshots_ids
        A single snapshot ID or a list of IDs to delete.

    CLI example:

    .. code-block:: bash

        salt '*' snapper.delete_snapshot 54
        salt '*' snapper.delete_snapshot config=root 54
        salt '*' snapper.delete_snapshot config=root snapshots_ids=[54,55,56]
    '''
    if not snapshots_ids:
        raise CommandExecutionError('Error: No snapshot ID has been provided')
    try:
        # Accept a bare id as well as a list of ids.
        if not isinstance(snapshots_ids, list):
            snapshots_ids = [snapshots_ids]
        existing = set(snap['id'] for snap in list_snapshots(config))
        missing = set(snapshots_ids) - existing
        if missing:
            raise CommandExecutionError(
                "Error: Snapshots '{0}' not found".format(", ".join(
                    six.text_type(snap_id) for snap_id in missing))
            )
        snapper.DeleteSnapshots(config, snapshots_ids)
        return {config: {"ids": snapshots_ids, "status": "deleted"}}
    except dbus.DBusException as exc:
        raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def modify_snapshot(snapshot_id=None,
                    description=None,
                    userdata=None,
                    cleanup=None,
                    config="root"):
    '''
    Modify attributes of an existing snapshot; attributes that are not
    passed keep their current value.

    config
        Configuration name. (Default: root)

    snapshot_id
        ID of the snapshot to be modified.

    cleanup
        New cleanup method of the snapshot. (str)

    description
        New description of the snapshot. (str)

    userdata
        New userdata dictionary of the snapshot. (dict)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.modify_snapshot 54 description="my snapshot description"
        salt '*' snapper.modify_snapshot 54 userdata='{"foo": "bar"}'
        salt '*' snapper.modify_snapshot snapshot_id=54 cleanup="number"
    '''
    if not snapshot_id:
        raise CommandExecutionError('Error: No snapshot ID has been provided')

    # Fall back to the snapshot's current values for anything not supplied.
    current = get_snapshot(config=config, number=snapshot_id)
    if description is None:
        description = current['description']
    if cleanup is None:
        cleanup = current['cleanup']
    if userdata is None:
        userdata = current['userdata']
    try:
        snapper.SetSnapshot(config,
                            snapshot_id,
                            description,
                            cleanup,
                            userdata)
        return get_snapshot(config=config, number=snapshot_id)
    except dbus.DBusException as exc:
        raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def _get_num_interval(config, num_pre, num_post):
'''
Returns numerical interval based on optionals num_pre, num_post values
'''
post = int(num_post) if num_post else 0
pre = int(num_pre) if num_pre is not None else _get_last_snapshot(config)['id']
return pre, post
def _is_text_file(filename):
    '''
    Return True when the external ``file(1)`` utility reports *filename*
    as having a ``text`` MIME type.
    '''
    # FIX: use the popen handle as a context manager so the pipe is always
    # closed (the original leaked the stream on every call).
    # NOTE(review): *filename* is interpolated into a shell command without
    # quoting; paths containing spaces or shell metacharacters will
    # misbehave — consider subprocess with an argument list.
    with os.popen('file -bi {0}'.format(filename), 'r') as proc:
        type_of_file = proc.read()
    return type_of_file.startswith('text')
def run(function, *args, **kwargs):
    '''
    Runs a function from an execution module creating pre and post snapshots
    and associating the salt job id with those snapshots for easy undo and
    cleanup.

    function
        Salt function to call.

    config
        Configuration name. (default: "root")

    description
        A description for the snapshots. (default: None)

    userdata
        Data to include in the snapshot metadata. (default: None)

    cleanup_algorithm
        Snapper cleanup algorithm. (default: "number")

    `*args`
        args for the function to call. (default: None)

    `**kwargs`
        kwargs for the function to call (default: None)

    This would run append text to /etc/motd using the file.append
    module, and will create two snapshots, pre and post with the associated
    metadata. The jid will be available as salt_jid in the userdata of the
    snapshot.

    You can immediately see the changes

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.run file.append args='["/etc/motd", "some text"]'
    '''
    # Snapper-specific options are popped so they are not forwarded to the
    # wrapped execution-module function.
    config = kwargs.pop("config", "root")
    description = kwargs.pop("description", "snapper.run[{0}]".format(function))
    cleanup_algorithm = kwargs.pop("cleanup_algorithm", "number")
    userdata = kwargs.pop("userdata", {})

    # Split the remaining kwargs: regular ones go to the wrapped function,
    # Salt-internal dunder ones (e.g. __pub_jid) go to create_snapshot so
    # the snapshots get tagged with the job id.
    func_kwargs = dict((k, v) for k, v in kwargs.items() if not k.startswith('__'))
    kwargs = dict((k, v) for k, v in kwargs.items() if k.startswith('__'))

    pre_nr = __salt__['snapper.create_snapshot'](
        config=config,
        snapshot_type='pre',
        description=description,
        cleanup_algorithm=cleanup_algorithm,
        userdata=userdata,
        **kwargs)

    if function not in __salt__:
        raise CommandExecutionError(
            'function "{0}" does not exist'.format(function)
        )

    try:
        ret = __salt__[function](*args, **func_kwargs)
    except CommandExecutionError as exc:
        # On failure the error text (plus the function's docstring) becomes
        # the return value instead of propagating, so the post snapshot
        # below is still taken.
        ret = "\n".join([six.text_type(exc), __salt__[function].__doc__])

    # Always close the pre/post pair, even when the wrapped call failed.
    __salt__['snapper.create_snapshot'](
        config=config,
        snapshot_type='post',
        pre_number=pre_nr,
        description=description,
        cleanup_algorithm=cleanup_algorithm,
        userdata=userdata,
        **kwargs)
    return ret
def status(config='root', num_pre=None, num_post=None):
    '''
    Return a comparison between two snapshots: a mapping from changed file
    path to its list of change descriptions.

    config
        Configuration name.

    num_pre
        first snapshot ID to compare. Default is last snapshot

    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.status
        salt '*' snapper.status num_pre=19 num_post=20
    '''
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)
        snapper.CreateComparison(config, int(pre), int(post))
        files = snapper.GetFiles(config, int(pre), int(post))
        subvolume = list_configs()[config]['SUBVOLUME']
        status_ret = {}
        for entry in files:
            path = entry[0]
            # Strip the subvolume prefix when present so re-prefixing below
            # cannot produce a path starting with a double '/'.
            if path.startswith(subvolume):
                path = path[len(subvolume):]
            status_ret[os.path.normpath(subvolume + path)] = {
                'status': status_to_string(entry[1])}
        return status_ret
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing changed files: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def changed_files(config='root', num_pre=None, num_post=None):
    '''
    Return the paths of the files changed between two snapshots.

    config
        Configuration name.

    num_pre
        first snapshot ID to compare. Default is last snapshot

    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI example:

    .. code-block:: bash

        salt '*' snapper.changed_files
        salt '*' snapper.changed_files num_pre=19 num_post=20
    '''
    comparison = status(config, num_pre, num_post)
    return comparison.keys()
def undo(config='root', files=None, num_pre=None, num_post=None):
    '''
    Undo all file changes that happened between num_pre and num_post, leaving
    the files into the state of num_pre.

    .. warning::
        If one of the files has changes after num_post, they will be overwritten
        The snapshots are used to determine the file list, but the current
        version of the files will be overwritten by the versions in num_pre.

        You to undo changes between num_pre and the current version of the
        files use num_post=0.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.undo
    '''
    pre, post = _get_num_interval(config, num_pre, num_post)

    changed = set(status(config, pre, post).keys())
    requested = set(files or changed)

    # FIX: report the files the caller asked for that are NOT in the changed
    # set (the original printed ``changed - requested``, i.e. the wrong
    # direction) and add the space that was missing between the two string
    # literals ("presentin").
    unknown = requested - changed
    if unknown:
        raise CommandExecutionError(
            'Given file list contains files that are not present '
            'in the changed filelist: {0}'.format(unknown))

    cmdret = __salt__['cmd.run']('snapper -c {0} undochange {1}..{2} {3}'.format(
        config, pre, post, ' '.join(requested)))

    try:
        # snapper prints space-separated "key:value" counters
        # (e.g. "create:0 modify:1 delete:0"); turn them into a dict.
        ret = {}
        for counter in cmdret.split(' '):
            key, val = counter.split(':')
            ret[key] = val
        return ret
    except ValueError:
        raise CommandExecutionError(
            'Error while processing Snapper response: {0}'.format(cmdret))
def _get_jid_snapshots(jid, config='root'):
    '''
    Return the (pre, post) snapshot ids created by a given Salt jid.

    Snapshots made through ``snapper.run`` carry the job id under the
    ``salt_jid`` userdata key; the first matching pre/post pair is used.
    '''
    pre_matches = []
    post_matches = []
    for snap in list_snapshots(config):
        if snap['userdata'].get("salt_jid") != jid:
            continue
        if snap['type'] == "pre":
            pre_matches.append(snap)
        elif snap['type'] == "post":
            post_matches.append(snap)

    if not pre_matches or not post_matches:
        raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))

    return (
        pre_matches[0]['id'],
        post_matches[0]['id']
    )
def undo_jid(jid, config='root'):
    '''
    Undo the changes applied by a salt job.

    jid
        The job id to lookup

    config
        Configuration name.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.undo_jid jid=20160607130930720112
    '''
    pre_nr, post_nr = _get_jid_snapshots(jid, config=config)
    return undo(config, num_pre=pre_nr, num_post=post_nr)
def diff(config='root', filename=None, num_pre=None, num_post=None):
    '''
    Returns the differences between two snapshots

    config
        Configuration name.

    filename
        if not provided the showing differences between snapshots for
        all "text" files

    num_pre
        first snapshot ID to compare. Default is last snapshot

    num_post
        last snapshot ID to compare. Default is 0 (current state)

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.diff
        salt '*' snapper.diff filename=/var/log/snapper.log num_pre=19 num_post=20
    '''
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)

        files = changed_files(config, pre, post)
        if filename:
            # Restrict the diff to a single file (empty list if unchanged).
            files = [filename] if filename in files else []

        SUBVOLUME = list_configs()[config]['SUBVOLUME']

        # Mount the snapshots read-only to access the old file contents;
        # snapshot number 0 means the live filesystem (the subvolume itself).
        pre_mount = snapper.MountSnapshot(config, pre, False) if pre else SUBVOLUME
        post_mount = snapper.MountSnapshot(config, post, False) if post else SUBVOLUME

        files_diff = dict()
        # Directories are skipped; only regular files are diffed.
        for filepath in [filepath for filepath in files if not os.path.isdir(filepath)]:
            _filepath = filepath
            if filepath.startswith(SUBVOLUME):
                _filepath = filepath[len(SUBVOLUME):]

            # Just in case, removing possible double '/' from the final file paths
            pre_file = os.path.normpath(pre_mount + "/" + _filepath).replace("//", "/")
            post_file = os.path.normpath(post_mount + "/" + _filepath).replace("//", "/")

            # NOTE(review): salt.utils.stringutils is not imported at the top
            # of this module (only salt.utils.files is) — presumably it is
            # made available transitively; confirm before relying on it.
            if os.path.isfile(pre_file):
                pre_file_exists = True
                with salt.utils.files.fopen(pre_file) as rfh:
                    pre_file_content = [salt.utils.stringutils.to_unicode(_l)
                                        for _l in rfh.readlines()]
            else:
                pre_file_content = []
                pre_file_exists = False

            if os.path.isfile(post_file):
                post_file_exists = True
                with salt.utils.files.fopen(post_file) as rfh:
                    post_file_content = [salt.utils.stringutils.to_unicode(_l)
                                         for _l in rfh.readlines()]
            else:
                post_file_content = []
                post_file_exists = False

            # Text files get a unified diff; the comment is refined below
            # when the file only exists on one side.
            if _is_text_file(pre_file) or _is_text_file(post_file):
                files_diff[filepath] = {
                    'comment': "text file changed",
                    'diff': ''.join(difflib.unified_diff(pre_file_content,
                                                         post_file_content,
                                                         fromfile=pre_file,
                                                         tofile=post_file))}

                if pre_file_exists and not post_file_exists:
                    files_diff[filepath]['comment'] = "text file deleted"
                if not pre_file_exists and post_file_exists:
                    files_diff[filepath]['comment'] = "text file created"

            elif not _is_text_file(pre_file) and not _is_text_file(post_file):
                # This is a binary file
                # Binary files are summarized by their SHA-256 digests
                # instead of a line diff.
                files_diff[filepath] = {'comment': "binary file changed"}
                if pre_file_exists:
                    files_diff[filepath]['old_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(pre_file_content))
                if post_file_exists:
                    files_diff[filepath]['new_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(post_file_content))
                if post_file_exists and not pre_file_exists:
                    files_diff[filepath]['comment'] = "binary file created"
                if pre_file_exists and not post_file_exists:
                    files_diff[filepath]['comment'] = "binary file deleted"

        # Unmount whatever was mounted above (0 was never mounted).
        if pre:
            snapper.UmountSnapshot(config, pre, False)
        if post:
            snapper.UmountSnapshot(config, post, False)
        return files_diff
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while showing differences between snapshots: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def diff_jid(jid, config='root'):
    '''
    Return the file changes applied by a salt job.

    jid
        The job id to lookup

    config
        Configuration name.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.diff_jid jid=20160607130930720112
    '''
    pre_nr, post_nr = _get_jid_snapshots(jid, config=config)
    return diff(config, num_pre=pre_nr, num_post=post_nr)
def create_baseline(tag="baseline", config='root'):
    '''
    Create a single snapshot tagged as a baseline.

    tag
        Tag name for the baseline

    config
        Configuration name.

    CLI Example:

    .. code-block:: bash

        salt '*' snapper.create_baseline
        salt '*' snapper.create_baseline my_custom_baseline
    '''
    create = __salt__['snapper.create_snapshot']
    return create(config=config,
                  snapshot_type='single',
                  description="baseline snapshot",
                  cleanup_algorithm="number",
                  userdata={"baseline_tag": tag})
| 30.823857 | 126 | 0.619733 |
from __future__ import absolute_import, unicode_literals, print_function
import logging
import os
import time
import difflib
try:
from pwd import getpwuid
HAS_PWD = True
except ImportError:
HAS_PWD = False
from salt.exceptions import CommandExecutionError
import salt.utils.files
from salt.ext import six
try:
import dbus
HAS_DBUS = True
except ImportError:
HAS_DBUS = False
DBUS_STATUS_MAP = {
1: "created",
2: "deleted",
4: "type changed",
8: "modified",
16: "permission changed",
32: "owner changed",
64: "group changed",
128: "extended attributes changed",
256: "ACL info changed",
}
SNAPPER_DBUS_OBJECT = 'org.opensuse.Snapper'
SNAPPER_DBUS_PATH = '/org/opensuse/Snapper'
SNAPPER_DBUS_INTERFACE = 'org.opensuse.Snapper'
log = logging.getLogger(__name__)
bus = None
system_bus_error = None
snapper = None
snapper_error = None
if HAS_DBUS:
try:
bus = dbus.SystemBus()
except dbus.DBusException as exc:
log.warning(exc)
system_bus_error = exc
else:
if SNAPPER_DBUS_OBJECT in bus.list_activatable_names():
try:
snapper = dbus.Interface(bus.get_object(SNAPPER_DBUS_OBJECT,
SNAPPER_DBUS_PATH),
dbus_interface=SNAPPER_DBUS_INTERFACE)
except (dbus.DBusException, ValueError) as exc:
log.warning(exc)
snapper_error = exc
else:
snapper_error = 'snapper is missing'
def __virtual__():
error_msg = 'The snapper module cannot be loaded: {0}'
if not HAS_DBUS:
return False, error_msg.format('missing python dbus module')
elif not snapper:
return False, error_msg.format(snapper_error)
elif not bus:
return False, error_msg.format(system_bus_error)
elif not HAS_PWD:
return False, error_msg.format('pwd module not available')
return 'snapper'
def _snapshot_to_data(snapshot):
data = {}
data['id'] = snapshot[0]
data['type'] = ['single', 'pre', 'post'][snapshot[1]]
if data['type'] == 'post':
data['pre'] = snapshot[2]
if snapshot[3] != -1:
data['timestamp'] = snapshot[3]
else:
data['timestamp'] = int(time.time())
data['user'] = getpwuid(snapshot[4])[0]
data['description'] = snapshot[5]
data['cleanup'] = snapshot[6]
data['userdata'] = {}
for key, value in snapshot[7].items():
data['userdata'][key] = value
return data
def _dbus_exception_to_reason(exc, args):
error = exc.get_dbus_name()
if error == 'error.unknown_config':
return "Unknown configuration '{0}'".format(args['config'])
elif error == 'error.illegal_snapshot':
return 'Invalid snapshot'
else:
return exc.get_dbus_name()
def list_snapshots(config='root'):
try:
snapshots = snapper.ListSnapshots(config)
return [_snapshot_to_data(s) for s in snapshots]
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing snapshots: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def get_snapshot(number=0, config='root'):
try:
snapshot = snapper.GetSnapshot(config, int(number))
return _snapshot_to_data(snapshot)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while retrieving snapshot: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def list_configs():
try:
configs = snapper.ListConfigs()
return dict((config[0], config[2]) for config in configs)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing configurations: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def _config_filter(value):
if isinstance(value, bool):
return 'yes' if value else 'no'
return value
def set_config(name='root', **kwargs):
try:
data = dict((k.upper(), _config_filter(v)) for k, v in
kwargs.items() if not k.startswith('__'))
snapper.SetConfig(name, data)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while setting configuration {0}: {1}'
.format(name, _dbus_exception_to_reason(exc, locals()))
)
return True
def _get_last_snapshot(config='root'):
snapshot_list = sorted(list_snapshots(config), key=lambda x: x['id'])
return snapshot_list[-1]
def status_to_string(dbus_status):
status_tuple = (
dbus_status & 0b000000001, dbus_status & 0b000000010, dbus_status & 0b000000100,
dbus_status & 0b000001000, dbus_status & 0b000010000, dbus_status & 0b000100000,
dbus_status & 0b001000000, dbus_status & 0b010000000, dbus_status & 0b100000000
)
return [DBUS_STATUS_MAP[status] for status in status_tuple if status]
def get_config(name='root'):
try:
config = snapper.GetConfig(name)
return config
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while retrieving configuration: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def create_config(name=None,
subvolume=None,
fstype=None,
template=None,
extra_opts=None):
def raise_arg_error(argname):
raise CommandExecutionError(
'You must provide a "{0}" for the new configuration'.format(argname)
)
if not name:
raise_arg_error("name")
if not subvolume:
raise_arg_error("subvolume")
if not fstype:
raise_arg_error("fstype")
if not template:
template = ""
try:
snapper.CreateConfig(name, subvolume, fstype, template)
if extra_opts:
set_config(name, **extra_opts)
return get_config(name)
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while creating the new configuration: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
def create_snapshot(config='root', snapshot_type='single', pre_number=None,
description=None, cleanup_algorithm='number', userdata=None,
**kwargs):
if not userdata:
userdata = {}
jid = kwargs.get('__pub_jid')
if description is None and jid is not None:
description = 'salt job {0}'.format(jid)
if jid is not None:
userdata['salt_jid'] = jid
new_nr = None
try:
if snapshot_type == 'single':
new_nr = snapper.CreateSingleSnapshot(config, description,
cleanup_algorithm, userdata)
elif snapshot_type == 'pre':
new_nr = snapper.CreatePreSnapshot(config, description,
cleanup_algorithm, userdata)
elif snapshot_type == 'post':
if pre_number is None:
raise CommandExecutionError(
"pre snapshot number 'pre_number' needs to be"
"specified for snapshots of the 'post' type")
new_nr = snapper.CreatePostSnapshot(config, pre_number, description,
cleanup_algorithm, userdata)
else:
raise CommandExecutionError(
"Invalid snapshot type '{0}'", format(snapshot_type))
except dbus.DBusException as exc:
raise CommandExecutionError(
'Error encountered while listing changed files: {0}'
.format(_dbus_exception_to_reason(exc, locals()))
)
return new_nr
def delete_snapshot(snapshots_ids=None, config="root"):
if not snapshots_ids:
raise CommandExecutionError('Error: No snapshot ID has been provided')
try:
current_snapshots_ids = [x['id'] for x in list_snapshots(config)]
if not isinstance(snapshots_ids, list):
snapshots_ids = [snapshots_ids]
if not set(snapshots_ids).issubset(set(current_snapshots_ids)):
raise CommandExecutionError(
"Error: Snapshots '{0}' not found".format(", ".join(
[six.text_type(x) for x in set(snapshots_ids).difference(
set(current_snapshots_ids))]))
)
snapper.DeleteSnapshots(config, snapshots_ids)
return {config: {"ids": snapshots_ids, "status": "deleted"}}
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def modify_snapshot(snapshot_id=None,
description=None,
userdata=None,
cleanup=None,
config="root"):
if not snapshot_id:
raise CommandExecutionError('Error: No snapshot ID has been provided')
snapshot = get_snapshot(config=config, number=snapshot_id)
try:
updated_opts = {
'description': description if description is not None else snapshot['description'],
'cleanup': cleanup if cleanup is not None else snapshot['cleanup'],
'userdata': userdata if userdata is not None else snapshot['userdata'],
}
snapper.SetSnapshot(config,
snapshot_id,
updated_opts['description'],
updated_opts['cleanup'],
updated_opts['userdata'])
return get_snapshot(config=config, number=snapshot_id)
except dbus.DBusException as exc:
raise CommandExecutionError(_dbus_exception_to_reason(exc, locals()))
def _get_num_interval(config, num_pre, num_post):
post = int(num_post) if num_post else 0
pre = int(num_pre) if num_pre is not None else _get_last_snapshot(config)['id']
return pre, post
def _is_text_file(filename):
type_of_file = os.popen('file -bi {0}'.format(filename), 'r').read()
return type_of_file.startswith('text')
def run(function, *args, **kwargs):
config = kwargs.pop("config", "root")
description = kwargs.pop("description", "snapper.run[{0}]".format(function))
cleanup_algorithm = kwargs.pop("cleanup_algorithm", "number")
userdata = kwargs.pop("userdata", {})
func_kwargs = dict((k, v) for k, v in kwargs.items() if not k.startswith('__'))
kwargs = dict((k, v) for k, v in kwargs.items() if k.startswith('__'))
pre_nr = __salt__['snapper.create_snapshot'](
config=config,
snapshot_type='pre',
description=description,
cleanup_algorithm=cleanup_algorithm,
userdata=userdata,
**kwargs)
if function not in __salt__:
raise CommandExecutionError(
'function "{0}" does not exist'.format(function)
)
try:
ret = __salt__[function](*args, **func_kwargs)
except CommandExecutionError as exc:
ret = "\n".join([six.text_type(exc), __salt__[function].__doc__])
__salt__['snapper.create_snapshot'](
config=config,
snapshot_type='post',
pre_number=pre_nr,
description=description,
cleanup_algorithm=cleanup_algorithm,
userdata=userdata,
**kwargs)
return ret
def status(config='root', num_pre=None, num_post=None):
    '''
    Return a dict mapping each changed file path (relative to the config's
    subvolume) to its snapper change status between two snapshots.
    '''
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)
        snapper.CreateComparison(config, int(pre), int(post))
        files = snapper.GetFiles(config, int(pre), int(post))
        SUBVOLUME = list_configs()[config]['SUBVOLUME']
        status_ret = {}
        for entry in files:
            path, state = entry[0], entry[1]
            if path.startswith(SUBVOLUME):
                path = path[len(SUBVOLUME):]
            status_ret[os.path.normpath(SUBVOLUME + path)] = {
                'status': status_to_string(state)}
        return status_ret
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing changed files: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def changed_files(config='root', num_pre=None, num_post=None):
    '''Return the paths of the files that changed between two snapshots.'''
    changes = status(config, num_pre, num_post)
    return changes.keys()
def undo(config='root', files=None, num_pre=None, num_post=None):
    '''
    Undo the changes recorded between two snapshots for the given files.

    ``files`` defaults to every changed file. A CommandExecutionError is
    raised when the list names files that did not actually change.

    :return: dict parsed from the ``snapper undochange`` summary output
        (e.g. ``{'create': '0', 'modify': '1', 'delete': '0'}``).
    '''
    pre, post = _get_num_interval(config, num_pre, num_post)
    changes = status(config, pre, post)
    changed = set(changes.keys())
    requested = set(files or changed)
    if not requested.issubset(changed):
        # Report the offending files: those requested but NOT changed.
        # (The original printed the reverse difference and glued the two
        # message literals together without a space.)
        raise CommandExecutionError(
            'Given file list contains files that are not present '
            'in the changed filelist: {0}'.format(requested - changed))
    cmdret = __salt__['cmd.run']('snapper -c {0} undochange {1}..{2} {3}'.format(
        config, pre, post, ' '.join(requested)))
    try:
        components = cmdret.split(' ')
        ret = {}
        for comp in components:
            key, val = comp.split(':')
            ret[key] = val
        return ret
    except ValueError:
        raise CommandExecutionError(
            'Error while processing Snapper response: {0}'.format(cmdret))
def _get_jid_snapshots(jid, config='root'):
    '''Return the (pre_id, post_id) snapshot pair tagged with a Salt jid.'''
    tagged = [snap for snap in list_snapshots(config)
              if snap['userdata'].get("salt_jid") == jid]
    pre_snaps = [snap for snap in tagged if snap['type'] == "pre"]
    post_snaps = [snap for snap in tagged if snap['type'] == "post"]
    if not pre_snaps or not post_snaps:
        raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))
    return (pre_snaps[0]['id'], post_snaps[0]['id'])
def undo_jid(jid, config='root'):
    '''Undo every file change recorded by the snapshots of a Salt jid.'''
    pre, post = _get_jid_snapshots(jid, config=config)
    return undo(config, num_pre=pre, num_post=post)
def diff(config='root', filename=None, num_pre=None, num_post=None):
    '''
    Return a dict describing, per changed file, how it differs between two
    snapshots: a unified diff for text files, sha256 digests for binary
    files, plus a created/deleted/changed comment.

    Both snapshots are mounted, compared file-by-file, then unmounted.
    ``filename`` restricts the comparison to that single file.
    '''
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)
        files = changed_files(config, pre, post)
        if filename:
            # restrict to the requested file only (empty list if unchanged)
            files = [filename] if filename in files else []
        SUBVOLUME = list_configs()[config]['SUBVOLUME']
        # snapshot number 0 (falsy) means "current state": use the live
        # subvolume instead of mounting a snapshot
        pre_mount = snapper.MountSnapshot(config, pre, False) if pre else SUBVOLUME
        post_mount = snapper.MountSnapshot(config, post, False) if post else SUBVOLUME
        files_diff = dict()
        for filepath in [filepath for filepath in files if not os.path.isdir(filepath)]:
            _filepath = filepath
            if filepath.startswith(SUBVOLUME):
                # make the path relative to the subvolume root
                _filepath = filepath[len(SUBVOLUME):]
            # map the relative path into each mounted snapshot
            pre_file = os.path.normpath(pre_mount + "/" + _filepath).replace("//", "/")
            post_file = os.path.normpath(post_mount + "/" + _filepath).replace("//", "/")
            if os.path.isfile(pre_file):
                pre_file_exists = True
                with salt.utils.files.fopen(pre_file) as rfh:
                    pre_file_content = [salt.utils.stringutils.to_unicode(_l)
                                        for _l in rfh.readlines()]
            else:
                pre_file_content = []
                pre_file_exists = False
            if os.path.isfile(post_file):
                post_file_exists = True
                with salt.utils.files.fopen(post_file) as rfh:
                    post_file_content = [salt.utils.stringutils.to_unicode(_l)
                                         for _l in rfh.readlines()]
            else:
                post_file_content = []
                post_file_exists = False
            # NOTE(review): _is_text_file shells out to `file -bi`; it is
            # also called on paths that may not exist (deleted/created case)
            if _is_text_file(pre_file) or _is_text_file(post_file):
                files_diff[filepath] = {
                    'comment': "text file changed",
                    'diff': ''.join(difflib.unified_diff(pre_file_content,
                                                         post_file_content,
                                                         fromfile=pre_file,
                                                         tofile=post_file))}
                if pre_file_exists and not post_file_exists:
                    files_diff[filepath]['comment'] = "text file deleted"
                if not pre_file_exists and post_file_exists:
                    files_diff[filepath]['comment'] = "text file created"
            elif not _is_text_file(pre_file) and not _is_text_file(post_file):
                # binary files: report digests instead of a diff
                files_diff[filepath] = {'comment': "binary file changed"}
                if pre_file_exists:
                    files_diff[filepath]['old_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(pre_file_content))
                if post_file_exists:
                    files_diff[filepath]['new_sha256_digest'] = __salt__['hashutil.sha256_digest'](''.join(post_file_content))
                if post_file_exists and not pre_file_exists:
                    files_diff[filepath]['comment'] = "binary file created"
                if pre_file_exists and not post_file_exists:
                    files_diff[filepath]['comment'] = "binary file deleted"
        # unmount only real snapshots (0 = live subvolume, never mounted)
        if pre:
            snapper.UmountSnapshot(config, pre, False)
        if post:
            snapper.UmountSnapshot(config, post, False)
        return files_diff
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while showing differences between snapshots: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def diff_jid(jid, config='root'):
    '''Show the file differences recorded by the snapshots of a Salt jid.'''
    pre, post = _get_jid_snapshots(jid, config=config)
    return diff(config, num_pre=pre, num_post=post)
def create_baseline(tag="baseline", config='root'):
    '''Create a single snapshot tagged as a baseline via its userdata.'''
    return __salt__['snapper.create_snapshot'](
        config=config,
        snapshot_type='single',
        description="baseline snapshot",
        cleanup_algorithm="number",
        userdata={"baseline_tag": tag})
| true | true |
f72013a2dc273fdddd592328a01ea75807d6c262 | 1,098 | py | Python | cli/tests/test_managers/test_run.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | cli/tests/test_managers/test_run.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | cli/tests/test_managers/test_run.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import pytest
from polyaxon_sdk import V1Run
from polyaxon.managers.run import RunManager
@pytest.mark.managers_mark
class TestRunManager(TestCase):
    """Sanity checks on RunManager's shipped default configuration."""

    def test_default_props(self):
        # RunManager persists run info under a per-project .polyaxonrun
        # file (not a global one) and deserializes it as a V1Run.
        assert RunManager.CONFIG == V1Run
        assert RunManager.CONFIG_FILE_NAME == ".polyaxonrun"
        assert RunManager.IS_POLYAXON_DIR is True
        assert RunManager.IS_GLOBAL is False
| 30.5 | 74 | 0.762295 |
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import pytest
from polyaxon_sdk import V1Run
from polyaxon.managers.run import RunManager
@pytest.mark.managers_mark
class TestRunManager(TestCase):
def test_default_props(self):
assert RunManager.IS_GLOBAL is False
assert RunManager.IS_POLYAXON_DIR is True
assert RunManager.CONFIG_FILE_NAME == ".polyaxonrun"
assert RunManager.CONFIG == V1Run
| true | true |
f720140e054f1279701550bef9197a63ff8e51bf | 88,490 | py | Python | src/neo4j_loader/load_csv_data.py | cebriggs7135/ontology-api | 4d2512dcec532cfdfdcd5ff88216e402afbbe2af | [
"MIT"
] | null | null | null | src/neo4j_loader/load_csv_data.py | cebriggs7135/ontology-api | 4d2512dcec532cfdfdcd5ff88216e402afbbe2af | [
"MIT"
] | null | null | null | src/neo4j_loader/load_csv_data.py | cebriggs7135/ontology-api | 4d2512dcec532cfdfdcd5ff88216e402afbbe2af | [
"MIT"
] | null | null | null | '''
Created on Oct 20, 2020
@author: chb69
'''
import sys
import os
import types
import mysql.connector
from mysql.connector import errorcode
import csv
import argparse
"""
this list includes the prefixes for several informatics resources found in the PheKnowLator mapping data.
This might be useful in the future:
bto: is BRENDA Tissue ontology (human brain related)
fbbt: Flybase
caro: Common Anatomy Reference Ontology
xao: Frog
zfa: Zebrafish
ma: mouse anatomy
wbbt: Wormbase
fao: fungal anatomy ontology
https://ncit.nci.nih.gov/ncitbrowser/conceptreport.jsp?dictionary=nci_thesaurus&code=<code> resolves to NCI terms might be able to map these**
vsao: Vertebrate Skeletal Anatomy Ontology
kupo: Kidney and Urinary Pathway Ontology
mp: mouse phenotype
emapa: Mouse Developmental Anatomy Ontology
caloha:ts- an ontology of human anatomy and human cell types
pmid: PubMed
mat: Minimal Anatomy Terminology
miaa: ???
efo: ??? (found Experimental Factor Ontology but that doesn't look right)
ehdaa: Human Developmental Anatomy Ontology
vhog: Vertebrate Homologous Ontology Group
pba: ???
bams: BAMS Neuroanatomical Ontology
mba: ???
ev: eVOC (Expressed Sequence Annotation for Humans)
dhba: ???
http://www.snomedbrowser.com/codes/details/<code> resolves to SNOMED terms. We might be able to import these
nlxanat: a NIF ontology http://uri.neuinfo.org/nif/nifstd/nlx_anat
aao: Amphibian Gross Anatomy Ontology
tao: Teleost Anatomy Ontology
tgma: Mosquito Gross Anatomy Ontology
hao: Hymenoptera Anatomy Ontology
"""
# module-level configuration dict; populated from the app config file
# (see load_config) before the loader functions run
config = {}
def load_config(root_path, filename):
    '''Load a python-syntax config file into a plain dict.

    Heavily borrowed from the flask config.py from_pyfile method: the file
    is executed as a module and every top-level name not starting with a
    double underscore becomes a dict entry.

    :param root_path: the path leading to the config file
    :param filename: the filename of the config relative to the root path
    :return: dict of the configuration constants
    :raises OSError: if the file cannot be read (strerror is amended)
    '''
    filename = os.path.join(root_path, filename)
    module = types.ModuleType("config")
    module.__file__ = filename
    try:
        with open(filename, mode="rb") as config_file:
            exec(compile(config_file.read(), filename, "exec"), module.__dict__)
    except OSError as e:
        e.strerror = f"Unable to load configuration file ({e.strerror})"
        raise
    # exec() injects dunders (__builtins__, __name__, ...); keep the rest
    return {key: value for key, value in module.__dict__.items()
            if not str(key).startswith('__')}
def create_database(config):
    ''' Create the initial database. This method uses the SQL script found in
    the TABLE_CREATE_SQL_FILEPATH of the config file to build the database.
    :param dict config: The configuration settings
    '''
    connection = None
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'])
        cursor = connection.cursor()
        with open(config['TABLE_CREATE_SQL_FILEPATH'], encoding="utf-8") as f:
            script = f.read()
        # the script is a sequence of semicolon-separated DDL statements
        for command in script.split(';'):
            if str(command).strip() != "":
                print('Executing: ' + command)
                cursor.execute(command)
        print("Done creating database tables.")
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    finally:
        if connection is not None:
            connection.close()
def create_indices(config):
    ''' Create the indices in the mysql database to improve performance in the
    transform step. There is a set of default indices that need to be created.
    These are found in the config parameter INDEX_CREATE_SQL_FILEPATH. After these
    are created, a series of custom indices need to be added to the various tables
    created from the other config parameters.
    :param dict config: The configuration settings
    '''
    connection = None
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'])
        cursor = connection.cursor()
        """
        with open(config['INDEX_CREATE_SQL_FILEPATH'], encoding="utf-8") as f:
            # this code creates the "default" indices
            commands = f.read().split(';')
            for command in commands:
                if str(command).strip() != "":
                    print('Executing: ' + command)
                    cursor.execute(command)
        """
        # the code below creates the indices for the tables created from entries in the
        # app.cfg file
        # node_metadata tables: index the lookup columns used during transform
        # (prefix lengths keep the index within mysql key-size limits)
        for table_info in config['NODE_METADATA_FILE_TABLE_INFO']:
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(500))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_node_label_idx (node_label(500))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_codeid_idx (codeid(500))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
        # edge_list tables: index the triple columns plus sab
        for table_info in config['EDGE_LIST_FILE_TABLE_INFO']:
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_subject_idx (subject(50))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_predicate_idx (predicate(100))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_object_idx (object(100))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
        # dbxref tables: full-text index on the pipe-delimited dbxrefs column
        for table_info in config['DBXREF_FILE_TABLE_INFO']:
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(50))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD FULLTEXT INDEX {table_name}_dbxrefs_idx (dbxrefs(700))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
        # relations tables
        for table_info in config['RELATIONS_FILE_TABLE_INFO']:
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_relation_id_idx (relation_id(100))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_relation_label_idx (relation_label(50))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_inverse_relation_label_idx (inverse_relation_label(50))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
        # synonym tables
        for table_info in config['SYNONYM_LIST_FILE_TABLE_INFO']:
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_ontology_uri_idx (ontology_uri(500))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_synonym_idx (synonym(500))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
            sql = "ALTER TABLE {table_name} ADD INDEX {table_name}_sab_idx (sab(50))".format(table_name=table_info['table_name'])
            cursor.execute(sql)
        print ("Done creating database indices.")
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    finally:
        if connection != None:
            connection.close()
def load_edge_list(config):
    '''
    Load all of the edge_list CSV files into a series of mysql tables.

    For each entry in EDGE_LIST_FILE_TABLE_INFO the target table is
    dropped, recreated, filled from the CSV file, and every row is then
    stamped with the entry's SAB (source abbreviation).

    param dict config: the configuration data for this application
    '''
    edge_list_list = config['EDGE_LIST_FILE_TABLE_INFO']
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in edge_list_list:
            # walk through the list of edge_list files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            sab = table_data['sab']
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            subject VARCHAR(2048) NOT NULL,
            predicate VARCHAR(2048) NOT NULL,
            object VARCHAR(2048) NOT NULL,
            sab VARCHAR(50),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this is the generic SQL to create the edge_list tables
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
            sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
            # add the SAB for all records in table
            cursor.execute(sql)
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # guard: if connect() itself raised, connection is still None and an
        # unconditional rollback() would mask the error with AttributeError
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
def load_synonym_list(config):
    '''
    Load all of the synonym CSV files into a series of mysql tables.

    Each entry in SYNONYM_LIST_FILE_TABLE_INFO names a CSV file, a target
    table (dropped and recreated here), and a SAB stamped onto every row.
    param dict config: the configuration data for this application
    '''
    if 'SYNONYM_LIST_FILE_TABLE_INFO' not in config:
        #don't run this code if the synonym file is missing
        return
    synonym_list = config['SYNONYM_LIST_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in synonym_list:
            # walk through the list of synonym files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            sab = table_data['sab']
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            ontology_uri VARCHAR(2048) NOT NULL,
            codeid VARCHAR(2048) NOT NULL,
            synonym VARCHAR(2048) NOT NULL,
            sab VARCHAR(50),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this is the generic SQL to create a synonym table
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
            sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
            # add the SAB for all records in table
            cursor.execute(sql)
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # NOTE(review): if connect() itself failed, connection is still None
        # here and this rollback() raises AttributeError — consider guarding
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_relations(config):
    '''
    Load all of the relations CSV files into a series of mysql tables.

    Each entry in RELATIONS_FILE_TABLE_INFO names a CSV file and a target
    table (dropped and recreated here).
    param dict config: the configuration data for this application
    '''
    # NOTE(review): variable name is misleading — this holds the RELATIONS
    # file info, not node metadata
    node_metadata_list = config['RELATIONS_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in node_metadata_list:
            # walk through the list of relations files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            # NOTE(review): sab is read but never applied — relations tables
            # have no sab column
            sab = table_data['sab']
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            relation_id VARCHAR(2048) NOT NULL,
            relation_label VARCHAR(2048) NOT NULL,
            inverse_relation_label VARCHAR(2048),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this is the generic create relations SQL statement
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # NOTE(review): connection may still be None here if connect() failed
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def create_missing_codeids(config):
    '''
    Backfill the codeid column in every node_metadata table.

    Rows whose codeid is NULL get one derived from the ontology_uri by
    stripping the OBO purl prefix and replacing underscores with spaces
    (e.g. http://purl.obolibrary.org/obo/UBERON_0000948 -> 'UBERON 0000948').

    param dict config: the configuration data for this application
    '''
    node_metadata_list = config['NODE_METADATA_FILE_TABLE_INFO']
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in node_metadata_list:
            table_name = table_data['table_name']
            sql = """UPDATE {table_name}
            SET codeid = REPLACE(REPLACE(ontology_uri, 'http://purl.obolibrary.org/obo/',''), '_', ' ')
            WHERE codeid IS NULL""".format(table_name=table_name)
            # add a codeid for all records in table
            cursor.execute(sql)
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # guard: connection is still None when connect() itself failed; an
        # unconditional rollback() would raise AttributeError
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()
def fix_dbxrefs(config):
    '''
    Normalize the xref column of the dbxrefs table so the values match
    UMLS source abbreviations: uppercase everything, rewrite the NCIT,
    SNOMED-browser-URL and MeSH prefixes, then replace remaining colons
    with spaces.

    param dict config: the configuration data for this application
    '''
    connection = None
    sql = ''
    record_count = 0
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        table_name = 'dbxrefs'
        sql = """UPDATE {table_name}
        SET xref = UPPER(xref)""".format(table_name=table_name)
        # uppercase all dbxrefs data in table
        cursor.execute(sql)
        connection.commit()
        sql = """UPDATE {table_name}
        SET xref = REPLACE(xref, 'NCIT:', 'NCI:') WHERE xref LIKE 'NCIT:%'""".format(table_name=table_name)
        # convert all the NCI codes
        cursor.execute(sql)
        connection.commit()
        sql = """UPDATE {table_name}
        SET xref = REPLACE(xref, 'HTTP://WWW.SNOMEDBROWSER.COM/CODES/DETAILS/', 'SNOMEDCT_US:') WHERE xref LIKE 'HTTP://WWW.SNOMEDBROWSER.COM/CODES/DETAILS/%'""".format(table_name=table_name)
        # convert all the SNOMED codes
        cursor.execute(sql)
        connection.commit()
        sql = """UPDATE {table_name}
        SET xref = REPLACE(xref, 'MESH:', 'MSH:') WHERE xref LIKE 'MESH:%'
        AND instr(xref, 'MESH:D') > 0
        AND instr(xref, 'MESH:D24') = 0""".format(table_name=table_name)
        # convert all the MeSH codes
        cursor.execute(sql)
        connection.commit()
        sql = """UPDATE {table_name}
        SET xref = REPLACE(xref, ':', ' ')""".format(table_name=table_name)
        # replace all remaining colons with spaces dbxrefs data in table
        cursor.execute(sql)
        connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # NOTE(review): connection may still be None here if connect() failed
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_node_metadata(config):
    '''
    Load all of the node_metadata CSV files into a series of mysql tables.

    Each entry in NODE_METADATA_FILE_TABLE_INFO names a CSV file, a target
    table (dropped and recreated here), and a SAB stamped onto every row.
    param dict config: the configuration data for this application
    '''
    node_metadata_list = config['NODE_METADATA_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in node_metadata_list:
            # walk through the list of node_metadata files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            sab = table_data['sab']
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            # codeid is nullable here; create_missing_codeids() backfills it later
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            ontology_uri VARCHAR(2048) NOT NULL,
            codeid VARCHAR(2048),
            node_label VARCHAR(2048) NOT NULL,
            node_definition VARCHAR(2048) NOT NULL,
            sab VARCHAR(50),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this SQL creates the generic node_metadata table
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
            sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
            # add the SAB for all records in table
            cursor.execute(sql)
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # NOTE(review): connection may still be None here if connect() failed
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_dbxref(config):
    '''
    Load all of the dbxref CSV files into a series of mysql tables.

    Each entry in DBXREF_FILE_TABLE_INFO names a CSV file and a target
    table (dropped and recreated here). The dbxrefs column holds a
    pipe-delimited xref list that build_xref_table() later explodes.
    param dict config: the configuration data for this application
    '''
    dbxref_list = config['DBXREF_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in dbxref_list:
            # walk through the list of dbxref files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            # NOTE(review): sab is read but never written — the sab column
            # stays NULL for dbxref tables
            sab = table_data['sab']
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            ontology_uri VARCHAR(2048) NOT NULL,
            dbxrefs VARCHAR(5120) NOT NULL,
            sab VARCHAR(50),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this is the SQL to create a generic dbxref table
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # NOTE(review): connection may still be None here if connect() failed
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def _load_umls_csv(config, csv_file_name, table_name):
    '''Load one UMLS export CSV from UMLS_SOURCE_DIR into the given table.'''
    file_path = os.path.join(config['UMLS_SOURCE_DIR'], csv_file_name)
    load_file(config, file_path, table_name)


# Thin wrappers, one per UMLS export file. Each pairs a CSV name with its
# destination table; the shared loading logic lives in _load_umls_csv so the
# file/table mapping is stated exactly once per export.

def load_umls_codes(config):
    _load_umls_csv(config, 'CODEs.csv', 'umls_codes')


def load_umls_defs(config):
    _load_umls_csv(config, 'DEFs.csv', 'umls_defs')


def load_umls_suis(config):
    _load_umls_csv(config, 'SUIs.csv', 'umls_suis')


def load_umls_cuis(config):
    _load_umls_csv(config, 'CUIs.csv', 'umls_cuis')


def load_umls_tuis(config):
    _load_umls_csv(config, 'TUIs.csv', 'umls_tuis')


def load_umls_code_suis(config):
    _load_umls_csv(config, 'CODE-SUIs.csv', 'umls_code_suis')


def load_umls_cui_codes(config):
    _load_umls_csv(config, 'CUI-CODEs.csv', 'umls_cui_codes')


def load_umls_cui_cuis(config):
    _load_umls_csv(config, 'CUI-CUIs.csv', 'umls_cui_cuis')


def load_umls_cui_suis(config):
    _load_umls_csv(config, 'CUI-SUIs.csv', 'umls_cui_suis')


def load_umls_cui_tuis(config):
    _load_umls_csv(config, 'CUI-TUIs.csv', 'umls_cui_tuis')


def load_umls_def_rel(config):
    _load_umls_csv(config, 'DEFrel.csv', 'umls_def_rel')


def load_umls_tui_rel(config):
    _load_umls_csv(config, 'TUIrel.csv', 'umls_tui_rel')
def build_xref_table(config):
    '''
    Build the dbxrefs table by reading the ontology_dbxref table. The ontology_dbxref table contains a column dbxrefs.
    This method takes dbxrefs, a pipe-delimited list of xrefs, and splits it into separate entries (ex: xref1|xref2|xref3).
    Each individual xref becomes a new row in the dbxrefs table.
    :param dict config: The configuration settings
    '''
    dbxref_list = config['DBXREF_FILE_TABLE_INFO']
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # rebuild the consolidated dbxrefs table from scratch
        drop_table_sql = "DROP TABLE IF EXISTS dbxrefs"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE dbxrefs (
        id INT NOT NULL AUTO_INCREMENT,
        ontology_uri VARCHAR(2048) NOT NULL,
        xref VARCHAR(2048) NOT NULL,
        PRIMARY KEY(id)
        );"""
        cursor.execute(create_table_sql)
        for table_data in dbxref_list:
            table_name = table_data['table_name']
            sab = table_data['sab']
            cursor.execute("SELECT ontology_uri, dbxrefs FROM {table_name}".format(table_name=table_name))
            print("Loading {sab} data into table {table_name}".format(table_name="dbxrefs", sab=sab), end='', flush=True)
            result = cursor.fetchall()
            record_count = 0
            for row in result:
                ontology_uri = row['ontology_uri']
                all_xrefs = row['dbxrefs']
                xref_list = all_xrefs.split('|')
                # For each row in the ontology_dbxref table, split the dbxrefs column into a list
                for ref in xref_list:
                    # for each xref in the list, insert a new row into the dbxrefs table
                    # escape single quotes for the string-built INSERT below
                    ref = ref.replace("'","''")
                    sql = "INSERT INTO dbxrefs (ontology_uri, xref) VALUES ('{ontology_uri}','{ref}')".format(ontology_uri=ontology_uri, ref=ref)
                    cursor.execute(sql)
                    record_count = record_count + 1
                    #commit every 10,000 records
                    if record_count % 10000 == 0:
                        print('.', end='', flush=True)
                        connection.commit()
            print('') # do this to disable the 'end' flag in prior print statements
            connection.commit()
        print ("Done loading the {table_name} table.".format(table_name="dbxrefs"))
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        # NOTE(review): connection may still be None here if connect() failed
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_file(config, file_path, table_name):
    '''
    Load a CSV or tab-delimited file into a mysql table.

    The file's column headers are cleaned into SQL-compliant column names and the
    rows are inserted via a parameterized INSERT, committing every 200,000 records.

    param dict config: the configuration data for this application
    param str file_path: the full path to the CSV or tab-delimited file that will be loaded
    param str table_name: the name of the table in the database that will contain the data from file_path
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor()
        record_count = 0
        with open(file_path) as csvfile:
            # .txt files are tab-delimited; everything else is treated as CSV
            if file_path.endswith('.txt'):
                myCSVReader = csv.DictReader(csvfile, delimiter='\t')
            else:
                myCSVReader = csv.DictReader(csvfile)
            field_names = myCSVReader.fieldnames
            # the following statements remove some extra columns from the UMLS exported files
            if 'name_lc' in field_names:
                field_names.remove('name_lc')
            if 'REL' in field_names:
                field_names.remove('REL')
            if 'RELA' in field_names:
                field_names.remove('RELA')
            if (file_path.endswith('CUI-SUIs.csv') or
                file_path.endswith('CUI-TUIs.csv') or
                file_path.endswith('DEFrel.csv') or
                file_path.endswith('TUIrel.csv')):
                # add a field for type if the UMLS file contains relationship data
                field_names.append('type')
            field_list_str = '%s' % ', '.join(map(str, field_names))
            # the next two lines "cleanup" the column names from the file into a SQL compliant column name
            field_list_str = field_list_str.replace(':ID', '')
            field_list_str = field_list_str.replace(':', '')
            value_list_str = ''
            for field in field_names:
                # build the %(field)s placeholder list for the parameterized INSERT
                value_list_str += '%({field})s, '.format(field=field)
            value_list_str = value_list_str[:-2]
            sql = """INSERT INTO {table_name}({field_list})
            VALUE ({value_list})""".format(table_name=table_name, field_list=field_list_str, value_list=value_list_str)
            print("Loading data from {file_name} into table {table_name}".format(file_name=file_path, table_name=table_name), end='', flush=True)
            for row in myCSVReader:
                # for some of the files, specify the 'type' column
                if file_path.endswith('CUI-SUIs.csv'):
                    row['type'] = 'PREF_TERM'
                if file_path.endswith('CUI-TUIs.csv'):
                    row['type'] = 'STY'
                if file_path.endswith('DEFrel.csv'):
                    row['type'] = 'DEF'
                if file_path.endswith('TUIrel.csv'):
                    row['type'] = 'ISA_STY'
                # use row directly when csv headers match column names.
                # DictReader collects surplus unnamed columns under the None key;
                # drop that key (once is enough -- a dict can hold at most one None
                # key, so the original duplicated pop was redundant)
                if table_name == 'suis':
                    row.pop(None, None)
                cursor.execute(sql, row)
                record_count = record_count + 1
                #commit every 200,000 records
                if record_count % 200000 == 0:
                    print('.', end='', flush=True)
                    connection.commit()
        print('') # do this to disable the 'end' flag in prior print statements
        connection.commit()
        print ("Done loading the {table_name} table.".format(table_name=table_name))
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection is not None:
            connection.close()
def extract_non_umls(config):
    '''
    Load only the non-UMLS (ontology CSV) files: node metadata, relations, dbxrefs,
    edge lists, and synonym lists. Unlike extract(), this skips the UMLS exports
    and does not (re)create the database or build indices.
    param dict config: the configuration data for this application
    '''
    load_node_metadata(config)
    load_relations(config)
    load_dbxref(config)
    load_edge_list(config)
    load_synonym_list(config)
    # This code is temporary. It should be moved to a pre-processing step
    create_missing_codeids(config)
    # END This code is temporary. It should be moved to a pre-processing step
def extract(config):
    '''
    The extract method loads the CSV and tab-delimited files into mysql tables mirroring their file structure.
    param dict config: The configuration data for this application
    '''
    # Run every load step in order: database creation first, then the ontology
    # (non-UMLS) files, then the UMLS exports, and finally index creation.
    extract_steps = [
        create_database,
        load_node_metadata,
        load_relations,
        load_dbxref,
        load_edge_list,
        load_synonym_list,
        load_umls_codes,
        load_umls_defs,
        load_umls_suis,
        load_umls_cuis,
        load_umls_tuis,
        load_umls_cui_codes,
        load_umls_code_suis,
        load_umls_cui_cuis,
        load_umls_cui_suis,
        load_umls_cui_tuis,
        load_umls_def_rel,
        load_umls_tui_rel,
        # This step is temporary. It should be moved to a pre-processing step
        create_missing_codeids,
        # END temporary step
        create_indices,
    ]
    for step in extract_steps:
        step(config)
    print("Done with extract process")
def build_ambiguous_codes_table(config):
    '''
    Construct a table called temp_ambiguous_codes (ontology_uri, codeid). This table contains a subset of
    the codes that map to more than one CUI. These codes are "ambiguous" because we cannot use them in our automated
    processing. Our code cannot decide which of the CUIs should be assigned the preferred term from the data we are loading.
    Also, these connections tend to conflate items (ex: left hand, right hand, and hand are all the same).
    We will use this table to "filter out" some of the data during the ETL process.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # rebuild the table from scratch on every run
        drop_table_sql = "DROP TABLE IF EXISTS temp_ambiguous_codes"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE temp_ambiguous_codes (
                        id INT NOT NULL AUTO_INCREMENT,
                        ontology_uri VARCHAR(2048) NOT NULL,
                        codeid VARCHAR(2048),
                        PRIMARY KEY(id)
                        );"""
        cursor.execute(create_table_sql)
        print("Created table temp_ambiguous_codes")
        # the HAVING clause keeps only (ontology_uri, xref) pairs whose code is
        # linked to more than one distinct CUI -- i.e. the ambiguous codes
        sql = """INSERT INTO temp_ambiguous_codes (ontology_uri, codeid)
                 SELECT DISTINCT ontology_uri, xref as codeid
                 FROM dbxrefs, umls_cui_codes as rel
                 WHERE xref = rel.end_id
                 GROUP BY ontology_uri, xref
                 HAVING COUNT(DISTINCT rel.start_id) > 1"""
        """This query builds the temp_ambiguous_codes table.  It inserts codes with
        more than 1 CUI into the temp_ambiguous_codes table.
        """
        cursor.execute(sql)
        connection.commit()
        print("Loaded codes into table temp_ambiguous_codes")
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def temp_build_ccf_code_cui_table(config):
    '''
    Build the temp_ccf_cui_codes (codeid, cui) table from the CCF-CUI.csv export.
    The table maps CCF codes to their UMLS CUIs and is indexed on both columns.

    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # rebuild the table from scratch on every run
        drop_table_sql = "DROP TABLE IF EXISTS temp_ccf_cui_codes"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE temp_ccf_cui_codes (
                        id INT NOT NULL AUTO_INCREMENT,
                        codeid VARCHAR(2048) NOT NULL,
                        cui VARCHAR(2048),
                        PRIMARY KEY(id)
                        );"""
        cursor.execute(create_table_sql)
        print("Created table temp_ccf_cui_codes")
        sql = "ALTER TABLE temp_ccf_cui_codes ADD INDEX temp_ccf_cui_codes_codeid(codeid(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE temp_ccf_cui_codes ADD INDEX temp_ccf_cui_codes_cui_idx(cui(50))"
        cursor.execute(sql)
        cursor = connection.cursor()
        # TODO: move this hard-coded path into the application config
        file_path = '/home/chb69/umls_data/ccf/CCF-CUI.csv'
        with open(file_path) as csvfile:
            # .txt files are tab-delimited; everything else is treated as CSV
            if file_path.endswith('.txt'):
                myCSVReader = csv.DictReader(csvfile, delimiter='\t')
            else:
                myCSVReader = csv.DictReader(csvfile)
            # input file columns: a.name,b.CodeID,c.CUI,d.name
            print("Loading data from {file_name} into table {table_name}".format(file_name=file_path, table_name='temp_ccf_cui_codes'), end='', flush=True)
            # use a parameterized INSERT so quotes or other special characters in
            # the CSV data cannot break the statement (the previous string-formatted
            # SQL would fail or misbehave on values containing quotes)
            sql = "INSERT INTO temp_ccf_cui_codes (codeid, cui) VALUES (%s, %s)"
            for row in myCSVReader:
                cursor.execute(sql, (row['b.CodeID'], row['c.CUI']))
        connection.commit()
        print ("Done loading the {table_name} table.".format(table_name="temp_ccf_cui_codes"))
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection is not None:
            connection.close()
def build_ontology_uri_to_umls_map_table(config):
    '''
    Construct a table called ontology_uri_map (ontology_uri, cui, codeid, type, sab). This table is a mapping
    between the dbxref data and the UMLS data. The table is built from dbxrefs and cui_codes (ULMS)
    tables. The ontology_uri is the primary key within the dbxref data. The cui and codeid are the main keys
    within the UMLS data. Each record in ontology_uri_map allows one to move between both systems.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # rebuild the mapping table from scratch on every run
        drop_table_sql = "DROP TABLE IF EXISTS ontology_uri_map"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE ontology_uri_map (
                        id INT NOT NULL AUTO_INCREMENT,
                        ontology_uri VARCHAR(2048) NOT NULL,
                        cui VARCHAR(2048),
                        codeid VARCHAR(2048),
                        type VARCHAR(50),
                        mapping_type VARCHAR(50),
                        sab VARCHAR(50),
                        PRIMARY KEY(id)
                        );"""
        cursor.execute(create_table_sql)
        print("Created table ontology_uri_map")
        # substr(xref,6) drops the first five characters of the xref (the leading
        # 'UMLS' prefix plus separator), leaving the bare CUI
        sql = """INSERT INTO ontology_uri_map (ontology_uri, cui)
                SELECT DISTINCT ontology_uri, substr(xref,6) as CUI FROM dbxrefs
                WHERE xref LIKE 'UMLS%'"""
        # This query loads all the ontology_uri's that map directly to a UMLS CUI according to the dbxrefs table
        # these records will have their codeid column set to NULL
        cursor.execute(sql)
        connection.commit()
        print("Loaded UMLS map into table ontology_uri_map")
        # ambiguous codes (codes mapped to more than one CUI, collected by
        # build_ambiguous_codes_table) are excluded via the NOT IN subquery
        sql = """INSERT INTO ontology_uri_map (ontology_uri, codeid, cui, type, sab)
                SELECT DISTINCT ontology_uri, xref as codeid, rel.start_id as cui, 'PT' as type, substring_index(xref,' ', 1) as sab
                FROM dbxrefs, umls_cui_codes as rel
                WHERE xref = rel.end_id
                AND (ontology_uri, xref) NOT IN (SELECT ontology_uri,codeid FROM temp_ambiguous_codes)"""
        # This query loads all the ontology_uri's that map to a code according to the dbxrefs table
        cursor.execute(sql)
        connection.commit()
        print("Loaded map into table ontology_uri_map")
        # add indices after loading to speed up the load
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_ontology_uri_idx(ontology_uri(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_cui_idx(cui(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_codeid_idx(codeid(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_type_idx(type(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_mapping_type_idx(mapping_type(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_sab_idx(sab(50))"
        cursor.execute(sql)
        print("Built indices for table ontology_uri_map")
        # only ontology_uris with exactly one direct CUI mapping (codeid IS NULL)
        # are marked PRIMARY; ambiguous direct mappings are left unmarked
        sql = """UPDATE ontology_uri_map SET mapping_type = 'PRIMARY' where codeid is null AND ontology_uri IN (
            SELECT ontology_uri from (SELECT ontology_uri FROM ontology_uri_map
            where codeid is null
            group by ontology_uri
            having count(distinct cui) = 1) as table_one)"""
        # This query sets all the PRIMARY CUIs
        cursor.execute(sql)
        connection.commit()
        print("Loaded PRIMARY CUI map data into table ontology_uri_map")
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def build_relations_table(config):
    '''
    Create a new table called relations. This table will contains a superset of all relations
    loaded so far. After this table is loaded, UPDATE it to add the inverse relations (if necessary).
    param dict config: the configuration data for this application
    '''
    relations_table_info = config['RELATIONS_FILE_TABLE_INFO']
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        drop_table_sql = "DROP TABLE IF EXISTS relations"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE relations (
                        id INT NOT NULL AUTO_INCREMENT,
                        relation_id VARCHAR(2048) NOT NULL,
                        relation_label VARCHAR(2048) NOT NULL,
                        inverse_relation_label VARCHAR(2048),
                        sab VARCHAR(50),
                        PRIMARY KEY(id));"""
        # step 1: create the new relations table
        cursor.execute(create_table_sql)
        print("Created table relations")
        for table_info in relations_table_info:
            # step 2: for each entry in the RELATIONS_FILE_TABLE_INFO config entry,
            # insert the data from the table referenced by RELATIONS_FILE_TABLE_INFO into the relations table
            table_name = table_info['table_name']
            sab = table_info['sab']
            sql = """INSERT INTO relations (relation_id, relation_label, inverse_relation_label, sab)
            SELECT relation_id, relation_label, inverse_relation_label, '{sab}' FROM {table_name}""".format(table_name=table_name, sab=sab)
            cursor.execute(sql)
            connection.commit()
            print("Loaded {sab} relations data into table relations".format(sab=sab))
        # step 3: after the 'normal' or 'forward' relations are loaded, find any records in the
        # relations table that have inverse_relation_label set to NULL. For each record missing an
        # inverse_relation_label, create an inverse_relation_label equal to 'inverse ' + relation_label
        sql = """UPDATE relations r1
            LEFT JOIN relations r2
            ON r1.relation_id = r2.relation_id
            SET r1.inverse_relation_label = CONCAT('inverse ', r2.relation_label)
            WHERE r2.inverse_relation_label IS NULL"""
        cursor.execute(sql)
        connection.commit()
        # note: this UPDATE covers every SAB loaded above, so the message does not
        # name a single SAB (the old message reused the loop variable after the loop)
        print("Added missing inverse relations into table relations")
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection is not None:
            connection.close()
def insert_new_cui_cui_relations(config):
    '''
    Extract all relationships between two UMLS CUIs found in the PheKnowLator data. This method only
    inserts data into the cui_cuis table. It does not create new CUIs. It adds both the "regular" relations
    plus their inverse relations.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        edge_list_file_info = config['EDGE_LIST_FILE_TABLE_INFO']
        for edge_list_info in edge_list_file_info:
            # walk through all the existing edge_list tables and load the data into the
            # umls_cui_cuis table
            sab = edge_list_info['sab']
            table_name = edge_list_info['table_name']
            # remove any rows from a prior run for this SAB so the load is idempotent
            sql = """DELETE FROM umls_cui_cuis WHERE sab = '{sab}'""".format(sab=sab)
            cursor.execute(sql)
            connection.commit()
            print('')
            print("Deleted {sab} map from table umls_cui_cuis".format(sab=sab))
            # This query needs some explanation. Basically, the edge_list table is the central table
            # in the query. We use the edge_list table structure (subject, predicate, object) to find
            # records where the edge_list contains relationships between the subject CUI and the object
            # CUI. This record will become a new relationship between 2 CUIs. Lastly, we map from the
            # edge_list relation_id to the "English" relation_label. We replace the spaces in the
            # relation_label with underscores ('_'). This becomes the label for the relationship in the
            # CUI to CUI relationship.
            # BUG FIX: the SAB is now taken from the current edge list entry instead of the
            # hard-coded 'UBERON' literal; with the literal, rows from non-UBERON edge lists
            # were tagged as UBERON and never removed by the DELETE above on re-runs.
            sql = """INSERT INTO umls_cui_cuis (start_id, type, end_id, sab)
            SELECT DISTINCT subject_table.cui as start_id, lower(replace(rel.relation_label,' ','_')) as type, object_table.cui as end_id, '{sab}' as sab
            FROM {table_name} el, relations rel, ontology_uri_map subject_table, ontology_uri_map object_table
            WHERE rel.relation_id = el.predicate
            AND subject_table.ontology_uri = el.subject
            AND subject_table.mapping_type = 'PRIMARY'
            AND object_table.ontology_uri = el.object
            AND object_table.mapping_type = 'PRIMARY'
            AND subject_table.cui != object_table.cui
            AND el.sab = '{sab}'""".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            connection.commit()
            print("Loaded {sab} map into table umls_cui_cuis".format(sab=sab))
            # This query is basically the same as the initial query above, but there are two
            # important differences:
            # - the relationship used is the inverse_relation_label from the pkl_relations table.
            # - the subject and object are swapped since we are creating the inverse relationship
            sql = """INSERT INTO umls_cui_cuis (start_id, type, end_id, sab)
            SELECT DISTINCT object_table.cui as start_id, lower(replace(rel.inverse_relation_label,' ','_')) as type, subject_table.cui as end_id, '{sab}' as sab
            FROM {table_name} el, relations rel, ontology_uri_map subject_table, ontology_uri_map object_table
            WHERE rel.relation_id = el.predicate
            AND subject_table.ontology_uri = el.subject
            AND subject_table.mapping_type = 'PRIMARY'
            AND object_table.ontology_uri = el.object
            AND object_table.mapping_type = 'PRIMARY'
            AND subject_table.cui != object_table.cui
            AND rel.inverse_relation_label IS NOT NULL
            AND el.sab = '{sab}'""".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            connection.commit()
            print("Loaded {sab} inverse relation map into table umls_cui_cuis".format(sab=sab))
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection is not None:
            connection.close()
def insert_new_terms(config):
    '''
    The method creates new labels (Term nodes) in the graph for each node_metadata table.
    Adding a Term node affects several tables: suis, code_suis, cui_suis, and new_sui_map. The new_sui_map
    does not represent data in the graph, it merely tracks minted SUIs between application runs to avoid changing the
    SUI and losing its connection to the UMLS codes.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # reset the *_updated working tables so this run starts from a clean copy
        truncate_table_sql = "TRUNCATE suis_updated"
        cursor.execute(truncate_table_sql)
        connection.commit()
        truncate_table_sql = "TRUNCATE code_suis_updated"
        cursor.execute(truncate_table_sql)
        connection.commit()
        truncate_table_sql = "TRUNCATE new_sui_map"
        cursor.execute(truncate_table_sql)
        connection.commit()
        truncate_table_sql = """TRUNCATE cui_suis_updated"""
        cursor.execute(truncate_table_sql)
        connection.commit()
        print ("Copying cui_suis INTO cui_suis_updated")
        sql = """INSERT INTO cui_suis_updated SELECT * FROM umls_cui_suis"""
        cursor.execute(sql)
        connection.commit()
        # indices are dropped before the bulk INSERT...SELECT copies and rebuilt
        # immediately afterwards (presumably to speed up the bulk load)
        sql = "DROP INDEX suis_updated_sui_idx ON suis_updated"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX suis_updated_name_idx ON suis_updated"
        cursor.execute(sql)
        connection.commit()
        print ("Copying suis INTO suis_updated")
        sql = """INSERT INTO suis_updated SELECT * FROM umls_suis"""
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE suis_updated ADD INDEX suis_updated_sui_idx (sui(100))"
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE suis_updated ADD INDEX suis_updated_name_idx (name(500))"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX code_suis_updated_start_id_idx ON code_suis_updated"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX code_suis_updated_end_id_idx ON code_suis_updated"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX code_suis_updated_type_idx ON code_suis_updated"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX code_suis_updated_cui_idx ON code_suis_updated"
        cursor.execute(sql)
        connection.commit()
        print ("Copying code_suis INTO code_suis_updated")
        sql = """INSERT INTO code_suis_updated SELECT * FROM umls_code_suis"""
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_start_id_idx (start_id(100))"
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_end_id_idx (end_id(100))"
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_type_idx (type(100))"
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_cui_idx (cui(100))"
        cursor.execute(sql)
        connection.commit()
        node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        record_count = 1 # start SUI numbering at one
        for table_info in node_metadata_info:
            # for each entry in the NODE_METADATA_FILE_TABLE_INFO config entry, query the node_metadata
            # table and find all missing terms. Then add the missing terms to the appropriate database tables
            table_name = table_info['table_name']
            sab = table_info['sab']
            dict_new_suis = {}
            """ keep an in-memory list of the new SUIs generated
            The SQL includes a list of existing SUIs when it is initially executed.
            During execution, new SUIs are created but they are missing from the ones
            retrieved by the SQL (i.e. a "dirty read"). Therefore, the new SUIs are not found and will
            create duplicate SUIs with the same labels. This in-memory list provides
            lookup services to avoid recreating the labels."""
            sql = """SELECT oum.ontology_uri as ontology_uri, oum.cui AS cui, IFNULL(oum.codeid,nm.codeid) AS codeid, nm.node_label AS label, '{sab}' as sab, su.sui AS sui, 'PT' AS term_type
            FROM {table_name} nm
            INNER JOIN ontology_uri_map oum
            ON nm.ontology_uri = oum.ontology_uri
            AND oum.mapping_type = 'PRIMARY'
            LEFT OUTER JOIN suis_updated su
            ON nm.node_label = su.name
            WHERE oum.codeid is null OR oum.codeid NOT IN (select start_id FROM code_suis_updated)""".format(table_name=table_name,sab=sab)
            """This query joins the ontology_uri_map data to the label from the node_metadata table. The query only returns
            records where the codeid is NULL or the codeid is missing from the code_suis_updated table. These represent
            records that need a new SUI minted."""
            cursor.execute(sql)
            result = cursor.fetchall()
            print("")
            print ("Loading tables suis_updated, code_suis_updated, and new_sui_map for SAB: {sab}".format(sab=sab), end='', flush=True)
            for row in result:
                ontology_uri = row['ontology_uri']
                cui = row['cui']
                codeid = row['codeid']
                # NOTE(review): assumes codeid has the form 'SAB CODE' (space-separated);
                # 'code' is not used below in this method -- confirm and consider removing
                code_list = str(codeid).split(' ')
                code = code_list[1]
                label = row['label']
                term_type = row['term_type']
                sui = row['sui']
                if sui == None:
                    if label in dict_new_suis.keys():
                        # if the label already exists, then use the existing SUI
                        sui = dict_new_suis[label]
                    else:
                        # if the label does not exist, then mint a new SUI
                        sui = 'HS' + str(record_count).zfill(6)
                        # mint a new SUI prefixed with 'HS'
                        # name is wrapped in double quotes so labels containing apostrophes
                        # do not break the SQL; NOTE(review): a label containing a double
                        # quote would still break this statement -- confirm input data
                        sql = """INSERT INTO suis_updated (sui, name) VALUES ('{sui}',"{name}")""".format(sui=sui,name=label)
                        cursor.execute(sql)
                        sql = """INSERT INTO new_sui_map (codeid, sui, name) VALUES ('{codeid}','{sui}',"{name}")""".format(codeid=codeid,sui=sui,name=label)
                        cursor.execute(sql)
                        dict_new_suis[label] = sui
                        # add the new SUI to the in memory list
                sql = """INSERT INTO code_suis_updated (start_id, end_id, type, cui) VALUES ('{codeid}','{sui}','{term_type}','{cui}')""".format(codeid=codeid,sui=sui,cui=cui,term_type=term_type)
                cursor.execute(sql)
                if 'HC' in cui and term_type == 'PT':
                    #insert a new HCUI into the cui_suis_updated table since it does not exist in the table yet.
                    sql = """INSERT INTO cui_suis_updated (start_id, end_id, type) VALUES ('{cui}','{sui}','PREF_TERM')""".format(cui=cui,sui=sui)
                    cursor.execute(sql)
                record_count = record_count + 1
                #commit every 10,000 records
                if record_count % 10000 == 0:
                    print('.', end='', flush=True)
                    connection.commit()
        connection.commit()
        print('')
        insert_new_synonyms(config, record_count)
        # after the for loop completes, add all the synonymous terms. This is done outside of the for loop
        # because there is not necessarily a 1 to 1 relationship between the node_metadata entries and the synoymous files.
        # However, there is a dependency because the insert_new_synonym method needs to continue the SUI numbering.
        # This method is executed after the commit because then we do not need to worry about a situation where some of the
        # terms have not yet been committed to the database.
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_synonyms(config, record_count):
    '''
    The method creates new labels (Term nodes) in the graph for each synonym table.
    This method is basically identical to the insert_new_terms method. The only differences are the
    a) the config entry used (this uses SYNONYM_LIST_FILE_TABLE_INFO) and b) SQL used to find the synonyms
    Adding a Term node affects several tables: suis, code_suis, cui_suis, and new_sui_map. The new_sui_map
    does not represent data in the graph, it merely tracks minted SUIs between application runs to avoid changing the
    SUI and losing its connection to the UMLS codes.
    param dict config: The configuration data for this application.
    param int record_count: continues the SUI numbering started by insert_new_terms
    '''
    if 'SYNONYM_LIST_FILE_TABLE_INFO' not in config:
        #skip this method if there are no synonym files defined
        return
    synonym_list = config['SYNONYM_LIST_FILE_TABLE_INFO']
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_info in synonym_list:
            sab = table_info['sab']
            table_name = table_info['table_name']
            dict_new_suis = {}
            """ keep an in-memory list of the new SUIs generated
            The SQL includes a list of existing SUIs when it is initially executed.
            During execution, new SUIs are created but they are missing from the ones
            retrieved by the SQL (i.e. a "dirty read"). Therefore, the new SUIs are not found and will
            create duplicate SUIs with the same labels. This in-memory list provides
            lookup services to avoid recreating the labels."""
            sql = """SELECT DISTINCT oum.ontology_uri as ontology_uri, oum.cui AS cui,nm.codeid AS codeid, nm.synonym AS label, '{sab}' as sab, su.sui AS sui, 'SY' AS term_type
            FROM {table_name} nm
            INNER JOIN ontology_uri_map oum
            ON nm.ontology_uri = oum.ontology_uri
            LEFT OUTER JOIN suis_updated su
            ON nm.synonym = su.name""".format(table_name=table_name,sab=sab)
            """This query joins the ontology_uri_map data to the label from the node_metadata table. The query only returns
            records where the codeid is NULL or the codeid is missing from the code_suis_updated table. These represent
            records that need a new SUI minted."""
            # NOTE(review): the explanation string above was copied from insert_new_terms and is
            # stale -- this SELECT has no WHERE clause restricting codeid; it returns every
            # synonym row, relying on the LEFT JOIN's NULL sui to decide which need minting
            cursor.execute(sql)
            result = cursor.fetchall()
            print ("Loading tables suis_updated, code_suis_updated, and new_sui_map for SAB: {sab}".format(sab=sab), end='', flush=True)
            for row in result:
                ontology_uri = row['ontology_uri']
                cui = row['cui']
                codeid = row['codeid']
                # NOTE(review): assumes codeid has the form 'SAB CODE' (space-separated);
                # 'code' is not used below in this method -- confirm and consider removing
                code_list = str(codeid).split(' ')
                code = code_list[1]
                label = row['label']
                term_type = row['term_type']
                sui = row['sui']
                if sui == None:
                    if label in dict_new_suis.keys():
                        # if the label already exists, then use the existing SUI
                        sui = dict_new_suis[label]
                    else:
                        # if the label does not exist, then mint a new SUI
                        sui = 'HS' + str(record_count).zfill(6)
                        # mint a new SUI prefixed with 'HS'
                        # name is wrapped in double quotes so labels containing apostrophes
                        # do not break the SQL
                        sql = """INSERT INTO suis_updated (sui, name) VALUES ('{sui}',"{name}")""".format(sui=sui,name=label)
                        cursor.execute(sql)
                        sql = """INSERT INTO new_sui_map (codeid, sui, name) VALUES ('{codeid}','{sui}',"{name}")""".format(codeid=codeid,sui=sui,name=label)
                        cursor.execute(sql)
                        dict_new_suis[label] = sui
                        # add the new SUI to the in memory list
                sql = """INSERT INTO code_suis_updated (start_id, end_id, type, cui) VALUES ('{codeid}','{sui}','{term_type}','{cui}')""".format(codeid=codeid,sui=sui,cui=cui,term_type=term_type)
                cursor.execute(sql)
                record_count = record_count + 1
                #commit every 10,000 records
                if record_count % 10000 == 0:
                    print('.', end='', flush=True)
                    connection.commit()
        connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_cuis(config):
    '''
    Find every entry in the node_metadata tables that is missing from the ontology_uri_map table. This indicates a
    record that was not mapped to any existing UMLS code. This means the record needs a new CUI minted for it.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # reset the working tables so this run starts from a clean copy of the UMLS data
        truncate_table_sql = "TRUNCATE cuis_updated"
        cursor.execute(truncate_table_sql)
        connection.commit()
        print ("Truncating cui_codes_updated")
        sql = """TRUNCATE cui_codes_updated"""
        cursor.execute(sql)
        connection.commit()
        print ("Copying cuis INTO cuis_updated")
        sql = """INSERT INTO cuis_updated SELECT * FROM umls_cuis"""
        cursor.execute(sql)
        connection.commit()
        print ("Deleting HuBMAP CUIs")
        # remove previously minted HuBMAP CUIs ('HC' prefix) so they can be re-minted below
        sql = """DELETE FROM ontology_uri_map WHERE cui LIKE 'HC%'"""
        cursor.execute(sql)
        connection.commit()
        print ("Copying cuis INTO cui_codes_updated")
        sql = """INSERT INTO cui_codes_updated SELECT * FROM umls_cui_codes"""
        cursor.execute(sql)
        connection.commit()
        node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        record_count = 1 # start HCUI numbering at one
        print ("Creating new HCUI's and codes")
        for table_info in node_metadata_info:
            sab = table_info['sab']
            table_name = table_info['table_name']
            print ("Deleting {sab} codes from umls_codes".format(sab=sab))
            sql = """DELETE FROM umls_codes WHERE sab = '{sab}'""".format(sab=sab)
            # remove old records for the sab
            cursor.execute(sql)
            connection.commit()
            print("Loading node metadata for {sab}".format(sab=sab))
            sql = """SELECT ontology_uri AS ontology_uri, codeid AS codeid, sab AS sab FROM {table_name} nm
            WHERE nm.ontology_uri NOT IN (SELECT ontology_uri FROM ontology_uri_map WHERE mapping_type = 'PRIMARY')""".format(table_name=table_name)
            """Find all the records in the current node_metadata table that were not mapped to an UMLS terms."""
            cursor.execute(sql)
            result = cursor.fetchall()
            for row in result:
                ontology_uri = row['ontology_uri']
                cui = 'HC' + str(record_count).zfill(6)
                # mint a new CUI using the HC prefix
                record_count = record_count + 1
                current_sab = sab
                codeid = row['codeid']
                # NOTE(review): assumes codeid has the form 'SAB CODE' (space-separated) --
                # a codeid without a space would raise IndexError here; confirm upstream format
                code_list = str(codeid).split(' ')
                code = code_list[1]
                sql = """INSERT INTO ontology_uri_map (ontology_uri,codeid,cui,sab,mapping_type) VALUES ('{ontology_uri}','{codeid}','{cui}','{sab}','PRIMARY')""".format(codeid=codeid,cui=cui,ontology_uri=ontology_uri,sab=current_sab)
                # add the new HCUI to the ontology_uri_map
                cursor.execute(sql)
                sql = """INSERT INTO cuis_updated (cui) VALUES ('{cui}')""".format(cui=cui)
                # add the new HCUI to the cuis_updated table
                cursor.execute(sql)
                connection.commit()
                sql = """INSERT INTO umls_codes (codeid, sab,code) VALUES ('{codeid}','{sab}','{code}')""".format(codeid=codeid,sab=current_sab,code=code)
                # add the new Code information to umls_codes
                cursor.execute(sql)
                connection.commit()
                sql = """INSERT INTO cui_codes_updated (start_id, end_id) VALUES ('{cui}','{codeid}')""".format(cui=cui,codeid=codeid)
                # connect the new HCUI to its new Code
                cursor.execute(sql)
                connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_codes(config):
    '''
    Create the new codes in the graph. This code creates new codes plus connects them to the
    appropriate CUIs.
    Note: By the time this code executes, the insert_new_cuis method should have already inserted
    the new CUIs, so this code does not need to insert them. This also means this method is
    dependent upon the insert_new_cuis method.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        for table_info in node_metadata_info:
            table_name = table_info['table_name']
            current_sab = table_info['sab']
            # this SQL finds all the codes in the current node_metadata missing from the
            # umls_codes table; these are the codes we need to add
            sql = """SELECT nm.ontology_uri as ontology_uri, nm.codeid as codeid, oum.cui as cui, nm.sab as sab
        FROM {table_name} nm, ontology_uri_map oum
        WHERE oum.ontology_uri = nm.ontology_uri
        and oum.codeid IS NOT NULL
        and nm.codeid not in (select codeid from umls_codes)""".format(table_name=table_name)
            cursor.execute(sql)
            result = cursor.fetchall()
            print ("Creating new codes for sab: {sab}".format(sab=current_sab))
            for row in result:
                cui = row['cui']
                codeid = row['codeid']
                # codeid is of the form '<SAB> <code>'; take the bare code portion
                # (raises IndexError if the codeid contains no space, same as before)
                code = str(codeid).split(' ')[1]
                # parameterized INSERTs: quotes/backslashes in the data can no longer
                # break the statement or inject SQL
                sql = "INSERT INTO umls_codes (codeid, sab, code) VALUES (%s, %s, %s)"
                cursor.execute(sql, (codeid, current_sab, code))
                connection.commit()
                sql = "INSERT INTO cui_codes_updated (start_id, end_id) VALUES (%s, %s)"
                cursor.execute(sql, (cui, codeid))
                connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_defs(config):
    '''
    Add the definitions from the PHeKnowLator data for the UBERON and CL nodes.
    Rebuilds defs_updated, def_rel_updated and new_def_map from scratch: copies the UMLS
    definitions across, then appends one HAT-prefixed ATUI per primary-mapped node definition.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # start from a clean slate for the three output tables
        for table_to_clear in ('defs_updated', 'def_rel_updated', 'new_def_map'):
            sql = "TRUNCATE " + table_to_clear
            cursor.execute(sql)
            connection.commit()
        print("")
        print ("Copying defs INTO defs_updated")
        sql = """INSERT INTO defs_updated SELECT * FROM umls_defs"""
        cursor.execute(sql)
        connection.commit()
        print ("Copying def_rel INTO def_rel_updated")
        sql = """INSERT INTO def_rel_updated SELECT * FROM umls_def_rel"""
        cursor.execute(sql)
        connection.commit()
        node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        record_count = 1 # start ATUI numbering at one
        for table_info in node_metadata_info:
            table_name = table_info['table_name']
            sab = table_info['sab']
            # find the definition for every primary-mapped node, skipping empty placeholders
            sql = """SELECT oum.cui, nm.node_definition, '{sab}' as sab
        FROM {table_name} nm, ontology_uri_map oum
        WHERE nm.ontology_uri = oum.ontology_uri
        AND oum.mapping_type = 'PRIMARY'
        AND node_definition <> 'None'
        AND node_definition <> '.'""".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            result = cursor.fetchall()
            print("")
            print ("Loading tables defs_updated, def_rels_updated, and new_def_map", end='', flush=True)
            for row in result:
                cui = row['cui']
                node_definition = row['node_definition']
                sab = row['sab']
                # mint a new ATUI using the HAT prefix
                atui = 'HAT' + str(record_count).zfill(6)
                record_count = record_count + 1
                # parameterized INSERTs: no hand-escaping of quotes in the definition
                # text is needed, and embedded backslashes can no longer break the SQL
                sql = "INSERT INTO defs_updated (atui, sab, def) VALUES (%s, %s, %s)"
                cursor.execute(sql, (atui, sab, node_definition))
                sql = "INSERT INTO def_rel_updated (start_id, end_id, type, sab) VALUES (%s, %s, 'DEF', %s)"
                cursor.execute(sql, (cui, atui, sab))
                sql = "INSERT INTO new_def_map (cui, atui, node_definition, sab) VALUES (%s, %s, %s, %s)"
                cursor.execute(sql, (cui, atui, node_definition, sab))
                #commit every 10,000 records
                if record_count % 10000 == 0:
                    print('.', end='', flush=True)
                    connection.commit()
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def transform(config):
    '''
    This coordinates the transform methods, running each step in the required order.
    param dict config: The configuration data for this application.
    '''
    steps = [
        build_xref_table,
        # This step is temporary. It should be moved to a pre-processing step.
        fix_dbxrefs,
        # END temporary step
        build_ambiguous_codes_table,
        build_ontology_uri_to_umls_map_table,
        build_relations_table,
        insert_new_cuis,
        insert_new_codes,
        insert_new_terms,
        insert_new_defs,
        insert_new_cui_cui_relations,
    ]
    for step in steps:
        step(config)
    print('') # do this to disable the 'end' flag in prior print statements
    print("Done with transform process")
def load(config):
    '''
    Kick off the .CSV file export (load) phase of the pipeline.
    param dict config: The configuration data for this application.
    '''
    export_files(config)
    # terminate any progress output printed with end=''
    print('')
    print("Done with load process")
def export_files(config):
    '''
    This method walks through the subset of mysql tables and generates a set of .CSV files. These
    .CSV files adhere to the Neo4j 'CSV file header format' found here:
    https://neo4j.com/docs/operations-manual/current/tools/import/file-header-format/
    This method matches the mysql table with a file_name and manages any column header adjustments
    that need to be made.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        export_table_info = [{'table_name': 'umls_codes', 'file_name':'CODEs.csv','sql_columns':['codeid','sab','code'],'file_columns':['CodeID:ID','SAB','CODE']},
                             {'table_name': 'umls_tui_rel', 'file_name':'TUIrel.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
                             {'table_name': 'umls_cui_tuis', 'file_name':'CUI-TUIs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
                             {'table_name': 'umls_cui_cuis', 'file_name':'CUI-CUIs.csv','sql_columns':['start_id','end_id','type','sab'],'file_columns':[':START_ID',':END_ID',':TYPE','SAB']},
                             {'table_name': 'cui_codes_updated', 'file_name':'CUI-CODEs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
                             {'table_name': 'code_suis_updated', 'file_name':'CODE-SUIs.csv','sql_columns':['start_id','end_id','type','cui'],'file_columns':[':START_ID',':END_ID',':TYPE','CUI']},
                             {'table_name': 'cui_suis_updated', 'file_name':'CUI-SUIs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
                             {'table_name': 'cuis_updated', 'file_name':'CUIs.csv','sql_columns':['cui'],'file_columns':['CUI:ID']},
                             {'table_name': 'suis_updated', 'file_name':'SUIs.csv','sql_columns':['sui','name'],'file_columns':['SUI:ID','name']},
                             {'table_name': 'umls_tuis', 'file_name':'TUIs.csv','sql_columns':['tui','name','stn','def'],'file_columns':['TUI:ID','name','STN','DEF']},
                             {'table_name': 'defs_updated', 'file_name':'DEFs.csv','sql_columns':['atui','sab','def'],'file_columns':['ATUI:ID','SAB','DEF']},
                             {'table_name': 'def_rel_updated', 'file_name':'DEFrel.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']}]
        '''
        Each entry in export_table_info contains:
        table_name: the mysql table name to export
        file_name: the name of the .CSV file to be generated
        sql_columns: a list of the columns to be included in the SELECT statement
        file_columns: a list of the column headers to use when writing the data to the .CSV files
        The sql_columns and file_columns map 1:1. For example in the umls_codes / CODEs.csv entry:
            codeid SQL column becomes -> CodeID:ID in the .CSV file
            sab SQL column becomes -> SAB in the .CSV file
            code SQL column becomes -> CODE in the .CSV file
        '''
        for export_info in export_table_info:
            # walk through all the entries in the export_table_info list
            table_name = export_info['table_name']
            file_name = export_info['file_name']
            sql_columns = export_info['sql_columns']
            file_columns = export_info['file_columns']
            # set the output file path
            file_path = os.path.join(config['OUTPUT_DIR'],file_name)
            # build the SELECT statement from the sql_columns variable. Also, apply a
            # SQL 'DISTINCT' keyword to avoid duplicates
            sql = """SELECT DISTINCT {col_list} FROM {table_name}""".format(table_name=table_name,col_list=",".join(sql_columns))
            cursor.execute(sql)
            result = cursor.fetchall()
            print("")
            print ("Writing data from {table_name} to file {file_path}".format(table_name=table_name,file_path=file_path), end='', flush=True)
            # newline='' is required by the csv module to avoid blank lines on Windows;
            # utf-8 matches the utf8mb4 data pulled from mysql. The 'with' block also
            # guarantees the file is closed even if a database error occurs mid-export.
            with open(file_path, 'w', newline='', encoding='utf-8') as f:
                writer = csv.writer(f,quoting=csv.QUOTE_ALL)
                # write the file_columns as the headers for the .CSV file
                writer.writerow(file_columns)
                record_count = 0
                data_rows = []
                for result_row in result:
                    data_rows.append([result_row[field] for field in sql_columns])
                    record_count = record_count + 1
                    #write every 100,000 records
                    if record_count % 100000 == 0:
                        print('.', end='', flush=True)
                        writer.writerows(data_rows)
                        # clear data_rows
                        data_rows = []
                # flush whatever is left over after the last full batch
                writer.writerows(data_rows)
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
# utility function
def isascii(s):
    """Check if the characters in string s are in ASCII, U+0-U+7F."""
    # ASCII characters encode to exactly one byte each, so the encoded
    # length equals the character count only for pure-ASCII strings
    encoded = s.encode()
    return len(encoded) == len(s)
if __name__ == '__main__':
    # Command-line entry point: accepts one or more phase names
    # ('extract', 'extract_non_umls', 'transform', 'load') as positional arguments
    # and runs the requested phases in a fixed pipeline order.
    parser = argparse.ArgumentParser()
    parser.add_argument('commands', type=str, nargs='+',default='extract transform load')
    command_list = []
    try:
        args = parser.parse_args()
        command_list = args.commands
    except:
        # NOTE(review): argparse signals a usage error by raising SystemExit; this bare
        # except swallows it and falls back to running every phase instead of exiting
        command_list = ['extract','extract_non_umls','transform','load']
    # the app.cfg configuration file is expected to sit beside this script
    file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)))
    #file_path = '/home/chb69/git/ontology-api/src/neo4j_loader'
    file_name = 'app.cfg'
    config = load_config(file_path, file_name)
    #extract_non_umls(config)
    #transform(config)
    #load(config)
    # run only the requested phases; the if-ordering below fixes the pipeline sequence
    if 'extract_non_umls' in command_list:
        extract_non_umls(config)
    if 'extract' in command_list:
        extract(config)
    if 'transform' in command_list:
        transform(config)
    if 'load' in command_list:
        load(config)
    print("Done")
| 44.691919 | 234 | 0.609108 |
import sys
import os
import types
import mysql.connector
from mysql.connector import errorcode
import csv
import argparse
config = {}
def load_config(root_path, filename):
    '''
    Read a Python-syntax configuration file and return its top-level settings.
    The file is executed as Python code inside a scratch module namespace; every
    resulting name that is not a dunder becomes a key in the returned dict.
    SECURITY NOTE: exec runs arbitrary code from the config file; only load
    configuration files from trusted locations.
    param str root_path: directory containing the configuration file
    param str filename: name of the configuration file
    return dict: mapping of setting name to value
    raises OSError: if the file cannot be opened (with an augmented message)
    '''
    filename = os.path.join(root_path, filename)
    d = types.ModuleType("config")
    d.__file__ = filename
    try:
        with open(filename, mode="rb") as config_file:
            exec(compile(config_file.read(), filename, "exec"), d.__dict__)
    except OSError as e:
        e.strerror = f"Unable to load configuration file ({e.strerror})"
        raise
    # keep only the settings defined by the file, dropping __builtins__, __name__, etc.
    return {key: value for key, value in d.__dict__.items()
            if not key.startswith('__')}
def create_database(config):
    '''
    Create the database tables by executing the DDL script named by
    config['TABLE_CREATE_SQL_FILEPATH'].
    The script text is split on ';' and each non-empty statement is executed in order.
    param dict config: The configuration data for this application.
    '''
    connection = None
    try:
        # connect without selecting a database so the DDL script itself can create/select one
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'])
        cursor = connection.cursor()
        with open(config['TABLE_CREATE_SQL_FILEPATH'], encoding="utf-8") as f:
            # NOTE(review): splitting on ';' assumes the script contains no literal
            # semicolons inside string values — confirm against the DDL file
            commands = f.read().split(';')
            for command in commands:
                if str(command).strip() != "":
                    print('Executing: ' + command)
                    cursor.execute(command)
        print ("Done creating database tables.")
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    finally:
        if connection != None:
            connection.close()
def create_indices(config):
    '''
    Create the secondary indexes on all of the staging tables.
    The indexes are driven by a table of ALTER TABLE templates, one list per config
    key, so every table of a given kind receives the same set of indexes. The
    statements issued (and their order) are unchanged from the original hand-written
    sequence.
    param dict config: The configuration data for this application.
    '''
    index_templates = {
        'NODE_METADATA_FILE_TABLE_INFO': [
            "ALTER TABLE {t} ADD INDEX {t}_ontology_uri_idx (ontology_uri(500))",
            "ALTER TABLE {t} ADD INDEX {t}_node_label_idx (node_label(500))",
            "ALTER TABLE {t} ADD INDEX {t}_codeid_idx (codeid(500))",
            "ALTER TABLE {t} ADD INDEX {t}_sab_idx (sab(50))",
        ],
        'EDGE_LIST_FILE_TABLE_INFO': [
            "ALTER TABLE {t} ADD INDEX {t}_subject_idx (subject(50))",
            "ALTER TABLE {t} ADD INDEX {t}_predicate_idx (predicate(100))",
            "ALTER TABLE {t} ADD INDEX {t}_object_idx (object(100))",
            "ALTER TABLE {t} ADD INDEX {t}_sab_idx (sab(50))",
        ],
        'DBXREF_FILE_TABLE_INFO': [
            "ALTER TABLE {t} ADD INDEX {t}_ontology_uri_idx (ontology_uri(50))",
            "ALTER TABLE {t} ADD FULLTEXT INDEX {t}_dbxrefs_idx (dbxrefs(700))",
        ],
        'RELATIONS_FILE_TABLE_INFO': [
            "ALTER TABLE {t} ADD INDEX {t}_relation_id_idx (relation_id(100))",
            "ALTER TABLE {t} ADD INDEX {t}_relation_label_idx (relation_label(50))",
            "ALTER TABLE {t} ADD INDEX {t}_inverse_relation_label_idx (inverse_relation_label(50))",
        ],
        'SYNONYM_LIST_FILE_TABLE_INFO': [
            "ALTER TABLE {t} ADD INDEX {t}_ontology_uri_idx (ontology_uri(500))",
            "ALTER TABLE {t} ADD INDEX {t}_synonym_idx (synonym(500))",
            "ALTER TABLE {t} ADD INDEX {t}_sab_idx (sab(50))",
        ],
    }
    connection = None
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'])
        cursor = connection.cursor()
        for config_key, templates in index_templates.items():
            for table_info in config[config_key]:
                for template in templates:
                    cursor.execute(template.format(t=table_info['table_name']))
        print ("Done creating database indices.")
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    finally:
        if connection != None:
            connection.close()
def load_edge_list(config):
    '''
    Load each edge list file named in config['EDGE_LIST_FILE_TABLE_INFO'] into its
    mysql staging table. Each table is dropped, recreated with the generic
    subject/predicate/object schema, filled from the file, then stamped with its SAB.
    param dict config: The configuration data for this application.
    '''
    edge_list_list = config['EDGE_LIST_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0  # NOTE(review): unused in this function
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in edge_list_list:
            # for each entry, read the corresponding file and load it into the
            # referenced mysql table
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            sab = table_data['sab']
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            # generic edge list schema: one row per (subject, predicate, object) triple
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            subject VARCHAR(2048) NOT NULL,
            predicate VARCHAR(2048) NOT NULL,
            object VARCHAR(2048) NOT NULL,
            sab VARCHAR(50),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
            # stamp the SAB onto every record just loaded
            sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_synonym_list(config):
    '''
    Load each synonym file named in config['SYNONYM_LIST_FILE_TABLE_INFO'] into its
    mysql staging table. Silently does nothing if the config has no synonym section.
    param dict config: The configuration data for this application.
    '''
    if 'SYNONYM_LIST_FILE_TABLE_INFO' not in config:
        return
    synonym_list = config['SYNONYM_LIST_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0  # NOTE(review): unused in this function
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in synonym_list:
            # walk through the list of synonym files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            sab = table_data['sab']
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            ontology_uri VARCHAR(2048) NOT NULL,
            codeid VARCHAR(2048) NOT NULL,
            synonym VARCHAR(2048) NOT NULL,
            sab VARCHAR(50),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this is the generic SQL to create a synonym table
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
            sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
            # add the SAB for all records in table
            cursor.execute(sql)
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_relations(config):
    '''
    Load each relations file named in config['RELATIONS_FILE_TABLE_INFO'] into its
    mysql staging table (relation_id, relation_label, inverse_relation_label).
    param dict config: The configuration data for this application.
    '''
    node_metadata_list = config['RELATIONS_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0  # NOTE(review): unused in this function
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in node_metadata_list:
            # walk through the list of relations files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            sab = table_data['sab']  # NOTE(review): assigned but never used here
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            relation_id VARCHAR(2048) NOT NULL,
            relation_label VARCHAR(2048) NOT NULL,
            inverse_relation_label VARCHAR(2048),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this is the generic create relations SQL statement
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def create_missing_codeids(config):
    '''
    Backfill the codeid column of every node_metadata staging table where it is NULL,
    deriving the codeid from the OBO PURL: the purl.obolibrary.org prefix is stripped
    and the underscore becomes a space (e.g. .../obo/UBERON_0000002 -> 'UBERON 0000002').
    param dict config: The configuration data for this application.
    '''
    node_metadata_list = config['NODE_METADATA_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0  # NOTE(review): unused in this function
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in node_metadata_list:
            table_name = table_data['table_name']
            sql = """UPDATE {table_name}
            SET codeid = REPLACE(REPLACE(ontology_uri, 'http://purl.obolibrary.org/obo/',''), '_', ' ')
            WHERE codeid IS NULL""".format(table_name=table_name)
            # add a codeid for all records in table
            cursor.execute(sql)
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def fix_dbxrefs(config):
    '''
    Normalize the xref column of the dbxrefs table so the values match the UMLS code
    formats used elsewhere in the pipeline: uppercase everything, rewrite the
    NCIT/SNOMED-browser/MeSH prefixes, then replace the remaining colons with spaces.
    param dict config: The configuration data for this application.
    '''
    connection = None
    sql = ''
    record_count = 0  # NOTE(review): unused in this function
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        table_name = 'dbxrefs'
        sql = """UPDATE {table_name}
            SET xref = UPPER(xref)""".format(table_name=table_name)
        # uppercase all dbxrefs data in table
        cursor.execute(sql)
        connection.commit()
        sql = """UPDATE {table_name}
            SET xref = REPLACE(xref, 'NCIT:', 'NCI:') WHERE xref LIKE 'NCIT:%'""".format(table_name=table_name)
        # convert all the NCI codes
        cursor.execute(sql)
        connection.commit()
        sql = """UPDATE {table_name}
            SET xref = REPLACE(xref, 'HTTP://WWW.SNOMEDBROWSER.COM/CODES/DETAILS/', 'SNOMEDCT_US:') WHERE xref LIKE 'HTTP://WWW.SNOMEDBROWSER.COM/CODES/DETAILS/%'""".format(table_name=table_name)
        # convert all the SNOMED codes
        cursor.execute(sql)
        connection.commit()
        sql = """UPDATE {table_name}
            SET xref = REPLACE(xref, 'MESH:', 'MSH:') WHERE xref LIKE 'MESH:%'
            AND instr(xref, 'MESH:D') > 0
            AND instr(xref, 'MESH:D24') = 0""".format(table_name=table_name)
        # convert all the MeSH codes
        cursor.execute(sql)
        connection.commit()
        sql = """UPDATE {table_name}
            SET xref = REPLACE(xref, ':', ' ')""".format(table_name=table_name)
        # replace all remaining colons with spaces dbxrefs data in table
        cursor.execute(sql)
        connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_node_metadata(config):
    '''
    Load each node metadata file named in config['NODE_METADATA_FILE_TABLE_INFO'] into
    its mysql staging table. Each table is dropped, recreated with the generic
    node_metadata schema, filled from the file, then stamped with its SAB.
    param dict config: The configuration data for this application.
    '''
    node_metadata_list = config['NODE_METADATA_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0  # NOTE(review): unused in this function
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in node_metadata_list:
            # walk through the list of node_metadata files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            sab = table_data['sab']
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            ontology_uri VARCHAR(2048) NOT NULL,
            codeid VARCHAR(2048),
            node_label VARCHAR(2048) NOT NULL,
            node_definition VARCHAR(2048) NOT NULL,
            sab VARCHAR(50),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this SQL creates the generic node_metadata table
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
            sql = "UPDATE {table_name} SET sab = '{sab}'".format(table_name=table_name,sab=sab)
            # add the SAB for all records in table
            cursor.execute(sql)
            connection.commit()
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_dbxref(config):
    '''
    Load each dbxref file named in config['DBXREF_FILE_TABLE_INFO'] into its mysql
    staging table (ontology_uri plus a pipe-delimited dbxrefs column).
    param dict config: The configuration data for this application.
    '''
    dbxref_list = config['DBXREF_FILE_TABLE_INFO']
    connection = None
    sql = ''
    record_count = 0  # NOTE(review): unused in this function
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_data in dbxref_list:
            # walk through the list of dbxref files found in the config file.
            # for each entry, read the corresponding file and load it into the referenced
            # mysql table.
            table_name = table_data['table_name']
            file_name = table_data['file_name']
            sab = table_data['sab']  # NOTE(review): assigned but never used here
            drop_table_sql = "DROP TABLE IF EXISTS {table_name}".format(table_name=table_name)
            cursor.execute(drop_table_sql)
            table_create_sql = """CREATE TABLE {table_name} (
            id INT NOT NULL AUTO_INCREMENT,
            ontology_uri VARCHAR(2048) NOT NULL,
            dbxrefs VARCHAR(5120) NOT NULL,
            sab VARCHAR(50),
            PRIMARY KEY(id)
            )""".format(table_name=table_name)
            # this is the SQL to create a generic dbxref table
            cursor.execute(table_create_sql)
            connection.commit()
            print("Created table: " + table_name)
            file_path = os.path.join(config['ONTOLOGY_SOURCE_DIR'], file_name)
            load_file(config, file_path, table_name)
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_umls_codes(config):
    """Load the UMLS CODEs.csv export into the umls_codes table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'CODEs.csv'), 'umls_codes')
def load_umls_defs(config):
    """Load the UMLS DEFs.csv export into the umls_defs table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'DEFs.csv'), 'umls_defs')
def load_umls_suis(config):
    """Load the UMLS SUIs.csv export into the umls_suis table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'SUIs.csv'), 'umls_suis')
def load_umls_cuis(config):
    """Load the UMLS CUIs.csv export into the umls_cuis table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'CUIs.csv'), 'umls_cuis')
def load_umls_tuis(config):
    """Load the UMLS TUIs.csv export into the umls_tuis table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'TUIs.csv'), 'umls_tuis')
def load_umls_code_suis(config):
    """Load the UMLS CODE-SUIs.csv export into the umls_code_suis table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'CODE-SUIs.csv'), 'umls_code_suis')
def load_umls_cui_codes(config):
    """Load the UMLS CUI-CODEs.csv export into the umls_cui_codes table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'CUI-CODEs.csv'), 'umls_cui_codes')
def load_umls_cui_cuis(config):
    """Load the UMLS CUI-CUIs.csv export into the umls_cui_cuis table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'CUI-CUIs.csv'), 'umls_cui_cuis')
def load_umls_cui_suis(config):
    """Load the UMLS CUI-SUIs.csv export into the umls_cui_suis table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'CUI-SUIs.csv'), 'umls_cui_suis')
def load_umls_cui_tuis(config):
    """Load the UMLS CUI-TUIs.csv export into the umls_cui_tuis table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'CUI-TUIs.csv'), 'umls_cui_tuis')
def load_umls_def_rel(config):
    """Load the UMLS DEFrel.csv export into the umls_def_rel table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'DEFrel.csv'), 'umls_def_rel')
def load_umls_tui_rel(config):
    """Load the UMLS TUIrel.csv export into the umls_tui_rel table."""
    load_file(config, os.path.join(config['UMLS_SOURCE_DIR'], 'TUIrel.csv'), 'umls_tui_rel')
def build_xref_table(config):
    '''
    Build the dbxrefs table by exploding the pipe-delimited dbxrefs column of every
    ontology dbxref staging table into one (ontology_uri, xref) row per cross-reference.
    The dbxrefs table is dropped and recreated first.
    param dict config: The configuration data for this application.
    '''
    dbxref_list = config['DBXREF_FILE_TABLE_INFO']
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        drop_table_sql = "DROP TABLE IF EXISTS dbxrefs"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE dbxrefs (
        id INT NOT NULL AUTO_INCREMENT,
        ontology_uri VARCHAR(2048) NOT NULL,
        xref VARCHAR(2048) NOT NULL,
        PRIMARY KEY(id)
        );"""
        cursor.execute(create_table_sql)
        for table_data in dbxref_list:
            table_name = table_data['table_name']
            sab = table_data['sab']
            cursor.execute("SELECT ontology_uri, dbxrefs FROM {table_name}".format(table_name=table_name))
            print("Loading {sab} data into table {table_name}".format(table_name="dbxrefs", sab=sab), end='', flush=True)
            result = cursor.fetchall()
            record_count = 0
            for row in result:
                ontology_uri = row['ontology_uri']
                all_xrefs = row['dbxrefs']
                # split the pipe-delimited dbxrefs column into individual references
                xref_list = all_xrefs.split('|')
                for ref in xref_list:
                    # parameterized INSERT: the driver handles quoting, so quotes in
                    # either value can no longer break the statement or inject SQL
                    # (the old code escaped ' in ref but left ontology_uri unescaped)
                    sql = "INSERT INTO dbxrefs (ontology_uri, xref) VALUES (%s, %s)"
                    cursor.execute(sql, (ontology_uri, ref))
                    record_count = record_count + 1
                    # commit (and show progress) every 10,000 rows
                    if record_count % 10000 == 0:
                        print('.', end='', flush=True)
                        connection.commit()
            print('')
        connection.commit()
        print ("Done loading the {table_name} table.".format(table_name="dbxrefs"))
    except mysql.connector.Error as err:
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def load_file(config, file_path, table_name):
    """Bulk-load one delimited file into the given MySQL table.

    The file's header row supplies the column list.  Neo4j-import header
    decorations (':ID', trailing ':') are stripped, a few derived columns
    are dropped, and for specific relationship files a synthetic 'type'
    column is appended whose value is filled in per row below.

    :param config: dict-like configuration with the MySQL credentials
    :param file_path: path to the .csv (comma) or .txt (tab) input file
    :param table_name: destination table; its columns must match the header
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor()
        record_count = 0
        with open(file_path) as csvfile:
            myCSVReader = None
            # .txt files are tab-delimited; everything else is comma-delimited
            if file_path.endswith('.txt'):
                myCSVReader = csv.DictReader(csvfile, delimiter='\t')
            else:
                myCSVReader = csv.DictReader(csvfile)
            field_names = myCSVReader.fieldnames
            # drop derived/unused columns present in some source files
            if 'name_lc' in field_names:
                field_names.remove('name_lc')
            if 'REL' in field_names:
                field_names.remove('REL')
            if 'RELA' in field_names:
                field_names.remove('RELA')
            # these relationship files carry no type column of their own;
            # a constant 'type' value is injected per row further down
            if (file_path.endswith('CUI-SUIs.csv') or
                file_path.endswith('CUI-TUIs.csv') or
                file_path.endswith('DEFrel.csv') or
                file_path.endswith('TUIrel.csv')):
                field_names.append('type')
            # build "col1, col2, ..." stripping the Neo4j header decorations
            field_list_str = '%s' % ', '.join(map(str, field_names))
            field_list_str = field_list_str.replace(':ID', '')
            field_list_str = field_list_str.replace(':', '')
            # build the matching "%(col1)s, %(col2)s, ..." named placeholders
            value_list_str = ''
            for field in field_names:
                value_list_str += '%({field})s, '.format(field=field)
            value_list_str = value_list_str[:-2]
            sql = """INSERT INTO {table_name}({field_list})
            VALUE ({value_list})""".format(table_name=table_name, field_list=field_list_str, value_list=value_list_str)
            print("Loading data from {file_name} into table {table_name}".format(file_name=file_path, table_name=table_name), end='', flush=True)
            for row in myCSVReader:
                # inject the synthetic relationship type for the special files
                if file_path.endswith('CUI-SUIs.csv'):
                    row['type'] = 'PREF_TERM'
                if file_path.endswith('CUI-TUIs.csv'):
                    row['type'] = 'STY'
                if file_path.endswith('DEFrel.csv'):
                    row['type'] = 'DEF'
                if file_path.endswith('TUIrel.csv'):
                    row['type'] = 'ISA_STY'
                if table_name == 'suis':
                    # DictReader stores overflow fields (rows with more cells
                    # than the header) under the key None; discard them so the
                    # parameterized execute() below does not choke
                    if None in row.keys():
                        row.pop(None)
                    if None in row.keys():
                        row.pop(None)
                cursor.execute(sql, row)
                record_count = record_count + 1
                # commit periodically and emit a progress dot
                if record_count % 200000 == 0:
                    print('.', end='', flush=True)
                    connection.commit()
        print('')
        connection.commit()
        print ("Done loading the {table_name} table.".format(table_name=table_name))
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def extract_non_umls(config):
    """Run the extract phase for the non-UMLS sources only.

    Same steps as extract() minus database creation, the UMLS table
    loads, and index creation.
    """
    pipeline = (
        load_node_metadata,
        load_relations,
        load_dbxref,
        load_edge_list,
        load_synonym_list,
        create_missing_codeids,
    )
    for step in pipeline:
        step(config)
def extract(config):
    """Run the full extract phase.

    Creates the working database, loads every ontology source and UMLS
    table, patches missing code ids, and builds the indices.
    """
    pipeline = (
        create_database,
        load_node_metadata,
        load_relations,
        load_dbxref,
        load_edge_list,
        load_synonym_list,
        load_umls_codes,
        load_umls_defs,
        load_umls_suis,
        load_umls_cuis,
        load_umls_tuis,
        load_umls_cui_codes,
        load_umls_code_suis,
        load_umls_cui_cuis,
        load_umls_cui_suis,
        load_umls_cui_tuis,
        load_umls_def_rel,
        load_umls_tui_rel,
        create_missing_codeids,
        create_indices,
    )
    for step in pipeline:
        step(config)
    print("Done with extract process")
def build_ambiguous_codes_table(config):
    """Rebuild ``temp_ambiguous_codes``: (ontology_uri, codeid) pairs whose
    code maps to more than one distinct UMLS CUI.

    Later mapping steps exclude these pairs so each ontology URI resolves
    to at most one CUI.

    :param config: dict-like configuration with the MySQL credentials
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        drop_table_sql = "DROP TABLE IF EXISTS temp_ambiguous_codes"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE temp_ambiguous_codes (
        id INT NOT NULL AUTO_INCREMENT,
        ontology_uri VARCHAR(2048) NOT NULL,
        codeid VARCHAR(2048),
        PRIMARY KEY(id)
        );"""
        cursor.execute(create_table_sql)
        print("Created table temp_ambiguous_codes")
        # a code is "ambiguous" when it appears as a dbxref AND is linked
        # (via umls_cui_codes) to more than one distinct CUI (rel.start_id)
        sql = """INSERT INTO temp_ambiguous_codes (ontology_uri, codeid)
        SELECT DISTINCT ontology_uri, xref as codeid
        FROM dbxrefs, umls_cui_codes as rel
        WHERE xref = rel.end_id
        GROUP BY ontology_uri, xref
        HAVING COUNT(DISTINCT rel.start_id) > 1"""
        cursor.execute(sql)
        connection.commit()
        print("Loaded codes into table temp_ambiguous_codes")
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def temp_build_ccf_code_cui_table(config):
    """(Temporary helper) Rebuild ``temp_ccf_cui_codes`` from the CCF
    CODE->CUI mapping file and index it by codeid and by cui.

    NOTE(review): the input file path is hard-coded to a developer's home
    directory; it should come from config — TODO confirm and move it.

    :param config: dict-like configuration with the MySQL credentials
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        cursor.execute("DROP TABLE IF EXISTS temp_ccf_cui_codes")
        create_table_sql = """CREATE TABLE temp_ccf_cui_codes (
        id INT NOT NULL AUTO_INCREMENT,
        codeid VARCHAR(2048) NOT NULL,
        cui VARCHAR(2048),
        PRIMARY KEY(id)
        );"""
        cursor.execute(create_table_sql)
        print("Created table temp_ccf_cui_codes")
        # index prefixes (50 chars) because the full VARCHAR(2048) exceeds
        # the maximum index key length
        cursor.execute("ALTER TABLE temp_ccf_cui_codes ADD INDEX temp_ccf_cui_codes_codeid(codeid(50))")
        cursor.execute("ALTER TABLE temp_ccf_cui_codes ADD INDEX temp_ccf_cui_codes_cui_idx(cui(50))")
        cursor = connection.cursor()
        file_path = '/home/chb69/umls_data/ccf/CCF-CUI.csv'
        with open(file_path) as csvfile:
            # .txt files are tab-delimited; everything else is comma-delimited
            if file_path.endswith('.txt'):
                myCSVReader = csv.DictReader(csvfile, delimiter='\t')
            else:
                myCSVReader = csv.DictReader(csvfile)
            print("Loading data from {file_name} into table {table_name}".format(file_name=file_path, table_name='temp_ccf_cui_codes'), end='', flush=True)
            # parameterized INSERT: the old string-formatted statement broke
            # on any value containing a quote character
            sql = "INSERT INTO temp_ccf_cui_codes (codeid, cui) VALUES (%s, %s)"
            for row in myCSVReader:
                cursor.execute(sql, (row['b.CodeID'], row['c.CUI']))
            connection.commit()
        print ("Done loading the {table_name} table.".format(table_name="temp_ccf_cui_codes"))
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection is not None:
            connection.close()
def build_ontology_uri_to_umls_map_table(config):
    """Rebuild ``ontology_uri_map``: the master mapping from each ontology
    URI to its UMLS CUI and/or codeid.

    Three passes:
      1. direct UMLS dbxrefs ('UMLS:Cxxxxxxx') -> CUI-only rows,
      2. code-based dbxrefs joined through umls_cui_codes (skipping codes
         flagged in temp_ambiguous_codes) -> code+CUI rows,
      3. mark CUI-only mappings as 'PRIMARY' when the URI resolves to
         exactly one CUI.

    :param config: dict-like configuration with the MySQL credentials
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        drop_table_sql = "DROP TABLE IF EXISTS ontology_uri_map"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE ontology_uri_map (
        id INT NOT NULL AUTO_INCREMENT,
        ontology_uri VARCHAR(2048) NOT NULL,
        cui VARCHAR(2048),
        codeid VARCHAR(2048),
        type VARCHAR(50),
        mapping_type VARCHAR(50),
        sab VARCHAR(50),
        PRIMARY KEY(id)
        );"""
        cursor.execute(create_table_sql)
        print("Created table ontology_uri_map")
        # pass 1: substr(xref,6) strips the leading 'UMLS:' prefix
        sql = """INSERT INTO ontology_uri_map (ontology_uri, cui)
        SELECT DISTINCT ontology_uri, substr(xref,6) as CUI FROM dbxrefs
        WHERE xref LIKE 'UMLS%'"""
        # these records will have their codeid column set to NULL
        cursor.execute(sql)
        connection.commit()
        print("Loaded UMLS map into table ontology_uri_map")
        # pass 2: substring_index(xref,' ',1) extracts the SAB from a
        # codeid of the form 'SAB code'
        sql = """INSERT INTO ontology_uri_map (ontology_uri, codeid, cui, type, sab)
        SELECT DISTINCT ontology_uri, xref as codeid, rel.start_id as cui, 'PT' as type, substring_index(xref,' ', 1) as sab
        FROM dbxrefs, umls_cui_codes as rel
        WHERE xref = rel.end_id
        AND (ontology_uri, xref) NOT IN (SELECT ontology_uri,codeid FROM temp_ambiguous_codes)"""
        # This query loads all the ontology_uri's that map to a code according to the dbxrefs table
        cursor.execute(sql)
        connection.commit()
        print("Loaded map into table ontology_uri_map")
        # prefix indices (50 chars) because the VARCHAR(2048) columns exceed
        # the maximum index key length
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_ontology_uri_idx(ontology_uri(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_cui_idx(cui(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_codeid_idx(codeid(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_type_idx(type(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_mapping_type_idx(mapping_type(50))"
        cursor.execute(sql)
        sql = "ALTER TABLE ontology_uri_map ADD INDEX ontology_uri_map_sab_idx(sab(50))"
        cursor.execute(sql)
        print("Built indices for table ontology_uri_map")
        # pass 3: a CUI-only mapping is PRIMARY only when the URI maps to
        # exactly one distinct CUI
        sql = """UPDATE ontology_uri_map SET mapping_type = 'PRIMARY' where codeid is null AND ontology_uri IN (
        SELECT ontology_uri from (SELECT ontology_uri FROM ontology_uri_map
        where codeid is null
        group by ontology_uri
        having count(distinct cui) = 1) as table_one)"""
        cursor.execute(sql)
        connection.commit()
        print("Loaded PRIMARY CUI map data into table ontology_uri_map")
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def build_relations_table(config):
    """Rebuild the consolidated ``relations`` table from every per-source
    relations table listed in config['RELATIONS_FILE_TABLE_INFO'].

    After each source is copied in, any relation still lacking an inverse
    label gets a synthesized one: 'inverse <relation_label>'.

    :param config: dict-like configuration with the MySQL credentials
    """
    relations_table_info = config['RELATIONS_FILE_TABLE_INFO']
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        drop_table_sql = "DROP TABLE IF EXISTS relations"
        cursor.execute(drop_table_sql)
        create_table_sql = """CREATE TABLE relations (
        id INT NOT NULL AUTO_INCREMENT,
        relation_id VARCHAR(2048) NOT NULL,
        relation_label VARCHAR(2048) NOT NULL,
        inverse_relation_label VARCHAR(2048),
        sab VARCHAR(50),
        PRIMARY KEY(id));"""
        cursor.execute(create_table_sql)
        print("Created table relations")
        for table_info in relations_table_info:
            table_name = table_info['table_name']
            sab = table_info['sab']
            # copy this source's relations, tagging each row with its SAB
            sql = """INSERT INTO relations (relation_id, relation_label, inverse_relation_label, sab)
            SELECT relation_id, relation_label, inverse_relation_label, '{sab}' FROM {table_name}""".format(table_name=table_name, sab=sab)
            cursor.execute(sql)
            connection.commit()
            print("Loaded {sab} relations data into table relations".format(sab=sab))
            # NOTE(review): this self-join matches rows to themselves on
            # relation_id, so the net effect is filling each NULL
            # inverse_relation_label with 'inverse <own relation_label>' —
            # confirm a join to a *different* row was not intended here.
            sql = """UPDATE relations r1
            LEFT JOIN relations r2
            ON r1.relation_id = r2.relation_id
            SET r1.inverse_relation_label = CONCAT('inverse ', r2.relation_label)
            WHERE r2.inverse_relation_label IS NULL"""
            cursor.execute(sql)
            connection.commit()
            print("Added inverse relations for {sab} data into table relations".format(sab=sab))
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_cui_cui_relations(config):
    """Refresh the CUI-to-CUI relationships derived from each edge list.

    For every edge list in config['EDGE_LIST_FILE_TABLE_INFO'], this
    deletes the rows previously loaded for that SAB from umls_cui_cuis and
    re-derives them: a forward relation using the predicate's label, plus
    the inverse relation when an inverse label exists.  Only URIs with a
    PRIMARY CUI mapping participate, and self-relations are skipped.

    :param config: dict-like configuration with the MySQL credentials and
        the EDGE_LIST_FILE_TABLE_INFO list of {'table_name', 'sab'} entries.
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        edge_list_file_info = config['EDGE_LIST_FILE_TABLE_INFO']
        for edge_list_info in edge_list_file_info:
            sab = edge_list_info['sab']
            table_name = edge_list_info['table_name']
            # remove this SAB's prior rows so the reload is idempotent
            sql = """DELETE FROM umls_cui_cuis WHERE sab = '{sab}'""".format(sab=sab)
            cursor.execute(sql)
            connection.commit()
            print('')
            print("Deleted {sab} map from table umls_cui_cuis".format(sab=sab))
            # BUG FIX: the inserted sab column was hard-coded to 'UBERON';
            # it now uses the SAB of the edge list being processed so the
            # rows deleted above are actually replaced for every source.
            sql = """INSERT INTO umls_cui_cuis (start_id, type, end_id, sab)
            SELECT DISTINCT subject_table.cui as start_id, lower(replace(rel.relation_label,' ','_')) as type, object_table.cui as end_id, '{sab}' as sab
            FROM {table_name} el, relations rel, ontology_uri_map subject_table, ontology_uri_map object_table
            WHERE rel.relation_id = el.predicate
            AND subject_table.ontology_uri = el.subject
            AND subject_table.mapping_type = 'PRIMARY'
            AND object_table.ontology_uri = el.object
            AND object_table.mapping_type = 'PRIMARY'
            AND subject_table.cui != object_table.cui
            AND el.sab = '{sab}'""".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            connection.commit()
            print("Loaded {sab} map into table umls_cui_cuis".format(sab=sab))
            # inverse direction: object -> subject using the inverse label
            # (same 'UBERON' -> '{sab}' fix applied here)
            sql = """INSERT INTO umls_cui_cuis (start_id, type, end_id, sab)
            SELECT DISTINCT object_table.cui as start_id, lower(replace(rel.inverse_relation_label,' ','_')) as type, subject_table.cui as end_id, '{sab}' as sab
            FROM {table_name} el, relations rel, ontology_uri_map subject_table, ontology_uri_map object_table
            WHERE rel.relation_id = el.predicate
            AND subject_table.ontology_uri = el.subject
            AND subject_table.mapping_type = 'PRIMARY'
            AND object_table.ontology_uri = el.object
            AND object_table.mapping_type = 'PRIMARY'
            AND subject_table.cui != object_table.cui
            AND rel.inverse_relation_label IS NOT NULL
            AND el.sab = '{sab}'""".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            connection.commit()
            print("Loaded {sab} inverse relation map into table umls_cui_cuis".format(sab=sab))
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection is not None:
            connection.close()
def insert_new_terms(config):
    """Build the *_updated term tables, minting new SUIs where needed.

    Copies umls_cui_suis/umls_suis/umls_code_suis into their *_updated
    counterparts, then for every node-metadata source adds a preferred-term
    (PT) row per code that does not yet have one.  Labels that match an
    existing SUI reuse it; otherwise a new 'HSnnnnnn' SUI is minted and
    recorded in new_sui_map.  Finishes by loading synonyms via
    insert_new_synonyms(), continuing the same SUI numbering.

    NOTE(review): labels are interpolated into double-quoted SQL literals
    without escaping '"' — a label containing a double quote will break the
    statement (insert_new_defs escapes them; this function does not).
    Confirm and align.

    :param config: dict-like configuration with the MySQL credentials and
        NODE_METADATA_FILE_TABLE_INFO.
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # start from clean working copies of the UMLS term tables
        truncate_table_sql = "TRUNCATE suis_updated"
        cursor.execute(truncate_table_sql)
        connection.commit()
        truncate_table_sql = "TRUNCATE code_suis_updated"
        cursor.execute(truncate_table_sql)
        connection.commit()
        truncate_table_sql = "TRUNCATE new_sui_map"
        cursor.execute(truncate_table_sql)
        connection.commit()
        truncate_table_sql = """TRUNCATE cui_suis_updated"""
        cursor.execute(truncate_table_sql)
        connection.commit()
        print ("Copying cui_suis INTO cui_suis_updated")
        sql = """INSERT INTO cui_suis_updated SELECT * FROM umls_cui_suis"""
        cursor.execute(sql)
        connection.commit()
        # drop indices before the bulk copy, re-create them afterwards
        sql = "DROP INDEX suis_updated_sui_idx ON suis_updated"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX suis_updated_name_idx ON suis_updated"
        cursor.execute(sql)
        connection.commit()
        print ("Copying suis INTO suis_updated")
        sql = """INSERT INTO suis_updated SELECT * FROM umls_suis"""
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE suis_updated ADD INDEX suis_updated_sui_idx (sui(100))"
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE suis_updated ADD INDEX suis_updated_name_idx (name(500))"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX code_suis_updated_start_id_idx ON code_suis_updated"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX code_suis_updated_end_id_idx ON code_suis_updated"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX code_suis_updated_type_idx ON code_suis_updated"
        cursor.execute(sql)
        connection.commit()
        sql = "DROP INDEX code_suis_updated_cui_idx ON code_suis_updated"
        cursor.execute(sql)
        connection.commit()
        print ("Copying code_suis INTO code_suis_updated")
        sql = """INSERT INTO code_suis_updated SELECT * FROM umls_code_suis"""
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_start_id_idx (start_id(100))"
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_end_id_idx (end_id(100))"
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_type_idx (type(100))"
        cursor.execute(sql)
        connection.commit()
        sql = "ALTER TABLE code_suis_updated ADD INDEX code_suis_updated_cui_idx (cui(100))"
        cursor.execute(sql)
        connection.commit()
        node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        record_count = 1  # HS-SUI numbering starts at one
        for table_info in node_metadata_info:
            table_name = table_info['table_name']
            sab = table_info['sab']
            # labels minted in this pass, so duplicates reuse the same SUI
            dict_new_suis = {}
            # PRIMARY-mapped URIs whose code has no term row yet; the LEFT
            # OUTER JOIN surfaces an existing SUI for the label when one exists
            sql = """SELECT oum.ontology_uri as ontology_uri, oum.cui AS cui, IFNULL(oum.codeid,nm.codeid) AS codeid, nm.node_label AS label, '{sab}' as sab, su.sui AS sui, 'PT' AS term_type
            FROM {table_name} nm
            INNER JOIN ontology_uri_map oum
            ON nm.ontology_uri = oum.ontology_uri
            AND oum.mapping_type = 'PRIMARY'
            LEFT OUTER JOIN suis_updated su
            ON nm.node_label = su.name
            WHERE oum.codeid is null OR oum.codeid NOT IN (select start_id FROM code_suis_updated)""".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            result = cursor.fetchall()
            print("")
            print ("Loading tables suis_updated, code_suis_updated, and new_sui_map for SAB: {sab}".format(sab=sab), end='', flush=True)
            for row in result:
                ontology_uri = row['ontology_uri']
                cui = row['cui']
                codeid = row['codeid']
                # codeid has the form 'SAB code'; isolate the bare code
                code_list = str(codeid).split(' ')
                code = code_list[1]
                label = row['label']
                term_type = row['term_type']
                sui = row['sui']
                if sui == None:
                    # no existing SUI for this label: reuse one minted earlier
                    # in this pass, or mint a new 'HSnnnnnn' identifier
                    if label in dict_new_suis.keys():
                        sui = dict_new_suis[label]
                    else:
                        sui = 'HS' + str(record_count).zfill(6)
                        sql = """INSERT INTO suis_updated (sui, name) VALUES ('{sui}',"{name}")""".format(sui=sui,name=label)
                        cursor.execute(sql)
                        sql = """INSERT INTO new_sui_map (codeid, sui, name) VALUES ('{codeid}','{sui}',"{name}")""".format(codeid=codeid,sui=sui,name=label)
                        cursor.execute(sql)
                        dict_new_suis[label] = sui
                sql = """INSERT INTO code_suis_updated (start_id, end_id, type, cui) VALUES ('{codeid}','{sui}','{term_type}','{cui}')""".format(codeid=codeid,sui=sui,cui=cui,term_type=term_type)
                cursor.execute(sql)
                # freshly minted HuBMAP CUIs ('HC...') also need an explicit
                # preferred-term link
                if 'HC' in cui and term_type == 'PT':
                    sql = """INSERT INTO cui_suis_updated (start_id, end_id, type) VALUES ('{cui}','{sui}','PREF_TERM')""".format(cui=cui,sui=sui)
                    cursor.execute(sql)
                record_count = record_count + 1
                # commit periodically and emit a progress dot
                if record_count % 10000 == 0:
                    print('.', end='', flush=True)
                    connection.commit()
        connection.commit()
        print('')
        # continue the same record_count so synonym SUIs do not collide
        insert_new_synonyms(config, record_count)
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_synonyms(config, record_count):
    """Add synonym (SY) term rows for every synonym-list source.

    Mirrors the PT logic in insert_new_terms(): reuse an existing SUI when
    the synonym text already exists, otherwise mint a new 'HSnnnnnn' SUI
    (continuing the numbering passed in via *record_count*) and record it
    in new_sui_map.  No-op when SYNONYM_LIST_FILE_TABLE_INFO is absent.

    NOTE(review): like insert_new_terms, labels are interpolated into
    double-quoted SQL without escaping '"' — confirm inputs never contain
    double quotes or align with insert_new_defs' escaping.

    :param config: dict-like configuration with the MySQL credentials
    :param record_count: next HS-SUI sequence number to use
    """
    if 'SYNONYM_LIST_FILE_TABLE_INFO' not in config:
        return
    synonym_list = config['SYNONYM_LIST_FILE_TABLE_INFO']
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        for table_info in synonym_list:
            sab = table_info['sab']
            table_name = table_info['table_name']
            # synonyms minted in this pass, so duplicates reuse the same SUI
            dict_new_suis = {}
            # the LEFT OUTER JOIN surfaces an existing SUI for the synonym
            # text when one already exists
            sql = """SELECT DISTINCT oum.ontology_uri as ontology_uri, oum.cui AS cui,nm.codeid AS codeid, nm.synonym AS label, '{sab}' as sab, su.sui AS sui, 'SY' AS term_type
            FROM {table_name} nm
            INNER JOIN ontology_uri_map oum
            ON nm.ontology_uri = oum.ontology_uri
            LEFT OUTER JOIN suis_updated su
            ON nm.synonym = su.name""".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            result = cursor.fetchall()
            print ("Loading tables suis_updated, code_suis_updated, and new_sui_map for SAB: {sab}".format(sab=sab), end='', flush=True)
            for row in result:
                ontology_uri = row['ontology_uri']
                cui = row['cui']
                codeid = row['codeid']
                # codeid has the form 'SAB code'; isolate the bare code
                code_list = str(codeid).split(' ')
                code = code_list[1]
                label = row['label']
                term_type = row['term_type']
                sui = row['sui']
                if sui == None:
                    # no existing SUI for this synonym: reuse one minted
                    # earlier in this pass, or mint a new 'HSnnnnnn' SUI
                    if label in dict_new_suis.keys():
                        sui = dict_new_suis[label]
                    else:
                        sui = 'HS' + str(record_count).zfill(6)
                        sql = """INSERT INTO suis_updated (sui, name) VALUES ('{sui}',"{name}")""".format(sui=sui,name=label)
                        cursor.execute(sql)
                        sql = """INSERT INTO new_sui_map (codeid, sui, name) VALUES ('{codeid}','{sui}',"{name}")""".format(codeid=codeid,sui=sui,name=label)
                        cursor.execute(sql)
                        dict_new_suis[label] = sui
                sql = """INSERT INTO code_suis_updated (start_id, end_id, type, cui) VALUES ('{codeid}','{sui}','{term_type}','{cui}')""".format(codeid=codeid,sui=sui,cui=cui,term_type=term_type)
                cursor.execute(sql)
                record_count = record_count + 1
                # commit periodically and emit a progress dot
                if record_count % 10000 == 0:
                    print('.', end='', flush=True)
                    connection.commit()
        connection.commit()
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_cuis(config):
    """Mint HuBMAP CUIs ('HCnnnnnn') for ontology URIs with no PRIMARY
    UMLS mapping.

    Rebuilds cuis_updated and cui_codes_updated from the UMLS originals,
    removes stale 'HC' rows from ontology_uri_map, then for each
    node-metadata source creates a new CUI + code pair per unmapped URI
    and wires it into ontology_uri_map, cuis_updated, umls_codes, and
    cui_codes_updated.

    :param config: dict-like configuration with the MySQL credentials and
        NODE_METADATA_FILE_TABLE_INFO.
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        truncate_table_sql = "TRUNCATE cuis_updated"
        cursor.execute(truncate_table_sql)
        connection.commit()
        print ("Truncating cui_codes_updated")
        sql = """TRUNCATE cui_codes_updated"""
        cursor.execute(sql)
        connection.commit()
        print ("Copying cuis INTO cuis_updated")
        sql = """INSERT INTO cuis_updated SELECT * FROM umls_cuis"""
        cursor.execute(sql)
        connection.commit()
        print ("Deleting HuBMAP CUIs")
        # drop previously minted HC CUIs so this run re-mints them cleanly
        sql = """DELETE FROM ontology_uri_map WHERE cui LIKE 'HC%'"""
        cursor.execute(sql)
        connection.commit()
        print ("Copying cuis INTO cui_codes_updated")
        sql = """INSERT INTO cui_codes_updated SELECT * FROM umls_cui_codes"""
        cursor.execute(sql)
        connection.commit()
        node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        record_count = 1  # HC-CUI numbering starts at one
        print ("Creating new HCUI's and codes")
        for table_info in node_metadata_info:
            sab = table_info['sab']
            table_name = table_info['table_name']
            print ("Deleting {sab} codes from umls_codes".format(sab=sab))
            sql = """DELETE FROM umls_codes WHERE sab = '{sab}'""".format(sab=sab)
            # remove old records for the sab
            cursor.execute(sql)
            connection.commit()
            print("Loading node metadata for {sab}".format(sab=sab))
            # URIs with no PRIMARY mapping are the ones needing a new CUI
            sql = """SELECT ontology_uri AS ontology_uri, codeid AS codeid, sab AS sab FROM {table_name} nm
            WHERE nm.ontology_uri NOT IN (SELECT ontology_uri FROM ontology_uri_map WHERE mapping_type = 'PRIMARY')""".format(table_name=table_name)
            cursor.execute(sql)
            result = cursor.fetchall()
            for row in result:
                ontology_uri = row['ontology_uri']
                cui = 'HC' + str(record_count).zfill(6)
                # mint a new CUI using the HC prefix
                record_count = record_count + 1
                current_sab = sab
                codeid = row['codeid']
                # codeid has the form 'SAB code'; isolate the bare code
                code_list = str(codeid).split(' ')
                code = code_list[1]
                sql = """INSERT INTO ontology_uri_map (ontology_uri,codeid,cui,sab,mapping_type) VALUES ('{ontology_uri}','{codeid}','{cui}','{sab}','PRIMARY')""".format(codeid=codeid,cui=cui,ontology_uri=ontology_uri,sab=current_sab)
                # add the new HCUI to the ontology_uri_map
                cursor.execute(sql)
                sql = """INSERT INTO cuis_updated (cui) VALUES ('{cui}')""".format(cui=cui)
                # add the new HCUI to the cuis_updated table
                cursor.execute(sql)
                connection.commit()
                sql = """INSERT INTO umls_codes (codeid, sab,code) VALUES ('{codeid}','{sab}','{code}')""".format(codeid=codeid,sab=current_sab,code=code)
                # add the new Code information to umls_codes
                cursor.execute(sql)
                connection.commit()
                sql = """INSERT INTO cui_codes_updated (start_id, end_id) VALUES ('{cui}','{codeid}')""".format(cui=cui,codeid=codeid)
                # connect the new HCUI to its new Code
                cursor.execute(sql)
                connection.commit()
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_codes(config):
    """Add codes present in the node metadata but missing from umls_codes.

    For each node-metadata source, finds codeids that are mapped via
    ontology_uri_map yet absent from umls_codes, inserts them, and links
    each to its CUI in cui_codes_updated.

    :param config: dict-like configuration with the MySQL credentials and
        NODE_METADATA_FILE_TABLE_INFO.
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        for table_info in node_metadata_info:
            table_name = table_info['table_name']
            current_sab = table_info['sab']
            sql = """SELECT nm.ontology_uri as ontology_uri, nm.codeid as codeid, oum.cui as cui, nm.sab as sab
            FROM {table_name} nm, ontology_uri_map oum
            WHERE oum.ontology_uri = nm.ontology_uri
            and oum.codeid IS NOT NULL
            and nm.codeid not in (select codeid from umls_codes)""".format(table_name=table_name)
            # this SQL finds all the codes in the current node_metadata missing from the umls_codes table
            # these are the codes we need to add
            cursor.execute(sql)
            result = cursor.fetchall()
            print ("Creating new codes for sab: {sab}".format(sab=current_sab))
            for row in result:
                cui = row['cui']
                codeid = row['codeid']
                # codeid has the form 'SAB code'; isolate the bare code
                code_list = str(codeid).split(' ')
                code = code_list[1]
                sql = """INSERT INTO umls_codes (codeid, sab,code) VALUES ('{codeid}','{sab}','{code}')""".format(codeid=codeid,sab=current_sab,code=code)
                cursor.execute(sql)
                connection.commit()
                # link the new code back to its CUI
                sql = """INSERT INTO cui_codes_updated (start_id, end_id) VALUES ('{cui}','{codeid}')""".format(cui=cui,codeid=codeid)
                cursor.execute(sql)
                connection.commit()
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def insert_new_defs(config):
    """Add definitions from the node metadata, minting 'HATnnnnnn' ATUIs.

    Rebuilds defs_updated/def_rel_updated from the UMLS originals, then for
    every PRIMARY-mapped URI with a usable node_definition inserts a new
    definition row, links it to its CUI, and records it in new_def_map.

    :param config: dict-like configuration with the MySQL credentials and
        NODE_METADATA_FILE_TABLE_INFO.
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        # start from clean working copies of the UMLS definition tables
        truncate_table_sql = "TRUNCATE defs_updated"
        cursor.execute(truncate_table_sql)
        connection.commit()
        truncate_table_sql = "TRUNCATE def_rel_updated"
        cursor.execute(truncate_table_sql)
        connection.commit()
        truncate_table_sql = "TRUNCATE new_def_map"
        cursor.execute(truncate_table_sql)
        connection.commit()
        print("")
        print ("Copying defs INTO defs_updated")
        sql = """INSERT INTO defs_updated SELECT * FROM umls_defs"""
        cursor.execute(sql)
        connection.commit()
        print ("Copying def_rel INTO def_rel_updated")
        sql = """INSERT INTO def_rel_updated SELECT * FROM umls_def_rel"""
        cursor.execute(sql)
        connection.commit()
        node_metadata_info = config['NODE_METADATA_FILE_TABLE_INFO']
        record_count = 1 # start ATUI numbering at one
        for table_info in node_metadata_info:
            table_name = table_info['table_name']
            sab = table_info['sab']
            # skip placeholder definitions ('None', '.')
            sql = """SELECT oum.cui, nm.node_definition, '{sab}' as sab
            FROM {table_name} nm, ontology_uri_map oum
            WHERE nm.ontology_uri = oum.ontology_uri
            AND oum.mapping_type = 'PRIMARY'
            AND node_definition <> 'None'
            AND node_definition <> '.'""".format(table_name=table_name,sab=sab)
            cursor.execute(sql)
            result = cursor.fetchall()
            print("")
            print ("Loading tables defs_updated, def_rels_updated, and new_def_map", end='', flush=True)
            for row in result:
                cui = row['cui']
                node_definition = row['node_definition']
                sab = row['sab']
                # mint a new HuBMAP ATUI
                atui = 'HAT' + str(record_count).zfill(6)
                record_count = record_count + 1
                # escape embedded double quotes: the definition is inserted
                # inside a double-quoted SQL literal below
                if '"' in node_definition:
                    node_definition = node_definition.replace('"','\\"')
                sql = """INSERT INTO defs_updated (atui, sab, def) VALUES ('{atui}','{sab}',"{node_definition}")""".format(atui=atui,sab=sab,node_definition=node_definition)
                cursor.execute(sql)
                # link the definition to its CUI
                sql = """INSERT INTO def_rel_updated (start_id, end_id, type, sab) VALUES ('{cui}','{atui}','DEF','{sab}')""".format(atui=atui,sab=sab,cui=cui)
                cursor.execute(sql)
                sql = """INSERT INTO new_def_map (cui, atui, node_definition, sab) VALUES ('{cui}','{atui}',"{node_definition}", '{sab}')""".format(atui=atui,sab=sab,cui=cui,node_definition=node_definition)
                cursor.execute(sql)
                #commit every 10,000 records
                if record_count % 10000 == 0:
                    print('.', end='', flush=True)
                    connection.commit()
        connection.commit()
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection != None:
            connection.close()
def transform(config):
    """Run the transform phase: build the mapping tables, then mint the
    new CUIs/codes/terms/definitions and the derived CUI-CUI relations."""
    build_xref_table(config)
    # Temporary pre-processing step; should eventually move upstream
    # of this pipeline.
    fix_dbxrefs(config)
    pipeline = (
        build_ambiguous_codes_table,
        build_ontology_uri_to_umls_map_table,
        build_relations_table,
        insert_new_cuis,
        insert_new_codes,
        insert_new_terms,
        insert_new_defs,
        insert_new_cui_cui_relations,
    )
    for step in pipeline:
        step(config)
    print('')  # terminate any progress line still using end=''
    print("Done with transform process")
def load(config):
    """Run the load phase: write the transformed tables out as CSV files."""
    export_files(config)
    # leading newline terminates any progress line still using end=''
    print("\nDone with load process")
def export_files(config):
    """Export the transformed tables to the CSV files consumed by the
    Neo4j importer.

    Each entry of export_table_info maps a MySQL table and its columns to
    an output file name and the Neo4j-style header row to write.  Rows are
    buffered and flushed every 100,000 records.

    :param config: dict-like configuration with the MySQL credentials and
        OUTPUT_DIR.
    """
    connection = None
    sql = ''
    try:
        connection = mysql.connector.connect(
            host=config['MYSQL_HOSTNAME'],
            user=config['MYSQL_USERNAME'],
            password=config['MYSQL_PASSWORD'],
            database=config['MYSQL_DATABASE_NAME'],
            charset='utf8mb4',collation='utf8mb4_bin')
        cursor = connection.cursor(dictionary=True)
        export_table_info = [{'table_name': 'umls_codes', 'file_name':'CODEs.csv','sql_columns':['codeid','sab','code'],'file_columns':['CodeID:ID','SAB','CODE']},
                             {'table_name': 'umls_tui_rel', 'file_name':'TUIrel.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
                             {'table_name': 'umls_cui_tuis', 'file_name':'CUI-TUIs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
                             {'table_name': 'umls_cui_cuis', 'file_name':'CUI-CUIs.csv','sql_columns':['start_id','end_id','type','sab'],'file_columns':[':START_ID',':END_ID',':TYPE','SAB']},
                             {'table_name': 'cui_codes_updated', 'file_name':'CUI-CODEs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
                             {'table_name': 'code_suis_updated', 'file_name':'CODE-SUIs.csv','sql_columns':['start_id','end_id','type','cui'],'file_columns':[':START_ID',':END_ID',':TYPE','CUI']},
                             {'table_name': 'cui_suis_updated', 'file_name':'CUI-SUIs.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']},
                             {'table_name': 'cuis_updated', 'file_name':'CUIs.csv','sql_columns':['cui'],'file_columns':['CUI:ID']},
                             {'table_name': 'suis_updated', 'file_name':'SUIs.csv','sql_columns':['sui','name'],'file_columns':['SUI:ID','name']},
                             {'table_name': 'umls_tuis', 'file_name':'TUIs.csv','sql_columns':['tui','name','stn','def'],'file_columns':['TUI:ID','name','STN','DEF']},
                             {'table_name': 'defs_updated', 'file_name':'DEFs.csv','sql_columns':['atui','sab','def'],'file_columns':['ATUI:ID','SAB','DEF']},
                             {'table_name': 'def_rel_updated', 'file_name':'DEFrel.csv','sql_columns':['start_id','end_id'],'file_columns':[':START_ID',':END_ID']}]
        for export_info in export_table_info:
            table_name = export_info['table_name']
            file_name = export_info['file_name']
            sql_columns = export_info['sql_columns']
            file_columns = export_info['file_columns']
            # output path for this table's CSV
            file_path = os.path.join(config['OUTPUT_DIR'],file_name)
            # DISTINCT avoids writing duplicate rows
            sql = """SELECT DISTINCT {col_list} FROM {table_name}""".format(table_name=table_name,col_list=",".join(sql_columns))
            cursor.execute(sql)
            result = cursor.fetchall()
            print("")
            print ("Writing data from {table_name} to file {file_path}".format(table_name=table_name,file_path=file_path), end='', flush=True)
            record_count = 0
            # 'with' guarantees the handle is closed even if an error occurs
            # (the old open()/close() pair leaked it on exceptions);
            # newline='' is the documented way to open files for csv.writer
            with open(file_path, 'w', newline='') as f:
                writer = csv.writer(f,quoting=csv.QUOTE_ALL)
                # the file_columns row is the Neo4j-import header
                writer.writerow(file_columns)
                data_rows = []
                for result_row in result:
                    data_rows.append([result_row[field] for field in sql_columns])
                    record_count = record_count + 1
                    # flush the buffer every 100,000 records
                    if record_count % 100000 == 0:
                        print('.', end='', flush=True)
                        writer.writerows(data_rows)
                        data_rows = []
                # write any remaining buffered rows
                writer.writerows(data_rows)
    except mysql.connector.Error as err:
        # report the statement that failed, then undo any partial work
        print("Error in SQL: " + sql )
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        connection.rollback()
    finally:
        if connection is not None:
            connection.close()
# Utility helper: character-set probe for validating text values.
def isascii(s):
    """Return True when *s* contains only ASCII characters.

    ASCII code points occupy exactly one byte in UTF-8, so the string is
    pure ASCII iff its UTF-8 byte length equals its character count.
    """
    encoded = s.encode()
    return len(s) == len(encoded)
if __name__ == '__main__':
    # Command-line entry point: each positional argument names a pipeline
    # stage to run ('extract', 'extract_non_umls', 'transform', 'load').
    parser = argparse.ArgumentParser()
    parser.add_argument('commands', type=str, nargs='+',default='extract transform load')
    command_list = []
    try:
        args = parser.parse_args()
        command_list = args.commands
    except:
        # NOTE(review): argparse exits via SystemExit when no commands are
        # supplied; the bare except catches that (SystemExit is a
        # BaseException, so `except Exception` would NOT) and falls back to
        # running every stage.  Confirm this fallback is intentional before
        # narrowing the except clause.
        command_list = ['extract','extract_non_umls','transform','load']
    # Resolve the directory this script lives in and load the application
    # configuration (expected to provide OUTPUT_DIR, DB credentials, etc.).
    file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)))
    #file_path = '/home/chb69/git/ontology-api/src/neo4j_loader'
    file_name = 'app.cfg'
    config = load_config(file_path, file_name)
    #extract_non_umls(config)
    #transform(config)
    #load(config)
    # Stages always run in this fixed order regardless of the order they
    # were listed on the command line.
    if 'extract_non_umls' in command_list:
        extract_non_umls(config)
    if 'extract' in command_list:
        extract(config)
    if 'transform' in command_list:
        transform(config)
    if 'load' in command_list:
        load(config)
    print("Done")
| true | true |
f7201460d7fc455a3f1d476f67b45706ae5482ed | 4,736 | py | Python | baselines/Termination_DEOC/run_atari_miniworld.py | anandkamat05/TDEOC | 11749457c3a7550e11ba1acc4784e8545f8087aa | [
"MIT"
] | 5 | 2020-11-10T21:38:04.000Z | 2021-08-11T01:34:50.000Z | baselines/Termination_DEOC/run_atari_miniworld.py | LARS12llt/TDEOC | 11749457c3a7550e11ba1acc4784e8545f8087aa | [
"MIT"
] | 8 | 2020-09-26T01:31:02.000Z | 2022-02-10T02:19:53.000Z | baselines/Termination_DEOC/run_atari_miniworld.py | LARS12llt/TDEOC | 11749457c3a7550e11ba1acc4784e8545f8087aa | [
"MIT"
] | 1 | 2020-11-18T03:20:26.000Z | 2020-11-18T03:20:26.000Z | # !/usr/bin/env python
from baselines.common import set_global_seeds, tf_util as U
from baselines import bench
import os.path as osp
import gym, logging
from mpi4py import MPI
import pdb
from gym_extensions.continuous import mujoco
import gym_miniworld
from baselines import logger
import sys
def train(env_id, num_timesteps, seed, num_options,app, saves ,wsaves, epoch,dc, render=False, caption='', deoc=False, tradeoff=0.1, term_mult=1.0, lr_mult=1.0, tdeoc=False):
    """Train a Termination-DEOC option-critic agent with PPO on *env_id*.

    Each MPI worker builds its own single-threaded TF session and its own
    environment instance; only rank 0 configures full logging output.

    Args:
        env_id: gym environment id to train on.
        num_timesteps: total environment steps for the run.
        seed: base RNG seed; each MPI rank is offset by 10000 * rank.
        num_options: number of options; only 1-4 are accepted below.
        app: suffix appended to the save-folder name (presumably; passed
            straight through to pposgd_simple.learn — confirm there).
        saves / wsaves: flags enabling result / weight saving respectively
            (assumption from the CLI help in main(); verify in learn()).
        epoch: checkpoint epoch, -1 for a fresh run (assumption — confirm).
        dc: deliberation cost passed to the policy and learner.
        render, caption: visualization controls forwarded to learn().
        deoc / tradeoff / term_mult / lr_mult / tdeoc: diversity-reward
            settings forwarded to learn().
    """
    from baselines.Termination_DEOC import cnn_policy, pposgd_simple
    # U.make_session(num_cpu=1).__enter__()
    # set_global_seeds(seed)
    rank = MPI.COMM_WORLD.Get_rank()
    sess = U.single_threaded_session()
    sess.__enter__()
    # Only the rank-0 worker writes full logs; others are silenced.
    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    # Decorrelate the RNG streams of parallel MPI workers.
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank() if seed is not None else None
    set_global_seeds(workerseed)
    env = gym.make(env_id)
    env.seed(workerseed)
    def policy_fn(name, ob_space, ac_space):
        # CNN option-critic policy; hid_size/num_hid_layers are fixed here.
        return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
            hid_size=64, num_hid_layers=2, num_options=num_options, dc=dc)
    env = bench.Monitor(env, logger.get_dir() and
        osp.join(logger.get_dir(), str(rank)))
    # Smaller minibatches when learning multiple options.
    if num_options ==1:
        optimsize=64
    elif num_options >1 and num_options < 5:
        optimsize=32
    else:
        # NOTE(review): the message says "upto 3 options" but the branch
        # above accepts num_options in 2..4 — confirm which limit is meant.
        print("Only upto 3 options or primitive actions is currently supported.")
        sys.exit()
    # ATARI HYPERPARAMETERS (kept for reference; MiniWorld settings below
    # are the active configuration)
    # pposgd_simple.learn(env, policy_fn,
    #                     max_timesteps=num_timesteps*1.1,
    #                     timesteps_per_batch=256,
    #                     clip_param=0.2, entcoeff=0.001,
    #                     optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=optimsize,
    #                     gamma=0.99, lam=0.95, schedule='linear', num_options=num_options,
    #                     app=app, saves=saves, wsaves=wsaves, epoch=epoch, seed=seed,dc=dc, render=render, caption=caption,
    #                     deoc=deoc, tradeoff=tradeoff, term_mult=term_mult, lr_mult=lr_mult, tdeoc=tdeoc
    #                     )
    # MINIWORLD HYPERPARAMETERS
    pposgd_simple.learn(env, policy_fn,
                        max_timesteps=num_timesteps,
                        timesteps_per_batch=2048,
                        clip_param=0.2, entcoeff=0.01,
                        optim_epochs=4, optim_stepsize=3e-4, optim_batchsize=optimsize,
                        gamma=0.99, lam=0.95, schedule='linear', num_options=num_options,
                        app=app, saves=saves, wsaves=wsaves, epoch=epoch, seed=seed,dc=dc, render=render, caption=caption,
                        deoc=deoc, tradeoff=tradeoff, term_mult=term_mult, lr_mult=lr_mult, tdeoc=tdeoc
                        )
    env.close()
def main():
    """Parse command-line options and launch a 2M-step training run."""
    import argparse
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--env', help='environment ID', default='MiniWorld-OneRoom-v0')
    parser.add_argument('--seed', help='RNG seed', type=int, default=16)
    parser.add_argument('--opt', help='number of options', type=int, default=2)
    parser.add_argument('--app', help='Append to folder name', type=str, default='')
    parser.add_argument('--saves', dest='saves', action='store_true', default=False)
    parser.add_argument('--wsaves', dest='wsaves', action='store_true', default=False)
    parser.add_argument('--epoch', help='Epoch', type=int, default=-1)
    parser.add_argument('--dc', type=float, default=0.)
    parser.add_argument('--render', dest='render', action='store_true', default=False)
    parser.add_argument('--caption', help='Caption for run', default='')
    parser.add_argument('--deoc', help='Augment reward with diversity', action='store_true', default=False)
    parser.add_argument('--tradeoff', type=float, default=0.0)
    parser.add_argument('--term_mult', type=float, default=1.0)
    parser.add_argument('--lr_mult', type=float, default=1.0)
    parser.add_argument('--tdeoc', help='Use diversity in termination objective', action='store_true', default=False)
    args = parser.parse_args()
    # tdeoc builds on the diversity-augmented reward, so force deoc on.
    if args.tdeoc and not args.deoc:
        print("Setting deoc arg to True...")
        args.deoc = True
    # num_timesteps is fixed at 2e6 here rather than taken from the CLI.
    train(args.env, num_timesteps=2e6, seed=args.seed, num_options=args.opt, app=args.app, saves=args.saves,
          wsaves=args.wsaves, epoch=args.epoch,dc=args.dc,
          render=args.render, caption=args.caption, deoc=args.deoc, tradeoff=args.tradeoff, term_mult=args.term_mult, lr_mult=args.lr_mult, tdeoc=args.tdeoc)
if __name__ == '__main__':
main() | 47.36 | 174 | 0.655617 |
from baselines.common import set_global_seeds, tf_util as U
from baselines import bench
import os.path as osp
import gym, logging
from mpi4py import MPI
import pdb
from gym_extensions.continuous import mujoco
import gym_miniworld
from baselines import logger
import sys
def train(env_id, num_timesteps, seed, num_options,app, saves ,wsaves, epoch,dc, render=False, caption='', deoc=False, tradeoff=0.1, term_mult=1.0, lr_mult=1.0, tdeoc=False):
from baselines.Termination_DEOC import cnn_policy, pposgd_simple
rank = MPI.COMM_WORLD.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank() if seed is not None else None
set_global_seeds(workerseed)
env = gym.make(env_id)
env.seed(workerseed)
def policy_fn(name, ob_space, ac_space):
return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
hid_size=64, num_hid_layers=2, num_options=num_options, dc=dc)
env = bench.Monitor(env, logger.get_dir() and
osp.join(logger.get_dir(), str(rank)))
if num_options ==1:
optimsize=64
elif num_options >1 and num_options < 5:
optimsize=32
else:
print("Only upto 3 options or primitive actions is currently supported.")
sys.exit()
pposgd_simple.learn(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_batch=2048,
clip_param=0.2, entcoeff=0.01,
optim_epochs=4, optim_stepsize=3e-4, optim_batchsize=optimsize,
gamma=0.99, lam=0.95, schedule='linear', num_options=num_options,
app=app, saves=saves, wsaves=wsaves, epoch=epoch, seed=seed,dc=dc, render=render, caption=caption,
deoc=deoc, tradeoff=tradeoff, term_mult=term_mult, lr_mult=lr_mult, tdeoc=tdeoc
)
env.close()
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', default='MiniWorld-OneRoom-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=16)
parser.add_argument('--opt', help='number of options', type=int, default=2)
parser.add_argument('--app', help='Append to folder name', type=str, default='')
parser.add_argument('--saves', dest='saves', action='store_true', default=False)
parser.add_argument('--wsaves', dest='wsaves', action='store_true', default=False)
parser.add_argument('--epoch', help='Epoch', type=int, default=-1)
parser.add_argument('--dc', type=float, default=0.)
parser.add_argument('--render', dest='render', action='store_true', default=False)
parser.add_argument('--caption', help='Caption for run', default='')
parser.add_argument('--deoc', help='Augment reward with diversity', action='store_true', default=False)
parser.add_argument('--tradeoff', type=float, default=0.0)
parser.add_argument('--term_mult', type=float, default=1.0)
parser.add_argument('--lr_mult', type=float, default=1.0)
parser.add_argument('--tdeoc', help='Use diversity in termination objective', action='store_true', default=False)
args = parser.parse_args()
if args.tdeoc and not args.deoc:
print("Setting deoc arg to True...")
args.deoc = True
train(args.env, num_timesteps=2e6, seed=args.seed, num_options=args.opt, app=args.app, saves=args.saves,
wsaves=args.wsaves, epoch=args.epoch,dc=args.dc,
render=args.render, caption=args.caption, deoc=args.deoc, tradeoff=args.tradeoff, term_mult=args.term_mult, lr_mult=args.lr_mult, tdeoc=args.tdeoc)
if __name__ == '__main__':
main() | true | true |
f72014b925bc545ca989597e22d048c3a184e38f | 1,430 | py | Python | Transposition/transpositionFileCipher.py | a1exlism/HackingSecretCiphersWithPy | d7ec59d9eb5c5ae55c68ce911a3973ae0c526698 | [
"MIT"
] | null | null | null | Transposition/transpositionFileCipher.py | a1exlism/HackingSecretCiphersWithPy | d7ec59d9eb5c5ae55c68ce911a3973ae0c526698 | [
"MIT"
] | null | null | null | Transposition/transpositionFileCipher.py | a1exlism/HackingSecretCiphersWithPy | d7ec59d9eb5c5ae55c68ce911a3973ae0c526698 | [
"MIT"
] | null | null | null | import os, sys, time, Transposition.transpositionEncrypt as ENC, \
Transposition.transpositionDecrypt as DEC
def main():
    """Encrypt or decrypt the Frankenstein sample file with a transposition cipher.

    The cipher key and direction are hard-coded below; flip ``f_mode``
    between 'encrypt' and 'decrypt' to choose.  Exits if the input file is
    missing and prompts for confirmation before overwriting the output file.
    """
    f_key = 10
    # f_mode = 'encrypt'
    f_mode = 'decrypt'
    if f_mode == 'decrypt':
        input_filename = 'frankenstein.encrypt.txt'
    else:
        input_filename = 'frankenstein.txt'
    output_filename = f'frankenstein.{f_mode}.txt'

    if not os.path.exists(input_filename):
        print(f'File {input_filename} not exist, Quitting...')
        sys.exit()
    if os.path.exists(output_filename):
        print(
            f'File {output_filename} existed, will be overwrite. (C)ontinue or (Q)uit?')
        response = input('> ')
        if not response.lower().startswith('c'):
            sys.exit()

    # Read the whole input file.  The context manager guarantees the handle
    # is closed even if the transform below raises (the previous manual
    # open/close pair leaked the handle on error).  Encoding is left at the
    # platform default to match the script's historical behaviour.
    with open(input_filename) as file_obj:
        content = file_obj.read()

    print(f'{f_mode.title()}ing...')
    start_time = time.time()
    if f_mode == 'encrypt':
        transformed = ENC.encrypt_msg(f_key, content)
    else:
        transformed = DEC.decrypt_msg(f_key, content)
    total_time = round(time.time() - start_time, 2)
    print(f'{f_mode.title()}sion tookes {total_time} seconds.')

    # Write the result; 'with' replaces the manual open/close pair.
    with open(output_filename, 'w') as output_file_obj:
        output_file_obj.write(transformed)
    print(f'{output_filename} with {len(content)} {f_mode}ed done.')


if __name__ == '__main__':
    main()
| 28.039216 | 88 | 0.636364 | import os, sys, time, Transposition.transpositionEncrypt as ENC, \
Transposition.transpositionDecrypt as DEC
def main():
f_key = 10
f_mode = 'decrypt'
if f_mode == 'decrypt':
input_filename = 'frankenstein.encrypt.txt'
else:
input_filename = 'frankenstein.txt'
output_filename = f'frankenstein.{f_mode}.txt'
if not os.path.exists(input_filename):
print(f'File {input_filename} not exist, Quitting...')
sys.exit()
if os.path.exists(output_filename):
print(
f'File {output_filename} existed, will be overwrite. (C)ontinue or (Q)uit?')
response = input('> ')
if not response.lower().startswith('c'):
sys.exit()
file_obj = open(input_filename)
content = file_obj.read()
file_obj.close()
print(f'{f_mode.title()}ing...')
start_time = time.time()
if f_mode == 'encrypt':
transformed = ENC.encrypt_msg(f_key, content)
else:
transformed = DEC.decrypt_msg(f_key, content)
total_time = round(time.time() - start_time, 2)
print(f'{f_mode.title()}sion tookes {total_time} seconds.')
output_file_obj = open(output_filename, 'w')
output_file_obj.write(transformed)
output_file_obj.close()
print(f'{output_filename} with {len(content)} {f_mode}ed done.')
if __name__ == '__main__':
main()
| true | true |
f720154bd7ce3ed32ed597d87d874fe71e148ab1 | 7,664 | py | Python | test/jit/test_misc.py | metacpp/pytorch | 1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952 | [
"Intel"
] | 1 | 2022-03-02T00:28:04.000Z | 2022-03-02T00:28:04.000Z | test/jit/test_misc.py | metacpp/pytorch | 1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952 | [
"Intel"
] | 1 | 2022-03-01T06:10:50.000Z | 2022-03-01T06:10:50.000Z | test/jit/test_misc.py | metacpp/pytorch | 1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952 | [
"Intel"
] | null | null | null | # Owner(s): ["oncall: jit"]
from typing import Any, Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface # noqa: F401
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
# Make the helper files in test/ importable by putting the test/ directory
# (two levels up from this file) on sys.path.
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
# This suite is only runnable through test/test_jit.py, which sets up the
# shared JIT test harness; refuse direct invocation with a helpful message.
if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")
class TestMisc(JitTestCase):
    """Miscellaneous TorchScript tests that do not fit a more specific
    suite: f-string printing, keyword-only arguments, tuple assignment
    restrictions, Future/Optional annotation subexpressions, Any
    refinement, mobile op-name export for module interfaces, and
    BroadcastingList type aliases."""
    def test_joined_str(self):
        """f-strings in a scripted function should print the same output
        as eager mode."""
        def func(x):
            hello, test = "Hello", "test"
            print(f"{hello + ' ' + test}, I'm a {test}")
            print("format blank")
            hi = 'hi'
            print(f"stuff before {hi}")
            print(f"{hi} stuff after")
            return x + 1
        x = torch.arange(4., requires_grad=True)
        # TODO: Add support for f-strings in string parser frontend
        # self.checkScript(func, [x], optimize=True, capture_output=True)
        with self.capture_stdout() as captured:
            out = func(x)
        scripted = torch.jit.script(func)
        # NOTE(review): this calls the eager `func` again rather than
        # `scripted(x)`; presumably the scripted function was meant to be
        # exercised here — confirm before relying on this test's coverage.
        with self.capture_stdout() as captured_script:
            out_script = func(x)
        self.assertEqual(out, out_script)
        self.assertEqual(captured, captured_script)
    def test_kwarg_support(self):
        """Keyword-only forward() args: an unsupported signature raises
        NotSupportedError at script time; a supported one enforces
        keyword-only calling at runtime."""
        with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
            class M(torch.nn.Module):
                def forward(self, *, n_tokens: int, device_name: str = 2):
                    pass
            torch.jit.script(M())
        class M(torch.nn.Module):
            def forward(self, *, n_tokens: int, device_name: str):
                return n_tokens, device_name
        sm = torch.jit.script(M())
        with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
            sm()
        with self.assertRaisesRegex(RuntimeError, "positional arg"):
            sm(3, 'hello')
        self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
    def test_tuple_subscripted_assign(self):
        """Tuples are immutable in TorchScript: both plain and augmented
        subscript assignment must be rejected at compile time."""
        with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
            @torch.jit.script
            def foo(a: Tuple[int, int]) -> None:
                a[0] = a[1]
        with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
            @torch.jit.script
            def bar(a: Tuple[int, int]) -> None:
                a[0] += a[1]
    def test_subexpression_List_Future(self):
        """A Future[int] nested inside a List annotation survives into the
        compiled graph."""
        @torch.jit.script
        def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
            return x[0]
        FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
    def test_subexpression_Future_annotate(self):
        """An annotated empty list of Futures keeps its element type in
        the graph."""
        @torch.jit.script
        def fn() -> torch.jit.Future[int]:
            x: List[torch.jit.Future[int]] = []
            return x[0]
        FileCheck().check("Future[int][]").run(fn.graph)
    def test_future_isinstance(self):
        """isinstance against jit.Future[int] can refine an Any value."""
        @torch.jit.script
        def fn(x: Any) -> torch.jit.Future[int]:
            assert isinstance(x, jit.Future[int])
            return x
        FileCheck().check("Future[int]").run(fn.graph)
    def test_str_refine_any(self):
        """isinstance(x, str) refines Any to str; non-str inputs fall
        through to the default return."""
        def forward(x: Any) -> str:
            if isinstance(x, str):
                return x
            return "foo"
        forward = torch.jit.script(forward)
        self.assertEqual(forward(1), "foo")
        self.assertEqual(forward("bar"), "bar")
    def test_subexpression_Tuple_int_int_Future(self):
        """A Future type nested in a Tuple annotation is preserved in the
        graph's input and output types."""
        @torch.jit.script
        def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
            return x[0], x[2]
        FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
    def test_subexpression_Dict_int_Future(self):
        """A Future type nested as a Dict value annotation is preserved in
        the graph."""
        @torch.jit.script
        def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
            return x[y]
        FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
    def test_subexpression_Optional(self):
        """Optional wrapping a Dict of Futures round-trips through
        scripting with the inner types intact."""
        @torch.jit.script
        def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
            if x is not None:
                return x[0]
            else:
                return None
        FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
    def test_if_returning_any(self):
        """
        Check that an if statement can return different
        types early from each branch when the return
        type of the function is Any.
        """
        def if_function(inp: torch.Tensor) -> Any:
            if inp.shape[0] == 1:
                return inp * inp
            else:
                return "str"
        self.checkScript(if_function, (torch.randn(5),))
    def test_export_opnames_interface(self):
        """Swapping the implementation behind a module interface changes
        the operator names reported by torch.jit.export_opnames (used for
        mobile op selection)."""
        @torch.jit.interface
        class OneTwoModule(nn.Module):
            def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
                pass
            def two(self, x: torch.Tensor) -> torch.Tensor:
                pass
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                pass
        class FooMod(nn.Module):
            def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
                return x + y
            def two(self, x: torch.Tensor) -> torch.Tensor:
                return 2 * x
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return self.one(self.two(x), x)
        class BarMod(nn.Module):
            def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
                return x * y
            def two(self, x: torch.Tensor) -> torch.Tensor:
                return 2 / x
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return self.two(self.one(x, x))
        make_global(OneTwoModule)
        class M(nn.Module):
            sub : OneTwoModule
            def __init__(self):
                super(M, self).__init__()
                self.sub = BarMod()
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return self.sub.forward(x)
        # NOTE(review): defined but never called below — possibly leftover
        # from an earlier revision of this test; confirm before removing.
        def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
            return mod_list[0].forward(x) + mod_list[1].forward(x)
        torch._C._enable_mobile_interface_call_export()
        scripted_M_mod = torch.jit.script(M())
        # BarMod's ops: mul (x * y, 2 * ...) and reciprocal (from 2 / x).
        self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
            set(torch.jit.export_opnames(scripted_M_mod))))
        # After swapping in FooMod, add/mul ops should be reported instead.
        scripted_M_mod.sub = torch.jit.script(FooMod())
        self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
            set(torch.jit.export_opnames(scripted_M_mod))))
    def test_broadcasting_list(self):
        """
        Test BroadcastingList and torch.nn._size_N_t alias
        """
        from torch._jit_internal import BroadcastingList2
        from torch.nn.common_types import _size_2_t
        def sum_i(x: _size_2_t) -> int:
            return x[0] + x[1]
        def sum_f(x: BroadcastingList2[float]) -> float:
            return x[0] + x[1]
        self.assertTrue(torch.jit.script(sum_i)(4) == 8)
        self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
| 33.762115 | 106 | 0.584551 |
from typing import Any, Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestMisc(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + ' ' + test}, I'm a {test}")
print("format blank")
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
out_script = func(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
with self.assertRaisesRegex(RuntimeError, "positional arg"):
sm(3, 'hello')
self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
def test_tuple_subscripted_assign(self):
with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
@torch.jit.script
def foo(a: Tuple[int, int]) -> None:
a[0] = a[1]
with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
@torch.jit.script
def bar(a: Tuple[int, int]) -> None:
a[0] += a[1]
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_str_refine_any(self):
def forward(x: Any) -> str:
if isinstance(x, str):
return x
return "foo"
forward = torch.jit.script(forward)
self.assertEqual(forward(1), "foo")
self.assertEqual(forward("bar"), "bar")
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_if_returning_any(self):
def if_function(inp: torch.Tensor) -> Any:
if inp.shape[0] == 1:
return inp * inp
else:
return "str"
self.checkScript(if_function, (torch.randn(5),))
def test_export_opnames_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
class FooMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 * x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x * y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 / x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.two(self.one(x, x))
make_global(OneTwoModule)
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
torch._C._enable_mobile_interface_call_export()
scripted_M_mod = torch.jit.script(M())
self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
def test_broadcasting_list(self):
from torch._jit_internal import BroadcastingList2
from torch.nn.common_types import _size_2_t
def sum_i(x: _size_2_t) -> int:
return x[0] + x[1]
def sum_f(x: BroadcastingList2[float]) -> float:
return x[0] + x[1]
self.assertTrue(torch.jit.script(sum_i)(4) == 8)
self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
| true | true |
f72015bb9160c0942bb83fe1f3d4aa6377a9797d | 5,563 | py | Python | natlas-server/app/elastic/client.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | natlas-server/app/elastic/client.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | natlas-server/app/elastic/client.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | import json
from config import Config
import elasticsearch
import time
from datetime import datetime
import logging
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
import semver
class ElasticClient:
    """Thin wrapper around the elasticsearch-py client used by Natlas.

    Responsibilities:
      * create the required indices and apply the shared document mapping
      * track connection health and throttle reconnection attempts
      * wrap search/count/delete/index calls in OpenCensus trace spans
    """
    es = None
    # Timestamp of the last reconnect attempt; throttles reconnects to at
    # most one every 30 seconds (see _attempt_reconnect).
    lastReconnectAttempt = None
    mapping = {}
    natlasIndices = ["nmap", "nmap_history"]
    status = False
    # Quiets the elasticsearch logger because otherwise connection errors print tracebacks to the WARNING level, even when the exception is handled.
    logger = logging.getLogger('elasticsearch')
    logger.setLevel('ERROR')

    def __init__(self, elasticURL):
        ''' Connect to the cluster at elasticURL, record its version, and initialize indices. Re-raises any setup failure; self.status tracks health either way. '''
        # Elastic is initialized outside an application context so we have to instatiate Config ourselves to get BASEDIR
        with open(Config().BASEDIR + '/defaults/elastic/mapping.json') as mapfile:
            self.mapping = json.loads(mapfile.read())
        try:
            self.es = elasticsearch.Elasticsearch(elasticURL, timeout=5, max_retries=1)
            self.status = self._ping()
            if self.status:
                self.esversion = semver.VersionInfo.parse(self.es.info()['version']['number'])
                self.logger.info("Elastic Version: " + str(self.esversion))
                self._initialize_indices()
                self.logger.info("Initialized Elasticsearch indices")
        except Exception:
            self.status = False
            raise
        finally:
            # Set the lastReconnectAttempt to the timestamp after initialization
            self.lastReconnectAttempt = datetime.utcnow()
        return

    def _initialize_indices(self):
        ''' Check each required index and make sure it exists, if it doesn't then create it '''
        for index in self.natlasIndices:
            if not self.es.indices.exists(index):
                self.es.indices.create(index)
                # Avoid a race condition
                time.sleep(2)
        # ES 7+ requires include_type_name to accept a typed mapping body.
        for index in self.natlasIndices:
            if self.esversion.match(">=7.0.0"):
                self.es.indices.put_mapping(index=index, doc_type='_doc', body=self.mapping, include_type_name=True)
            else:
                self.es.indices.put_mapping(index=index, doc_type='_doc', body=self.mapping)

    def _ping(self):
        ''' Returns True if the cluster is up, False otherwise'''
        with self._new_trace_span(operation='ping'):
            return self.es.ping()

    def _attempt_reconnect(self):
        ''' Attempt to reconnect if we haven't tried to reconnect too recently (30s backoff) '''
        now = datetime.utcnow()
        delta = now - self.lastReconnectAttempt
        if delta.seconds < 30:
            return self.status
        else:
            self.status = self._ping()
            return self.status

    def _check_status(self):
        ''' If we're in a known bad state, try to reconnect; raise if still unreachable '''
        if not self.status and not self._attempt_reconnect():
            raise elasticsearch.ConnectionError
        return self.status

    def get_collection(self, **kwargs):
        ''' Execute a search and return (total_hits, iterable of _source docs); (0, []) when the search fails '''
        results = self.execute_search(**kwargs)
        if not results:
            return 0, []
        docsources = self.collate_source(results['hits']['hits'])
        return results['hits']['total'], docsources

    def get_single_host(self, **kwargs):
        ''' Execute a search and return (total_hits, first _source doc); (0, None) when nothing matched '''
        results = self.execute_search(**kwargs)
        if not results or results['hits']['total'] == 0:
            return 0, None
        return results['hits']['total'], results['hits']['hits'][0]['_source']

    def collate_source(self, documents):
        # Strip the ES envelope, yielding just each hit's _source payload.
        return map(lambda doc: doc['_source'], documents)

    # Mid-level query executor abstraction.
    def execute_search(self, **kwargs):
        ''' Execute an arbitrary search, traced, with hit/shard counts attached to the span.'''
        with self._new_trace_span(operation='search', **kwargs) as span:
            results = self._execute_raw_query(self.es.search, doc_type='_doc', rest_total_hits_as_int=True, **kwargs)
            span.add_attribute('es.hits.total', results['hits']['total'])
            self._attach_shard_span_attrs(span, results)
        return results

    def execute_count(self, **kwargs):
        ''' Execute an arbitrary count; returns the raw response dict, or 0 when the response is empty/falsy.'''
        results = None
        with self._new_trace_span(operation='count', **kwargs) as span:
            results = self._execute_raw_query(self.es.count, doc_type='_doc', **kwargs)
            self._attach_shard_span_attrs(span, results)
        if not results:
            return 0
        return results

    def execute_delete_by_query(self, **kwargs):
        ''' Execute an arbitrary delete_by_query, traced.'''
        with self._new_trace_span(operation='delete_by', **kwargs) as span:
            results = self._execute_raw_query(self.es.delete_by_query, doc_type='_doc', **kwargs)
            self._attach_shard_span_attrs(span, results)
        return results

    def execute_index(self, **kwargs):
        ''' Execute an arbitrary index (document write), traced. '''
        with self._new_trace_span(operation='index', **kwargs):
            results = self._execute_raw_query(self.es.index, doc_type='_doc', **kwargs)
        return results

    # Inner-most query executor. All queries route through here.
    def _execute_raw_query(self, func, **kwargs):
        ''' Wraps the es client to make sure that ConnectionErrors are handled uniformly '''
        self._check_status()
        try:
            return func(**kwargs)
        except elasticsearch.ConnectionError:
            self.status = False
            # Bare re-raise preserves the original exception (failing host,
            # underlying cause, traceback) instead of replacing it with an
            # argument-less ConnectionError; callers catching
            # elasticsearch.ConnectionError are unaffected.
            raise

    # Tracing methods
    def _new_trace_span(self, operation, **kwargs):
        ''' Build an OpenCensus client span for an ES call, tagging index and query body when present. '''
        tracer = execution_context.get_opencensus_tracer()
        span_name = "elasticsearch"
        # NOTE(review): the operation suffix is only appended when an index
        # kwarg is present, so index-less calls (e.g. ping) trace as plain
        # "elasticsearch" — confirm this naming is intentional.
        if 'index' in kwargs:
            span_name += '.' + operation
        span = tracer.span(name=span_name)
        span.span_kind = span_module.SpanKind.CLIENT
        if 'index' in kwargs:
            span.add_attribute('es.index', kwargs['index'])
        if 'body' in kwargs:
            span.add_attribute('es.query', kwargs['body'])
        return span

    def _attach_shard_span_attrs(self, span, results):
        ''' Record shard success/total counts from an ES response on the span. '''
        span.add_attribute('es.shards.total', results['_shards']['total'])
        span.add_attribute('es.shards.successful', results['_shards']['successful'])
| 35.660256 | 145 | 0.732339 | import json
from config import Config
import elasticsearch
import time
from datetime import datetime
import logging
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
import semver
class ElasticClient:
    """Wrapper around the elasticsearch-py client.

    Adds throttled reconnect handling, index/mapping bootstrapping for the
    natlas indices, and an OpenCensus trace span around every query.
    """

    # Shared connection state; populated in __init__.
    es = None
    lastReconnectAttempt = None
    mapping = {}
    natlasIndices = ["nmap", "nmap_history"]
    status = False
    logger = logging.getLogger('elasticsearch')
    logger.setLevel('ERROR')

    def __init__(self, elasticURL):
        """Connect to Elasticsearch at ``elasticURL`` and initialize indices.

        Loads the index mapping from the defaults directory. Re-raises any
        connection/setup exception after flagging the client as offline;
        ``lastReconnectAttempt`` is updated in every case.
        """
        with open(Config().BASEDIR + '/defaults/elastic/mapping.json') as mapfile:
            self.mapping = json.loads(mapfile.read())
        try:
            self.es = elasticsearch.Elasticsearch(elasticURL, timeout=5, max_retries=1)
            self.status = self._ping()
            if self.status:
                self.esversion = semver.VersionInfo.parse(self.es.info()['version']['number'])
                self.logger.info("Elastic Version: " + str(self.esversion))
                self._initialize_indices()
                self.logger.info("Initialized Elasticsearch indices")
        except Exception:
            self.status = False
            raise
        finally:
            self.lastReconnectAttempt = datetime.utcnow()
        return

    def _initialize_indices(self):
        """Create any missing natlas indices and apply the loaded mapping."""
        for index in self.natlasIndices:
            if not self.es.indices.exists(index):
                self.es.indices.create(index)
                # Brief pause so the newly created index is ready before we
                # apply mappings below.
                time.sleep(2)
        for index in self.natlasIndices:
            if self.esversion.match(">=7.0.0"):
                # ES 7+ needs include_type_name=True when a doc_type is passed.
                self.es.indices.put_mapping(index=index, doc_type='_doc', body=self.mapping, include_type_name=True)
            else:
                self.es.indices.put_mapping(index=index, doc_type='_doc', body=self.mapping)

    def _ping(self):
        """Return True if the cluster answers a (traced) ping."""
        with self._new_trace_span(operation='ping'):
            return self.es.ping()

    def _attempt_reconnect(self):
        """Re-ping the cluster at most once every 30 seconds; return status."""
        now = datetime.utcnow()
        delta = now - self.lastReconnectAttempt
        if delta.seconds < 30:
            # Too soon since the last attempt; keep the cached status.
            return self.status
        else:
            self.status = self._ping()
            return self.status

    def _check_status(self):
        """Raise elasticsearch.ConnectionError unless the cluster is reachable."""
        if not self.status and not self._attempt_reconnect():
            raise elasticsearch.ConnectionError
        return self.status

    def get_collection(self, **kwargs):
        """Run a search; return ``(total_hits, iterable of _source dicts)``."""
        results = self.execute_search(**kwargs)
        if not results:
            return 0, []
        docsources = self.collate_source(results['hits']['hits'])
        return results['hits']['total'], docsources

    def get_single_host(self, **kwargs):
        """Run a search; return ``(total_hits, first hit's _source)`` or ``(0, None)``."""
        results = self.execute_search(**kwargs)
        if not results or results['hits']['total'] == 0:
            return 0, None
        return results['hits']['total'], results['hits']['hits'][0]['_source']

    def collate_source(self, documents):
        """Map raw hit documents to their ``_source`` payloads (lazy map object)."""
        return map(lambda doc: doc['_source'], documents)

    def execute_search(self, **kwargs):
        """Execute a traced search and return the raw response dict."""
        with self._new_trace_span(operation='search', **kwargs) as span:
            # rest_total_hits_as_int keeps hits.total an integer on ES 7+.
            results = self._execute_raw_query(self.es.search, doc_type='_doc', rest_total_hits_as_int=True, **kwargs)
            span.add_attribute('es.hits.total', results['hits']['total'])
            self._attach_shard_span_attrs(span, results)
            return results

    def execute_count(self, **kwargs):
        """Execute a traced count query; return the raw response, or 0 if empty."""
        results = None
        with self._new_trace_span(operation='count', **kwargs) as span:
            results = self._execute_raw_query(self.es.count, doc_type='_doc', **kwargs)
            self._attach_shard_span_attrs(span, results)
        if not results:
            return 0
        return results

    def execute_delete_by_query(self, **kwargs):
        """Execute a traced delete-by-query and return the raw response."""
        with self._new_trace_span(operation='delete_by', **kwargs) as span:
            results = self._execute_raw_query(self.es.delete_by_query, doc_type='_doc', **kwargs)
            self._attach_shard_span_attrs(span, results)
        return results

    def execute_index(self, **kwargs):
        """Index a document (traced) and return the raw response."""
        with self._new_trace_span(operation='index', **kwargs):
            results = self._execute_raw_query(self.es.index, doc_type='_doc', **kwargs)
        return results

    def _execute_raw_query(self, func, **kwargs):
        """Call ``func(**kwargs)`` after a connectivity check.

        Marks the client offline and re-raises on connection errors so the
        next call triggers the throttled reconnect logic.
        """
        self._check_status()
        try:
            return func(**kwargs)
        except elasticsearch.ConnectionError:
            self.status = False
            raise elasticsearch.ConnectionError

    def _new_trace_span(self, operation, **kwargs):
        """Build an OpenCensus CLIENT span for one Elasticsearch call.

        The span name gets an operation suffix, and index/query attributes
        are attached, only when the corresponding kwargs are present.
        """
        tracer = execution_context.get_opencensus_tracer()
        span_name = "elasticsearch"
        if 'index' in kwargs:
            span_name += '.' + operation
        span = tracer.span(name=span_name)
        span.span_kind = span_module.SpanKind.CLIENT
        if 'index' in kwargs:
            span.add_attribute('es.index', kwargs['index'])
        if 'body' in kwargs:
            span.add_attribute('es.query', kwargs['body'])
        return span

    def _attach_shard_span_attrs(self, span, results):
        """Record shard totals from a response on the given trace span."""
        span.add_attribute('es.shards.total', results['_shards']['total'])
        span.add_attribute('es.shards.successful', results['_shards']['successful'])
| true | true |
f7201983075ff6e117cd811c5af6d092bb3c77bd | 79,711 | py | Python | src/pretix/control/views/orders.py | joelbcastillo/pretix | 1005437c69d5fed2a0ea2525b41481b0952fe6f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/control/views/orders.py | joelbcastillo/pretix | 1005437c69d5fed2a0ea2525b41481b0952fe6f1 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-06-12T00:09:40.000Z | 2020-06-12T00:09:40.000Z | src/pretix/control/views/orders.py | joelbcastillo/pretix | 1005437c69d5fed2a0ea2525b41481b0952fe6f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import json
import logging
import mimetypes
import os
import re
from datetime import datetime, time, timedelta
from decimal import Decimal, DecimalException
from urllib.parse import urlencode
import vat_moss.id
from django.conf import settings
from django.contrib import messages
from django.core.files import File
from django.db import transaction
from django.db.models import (
Count, IntegerField, OuterRef, Prefetch, ProtectedError, Q, Subquery, Sum,
)
from django.forms import formset_factory
from django.http import (
FileResponse, Http404, HttpResponseNotAllowed, HttpResponseRedirect,
JsonResponse,
)
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils import formats
from django.utils.functional import cached_property
from django.utils.http import is_safe_url
from django.utils.timezone import make_aware, now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (
DetailView, FormView, ListView, TemplateView, View,
)
from i18nfield.strings import LazyI18nString
from pretix.base.channels import get_all_sales_channels
from pretix.base.email import get_email_context
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedFile, CachedTicket, Invoice, InvoiceAddress,
Item, ItemVariation, LogEntry, Order, QuestionAnswer, Quota,
generate_position_secret, generate_secret,
)
from pretix.base.models.orders import (
OrderFee, OrderPayment, OrderPosition, OrderRefund,
)
from pretix.base.models.tax import EU_COUNTRIES, cc_to_vat_prefix
from pretix.base.payment import PaymentException
from pretix.base.services import tickets
from pretix.base.services.export import export
from pretix.base.services.invoices import (
generate_cancellation, generate_invoice, invoice_pdf, invoice_pdf_task,
invoice_qualified, regenerate_invoice,
)
from pretix.base.services.locking import LockTimeoutException
from pretix.base.services.mail import SendMailException, render_mail
from pretix.base.services.orders import (
OrderChangeManager, OrderError, approve_order, cancel_order, deny_order,
extend_order, mark_order_expired, mark_order_refunded,
notify_user_changed_order,
)
from pretix.base.services.stats import order_overview
from pretix.base.services.tickets import generate
from pretix.base.signals import (
order_modified, register_data_exporters, register_ticket_outputs,
)
from pretix.base.templatetags.money import money_filter
from pretix.base.templatetags.rich_text import markdown_compile_email
from pretix.base.views.mixins import OrderQuestionsViewMixin
from pretix.base.views.tasks import AsyncAction
from pretix.control.forms.filter import (
EventOrderFilterForm, OverviewFilterForm, RefundFilterForm,
)
from pretix.control.forms.orders import (
CancelForm, CommentForm, ConfirmPaymentForm, ExporterForm, ExtendForm,
MarkPaidForm, OrderContactForm, OrderFeeChangeForm, OrderLocaleForm,
OrderMailForm, OrderPositionAddForm, OrderPositionAddFormset,
OrderPositionChangeForm, OrderRefundForm, OtherOperationsForm,
)
from pretix.control.permissions import EventPermissionRequiredMixin
from pretix.control.views import PaginationMixin
from pretix.helpers.safedownload import check_token
from pretix.presale.signals import question_form_fields
logger = logging.getLogger(__name__)
class OrderList(EventPermissionRequiredMixin, PaginationMixin, ListView):
    """Paginated, filterable list of all orders of the current event."""
    model = Order
    context_object_name = 'orders'
    template_name = 'pretixcontrol/orders/index.html'
    permission = 'can_view_orders'

    def get_queryset(self):
        # Restrict to this event; the invoice address is displayed in the
        # list, so fetch it in the same query.
        qs = Order.objects.filter(
            event=self.request.event
        ).select_related('invoice_address')
        if self.filter_form.is_valid():
            qs = self.filter_form.filter_qs(qs)
        return qs

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['filter_form'] = self.filter_form

        # Only compute these annotations for the current page (query optimization)
        s = OrderPosition.objects.filter(
            order=OuterRef('pk')
        ).order_by().values('order').annotate(k=Count('id')).values('k')
        annotated = {
            o['pk']: o
            for o in
            Order.annotate_overpayments(Order.objects).filter(
                pk__in=[o.pk for o in ctx['orders']]
            ).annotate(
                pcnt=Subquery(s, output_field=IntegerField())
            ).values(
                'pk', 'pcnt', 'is_overpaid', 'is_underpaid', 'is_pending_with_full_payment', 'has_external_refund',
                'has_pending_refund'
            )
        }

        # Copy the annotation values onto the page's order objects.
        for o in ctx['orders']:
            if o.pk not in annotated:
                continue
            o.pcnt = annotated.get(o.pk)['pcnt']
            o.is_overpaid = annotated.get(o.pk)['is_overpaid']
            o.is_underpaid = annotated.get(o.pk)['is_underpaid']
            o.is_pending_with_full_payment = annotated.get(o.pk)['is_pending_with_full_payment']
            o.has_external_refund = annotated.get(o.pk)['has_external_refund']
            o.has_pending_refund = annotated.get(o.pk)['has_pending_refund']

        if ctx['page_obj'].paginator.count < 1000:
            # Performance safeguard: only count positions if the data set is small
            ctx['sums'] = self.get_queryset().annotate(
                pcnt=Subquery(s, output_field=IntegerField())
            ).aggregate(
                s=Sum('total'), pc=Sum('pcnt'), c=Count('id')
            )
        else:
            ctx['sums'] = self.get_queryset().aggregate(s=Sum('total'), c=Count('id'))

        return ctx

    @cached_property
    def filter_form(self):
        return EventOrderFilterForm(data=self.request.GET, event=self.request.event)
class OrderView(EventPermissionRequiredMixin, DetailView):
    """Base class for single-order views.

    Resolves the order from the URL code and provides shared context and
    URL helpers for its many subclasses below.
    """
    context_object_name = 'order'
    model = Order

    def get_object(self, queryset=None):
        """Look up the order by its (case-insensitively matched) code."""
        try:
            return Order.objects.get(
                event=self.request.event,
                code=self.kwargs['code'].upper()
            )
        except Order.DoesNotExist:
            raise Http404()

    def _redirect_back(self):
        # Convenience redirect back to the order detail page.
        return redirect('control:event.order',
                        event=self.request.event.slug,
                        organizer=self.request.event.organizer.slug,
                        code=self.order.code)

    @cached_property
    def order(self):
        # Cached so repeated accesses within one request hit the DB only once.
        return self.get_object()

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # Invoice generation is offered when the order qualifies, the event's
        # settings allow it, and there is no active (non-canceled) invoice yet.
        ctx['can_generate_invoice'] = invoice_qualified(self.order) and (
            self.request.event.settings.invoice_generate in ('admin', 'user', 'paid', 'True')
        ) and (
            not self.order.invoices.exists()
            or (
                self.order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
                and self.order.invoices.filter(is_cancellation=True).count() >= self.order.invoices.filter(is_cancellation=False).count()
            )
        )
        return ctx

    def get_order_url(self):
        """Return the URL of this order's detail page in the control panel."""
        return reverse('control:event.order', kwargs={
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug,
            'code': self.order.code
        })
class OrderDetail(OrderView):
    """Detail page of one order: positions, payments, refunds and invoices."""
    template_name = 'pretixcontrol/order/index.html'
    permission = 'can_view_orders'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['items'] = self.get_items()
        ctx['event'] = self.request.event
        ctx['payments'] = self.order.payments.order_by('-created')
        ctx['refunds'] = self.order.refunds.select_related('payment').order_by('-created')
        # Let payment plugins render provider-specific detail HTML.
        for p in ctx['payments']:
            if p.payment_provider:
                p.html_info = (p.payment_provider.payment_control_render(self.request, p) or "").strip()
        for r in ctx['refunds']:
            if r.payment_provider:
                r.html_info = (r.payment_provider.refund_control_render(self.request, r) or "").strip()
        ctx['invoices'] = list(self.order.invoices.all().select_related('event'))
        ctx['comment_form'] = CommentForm(initial={
            'comment': self.order.comment,
            'checkin_attention': self.order.checkin_attention
        })
        # Fall back to the event's default locale if the order has none.
        ctx['display_locale'] = dict(settings.LANGUAGES)[self.object.locale or self.request.event.settings.locale]
        # A negative pending sum means the order is overpaid.
        ctx['overpaid'] = self.order.pending_sum * -1
        ctx['sales_channel'] = get_all_sales_channels().get(self.order.sales_channel)
        ctx['download_buttons'] = self.download_buttons
        return ctx

    @cached_property
    def download_buttons(self):
        """One download button definition per registered ticket output provider."""
        buttons = []

        responses = register_ticket_outputs.send(self.request.event)
        for receiver, response in responses:
            provider = response(self.request.event)
            buttons.append({
                'text': provider.download_button_text or 'Ticket',
                'icon': provider.download_button_icon or 'fa-download',
                'identifier': provider.identifier,
                'multi': provider.multi_download_enabled,
                'javascript_required': provider.javascript_required
            })
        return buttons

    def get_items(self):
        """Collect all positions of the order (with prefetched relations) plus totals."""
        queryset = self.object.all_positions

        cartpos = queryset.order_by(
            'item', 'variation'
        ).select_related(
            'item', 'variation', 'addon_to', 'tax_rule'
        ).prefetch_related(
            'item__questions', 'issued_gift_cards',
            Prefetch('answers', queryset=QuestionAnswer.objects.prefetch_related('options').select_related('question')),
            'checkins', 'checkins__list'
        ).order_by('positionid')

        positions = []
        for p in cartpos:
            # Allow plugins to contribute additional per-position fields.
            responses = question_form_fields.send(sender=self.request.event, position=p)
            p.additional_fields = []
            data = p.meta_info_data
            for r, response in sorted(responses, key=lambda r: str(r[0])):
                if response:
                    for key, value in response.items():
                        p.additional_fields.append({
                            'answer': data.get('question_form_data', {}).get(key),
                            'question': value.label
                        })
            p.has_questions = (
                p.additional_fields or
                (p.item.admission and self.request.event.settings.attendee_names_asked) or
                (p.item.admission and self.request.event.settings.attendee_emails_asked) or
                p.item.questions.all()
            )
            p.cache_answers()
            positions.append(p)

        positions.sort(key=lambda p: p.sort_key)

        return {
            'positions': positions,
            'raw': cartpos,
            'total': self.object.total,
            'fees': self.object.all_fees.all(),
            'net_total': self.object.net_total,
            'tax_total': self.object.tax_total,
        }
class OrderDownload(AsyncAction, OrderView):
    """Asynchronously generate and serve a ticket file.

    Handles both a single position ('position' in kwargs) and the whole
    order, delegating generation to the ``generate`` task via AsyncAction.
    """
    task = generate
    permission = 'can_view_orders'

    def get_success_url(self, value):
        return self.get_self_url()

    def get_error_url(self):
        return self.get_order_url()

    def get_self_url(self):
        return reverse('control:event.order.download.ticket', kwargs=self.kwargs)

    @cached_property
    def output(self):
        # Resolve the requested ticket output provider by its identifier;
        # returns None implicitly if no provider matches.
        responses = register_ticket_outputs.send(self.request.event)
        for receiver, response in responses:
            provider = response(self.request.event)
            if provider.identifier == self.kwargs.get('output'):
                return provider

    @cached_property
    def order_position(self):
        try:
            return self.order.positions.get(pk=self.kwargs.get('position'))
        except OrderPosition.DoesNotExist:
            return None

    def get(self, request, *args, **kwargs):
        if 'async_id' in request.GET and settings.HAS_CELERY:
            # Poll the state of a previously started generation task.
            return self.get_result(request)
        # Serve an already generated file directly if one exists.
        ct = self.get_last_ct()
        if ct:
            return self.success(ct)
        return self.http_method_not_allowed(request)

    def post(self, request, *args, **kwargs):
        if not self.output:
            return self.error(_('You requested an invalid ticket output type.'))
        if not self.order_position:
            raise Http404(_('Unknown order code or not authorized to access this order.'))
        if 'position' in kwargs and not self.order_position.generate_ticket:
            return self.error(_('Ticket download is not enabled for this product.'))
        ct = self.get_last_ct()
        if ct:
            return self.success(ct)
        # Kick off asynchronous ticket generation for position or whole order.
        return self.do('orderposition' if 'position' in kwargs else 'order',
                       self.order_position.pk if 'position' in kwargs else self.order.pk,
                       self.output.identifier)

    def get_success_message(self, value):
        return ""

    def success(self, value):
        """Serve the generated file, a redirect, or an AJAX status payload."""
        if "ajax" in self.request.POST or "ajax" in self.request.GET:
            return JsonResponse({
                'ready': True,
                'success': True,
                'redirect': self.get_success_url(value),
                'message': str(self.get_success_message(value))
            })
        if isinstance(value, CachedTicket):
            if value.type == 'text/uri-list':
                # The cached "file" contains only a URL to redirect to.
                resp = HttpResponseRedirect(value.file.file.read())
                return resp
            else:
                resp = FileResponse(value.file.file, content_type=value.type)
                resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}{}"'.format(
                    self.request.event.slug.upper(), self.order.code, self.order_position.positionid,
                    self.output.identifier, value.extension
                )
                return resp
        elif isinstance(value, CachedCombinedTicket):
            resp = FileResponse(value.file.file, content_type=value.type)
            resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}{}"'.format(
                self.request.event.slug.upper(), self.order.code, self.output.identifier, value.extension
            )
            return resp
        else:
            return redirect(self.get_self_url())

    def get_last_ct(self):
        """Return the most recent cached ticket file for this request, or None."""
        if 'position' in self.kwargs:
            ct = CachedTicket.objects.filter(
                order_position=self.order_position, provider=self.output.identifier, file__isnull=False
            ).last()
        else:
            ct = CachedCombinedTicket.objects.filter(
                order=self.order, provider=self.output.identifier, file__isnull=False
            ).last()
        if not ct or not ct.file:
            return None
        return ct
class OrderComment(OrderView):
    """Update an order's internal comment and its check-in attention flag."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        form = CommentForm(self.request.POST)
        if not form.is_valid():
            messages.error(self.request, _('Could not update the comment.'))
            return redirect(self.get_order_url())

        # Only log fields that actually changed.
        new_comment = form.cleaned_data.get('comment')
        if new_comment != self.order.comment:
            self.order.comment = new_comment
            self.order.log_action(
                'pretix.event.order.comment', user=self.request.user,
                data={'new_comment': new_comment}
            )

        new_attention = form.cleaned_data.get('checkin_attention')
        if new_attention != self.order.checkin_attention:
            self.order.checkin_attention = new_attention
            self.order.log_action(
                'pretix.event.order.checkin_attention', user=self.request.user,
                data={'new_value': new_attention}
            )

        self.order.save(update_fields=['checkin_attention', 'comment'])
        messages.success(self.request, _('The comment has been updated.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        # Comments may only be changed via POST.
        return HttpResponseNotAllowed(['POST'])
class OrderApprove(OrderView):
    """Approve an order that is waiting for manual approval."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        if not self.order.require_approval:
            # Nothing to approve; just return to the order page.
            return redirect(self.get_order_url())
        try:
            approve_order(self.order, user=self.request.user)
        except OrderError as e:
            messages.error(self.request, str(e))
        else:
            messages.success(self.request, _('The order has been approved.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        # Confirmation page shown before the approval is POSTed.
        context = {'order': self.order}
        return render(self.request, 'pretixcontrol/order/approve.html', context)
class OrderDelete(OrderView):
    """Permanently delete an order; only allowed for test-mode orders."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        if self.order.testmode:
            try:
                with transaction.atomic():
                    self.order.gracefully_delete(user=self.request.user)
                messages.success(self.request, _('The order has been deleted.'))
                return redirect(reverse('control:event.orders', kwargs={
                    'event': self.request.event.slug,
                    'organizer': self.request.organizer.slug,
                }))
            except ProtectedError:
                # Related objects (e.g. created by plugins) can block deletion;
                # re-render the confirmation page with an error message.
                messages.error(self.request, _('The order could not be deleted as some constraints (e.g. data created '
                                               'by plug-ins) do not allow it.'))
                return self.get(self.request, *self.args, **self.kwargs)
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        if not self.order.testmode:
            messages.error(self.request, _('Only orders created in test mode can be deleted.'))
            return redirect(self.get_order_url())
        return render(self.request, 'pretixcontrol/order/delete.html', {
            'order': self.order,
        })
class OrderDeny(OrderView):
    """Deny an order that requires approval, which cancels it."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        if not self.order.require_approval:
            return redirect(self.get_order_url())
        comment = self.request.POST.get('comment')
        send_mail = self.request.POST.get('send_email') == 'on'
        try:
            deny_order(self.order, user=self.request.user,
                       comment=comment, send_mail=send_mail)
        except OrderError as e:
            messages.error(self.request, str(e))
        else:
            messages.success(self.request, _('The order has been denied and is therefore now canceled.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        # Confirmation page shown before the denial is POSTed.
        return render(self.request, 'pretixcontrol/order/deny.html', {'order': self.order})
class OrderPaymentCancel(OrderView):
    """Cancel a single created/pending payment through its payment provider."""
    permission = 'can_change_orders'

    @cached_property
    def payment(self):
        return get_object_or_404(self.order.payments, pk=self.kwargs['payment'])

    def post(self, *args, **kwargs):
        if self.payment.state in (OrderPayment.PAYMENT_STATE_CREATED, OrderPayment.PAYMENT_STATE_PENDING):
            try:
                # Cancel at the provider and log in one atomic step.
                with transaction.atomic():
                    self.payment.payment_provider.cancel_payment(self.payment)
                    self.order.log_action('pretix.event.order.payment.canceled', {
                        'local_id': self.payment.local_id,
                        'provider': self.payment.provider,
                    }, user=self.request.user if self.request.user.is_authenticated else None)
            except PaymentException as e:
                # Record the failure in the order log before surfacing it.
                self.order.log_action(
                    'pretix.event.order.payment.canceled.failed',
                    {
                        'local_id': self.payment.local_id,
                        'provider': self.payment.provider,
                        'error': str(e)
                    },
                    user=self.request.user if self.request.user.is_authenticated else None,
                )
                messages.error(self.request, str(e))
            else:
                messages.success(self.request, _('This payment has been canceled.'))
        else:
            messages.error(self.request, _('This payment can not be canceled at the moment.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/pay_cancel.html', {
            'order': self.order,
        })
class OrderRefundCancel(OrderView):
    """Mark a not-yet-completed refund as canceled."""
    permission = 'can_change_orders'

    @cached_property
    def refund(self):
        return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])

    def post(self, *args, **kwargs):
        if self.refund.state in (OrderRefund.REFUND_STATE_CREATED, OrderRefund.REFUND_STATE_TRANSIT,
                                 OrderRefund.REFUND_STATE_EXTERNAL):
            with transaction.atomic():
                self.refund.state = OrderRefund.REFUND_STATE_CANCELED
                self.refund.save()
                self.order.log_action('pretix.event.order.refund.canceled', {
                    'local_id': self.refund.local_id,
                    'provider': self.refund.provider,
                }, user=self.request.user)
            messages.success(self.request, _('The refund has been canceled.'))
        else:
            messages.error(self.request, _('This refund can not be canceled at the moment.'))
        # Honor a safe "next" redirect target if one was supplied.
        if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
            return redirect(self.request.GET.get("next"))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/refund_cancel.html', {
            'order': self.order,
        })
class OrderRefundProcess(OrderView):
    """Acknowledge an externally triggered refund and adjust the order state."""
    permission = 'can_change_orders'

    @cached_property
    def refund(self):
        return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])

    def post(self, *args, **kwargs):
        if self.refund.state == OrderRefund.REFUND_STATE_EXTERNAL:
            self.refund.done(user=self.request.user)
            if self.request.POST.get("action") == "r" and self.order.status != Order.STATUS_CANCELED:
                # Action "r": cancel the order alongside processing the refund.
                mark_order_refunded(self.order, user=self.request.user)
            elif not (self.order.status == Order.STATUS_PAID and self.order.pending_sum <= 0):
                # Otherwise re-open the order as pending with a fresh deadline.
                self.order.status = Order.STATUS_PENDING
                self.order.set_expires(
                    now(),
                    self.order.event.subevents.filter(
                        id__in=self.order.positions.values_list('subevent_id', flat=True))
                )
                self.order.save(update_fields=['status', 'expires'])
            messages.success(self.request, _('The refund has been processed.'))
        else:
            messages.error(self.request, _('This refund can not be processed at the moment.'))
        # Honor a safe "next" redirect target if one was supplied.
        if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
            return redirect(self.request.GET.get("next"))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/refund_process.html', {
            'order': self.order,
            'refund': self.refund,
            # What the pending sum will look like once this refund is counted.
            'pending_sum': self.order.pending_sum + self.refund.amount,
            'propose_cancel': self.order.pending_sum + self.refund.amount >= self.order.total
        })
class OrderRefundDone(OrderView):
    """Mark a created or in-transit refund as completed."""
    permission = 'can_change_orders'

    @cached_property
    def refund(self):
        return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])

    def post(self, *args, **kwargs):
        completable_states = (OrderRefund.REFUND_STATE_CREATED, OrderRefund.REFUND_STATE_TRANSIT)
        if self.refund.state in completable_states:
            self.refund.done(user=self.request.user)
            messages.success(self.request, _('The refund has been marked as done.'))
        else:
            messages.error(self.request, _('This refund can not be processed at the moment.'))
        # Honor a safe "next" redirect target if one was supplied.
        nxt = self.request.GET.get("next")
        if "next" in self.request.GET and is_safe_url(nxt, allowed_hosts=None):
            return redirect(nxt)
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/refund_done.html', {'order': self.order})
class OrderPaymentConfirm(OrderView):
    """Manually confirm a created/pending payment as completed."""
    permission = 'can_change_orders'

    @cached_property
    def payment(self):
        return get_object_or_404(self.order.payments, pk=self.kwargs['payment'])

    @cached_property
    def mark_paid_form(self):
        return ConfirmPaymentForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None,
        )

    def post(self, *args, **kwargs):
        if self.payment.state in (OrderPayment.PAYMENT_STATE_CREATED, OrderPayment.PAYMENT_STATE_PENDING):
            if not self.mark_paid_form.is_valid():
                # Re-render the confirmation form with validation errors.
                return render(self.request, 'pretixcontrol/order/pay_complete.html', {
                    'form': self.mark_paid_form,
                    'order': self.order,
                })

            try:
                # "force" allows confirming even if the quota is exceeded.
                self.payment.confirm(user=self.request.user,
                                     count_waitinglist=False,
                                     force=self.mark_paid_form.cleaned_data.get('force', False))
            except Quota.QuotaExceededException as e:
                messages.error(self.request, str(e))
            except PaymentException as e:
                messages.error(self.request, str(e))
            except SendMailException:
                # Payment succeeded; only the notification email failed.
                messages.warning(self.request,
                                 _('The payment has been marked as complete, but we were unable to send a '
                                   'confirmation mail.'))
            else:
                messages.success(self.request, _('The payment has been marked as complete.'))
        else:
            messages.error(self.request, _('This payment can not be confirmed at the moment.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/pay_complete.html', {
            'form': self.mark_paid_form,
            'order': self.order,
        })
class OrderRefundView(OrderView):
    """Two-step refund wizard.

    Step 1 (``start_form``): choose full/partial amount and what should
    happen to the order afterwards. Step 2 (``choose_form``): distribute
    the amount over payment methods, gift cards, offsetting and manual
    refunds, then execute them.
    """
    permission = 'can_change_orders'

    @cached_property
    def start_form(self):
        # Step 1 form; also accepts GET data so links can pre-fill it.
        return OrderRefundForm(
            order=self.order,
            data=self.request.POST if self.request.method == "POST" else (
                self.request.GET if "start-action" in self.request.GET else None
            ),
            prefix='start',
            initial={
                'partial_amount': self.order.payment_refund_sum,
                'action': (
                    'mark_pending' if self.order.status == Order.STATUS_PAID
                    else 'do_nothing'
                )
            }
        )

    def choose_form(self):
        """Render step 2 and, on 'perform', validate and execute the refunds."""
        payments = list(self.order.payments.filter(state=OrderPayment.PAYMENT_STATE_CONFIRMED))
        if self.start_form.cleaned_data.get('mode') == 'full':
            full_refund = self.order.payment_refund_sum
        else:
            full_refund = self.start_form.cleaned_data.get('partial_amount')

        # Propose how to split the amount over the refundable payments.
        proposals = self.order.propose_auto_refunds(full_refund, payments=payments)
        to_refund = full_refund - sum(proposals.values())
        for p in payments:
            p.propose_refund = proposals.get(p, 0)

        if 'perform' in self.request.POST:
            refund_selected = Decimal('0.00')
            refunds = []
            is_valid = True

            # Manual refund (money moved outside of pretix, e.g. cash).
            manual_value = self.request.POST.get('refund-manual', '0') or '0'
            manual_value = formats.sanitize_separators(manual_value)
            try:
                manual_value = Decimal(manual_value)
            except (DecimalException, TypeError):
                messages.error(self.request, _('You entered an invalid number.'))
                is_valid = False
            else:
                refund_selected += manual_value
                if manual_value:
                    refunds.append(OrderRefund(
                        order=self.order,
                        payment=None,
                        source=OrderRefund.REFUND_SOURCE_ADMIN,
                        state=(
                            OrderRefund.REFUND_STATE_DONE
                            if self.request.POST.get('manual_state') == 'done'
                            else OrderRefund.REFUND_STATE_CREATED
                        ),
                        amount=manual_value,
                        provider='manual'
                    ))

            # Refund onto a newly issued gift card.
            giftcard_value = self.request.POST.get('refund-new-giftcard', '0') or '0'
            giftcard_value = formats.sanitize_separators(giftcard_value)
            try:
                giftcard_value = Decimal(giftcard_value)
            except (DecimalException, TypeError):
                messages.error(self.request, _('You entered an invalid number.'))
                is_valid = False
            else:
                if giftcard_value:
                    refund_selected += giftcard_value
                    giftcard = self.request.organizer.issued_gift_cards.create(
                        currency=self.request.event.currency,
                        testmode=self.order.testmode
                    )
                    giftcard.log_action('pretix.giftcards.created', user=self.request.user, data={})
                    refunds.append(OrderRefund(
                        order=self.order,
                        payment=None,
                        source=OrderRefund.REFUND_SOURCE_ADMIN,
                        state=OrderRefund.REFUND_STATE_CREATED,
                        execution_date=now(),
                        amount=giftcard_value,
                        provider='giftcard',
                        info=json.dumps({
                            'gift_card': giftcard.pk
                        })
                    ))

            # Refund by offsetting against another order of this organizer.
            offsetting_value = self.request.POST.get('refund-offsetting', '0') or '0'
            offsetting_value = formats.sanitize_separators(offsetting_value)
            try:
                offsetting_value = Decimal(offsetting_value)
            except (DecimalException, TypeError):
                messages.error(self.request, _('You entered an invalid number.'))
                is_valid = False
            else:
                if offsetting_value:
                    refund_selected += offsetting_value
                    try:
                        order = Order.objects.get(code=self.request.POST.get('order-offsetting'),
                                                  event__organizer=self.request.organizer)
                    except Order.DoesNotExist:
                        messages.error(self.request, _('You entered an order that could not be found.'))
                        is_valid = False
                    else:
                        refunds.append(OrderRefund(
                            order=self.order,
                            payment=None,
                            source=OrderRefund.REFUND_SOURCE_ADMIN,
                            state=OrderRefund.REFUND_STATE_DONE,
                            execution_date=now(),
                            amount=offsetting_value,
                            provider='offsetting',
                            info=json.dumps({
                                'orders': [order.code]
                            })
                        ))

            # Refunds through the original payment providers.
            for p in payments:
                value = self.request.POST.get('refund-{}'.format(p.pk), '0') or '0'
                value = formats.sanitize_separators(value)
                try:
                    value = Decimal(value)
                except (DecimalException, TypeError):
                    messages.error(self.request, _('You entered an invalid number.'))
                    is_valid = False
                else:
                    if value == 0:
                        continue
                    elif value > p.available_amount:
                        messages.error(self.request, _('You can not refund more than the amount of a '
                                                       'payment that is not yet refunded.'))
                        is_valid = False
                        break
                    elif value != p.amount and not p.partial_refund_possible:
                        messages.error(self.request, _('You selected a partial refund for a payment method that '
                                                       'only supports full refunds.'))
                        is_valid = False
                        break
                    elif (p.partial_refund_possible or p.full_refund_possible) and value > 0:
                        refund_selected += value
                        refunds.append(OrderRefund(
                            order=self.order,
                            payment=p,
                            source=OrderRefund.REFUND_SOURCE_ADMIN,
                            state=OrderRefund.REFUND_STATE_CREATED,
                            amount=value,
                            provider=p.provider
                        ))

            any_success = False
            # Only execute if the chosen split matches the total exactly.
            if refund_selected == full_refund and is_valid:
                for r in refunds:
                    r.save()
                    self.order.log_action('pretix.event.order.refund.created', {
                        'local_id': r.local_id,
                        'provider': r.provider,
                    }, user=self.request.user)
                    if r.payment or r.provider == "offsetting" or r.provider == "giftcard":
                        try:
                            r.payment_provider.execute_refund(r)
                        except PaymentException as e:
                            r.state = OrderRefund.REFUND_STATE_FAILED
                            r.save()
                            messages.error(self.request, _('One of the refunds failed to be processed. You should '
                                                           'retry to refund in a different way. The error message '
                                                           'was: {}').format(str(e)))
                        else:
                            any_success = True
                            if r.state == OrderRefund.REFUND_STATE_DONE:
                                messages.success(self.request, _('A refund of {} has been processed.').format(
                                    money_filter(r.amount, self.request.event.currency)
                                ))
                            elif r.state == OrderRefund.REFUND_STATE_CREATED:
                                messages.info(self.request, _('A refund of {} has been saved, but not yet '
                                                              'fully executed. You can mark it as complete '
                                                              'below.').format(
                                    money_filter(r.amount, self.request.event.currency)
                                ))
                    else:
                        # Manual refunds have no provider to execute.
                        any_success = True

                if any_success:
                    if self.start_form.cleaned_data.get('action') == 'mark_refunded':
                        mark_order_refunded(self.order, user=self.request.user)
                    elif self.start_form.cleaned_data.get('action') == 'mark_pending':
                        # Only move back to pending if the order is not fully paid anyway.
                        if not (self.order.status == Order.STATUS_PAID and self.order.pending_sum <= 0):
                            self.order.status = Order.STATUS_PENDING
                            self.order.set_expires(
                                now(),
                                self.order.event.subevents.filter(
                                    id__in=self.order.positions.values_list('subevent_id', flat=True))
                            )
                            self.order.save(update_fields=['status', 'expires'])

                    if giftcard_value and self.order.email:
                        # Offer to send the new gift card code to the customer.
                        messages.success(self.request, _('A new gift card was created. You can now send the user their '
                                                         'gift card code.'))
                        return redirect(reverse('control:event.order.sendmail', kwargs={
                            'event': self.request.event.slug,
                            'organizer': self.request.event.organizer.slug,
                            'code': self.order.code
                        }) + '?' + urlencode({
                            'subject': _('Your gift card code'),
                            'message': _('Hello,\n\nwe have refunded you {amount} for your order.\n\nYou can use the gift '
                                         'card code {giftcard} to pay for future ticket purchases in our shop.\n\n'
                                         'Your {event} team').format(
                                event="{event}",
                                amount=money_filter(giftcard_value, self.request.event.currency),
                                giftcard=giftcard.secret,
                            )
                        }))

                return redirect(self.get_order_url())
            else:
                messages.error(self.request, _('The refunds you selected do not match the selected total refund '
                                               'amount.'))

        return render(self.request, 'pretixcontrol/order/refund_choose.html', {
            'payments': payments,
            'remainder': to_refund,
            'order': self.order,
            'partial_amount': (
                self.request.POST.get('start-partial_amount') if self.request.method == 'POST'
                else self.request.GET.get('start-partial_amount')
            ),
            'start_form': self.start_form
        })

    def post(self, *args, **kwargs):
        if self.start_form.is_valid():
            return self.choose_form()
        return self.get(*args, **kwargs)

    def get(self, *args, **kwargs):
        # A valid (GET-prefilled) start form skips straight to step 2.
        if self.start_form.is_valid():
            return self.choose_form()
        return render(self.request, 'pretixcontrol/order/refund_start.html', {
            'form': self.start_form,
            'order': self.order,
        })
class OrderTransition(OrderView):
    """Change an order's status: mark it paid ('p'), canceled ('c') or expired ('e').

    GET renders a confirmation form for the paid/canceled transitions; POST
    performs the transition selected via the ``status`` parameter.
    """
    permission = 'can_change_orders'

    @cached_property
    def mark_paid_form(self):
        # Bound only on POST so that GET renders an unbound confirmation form.
        return MarkPaidForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None,
        )

    @cached_property
    def mark_canceled_form(self):
        return CancelForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None,
        )

    def post(self, *args, **kwargs):
        to = self.request.POST.get('status', '')
        if self.order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and to == 'p' and self.mark_paid_form.is_valid():
            ps = self.mark_paid_form.cleaned_data['amount']
            try:
                # Reuse an existing open manual payment over exactly this amount…
                p = self.order.payments.get(
                    state__in=(OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED),
                    provider='manual',
                    amount=ps
                )
            except OrderPayment.DoesNotExist:
                # …otherwise cancel all other open payments and create a fresh
                # manual payment for the amount entered in the form.
                for p in self.order.payments.filter(state__in=(OrderPayment.PAYMENT_STATE_PENDING,
                                                               OrderPayment.PAYMENT_STATE_CREATED)):
                    try:
                        with transaction.atomic():
                            p.payment_provider.cancel_payment(p)
                            self.order.log_action('pretix.event.order.payment.canceled', {
                                'local_id': p.local_id,
                                'provider': p.provider,
                            }, user=self.request.user if self.request.user.is_authenticated else None)
                    except PaymentException as e:
                        # A failed cancellation is logged but does not abort marking as paid.
                        self.order.log_action(
                            'pretix.event.order.payment.canceled.failed',
                            {
                                'local_id': p.local_id,
                                'provider': p.provider,
                                'error': str(e)
                            },
                            user=self.request.user if self.request.user.is_authenticated else None,
                        )
                p = self.order.payments.create(
                    state=OrderPayment.PAYMENT_STATE_CREATED,
                    provider='manual',
                    amount=ps,
                    fee=None
                )
            payment_date = None
            # Only pass an explicit payment date if it differs from today; it is
            # stored as an aware datetime at midnight in the event's timezone.
            if self.mark_paid_form.cleaned_data['payment_date'] != now().date():
                payment_date = make_aware(datetime.combine(
                    self.mark_paid_form.cleaned_data['payment_date'],
                    time(hour=0, minute=0, second=0)
                ), self.order.event.timezone)
            try:
                p.confirm(user=self.request.user, count_waitinglist=False, payment_date=payment_date,
                          force=self.mark_paid_form.cleaned_data.get('force', False))
            except Quota.QuotaExceededException as e:
                # Confirming would over-book a quota: keep the failed payment for the audit trail.
                p.state = OrderPayment.PAYMENT_STATE_FAILED
                p.save()
                self.order.log_action('pretix.event.order.payment.failed', {
                    'local_id': p.local_id,
                    'provider': p.provider,
                    'message': str(e)
                })
                messages.error(self.request, str(e))
            except PaymentException as e:
                p.state = OrderPayment.PAYMENT_STATE_FAILED
                p.save()
                self.order.log_action('pretix.event.order.payment.failed', {
                    'local_id': p.local_id,
                    'provider': p.provider,
                    'message': str(e)
                })
                messages.error(self.request, str(e))
            except SendMailException:
                # The payment itself succeeded; only the confirmation email failed.
                messages.warning(self.request, _('The order has been marked as paid, but we were unable to send a '
                                                 'confirmation mail.'))
            else:
                messages.success(self.request, _('The payment has been created successfully.'))
        elif self.order.cancel_allowed() and to == 'c' and self.mark_canceled_form.is_valid():
            try:
                cancel_order(self.order, user=self.request.user,
                             send_mail=self.mark_canceled_form.cleaned_data['send_email'],
                             cancellation_fee=self.mark_canceled_form.cleaned_data.get('cancellation_fee'))
            except OrderError as e:
                messages.error(self.request, str(e))
            else:
                self.order.refresh_from_db()
                # A negative pending sum means money must flow back to the
                # customer: send the user straight into the refund flow.
                if self.order.pending_sum < 0:
                    messages.success(self.request, _('The order has been canceled. You can now select how you want to '
                                                     'transfer the money back to the user.'))
                    return redirect(reverse('control:event.order.refunds.start', kwargs={
                        'event': self.request.event.slug,
                        'organizer': self.request.event.organizer.slug,
                        'code': self.order.code
                    }) + '?start-action=do_nothing&start-mode=partial&start-partial_amount={}'.format(
                        self.order.pending_sum * -1
                    ))
                messages.success(self.request, _('The order has been canceled.'))
        elif self.order.status == Order.STATUS_PENDING and to == 'e':
            mark_order_expired(self.order, user=self.request.user)
            messages.success(self.request, _('The order has been marked as expired.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        # Only the paid/canceled transitions have a confirmation page; everything
        # else must be POSTed directly.
        to = self.request.GET.get('status', '')
        if self.order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and to == 'p':
            return render(self.request, 'pretixcontrol/order/pay.html', {
                'form': self.mark_paid_form,
                'order': self.order,
            })
        elif self.order.cancel_allowed() and to == 'c':
            return render(self.request, 'pretixcontrol/order/cancel.html', {
                'form': self.mark_canceled_form,
                'order': self.order,
            })
        else:
            return HttpResponseNotAllowed(['POST'])
class OrderInvoiceCreate(OrderView):
    """Manually generate an invoice for an order (POST only)."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        # An order "has an invoice" unless every previous invoice of a still
        # paid/pending order has been canceled again (cancellations >= invoices).
        has_inv = self.order.invoices.exists() and not (
            self.order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
            and self.order.invoices.filter(is_cancellation=True).count() >= self.order.invoices.filter(is_cancellation=False).count()
        )
        if self.request.event.settings.get('invoice_generate') not in ('admin', 'user', 'paid', 'True') or not invoice_qualified(self.order):
            messages.error(self.request, _('You cannot generate an invoice for this order.'))
        elif has_inv:
            messages.error(self.request, _('An invoice for this order already exists.'))
        else:
            inv = generate_invoice(self.order)
            self.order.log_action('pretix.event.order.invoice.generated', user=self.request.user, data={
                'invoice': inv.pk
            })
            messages.success(self.request, _('The invoice has been generated.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return HttpResponseNotAllowed(['POST'])
class OrderCheckVATID(OrderView):
    """Validate the VAT ID of an order's invoice address against the EU VIES service."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        try:
            ia = self.order.invoice_address
        except InvoiceAddress.DoesNotExist:
            messages.error(self.request, _('No VAT ID specified.'))
            return redirect(self.get_order_url())
        else:
            # Pre-flight checks before hitting the remote validation service.
            if not ia.vat_id:
                messages.error(self.request, _('No VAT ID specified.'))
                return redirect(self.get_order_url())

            if not ia.country:
                messages.error(self.request, _('No country specified.'))
                return redirect(self.get_order_url())

            if str(ia.country) not in EU_COUNTRIES:
                messages.error(self.request, _('VAT ID could not be checked since a non-EU country has been '
                                               'specified.'))
                return redirect(self.get_order_url())

            # The two-letter VAT prefix must match the invoice country
            # (e.g. Greece uses "EL", not "GR").
            if ia.vat_id[:2] != cc_to_vat_prefix(str(ia.country)):
                messages.error(self.request, _('Your VAT ID does not match the selected country.'))
                return redirect(self.get_order_url())

            try:
                result = vat_moss.id.validate(ia.vat_id)
                if result:
                    # Persist the normalized form returned by the service and
                    # flag the ID as validated.
                    country_code, normalized_id, company_name = result
                    ia.vat_id_validated = True
                    ia.vat_id = normalized_id
                    ia.save()
            except vat_moss.errors.InvalidError:
                messages.error(self.request, _('This VAT ID is not valid.'))
            except vat_moss.errors.WebServiceUnavailableError:
                logger.exception('VAT ID checking failed for country {}'.format(ia.country))
                messages.error(self.request, _('The VAT ID could not be checked, as the VAT checking service of '
                                               'the country is currently not available.'))
            else:
                messages.success(self.request, _('This VAT ID is valid.'))
            return redirect(self.get_order_url())

    def get(self, *args, **kwargs):  # NOQA
        return HttpResponseNotAllowed(['POST'])
class OrderInvoiceRegenerate(OrderView):
    """Re-render an existing invoice PDF, keeping its invoice number."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        try:
            inv = self.order.invoices.get(pk=kwargs.get('id'))
        except Invoice.DoesNotExist:
            messages.error(self.request, _('Unknown invoice.'))
        else:
            # Canceled or shredded invoices must never be re-rendered.
            if inv.canceled:
                messages.error(self.request, _('The invoice has already been canceled.'))
            elif inv.shredded:
                messages.error(self.request, _('The invoice has been cleaned of personal data.'))
            else:
                inv = regenerate_invoice(inv)
                self.order.log_action('pretix.event.order.invoice.regenerated', user=self.request.user, data={
                    'invoice': inv.pk
                })
                messages.success(self.request, _('The invoice has been regenerated.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):  # NOQA
        return HttpResponseNotAllowed(['POST'])
class OrderInvoiceReissue(OrderView):
    """Cancel an invoice and issue a replacement with a new invoice number."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        try:
            inv = self.order.invoices.get(pk=kwargs.get('id'))
        except Invoice.DoesNotExist:
            messages.error(self.request, _('Unknown invoice.'))
        else:
            if inv.canceled:
                messages.error(self.request, _('The invoice has already been canceled.'))
            elif inv.shredded:
                messages.error(self.request, _('The invoice has been cleaned of personal data.'))
            else:
                c = generate_cancellation(inv)
                # For canceled orders only the cancellation document is issued;
                # otherwise a fresh invoice replaces the old one.
                if self.order.status != Order.STATUS_CANCELED:
                    inv = generate_invoice(self.order)
                else:
                    inv = c
                self.order.log_action('pretix.event.order.invoice.reissued', user=self.request.user, data={
                    'invoice': inv.pk
                })
                messages.success(self.request, _('The invoice has been reissued.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):  # NOQA
        return HttpResponseNotAllowed(['POST'])
class OrderResendLink(OrderView):
    """Re-send the order link (or a single ticket link) email to the customer."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        try:
            if 'position' in kwargs:
                # Resend only the link for one specific ticket position.
                position = get_object_or_404(self.order.positions, pk=kwargs['position'])
                position.resend_link(user=self.request.user)
            else:
                self.order.resend_link(user=self.request.user)
        except SendMailException:
            messages.error(self.request, _('There was an error sending the mail. Please try again later.'))
        else:
            messages.success(self.request, _('The email has been queued to be sent.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return HttpResponseNotAllowed(['POST'])
class InvoiceDownload(EventPermissionRequiredMixin, View):
    """Stream an invoice PDF, triggering (re-)generation of the file if needed."""
    permission = 'can_view_orders'

    def get_order_url(self):
        return reverse('control:event.order', kwargs={
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug,
            'code': self.invoice.order.code
        })

    def get(self, request, *args, **kwargs):
        try:
            self.invoice = Invoice.objects.get(
                event=self.request.event,
                id=self.kwargs['invoice']
            )
        except Invoice.DoesNotExist:
            raise Http404(_('This invoice has not been found'))

        if not self.invoice.file:
            # Kick off PDF generation and re-fetch to pick up the new file field.
            invoice_pdf(self.invoice.pk)
            self.invoice = Invoice.objects.get(pk=self.invoice.pk)

        if self.invoice.shredded:
            messages.error(request, _('The invoice file is no longer stored on the server.'))
            return redirect(self.get_order_url())

        if not self.invoice.file:
            # This happens if we have celery installed and the file will be generated in the background
            messages.warning(request, _('The invoice file has not yet been generated, we will generate it for you '
                                        'now. Please try again in a few seconds.'))
            return redirect(self.get_order_url())

        try:
            resp = FileResponse(self.invoice.file.file, content_type='application/pdf')
        except FileNotFoundError:
            # DB row references a file missing from storage: regenerate
            # synchronously, then retry the request once.
            invoice_pdf_task.apply(args=(self.invoice.pk,))
            return self.get(request, *args, **kwargs)
        resp['Content-Disposition'] = 'inline; filename="{}.pdf"'.format(self.invoice.number)
        resp._csp_ignore = True  # Some browser's PDF readers do not work with CSP
        return resp
class OrderExtend(OrderView):
    """Change the payment deadline ("expires") of a pending or expired order.

    GET renders the extension form; POST applies the new deadline via
    :func:`extend_order`, optionally ignoring quotas.
    """
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        if self.form.is_valid():
            try:
                extend_order(
                    self.order,
                    new_date=self.form.cleaned_data.get('expires'),
                    force=self.form.cleaned_data.get('quota_ignore', False),
                    user=self.request.user
                )
                messages.success(self.request, _('The payment term has been changed.'))
            except OrderError as e:
                # Domain-level failure (e.g. sold-out quota): stay on this page.
                messages.error(self.request, str(e))
                return self._redirect_here()
            except LockTimeoutException:
                messages.error(self.request, _('We were not able to process the request completely as the '
                                               'server was too busy.'))
            return self._redirect_back()
        else:
            return self.get(*args, **kwargs)

    def dispatch(self, request, *args, **kwargs):
        # Extending only makes sense while payment is still outstanding.
        if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_EXPIRED):
            messages.error(self.request, _('This action is only allowed for pending orders.'))
            return self._redirect_back()
        # Bug fix: this previously read ``super().dispatch(request, *kwargs, **kwargs)``,
        # unpacking the URL kwarg *keys* as positional arguments.
        return super().dispatch(request, *args, **kwargs)

    def _redirect_here(self):
        # Redirect back to this very form (used after recoverable errors).
        return redirect('control:event.order.extend',
                        event=self.request.event.slug,
                        organizer=self.request.event.organizer.slug,
                        code=self.order.code)

    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/extend.html', {
            'order': self.order,
            'form': self.form,
        })

    @cached_property
    def form(self):
        # Bound only on POST so GET renders the current expiry date unbound.
        return ExtendForm(instance=self.order,
                          data=self.request.POST if self.request.method == "POST" else None)
class OrderChange(OrderView):
    """Modify an order's composition: add/cancel/change positions and fees.

    All requested operations are collected in an :class:`OrderChangeManager`
    and committed atomically at the end of ``post``.
    """
    permission = 'can_change_orders'
    template_name = 'pretixcontrol/order/change.html'

    def dispatch(self, request, *args, **kwargs):
        if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_PAID):
            messages.error(self.request, _('This action is only allowed for pending or paid orders.'))
            return self._redirect_back()
        return super().dispatch(request, *args, **kwargs)

    @cached_property
    def other_form(self):
        # Order-wide operations (notify, reissue invoice, recalculate taxes, …).
        return OtherOperationsForm(prefix='other', order=self.order,
                                   data=self.request.POST if self.request.method == "POST" else None)

    @cached_property
    def add_formset(self):
        # Formset for positions to be newly added to the order.
        ff = formset_factory(
            OrderPositionAddForm, formset=OrderPositionAddFormset,
            can_order=False, can_delete=True, extra=0
        )
        return ff(
            prefix='add',
            order=self.order,
            data=self.request.POST if self.request.method == "POST" else None
        )

    @cached_property
    def fees(self):
        # Existing fees, each annotated with its change form and whether the
        # event's default tax rate applies to this invoice address.
        fees = list(self.order.fees.all())
        for f in fees:
            f.form = OrderFeeChangeForm(prefix='of-{}'.format(f.pk), instance=f,
                                        data=self.request.POST if self.request.method == "POST" else None)
            try:
                ia = self.order.invoice_address
            except InvoiceAddress.DoesNotExist:
                ia = None
            f.apply_tax = self.request.event.settings.tax_rate_default and self.request.event.settings.tax_rate_default.tax_applicable(invoice_address=ia)
        return fees

    @cached_property
    def positions(self):
        # Existing positions, each annotated with its change form and tax flag.
        positions = list(self.order.positions.all())
        for p in positions:
            p.form = OrderPositionChangeForm(prefix='op-{}'.format(p.pk), instance=p,
                                             initial={'seat': p.seat.seat_guid if p.seat else None},
                                             data=self.request.POST if self.request.method == "POST" else None)
            try:
                ia = self.order.invoice_address
            except InvoiceAddress.DoesNotExist:
                ia = None
            p.apply_tax = p.item.tax_rule and p.item.tax_rule.tax_applicable(invoice_address=ia)
        return positions

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['positions'] = self.positions
        ctx['fees'] = self.fees
        ctx['add_formset'] = self.add_formset
        ctx['other_form'] = self.other_form
        return ctx

    def _process_other(self, ocm):
        # Queue order-wide operations; returns False on validation failure.
        if not self.other_form.is_valid():
            return False
        else:
            if self.other_form.cleaned_data['recalculate_taxes']:
                ocm.recalculate_taxes()
            return True

    def _process_add(self, ocm):
        # Queue "add position" operations from the formset; returns False on
        # validation failure or the first OrderError.
        if not self.add_formset.is_valid():
            return False
        else:
            for f in self.add_formset.forms:
                if f in self.add_formset.deleted_forms or not f.has_changed():
                    continue
                # 'itemvar' encodes "itemid" or "itemid-variationid".
                if '-' in f.cleaned_data['itemvar']:
                    itemid, varid = f.cleaned_data['itemvar'].split('-')
                else:
                    itemid, varid = f.cleaned_data['itemvar'], None
                item = Item.objects.get(pk=itemid, event=self.request.event)
                if varid:
                    variation = ItemVariation.objects.get(pk=varid, item=item)
                else:
                    variation = None
                try:
                    ocm.add_position(item, variation,
                                     f.cleaned_data['price'],
                                     f.cleaned_data.get('addon_to'),
                                     f.cleaned_data.get('subevent'),
                                     f.cleaned_data.get('seat'))
                except OrderError as e:
                    # Surface the error next to the offending form row.
                    f.custom_error = str(e)
                    return False
        return True

    def _process_fees(self, ocm):
        # Queue fee cancellations/changes; returns False on the first failure.
        for f in self.fees:
            if not f.form.is_valid():
                return False
            try:
                if f.form.cleaned_data['operation_cancel']:
                    ocm.cancel_fee(f)
                    continue
                if f.form.cleaned_data['value'] != f.value:
                    ocm.change_fee(f, f.form.cleaned_data['value'])
            except OrderError as e:
                f.custom_error = str(e)
                return False
        return True

    def _process_change(self, ocm):
        # Queue per-position operations (cancel, product/subevent/seat/price
        # change, split, secret regeneration); returns False on first failure.
        for p in self.positions:
            if not p.form.is_valid():
                return False
            try:
                if p.form.cleaned_data['operation_cancel']:
                    ocm.cancel(p)
                    continue

                if p.form.cleaned_data['itemvar']:
                    if '-' in p.form.cleaned_data['itemvar']:
                        itemid, varid = p.form.cleaned_data['itemvar'].split('-')
                    else:
                        itemid, varid = p.form.cleaned_data['itemvar'], None

                    item = Item.objects.get(pk=itemid, event=self.request.event)
                    if varid:
                        variation = ItemVariation.objects.get(pk=varid, item=item)
                    else:
                        variation = None
                    # Only queue a change if the product actually differs.
                    if item != p.item or variation != p.variation:
                        ocm.change_item(p, item, variation)

                if self.request.event.has_subevents and p.form.cleaned_data['subevent'] and p.form.cleaned_data['subevent'] != p.subevent:
                    ocm.change_subevent(p, p.form.cleaned_data['subevent'])
                if p.seat and p.form.cleaned_data['seat'] and p.form.cleaned_data['seat'] != p.seat.seat_guid:
                    ocm.change_seat(p, p.form.cleaned_data['seat'])
                if p.form.cleaned_data['price'] != p.price:
                    ocm.change_price(p, p.form.cleaned_data['price'])
                if p.form.cleaned_data['operation_split']:
                    ocm.split(p)
                if p.form.cleaned_data['operation_secret']:
                    ocm.regenerate_secret(p)
            except OrderError as e:
                p.custom_error = str(e)
                return False
        return True

    def post(self, *args, **kwargs):
        # Default to notifying the customer when the other_form is invalid.
        notify = self.other_form.cleaned_data['notify'] if self.other_form.is_valid() else True
        ocm = OrderChangeManager(
            self.order,
            user=self.request.user,
            notify=notify,
            reissue_invoice=self.other_form.cleaned_data['reissue_invoice'] if self.other_form.is_valid() else True
        )
        # Short-circuits on the first failing processor; nothing is committed then.
        form_valid = self._process_add(ocm) and self._process_fees(ocm) and self._process_change(ocm) and self._process_other(ocm)
        if not form_valid:
            messages.error(self.request, _('An error occurred. Please see the details below.'))
        else:
            try:
                ocm.commit(check_quotas=not self.other_form.cleaned_data['ignore_quotas'])
            except OrderError as e:
                messages.error(self.request, str(e))
            else:
                if notify:
                    messages.success(self.request, _('The order has been changed and the user has been notified.'))
                else:
                    messages.success(self.request, _('The order has been changed.'))
                return self._redirect_back()
        return self.get(*args, **kwargs)
class OrderModifyInformation(OrderQuestionsViewMixin, OrderView):
    """Edit question answers and the invoice address of an order from the backend."""
    permission = 'can_change_orders'
    template_name = 'pretixcontrol/order/change_questions.html'
    # Backend users may edit answers that are hidden from customers, and no
    # answer is required here.
    only_user_visible = False
    all_optional = True

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['other_form'] = self.other_form
        return ctx

    @cached_property
    def other_form(self):
        return OtherOperationsForm(prefix='other', order=self.order, initial={'notify': False},
                                   data=self.request.POST if self.request.method == "POST" else None)

    def post(self, request, *args, **kwargs):
        # self.save() persists the question answer forms (from OrderQuestionsViewMixin).
        failed = not self.save() or not self.invoice_form.is_valid() or not self.other_form.is_valid()
        notify = self.other_form.cleaned_data['notify'] if self.other_form.is_valid() else True
        if failed:
            messages.error(self.request,
                           _("We had difficulties processing your input. Please review the errors below."))
            return self.get(request, *args, **kwargs)
        if notify:
            notify_user_changed_order(self.order)
        if hasattr(self.invoice_form, 'save'):
            self.invoice_form.save()
        # Record both the invoice data and every changed answer field in the log.
        self.order.log_action('pretix.event.order.modified', {
            'invoice_data': self.invoice_form.cleaned_data,
            'data': [{
                k: (f.cleaned_data.get(k).name if isinstance(f.cleaned_data.get(k), File) else f.cleaned_data.get(k))
                for k in f.changed_data
            } for f in self.forms]
        }, user=request.user)
        if self.invoice_form.has_changed():
            success_message = ('The invoice address has been updated. If you want to generate a new invoice, '
                               'you need to do this manually.')
            messages.success(self.request, _(success_message))
        # Cached ticket files may embed the changed data, so invalidate them.
        tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
        order_modified.send(sender=self.request.event, order=self.order)
        return redirect(self.get_order_url())
class OrderContactChange(OrderView):
    """Change the contact email of an order and optionally regenerate its secrets."""
    permission = 'can_change_orders'
    template_name = 'pretixcontrol/order/change_contact.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data()
        ctx['form'] = self.form
        return ctx

    @cached_property
    def form(self):
        return OrderContactForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None
        )

    def post(self, *args, **kwargs):
        old_email = self.order.email
        changed = False
        if self.form.is_valid():
            new_email = self.form.cleaned_data['email']
            if new_email != old_email:
                changed = True
                self.order.log_action(
                    'pretix.event.order.contact.changed',
                    data={
                        'old_email': old_email,
                        'new_email': self.form.cleaned_data['email'],
                    },
                    user=self.request.user,
                )
            if self.form.cleaned_data['regenerate_secrets']:
                # Invalidate all existing order/ticket links, e.g. after the
                # old address was compromised.
                changed = True
                self.order.secret = generate_secret()
                for op in self.order.all_positions.all():
                    op.secret = generate_position_secret()
                    op.save()
                # Cached ticket files embed the old secrets, so drop them.
                tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
                self.order.log_action('pretix.event.order.secret.changed', user=self.request.user)

            self.form.save()
            if changed:
                messages.success(self.request, _('The order has been changed.'))
            else:
                messages.success(self.request, _('Nothing about the order had to be changed.'))
            return redirect(self.get_order_url())
        return self.get(*args, **kwargs)
class OrderLocaleChange(OrderView):
    """Change the language used for all communication regarding an order."""
    permission = 'can_change_orders'
    template_name = 'pretixcontrol/order/change_locale.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data()
        ctx['form'] = self.form
        return ctx

    @cached_property
    def form(self):
        bound_data = self.request.POST if self.request.method == "POST" else None
        return OrderLocaleForm(instance=self.order, data=bound_data)

    def post(self, *args, **kwargs):
        previous_locale = self.order.locale
        if not self.form.is_valid():
            return self.get(*args, **kwargs)
        # Log the transition before saving so the old value is preserved.
        self.order.log_action(
            'pretix.event.order.locale.changed',
            data={
                'old_locale': previous_locale,
                'new_locale': self.form.cleaned_data['locale'],
            },
            user=self.request.user,
        )
        self.form.save()
        # Cached ticket files are rendered in the old locale; invalidate them.
        tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
        messages.success(self.request, _('The order has been changed.'))
        return redirect(self.get_order_url())
class OrderViewMixin:
    """Resolve the order addressed by the ``code`` URL kwarg on the current event."""

    def get_object(self, queryset=None):
        code = self.kwargs['code'].upper()
        try:
            return Order.objects.get(event=self.request.event, code=code)
        except Order.DoesNotExist:
            raise Http404()

    @cached_property
    def order(self):
        # Cache the lookup so repeated accesses hit the database only once.
        return self.get_object()

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['order'] = self.order
        return ctx
class OrderSendMail(EventPermissionRequiredMixin, OrderViewMixin, FormView):
    """Compose, preview and send a custom email to the customer of an order."""
    template_name = 'pretixcontrol/order/sendmail.html'
    permission = 'can_change_orders'
    form_class = OrderMailForm

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['order'] = Order.objects.get(
            event=self.request.event,
            code=self.kwargs['code'].upper()
        )
        # Allow pre-filling subject/message via GET (used e.g. by the gift card flow).
        kwargs['initial'] = {}
        if self.request.GET.get('subject'):
            kwargs['initial']['subject'] = self.request.GET.get('subject')
        if self.request.GET.get('message'):
            kwargs['initial']['message'] = self.request.GET.get('message')
        return kwargs

    def form_invalid(self, form):
        messages.error(self.request, _('We could not send the email. See below for details.'))
        return super().form_invalid(form)

    def form_valid(self, form):
        order = Order.objects.get(
            event=self.request.event,
            code=self.kwargs['code'].upper()
        )
        self.preview_output = {}
        # Render and send in the order's locale, not the backend user's.
        with language(order.locale):
            email_context = get_email_context(event=order.event, order=order)
            email_template = LazyI18nString(form.cleaned_data['message'])
            email_content = render_mail(email_template, email_context)
            if self.request.POST.get('action') == 'preview':
                self.preview_output = {
                    'subject': _('Subject: {subject}').format(subject=form.cleaned_data['subject']),
                    'html': markdown_compile_email(email_content)
                }
                # Re-render the page with the preview instead of redirecting.
                return self.get(self.request, *self.args, **self.kwargs)
            else:
                try:
                    order.send_mail(
                        form.cleaned_data['subject'], email_template,
                        email_context, 'pretix.event.order.email.custom_sent',
                        self.request.user, auto_email=False
                    )
                    # Bug fix: format AFTER translating — calling .format() on the
                    # msgid first meant the catalog lookup could never match.
                    messages.success(self.request,
                                     _('Your message has been queued and will be sent to {}.').format(order.email))
                except SendMailException:
                    messages.error(
                        self.request,
                        _('Failed to send mail to the following user: {}').format(order.email)
                    )
        return super(OrderSendMail, self).form_valid(form)

    def get_success_url(self):
        return reverse('control:event.order', kwargs={
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug,
            'code': self.kwargs['code']
        })

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        ctx['preview_output'] = getattr(self, 'preview_output', None)
        return ctx
class OrderEmailHistory(EventPermissionRequiredMixin, OrderViewMixin, ListView):
    """Paginated list of all email-related log entries of a single order."""
    template_name = 'pretixcontrol/order/mail_history.html'
    permission = 'can_view_orders'
    model = LogEntry
    context_object_name = 'logs'
    paginate_by = 10

    def get_queryset(self):
        order = get_object_or_404(
            Order,
            event=self.request.event,
            code=self.kwargs['code'].upper()
        )
        # Restrict the order's full log to email-related actions only.
        return order.all_logentries().filter(action_type__contains="order.email")
class AnswerDownload(EventPermissionRequiredMixin, OrderViewMixin, ListView):
    """Serve a file uploaded as the answer to a file question, guarded by a token."""
    permission = 'can_view_orders'

    def get(self, request, *args, **kwargs):
        answid = kwargs.get('answer')
        token = request.GET.get('token', '')

        answer = get_object_or_404(QuestionAnswer, orderposition__order=self.order, id=answid)
        if not answer.file:
            raise Http404()
        # Download links are short-lived signed tokens to keep uploads private.
        if not check_token(request, answer, token):
            raise Http404(_("This link is no longer valid. Please go back, refresh the page, and try again."))

        ftype, ignored = mimetypes.guess_type(answer.file.name)
        resp = FileResponse(answer.file, content_type=ftype or 'application/binary')
        # Bug fix: split('.', 1)[1] raised IndexError for uploads without a file
        # extension; fall back to the full base name in that case.
        basename = os.path.basename(answer.file.name)
        suffix = basename.split('.', 1)[1] if '.' in basename else basename
        resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}"'.format(
            self.request.event.slug.upper(), self.order.code,
            answer.orderposition.positionid,
            suffix
        )
        return resp
class OverView(EventPermissionRequiredMixin, TemplateView):
    """Sales overview: number and value of sold products, grouped by category."""
    template_name = 'pretixcontrol/orders/overview.html'
    permission = 'can_view_orders'

    @cached_property
    def filter_form(self):
        return OverviewFilterForm(data=self.request.GET, event=self.request.event)

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data()
        if self.filter_form.is_valid():
            ctx['items_by_category'], ctx['total'] = order_overview(
                self.request.event,
                subevent=self.filter_form.cleaned_data.get('subevent'),
                date_filter=self.filter_form.cleaned_data['date_axis'],
                date_from=self.filter_form.cleaned_data['date_from'],
                date_until=self.filter_form.cleaned_data['date_until'],
                fees=True
            )
        else:
            # Invalid filters fall back to the unfiltered overview.
            ctx['items_by_category'], ctx['total'] = order_overview(
                self.request.event,
                fees=True
            )
        # Warn if filtering by subevent while order-level fees exist, since fees
        # cannot be attributed to a single subevent and may skew the numbers.
        ctx['subevent_warning'] = (
            self.request.event.has_subevents and
            self.filter_form.is_valid() and
            self.filter_form.cleaned_data.get('subevent') and
            OrderFee.objects.filter(order__event=self.request.event).exclude(value=0).exists()
        )
        ctx['filter_form'] = self.filter_form
        return ctx
class OrderGo(EventPermissionRequiredMixin, View):
    """Quick-jump to an order by code, full order URL or invoice number."""
    permission = 'can_view_orders'

    def get_order(self, code):
        try:
            return Order.objects.get(code=code, event=self.request.event)
        except Order.DoesNotExist:
            # Retry with ambiguous characters normalized (e.g. O→0, I→1).
            return Order.objects.get(code=Order.normalize_code(code), event=self.request.event)

    def get(self, request, *args, **kwargs):
        code = request.GET.get("code", "").upper().strip()
        if '://' in code:
            # A full order URL was pasted: extract the order code from the path.
            m = re.match('.*/ORDER/([A-Z0-9]{' + str(settings.ENTROPY['order_code']) + '})/.*', code)
            if m:
                code = m.group(1)
        try:
            # Strip an optional "<EVENTSLUG>-" prefix as shown on invoices.
            if code.startswith(request.event.slug.upper()):
                code = code[len(request.event.slug):]
            if code.startswith('-'):
                code = code[1:]
            order = self.get_order(code)
            return redirect('control:event.order', event=request.event.slug, organizer=request.event.organizer.slug,
                            code=order.code)
        except Order.DoesNotExist:
            # Not an order code — maybe it is an invoice number instead.
            try:
                i = self.request.event.invoices.get(Q(invoice_no=code) | Q(full_invoice_no=code))
                return redirect('control:event.order', event=request.event.slug, organizer=request.event.organizer.slug,
                                code=i.order.code)
            except Invoice.DoesNotExist:
                pass
            messages.error(request, _('There is no order with the given order code.'))
        return redirect('control:event.orders', event=request.event.slug, organizer=request.event.organizer.slug)
class ExportMixin:
    """Provide the list of registered data exporters, each with a bound options form."""

    @cached_property
    def exporters(self):
        exporters = []
        responses = register_data_exporters.send(self.request.event)
        # Instantiate each registered exporter class and sort alphabetically.
        for ex in sorted([response(self.request.event) for r, response in responses], key=lambda ex: str(ex.verbose_name)):
            # An explicit ?identifier=… restricts the list to one exporter.
            if self.request.GET.get("identifier") and ex.identifier != self.request.GET.get("identifier"):
                continue

            # Use form parse cycle to generate useful defaults
            test_form = ExporterForm(data=self.request.GET, prefix=ex.identifier)
            test_form.fields = ex.export_form_fields
            test_form.is_valid()
            # Only carry over GET values the user actually supplied.
            initial = {
                k: v for k, v in test_form.cleaned_data.items() if ex.identifier + "-" + k in self.request.GET
            }

            ex.form = ExporterForm(
                data=(self.request.POST if self.request.method == 'POST' else None),
                prefix=ex.identifier,
                initial=initial
            )
            ex.form.fields = ex.export_form_fields
            exporters.append(ex)
        return exporters
class ExportDoView(EventPermissionRequiredMixin, ExportMixin, AsyncAction, View):
    """Start an asynchronous data export task and redirect to the resulting file."""
    permission = 'can_view_orders'
    known_errortypes = ['ExportError']
    task = export

    def get_success_message(self, value):
        return None

    def get_success_url(self, value):
        return reverse('cachedfile.download', kwargs={'id': str(value)})

    def get_error_url(self):
        return reverse('control:event.orders.export', kwargs={
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug
        })

    @cached_property
    def exporter(self):
        # Returns None implicitly if the POSTed identifier matches no exporter.
        for ex in self.exporters:
            if ex.identifier == self.request.POST.get("exporter"):
                return ex

    def post(self, request, *args, **kwargs):
        if not self.exporter:
            messages.error(self.request, _('The selected exporter was not found.'))
            # Bug fix: redirect() forwards keyword arguments straight to
            # reverse(), so the previous ``redirect(name, kwargs={...})`` handed
            # reverse() a URL parameter literally named "kwargs" and raised
            # NoReverseMatch. Pass the URL kwargs as plain keyword arguments.
            return redirect('control:event.orders.export',
                            event=self.request.event.slug,
                            organizer=self.request.event.organizer.slug)
        if not self.exporter.form.is_valid():
            messages.error(self.request, _('There was a problem processing your input. See below for error details.'))
            return self.get(request, *args, **kwargs)

        # Pre-register the cache file the export task writes into; kept 3 days.
        cf = CachedFile()
        cf.date = now()
        cf.expires = now() + timedelta(days=3)
        cf.save()
        return self.do(self.request.event.id, str(cf.id), self.exporter.identifier, self.exporter.form.cleaned_data)
class ExportView(EventPermissionRequiredMixin, ExportMixin, TemplateView):
    """Render the export page listing all available exporters and their options."""
    permission = 'can_view_orders'
    template_name = 'pretixcontrol/orders/export.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update(exporters=self.exporters)
        return context
class RefundList(EventPermissionRequiredMixin, PaginationMixin, ListView):
    """Paginated, filterable list of all refunds of the current event."""
    model = OrderRefund
    context_object_name = 'refunds'
    template_name = 'pretixcontrol/orders/refunds.html'
    permission = 'can_view_orders'

    def get_queryset(self):
        qs = OrderRefund.objects.filter(
            order__event=self.request.event
        ).select_related('order')

        if self.filter_form.is_valid():
            qs = self.filter_form.filter_qs(qs)

        return qs.distinct()

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['filter_form'] = self.filter_form
        return ctx

    @cached_property
    def filter_form(self):
        # Defaults to showing refunds that still need manual handling ('open').
        return RefundFilterForm(data=self.request.GET, event=self.request.event,
                                initial={'status': 'open'})
| 42.264581 | 154 | 0.575303 | import json
import logging
import mimetypes
import os
import re
from datetime import datetime, time, timedelta
from decimal import Decimal, DecimalException
from urllib.parse import urlencode
import vat_moss.id
from django.conf import settings
from django.contrib import messages
from django.core.files import File
from django.db import transaction
from django.db.models import (
Count, IntegerField, OuterRef, Prefetch, ProtectedError, Q, Subquery, Sum,
)
from django.forms import formset_factory
from django.http import (
FileResponse, Http404, HttpResponseNotAllowed, HttpResponseRedirect,
JsonResponse,
)
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils import formats
from django.utils.functional import cached_property
from django.utils.http import is_safe_url
from django.utils.timezone import make_aware, now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import (
DetailView, FormView, ListView, TemplateView, View,
)
from i18nfield.strings import LazyI18nString
from pretix.base.channels import get_all_sales_channels
from pretix.base.email import get_email_context
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedFile, CachedTicket, Invoice, InvoiceAddress,
Item, ItemVariation, LogEntry, Order, QuestionAnswer, Quota,
generate_position_secret, generate_secret,
)
from pretix.base.models.orders import (
OrderFee, OrderPayment, OrderPosition, OrderRefund,
)
from pretix.base.models.tax import EU_COUNTRIES, cc_to_vat_prefix
from pretix.base.payment import PaymentException
from pretix.base.services import tickets
from pretix.base.services.export import export
from pretix.base.services.invoices import (
generate_cancellation, generate_invoice, invoice_pdf, invoice_pdf_task,
invoice_qualified, regenerate_invoice,
)
from pretix.base.services.locking import LockTimeoutException
from pretix.base.services.mail import SendMailException, render_mail
from pretix.base.services.orders import (
OrderChangeManager, OrderError, approve_order, cancel_order, deny_order,
extend_order, mark_order_expired, mark_order_refunded,
notify_user_changed_order,
)
from pretix.base.services.stats import order_overview
from pretix.base.services.tickets import generate
from pretix.base.signals import (
order_modified, register_data_exporters, register_ticket_outputs,
)
from pretix.base.templatetags.money import money_filter
from pretix.base.templatetags.rich_text import markdown_compile_email
from pretix.base.views.mixins import OrderQuestionsViewMixin
from pretix.base.views.tasks import AsyncAction
from pretix.control.forms.filter import (
EventOrderFilterForm, OverviewFilterForm, RefundFilterForm,
)
from pretix.control.forms.orders import (
CancelForm, CommentForm, ConfirmPaymentForm, ExporterForm, ExtendForm,
MarkPaidForm, OrderContactForm, OrderFeeChangeForm, OrderLocaleForm,
OrderMailForm, OrderPositionAddForm, OrderPositionAddFormset,
OrderPositionChangeForm, OrderRefundForm, OtherOperationsForm,
)
from pretix.control.permissions import EventPermissionRequiredMixin
from pretix.control.views import PaginationMixin
from pretix.helpers.safedownload import check_token
from pretix.presale.signals import question_form_fields
# Module-level logger for the control-area order views.
logger = logging.getLogger(__name__)
class OrderList(EventPermissionRequiredMixin, PaginationMixin, ListView):
    """Paginated, filterable list of all orders of the current event."""
    model = Order
    context_object_name = 'orders'
    template_name = 'pretixcontrol/orders/index.html'
    permission = 'can_view_orders'
    def get_queryset(self):
        # Join the invoice address in one query; the template renders it per row.
        qs = Order.objects.filter(
            event=self.request.event
        ).select_related('invoice_address')
        if self.filter_form.is_valid():
            qs = self.filter_form.filter_qs(qs)
        return qs
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['filter_form'] = self.filter_form
        # Subquery counting the positions of each order; reused both for the
        # per-row annotations and for the aggregated sums below.
        s = OrderPosition.objects.filter(
            order=OuterRef('pk')
        ).order_by().values('order').annotate(k=Count('id')).values('k')
        # Compute the (expensive) payment-state annotations only for the
        # orders on the current page, then copy them onto the page objects.
        annotated = {
            o['pk']: o
            for o in
            Order.annotate_overpayments(Order.objects).filter(
                pk__in=[o.pk for o in ctx['orders']]
            ).annotate(
                pcnt=Subquery(s, output_field=IntegerField())
            ).values(
                'pk', 'pcnt', 'is_overpaid', 'is_underpaid', 'is_pending_with_full_payment', 'has_external_refund',
                'has_pending_refund'
            )
        }
        for o in ctx['orders']:
            if o.pk not in annotated:
                continue
            o.pcnt = annotated.get(o.pk)['pcnt']
            o.is_overpaid = annotated.get(o.pk)['is_overpaid']
            o.is_underpaid = annotated.get(o.pk)['is_underpaid']
            o.is_pending_with_full_payment = annotated.get(o.pk)['is_pending_with_full_payment']
            o.has_external_refund = annotated.get(o.pk)['has_external_refund']
            o.has_pending_refund = annotated.get(o.pk)['has_pending_refund']
        # For small result sets also aggregate the total position count;
        # for large ones skip it to keep the aggregation query cheap.
        if ctx['page_obj'].paginator.count < 1000:
            ctx['sums'] = self.get_queryset().annotate(
                pcnt=Subquery(s, output_field=IntegerField())
            ).aggregate(
                s=Sum('total'), pc=Sum('pcnt'), c=Count('id')
            )
        else:
            ctx['sums'] = self.get_queryset().aggregate(s=Sum('total'), c=Count('id'))
        return ctx
    @cached_property
    def filter_form(self):
        return EventOrderFilterForm(data=self.request.GET, event=self.request.event)
class OrderView(EventPermissionRequiredMixin, DetailView):
    """Base class for all single-order views of the control area.

    Resolves the order from the ``code`` URL kwarg within the current event
    and provides common helpers (``order``, ``get_order_url``).
    """
    context_object_name = 'order'
    model = Order

    def get_object(self, queryset=None):
        # Look the order up by code within the current event, or 404.
        code = self.kwargs['code'].upper()
        try:
            return Order.objects.get(event=self.request.event, code=code)
        except Order.DoesNotExist:
            raise Http404()

    def _redirect_back(self):
        # Convenience helper: bounce back to this order's detail page.
        return redirect(
            'control:event.order',
            event=self.request.event.slug,
            organizer=self.request.event.organizer.slug,
            code=self.order.code,
        )

    @cached_property
    def order(self):
        # Cached so repeated accesses resolve the order only once.
        return self.get_object()

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # An invoice may be generated if the order qualifies, the event allows
        # it, and there is either no invoice yet or all previous invoices have
        # been canceled while the order is still pending/paid.
        ctx['can_generate_invoice'] = (
            invoice_qualified(self.order)
            and self.request.event.settings.invoice_generate in ('admin', 'user', 'paid', 'True')
            and (
                not self.order.invoices.exists()
                or (
                    self.order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
                    and self.order.invoices.filter(is_cancellation=True).count()
                    >= self.order.invoices.filter(is_cancellation=False).count()
                )
            )
        )
        return ctx

    def get_order_url(self):
        return reverse('control:event.order', kwargs={
            'code': self.order.code,
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug,
        })
class OrderDetail(OrderView):
    """Read-only detail page showing positions, payments, refunds and invoices."""
    template_name = 'pretixcontrol/order/index.html'
    permission = 'can_view_orders'
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['items'] = self.get_items()
        ctx['event'] = self.request.event
        ctx['payments'] = self.order.payments.order_by('-created')
        ctx['refunds'] = self.order.refunds.select_related('payment').order_by('-created')
        # Let each payment/refund provider render provider-specific detail HTML.
        for p in ctx['payments']:
            if p.payment_provider:
                p.html_info = (p.payment_provider.payment_control_render(self.request, p) or "").strip()
        for r in ctx['refunds']:
            if r.payment_provider:
                r.html_info = (r.payment_provider.refund_control_render(self.request, r) or "").strip()
        ctx['invoices'] = list(self.order.invoices.all().select_related('event'))
        ctx['comment_form'] = CommentForm(initial={
            'comment': self.order.comment,
            'checkin_attention': self.order.checkin_attention
        })
        ctx['display_locale'] = dict(settings.LANGUAGES)[self.object.locale or self.request.event.settings.locale]
        # Negative pending sum means the customer paid too much.
        ctx['overpaid'] = self.order.pending_sum * -1
        ctx['sales_channel'] = get_all_sales_channels().get(self.order.sales_channel)
        ctx['download_buttons'] = self.download_buttons
        return ctx
    @cached_property
    def download_buttons(self):
        # One button descriptor per registered ticket output provider.
        buttons = []
        responses = register_ticket_outputs.send(self.request.event)
        for receiver, response in responses:
            provider = response(self.request.event)
            buttons.append({
                'text': provider.download_button_text or 'Ticket',
                'icon': provider.download_button_icon or 'fa-download',
                'identifier': provider.identifier,
                'multi': provider.multi_download_enabled,
                'javascript_required': provider.javascript_required
            })
        return buttons
    def get_items(self):
        """Collect all order positions with their answers and plugin fields.

        Returns a dict with the sorted positions, the raw queryset, and the
        order's monetary totals.
        """
        queryset = self.object.all_positions
        cartpos = queryset.order_by(
            'item', 'variation'
        ).select_related(
            'item', 'variation', 'addon_to', 'tax_rule'
        ).prefetch_related(
            'item__questions', 'issued_gift_cards',
            Prefetch('answers', queryset=QuestionAnswer.objects.prefetch_related('options').select_related('question')),
            'checkins', 'checkins__list'
        ).order_by('positionid')
        positions = []
        for p in cartpos:
            # Plugins may contribute extra per-position form fields; pair them
            # with the answers stored in the position's meta info.
            responses = question_form_fields.send(sender=self.request.event, position=p)
            p.additional_fields = []
            data = p.meta_info_data
            for r, response in sorted(responses, key=lambda r: str(r[0])):
                if response:
                    for key, value in response.items():
                        p.additional_fields.append({
                            'answer': data.get('question_form_data', {}).get(key),
                            'question': value.label
                        })
            p.has_questions = (
                p.additional_fields or
                (p.item.admission and self.request.event.settings.attendee_names_asked) or
                (p.item.admission and self.request.event.settings.attendee_emails_asked) or
                p.item.questions.all()
            )
            p.cache_answers()
            positions.append(p)
        positions.sort(key=lambda p: p.sort_key)
        return {
            'positions': positions,
            'raw': cartpos,
            'total': self.object.total,
            'fees': self.object.all_fees.all(),
            'net_total': self.object.net_total,
            'tax_total': self.object.tax_total,
        }
class OrderDownload(AsyncAction, OrderView):
    """Asynchronously generate and serve a ticket file.

    Handles both single-position downloads ('position' in the URL kwargs) and
    order-level combined downloads (no 'position' kwarg).
    """
    task = generate
    permission = 'can_view_orders'

    def get_success_url(self, value):
        return self.get_self_url()

    def get_error_url(self):
        return self.get_order_url()

    def get_self_url(self):
        return reverse('control:event.order.download.ticket', kwargs=self.kwargs)

    @cached_property
    def output(self):
        # Resolve the ticket output provider named in the URL; None if no
        # registered provider matches.
        responses = register_ticket_outputs.send(self.request.event)
        for receiver, response in responses:
            provider = response(self.request.event)
            if provider.identifier == self.kwargs.get('output'):
                return provider

    @cached_property
    def order_position(self):
        # The position referenced in the URL, or None for order-level
        # downloads and unknown position IDs.
        try:
            return self.order.positions.get(pk=self.kwargs.get('position'))
        except OrderPosition.DoesNotExist:
            return None

    def get(self, request, *args, **kwargs):
        if 'async_id' in request.GET and settings.HAS_CELERY:
            return self.get_result(request)
        ct = self.get_last_ct()
        if ct:
            return self.success(ct)
        return self.http_method_not_allowed(request)

    def post(self, request, *args, **kwargs):
        if not self.output:
            return self.error(_('You requested an invalid ticket output type.'))
        # BUGFIX: a missing position is only an error for position-level
        # downloads. The previous unconditional check raised Http404 whenever
        # self.order_position was None — which is always the case for
        # order-level downloads — making the 'order' branch of self.do()
        # below unreachable.
        if 'position' in kwargs and not self.order_position:
            raise Http404(_('Unknown order code or not authorized to access this order.'))
        if 'position' in kwargs and not self.order_position.generate_ticket:
            return self.error(_('Ticket download is not enabled for this product.'))
        ct = self.get_last_ct()
        if ct:
            return self.success(ct)
        return self.do('orderposition' if 'position' in kwargs else 'order',
                       self.order_position.pk if 'position' in kwargs else self.order.pk,
                       self.output.identifier)

    def get_success_message(self, value):
        return ""

    def success(self, value):
        if "ajax" in self.request.POST or "ajax" in self.request.GET:
            return JsonResponse({
                'ready': True,
                'success': True,
                'redirect': self.get_success_url(value),
                'message': str(self.get_success_message(value))
            })
        if isinstance(value, CachedTicket):
            if value.type == 'text/uri-list':
                # URI-list "tickets" store a redirect target instead of data.
                resp = HttpResponseRedirect(value.file.file.read())
                return resp
            else:
                resp = FileResponse(value.file.file, content_type=value.type)
                resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}{}"'.format(
                    self.request.event.slug.upper(), self.order.code, self.order_position.positionid,
                    self.output.identifier, value.extension
                )
                return resp
        elif isinstance(value, CachedCombinedTicket):
            resp = FileResponse(value.file.file, content_type=value.type)
            resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}{}"'.format(
                self.request.event.slug.upper(), self.order.code, self.output.identifier, value.extension
            )
            return resp
        else:
            return redirect(self.get_self_url())

    def get_last_ct(self):
        # Most recent cached ticket file for this position/order, or None if
        # none has been generated yet.
        if 'position' in self.kwargs:
            ct = CachedTicket.objects.filter(
                order_position=self.order_position, provider=self.output.identifier, file__isnull=False
            ).last()
        else:
            ct = CachedCombinedTicket.objects.filter(
                order=self.order, provider=self.output.identifier, file__isnull=False
            ).last()
        if not ct or not ct.file:
            return None
        return ct
class OrderComment(OrderView):
    """Save changes to an order's internal comment and check-in attention flag."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        form = CommentForm(self.request.POST)
        if not form.is_valid():
            messages.error(self.request, _('Could not update the comment.'))
            return redirect(self.get_order_url())
        new_comment = form.cleaned_data.get('comment')
        if new_comment != self.order.comment:
            self.order.comment = new_comment
            self.order.log_action('pretix.event.order.comment', user=self.request.user, data={
                'new_comment': new_comment
            })
        new_attention = form.cleaned_data.get('checkin_attention')
        if new_attention != self.order.checkin_attention:
            self.order.checkin_attention = new_attention
            self.order.log_action('pretix.event.order.checkin_attention', user=self.request.user, data={
                'new_value': new_attention
            })
        # Both fields are saved unconditionally; log entries are only written
        # for values that actually changed.
        self.order.save(update_fields=['checkin_attention', 'comment'])
        messages.success(self.request, _('The comment has been updated.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return HttpResponseNotAllowed(['POST'])
class OrderApprove(OrderView):
    """Approve an order that is waiting for manual approval."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        # Silently ignore orders that do not require approval.
        if self.order.require_approval:
            try:
                approve_order(self.order, user=self.request.user)
            except OrderError as e:
                messages.error(self.request, str(e))
            else:
                messages.success(self.request, _('The order has been approved.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        ctx = {'order': self.order}
        return render(self.request, 'pretixcontrol/order/approve.html', ctx)
class OrderDelete(OrderView):
    """Permanently delete a test-mode order; real orders can never be deleted."""
    permission = 'can_change_orders'
    def post(self, *args, **kwargs):
        if self.order.testmode:
            try:
                with transaction.atomic():
                    self.order.gracefully_delete(user=self.request.user)
                messages.success(self.request, _('The order has been deleted.'))
                return redirect(reverse('control:event.orders', kwargs={
                    'event': self.request.event.slug,
                    # NOTE(review): other views here use
                    # self.request.event.organizer — presumably
                    # request.organizer is set by control middleware; confirm.
                    'organizer': self.request.organizer.slug,
                }))
            except ProtectedError:
                # Foreign keys from plugin data may protect the order rows.
                messages.error(self.request, _('The order could not be deleted as some constraints (e.g. data created '
                                               'by plug-ins) do not allow it.'))
                return self.get(self.request, *self.args, **self.kwargs)
        return redirect(self.get_order_url())
    def get(self, *args, **kwargs):
        if not self.order.testmode:
            messages.error(self.request, _('Only orders created in test mode can be deleted.'))
            return redirect(self.get_order_url())
        return render(self.request, 'pretixcontrol/order/delete.html', {
            'order': self.order,
        })
class OrderDeny(OrderView):
    """Deny an order that is waiting for approval, canceling it."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        # Orders that do not require approval are ignored silently.
        if self.order.require_approval:
            comment = self.request.POST.get('comment')
            notify = self.request.POST.get('send_email') == 'on'
            try:
                deny_order(self.order, user=self.request.user,
                           comment=comment,
                           send_mail=notify)
            except OrderError as e:
                messages.error(self.request, str(e))
            else:
                messages.success(self.request, _('The order has been denied and is therefore now canceled.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        ctx = {'order': self.order}
        return render(self.request, 'pretixcontrol/order/deny.html', ctx)
class OrderPaymentCancel(OrderView):
    """Cancel a payment that has not been confirmed yet."""
    permission = 'can_change_orders'
    @cached_property
    def payment(self):
        return get_object_or_404(self.order.payments, pk=self.kwargs['payment'])
    def post(self, *args, **kwargs):
        # Only payments that were never confirmed can be canceled.
        if self.payment.state in (OrderPayment.PAYMENT_STATE_CREATED, OrderPayment.PAYMENT_STATE_PENDING):
            try:
                with transaction.atomic():
                    # The provider may need to cancel the payment remotely.
                    self.payment.payment_provider.cancel_payment(self.payment)
                    self.order.log_action('pretix.event.order.payment.canceled', {
                        'local_id': self.payment.local_id,
                        'provider': self.payment.provider,
                    }, user=self.request.user if self.request.user.is_authenticated else None)
            except PaymentException as e:
                # Record the failed cancellation attempt before surfacing it.
                self.order.log_action(
                    'pretix.event.order.payment.canceled.failed',
                    {
                        'local_id': self.payment.local_id,
                        'provider': self.payment.provider,
                        'error': str(e)
                    },
                    user=self.request.user if self.request.user.is_authenticated else None,
                )
                messages.error(self.request, str(e))
            else:
                messages.success(self.request, _('This payment has been canceled.'))
        else:
            messages.error(self.request, _('This payment can not be canceled at the moment.'))
        return redirect(self.get_order_url())
    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/pay_cancel.html', {
            'order': self.order,
        })
class OrderRefundCancel(OrderView):
    """Cancel a refund that has not been completed yet."""
    permission = 'can_change_orders'

    @cached_property
    def refund(self):
        return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])

    def post(self, *args, **kwargs):
        cancelable_states = (
            OrderRefund.REFUND_STATE_CREATED,
            OrderRefund.REFUND_STATE_TRANSIT,
            OrderRefund.REFUND_STATE_EXTERNAL,
        )
        if self.refund.state not in cancelable_states:
            messages.error(self.request, _('This refund can not be canceled at the moment.'))
        else:
            with transaction.atomic():
                self.refund.state = OrderRefund.REFUND_STATE_CANCELED
                self.refund.save()
                self.order.log_action('pretix.event.order.refund.canceled', {
                    'local_id': self.refund.local_id,
                    'provider': self.refund.provider,
                }, user=self.request.user)
            messages.success(self.request, _('The refund has been canceled.'))
        # Honor an explicit, safe "next" redirect target if one was given.
        if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
            return redirect(self.request.GET.get("next"))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/refund_cancel.html', {
            'order': self.order,
        })
class OrderRefundProcess(OrderView):
    """Acknowledge an externally-triggered refund and adjust the order state."""
    permission = 'can_change_orders'
    @cached_property
    def refund(self):
        return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])
    def post(self, *args, **kwargs):
        if self.refund.state == OrderRefund.REFUND_STATE_EXTERNAL:
            self.refund.done(user=self.request.user)
            # action == "r": mark the whole order as refunded/canceled.
            # Otherwise the order falls back to pending unless it is still
            # fully paid after the refund.
            if self.request.POST.get("action") == "r" and self.order.status != Order.STATUS_CANCELED:
                mark_order_refunded(self.order, user=self.request.user)
            elif not (self.order.status == Order.STATUS_PAID and self.order.pending_sum <= 0):
                self.order.status = Order.STATUS_PENDING
                self.order.set_expires(
                    now(),
                    self.order.event.subevents.filter(
                        id__in=self.order.positions.values_list('subevent_id', flat=True))
                )
                self.order.save(update_fields=['status', 'expires'])
            messages.success(self.request, _('The refund has been processed.'))
        else:
            messages.error(self.request, _('This refund can not be processed at the moment.'))
        if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
            return redirect(self.request.GET.get("next"))
        return redirect(self.get_order_url())
    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/refund_process.html', {
            'order': self.order,
            'refund': self.refund,
            # The refund amount is added back — NOTE(review): presumably to
            # show the pending sum as it would be once this refund counts;
            # confirm against the template.
            'pending_sum': self.order.pending_sum + self.refund.amount,
            'propose_cancel': self.order.pending_sum + self.refund.amount >= self.order.total
        })
class OrderRefundDone(OrderView):
    """Mark an in-flight refund as completed."""
    permission = 'can_change_orders'

    @cached_property
    def refund(self):
        return get_object_or_404(self.order.refunds, pk=self.kwargs['refund'])

    def post(self, *args, **kwargs):
        in_flight = self.refund.state in (
            OrderRefund.REFUND_STATE_CREATED,
            OrderRefund.REFUND_STATE_TRANSIT,
        )
        if in_flight:
            self.refund.done(user=self.request.user)
            messages.success(self.request, _('The refund has been marked as done.'))
        else:
            messages.error(self.request, _('This refund can not be processed at the moment.'))
        # Honor an explicit, safe "next" redirect target if one was given.
        if "next" in self.request.GET and is_safe_url(self.request.GET.get("next"), allowed_hosts=None):
            return redirect(self.request.GET.get("next"))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        ctx = {'order': self.order}
        return render(self.request, 'pretixcontrol/order/refund_done.html', ctx)
class OrderPaymentConfirm(OrderView):
    """Manually confirm a created/pending payment as completed."""
    permission = 'can_change_orders'
    @cached_property
    def payment(self):
        return get_object_or_404(self.order.payments, pk=self.kwargs['payment'])
    @cached_property
    def mark_paid_form(self):
        return ConfirmPaymentForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None,
        )
    def post(self, *args, **kwargs):
        if self.payment.state in (OrderPayment.PAYMENT_STATE_CREATED, OrderPayment.PAYMENT_STATE_PENDING):
            if not self.mark_paid_form.is_valid():
                # Re-render the confirmation page with form errors.
                return render(self.request, 'pretixcontrol/order/pay_complete.html', {
                    'form': self.mark_paid_form,
                    'order': self.order,
                })
            try:
                # count_waitinglist=False: a manual confirmation should not
                # be blocked by quota reserved for the waiting list.
                self.payment.confirm(user=self.request.user,
                                     count_waitinglist=False,
                                     force=self.mark_paid_form.cleaned_data.get('force', False))
            except Quota.QuotaExceededException as e:
                messages.error(self.request, str(e))
            except PaymentException as e:
                messages.error(self.request, str(e))
            except SendMailException:
                # The payment itself succeeded; only the notification failed.
                messages.warning(self.request,
                                 _('The payment has been marked as complete, but we were unable to send a '
                                   'confirmation mail.'))
            else:
                messages.success(self.request, _('The payment has been marked as complete.'))
        else:
            messages.error(self.request, _('This payment can not be confirmed at the moment.'))
        return redirect(self.get_order_url())
    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/pay_complete.html', {
            'form': self.mark_paid_form,
            'order': self.order,
        })
class OrderRefundView(OrderView):
    """Two-step refund wizard: choose an amount, then distribute it across
    payments, a manual refund, a new gift card, and/or offsetting."""
    permission = 'can_change_orders'
    @cached_property
    def start_form(self):
        # Step 1: how much to refund and what to do with the order afterwards.
        # The form may be pre-filled via GET (deep link with start-action).
        return OrderRefundForm(
            order=self.order,
            data=self.request.POST if self.request.method == "POST" else (
                self.request.GET if "start-action" in self.request.GET else None
            ),
            prefix='start',
            initial={
                'partial_amount': self.order.payment_refund_sum,
                'action': (
                    'mark_pending' if self.order.status == Order.STATUS_PAID
                    else 'do_nothing'
                )
            }
        )
    def choose_form(self):
        """Step 2: render the distribution form and, on 'perform', validate
        the selected partial amounts and execute the refunds."""
        payments = list(self.order.payments.filter(state=OrderPayment.PAYMENT_STATE_CONFIRMED))
        if self.start_form.cleaned_data.get('mode') == 'full':
            full_refund = self.order.payment_refund_sum
        else:
            full_refund = self.start_form.cleaned_data.get('partial_amount')
        # Pre-fill the per-payment fields with an automatic proposal.
        proposals = self.order.propose_auto_refunds(full_refund, payments=payments)
        to_refund = full_refund - sum(proposals.values())
        for p in payments:
            p.propose_refund = proposals.get(p, 0)
        if 'perform' in self.request.POST:
            refund_selected = Decimal('0.00')
            refunds = []
            is_valid = True
            # --- manual refund (money returned outside the system) ---
            manual_value = self.request.POST.get('refund-manual', '0') or '0'
            manual_value = formats.sanitize_separators(manual_value)
            try:
                manual_value = Decimal(manual_value)
            except (DecimalException, TypeError):
                messages.error(self.request, _('You entered an invalid number.'))
                is_valid = False
            else:
                refund_selected += manual_value
                if manual_value:
                    refunds.append(OrderRefund(
                        order=self.order,
                        payment=None,
                        source=OrderRefund.REFUND_SOURCE_ADMIN,
                        state=(
                            OrderRefund.REFUND_STATE_DONE
                            if self.request.POST.get('manual_state') == 'done'
                            else OrderRefund.REFUND_STATE_CREATED
                        ),
                        amount=manual_value,
                        provider='manual'
                    ))
            # --- refund onto a newly issued gift card ---
            giftcard_value = self.request.POST.get('refund-new-giftcard', '0') or '0'
            giftcard_value = formats.sanitize_separators(giftcard_value)
            try:
                giftcard_value = Decimal(giftcard_value)
            except (DecimalException, TypeError):
                messages.error(self.request, _('You entered an invalid number.'))
                is_valid = False
            else:
                if giftcard_value:
                    refund_selected += giftcard_value
                    giftcard = self.request.organizer.issued_gift_cards.create(
                        currency=self.request.event.currency,
                        testmode=self.order.testmode
                    )
                    giftcard.log_action('pretix.giftcards.created', user=self.request.user, data={})
                    refunds.append(OrderRefund(
                        order=self.order,
                        payment=None,
                        source=OrderRefund.REFUND_SOURCE_ADMIN,
                        state=OrderRefund.REFUND_STATE_CREATED,
                        execution_date=now(),
                        amount=giftcard_value,
                        provider='giftcard',
                        info=json.dumps({
                            'gift_card': giftcard.pk
                        })
                    ))
            # --- offsetting against another order of the same organizer ---
            offsetting_value = self.request.POST.get('refund-offsetting', '0') or '0'
            offsetting_value = formats.sanitize_separators(offsetting_value)
            try:
                offsetting_value = Decimal(offsetting_value)
            except (DecimalException, TypeError):
                messages.error(self.request, _('You entered an invalid number.'))
                is_valid = False
            else:
                if offsetting_value:
                    refund_selected += offsetting_value
                    try:
                        order = Order.objects.get(code=self.request.POST.get('order-offsetting'),
                                                  event__organizer=self.request.organizer)
                    except Order.DoesNotExist:
                        messages.error(self.request, _('You entered an order that could not be found.'))
                        is_valid = False
                    else:
                        refunds.append(OrderRefund(
                            order=self.order,
                            payment=None,
                            source=OrderRefund.REFUND_SOURCE_ADMIN,
                            state=OrderRefund.REFUND_STATE_DONE,
                            execution_date=now(),
                            amount=offsetting_value,
                            provider='offsetting',
                            info=json.dumps({
                                'orders': [order.code]
                            })
                        ))
            # --- per-payment refunds through the original providers ---
            for p in payments:
                value = self.request.POST.get('refund-{}'.format(p.pk), '0') or '0'
                value = formats.sanitize_separators(value)
                try:
                    value = Decimal(value)
                except (DecimalException, TypeError):
                    messages.error(self.request, _('You entered an invalid number.'))
                    is_valid = False
                else:
                    if value == 0:
                        continue
                    elif value > p.available_amount:
                        messages.error(self.request, _('You can not refund more than the amount of a '
                                                       'payment that is not yet refunded.'))
                        is_valid = False
                        break
                    elif value != p.amount and not p.partial_refund_possible:
                        messages.error(self.request, _('You selected a partial refund for a payment method that '
                                                       'only supports full refunds.'))
                        is_valid = False
                        break
                    elif (p.partial_refund_possible or p.full_refund_possible) and value > 0:
                        refund_selected += value
                        refunds.append(OrderRefund(
                            order=self.order,
                            payment=p,
                            source=OrderRefund.REFUND_SOURCE_ADMIN,
                            state=OrderRefund.REFUND_STATE_CREATED,
                            amount=value,
                            provider=p.provider
                        ))
            any_success = False
            # Execute only if the distributed amounts add up exactly.
            if refund_selected == full_refund and is_valid:
                for r in refunds:
                    r.save()
                    self.order.log_action('pretix.event.order.refund.created', {
                        'local_id': r.local_id,
                        'provider': r.provider,
                    }, user=self.request.user)
                    if r.payment or r.provider == "offsetting" or r.provider == "giftcard":
                        try:
                            r.payment_provider.execute_refund(r)
                        except PaymentException as e:
                            r.state = OrderRefund.REFUND_STATE_FAILED
                            r.save()
                            messages.error(self.request, _('One of the refunds failed to be processed. You should '
                                                           'retry to refund in a different way. The error message '
                                                           'was: {}').format(str(e)))
                        else:
                            any_success = True
                            if r.state == OrderRefund.REFUND_STATE_DONE:
                                messages.success(self.request, _('A refund of {} has been processed.').format(
                                    money_filter(r.amount, self.request.event.currency)
                                ))
                            elif r.state == OrderRefund.REFUND_STATE_CREATED:
                                messages.info(self.request, _('A refund of {} has been saved, but not yet '
                                                              'fully executed. You can mark it as complete '
                                                              'below.').format(
                                    money_filter(r.amount, self.request.event.currency)
                                ))
                    else:
                        # Manual refunds have no provider-side execution step.
                        any_success = True
                if any_success:
                    # Apply the order-state action chosen in step 1.
                    if self.start_form.cleaned_data.get('action') == 'mark_refunded':
                        mark_order_refunded(self.order, user=self.request.user)
                    elif self.start_form.cleaned_data.get('action') == 'mark_pending':
                        if not (self.order.status == Order.STATUS_PAID and self.order.pending_sum <= 0):
                            self.order.status = Order.STATUS_PENDING
                            self.order.set_expires(
                                now(),
                                self.order.event.subevents.filter(
                                    id__in=self.order.positions.values_list('subevent_id', flat=True))
                            )
                            self.order.save(update_fields=['status', 'expires'])
                    if giftcard_value and self.order.email:
                        # Jump straight to the mail composer with the gift
                        # card code pre-filled.
                        messages.success(self.request, _('A new gift card was created. You can now send the user their '
                                                         'gift card code.'))
                        return redirect(reverse('control:event.order.sendmail', kwargs={
                            'event': self.request.event.slug,
                            'organizer': self.request.event.organizer.slug,
                            'code': self.order.code
                        }) + '?' + urlencode({
                            'subject': _('Your gift card code'),
                            'message': _('Hello,\n\nwe have refunded you {amount} for your order.\n\nYou can use the gift '
                                         'card code {giftcard} to pay for future ticket purchases in our shop.\n\n'
                                         'Your {event} team').format(
                                event="{event}",
                                amount=money_filter(giftcard_value, self.request.event.currency),
                                giftcard=giftcard.secret,
                            )
                        }))
                return redirect(self.get_order_url())
            else:
                messages.error(self.request, _('The refunds you selected do not match the selected total refund '
                                               'amount.'))
        return render(self.request, 'pretixcontrol/order/refund_choose.html', {
            'payments': payments,
            'remainder': to_refund,
            'order': self.order,
            'partial_amount': (
                self.request.POST.get('start-partial_amount') if self.request.method == 'POST'
                else self.request.GET.get('start-partial_amount')
            ),
            'start_form': self.start_form
        })
    def post(self, *args, **kwargs):
        if self.start_form.is_valid():
            return self.choose_form()
        return self.get(*args, **kwargs)
    def get(self, *args, **kwargs):
        if self.start_form.is_valid():
            return self.choose_form()
        return render(self.request, 'pretixcontrol/order/refund_start.html', {
            'form': self.start_form,
            'order': self.order,
        })
class OrderTransition(OrderView):
    """Move an order between states: mark paid ('p'), cancel ('c'), or
    mark expired ('e')."""
    permission = 'can_change_orders'
    @cached_property
    def mark_paid_form(self):
        return MarkPaidForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None,
        )
    @cached_property
    def mark_canceled_form(self):
        return CancelForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None,
        )
    def post(self, *args, **kwargs):
        to = self.request.POST.get('status', '')
        if self.order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and to == 'p' and self.mark_paid_form.is_valid():
            ps = self.mark_paid_form.cleaned_data['amount']
            try:
                # Reuse an open manual payment over the same amount if present.
                p = self.order.payments.get(
                    state__in=(OrderPayment.PAYMENT_STATE_PENDING, OrderPayment.PAYMENT_STATE_CREATED),
                    provider='manual',
                    amount=ps
                )
            except OrderPayment.DoesNotExist:
                # Otherwise cancel all other open payments and create a fresh
                # manual payment for the entered amount.
                for p in self.order.payments.filter(state__in=(OrderPayment.PAYMENT_STATE_PENDING,
                                                               OrderPayment.PAYMENT_STATE_CREATED)):
                    try:
                        with transaction.atomic():
                            p.payment_provider.cancel_payment(p)
                            self.order.log_action('pretix.event.order.payment.canceled', {
                                'local_id': p.local_id,
                                'provider': p.provider,
                            }, user=self.request.user if self.request.user.is_authenticated else None)
                    except PaymentException as e:
                        self.order.log_action(
                            'pretix.event.order.payment.canceled.failed',
                            {
                                'local_id': p.local_id,
                                'provider': p.provider,
                                'error': str(e)
                            },
                            user=self.request.user if self.request.user.is_authenticated else None,
                        )
                p = self.order.payments.create(
                    state=OrderPayment.PAYMENT_STATE_CREATED,
                    provider='manual',
                    amount=ps,
                    fee=None
                )
            payment_date = None
            # A back-dated payment date is converted to midnight in the
            # event's timezone; today is left as "now" (None).
            if self.mark_paid_form.cleaned_data['payment_date'] != now().date():
                payment_date = make_aware(datetime.combine(
                    self.mark_paid_form.cleaned_data['payment_date'],
                    time(hour=0, minute=0, second=0)
                ), self.order.event.timezone)
            try:
                p.confirm(user=self.request.user, count_waitinglist=False, payment_date=payment_date,
                          force=self.mark_paid_form.cleaned_data.get('force', False))
            except Quota.QuotaExceededException as e:
                p.state = OrderPayment.PAYMENT_STATE_FAILED
                p.save()
                self.order.log_action('pretix.event.order.payment.failed', {
                    'local_id': p.local_id,
                    'provider': p.provider,
                    'message': str(e)
                })
                messages.error(self.request, str(e))
            except PaymentException as e:
                p.state = OrderPayment.PAYMENT_STATE_FAILED
                p.save()
                self.order.log_action('pretix.event.order.payment.failed', {
                    'local_id': p.local_id,
                    'provider': p.provider,
                    'message': str(e)
                })
                messages.error(self.request, str(e))
            except SendMailException:
                # The payment itself succeeded; only the mail failed.
                messages.warning(self.request, _('The order has been marked as paid, but we were unable to send a '
                                                 'confirmation mail.'))
            else:
                messages.success(self.request, _('The payment has been created successfully.'))
        elif self.order.cancel_allowed() and to == 'c' and self.mark_canceled_form.is_valid():
            try:
                cancel_order(self.order, user=self.request.user,
                             send_mail=self.mark_canceled_form.cleaned_data['send_email'],
                             cancellation_fee=self.mark_canceled_form.cleaned_data.get('cancellation_fee'))
            except OrderError as e:
                messages.error(self.request, str(e))
            else:
                self.order.refresh_from_db()
                if self.order.pending_sum < 0:
                    # The customer is owed money — jump into the refund wizard
                    # pre-filled with the outstanding amount.
                    messages.success(self.request, _('The order has been canceled. You can now select how you want to '
                                                     'transfer the money back to the user.'))
                    return redirect(reverse('control:event.order.refunds.start', kwargs={
                        'event': self.request.event.slug,
                        'organizer': self.request.event.organizer.slug,
                        'code': self.order.code
                    }) + '?start-action=do_nothing&start-mode=partial&start-partial_amount={}'.format(
                        self.order.pending_sum * -1
                    ))
                messages.success(self.request, _('The order has been canceled.'))
        elif self.order.status == Order.STATUS_PENDING and to == 'e':
            mark_order_expired(self.order, user=self.request.user)
            messages.success(self.request, _('The order has been marked as expired.'))
        return redirect(self.get_order_url())
    def get(self, *args, **kwargs):
        # GET shows a confirmation page for 'p' and 'c'; 'e' is POST-only.
        to = self.request.GET.get('status', '')
        if self.order.status in (Order.STATUS_PENDING, Order.STATUS_EXPIRED) and to == 'p':
            return render(self.request, 'pretixcontrol/order/pay.html', {
                'form': self.mark_paid_form,
                'order': self.order,
            })
        elif self.order.cancel_allowed() and to == 'c':
            return render(self.request, 'pretixcontrol/order/cancel.html', {
                'form': self.mark_canceled_form,
                'order': self.order,
            })
        else:
            return HttpResponseNotAllowed(['POST'])
class OrderInvoiceCreate(OrderView):
    """Manually trigger invoice generation for an order."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        # An invoice counts as "existing" unless all regular invoices have
        # been matched by cancellations while the order is pending/paid.
        has_inv = self.order.invoices.exists() and not (
            self.order.status in (Order.STATUS_PAID, Order.STATUS_PENDING)
            and self.order.invoices.filter(is_cancellation=True).count() >= self.order.invoices.filter(is_cancellation=False).count()
        )
        generate_setting = self.request.event.settings.get('invoice_generate')
        if generate_setting not in ('admin', 'user', 'paid', 'True') or not invoice_qualified(self.order):
            messages.error(self.request, _('You cannot generate an invoice for this order.'))
        elif has_inv:
            messages.error(self.request, _('An invoice for this order already exists.'))
        else:
            inv = generate_invoice(self.order)
            self.order.log_action('pretix.event.order.invoice.generated', user=self.request.user, data={
                'invoice': inv.pk
            })
            messages.success(self.request, _('The invoice has been generated.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        return HttpResponseNotAllowed(['POST'])
class OrderCheckVATID(OrderView):
    """Validate the order's EU VAT ID against the VIES web service."""
    permission = 'can_change_orders'
    def post(self, *args, **kwargs):
        try:
            ia = self.order.invoice_address
        except InvoiceAddress.DoesNotExist:
            messages.error(self.request, _('No VAT ID specified.'))
            return redirect(self.get_order_url())
        else:
            if not ia.vat_id:
                messages.error(self.request, _('No VAT ID specified.'))
                return redirect(self.get_order_url())
            if not ia.country:
                messages.error(self.request, _('No country specified.'))
                return redirect(self.get_order_url())
            if str(ia.country) not in EU_COUNTRIES:
                messages.error(self.request, _('VAT ID could not be checked since a non-EU country has been '
                                               'specified.'))
                return redirect(self.get_order_url())
            # The VAT ID prefix must match the invoice country (e.g. Greece
            # uses 'EL' instead of 'GR', handled by cc_to_vat_prefix).
            if ia.vat_id[:2] != cc_to_vat_prefix(str(ia.country)):
                messages.error(self.request, _('Your VAT ID does not match the selected country.'))
                return redirect(self.get_order_url())
            try:
                result = vat_moss.id.validate(ia.vat_id)
                if result:
                    country_code, normalized_id, company_name = result
                    ia.vat_id_validated = True
                    ia.vat_id = normalized_id
                    ia.save()
            # NOTE(review): only `vat_moss.id` is imported at the top of the
            # file; `vat_moss.errors` is presumably reachable via a transitive
            # import inside vat_moss.id — confirm.
            except vat_moss.errors.InvalidError:
                messages.error(self.request, _('This VAT ID is not valid.'))
            except vat_moss.errors.WebServiceUnavailableError:
                logger.exception('VAT ID checking failed for country {}'.format(ia.country))
                messages.error(self.request, _('The VAT ID could not be checked, as the VAT checking service of '
                                               'the country is currently not available.'))
            else:
                messages.success(self.request, _('This VAT ID is valid.'))
            return redirect(self.get_order_url())
    def get(self, *args, **kwargs):
        return HttpResponseNotAllowed(['POST'])
class OrderInvoiceRegenerate(OrderView):
    """Regenerate an existing invoice document in place (POST only)."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        try:
            inv = self.order.invoices.get(pk=kwargs.get('id'))
        except Invoice.DoesNotExist:
            messages.error(self.request, _('Unknown invoice.'))
        else:
            if inv.canceled:
                messages.error(self.request, _('The invoice has already been canceled.'))
            elif inv.shredded:
                # Personal data was removed; regeneration is refused.
                messages.error(self.request, _('The invoice has been cleaned of personal data.'))
            else:
                inv = regenerate_invoice(inv)
                self.order.log_action('pretix.event.order.invoice.regenerated', user=self.request.user, data={
                    'invoice': inv.pk
                })
                messages.success(self.request, _('The invoice has been regenerated.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        # State-changing action; only POST is allowed.
        return HttpResponseNotAllowed(['POST'])
class OrderInvoiceReissue(OrderView):
    """Cancel an invoice and, unless the order itself is canceled, issue a
    replacement invoice (POST only)."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        try:
            inv = self.order.invoices.get(pk=kwargs.get('id'))
        except Invoice.DoesNotExist:
            messages.error(self.request, _('Unknown invoice.'))
        else:
            if inv.canceled:
                messages.error(self.request, _('The invoice has already been canceled.'))
            elif inv.shredded:
                messages.error(self.request, _('The invoice has been cleaned of personal data.'))
            else:
                c = generate_cancellation(inv)
                if self.order.status != Order.STATUS_CANCELED:
                    # Issue a fresh invoice replacing the canceled one.
                    inv = generate_invoice(self.order)
                else:
                    # Canceled orders only receive the cancellation document.
                    inv = c
                self.order.log_action('pretix.event.order.invoice.reissued', user=self.request.user, data={
                    'invoice': inv.pk
                })
                messages.success(self.request, _('The invoice has been reissued.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        # State-changing action; only POST is allowed.
        return HttpResponseNotAllowed(['POST'])
class OrderResendLink(OrderView):
    """Re-send the order link e-mail, either for the whole order or for a
    single position (POST only)."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        try:
            if 'position' in kwargs:
                # Resend only the link of one specific position.
                p = get_object_or_404(self.order.positions, pk=kwargs['position'])
                p.resend_link(user=self.request.user)
            else:
                self.order.resend_link(user=self.request.user)
        except SendMailException:
            messages.error(self.request, _('There was an error sending the mail. Please try again later.'))
            return redirect(self.get_order_url())

        messages.success(self.request, _('The email has been queued to be sent.'))
        return redirect(self.get_order_url())

    def get(self, *args, **kwargs):
        # State-changing action; only POST is allowed.
        return HttpResponseNotAllowed(['POST'])
class InvoiceDownload(EventPermissionRequiredMixin, View):
    """Stream an invoice PDF, generating it on the fly if necessary."""
    permission = 'can_view_orders'

    def get_order_url(self):
        # Backend detail page of the order the invoice belongs to.
        return reverse('control:event.order', kwargs={
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug,
            'code': self.invoice.order.code
        })

    def get(self, request, *args, **kwargs):
        try:
            self.invoice = Invoice.objects.get(
                event=self.request.event,
                id=self.kwargs['invoice']
            )
        except Invoice.DoesNotExist:
            raise Http404(_('This invoice has not been found'))

        if not self.invoice.file:
            # Trigger generation, then reload to pick up the file reference.
            invoice_pdf(self.invoice.pk)
            self.invoice = Invoice.objects.get(pk=self.invoice.pk)

        if self.invoice.shredded:
            messages.error(request, _('The invoice file is no longer stored on the server.'))
            return redirect(self.get_order_url())

        if not self.invoice.file:
            # Still not there: generation is apparently pending; ask to retry.
            messages.warning(request, _('The invoice file has not yet been generated, we will generate it for you '
                                        'now. Please try again in a few seconds.'))
            return redirect(self.get_order_url())

        try:
            resp = FileResponse(self.invoice.file.file, content_type='application/pdf')
        except FileNotFoundError:
            # DB references a file missing from storage: regenerate and retry.
            # NOTE(review): this recurses into get(); presumably the apply()
            # call recreates the file synchronously — confirm it cannot loop.
            invoice_pdf_task.apply(args=(self.invoice.pk,))
            return self.get(request, *args, **kwargs)
        resp['Content-Disposition'] = 'inline; filename="{}.pdf"'.format(self.invoice.number)
        # Allow inline PDF display despite the Content-Security-Policy headers.
        resp._csp_ignore = True
        return resp
class OrderExtend(OrderView):
    """Change the payment deadline (expiry date) of a pending/expired order."""
    permission = 'can_change_orders'

    def post(self, *args, **kwargs):
        if self.form.is_valid():
            try:
                extend_order(
                    self.order,
                    new_date=self.form.cleaned_data.get('expires'),
                    force=self.form.cleaned_data.get('quota_ignore', False),
                    user=self.request.user
                )
                messages.success(self.request, _('The payment term has been changed.'))
            except OrderError as e:
                # Recoverable, e.g. quota exhausted without "force": stay here.
                messages.error(self.request, str(e))
                return self._redirect_here()
            except LockTimeoutException:
                messages.error(self.request, _('We were not able to process the request completely as the '
                                               'server was too busy.'))
            return self._redirect_back()
        else:
            return self.get(*args, **kwargs)

    def dispatch(self, request, *args, **kwargs):
        # Only pending/expired orders have a payment term that can be changed.
        if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_EXPIRED):
            messages.error(self.request, _('This action is only allowed for pending orders.'))
            return self._redirect_back()
        # Bug fix: positional arguments were previously forwarded as
        # ``*kwargs`` instead of ``*args``, which would pass keyword values
        # as positionals and break the call for URL patterns with captures.
        return super().dispatch(request, *args, **kwargs)

    def _redirect_here(self):
        # Redirect back onto the extend page itself.
        return redirect('control:event.order.extend',
                        event=self.request.event.slug,
                        organizer=self.request.event.organizer.slug,
                        code=self.order.code)

    def get(self, *args, **kwargs):
        return render(self.request, 'pretixcontrol/order/extend.html', {
            'order': self.order,
            'form': self.form,
        })

    @cached_property
    def form(self):
        return ExtendForm(instance=self.order,
                          data=self.request.POST if self.request.method == "POST" else None)
class OrderChange(OrderView):
    """Change the composition of a pending or paid order.

    The page combines several sub-forms: a change form per existing position
    and fee, a formset for newly added positions, and a form for order-wide
    operations.  All requested operations are collected into a single
    OrderChangeManager and committed atomically at the end of post().
    """
    permission = 'can_change_orders'
    template_name = 'pretixcontrol/order/change.html'

    def dispatch(self, request, *args, **kwargs):
        # Changing other order states is not supported.
        if self.order.status not in (Order.STATUS_PENDING, Order.STATUS_PAID):
            messages.error(self.request, _('This action is only allowed for pending or paid orders.'))
            return self._redirect_back()
        return super().dispatch(request, *args, **kwargs)

    @cached_property
    def other_form(self):
        # Order-wide operations (notify, reissue invoice, recalculate taxes, ...).
        return OtherOperationsForm(prefix='other', order=self.order,
                                   data=self.request.POST if self.request.method == "POST" else None)

    @cached_property
    def add_formset(self):
        # Formset for positions to be newly added to the order.
        ff = formset_factory(
            OrderPositionAddForm, formset=OrderPositionAddFormset,
            can_order=False, can_delete=True, extra=0
        )
        return ff(
            prefix='add',
            order=self.order,
            data=self.request.POST if self.request.method == "POST" else None
        )

    @cached_property
    def fees(self):
        # All fees of the order, each annotated with a bound change form and
        # a flag for whether the event's default tax rate applies.
        fees = list(self.order.fees.all())
        for f in fees:
            f.form = OrderFeeChangeForm(prefix='of-{}'.format(f.pk), instance=f,
                                        data=self.request.POST if self.request.method == "POST" else None)
            try:
                ia = self.order.invoice_address
            except InvoiceAddress.DoesNotExist:
                ia = None
            f.apply_tax = self.request.event.settings.tax_rate_default and self.request.event.settings.tax_rate_default.tax_applicable(invoice_address=ia)
        return fees

    @cached_property
    def positions(self):
        # All positions of the order, each annotated with a bound change form.
        positions = list(self.order.positions.all())
        for p in positions:
            p.form = OrderPositionChangeForm(prefix='op-{}'.format(p.pk), instance=p,
                                             initial={'seat': p.seat.seat_guid if p.seat else None},
                                             data=self.request.POST if self.request.method == "POST" else None)
            try:
                ia = self.order.invoice_address
            except InvoiceAddress.DoesNotExist:
                ia = None
            p.apply_tax = p.item.tax_rule and p.item.tax_rule.tax_applicable(invoice_address=ia)
        return positions

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['positions'] = self.positions
        ctx['fees'] = self.fees
        ctx['add_formset'] = self.add_formset
        ctx['other_form'] = self.other_form
        return ctx

    def _process_other(self, ocm):
        # Queue order-wide operations; returns False on validation errors.
        if not self.other_form.is_valid():
            return False
        else:
            if self.other_form.cleaned_data['recalculate_taxes']:
                ocm.recalculate_taxes()
            return True

    def _process_add(self, ocm):
        # Queue the addition of new positions; returns False on any error.
        if not self.add_formset.is_valid():
            return False
        else:
            for f in self.add_formset.forms:
                if f in self.add_formset.deleted_forms or not f.has_changed():
                    continue
                # ``itemvar`` encodes "<item pk>" or "<item pk>-<variation pk>".
                if '-' in f.cleaned_data['itemvar']:
                    itemid, varid = f.cleaned_data['itemvar'].split('-')
                else:
                    itemid, varid = f.cleaned_data['itemvar'], None
                item = Item.objects.get(pk=itemid, event=self.request.event)
                if varid:
                    variation = ItemVariation.objects.get(pk=varid, item=item)
                else:
                    variation = None
                try:
                    ocm.add_position(item, variation,
                                     f.cleaned_data['price'],
                                     f.cleaned_data.get('addon_to'),
                                     f.cleaned_data.get('subevent'),
                                     f.cleaned_data.get('seat'))
                except OrderError as e:
                    # Surface the error next to the offending form.
                    f.custom_error = str(e)
                    return False
        return True

    def _process_fees(self, ocm):
        # Queue fee cancellations and value changes; returns False on errors.
        for f in self.fees:
            if not f.form.is_valid():
                return False
            try:
                if f.form.cleaned_data['operation_cancel']:
                    ocm.cancel_fee(f)
                    continue
                if f.form.cleaned_data['value'] != f.value:
                    ocm.change_fee(f, f.form.cleaned_data['value'])
            except OrderError as e:
                f.custom_error = str(e)
                return False
        return True

    def _process_change(self, ocm):
        # Queue per-position changes (cancel, item/variation, subevent, seat,
        # price, split, secret regeneration); returns False on errors.
        for p in self.positions:
            if not p.form.is_valid():
                return False
            try:
                if p.form.cleaned_data['operation_cancel']:
                    ocm.cancel(p)
                    continue

                if p.form.cleaned_data['itemvar']:
                    if '-' in p.form.cleaned_data['itemvar']:
                        itemid, varid = p.form.cleaned_data['itemvar'].split('-')
                    else:
                        itemid, varid = p.form.cleaned_data['itemvar'], None

                    item = Item.objects.get(pk=itemid, event=self.request.event)
                    if varid:
                        variation = ItemVariation.objects.get(pk=varid, item=item)
                    else:
                        variation = None
                    if item != p.item or variation != p.variation:
                        ocm.change_item(p, item, variation)

                if self.request.event.has_subevents and p.form.cleaned_data['subevent'] and p.form.cleaned_data['subevent'] != p.subevent:
                    ocm.change_subevent(p, p.form.cleaned_data['subevent'])

                if p.seat and p.form.cleaned_data['seat'] and p.form.cleaned_data['seat'] != p.seat.seat_guid:
                    ocm.change_seat(p, p.form.cleaned_data['seat'])

                if p.form.cleaned_data['price'] != p.price:
                    ocm.change_price(p, p.form.cleaned_data['price'])

                if p.form.cleaned_data['operation_split']:
                    ocm.split(p)

                if p.form.cleaned_data['operation_secret']:
                    ocm.regenerate_secret(p)

            except OrderError as e:
                p.custom_error = str(e)
                return False
        return True

    def post(self, *args, **kwargs):
        # Default to notifying the customer if the options form did not validate.
        notify = self.other_form.cleaned_data['notify'] if self.other_form.is_valid() else True
        ocm = OrderChangeManager(
            self.order,
            user=self.request.user,
            notify=notify,
            reissue_invoice=self.other_form.cleaned_data['reissue_invoice'] if self.other_form.is_valid() else True
        )
        # Short-circuits on the first failing group of forms.
        form_valid = self._process_add(ocm) and self._process_fees(ocm) and self._process_change(ocm) and self._process_other(ocm)
        if not form_valid:
            messages.error(self.request, _('An error occurred. Please see the details below.'))
        else:
            try:
                # Commit all queued operations atomically.
                ocm.commit(check_quotas=not self.other_form.cleaned_data['ignore_quotas'])
            except OrderError as e:
                messages.error(self.request, str(e))
            else:
                if notify:
                    messages.success(self.request, _('The order has been changed and the user has been notified.'))
                else:
                    messages.success(self.request, _('The order has been changed.'))
                return self._redirect_back()
        return self.get(*args, **kwargs)
class OrderModifyInformation(OrderQuestionsViewMixin, OrderView):
    """Edit question answers and the invoice address of an order from the backend."""
    permission = 'can_change_orders'
    template_name = 'pretixcontrol/order/change_questions.html'
    only_user_visible = False
    all_optional = True

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['other_form'] = self.other_form
        return ctx

    @cached_property
    def other_form(self):
        # Only used for its "notify" flag here; notification defaults to off.
        return OtherOperationsForm(prefix='other', order=self.order, initial={'notify': False},
                                   data=self.request.POST if self.request.method == "POST" else None)

    def post(self, request, *args, **kwargs):
        failed = not self.save() or not self.invoice_form.is_valid() or not self.other_form.is_valid()
        notify = self.other_form.cleaned_data['notify'] if self.other_form.is_valid() else True
        if failed:
            messages.error(self.request,
                           _("We had difficulties processing your input. Please review the errors below."))
            return self.get(request, *args, **kwargs)
        if notify:
            notify_user_changed_order(self.order)
        if hasattr(self.invoice_form, 'save'):
            self.invoice_form.save()
        # Record the full set of changed answer/invoice data in the log.
        self.order.log_action('pretix.event.order.modified', {
            'invoice_data': self.invoice_form.cleaned_data,
            'data': [{
                k: (f.cleaned_data.get(k).name if isinstance(f.cleaned_data.get(k), File) else f.cleaned_data.get(k))
                for k in f.changed_data
            } for f in self.forms]
        }, user=request.user)
        if self.invoice_form.has_changed():
            # i18n fix: pass the literal directly to gettext. The previous
            # ``_(variable)`` form cannot be found by makemessages, so the
            # message would never appear in translation catalogs.
            messages.success(self.request, _('The invoice address has been updated. If you want to generate a new '
                                             'invoice, you need to do this manually.'))
        tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
        order_modified.send(sender=self.request.event, order=self.order)
        return redirect(self.get_order_url())
class OrderContactChange(OrderView):
    """Change the contact e-mail address of an order and optionally rotate
    the order and ticket secrets."""
    permission = 'can_change_orders'
    template_name = 'pretixcontrol/order/change_contact.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data()
        ctx['form'] = self.form
        return ctx

    @cached_property
    def form(self):
        return OrderContactForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None
        )

    def post(self, *args, **kwargs):
        old_email = self.order.email
        changed = False
        if self.form.is_valid():
            new_email = self.form.cleaned_data['email']
            if new_email != old_email:
                changed = True
                self.order.log_action(
                    'pretix.event.order.contact.changed',
                    data={
                        'old_email': old_email,
                        'new_email': self.form.cleaned_data['email'],
                    },
                    user=self.request.user,
                )
            if self.form.cleaned_data['regenerate_secrets']:
                # Rotating the secrets invalidates all previously issued
                # links and cached ticket files.
                changed = True
                self.order.secret = generate_secret()
                for op in self.order.all_positions.all():
                    op.secret = generate_position_secret()
                    op.save()
                tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
                self.order.log_action('pretix.event.order.secret.changed', user=self.request.user)

            self.form.save()
            if changed:
                messages.success(self.request, _('The order has been changed.'))
            else:
                messages.success(self.request, _('Nothing about the order had to be changed.'))
            return redirect(self.get_order_url())
        return self.get(*args, **kwargs)
class OrderLocaleChange(OrderView):
    """Change the locale used for all communication regarding an order."""
    permission = 'can_change_orders'
    template_name = 'pretixcontrol/order/change_locale.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data()
        ctx['form'] = self.form
        return ctx

    @cached_property
    def form(self):
        return OrderLocaleForm(
            instance=self.order,
            data=self.request.POST if self.request.method == "POST" else None
        )

    def post(self, *args, **kwargs):
        old_locale = self.order.locale
        if self.form.is_valid():
            self.order.log_action(
                'pretix.event.order.locale.changed',
                data={
                    'old_locale': old_locale,
                    'new_locale': self.form.cleaned_data['locale'],
                },
                user=self.request.user,
            )
            self.form.save()
            # Cached tickets embed localized text, so they must be rebuilt.
            tickets.invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'order': self.order.pk})
            messages.success(self.request, _('The order has been changed.'))
            return redirect(self.get_order_url())
        return self.get(*args, **kwargs)
class OrderViewMixin:
    """Mixin resolving the order addressed by the ``code`` URL keyword argument."""

    def get_object(self, queryset=None):
        """Return the order of the current event matching the URL code, or 404."""
        code = self.kwargs['code'].upper()
        try:
            return Order.objects.get(event=self.request.event, code=code)
        except Order.DoesNotExist:
            raise Http404()

    @cached_property
    def order(self):
        # Resolved lazily and memoized for the lifetime of the view instance.
        return self.get_object()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['order'] = self.order
        return context
class OrderSendMail(EventPermissionRequiredMixin, OrderViewMixin, FormView):
    """Compose, preview and send a custom e-mail to the buyer of an order."""
    template_name = 'pretixcontrol/order/sendmail.html'
    permission = 'can_change_orders'
    form_class = OrderMailForm

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['order'] = Order.objects.get(
            event=self.request.event,
            code=self.kwargs['code'].upper()
        )
        # Allow pre-filling subject and message via query parameters.
        kwargs['initial'] = {}
        if self.request.GET.get('subject'):
            kwargs['initial']['subject'] = self.request.GET.get('subject')
        if self.request.GET.get('message'):
            kwargs['initial']['message'] = self.request.GET.get('message')
        return kwargs

    def form_invalid(self, form):
        messages.error(self.request, _('We could not send the email. See below for details.'))
        return super().form_invalid(form)

    def form_valid(self, form):
        order = Order.objects.get(
            event=self.request.event,
            code=self.kwargs['code'].upper()
        )
        self.preview_output = {}
        # Render in the order's locale so placeholders resolve correctly.
        with language(order.locale):
            email_context = get_email_context(event=order.event, order=order)
            email_template = LazyI18nString(form.cleaned_data['message'])
            email_content = render_mail(email_template, email_context)
            if self.request.POST.get('action') == 'preview':
                self.preview_output = {
                    'subject': _('Subject: {subject}').format(subject=form.cleaned_data['subject']),
                    'html': markdown_compile_email(email_content)
                }
                return self.get(self.request, *self.args, **self.kwargs)
            else:
                try:
                    order.send_mail(
                        form.cleaned_data['subject'], email_template,
                        email_context, 'pretix.event.order.email.custom_sent',
                        self.request.user, auto_email=False
                    )
                    # i18n fix: translate the literal first, THEN format it.
                    # ``_('...'.format(x))`` looked up the already-formatted
                    # string in the catalogs and could never be translated.
                    messages.success(self.request,
                                     _('Your message has been queued and will be sent to {}.').format(order.email))
                except SendMailException:
                    messages.error(
                        self.request,
                        _('Failed to send mail to the following user: {}').format(order.email)
                    )
        return super(OrderSendMail, self).form_valid(form)

    def get_success_url(self):
        return reverse('control:event.order', kwargs={
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug,
            'code': self.kwargs['code']
        })

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        # Only set after a preview POST; None on plain GET requests.
        ctx['preview_output'] = getattr(self, 'preview_output', None)
        return ctx
class OrderEmailHistory(EventPermissionRequiredMixin, OrderViewMixin, ListView):
    """Paginated history of all e-mail related log entries of one order."""
    template_name = 'pretixcontrol/order/mail_history.html'
    permission = 'can_view_orders'
    model = LogEntry
    context_object_name = 'logs'
    paginate_by = 10

    def get_queryset(self):
        """Return this order's log entries that relate to e-mail sending."""
        order = get_object_or_404(
            Order,
            event=self.request.event,
            code=self.kwargs['code'].upper(),
        )
        return order.all_logentries().filter(action_type__contains="order.email")
class AnswerDownload(EventPermissionRequiredMixin, OrderViewMixin, ListView):
    """Serve a file that was uploaded as an answer to an order question."""
    permission = 'can_view_orders'

    def get(self, request, *args, **kwargs):
        answid = kwargs.get('answer')
        token = request.GET.get('token', '')

        answer = get_object_or_404(QuestionAnswer, orderposition__order=self.order, id=answid)
        if not answer.file:
            raise Http404()
        # Downloads are additionally protected by a short-lived token.
        if not check_token(request, answer, token):
            raise Http404(_("This link is no longer valid. Please go back, refresh the page, and try again."))

        ftype, ignored = mimetypes.guess_type(answer.file.name)
        # Fix: fall back to the IANA-registered 'application/octet-stream'
        # instead of the non-standard 'application/binary'.
        resp = FileResponse(answer.file, content_type=ftype or 'application/octet-stream')
        # Fix: the previous split('.', 1)[1] raised IndexError for stored
        # file names without an extension; fall back to the full base name.
        basename = os.path.basename(answer.file.name)
        ext = basename.split('.', 1)[1] if '.' in basename else basename
        resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}-{}"'.format(
            self.request.event.slug.upper(), self.order.code,
            answer.orderposition.positionid,
            ext
        )
        return resp
class OverView(EventPermissionRequiredMixin, TemplateView):
    """Sales overview: aggregated order data per item category, filterable
    by subevent and date range."""
    template_name = 'pretixcontrol/orders/overview.html'
    permission = 'can_view_orders'

    @cached_property
    def filter_form(self):
        return OverviewFilterForm(data=self.request.GET, event=self.request.event)

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data()
        if self.filter_form.is_valid():
            ctx['items_by_category'], ctx['total'] = order_overview(
                self.request.event,
                subevent=self.filter_form.cleaned_data.get('subevent'),
                date_filter=self.filter_form.cleaned_data['date_axis'],
                date_from=self.filter_form.cleaned_data['date_from'],
                date_until=self.filter_form.cleaned_data['date_until'],
                fees=True
            )
        else:
            # Invalid filters: fall back to an unfiltered overview.
            ctx['items_by_category'], ctx['total'] = order_overview(
                self.request.event,
                fees=True
            )
        # Warn when filtering by subevent while non-zero fees exist, since
        # fee totals are computed across the whole event.
        ctx['subevent_warning'] = (
            self.request.event.has_subevents and
            self.filter_form.is_valid() and
            self.filter_form.cleaned_data.get('subevent') and
            OrderFee.objects.filter(order__event=self.request.event).exclude(value=0).exists()
        )
        ctx['filter_form'] = self.filter_form
        return ctx
class OrderGo(EventPermissionRequiredMixin, View):
    """Quick-jump: resolve a pasted order code, order URL or invoice number
    to the order detail page."""
    permission = 'can_view_orders'

    def get_order(self, code):
        # Try the code verbatim first, then its normalized variant
        # (see Order.normalize_code).
        try:
            return Order.objects.get(code=code, event=self.request.event)
        except Order.DoesNotExist:
            return Order.objects.get(code=Order.normalize_code(code), event=self.request.event)

    def get(self, request, *args, **kwargs):
        code = request.GET.get("code", "").upper().strip()
        if '://' in code:
            # A full order URL was pasted; extract the code from its path.
            m = re.match('.*/ORDER/([A-Z0-9]{' + str(settings.ENTROPY['order_code']) + '})/.*', code)
            if m:
                code = m.group(1)
        try:
            # Strip an optional "<EVENTSLUG>-" prefix from the input.
            if code.startswith(request.event.slug.upper()):
                code = code[len(request.event.slug):]
            if code.startswith('-'):
                code = code[1:]
            order = self.get_order(code)
            return redirect('control:event.order', event=request.event.slug, organizer=request.event.organizer.slug,
                            code=order.code)
        except Order.DoesNotExist:
            # Fall back to interpreting the input as an invoice number.
            try:
                i = self.request.event.invoices.get(Q(invoice_no=code) | Q(full_invoice_no=code))
                return redirect('control:event.order', event=request.event.slug, organizer=request.event.organizer.slug,
                                code=i.order.code)
            except Invoice.DoesNotExist:
                pass
        messages.error(request, _('There is no order with the given order code.'))
        return redirect('control:event.orders', event=request.event.slug, organizer=request.event.organizer.slug)
class ExportMixin:
    """Provides the list of data exporters registered for the current event,
    each with a bound configuration form."""

    @cached_property
    def exporters(self):
        exporters = []
        responses = register_data_exporters.send(self.request.event)
        for ex in sorted([response(self.request.event) for r, response in responses], key=lambda ex: str(ex.verbose_name)):
            if self.request.GET.get("identifier") and ex.identifier != self.request.GET.get("identifier"):
                # A specific exporter was requested; skip all others.
                continue

            # Use a throwaway form parse cycle to turn query-string values
            # into useful initial data for the real form below.
            test_form = ExporterForm(data=self.request.GET, prefix=ex.identifier)
            test_form.fields = ex.export_form_fields
            test_form.is_valid()
            initial = {
                k: v for k, v in test_form.cleaned_data.items() if ex.identifier + "-" + k in self.request.GET
            }

            ex.form = ExporterForm(
                data=(self.request.POST if self.request.method == 'POST' else None),
                prefix=ex.identifier,
                initial=initial
            )
            ex.form.fields = ex.export_form_fields
            exporters.append(ex)
        return exporters
class ExportDoView(EventPermissionRequiredMixin, ExportMixin, AsyncAction, View):
    """Start an asynchronous data export and redirect to the resulting file."""
    permission = 'can_view_orders'
    known_errortypes = ['ExportError']
    task = export

    def get_success_message(self, value):
        return None

    def get_success_url(self, value):
        return reverse('cachedfile.download', kwargs={'id': str(value)})

    def get_error_url(self):
        return reverse('control:event.orders.export', kwargs={
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug
        })

    @cached_property
    def exporter(self):
        # Look up the exporter selected in the POST data; None if unknown.
        for ex in self.exporters:
            if ex.identifier == self.request.POST.get("exporter"):
                return ex

    def post(self, request, *args, **kwargs):
        if not self.exporter:
            messages.error(self.request, _('The selected exporter was not found.'))
            # Bug fix: ``redirect()`` does not accept a ``kwargs=`` dict the
            # way ``reverse()`` does — the previous call handed ``kwargs`` to
            # reverse() as a URL keyword named "kwargs" and raised
            # NoReverseMatch. Pass the URL arguments directly instead.
            return redirect('control:event.orders.export',
                            event=self.request.event.slug,
                            organizer=self.request.event.organizer.slug)
        if not self.exporter.form.is_valid():
            messages.error(self.request, _('There was a problem processing your input. See below for error details.'))
            return self.get(request, *args, **kwargs)
        # Pre-register the file the export task will write into; it expires
        # automatically after three days.
        cf = CachedFile()
        cf.date = now()
        cf.expires = now() + timedelta(days=3)
        cf.save()
        return self.do(self.request.event.id, str(cf.id), self.exporter.identifier, self.exporter.form.cleaned_data)
class ExportView(EventPermissionRequiredMixin, ExportMixin, TemplateView):
    """Render the export configuration page listing all available exporters."""
    permission = 'can_view_orders'
    template_name = 'pretixcontrol/orders/export.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['exporters'] = self.exporters
        return context
class RefundList(EventPermissionRequiredMixin, PaginationMixin, ListView):
    """Paginated, filterable list of all refunds of the current event."""
    model = OrderRefund
    context_object_name = 'refunds'
    template_name = 'pretixcontrol/orders/refunds.html'
    permission = 'can_view_orders'

    @cached_property
    def filter_form(self):
        # Defaults to showing refunds in "open" state.
        return RefundFilterForm(data=self.request.GET, event=self.request.event,
                                initial={'status': 'open'})

    def get_queryset(self):
        queryset = OrderRefund.objects.filter(
            order__event=self.request.event
        ).select_related('order')
        if self.filter_form.is_valid():
            queryset = self.filter_form.filter_qs(queryset)
        return queryset.distinct()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['filter_form'] = self.filter_form
        return context
| true | true |
f7201a7642abbd76d0bee748789b15d308c71b10 | 9,682 | py | Python | vspk/cli/cli.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | 19 | 2016-03-07T12:34:22.000Z | 2020-06-11T11:09:02.000Z | vspk/cli/cli.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | 40 | 2016-06-13T15:36:54.000Z | 2020-11-10T18:14:43.000Z | vspk/cli/cli.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | 15 | 2016-06-10T22:06:01.000Z | 2020-12-15T18:37:42.000Z | #!/usr/bin/env python
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
sys.path.append("../")
class _HelpAction(argparse._HelpAction):
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
print("\n{}:\n{}".format(choice.upper(), "-" * (len(choice) + 1)))
print(subparser.format_help())
parser.exit()
def main(argv=sys.argv):
    """Entry point of the VSD CLI.

    Builds the argument parser (one sub-command per CRUD/assignment verb,
    all sharing the connection options of ``default_parser``) and hands the
    parsed arguments over to ``CLICommand``.

    :param argv: full argument vector including the program name; defaults
        to ``sys.argv``. Bug fix: this parameter was previously ignored and
        ``parse_args()`` always read ``sys.argv`` — it is now honored, which
        also makes the function testable with a synthetic argv.
    """
    # Options shared by every sub-command (credentials, endpoint, output).
    default_parser = argparse.ArgumentParser(description="CLI for VSD", add_help=False)
    default_parser.add_argument("-v", "--verbose", help="Activate verbose mode", action="store_true")
    default_parser.add_argument("--username", help="Username to get an api key or set 'VSD_USERNAME' in your variable environment")
    default_parser.add_argument("--password", help="Password to get an api key or set 'VSD_PASSWORD' in your variable environment")
    default_parser.add_argument("--api", help="URL of the API endpoint or set 'VSD_API_URL' in your variable environment")
    default_parser.add_argument("--version", help="Version of the API or set 'VSD_API_VERSION' in your variable environment")
    default_parser.add_argument("--enterprise", help="Name of the enterprise to connect or set 'VSD_ENTERPRISE' in your variable environment")
    default_parser.add_argument("--json", help="Add this option get a JSON output or set VSD_JSON_OUTPUT=True", action="store_true")

    parser = argparse.ArgumentParser(description="CLI for VSD Software Development Kit", add_help=False)
    # Custom help action prints the help of every sub-command as well.
    parser.add_argument("-h", "--help", action=_HelpAction, help="help for help if you need some help")

    subparsers = parser.add_subparsers(dest="command",
                                       title="All available commands")

    # List Command
    list_parser = subparsers.add_parser("list", description="List all objects", parents=[default_parser])
    list_parser.add_argument("list", help="Name of the object (See command 'objects' to list all objects name)")
    list_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the PARENT_NAME and PARENT_UUID")
    list_parser.add_argument("-f", "--filter", dest="filter", help="Specify a filter predicate")
    list_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
    list_parser.add_argument("-q", "--query", dest="query_parameters", nargs="*", help="List of Key=Value that will be sent as query parameters", required=False)
    list_parser.add_argument("-p", "--page", dest="page", help="The page number that needs to be retreived. This value is ignored unless you also configure the page size parameter. Default value is 0", type=int, default=0)
    list_parser.add_argument("-s", "--page-size", dest="page_size", help="The size of a single page that needs to be retreived. If this is configured, the list command will only return a maximum of this amount of results", type=int)

    # Count Command
    count_parser = subparsers.add_parser("count", description="Count all objects", parents=[default_parser])
    count_parser.add_argument("count", help="Name of the object (See command 'objects' to list all objects name)")
    count_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the parent name and its uuid")
    count_parser.add_argument("-f", "--filter", dest="filter", help="Specify a filter predicate")
    count_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
    count_parser.add_argument("-q", "--query", dest="query_parameters", nargs="*", help="List of Key=Value that will be sent as query parameters", required=False)

    # Show Command
    show_parser = subparsers.add_parser("show", description="Show a specific object", parents=[default_parser])
    show_parser.add_argument("show", help="Name of the object to show (See command 'objects' to list all objects name)")
    show_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
    show_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)

    # Create Command
    create_parser = subparsers.add_parser("create", description="Create a new object", parents=[default_parser])
    create_parser.add_argument("create", help="Name of the object to create (See command 'objects' to list all objects name)")
    create_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the parent name and its uuid")
    create_parser.add_argument("-p", "--params", dest="params", nargs="*", help="List of Key=Value parameters", required=True)

    # Update Command
    update_parser = subparsers.add_parser("update", description="Update an existing object", parents=[default_parser])
    update_parser.add_argument("update", help="Name of the object to update (See command 'objects' to list all objects name)")
    update_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
    update_parser.add_argument("-p", "--params", dest="params", nargs="*", help="List of Key=Value parameters", required=True)

    # Delete Command
    delete_parser = subparsers.add_parser("delete", description="Delete an existing object", parents=[default_parser])
    delete_parser.add_argument("delete", help="Name of the object to update (See command 'objects' to list all objects name)")
    delete_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)

    # Assign Command
    assign_parser = subparsers.add_parser('assign', description="Assign a set of new objects according to their identifier", parents=[default_parser])
    assign_parser.add_argument('assign', help='Name of the object to assign (See command `objects` to list all objects name)')
    assign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to assign', required=True)
    assign_parser.add_argument('--to', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)

    # Unassign Command
    unassign_parser = subparsers.add_parser('unassign', description="Unassign a set of new objects according to their identifier", parents=[default_parser])
    unassign_parser.add_argument('unassign', help='Name of the object to unassign (See command `objects` to list all objects name)')
    unassign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to unassign', required=True)
    unassign_parser.add_argument('--from', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)

    # Reassign Command
    reassign_parser = subparsers.add_parser('reassign', description="Reassign all objects according to their identifier", parents=[default_parser])
    reassign_parser.add_argument('reassign', help='Name of the object to reassign (See command `objects` to list all objects name)')
    reassign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to reassign. If --ids is not specified, it will remove all assigned objects')
    reassign_parser.add_argument('--to', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)

    # Resources Command
    objects_parser = subparsers.add_parser("objects", description="Explore all objects", parents=[default_parser])
    objects_parser.add_argument("-f", "--filter", dest="filter", help="Filter by name (ex: -f nsg)")
    objects_parser.add_argument("-p", "--parent", dest="parent", help="Filter by parent (ex -p enterprise)")
    objects_parser.add_argument("-c", "--child", dest="child", help="Filter by children (ex: -c domain)")

    # Parse the provided argv (skipping the program name) instead of
    # unconditionally reading sys.argv.
    args = parser.parse_args(argv[1:])

    # Imported lazily to keep parser construction importable on its own.
    from commands import CLICommand
    CLICommand.execute(args)
import argparse
import sys
sys.path.append("../")
class _HelpAction(argparse._HelpAction):
    """Custom ``--help`` action that also prints the help of every sub-command.

    argparse's stock help only shows the top-level usage; this action first
    prints the main parser's help, then walks all registered subparsers and
    prints each one's full help under an underlined section title, and
    finally exits the process (via ``parser.exit()``).

    NOTE(review): this deliberately pokes at argparse private internals
    (``argparse._HelpAction``, ``parser._actions``,
    ``argparse._SubParsersAction``) — there is no public API for enumerating
    subparsers, so this may break on an argparse internals change.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        # Top-level usage/options first, as the default action would print.
        parser.print_help()
        # Collect the (normally single) subparsers action registered on the parser.
        subparsers_actions = [
            action for action in parser._actions
            if isinstance(action, argparse._SubParsersAction)]
        for subparsers_action in subparsers_actions:
            # One underlined section per sub-command, e.g. "LIST:\n-----".
            for choice, subparser in subparsers_action.choices.items():
                print("\n{}:\n{}".format(choice.upper(), "-" * (len(choice) + 1)))
                print(subparser.format_help())
        # Terminate like the builtin help action does.
        parser.exit()
def main(argv=sys.argv):
    """Entry point of the VSD command line interface.

    Builds the argument parser — one sub-command per operation, all of them
    sharing the connection/output options of ``default_parser`` — parses
    *argv* and dispatches the parsed namespace to ``CLICommand.execute``.

    :param argv: full argument vector, ``argv[0]`` being the program name.
        Defaults to ``sys.argv`` so a plain ``main()`` call keeps the usual
        behavior; callers may pass a custom vector (previously this
        parameter was silently ignored and ``sys.argv`` was always used).
    """
    # Options shared by every sub-command (connection and output settings).
    default_parser = argparse.ArgumentParser(description="CLI for VSD", add_help=False)
    default_parser.add_argument("-v", "--verbose", help="Activate verbose mode", action="store_true")
    default_parser.add_argument("--username", help="Username to get an api key or set 'VSD_USERNAME' in your variable environment")
    default_parser.add_argument("--password", help="Password to get an api key or set 'VSD_PASSWORD' in your variable environment")
    default_parser.add_argument("--api", help="URL of the API endpoint or set 'VSD_API_URL' in your variable environment")
    default_parser.add_argument("--version", help="Version of the API or set 'VSD_API_VERSION' in your variable environment")
    default_parser.add_argument("--enterprise", help="Name of the enterprise to connect or set 'VSD_ENTERPRISE' in your variable environment")
    default_parser.add_argument("--json", help="Add this option get a JSON output or set VSD_JSON_OUTPUT=True", action="store_true")
    # Main parser; -h is handled by _HelpAction so sub-command help is printed too.
    parser = argparse.ArgumentParser(description="CLI for VSD Software Development Kit", add_help=False)
    parser.add_argument("-h", "--help", action=_HelpAction, help="help for help if you need some help")
    subparsers = parser.add_subparsers(dest="command",
                                       title="All available commands")
    # List Command
    list_parser = subparsers.add_parser("list", description="List all objects", parents=[default_parser])
    list_parser.add_argument("list", help="Name of the object (See command 'objects' to list all objects name)")
    list_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the PARENT_NAME and PARENT_UUID")
    list_parser.add_argument("-f", "--filter", dest="filter", help="Specify a filter predicate")
    list_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
    list_parser.add_argument("-q", "--query", dest="query_parameters", nargs="*", help="List of Key=Value that will be sent as query parameters", required=False)
    list_parser.add_argument("-p", "--page", dest="page", help="The page number that needs to be retreived. This value is ignored unless you also configure the page size parameter. Default value is 0", type=int, default=0)
    list_parser.add_argument("-s", "--page-size", dest="page_size", help="The size of a single page that needs to be retreived. If this is configured, the list command will only return a maximum of this amount of results", type=int)
    # Count Command
    count_parser = subparsers.add_parser("count", description="Count all objects", parents=[default_parser])
    count_parser.add_argument("count", help="Name of the object (See command 'objects' to list all objects name)")
    count_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the parent name and its uuid")
    count_parser.add_argument("-f", "--filter", dest="filter", help="Specify a filter predicate")
    count_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
    count_parser.add_argument("-q", "--query", dest="query_parameters", nargs="*", help="List of Key=Value that will be sent as query parameters", required=False)
    # Show Command
    show_parser = subparsers.add_parser("show", description="Show a specific object", parents=[default_parser])
    show_parser.add_argument("show", help="Name of the object to show (See command 'objects' to list all objects name)")
    show_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
    show_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
    # Create Command
    create_parser = subparsers.add_parser("create", description="Create a new object", parents=[default_parser])
    create_parser.add_argument("create", help="Name of the object to create (See command 'objects' to list all objects name)")
    create_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the parent name and its uuid")
    create_parser.add_argument("-p", "--params", dest="params", nargs="*", help="List of Key=Value parameters", required=True)
    # Update Command
    update_parser = subparsers.add_parser("update", description="Update an existing object", parents=[default_parser])
    update_parser.add_argument("update", help="Name of the object to update (See command 'objects' to list all objects name)")
    update_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
    update_parser.add_argument("-p", "--params", dest="params", nargs="*", help="List of Key=Value parameters", required=True)
    # Delete Command
    delete_parser = subparsers.add_parser("delete", description="Delete an existing object", parents=[default_parser])
    delete_parser.add_argument("delete", help="Name of the object to update (See command 'objects' to list all objects name)")
    delete_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
    # Assign Command
    assign_parser = subparsers.add_parser('assign', description="Assign a set of new objects according to their identifier", parents=[default_parser])
    assign_parser.add_argument('assign', help='Name of the object to assign (See command `objects` to list all objects name)')
    assign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to assign', required=True)
    assign_parser.add_argument('--to', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
    # Unassign Command
    unassign_parser = subparsers.add_parser('unassign', description="Unassign a set of new objects according to their identifier", parents=[default_parser])
    unassign_parser.add_argument('unassign', help='Name of the object to unassign (See command `objects` to list all objects name)')
    unassign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to unassign', required=True)
    unassign_parser.add_argument('--from', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
    # Reassign Command
    reassign_parser = subparsers.add_parser('reassign', description="Reassign all objects according to their identifier", parents=[default_parser])
    reassign_parser.add_argument('reassign', help='Name of the object to reassign (See command `objects` to list all objects name)')
    reassign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to reassign. If --ids is not specified, it will remove all assigned objects')
    reassign_parser.add_argument('--to', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
    # Objects Command
    objects_parser = subparsers.add_parser("objects", description="Explore all objects", parents=[default_parser])
    objects_parser.add_argument("-f", "--filter", dest="filter", help="Filter by name (ex: -f nsg)")
    objects_parser.add_argument("-p", "--parent", dest="parent", help="Filter by parent (ex -p enterprise)")
    objects_parser.add_argument("-c", "--child", dest="child", help="Filter by children (ex: -c domain)")
    # BUGFIX: honor the *argv* parameter instead of always reading sys.argv.
    # argv[0] is the program name, so only the remainder is parsed.
    args = parser.parse_args(argv[1:])
    from commands import CLICommand
    CLICommand.execute(args)
# Allow running this module directly as a console script.
if __name__ == "__main__":
    main()
f7201ae4d61deb09277603a9c4d12dfb5b5dd40b | 1,565 | py | Python | deepmath/deephol/public/proof_assistant.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 830 | 2016-11-07T21:46:27.000Z | 2022-03-23T08:01:03.000Z | deepmath/deephol/public/proof_assistant.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 26 | 2016-11-07T22:06:31.000Z | 2022-02-16T00:18:29.000Z | deepmath/deephol/public/proof_assistant.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 168 | 2016-11-07T21:48:55.000Z | 2022-03-19T02:47:14.000Z | """A python client interface for ProofAssistantService."""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import grpc
import tensorflow as tf
from deepmath.proof_assistant import proof_assistant_pb2
from deepmath.proof_assistant import proof_assistant_pb2_grpc
tf.flags.DEFINE_string(
'proof_assistant_server_address', 'localhost:2000',
'address (including port) of the proof assistant server')
FLAGS = tf.flags.FLAGS
GIGABYTE = 1024 * 1024 * 1024
GRPC_MAX_MESSAGE_LENGTH = GIGABYTE
class ProofAssistant(object):
    """Class for interfacing a proof assistant.

    Thin client around the ProofAssistantService gRPC stub: each public
    method forwards its request proto to the server and returns the
    server's response proto unchanged.
    """

    def __init__(self):
        """Open an insecure gRPC channel to FLAGS.proof_assistant_server_address."""
        # Allow messages of up to GRPC_MAX_MESSAGE_LENGTH (1 GiB) in both
        # directions on this channel.
        self.channel = grpc.insecure_channel(
            FLAGS.proof_assistant_server_address,
            options=[('grpc.max_send_message_length', GRPC_MAX_MESSAGE_LENGTH),
                     ('grpc.max_receive_message_length', GRPC_MAX_MESSAGE_LENGTH)])
        self.stub = proof_assistant_pb2_grpc.ProofAssistantServiceStub(self.channel)

    def ApplyTactic(self, request: proof_assistant_pb2.ApplyTacticRequest
                    ) -> proof_assistant_pb2.ApplyTacticResponse:
        """Forward an ApplyTactic RPC to the proof assistant server."""
        return self.stub.ApplyTactic(request)

    def VerifyProof(self, request: proof_assistant_pb2.VerifyProofRequest
                    ) -> proof_assistant_pb2.VerifyProofResponse:
        """Forward a VerifyProof RPC to the proof assistant server."""
        return self.stub.VerifyProof(request)

    def RegisterTheorem(self, request: proof_assistant_pb2.RegisterTheoremRequest
                        ) -> proof_assistant_pb2.RegisterTheoremResponse:
        """Forward a RegisterTheorem RPC to the proof assistant server."""
        return self.stub.RegisterTheorem(request)
| 36.395349 | 80 | 0.771885 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import grpc
import tensorflow as tf
from deepmath.proof_assistant import proof_assistant_pb2
from deepmath.proof_assistant import proof_assistant_pb2_grpc
tf.flags.DEFINE_string(
'proof_assistant_server_address', 'localhost:2000',
'address (including port) of the proof assistant server')
FLAGS = tf.flags.FLAGS
GIGABYTE = 1024 * 1024 * 1024
GRPC_MAX_MESSAGE_LENGTH = GIGABYTE
class ProofAssistant(object):
    """RPC client for the ProofAssistantService.

    Every public method is a plain pass-through: the request proto is sent
    to the server and the response proto is returned as-is.
    """

    def __init__(self):
        """Connect to the server named by FLAGS.proof_assistant_server_address."""
        # Permit GRPC_MAX_MESSAGE_LENGTH-sized messages in both directions.
        channel_options = [
            (option_name, GRPC_MAX_MESSAGE_LENGTH)
            for option_name in ('grpc.max_send_message_length',
                                'grpc.max_receive_message_length')
        ]
        self.channel = grpc.insecure_channel(
            FLAGS.proof_assistant_server_address, options=channel_options)
        self.stub = proof_assistant_pb2_grpc.ProofAssistantServiceStub(
            self.channel)

    def ApplyTactic(self, request: proof_assistant_pb2.ApplyTacticRequest
                    ) -> proof_assistant_pb2.ApplyTacticResponse:
        """Send an ApplyTactic request; return the server's response."""
        return self.stub.ApplyTactic(request)

    def VerifyProof(self, request: proof_assistant_pb2.VerifyProofRequest
                    ) -> proof_assistant_pb2.VerifyProofResponse:
        """Send a VerifyProof request; return the server's response."""
        return self.stub.VerifyProof(request)

    def RegisterTheorem(self, request: proof_assistant_pb2.RegisterTheoremRequest
                        ) -> proof_assistant_pb2.RegisterTheoremResponse:
        """Send a RegisterTheorem request; return the server's response."""
        return self.stub.RegisterTheorem(request)
| true | true |
f7201bd62f21c953c3e3878bc9213acf8336f68d | 1,120 | py | Python | src/data_utils/image_dataset.py | lindsey98/dml_cross_entropy | 4312cb295e972abda7b0e2bdadecf1965c5d7ed5 | [
"BSD-3-Clause"
] | null | null | null | src/data_utils/image_dataset.py | lindsey98/dml_cross_entropy | 4312cb295e972abda7b0e2bdadecf1965c5d7ed5 | [
"BSD-3-Clause"
] | null | null | null | src/data_utils/image_dataset.py | lindsey98/dml_cross_entropy | 4312cb295e972abda7b0e2bdadecf1965c5d7ed5 | [
"BSD-3-Clause"
] | null | null | null | from PIL import Image
from torch.utils.data import Dataset
from src.data_utils.utils import load_data
class ImageDataset(Dataset):
    """Dataset over ``(image_path, label)`` pairs, yielding RGB images.

    :param samples: list of ``(image_path, label)`` tuples.
    :param transform: callable applied to each PIL image, or None.
    :param preload: if True, load every image into memory up front via
        ``load_data`` (a dict keyed by image path).
    :param num_workers: forwarded to ``load_data`` when preloading —
        presumably a parallel-loading worker count; confirm in load_data.
    """

    def __init__(self, samples: list, transform, preload: bool = False, num_workers=None):
        self.transform = transform
        self.samples = samples
        # Labels in sample order, kept for quick access without opening images.
        self.targets = [label for _, label in self.samples]
        self.preloaded = False
        if preload:
            image_paths = [image_path for image_path, _ in self.samples]
            self.images = load_data(image_paths, num_workers=num_workers)
            self.preloaded = True
            print(self.__class__.__name__ + ' loaded with {} images'.format(len(self.images.keys())))

    def __len__(self):
        """Number of samples."""
        return len(self.samples)

    def __getitem__(self, index):
        """Return ``(image, label, index)`` for the sample at *index*."""
        image_path, label = self.samples[index]
        if self.preloaded:
            image = self.images[image_path].convert('RGB')
        else:
            # BUGFIX: close the underlying file handle promptly instead of
            # leaking it until GC; convert() copies the pixel data, so the
            # returned image is safe to use after the file is closed.
            with Image.open(image_path) as source:
                image = source.convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        return image, label, index
| 31.111111 | 101 | 0.63125 | from PIL import Image
from torch.utils.data import Dataset
from src.data_utils.utils import load_data
class ImageDataset(Dataset):
    """Image dataset backed by ``(path, label)`` pairs, with optional preloading.

    When ``preload`` is set, all images are fetched once through
    ``load_data`` and kept in memory (a dict keyed by path); otherwise each
    image is opened lazily in ``__getitem__``.
    """

    def __init__(self, samples: list, transform, preload: bool = False, num_workers=None):
        self.transform = transform
        self.samples = samples
        self.targets = [target for _, target in samples]
        self.preloaded = False
        if preload:
            paths = [path for path, _ in samples]
            self.images = load_data(paths, num_workers=num_workers)
            self.preloaded = True
            print('{} loaded with {} images'.format(type(self).__name__, len(self.images.keys())))

    def __len__(self):
        return len(self.samples)

    def _open_rgb(self, path):
        """Return the RGB PIL image for *path* (from the cache when preloaded)."""
        source = self.images[path] if self.preloaded else Image.open(path)
        return source.convert('RGB')

    def __getitem__(self, index):
        path, target = self.samples[index]
        image = self._open_rgb(path)
        if self.transform is not None:
            image = self.transform(image)
        return image, target, index
| true | true |
f7201d90ee64fded6d4df4a84c0dcb782c4684f0 | 20,761 | py | Python | Lib/site-packages/git/objects/commit.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/git/objects/commit.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 7 | 2020-02-12T03:06:52.000Z | 2021-06-10T19:33:14.000Z | Lib/site-packages/git/objects/commit.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | null | null | null | # commit.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from gitdb import IStream
from git.util import (
hex_to_bin,
Actor,
Iterable,
Stats,
finalize_process
)
from git.diff import Diffable
from .tree import Tree
from . import base
from .util import (
Traversable,
Serializable,
parse_date,
altz_to_utctz_str,
parse_actor_and_date,
from_timestamp,
)
from git.compat import text_type
from time import (
time,
daylight,
altzone,
timezone,
localtime
)
import os
from io import BytesIO
import logging
log = logging.getLogger('git.objects.commit')
log.addHandler(logging.NullHandler())
__all__ = ('Commit', )
class Commit(base.Object, Iterable, Diffable, Traversable, Serializable):

    """Wraps a git Commit object.

    This class will act lazily on some of its attributes and will query the
    value on demand only if it involves calling the git binary."""

    # ENVIRONMENT VARIABLES
    # read when creating new commits
    env_author_date = "GIT_AUTHOR_DATE"
    env_committer_date = "GIT_COMMITTER_DATE"

    # CONFIGURATION KEYS
    conf_encoding = 'i18n.commitencoding'

    # INVARIANTS
    default_encoding = "UTF-8"

    # object configuration
    type = "commit"
    __slots__ = ("tree",
                 "author", "authored_date", "author_tz_offset",
                 "committer", "committed_date", "committer_tz_offset",
                 "message", "parents", "encoding", "gpgsig")
    _id_attribute_ = "hexsha"

    def __init__(self, repo, binsha, tree=None, author=None, authored_date=None, author_tz_offset=None,
                 committer=None, committed_date=None, committer_tz_offset=None,
                 message=None, parents=None, encoding=None, gpgsig=None):
        """Instantiate a new Commit. All keyword arguments taking None as default will
        be implicitly set on first query.

        :param binsha: 20 byte sha1
        :param parents: tuple( Commit, ... )
            is a tuple of commit ids or actual Commits
        :param tree: Tree
            Tree object
        :param author: Actor
            is the author Actor object
        :param authored_date: int_seconds_since_epoch
            is the authored DateTime - use time.gmtime() to convert it into a
            different format
        :param author_tz_offset: int_seconds_west_of_utc
            is the timezone that the authored_date is in
        :param committer: Actor
            is the committer string
        :param committed_date: int_seconds_since_epoch
            is the committed DateTime - use time.gmtime() to convert it into a
            different format
        :param committer_tz_offset: int_seconds_west_of_utc
            is the timezone that the committed_date is in
        :param message: string
            is the commit message
        :param encoding: string
            encoding of the message, defaults to UTF-8
        :param parents:
            List or tuple of Commit objects which are our parent(s) in the commit
            dependency graph
        :return: git.Commit

        :note:
            Timezone information is in the same format and in the same sign
            as what time.altzone returns. The sign is inverted compared to git's
            UTC timezone."""
        super(Commit, self).__init__(repo, binsha)
        # Merged the previously duplicated "if tree is not None" checks.
        if tree is not None:
            assert isinstance(tree, Tree), "Tree needs to be a Tree instance, was %s" % type(tree)
            self.tree = tree
        if author is not None:
            self.author = author
        if authored_date is not None:
            self.authored_date = authored_date
        if author_tz_offset is not None:
            self.author_tz_offset = author_tz_offset
        if committer is not None:
            self.committer = committer
        if committed_date is not None:
            self.committed_date = committed_date
        if committer_tz_offset is not None:
            self.committer_tz_offset = committer_tz_offset
        if message is not None:
            self.message = message
        if parents is not None:
            self.parents = parents
        if encoding is not None:
            self.encoding = encoding
        if gpgsig is not None:
            self.gpgsig = gpgsig

    @classmethod
    def _get_intermediate_items(cls, commit):
        return commit.parents

    def _set_cache_(self, attr):
        if attr in Commit.__slots__:
            # read the data in a chunk, its faster - then provide a file wrapper
            _binsha, _typename, self.size, stream = self.repo.odb.stream(self.binsha)
            self._deserialize(BytesIO(stream.read()))
        else:
            super(Commit, self)._set_cache_(attr)
        # END handle attrs

    @property
    def authored_datetime(self):
        return from_timestamp(self.authored_date, self.author_tz_offset)

    @property
    def committed_datetime(self):
        return from_timestamp(self.committed_date, self.committer_tz_offset)

    @property
    def summary(self):
        """:return: First line of the commit message"""
        return self.message.split('\n', 1)[0]

    def count(self, paths='', **kwargs):
        """Count the number of commits reachable from this commit

        :param paths:
            is an optional path or a list of paths restricting the return value
            to commits actually containing the paths
        :param kwargs:
            Additional options to be passed to git-rev-list. They must not alter
            the output style of the command, or parsing will yield incorrect results
        :return: int defining the number of reachable commits"""
        # yes, it makes a difference whether empty paths are given or not in our case
        # as the empty paths version will ignore merge commits for some reason.
        if paths:
            return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
        return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())

    @property
    def name_rev(self):
        """
        :return:
            String describing the commits hex sha based on the closest Reference.
            Mostly useful for UI purposes"""
        return self.repo.git.name_rev(self)

    @classmethod
    def iter_items(cls, repo, rev, paths='', **kwargs):
        """Find all commits matching the given criteria.

        :param repo: is the Repo
        :param rev: revision specifier, see git-rev-parse for viable options
        :param paths:
            is an optional path or list of paths, if set only Commits that include the path
            or paths will be considered
        :param kwargs:
            optional keyword arguments to git rev-list where
            ``max_count`` is the maximum number of commits to fetch
            ``skip`` is the number of commits to skip
            ``since`` all commits since i.e. '1970-01-01'
        :return: iterator yielding Commit items"""
        if 'pretty' in kwargs:
            raise ValueError("--pretty cannot be used as parsing expects single sha's only")
        # END handle pretty

        # use -- in any case, to prevent possibility of ambiguous arguments
        # see https://github.com/gitpython-developers/GitPython/issues/264
        args = ['--']
        if paths:
            args.extend((paths, ))
        # END if paths

        proc = repo.git.rev_list(rev, args, as_process=True, **kwargs)
        return cls._iter_from_process_or_stream(repo, proc)

    def iter_parents(self, paths='', **kwargs):
        """Iterate _all_ parents of this commit.

        :param paths:
            Optional path or list of paths limiting the Commits to those that
            contain at least one of the paths
        :param kwargs: All arguments allowed by git-rev-list
        :return: Iterator yielding Commit objects which are parents of self """
        # skip ourselves
        skip = kwargs.get("skip", 1)
        if skip == 0:   # skip ourselves
            skip = 1
        kwargs['skip'] = skip

        return self.iter_items(self.repo, self, paths, **kwargs)

    @property
    def stats(self):
        """Create a git stat from changes between this commit and its first parent
        or from all changes done if this is the very first commit.

        :return: git.Stats"""
        if not self.parents:
            text = self.repo.git.diff_tree(self.hexsha, '--', numstat=True, root=True)
            text2 = ""
            for line in text.splitlines()[1:]:
                (insertions, deletions, filename) = line.split("\t")
                text2 += "%s\t%s\t%s\n" % (insertions, deletions, filename)
            text = text2
        else:
            text = self.repo.git.diff(self.parents[0].hexsha, self.hexsha, '--', numstat=True)
        return Stats._list_from_string(self.repo, text)

    @classmethod
    def _iter_from_process_or_stream(cls, repo, proc_or_stream):
        """Parse out commit information into a list of Commit objects
        We expect one-line per commit, and parse the actual commit information directly
        from our lightning fast object database

        :param proc: git-rev-list process instance - one sha per line
        :return: iterator returning Commit objects"""
        stream = proc_or_stream
        if not hasattr(stream, 'readline'):
            stream = proc_or_stream.stdout

        readline = stream.readline
        while True:
            line = readline()
            if not line:
                break
            hexsha = line.strip()
            if len(hexsha) > 40:
                # split additional information, as returned by bisect for instance
                hexsha, _ = line.split(None, 1)
            # END handle extra info

            assert len(hexsha) == 40, "Invalid line: %s" % hexsha
            yield Commit(repo, hex_to_bin(hexsha))
        # END for each line in stream

        # TODO: Review this - it seems process handling got a bit out of control
        # due to many developers trying to fix the open file handles issue
        if hasattr(proc_or_stream, 'wait'):
            finalize_process(proc_or_stream)

    @classmethod
    def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None, committer=None,
                         author_date=None, commit_date=None):
        """Commit the given tree, creating a commit object.

        :param repo: Repo object the commit should be part of
        :param tree: Tree object or hex or bin sha
            the tree of the new commit
        :param message: Commit message. It may be an empty string if no message is provided.
            It will be converted to a string in any case.
        :param parent_commits:
            Optional Commit objects to use as parents for the new commit.
            If empty list, the commit will have no parents at all and become
            a root commit.
            If None , the current head commit will be the parent of the
            new commit object
        :param head:
            If True, the HEAD will be advanced to the new commit automatically.
            Else the HEAD will remain pointing on the previous commit. This could
            lead to undesired results when diffing files.
        :param author: The name of the author, optional. If unset, the repository
            configuration is used to obtain this value.
        :param committer: The name of the committer, optional. If unset, the
            repository configuration is used to obtain this value.
        :param author_date: The timestamp for the author field
        :param commit_date: The timestamp for the committer field

        :return: Commit object representing the new commit

        :note:
            Additional information about the committer and Author are taken from the
            environment or from the git configuration, see git-commit-tree for
            more information"""
        if parent_commits is None:
            try:
                parent_commits = [repo.head.commit]
            except ValueError:
                # empty repositories have no head commit
                parent_commits = []
            # END handle parent commits
        else:
            for p in parent_commits:
                if not isinstance(p, cls):
                    raise ValueError("Parent commit '%r' must be of type %s" % (p, cls))
            # end check parent commit types
        # END if parent commits are unset

        # retrieve all additional information, create a commit object, and
        # serialize it
        # Generally:
        # * Environment variables override configuration values
        # * Sensible defaults are set according to the git documentation

        # COMMITTER AND AUTHOR INFO
        cr = repo.config_reader()
        env = os.environ

        committer = committer or Actor.committer(cr)
        author = author or Actor.author(cr)

        # PARSE THE DATES
        unix_time = int(time())
        is_dst = daylight and localtime().tm_isdst > 0
        offset = altzone if is_dst else timezone

        author_date_str = env.get(cls.env_author_date, '')
        if author_date:
            author_time, author_offset = parse_date(author_date)
        elif author_date_str:
            author_time, author_offset = parse_date(author_date_str)
        else:
            author_time, author_offset = unix_time, offset
        # END set author time

        committer_date_str = env.get(cls.env_committer_date, '')
        if commit_date:
            committer_time, committer_offset = parse_date(commit_date)
        elif committer_date_str:
            committer_time, committer_offset = parse_date(committer_date_str)
        else:
            committer_time, committer_offset = unix_time, offset
        # END set committer time

        # assume utf8 encoding
        enc_section, enc_option = cls.conf_encoding.split('.')
        conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)

        # if the tree is no object, make sure we create one - otherwise
        # the created commit object is invalid
        if isinstance(tree, str):
            tree = repo.tree(tree)
        # END tree conversion

        # CREATE NEW COMMIT
        new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
                         author, author_time, author_offset,
                         committer, committer_time, committer_offset,
                         message, parent_commits, conf_encoding)

        stream = BytesIO()
        new_commit._serialize(stream)
        streamlen = stream.tell()
        stream.seek(0)

        istream = repo.odb.store(IStream(cls.type, streamlen, stream))
        new_commit.binsha = istream.binsha

        if head:
            # need late import here, importing git at the very beginning throws
            # as well ...
            import git.refs
            try:
                repo.head.set_commit(new_commit, logmsg=message)
            except ValueError:
                # head is not yet set to the ref our HEAD points to
                # Happens on first commit
                master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
                repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
            # END handle empty repositories
        # END advance head handling

        return new_commit

    #{ Serializable Implementation

    def _serialize(self, stream):
        write = stream.write
        write(("tree %s\n" % self.tree).encode('ascii'))
        for p in self.parents:
            write(("parent %s\n" % p).encode('ascii'))
        a = self.author
        aname = a.name
        c = self.committer
        fmt = "%s %s <%s> %s %s\n"
        write((fmt % ("author", aname, a.email,
                      self.authored_date,
                      altz_to_utctz_str(self.author_tz_offset))).encode(self.encoding))

        # encode committer
        aname = c.name
        write((fmt % ("committer", aname, c.email,
                      self.committed_date,
                      altz_to_utctz_str(self.committer_tz_offset))).encode(self.encoding))

        if self.encoding != self.default_encoding:
            write(("encoding %s\n" % self.encoding).encode('ascii'))

        try:
            if self.__getattribute__('gpgsig') is not None:
                write(b"gpgsig")
                for sigline in self.gpgsig.rstrip("\n").split("\n"):
                    write((" " + sigline + "\n").encode('ascii'))
        except AttributeError:
            pass

        write(b"\n")

        # write plain bytes, be sure its encoded according to our encoding
        if isinstance(self.message, text_type):
            write(self.message.encode(self.encoding))
        else:
            write(self.message)
        # END handle encoding
        return self

    def _deserialize(self, stream):
        """:param from_rev_list: if true, the stream format is coming from the rev-list command
        Otherwise it is assumed to be a plain data stream from our object"""
        readline = stream.readline
        self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '')

        self.parents = []
        next_line = None
        while True:
            parent_line = readline()
            if not parent_line.startswith(b'parent'):
                next_line = parent_line
                break
            # END abort reading parents
            self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1].decode('ascii'))))
        # END for each parent line
        self.parents = tuple(self.parents)

        # we don't know actual author encoding before we have parsed it, so keep the lines around
        author_line = next_line
        committer_line = readline()

        # we might run into one or more mergetag blocks, skip those for now
        next_line = readline()
        while next_line.startswith(b'mergetag '):
            next_line = readline()
            while next_line.startswith(b' '):
                next_line = readline()
        # end skip mergetags

        # now we can have the encoding line, or an empty line followed by the optional
        # message.
        self.encoding = self.default_encoding
        self.gpgsig = None

        # read headers
        enc = next_line
        buf = enc.strip()
        while buf:
            # BUGFIX: b"encoding " is 9 bytes, but the slice compared was
            # buf[0:10], so this branch could never match and the encoding
            # header was silently ignored; also bytes.find requires a bytes
            # argument on Python 3 (str raised TypeError).
            if buf[0:9] == b"encoding ":
                self.encoding = buf[buf.find(b' ') + 1:].decode(
                    self.encoding, 'ignore')
            elif buf[0:7] == b"gpgsig ":
                sig = buf[buf.find(b' ') + 1:] + b"\n"
                is_next_header = False
                while True:
                    sigbuf = readline()
                    if not sigbuf:
                        break
                    if sigbuf[0:1] != b" ":
                        buf = sigbuf.strip()
                        is_next_header = True
                        break
                    sig += sigbuf[1:]
                # end read all signature
                self.gpgsig = sig.rstrip(b"\n").decode(self.encoding, 'ignore')
                if is_next_header:
                    continue
            buf = readline().strip()

        # decode the authors name
        try:
            self.author, self.authored_date, self.author_tz_offset = \
                parse_actor_and_date(author_line.decode(self.encoding, 'replace'))
        except UnicodeDecodeError:
            log.error("Failed to decode author line '%s' using encoding %s", author_line, self.encoding,
                      exc_info=True)

        try:
            self.committer, self.committed_date, self.committer_tz_offset = \
                parse_actor_and_date(committer_line.decode(self.encoding, 'replace'))
        except UnicodeDecodeError:
            log.error("Failed to decode committer line '%s' using encoding %s", committer_line, self.encoding,
                      exc_info=True)
        # END handle author's encoding

        # a stream from our data simply gives us the plain message
        # The end of our message stream is marked with a newline that we strip
        self.message = stream.read()
        try:
            self.message = self.message.decode(self.encoding, 'replace')
        except UnicodeDecodeError:
            log.error("Failed to decode message '%s' using encoding %s", self.message, self.encoding, exc_info=True)
        # END exception handling

        return self

    #} END serializable implementation
| 38.878277 | 119 | 0.607822 |
from gitdb import IStream
from git.util import (
hex_to_bin,
Actor,
Iterable,
Stats,
finalize_process
)
from git.diff import Diffable
from .tree import Tree
from . import base
from .util import (
Traversable,
Serializable,
parse_date,
altz_to_utctz_str,
parse_actor_and_date,
from_timestamp,
)
from git.compat import text_type
from time import (
time,
daylight,
altzone,
timezone,
localtime
)
import os
from io import BytesIO
import logging
log = logging.getLogger('git.objects.commit')
log.addHandler(logging.NullHandler())
__all__ = ('Commit', )
class Commit(base.Object, Iterable, Diffable, Traversable, Serializable):
    """Wrap a git commit object.

    Commit data (tree, parents, author/committer info, message, gpg
    signature) is deserialized lazily: accessing any slot attribute that is
    not yet set triggers ``_set_cache_`` which reads and parses the raw
    object from the object database.
    """
    # Environment variables that override author/committer dates in create_from_tree().
    env_author_date = "GIT_AUTHOR_DATE"
    env_committer_date = "GIT_COMMITTER_DATE"
    # git config key naming the encoding of commit messages.
    conf_encoding = 'i18n.commitencoding'
    default_encoding = "UTF-8"
    # Object type name exactly as git itself spells it.
    type = "commit"
    __slots__ = ("tree",
                 "author", "authored_date", "author_tz_offset",
                 "committer", "committed_date", "committer_tz_offset",
                 "message", "parents", "encoding", "gpgsig")
    _id_attribute_ = "hexsha"
    def __init__(self, repo, binsha, tree=None, author=None, authored_date=None, author_tz_offset=None,
                 committer=None, committed_date=None, committer_tz_offset=None,
                 message=None, parents=None, encoding=None, gpgsig=None):
        """Instantiate a new Commit.

        All keyword arguments are optional; any attribute left as None stays
        unset so that it can be lazily deserialized later. ``authored_date``
        and ``committed_date`` are seconds since epoch; the ``*_tz_offset``
        values are seconds west of UTC.

        :param repo: repository this commit belongs to
        :param binsha: 20 byte binary sha1 of the commit object
        """
        super(Commit, self).__init__(repo, binsha)
        if tree is not None:
            assert isinstance(tree, Tree), "Tree needs to be a Tree instance, was %s" % type(tree)
        if tree is not None:
            self.tree = tree
        if author is not None:
            self.author = author
        if authored_date is not None:
            self.authored_date = authored_date
        if author_tz_offset is not None:
            self.author_tz_offset = author_tz_offset
        if committer is not None:
            self.committer = committer
        if committed_date is not None:
            self.committed_date = committed_date
        if committer_tz_offset is not None:
            self.committer_tz_offset = committer_tz_offset
        if message is not None:
            self.message = message
        if parents is not None:
            self.parents = parents
        if encoding is not None:
            self.encoding = encoding
        if gpgsig is not None:
            self.gpgsig = gpgsig
    @classmethod
    def _get_intermediate_items(cls, commit):
        # Traversable hook: for history traversal a commit's successors are its parents.
        return commit.parents
    def _set_cache_(self, attr):
        """Lazily fill attributes: any of our __slots__ triggers a full
        deserialization of the raw commit object from the odb."""
        if attr in Commit.__slots__:
            # Reading the stream sets self.size as a side effect.
            _binsha, _typename, self.size, stream = self.repo.odb.stream(self.binsha)
            self._deserialize(BytesIO(stream.read()))
        else:
            super(Commit, self)._set_cache_(attr)
    @property
    def authored_datetime(self):
        """Authored time as an aware datetime in the author's timezone."""
        return from_timestamp(self.authored_date, self.author_tz_offset)
    @property
    def committed_datetime(self):
        """Commit time as an aware datetime in the committer's timezone."""
        return from_timestamp(self.committed_date, self.committer_tz_offset)
    @property
    def summary(self):
        """First line of the commit message."""
        return self.message.split('\n', 1)[0]
    def count(self, paths='', **kwargs):
        """Count the number of commits reachable from this commit.

        :param paths: optional path(s) restricting the counted commits to
            those touching them
        :param kwargs: additional options passed to git-rev-list
        :return: int"""
        # Yes, it makes a difference whether empty paths are given or not in our case
        # as the empty paths version will ignore merge commits for some reason.
        if paths:
            return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
        return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
    @property
    def name_rev(self):
        """A symbolic name for this commit as produced by git-name-rev."""
        return self.repo.git.name_rev(self)
    @classmethod
    def iter_items(cls, repo, rev, paths='', **kwargs):
        """Iterate Commit objects reachable from ``rev``, newest first.

        :param repo: repository to operate on
        :param rev: revision specifier, see git-rev-parse
        :param paths: optional path(s) restricting the result
        :param kwargs: options passed to git-rev-list (e.g. max_count, skip);
            ``pretty`` is rejected because the output parser expects one
            plain sha per line."""
        if 'pretty' in kwargs:
            raise ValueError("--pretty cannot be used as parsing expects single sha's only")
        # END handle pretty
        # use -- in any case, to prevent possibility of ambiguous arguments
        # see https://github.com/gitpython-developers/GitPython/issues/264
        args = ['--']
        if paths:
            args.extend((paths, ))
        # END if paths
        proc = repo.git.rev_list(rev, args, as_process=True, **kwargs)
        return cls._iter_from_process_or_stream(repo, proc)
    def iter_parents(self, paths='', **kwargs):
        """Iterate all parent commits of this commit (excluding itself).

        Accepts the same arguments as iter_items; ``skip`` defaults to 1 so
        that this commit itself is not yielded."""
        # skip ourselves
        skip = kwargs.get("skip", 1)
        if skip == 0: # skip ourselves
            skip = 1
        kwargs['skip'] = skip
        return self.iter_items(self.repo, self, paths, **kwargs)
    @property
    def stats(self):
        """Stats object describing insertions/deletions per file for this
        commit relative to its first parent (or an empty tree for a root
        commit)."""
        if not self.parents:
            # Root commit: diff against nothing; drop the header line and
            # rebuild the numstat body so Stats can parse it.
            text = self.repo.git.diff_tree(self.hexsha, '--', numstat=True, root=True)
            text2 = ""
            for line in text.splitlines()[1:]:
                (insertions, deletions, filename) = line.split("\t")
                text2 += "%s\t%s\t%s\n" % (insertions, deletions, filename)
            text = text2
        else:
            # NOTE(review): only the first parent is considered for merge
            # commits -- confirm this matches callers' expectations.
            text = self.repo.git.diff(self.parents[0].hexsha, self.hexsha, '--', numstat=True)
        return Stats._list_from_string(self.repo, text)
    @classmethod
    def _iter_from_process_or_stream(cls, repo, proc_or_stream):
        """Yield Commit objects parsed from a process' stdout or a plain
        stream containing one 40-char hexsha per line."""
        stream = proc_or_stream
        if not hasattr(stream, 'readline'):
            stream = proc_or_stream.stdout
        readline = stream.readline
        while True:
            line = readline()
            if not line:
                break
            hexsha = line.strip()
            if len(hexsha) > 40:
                # split additional information, as returned by bisect for instance
                hexsha, _ = line.split(None, 1)
            # END handle extra info
            assert len(hexsha) == 40, "Invalid line: %s" % hexsha
            yield Commit(repo, hex_to_bin(hexsha))
        # END for each line in stream
        # TODO: Review this - it seems process handling got a bit out of control
        # due to many developers trying to fix the open file handles issue
        if hasattr(proc_or_stream, 'wait'):
            finalize_process(proc_or_stream)
    @classmethod
    def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None, committer=None,
                         author_date=None, commit_date=None):
        """Commit the given tree, creating a Commit object.

        :param repo: repository to commit into
        :param tree: Tree object or tree-ish hexsha to commit
        :param message: commit message
        :param parent_commits: optional list of Commit parents; defaults to
            the current HEAD commit (or no parents in an empty repository)
        :param head: if True, advance HEAD to the new commit
        :param author/committer: optional Actor overrides; otherwise read
            from the repository configuration
        :param author_date/commit_date: optional date strings; environment
            variables GIT_AUTHOR_DATE / GIT_COMMITTER_DATE take effect if
            these are not given
        :return: the newly created Commit (binsha filled in)"""
        if parent_commits is None:
            try:
                parent_commits = [repo.head.commit]
            except ValueError:
                # empty repositories have no head commit
                parent_commits = []
            # END handle parent commits
        else:
            for p in parent_commits:
                if not isinstance(p, cls):
                    raise ValueError("Parent commit '%r' must be of type %s" % (p, cls))
            # end check parent commit types
        # END if parent commits are unset
        # retrieve all additional information, create a commit object, and
        # serialize it
        # Generally:
        # * Environment variables override configuration values
        # * Sensible defaults are set according to the git documentation
        # COMMITER AND AUTHOR INFO
        cr = repo.config_reader()
        env = os.environ
        committer = committer or Actor.committer(cr)
        author = author or Actor.author(cr)
        # PARSE THE DATES
        unix_time = int(time())
        is_dst = daylight and localtime().tm_isdst > 0
        offset = altzone if is_dst else timezone
        author_date_str = env.get(cls.env_author_date, '')
        if author_date:
            author_time, author_offset = parse_date(author_date)
        elif author_date_str:
            author_time, author_offset = parse_date(author_date_str)
        else:
            author_time, author_offset = unix_time, offset
        # END set author time
        committer_date_str = env.get(cls.env_committer_date, '')
        if commit_date:
            committer_time, committer_offset = parse_date(commit_date)
        elif committer_date_str:
            committer_time, committer_offset = parse_date(committer_date_str)
        else:
            committer_time, committer_offset = unix_time, offset
        # END set committer time
        # assume utf8 encoding
        enc_section, enc_option = cls.conf_encoding.split('.')
        conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding)
        # if the tree is no object, make sure we create one - otherwise
        # the created commit object is invalid
        if isinstance(tree, str):
            tree = repo.tree(tree)
        # END tree conversion
        # CREATE NEW COMMIT
        new_commit = cls(repo, cls.NULL_BIN_SHA, tree,
                         author, author_time, author_offset,
                         committer, committer_time, committer_offset,
                         message, parent_commits, conf_encoding)
        stream = BytesIO()
        new_commit._serialize(stream)
        streamlen = stream.tell()
        stream.seek(0)
        istream = repo.odb.store(IStream(cls.type, streamlen, stream))
        new_commit.binsha = istream.binsha
        if head:
            # need late import here, importing git at the very beginning throws
            # as well ...
            import git.refs
            try:
                repo.head.set_commit(new_commit, logmsg=message)
            except ValueError:
                # head is not yet set to the ref our HEAD points to
                # Happens on first commit
                master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message)
                repo.head.set_reference(master, logmsg='commit: Switching to %s' % master)
            # END handle empty repositories
        # END advance head handling
        return new_commit
    #{ Serializable Implementation
    def _serialize(self, stream):
        """Write this commit in git's raw commit-object format to ``stream``.

        Header order matches git: tree, parents, author, committer,
        optional encoding, optional gpgsig, blank line, then the message."""
        write = stream.write
        write(("tree %s\n" % self.tree).encode('ascii'))
        for p in self.parents:
            write(("parent %s\n" % p).encode('ascii'))
        a = self.author
        aname = a.name
        c = self.committer
        fmt = "%s %s <%s> %s %s\n"
        write((fmt % ("author", aname, a.email,
                      self.authored_date,
                      altz_to_utctz_str(self.author_tz_offset))).encode(self.encoding))
        # encode committer
        aname = c.name
        write((fmt % ("committer", aname, c.email,
                      self.committed_date,
                      altz_to_utctz_str(self.committer_tz_offset))).encode(self.encoding))
        if self.encoding != self.default_encoding:
            write(("encoding %s\n" % self.encoding).encode('ascii'))
        try:
            # __getattribute__ is used on purpose: gpgsig is a slot and may
            # simply not be set, in which case AttributeError is expected.
            if self.__getattribute__('gpgsig') is not None:
                write(b"gpgsig")
                # Each signature line is continued with a leading space, per
                # git's multi-line header convention.
                for sigline in self.gpgsig.rstrip("\n").split("\n"):
                    write((" " + sigline + "\n").encode('ascii'))
        except AttributeError:
            pass
        write(b"\n")
        # write plain bytes, be sure its encoded according to our encoding
        if isinstance(self.message, text_type):
            write(self.message.encode(self.encoding))
        else:
            write(self.message)
        # END handle encoding
        return self
    def _deserialize(self, stream):
        """Parse a raw commit object from ``stream`` and populate all
        attributes. ``stream`` yields bytes."""
        readline = stream.readline
        self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '')
        self.parents = []
        next_line = None
        while True:
            parent_line = readline()
            if not parent_line.startswith(b'parent'):
                next_line = parent_line
                break
            # END abort reading parents
            self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1].decode('ascii'))))
        # END for each parent line
        self.parents = tuple(self.parents)
        # we don't know actual author encoding before we have parsed it, so keep the lines around
        author_line = next_line
        committer_line = readline()
        next_line = readline()
        # Skip any mergetag headers (and their space-continued body lines).
        while next_line.startswith(b'mergetag '):
            next_line = readline()
            while next_line.startswith(b' '):
                next_line = readline()
        self.encoding = self.default_encoding
        self.gpgsig = None
        enc = next_line
        buf = enc.strip()
        while buf:
            if buf[0:10] == b"encoding ":
                # NOTE(review): b"encoding " is 9 bytes but the slice is 10
                # bytes long, so this comparison can never be true and the
                # encoding header appears to be silently skipped; the
                # str-vs-bytes find(' ') below would also fail on Python 3.
                # Confirm before relying on non-default message encodings.
                self.encoding = buf[buf.find(' ') + 1:].decode(
                    self.encoding, 'ignore')
            elif buf[0:7] == b"gpgsig ":
                sig = buf[buf.find(b' ') + 1:] + b"\n"
                is_next_header = False
                while True:
                    sigbuf = readline()
                    if not sigbuf:
                        break
                    if sigbuf[0:1] != b" ":
                        # A non-continuation line ends the signature; keep it
                        # as the next header to process.
                        buf = sigbuf.strip()
                        is_next_header = True
                        break
                    sig += sigbuf[1:]
                self.gpgsig = sig.rstrip(b"\n").decode(self.encoding, 'ignore')
                if is_next_header:
                    continue
            buf = readline().strip()
        try:
            self.author, self.authored_date, self.author_tz_offset = \
                parse_actor_and_date(author_line.decode(self.encoding, 'replace'))
        except UnicodeDecodeError:
            log.error("Failed to decode author line '%s' using encoding %s", author_line, self.encoding,
                      exc_info=True)
        try:
            self.committer, self.committed_date, self.committer_tz_offset = \
                parse_actor_and_date(committer_line.decode(self.encoding, 'replace'))
        except UnicodeDecodeError:
            log.error("Failed to decode committer line '%s' using encoding %s", committer_line, self.encoding,
                      exc_info=True)
        # a stream from our data simply gives us the plain message
        # The end of our message stream is marked with a newline that we strip
        self.message = stream.read()
        try:
            self.message = self.message.decode(self.encoding, 'replace')
        except UnicodeDecodeError:
            log.error("Failed to decode message '%s' using encoding %s", self.message, self.encoding, exc_info=True)
        # END exception handling
        return self
    #} END serializable implementation
| true | true |
f7201e595590259f3f2e768dbecb2c84ac98021f | 14,932 | py | Python | run_validation.py | ASinanSaglam/atomizer_analysis | 8dfc1230b2ad0c691885f8fd7119d6169cd7d1ed | [
"MIT"
] | null | null | null | run_validation.py | ASinanSaglam/atomizer_analysis | 8dfc1230b2ad0c691885f8fd7119d6169cd7d1ed | [
"MIT"
] | null | null | null | run_validation.py | ASinanSaglam/atomizer_analysis | 8dfc1230b2ad0c691885f8fd7119d6169cd7d1ed | [
"MIT"
] | null | null | null | # %matplotlib notebook
import os, re, sys, urllib, requests, base64, IPython, io, pickle, glob
sys.path.append("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/manual")
import itertools as itt
import numpy as np
import subprocess as sb
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import roadrunner, h5py
from bs4 import BeautifulSoup as BS
from IPython.display import Image, display
from matplotlib import rcParams
import analyzerTools as AT
def run_test(analyzer, test_no, t_end=1000, atomize=False, db=None, meta=None):
    """Run a single biomodel translation/simulation test and record the outcome.

    :param analyzer: object exposing ``run_single_test(test_no, t_end=..., atomize=..., meta=...)``
        returning truthy on success (e.g. AT.BiomodelAnalyzer)
    :param test_no: integer id of the biomodel to run
    :param t_end: simulation end time, forwarded to the analyzer
        (bug fix: this used to be silently ignored in favor of a hardcoded 100)
    :param atomize: whether to run the atomized translation
    :param db: unused; retained for backward compatibility with older callers
        (the DataFrame-saving code it fed was removed)
    :param meta: optional dict of per-test metadata; ``meta[test_no]["success"]``
        is set to the boolean outcome when provided
    :return: None; the result is reported via ``meta`` and stdout
    """
    if analyzer.run_single_test(test_no, t_end=t_end, atomize=atomize, meta=meta):
        if meta:
            meta[test_no]["success"] = True
        print("run successful {}".format(test_no))
    else:
        if meta:
            meta[test_no]["success"] = False
        print("run failed {}".format(test_no))
def uniquefy_names(keys):
    """Make every position of every key tuple unique by suffixing duplicates.

    For each tuple position independently, the first occurrence of a name is
    kept as-is; the n-th repeat becomes ``"name_{n-1}"`` (so the second
    occurrence is ``name_1``, the third ``name_2``, ...).

    Generalized from the original 2-/3-tuple-only implementation: now works
    for key tuples of any (uniform) length and returns ``[]`` for an empty
    input instead of raising IndexError. Results for 2- and 3-tuples are
    unchanged.

    :param keys: list of equal-length name tuples
    :return: list of tuples with per-position unique names, in input order
    """
    if not keys:
        return []
    # One name->count dict per tuple position.
    counters = [{} for _ in range(len(keys[0]))]
    unique_keys = []
    for key in keys:
        new_key = []
        for pos, name in enumerate(key):
            seen = counters[pos]
            if name in seen:
                # Suffix with the current count, then bump it for the next repeat.
                new_name = "{}_{}".format(name, seen[name])
                seen[name] += 1
                name = new_name
            else:
                seen[name] = 1
            new_key.append(name)
        unique_keys.append(tuple(new_key))
    return unique_keys
def update_results(results, h5file):
    """Append per-model simulation results to an open HDF5 file.

    For each test id in ``results`` that is not yet stored, creates a group
    named with the zero-padded id and writes two structured datasets,
    ``sbml_data`` and ``bngl_data``, built from the matched columns of the
    SBML and BNGL result DataFrames.

    Assumes ``results[key]`` is a 5-tuple
    ``(sbml_df, bngl_df, rmsd, valid_percent, keys_used)`` where
    ``keys_used`` holds 2-tuples ``(sbml_col, bngl_col)`` or 3-tuples
    ``(sbml_col, bngl_col, curation_name)`` -- TODO confirm against the
    analyzer that produces them.

    :param results: dict mapping test id -> result tuple (analyzer.all_results)
    :param h5file: writable h5py.File
    :return: True (always)
    """
    for key in results:
        # Already stored from a previous call: skip.
        if "{:010d}".format(key) in h5file:
            continue
        # create a model group
        res_grp = h5file.create_group("{:010d}".format(key))
        # pull dataframes
        sres, bres, _, _, keys_used = results[key]
        # names
        # NOTE(review): when keys_used is empty the just-created group is
        # left behind empty -- confirm that is intended.
        if len(keys_used) == 0:
            continue
        if len(keys_used[0]) == 2:
            # (sbml_col, bngl_col): dataset column names come from the BNGL side.
            names_to_use = [keys_used[i][1] for i in range(len(keys_used))]
            skeyd = dict([(keys_used[i][1],keys_used[i][0]) for i in range(len(keys_used))])
            bkeyd = dict([(keys_used[i][1],keys_used[i][1]) for i in range(len(keys_used))])
            skn = list(map(lambda x: skeyd[x], names_to_use))
            bkn = list(map(lambda x: bkeyd[x], names_to_use))
        else:
            # (sbml_col, bngl_col, curation_name): names come from curation.
            names_to_use = [keys_used[i][2] for i in range(len(keys_used))]
            skeyd = dict([(keys_used[i][2],keys_used[i][0]) for i in range(len(keys_used))])
            bkeyd = dict([(keys_used[i][2],keys_used[i][1]) for i in range(len(keys_used))])
            skn = list(map(lambda x: skeyd[x], names_to_use))
            bkn = list(map(lambda x: bkeyd[x], names_to_use))
        # make structured arrays
        sdtype = np.dtype({"names":names_to_use,
                           "formats": ["<f8" for i in range(len(names_to_use))]})
        bdtype = np.dtype({"names":names_to_use,
                           "formats": ["<f8" for i in range(len(names_to_use))]})
        # if len(names_to_use) != sres[skn].shape[1]:
        #     # we have multiple datasets per name, drop one
        #     for iname,name in enumerate(names_to_use):
        #         if len(sres[name].shape) > 1:
        #             #
        # Convert each DataFrame row to a tuple so it maps onto the
        # structured dtype one record per time point.
        stupl = list(map(tuple, sres[skn].values))
        btupl = list(map(tuple, bres[bkn].values))
        sarr = np.array(stupl, dtype=sdtype)
        barr = np.array(btupl, dtype=bdtype)
        # add the data in, if it exists
        if sarr.shape[0] != 0:
            sg = res_grp.create_dataset("sbml_data", data=sarr)
        if barr.shape[0] != 0:
            bg = res_grp.create_dataset("bngl_data", data=barr)
    print("updated results")
    return True
def save_meta(meta, fname="meta_data.pickle"):
    """Persist ``meta`` to ``fname``, merging with any previously saved data.

    Entries already on disk are kept unless ``meta`` contains the same key,
    in which case the new value wins.

    :param meta: dict of per-test metadata to store
    :param fname: pickle file path (created if missing)
    """
    merged = {}
    if os.path.isfile(fname):
        with open(fname, "rb") as handle:
            merged = pickle.load(handle)
    merged.update(meta)
    with open(fname, "wb") as handle:
        pickle.dump(merged, handle)
# ---------------------------------------------------------------------------
# Driver: configure tool paths, select biomodel ids, run the validation loop
# and persist results (HDF5) plus per-test metadata (pickle).
# ---------------------------------------------------------------------------
# All the paths we need
# The BNG2.pl file for bionetgen runs
bng_path = "/home/monoid/apps/BioNetGen-2.5.0/BNG2.pl"
# This is the python file that can be called from the command line
sbml_translator_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/sbmlTranslator.py"
# if you give this the ATOMIZER ANALYZER 5000 will import atomizer and run internally
# translator_package_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser"
translator_package_path = None
# This is necessary for atomizer, has default naming conventions and a lot more;
# this path will be sym-linked to everywhere you want to run translator under
config_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/config"
# the path to the folder that contains 5-zero-padded folders for each test
tests_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/curated"
# Path to the CopasiSE binary used as the reference simulator.
copasi_path = "/home/monoid/apps/copasi/4.27/bin/CopasiSE"
# change directory to where we want to run the tests
os.chdir("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/analyzerTools")
# The analyzer setup
ba = AT.BiomodelAnalyzer(bng_path, sbml_translator_path, config_path, tests_path,
                         translator_import=translator_package_path, copasi_path=copasi_path)
# Biomodel ids to run in this pass.
tests = list(range(908,915))
# Hand-curated skip lists; kept verbatim since each id was triaged manually.
known_issues = set([24,25,34,154,155,196,201,589,613,668,669,696,468, # Not implemented
                    643,644,645, # Complex "i" is used in function/parameter
                    63,245,248,305,556,575,578,542, # rule named used as parameter
                    342,429,457,547,570,627,637,638, # compartment used as parameter
                    527,562,592,593,596,723,250, # Actually broken, even in Copasi
                    304,324,330,331,341,343,345,349,367,371,374,377,381,533,548,
                    549,551,618,642,670,671,680,682,684,118,252,673,531,532,555,
                    561, # no reactions
                    306,307,308,309,310,311,388,390,391,393,409,
                    428,505,512,528,557,566,567,719,641,71,90,173,
                    253, # assignment rules used in reactions
                    610, # function defs for v16/v17
                    558,568,674,722,412,445,302,208,268,51,55,162,180,179,579,
                    691,465,466,238,312,538,603,604,605,215, # Uses time
                    635,636, # Uses not only time but also encoded strings for parameters
                    119, # single reaction, not really suitable for translation
                    47,483,484,486,487, # initial states should result in no reactions,
                    164,165,167,326,375,400,554,577,664,672,693,698,
                    234,237,286,450, # Uses piecewise definitions
                    396,398,507,522,705,
                    499,474, # SBML modeller is careless and uses species that should be params
                    607, # Function not defined properly/links to another function
                    319,206,39,145,353,385,392,463,608,470,472, # non-integer stoichiometry
                    161,182,239, # true multi-compartment model
                    271 # multi-compartment and the modeller has issues
                   ])
# Need to figure out, mostly CVODE
list_of_fails = set([246,336,378,383,384,387,438,9,107,123,183,192,269,
                     279,292,328,617,678,606, # new ones
                     616, # Legitimate bug, if species name is very simple AND rate constant
                          # only depenent on the species concentration AND we end up generating
                          # an observable with the same name as species name, then BNGL thinkg
                          # we are giving obs name as the rate constant, leading to a bug
                     255, # Circular dependency in funcs?
                     401,402,403, # if func messes with func ordering
                     559, # can't load copasi result
                     64, # Due to website addition? also in too long set
                     232, # BNG takes too long?
                     172,176,177 # doesn't end up translating, takes a long time?
                    ])
#too_long = set([64,574,426,70,217,247,503,469,471,473,506,451,595, # WAAAY TOO LONG - debug
#                332,334, # ATOMIZER BREAKS THESE
#                217,247,293,426,469 # too long when atomized
#               ])
too_long = set([64 ,172,176,177,212,217,235,247,293,385,
                426,451,457,463,469,470,471,472,473,474,
                496,497,503,505,506,574,595,835,
                863, # transl too long
                232,608, # BNG takes too long
                63,70, # long but completes?
                269 # due to long CVODE error
               ])
################# NEW CHECKS ##############
# A complete new set of checks to see the latest state of the tool as we are
# writing the manuscript.
new_checks = set([64,217,235,496, # too long
                  497,498, # skey ratio index out of range?
                  63, # fairly long but does complete
                  119,465,468, # no data?
                  247,269,469,470,471,472,473,474,
                  503,505,506,595,606,608,835,863 # long, didn't check if completes
                 ])
################# RUN FAILS ###############
run_fails = set([9,24,25,34,51,55,107,
                 123,154,155,162,164,165,167,172,176,177,179,180,183,192,
                 201,208,215,232,234,237,238,245,246,248,250,255,268,279,286,292,
                 302,305,312,326,328,332,334,336,353,375,383,384,385,387,396,398,
                 400,401,402,403,412,426,429,438,445,450,451,457,463,466,483,484,
                 486,487,499,507,522,527,531,532,538,542,547,554,555,556,558,559,
                 561,562,574,575,577,578,579,589,592,593,599,600,602,607,610,617,
                 627,635,636,637,638,643,644,645,664,668,669,672,673,674,675,678,
                 687,688,692,693,696,698,705,722,723,730,731,748,749,757,759,760,
                 763,764,766,775,801,802,808,815,824,826,833,837,840,841,849,851,
                 858,859,876,879,880 # run_failed
                ])
################# EVENTS #################
w_event = set([1,7,56,77,81,87,88,95,96,97,101,104,109, # models with events
               111,117,120,121,122,124,125,126,127,128,129,130,131, # models with events
               132,133,134,135,136,137,139,140,141,142,144,148,149, # models with events
               152,153,158,186,187,188,189,193,194,195,196,227,235, # models with events
               241,244,256,265,281,285,287,297,301,316,317,318,327, # models with events
               337,338,339,340,342,344,404,408,422,436,437,439,479, # models with events
               480,488,493,494,496,497,534,535,536,537,540,541,563, # models with events
               570,571,597,598,601,612,613,620,621,628,632,634,650, # models with events
               659,681,695,699,702,706,711,718,727,734,735,736,786, # models with events
               789,791,794,806,814,816,817,818,820,822,825,829,834, # models with events
               856,860,862,864,901]) # models with events
################# END CHECKS ##############
all_issues = known_issues.union(w_event)
all_issues = all_issues.union(list_of_fails)
# Load in database
# dbname = "validation.h5"
# if os.path.isfile(dbname):
#     db = pd.read_hdf(dbname,key="validation")
# else:
#     db = pd.DataFrame()
# run tests
# try:
# Always start from a fresh results file for this pass.
if os.path.isfile("results.h5"):
    os.remove("results.h5")
    # results_file = h5py.File("results.h5","a")
    results_file = h5py.File("results.h5","w")
else:
    results_file = h5py.File("results.h5","w")
meta_data = {}
for test_no in tests:
    #if test_no in all_issues:
    #    continue
    # if test_no in w_event or test_no in new_checks or test_no in run_fails:
    # if test_no in new_checks or test_no in run_fails:
    #     continue
    # Known long-running models: record and skip.
    if test_no in too_long:
        meta_data[test_no] = {"too_long":True}
        continue
    if (os.path.isfile("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/curated/BIOMD{0:010d}.xml".format(test_no))):
        #run_test(ba, test_no, t_end=100, atomize=False, db=db)
        meta_data[test_no] = {"file":True, "too_long":False}
        run_test(ba, test_no, t_end=100, atomize=True, meta=meta_data)
        # Flush this model's curves into results.h5 as we go so a crash
        # mid-run doesn't lose earlier models.
        update_results(ba.all_results,results_file)
    else:
        meta_data[test_no] = {"file":False}
        print("number {} doesn't exist".format(test_no))
save_meta(meta_data)
# with open("validation.pickle", 'wb') as f:
#     pickle.dump(ba.all_results, f)
#except:
#    with open("validation.pickle", 'wb') as f:
#        pickle.dump(ba.all_results, f)
# db.to_hdf(dbname,"validation")
import os, re, sys, urllib, requests, base64, IPython, io, pickle, glob
sys.path.append("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/manual")
import itertools as itt
import numpy as np
import subprocess as sb
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import roadrunner, h5py
from bs4 import BeautifulSoup as BS
from IPython.display import Image, display
from matplotlib import rcParams
import analyzerTools as AT
def run_test(analyzer, test_no, t_end=1000, atomize=False, db=None, meta=None):
if(analyzer.run_single_test(test_no, t_end=100, atomize=atomize,meta=meta)):
if meta:
meta[test_no]["success"] = True
print("run successful {}".format(test_no))
2:
# skey, bkey = key
# # got curation keys
# elif len(key) == 3:
# skey, bkey, ckey = key
# else:
# print("couldn't find keys")
else:
if meta:
meta[test_no]["success"] = False
print("run failed {}".format(test_no))
def uniquefy_names(keys):
unique_keys = []
if len(keys[0]) == 3:
bkeys_d = {}
skeys_d = {}
ckeys_d = {}
for key in keys:
bkey, skey, ckey = key
if bkey in bkeys_d.keys():
bkey_new = bkey + "_{}".format(bkeys_d[bkey])
bkeys_d[bkey] += 1
bkey = bkey_new
else:
bkeys_d[bkey] = 1
if skey in skeys_d.keys():
skey_new = skey + "_{}".format(skeys_d[skey])
skeys_d[skey] += 1
skey = skey_new
else:
skeys_d[skey] = 1
if ckey in ckeys_d.keys():
ckey_new = ckey + "_{}".format(ckeys_d[ckey])
ckeys_d[ckey] += 1
ckey = ckey_new
else:
ckeys_d[ckey] = 1
unique_keys.append( (bkey,skey,ckey) )
else:
bkeys_d = {}
skeys_d = {}
for key in keys:
bkey, skey = key
if bkey in bkeys_d.keys():
bkey_new = bkey + "_{}".format(bkeys_d[bkey])
bkeys_d[bkey] += 1
bkey = bkey_new
else:
bkeys_d[bkey] = 1
if skey in skeys_d.keys():
skey_new = skey + "_{}".format(skeys_d[skey])
skeys_d[skey] += 1
skey = skey_new
else:
skeys_d[skey] = 1
unique_keys.append( (bkey,skey) )
return unique_keys
def update_results(results, h5file):
for key in results:
if "{:010d}".format(key) in h5file:
continue
res_grp = h5file.create_group("{:010d}".format(key))
sres, bres, _, _, keys_used = results[key]
if len(keys_used) == 0:
continue
if len(keys_used[0]) == 2:
names_to_use = [keys_used[i][1] for i in range(len(keys_used))]
skeyd = dict([(keys_used[i][1],keys_used[i][0]) for i in range(len(keys_used))])
bkeyd = dict([(keys_used[i][1],keys_used[i][1]) for i in range(len(keys_used))])
skn = list(map(lambda x: skeyd[x], names_to_use))
bkn = list(map(lambda x: bkeyd[x], names_to_use))
else:
names_to_use = [keys_used[i][2] for i in range(len(keys_used))]
skeyd = dict([(keys_used[i][2],keys_used[i][0]) for i in range(len(keys_used))])
bkeyd = dict([(keys_used[i][2],keys_used[i][1]) for i in range(len(keys_used))])
skn = list(map(lambda x: skeyd[x], names_to_use))
bkn = list(map(lambda x: bkeyd[x], names_to_use))
sdtype = np.dtype({"names":names_to_use,
"formats": ["<f8" for i in range(len(names_to_use))]})
bdtype = np.dtype({"names":names_to_use,
"formats": ["<f8" for i in range(len(names_to_use))]})
(map(tuple, sres[skn].values))
btupl = list(map(tuple, bres[bkn].values))
sarr = np.array(stupl, dtype=sdtype)
barr = np.array(btupl, dtype=bdtype)
if sarr.shape[0] != 0:
sg = res_grp.create_dataset("sbml_data", data=sarr)
if barr.shape[0] != 0:
bg = res_grp.create_dataset("bngl_data", data=barr)
print("updated results")
return True
def save_meta(meta, fname="meta_data.pickle"):
if os.path.isfile(fname):
with open(fname, "rb") as f:
m = pickle.load(f)
for key in meta:
m[key] = meta[key]
with open(fname, "wb") as f:
pickle.dump(m, f)
else:
with open(fname, "wb") as f:
pickle.dump(meta, f)
bng_path = "/home/monoid/apps/BioNetGen-2.5.0/BNG2.pl"
sbml_translator_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/sbmlTranslator.py"
translator_package_path = None
config_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/config"
tests_path = "/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/curated"
copasi_path = "/home/monoid/apps/copasi/4.27/bin/CopasiSE"
os.chdir("/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/analyzerTools")
ba = AT.BiomodelAnalyzer(bng_path, sbml_translator_path, config_path, tests_path,
translator_import=translator_package_path, copasi_path=copasi_path)
tests = list(range(908,915))
known_issues = set([24,25,34,154,155,196,201,589,613,668,669,696,468, # Not implemented
643,644,645, # Complex "i" is used in function/parameter
63,245,248,305,556,575,578,542, # rule named used as parameter
342,429,457,547,570,627,637,638, # compartment used as parameter
527,562,592,593,596,723,250, # Actually broken, even in Copasi
304,324,330,331,341,343,345,349,367,371,374,377,381,533,548,
549,551,618,642,670,671,680,682,684,118,252,673,531,532,555,
561, # no reactions
306,307,308,309,310,311,388,390,391,393,409,
428,505,512,528,557,566,567,719,641,71,90,173,
253, # assignment rules used in reactions
610, # function defs for v16/v17
558,568,674,722,412,445,302,208,268,51,55,162,180,179,579,
691,465,466,238,312,538,603,604,605,215, # Uses time
635,636, # Uses not only time but also encoded strings for parameters
119, # single reaction, not really suitable for translation
47,483,484,486,487, # initial states should result in no reactions,
164,165,167,326,375,400,554,577,664,672,693,698,
234,237,286,450, # Uses piecewise definitions
396,398,507,522,705,
499,474, # SBML modeller is careless and uses species that should be params
607, # Function not defined properly/links to another function
319,206,39,145,353,385,392,463,608,470,472, # non-integer stoichiometry
161,182,239, # true multi-compartment model
271 # multi-compartment and the modeller has issues
])
# Need to figure out, mostly CVODE
list_of_fails = set([246,336,378,383,384,387,438,9,107,123,183,192,269,
279,292,328,617,678,606, # new ones
616, # Legitimate bug, if species name is very simple AND rate constant
# only depenent on the species concentration AND we end up generating
# an observable with the same name as species name, then BNGL thinkg
# we are giving obs name as the rate constant, leading to a bug
255, # Circular dependency in funcs?
401,402,403, # if func messes with func ordering
559, # can't load copasi result
64,
232,
172,176,177
])
#too_long = set([64,574,426,70,217,247,503,469,471,473,506,451,595, # WAAAY TOO LONG - debug
# 332,334, # ATOMIZER BREAKS THESE
# 217,247,293,426,469 # too long when atomized
# ])
too_long = set([64 ,172,176,177,212,217,235,247,293,385,
426,451,457,463,469,470,471,472,473,474,
496,497,503,505,506,574,595,835,
863, # transl too long
232,608, # BNG takes too long
63,70, # long but completes?
269 # due to long CVODE error
])
################# NEW CHECKS ##############
# A complete new set of checks to see the latest state of the tool as we are
# writing the manuscript.
new_checks = set([64,217,235,496, # too long
497,498, # skey ratio index out of range?
63, # fairly long but does complete
119,465,468, # no data?
247,269,469,470,471,472,473,474,
503,505,506,595,606,608,835,863 # long, didn't check if completes
])
731,748,749,757,759,760,
763,764,766,775,801,802,808,815,824,826,833,837,840,841,849,851,
858,859,876,879,880
])
856,860,862,864,901])
no, t_end=100, atomize=True, meta=meta_data)
update_results(ba.all_results,results_file)
else:
meta_data[test_no] = {"file":False}
print("number {} doesn't exist".format(test_no))
save_meta(meta_data)
# with open("validation.pickle", 'wb') as f:
# pickle.dump(ba.all_results, f)
#except:
# with open("validation.pickle", 'wb') as f:
# pickle.dump(ba.all_results, f)
# db.to_hdf(dbname,"validation")
| true | true |
f7201f03fb11fc26e5295dea810629ac3fa330da | 5,030 | py | Python | cedar/forms.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | cedar/forms.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | 11 | 2020-03-24T15:29:46.000Z | 2022-03-11T23:14:48.000Z | cedar/forms.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | from os.path import join
from email.mime.image import MIMEImage
from django.conf import settings
from django.forms import ModelForm, ValidationError, ChoiceField
from django.forms.models import BaseInlineFormSet
from django.forms.models import inlineformset_factory
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget, ReadOnlyPasswordHashField, PasswordResetForm
from django.contrib.auth.tokens import default_token_generator
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from crm.models import Person
from crm.forms import PersonSettingsForm
from django.contrib.auth.models import User
from security.forms import SecurityLevelModelFormMixin
from security.models import SecurityLevel
class UserAdminForm(SecurityLevelModelFormMixin, ModelForm):
    """
    Override the user admin form so that we can force firstname, lastname
    to be required --- needed for pushing changes over to crm.Person.

    Also exposes the security level (via SecurityLevelModelFormMixin) and a
    read-only password hash display.
    """
    # Read-only display of the stored password hash; the actual change goes
    # through the linked sub-form.
    # NOTE(review): label=("Password") is just a parenthesized string, not a
    # translation call -- possibly an intended gettext `_("Password")`; confirm.
    password = ReadOnlyPasswordHashField(label=("Password"),
                                         help_text=("Raw passwords are not stored, so there is no way to see "
                                                    "this user's password, but you can change the password "
                                                    "using <a href=\"password/\">this form</a>."))
    # Need to minimally declare security_level here so that the user admin can see it.
    # The mixin will take care of details.
    security_level = ChoiceField()
    def __init__(self, *args, **kwargs):
        # Name/email are optional on auth.User, but required here so that the
        # mirrored crm.Person record is always complete.
        super(UserAdminForm, self).__init__(*args, **kwargs)
        self.fields['first_name'].required = True
        self.fields['last_name'].required = True
        self.fields['email'].required = True
        # self.fields['password'].widget = ReadOnlyPasswordHashWidget()
    def get_security_level_default(self):
        # Default new users to the least-privileged level (the highest
        # numeric value in SecurityLevel.level_choices).
        level_range = [x[0] for x in SecurityLevel.level_choices]
        return max(level_range)  # Default users to the lowest security level.
    class Meta:
        model = User
        fields = '__all__'
class UserSettingsForm(ModelForm):
    """Profile-settings form used by the user menu.

    Exposes only name and e-mail; the password is deliberately not editable
    here (for now).
    """
    def __init__(self, *args, **kwargs):
        super(UserSettingsForm, self).__init__(*args, **kwargs)
        # These fields are optional on django.contrib.auth.User, but the
        # mirrored crm.Person record needs them, so require them here.
        for field_name in ('first_name', 'last_name', 'email'):
            self.fields[field_name].required = True
    class Meta:
        model = User
        fields = ('first_name',
                  'last_name',
                  'email',)
class CedarPasswordResetForm(PasswordResetForm):
    """Password-reset form that sends a branded multipart e-mail.

    Extends Django's PasswordResetForm to (a) embed the Cedar logo as an
    inline image in the HTML alternative and (b) derive the reset-link
    domain from the incoming request when no override is given.
    """
    def send_mail(self, subject_template_name, email_template_name,
                  context, from_email, to_email, html_email_template_name=None):
        """
        Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
        """
        subject = loader.render_to_string(subject_template_name, context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        body = loader.render_to_string(email_template_name, context)
        email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
        if html_email_template_name is not None:
            html_email = loader.render_to_string(html_email_template_name, context)
            email_message.attach_alternative(html_email, 'text/html')
            # 'related' lets the HTML part reference the inline logo by its
            # Content-ID instead of showing it as a regular attachment.
            email_message.mixed_subtype = 'related'
            # NOTE(review): assumes the logo file exists under STATIC_ROOT
            # (i.e. collectstatic has run) -- a missing file raises here.
            with open(join(settings.STATIC_ROOT, 'css/cedarbox_icon_gry.png'), 'rb') as fp:
                logo_img = MIMEImage(fp.read())
                logo_img.add_header('Content-ID', '<{}>'.format('cedarbox_icon_gry.png'))
                email_message.attach(logo_img)
        email_message.send()
    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None, html_email_template_name=None):
        """Generate the reset link and send the e-mail.

        Same contract as PasswordResetForm.save, except the link domain
        defaults to the request's Host header when not overridden.
        """
        # If domain_override hasn't been provided. Let's override it ourself using the request.
        # NOTE(review): HTTP_HOST is client-supplied; request.get_host() would
        # additionally validate it against ALLOWED_HOSTS -- confirm intent.
        if domain_override is None and request is not None:
            domain_override = request.META['HTTP_HOST']
        return super(CedarPasswordResetForm, self).save(
            domain_override=domain_override,
            subject_template_name=subject_template_name,
            email_template_name=email_template_name,
            use_https=use_https, token_generator=token_generator,
            from_email=from_email, request=request, html_email_template_name=html_email_template_name
        )
# Inline formset linking the auth User to its crm.Person profile record.
UserSettingsFormset = inlineformset_factory(
    User,
    Person,
    form=PersonSettingsForm,
    extra=1,
)
| 43.362069 | 110 | 0.663022 | from os.path import join
from email.mime.image import MIMEImage
from django.conf import settings
from django.forms import ModelForm, ValidationError, ChoiceField
from django.forms.models import BaseInlineFormSet
from django.forms.models import inlineformset_factory
from django.contrib.auth.forms import ReadOnlyPasswordHashWidget, ReadOnlyPasswordHashField, PasswordResetForm
from django.contrib.auth.tokens import default_token_generator
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from crm.models import Person
from crm.forms import PersonSettingsForm
from django.contrib.auth.models import User
from security.forms import SecurityLevelModelFormMixin
from security.models import SecurityLevel
class UserAdminForm(SecurityLevelModelFormMixin, ModelForm):
password = ReadOnlyPasswordHashField(label=("Password"),
help_text=("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
# Need to minimally declare security_level here so that the user admin can see it.
# The mixin will take care of details.
security_level = ChoiceField()
def __init__(self, *args, **kwargs):
super(UserAdminForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
self.fields['last_name'].required = True
self.fields['email'].required = True
# self.fields['password'].widget = ReadOnlyPasswordHashWidget()
def get_security_level_default(self):
level_range = [x[0] for x in SecurityLevel.level_choices]
return max(level_range) # Default users to the lowest security level.
class Meta:
model = User
fields = '__all__'
class UserSettingsForm(ModelForm):
def __init__(self, *args, **kwargs):
super(UserSettingsForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
self.fields['last_name'].required = True
self.fields['email'].required = True
class Meta:
model = User
fields = ('first_name',
'last_name',
'email',)
class CedarPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.mixed_subtype = 'related'
with open(join(settings.STATIC_ROOT, 'css/cedarbox_icon_gry.png'), 'rb') as fp:
logo_img = MIMEImage(fp.read())
logo_img.add_header('Content-ID', '<{}>'.format('cedarbox_icon_gry.png'))
email_message.attach(logo_img)
email_message.send()
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None):
# If domain_override hasn't been provided. Let's override it ourself using the request.
if domain_override is None and request is not None:
domain_override = request.META['HTTP_HOST']
return super(CedarPasswordResetForm, self).save(
domain_override=domain_override,
subject_template_name=subject_template_name,
email_template_name=email_template_name,
use_https=use_https, token_generator=token_generator,
from_email=from_email, request=request, html_email_template_name=html_email_template_name
)
UserSettingsFormset = inlineformset_factory(User,
Person,
form=PersonSettingsForm,
extra=1
)
| true | true |
f7201f3e709cd60912d88e118c8edfe5cfcff4bb | 136 | py | Python | dora/tests/test_share.py | kingjr/dora | f70fab1620c6cad6fc094be15ab22994bd08dd01 | [
"MIT"
] | null | null | null | dora/tests/test_share.py | kingjr/dora | f70fab1620c6cad6fc094be15ab22994bd08dd01 | [
"MIT"
] | null | null | null | dora/tests/test_share.py | kingjr/dora | f70fab1620c6cad6fc094be15ab22994bd08dd01 | [
"MIT"
] | null | null | null | from dora.share import dump, load
def test_dump_load():
    """dump followed by load must round-trip a nested payload unchanged."""
    payload = [1, 2, 4, {'youpi': 'test', 'b': 56.3}]
    assert load(dump(payload)) == payload
| 19.428571 | 47 | 0.566176 | from dora.share import dump, load
def test_dump_load():
x = [1, 2, 4, {'youpi': 'test', 'b': 56.3}]
assert load(dump(x)) == x
| true | true |
f7201f51ef7235aa37e52cd053e5a1d6d1e724eb | 4,346 | py | Python | kale/utils/download.py | SheffieldAI/pykale | be7670941fb06835883c80477b26702d407017db | [
"MIT"
] | 324 | 2020-11-05T19:07:11.000Z | 2022-03-16T21:31:39.000Z | kale/utils/download.py | SheffieldAI/pykale | be7670941fb06835883c80477b26702d407017db | [
"MIT"
] | 212 | 2020-10-31T15:18:59.000Z | 2022-03-25T14:13:09.000Z | kale/utils/download.py | sz144/pykale | 1f5cce57a50f7772520a482e8135a391eb0517f5 | [
"MIT"
] | 52 | 2020-10-28T15:43:48.000Z | 2022-02-24T02:29:52.000Z | # ===============================================================================
# Author: Xianyuan Liu, xianyuan.liu@outlook.com
# Raivo Koot, rekoot1@sheffield.ac.uk
# Haiping Lu, h.lu@sheffield.ac.uk or hplu@ieee.org
# ===============================================================================
"""Data downloading and compressed data extraction functions, Based on
https://github.com/pytorch/vision/blob/master/torchvision/datasets/utils.py
https://github.com/pytorch/pytorch/blob/master/torch/hub.py
"""
import logging
import os
from pathlib import Path
from torch.hub import download_url_to_file
from torchvision.datasets.utils import download_and_extract_archive, download_file_from_google_drive, extract_archive
def download_file_by_url(url, output_directory, output_file_name, file_format=None):
    """Download file/compressed file by url.

    Args:
        url (string): URL of the object to download
        output_directory (string, optional): Full path where object will be saved
            Absolute path recommended. Relative path also works.
        output_file_name (string, optional): File name which object will be saved as
        file_format (string, optional): File format
            For compressed file, support ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]

    Example: (Grab the raw link from GitHub. Notice that using "raw" in the URL.)
        >>> url = "https://github.com/pykale/data/raw/main/videos/video_test_data/ADL/annotations/labels_train_test/adl_P_04_train.pkl"
        >>> download_file_by_url(url, "data", "a.pkl", "pkl")

        >>> url = "https://github.com/pykale/data/raw/main/videos/video_test_data.zip"
        >>> download_file_by_url(url, "data", "video_test_data.zip", "zip")
    """
    output_directory = Path(output_directory).absolute()
    file = Path(output_directory).joinpath(output_file_name)
    if os.path.exists(file):
        # Target already present -- treat as a cache hit and do nothing.
        logging.info("Skipping Download and Extraction")
        return
    # exist_ok guards against a race with concurrent callers and matches
    # the behaviour of download_file_gdrive below (was a bare exists check).
    os.makedirs(output_directory, exist_ok=True)
    if file_format in ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]:
        # Archive: fetch and unpack in one step.
        logging.info("Downloading and extracting {}.".format(output_file_name))
        download_and_extract_archive(url=url, download_root=output_directory, filename=output_file_name)
        logging.info("Datasets downloaded and extracted in {}".format(file))
    else:
        # Plain file: just fetch it.
        logging.info("Downloading {}.".format(output_file_name))
        download_url_to_file(url, file)
        logging.info("Datasets downloaded in {}".format(file))
def download_file_gdrive(id, output_directory, output_file_name, file_format=None):
    """Download a file (optionally a compressed archive) from Google Drive.

    Args:
        id (string): Google Drive file id of the object to download
        output_directory (string, optional): Full path where object will be saved.
            Absolute path recommended. Relative path also works.
        output_file_name (string, optional): File name which object will be saved as
        file_format (string, optional): File format.
            For compressed file, support ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]

    Example:
        >>> gdrive_id = "1U4D23R8u8MJX9KVKb92bZZX-tbpKWtga"
        >>> download_file_gdrive(gdrive_id, "data", "demo_datasets.zip", "zip")

        >>> gdrive_id = "1SV7fmAnWj-6AU9X5BGOrvGMoh2Gu9Nih"
        >>> download_file_gdrive(gdrive_id, "data", "dummy_data.csv", "csv")
    """
    out_dir = Path(output_directory).absolute()
    target = out_dir.joinpath(output_file_name)
    if os.path.exists(target):
        # Already downloaded -- nothing to do.
        logging.info("Skipping Download and Extraction")
        return
    os.makedirs(out_dir, exist_ok=True)
    logging.info("Downloading {}.".format(output_file_name))
    download_file_from_google_drive(id, out_dir, output_file_name)
    if file_format is not None and file_format in ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]:
        # Compressed download: unpack it next to the archive.
        logging.info("Extracting {}.".format(output_file_name))
        extract_archive(target.as_posix())
        logging.info("Datasets downloaded and extracted in {}".format(target))
    else:
        logging.info("Datasets downloaded in {}".format(target))
| 46.731183 | 135 | 0.654395 |
import logging
import os
from pathlib import Path
from torch.hub import download_url_to_file
from torchvision.datasets.utils import download_and_extract_archive, download_file_from_google_drive, extract_archive
def download_file_by_url(url, output_directory, output_file_name, file_format=None):
output_directory = Path(output_directory).absolute()
file = Path(output_directory).joinpath(output_file_name)
if os.path.exists(file):
logging.info("Skipping Download and Extraction")
return
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if file_format in ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]:
logging.info("Downloading and extracting {}.".format(output_file_name))
download_and_extract_archive(url=url, download_root=output_directory, filename=output_file_name)
logging.info("Datasets downloaded and extracted in {}".format(file))
else:
logging.info("Downloading {}.".format(output_file_name))
download_url_to_file(url, file)
logging.info("Datasets downloaded in {}".format(file))
def download_file_gdrive(id, output_directory, output_file_name, file_format=None):
output_directory = Path(output_directory).absolute()
file = Path(output_directory).joinpath(output_file_name)
if os.path.exists(file):
logging.info("Skipping Download and Extraction")
return
os.makedirs(output_directory, exist_ok=True)
logging.info("Downloading {}.".format(output_file_name))
download_file_from_google_drive(id, output_directory, output_file_name)
if file_format is not None and file_format in ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]:
logging.info("Extracting {}.".format(output_file_name))
extract_archive(file.as_posix())
logging.info("Datasets downloaded and extracted in {}".format(file))
else:
logging.info("Datasets downloaded in {}".format(file))
| true | true |
f7202019c7b0f25327f421ee4dd02e608acbb151 | 581 | py | Python | snooker/models/snooker_org/player.py | mgorsk1/snooker | 97a3868bd47c3aa3f134d34ee65e8dab21b98227 | [
"Apache-2.0"
] | 1 | 2021-04-11T17:48:28.000Z | 2021-04-11T17:48:28.000Z | snooker/models/snooker_org/player.py | mgorsk1/snooker | 97a3868bd47c3aa3f134d34ee65e8dab21b98227 | [
"Apache-2.0"
] | 4 | 2021-04-11T17:26:23.000Z | 2021-04-12T06:46:24.000Z | snooker/models/snooker_org/player.py | mgorsk1/snooker | 97a3868bd47c3aa3f134d34ee65e8dab21b98227 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from dataclasses_json import dataclass_json
from snooker.models import JsonModel
@dataclass_json
@dataclass
class Player(JsonModel):
    """Player record returned by the snooker.org API.

    Field names mirror the API payload one-to-one so that
    ``dataclass_json`` can (de)serialise without a field map.
    """
    # Identity
    ID: int
    Type: int
    FirstName: str
    MiddleName: str
    LastName: str
    # Team play
    TeamName: str
    TeamNumber: int
    TeamSeason: int
    # Profile
    ShortName: str
    Nationality: str
    Sex: str
    BioPage: str
    Born: str
    Twitter: str
    SurnameFirst: bool  # presumably surname-first display order -- unverified
    License: str
    Club: str
    URL: str
    Photo: str
    PhotoSource: str
    # Career span
    FirstSeasonAsPro: int
    LastSeasonAsPro: int
    Info: str
| 17.088235 | 43 | 0.678141 | from dataclasses import dataclass
from dataclasses_json import dataclass_json
from snooker.models import JsonModel
@dataclass_json
@dataclass
class Player(JsonModel):
ID: int
Type: int
FirstName: str
MiddleName: str
LastName: str
TeamName: str
TeamNumber: int
TeamSeason: int
ShortName: str
Nationality: str
Sex: str
BioPage: str
Born: str
Twitter: str
SurnameFirst: bool
License: str
Club: str
URL: str
Photo: str
PhotoSource: str
FirstSeasonAsPro: int
LastSeasonAsPro: int
Info: str
| true | true |
f7202096a51dcc8e270109c64d1240dfd04ce0a3 | 2,783 | py | Python | jr/plot/meg.py | kingjr/jr-tools | 8a4c9c42a9e36e224279566945e798869904c4c8 | [
"BSD-2-Clause"
] | 11 | 2016-01-21T22:41:28.000Z | 2018-10-07T12:55:18.000Z | jr/plot/meg.py | kingjr/jr-tools | 8a4c9c42a9e36e224279566945e798869904c4c8 | [
"BSD-2-Clause"
] | 2 | 2016-12-12T14:25:47.000Z | 2018-05-07T18:57:42.000Z | jr/plot/meg.py | kingjr/jr-tools | 8a4c9c42a9e36e224279566945e798869904c4c8 | [
"BSD-2-Clause"
] | 17 | 2016-03-15T17:34:04.000Z | 2020-03-15T00:31:14.000Z | import matplotlib.pyplot as plt
import numpy as np
from . import pretty_plot
def plot_butterfly(evoked, ax=None, sig=None, color=None, ch_type=None):
    """Butterfly plot of evoked data with optional significance shading.

    Parameters
    ----------
    evoked : mne.Evoked
        Evoked response to plot (times are converted to milliseconds).
    ax : matplotlib Axes | None
        Axes to draw into; defaults to the current axes.
    sig : array | None
        (n_channels, n_times) mask; time points where any channel is
        significant are shaded between the channel envelope.
    color : matplotlib color | None
        Fill color for the significance shading.
    ch_type : str | None
        If given, restrict plotting (and ``sig``) to this channel type.

    Returns
    -------
    ax : matplotlib Axes
        The axes that were drawn into.
    """
    from mne import pick_types
    if ch_type is not None:
        # NOTE(review): ch_type is passed positionally, so it lands on the
        # `meg` argument of pick_types / Evoked.pick_types -- works for
        # 'mag'/'grad' but not e.g. 'eeg'; confirm intended usage.
        picks = pick_types(evoked.info, ch_type)
        evoked = evoked.copy()
        evoked = evoked.pick_types(ch_type)
        sig = sig[picks, :] if sig is not None else None
    times = evoked.times * 1e3  # seconds -> milliseconds
    data = evoked.data
    ax = plt.gca() if ax is None else ax
    ax.plot(times, data.T, color='k', alpha=.5)
    # Channel envelope: row 0 is the per-time max, row 1 the per-time min.
    gfp = np.vstack((data.max(0), data.min(0)))
    if sig is not None:
        # Collapse channels: 1 where any channel is significant at that time.
        sig = np.array(np.sum(sig, axis=0) > 0., dtype=int)
        ax.fill_between(np.hstack((times, times[::-1])),
                        np.hstack((sig * gfp[0, :] + (1 - sig) * gfp[1, :],
                                   gfp[1, ::-1])),
                        facecolor=color, edgecolor='none', alpha=.5,
                        zorder=len(data) + 1)
    ax.axvline(0, color='k')
    ax.set_xlabel('Times (ms)')
    # Ticks at round hundreds; label only the extremes and zero.
    xticks = np.arange(np.ceil(min(times) / 1e2) * 1e2,
                       np.floor(max(times) / 1e2) * 1e2 + 1e-10, 100)
    ax.set_xticks(xticks)
    ax.set_xticklabels(['%i' % t if t in [xticks[0], xticks[-1], 0]
                        else '' for t in xticks])
    ax.set_yticks([np.min(data), np.max(data)])
    ax.set_ylim(np.min(data), np.max(data))
    # Single xlim call (the original set it twice with identical values).
    ax.set_xlim(np.min(times), np.max(times))
    pretty_plot(ax)
    return ax
def plot_gfp(evoked, ax=None, sig=None, color=None, ch_type='mag'):
    """Plot global field power (GFP) with optional significance shading.

    Parameters
    ----------
    evoked : mne.Evoked
        Evoked response; GFP is the across-channel std at each time point.
    ax : matplotlib Axes | None
        Axes to draw into; defaults to the current axes.
    sig : array | None
        (n_channels, n_times) mask; significant time points are shaded
        under the GFP trace.
    color : matplotlib color | None
        Fill color for the significance shading.
    ch_type : str | None
        Channel type to restrict to (default 'mag').

    Returns
    -------
    ax : matplotlib Axes
        The axes that were drawn into.
    """
    from mne import pick_types
    if ch_type is not None:
        # NOTE(review): ch_type is passed positionally (lands on `meg`);
        # works for 'mag'/'grad' only -- confirm intended usage.
        picks = pick_types(evoked.info, ch_type)
        evoked = evoked.copy()
        evoked = evoked.pick_types(ch_type)
        sig = sig[picks, :] if sig is not None else None
    times = evoked.times * 1e3  # seconds -> milliseconds
    gfp = np.std(evoked.data, axis=0)  # across-channel std per time point
    ax = plt.gca() if ax is None else ax
    ax.plot(times, gfp, color='k', alpha=.5)
    if sig is not None:
        # Collapse channels: 1 where any channel is significant at that time.
        sig = np.array(np.sum(sig, axis=0) > 0., dtype=int)
        ax.fill_between(np.hstack((times, times[::-1])),
                        np.hstack((sig * gfp, np.zeros_like(gfp))),
                        facecolor=color, edgecolor='none', alpha=.5)
    ax.axvline(0, color='k')
    ax.set_xlabel('Times (ms)')
    # Ticks at round hundreds; label only the extremes and zero.
    xticks = np.arange(np.ceil(min(times) / 1e2) * 1e2,
                       np.floor(max(times) / 1e2) * 1e2 + 1e-10, 100)
    ax.set_xticks(xticks)
    ax.set_xticklabels(['%i' % t if t in [xticks[0], xticks[-1], 0]
                        else '' for t in xticks])
    ax.set_yticks([np.min(gfp), np.max(gfp)])
    ax.set_ylim(np.min(gfp), np.max(gfp))
    # Single xlim call (the original set it twice with identical values).
    ax.set_xlim(np.min(times), np.max(times))
    pretty_plot(ax)
    return ax
| 40.333333 | 75 | 0.565936 | import matplotlib.pyplot as plt
import numpy as np
from . import pretty_plot
def plot_butterfly(evoked, ax=None, sig=None, color=None, ch_type=None):
from mne import pick_types
if ch_type is not None:
picks = pick_types(evoked.info, ch_type)
evoked = evoked.copy()
evoked = evoked.pick_types(ch_type)
sig = sig[picks, :] if sig is not None else None
times = evoked.times * 1e3
data = evoked.data
ax = plt.gca() if ax is None else ax
ax.plot(times, data.T, color='k', alpha=.5)
gfp = np.vstack((data.max(0), data.min(0)))
if sig is not None:
sig = np.array(np.sum(sig, axis=0) > 0., dtype=int)
ax.fill_between(np.hstack((times, times[::-1])),
np.hstack((sig * gfp[0, :] + (1 - sig) * gfp[1, :],
gfp[1, ::-1])),
facecolor=color, edgecolor='none', alpha=.5,
zorder=len(data) + 1)
ax.axvline(0, color='k')
ax.set_xlabel('Times (ms)')
ax.set_xlim(min(times), max(times))
xticks = np.arange(np.ceil(min(times)/1e2) * 1e2,
np.floor(max(times)/1e2) * 1e2 + 1e-10, 100)
ax.set_xticks(xticks)
ax.set_xticklabels(['%i' % t if t in [xticks[0], xticks[-1], 0]
else '' for t in xticks])
ax.set_yticks([np.min(data), np.max(data)])
ax.set_ylim(np.min(data), np.max(data))
ax.set_xlim(np.min(times), np.max(times))
pretty_plot(ax)
return ax
def plot_gfp(evoked, ax=None, sig=None, color=None, ch_type='mag'):
from mne import pick_types
if ch_type is not None:
picks = pick_types(evoked.info, ch_type)
evoked = evoked.copy()
evoked = evoked.pick_types(ch_type)
sig = sig[picks, :] if sig is not None else None
times = evoked.times * 1e3
gfp = np.std(evoked.data, axis=0)
ax = plt.gca() if ax is None else ax
ax.plot(times, gfp, color='k', alpha=.5)
if sig is not None:
sig = np.array(np.sum(sig, axis=0) > 0., dtype=int)
ax.fill_between(np.hstack((times, times[::-1])),
np.hstack((sig * gfp, np.zeros_like(gfp))),
facecolor=color, edgecolor='none', alpha=.5)
ax.axvline(0, color='k')
ax.set_xlabel('Times (ms)')
ax.set_xlim(min(times), max(times))
xticks = np.arange(np.ceil(min(times)/1e2) * 1e2,
np.floor(max(times)/1e2) * 1e2 + 1e-10, 100)
ax.set_xticks(xticks)
ax.set_xticklabels(['%i' % t if t in [xticks[0], xticks[-1], 0]
else '' for t in xticks])
ax.set_yticks([np.min(gfp), np.max(gfp)])
ax.set_ylim(np.min(gfp), np.max(gfp))
ax.set_xlim(np.min(times), np.max(times))
pretty_plot(ax)
return ax
| true | true |
f72020bbaf98f9d38b10dfa1e51aa9ccf67bf9f2 | 7,055 | py | Python | phdhelper/suMMSary/suMMSary.py | jmsplank/phdhelper | c06dd06669b42dbe4c9e1a6eeec3d0ad3885d2eb | [
"MIT"
] | null | null | null | phdhelper/suMMSary/suMMSary.py | jmsplank/phdhelper | c06dd06669b42dbe4c9e1a6eeec3d0ad3885d2eb | [
"MIT"
] | null | null | null | phdhelper/suMMSary/suMMSary.py | jmsplank/phdhelper | c06dd06669b42dbe4c9e1a6eeec3d0ad3885d2eb | [
"MIT"
] | null | null | null | import numpy as np
import pyspedas
from phdhelper.helpers import title_print
from phdhelper.helpers.CONSTANTS import c, k_B, m_e, m_i, mu_0, q
from pytplot import data_quants
import matplotlib.pyplot as plt
from datetime import datetime as dt
from cached_property import cached_property
class EventHandler:
    """Shared loaders and tplot access helpers for MMS burst data."""

    FPI = None
    FPI_DIST = None
    FSM = None
    FGM = None
    trange = None
    probe = None

    def load_FGM(self):
        """Load burst-mode fluxgate magnetometer data via pyspedas."""
        self.FGM = pyspedas.mms.fgm(
            trange=self.trange, probe=self.probe, data_rate="brst"
        )

    def load_FSM(self):
        """FSM loading is not implemented yet."""
        raise NotImplementedError()

    def load_FPI_DIST(self):
        """Load burst-mode ion distribution ("dis-dist") data."""
        self.FPI_DIST = pyspedas.mms.fpi(
            trange=self.trange,
            probe=self.probe,
            data_rate="brst",
            datatype="dis-dist",
        )

    def load_FPI(self):
        """Load burst-mode FPI moments data."""
        self.FPI = pyspedas.mms.fpi(
            trange=self.trange, probe=self.probe, data_rate="brst"
        )

    @staticmethod
    def get_tplot_data(var_str, sl=None, time=False):
        """Return values (or the time coordinate when ``time`` is True)
        of a tplot variable, optionally sliced with ``sl``."""
        variable = data_quants[var_str]
        data = variable.coords["time"].values if time else variable.values
        return data if sl is None else data[sl]
class TimeMMS(EventHandler):
    """Lazy accessors for the time axis of a single tplot variable."""
    def __init__(self, kw):
        # kw: tplot variable name whose time coordinate is exposed.
        self.kw = kw
    @cached_property
    def timestamp(self):
        """POSIX timestamps (seconds) of the variable's time coordinate."""
        return self.get_tplot_data(self.kw, time=True)
    @cached_property
    def date_time(self):
        """Timestamps converted to naive-UTC ``datetime`` objects."""
        # NOTE(review): dt.utcfromtimestamp is deprecated since Python 3.12;
        # datetime.fromtimestamp(t, tz=timezone.utc) is the modern
        # equivalent -- confirm before changing behaviour.
        return np.array([dt.utcfromtimestamp(t) for t in self.timestamp])
    def date_string(self, fmt="%H:%M"):
        """Format every datetime with ``fmt`` (default hour:minute)."""
        return np.array([dt.strftime(t, fmt) for t in self.date_time])
class Species(EventHandler):
    """Accessor for one tplot variable (a single particle species/field)."""

    def __init__(self, kw) -> None:
        # kw: tplot variable name, e.g. "mms1_dis_bulkv_gse_brst".
        self.kw = kw

    @cached_property
    def value(self):
        """Data values of the tplot variable (fetched lazily, cached)."""
        return self.get_tplot_data(self.kw)

    @cached_property
    def time(self):
        """Time-axis accessor for the same variable."""
        return TimeMMS(self.kw)

    def plot(self):
        """Quick-look line plot of the values on the current axes."""
        plt.plot(self.value)

    def __repr__(self):
        # BUG FIX: the original adjacent string literals concatenated into
        # a single run-on line; newlines restore the intended layout.
        return (
            f"Species({self.kw})\n"
            "Available properties:\n"
            "    value\n"
            "Available methods:\n"
            "    plot"
        )
class MultiSpecies:
    """Pair of Species accessors: one ion variable, one electron variable."""
    def __init__(self, ion_kw: str, electron_kw: str) -> None:
        # tplot variable names for the ion and electron data streams.
        self.ion_kw = ion_kw
        self.electron_kw = electron_kw
    @cached_property
    def ion(self):
        """Species accessor for the ion variable (built lazily, cached)."""
        return Species(self.ion_kw)
    @cached_property
    def electron(self):
        """Species accessor for the electron variable (built lazily, cached)."""
        return Species(self.electron_kw)
class Event(EventHandler):
    """Load and expose MMS burst data for one event time range.

    Parameters
    ----------
    trange : str
        Time range passed through to the pyspedas loaders.
    required_instruments : str
        Instruments to load, matched case-insensitively by substring
        (e.g. "FGM|FPI").
    probe : str
        MMS probe number, default "1".
    """

    def __init__(
        self, trange: str, required_instruments: str, probe: str = "1"
    ) -> None:
        self.trange = trange
        self.required_instruments = required_instruments.upper()
        self.probe = probe
        # BUG FIX: match against the upper-cased copy so lowercase input
        # (e.g. "fgm|fpi") still triggers the loads; the original tested
        # the raw argument after storing the upper-cased version.
        req = self.required_instruments
        if "FGM" in req:
            self.load_FGM()
        if "FPI" in req:
            # NOTE(review): substring matching means "FPI_DIST" alone also
            # loads the FPI moments -- confirm whether that is intended.
            self.load_FPI()
        if "FSM" in req:
            self.load_FSM()
        if "FPI_DIST" in req:
            self.load_FPI_DIST()

    @cached_property
    def B(self):
        """Burst FGM magnetic field (GSE) as a Species accessor."""
        return Species(f"mms{self.probe}_fgm_b_gse_brst_l2")

    @cached_property
    def v(self):
        """Bulk velocity (GSE): ion (dis) and electron (des) accessors."""
        return MultiSpecies(
            f"mms{self.probe}_dis_bulkv_gse_brst",
            f"mms{self.probe}_des_bulkv_gse_brst",
        )

    @cached_property
    def T(self):
        """Temperature accessors.

        NOTE(review): both entries use ion (dis) variables, so the
        ``electron`` slot here is actually the *perpendicular ion*
        temperature -- possibly a copy/paste slip; confirm before use.
        """
        return MultiSpecies(
            f"mms{self.probe}_dis_temppara_brst",
            f"mms{self.probe}_dis_tempperp_brst",
        )

    @cached_property
    def E(self):
        """Omnidirectional energy spectra for ions and electrons."""
        return MultiSpecies(
            f"mms{self.probe}_dis_energyspectr_omni_brst",
            f"mms{self.probe}_des_energyspectr_omni_brst",
        )
# @property
# def v_0(self, species="i"):
# title_print("Calculating background flow speed")
# species = self.Species(species)
# if species.ion:
# self.v_0_i = np.mean(np.linalg.norm(self.v_i, axis=1))
# if species.elec:
# self.v_0_e = np.mean(np.linalg.norm(self.v_e, axis=1))
# @property
# def v_A(self):
# title_print("Calculating Alfven speed")
# self.v_A = self.mean_B / np.sqrt(mu_0 * self.number_density_i) / 1e3
# @property
# def number_density(self, species="i"):
# species = self.Species(species)
# if species.ion:
# self.number_density_i = (
# self.get_tplot_data(f"mms{self.probe}_dis_numberdensity_brst") * 1e6
# ).mean()
# if species.elec:
# self.number_density_e = (
# self.get_tplot_data(f"mms{self.probe}_des_numberdensity_brst") * 1e6
# ).mean()
# @property
# def beta(self, species="i"):
# title_print("Calculating plasma betas")
# species = self.Species(species)
# magPress = self.mean_B ** 2 / (2 * mu_0)
# if species.ion:
# self.beta_i = (
# self.number_density_i * k_B * self.T_i[:, 0].mean()
# ) / magPress
# if species.elec:
# self.beta_e = (
# self.number_density_e * k_B * self.T_e[:, 0].mean()
# ) / magPress
# @property
# def rho(self, species="i"):
# title_print("Calculating gyroradius")
# species = self.Species(species)
# if species.ion:
# i_thermal_velocity = np.sqrt(self.T_i[:, 1].mean() * 2 * q / m_i) / 1e3
# i_gyrofrequency = q * self.mean_B / m_i
# self.rho_i = i_thermal_velocity / i_gyrofrequency
# if species.elec:
# e_thermal_velocity = np.sqrt(self.T_i[:, 1].mean() * 2 * q / m_e) / 1e3
# e_gyrofrequency = q * self.mean_B / m_e
# self.rho_e = e_thermal_velocity / e_gyrofrequency
# @property
# def p(self, species="i"):
# title_print("Calculating Intertial length")
# species = self.Species(species)
# if species.ion:
# i_plasma_frequency = 1.32e3 * np.sqrt(self.number_density_i)
# self.p_i = c / i_plasma_frequency
# self.p_i /= 1e3
# if species.elec:
# e_plasma_frequency = 5.64e4 * np.sqrt(self.number_density_e)
# self.p_e = c / e_plasma_frequency
# self.p_e /= 1e3
# @property
# def time(self, var="B"):
# title_print("Getting time arrays")
# var = var.split("|")
# if "B" in var:
# self.time_B = self.get_tplot_data(
# f"mms{self.probe}_fgm_b_gse_brst_l2", time=True
# )
# if "V" in var:
# self.time_V = self.get_tplot_data(
# f"mms{self.probe}_dis_bulkv_gse_brst", time=True
# )
# if "e" in var:
# self.time_e = self.get_tplot_data(
# f"mms{self.probe}_des_temppara_brst", time=True
# ) | 30.021277 | 86 | 0.569667 | import numpy as np
import pyspedas
from phdhelper.helpers import title_print
from phdhelper.helpers.CONSTANTS import c, k_B, m_e, m_i, mu_0, q
from pytplot import data_quants
import matplotlib.pyplot as plt
from datetime import datetime as dt
from cached_property import cached_property
class EventHandler:
FPI = None
FPI_DIST = None
FSM = None
FGM = None
trange = None
probe = None
def load_FGM(self):
self.FGM = pyspedas.mms.fgm(
trange=self.trange, probe=self.probe, data_rate="brst"
)
def load_FSM(self):
raise NotImplementedError()
def load_FPI_DIST(self):
self.FPI_DIST = pyspedas.mms.fpi(
trange=self.trange,
probe=self.probe,
data_rate="brst",
datatype="dis-dist",
)
def load_FPI(self):
self.FPI = pyspedas.mms.fpi(
trange=self.trange, probe=self.probe, data_rate="brst"
)
@staticmethod
def get_tplot_data(var_str, sl=None, time=False):
if not time:
if sl is None:
return data_quants[var_str].values
else:
return data_quants[var_str].values[sl]
else:
if sl is None:
return data_quants[var_str].coords["time"].values
else:
return data_quants[var_str].coords["time"].values[sl]
class TimeMMS(EventHandler):
def __init__(self, kw):
self.kw = kw
@cached_property
def timestamp(self):
return self.get_tplot_data(self.kw, time=True)
@cached_property
def date_time(self):
return np.array([dt.utcfromtimestamp(t) for t in self.timestamp])
def date_string(self, fmt="%H:%M"):
return np.array([dt.strftime(t, fmt) for t in self.date_time])
class Species(EventHandler):
def __init__(self, kw) -> None:
self.kw = kw
@cached_property
def value(self):
return self.get_tplot_data(self.kw)
@cached_property
def time(self):
return TimeMMS(self.kw)
def plot(self):
plt.plot(self.value)
def __repr__(self):
return (
f"Species({self.kw})"
"Available properties:"
" value"
"Available methods:"
" plot"
)
class MultiSpecies:
def __init__(self, ion_kw: str, electron_kw: str) -> None:
self.ion_kw = ion_kw
self.electron_kw = electron_kw
@cached_property
def ion(self):
return Species(self.ion_kw)
@cached_property
def electron(self):
return Species(self.electron_kw)
class Event(EventHandler):
def __init__(
self, trange: str, required_instruments: str, probe: str = "1"
) -> None:
self.trange = trange
self.required_instruments = required_instruments.upper()
self.probe = probe
if "FGM" in required_instruments:
self.load_FGM()
if "FPI" in required_instruments:
self.load_FPI()
if "FSM" in required_instruments:
self.load_FSM()
if "FPI_DIST" in required_instruments:
self.load_FPI_DIST()
@cached_property
def B(self):
return Species(f"mms{self.probe}_fgm_b_gse_brst_l2")
@cached_property
def v(self):
return MultiSpecies(
f"mms{self.probe}_dis_bulkv_gse_brst",
f"mms{self.probe}_des_bulkv_gse_brst",
)
@cached_property
def T(self):
return MultiSpecies(
f"mms{self.probe}_dis_temppara_brst",
f"mms{self.probe}_dis_tempperp_brst",
)
@cached_property
def E(self):
return MultiSpecies(
f"mms{self.probe}_dis_energyspectr_omni_brst",
f"mms{self.probe}_des_energyspectr_omni_brst",
)
| true | true |
f7202111926ff8fda7156a7c3de15389c446a8d2 | 1,643 | py | Python | models.py | Joshua-Barawa/MyHome | 99b5a96f2d7f442afcccfbf042b10a94e0684ee3 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | models.py | Joshua-Barawa/MyHome | 99b5a96f2d7f442afcccfbf042b10a94e0684ee3 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | models.py | Joshua-Barawa/MyHome | 99b5a96f2d7f442afcccfbf042b10a94e0684ee3 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | from run import db
from flask_login import UserMixin
class Post(db.Model):
    """A posted listing with image, location, title, description and price."""
    __tablename__ = "posts"
    id = db.Column(db.Integer, primary_key=True)
    image = db.Column(db.Text)  # image payload or reference -- exact format set by callers
    location = db.Column(db.String(255))
    title = db.Column(db.String(255))
    description = db.Column(db.String)
    price = db.Column(db.Integer)
    # NOTE(review): stores a plain string, not a foreign key into users -- confirm.
    owner = db.Column(db.String(255))
    def __init__(self, image, location, title,description, price, owner):
        self.image = image
        self.location = location
        self.title = title
        self.description = description
        self.price = price
        self.owner = owner
class User(UserMixin, db.Model):
    """Registered account; UserMixin supplies the flask-login interface."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    full_names = db.Column(db.String(255), nullable=False)
    email = db.Column(db.String(255), nullable=False)
    # NOTE(review): Integer drops leading zeros and '+' prefixes; phone
    # numbers are usually stored as strings -- confirm intent.
    mobile_number = db.Column(db.Integer, nullable=False)
    member_since = db.Column(db.Date)
    # Stored as given by the caller; presumably already hashed upstream --
    # verify the registration code before assuming.
    password = db.Column(db.String(255), nullable=False)
    def __init__(self, full_names, email, mobile_number, member_since, password):
        self.full_names = full_names
        self.email = email
        self.mobile_number = mobile_number
        self.member_since = member_since
        self.password = password
class Comment(db.Model):
    """A comment attached to a Post."""
    __tablename__ = "comments"
    id = db.Column(db.Integer, primary_key=True)
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))  # parent post
    name = db.Column(db.String(255))  # name shown with the comment
    desc = db.Column(db.String(255))  # comment text
    def __init__(self, post_id, name, desc):
        self.post_id = post_id
        self.name = name
        self.desc = desc
| 31.596154 | 81 | 0.664029 | from run import db
from flask_login import UserMixin
class Post(db.Model):
__tablename__ = "posts"
id = db.Column(db.Integer, primary_key=True)
image = db.Column(db.Text)
location = db.Column(db.String(255))
title = db.Column(db.String(255))
description = db.Column(db.String)
price = db.Column(db.Integer)
owner = db.Column(db.String(255))
def __init__(self, image, location, title,description, price, owner):
self.image = image
self.location = location
self.title = title
self.description = description
self.price = price
self.owner = owner
class User(UserMixin, db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
full_names = db.Column(db.String(255), nullable=False)
email = db.Column(db.String(255), nullable=False)
mobile_number = db.Column(db.Integer, nullable=False)
member_since = db.Column(db.Date)
password = db.Column(db.String(255), nullable=False)
def __init__(self, full_names, email, mobile_number, member_since, password):
self.full_names = full_names
self.email = email
self.mobile_number = mobile_number
self.member_since = member_since
self.password = password
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, primary_key=True)
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
name = db.Column(db.String(255))
desc = db.Column(db.String(255))
def __init__(self, post_id, name, desc):
self.post_id = post_id
self.name = name
self.desc = desc
| true | true |
f720216f491ba23be45a34478f494e95df19d04f | 1,046 | py | Python | perceptron/perceptron.py | coderatwork7/AI-algorithms | 63850ae051956d8ed363fa28e5dc51ad26e86198 | [
"Apache-2.0"
] | 10 | 2020-06-26T13:19:46.000Z | 2021-02-05T09:26:49.000Z | perceptron/perceptron.py | somiljain7/AI-algorithms- | 11e9c012cc2f5fb4493bc1ec6b14ddc9cf0fc2d4 | [
"Apache-2.0"
] | 4 | 2020-07-17T11:03:38.000Z | 2020-10-17T05:23:17.000Z | perceptron/perceptron.py | somiljain7/AI-algorithms- | 11e9c012cc2f5fb4493bc1ec6b14ddc9cf0fc2d4 | [
"Apache-2.0"
] | 9 | 2020-06-26T13:19:49.000Z | 2021-01-02T18:59:30.000Z | import pandas as pd
# TODO: Set weight1, weight2, and bias
weight1 = 1.5
weight2 = 1.5
bias = -2.0
# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [False, False, False, True]
outputs = []
# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
output = int(linear_combination >= 0)
is_correct_string = 'Yes' if output == correct_output else 'No'
outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])
# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
print('Nice! You got it all correct.\n')
else:
print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False)) | 36.068966 | 133 | 0.698853 | import pandas as pd
weight1 = 1.5
weight2 = 1.5
bias = -2.0
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [False, False, False, True]
outputs = []
# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
output = int(linear_combination >= 0)
is_correct_string = 'Yes' if output == correct_output else 'No'
outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])
# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
print('Nice! You got it all correct.\n')
else:
print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False)) | true | true |
f72021be8eed22953bf2936035e521ed13862e22 | 4,547 | py | Python | tests/runnable/proxy/proxy_simple_test_case.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | [
"Apache-2.0"
] | null | null | null | tests/runnable/proxy/proxy_simple_test_case.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | [
"Apache-2.0"
] | 13 | 2020-05-04T14:14:58.000Z | 2020-07-29T16:37:03.000Z | tests/runnable/proxy/proxy_simple_test_case.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | [
"Apache-2.0"
] | null | null | null | from typing import List
from unittest import TestCase
from puma.attribute import copied
from puma.buffer import Publishable
from puma.runnable import CommandDrivenRunnable
from puma.runnable.decorator.run_in_child_scope import run_in_child_scope
from puma.scope_id import get_current_scope_id
from tests.runnable.proxy.proxy_test_case import ProxyTestCase, RunnableTestInterface
from tests.runnable.proxy.proxy_test_environment import ProxyTestEnvironment
from tests.runnable.proxy.proxy_test_helpers import CallResponse, HasMethodThatReturnsValue, SendsCallsToBufferImpl, from_scope_id
class SimpleProxyTestCase(ProxyTestCase):
def __init__(self, expected_result_value: str):
self._expected_result_value = expected_result_value
def create_demo_interface(self, call_response_publishable: Publishable[CallResponse]) -> RunnableTestInterface:
return SendsCallsToBufferRunnable(SendsCallsToBufferImpl(call_response_publishable))
def perform_commands(self, test_case: TestCase, test_interface: HasMethodThatReturnsValue) -> None:
test_interface.no_args()
test_interface.one_arg(get_current_scope_id())
test_interface.two_args(get_current_scope_id(), "2")
value = test_interface.returns_value(get_current_scope_id(), 3)
test_interface.two_args(get_current_scope_id(), value)
def check_results(self, test_case: TestCase, proxy_test_env: ProxyTestEnvironment, commands: List[CallResponse]) -> None:
test_case.assertEqual(self._get_expected_command_count(), len(commands))
# Ensure the correct commands were called (can't verify first argument, as it is generated - it will be checked later)
test_case.assertEqual("no_args", commands[0].method_name)
test_case.assertEqual([], commands[0].args)
test_case.assertEqual("one_arg", commands[1].method_name)
test_case.assertEqual("two_args", commands[2].method_name)
test_case.assertEqual("2", commands[2].args[1])
test_case.assertEqual("returns_value", commands[3].method_name)
test_case.assertEqual(3, commands[3].args[1])
test_case.assertEqual("two_args", commands[4].method_name)
test_case.assertEqual(self._expected_result_value, commands[4].args[1])
# Ensure all commands ran in the same scope
command_run_scope_ids = set()
for c in commands:
command_run_scope_ids.add(c.scope_id)
test_case.assertEqual(1, len(command_run_scope_ids), f"Not all commands were run in the same scope - {command_run_scope_ids}")
# Ensure all commands called in the same scope
command_called_scope_ids = set()
for c in commands:
if len(c.args) > 0:
command_called_scope_ids.add(c.args[0])
test_case.assertEqual(1, len(command_called_scope_ids), f"Not all commands were called in the same scope - {command_called_scope_ids}")
command_called_scope = from_scope_id(command_called_scope_ids.pop())
command_run_scope = from_scope_id(command_run_scope_ids.pop())
# Ensure commands weren't called from or run in the main thread
main_thread_scope = from_scope_id(get_current_scope_id())
test_case.assertNotEqual(main_thread_scope, command_called_scope)
test_case.assertNotEqual(main_thread_scope, command_run_scope)
# Ensure commands were called from the expected scope
proxy_test_env.environment_verifier.verify(test_case, command_called_scope, command_run_scope)
def _get_expected_command_count(self) -> int:
return 5
class SendsCallsToBufferRunnable(CommandDrivenRunnable, RunnableTestInterface):
_wrapped_instance: HasMethodThatReturnsValue = copied("_wrapped_instance")
def __init__(self, wrapped_interface: HasMethodThatReturnsValue) -> None:
super().__init__(self.__class__.__name__, [])
self._wrapped_instance = wrapped_interface
@run_in_child_scope
def no_args(self) -> None:
self._wrapped_instance.no_args()
@run_in_child_scope
def one_arg(self, a: str) -> None:
self._wrapped_instance.one_arg(a)
@run_in_child_scope
def two_args(self, a: str, b: str) -> None:
self._wrapped_instance.two_args(a, b)
def returns_value(self, a: str, b: int) -> str:
self._in_child_returns_value(a, b)
return f"Called by {self.__class__.__name__}"
@run_in_child_scope
def _in_child_returns_value(self, a: str, b: int) -> None:
self._wrapped_instance.returns_value(a, b)
| 44.578431 | 143 | 0.744667 | from typing import List
from unittest import TestCase
from puma.attribute import copied
from puma.buffer import Publishable
from puma.runnable import CommandDrivenRunnable
from puma.runnable.decorator.run_in_child_scope import run_in_child_scope
from puma.scope_id import get_current_scope_id
from tests.runnable.proxy.proxy_test_case import ProxyTestCase, RunnableTestInterface
from tests.runnable.proxy.proxy_test_environment import ProxyTestEnvironment
from tests.runnable.proxy.proxy_test_helpers import CallResponse, HasMethodThatReturnsValue, SendsCallsToBufferImpl, from_scope_id
class SimpleProxyTestCase(ProxyTestCase):
def __init__(self, expected_result_value: str):
self._expected_result_value = expected_result_value
def create_demo_interface(self, call_response_publishable: Publishable[CallResponse]) -> RunnableTestInterface:
return SendsCallsToBufferRunnable(SendsCallsToBufferImpl(call_response_publishable))
def perform_commands(self, test_case: TestCase, test_interface: HasMethodThatReturnsValue) -> None:
test_interface.no_args()
test_interface.one_arg(get_current_scope_id())
test_interface.two_args(get_current_scope_id(), "2")
value = test_interface.returns_value(get_current_scope_id(), 3)
test_interface.two_args(get_current_scope_id(), value)
def check_results(self, test_case: TestCase, proxy_test_env: ProxyTestEnvironment, commands: List[CallResponse]) -> None:
test_case.assertEqual(self._get_expected_command_count(), len(commands))
test_case.assertEqual("no_args", commands[0].method_name)
test_case.assertEqual([], commands[0].args)
test_case.assertEqual("one_arg", commands[1].method_name)
test_case.assertEqual("two_args", commands[2].method_name)
test_case.assertEqual("2", commands[2].args[1])
test_case.assertEqual("returns_value", commands[3].method_name)
test_case.assertEqual(3, commands[3].args[1])
test_case.assertEqual("two_args", commands[4].method_name)
test_case.assertEqual(self._expected_result_value, commands[4].args[1])
# Ensure all commands ran in the same scope
command_run_scope_ids = set()
for c in commands:
command_run_scope_ids.add(c.scope_id)
test_case.assertEqual(1, len(command_run_scope_ids), f"Not all commands were run in the same scope - {command_run_scope_ids}")
# Ensure all commands called in the same scope
command_called_scope_ids = set()
for c in commands:
if len(c.args) > 0:
command_called_scope_ids.add(c.args[0])
test_case.assertEqual(1, len(command_called_scope_ids), f"Not all commands were called in the same scope - {command_called_scope_ids}")
command_called_scope = from_scope_id(command_called_scope_ids.pop())
command_run_scope = from_scope_id(command_run_scope_ids.pop())
# Ensure commands weren't called from or run in the main thread
main_thread_scope = from_scope_id(get_current_scope_id())
test_case.assertNotEqual(main_thread_scope, command_called_scope)
test_case.assertNotEqual(main_thread_scope, command_run_scope)
proxy_test_env.environment_verifier.verify(test_case, command_called_scope, command_run_scope)
def _get_expected_command_count(self) -> int:
return 5
class SendsCallsToBufferRunnable(CommandDrivenRunnable, RunnableTestInterface):
_wrapped_instance: HasMethodThatReturnsValue = copied("_wrapped_instance")
def __init__(self, wrapped_interface: HasMethodThatReturnsValue) -> None:
super().__init__(self.__class__.__name__, [])
self._wrapped_instance = wrapped_interface
@run_in_child_scope
def no_args(self) -> None:
self._wrapped_instance.no_args()
@run_in_child_scope
def one_arg(self, a: str) -> None:
self._wrapped_instance.one_arg(a)
@run_in_child_scope
def two_args(self, a: str, b: str) -> None:
self._wrapped_instance.two_args(a, b)
def returns_value(self, a: str, b: int) -> str:
self._in_child_returns_value(a, b)
return f"Called by {self.__class__.__name__}"
@run_in_child_scope
def _in_child_returns_value(self, a: str, b: int) -> None:
self._wrapped_instance.returns_value(a, b)
| true | true |
f72021c2f7bb1ce0bb2f560b50785e5b281d956f | 1,492 | py | Python | rdkit/VLib/Supply.py | docking-org/rdk | 6eb710254f027b348a8e3089e6a92c3d40de0949 | [
"PostgreSQL"
] | 1 | 2019-01-23T06:02:24.000Z | 2019-01-23T06:02:24.000Z | rdkit/VLib/Supply.py | Mike575/rdkit | 373a89021e478f878c6011a201e3fb8f4a122093 | [
"PostgreSQL"
] | null | null | null | rdkit/VLib/Supply.py | Mike575/rdkit | 373a89021e478f878c6011a201e3fb8f4a122093 | [
"PostgreSQL"
] | 1 | 2022-03-30T03:22:10.000Z | 2022-03-30T03:22:10.000Z | # $Id$
#
# Copyright (C) 2003 Rational Discovery LLC
# All Rights Reserved
#
from rdkit import six
from rdkit.VLib.Node import VLibNode
class SupplyNode(VLibNode):
""" base class for nodes which supply things
Assumptions:
1) no parents
Usage Example:
>>> supplier = SupplyNode(contents=[1,2,3])
>>> supplier.next()
1
>>> supplier.next()
2
>>> supplier.next()
3
>>> supplier.next()
Traceback (most recent call last):
...
StopIteration
>>> supplier.reset()
>>> supplier.next()
1
>>> [x for x in supplier]
[1, 2, 3]
"""
def __init__(self, contents=None, **kwargs):
VLibNode.__init__(self, **kwargs)
if contents is not None:
self._contents = contents
else:
self._contents = []
self._pos = 0
def reset(self):
VLibNode.reset(self)
self._pos = 0
def next(self):
if self._pos == len(self._contents):
raise StopIteration
res = self._contents[self._pos]
self._pos += 1
return res
def AddParent(self, parent, notify=1):
raise ValueError('SupplyNodes do not have parents')
if six.PY3:
SupplyNode.__next__ = SupplyNode.next
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| 19.128205 | 76 | 0.618633 |
from rdkit import six
from rdkit.VLib.Node import VLibNode
class SupplyNode(VLibNode):
def __init__(self, contents=None, **kwargs):
VLibNode.__init__(self, **kwargs)
if contents is not None:
self._contents = contents
else:
self._contents = []
self._pos = 0
def reset(self):
VLibNode.reset(self)
self._pos = 0
def next(self):
if self._pos == len(self._contents):
raise StopIteration
res = self._contents[self._pos]
self._pos += 1
return res
def AddParent(self, parent, notify=1):
raise ValueError('SupplyNodes do not have parents')
if six.PY3:
SupplyNode.__next__ = SupplyNode.next
def _runDoctests(verbose=None):
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__':
_runDoctests()
| true | true |
f720232e9180d661ee8772c24e32166039323b47 | 1,192 | py | Python | src/web/modules/entrance/migrations/0081_enrolledusersgroup.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 5 | 2018-03-08T17:22:27.000Z | 2018-03-11T14:20:53.000Z | src/web/modules/entrance/migrations/0081_enrolledusersgroup.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 263 | 2018-03-08T18:05:12.000Z | 2022-03-11T23:26:20.000Z | src/web/modules/entrance/migrations/0081_enrolledusersgroup.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 6 | 2018-03-12T19:48:19.000Z | 2022-01-14T04:58:52.000Z | # Generated by Django 2.0.3 on 2018-05-20 18:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('groups', '0004_auto_20180312_2139'),
('schools', '0018_auto_20180407_1742'),
('entrance', '0080_auto_20180520_2114'),
]
operations = [
migrations.CreateModel(
name='EnrolledUsersGroup',
fields=[
('abstractgroup_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='groups.AbstractGroup')),
('parallel', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='schools.Parallel')),
('session', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='schools.Session')),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
bases=('groups.abstractgroup',),
),
]
| 39.733333 | 204 | 0.628356 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('groups', '0004_auto_20180312_2139'),
('schools', '0018_auto_20180407_1742'),
('entrance', '0080_auto_20180520_2114'),
]
operations = [
migrations.CreateModel(
name='EnrolledUsersGroup',
fields=[
('abstractgroup_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='groups.AbstractGroup')),
('parallel', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='schools.Parallel')),
('session', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='schools.Session')),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
bases=('groups.abstractgroup',),
),
]
| true | true |
f7202400abc5ff8124f922b02029da0f1f0056b4 | 5,477 | py | Python | tests/endtoend/test_linux_consumption.py | Amithmh/azure-functions-python-worker | 291a85279f92c88a5a05cddaaf70b2def81a45f2 | [
"MIT"
] | 277 | 2018-01-25T23:13:03.000Z | 2022-02-22T06:12:04.000Z | tests/endtoend/test_linux_consumption.py | Amithmh/azure-functions-python-worker | 291a85279f92c88a5a05cddaaf70b2def81a45f2 | [
"MIT"
] | 731 | 2018-01-18T18:54:38.000Z | 2022-03-29T00:01:46.000Z | tests/endtoend/test_linux_consumption.py | YunchuWang/azure-functions-python-worker | 1f23e038a506c6412e4efbf07eb471a6afab0c2a | [
"MIT"
] | 109 | 2018-01-18T02:22:57.000Z | 2022-02-15T18:59:54.000Z | from unittest import TestCase, skip
import os
import sys
from requests import Request
from azure_functions_worker.testutils_lc import (
LinuxConsumptionWebHostController
)
@skip('Flaky test and needs stabilization')
class TestLinuxConsumption(TestCase):
"""Test worker behaviors on specific scenarios.
SCM_RUN_FROM_PACKAGE: built function apps are acquired from
-> "Simple Batch" Subscription
-> "AzureFunctionsPythonWorkerCILinuxDevOps" Resource Group
-> "pythonworker<python_major><python_minor>sa" Storage Account
-> "python-worker-lc-apps" Blob Container
For a list of scenario names:
https://pythonworker39sa.blob.core.windows.net/python-worker-lc-apps?restype=container&comp=list
"""
@classmethod
def setUpClass(cls):
cls._py_version = f'{sys.version_info.major}.{sys.version_info.minor}'
cls._py_shortform = f'{sys.version_info.major}{sys.version_info.minor}'
cls._storage = os.getenv('AzureWebJobsStorage')
if cls._storage is None:
raise RuntimeError('Environment variable AzureWebJobsStorage is '
'required before running Linux Consumption test')
def test_placeholder_mode_root_returns_ok(self):
"""In any circumstances, a placeholder container should returns 200
even when it is not specialized.
"""
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
req = Request('GET', ctrl.url)
resp = ctrl.send_request(req)
self.assertTrue(resp.ok)
def test_http_no_auth(self):
"""An HttpTrigger function app with 'azure-functions' library
should return 200.
"""
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
ctrl.assign_container(env={
"AzureWebJobsStorage": self._storage,
"SCM_RUN_FROM_PACKAGE": self._get_blob_url("HttpNoAuth")
})
req = Request('GET', f'{ctrl.url}/api/HttpTrigger')
resp = ctrl.send_request(req)
self.assertEqual(resp.status_code, 200)
def test_common_libraries(self):
"""A function app with the following requirements.txt:
azure-functions
azure-eventhub
azure-storage-blob
numpy
cryptography
pyodbc
requests
should return 200 after importing all libraries.
"""
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
ctrl.assign_container(env={
"AzureWebJobsStorage": self._storage,
"SCM_RUN_FROM_PACKAGE": self._get_blob_url("CommonLibraries")
})
req = Request('GET', f'{ctrl.url}/api/HttpTrigger')
resp = ctrl.send_request(req)
content = resp.json()
self.assertIn('azure.functions', content)
self.assertIn('azure.storage.blob', content)
self.assertIn('numpy', content)
self.assertIn('cryptography', content)
self.assertIn('pyodbc', content)
self.assertIn('requests', content)
self.assertEqual(resp.status_code, 200)
def test_new_protobuf(self):
"""A function app with the following requirements.txt:
azure-functions==1.7.0
protobuf==3.15.8
grpcio==1.33.2
should return 200 after importing all libraries.
"""
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
ctrl.assign_container(env={
"AzureWebJobsStorage": self._storage,
"SCM_RUN_FROM_PACKAGE": self._get_blob_url("NewProtobuf")
})
req = Request('GET', f'{ctrl.url}/api/HttpTrigger')
resp = ctrl.send_request(req)
content = resp.json()
# Worker always picks up the SDK version bundled with the image
# Version of the packages are inconsistent due to isolation's bug
self.assertIn('azure.functions', content)
self.assertIn('google.protobuf', content)
self.assertIn('grpc', content)
self.assertEqual(resp.status_code, 200)
def test_old_protobuf(self):
"""A function app with the following requirements.txt:
azure-functions==1.5.0
protobuf==3.8.0
grpcio==1.27.1
should return 200 after importing all libraries.
"""
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
ctrl.assign_container(env={
"AzureWebJobsStorage": self._storage,
"SCM_RUN_FROM_PACKAGE": self._get_blob_url("NewProtobuf")
})
req = Request('GET', f'{ctrl.url}/api/HttpTrigger')
resp = ctrl.send_request(req)
content = resp.json()
# Worker always picks up the SDK version bundled with the image
# Version of the packages are inconsistent due to isolation's bug
self.assertIn('azure.functions', content)
self.assertIn('google.protobuf', content)
self.assertIn('grpc', content)
self.assertEqual(resp.status_code, 200)
def _get_blob_url(self, scenario_name: str) -> str:
return (
f'https://pythonworker{self._py_shortform}sa.blob.core.windows.net/'
f'python-worker-lc-apps/{scenario_name}{self._py_shortform}.zip'
)
| 38.570423 | 104 | 0.628994 | from unittest import TestCase, skip
import os
import sys
from requests import Request
from azure_functions_worker.testutils_lc import (
LinuxConsumptionWebHostController
)
@skip('Flaky test and needs stabilization')
class TestLinuxConsumption(TestCase):
@classmethod
def setUpClass(cls):
cls._py_version = f'{sys.version_info.major}.{sys.version_info.minor}'
cls._py_shortform = f'{sys.version_info.major}{sys.version_info.minor}'
cls._storage = os.getenv('AzureWebJobsStorage')
if cls._storage is None:
raise RuntimeError('Environment variable AzureWebJobsStorage is '
'required before running Linux Consumption test')
def test_placeholder_mode_root_returns_ok(self):
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
req = Request('GET', ctrl.url)
resp = ctrl.send_request(req)
self.assertTrue(resp.ok)
def test_http_no_auth(self):
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
ctrl.assign_container(env={
"AzureWebJobsStorage": self._storage,
"SCM_RUN_FROM_PACKAGE": self._get_blob_url("HttpNoAuth")
})
req = Request('GET', f'{ctrl.url}/api/HttpTrigger')
resp = ctrl.send_request(req)
self.assertEqual(resp.status_code, 200)
def test_common_libraries(self):
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
ctrl.assign_container(env={
"AzureWebJobsStorage": self._storage,
"SCM_RUN_FROM_PACKAGE": self._get_blob_url("CommonLibraries")
})
req = Request('GET', f'{ctrl.url}/api/HttpTrigger')
resp = ctrl.send_request(req)
content = resp.json()
self.assertIn('azure.functions', content)
self.assertIn('azure.storage.blob', content)
self.assertIn('numpy', content)
self.assertIn('cryptography', content)
self.assertIn('pyodbc', content)
self.assertIn('requests', content)
self.assertEqual(resp.status_code, 200)
def test_new_protobuf(self):
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
ctrl.assign_container(env={
"AzureWebJobsStorage": self._storage,
"SCM_RUN_FROM_PACKAGE": self._get_blob_url("NewProtobuf")
})
req = Request('GET', f'{ctrl.url}/api/HttpTrigger')
resp = ctrl.send_request(req)
content = resp.json()
self.assertIn('azure.functions', content)
self.assertIn('google.protobuf', content)
self.assertIn('grpc', content)
self.assertEqual(resp.status_code, 200)
def test_old_protobuf(self):
with LinuxConsumptionWebHostController("3", self._py_version) as ctrl:
ctrl.assign_container(env={
"AzureWebJobsStorage": self._storage,
"SCM_RUN_FROM_PACKAGE": self._get_blob_url("NewProtobuf")
})
req = Request('GET', f'{ctrl.url}/api/HttpTrigger')
resp = ctrl.send_request(req)
content = resp.json()
# Worker always picks up the SDK version bundled with the image
# Version of the packages are inconsistent due to isolation's bug
self.assertIn('azure.functions', content)
self.assertIn('google.protobuf', content)
self.assertIn('grpc', content)
self.assertEqual(resp.status_code, 200)
def _get_blob_url(self, scenario_name: str) -> str:
return (
f'https://pythonworker{self._py_shortform}sa.blob.core.windows.net/'
f'python-worker-lc-apps/{scenario_name}{self._py_shortform}.zip'
)
| true | true |
f7202503138d6110cfc58387559f90552ccc359f | 3,224 | py | Python | testflows/_core/contrib/pygments/lexers/special.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | 3 | 2020-06-25T19:23:19.000Z | 2021-10-20T19:29:56.000Z | testflows/_core/contrib/pygments/lexers/special.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | null | null | null | testflows/_core/contrib/pygments/lexers/special.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | 1 | 2020-02-24T12:31:45.000Z | 2020-02-24T12:31:45.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.special
~~~~~~~~~~~~~~~~~~~~~~~
Special lexers.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from testflows._core.contrib.pygments.lexer import Lexer
from testflows._core.contrib.pygments.token import Token, Error, Text
from testflows._core.contrib.pygments.util import get_choice_opt, text_type, BytesIO
__all__ = ['TextLexer', 'RawTokenLexer']
class TextLexer(Lexer):
"""
"Null" lexer, doesn't highlight anything.
"""
name = 'Text only'
aliases = ['text']
filenames = ['*.txt']
mimetypes = ['text/plain']
priority = 0.01
def get_tokens_unprocessed(self, text):
yield 0, Text, text
def analyse_text(text):
return TextLexer.priority
_ttype_cache = {}
line_re = re.compile(b'.*?\n')
class RawTokenLexer(Lexer):
"""
Recreate a token stream formatted with the `RawTokenFormatter`. This
lexer raises exceptions during parsing if the token stream in the
file is malformed.
Additional options accepted:
`compress`
If set to ``"gz"`` or ``"bz2"``, decompress the token stream with
the given compression algorithm before lexing (default: ``""``).
"""
name = 'Raw token data'
aliases = ['raw']
filenames = []
mimetypes = ['application/x-pygments-tokens']
def __init__(self, **options):
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
Lexer.__init__(self, **options)
def get_tokens(self, text):
if isinstance(text, text_type):
# raw token stream never has any non-ASCII characters
text = text.encode('ascii')
if self.compress == 'gz':
import gzip
gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text))
text = gzipfile.read()
elif self.compress == 'bz2':
import bz2
text = bz2.decompress(text)
# do not call Lexer.get_tokens() because we do not want Unicode
# decoding to occur, and stripping is not optional.
text = text.strip(b'\n') + b'\n'
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
def get_tokens_unprocessed(self, text):
length = 0
for match in line_re.finditer(text):
try:
ttypestr, val = match.group().split(b'\t', 1)
except ValueError:
val = match.group().decode('ascii', 'replace')
ttype = Error
else:
ttype = _ttype_cache.get(ttypestr)
if not ttype:
ttype = Token
ttypes = ttypestr.split('.')[1:]
for ttype_ in ttypes:
if not ttype_ or not ttype_[0].isupper():
raise ValueError('malformed token name')
ttype = getattr(ttype, ttype_)
_ttype_cache[ttypestr] = ttype
val = val[2:-2].decode('unicode-escape')
yield length, ttype, val
length += len(val)
| 30.704762 | 84 | 0.563275 |
import re
from testflows._core.contrib.pygments.lexer import Lexer
from testflows._core.contrib.pygments.token import Token, Error, Text
from testflows._core.contrib.pygments.util import get_choice_opt, text_type, BytesIO
__all__ = ['TextLexer', 'RawTokenLexer']
class TextLexer(Lexer):
name = 'Text only'
aliases = ['text']
filenames = ['*.txt']
mimetypes = ['text/plain']
priority = 0.01
def get_tokens_unprocessed(self, text):
yield 0, Text, text
def analyse_text(text):
return TextLexer.priority
_ttype_cache = {}
line_re = re.compile(b'.*?\n')
class RawTokenLexer(Lexer):
name = 'Raw token data'
aliases = ['raw']
filenames = []
mimetypes = ['application/x-pygments-tokens']
def __init__(self, **options):
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
Lexer.__init__(self, **options)
def get_tokens(self, text):
if isinstance(text, text_type):
text = text.encode('ascii')
if self.compress == 'gz':
import gzip
gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text))
text = gzipfile.read()
elif self.compress == 'bz2':
import bz2
text = bz2.decompress(text)
text = text.strip(b'\n') + b'\n'
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
def get_tokens_unprocessed(self, text):
length = 0
for match in line_re.finditer(text):
try:
ttypestr, val = match.group().split(b'\t', 1)
except ValueError:
val = match.group().decode('ascii', 'replace')
ttype = Error
else:
ttype = _ttype_cache.get(ttypestr)
if not ttype:
ttype = Token
ttypes = ttypestr.split('.')[1:]
for ttype_ in ttypes:
if not ttype_ or not ttype_[0].isupper():
raise ValueError('malformed token name')
ttype = getattr(ttype, ttype_)
_ttype_cache[ttypestr] = ttype
val = val[2:-2].decode('unicode-escape')
yield length, ttype, val
length += len(val)
| true | true |
f720257844a4e01f8b77214cce15f42a5d7c3254 | 2,958 | py | Python | chemicals/exceptions.py | daar/chemicals | df3be046055055b99ae762e7a4b852a63134fc82 | [
"MIT"
] | 76 | 2020-08-29T07:47:11.000Z | 2022-03-27T03:16:46.000Z | chemicals/exceptions.py | edafricano/chemicals | 0e827ad43283d74a37cd002dc638f2a07c33bc1b | [
"MIT"
] | 20 | 2020-08-31T04:44:53.000Z | 2022-03-25T05:40:07.000Z | chemicals/exceptions.py | edafricano/chemicals | 0e827ad43283d74a37cd002dc638f2a07c33bc1b | [
"MIT"
] | 13 | 2020-09-01T04:57:01.000Z | 2022-02-23T03:36:58.000Z | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains various exception classes that may be raised by chemicals.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/chemicals/>`_.
.. contents:: :local:
.. autoclass:: chemicals.exceptions.UnderspecifiedError
.. autoclass:: chemicals.exceptions.OverspeficiedError
.. autoclass:: chemicals.exceptions.TrivialSolutionError
.. autoclass:: chemicals.exceptions.PhaseCountReducedError
.. autoclass:: chemicals.exceptions.PhaseExistenceImpossible
"""
__all__ = ['TrivialSolutionError',
'PhaseCountReducedError',
'PhaseExistenceImpossible',
'UnderspecifiedError',
'OverspeficiedError']
class UnderspecifiedError(Exception):
    """Raised when too few values are supplied to fully specify a problem."""
class OverspeficiedError(Exception):
    """Generic error to raise when too many values are given.

    NOTE(review): the class name misspells "Overspecified", but it is part
    of the public API (listed in ``__all__`` and the module docs), so it is
    kept as-is for backward compatibility.
    """
class TrivialSolutionError(Exception):
    """Raised when successive substitution converges to the trivial solution.

    Carries diagnostic state from the failed flash iteration:
    ``comp_difference`` (composition difference between phases),
    ``iterations`` (iterations performed) and ``err`` (remaining error).
    """
    def __init__(self, message, comp_difference, iterations, err):
        super().__init__(message)
        self.comp_difference, self.iterations, self.err = (
            comp_difference, iterations, err)
class PhaseCountReducedError(Exception):
    """Raised when the inner SS flash loop finds every K-value on the same
    side of 1 (all below or all above), so a phase disappears.

    Optional diagnostics: ``zs`` (feed composition) and ``Ks`` (K-values).
    """
    def __init__(self, message, zs=None, Ks=None):
        super().__init__(message)
        self.zs, self.Ks = zs, Ks
class PhaseExistenceImpossible(Exception):
    """Error raised when the requested phase cannot exist at the specified
    temperature and pressure.

    The previous docstring was copy-pasted from ``PhaseCountReducedError``
    and described the wrong condition; only the documentation is changed.

    Optional diagnostics: ``zs`` (composition), ``T`` (temperature, K) and
    ``P`` (pressure, Pa).
    """
    def __init__(self, message, zs=None, T=None, P=None):
        super().__init__(message)
        self.zs = zs
        self.T = T
        self.P = P
| 41.083333 | 81 | 0.742732 |
# Comment-stripped duplicate of the exception hierarchy above.
__all__ = ['TrivialSolutionError',
           'PhaseCountReducedError',
           'PhaseExistenceImpossible',
           'UnderspecifiedError',
           'OverspeficiedError']
class UnderspecifiedError(Exception):
    """Raised when not enough values are given."""
class OverspeficiedError(Exception):
    """Raised when too many values are given (name typo kept: public API)."""
class TrivialSolutionError(Exception):
    """Raised when successive substitution converges to the trivial solution."""
    def __init__(self, message, comp_difference, iterations, err):
        super().__init__(message)
        self.comp_difference = comp_difference
        self.iterations = iterations
        self.err = err
class PhaseCountReducedError(Exception):
    """Raised when the inner flash loop finds all K-values on one side of 1."""
    def __init__(self, message, zs=None, Ks=None):
        super().__init__(message)
        self.zs = zs
        self.Ks = Ks
class PhaseExistenceImpossible(Exception):
    """Raised when a phase cannot exist at the given T and P."""
    def __init__(self, message, zs=None, T=None, P=None):
        super().__init__(message)
        self.zs = zs
        self.T = T
        self.P = P
| true | true |
f72026e5abe659fb49e585df6c351997a8a096ba | 4,714 | py | Python | kernel/graph_sage.py | muhanzhang/NestedGNN | a5adccf62d397ad7f83bc73be34eba3765df73fa | [
"MIT"
] | 21 | 2021-11-05T00:42:30.000Z | 2022-03-29T13:38:31.000Z | kernel/graph_sage.py | Ender-li/NestedGNN | a5adccf62d397ad7f83bc73be34eba3765df73fa | [
"MIT"
] | null | null | null | kernel/graph_sage.py | Ender-li/NestedGNN | a5adccf62d397ad7f83bc73be34eba3765df73fa | [
"MIT"
] | 5 | 2021-11-05T00:42:32.000Z | 2022-03-25T08:28:17.000Z | import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import SAGEConv, global_mean_pool
class NestedGraphSAGE(torch.nn.Module):
    """GraphSAGE operating on rooted subgraphs (Nested GNN).

    Optionally augments node features with a structural-label embedding
    (``use_z``) and/or a projection of resistance distance (``use_rd``);
    either option widens the input features by 8 channels.
    """
    def __init__(self, dataset, num_layers, hidden, use_z=False, use_rd=False):
        super(NestedGraphSAGE, self).__init__()
        self.use_rd = use_rd
        self.use_z = use_z
        if self.use_rd:
            # 1-d resistance-distance value -> 8-channel projection
            self.rd_projection = torch.nn.Linear(1, 8)
        if self.use_z:
            # up to 1000 distinct structural labels, embedded into 8 channels
            self.z_embedding = torch.nn.Embedding(1000, 8)
        input_dim = dataset.num_features
        if self.use_z or self.use_rd:
            # both z and rd share the same 8 extra channels (summed below)
            input_dim += 8
        self.conv1 = SAGEConv(input_dim, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        # lin1 consumes the concatenation of every layer's output (JK-style)
        self.lin1 = torch.nn.Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
    def reset_parameters(self):
        """Re-initialize every learnable submodule."""
        if self.use_rd:
            self.rd_projection.reset_parameters()
        if self.use_z:
            self.z_embedding.reset_parameters()
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # node label embedding
        z_emb = 0
        if self.use_z and 'z' in data:
            ### computing input node embedding
            z_emb = self.z_embedding(data.z)
            if z_emb.ndim == 3:
                # multiple labels per node: sum their embeddings
                z_emb = z_emb.sum(dim=1)
        if self.use_rd and 'rd' in data:
            rd_proj = self.rd_projection(data.rd)
            z_emb += rd_proj
        if self.use_rd or self.use_z:
            x = torch.cat([z_emb, x], -1)
        x = F.relu(self.conv1(x, edge_index))
        xs = [x]
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
            xs += [x]
        # two-level pooling: nodes -> subgraph, then subgraphs -> graph
        x = global_mean_pool(torch.cat(xs, dim=1), data.node_to_subgraph)
        x = global_mean_pool(x, data.subgraph_to_graph)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
    def __repr__(self):
        return self.__class__.__name__
class GraphSAGE(torch.nn.Module):
    """Plain GraphSAGE with jumping-knowledge style concatenation of all
    layer outputs before pooling. Extra positional/keyword arguments are
    accepted and ignored for interface parity with the nested variant."""

    def __init__(self, dataset, num_layers, hidden, *args, **kwargs):
        super(GraphSAGE, self).__init__()
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList(
            [SAGEConv(hidden, hidden) for _ in range(num_layers - 1)])
        # lin1 consumes the concatenation of every layer's output
        self.lin1 = torch.nn.Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        """Re-initialize every learnable submodule."""
        for module in [self.conv1, *self.convs, self.lin1, self.lin2]:
            module.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        layer_outputs = [x]
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
            layer_outputs.append(x)
        pooled = global_mean_pool(torch.cat(layer_outputs, dim=1), data.batch)
        out = F.relu(self.lin1(pooled))
        out = F.dropout(out, p=0.5, training=self.training)
        return F.log_softmax(self.lin2(out), dim=-1)

    def __repr__(self):
        return self.__class__.__name__
class GraphSAGEWithoutJK(torch.nn.Module):
    """GraphSAGE baseline without jumping knowledge: only the last layer's
    output is pooled (so ``lin1`` is ``hidden -> hidden``)."""
    def __init__(self, dataset, num_layers, hidden):
        super(GraphSAGEWithoutJK, self).__init__()
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
    def reset_parameters(self):
        """Re-initialize every learnable submodule."""
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        # pool only the final layer's node embeddings
        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
    def __repr__(self):
        return self.__class__.__name__
| 34.661765 | 79 | 0.604158 | import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import SAGEConv, global_mean_pool
class NestedGraphSAGE(torch.nn.Module):
    """GraphSAGE on rooted subgraphs (Nested GNN); optionally augments node
    features with a structural-label embedding (``use_z``) and/or a
    resistance-distance projection (``use_rd``).

    BUG FIX: forward() was garbled (the line computing ``z_emb`` from
    ``self.z_embedding(data.z)`` was truncated to ``b.ndim == 3:``); the
    code is restored to match the intact copy of this class.
    """
    def __init__(self, dataset, num_layers, hidden, use_z=False, use_rd=False):
        super(NestedGraphSAGE, self).__init__()
        self.use_rd = use_rd
        self.use_z = use_z
        if self.use_rd:
            self.rd_projection = torch.nn.Linear(1, 8)
        if self.use_z:
            self.z_embedding = torch.nn.Embedding(1000, 8)
        input_dim = dataset.num_features
        if self.use_z or self.use_rd:
            # z and rd share the same 8 extra input channels (summed below)
            input_dim += 8
        self.conv1 = SAGEConv(input_dim, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.lin1 = torch.nn.Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
    def reset_parameters(self):
        """Re-initialize every learnable submodule."""
        if self.use_rd:
            self.rd_projection.reset_parameters()
        if self.use_z:
            self.z_embedding.reset_parameters()
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        # node label embedding
        z_emb = 0
        if self.use_z and 'z' in data:
            z_emb = self.z_embedding(data.z)
            if z_emb.ndim == 3:
                # multiple labels per node: sum their embeddings
                z_emb = z_emb.sum(dim=1)
        if self.use_rd and 'rd' in data:
            rd_proj = self.rd_projection(data.rd)
            z_emb += rd_proj
        if self.use_rd or self.use_z:
            x = torch.cat([z_emb, x], -1)
        x = F.relu(self.conv1(x, edge_index))
        xs = [x]
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
            xs += [x]
        # two-level pooling: nodes -> subgraph, then subgraphs -> graph
        x = global_mean_pool(torch.cat(xs, dim=1), data.node_to_subgraph)
        x = global_mean_pool(x, data.subgraph_to_graph)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
    def __repr__(self):
        return self.__class__.__name__
class GraphSAGE(torch.nn.Module):
    """Plain GraphSAGE with JK-style concatenation (comment-stripped duplicate)."""
    def __init__(self, dataset, num_layers, hidden, *args, **kwargs):
        super(GraphSAGE, self).__init__()
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.lin1 = torch.nn.Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        xs = [x]
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
            xs += [x]
        x = global_mean_pool(torch.cat(xs, dim=1), batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
    def __repr__(self):
        return self.__class__.__name__
class GraphSAGEWithoutJK(torch.nn.Module):
    """GraphSAGE baseline pooling only the last layer (comment-stripped duplicate)."""
    def __init__(self, dataset, num_layers, hidden):
        super(GraphSAGEWithoutJK, self).__init__()
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
    def __repr__(self):
        return self.__class__.__name__
| true | true |
f72028feee490e4956e5e24af2343761f0b5aef8 | 485 | py | Python | tests/numpy/asarray.py | Fryguy/py2rb | 0d2fbc5a86b82707a1d83241a21af6b2cc22c0b8 | [
"MIT"
] | 124 | 2017-08-19T05:37:16.000Z | 2022-03-08T18:24:18.000Z | tests/numpy/asarray.py | JeMaMokuma/py2rb | 0d2fbc5a86b82707a1d83241a21af6b2cc22c0b8 | [
"MIT"
] | 15 | 2017-12-16T05:59:31.000Z | 2022-02-08T02:51:17.000Z | tests/numpy/asarray.py | JeMaMokuma/py2rb | 0d2fbc5a86b82707a1d83241a21af6b2cc22c0b8 | [
"MIT"
] | 18 | 2017-09-25T11:57:04.000Z | 2022-02-19T17:33:48.000Z | import numpy as np
def print_matrix(data):
    """Print a 2-D array as nested lists of ints (values truncated toward zero)."""
    rows = [[int("%d" % cell) for cell in row] for row in list(data)]
    print(rows)
def print_array(data):
    """Print a 1-D array as a list of floats rounded to three decimals."""
    print([float("%.3f" % value) for value in data])
# 2-D case: printed as nested ints.
x = np.asarray([[1.,2.],[3.,4.]])
print_matrix(x)
# 1-D cases: printed with 3-decimal rounding.
x = np.asarray([1.,2.])
print_array(x)
y = np.asarray([3.,4.])
print_array(y)
# elementwise add, then take the first element of the result
z = (x + y)[0]
print(z)
| 16.724138 | 40 | 0.536082 | import numpy as np
# Comment-stripped duplicate of the np.asarray demo above.
def print_matrix(data):
    """Print a 2-D array as nested lists of truncated ints."""
    data_i = []
    for i in list(data):
        data_j = []
        for j in i:
            data_j.append(int("%d" % j))
        data_i.append(data_j)
    print(data_i)
def print_array(data):
    """Print a 1-D array rounded to three decimals."""
    datas = []
    for i in data:
        datas.append(float("%.3f" % i))
    print(datas)
x = np.asarray([[1.,2.],[3.,4.]])
print_matrix(x)
x = np.asarray([1.,2.])
print_array(x)
y = np.asarray([3.,4.])
print_array(y)
z = (x + y)[0]
print(z)
| true | true |
f7202aa5da9abcf694e4ba95a5903c7130725bcc | 604 | py | Python | src/msequence.py | piraaa/VideoDigitalWatermarking | 6439881dc88fb7257a3dd9856b185e5c667b89b4 | [
"MIT"
] | 38 | 2017-11-06T08:59:23.000Z | 2022-02-21T01:42:50.000Z | src/msequence.py | qiuqiu888888/VideoDigitalWatermarking | 6439881dc88fb7257a3dd9856b185e5c667b89b4 | [
"MIT"
] | 2 | 2018-10-01T15:56:37.000Z | 2018-10-01T15:59:19.000Z | src/msequence.py | qiuqiu888888/VideoDigitalWatermarking | 6439881dc88fb7257a3dd9856b185e5c667b89b4 | [
"MIT"
] | 9 | 2017-09-09T02:39:44.000Z | 2021-10-19T08:56:57.000Z | #
# msequence.py
# Created by pira on 2017/07/28.
#
#coding: utf-8
u"""For M-Sequence."""
import numpy as np
def generateM(N):
	u"""Create M-Sequence.
	@param N : length 2**N-1
	@return m : M-Sequence (values in {-1, +1})
	"""
	p = pow(2, N)
	m = [0] * (p-1)
	# Try each odd candidate i; f = x^N + (low-order terms encoded by i).
	for i in np.arange(1,p,2):
		f = p^i
		a = p
		#i = int()
		# Polynomial-division test of the candidate polynomial f.
		for j in np.arange(N, p):
			if (a&p) == p:
				a ^= f
				if a == 1:
					break
			a <<= 1
			if j == p-1:
				init = 1
		# NOTE(review): `init` is only assigned when the j-loop completes
		# with j == p-1; if the very first candidate breaks early this line
		# raises NameError. Verify indentation/intent against upstream.
		lfsr = init & (p-1)
		f >>= 1
		# Galois LFSR run: emit p-1 bits mapped to -1/+1.
		for k in np.arange(0, p-1):
			lfsr = (lfsr>>1)^(-(int)(lfsr&1) & f)
			m[k] = (lfsr&1) * 2-1
	return m
#test
#m = generateM(3)
#print(m) | 14.731707 | 41 | 0.490066 |
import numpy as np
def generateM(N):
	"""Comment-stripped duplicate of generateM above (M-sequence of length 2**N-1)."""
	p = pow(2, N)
	m = [0] * (p-1)
	for i in np.arange(1,p,2):
		f = p^i
		a = p
		for j in np.arange(N, p):
			if (a&p) == p:
				a ^= f
				if a == 1:
					break
			a <<= 1
			if j == p-1:
				init = 1
		# NOTE(review): `init` may be unassigned here (see note in the copy above).
		lfsr = init & (p-1)
		f >>= 1
		for k in np.arange(0, p-1):
			lfsr = (lfsr>>1)^(-(int)(lfsr&1) & f)
			m[k] = (lfsr&1) * 2-1
	return m
| true | true |
f7202be0208820f01bdac492ac81ef39a38c8248 | 1,426 | py | Python | manganelo/chapterdownload.py | nixonjoshua98/manganelo | 4450d05a3cf0ef500565c4e263e06edf42f580b6 | [
"MIT"
] | 22 | 2020-03-17T16:01:27.000Z | 2022-03-06T18:04:41.000Z | manganelo/chapterdownload.py | nixonjoshua98/manganelo | 4450d05a3cf0ef500565c4e263e06edf42f580b6 | [
"MIT"
] | 9 | 2020-05-13T03:19:45.000Z | 2022-03-13T23:05:32.000Z | manganelo/chapterdownload.py | nixonjoshua98/manganelo | 4450d05a3cf0ef500565c4e263e06edf42f580b6 | [
"MIT"
] | 8 | 2021-02-10T17:21:34.000Z | 2022-02-15T10:22:38.000Z | import os
import tempfile
from bs4 import BeautifulSoup
from PIL import Image
from reportlab.pdfgen import canvas
from manganelo import utils, siterequests
def download_chapter(url, path):
	"""Fetch every page image of the chapter at *url* and bundle them into a PDF.

	Returns the validated output path. If no image could be downloaded,
	no PDF is written but the path is still returned.
	"""
	target = utils.validate_path(path)
	response = siterequests.get(url)
	page = BeautifulSoup(response.content, "html.parser")
	image_urls = _get_image_urls_from_soup(page)
	with tempfile.TemporaryDirectory() as workdir:
		downloaded = _download_images(workdir, image_urls)
		if downloaded:
			_create_pdf(target, downloaded)
	return target
def _get_image_urls_from_soup(soup):
def valid(url: str):
return url.endswith((".png", ".jpg"))
return [url for url in map(lambda ele: ele["src"], soup.find_all("img")) if valid(url)]
def _download_images(dir_, urls: list):
	"""Download each image URL into *dir_*; return the list of saved file paths."""
	images = []
	for i, url in enumerate(urls):
		image = siterequests.get_image(url)
		# Failed downloads are skipped silently (best-effort page fetch).
		if image is not None:
			# Name files by page index so the PDF pages keep chapter order.
			ext = url.split(".")[-1]
			path = utils.save_image(image, os.path.join(dir_, f"{i}.{ext}"))
			if path:
				images.append(path)
	return images
def _create_pdf(path, images: list):
	"""Write one PDF page per image file in *images* to *path*.

	Unreadable image files are skipped rather than aborting the chapter.
	"""
	pdf = canvas.Canvas(path)
	for image in images:
		# noinspection PyBroadException
		try:
			with Image.open(image) as img:
				w, h = img.size
		except BaseException:
			continue  # skip files PIL cannot open
		pdf.setPageSize((w, h))  # Set the page dimensions to the image dimensions
		pdf.drawImage(image, x=0, y=0)  # Insert the image onto the current page
		pdf.showPage()  # Create a new page ready for the next image
pdf.save() | 19.534247 | 88 | 0.706171 | import os
import tempfile
from bs4 import BeautifulSoup
from PIL import Image
from reportlab.pdfgen import canvas
from manganelo import utils, siterequests
# Comment-stripped duplicate of the chapter-download helpers above.
def download_chapter(url, path):
	"""Download every page image of a chapter and bundle them into a PDF."""
	path = utils.validate_path(path)
	r = siterequests.get(url)
	soup = BeautifulSoup(r.content, "html.parser")
	urls = _get_image_urls_from_soup(soup)
	with tempfile.TemporaryDirectory() as dir_:
		if images := _download_images(dir_, urls):
			_create_pdf(path, images)
	return path
def _get_image_urls_from_soup(soup):
	"""Collect the src of every <img> tag ending in .png or .jpg."""
	def valid(url: str):
		return url.endswith((".png", ".jpg"))
	return [url for url in map(lambda ele: ele["src"], soup.find_all("img")) if valid(url)]
def _download_images(dir_, urls: list):
	"""Download each URL into *dir_*; return the saved file paths."""
	images = []
	for i, url in enumerate(urls):
		image = siterequests.get_image(url)
		if image is not None:
			ext = url.split(".")[-1]
			path = utils.save_image(image, os.path.join(dir_, f"{i}.{ext}"))
			if path:
				images.append(path)
	return images
def _create_pdf(path, images: list):
	"""Write one PDF page per readable image file."""
	pdf = canvas.Canvas(path)
	for image in images:
		try:
			with Image.open(image) as img:
				w, h = img.size
		except BaseException:
			continue
		pdf.setPageSize((w, h))
		pdf.drawImage(image, x=0, y=0)
		pdf.showPage()
	pdf.save()
f7202cd5480a280b65ca98c820e6e6468a9b083f | 71 | py | Python | trial/config/test_cfg.py | ygtxr1997/mmsegmentation | 9cd8c61ba1cd27fe743edc5f546d2710a3c81110 | [
"Apache-2.0"
] | null | null | null | trial/config/test_cfg.py | ygtxr1997/mmsegmentation | 9cd8c61ba1cd27fe743edc5f546d2710a3c81110 | [
"Apache-2.0"
] | null | null | null | trial/config/test_cfg.py | ygtxr1997/mmsegmentation | 9cd8c61ba1cd27fe743edc5f546d2710a3c81110 | [
"Apache-2.0"
] | null | null | null | a = 1
b = dict(b1=[0, 1, 2],
b2=None,)
c = (1, 2)
d = 'string' | 14.2 | 22 | 0.394366 | a = 1
# Duplicate of the test config above.
b = dict(b1=[0, 1, 2],
         b2=None,)
c = (1, 2)
d = 'string'
f7202dae123914878302027d30e8bb56a37777a3 | 2,484 | py | Python | docs/source/conf.py | deeghuge/ibm-spectrum-scale-csi | 572a94a263aa9a850e8377eacfe3d25be8df12c8 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | deeghuge/ibm-spectrum-scale-csi | 572a94a263aa9a850e8377eacfe3d25be8df12c8 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | deeghuge/ibm-spectrum-scale-csi | 572a94a263aa9a850e8377eacfe3d25be8df12c8 | [
"Apache-2.0"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'IBM Spectrum Scale CSI'
copyright = '2019, IBM'
author = 'John Dunham'
master_doc = 'index'
# The full version, including alpha/beta/rc tags
release = '1.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    # Read the Docs injects its own theme; use the default there.
    html_theme = 'default'
else:
    html_theme = 'sphinx_rtd_theme'
    html_theme_options = {
        'collapse_navigation': True,
        'sticky_navigation': True
    }
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add md to suffix.
source_suffix = ['.md', '.rst']
# Markdown support.
# NOTE(review): 'source_parsers' was deprecated in Sphinx 1.8; newer setups
# register recommonmark via 'extensions' instead — confirm the Sphinx version.
source_parsers = { '.md' : 'recommonmark.parser.CommonMarkParser' }
# collection of substitutions.
# NOTE(review): |driver-repo| expands to GitHubDriver_, but no
# '.. _GitHubDriver:' target is defined below — the link will not resolve.
rst_epilog="""
.. |driver-repo| replace:: GitHubDriver_
.. |operator-repo| replace:: GitHubOperator_
.. _GitHubOperator: https://github.com/IBM/
"""
| 30.666667 | 79 | 0.669485 |
# Comment-stripped duplicate of the Sphinx configuration above.
project = 'IBM Spectrum Scale CSI'
copyright = '2019, IBM'
author = 'John Dunham'
master_doc = 'index'
release = '1.0.1'
extensions = [
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    html_theme = 'default'
else:
    html_theme = 'sphinx_rtd_theme'
    html_theme_options = {
        'collapse_navigation': True,
        'sticky_navigation': True
    }
html_static_path = ['_static']
source_suffix = ['.md', '.rst']
source_parsers = { '.md' : 'recommonmark.parser.CommonMarkParser' }
rst_epilog="""
.. |driver-repo| replace:: GitHubDriver_
.. |operator-repo| replace:: GitHubOperator_
.. _GitHubOperator: https://github.com/IBM/
"""
| true | true |
f7202e4624fb1dd921db04fe9bd81a4baf484a71 | 4,378 | py | Python | contrib/seeds/generate-seeds.py | vas191/LONGNETWORK-0.7d | 4ed2d9ba26744c1404a7aeef3f75e0c19310aea2 | [
"MIT"
] | 6 | 2020-09-24T00:20:50.000Z | 2021-08-05T06:48:51.000Z | contrib/seeds/generate-seeds.py | vas191/LONGNETWORK-0.7d | 4ed2d9ba26744c1404a7aeef3f75e0c19310aea2 | [
"MIT"
] | 4 | 2020-07-17T17:05:25.000Z | 2021-05-08T10:47:05.000Z | contrib/seeds/generate-seeds.py | vas191/LONGNETWORK-0.7d | 4ed2d9ba26744c1404a7aeef3f75e0c19310aea2 | [
"MIT"
] | 4 | 2020-05-10T21:34:04.000Z | 2021-06-04T06:51:01.000Z | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix (IPv4-mapped address, ::ffff:a.b.c.d)
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix (OnionCat range)
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])

def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 representation.

    Accepts: '<x>.onion' (base32 payload), dotted IPv4, colon IPv6, and
    '0x...' little-endian IPv4 from the old pnSeeds format.
    Raises ValueError for anything unparsable.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: previously referenced undefined name 's' (NameError
            # whenever an invalid onion address was encountered).
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split a host spec into (ipv6_bytes, port), falling back to *defaultport*."""
    bracketed = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if bracketed:  # [ipv6] or [ipv6]:port
        host, port = bracketed.group(1), bracketed.group(2)
    elif s.count(':') > 1:  # bare ipv6, no port
        host, port = s, ''
    else:  # host or host:port
        host, _, port = s.partition(':')
    port = int(port) if port else defaultport
    return (name_to_ipv6(host), port)
def process_nodes(g, f, structname, defaultport):
    """Write the hosts listed in file *f* as a C array named *structname* to *g*.

    '#' comments and blank lines are skipped; every remaining line is parsed
    with parse_spec() and emitted as a {{byte,...}, port} initializer.
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    first = True
    for line in f:
        # Strip trailing '#' comments, then surrounding whitespace.
        comment = line.find('#')
        if comment != -1:
            line = line[0:comment]
        line = line.strip()
        if not line:
            continue
        # Separate entries with ',\n' (no trailing comma after the last one).
        if not first:
            g.write(',\n')
        first = False
        (host,port) = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write('    {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    """Generate src/chainparamsseeds.h from nodes_main.txt/nodes_test.txt.

    Usage: generate-seeds.py <dir containing the node lists>; the header is
    written to stdout.
    """
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the bitcoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 8778)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 18777)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
    main()
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# --- Comment-stripped duplicate of the seed-generator script above. ---
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string (.onion / IPv4 / IPv6 / 0x-hex) to 16 bytes."""
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # NOTE(review): 's' is undefined here (should be 'addr');
            # this raises NameError instead of ValueError on bad onions.
            raise ValueError('Invalid onion %s' % s)
        return pchOnionCat + vchAddr
    elif '.' in addr:
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr:
        sub = [[], []]
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1):
                    continue
                x += 1
                assert(x < 2)
            else:
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'):
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split a host spec into (ipv6_bytes, port)."""
    match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match:
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1:
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Emit the node list in *f* as a C SeedSpec6 array."""
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    first = True
    for line in f:
        comment = line.find('#')
        if comment != -1:
            line = line[0:comment]
        line = line.strip()
        if not line:
            continue
        if not first:
            g.write(',\n')
        first = False
        (host,port) = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write('    {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    """Write chainparamsseeds.h for the node lists in sys.argv[1]."""
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the bitcoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 8778)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 18777)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
    main()
| true | true |
f7202e6fbf154aac2f8ed227635669a6c0396ba0 | 1,622 | py | Python | 2020-08-month-long-challenge/day02.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | 2020-08-month-long-challenge/day02.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | 2020-08-month-long-challenge/day02.py | jkbockstael/leetcode | 8ef5c907fb153c37dc97f6524493ceca2044ea38 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# Day 2: Design HashSet
#
# Design a HashSet without using any built-in hash table libraries.
#
# To be specific, your design should include these functions:
# - add(value): Insert a value into the HashSet.
# - contains(value) : Return whether the value exists in the HashSet or not.
# - remove(value): Remove a value in the HashSet. If the value does not exist
# in the HashSet, do nothing.
#
# Note:
# - All values will be in the range of [0, 1000000]
# - The number of operations will be in the range of [1, 10000]
# - Please do not use the built-in HashSet library.
class MyHashSet:
    """Hash set of non-negative ints backed by 2**memory chained buckets."""

    def __init__(self, memory=16):
        # One bucket (plain list) per possible hash value.
        self.values = [[] for _ in range(2**memory)]
        self.memory = memory

    def add(self, key: int) -> None:
        """Insert *key*; duplicates are ignored."""
        bucket = self.values[self.hash(key)]
        if key not in bucket:
            bucket.append(key)

    def remove(self, key: int) -> None:
        """Remove *key* if present; removing an absent key is a no-op."""
        bucket = self.values[self.hash(key)]
        if key in bucket:
            bucket.remove(key)

    def contains(self, key: int) -> bool:
        """Return whether *key* is in the set."""
        return key in self.values[self.hash(key)]

    def hash(self, key) -> int:
        # Fibonacci (multiplicative) hashing: multiply by a large odd 64-bit
        # constant and keep the TOP `memory` bits of the 64-bit product.
        # BUG FIX: the previous constant (27644437) was far too small for the
        # `>> (64 - memory)` shift, so every key below ~10**4 hashed to
        # bucket 0 and operations degraded to O(n) scans of one list.
        a = 0x9E3779B97F4A7C15  # 2**64 / golden ratio, odd
        return ((a * key) & 0xFFFFFFFFFFFFFFFF) >> (64 - self.memory)
# Tests — mirrors the example operation sequence from the problem statement.
hash_set = MyHashSet()
hash_set.add(1)
hash_set.add(2)
assert hash_set.contains(1) == True
assert hash_set.contains(3) == False
hash_set.add(2)
assert hash_set.contains(2) == True
hash_set.remove(2)
assert hash_set.contains(2) == False
| 30.037037 | 77 | 0.645499 |
class MyHashSet:
    """Comment-stripped duplicate of the MyHashSet above."""
    def __init__(self, memory=16):
        self.values = [[] for _ in range(2**memory)]
        self.memory = memory
    def add(self, key: int) -> None:
        hashed = self.hash(key)
        if key not in self.values[hashed]:
            self.values[hashed].append(key)
    def remove(self, key: int) -> None:
        hashed = self.hash(key)
        if key in self.values[hashed]:
            self.values[hashed].remove(key)
    def contains(self, key: int) -> bool:
        hashed = self.hash(key)
        return key in self.values[hashed]
    def hash(self, key) -> int:
        # NOTE(review): for keys below ~2**48 / a this shift yields 0, so
        # most keys share bucket 0 (weak spread; operations degrade to O(n)).
        a = 27644437
        w = 64
        m = self.memory
        return (a * key) >> (w - m)
hash_set = MyHashSet()
hash_set.add(1)
hash_set.add(2)
assert hash_set.contains(1) == True
assert hash_set.contains(3) == False
hash_set.add(2)
assert hash_set.contains(2) == True
hash_set.remove(2)
assert hash_set.contains(2) == False
| true | true |
f7202f4a73d2cb0fec38c026a6674123334607c1 | 1,014 | py | Python | macropolo/environments/sheer_env.py | cfpb/macro-polo | 7caf519b623df00a3f16a6119504db09c8983b7b | [
"CC0-1.0"
] | 1 | 2015-07-11T17:52:24.000Z | 2015-07-11T17:52:24.000Z | macropolo/environments/sheer_env.py | cfpb/macro-polo | 7caf519b623df00a3f16a6119504db09c8983b7b | [
"CC0-1.0"
] | 11 | 2015-03-10T15:40:42.000Z | 2016-05-05T22:54:37.000Z | macropolo/environments/sheer_env.py | cfpb/macro-polo | 7caf519b623df00a3f16a6119504db09c8983b7b | [
"CC0-1.0"
] | 6 | 2015-03-09T13:39:12.000Z | 2021-02-21T10:34:15.000Z | # -*- coding: utf-8 -*-
import markdown
from sheer.templates import date_formatter
from .jinja2_env import Jinja2Environment
class SheerEnvironment(Jinja2Environment):
    """Jinja2 environment mirroring the filters Sheer registers by default."""

    def setup_environment(self):
        """Build the base Jinja2 environment, then register Sheer's filters.

        Only non-Elasticsearch filters are added here; data-backed Sheer
        filters are mocked per-macro elsewhere.
        """
        super(SheerEnvironment, self).setup_environment()

        def date_filter(value, format="%Y-%m-%d", tz="America/New_York"):
            # `tz` is accepted for signature compatibility; the formatter
            # itself only uses the format string.
            return date_formatter(value, format)

        def markdown_filter(raw_text):
            return markdown.markdown(raw_text)

        self.filters['date'] = date_filter
        self.filters['markdown'] = markdown_filter
| 34.965517 | 74 | 0.66568 |
import markdown
from sheer.templates import date_formatter
from .jinja2_env import Jinja2Environment
class SheerEnvironment(Jinja2Environment):
    """Comment-stripped duplicate of the SheerEnvironment above."""
    def setup_environment(self):
        """Build the base Jinja2 environment, then add Sheer's filters."""
        super(SheerEnvironment, self).setup_environment()
        self.filters['date'] = lambda value, format="%Y-%m-%d", \
            tz="America/New_York": date_formatter(value, format)
        self.filters['markdown'] = lambda raw_text: \
            markdown.markdown(raw_text)
| true | true |
f7202fba7f46a7622a91a03218bcc2c4f060a7c1 | 1,559 | py | Python | samples/generated_samples/dialogflow_generated_dialogflow_v2_knowledge_bases_list_knowledge_bases_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 171 | 2018-09-19T21:16:18.000Z | 2020-12-07T17:41:10.000Z | samples/generated_samples/dialogflow_generated_dialogflow_v2_knowledge_bases_list_knowledge_bases_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 150 | 2018-09-25T14:04:28.000Z | 2020-12-09T21:45:43.000Z | samples/generated_samples/dialogflow_generated_dialogflow_v2_knowledge_bases_list_knowledge_bases_sync.py | rkdfc93/python-dialogflow | a59cff0298ef18674c0b4133ef0a6ab82e288920 | [
"Apache-2.0"
] | 75 | 2018-09-22T14:12:18.000Z | 2020-12-08T07:12:12.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListKnowledgeBases
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_KnowledgeBases_ListKnowledgeBases_sync]
from google.cloud import dialogflow_v2
def sample_list_knowledge_bases():
    """List Dialogflow knowledge bases and print every returned item."""
    # Build the service client.
    kb_client = dialogflow_v2.KnowledgeBasesClient()

    # Prepare the request for the given parent resource.
    list_request = dialogflow_v2.ListKnowledgeBasesRequest(parent="parent_value")

    # Issue the request and walk the paged response.
    for kb in kb_client.list_knowledge_bases(request=list_request):
        print(kb)
# [END dialogflow_generated_dialogflow_v2_KnowledgeBases_ListKnowledgeBases_sync]
| 33.170213 | 85 | 0.7678 |
from google.cloud import dialogflow_v2
def sample_list_knowledge_bases():
client = dialogflow_v2.KnowledgeBasesClient()
request = dialogflow_v2.ListKnowledgeBasesRequest(
parent="parent_value",
)
page_result = client.list_knowledge_bases(request=request)
for response in page_result:
print(response)
| true | true |
f7203043a8bcf8301e573ee5c313e973c2484f62 | 5,458 | py | Python | scripts/rewritepass.py | CEisenhofer/alive2 | e7cfe7d8dcd8cfaafa1b0f7549e4e2dabee60b87 | [
"MIT"
] | 1 | 2022-02-09T22:10:09.000Z | 2022-02-09T22:10:09.000Z | scripts/rewritepass.py | CEisenhofer/alive2 | e7cfe7d8dcd8cfaafa1b0f7549e4e2dabee60b87 | [
"MIT"
] | null | null | null | scripts/rewritepass.py | CEisenhofer/alive2 | e7cfe7d8dcd8cfaafa1b0f7549e4e2dabee60b87 | [
"MIT"
] | 1 | 2022-02-23T18:33:44.000Z | 2022-02-23T18:33:44.000Z | #!/usr/bin/python
import os
import re
import sys
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Use: %s <PassRegistry.def path> <passes> [run-tests]" % sys.argv[0])
exit(1)
passregpath = sys.argv[1]
def skip_first_pass(s):
    """Return the remainder of *s* after its first parenthesized group.

    Finds the ')' that closes the first '(' and skips past it plus the
    ',' separator that follows, e.g. 'function(sroa),gvn' -> 'gvn'.
    Returns '' when no balanced group closes before the end of *s*.
    """
    depth = 0
    for pos, ch in enumerate(s):
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth == 0:
                # +2 skips the closing ')' and the ',' right after it.
                return s[pos + 2:]
    return ''
def wrap_str(arg, lst):
    """Nest *arg* inside each wrapper name of *lst*, in order.

    wrap_str('licm', ['loop-mssa', 'function'])
        -> 'function(loop-mssa(licm))'
    """
    wrapped = arg
    for wrapper in lst:
        wrapped = "{}({})".format(wrapper, wrapped)
    return wrapped
def wrap(args):
    """Wrap an `opt -passes=` pipeline string in the pass-manager
    nesting (module/cgscc/function/loop(...)) it needs to run.

    Looks at the first "real" pass in *args* (skipping `verify` and
    `invalidate<all>`), then decides the wrapping from (a) an existing
    decoration like `function(...)`, (b) a hand-maintained override
    table, or (c) the pass's registration kind in LLVM's
    PassRegistry.def (global `passregpath`).  Recursive: composite
    pipelines are wrapped piecewise and re-examined.
    """
    passes = args.split(',')

    # Pass-manager nesting: scope name -> extra wrappers needed around it.
    pass_types = {
        "module" : [],
        "cgscc" : [],
        "function" : [],
        "loop" : ["function"],
        "loop-mssa" : ["function"],
    }

    firstpass = ''
    type = None  # NOTE(review): local shadows the builtin `type`
    skip = ['verify', 'invalidate<all>']
    for p in passes:
        if not any(p.startswith(s) for s in skip):
            firstpass = p
            break

    # decorated already: function(foo)
    for ty,lst in pass_types.items():
        if firstpass.startswith(ty + '('):
            if lst:
                return wrap_str(args, lst)

            # check if we have function(foo), globalopt
            next_pass = args
            while True:
                next_pass = skip_first_pass(next_pass)
                if not next_pass:
                    # Every segment matched the same scope: no extra wrap.
                    return args
                next_pass = wrap(next_pass)
                if next_pass.startswith(ty + '('):
                    continue
                # function(x), cgscc(y) -- mixed scopes need a module wrap.
                for ty,lst in pass_types.items():
                    if next_pass.startswith(ty + '('):
                        return wrap_str(args, ['module'])

    override = {
        # pass -> (type, prepend-type?)
        'devirt<' : ('cgscc', True),
        'loop-mssa' : ('loop', False),
    }
    for arg,(ty,prepend) in override.items():
        if firstpass.startswith(arg):
            return wrap_str(args, ([ty] if prepend else []) + pass_types[ty])

    # strip e.g. require<foo> -> foo  (registry stores undecorated names)
    strip = [
        r'require<([^>]+)>',
        r'repeat<\d+>\(([^)]+)\)',
        r'invalidate<([^>]+)>',
        r'<[^>]+>()'
    ]
    for s in strip:
        firstpass = re.sub(s, '\\1', firstpass)

    # check LLVM's PassRegistry.def file
    txt = open(passregpath, 'r').read()
    p = re.escape(firstpass)
    m = re.search(r'^([A-Z_]+)_(?:PASS|ANALYSIS)[A-Z_]*\("' + p, txt, re.MULTILINE)
    if m is None:
        # Unknown pass: fall back to the outermost wrapping.
        return wrap_str(args, ['module'])
    type = m.group(1)

    # Some loop passes must use loop-mssa instead of loop
    # And there's no place to get this info
    loop_mssa = {
        'licm',
        'simple-loop-unswitch',
    }
    if p in loop_mssa:
        type = 'LOOP-MSSA'

    # Map the registry macro prefix onto the pipeline scope name.
    type = {
        'CGSCC' : 'cgscc',
        'FUNCTION' : 'function',
        'FUNCTION_ALIAS' : 'function',
        'LOOP' : 'loop',
        'LOOPNEST' : 'loop',
        'LOOP-MSSA' : 'loop-mssa',
        'MODULE' : 'module',
        'MODULE_ALIAS' : 'module',
    }[type]
    return wrap_str(args, [type] + pass_types[type])
def run_opt(passes):
    """Return True iff `opt` accepts the pipeline string *passes*.

    ``os.popen(...).close()`` returns ``None`` on a zero exit status and
    the non-zero status otherwise, so ``error is None`` means success.
    stderr is redirected into the pipe so failures stay quiet.
    """
    error = os.popen('echo "" | opt -passes="%s" -disable-output 2>&1' %
                     passes).close()
    return error is None
# CLI mode: wrap the single pipeline argument and print the result.
if len(sys.argv) == 3:
    print(wrap(sys.argv[2].strip("'\"")))
else:
    # Self-test mode: (input pipeline, expected wrapped pipeline) pairs.
    tests = [
        ('sroa', 'function(sroa)'),
        ('simplifycfg', 'function(simplifycfg)'),
        ('licm', 'function(loop-mssa(licm))'),
        ('loop-mssa(licm)', 'function(loop-mssa(licm))'),
        ('argpromotion', 'cgscc(argpromotion)'),
        ('loop-extract', 'module(loop-extract)'),
        ('loop-mssa(simple-loop-unswitch<nontrivial>)', 'function(loop-mssa(simple-loop-unswitch<nontrivial>))'),
        ('sroa,verify', 'function(sroa,verify)'),
        ('verify,sroa', 'function(verify,sroa)'),
        ('loop-mssa(loop-instsimplify)', 'function(loop-mssa(loop-instsimplify))'),
        ('loop-unroll-and-jam', 'function(loop(loop-unroll-and-jam))'),
        ('require<basic-aa>,sroa', 'function(require<basic-aa>,sroa)'),
        ('cgscc(repeat<2>(inline,function(dce)))', 'cgscc(repeat<2>(inline,function(dce)))'),
        ('repeat<2>(sroa)', 'function(repeat<2>(sroa))'),
        ('cgscc(devirt<4>(inline))', 'cgscc(devirt<4>(inline))'),
        ('devirt<1>(inline,function(gvn))', 'cgscc(devirt<1>(inline,function(gvn)))'),
        ('require<opt-remark-emit>,loop(loop-unroll-full)', 'function(require<opt-remark-emit>,loop(loop-unroll-full))'),
        ('invalidate<domtree>,early-cse<memssa>', 'function(invalidate<domtree>,early-cse<memssa>)'),
        ('function(loop-vectorize,instcombine)', 'function(loop-vectorize,instcombine)'),
        ('function(loop-vectorize),function(instcombine)', 'function(loop-vectorize),function(instcombine)'),
        ('function(loop-vectorize),function(instcombine),globalopt', 'module(function(loop-vectorize),function(instcombine),globalopt)'),
        ('function(ee-instrument),function(ee-instrument),cgscc(inline),function(ee-instrument<post-inline>)',
         'module(function(ee-instrument),function(ee-instrument),cgscc(inline),function(ee-instrument<post-inline>))'),
        ('function(print<demanded-bits>),attributor', 'module(function(print<demanded-bits>),attributor)'),
        ('function(tailcallelim),cgscc(inline)', 'module(function(tailcallelim),cgscc(inline))'),
        ('function(slp-vectorizer),module(hotcoldsplit)', 'module(function(slp-vectorizer),module(hotcoldsplit))'),
        ('verify', 'module(verify)'),
        ('default<O2>', 'module(default<O2>)')
    ]
    # Each case must (1) wrap to the expected string, (2) be runnable by
    # `opt` as-is, and (3) be runnable after wrapping with a module pass
    # appended (proves the wrapped pipeline composes at module scope).
    for i,o in tests:
        if wrap(i) != o:
            print('FAIL:', i)
            print('Got:', wrap(i))
            print('Expected:', o)
            print()
        elif not run_opt(i):
            print('FAIL running input:', i, '\n')
        elif not run_opt(o + ',globalopt'):
            print('FAIL running output:', o, '\n')
        else:
            print('PASS:', i)
| 32.295858 | 133 | 0.593441 |
import os
import re
import sys
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Use: %s <PassRegistry.def path> <passes> [run-tests]" % sys.argv[0])
exit(1)
passregpath = sys.argv[1]
def skip_first_pass(s):
count = 0
for i in range(len(s)):
c = s[i]
if c == '(':
count += 1
elif c == ')':
count -= 1
if count == 0:
return s[i+2:]
return ''
def wrap_str(arg, lst):
for e in lst:
arg = "%s(%s)" % (e, arg)
return arg
def wrap(args):
passes = args.split(',')
pass_types = {
"module" : [],
"cgscc" : [],
"function" : [],
"loop" : ["function"],
"loop-mssa" : ["function"],
}
firstpass = ''
type = None
skip = ['verify', 'invalidate<all>']
for p in passes:
if not any(p.startswith(s) for s in skip):
firstpass = p
break
for ty,lst in pass_types.items():
if firstpass.startswith(ty + '('):
if lst:
return wrap_str(args, lst)
next_pass = args
while True:
next_pass = skip_first_pass(next_pass)
if not next_pass:
return args
next_pass = wrap(next_pass)
if next_pass.startswith(ty + '('):
continue
for ty,lst in pass_types.items():
if next_pass.startswith(ty + '('):
return wrap_str(args, ['module'])
override = {
'devirt<' : ('cgscc', True),
'loop-mssa' : ('loop', False),
}
for arg,(ty,prepend) in override.items():
if firstpass.startswith(arg):
return wrap_str(args, ([ty] if prepend else []) + pass_types[ty])
strip = [
r'require<([^>]+)>',
r'repeat<\d+>\(([^)]+)\)',
r'invalidate<([^>]+)>',
r'<[^>]+>()'
]
for s in strip:
firstpass = re.sub(s, '\\1', firstpass)
txt = open(passregpath, 'r').read()
p = re.escape(firstpass)
m = re.search(r'^([A-Z_]+)_(?:PASS|ANALYSIS)[A-Z_]*\("' + p, txt, re.MULTILINE)
if m is None:
return wrap_str(args, ['module'])
type = m.group(1)
# Some loop passes must use loop-mssa instead of loop
# And there's no place to get this info
loop_mssa = {
'licm',
'simple-loop-unswitch',
}
if p in loop_mssa:
type = 'LOOP-MSSA'
type = {
'CGSCC' : 'cgscc',
'FUNCTION' : 'function',
'FUNCTION_ALIAS' : 'function',
'LOOP' : 'loop',
'LOOPNEST' : 'loop',
'LOOP-MSSA' : 'loop-mssa',
'MODULE' : 'module',
'MODULE_ALIAS' : 'module',
}[type]
return wrap_str(args, [type] + pass_types[type])
def run_opt(passes):
error = os.popen('echo "" | opt -passes="%s" -disable-output 2>&1' %
passes).close()
return error is None
if len(sys.argv) == 3:
print(wrap(sys.argv[2].strip("'\"")))
else:
tests = [
('sroa', 'function(sroa)'),
('simplifycfg', 'function(simplifycfg)'),
('licm', 'function(loop-mssa(licm))'),
('loop-mssa(licm)', 'function(loop-mssa(licm))'),
('argpromotion', 'cgscc(argpromotion)'),
('loop-extract', 'module(loop-extract)'),
('loop-mssa(simple-loop-unswitch<nontrivial>)', 'function(loop-mssa(simple-loop-unswitch<nontrivial>))'),
('sroa,verify', 'function(sroa,verify)'),
('verify,sroa', 'function(verify,sroa)'),
('loop-mssa(loop-instsimplify)', 'function(loop-mssa(loop-instsimplify))'),
('loop-unroll-and-jam', 'function(loop(loop-unroll-and-jam))'),
('require<basic-aa>,sroa', 'function(require<basic-aa>,sroa)'),
('cgscc(repeat<2>(inline,function(dce)))', 'cgscc(repeat<2>(inline,function(dce)))'),
('repeat<2>(sroa)', 'function(repeat<2>(sroa))'),
('cgscc(devirt<4>(inline))', 'cgscc(devirt<4>(inline))'),
('devirt<1>(inline,function(gvn))', 'cgscc(devirt<1>(inline,function(gvn)))'),
('require<opt-remark-emit>,loop(loop-unroll-full)', 'function(require<opt-remark-emit>,loop(loop-unroll-full))'),
('invalidate<domtree>,early-cse<memssa>', 'function(invalidate<domtree>,early-cse<memssa>)'),
('function(loop-vectorize,instcombine)', 'function(loop-vectorize,instcombine)'),
('function(loop-vectorize),function(instcombine)', 'function(loop-vectorize),function(instcombine)'),
('function(loop-vectorize),function(instcombine),globalopt', 'module(function(loop-vectorize),function(instcombine),globalopt)'),
('function(ee-instrument),function(ee-instrument),cgscc(inline),function(ee-instrument<post-inline>)',
'module(function(ee-instrument),function(ee-instrument),cgscc(inline),function(ee-instrument<post-inline>))'),
('function(print<demanded-bits>),attributor', 'module(function(print<demanded-bits>),attributor)'),
('function(tailcallelim),cgscc(inline)', 'module(function(tailcallelim),cgscc(inline))'),
('function(slp-vectorizer),module(hotcoldsplit)', 'module(function(slp-vectorizer),module(hotcoldsplit))'),
('verify', 'module(verify)'),
('default<O2>', 'module(default<O2>)')
]
for i,o in tests:
if wrap(i) != o:
print('FAIL:', i)
print('Got:', wrap(i))
print('Expected:', o)
print()
elif not run_opt(i):
print('FAIL running input:', i, '\n')
elif not run_opt(o + ',globalopt'):
print('FAIL running output:', o, '\n')
else:
print('PASS:', i)
| true | true |
f72031a89a64427da851b239767808dec0087b18 | 8,293 | py | Python | ProgramsToRead/ExercisesLists/List004.py | ItanuRomero/PythonStudyPrograms | 2b784b2af068b34e65ddf817ca8d99c1ca3a710e | [
"MIT"
] | null | null | null | ProgramsToRead/ExercisesLists/List004.py | ItanuRomero/PythonStudyPrograms | 2b784b2af068b34e65ddf817ca8d99c1ca3a710e | [
"MIT"
] | null | null | null | ProgramsToRead/ExercisesLists/List004.py | ItanuRomero/PythonStudyPrograms | 2b784b2af068b34e65ddf817ca8d99c1ca3a710e | [
"MIT"
] | null | null | null | # Lista 04 - Itanu Romero - 2o. semestre
def questao01():
"""
Elabore um programa que efetue a leitura de duas strings e informe o seu conteúdo,
seguido de seu compri- mento. Indique também se as
duas strings possuem o mesmo comprimento e se são iguais ou diferentes no conteúdo.
"""
dicionario = {}
for i in range(2):
palavra = input('Digite uma palavra: ')
dicionario[i] = [palavra, len(palavra)]
print(dicionario)
if dicionario[0][0] == dicionario[1][0]:
print('Conteúdo iguais')
if dicionario[0][1] == dicionario[1][1]:
print('Comprimento iguais')
def questao02():
"""
Elabore um programa que solicite ao usuário, o seu nome e em seguida
mostre o seu nome de trás para frente utilizando somente letras maiúsculas.
"""
nome = input('Digite seu nome: ')
print(nome[::-1].upper())
def questao03():
"""
Elaborar um programa que solicite a digitação de um número
de CPF no formato xxx.xxx.xxx-xx e indique se é um número válido ou inválido
através da validação dos dígitos verificadores e dos caracteres de formatação.
"""
cpf = input("Digite seu CPF\n")
if len(cpf) == 14 and cpf[3] == "." and cpf[7] == "." and cpf[11] == "-":
print("É um CPF")
else:
print("Não é um CPF")
def questao04():
"""
Elaborar um programa que a partir da digitação de uma frase,
o programa informe quantos espaços
em branco e quantos são, e quantas vezes aparecem cada uma das vogais a, e, i, o, u.
"""
frase = input('Digite uma frase: ').lower()
vogais = ['a', 'e', 'i', 'o', 'u']
vogais_na_frase = 0
espacos_em_branco = 0
for i in frase:
if i in vogais:
vogais_na_frase += 1
if i in " ":
espacos_em_branco += 1
print(f'Numeros de vogais: {vogais_na_frase}')
print(f'Numeros de espacos em branco: {espacos_em_branco}')
def questao05():
"""
Faça um programa que leia um número de telefone,
e corrija o número no caso deste conter somente 7 dígitos,
acrescentando o ’3’ na frente.
O usuário pode informar o número com ou sem o traço separador.
"""
telefone = input('Digite um telefone: ')
traco = False
for i in telefone:
if i == '-':
traco = True
if len(telefone) == 7 or len(telefone) == 8 and traco:
telefone = '3' + telefone
print(f'Seu telefone é: {telefone}')
def questao06():
"""
Desenvolva um jogo em que o usuário tenha que adivinhar uma palavra que
será mostrada com as letras embaralhadas. O programa terá uma lista de
palavras lidas de uma lista a ser fixada inicialmente pelo programador e
escolherá uma aleatoriamente. O jogador terá uma única tentativa para adivinhar
a palavra. Ao final a palavra deve ser mostrada na tela, informando se o usuário
ganhou ou perdeu o jogo.
Observação: Refaça, possibilitando ao jogador tentar até 5 vezes.
"""
import random
animais = ['gato', 'cachorro', 'cavalo', 'jumento', 'peixe', 'zebra', 'papagaio', 'girafa', 'pomba', 'lagosta']
escolhida = random.choice(animais)
shuffled = list(escolhida)
random.shuffle(shuffled)
shuffled = "".join(shuffled)
print(f'A palavra embaralhada é {shuffled}\n')
tentativa = input('Qual a palavra embaralhada? ')
if escolhida == tentativa.lower():
print('Você acertou, parabéns')
else:
print('Você errou')
print(f'A palavra era {escolhida}')
def questao07():
"""
Elabore um programa que efetue a leitura de
cinco números inteiros, adicione-os a uma lista e mostre-a.
"""
lista = []
for i in range(5):
numero = int(input('Digite o um número: '))
lista.append(numero)
print(lista)
def questao08():
"""
Elabore um programa que efetue a leitura de quinze números inteiros,
adicione-os a uma lista e mostre-a de forma invertida, do último para o primeiro.
"""
lista = []
for i in range(15):
numero = int(input('Digite o um número: '))
lista.append(numero)
print(lista[::-1])
def questao09():
"""
Elabore um programa que efetue a leitura de quatro notas reais,
adicione-as a uma lista e mostre-as, inclusive a média aritmética,
arredondar duas casas decimais. Verifique e exiba as devidas mensagens
se o aluno está aprovado ou não, considerando que a média de aprovação
é maior ou igual a 7.0, e em prova exame, se
média aritmética entre 4.0 e menor que 7.0. E reprovado, se menor que 4.0.
"""
lista = []
soma = 0
for i in range(4):
nota = float(input('Digite sua nota: '))
soma = soma + nota
lista.append(nota)
media = round(soma / 4, 2)
print(f'Suas notas são {lista}sendo assim sua média é {media}')
if media >= 7:
print('Você está aprovado')
elif 4 <= media < 7:
print('Pegou exame')
else:
print('Reprovou')
def questao10():
"""
Faça um programa que leia uma lista com dez caracteres,
e diga quantas consoantes foram lidas. Imprima as consoantes.
"""
vogais = ['a', 'e', 'i', 'o', 'u']
lista = []
j = 0
for i in range(10):
caracter = input('Digite um caracter: ')
caracter = caracter.lower()
if caracter in vogais:
pass
else:
lista.append(caracter)
j += 1
print(f'Foram inseridas {j} consoantes, são elas {lista}')
def questao11():
    """Read 15 integers into NUMEROS, split them into PAR (even) and
    IMPAR (odd), and print the three lists.

    Fix: the loop previously read only 10 numbers, although the exercise
    statement explicitly asks for 15.
    """
    numeros = []
    par = []
    impar = []
    for i in range(15):  # the statement requires 15 numbers, not 10
        numero = int(input('Digite um número: '))
        numeros.append(numero)
        if numero % 2 == 0:
            par.append(numero)
        else:
            impar.append(numero)
    print(f'Os números digitados foram {numeros}\n'
          f'Dentre eles esses são pares {par} e estes são ímpares {impar}')
def questao12():
"""
Elabore um programa que efetue a leitura de quatro notas reais de10 alunos,
calcule e armazene em uma lista,
a média de cada aluno, imprima o número de alunos com média maior ou igual a 7.0.
"""
lista = []
k = 0
for i in range(1, 11):
soma = 0
for j in range(1, 5):
nota = float(input(f'Digite a {j}ª nota do aluno "{i}\n'))
soma = soma + nota
media = soma / 4
lista.append(media)
if media >= 7:
k += 1
print(f'A média dos 10 alunos eh {lista} sendo {k} acima da média')
def questao13():
    """Print, for five car models, the fuel needed to drive 1000 km and
    its cost at R$2.25 per litre, then report the most economical model.

    ``consumo`` holds km per litre, so the most economical car — the one
    with the smallest fuel use ("menor consumo", per the statement's
    sample output) — is the one with the *highest* km/l.  The original
    code compared with ``<`` against a large sentinel and therefore
    reported the least economical car instead.
    """
    carros = ['Fusca', 'Gol', 'Vectra', 'Uno', 'Amarok']
    consumo = [20.0, 18.0, 9.5, 15.0, 5.7]  # km per litre
    economico = 0  # best (highest) km/l seen so far
    carro = 0  # index of the most economical car (safe default)
    j = 0
    for i in consumo:
        print(f'{j + 1}-{carros[j]} - {i} - {round(1000 / i, 1)} litros - R${round(1000 / i * 2.25, 1)}')
        if i > economico:  # higher km/l => less fuel over 1000 km
            economico = i
            carro = j
        j += 1
    print(f'O menor consumo é do {carros[carro]}')
# Main Program
# Show the exercise statement. Pass the function object itself:
# `help(questao13())` would *run* the exercise first and then document
# its None return value instead of the docstring.
help(questao13)
# Call selected functions via:
questao13() | 32.778656 | 115 | 0.615459 |
def questao01():
dicionario = {}
for i in range(2):
palavra = input('Digite uma palavra: ')
dicionario[i] = [palavra, len(palavra)]
print(dicionario)
if dicionario[0][0] == dicionario[1][0]:
print('Conteúdo iguais')
if dicionario[0][1] == dicionario[1][1]:
print('Comprimento iguais')
def questao02():
nome = input('Digite seu nome: ')
print(nome[::-1].upper())
def questao03():
cpf = input("Digite seu CPF\n")
if len(cpf) == 14 and cpf[3] == "." and cpf[7] == "." and cpf[11] == "-":
print("É um CPF")
else:
print("Não é um CPF")
def questao04():
frase = input('Digite uma frase: ').lower()
vogais = ['a', 'e', 'i', 'o', 'u']
vogais_na_frase = 0
espacos_em_branco = 0
for i in frase:
if i in vogais:
vogais_na_frase += 1
if i in " ":
espacos_em_branco += 1
print(f'Numeros de vogais: {vogais_na_frase}')
print(f'Numeros de espacos em branco: {espacos_em_branco}')
def questao05():
telefone = input('Digite um telefone: ')
traco = False
for i in telefone:
if i == '-':
traco = True
if len(telefone) == 7 or len(telefone) == 8 and traco:
telefone = '3' + telefone
print(f'Seu telefone é: {telefone}')
def questao06():
import random
animais = ['gato', 'cachorro', 'cavalo', 'jumento', 'peixe', 'zebra', 'papagaio', 'girafa', 'pomba', 'lagosta']
escolhida = random.choice(animais)
shuffled = list(escolhida)
random.shuffle(shuffled)
shuffled = "".join(shuffled)
print(f'A palavra embaralhada é {shuffled}\n')
tentativa = input('Qual a palavra embaralhada? ')
if escolhida == tentativa.lower():
print('Você acertou, parabéns')
else:
print('Você errou')
print(f'A palavra era {escolhida}')
def questao07():
lista = []
for i in range(5):
numero = int(input('Digite o um número: '))
lista.append(numero)
print(lista)
def questao08():
lista = []
for i in range(15):
numero = int(input('Digite o um número: '))
lista.append(numero)
print(lista[::-1])
def questao09():
lista = []
soma = 0
for i in range(4):
nota = float(input('Digite sua nota: '))
soma = soma + nota
lista.append(nota)
media = round(soma / 4, 2)
print(f'Suas notas são {lista}sendo assim sua média é {media}')
if media >= 7:
print('Você está aprovado')
elif 4 <= media < 7:
print('Pegou exame')
else:
print('Reprovou')
def questao10():
vogais = ['a', 'e', 'i', 'o', 'u']
lista = []
j = 0
for i in range(10):
caracter = input('Digite um caracter: ')
caracter = caracter.lower()
if caracter in vogais:
pass
else:
lista.append(caracter)
j += 1
print(f'Foram inseridas {j} consoantes, são elas {lista}')
def questao11():
numeros = []
par = []
impar = []
for i in range(10):
numero = int(input('Digite um número: '))
numeros.append(numero)
if numero % 2 == 0:
par.append(numero)
else:
impar.append(numero)
print(f'Os números digitados foram {numeros}\n'
f'Dentre eles esses são pares {par} e estes são ímpares {impar}')
def questao12():
lista = []
k = 0
for i in range(1, 11):
soma = 0
for j in range(1, 5):
nota = float(input(f'Digite a {j}ª nota do aluno "{i}\n'))
soma = soma + nota
media = soma / 4
lista.append(media)
if media >= 7:
k += 1
print(f'A média dos 10 alunos eh {lista} sendo {k} acima da média')
def questao13():
carros = ['Fusca', 'Gol', 'Vectra', 'Uno', 'Amarok']
consumo = [20.0, 18.0, 9.5, 15.0, 5.7]
economico = 9999
j = 0
for i in consumo:
print(f'{j + 1}-{carros[j]} - {i} - {round(1000 / i, 1)} litros - R${round(1000 / i * 2.25, 1)}')
if i < economico:
economico = i
carro = j
j += 1
print(f'O menor consumo é do {carros[carro]}')
# Main Program
# Veja o enunciado:
help(questao13())
# Chame determinadas funcoes atraves de:
questao13() | true | true |
f72031f79a1842bf727ed4d56e27279ae150037a | 99 | py | Python | config.py | cballam/flask-nanoblog | b8e3034a8e647c90645ffdeb489e944c6d8042cd | [
"MIT"
] | null | null | null | config.py | cballam/flask-nanoblog | b8e3034a8e647c90645ffdeb489e944c6d8042cd | [
"MIT"
] | null | null | null | config.py | cballam/flask-nanoblog | b8e3034a8e647c90645ffdeb489e944c6d8042cd | [
"MIT"
] | null | null | null | import os
# SQLite database file created in the process's current working directory.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.getcwd() + '/blog.db'
# NOTE(review): hard-coded secret key — load from the environment for any
# non-development deployment.
SECRET_KEY = 'secret'
| 19.8 | 65 | 0.676768 | import os
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.getcwd() + '/blog.db'
SECRET_KEY = 'secret'
| true | true |
f720323103b02c71f2a9840e6439b99bbd9ea402 | 42,611 | py | Python | nidmresults/objects/inference.py | mih/nidmresults | 438f7cce6abc4a4379b629bd76f4d427891e033f | [
"MIT"
] | 1 | 2018-12-04T16:53:45.000Z | 2018-12-04T16:53:45.000Z | nidmresults/objects/inference.py | mih/nidmresults | 438f7cce6abc4a4379b629bd76f4d427891e033f | [
"MIT"
] | 2 | 2018-04-11T14:01:38.000Z | 2019-05-29T15:14:49.000Z | nidmresults/objects/inference.py | cmaumet/nidmresults | 438f7cce6abc4a4379b629bd76f4d427891e033f | [
"MIT"
] | null | null | null | """
Objects describing the Inference activity, its inputs and outputs as specified
in NIDM-Results.
Specification: http://nidm.nidash.org/specs/nidm-results.html
@author: Camille Maumet <c.m.j.maumet@warwick.ac.uk>
@copyright: University of Warwick 2013-2014
"""
from nidmresults.objects.constants import *
from nidmresults.objects.generic import *
import uuid
from math import erf, sqrt
import rdflib
from prov.model import Literal
from prov.constants import XSD_FLOAT
from prov.model import Identifier
class Inference(object):
    """
    Container for one inference step: the Inference activity together
    with its inputs and outputs (thresholds, criteria, excursion set,
    clusters, search space, software agent).
    """

    def __init__(
        self, inference, height_thresh, extent_thresh,
        peak_criteria, cluster_criteria, disp_mask, excursion_set,
        clusters, search_space, software_id):
        super(Inference, self).__init__()
        # The activity itself and the thresholds it applied.
        self.inference_act = inference
        self.height_thresh = height_thresh
        self.extent_thresh = extent_thresh
        # Criteria defining what counts as a peak / cluster.
        self.peak_criteria = peak_criteria
        self.cluster_criteria = cluster_criteria
        # Remaining inputs and outputs of the activity.
        self.disp_mask = disp_mask
        self.excursion_set = excursion_set
        self.clusters = clusters
        self.search_space = search_space
        self.software_id = software_id
class InferenceActivity(NIDMObject):
    """
    Object representing an Inference activity (plain, conjunction or
    partial-conjunction inference) as defined by NIDM-Results.
    """

    def __init__(self, oid=None, tail=None, label=None, contrast_name=None,
                 inference_type=None, partial_degree=None):
        """Create an inference activity.

        :param oid: existing identifier (new one generated if None)
        :param tail: alternative-hypothesis term; defaults to one-tailed
        :param label: rdfs label; default built from *contrast_name*
        :param inference_type: RDF type; defaults to nidm:Inference
        :param partial_degree: degree for SPM partial conjunctions
        """
        super(InferenceActivity, self).__init__(oid=oid)
        if inference_type is None:
            self.type = NIDM_INFERENCE
        else:
            self.type = inference_type
        self.prov_type = PROV['Activity']
        if tail is None:
            tail = NIDM_ONE_TAILED_TEST
        self.tail = tail
        if label is None:
            # Build a default label, qualified by the contrast if known.
            label = "Inference"
            if contrast_name:
                label += ": " + contrast_name
        self.label = label
        self.partial_degree = partial_degree

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query retrieving one (or any) inference
        activity with its label, type, tail and optional partial
        conjunction degree."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"

        query = """
prefix nidm_Inference: <http://purl.org/nidash/nidm#NIDM_0000049>
prefix nidm_ConjunctionInference: <http://purl.org/nidash/nidm#NIDM_0000011>
prefix nidm_hasAlternativeHypothesis: <http://purl.org/nidash/nidm#NIDM_000009\
7>
prefix spm_PartialConjunctionInference: <http://purl.org/nidash/spm#SPM_000000\
5>
prefix spm_PartialConjunctionDegree: <http://purl.org/nidash/spm#SPM_0000015>

SELECT DISTINCT * WHERE {
    {
""" + oid_var + """ a nidm_Inference: .
    } UNION {
""" + oid_var + """ a nidm_ConjunctionInference: .
    } UNION {
""" + oid_var + """ a spm_PartialConjunctionInference: .
    }

""" + oid_var + """ rdfs:label ?label ;
        a ?inference_type ;
        nidm_hasAlternativeHypothesis: ?tail .

    OPTIONAL {""" + oid_var + """ spm_PartialConjunctionDegree: ?partial_degree .} .

    FILTER ( ?inference_type NOT IN (prov:Activity))
}
        """
        return query

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        # In FSL we have a single thresholding (extent, height) applied to all
        # contrasts
        # FIXME: Deal with two-tailed inference?
        atts = (
            (PROV['type'], self.type),
            (PROV['label'], self.label),
            (NIDM_HAS_ALTERNATIVE_HYPOTHESIS, self.tail))

        if self.partial_degree is not None:
            atts += (
                (SPM_PARTIAL_CONJUNCTION_DEGREE, self.partial_degree),)

        self.add_attributes(atts)
class ExcursionSet(NIDMObject):
    """
    Object representing an ExcursionSet (supra-threshold map) entity.
    """

    def __init__(self, location, coord_space, visu=None,
                 oid=None, fmt=None, label=None,
                 sha=None, filename=None, inference=None, suffix='',
                 clust_map=None, mip=None, num_clusters=None, p_value=None):
        """Create an excursion-set entity backed by a NIfTI file at
        *location* within coordinate space *coord_space*; remaining
        arguments attach optional metadata (visualisation, cluster
        labels map, MIP, cluster count, set-level p-value)."""
        super(ExcursionSet, self).__init__(oid)
        if not filename:
            filename = 'ExcursionSet' + suffix + '.nii.gz'
        else:
            # NOTE(review): a caller-supplied *filename* is discarded here and
            # replaced by *location* (ClusterLabelsMap keeps the supplied
            # filename instead) — confirm this asymmetry is intended.
            filename = location
        self.filename = filename
        self.file = NIDMFile(self.id, location, filename, sha)
        self.type = NIDM_EXCURSION_SET_MAP
        self.prov_type = PROV['Entity']
        self.visu = visu
        if label is None:
            label = "Excursion Set Map"
        self.label = label
        self.coord_space = coord_space
        self.clust_map = clust_map
        self.mip = mip
        # FIXME Not used for export yet (only for reading)
        self.inference = inference
        self.num_clusters = num_clusters
        self.p_value = p_value

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query retrieving one (or any) excursion-set
        map with its file attributes and optional cluster count and
        p-value."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"

        query = """
prefix nidm_ExcursionSetMap: <http://purl.org/nidash/nidm#NIDM_0000025>
prefix nidm_hasClusterLabelsMap: <http://purl.org/nidash/nidm#NIDM_0000098>
prefix nidm_hasMaximumIntensityProjection: <http://purl.org/nidash/nidm#NIDM_0\
000138>
prefix nidm_inCoordinateSpace: <http://purl.org/nidash/nidm#NIDM_0000104>
prefix nidm_numberOfSupraThresholdClusters: <http://purl.org/nidash/nidm#NIDM_\
0000111>
prefix nidm_pValue: <http://purl.org/nidash/nidm#NIDM_0000114>

SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ExcursionSetMap: ;
    prov:atLocation ?location ;
    rdfs:label ?label ;
    dct:format ?fmt ;
    nfo:fileName ?filename ;
    crypto:sha512 ?sha .

    OPTIONAL {""" + oid_var + """ nidm_numberOfSupraThresholdClusters: ?num_clusters .} .
    OPTIONAL {""" + oid_var + """ nidm_pValue: ?p_value .} .
}
ORDER BY ?peak_label
        """
        return query

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        # Create "Excursion set" entity
        self.add_attributes((
            (PROV['type'], self.type),
            (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
            (PROV['label'], self.label),
        ))

        # Each optional piece of metadata is attached only when present.
        if self.visu is not None:
            self.add_attributes((
                (DC['description'], self.visu.id),
            ))

        if self.clust_map is not None:
            self.add_attributes((
                (NIDM_HAS_CLUSTER_LABELS_MAP, self.clust_map.id),
            ))

        if self.mip is not None:
            self.add_attributes((
                (NIDM_HAS_MAXIMUM_INTENSITY_PROJECTION, self.mip.id),
            ))

        if self.num_clusters is not None:
            self.add_attributes((
                (NIDM_NUMBER_OF_CLUSTERS, self.num_clusters),
            ))

        if self.p_value is not None:
            self.add_attributes((
                (NIDM_P_VALUE, self.p_value),
            ))
class ClusterLabelsMap(NIDMObject):
    """
    Object representing a ClusterLabelsMap entity (a NIfTI volume in
    which each voxel carries the label of the cluster it belongs to).
    """

    def __init__(self, location, coord_space,
                 oid=None, fmt=None, label=None,
                 sha=None, filename=None, suffix='', temporary=False):
        """Create a cluster-labels-map entity for the file at *location*
        in *coord_space*.  *temporary* is forwarded to NIDMFile so
        scratch files can be cleaned up after export."""
        super(ClusterLabelsMap, self).__init__(oid)
        if not filename:
            filename = 'ClusterLabels' + suffix + '.nii.gz'
        self.filename = filename
        self.file = NIDMFile(self.id, location, filename, sha,
                             temporary=temporary)
        self.type = NIDM_CLUSTER_LABELS_MAP
        self.prov_type = PROV['Entity']
        if label is None:
            label = "Cluster Labels Map"
        self.label = label
        self.coord_space = coord_space

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query retrieving one (or any) cluster labels
        map with its file attributes."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"

        query = """
prefix nidm_ClusterLabelsMap: <http://purl.org/nidash/nidm#NIDM_0000008>

SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterLabelsMap: ;
    nfo:fileName ?filename ;
    crypto:sha512 ?sha ;
    prov:atLocation ?location ;
    dct:format ?fmt .
}
        """
        return query

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        # Create "Cluster Labels Map" entity
        self.add_attributes((
            (PROV['type'], self.type),
            (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
            (PROV['label'], self.label)
        ))
class HeightThreshold(NIDMObject):
    """
    Object representing a HeightThreshold entity.

    The threshold can be given as a statistic value (``stat_threshold``),
    an FWE-corrected p-value (``p_corr_threshold``), an uncorrected p-value
    (``p_uncorr_threshold``), or directly as a ``value`` together with an
    explicit ``threshold_type``.  At least one of these must be provided.

    For NIDM-Results 1.0.0 the threshold is described with a user-specified
    type string and the raw thresholds; for later versions it is described
    with a prov type and a single value.
    """

    def __init__(self, stat_threshold=None, p_corr_threshold=None,
                 p_uncorr_threshold=None, threshold_type=None, value=None,
                 label=None, version=None, oid=None,
                 equiv_thresh=None):
        super(HeightThreshold, self).__init__(oid=oid)
        # Avoid a mutable default argument; the default is the same dict as
        # before ({'num': '1.3.0'}).
        if version is None:
            version = {'num': '1.3.0'}
        if not stat_threshold and not p_corr_threshold and \
                not p_uncorr_threshold and not value:
            raise Exception('No threshold defined')

        if isinstance(threshold_type, str):
            threshold_type = Identifier(threshold_type)

        # Bug fix: user_threshold_type was previously unbound (NameError)
        # when only `value`/`threshold_type` were supplied with version
        # 1.0.0; initialise it so the assignment below is always safe.
        user_threshold_type = None
        thresh_desc = ""
        if stat_threshold is not None:
            thresh_desc = "Z>" + str(stat_threshold)
            if version['num'] == "1.0.0":
                user_threshold_type = "Z-Statistic"
            else:
                threshold_type = OBO_STATISTIC
                value = stat_threshold
        elif p_uncorr_threshold is not None:
            thresh_desc = "p<" + \
                str(p_uncorr_threshold) + " (uncorrected)"
            if version['num'] == "1.0.0":
                user_threshold_type = "p-value uncorrected"
            else:
                threshold_type = NIDM_P_VALUE_UNCORRECTED_CLASS
                value = p_uncorr_threshold
        elif p_corr_threshold is not None:
            thresh_desc = "p<" + str(p_corr_threshold) + " (FWE)"
            if version['num'] == "1.0.0":
                user_threshold_type = "p-value FWE"
            else:
                threshold_type = OBO_P_VALUE_FWER
                value = p_corr_threshold

        if version['num'] == "1.0.0":
            # 1.0.0: keep the raw thresholds and a descriptive type string.
            self.user_threshold_type = user_threshold_type
            self.p_uncorr_threshold = p_uncorr_threshold
            self.p_corr_threshold = p_corr_threshold
            self.stat_threshold = stat_threshold
        else:
            # Later versions: a single value and a prov threshold type.
            self.value = value
            self.threshold_type = threshold_type

        if not label:
            self.label = "Height Threshold: " + thresh_desc
        else:
            self.label = label
        self.type = NIDM_HEIGHT_THRESHOLD
        self.prov_type = PROV['Entity']
        # Other thresholds expressing the same cut-off (optional list).
        self.equiv_thresh = equiv_thresh

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_HeightThreshold: <http://purl.org/nidash/nidm#NIDM_0000034>
prefix nidm_hasAlternativeHypothesis: <http://purl.org/nidash/nidm#NIDM_000009\
7>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_HeightThreshold: ;
a ?threshold_type ;
rdfs:label ?label ;
prov:value ?value .
FILTER ( ?threshold_type NOT IN (prov:Entity, nidm_HeightThreshold:) )
}
"""
        return query

    def export(self, version, export_dir):
        """
        Create prov entities and activities.

        Emits version-dependent attributes; None-valued attributes are
        filtered out before being attached.
        """
        atts = [
            (PROV['type'], self.type),
            (PROV['label'], self.label),
        ]
        if version['num'] == "1.0.0":
            atts += [
                (NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
                (PROV['value'], self.stat_threshold),
                (NIDM_P_VALUE_UNCORRECTED, self.p_uncorr_threshold),
                (NIDM_P_VALUE_FWER, self.p_corr_threshold)
            ]
        else:
            atts += [
                (PROV['type'], self.threshold_type),
                (PROV['value'], self.value)
            ]
        if self.equiv_thresh is not None:
            for equiv in self.equiv_thresh:
                atts += [
                    (NIDM_EQUIVALENT_THRESHOLD, equiv.id)
                ]
        # Drop attributes whose value is None.
        self.add_attributes([(k, v) for k, v in atts if v is not None])
class ExtentThreshold(NIDMObject):
    """
    Object representing an ExtentThreshold entity.

    The threshold can be given as a cluster extent in voxels (``extent``),
    an FWE-corrected p-value (``p_corr``), an uncorrected p-value
    (``p_uncorr``), or directly via an explicit ``threshold_type`` (and
    optional ``value``).  When nothing is provided a null threshold of
    k>=0 is used.
    """

    def __init__(self, extent=None, p_corr=None, p_uncorr=None,
                 extent_rsl=None, label=None, version=None,
                 value=None, oid=None, equiv_thresh=None, threshold_type=None):
        super(ExtentThreshold, self).__init__(oid=oid)
        # Avoid a mutable default argument; the default is the same dict as
        # before ({'num': '1.3.0'}).
        if version is None:
            version = {'num': '1.3.0'}
        self.type = NIDM_EXTENT_THRESHOLD
        self.prov_type = PROV['Entity']

        # Bug fix: user_threshold_type was previously unbound (NameError)
        # when an explicit threshold_type was supplied with version 1.0.0.
        user_threshold_type = None
        thresh_desc = ""
        if threshold_type is not None:
            self.threshold_type = threshold_type
        else:
            if extent is not None:
                thresh_desc = "k>" + str(extent)
                # NIDM-Results 1.0.0
                user_threshold_type = "Cluster-size in voxels"
                # NIDM-Results > 1.0.0
                threshold_type = OBO_STATISTIC
            elif p_uncorr is not None:
                # Bug fix: this used to read self.p_uncorr, which is not
                # assigned until later in __init__ (AttributeError).
                thresh_desc = "p<" + str(p_uncorr) + " (uncorrected)"
                # NIDM-Results 1.0.0
                user_threshold_type = "p-value uncorrected"
                # NIDM-Results > 1.0.0
                threshold_type = NIDM_P_VALUE_UNCORRECTED_CLASS
                value = p_uncorr
            elif p_corr is not None:
                thresh_desc = "p<" + str(p_corr) + " (FWE)"
                # NIDM-Results 1.0.0
                user_threshold_type = "p-value FWE"
                # NIDM-Results > 1.0.0
                threshold_type = OBO_P_VALUE_FWER
                value = p_corr
            else:
                # No threshold given: null extent threshold of zero voxels.
                thresh_desc = "k>=0"
                extent = 0
                if version['num'] == "1.0.0":
                    p_uncorr = 1.0
                    p_corr = 1.0
                    user_threshold_type = None
                else:
                    threshold_type = OBO_STATISTIC
            self.threshold_type = threshold_type

        # Always record the value so export() can rely on the attribute
        # existing, whichever branch was taken above.
        self.value = value

        if version['num'] == "1.0.0":
            self.user_threshold_type = user_threshold_type
            self.p_uncorr = p_uncorr
            self.p_corr = p_corr
        else:
            self.threshold_type = threshold_type

        self.extent = extent
        self.extent_rsl = extent_rsl

        if label is None:
            self.label = "Extent Threshold: " + thresh_desc
        else:
            self.label = label
        # Other thresholds expressing the same cut-off (optional list).
        self.equiv_thresh = equiv_thresh

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_ExtentThreshold: <http://purl.org/nidash/nidm#NIDM_0000026>
prefix nidm_clusterSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000084>
prefix nidm_clusterSizeInResels: <http://purl.org/nidash/nidm#NIDM_0000156>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ExtentThreshold: ;
a ?threshold_type ;
rdfs:label ?label .
OPTIONAL {""" + oid_var + """ prov:value ?value .} .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInVoxels: ?extent .} .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInResels: ?extent_rsl .} .
FILTER ( ?threshold_type NOT IN (prov:Entity, nidm_ExtentThreshold:) )
}
"""
        return query

    def export(self, version, export_dir):
        """
        Create prov entities and activities.

        Emits version-dependent attributes; None-valued attributes are
        filtered out before being attached.
        """
        atts = [
            (PROV['type'], self.type),
        ]
        atts += [
            (PROV['label'], self.label)
        ]
        if self.extent_rsl is not None:
            atts += [
                (NIDM_CLUSTER_SIZE_IN_RESELS, self.extent_rsl),
            ]
        if self.extent is not None:
            atts += [
                (NIDM_CLUSTER_SIZE_IN_VOXELS, self.extent),
            ]
        if version['num'] == "1.0.0":
            atts += [
                (NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
                (NIDM_P_VALUE_UNCORRECTED, self.p_uncorr),
                (NIDM_P_VALUE_FWER, self.p_corr)
            ]
        else:
            atts += [
                (PROV['type'], self.threshold_type)
            ]
            if self.value is not None:
                atts += [
                    (PROV['value'], self.value)
                ]
        if self.equiv_thresh is not None:
            for equiv in self.equiv_thresh:
                atts += [
                    (NIDM_EQUIVALENT_THRESHOLD, equiv.id)
                ]
        # Drop attributes whose value is None.
        self.add_attributes([(k, v) for k, v in atts if v is not None])
class Cluster(NIDMObject):
    """
    Object representing a Cluster (supra-threshold cluster) entity.
    """

    def __init__(self, cluster_num, size, pFWER, peaks,
                 x=None, y=None, z=None, x_std=None, y_std=None, z_std=None,
                 suffix='', clust_size_resels=None, pFDR=None, punc=None,
                 label=None, oid=None, cog=None):
        super(Cluster, self).__init__(oid=oid)
        # Cluster index: used as nidm_clusterLabelId and in the default label
        self.num = cluster_num
        if cog is not None:
            self.cog = cog
        else:
            # NOTE(review): truthiness test means a coordinate equal to 0 is
            # treated as missing — confirm this is intended.
            if x and y and z:
                self.cog = CenterOfGravity(
                    cluster_num, x=x, y=y, z=z, x_std=x_std, y_std=y_std,
                    z_std=z_std)
            else:
                self.cog = None
        self.peaks = peaks
        self.size = size
        self.pFWER = pFWER
        self.type = NIDM_SIGNIFICANT_CLUSTER
        self.prov_type = PROV['Entity']
        self.punc = punc
        self.pFDR = pFDR
        if not label:
            cluster_naming = "Supra-Threshold Cluster"
            self.label = "%s %04d" % (cluster_naming, self.num)
        else:
            self.label = label
        self.clust_size_resels = clust_size_resels

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_SupraThresholdCluster: <http://purl.org/nidash/nidm#NIDM_0000070>
prefix nidm_clusterSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000084>
prefix nidm_clusterLabelId: <http://purl.org/nidash/nidm#NIDM_0000082>
prefix nidm_clusterSizeInResels: <http://purl.org/nidash/nidm#NIDM_0000156>
prefix nidm_pValueUncorrected: <http://purl.org/nidash/nidm#NIDM_0000116>
prefix nidm_pValueFWER: <http://purl.org/nidash/nidm#NIDM_0000115>
prefix nidm_qValueFDR: <http://purl.org/nidash/nidm#NIDM_0000119>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_SupraThresholdCluster: ;
rdfs:label ?label ;
nidm_clusterSizeInVoxels: ?size ;
nidm_clusterLabelId: ?cluster_num .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInResels: ?clust_size_resels .} .
OPTIONAL {""" + oid_var + """ nidm_pValueUncorrected: ?punc .} .
OPTIONAL {""" + oid_var + """ nidm_pValueFWER: ?pFWER .} .
OPTIONAL {""" + oid_var + """ nidm_qValueFDR: ?pFDR .} .
}
"""
        return query

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.

        p-values are exported as xsd:float literals; attributes that were
        not provided are omitted.
        """
        # NIDM-Results < 1.2.0 labelled these clusters "Significant" rather
        # than "Supra-Threshold".
        if nidm_version['num'] in ["1.0.0", "1.1.0"]:
            self.label = self.label.replace("Supra-Threshold", "Significant")
        # FIXME deal with multiple contrasts
        atts = (
            (PROV['type'], NIDM_SIGNIFICANT_CLUSTER),
            (PROV['label'], self.label),
            (NIDM_CLUSTER_LABEL_ID, self.num),
            (NIDM_CLUSTER_SIZE_IN_VOXELS, self.size)
        )
        if self.clust_size_resels is not None:
            atts = atts + (
                (NIDM_CLUSTER_SIZE_IN_RESELS, self.clust_size_resels),
            )
        if self.punc is not None:
            atts = atts + (
                (NIDM_P_VALUE_UNCORRECTED,
                 Literal(self.punc, datatype=XSD_FLOAT)),
            )
        if self.pFDR is not None:
            atts = atts + (
                (NIDM_Q_VALUE_FDR, Literal(self.pFDR, datatype=XSD_FLOAT)),
            )
        if self.pFWER is not None:
            atts = atts + (
                (NIDM_P_VALUE_FWER, Literal(self.pFWER, datatype=XSD_FLOAT)),
            )
        self.add_attributes(atts)
class DisplayMaskMap(NIDMObject):
    """
    Object representing a DisplayMaskMap entity.
    """

    def __init__(self, contrast_num, mask_file, mask_num, coord_space,
                 sha=None, filename=None, fmt=None, label=None, oid=None,
                 derfrom_id=None, derfrom_filename=None, derfrom_fmt=None,
                 derfrom_sha=None, isderfrommap=False):
        super(DisplayMaskMap, self).__init__(oid=oid)
        # Default file name is derived from the mask index
        if not filename:
            filename = 'DisplayMask' + str(mask_num) + '.nii.gz'
        self.file = NIDMFile(self.id, mask_file, filename,
                             sha=sha, fmt=fmt)
        self.coord_space = coord_space
        self.type = NIDM_DISPLAY_MASK_MAP
        self.prov_type = PROV['Entity']
        if not label:
            self.label = "Display Mask Map " + str(mask_num)
        else:
            self.label = label
        if derfrom_id is not None:
            # The map this one was derived from, modelled as another
            # DisplayMaskMap flagged with isderfrommap=True so that export()
            # only emits its prov type (no label or coordinate space).
            self.derfrom = DisplayMaskMap(
                None, None, None,
                coord_space=None, oid=derfrom_id,
                filename=derfrom_filename, sha=derfrom_sha,
                fmt=derfrom_fmt,
                isderfrommap=True)
        else:
            self.derfrom = None
        self.isderfrommap = isderfrommap

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_DisplayMaskMap: <http://purl.org/nidash/nidm#NIDM_0000020>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_DisplayMaskMap: ;
rdfs:label ?label ;
nfo:fileName ?filename ;
crypto:sha512 ?sha ;
prov:atLocation ?mask_file ;
dct:format ?fmt .
OPTIONAL {""" + oid_var + """ prov:wasDerivedFrom ?derfrom_id .
?derfrom_id a nidm_DisplayMaskMap: ;
nfo:fileName ?derfrom_filename ;
dct:format ?derfrom_fmt ;
crypto:sha512 ?derfrom_sha .
} .
}
"""
        return query

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        atts = (
            (PROV['type'], self.type),
        )
        # Derived-from placeholder instances carry only the prov type;
        # full instances also get a coordinate space and label.
        if not self.isderfrommap:
            atts = atts + (
                (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
                (PROV['label'], self.label))
        self.add_attributes(atts)
class PeakCriteria(NIDMObject):
    """
    Object representing a PeakCriteria entity: the criteria used to define
    peaks (minimum distance between peaks and, optionally, the maximum
    number of peaks reported per cluster).
    """

    def __init__(self, contrast_num, peak_dist, num_peak=None, label=None,
                 oid=None):
        super(PeakCriteria, self).__init__(oid=oid)
        self.num_peak = num_peak
        self.peak_dist = peak_dist
        self.type = NIDM_PEAK_DEFINITION_CRITERIA
        self.prov_type = PROV['Entity']
        self.label = label if label else "Peak Definition Criteria"

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        oid_var = "?oid" if oid is None else "<" + str(oid) + ">"
        query = """
prefix nidm_PeakDefinitionCriteria: <http://purl.org/nidash/nidm#NIDM_0000063>
prefix nidm_minDistanceBetweenPeaks: <http://purl.org/nidash/nidm#NIDM_0000109>
prefix nidm_maxNumberOfPeaksPerCluster: <http://purl.org/nidash/nidm#NIDM_0000\
108>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_PeakDefinitionCriteria: ;
rdfs:label ?label ;
nidm_minDistanceBetweenPeaks: ?peak_dist .
OPTIONAL { """ + oid_var + """ nidm_maxNumberOfPeaksPerCluster: ?num_peak .} .
}
"""
        return query

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        # Create "Peak definition criteria" entity; the per-cluster peak
        # limit is only attached when it was provided.
        attributes = [
            (PROV['type'], self.type),
            (PROV['label'], self.label),
            (NIDM_MIN_DISTANCE_BETWEEN_PEAKS, self.peak_dist),
        ]
        if self.num_peak:
            attributes.append(
                (NIDM_MAX_NUMBER_OF_PEAKS_PER_CLUSTER, self.num_peak))
        self.add_attributes(attributes)
class ClusterCriteria(NIDMObject):
    """
    Object representing a ClusterCriteria entity: the voxel connectivity
    criterion (6-, 18- or 26-connected) used to define clusters.
    """

    def __init__(self, contrast_num, connectivity, label=None, oid=None):
        super(ClusterCriteria, self).__init__(oid=oid)
        self.connectivity = connectivity
        self.type = NIDM_CLUSTER_DEFINITION_CRITERIA
        self.prov_type = PROV['Entity']
        if not label:
            self.label = ("Cluster Connectivity Criterion: " +
                          str(self.connectivity))
        else:
            self.label = label

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_ClusterDefinitionCriteria: <http://purl.org/nidash/nidm#NIDM_00000\
07>
prefix nidm_hasConnectivityCriterion: <http://purl.org/nidash/nidm#NIDM_000009\
9>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterDefinitionCriteria: ;
rdfs:label ?label ;
nidm_hasConnectivityCriterion: ?connectivity .
}
"""
        return query

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        # Create "Cluster definition criteria" entity.
        # Map an integer connectivity (6/18/26) to the matching NIDM term.
        if isinstance(self.connectivity, int):
            if self.connectivity == 6:
                self.connectivity = NIDM_VOXEL6CONNECTED
            elif self.connectivity == 18:
                self.connectivity = NIDM_VOXEL18CONNECTED
            elif self.connectivity == 26:
                self.connectivity = NIDM_VOXEL26CONNECTED
        # FIXME if connectivity is missing
        if self.connectivity is not None:
            atts = (
                (PROV['type'], self.type),
                (PROV['label'], self.label),
                (NIDM_HAS_CONNECTIVITY_CRITERION, self.connectivity))
        else:
            # Bug fix: this branch previously referenced an undefined local
            # name `label` (NameError); use self.label instead.
            atts = (
                (PROV['type'], NIDM_CLUSTER_DEFINITION_CRITERIA),
                (PROV['label'], self.label))
        self.add_attributes(atts)
class CenterOfGravity(NIDMObject):
    """
    Object representing a CenterOfGravity entity: the centre of gravity of
    a cluster, located at a Coordinate.
    """

    def __init__(self, cluster_num, x=None, y=None, z=None, x_std=None,
                 y_std=None, z_std=None, oid=None, coord_vector=None,
                 coord_vector_std=None, label=None, coord_id=None):
        # coord_id is accepted only for compatibility with query outputs.
        super(CenterOfGravity, self).__init__(oid=oid)
        self.cluster_num = cluster_num
        self.coordinate = Coordinate("%04d" % cluster_num, x=x, y=y, z=z,
                                     x_std=x_std, y_std=y_std, z_std=z_std,
                                     coord_vector_std=coord_vector_std,
                                     coord_vector=coord_vector, oid=coord_id)
        self.type = NIDM_CLUSTER_CENTER_OF_GRAVITY
        self.prov_type = PROV['Entity']
        self.label = ("Center of gravity " + str(self.cluster_num)
                      if label is None else label)

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        oid_var = "?oid" if oid is None else "<" + str(oid) + ">"
        query = """
prefix nidm_ClusterCenterOfGravity: <http://purl.org/nidash/nidm#NIDM_0000140>
prefix nidm_coordinateVector: <http://purl.org/nidash/nidm#NIDM_0000086>
prefix nidm_coordinateVectorInVoxels: <http://purl.org/nidash/nidm#NIDM_000013\
9>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterCenterOfGravity: ;
rdfs:label ?label ;
prov:atLocation ?coord_id .
?coord_id a nidm_Coordinate: ;
nidm_coordinateVector: ?coord_vector_std .
OPTIONAL { ?coord_id nidm_coordinateVectorInVoxels: ?coord_vector .} .
}
"""
        return query

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        attributes = (
            (PROV['type'], self.type),
            (PROV['label'], self.label),
            (PROV['location'], self.coordinate.id),
        )
        self.add_attributes(attributes)
class SearchSpace(NIDMObject):
    """
    Object representing a SearchSpace entity: the mask defining the search
    volume, together with its random-field-theory statistics.
    """

    def __init__(self, search_space_file, vol_in_voxels, vol_in_units,
                 vol_in_resels, resel_size_in_voxels,
                 random_field_stationarity, noise_fwhm_in_voxels,
                 noise_fwhm_in_units, coord_space,
                 expected_num_voxels=None, expected_num_clusters=None,
                 height_critical_fwe05=None, height_critical_fdr05=None,
                 extent_critical_fwe05=None, extent_critical_fdr05=None,
                 search_vol_geom=None, noise_roughness=None,
                 filename=None, sha=None, fmt=None,
                 label=None, oid=None):
        super(SearchSpace, self).__init__(oid=oid)
        # Default file name for the search space mask
        if not filename:
            filename = 'SearchSpaceMask.nii.gz'
        self.file = NIDMFile(self.id, search_space_file, filename,
                             sha=sha, fmt=fmt)
        self.coord_space = coord_space
        # Search volume expressed in three units (voxels, real-world units,
        # resels) plus the resel size used for the conversion.
        self.resel_size_in_voxels = resel_size_in_voxels
        self.search_volume_in_voxels = vol_in_voxels
        self.search_volume_in_units = vol_in_units
        self.search_volume_in_resels = vol_in_resels
        self.rf_stationarity = random_field_stationarity
        self.noise_fwhm_in_voxels = noise_fwhm_in_voxels
        self.noise_fwhm_in_units = noise_fwhm_in_units
        self.type = NIDM_SEARCH_SPACE_MASK_MAP
        self.prov_type = PROV['Entity']
        self.label = "Search Space Mask Map"
        # Optional RFT-derived statistics (expected counts, critical
        # thresholds, resel geometry, noise roughness)
        self.expected_num_voxels = expected_num_voxels
        self.expected_num_clusters = expected_num_clusters
        self.height_critical_fwe05 = height_critical_fwe05
        self.height_critical_fdr05 = height_critical_fdr05
        self.extent_critical_fwe05 = extent_critical_fwe05
        self.extent_critical_fdr05 = extent_critical_fdr05
        self.search_vol_geom = search_vol_geom
        self.noise_roughness = noise_roughness

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        # NOTE(review): the query below repeats the nidm_reselSizeInVoxels:
        # triple pattern twice — harmless in SPARQL but could be deduplicated.
        query = """
prefix nidm_SearchSpaceMaskMap: <http://purl.org/nidash/nidm#NIDM_0000068>
prefix nidm_expectedNumberOfVoxelsPerCluster: <http://purl.org/nidash/nidm#NID\
M_0000143>
prefix nidm_expectedNumberOfClusters: <http://purl.org/nidash/nidm#NIDM_000014\
1>
prefix nidm_heightCriticalThresholdFWE05: <http://purl.org/nidash/nidm#NIDM_00\
00147>
prefix nidm_heightCriticalThresholdFDR05: <http://purl.org/nidash/nidm#NIDM_00\
00146>
prefix nidm_searchVolumeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000121>
prefix nidm_searchVolumeInUnits: <http://purl.org/nidash/nidm#NIDM_0000136>
prefix nidm_searchVolumeInResels: <http://purl.org/nidash/nidm#NIDM_0000149>
prefix nidm_reselSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000148>
prefix nidm_noiseFWHMInVoxels: <http://purl.org/nidash/nidm#NIDM_0000159>
prefix nidm_noiseFWHMInUnits: <http://purl.org/nidash/nidm#NIDM_0000157>
prefix nidm_randomFieldStationarity: <http://purl.org/nidash/nidm#NIDM_0000120>
prefix spm_smallestSignificantClusterSizeInVoxelsFWE05: <http://purl.org/nidas\
h/spm#SPM_0000014>
prefix spm_smallestSignificantClusterSizeInVoxelsFDR05: <http://purl.org/nidas\
h/spm#SPM_0000013>
prefix spm_searchVolumeReselsGeometry: <http://purl.org/nidash/spm#SPM_0000010>
prefix nidm_noiseRoughnessInVoxels: <http://purl.org/nidash/nidm#NIDM_0000145>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_SearchSpaceMaskMap: ;
rdfs:label ?label ;
nidm_searchVolumeInVoxels: ?vol_in_voxels ;
nidm_searchVolumeInUnits: ?vol_in_units ;
nidm_searchVolumeInResels: ?vol_in_resels ;
nidm_reselSizeInVoxels: ?resel_size_in_voxels ;
nidm_reselSizeInVoxels: ?resel_size_in_voxels ;
nidm_noiseFWHMInVoxels: ?noise_fwhm_in_voxels ;
nidm_noiseFWHMInUnits: ?noise_fwhm_in_units ;
nidm_randomFieldStationarity: ?random_field_stationarity ;
prov:atLocation ?search_space_file ;
dct:format ?fmt ;
nfo:fileName ?filename ;
crypto:sha512 ?sha .
OPTIONAL {""" + oid_var + """ nidm_expectedNumberOfVoxelsPerCluster: ?expected_num_voxels } .
OPTIONAL {""" + oid_var + """ nidm_expectedNumberOfClusters: ?expected_num_clusters } .
OPTIONAL {""" + oid_var + """ nidm_heightCriticalThresholdFWE05: ?height_critical_fwe05 } .
OPTIONAL {""" + oid_var + """ nidm_heightCriticalThresholdFDR05: ?height_critical_fdr05 } .
OPTIONAL {""" + oid_var + """ spm_smallestSignificantClusterSizeInVoxelsFWE05: ?extent_critical_fwe05 } .
OPTIONAL {""" + oid_var + """ spm_smallestSignificantClusterSizeInVoxelsFDR05: ?extent_critical_fdr05 } .
OPTIONAL {""" + oid_var + """ spm_searchVolumeReselsGeometry: ?search_vol_geom } .
OPTIONAL {""" + oid_var + """ nidm_noiseRoughnessInVoxels: ?noise_roughness } .
}
"""
        return query

    # Generate prov for search space entity generated by the inference activity
    def export(self, version, export_dir):
        """
        Create prov entities and activities.

        Mandatory search-volume attributes are always attached; each
        optional RFT statistic is attached only when provided.
        """
        atts = (
            (PROV['label'], self.label),
            (PROV['type'], NIDM_SEARCH_SPACE_MASK_MAP),
            (NIDM_RANDOM_FIELD_STATIONARITY, self.rf_stationarity),
            (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
            (NIDM_SEARCH_VOLUME_IN_VOXELS, self.search_volume_in_voxels),
            (NIDM_SEARCH_VOLUME_IN_UNITS, self.search_volume_in_units),
            (NIDM_SEARCH_VOLUME_IN_RESELS, self.search_volume_in_resels),
            (NIDM_RESEL_SIZE_IN_VOXELS, self.resel_size_in_voxels))
        # Noise FWHM was introduced in NIDM-Results 1.1.0
        if self.noise_fwhm_in_voxels is not None:
            if (version['major'] > 1) or \
                    (version['major'] >= 1 and
                     (version['minor'] > 0 or version['revision'] > 0)):
                atts = atts + (
                    (NIDM_NOISE_FWHM_IN_VOXELS, self.noise_fwhm_in_voxels),
                    (NIDM_NOISE_FWHM_IN_UNITS, self.noise_fwhm_in_units))
        if self.expected_num_voxels is not None:
            atts = atts + ((NIDM_EXPECTED_NUMBER_OF_VOXELS_PER_CLUSTER,
                            self.expected_num_voxels),)
        if self.expected_num_clusters is not None:
            atts = atts + ((NIDM_EXPECTED_NUMBER_OF_CLUSTERS,
                            self.expected_num_clusters),)
        if self.height_critical_fwe05 is not None:
            atts = atts + ((NIDM_HEIGHT_CRITICAL_THRESHOLD_FWE_05,
                            self.height_critical_fwe05),)
        if self.height_critical_fdr05 is not None:
            atts = atts + ((NIDM_HEIGHT_CRITICAL_THRESHOLD_FDR_05,
                            self.height_critical_fdr05),)
        if self.extent_critical_fwe05 is not None:
            atts = atts + ((
                SPM_SMALLEST_SIGNIFICANT_CLUSTER_SIZE_IN_VOXELS_FWE05,
                self.extent_critical_fwe05),)
        if self.extent_critical_fdr05 is not None:
            atts = atts + ((
                SPM_SMALLEST_SIGNIFICANT_CLUSTER_SIZE_IN_VOXELS_FDR05,
                self.extent_critical_fdr05),)
        if self.search_vol_geom is not None:
            atts = atts + ((SPM_SEARCH_VOLUME_RESELS_GEOMETRY,
                            self.search_vol_geom),)
        # NOTE(review): truthiness test (not `is not None`) — a roughness of
        # exactly 0 would be skipped; confirm intended.
        if self.noise_roughness:
            atts = atts + ((NIDM_NOISE_ROUGHNESS_IN_VOXELS,
                            self.noise_roughness),)
        # Create "Search Space Mask map" entity
        self.add_attributes(atts)
class Coordinate(NIDMObject):
    """
    Object representing a Coordinate entity.

    Holds a coordinate both in voxel space (``coord_vector``) and in
    standard/world space (``coord_vector_std``).  Each vector can be given
    either explicitly as x/y/z components, as a list, or as a JSON-encoded
    string (as read back from a NIDM document).
    """

    def __init__(self, label_id, coord_vector=None, coord_vector_std=None,
                 x=None, y=None, z=None, x_std=None, y_std=None, z_std=None,
                 label=None, oid=None):
        super(Coordinate, self).__init__(oid=oid)
        self.label_id = label_id
        if x is not None and y is not None and z is not None:
            self.coord_vector = [x, y, z]
        else:
            # Vectors loaded from a document arrive as JSON strings;
            # idiom fix: isinstance instead of `type(...) is list` (also
            # accepts list subclasses).
            if coord_vector and not isinstance(coord_vector, list):
                coord_vector = json.loads(coord_vector)
            self.coord_vector = coord_vector
        if x_std is not None and y_std is not None and z_std is not None:
            self.coord_vector_std = [x_std, y_std, z_std]
        else:
            if coord_vector_std and not isinstance(coord_vector_std, list):
                coord_vector_std = json.loads(coord_vector_std)
            self.coord_vector_std = coord_vector_std
        self.type = NIDM_COORDINATE
        self.prov_type = PROV['Entity']
        if label is not None:
            self.label = label
        else:
            # NOTE(review): assumes label_id is a string (callers pass str)
            self.label = "Coordinate " + self.label_id

    def __str__(self):
        return '%s\t%s' % (self.label, self.coord_vector)

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.
        """
        # We can not have this as a dictionnary because we want to keep the
        # duplicate prov:type attribute
        atts = (  # (PROV['type'],PROV['Location']),
            (PROV['type'], NIDM_COORDINATE),
            (PROV['type'], PROV['Location']),
            (PROV['label'], self.label)
        )
        if self.coord_vector is not None:
            atts = atts +\
                ((NIDM_COORDINATE_VECTOR_IN_VOXELS,
                  json.dumps(self.coord_vector)),)
        # FSL unnormalised subject-level analyses do not provide coordinates in
        # voxels
        if self.coord_vector_std is not None:
            atts = atts +\
                ((NIDM_COORDINATE_VECTOR, json.dumps(self.coord_vector_std)),)
        self.add_attributes(atts)
class Peak(NIDMObject):
    """
    Object representing a Peak entity: a local maximum within a cluster,
    located at a Coordinate, with its statistic value and p-values.
    """

    def __init__(self, equiv_z, p_unc=None, p_fwer=None, label=None,
                 coord_label=None, exc_set_id=None, oid=None, suffix='',
                 p_fdr=None, value=None, coord_id=None, *args, **kwargs):
        super(Peak, self).__init__(oid)
        # FIXME: Currently assumes less than 10 clusters per contrast
        # cluster_num = cluster_index
        # FIXME: Currently assumes less than 100 peaks
        if oid is not None:
            # Reloaded from a document: recover the peak id from the label,
            # which is expected to start with "Peak " (5 characters).
            self.label = label
            peak_unique_id = label[5:]
            # NOTE(review): peak_index is assigned but never used.
            peak_index = peak_unique_id
            # cluster_index, peak_index = peak_unique_id.split("_")
        else:
            # Freshly created: the caller-provided suffix identifies the peak.
            peak_unique_id = suffix
            self.label = "Peak " + peak_unique_id
        self.equiv_z = equiv_z
        self.p_unc = p_unc
        self.p_fwer = p_fwer
        # Remaining keyword arguments (x/y/z, coord_vector, ...) are passed
        # through to the Coordinate.
        self.coordinate = Coordinate(
            str(peak_unique_id), label=coord_label, oid=coord_id, **kwargs)
        self.type = NIDM_PEAK
        self.prov_type = PROV['Entity']
        # self.cluster = cluster_id
        self.exc_set_id = exc_set_id
        self.value = value
        self.p_fdr = p_fdr

    @classmethod
    def get_query(klass, oid=None):
        """Return the SPARQL query retrieving this entity's attributes."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_Peak: <http://purl.org/nidash/nidm#NIDM_0000062>
prefix nidm_pValueUncorrected: <http://purl.org/nidash/nidm#NIDM_0000116>
prefix nidm_equivalentZStatistic: <http://purl.org/nidash/nidm#NIDM_0000092>
prefix nidm_pValueFWER: <http://purl.org/nidash/nidm#NIDM_0000115>
prefix nidm_qValueFDR: <http://purl.org/nidash/nidm#NIDM_0000119>
prefix nidm_coordinateVectorInVoxels: <http://purl.org/nidash/nidm#NIDM_000013\
9>
prefix nidm_coordinateVector: <http://purl.org/nidash/nidm#NIDM_0000086>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_Peak: ;
rdfs:label ?label ;
prov:atLocation ?coord_id .
?coord_id a nidm_Coordinate: ;
rdfs:label ?coord_label ;
nidm_coordinateVector: ?coord_vector_std .
OPTIONAL {?coord_id nidm_coordinateVectorInVoxels: ?coord_vector .} .
OPTIONAL {""" + oid_var + """ prov:value ?value .} .
OPTIONAL {""" + oid_var + """ nidm_pValueUncorrected: ?p_unc .} .
OPTIONAL {""" + oid_var + """ nidm_equivalentZStatistic: ?equiv_z .} .
OPTIONAL {""" + oid_var + """ nidm_pValueFWER: ?p_fwer .} .
OPTIONAL {""" + oid_var + """ nidm_qValueFDR: ?p_fdr .} .
}
"""
        return query

    def __str__(self):
        return '%s \tz=%.2f \tp=%.2e (unc.) \t%s' % (
            self.label, self.equiv_z, self.p_unc, str(self.coordinate))

    def export(self, nidm_version, export_dir):
        """
        Create prov entities and activities.

        p-values and z-statistics are exported as xsd:float literals;
        attributes that were not provided are omitted.
        """
        if self.p_unc is None:
            # Derive the uncorrected p-value from the equivalent z-statistic
            # via the standard normal CDF: p = 1 - Phi(z).
            norm_cdf_z = (1.0 + erf(self.equiv_z / sqrt(2.0))) / 2.0
            self.p_unc = 1 - norm_cdf_z
        atts = (
            (PROV['type'], self.type),
            (PROV['label'], self.label),
            (PROV['location'], self.coordinate.id))
        if self.value is not None:
            atts = atts + (
                (PROV['value'], self.value),
            )
        # NOTE(review): p_unc is always non-None here (computed above when
        # missing), so this guard is effectively always true.
        if self.p_unc is not None:
            atts = atts + (
                (NIDM_P_VALUE_UNCORRECTED,
                 Literal(self.p_unc, datatype=XSD_FLOAT)),
            )
        if self.equiv_z is not None:
            atts = atts + (
                (NIDM_EQUIVALENT_ZSTATISTIC,
                 Literal(self.equiv_z, datatype=XSD_FLOAT)),
            )
        if self.p_fdr is not None:
            atts = atts + (
                (NIDM_Q_VALUE_FDR,
                 Literal(self.p_fdr, datatype=XSD_FLOAT)),
            )
        if self.p_fwer is not None:
            atts = atts + (
                (NIDM_P_VALUE_FWER,
                 Literal(self.p_fwer, datatype=XSD_FLOAT)),
            )
        self.add_attributes(atts)
| 34.308374 | 109 | 0.594518 | from nidmresults.objects.constants import *
from nidmresults.objects.generic import *
import uuid
from math import erf, sqrt
import rdflib
from prov.model import Literal
from prov.constants import XSD_FLOAT
from prov.model import Identifier
class Inference(object):
def __init__(
self, inference, height_thresh, extent_thresh,
peak_criteria, cluster_criteria, disp_mask, excursion_set,
clusters, search_space, software_id):
super(Inference, self).__init__()
self.excursion_set = excursion_set
self.inference_act = inference
self.height_thresh = height_thresh
self.extent_thresh = extent_thresh
self.clusters = clusters
self.software_id = software_id
self.peak_criteria = peak_criteria
self.cluster_criteria = cluster_criteria
self.disp_mask = disp_mask
self.search_space = search_space
class InferenceActivity(NIDMObject):
def __init__(self, oid=None, tail=None, label=None, contrast_name=None,
inference_type=None, partial_degree=None):
super(InferenceActivity, self).__init__(oid=oid)
if inference_type is None:
self.type = NIDM_INFERENCE
else:
self.type = inference_type
self.prov_type = PROV['Activity']
if tail is None:
tail = NIDM_ONE_TAILED_TEST
self.tail = tail
if label is None:
label = "Inference"
if contrast_name:
label += ": " + contrast_name
self.label = label
self.partial_degree = partial_degree
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_Inference: <http://purl.org/nidash/nidm#NIDM_0000049>
prefix nidm_ConjunctionInference: <http://purl.org/nidash/nidm#NIDM_0000011>
prefix nidm_hasAlternativeHypothesis: <http://purl.org/nidash/nidm#NIDM_000009\
7>
prefix spm_PartialConjunctionInference: <http://purl.org/nidash/spm#SPM_000000\
5>
prefix spm_PartialConjunctionDegree: <http://purl.org/nidash/spm#SPM_0000015>
SELECT DISTINCT * WHERE {
{
""" + oid_var + """ a nidm_Inference: .
} UNION {
""" + oid_var + """ a nidm_ConjunctionInference: .
} UNION {
""" + oid_var + """ a spm_PartialConjunctionInference: .
}
""" + oid_var + """ rdfs:label ?label ;
a ?inference_type ;
nidm_hasAlternativeHypothesis: ?tail .
OPTIONAL {""" + oid_var + """ spm_PartialConjunctionDegree: ?partial_degree .} .
FILTER ( ?inference_type NOT IN (prov:Activity))
}
"""
return query
def export(self, nidm_version, export_dir):
atts = (
(PROV['type'], self.type),
(PROV['label'], self.label),
(NIDM_HAS_ALTERNATIVE_HYPOTHESIS, self.tail))
if self.partial_degree is not None:
atts += (
(SPM_PARTIAL_CONJUNCTION_DEGREE, self.partial_degree),)
self.add_attributes(atts)
class ExcursionSet(NIDMObject):
def __init__(self, location, coord_space, visu=None,
oid=None, fmt=None, label=None,
sha=None, filename=None, inference=None, suffix='',
clust_map=None, mip=None, num_clusters=None, p_value=None):
super(ExcursionSet, self).__init__(oid)
if not filename:
filename = 'ExcursionSet' + suffix + '.nii.gz'
else:
filename = location
self.filename = filename
self.file = NIDMFile(self.id, location, filename, sha)
self.type = NIDM_EXCURSION_SET_MAP
self.prov_type = PROV['Entity']
self.visu = visu
if label is None:
label = "Excursion Set Map"
self.label = label
self.coord_space = coord_space
self.clust_map = clust_map
self.mip = mip
self.inference = inference
self.num_clusters = num_clusters
self.p_value = p_value
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_ExcursionSetMap: <http://purl.org/nidash/nidm#NIDM_0000025>
prefix nidm_hasClusterLabelsMap: <http://purl.org/nidash/nidm#NIDM_0000098>
prefix nidm_hasMaximumIntensityProjection: <http://purl.org/nidash/nidm#NIDM_0\
000138>
prefix nidm_inCoordinateSpace: <http://purl.org/nidash/nidm#NIDM_0000104>
prefix nidm_numberOfSupraThresholdClusters: <http://purl.org/nidash/nidm#NIDM_\
0000111>
prefix nidm_pValue: <http://purl.org/nidash/nidm#NIDM_0000114>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ExcursionSetMap: ;
prov:atLocation ?location ;
rdfs:label ?label ;
dct:format ?fmt ;
nfo:fileName ?filename ;
crypto:sha512 ?sha .
OPTIONAL {""" + oid_var + """ nidm_numberOfSupraThresholdClusters: ?num_clusters .} .
OPTIONAL {""" + oid_var + """ nidm_pValue: ?p_value .} .
}
ORDER BY ?peak_label
"""
return query
def export(self, nidm_version, export_dir):
self.add_attributes((
(PROV['type'], self.type),
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
(PROV['label'], self.label),
))
if self.visu is not None:
self.add_attributes((
(DC['description'], self.visu.id),
))
if self.clust_map is not None:
self.add_attributes((
(NIDM_HAS_CLUSTER_LABELS_MAP, self.clust_map.id),
))
if self.mip is not None:
self.add_attributes((
(NIDM_HAS_MAXIMUM_INTENSITY_PROJECTION, self.mip.id),
))
if self.num_clusters is not None:
self.add_attributes((
(NIDM_NUMBER_OF_CLUSTERS, self.num_clusters),
))
if self.p_value is not None:
self.add_attributes((
(NIDM_P_VALUE, self.p_value),
))
class ClusterLabelsMap(NIDMObject):
    """Entity for a map assigning each voxel the id of its cluster."""

    def __init__(self, location, coord_space,
                 oid=None, fmt=None, label=None,
                 sha=None, filename=None, suffix='', temporary=False):
        super(ClusterLabelsMap, self).__init__(oid)
        # NOTE(review): `fmt` is accepted but never forwarded to NIDMFile;
        # confirm whether the file format should be recorded here.
        self.filename = filename or ('ClusterLabels' + suffix + '.nii.gz')
        self.file = NIDMFile(self.id, location, self.filename, sha,
                             temporary=temporary)
        self.type = NIDM_CLUSTER_LABELS_MAP
        self.prov_type = PROV['Entity']
        self.label = "Cluster Labels Map" if label is None else label
        self.coord_space = coord_space

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back this entity's attributes."""
        oid_var = "?oid" if oid is None else "<" + str(oid) + ">"
        return """
prefix nidm_ClusterLabelsMap: <http://purl.org/nidash/nidm#NIDM_0000008>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterLabelsMap: ;
nfo:fileName ?filename ;
crypto:sha512 ?sha ;
prov:atLocation ?location ;
dct:format ?fmt .
}
"""

    def export(self, nidm_version, export_dir):
        """Record the entity's core provenance attributes."""
        attributes = [
            (PROV['type'], self.type),
            (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
            (PROV['label'], self.label),
        ]
        self.add_attributes(attributes)
class HeightThreshold(NIDMObject):
    """
    Object representing a HeightThreshold entity: the voxel-level
    threshold applied to a statistic map, given either as a statistic
    value, an uncorrected p-value or an FWE-corrected p-value.
    """

    def __init__(self, stat_threshold=None, p_corr_threshold=None,
                 p_uncorr_threshold=None, threshold_type=None, value=None,
                 label=None, version={'num': '1.3.0'}, oid=None,
                 equiv_thresh=None):
        super(HeightThreshold, self).__init__(oid=oid)
        # At least one way of specifying the threshold must be given.
        if not stat_threshold and not p_corr_threshold and \
                not p_uncorr_threshold and not value:
            raise Exception('No threshold defined')
        if isinstance(threshold_type, str):
            threshold_type = Identifier(threshold_type)
        thresh_desc = ""
        if stat_threshold is not None:
            thresh_desc = "Z>" + str(stat_threshold)
            if version['num'] == "1.0.0":
                # NIDM-Results 1.0.0 recorded a free-text threshold type.
                user_threshold_type = "Z-Statistic"
            else:
                # Later versions use a controlled term plus prov:value.
                threshold_type = OBO_STATISTIC
                value = stat_threshold
        elif p_uncorr_threshold is not None:
            thresh_desc = "p<" + \
                str(p_uncorr_threshold) + " (uncorrected)"
            if version['num'] == "1.0.0":
                user_threshold_type = "p-value uncorrected"
            else:
                threshold_type = NIDM_P_VALUE_UNCORRECTED_CLASS
                value = p_uncorr_threshold
        elif p_corr_threshold is not None:
            thresh_desc = "p<" + str(p_corr_threshold) + " (FWE)"
            if version['num'] == "1.0.0":
                user_threshold_type = "p-value FWE"
            else:
                threshold_type = OBO_P_VALUE_FWER
                value = p_corr_threshold
        # NOTE(review): if only `value`/`threshold_type` were supplied and
        # version is 1.0.0, `user_threshold_type` is unbound here -- confirm
        # that combination cannot occur for 1.0.0 exports.
        if version['num'] == "1.0.0":
            self.user_threshold_type = user_threshold_type
            self.p_uncorr_threshold = p_uncorr_threshold
            self.p_corr_threshold = p_corr_threshold
            self.stat_threshold = stat_threshold
        else:
            self.value = value
            self.threshold_type = threshold_type
        if not label:
            self.label = "Height Threshold: " + thresh_desc
        else:
            self.label = label
        self.type = NIDM_HEIGHT_THRESHOLD
        self.prov_type = PROV['Entity']
        self.equiv_thresh = equiv_thresh

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back a HeightThreshold entity."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_HeightThreshold: <http://purl.org/nidash/nidm#NIDM_0000034>
prefix nidm_hasAlternativeHypothesis: <http://purl.org/nidash/nidm#NIDM_000009\
7>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_HeightThreshold: ;
a ?threshold_type ;
rdfs:label ?label ;
prov:value ?value .
FILTER ( ?threshold_type NOT IN (prov:Entity, nidm_HeightThreshold:) )
}
"""
        return query

    def export(self, version, export_dir):
        """Serialize; the attribute set depends on the NIDM-Results version."""
        atts = [
            (PROV['type'], self.type),
            (PROV['label'], self.label),
        ]
        if version['num'] == "1.0.0":
            # 1.0.0 stored every representation of the threshold side by side.
            atts += [
                (NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
                (PROV['value'], self.stat_threshold),
                (NIDM_P_VALUE_UNCORRECTED, self.p_uncorr_threshold),
                (NIDM_P_VALUE_FWER, self.p_corr_threshold)
            ]
        else:
            atts += [
                (PROV['type'], self.threshold_type),
                (PROV['value'], self.value)
            ]
        if self.equiv_thresh is not None:
            for equiv in self.equiv_thresh:
                atts += [
                    (NIDM_EQUIVALENT_THRESHOLD, equiv.id)
                ]
        # None-valued attributes are dropped rather than serialized.
        self.add_attributes([(k, v) for k, v in atts if v is not None])
class ExtentThreshold(NIDMObject):
    """
    Object representing an ExtentThreshold entity: the cluster-size
    threshold, given as a voxel count, an uncorrected p-value, an
    FWE-corrected p-value, or an explicit threshold type.
    """

    def __init__(self, extent=None, p_corr=None, p_uncorr=None,
                 extent_rsl=None, label=None, version={'num': '1.3.0'},
                 value=None, oid=None, equiv_thresh=None, threshold_type=None):
        super(ExtentThreshold, self).__init__(oid=oid)
        self.type = NIDM_EXTENT_THRESHOLD
        self.prov_type = PROV['Entity']
        thresh_desc = ""
        # Defined up-front so the 1.0.0 branch below cannot hit an unbound
        # local when an explicit threshold_type is supplied (previously an
        # UnboundLocalError).
        user_threshold_type = None
        if threshold_type is not None:
            self.threshold_type = threshold_type
        else:
            if extent is not None:
                thresh_desc = "k>" + str(extent)
                user_threshold_type = "Cluster-size in voxels"
                threshold_type = OBO_STATISTIC
            elif p_uncorr is not None:
                # BUG FIX: was str(self.p_uncorr), which raised
                # AttributeError because the attribute is only assigned
                # further down; use the local parameter instead.
                thresh_desc = "p<" + str(p_uncorr) + " (uncorrected)"
                user_threshold_type = "p-value uncorrected"
                threshold_type = NIDM_P_VALUE_UNCORRECTED_CLASS
                value = p_uncorr
            elif p_corr is not None:
                thresh_desc = "p<" + str(p_corr) + " (FWE)"
                user_threshold_type = "p-value FWE"
                threshold_type = OBO_P_VALUE_FWER
                value = p_corr
            else:
                # No cluster-size thresholding: all clusters are reported.
                thresh_desc = "k>=0"
                extent = 0
                if version['num'] == "1.0.0":
                    p_uncorr = 1.0
                    p_corr = 1.0
                    user_threshold_type = None
                else:
                    threshold_type = OBO_STATISTIC
            self.threshold_type = threshold_type
        self.value = value
        if version['num'] == "1.0.0":
            # 1.0.0 stored the user-facing type and both p-values directly.
            self.user_threshold_type = user_threshold_type
            self.p_uncorr = p_uncorr
            self.p_corr = p_corr
        else:
            self.threshold_type = threshold_type
        self.extent = extent
        self.extent_rsl = extent_rsl
        if label is None:
            self.label = "Extent Threshold: " + thresh_desc
        else:
            self.label = label
        self.equiv_thresh = equiv_thresh

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back an ExtentThreshold entity."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_ExtentThreshold: <http://purl.org/nidash/nidm#NIDM_0000026>
prefix nidm_clusterSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000084>
prefix nidm_clusterSizeInResels: <http://purl.org/nidash/nidm#NIDM_0000156>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ExtentThreshold: ;
a ?threshold_type ;
rdfs:label ?label .
OPTIONAL {""" + oid_var + """ prov:value ?value .} .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInVoxels: ?extent .} .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInResels: ?extent_rsl .} .
FILTER ( ?threshold_type NOT IN (prov:Entity, nidm_ExtentThreshold:) )
}
"""
        return query

    def export(self, version, export_dir):
        """Serialize; the attribute set depends on the NIDM-Results version."""
        atts = [
            (PROV['type'], self.type),
        ]
        atts += [
            (PROV['label'], self.label)
        ]
        if self.extent_rsl is not None:
            atts += [
                (NIDM_CLUSTER_SIZE_IN_RESELS, self.extent_rsl),
            ]
        if self.extent is not None:
            atts += [
                (NIDM_CLUSTER_SIZE_IN_VOXELS, self.extent),
            ]
        if version['num'] == "1.0.0":
            atts += [
                (NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
                (NIDM_P_VALUE_UNCORRECTED, self.p_uncorr),
                (NIDM_P_VALUE_FWER, self.p_corr)
            ]
        else:
            atts += [
                (PROV['type'], self.threshold_type)
            ]
            if self.value is not None:
                atts += [
                    (PROV['value'], self.value)
                ]
        if self.equiv_thresh is not None:
            for equiv in self.equiv_thresh:
                atts += [
                    (NIDM_EQUIVALENT_THRESHOLD, equiv.id)
                ]
        # None-valued attributes are dropped rather than serialized.
        self.add_attributes([(k, v) for k, v in atts if v is not None])
class Cluster(NIDMObject):
    """Object representing a SupraThresholdCluster entity."""

    def __init__(self, cluster_num, size, pFWER, peaks,
                 x=None, y=None, z=None, x_std=None, y_std=None, z_std=None,
                 suffix='', clust_size_resels=None, pFDR=None, punc=None,
                 label=None, oid=None, cog=None):
        super(Cluster, self).__init__(oid=oid)
        self.num = cluster_num
        # Use the supplied centre of gravity, build one from coordinates,
        # or leave it unset.
        if cog is not None:
            self.cog = cog
        elif x and y and z:
            self.cog = CenterOfGravity(
                cluster_num, x=x, y=y, z=z, x_std=x_std, y_std=y_std,
                z_std=z_std)
        else:
            self.cog = None
        self.peaks = peaks
        self.size = size
        self.pFWER = pFWER
        self.type = NIDM_SIGNIFICANT_CLUSTER
        self.prov_type = PROV['Entity']
        self.punc = punc
        self.pFDR = pFDR
        if label:
            self.label = label
        else:
            self.label = "%s %04d" % ("Supra-Threshold Cluster", self.num)
        self.clust_size_resels = clust_size_resels

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back a SupraThresholdCluster."""
        oid_var = "?oid" if oid is None else "<" + str(oid) + ">"
        query = """
prefix nidm_SupraThresholdCluster: <http://purl.org/nidash/nidm#NIDM_0000070>
prefix nidm_clusterSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000084>
prefix nidm_clusterLabelId: <http://purl.org/nidash/nidm#NIDM_0000082>
prefix nidm_clusterSizeInResels: <http://purl.org/nidash/nidm#NIDM_0000156>
prefix nidm_pValueUncorrected: <http://purl.org/nidash/nidm#NIDM_0000116>
prefix nidm_pValueFWER: <http://purl.org/nidash/nidm#NIDM_0000115>
prefix nidm_qValueFDR: <http://purl.org/nidash/nidm#NIDM_0000119>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_SupraThresholdCluster: ;
rdfs:label ?label ;
nidm_clusterSizeInVoxels: ?size ;
nidm_clusterLabelId: ?cluster_num .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInResels: ?clust_size_resels .} .
OPTIONAL {""" + oid_var + """ nidm_pValueUncorrected: ?punc .} .
OPTIONAL {""" + oid_var + """ nidm_pValueFWER: ?pFWER .} .
OPTIONAL {""" + oid_var + """ nidm_qValueFDR: ?pFDR .} .
}
"""
        return query

    def export(self, nidm_version, export_dir):
        """Serialize cluster-level provenance attributes."""
        if nidm_version['num'] in ["1.0.0", "1.1.0"]:
            # Older spec versions used the "Significant Cluster" wording.
            self.label = self.label.replace("Supra-Threshold", "Significant")
        attributes = [
            (PROV['type'], NIDM_SIGNIFICANT_CLUSTER),
            (PROV['label'], self.label),
            (NIDM_CLUSTER_LABEL_ID, self.num),
            (NIDM_CLUSTER_SIZE_IN_VOXELS, self.size),
        ]
        if self.clust_size_resels is not None:
            attributes.append(
                (NIDM_CLUSTER_SIZE_IN_RESELS, self.clust_size_resels))
        if self.punc is not None:
            attributes.append(
                (NIDM_P_VALUE_UNCORRECTED,
                 Literal(self.punc, datatype=XSD_FLOAT)))
        if self.pFDR is not None:
            attributes.append(
                (NIDM_Q_VALUE_FDR, Literal(self.pFDR, datatype=XSD_FLOAT)))
        if self.pFWER is not None:
            attributes.append(
                (NIDM_P_VALUE_FWER, Literal(self.pFWER, datatype=XSD_FLOAT)))
        self.add_attributes(attributes)
class DisplayMaskMap(NIDMObject):
    """
    Object representing a DisplayMaskMap entity: the mask used to
    restrict which voxels are displayed.
    """

    def __init__(self, contrast_num, mask_file, mask_num, coord_space,
                 sha=None, filename=None, fmt=None, label=None, oid=None,
                 derfrom_id=None, derfrom_filename=None, derfrom_fmt=None,
                 derfrom_sha=None, isderfrommap=False):
        super(DisplayMaskMap, self).__init__(oid=oid)
        if not filename:
            filename = 'DisplayMask' + str(mask_num) + '.nii.gz'
        self.file = NIDMFile(self.id, mask_file, filename,
                             sha=sha, fmt=fmt)
        self.coord_space = coord_space
        self.type = NIDM_DISPLAY_MASK_MAP
        self.prov_type = PROV['Entity']
        if not label:
            self.label = "Display Mask Map " + str(mask_num)
        else:
            self.label = label
        if derfrom_id is not None:
            # Build a stub map representing the prov:wasDerivedFrom source;
            # isderfrommap=True makes export() skip coord-space/label for it.
            self.derfrom = DisplayMaskMap(
                None, None, None,
                coord_space=None, oid=derfrom_id,
                filename=derfrom_filename, sha=derfrom_sha,
                fmt=derfrom_fmt,
                isderfrommap=True)
        else:
            self.derfrom = None
        self.isderfrommap = isderfrommap

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back a DisplayMaskMap entity."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_DisplayMaskMap: <http://purl.org/nidash/nidm#NIDM_0000020>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_DisplayMaskMap: ;
rdfs:label ?label ;
nfo:fileName ?filename ;
crypto:sha512 ?sha ;
prov:atLocation ?mask_file ;
dct:format ?fmt .
OPTIONAL {""" + oid_var + """ prov:wasDerivedFrom ?derfrom_id .
?derfrom_id a nidm_DisplayMaskMap: ;
nfo:fileName ?derfrom_filename ;
dct:format ?derfrom_fmt ;
crypto:sha512 ?derfrom_sha .
} .
}
"""
        return query

    def export(self, nidm_version, export_dir):
        """Serialize; derived-from stubs only record their type."""
        atts = (
            (PROV['type'], self.type),
        )
        if not self.isderfrommap:
            atts = atts + (
                (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
                (PROV['label'], self.label))
        self.add_attributes(atts)
class PeakCriteria(NIDMObject):
    """Object representing a PeakDefinitionCriteria entity."""

    def __init__(self, contrast_num, peak_dist, num_peak=None, label=None,
                 oid=None):
        super(PeakCriteria, self).__init__(oid=oid)
        self.num_peak = num_peak
        self.peak_dist = peak_dist
        self.type = NIDM_PEAK_DEFINITION_CRITERIA
        self.prov_type = PROV['Entity']
        self.label = label if label else "Peak Definition Criteria"

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back PeakDefinitionCriteria."""
        oid_var = "?oid" if oid is None else "<" + str(oid) + ">"
        return """
prefix nidm_PeakDefinitionCriteria: <http://purl.org/nidash/nidm#NIDM_0000063>
prefix nidm_minDistanceBetweenPeaks: <http://purl.org/nidash/nidm#NIDM_0000109>
prefix nidm_maxNumberOfPeaksPerCluster: <http://purl.org/nidash/nidm#NIDM_0000\
108>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_PeakDefinitionCriteria: ;
rdfs:label ?label ;
nidm_minDistanceBetweenPeaks: ?peak_dist .
OPTIONAL { """ + oid_var + """ nidm_maxNumberOfPeaksPerCluster: ?num_peak .} .
}
"""

    def export(self, nidm_version, export_dir):
        """Record mandatory attributes plus the optional max-peaks bound."""
        attributes = [
            (PROV['type'], self.type),
            (PROV['label'], self.label),
            (NIDM_MIN_DISTANCE_BETWEEN_PEAKS, self.peak_dist),
        ]
        if self.num_peak:
            attributes.append(
                (NIDM_MAX_NUMBER_OF_PEAKS_PER_CLUSTER, self.num_peak))
        self.add_attributes(attributes)
class ClusterCriteria(NIDMObject):
    """Object representing a ClusterDefinitionCriteria entity."""

    def __init__(self, contrast_num, connectivity, label=None, oid=None):
        super(ClusterCriteria, self).__init__(oid=oid)
        self.connectivity = connectivity
        self.type = NIDM_CLUSTER_DEFINITION_CRITERIA
        self.prov_type = PROV['Entity']
        if not label:
            self.label = ("Cluster Connectivity Criterion: " +
                          str(self.connectivity))
        else:
            self.label = label

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back ClusterDefinitionCriteria."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_ClusterDefinitionCriteria: <http://purl.org/nidash/nidm#NIDM_00000\
07>
prefix nidm_hasConnectivityCriterion: <http://purl.org/nidash/nidm#NIDM_000009\
9>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterDefinitionCriteria: ;
rdfs:label ?label ;
nidm_hasConnectivityCriterion: ?connectivity .
}
"""
        return query

    def export(self, nidm_version, export_dir):
        """Serialize; maps integer connectivity onto NIDM controlled terms."""
        if isinstance(self.connectivity, int):
            if self.connectivity == 6:
                self.connectivity = NIDM_VOXEL6CONNECTED
            elif self.connectivity == 18:
                self.connectivity = NIDM_VOXEL18CONNECTED
            elif self.connectivity == 26:
                self.connectivity = NIDM_VOXEL26CONNECTED
        if self.connectivity is not None:
            atts = (
                (PROV['type'], self.type),
                (PROV['label'], self.label),
                (NIDM_HAS_CONNECTIVITY_CRITERION, self.connectivity))
        else:
            atts = (
                (PROV['type'], NIDM_CLUSTER_DEFINITION_CRITERIA),
                # BUG FIX: was the undefined local `label`, which raised
                # NameError whenever connectivity was None.
                (PROV['label'], self.label))
        self.add_attributes(atts)
class CenterOfGravity(NIDMObject):
    """Object representing a ClusterCenterOfGravity entity."""

    def __init__(self, cluster_num, x=None, y=None, z=None, x_std=None,
                 y_std=None, z_std=None, oid=None, coord_vector=None,
                 coord_vector_std=None, label=None, coord_id=None):
        super(CenterOfGravity, self).__init__(oid=oid)
        self.cluster_num = cluster_num
        # The coordinate entity is labelled by the zero-padded cluster id.
        coord_label_id = "%04d" % cluster_num
        self.coordinate = Coordinate(
            coord_label_id, x=x, y=y, z=z,
            x_std=x_std, y_std=y_std, z_std=z_std,
            coord_vector_std=coord_vector_std,
            coord_vector=coord_vector, oid=coord_id)
        self.type = NIDM_CLUSTER_CENTER_OF_GRAVITY
        self.prov_type = PROV['Entity']
        self.label = ("Center of gravity " + str(self.cluster_num)
                      if label is None else label)

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back a ClusterCenterOfGravity."""
        oid_var = "?oid" if oid is None else "<" + str(oid) + ">"
        return """
prefix nidm_ClusterCenterOfGravity: <http://purl.org/nidash/nidm#NIDM_0000140>
prefix nidm_coordinateVector: <http://purl.org/nidash/nidm#NIDM_0000086>
prefix nidm_coordinateVectorInVoxels: <http://purl.org/nidash/nidm#NIDM_000013\
9>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterCenterOfGravity: ;
rdfs:label ?label ;
prov:atLocation ?coord_id .
?coord_id a nidm_Coordinate: ;
nidm_coordinateVector: ?coord_vector_std .
OPTIONAL { ?coord_id nidm_coordinateVectorInVoxels: ?coord_vector .} .
}
"""

    def export(self, nidm_version, export_dir):
        """Record type, label and the location of the coordinate entity."""
        attributes = [
            (PROV['type'], self.type),
            (PROV['label'], self.label),
            (PROV['location'], self.coordinate.id),
        ]
        self.add_attributes(attributes)
class SearchSpace(NIDMObject):
    """
    Object representing a SearchSpaceMaskMap entity: the mask within
    which the statistic map was thresholded, along with random-field
    theory quantities describing it.
    """

    def __init__(self, search_space_file, vol_in_voxels, vol_in_units,
                 vol_in_resels, resel_size_in_voxels,
                 random_field_stationarity, noise_fwhm_in_voxels,
                 noise_fwhm_in_units, coord_space,
                 expected_num_voxels=None, expected_num_clusters=None,
                 height_critical_fwe05=None, height_critical_fdr05=None,
                 extent_critical_fwe05=None, extent_critical_fdr05=None,
                 search_vol_geom=None, noise_roughness=None,
                 filename=None, sha=None, fmt=None,
                 label=None, oid=None):
        super(SearchSpace, self).__init__(oid=oid)
        if not filename:
            filename = 'SearchSpaceMask.nii.gz'
        self.file = NIDMFile(self.id, search_space_file, filename,
                             sha=sha, fmt=fmt)
        self.coord_space = coord_space
        self.resel_size_in_voxels = resel_size_in_voxels
        self.search_volume_in_voxels = vol_in_voxels
        self.search_volume_in_units = vol_in_units
        self.search_volume_in_resels = vol_in_resels
        self.rf_stationarity = random_field_stationarity
        self.noise_fwhm_in_voxels = noise_fwhm_in_voxels
        self.noise_fwhm_in_units = noise_fwhm_in_units
        self.type = NIDM_SEARCH_SPACE_MASK_MAP
        self.prov_type = PROV['Entity']
        # FIX: honour the `label` argument (it was accepted but silently
        # ignored), consistent with the other entity classes and with
        # get_query(), which reads ?label back.
        if label is None:
            label = "Search Space Mask Map"
        self.label = label
        self.expected_num_voxels = expected_num_voxels
        self.expected_num_clusters = expected_num_clusters
        self.height_critical_fwe05 = height_critical_fwe05
        self.height_critical_fdr05 = height_critical_fdr05
        self.extent_critical_fwe05 = extent_critical_fwe05
        self.extent_critical_fdr05 = extent_critical_fdr05
        self.search_vol_geom = search_vol_geom
        self.noise_roughness = noise_roughness

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back a SearchSpaceMaskMap."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        # FIX: removed a duplicated nidm_reselSizeInVoxels: triple pattern.
        query = """
prefix nidm_SearchSpaceMaskMap: <http://purl.org/nidash/nidm#NIDM_0000068>
prefix nidm_expectedNumberOfVoxelsPerCluster: <http://purl.org/nidash/nidm#NID\
M_0000143>
prefix nidm_expectedNumberOfClusters: <http://purl.org/nidash/nidm#NIDM_000014\
1>
prefix nidm_heightCriticalThresholdFWE05: <http://purl.org/nidash/nidm#NIDM_00\
00147>
prefix nidm_heightCriticalThresholdFDR05: <http://purl.org/nidash/nidm#NIDM_00\
00146>
prefix nidm_searchVolumeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000121>
prefix nidm_searchVolumeInUnits: <http://purl.org/nidash/nidm#NIDM_0000136>
prefix nidm_searchVolumeInResels: <http://purl.org/nidash/nidm#NIDM_0000149>
prefix nidm_reselSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000148>
prefix nidm_noiseFWHMInVoxels: <http://purl.org/nidash/nidm#NIDM_0000159>
prefix nidm_noiseFWHMInUnits: <http://purl.org/nidash/nidm#NIDM_0000157>
prefix nidm_randomFieldStationarity: <http://purl.org/nidash/nidm#NIDM_0000120>
prefix spm_smallestSignificantClusterSizeInVoxelsFWE05: <http://purl.org/nidas\
h/spm#SPM_0000014>
prefix spm_smallestSignificantClusterSizeInVoxelsFDR05: <http://purl.org/nidas\
h/spm#SPM_0000013>
prefix spm_searchVolumeReselsGeometry: <http://purl.org/nidash/spm#SPM_0000010>
prefix nidm_noiseRoughnessInVoxels: <http://purl.org/nidash/nidm#NIDM_0000145>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_SearchSpaceMaskMap: ;
rdfs:label ?label ;
nidm_searchVolumeInVoxels: ?vol_in_voxels ;
nidm_searchVolumeInUnits: ?vol_in_units ;
nidm_searchVolumeInResels: ?vol_in_resels ;
nidm_reselSizeInVoxels: ?resel_size_in_voxels ;
nidm_noiseFWHMInVoxels: ?noise_fwhm_in_voxels ;
nidm_noiseFWHMInUnits: ?noise_fwhm_in_units ;
nidm_randomFieldStationarity: ?random_field_stationarity ;
prov:atLocation ?search_space_file ;
dct:format ?fmt ;
nfo:fileName ?filename ;
crypto:sha512 ?sha .
OPTIONAL {""" + oid_var + """ nidm_expectedNumberOfVoxelsPerCluster: ?expected_num_voxels } .
OPTIONAL {""" + oid_var + """ nidm_expectedNumberOfClusters: ?expected_num_clusters } .
OPTIONAL {""" + oid_var + """ nidm_heightCriticalThresholdFWE05: ?height_critical_fwe05 } .
OPTIONAL {""" + oid_var + """ nidm_heightCriticalThresholdFDR05: ?height_critical_fdr05 } .
OPTIONAL {""" + oid_var + """ spm_smallestSignificantClusterSizeInVoxelsFWE05: ?extent_critical_fwe05 } .
OPTIONAL {""" + oid_var + """ spm_smallestSignificantClusterSizeInVoxelsFDR05: ?extent_critical_fdr05 } .
OPTIONAL {""" + oid_var + """ spm_searchVolumeReselsGeometry: ?search_vol_geom } .
OPTIONAL {""" + oid_var + """ nidm_noiseRoughnessInVoxels: ?noise_roughness } .
}
"""
        return query

    def export(self, version, export_dir):
        """Serialize the search space.

        Note: unlike most export() methods here, `version` is consulted
        via its 'major'/'minor'/'revision' keys, not 'num'.
        """
        atts = (
            (PROV['label'], self.label),
            (PROV['type'], NIDM_SEARCH_SPACE_MASK_MAP),
            (NIDM_RANDOM_FIELD_STATIONARITY, self.rf_stationarity),
            (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
            (NIDM_SEARCH_VOLUME_IN_VOXELS, self.search_volume_in_voxels),
            (NIDM_SEARCH_VOLUME_IN_UNITS, self.search_volume_in_units),
            (NIDM_SEARCH_VOLUME_IN_RESELS, self.search_volume_in_resels),
            (NIDM_RESEL_SIZE_IN_VOXELS, self.resel_size_in_voxels))
        # Noise FWHM was introduced after NIDM-Results 1.0.0.
        if self.noise_fwhm_in_voxels is not None:
            if (version['major'] > 1) or \
                    (version['major'] >= 1 and
                     (version['minor'] > 0 or version['revision'] > 0)):
                atts = atts + (
                    (NIDM_NOISE_FWHM_IN_VOXELS, self.noise_fwhm_in_voxels),
                    (NIDM_NOISE_FWHM_IN_UNITS, self.noise_fwhm_in_units))
        if self.expected_num_voxels is not None:
            atts = atts + ((NIDM_EXPECTED_NUMBER_OF_VOXELS_PER_CLUSTER,
                            self.expected_num_voxels),)
        if self.expected_num_clusters is not None:
            atts = atts + ((NIDM_EXPECTED_NUMBER_OF_CLUSTERS,
                            self.expected_num_clusters),)
        if self.height_critical_fwe05 is not None:
            atts = atts + ((NIDM_HEIGHT_CRITICAL_THRESHOLD_FWE_05,
                            self.height_critical_fwe05),)
        if self.height_critical_fdr05 is not None:
            atts = atts + ((NIDM_HEIGHT_CRITICAL_THRESHOLD_FDR_05,
                            self.height_critical_fdr05),)
        if self.extent_critical_fwe05 is not None:
            atts = atts + ((
                SPM_SMALLEST_SIGNIFICANT_CLUSTER_SIZE_IN_VOXELS_FWE05,
                self.extent_critical_fwe05),)
        if self.extent_critical_fdr05 is not None:
            atts = atts + ((
                SPM_SMALLEST_SIGNIFICANT_CLUSTER_SIZE_IN_VOXELS_FDR05,
                self.extent_critical_fdr05),)
        if self.search_vol_geom is not None:
            atts = atts + ((SPM_SEARCH_VOLUME_RESELS_GEOMETRY,
                            self.search_vol_geom),)
        if self.noise_roughness:
            atts = atts + ((NIDM_NOISE_ROUGHNESS_IN_VOXELS,
                            self.noise_roughness),)
        self.add_attributes(atts)
class Coordinate(NIDMObject):
    """Object representing a Coordinate entity (location of a peak/COG)."""

    def __init__(self, label_id, coord_vector=None, coord_vector_std=None,
                 x=None, y=None, z=None, x_std=None, y_std=None, z_std=None,
                 label=None, oid=None):
        super(Coordinate, self).__init__(oid=oid)
        self.label_id = label_id
        # Voxel-space coordinates: explicit x/y/z win over coord_vector,
        # which may arrive as a JSON-encoded string.
        if x is not None and y is not None and z is not None:
            self.coord_vector = [x, y, z]
        else:
            if coord_vector and type(coord_vector) is not list:
                coord_vector = json.loads(coord_vector)
            self.coord_vector = coord_vector
        # Same scheme for the standard-space coordinates.
        if x_std is not None and y_std is not None and z_std is not None:
            self.coord_vector_std = [x_std, y_std, z_std]
        else:
            if coord_vector_std and type(coord_vector_std) is not list:
                coord_vector_std = json.loads(coord_vector_std)
            self.coord_vector_std = coord_vector_std
        self.type = NIDM_COORDINATE
        self.prov_type = PROV['Entity']
        self.label = (label if label is not None
                      else "Coordinate " + self.label_id)

    def __str__(self):
        return '%s\t%s' % (self.label, self.coord_vector)

    def export(self, nidm_version, export_dir):
        """Serialize the coordinate; vectors are JSON-encoded strings."""
        attributes = [
            (PROV['type'], NIDM_COORDINATE),
            (PROV['type'], PROV['Location']),
            (PROV['label'], self.label),
        ]
        if self.coord_vector is not None:
            attributes.append((NIDM_COORDINATE_VECTOR_IN_VOXELS,
                               json.dumps(self.coord_vector)))
        if self.coord_vector_std is not None:
            attributes.append((NIDM_COORDINATE_VECTOR,
                               json.dumps(self.coord_vector_std)))
        self.add_attributes(attributes)
class Peak(NIDMObject):
    """
    Object representing a Peak entity: a local maximum within a
    supra-threshold cluster, located by a Coordinate entity.
    """

    def __init__(self, equiv_z, p_unc=None, p_fwer=None, label=None,
                 coord_label=None, exc_set_id=None, oid=None, suffix='',
                 p_fdr=None, value=None, coord_id=None, *args, **kwargs):
        super(Peak, self).__init__(oid)
        if oid is not None:
            # Reloaded from a document: derive the unique id from the
            # label, which is expected to start with "Peak " (5 chars).
            self.label = label
            peak_unique_id = label[5:]
            # NOTE(review): peak_index is never used after this point.
            peak_index = peak_unique_id
        else:
            peak_unique_id = suffix
            self.label = "Peak " + peak_unique_id
        self.equiv_z = equiv_z
        self.p_unc = p_unc
        self.p_fwer = p_fwer
        # Remaining coordinate keywords (x/y/z, vectors) pass through.
        self.coordinate = Coordinate(
            str(peak_unique_id), label=coord_label, oid=coord_id, **kwargs)
        self.type = NIDM_PEAK
        self.prov_type = PROV['Entity']
        self.exc_set_id = exc_set_id
        self.value = value
        self.p_fdr = p_fdr

    @classmethod
    def get_query(klass, oid=None):
        """Return a SPARQL query reading back a Peak and its Coordinate."""
        if oid is None:
            oid_var = "?oid"
        else:
            oid_var = "<" + str(oid) + ">"
        query = """
prefix nidm_Peak: <http://purl.org/nidash/nidm#NIDM_0000062>
prefix nidm_pValueUncorrected: <http://purl.org/nidash/nidm#NIDM_0000116>
prefix nidm_equivalentZStatistic: <http://purl.org/nidash/nidm#NIDM_0000092>
prefix nidm_pValueFWER: <http://purl.org/nidash/nidm#NIDM_0000115>
prefix nidm_qValueFDR: <http://purl.org/nidash/nidm#NIDM_0000119>
prefix nidm_coordinateVectorInVoxels: <http://purl.org/nidash/nidm#NIDM_000013\
9>
prefix nidm_coordinateVector: <http://purl.org/nidash/nidm#NIDM_0000086>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_Peak: ;
rdfs:label ?label ;
prov:atLocation ?coord_id .
?coord_id a nidm_Coordinate: ;
rdfs:label ?coord_label ;
nidm_coordinateVector: ?coord_vector_std .
OPTIONAL {?coord_id nidm_coordinateVectorInVoxels: ?coord_vector .} .
OPTIONAL {""" + oid_var + """ prov:value ?value .} .
OPTIONAL {""" + oid_var + """ nidm_pValueUncorrected: ?p_unc .} .
OPTIONAL {""" + oid_var + """ nidm_equivalentZStatistic: ?equiv_z .} .
OPTIONAL {""" + oid_var + """ nidm_pValueFWER: ?p_fwer .} .
OPTIONAL {""" + oid_var + """ nidm_qValueFDR: ?p_fdr .} .
}
"""
        return query

    def __str__(self):
        return '%s \tz=%.2f \tp=%.2e (unc.) \t%s' % (
            self.label, self.equiv_z, self.p_unc, str(self.coordinate))

    def export(self, nidm_version, export_dir):
        """Serialize the peak; derives the uncorrected p-value from the
        equivalent Z statistic when it was not supplied."""
        if self.p_unc is None:
            # Upper-tail p = 1 - Phi(z), with Phi computed via erf.
            norm_cdf_z = (1.0 + erf(self.equiv_z / sqrt(2.0))) / 2.0
            self.p_unc = 1 - norm_cdf_z
        atts = (
            (PROV['type'], self.type),
            (PROV['label'], self.label),
            (PROV['location'], self.coordinate.id))
        if self.value is not None:
            atts = atts + (
                (PROV['value'], self.value),
            )
        if self.p_unc is not None:
            atts = atts + (
                (NIDM_P_VALUE_UNCORRECTED,
                 Literal(self.p_unc, datatype=XSD_FLOAT)),
            )
        if self.equiv_z is not None:
            atts = atts + (
                (NIDM_EQUIVALENT_ZSTATISTIC,
                 Literal(self.equiv_z, datatype=XSD_FLOAT)),
            )
        if self.p_fdr is not None:
            atts = atts + (
                (NIDM_Q_VALUE_FDR,
                 Literal(self.p_fdr, datatype=XSD_FLOAT)),
            )
        if self.p_fwer is not None:
            atts = atts + (
                (NIDM_P_VALUE_FWER,
                 Literal(self.p_fwer, datatype=XSD_FLOAT)),
            )
        self.add_attributes(atts)
| true | true |
f72032afeb7c34403fb72e1e874710a4279d0978 | 1,500 | py | Python | saleor/api/payment/serializers.py | glosoftgroup/KahawaHardware | 893e94246583addf41c3bb0d58d2ce6bcd233c4f | [
"BSD-3-Clause"
] | null | null | null | saleor/api/payment/serializers.py | glosoftgroup/KahawaHardware | 893e94246583addf41c3bb0d58d2ce6bcd233c4f | [
"BSD-3-Clause"
] | null | null | null | saleor/api/payment/serializers.py | glosoftgroup/KahawaHardware | 893e94246583addf41c3bb0d58d2ce6bcd233c4f | [
"BSD-3-Clause"
] | null | null | null | # Payment rest api serializers
from rest_framework import serializers
from rest_framework.serializers import (
SerializerMethodField,
IntegerField
)
from ...sale.models import PaymentOption
from ...payment.models import MpesaPayment
class MpesaPaymentUpdateSerializer(serializers.ModelSerializer):
    """Serializer updating an M-Pesa payment; status restricted to 0/1."""

    status = IntegerField(max_value=1, min_value=0)

    class Meta:
        model = MpesaPayment
        fields = ('id', 'ref_number', 'status')

    def update(self, instance, validated_data):
        """Copy validated fields onto the instance, save and return it."""
        for field in ('id', 'status'):
            setattr(instance, field,
                    validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
class MpesaPaymentListSerializer(serializers.ModelSerializer):
    """Read-only listing serializer for M-Pesa payments."""

    # Creation timestamp rendered through get_time() below.
    time = SerializerMethodField()

    class Meta:
        model = MpesaPayment
        fields = (
            'id', 'ref_number', 'phone', 'amount',
            'first_name', 'middle_name', 'last_name',
            'time', 'status',
        )

    def get_time(self, obj):
        """Format the record's creation time as dd/mm/YYYY HH:MM:SS AM/PM."""
        return obj.created.strftime("%d/%m/%Y %H:%M:%S %p")
class PaymentOptionListSerializer(serializers.ModelSerializer):
    """Listing serializer exposing the basic payment-option fields."""

    class Meta:
        model = PaymentOption
        fields = ('id', 'name', 'description')
from rest_framework import serializers
from rest_framework.serializers import (
SerializerMethodField,
IntegerField
)
from ...sale.models import PaymentOption
from ...payment.models import MpesaPayment
class MpesaPaymentUpdateSerializer(serializers.ModelSerializer):
    """Serializer used to update an M-Pesa payment record."""

    # Constrain status to the two values the API accepts (0 or 1).
    status = IntegerField(max_value=1, min_value=0)

    class Meta:
        model = MpesaPayment
        fields = ('id',
                  'ref_number',
                  'status'
                  )

    def update(self, instance, validated_data):
        """Copy validated fields onto `instance`, save and return it."""
        # NOTE(review): allowing `id` to be overwritten from the payload is
        # unusual for a model update -- confirm this is intended.
        instance.id = validated_data.get('id', instance.id)
        instance.status = validated_data.get('status', instance.status)
        instance.save()
        return instance
class MpesaPaymentListSerializer(serializers.ModelSerializer):
    """Read-only serializer listing M-Pesa payments."""

    # Creation timestamp rendered through get_time() below.
    time = SerializerMethodField()

    class Meta:
        model = MpesaPayment
        fields = ('id',
                  'ref_number',
                  'phone',
                  'amount',
                  'first_name',
                  'middle_name',
                  'last_name',
                  'time',
                  'status')

    def get_time(self, obj):
        """Format the creation time as dd/mm/YYYY HH:MM:SS AM/PM."""
        time = obj.created.strftime("%d/%m/%Y %H:%M:%S %p")
        return time
class PaymentOptionListSerializer(serializers.ModelSerializer):
    """Expose id, name and description of available payment options."""

    class Meta:
        model = PaymentOption
        fields = ('id', 'name', 'description')
f720333a7186cc4c3d83e8f61e9842d040ac10f8 | 18,296 | py | Python | sdk/python/pulumi_azure_native/storagepool/v20210801/iscsi_target.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storagepool/v20210801/iscsi_target.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storagepool/v20210801/iscsi_target.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['IscsiTargetArgs', 'IscsiTarget']
@pulumi.input_type
class IscsiTargetArgs:
    def __init__(__self__, *,
                 acl_mode: pulumi.Input[Union[str, 'IscsiTargetAclMode']],
                 disk_pool_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 iscsi_target_name: Optional[pulumi.Input[str]] = None,
                 luns: Optional[pulumi.Input[Sequence[pulumi.Input['IscsiLunArgs']]]] = None,
                 managed_by: Optional[pulumi.Input[str]] = None,
                 managed_by_extended: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 static_acls: Optional[pulumi.Input[Sequence[pulumi.Input['AclArgs']]]] = None,
                 target_iqn: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a IscsiTarget resource.
        :param pulumi.Input[Union[str, 'IscsiTargetAclMode']] acl_mode: Mode for Target connectivity.
        :param pulumi.Input[str] disk_pool_name: The name of the Disk Pool.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] iscsi_target_name: The name of the iSCSI Target.
        :param pulumi.Input[Sequence[pulumi.Input['IscsiLunArgs']]] luns: List of LUNs to be exposed through iSCSI Target.
        :param pulumi.Input[str] managed_by: Azure resource id. Indicates if this resource is managed by another Azure resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] managed_by_extended: List of Azure resource ids that manage this resource.
        :param pulumi.Input[Sequence[pulumi.Input['AclArgs']]] static_acls: Access Control List (ACL) for an iSCSI Target; defines LUN masking policy
        :param pulumi.Input[str] target_iqn: iSCSI Target IQN (iSCSI Qualified Name); example: "iqn.2005-03.org.iscsi:server".
        """
        # Required arguments are always registered with the Pulumi runtime.
        pulumi.set(__self__, "acl_mode", acl_mode)
        pulumi.set(__self__, "disk_pool_name", disk_pool_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only registered when explicitly provided.
        if iscsi_target_name is not None:
            pulumi.set(__self__, "iscsi_target_name", iscsi_target_name)
        if luns is not None:
            pulumi.set(__self__, "luns", luns)
        if managed_by is not None:
            pulumi.set(__self__, "managed_by", managed_by)
        if managed_by_extended is not None:
            pulumi.set(__self__, "managed_by_extended", managed_by_extended)
        if static_acls is not None:
            pulumi.set(__self__, "static_acls", static_acls)
        if target_iqn is not None:
            pulumi.set(__self__, "target_iqn", target_iqn)
    @property
    @pulumi.getter(name="aclMode")
    def acl_mode(self) -> pulumi.Input[Union[str, 'IscsiTargetAclMode']]:
        """
        Mode for Target connectivity.
        """
        # Generated accessor pair over the Pulumi input table
        # (wire name "aclMode", Python key "acl_mode").
        return pulumi.get(self, "acl_mode")

    @acl_mode.setter
    def acl_mode(self, value: pulumi.Input[Union[str, 'IscsiTargetAclMode']]):
        pulumi.set(self, "acl_mode", value)
    @property
    @pulumi.getter(name="diskPoolName")
    def disk_pool_name(self) -> pulumi.Input[str]:
        """
        The name of the Disk Pool.
        """
        # Generated accessor pair (wire name "diskPoolName").
        return pulumi.get(self, "disk_pool_name")

    @disk_pool_name.setter
    def disk_pool_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "disk_pool_name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        # Generated accessor pair (wire name "resourceGroupName").
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="iscsiTargetName")
    def iscsi_target_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the iSCSI Target.
        """
        # Generated accessor pair (wire name "iscsiTargetName").
        return pulumi.get(self, "iscsi_target_name")

    @iscsi_target_name.setter
    def iscsi_target_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "iscsi_target_name", value)
@property
@pulumi.getter
def luns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IscsiLunArgs']]]]:
"""
List of LUNs to be exposed through iSCSI Target.
"""
return pulumi.get(self, "luns")
@luns.setter
def luns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IscsiLunArgs']]]]):
pulumi.set(self, "luns", value)
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> Optional[pulumi.Input[str]]:
"""
Azure resource id. Indicates if this resource is managed by another Azure resource.
"""
return pulumi.get(self, "managed_by")
@managed_by.setter
def managed_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "managed_by", value)
@property
@pulumi.getter(name="managedByExtended")
def managed_by_extended(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Azure resource ids that manage this resource.
"""
return pulumi.get(self, "managed_by_extended")
@managed_by_extended.setter
def managed_by_extended(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "managed_by_extended", value)
@property
@pulumi.getter(name="staticAcls")
def static_acls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AclArgs']]]]:
"""
Access Control List (ACL) for an iSCSI Target; defines LUN masking policy
"""
return pulumi.get(self, "static_acls")
@static_acls.setter
def static_acls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AclArgs']]]]):
pulumi.set(self, "static_acls", value)
@property
@pulumi.getter(name="targetIqn")
def target_iqn(self) -> Optional[pulumi.Input[str]]:
"""
iSCSI Target IQN (iSCSI Qualified Name); example: "iqn.2005-03.org.iscsi:server".
"""
return pulumi.get(self, "target_iqn")
@target_iqn.setter
def target_iqn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_iqn", value)
class IscsiTarget(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl_mode: Optional[pulumi.Input[Union[str, 'IscsiTargetAclMode']]] = None,
disk_pool_name: Optional[pulumi.Input[str]] = None,
iscsi_target_name: Optional[pulumi.Input[str]] = None,
luns: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IscsiLunArgs']]]]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
managed_by_extended: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
static_acls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AclArgs']]]]] = None,
target_iqn: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Response for iSCSI Target requests.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'IscsiTargetAclMode']] acl_mode: Mode for Target connectivity.
:param pulumi.Input[str] disk_pool_name: The name of the Disk Pool.
:param pulumi.Input[str] iscsi_target_name: The name of the iSCSI Target.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IscsiLunArgs']]]] luns: List of LUNs to be exposed through iSCSI Target.
:param pulumi.Input[str] managed_by: Azure resource id. Indicates if this resource is managed by another Azure resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] managed_by_extended: List of Azure resource ids that manage this resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AclArgs']]]] static_acls: Access Control List (ACL) for an iSCSI Target; defines LUN masking policy
:param pulumi.Input[str] target_iqn: iSCSI Target IQN (iSCSI Qualified Name); example: "iqn.2005-03.org.iscsi:server".
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IscsiTargetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Response for iSCSI Target requests.
:param str resource_name: The name of the resource.
:param IscsiTargetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IscsiTargetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl_mode: Optional[pulumi.Input[Union[str, 'IscsiTargetAclMode']]] = None,
disk_pool_name: Optional[pulumi.Input[str]] = None,
iscsi_target_name: Optional[pulumi.Input[str]] = None,
luns: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IscsiLunArgs']]]]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
managed_by_extended: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
static_acls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AclArgs']]]]] = None,
target_iqn: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IscsiTargetArgs.__new__(IscsiTargetArgs)
if acl_mode is None and not opts.urn:
raise TypeError("Missing required property 'acl_mode'")
__props__.__dict__["acl_mode"] = acl_mode
if disk_pool_name is None and not opts.urn:
raise TypeError("Missing required property 'disk_pool_name'")
__props__.__dict__["disk_pool_name"] = disk_pool_name
__props__.__dict__["iscsi_target_name"] = iscsi_target_name
__props__.__dict__["luns"] = luns
__props__.__dict__["managed_by"] = managed_by
__props__.__dict__["managed_by_extended"] = managed_by_extended
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["static_acls"] = static_acls
__props__.__dict__["target_iqn"] = target_iqn
__props__.__dict__["endpoints"] = None
__props__.__dict__["name"] = None
__props__.__dict__["port"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["sessions"] = None
__props__.__dict__["status"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storagepool/v20210801:IscsiTarget"), pulumi.Alias(type_="azure-native:storagepool:IscsiTarget"), pulumi.Alias(type_="azure-nextgen:storagepool:IscsiTarget"), pulumi.Alias(type_="azure-native:storagepool/v20200315preview:IscsiTarget"), pulumi.Alias(type_="azure-nextgen:storagepool/v20200315preview:IscsiTarget"), pulumi.Alias(type_="azure-native:storagepool/v20210401preview:IscsiTarget"), pulumi.Alias(type_="azure-nextgen:storagepool/v20210401preview:IscsiTarget")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(IscsiTarget, __self__).__init__(
'azure-native:storagepool/v20210801:IscsiTarget',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'IscsiTarget':
"""
Get an existing IscsiTarget resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = IscsiTargetArgs.__new__(IscsiTargetArgs)
__props__.__dict__["acl_mode"] = None
__props__.__dict__["endpoints"] = None
__props__.__dict__["luns"] = None
__props__.__dict__["managed_by"] = None
__props__.__dict__["managed_by_extended"] = None
__props__.__dict__["name"] = None
__props__.__dict__["port"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["sessions"] = None
__props__.__dict__["static_acls"] = None
__props__.__dict__["status"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["target_iqn"] = None
__props__.__dict__["type"] = None
return IscsiTarget(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="aclMode")
def acl_mode(self) -> pulumi.Output[str]:
"""
Mode for Target connectivity.
"""
return pulumi.get(self, "acl_mode")
@property
@pulumi.getter
def endpoints(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of private IPv4 addresses to connect to the iSCSI Target.
"""
return pulumi.get(self, "endpoints")
@property
@pulumi.getter
def luns(self) -> pulumi.Output[Optional[Sequence['outputs.IscsiLunResponse']]]:
"""
List of LUNs to be exposed through iSCSI Target.
"""
return pulumi.get(self, "luns")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> pulumi.Output[str]:
"""
Azure resource id. Indicates if this resource is managed by another Azure resource.
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter(name="managedByExtended")
def managed_by_extended(self) -> pulumi.Output[Sequence[str]]:
"""
List of Azure resource ids that manage this resource.
"""
return pulumi.get(self, "managed_by_extended")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[int]]:
"""
The port used by iSCSI Target portal group.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
State of the operation on the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sessions(self) -> pulumi.Output[Sequence[str]]:
"""
List of identifiers for active sessions on the iSCSI target
"""
return pulumi.get(self, "sessions")
@property
@pulumi.getter(name="staticAcls")
def static_acls(self) -> pulumi.Output[Optional[Sequence['outputs.AclResponse']]]:
"""
Access Control List (ACL) for an iSCSI Target; defines LUN masking policy
"""
return pulumi.get(self, "static_acls")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
Operational status of the iSCSI Target.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemMetadataResponse']:
"""
Resource metadata required by ARM RPC
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="targetIqn")
def target_iqn(self) -> pulumi.Output[str]:
"""
iSCSI Target IQN (iSCSI Qualified Name); example: "iqn.2005-03.org.iscsi:server".
"""
return pulumi.get(self, "target_iqn")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
| 44.086747 | 555 | 0.648885 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['IscsiTargetArgs', 'IscsiTarget']
@pulumi.input_type
class IscsiTargetArgs:
def __init__(__self__, *,
acl_mode: pulumi.Input[Union[str, 'IscsiTargetAclMode']],
disk_pool_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
iscsi_target_name: Optional[pulumi.Input[str]] = None,
luns: Optional[pulumi.Input[Sequence[pulumi.Input['IscsiLunArgs']]]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
managed_by_extended: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
static_acls: Optional[pulumi.Input[Sequence[pulumi.Input['AclArgs']]]] = None,
target_iqn: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "acl_mode", acl_mode)
pulumi.set(__self__, "disk_pool_name", disk_pool_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if iscsi_target_name is not None:
pulumi.set(__self__, "iscsi_target_name", iscsi_target_name)
if luns is not None:
pulumi.set(__self__, "luns", luns)
if managed_by is not None:
pulumi.set(__self__, "managed_by", managed_by)
if managed_by_extended is not None:
pulumi.set(__self__, "managed_by_extended", managed_by_extended)
if static_acls is not None:
pulumi.set(__self__, "static_acls", static_acls)
if target_iqn is not None:
pulumi.set(__self__, "target_iqn", target_iqn)
@property
@pulumi.getter(name="aclMode")
def acl_mode(self) -> pulumi.Input[Union[str, 'IscsiTargetAclMode']]:
return pulumi.get(self, "acl_mode")
@acl_mode.setter
def acl_mode(self, value: pulumi.Input[Union[str, 'IscsiTargetAclMode']]):
pulumi.set(self, "acl_mode", value)
@property
@pulumi.getter(name="diskPoolName")
def disk_pool_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "disk_pool_name")
@disk_pool_name.setter
def disk_pool_name(self, value: pulumi.Input[str]):
pulumi.set(self, "disk_pool_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="iscsiTargetName")
def iscsi_target_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "iscsi_target_name")
@iscsi_target_name.setter
def iscsi_target_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iscsi_target_name", value)
@property
@pulumi.getter
def luns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IscsiLunArgs']]]]:
return pulumi.get(self, "luns")
@luns.setter
def luns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IscsiLunArgs']]]]):
pulumi.set(self, "luns", value)
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "managed_by")
@managed_by.setter
def managed_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "managed_by", value)
@property
@pulumi.getter(name="managedByExtended")
def managed_by_extended(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "managed_by_extended")
@managed_by_extended.setter
def managed_by_extended(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "managed_by_extended", value)
@property
@pulumi.getter(name="staticAcls")
def static_acls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AclArgs']]]]:
return pulumi.get(self, "static_acls")
@static_acls.setter
def static_acls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AclArgs']]]]):
pulumi.set(self, "static_acls", value)
@property
@pulumi.getter(name="targetIqn")
def target_iqn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "target_iqn")
@target_iqn.setter
def target_iqn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_iqn", value)
class IscsiTarget(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl_mode: Optional[pulumi.Input[Union[str, 'IscsiTargetAclMode']]] = None,
disk_pool_name: Optional[pulumi.Input[str]] = None,
iscsi_target_name: Optional[pulumi.Input[str]] = None,
luns: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IscsiLunArgs']]]]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
managed_by_extended: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
static_acls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AclArgs']]]]] = None,
target_iqn: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: IscsiTargetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IscsiTargetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
acl_mode: Optional[pulumi.Input[Union[str, 'IscsiTargetAclMode']]] = None,
disk_pool_name: Optional[pulumi.Input[str]] = None,
iscsi_target_name: Optional[pulumi.Input[str]] = None,
luns: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IscsiLunArgs']]]]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
managed_by_extended: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
static_acls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AclArgs']]]]] = None,
target_iqn: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IscsiTargetArgs.__new__(IscsiTargetArgs)
if acl_mode is None and not opts.urn:
raise TypeError("Missing required property 'acl_mode'")
__props__.__dict__["acl_mode"] = acl_mode
if disk_pool_name is None and not opts.urn:
raise TypeError("Missing required property 'disk_pool_name'")
__props__.__dict__["disk_pool_name"] = disk_pool_name
__props__.__dict__["iscsi_target_name"] = iscsi_target_name
__props__.__dict__["luns"] = luns
__props__.__dict__["managed_by"] = managed_by
__props__.__dict__["managed_by_extended"] = managed_by_extended
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["static_acls"] = static_acls
__props__.__dict__["target_iqn"] = target_iqn
__props__.__dict__["endpoints"] = None
__props__.__dict__["name"] = None
__props__.__dict__["port"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["sessions"] = None
__props__.__dict__["status"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storagepool/v20210801:IscsiTarget"), pulumi.Alias(type_="azure-native:storagepool:IscsiTarget"), pulumi.Alias(type_="azure-nextgen:storagepool:IscsiTarget"), pulumi.Alias(type_="azure-native:storagepool/v20200315preview:IscsiTarget"), pulumi.Alias(type_="azure-nextgen:storagepool/v20200315preview:IscsiTarget"), pulumi.Alias(type_="azure-native:storagepool/v20210401preview:IscsiTarget"), pulumi.Alias(type_="azure-nextgen:storagepool/v20210401preview:IscsiTarget")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(IscsiTarget, __self__).__init__(
'azure-native:storagepool/v20210801:IscsiTarget',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'IscsiTarget':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = IscsiTargetArgs.__new__(IscsiTargetArgs)
__props__.__dict__["acl_mode"] = None
__props__.__dict__["endpoints"] = None
__props__.__dict__["luns"] = None
__props__.__dict__["managed_by"] = None
__props__.__dict__["managed_by_extended"] = None
__props__.__dict__["name"] = None
__props__.__dict__["port"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["sessions"] = None
__props__.__dict__["static_acls"] = None
__props__.__dict__["status"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["target_iqn"] = None
__props__.__dict__["type"] = None
return IscsiTarget(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="aclMode")
def acl_mode(self) -> pulumi.Output[str]:
return pulumi.get(self, "acl_mode")
@property
@pulumi.getter
def endpoints(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "endpoints")
@property
@pulumi.getter
def luns(self) -> pulumi.Output[Optional[Sequence['outputs.IscsiLunResponse']]]:
return pulumi.get(self, "luns")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> pulumi.Output[str]:
return pulumi.get(self, "managed_by")
@property
@pulumi.getter(name="managedByExtended")
def managed_by_extended(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "managed_by_extended")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "port")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sessions(self) -> pulumi.Output[Sequence[str]]:
return pulumi.get(self, "sessions")
@property
@pulumi.getter(name="staticAcls")
def static_acls(self) -> pulumi.Output[Optional[Sequence['outputs.AclResponse']]]:
return pulumi.get(self, "static_acls")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
return pulumi.get(self, "status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemMetadataResponse']:
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="targetIqn")
def target_iqn(self) -> pulumi.Output[str]:
return pulumi.get(self, "target_iqn")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
| true | true |
f72033a5af1e9762b3334c21931344ddd76417f7 | 23,367 | py | Python | perf/benchmark/runner/runner.py | clarketm/tools | 90477465af903c71aa9c6ae97dadb77a8ca7b92a | [
"Apache-2.0"
] | 1 | 2020-07-26T17:56:44.000Z | 2020-07-26T17:56:44.000Z | perf/benchmark/runner/runner.py | clarketm/tools | 90477465af903c71aa9c6ae97dadb77a8ca7b92a | [
"Apache-2.0"
] | 7 | 2021-03-19T13:20:19.000Z | 2022-03-31T13:57:13.000Z | perf/benchmark/runner/runner.py | clarketm/tools | 90477465af903c71aa9c6ae97dadb77a8ca7b92a | [
"Apache-2.0"
] | null | null | null | # Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import os
import json
import socket
import argparse
import subprocess
import shlex
import uuid
import sys
import tempfile
import time
from subprocess import getoutput
from urllib.parse import urlparse
import yaml
from fortio import METRICS_START_SKIP_DURATION, METRICS_END_SKIP_DURATION
NAMESPACE = os.environ.get("NAMESPACE", "twopods")
NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD = 9999
POD = collections.namedtuple('Pod', ['name', 'namespace', 'ip', 'labels'])
NIGHTHAWK_DOCKER_IMAGE = "envoyproxy/nighthawk-dev:59683b759eb8f8bd8cce282795c08f9e2b3313d4"
def pod_info(filterstr="", namespace=NAMESPACE, multi_ok=True):
cmd = "kubectl -n {namespace} get pod {filterstr} -o json".format(
namespace=namespace, filterstr=filterstr)
op = getoutput(cmd)
o = json.loads(op)
items = o['items']
if not multi_ok and len(items) > 1:
raise Exception("more than one found " + op)
if not items:
raise Exception("no pods found with command [" + cmd + "]")
i = items[0]
return POD(i['metadata']['name'], i['metadata']['namespace'],
i['status']['podIP'], i['metadata']['labels'])
def run_command(command):
process = subprocess.Popen(shlex.split(command))
process.wait()
def run_command_sync(command):
op = getoutput(command)
return op.strip()
# kubeclt related helper funcs
def kubectl_cp(from_file, to_file, container):
cmd = "kubectl --namespace {namespace} cp {from_file} {to_file} -c {container}".format(
namespace=NAMESPACE,
from_file=from_file,
to_file=to_file,
container=container)
print(cmd, flush=True)
run_command_sync(cmd)
def kubectl_exec(pod, remote_cmd, runfn=run_command, container=None):
c = ""
if container is not None:
c = "-c " + container
cmd = "kubectl --namespace {namespace} exec {pod} {c} -- {remote_cmd}".format(
pod=pod,
remote_cmd=remote_cmd,
c=c,
namespace=NAMESPACE)
print(cmd, flush=True)
runfn(cmd)
class Fortio:
ports = {
"http": {"direct_port": 8077, "port": 8080},
"grpc": {"direct_port": 8076, "port": 8079},
"direct_envoy": {"direct_port": 8076, "port": 8079},
}
def __init__(
self,
headers=None,
conn=None,
qps=None,
duration=None,
size=None,
mode="http",
telemetry_mode="mixer",
perf_record=False,
server="fortioserver",
client="fortioclient",
additional_args=None,
filter_fn=None,
extra_labels=None,
baseline=False,
serversidecar=False,
clientsidecar=False,
bothsidecar=True,
ingress=None,
mesh="istio",
cacert=None,
load_gen_type="fortio"):
self.run_id = str(uuid.uuid4()).partition('-')[0]
self.headers = headers
self.conn = conn
self.qps = qps
self.size = size
self.duration = duration
self.mode = mode
self.ns = NAMESPACE
# bucket resolution in seconds
self.r = "0.00005"
self.telemetry_mode = telemetry_mode
self.perf_record = perf_record
self.server = pod_info("-lapp=" + server, namespace=self.ns)
self.client = pod_info("-lapp=" + client, namespace=self.ns)
self.additional_args = additional_args
self.filter_fn = filter_fn
self.extra_labels = extra_labels
self.run_baseline = baseline
self.run_serversidecar = serversidecar
self.run_clientsidecar = clientsidecar
self.run_bothsidecar = bothsidecar
self.run_ingress = ingress
self.cacert = cacert
self.load_gen_type = load_gen_type
if mesh == "linkerd":
self.mesh = "linkerd"
elif mesh == "istio":
self.mesh = "istio"
else:
sys.exit("invalid mesh %s, must be istio or linkerd" % mesh)
def get_protocol_uri_fragment(self):
return "https" if self.mode == "grpc" else "http"
def compute_uri(self, svc, port_type):
if self.load_gen_type == "fortio":
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return basestr.format(svc=svc, port=self.ports[self.mode][port_type], size=self.size)
elif self.load_gen_type == "nighthawk":
return "{protocol}://{svc}:{port}/".format(
svc=svc, port=self.ports[self.mode][port_type], protocol=self.get_protocol_uri_fragment())
else:
sys.exit("invalid load generator %s, must be fortio or nighthawk", self.load_gen_type)
def nosidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.ip, "direct_port")
def serversidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.ip, "port")
def clientsidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "direct_port")
def bothsidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "port")
def ingress(self, load_gen_cmd):
url = urlparse(self.run_ingress)
# If scheme is not defined fallback to http
if url.scheme == "":
url = urlparse("http://{svc}".format(svc=self.run_ingress))
return load_gen_cmd + "_ingress {url}/echo?size={size}".format(
url=url.geturl(), size=self.size)
def execute_sidecar_mode(self, sidecar_mode, load_gen_type, load_gen_cmd, sidecar_mode_func, labels, perf_label_suffix):
print('-------------- Running in {sidecar_mode} mode --------------'.format(sidecar_mode=sidecar_mode))
if load_gen_type == "fortio":
kubectl_exec(self.client.name, sidecar_mode_func(load_gen_cmd, sidecar_mode))
elif load_gen_type == "nighthawk":
run_nighthawk(self.client.name, sidecar_mode_func(load_gen_type, sidecar_mode), labels + "_" + sidecar_mode)
if self.perf_record and len(perf_label_suffix) > 0:
run_perf(
self.mesh,
self.server.name,
labels + perf_label_suffix,
duration=40)
def generate_test_labels(self, conn, qps, size):
size = size or self.size
labels = self.run_id
labels += "_qps_" + str(qps)
labels += "_c_" + str(conn)
labels += "_" + str(size)
if self.mesh == "istio":
labels += "_"
labels += self.telemetry_mode
elif self.mesh == "linkerd":
labels += "_"
labels += "linkerd"
if self.extra_labels is not None:
labels += "_" + self.extra_labels
return labels
def generate_headers_cmd(self, headers):
headers_cmd = ""
if headers is not None:
for header_val in headers.split(","):
headers_cmd += "-H=" + header_val + " "
return headers_cmd
def generate_fortio_cmd(self, headers_cmd, conn, qps, duration, grpc, cacert_arg, labels):
if duration is None:
duration = self.duration
fortio_cmd = (
"fortio load {headers} -c {conn} -qps {qps} -t {duration}s -a -r {r} {cacert_arg} {grpc} "
"-httpbufferkb=128 -labels {labels}").format(
headers=headers_cmd,
conn=conn,
qps=qps,
duration=duration,
r=self.r,
grpc=grpc,
cacert_arg=cacert_arg,
labels=labels)
return fortio_cmd
def generate_nighthawk_cmd(self, cpus, conn, qps, duration, labels):
nighthawk_args = [
"nighthawk_client",
"--concurrency {cpus}",
"--output-format json",
"--prefetch-connections",
"--open-loop",
"--jitter-uniform 0.0001s",
"--experimental-h1-connection-reuse-strategy lru",
"--experimental-h2-use-multiple-connections",
"--nighthawk-service 127.0.0.1:{port_forward}",
"--label Nighthawk",
"--connections {conn}",
"--rps {qps}",
"--duration {duration}",
"--request-header \"x-nighthawk-test-server-config: {{response_body_size:{size}}}\""
]
# Our "gRPC" mode actually means:
# - https (see get_protocol_uri_fragment())
# - h2
# - with long running connections
# - Also transfer request body sized according to "size".
if self.mode == "grpc":
nighthawk_args.append("--h2")
if self.size:
nighthawk_args.append(
"--request-header \"content-length: {size}\"")
# Note: Labels is the last arg, and there's stuff depending on that.
# watch out when moving it.
nighthawk_args.append("--label {labels}")
# As the worker count acts as a multiplier, we divide by qps/conn by the number of cpu's to spread load accross the workers so the sum of the workers will target the global qps/connection levels.
nighthawk_cmd = " ".join(nighthawk_args).format(
conn=round(conn / cpus),
qps=round(qps / cpus),
duration=duration,
labels=labels,
size=self.size,
cpus=cpus,
port_forward=NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD)
return nighthawk_cmd
def run(self, headers, conn, qps, size, duration):
    """Run one (conn, qps) load-test point in every enabled sidecar mode.

    Builds the load-generator command once, then dispatches it per mode in a
    fixed order: baseline, serveronly, clientonly, both, then ingress.
    """
    labels = self.generate_test_labels(conn, qps, size)
    grpc = "-grpc -ping" if self.mode == "grpc" else ""
    cacert_arg = "" if self.cacert is None else "-cacert {cacert_path}".format(cacert_path=self.cacert)
    headers_cmd = self.generate_headers_cmd(headers)
    load_gen_cmd = ""
    if self.load_gen_type == "fortio":
        load_gen_cmd = self.generate_fortio_cmd(headers_cmd, conn, qps, duration, grpc, cacert_arg, labels)
    elif self.load_gen_type == "nighthawk":
        # Restricted to a single nighthawk worker to keep results comparable
        # and avoid noise; see the upstream TODO about choosing the right
        # concurrency for Nighthawk.
        load_gen_cmd = self.generate_nighthawk_cmd(1, conn, qps, duration, labels)
    # (enabled flag, mode name, uri builder, perf label suffix) — order matters.
    sidecar_modes = (
        (self.run_baseline, "baseline", self.nosidecar, ""),
        (self.run_serversidecar, "serveronly", self.serversidecar, "_srv_serveronly"),
        (self.run_clientsidecar, "clientonly", self.clientsidecar, "_srv_clientonly"),
        (self.run_bothsidecar, "both", self.bothsidecar, "_srv_bothsidecars"),
    )
    for enabled, mode_name, builder, perf_suffix in sidecar_modes:
        if enabled:
            self.execute_sidecar_mode(mode_name, self.load_gen_type, load_gen_cmd, builder, labels, perf_suffix)
    if self.run_ingress:
        print('-------------- Running in ingress mode --------------')
        kubectl_exec(self.client.name, self.ingress(load_gen_cmd))
        if self.perf_record:
            run_perf(
                self.mesh,
                self.server.name,
                labels + "_srv_ingress",
                duration=40)
# Path of the perf binary inside the proxy container (appears unused in the
# visible code; presumably kept for manual invocation — TODO confirm).
PERFCMD = "/usr/lib/linux-tools/4.4.0-131-generic/perf"
# Helper scripts: flame-graph renderer and perf-data capture script.
FLAMESH = "flame.sh"
PERFSH = "get_perfdata.sh"
# Working directory for perf artifacts inside the proxy container.
PERFWD = "/etc/istio/proxy/"
WD = os.getcwd()
# Local flame-graph tooling layout, resolved relative to the current directory.
LOCAL_FLAMEDIR = os.path.join(WD, "../flame/")
LOCAL_FLAMEPATH = LOCAL_FLAMEDIR + FLAMESH
LOCAL_PERFPATH = LOCAL_FLAMEDIR + PERFSH
LOCAL_FLAMEOUTPUT = LOCAL_FLAMEDIR + "flameoutput/"
def run_perf(mesh, pod, labels, duration=20):
    """Capture perf data in the pod's sidecar proxy container and render a flame graph.

    Copies the capture script into the "<mesh>-proxy" container, runs it,
    copies the resulting .perf file back and feeds it to the local flame.sh.
    """
    filename = labels + "_perf.data"
    filepath = PERFWD + filename
    perfpath = PERFWD + PERFSH
    # copy executable over
    kubectl_cp(LOCAL_PERFPATH, pod + ":" + perfpath, mesh + "-proxy")
    # BUG FIX: the command template contained the literal "(unknown)" instead of
    # the {filename} placeholder, so the output file name was never passed to
    # the capture script even though filename= was supplied to format().
    kubectl_exec(
        pod,
        "{perf_cmd} {filename} {duration}".format(
            perf_cmd=perfpath,
            filename=filename,
            duration=duration),
        container=mesh + "-proxy")
    kubectl_cp(pod + ":" + filepath + ".perf", LOCAL_FLAMEOUTPUT + filename + ".perf", mesh + "-proxy")
    run_command_sync(LOCAL_FLAMEPATH + " " + filename + ".perf")
def validate_job_config(job_config):
    """Return True when the YAML job config has all required, correctly-typed keys."""
    required_fields = {"conn": list, "qps": list, "duration": int}
    for name, expected_type in required_fields.items():
        if name not in job_config:
            print("missing required parameter {}".format(name))
            return False
        if not isinstance(job_config[name], expected_type):
            print("expecting type of parameter {} to be {}, got {}".format(name, expected_type, type(job_config[name])))
            return False
    return True
def fortio_from_config_file(args):
    """Build a Fortio runner from a YAML job file instead of CLI flags.

    Exits the process when the config fails validation.
    """
    with open(args.config_file) as f:
        job_config = yaml.safe_load(f)
    if not validate_job_config(job_config):
        exit(1)
    # TODO: hard to parse yaml into object directly because of existing constructor from CLI
    fortio = Fortio()
    # (attribute, yaml key, default) — mirrors the CLI defaults.
    settings = (
        ("headers", "headers", None),
        ("conn", "conn", 16),
        ("qps", "qps", 1000),
        ("duration", "duration", 240),
        ("load_gen_type", "load_gen_type", "fortio"),
        ("telemetry_mode", "telemetry_mode", "mixer"),
        ("metrics", "metrics", "p90"),
        ("size", "size", 1024),
        ("run_serversidecar", "run_serversidecar", False),
        ("run_clientsidecar", "run_clientsidecar", False),
        ("run_bothsidecar", "run_bothsidecar", True),
        ("run_baseline", "run_baseline", False),
        ("run_ingress", "run_ingress", False),
        ("mesh", "mesh", "istio"),
        ("mode", "mode", "http"),
        ("extra_labels", "extra_labels", None),
    )
    for attr, key, default in settings:
        setattr(fortio, attr, job_config.get(key, default))
    # perf recording is not supported when driven from a config file.
    fortio.perf_record = False
    return fortio
def can_connect_to_nighthawk_service():
    """Return True when the local port-forward to nighthawk_service accepts a TCP connect."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        result = sock.connect_ex(('127.0.0.1', NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD))
    return result == 0
def run_perf_test(args):
    """Entry point: build a Fortio runner (from CLI args or a config file) and run the sweep.

    Sets up a local port-forward to the in-cluster nighthawk_service when one
    is not already reachable, then runs every (conn, qps) combination.
    """
    min_duration = METRICS_START_SKIP_DURATION + METRICS_END_SKIP_DURATION
    # run with config files
    if args.config_file is not None:
        fortio = fortio_from_config_file(args)
    else:
        fortio = Fortio(
            headers=args.headers,
            conn=args.conn,
            qps=args.qps,
            duration=args.duration,
            size=args.size,
            perf_record=args.perf,
            extra_labels=args.extra_labels,
            baseline=args.baseline,
            serversidecar=args.serversidecar,
            clientsidecar=args.clientsidecar,
            bothsidecar=args.bothsidecar,
            ingress=args.ingress,
            mode=args.mode,
            mesh=args.mesh,
            telemetry_mode=args.telemetry_mode,
            cacert=args.cacert,
            load_gen_type=args.load_gen_type)
    # The measured window must be non-empty after skipping warmup/cooldown.
    if fortio.duration <= min_duration:
        print("Duration must be greater than {min_duration}".format(
            min_duration=min_duration))
        exit(1)
    # Create a port_forward for accessing nighthawk_service.
    # BUG FIX: `process` was only assigned inside the `if` below, but the
    # `finally` clause unconditionally called process.kill(), raising
    # NameError whenever the port-forward already existed.
    process = None
    if not can_connect_to_nighthawk_service():
        popen_cmd = "kubectl -n \"{ns}\" port-forward svc/fortioclient {port}:9999".format(
            ns=NAMESPACE,
            port=NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD)
        process = subprocess.Popen(shlex.split(
            popen_cmd), stdout=subprocess.PIPE)
        # Give the forward up to ~5 seconds to come up.
        max_tries = 10
        while max_tries > 0 and not can_connect_to_nighthawk_service():
            time.sleep(0.5)
            max_tries = max_tries - 1
    if not can_connect_to_nighthawk_service():
        print("Failure connecting to nighthawk_service")
        sys.exit(-1)
    else:
        print("Able to connect to nighthawk_service, proceeding")
    try:
        for conn in fortio.conn:
            for qps in fortio.qps:
                fortio.run(headers=fortio.headers, conn=conn, qps=qps,
                           duration=fortio.duration, size=fortio.size)
    finally:
        # Only kill the port-forward we started ourselves.
        if process is not None:
            process.kill()
def run_nighthawk(pod, remote_cmd, labels):
    """Execute a nighthawk_client command via a local docker container.

    On success, saves the raw json, renders a human-readable report, converts
    the json to Fortio's report format and copies it into the Fortio report
    server's data directory inside the client pod. On failure, dumps the
    child's stdout/stderr.
    """
    # Use a local docker instance of Nighthawk to control nighthawk_service running in the pod
    # and run transforms on the output we get.
    docker_cmd = "docker run --rm --network=host {docker_image} {remote_cmd}".format(
        docker_image=NIGHTHAWK_DOCKER_IMAGE, remote_cmd=remote_cmd)
    print(docker_cmd, flush=True)
    process = subprocess.Popen(shlex.split(docker_cmd), stdout=subprocess.PIPE)
    (output, err) = process.communicate()
    exit_code = process.wait()
    if exit_code == 0:
        # The temp file only reserves a unique base path; the artifacts are
        # written next to it with ".json" suffixes after it is deleted.
        with tempfile.NamedTemporaryFile(dir='/tmp', delete=True) as tmpfile:
            dest = tmpfile.name
        with open("%s.json" % dest, 'wb') as f:
            f.write(output)
        print("Dumped Nighthawk's json to {dest}".format(dest=dest))
        # Send human readable output to the command line.
        os.system(
            "cat {dest}.json | docker run -i --rm {docker_image} nighthawk_output_transform --output-format human".format(docker_image=NIGHTHAWK_DOCKER_IMAGE, dest=dest))
        # Transform to Fortio's reporting server json format
        os.system("cat {dest}.json | docker run -i --rm {docker_image} nighthawk_output_transform --output-format fortio > {dest}.fortio.json".format(
            dest=dest, docker_image=NIGHTHAWK_DOCKER_IMAGE))
        # Copy to the Fortio report server data directory.
        # TODO(oschaaf): We output the global aggregated statistics here of request_to_response, which excludes connection set up time.
        # It would be nice to dump a series instead, as we have more details available in the Nighthawk json:
        # - queue/connect time
        # - time spend blocking in closed loop mode
        # - initiation time to completion (spanning the complete lifetime of a request/reply, including queue/connect time)
        # - per worker output may sometimes help interpret plots that don't have a nice knee-shaped shape.
        kubectl_cp("{dest}.fortio.json".format(
            dest=dest), "{pod}:/var/lib/fortio/{datetime}_nighthawk_{labels}.json".format(pod=pod, labels=labels, datetime=time.strftime("%Y-%m-%d-%H%M%S")), "shell")
    else:
        print("nighthawk remote execution error: %s" % exit_code)
        if output:
            print("--> stdout: %s" % output.decode("utf-8"))
        if err:
            print("--> stderr: %s" % err.decode("utf-8"))
def csv_to_int(s):
    """Parse a comma-separated string such as "1,2,4" into a list of ints."""
    return list(map(int, s.split(",")))
def get_parser():
    """Build the CLI argument parser for the perf-test runner."""
    parser = argparse.ArgumentParser("Run performance test")
    # (flag, kwargs) pairs, declared in the original order.
    options = [
        ("--headers", dict(help="a list of `header:value` should be separated by comma", default=None)),
        ("--conn", dict(help="number of connections, comma separated list", type=csv_to_int)),
        ("--qps", dict(help="qps, comma separated list", type=csv_to_int)),
        ("--duration", dict(help="duration in seconds of the extract", type=int)),
        ("--size", dict(help="size of the payload", type=int, default=1024)),
        ("--mesh", dict(help="istio or linkerd", default="istio")),
        ("--telemetry_mode", dict(help="run with different mixer configurations: mixer, none, telemetryv2", default="mixer")),
        ("--client", dict(help="where to run the test from", default=None)),
        ("--server", dict(help="pod ip of the server", default=None)),
        ("--perf", dict(help="also run perf and produce flame graph", default=False)),
        ("--ingress", dict(help="run traffic through ingress, should be a valid URL", default=None)),
        ("--extra_labels", dict(help="extra labels", default=None)),
        ("--mode", dict(help="http or grpc", default="http")),
        ("--config_file", dict(help="config yaml file", default=None)),
        ("--cacert", dict(help="path to the cacert for the fortio client inside the container", default=None)),
        ("--load_gen_type", dict(help="fortio or nighthawk", default="fortio")),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    define_bool(parser, "baseline", "run baseline for all", False)
    define_bool(parser, "serversidecar",
                "run serversidecar-only for all", False)
    define_bool(parser, "clientsidecar",
                "run clientsidecar-only for all", False)
    define_bool(parser, "bothsidecar",
                "run both clientsiecar and serversidecar", True)
    return parser
def define_bool(parser, opt, help_arg, default_val):
    """Register paired --<opt>/--no_<opt> boolean flags with a shared default."""
    parser.add_argument(
        "--" + opt, help=help_arg, dest=opt, action='store_true')
    parser.add_argument(
        "--no_" + opt, help="do not " + help_arg, dest=opt, action='store_false')
    parser.set_defaults(**{opt: default_val})
def main(argv):
    """Parse CLI arguments, echo them, and launch the perf test."""
    parsed = get_parser().parse_args(argv)
    print(parsed)
    return run_perf_test(parsed)
# Script entry point: forward the CLI args (minus the program name) to main()
# and exit with its return code.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| 37.567524 | 203 | 0.61711 |
from __future__ import print_function
import collections
import os
import json
import socket
import argparse
import subprocess
import shlex
import uuid
import sys
import tempfile
import time
from subprocess import getoutput
from urllib.parse import urlparse
import yaml
from fortio import METRICS_START_SKIP_DURATION, METRICS_END_SKIP_DURATION
# Kubernetes namespace the client/server pods live in (override via $NAMESPACE).
NAMESPACE = os.environ.get("NAMESPACE", "twopods")
# Local TCP port used to port-forward to the in-cluster nighthawk gRPC service.
NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD = 9999
# Minimal pod description extracted from `kubectl get pod -o json` output.
POD = collections.namedtuple('Pod', ['name', 'namespace', 'ip', 'labels'])
# Pinned Nighthawk image used both to drive load and to transform its output.
NIGHTHAWK_DOCKER_IMAGE = "envoyproxy/nighthawk-dev:59683b759eb8f8bd8cce282795c08f9e2b3313d4"
def pod_info(filterstr="", namespace=NAMESPACE, multi_ok=True):
    """Look up a pod via kubectl and return it as a POD namedtuple.

    Raises when nothing matches, or when several pods match and multi_ok
    is False. When several match and multi_ok is True, the first is used.
    """
    cmd = "kubectl -n {namespace} get pod {filterstr} -o json".format(
        namespace=namespace, filterstr=filterstr)
    op = getoutput(cmd)
    items = json.loads(op)['items']
    if not multi_ok and len(items) > 1:
        raise Exception("more than one found " + op)
    if not items:
        raise Exception("no pods found with command [" + cmd + "]")
    first = items[0]
    meta = first['metadata']
    return POD(meta['name'], meta['namespace'],
               first['status']['podIP'], meta['labels'])
def run_command(command):
    """Run a tokenized shell command and block until it finishes."""
    subprocess.Popen(shlex.split(command)).wait()
def run_command_sync(command):
    """Run a shell command and return its combined output, stripped."""
    return getoutput(command).strip()
def kubectl_cp(from_file, to_file, container):
    """Copy a file into/out of a pod container with `kubectl cp`."""
    cp_cmd = "kubectl --namespace {namespace} cp {from_file} {to_file} -c {container}".format(
        container=container,
        to_file=to_file,
        from_file=from_file,
        namespace=NAMESPACE)
    # Echo the command for traceability before running it.
    print(cp_cmd, flush=True)
    run_command_sync(cp_cmd)
def kubectl_exec(pod, remote_cmd, runfn=run_command, container=None):
    """Execute a command inside a pod (optionally a specific container).

    `runfn` lets callers pick the execution strategy (default: fire and wait).
    """
    container_arg = "-c " + container if container is not None else ""
    exec_cmd = "kubectl --namespace {namespace} exec {pod} {c} -- {remote_cmd}".format(
        pod=pod,
        remote_cmd=remote_cmd,
        c=container_arg,
        namespace=NAMESPACE)
    print(exec_cmd, flush=True)
    runfn(exec_cmd)
class Fortio:
    """Drives fortio/nighthawk load tests between the fortioclient and fortioserver pods.

    Holds the sweep configuration (connections, qps, duration, payload size,
    mesh, telemetry mode, ...) and builds the load-generator command line for
    every sidecar mode (baseline / server-only / client-only / both / ingress).
    """

    # Server ports per protocol: "direct_port" bypasses the sidecar proxy,
    # "port" goes through it.
    ports = {
        "http": {"direct_port": 8077, "port": 8080},
        "grpc": {"direct_port": 8076, "port": 8079},
        "direct_envoy": {"direct_port": 8076, "port": 8079},
    }

    def __init__(
            self,
            headers=None,
            conn=None,
            qps=None,
            duration=None,
            size=None,
            mode="http",
            telemetry_mode="mixer",
            perf_record=False,
            server="fortioserver",
            client="fortioclient",
            additional_args=None,
            filter_fn=None,
            extra_labels=None,
            baseline=False,
            serversidecar=False,
            clientsidecar=False,
            bothsidecar=True,
            ingress=None,
            mesh="istio",
            cacert=None,
            load_gen_type="fortio"):
        # Short unique id so individual runs can be told apart in report labels.
        self.run_id = str(uuid.uuid4()).partition('-')[0]
        self.headers = headers
        self.conn = conn
        self.qps = qps
        self.size = size
        self.duration = duration
        self.mode = mode
        self.ns = NAMESPACE
        # fortio "-r" resolution parameter (histogram resolution).
        self.r = "0.00005"
        self.telemetry_mode = telemetry_mode
        self.perf_record = perf_record
        # Resolve the client/server pods once up front via kubectl.
        self.server = pod_info("-lapp=" + server, namespace=self.ns)
        self.client = pod_info("-lapp=" + client, namespace=self.ns)
        self.additional_args = additional_args
        self.filter_fn = filter_fn
        self.extra_labels = extra_labels
        self.run_baseline = baseline
        self.run_serversidecar = serversidecar
        self.run_clientsidecar = clientsidecar
        self.run_bothsidecar = bothsidecar
        self.run_ingress = ingress
        self.cacert = cacert
        self.load_gen_type = load_gen_type
        if mesh == "linkerd":
            self.mesh = "linkerd"
        elif mesh == "istio":
            self.mesh = "istio"
        else:
            sys.exit("invalid mesh %s, must be istio or linkerd" % mesh)

    def get_protocol_uri_fragment(self):
        """gRPC runs are driven over https/h2; everything else is plain http."""
        return "https" if self.mode == "grpc" else "http"

    def compute_uri(self, svc, port_type):
        """Build the target URI (or fortio gRPC destination) for one sidecar mode."""
        if self.load_gen_type == "fortio":
            basestr = "http://{svc}:{port}/echo?size={size}"
            if self.mode == "grpc":
                basestr = "-payload-size {size} {svc}:{port}"
            return basestr.format(svc=svc, port=self.ports[self.mode][port_type], size=self.size)
        elif self.load_gen_type == "nighthawk":
            return "{protocol}://{svc}:{port}/".format(
                svc=svc, port=self.ports[self.mode][port_type], protocol=self.get_protocol_uri_fragment())
        else:
            # BUG FIX: sys.exit() takes a single argument; the message and the
            # load generator name were passed as two arguments, which raised
            # TypeError instead of exiting with a useful message.
            sys.exit("invalid load generator %s, must be fortio or nighthawk" % self.load_gen_type)

    def nosidecar(self, load_gen_cmd, sidecar_mode):
        """Target the server pod IP directly, bypassing both sidecars."""
        return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.ip, "direct_port")

    def serversidecar(self, load_gen_cmd, sidecar_mode):
        """Hit the server pod IP through the server-side proxy port."""
        return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.ip, "port")

    def clientsidecar(self, load_gen_cmd, sidecar_mode):
        """Go through the client sidecar (server's app label, presumably the service name) to the direct port."""
        return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "direct_port")

    def bothsidecar(self, load_gen_cmd, sidecar_mode):
        """Traverse both client and server sidecars (app label + proxied port)."""
        return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "port")

    def ingress(self, load_gen_cmd):
        """Send traffic through the ingress URL configured in run_ingress."""
        url = urlparse(self.run_ingress)
        # If a bare host was supplied, default the scheme to http.
        if url.scheme == "":
            url = urlparse("http://{svc}".format(svc=self.run_ingress))
        return load_gen_cmd + "_ingress {url}/echo?size={size}".format(
            url=url.geturl(), size=self.size)

    def execute_sidecar_mode(self, sidecar_mode, load_gen_type, load_gen_cmd, sidecar_mode_func, labels, perf_label_suffix):
        """Run one sidecar mode with the prepared load-generator command."""
        print('-------------- Running in {sidecar_mode} mode --------------'.format(sidecar_mode=sidecar_mode))
        if load_gen_type == "fortio":
            kubectl_exec(self.client.name, sidecar_mode_func(load_gen_cmd, sidecar_mode))
        elif load_gen_type == "nighthawk":
            # BUG FIX: the generated command (load_gen_cmd) must be handed to
            # the uri builder; previously the literal string "nighthawk"
            # (load_gen_type) was passed instead, which discarded the whole
            # nighthawk_client command that was built in run().
            run_nighthawk(self.client.name, sidecar_mode_func(load_gen_cmd, sidecar_mode), labels + "_" + sidecar_mode)
        if self.perf_record and len(perf_label_suffix) > 0:
            run_perf(
                self.mesh,
                self.server.name,
                labels + perf_label_suffix,
                duration=40)

    def generate_test_labels(self, conn, qps, size):
        """Compose the label string that identifies this run in reports."""
        size = size or self.size
        labels = self.run_id
        labels += "_qps_" + str(qps)
        labels += "_c_" + str(conn)
        labels += "_" + str(size)
        if self.mesh == "istio":
            labels += "_"
            labels += self.telemetry_mode
        elif self.mesh == "linkerd":
            labels += "_"
            labels += "linkerd"
        if self.extra_labels is not None:
            labels += "_" + self.extra_labels
        return labels

    def generate_headers_cmd(self, headers):
        """Build the fortio "-H=<header>" flags from a comma-separated list."""
        headers_cmd = ""
        if headers is not None:
            for header_val in headers.split(","):
                headers_cmd += "-H=" + header_val + " "
        return headers_cmd

    def generate_fortio_cmd(self, headers_cmd, conn, qps, duration, grpc, cacert_arg, labels):
        """Assemble the complete "fortio load ..." command line."""
        if duration is None:
            duration = self.duration
        fortio_cmd = (
            "fortio load {headers} -c {conn} -qps {qps} -t {duration}s -a -r {r} {cacert_arg} {grpc} "
            "-httpbufferkb=128 -labels {labels}").format(
            headers=headers_cmd,
            conn=conn,
            qps=qps,
            duration=duration,
            r=self.r,
            grpc=grpc,
            cacert_arg=cacert_arg,
            labels=labels)
        return fortio_cmd

    def generate_nighthawk_cmd(self, cpus, conn, qps, duration, labels):
        """Assemble the nighthawk_client command; per-worker load is divided by `cpus`."""
        nighthawk_args = [
            "nighthawk_client",
            "--concurrency {cpus}",
            "--output-format json",
            "--prefetch-connections",
            "--open-loop",
            "--jitter-uniform 0.0001s",
            "--experimental-h1-connection-reuse-strategy lru",
            "--experimental-h2-use-multiple-connections",
            "--nighthawk-service 127.0.0.1:{port_forward}",
            "--label Nighthawk",
            "--connections {conn}",
            "--rps {qps}",
            "--duration {duration}",
            "--request-header \"x-nighthawk-test-server-config: {{response_body_size:{size}}}\""
        ]
        # "grpc" mode really means: https, h2, long-running connections, and a
        # request body sized according to `size`.
        if self.mode == "grpc":
            nighthawk_args.append("--h2")
            if self.size:
                nighthawk_args.append(
                    "--request-header \"content-length: {size}\"")
        # The test label must remain the LAST argument; downstream processing
        # depends on its position.
        nighthawk_args.append("--label {labels}")
        # Divide qps/conn by the worker count so the sum across workers hits
        # the global targets (the worker count acts as a multiplier).
        nighthawk_cmd = " ".join(nighthawk_args).format(
            conn=round(conn / cpus),
            qps=round(qps / cpus),
            duration=duration,
            labels=labels,
            size=self.size,
            cpus=cpus,
            port_forward=NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD)
        return nighthawk_cmd

    def run(self, headers, conn, qps, size, duration):
        """Run one (conn, qps) point in every enabled sidecar mode, then ingress."""
        labels = self.generate_test_labels(conn, qps, size)
        grpc = ""
        if self.mode == "grpc":
            grpc = "-grpc -ping"
        cacert_arg = ""
        if self.cacert is not None:
            cacert_arg = "-cacert {cacert_path}".format(cacert_path=self.cacert)
        headers_cmd = self.generate_headers_cmd(headers)
        load_gen_cmd = ""
        if self.load_gen_type == "fortio":
            load_gen_cmd = self.generate_fortio_cmd(headers_cmd, conn, qps, duration, grpc, cacert_arg, labels)
        elif self.load_gen_type == "nighthawk":
            # A single nighthawk worker is used to keep results comparable and
            # avoid the noise observed with more workers.
            workers = 1
            load_gen_cmd = self.generate_nighthawk_cmd(workers, conn, qps, duration, labels)
        if self.run_baseline:
            self.execute_sidecar_mode("baseline", self.load_gen_type, load_gen_cmd, self.nosidecar, labels, "")
        if self.run_serversidecar:
            self.execute_sidecar_mode("serveronly", self.load_gen_type, load_gen_cmd, self.serversidecar, labels, "_srv_serveronly")
        if self.run_clientsidecar:
            self.execute_sidecar_mode("clientonly", self.load_gen_type, load_gen_cmd, self.clientsidecar, labels, "_srv_clientonly")
        if self.run_bothsidecar:
            self.execute_sidecar_mode("both", self.load_gen_type, load_gen_cmd, self.bothsidecar, labels, "_srv_bothsidecars")
        if self.run_ingress:
            print('-------------- Running in ingress mode --------------')
            kubectl_exec(self.client.name, self.ingress(load_gen_cmd))
            if self.perf_record:
                run_perf(
                    self.mesh,
                    self.server.name,
                    labels + "_srv_ingress",
                    duration=40)
# Path of the perf binary inside the proxy container (appears unused in the
# visible code; presumably kept for manual invocation — TODO confirm).
PERFCMD = "/usr/lib/linux-tools/4.4.0-131-generic/perf"
# Helper scripts: flame-graph renderer and perf-data capture script.
FLAMESH = "flame.sh"
PERFSH = "get_perfdata.sh"
# Working directory for perf artifacts inside the proxy container.
PERFWD = "/etc/istio/proxy/"
WD = os.getcwd()
# Local flame-graph tooling layout, resolved relative to the current directory.
LOCAL_FLAMEDIR = os.path.join(WD, "../flame/")
LOCAL_FLAMEPATH = LOCAL_FLAMEDIR + FLAMESH
LOCAL_PERFPATH = LOCAL_FLAMEDIR + PERFSH
LOCAL_FLAMEOUTPUT = LOCAL_FLAMEDIR + "flameoutput/"
def run_perf(mesh, pod, labels, duration=20):
    """Capture perf data in the pod's sidecar proxy container and render a flame graph.

    Copies the capture script into the "<mesh>-proxy" container, runs it,
    copies the resulting .perf file back and feeds it to the local flame.sh.
    """
    filename = labels + "_perf.data"
    filepath = PERFWD + filename
    perfpath = PERFWD + PERFSH
    # Copy the capture script into the proxy container.
    kubectl_cp(LOCAL_PERFPATH, pod + ":" + perfpath, mesh + "-proxy")
    # BUG FIX: the command template contained the literal "(unknown)" instead of
    # the {filename} placeholder, so the output file name was never passed to
    # the capture script even though filename= was supplied to format().
    kubectl_exec(
        pod,
        "{perf_cmd} {filename} {duration}".format(
            perf_cmd=perfpath,
            filename=filename,
            duration=duration),
        container=mesh + "-proxy")
    kubectl_cp(pod + ":" + filepath + ".perf", LOCAL_FLAMEOUTPUT + filename + ".perf", mesh + "-proxy")
    run_command_sync(LOCAL_FLAMEPATH + " " + filename + ".perf")
def validate_job_config(job_config):
    """Return True when the YAML job config has all required, correctly-typed keys."""
    required_fields = {"conn": list, "qps": list, "duration": int}
    for name, expected_type in required_fields.items():
        if name not in job_config:
            print("missing required parameter {}".format(name))
            return False
        if not isinstance(job_config[name], expected_type):
            print("expecting type of parameter {} to be {}, got {}".format(name, expected_type, type(job_config[name])))
            return False
    return True
def fortio_from_config_file(args):
    """Build a Fortio runner from a YAML job file instead of CLI flags.

    Exits the process when the config fails validation.
    """
    with open(args.config_file) as f:
        job_config = yaml.safe_load(f)
    if not validate_job_config(job_config):
        exit(1)
    # The CLI constructor cannot ingest the yaml directly, so attributes are
    # assigned afterwards from a (attribute, key, default) table.
    fortio = Fortio()
    settings = (
        ("headers", "headers", None),
        ("conn", "conn", 16),
        ("qps", "qps", 1000),
        ("duration", "duration", 240),
        ("load_gen_type", "load_gen_type", "fortio"),
        ("telemetry_mode", "telemetry_mode", "mixer"),
        ("metrics", "metrics", "p90"),
        ("size", "size", 1024),
        ("run_serversidecar", "run_serversidecar", False),
        ("run_clientsidecar", "run_clientsidecar", False),
        ("run_bothsidecar", "run_bothsidecar", True),
        ("run_baseline", "run_baseline", False),
        ("run_ingress", "run_ingress", False),
        ("mesh", "mesh", "istio"),
        ("mode", "mode", "http"),
        ("extra_labels", "extra_labels", None),
    )
    for attr, key, default in settings:
        setattr(fortio, attr, job_config.get(key, default))
    # perf recording is not supported when driven from a config file.
    fortio.perf_record = False
    return fortio
def can_connect_to_nighthawk_service():
    """Return True when the local port-forward to nighthawk_service accepts a TCP connect."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        result = sock.connect_ex(('127.0.0.1', NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD))
    return result == 0
def run_perf_test(args):
    """Entry point: build a Fortio runner (from CLI args or a config file) and run the sweep.

    Sets up a local port-forward to the in-cluster nighthawk_service when one
    is not already reachable, then runs every (conn, qps) combination.
    """
    min_duration = METRICS_START_SKIP_DURATION + METRICS_END_SKIP_DURATION
    if args.config_file is not None:
        fortio = fortio_from_config_file(args)
    else:
        fortio = Fortio(
            headers=args.headers,
            conn=args.conn,
            qps=args.qps,
            duration=args.duration,
            size=args.size,
            perf_record=args.perf,
            extra_labels=args.extra_labels,
            baseline=args.baseline,
            serversidecar=args.serversidecar,
            clientsidecar=args.clientsidecar,
            bothsidecar=args.bothsidecar,
            ingress=args.ingress,
            mode=args.mode,
            mesh=args.mesh,
            telemetry_mode=args.telemetry_mode,
            cacert=args.cacert,
            load_gen_type=args.load_gen_type)
    # The measured window must be non-empty after skipping warmup/cooldown.
    if fortio.duration <= min_duration:
        print("Duration must be greater than {min_duration}".format(
            min_duration=min_duration))
        exit(1)
    # BUG FIX: `process` was only assigned inside the `if` below, but the
    # `finally` clause unconditionally called process.kill(), raising
    # NameError whenever the port-forward already existed.
    process = None
    if not can_connect_to_nighthawk_service():
        popen_cmd = "kubectl -n \"{ns}\" port-forward svc/fortioclient {port}:9999".format(
            ns=NAMESPACE,
            port=NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD)
        process = subprocess.Popen(shlex.split(
            popen_cmd), stdout=subprocess.PIPE)
        # Give the forward up to ~5 seconds to come up.
        max_tries = 10
        while max_tries > 0 and not can_connect_to_nighthawk_service():
            time.sleep(0.5)
            max_tries = max_tries - 1
    if not can_connect_to_nighthawk_service():
        print("Failure connecting to nighthawk_service")
        sys.exit(-1)
    else:
        print("Able to connect to nighthawk_service, proceeding")
    try:
        for conn in fortio.conn:
            for qps in fortio.qps:
                fortio.run(headers=fortio.headers, conn=conn, qps=qps,
                           duration=fortio.duration, size=fortio.size)
    finally:
        # Only kill the port-forward we started ourselves.
        if process is not None:
            process.kill()
def run_nighthawk(pod, remote_cmd, labels):
    """Execute a nighthawk_client command via a local docker container.

    On success, saves the raw json, renders a human-readable report, converts
    the json to Fortio's report format and copies it into the Fortio report
    server's data directory inside the client pod. On failure, dumps the
    child's stdout/stderr.
    """
    # The local Nighthawk image drives nighthawk_service running in the pod.
    docker_cmd = "docker run --rm --network=host {docker_image} {remote_cmd}".format(
        docker_image=NIGHTHAWK_DOCKER_IMAGE, remote_cmd=remote_cmd)
    print(docker_cmd, flush=True)
    process = subprocess.Popen(shlex.split(docker_cmd), stdout=subprocess.PIPE)
    (output, err) = process.communicate()
    exit_code = process.wait()
    if exit_code == 0:
        # The temp file only reserves a unique base path; the artifacts are
        # written next to it with ".json" suffixes after it is deleted.
        with tempfile.NamedTemporaryFile(dir='/tmp', delete=True) as tmpfile:
            dest = tmpfile.name
        with open("%s.json" % dest, 'wb') as f:
            f.write(output)
        print("Dumped Nighthawk's json to {dest}".format(dest=dest))
        # Human readable report to the console.
        os.system(
            "cat {dest}.json | docker run -i --rm {docker_image} nighthawk_output_transform --output-format human".format(docker_image=NIGHTHAWK_DOCKER_IMAGE, dest=dest))
        # Transform to Fortio's reporting-server json format.
        os.system("cat {dest}.json | docker run -i --rm {docker_image} nighthawk_output_transform --output-format fortio > {dest}.fortio.json".format(
            dest=dest, docker_image=NIGHTHAWK_DOCKER_IMAGE))
        # Copy the transformed report into the Fortio report server data dir.
        kubectl_cp("{dest}.fortio.json".format(
            dest=dest), "{pod}:/var/lib/fortio/{datetime}_nighthawk_{labels}.json".format(pod=pod, labels=labels, datetime=time.strftime("%Y-%m-%d-%H%M%S")), "shell")
    else:
        print("nighthawk remote execution error: %s" % exit_code)
        if output:
            print("--> stdout: %s" % output.decode("utf-8"))
        if err:
            print("--> stderr: %s" % err.decode("utf-8"))
def csv_to_int(s):
    """Parse a comma-separated string such as "1,2,4" into a list of ints."""
    return list(map(int, s.split(",")))
def get_parser():
    """Build the CLI argument parser for the perf-test runner."""
    parser = argparse.ArgumentParser("Run performance test")
    # (flag, kwargs) pairs, declared in the original order.
    options = [
        ("--headers", dict(help="a list of `header:value` should be separated by comma", default=None)),
        ("--conn", dict(help="number of connections, comma separated list", type=csv_to_int)),
        ("--qps", dict(help="qps, comma separated list", type=csv_to_int)),
        ("--duration", dict(help="duration in seconds of the extract", type=int)),
        ("--size", dict(help="size of the payload", type=int, default=1024)),
        ("--mesh", dict(help="istio or linkerd", default="istio")),
        ("--telemetry_mode", dict(help="run with different mixer configurations: mixer, none, telemetryv2", default="mixer")),
        ("--client", dict(help="where to run the test from", default=None)),
        ("--server", dict(help="pod ip of the server", default=None)),
        ("--perf", dict(help="also run perf and produce flame graph", default=False)),
        ("--ingress", dict(help="run traffic through ingress, should be a valid URL", default=None)),
        ("--extra_labels", dict(help="extra labels", default=None)),
        ("--mode", dict(help="http or grpc", default="http")),
        ("--config_file", dict(help="config yaml file", default=None)),
        ("--cacert", dict(help="path to the cacert for the fortio client inside the container", default=None)),
        ("--load_gen_type", dict(help="fortio or nighthawk", default="fortio")),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    define_bool(parser, "baseline", "run baseline for all", False)
    define_bool(parser, "serversidecar",
                "run serversidecar-only for all", False)
    define_bool(parser, "clientsidecar",
                "run clientsidecar-only for all", False)
    define_bool(parser, "bothsidecar",
                "run both clientsiecar and serversidecar", True)
    return parser
def define_bool(parser, opt, help_arg, default_val):
    """Register paired --<opt>/--no_<opt> boolean flags with a shared default."""
    parser.add_argument(
        "--" + opt, help=help_arg, dest=opt, action='store_true')
    parser.add_argument(
        "--no_" + opt, help="do not " + help_arg, dest=opt, action='store_false')
    parser.set_defaults(**{opt: default_val})
def main(argv):
    """Parse CLI arguments, echo them, and launch the perf test."""
    parsed = get_parser().parse_args(argv)
    print(parsed)
    return run_perf_test(parsed)
# Script entry point: forward the CLI args (minus the program name) to main()
# and exit with its return code.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| true | true |
f72033c97746dc5228df50bb0ed00d7fbc48f4af | 253 | py | Python | testcase/test_sensor/__init__.py | yucheng6039/WebAuto | 13fa954dd58407ee23e89be89f73cb97f5c11108 | [
"Apache-2.0"
] | null | null | null | testcase/test_sensor/__init__.py | yucheng6039/WebAuto | 13fa954dd58407ee23e89be89f73cb97f5c11108 | [
"Apache-2.0"
] | null | null | null | testcase/test_sensor/__init__.py | yucheng6039/WebAuto | 13fa954dd58407ee23e89be89f73cb97f5c11108 | [
"Apache-2.0"
] | null | null | null | #-------------------------------------------------------------------------------
# Name: __init__.py
# Description:
# Author: slm
# Date: 2020/5/15
#-------------------------------------------------------------------------------
| 36.142857 | 80 | 0.177866 | true | true | |
f72033fbb720adef0e08eca47c98eececd5d767e | 917 | py | Python | app/user/views.py | dulvinw/recipe-api | f132345987a5962134755e5425e88dde4c56d5fe | [
"Apache-2.0"
] | 1 | 2021-07-08T05:15:38.000Z | 2021-07-08T05:15:38.000Z | app/user/views.py | TMEU/recipe-api | f132345987a5962134755e5425e88dde4c56d5fe | [
"Apache-2.0"
] | null | null | null | app/user/views.py | TMEU/recipe-api | f132345987a5962134755e5425e88dde4c56d5fe | [
"Apache-2.0"
] | 1 | 2021-07-08T05:15:42.000Z | 2021-07-08T05:15:42.000Z | from rest_framework import generics, permissions, authentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
    """Create a new user in the system"""
    # DRF's CreateAPIView provides the POST handler; the serializer performs
    # validation and object creation.
    serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
    """Create auth token for user"""
    # Project-defined serializer validates the credentials (see AuthTokenSerializer).
    serializer_class = AuthTokenSerializer
    # Use the project's default renderers so the endpoint appears in the
    # browsable API.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UpdateUserView(generics.RetrieveUpdateAPIView):
    """Update a user properties"""
    serializer_class = UserSerializer
    # Only authenticated requests carrying a valid token may retrieve/update.
    permission_classes = (permissions.IsAuthenticated, )
    authentication_classes = (authentication.TokenAuthentication, )

    def get_object(self):
        """Retrieve and return authenticated user"""
        # The object operated on is always the requesting user, so the URL
        # needs no pk.
        return self.request.user
| 32.75 | 67 | 0.78626 | from rest_framework import generics, permissions, authentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
    """Create a new user in the system."""
    # DRF's CreateAPIView provides the POST handler; the serializer performs
    # validation and object creation.
    serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
    """Create an auth token for a user."""
    # Project-defined serializer validates the credentials (see AuthTokenSerializer).
    serializer_class = AuthTokenSerializer
    # Use the project's default renderers so the endpoint appears in the
    # browsable API.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UpdateUserView(generics.RetrieveUpdateAPIView):
    """Retrieve/update properties of the authenticated user."""
    serializer_class = UserSerializer
    # Only authenticated requests carrying a valid token may retrieve/update.
    permission_classes = (permissions.IsAuthenticated, )
    authentication_classes = (authentication.TokenAuthentication, )

    def get_object(self):
        """Return the authenticated user as the object to operate on."""
        return self.request.user
| true | true |
f7203487179fb4f3e1634194e8c33312c4ba431f | 918 | py | Python | ffeatools/FFEA_initialise/FFEA_mapping_tools/__init__.py | zzalscv2/FFEA | da8a09dadb1b3978a3d230dc79d9b163d7889242 | [
"Apache-2.0"
] | null | null | null | ffeatools/FFEA_initialise/FFEA_mapping_tools/__init__.py | zzalscv2/FFEA | da8a09dadb1b3978a3d230dc79d9b163d7889242 | [
"Apache-2.0"
] | null | null | null | ffeatools/FFEA_initialise/FFEA_mapping_tools/__init__.py | zzalscv2/FFEA | da8a09dadb1b3978a3d230dc79d9b163d7889242 | [
"Apache-2.0"
] | 1 | 2021-04-03T16:08:21.000Z | 2021-04-03T16:08:21.000Z | #
# This file is part of the FFEA simulation package
#
# Copyright (c) by the Theory and Development FFEA teams,
# as they appear in the README.md file.
#
# FFEA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFEA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FFEA. If not, see <http://www.gnu.org/licenses/>.
#
# To help us fund FFEA development, we humbly ask that you cite
# the research papers on the package.
#
import node_pdb_align
| 36.72 | 71 | 0.736383 |
import node_pdb_align
| true | true |
f7203581c7df8fcaaa578fd90ca2ab1f4f1e4fbd | 636 | py | Python | src/user_polls_2_app/migrations/0010_alter_pollsassignedtouser_user.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | src/user_polls_2_app/migrations/0010_alter_pollsassignedtouser_user.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | src/user_polls_2_app/migrations/0010_alter_pollsassignedtouser_user.py | JackCX777/user_polls_2 | fa8fe9ad4c1fa36b4ea5bb402b3d485852a98d3b | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.2.7 on 2021-10-25 19:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.7: relaxes PollsAssignedToUser.user so the
    # foreign key may be NULL/blank (default None), CASCADE on user delete.
    dependencies = [
        # Swappable dependency keeps this migration valid for custom AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('user_polls_2_app', '0009_auto_20211025_1754'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pollsassignedtouser',
            name='user',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 28.909091 | 147 | 0.691824 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('user_polls_2_app', '0009_auto_20211025_1754'),
]
operations = [
migrations.AlterField(
model_name='pollsassignedtouser',
name='user',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| true | true |
f72035a00b6e5a656103279e71459c9e9bf51ad5 | 5,434 | py | Python | PythonAPI/docs/bp_doc_gen.py | hecspc/carla | 714f8c4cbfbb46fa9ed163a27c94ede613948767 | [
"MIT"
] | 8 | 2019-11-27T18:43:09.000Z | 2022-01-16T06:08:36.000Z | PythonAPI/docs/bp_doc_gen.py | tcwangjiawei/carla | 714f8c4cbfbb46fa9ed163a27c94ede613948767 | [
"MIT"
] | null | null | null | PythonAPI/docs/bp_doc_gen.py | tcwangjiawei/carla | 714f8c4cbfbb46fa9ed163a27c94ede613948767 | [
"MIT"
] | 5 | 2020-05-12T20:03:10.000Z | 2022-02-25T14:40:07.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
# Make the locally built carla .egg importable before "import carla"; if no
# matching egg exists (IndexError from the empty glob result), fall back to
# whatever carla is already on sys.path.
try:
    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    pass
import carla
# Color applied to blueprint names in the generated markdown.
COLOR_LIST = '#498efc'
def join(elem, separator=''):
    """Concatenate the strings in *elem* into one string, using *separator*
    (empty by default) between consecutive items."""
    pieces = list(elem)
    return separator.join(pieces)
def color(col, buf):
    """Wrap *buf* in an HTML <font> tag rendered with color *col*."""
    return f'<font color="{col}">{buf}</font>'
def valid_dic_val(dic, value):
    """Return ``dic[value]`` when *value* is a key of *dic*, else False
    (equivalent to ``value in dic and dic[value]``)."""
    if value in dic:
        return dic[value]
    return False
def italic(buf):
    """Surround *buf* with underscores (markdown italics)."""
    return f'_{buf}_'
def bold(buf):
    """Surround *buf* with double asterisks (markdown bold)."""
    return f'**{buf}**'
def parentheses(buf):
    """Enclose *buf* in round brackets."""
    return f'({buf})'
def sub(buf):
    """Wrap *buf* in an HTML <sub> (subscript) tag."""
    return f'<sub>{buf}</sub>'
def code(buf):
    """Surround *buf* with backticks (markdown inline code)."""
    return f'`{buf}`'
class MarkdownFile:
    """In-memory markdown builder.

    Accumulates text in ``self._data`` and tracks the current bullet-list
    nesting level so pushed items are indented consistently; ``data()``
    returns the finished document as one string.
    """
    def __init__(self):
        self._data = ""
        self._list_depth = 0
        # Two trailing spaces before the newline produce a markdown hard break.
        self.endl = '  \n'
    def data(self):
        """Return the markdown accumulated so far."""
        return self._data
    def list_push(self, buf=''):
        """Open one list level; when *buf* is given, emit it as a '- ' item."""
        if buf:
            self.text(join([
                '  ' * self._list_depth if self._list_depth != 0 else '', '- ', buf]))
        self._list_depth = (self._list_depth + 1)
    def list_pushn(self, buf):
        """Like ``list_push`` but terminates the item with a hard line break."""
        self.list_push(join([buf, self.endl]))
    def list_pop(self):
        """Close one list level (clamped at zero)."""
        self._list_depth = max(self._list_depth - 1, 0)
    def list_popn(self):
        """Close one list level and append a blank separator line."""
        self.list_pop()
        self._data = join([self._data, '\n'])
    def list_depth(self):
        """Return the indentation string for the current list level.

        NOTE(review): ``strip()`` removes trailing newlines, so the first
        condition looks like it always holds and '' is always returned —
        confirm whether the newline check was intended on the unstripped data.
        """
        if self._data.strip()[-1:] != '\n' or self._list_depth == 0:
            return ''
        return join(['  ' * self._list_depth])
    def text(self, buf):
        """Append *buf* verbatim."""
        self._data = join([self._data, buf])
    def textn(self, buf):
        """Append *buf* as an indented line ending with a hard line break."""
        self._data = join([self._data, self.list_depth(), buf, self.endl])
    def not_title(self, buf):
        """Append *buf* as a raw HTML <h1> heading (not a markdown title)."""
        self._data = join([
            self._data, '\n', self.list_depth(), '<h1>', buf, '</h1>', '\n'])
    def title(self, strongness, buf):
        """Append *buf* as a markdown heading of level *strongness*."""
        self._data = join([
            self._data, '\n', self.list_depth(), '#' * strongness, ' ', buf, '\n'])
    def new_line(self):
        """Append a hard line break."""
        self._data = join([self._data, self.endl])
    def code_block(self, buf, language=''):
        """Return (not append) *buf* wrapped in a fenced code block."""
        return join(['```', language, '\n', self.list_depth(), buf, '\n', self.list_depth(), '```\n'])
def main():
    """Render the blueprint library of a running CARLA server as markdown.

    Connects to a simulator on localhost:2000, groups every blueprint by the
    first component of its id (walker, static, prop, vehicle, sensor,
    controller, ...) and returns the generated markdown document as a string.
    """
    client = carla.Client('127.0.0.1', 2000)
    client.set_timeout(2.0)
    world = client.get_world()
    bp_dict = {}
    blueprints = [bp for bp in world.get_blueprint_library().filter('*')]  # Returns list of all blueprints
    blueprint_ids = [bp.id for bp in world.get_blueprint_library().filter('*')]  # Returns list of all blueprint ids
    # Creates a dict key = walker, static, prop, vehicle, sensor, controller; value = [bp_id, blueprint]
    for bp_id in sorted(blueprint_ids):
        bp_type = bp_id.split('.')[0]
        value = []
        for bp in blueprints:
            if bp.id == bp_id:
                value = [bp_id, bp]
        if bp_type in bp_dict:
            bp_dict[bp_type].append(value)
        else:
            bp_dict[bp_type] = [value]
    # Actual documentation
    md = MarkdownFile()
    md.not_title('Blueprint Library')
    md.textn(
        "The Blueprint Library ([`carla.BlueprintLibrary`](../python_api/#carlablueprintlibrary-class)) " +
        "is a summary of all [`carla.ActorBlueprint`](../python_api/#carla.ActorBlueprint) " +
        "and its attributes ([`carla.ActorAttribute`](../python_api/#carla.ActorAttribute)) " +
        "available to the user in CARLA.")
    md.textn("\nHere is an example code for printing all actor blueprints and their attributes:")
    md.textn(md.code_block("blueprints = [bp for bp in world.get_blueprint_library().filter('*')]\n"
                           "for blueprint in blueprints:\n"
                           "    print(blueprint.id)\n"
                           "    for attr in blueprint:\n"
                           "        print('  - {}'.format(attr))", "py"))
    md.textn("Check out our [blueprint tutorial](../python_api_tutorial/#blueprints).")
    for key, value in bp_dict.items():  # bp types, bp's
        md.title(3, key)  # Key = walker, static, controller, sensor, vehicle
        for bp in sorted(value):  # Value = bp[0]= name bp[1]= blueprint
            md.list_pushn(bold(color(COLOR_LIST, bp[0])))  # bp name
            md.list_push(bold('Attributes:') + '\n')
            for attr in sorted(bp[1], key=lambda x: x.id):  # for attribute in blueprint
                md.list_push(code(attr.id))
                md.text(' ' + parentheses(italic(str(attr.type))))
                if attr.is_modifiable:
                    md.text(sub(italic(' – Modifiable')))
                md.list_popn()
            md.list_pop()
            md.list_pop()
        md.list_pop()
    return md.data()
if __name__ == '__main__':
    try:
        # Write the generated markdown into the repository docs folder,
        # resolved relative to this script's location.
        script_path = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(script_path, '../../Docs/bp_library.md'), 'w') as md_file:
            md_file.write(main())
        print("Done!")
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
| 30.52809 | 115 | 0.572322 |
import glob
import os
import sys
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
COLOR_LIST = '#498efc'
def join(elem, separator=''):
return separator.join(elem)
def color(col, buf):
return join(['<font color="', col, '">', buf, '</font>'])
def valid_dic_val(dic, value):
return value in dic and dic[value]
def italic(buf):
return join(['_', buf, '_'])
def bold(buf):
return join(['**', buf, '**'])
def parentheses(buf):
return join(['(', buf, ')'])
def sub(buf):
return join(['<sub>', buf, '</sub>'])
def code(buf):
return join(['`', buf, '`'])
class MarkdownFile:
def __init__(self):
self._data = ""
self._list_depth = 0
self.endl = ' \n'
def data(self):
return self._data
def list_push(self, buf=''):
if buf:
self.text(join([
' ' * self._list_depth if self._list_depth != 0 else '', '- ', buf]))
self._list_depth = (self._list_depth + 1)
def list_pushn(self, buf):
self.list_push(join([buf, self.endl]))
def list_pop(self):
self._list_depth = max(self._list_depth - 1, 0)
def list_popn(self):
self.list_pop()
self._data = join([self._data, '\n'])
def list_depth(self):
if self._data.strip()[-1:] != '\n' or self._list_depth == 0:
return ''
return join([' ' * self._list_depth])
def text(self, buf):
self._data = join([self._data, buf])
def textn(self, buf):
self._data = join([self._data, self.list_depth(), buf, self.endl])
def not_title(self, buf):
self._data = join([
self._data, '\n', self.list_depth(), '<h1>', buf, '</h1>', '\n'])
def title(self, strongness, buf):
self._data = join([
self._data, '\n', self.list_depth(), '#' * strongness, ' ', buf, '\n'])
def new_line(self):
self._data = join([self._data, self.endl])
def code_block(self, buf, language=''):
return join(['```', language, '\n', self.list_depth(), buf, '\n', self.list_depth(), '```\n'])
def main():
client = carla.Client('127.0.0.1', 2000)
client.set_timeout(2.0)
world = client.get_world()
bp_dict = {}
blueprints = [bp for bp in world.get_blueprint_library().filter('*')]
blueprint_ids = [bp.id for bp in world.get_blueprint_library().filter('*')]
for bp_id in sorted(blueprint_ids):
bp_type = bp_id.split('.')[0]
value = []
for bp in blueprints:
if bp.id == bp_id:
value = [bp_id, bp]
if bp_type in bp_dict:
bp_dict[bp_type].append(value)
else:
bp_dict[bp_type] = [value]
md = MarkdownFile()
md.not_title('Blueprint Library')
md.textn(
"The Blueprint Library ([`carla.BlueprintLibrary`](../python_api/#carlablueprintlibrary-class)) " +
"is a summary of all [`carla.ActorBlueprint`](../python_api/#carla.ActorBlueprint) " +
"and its attributes ([`carla.ActorAttribute`](../python_api/#carla.ActorAttribute)) " +
"available to the user in CARLA.")
md.textn("\nHere is an example code for printing all actor blueprints and their attributes:")
md.textn(md.code_block("blueprints = [bp for bp in world.get_blueprint_library().filter('*')]\n"
"for blueprint in blueprints:\n"
" print(blueprint.id)\n"
" for attr in blueprint:\n"
" print(' - {}'.format(attr))", "py"))
md.textn("Check out our [blueprint tutorial](../python_api_tutorial/#blueprints).")
for key, value in bp_dict.items():
md.title(3, key) # Key = walker, static, controller, sensor, vehicle
for bp in sorted(value): # Value = bp[0]= name bp[1]= blueprint
md.list_pushn(bold(color(COLOR_LIST, bp[0]))) # bp name
md.list_push(bold('Attributes:') + '\n')
for attr in sorted(bp[1], key=lambda x: x.id): # for attribute in blueprint
md.list_push(code(attr.id))
md.text(' ' + parentheses(italic(str(attr.type))))
if attr.is_modifiable:
md.text(sub(italic(' – Modifiable')))
md.list_popn()
md.list_pop()
md.list_pop()
md.list_pop()
return md.data()
if __name__ == '__main__':
try:
script_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(script_path, '../../Docs/bp_library.md'), 'w') as md_file:
md_file.write(main())
print("Done!")
except KeyboardInterrupt:
print('\nCancelled by user. Bye!')
| true | true |
f7203750725cdfec5d228e1de7f7be516116fab2 | 1,038 | py | Python | api/users/models/profiles.py | julianarchila/twitter-clone-api | 9c2d77c9144dcb70cf982d9987c70bc7113b7f3e | [
"MIT"
] | null | null | null | api/users/models/profiles.py | julianarchila/twitter-clone-api | 9c2d77c9144dcb70cf982d9987c70bc7113b7f3e | [
"MIT"
] | null | null | null | api/users/models/profiles.py | julianarchila/twitter-clone-api | 9c2d77c9144dcb70cf982d9987c70bc7113b7f3e | [
"MIT"
] | null | null | null | """Profile models. """
# Django
from django.db import models
# Utils
from api.utils.models import TwModel
class Profile(TwModel):
"""Profile model."""
user = models.OneToOneField("users.User", on_delete=models.CASCADE)
picture = models.ImageField(
"Profile picture",
upload_to="users/pictures/",
default="users/pictures/default.png",
blank=True,
null=True,
)
header = models.ImageField(
"Profile header", upload_to="users/headers/", blank=True, null=True
)
bio = models.TextField(max_length=160, blank=True, null=True)
followers_count = models.IntegerField(blank=True, null=True, default=0)
followers = models.ManyToManyField(
"users.User", related_name="following", blank=True
)
"""
profile = UserProfile.objects.first()
profile.followers.all() -> All users following this profile
user.following.all() -> All user profiles I follow
"""
def __str__(self) -> str:
return f"Profile: {self.user.username}"
| 27.315789 | 75 | 0.655106 |
from django.db import models
from api.utils.models import TwModel
class Profile(TwModel):
user = models.OneToOneField("users.User", on_delete=models.CASCADE)
picture = models.ImageField(
"Profile picture",
upload_to="users/pictures/",
default="users/pictures/default.png",
blank=True,
null=True,
)
header = models.ImageField(
"Profile header", upload_to="users/headers/", blank=True, null=True
)
bio = models.TextField(max_length=160, blank=True, null=True)
followers_count = models.IntegerField(blank=True, null=True, default=0)
followers = models.ManyToManyField(
"users.User", related_name="following", blank=True
)
def __str__(self) -> str:
return f"Profile: {self.user.username}"
| true | true |
f7203796bb012b3a172887a59a4d57229630ce4f | 8,515 | py | Python | hwtLib/avalon/mm.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | null | null | null | hwtLib/avalon/mm.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | null | null | null | hwtLib/avalon/mm.py | optical-o/hwtLib | edad621f5ad4cdbea20a5751ff4468979afe2f77 | [
"MIT"
] | null | null | null | from hwt.hdl.constants import DIRECTION, READ, WRITE, NOP, READ_WRITE
from hwt.interfaces.agents.handshaked import HandshakedAgent
from hwt.interfaces.std import VectSignal, Signal
from hwt.simulator.agentBase import SyncAgentBase
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.param import Param
from hwt.interfaces.agents.vldSynced import VldSyncedAgent
from collections import deque
from pyMathBitPrecise.bit_utils import mask
from hwtSimApi.hdlSimulator import HdlSimulator
from hwt.math import log2ceil
# Encoding of the 2-bit AvalonMM "response" signal.
RESP_OKAY = 0b00
# RESP_RESERVED = 0b01
RESP_SLAVEERROR = 0b10
RESP_DECODEERROR = 0b11
class AvalonMM(Interface):
    """
    Avalon Memory Mapped interface

    :note: handshaked, shared address and response channel

    https://www.intel.com/content/dam/altera-www/global/en_US/pdfs/literature/manual/mnl_avalon_spec.pdf

    .. hwt-autodoc::
    """
    def _config(self):
        # Interface parameters; MAX_BURST == 0 disables the burstCount signal.
        self.ADDR_WIDTH = Param(32)
        self.DATA_WIDTH = Param(32)
        self.MAX_BURST = Param(0)
    def _declr(self):
        # self.debugAccess = Signal()
        IN = DIRECTION.IN
        # Master-driven address/control signals.
        self.address = VectSignal(self.ADDR_WIDTH)
        self.byteEnable = VectSignal(self.DATA_WIDTH // 8)
        self.read = Signal()
        # Slave-driven read channel.
        self.readData = VectSignal(self.DATA_WIDTH, masterDir=IN)
        self.readDataValid = Signal(masterDir=IN)  # read data valid
        self.response = VectSignal(2, masterDir=IN)
        # Master-driven write channel.
        self.write = Signal()
        self.writeData = VectSignal(self.DATA_WIDTH)
        # self.lock = Signal()
        # waitRequest acts as an active-low ready from the slave.
        self.waitRequest = Signal(masterDir=IN)
        self.writeResponseValid = Signal(masterDir=IN)
        if self.MAX_BURST != 0:
            self.burstCount = VectSignal(log2ceil(self.MAX_BURST))
            # self.beginBurstTransfer = Signal()
    def _getWordAddrStep(self):
        """
        :return: size of one word in unit of address
        """
        return int(self.DATA_WIDTH) // self._getAddrStep()
    def _getAddrStep(self):
        """
        :return: how many bits is one unit of address
            (e.g. 8 bits for char * pointer, 36 for 36 bit bram)
        """
        return 8
    def _initSimAgent(self, sim: HdlSimulator):
        # Attach the simulation agent driving/monitoring this interface.
        self._ag = AvalonMmAgent(sim, self)
class AvalonMmDataRAgent(VldSyncedAgent):
    """
    Simulation/verification agent for the read-data part of the AvalonMM
    interface.

    * vld signal = readDataValid
    * data signals = (readData, response)
    """
    @classmethod
    def get_valid_signal(cls, intf):
        return intf.readDataValid
    def get_valid(self):
        return self._vld.read()
    def set_valid(self, val):
        self._vld.write(val)
    def get_data(self):
        """Sample (readData, response) from the interface."""
        i = self.intf
        return (i.readData.read(), i.response.read())
    def set_data(self, data):
        """Drive (readData, response); ``None`` clears both signals."""
        i = self.intf
        if data is None:
            rd_val, resp_val = None, None
        else:
            rd_val, resp_val = data
        i.readData.write(rd_val)
        i.response.write(resp_val)
class AvalonMmAddrAgent(HandshakedAgent):
    """
    Agent for the shared AvalonMM address/request channel.

    Items produced/consumed by :meth:`get_data`/:meth:`set_data` are tuples
    ``(READ/WRITE, address, burstCount)``; write payloads travel separately
    through :attr:`wData` as ``(data, byteEnable)`` tuples.

    * two valid signals: "read", "write"
    * one ready_n signal: "waitRequest" (active low, hence the inversions below)
    * on write the data and byteEnable signals are driven as well
    """
    def __init__(self, sim: HdlSimulator, intf, allowNoReset=False):
        HandshakedAgent.__init__(self, sim, intf, allowNoReset=allowNoReset)
        # FIFO of pending write payloads: (writeData, byteEnable) tuples.
        self.wData = deque()
    @classmethod
    def get_ready_signal(cls, intf):
        return intf.waitRequest
    def get_ready(self):
        # waitRequest is active low: ready == not waitRequest.
        rd = self._rd.read()
        rd.val = int(not rd.val)
        return rd
    def set_ready(self, val):
        # Invert again when driving the active-low waitRequest.
        self._rd.write(int(not val))
    @classmethod
    def get_valid_signal(cls, intf):
        # The channel has two valid strobes; handled jointly below.
        return (intf.read, intf.write)
    def get_valid(self):
        # Channel is valid when either read or write is asserted; the result
        # is only defined when both strobes have defined values.
        r = self._vld[0].read()
        w = self._vld[1].read()
        r.val = r.val | w.val
        r.vld_mask = r.vld_mask & w.vld_mask
        return r
    def set_valid(self, val):
        # Route the valid flag to the strobe matching the pending transaction
        # mode; deassert both when there is nothing to send.
        if self.actualData is None or self.actualData is NOP:
            r = 0
            w = 0
        else:
            mode = self.actualData[0]
            if mode is READ:
                r = val
                w = 0
            elif mode is WRITE:
                r = 0
                w = val
            else:
                raise ValueError("Unknown mode", mode)
        self._vld[0].write(r)
        self._vld[1].write(w)
    def get_data(self):
        """Sample one request; also capture the write payload if applicable."""
        intf = self.intf
        address = intf.address.read()
        byteEnable = intf.byteEnable.read()
        read = intf.read.read()
        write = intf.write.read()
        wdata = intf.writeData.read()
        if intf.MAX_BURST != 0:
            burstCount = intf.burstCount.read()
        else:
            burstCount = 1
        if read.val:
            if write.val:
                rw = READ_WRITE
            else:
                rw = READ
        elif write.val:
            rw = WRITE
        else:
            raise AssertionError(
                "This funtion should not be called when data"
                "is not ready on interface")
        if rw == WRITE or rw == READ_WRITE:
            self.wData.append((wdata, byteEnable))
        return (rw, address, burstCount)
    def set_data(self, data):
        """Drive one request onto the interface (``None`` idles the channel).

        NOTE(review): only READ and WRITE are accepted here even though
        get_data() may produce READ_WRITE — confirm whether combined
        transactions are meant to be driven.
        """
        intf = self.intf
        if data is None:
            intf.address.write(None)
            intf.byteEnable.write(None)
            if intf.MAX_BURST != 0:
                intf.burstCount.write(None)
            intf.read.write(0)
            intf.write.write(0)
        else:
            rw, address, burstCount = data
            if rw is READ:
                rd, wr = 1, 0
                # Reads assert all byte lanes.
                be = mask(intf.readData._dtype.bit_length() // 8)
            elif rw is WRITE:
                rd, wr = 0, 1
                rw, address, burstCount = data
                # Pop the matching write payload queued by the user.
                d, be = self.wData.popleft()
                intf.writeData.write(d)
            else:
                raise TypeError(f"rw is in invalid format {rw}")
            intf.address.write(address)
            intf.byteEnable.write(be)
            assert int(burstCount) >= 1, burstCount
            if intf.MAX_BURST:
                intf.burstCount.write(burstCount)
            intf.read.write(rd)
            intf.write.write(wr)
class AvalonMmWRespAgent(VldSyncedAgent):
    """Agent for the AvalonMM write-response channel.

    The valid strobe is ``writeResponseValid``; the payload is the 2-bit
    ``response`` code.
    """
    @classmethod
    def get_valid_signal(cls, intf):
        return intf.writeResponseValid
    def get_data(self):
        """Sample the response code from the interface."""
        response_sig = self.intf.response
        return response_sig.read()
    def set_data(self, data):
        """Drive the response code onto the interface."""
        response_sig = self.intf.response
        response_sig.write(data)
class AvalonMmAgent(SyncAgentBase):
    """
    Simulation agent for AvalonMM bus interface

    :ivar ~.req: request data, items are tuples (READ/WRITE, address, burstCount)
    :ivar ~.wData: data to write, items are tuples (data, byteenable)
    :ivar ~.wResp: write response data
    :ivar ~.rData: data read from interface, items are tuples (data, response)
    """
    def __init__(self, sim: HdlSimulator, intf, allowNoReset=False):
        SyncAgentBase.__init__(self, sim, intf, allowNoReset=allowNoReset)
        # One sub-agent per AvalonMM channel: shared address/request,
        # read data, write response.
        self.addrAg = AvalonMmAddrAgent(sim, intf, allowNoReset=allowNoReset)
        self.rDataAg = AvalonMmDataRAgent(sim, intf, allowNoReset=allowNoReset)
        self.wRespAg = AvalonMmWRespAgent(sim, intf, allowNoReset=allowNoReset)
    # The properties below forward the data queues of the sub-agents so the
    # user can access them directly on this composite agent.
    def req_get(self):
        return self.addrAg.data
    def req_set(self, v):
        self.addrAg.data = v
    req = property(req_get, req_set)
    def wData_get(self):
        return self.addrAg.wData
    def wData_set(self, v):
        self.addrAg.wData = v
    wData = property(wData_get, wData_set)
    def wResp_get(self):
        return self.wRespAg.data
    def wResp_set(self, v):
        # BUGFIX: previously this assigned to ``self.wRespAg`` itself,
        # replacing the whole sub-agent with the raw value; assign to its
        # data queue instead, consistent with req/wData/rData setters.
        self.wRespAg.data = v
    wResp = property(wResp_get, wResp_set)
    def rData_get(self):
        return self.rDataAg.data
    def rData_set(self, v):
        self.rDataAg.data = v
    rData = property(rData_get, rData_set)
    def getDrivers(self):
        """Master role: drive requests, monitor read data and write responses."""
        self.setEnable = self.setEnable_asDriver
        return (self.rDataAg.getMonitors()
                + self.addrAg.getDrivers()
                + self.wRespAg.getMonitors())
    def getMonitors(self):
        """Slave role: monitor requests, drive read data and write responses."""
        self.setEnable = self.setEnable_asMonitor
        return (self.rDataAg.getDrivers()
                + self.addrAg.getMonitors()
                + self.wRespAg.getDrivers())
| 28.864407 | 104 | 0.603053 | from hwt.hdl.constants import DIRECTION, READ, WRITE, NOP, READ_WRITE
from hwt.interfaces.agents.handshaked import HandshakedAgent
from hwt.interfaces.std import VectSignal, Signal
from hwt.simulator.agentBase import SyncAgentBase
from hwt.synthesizer.interface import Interface
from hwt.synthesizer.param import Param
from hwt.interfaces.agents.vldSynced import VldSyncedAgent
from collections import deque
from pyMathBitPrecise.bit_utils import mask
from hwtSimApi.hdlSimulator import HdlSimulator
from hwt.math import log2ceil
RESP_OKAY = 0b00
RESP_SLAVEERROR = 0b10
RESP_DECODEERROR = 0b11
class AvalonMM(Interface):
def _config(self):
self.ADDR_WIDTH = Param(32)
self.DATA_WIDTH = Param(32)
self.MAX_BURST = Param(0)
def _declr(self):
IN = DIRECTION.IN
self.address = VectSignal(self.ADDR_WIDTH)
self.byteEnable = VectSignal(self.DATA_WIDTH // 8)
self.read = Signal()
self.readData = VectSignal(self.DATA_WIDTH, masterDir=IN)
self.readDataValid = Signal(masterDir=IN)
self.response = VectSignal(2, masterDir=IN)
self.write = Signal()
self.writeData = VectSignal(self.DATA_WIDTH)
self.waitRequest = Signal(masterDir=IN)
self.writeResponseValid = Signal(masterDir=IN)
if self.MAX_BURST != 0:
self.burstCount = VectSignal(log2ceil(self.MAX_BURST))
def _getWordAddrStep(self):
return int(self.DATA_WIDTH) // self._getAddrStep()
def _getAddrStep(self):
return 8
def _initSimAgent(self, sim: HdlSimulator):
self._ag = AvalonMmAgent(sim, self)
class AvalonMmDataRAgent(VldSyncedAgent):
@classmethod
def get_valid_signal(cls, intf):
return intf.readDataValid
def get_valid(self):
return self._vld.read()
def set_valid(self, val):
self._vld.write(val)
def get_data(self):
intf = self.intf
return (intf.readData.read(), intf.response.read())
def set_data(self, data):
intf = self.intf
if data is None:
intf.readData.write(None)
intf.response.write(None)
else:
readData, response = data
intf.readData.write(readData)
intf.response.write(response)
class AvalonMmAddrAgent(HandshakedAgent):
def __init__(self, sim: HdlSimulator, intf, allowNoReset=False):
HandshakedAgent.__init__(self, sim, intf, allowNoReset=allowNoReset)
self.wData = deque()
@classmethod
def get_ready_signal(cls, intf):
return intf.waitRequest
def get_ready(self):
rd = self._rd.read()
rd.val = int(not rd.val)
return rd
def set_ready(self, val):
self._rd.write(int(not val))
@classmethod
def get_valid_signal(cls, intf):
return (intf.read, intf.write)
def get_valid(self):
r = self._vld[0].read()
w = self._vld[1].read()
r.val = r.val | w.val
r.vld_mask = r.vld_mask & w.vld_mask
return r
def set_valid(self, val):
if self.actualData is None or self.actualData is NOP:
r = 0
w = 0
else:
mode = self.actualData[0]
if mode is READ:
r = val
w = 0
elif mode is WRITE:
r = 0
w = val
else:
raise ValueError("Unknown mode", mode)
self._vld[0].write(r)
self._vld[1].write(w)
def get_data(self):
intf = self.intf
address = intf.address.read()
byteEnable = intf.byteEnable.read()
read = intf.read.read()
write = intf.write.read()
wdata = intf.writeData.read()
if intf.MAX_BURST != 0:
burstCount = intf.burstCount.read()
else:
burstCount = 1
if read.val:
if write.val:
rw = READ_WRITE
else:
rw = READ
elif write.val:
rw = WRITE
else:
raise AssertionError(
"This funtion should not be called when data"
"is not ready on interface")
if rw == WRITE or rw == READ_WRITE:
self.wData.append((wdata, byteEnable))
return (rw, address, burstCount)
def set_data(self, data):
intf = self.intf
if data is None:
intf.address.write(None)
intf.byteEnable.write(None)
if intf.MAX_BURST != 0:
intf.burstCount.write(None)
intf.read.write(0)
intf.write.write(0)
else:
rw, address, burstCount = data
if rw is READ:
rd, wr = 1, 0
be = mask(intf.readData._dtype.bit_length() // 8)
elif rw is WRITE:
rd, wr = 0, 1
rw, address, burstCount = data
d, be = self.wData.popleft()
intf.writeData.write(d)
else:
raise TypeError(f"rw is in invalid format {rw}")
intf.address.write(address)
intf.byteEnable.write(be)
assert int(burstCount) >= 1, burstCount
if intf.MAX_BURST:
intf.burstCount.write(burstCount)
intf.read.write(rd)
intf.write.write(wr)
class AvalonMmWRespAgent(VldSyncedAgent):
@classmethod
def get_valid_signal(cls, intf):
return intf.writeResponseValid
def get_data(self):
return self.intf.response.read()
def set_data(self, data):
self.intf.response.write(data)
class AvalonMmAgent(SyncAgentBase):
def __init__(self, sim: HdlSimulator, intf, allowNoReset=False):
SyncAgentBase.__init__(self, sim, intf, allowNoReset=allowNoReset)
self.addrAg = AvalonMmAddrAgent(sim, intf, allowNoReset=allowNoReset)
self.rDataAg = AvalonMmDataRAgent(sim, intf, allowNoReset=allowNoReset)
self.wRespAg = AvalonMmWRespAgent(sim, intf, allowNoReset=allowNoReset)
def req_get(self):
return self.addrAg.data
def req_set(self, v):
self.addrAg.data = v
req = property(req_get, req_set)
def wData_get(self):
return self.addrAg.wData
def wData_set(self, v):
self.addrAg.wData = v
wData = property(wData_get, wData_set)
def wResp_get(self):
return self.wRespAg.data
def wResp_set(self, v):
self.wRespAg = v
wResp = property(wResp_get, wResp_set)
def rData_get(self):
return self.rDataAg.data
def rData_set(self, v):
self.rDataAg.data = v
rData = property(rData_get, rData_set)
def getDrivers(self):
self.setEnable = self.setEnable_asDriver
return (self.rDataAg.getMonitors()
+self.addrAg.getDrivers()
+self.wRespAg.getMonitors())
def getMonitors(self):
self.setEnable = self.setEnable_asMonitor
return (self.rDataAg.getDrivers()
+self.addrAg.getMonitors()
+self.wRespAg.getDrivers())
| true | true |
f7203965a2d54250a5e143f35c9a2614727c589c | 4,095 | py | Python | encoder/i3d/i3d_encoder.py | gnes-ai/hub | 94cff9011ff6447ce1af51c5307813ab6fbbb156 | [
"Apache-2.0"
] | 36 | 2019-08-17T00:23:02.000Z | 2021-08-18T12:12:59.000Z | encoder/i3d/i3d_encoder.py | gnes-ai/hub | 94cff9011ff6447ce1af51c5307813ab6fbbb156 | [
"Apache-2.0"
] | 1 | 2019-10-24T05:09:45.000Z | 2019-10-24T05:09:45.000Z | encoder/i3d/i3d_encoder.py | gnes-ai/hub | 94cff9011ff6447ce1af51c5307813ab6fbbb156 | [
"Apache-2.0"
] | 11 | 2019-10-22T05:15:14.000Z | 2020-04-25T16:04:01.000Z | # Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
from gnes.encoder.base import BaseVideoEncoder
from gnes.helper import batching, get_first_available_gpu
class I3dEncoder(BaseVideoEncoder):
    """Video encoder based on a pre-trained I3D (Inflated 3D ConvNet) model.

    Restores a TensorFlow (1.x graph-mode) checkpoint from ``model_dir`` and
    exposes the activations of ``output_layer`` as the video encoding.
    """
    # Clips are fed to the network one at a time (see @batching on _encode).
    batch_size = 1
    def __init__(self, model_dir: str,
                 output_layer: str,
                 num_classes: int = 400,
                 frame_size_x: int = 224,
                 frame_size_y: int = 224,
                 num_frame_per_clib: int = 16,
                 rgb_channels: int = 3,
                 on_gpu: bool = False,
                 *args, **kwargs):
        # model_dir is the checkpoint path prefix; "<model_dir>.meta" must
        # exist next to it (see post_init).
        super().__init__(*args, **kwargs)
        self.model_dir = model_dir
        self.output_layer = output_layer
        self.num_classes = num_classes
        self.frame_size_x = frame_size_x
        self.frame_size_y = frame_size_y
        self.num_frame_per_clib = num_frame_per_clib
        self.rgb_channels = rgb_channels
        self.on_gpu = on_gpu
    def post_init(self):
        """Build the I3D graph and restore weights from the checkpoint."""
        import tensorflow as tf
        from i3d_cores.i3d import InceptionI3d
        import os
        # Pin TensorFlow to the first free GPU before the session is created.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(get_first_available_gpu())
        with tf.Graph().as_default():
            # Input: (batch, frames, H, W, C) float32 clips.
            self.rgb_images_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,
                                                                                  self.num_frame_per_clib,
                                                                                  self.frame_size_x,
                                                                                  self.frame_size_y,
                                                                                  self.rgb_channels))
            is_training = False
            with tf.variable_scope('RGB'):
                # Graph is truncated at `output_layer`; its activations become
                # the encoding (logits ignored).
                self.feature, _ = InceptionI3d(
                    num_classes=self.num_classes,
                    spatial_squeeze=True,
                    final_endpoint=self.output_layer,
                    name='inception_i3d'
                )(self.rgb_images_placeholder, is_training)
            init = tf.global_variables_initializer()
            config = tf.ConfigProto(log_device_placement=False)
            if self.on_gpu:
                # Grow GPU memory on demand instead of grabbing it all upfront.
                config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            self.sess.run(init)
            checkpoint_file = self.model_dir
            meta_graph_location = self.model_dir + '.meta'
            saver = tf.train.import_meta_graph(meta_graph_location, clear_devices=True)
            saver.restore(self.sess, checkpoint_file)
    def encode(self, data: List['np.ndarray'], *args, **kwargs) -> np.ndarray:
        """Encode a list of clips; each item is a (frames, H, W, C) array.

        Clips shorter than num_frame_per_clib are zero-padded along the frame
        axis; longer clips are truncated to the first num_frame_per_clib frames.
        """
        def _padding(data):
            _data = np.array(
                [np.concatenate((d, np.zeros((self.num_frame_per_clib - d.shape[0],
                                              self.frame_size_x,
                                              self.frame_size_y,
                                              self.rgb_channels), dtype=np.float32)), axis=0)
                 if d.shape[0] < self.num_frame_per_clib else d[:self.num_frame_per_clib] for d in data])
            return _data
        # @batching expects a method-like signature; `_` stands in for self.
        @batching
        def _encode(_, data):
            feature, = self.sess.run([self.feature], feed_dict={self.rgb_images_placeholder: data})
            return np.array(feature).astype(np.float32)
        return _encode(self, _padding(data))
| 41.785714 | 105 | 0.572894 |
from typing import List
import numpy as np
from gnes.encoder.base import BaseVideoEncoder
from gnes.helper import batching, get_first_available_gpu
class I3dEncoder(BaseVideoEncoder):
batch_size = 1
def __init__(self, model_dir: str,
output_layer: str,
num_classes: int = 400,
frame_size_x: int = 224,
frame_size_y: int = 224,
num_frame_per_clib: int = 16,
rgb_channels: int = 3,
on_gpu: bool = False,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.model_dir = model_dir
self.output_layer = output_layer
self.num_classes = num_classes
self.frame_size_x = frame_size_x
self.frame_size_y = frame_size_y
self.num_frame_per_clib = num_frame_per_clib
self.rgb_channels = rgb_channels
self.on_gpu = on_gpu
def post_init(self):
import tensorflow as tf
from i3d_cores.i3d import InceptionI3d
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(get_first_available_gpu())
with tf.Graph().as_default():
self.rgb_images_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,
self.num_frame_per_clib,
self.frame_size_x,
self.frame_size_y,
self.rgb_channels))
is_training = False
with tf.variable_scope('RGB'):
self.feature, _ = InceptionI3d(
num_classes=self.num_classes,
spatial_squeeze=True,
final_endpoint=self.output_layer,
name='inception_i3d'
)(self.rgb_images_placeholder, is_training)
init = tf.global_variables_initializer()
config = tf.ConfigProto(log_device_placement=False)
if self.on_gpu:
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(init)
checkpoint_file = self.model_dir
meta_graph_location = self.model_dir + '.meta'
saver = tf.train.import_meta_graph(meta_graph_location, clear_devices=True)
saver.restore(self.sess, checkpoint_file)
def encode(self, data: List['np.ndarray'], *args, **kwargs) -> np.ndarray:
def _padding(data):
_data = np.array(
[np.concatenate((d, np.zeros((self.num_frame_per_clib - d.shape[0],
self.frame_size_x,
self.frame_size_y,
self.rgb_channels), dtype=np.float32)), axis=0)
if d.shape[0] < self.num_frame_per_clib else d[:self.num_frame_per_clib] for d in data])
return _data
@batching
def _encode(_, data):
feature, = self.sess.run([self.feature], feed_dict={self.rgb_images_placeholder: data})
return np.array(feature).astype(np.float32)
return _encode(self, _padding(data))
| true | true |
f7203ab4da825176e6d88094bad4c4f581a86fe3 | 95 | py | Python | jungle/__init__.py | felixhorns/jungle | da50104dcdd2427fcaa5ed190f0bd7f2097e2e79 | [
"MIT"
] | 1 | 2022-03-01T14:50:14.000Z | 2022-03-01T14:50:14.000Z | jungle/__init__.py | felixhorns/jungle | da50104dcdd2427fcaa5ed190f0bd7f2097e2e79 | [
"MIT"
] | 1 | 2020-03-27T00:19:23.000Z | 2020-03-27T00:19:23.000Z | jungle/__init__.py | felixhorns/jungle | da50104dcdd2427fcaa5ed190f0bd7f2097e2e79 | [
"MIT"
] | null | null | null | from .tree import *
from .forest import *
from .sfs import *
from .size_matched_model import *
| 19 | 33 | 0.747368 | from .tree import *
from .forest import *
from .sfs import *
from .size_matched_model import *
| true | true |
f7203b87bf0c1b3f2aca6d05147b1c60aebb4053 | 5,005 | py | Python | examples/mechanics/DirectProjection/n_cubes_directproj.py | vacary/siconos-tutorials | 93c0158321077a313692ed52fed69ff3c256ae32 | [
"Apache-2.0"
] | 6 | 2017-01-12T23:09:28.000Z | 2021-03-20T17:03:58.000Z | examples/mechanics/DirectProjection/n_cubes_directproj.py | vacary/siconos-tutorials | 93c0158321077a313692ed52fed69ff3c256ae32 | [
"Apache-2.0"
] | 3 | 2019-01-14T13:44:51.000Z | 2021-05-17T13:57:27.000Z | examples/mechanics/DirectProjection/n_cubes_directproj.py | vacary/siconos-tutorials | 93c0158321077a313692ed52fed69ff3c256ae32 | [
"Apache-2.0"
] | 2 | 2019-10-22T13:30:39.000Z | 2020-10-06T10:19:57.000Z | #!/usr/bin/env python
#
# Example of two cubes, one with a convex shape, one with a primitive
# shape.
#
from siconos.mechanics.collision.tools import Contactor
from siconos.io.mechanics_run import MechanicsHdf5Runner
import siconos.numerics as sn
import siconos.kernel as sk
import random
import siconos
bullet_options = siconos.mechanics.collision.bullet.SiconosBulletOptions()
bullet_options.worldScale = 1.0
bullet_options.perturbationIterations = 7
bullet_options.minimumPointsPerturbationThreshold = 7
n_cube = 3
n_row = 2
n_col = 2
# Creation of the hdf5 file for input/output
with MechanicsHdf5Runner() as io:
for i in range(n_row):
for j in range(n_col):
for n in range(n_cube):
# Definition of a cube as a convex shape
io.add_convex_shape('CubeCS'+str(n)+'_'+str(i)+'_'+str(j),
[(-1.0, 1.0, -1.0),
(-1.0, -1.0, -1.0),
(-1.0, -1.0, 1.0),
(-1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, -1.0),
(1.0, -1.0, -1.0),
(1.0, -1.0, 1.0)])
# Alternative to the previous convex shape definition.
# io.add_primitive_shape('CubePrim', 'Box', (2, 2, 2))
# Definition of the ground shape
io.add_primitive_shape('Ground', 'Box', (200, 200, .5))
# Definition of the left shape
# io.add_primitive_shape('Left', 'Box', (100, 0.5, 50.))
# Definition of the right shape
# io.add_primitive_shape('Right', 'Box', (100, 0.5, 50.))
# Definition of the rear shape
# io.add_primitive_shape('Rear0', 'Box', (0.5, 100., 50.))
# Definition of the front shape
# io.add_primitive_shape('Front', 'Box', (100, 0.5, 50.))
# Definition of a non smooth law. As no group ids are specified it
# is between contactors of group id 0.
io.add_Newton_impact_friction_nsl('contact', mu=0.3)
# The cube object made with an unique Contactor : the cube shape.
# As a mass is given, it is a dynamic system involved in contact
# detection and in the simulation. With no group id specified the
# Contactor belongs to group 0
for i in range(n_row):
for j in range(n_col):
for n in range(n_cube):
io.add_object('cubeCS'+str(n)+'_'+str(i)+'_'+str(j),
[Contactor(
'CubeCS'+str(n)+'_'+str(i)+'_'+str(j))],
translation=[3.0*i, 3.0*j, 2.05*(n+1)],
velocity=[10*(1.0+2.0*(random.random()-1.0)/2.0),
10*(1.0+2.0*(random.random()-1.0)/2.0),
0, 1, 1, 1],
mass=1)
# io.add_object('cube2', [Contactor('CubePrim')], translation=[0, 3, 2],
# velocity=[10, 0, 0, 1, 1, 1],
# mass=1)
# the ground object made with the ground shape. As the mass is
# not given, it is a static object only involved in contact
# detection.
io.add_object('ground', [Contactor('Ground')],
translation=[50, 50, 0])
# io.add_object('left', [Contactor('Left')],
# translation=[0, 50., 25.])
# io.add_object('right', [Contactor('Right')],
# translation=[0, -50., 25.])
# io.add_object('rear00', [Contactor('Rear0')],
# translation=[25., 0., 250.])
# Run the simulation from the inputs previously defined and add
# results to the hdf5 file. The visualisation of the output may be done
# with the vview command.
options = sk.solver_options_create(sn.SICONOS_FRICTION_3D_NSGS)
options.iparam[sn.SICONOS_IPARAM_MAX_ITER] = 100
options.dparam[sn.SICONOS_DPARAM_TOL] = 1e-4
test=True
if test:
nstep = 100
else:
nstep = 2000
step = 0.005
with MechanicsHdf5Runner(mode='r+') as io:
# By default earth gravity is applied and the units are those
# of the International System of Units.
# Because of fixed collision margins used in the collision detection,
# sizes of small objects may need to be expressed in cm or mm.
io.run(with_timer=False,
time_stepping=sk.TimeSteppingDirectProjection,
osi=sk.MoreauJeanDirectProjectionOSI,
body_class=None,
shape_class=None,
face_class=None,
edge_class=None,
gravity_scale=1,
bullet_options=bullet_options,
t0=0,
T=nstep*step,
h=step,
theta=0.50001,
Newton_max_iter=1,
set_external_forces=None,
solver_options=options,
numerics_verbose=False,
output_frequency=1,
projection_itermax=5,
projection_tolerance=1e-8,
projection_tolerance_unilateral=1e-8,
)
| 36.268116 | 79 | 0.564236 |
from siconos.mechanics.collision.tools import Contactor
from siconos.io.mechanics_run import MechanicsHdf5Runner
import siconos.numerics as sn
import siconos.kernel as sk
import random
import siconos
bullet_options = siconos.mechanics.collision.bullet.SiconosBulletOptions()
bullet_options.worldScale = 1.0
bullet_options.perturbationIterations = 7
bullet_options.minimumPointsPerturbationThreshold = 7
n_cube = 3
n_row = 2
n_col = 2
with MechanicsHdf5Runner() as io:
for i in range(n_row):
for j in range(n_col):
for n in range(n_cube):
io.add_convex_shape('CubeCS'+str(n)+'_'+str(i)+'_'+str(j),
[(-1.0, 1.0, -1.0),
(-1.0, -1.0, -1.0),
(-1.0, -1.0, 1.0),
(-1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, -1.0),
(1.0, -1.0, -1.0),
(1.0, -1.0, 1.0)])
io.add_primitive_shape('Ground', 'Box', (200, 200, .5))
io.add_Newton_impact_friction_nsl('contact', mu=0.3)
for i in range(n_row):
for j in range(n_col):
for n in range(n_cube):
io.add_object('cubeCS'+str(n)+'_'+str(i)+'_'+str(j),
[Contactor(
'CubeCS'+str(n)+'_'+str(i)+'_'+str(j))],
translation=[3.0*i, 3.0*j, 2.05*(n+1)],
velocity=[10*(1.0+2.0*(random.random()-1.0)/2.0),
10*(1.0+2.0*(random.random()-1.0)/2.0),
0, 1, 1, 1],
mass=1)
io.add_object('ground', [Contactor('Ground')],
translation=[50, 50, 0])
options = sk.solver_options_create(sn.SICONOS_FRICTION_3D_NSGS)
options.iparam[sn.SICONOS_IPARAM_MAX_ITER] = 100
options.dparam[sn.SICONOS_DPARAM_TOL] = 1e-4
test=True
if test:
nstep = 100
else:
nstep = 2000
step = 0.005
with MechanicsHdf5Runner(mode='r+') as io:
io.run(with_timer=False,
time_stepping=sk.TimeSteppingDirectProjection,
osi=sk.MoreauJeanDirectProjectionOSI,
body_class=None,
shape_class=None,
face_class=None,
edge_class=None,
gravity_scale=1,
bullet_options=bullet_options,
t0=0,
T=nstep*step,
h=step,
theta=0.50001,
Newton_max_iter=1,
set_external_forces=None,
solver_options=options,
numerics_verbose=False,
output_frequency=1,
projection_itermax=5,
projection_tolerance=1e-8,
projection_tolerance_unilateral=1e-8,
)
| true | true |
f7203b8e88485ca582d2256b589f018535db2c31 | 223 | py | Python | terminusdb_client/__init__.py | LogicalDash/terminusdb-client-python | 7f13f77e60f891b1e6bd214ebf73ff7f75fcaff8 | [
"Apache-2.0"
] | 43 | 2020-06-12T23:44:17.000Z | 2022-03-12T15:18:55.000Z | terminusdb_client/__init__.py | LogicalDash/terminusdb-client-python | 7f13f77e60f891b1e6bd214ebf73ff7f75fcaff8 | [
"Apache-2.0"
] | 151 | 2020-06-12T20:23:05.000Z | 2022-03-29T20:38:35.000Z | terminusdb_client/__init__.py | LogicalDash/terminusdb-client-python | 7f13f77e60f891b1e6bd214ebf73ff7f75fcaff8 | [
"Apache-2.0"
] | 46 | 2020-06-16T20:51:21.000Z | 2022-03-17T18:11:46.000Z | from .woqlclient import WOQLClient # noqa
from .woqldataframe import woqlDataframe as WOQLDataFrame # noqa
from .woqlquery import WOQLQuery # noqa
from .woqlschema import * # noqa
from .woqlview import WOQLView # noqa
| 37.166667 | 65 | 0.784753 | from .woqlclient import WOQLClient
from .woqldataframe import woqlDataframe as WOQLDataFrame
from .woqlquery import WOQLQuery
from .woqlschema import *
from .woqlview import WOQLView
| true | true |
f7203bb1984344d6b1e7a819172c514ab77e38b9 | 44,751 | py | Python | models/ deeplabv3_plus_xception.py | Mohammedaabdu/pytorch-segmentation | 9fdf927d345146247f039042ee37612157e26582 | [
"MIT"
] | 2 | 2019-07-18T16:01:56.000Z | 2019-07-27T18:57:44.000Z | models/ deeplabv3_plus_xception.py | Mohammedaabdu/pytorch-segmentation | 9fdf927d345146247f039042ee37612157e26582 | [
"MIT"
] | null | null | null | models/ deeplabv3_plus_xception.py | Mohammedaabdu/pytorch-segmentation | 9fdf927d345146247f039042ee37612157e26582 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 21 15:16:18 2021
@author: Administrator
"""
from base import BaseModel
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
from utils.helpers import initialize_weights,set_trainable
from itertools import chain
'''
Pretrained weights: 'xception_65.pth' is available at
https://github.com/zhangtianlun12/deeplabv3-/releases/download/v0.1/xception_65.pth
'''
'''
-> ResNet BackBone
'''
class ResNet(nn.Module):
    """ResNet backbone for DeepLabV3+.

    Wraps a torchvision ResNet and converts its last stage(s) to atrous
    (dilated) convolution so that the overall output stride is 16 or 8,
    as required for dense prediction.

    forward() returns a (high_level_features, low_level_features) pair;
    the low-level features are taken after layer1 and are consumed by the
    DeepLabV3+ decoder.
    """

    def __init__(self, in_channels=3, output_stride=16, backbone='resnet101', pretrained=True):
        """
        Args:
            in_channels: Number of input image channels.
            output_stride: Requested input/output spatial resolution ratio;
                must be 16 or 8.
            backbone: Name of a torchvision ResNet variant ('resnet18',
                'resnet34', 'resnet50', 'resnet101', ...).
            pretrained: If True, load torchvision's ImageNet weights.
        """
        super(ResNet, self).__init__()
        model = getattr(models, backbone)(pretrained)
        # The pretrained stem only fits 3-channel inputs; rebuild and
        # re-initialize it when the channel count differs or pretrained
        # weights are not requested.
        if not pretrained or in_channels != 3:
            self.layer0 = nn.Sequential(
                nn.Conv2d(in_channels, 64, 7, stride=2, padding=3, bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            )
            initialize_weights(self.layer0)
        else:
            self.layer0 = nn.Sequential(*list(model.children())[:4])
        self.layer1 = model.layer1
        self.layer2 = model.layer2
        self.layer3 = model.layer3
        self.layer4 = model.layer4

        # (stride, dilation) overrides for stages 3 and 4 that realize the
        # requested output stride via atrous convolution.
        if output_stride == 16: s3, s4, d3, d4 = (2, 1, 1, 2)
        elif output_stride == 8: s3, s4, d3, d4 = (1, 1, 2, 4)

        # layer3 only needs rewiring for output_stride == 8 (its default
        # stride of 2 is already correct for output_stride == 16).
        if output_stride == 8:
            self._convert_stage(self.layer3, backbone, s3, d3)
        # Bug fix: layer4 must be rewired for BOTH output strides. It was
        # previously modified only inside the `output_stride == 8` branch,
        # which left the network at output stride 32 when output_stride ==
        # 16 (the s4/d4 values computed above were never applied).
        self._convert_stage(self.layer4, backbone, s4, d4)

    @staticmethod
    def _convert_stage(stage, backbone, stride, dilation):
        """Set stride/dilation on a ResNet stage's convolutions in place."""
        for n, m in stage.named_modules():
            # BasicBlock (resnet18/34) carries its strided 3x3 conv as
            # 'conv1'; Bottleneck (resnet50+) carries it as 'conv2'.
            if 'conv1' in n and (backbone == 'resnet34' or backbone == 'resnet18'):
                m.dilation, m.padding, m.stride = (dilation, dilation), (dilation, dilation), (stride, stride)
            elif 'conv2' in n:
                m.dilation, m.padding, m.stride = (dilation, dilation), (dilation, dilation), (stride, stride)
            elif 'downsample.0' in n:
                m.stride = (stride, stride)

    def forward(self, x):
        """Return (layer4 features, layer1 low-level features)."""
        x = self.layer0(x)
        x = self.layer1(x)
        low_level_features = x
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x, low_level_features
"""
Created on Fri Sep 13 19:04:23 2019
@author: shirhe-lyh
Implementation of Xception model.
Xception: Deep Learning with Depthwise Separable Convolutions, F. Chollect,
arxiv:1610.02357 (https://arxiv.org/abs/1610.02357).
Official tensorflow implementation:
https://github.com/tensorflow/models/blob/master/research/deeplab/core/xception.py
"""
import collections
import os
import torch
# Default unit-rate multipliers for the three separable convolutions inside
# an Xception module (see `unit_rate_list` in xception_block).
_DEFAULT_MULTI_GRID = [1, 1, 1]
# The cap for torch.clamp
_CLIP_CAP = 6
# Keyword arguments applied to every torch.nn.BatchNorm2d in this backbone;
# the values mirror the TensorFlow DeepLab reference implementation.
_BATCH_NORM_PARAMS = {
    'eps': 0.001,
    'momentum': 0.9997,
    'affine': True,
}
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    """A named tuple describing an Xception block.

    Its parts are:
        scope: The scope of the block.
        unit_fn: The Xception unit function which takes as input a tensor and
            returns another tensor with the output of the Xception unit
            (in this file this is always XceptionModule).
        args: A list of length equal to the number of units in the block. The
            list contains one dictionary for each unit in the block to serve
            as argument to unit_fn.
    """
def fixed_padding(inputs, kernel_size, rate=1):
    """Zero-pad `inputs` spatially, independently of the input size.

    The padding is chosen so that a subsequent convolution with the given
    `kernel_size` and atrous `rate` using 'VALID' padding produces the same
    spatial size as a 'SAME'-padded convolution would.

    Args:
        inputs: A 4-D tensor; the last two dimensions are padded.
        kernel_size: A positive integer, the kernel size of the following
            conv2d or max_pool2d operation.
        rate: An integer, rate for atrous convolution.

    Returns:
        The input tensor, either intact (if kernel_size == 1) or zero-padded
        along its last two dimensions (if kernel_size > 1).
    """
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    total_pad = effective_kernel - 1
    pad_front = total_pad // 2
    pad_back = total_pad - pad_front
    return torch.nn.functional.pad(
        inputs, pad=(pad_front, pad_back, pad_front, pad_back))
class Conv2dSame(torch.nn.Module):
    """Conv2d + BatchNorm + ReLU with 'SAME'-style output size.

    For stride == 1 the 'SAME' behaviour is obtained natively through the
    `padding` argument of torch.nn.Conv2d.  For stride > 1, explicit
    zero-padding (fixed_padding) is applied in forward() and the
    convolution itself runs with 'VALID' padding.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, rate=1):
        """Constructor.

        Args:
            in_channels: An integer, the number of input filters.
            out_channels: An integer, the number of output filters.
            kernel_size: An integer with the kernel_size of the filters.
            stride: An integer, the output stride.
            rate: An integer, rate for atrous convolution.
        """
        super(Conv2dSame, self).__init__()
        self._kernel_size = kernel_size
        self._rate = rate
        self._without_padding = stride == 1
        if self._without_padding:
            # 'SAME' padding handled by Conv2d itself (assumes
            # floor(padding) == padding).
            conv_options = {'stride': 1,
                            'padding': (kernel_size - 1) * rate // 2}
        else:
            # Explicit padding happens in forward(); convolve 'VALID'.
            conv_options = {'stride': stride}
        self._conv = torch.nn.Conv2d(in_channels,
                                     out_channels,
                                     kernel_size=kernel_size,
                                     dilation=rate,
                                     bias=False,
                                     **conv_options)
        self._batch_norm = torch.nn.BatchNorm2d(out_channels,
                                                **_BATCH_NORM_PARAMS)
        self._relu = torch.nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply pad (if needed) -> conv -> batch norm -> ReLU.

        Args:
            x: A 4-D tensor with shape [batch, channels, height_in, width_in].

        Returns:
            A 4-D tensor of size [batch, out_channels, height_out, width_out]
            with the convolution output.
        """
        if not self._without_padding:
            x = fixed_padding(x, self._kernel_size, self._rate)
        return self._relu(self._batch_norm(self._conv(x)))
class SeparableConv2dSame(torch.nn.Module):
    """Depthwise-separable 2-D convolution with 'SAME' padding.

    Runs a depthwise convolution followed by a 1x1 pointwise convolution,
    each followed by batch normalization (and, optionally, an activation).
    When stride > 1 and `use_explicit_padding` is True, explicit
    zero-padding is applied in forward() and the depthwise convolution
    uses 'VALID' padding, matching the TensorFlow reference behaviour.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 depth_multiplier, stride, rate, use_explicit_padding=True,
                 activation_fn=None, regularize_depthwise=False, **kwargs):
        """Constructor.

        Args:
            in_channels: An integer, the number of input filters.
            out_channels: An integer, the number of output filters.
            kernel_size: An integer with the kernel_size of the filters.
            depth_multiplier: The number of depthwise convolution output
                channels per input channel; the depthwise stage produces
                `in_channels * depth_multiplier` channels in total.
            stride: An integer, the output stride.
            rate: An integer, rate for atrous convolution.
            use_explicit_padding: If True, use explicit padding to make the
                model fully compatible with the open source version,
                otherwise use the native PyTorch 'SAME' padding.
            activation_fn: Activation function applied after each batch
                norm, or None for no activation.
            regularize_depthwise: Accepted for interface compatibility with
                the TensorFlow implementation; no regularization is applied
                here.
            **kwargs: Additional keyword arguments to pass to
                torch.nn.Conv2d.
        """
        super(SeparableConv2dSame, self).__init__()
        self._kernel_size = kernel_size
        self._rate = rate
        self._without_padding = stride == 1 or not use_explicit_padding
        depthwise_channels = in_channels * depth_multiplier
        # For native 'SAME' padding, let Conv2d pad; otherwise pad
        # explicitly in forward() (here we assume floor(padding) == padding).
        padding_options = {}
        if self._without_padding:
            padding_options['padding'] = (kernel_size - 1) * rate // 2
        self._conv_depthwise = torch.nn.Conv2d(in_channels,
                                               depthwise_channels,
                                               kernel_size=kernel_size,
                                               stride=stride,
                                               dilation=rate,
                                               groups=in_channels,
                                               bias=False,
                                               **padding_options,
                                               **kwargs)
        self._batch_norm_depthwise = torch.nn.BatchNorm2d(
            depthwise_channels, **_BATCH_NORM_PARAMS)
        self._conv_pointwise = torch.nn.Conv2d(depthwise_channels,
                                               out_channels,
                                               kernel_size=1,
                                               stride=1,
                                               bias=False,
                                               **kwargs)
        self._batch_norm_pointwise = torch.nn.BatchNorm2d(
            out_channels, **_BATCH_NORM_PARAMS)
        self._activation_fn = activation_fn

    def forward(self, x):
        """Apply the depthwise then pointwise convolution to `x`.

        Args:
            x: A 4-D tensor with shape [batch, channels, height_in, width_in].

        Returns:
            A 4-D tensor of size [batch, out_channels, height_out, width_out]
            with the convolution output.
        """
        if not self._without_padding:
            x = fixed_padding(x, self._kernel_size, self._rate)
        x = self._batch_norm_depthwise(self._conv_depthwise(x))
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        x = self._batch_norm_pointwise(self._conv_pointwise(x))
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        return x
class XceptionModule(torch.nn.Module):
    """An Xception module.

    The output of one Xception module is equal to the sum of `residual` and
    `shortcut`, where `residual` is the feature computed by three seperable
    convolution. The `shortcut` is the feature computed by 1x1 convolution
    with or without striding. In some cases, the `shortcut` path could be a
    simple identity function or none (i.e, no shortcut).
    """

    def __init__(self, in_channels, depth_list, skip_connection_type, stride,
                 unit_rate_list, rate=1, activation_fn_in_separable_conv=False,
                 regularize_depthwise=False, use_bounded_activation=False,
                 use_explicit_padding=True):
        """Constructor.

        Args:
            in_channels: An integer, the number of input filters.
            depth_list: A list of three integers specifying the depth values
                of one Xception module.
            skip_connection_type: Skip connection type for the residual path.
                Only supports 'conv', 'sum', or 'none'.
            stride: The block unit's stride. Detemines the amount of
                downsampling of the units output compared to its input.
            unit_rate_list: A list of three integers, determining the unit
                rate for each separable convolution in the Xception module.
            rate: An integer, rate for atrous convolution.
            activation_fn_in_separable_conv: Includes activation function in
                the seperable convolution or not.
            regularize_depthwise: Whether or not apply L2-norm regularization
                on the depthwise convolution weights.
            use_bounded_activation: Whether or not to use bounded activations.
                Bounded activations better lend themselves to quantized
                inference.
            use_explicit_padding: If True, use explicit padding to make the
                model fully compatible with the open source version, otherwise
                use the nattive Pytorch 'SAME' padding.

        Raises:
            ValueError: If depth_list and unit_rate_list do not contain three
                integers, or if stride != 1 for the third seperable convolution
                operation in the residual path, or unsupported skip connection
                type.
        """
        super(XceptionModule, self).__init__()

        if len(depth_list) != 3:
            raise ValueError('Expect three elements in `depth_list`.')
        if len(unit_rate_list) != 3:
            raise ValueError('Expect three elements in `unit_rate_list`.')
        if skip_connection_type not in ['conv', 'sum', 'none']:
            raise ValueError('Unsupported skip connection type.')

        # Activation function.  When activation_fn_in_separable_conv is
        # False (the original design), activation is applied to the INPUT
        # of each separable conv (self._input_activation_fn) instead of
        # inside it.
        self._input_activation_fn = None
        if activation_fn_in_separable_conv:
            activation_fn = (torch.nn.ReLU6(inplace=False) if
                             use_bounded_activation else
                             torch.nn.ReLU(inplace=False))
        else:
            if use_bounded_activation:
                # When use_bounded_activation is True, we clip the feature
                # values and apply relu6 for activation.
                activation_fn = lambda x: torch.clamp(x, -_CLIP_CAP, _CLIP_CAP)
                self._input_activation_fn = torch.nn.ReLU6(inplace=False)
            else:
                # Original network design.
                activation_fn = None
                self._input_activation_fn = torch.nn.ReLU(inplace=False)
        self._use_bounded_activation = use_bounded_activation
        self._output_activation_fn = None
        if use_bounded_activation:
            self._output_activation_fn = torch.nn.ReLU6(inplace=True)

        # Separable conv block.  Note: the same (stateless) input
        # activation module instance is reused before each of the three
        # separable convolutions.  Only the last convolution (i == 2)
        # carries the block's stride.
        layers = []
        in_channels_ = in_channels
        for i in range(3):
            if self._input_activation_fn is not None:
                layers += [self._input_activation_fn]
            layers += [
                SeparableConv2dSame(in_channels_,
                                    depth_list[i],
                                    kernel_size=3,
                                    depth_multiplier=1,
                                    regularize_depthwise=regularize_depthwise,
                                    rate=rate*unit_rate_list[i],
                                    stride=stride if i==2 else 1,
                                    activation_fn=activation_fn,
                                    use_explicit_padding=use_explicit_padding)]
            in_channels_ = depth_list[i]
        self._separable_conv_block = torch.nn.Sequential(*layers)

        # Skip connection ('conv' needs a strided 1x1 projection + BN so
        # the shortcut matches the residual's shape).
        self._skip_connection_type = skip_connection_type
        if skip_connection_type == 'conv':
            self._conv_skip_connection = torch.nn.Conv2d(in_channels,
                                                         depth_list[-1],
                                                         kernel_size=1,
                                                         stride=stride)
            self._batch_norm_shortcut = torch.nn.BatchNorm2d(
                depth_list[-1], **_BATCH_NORM_PARAMS)

    def forward(self, x):
        """
        Args:
            x: A 4-D tensor with shape [batch, height, width, channels].

        Returns:
            The Xception module's output: residual + projected shortcut
            ('conv'), residual + identity ('sum'), or residual alone
            ('none'), optionally clipped/bounded when
            use_bounded_activation is set.
        """
        residual = self._separable_conv_block(x)
        if self._skip_connection_type == 'conv':
            shortcut = self._conv_skip_connection(x)
            shortcut = self._batch_norm_shortcut(shortcut)
            if self._use_bounded_activation:
                residual = torch.clamp(residual, -_CLIP_CAP, _CLIP_CAP)
                shortcut = torch.clamp(shortcut, -_CLIP_CAP, _CLIP_CAP)
            outputs = residual + shortcut
            if self._use_bounded_activation:
                outputs = self._output_activation_fn(outputs)
        elif self._skip_connection_type == 'sum':
            if self._use_bounded_activation:
                residual = torch.clamp(residual, -_CLIP_CAP, _CLIP_CAP)
                x = torch.clamp(x, -_CLIP_CAP, _CLIP_CAP)
            outputs = residual + x
            if self._use_bounded_activation:
                outputs = self._output_activation_fn(outputs)
        else:
            outputs = residual
        return outputs
class StackBlocksDense(torch.nn.Module):
    """Stacks Xception blocks and controls output feature density.

    This class allows the user to explicitly control the output stride, which
    is the ratio of the input to output spatial resolution. This is useful for
    dense prediction tasks such as semantic segmentation or object detection.

    Control of the output feature density is implemented by atrous convolution.
    """

    def __init__(self, blocks, output_stride=None):
        """Constructor.

        Args:
            blocks: A list of length equal to the number of Xception blocks.
                Each element is an Xception Block object describing the units
                in the block.
            output_stride: If None, then the output will be computed at the
                nominal network stride. If output_stride is not None, it
                specifies the requested ratio of input to output spatial
                resolution, which needs to be equal to the product of unit
                strides from the start up to some level of Xception. For
                example, if the Xception employs units with strides 1, 2, 1,
                3, 4, 1, then valid values for the output_stride are 1, 2, 6,
                24 or None (which is equivalent to output_stride=24).

        Raises:
            ValueError: If the target output_stride is not valid.
        """
        super(StackBlocksDense, self).__init__()

        # The current_stride variable keeps track of the effective stride of
        # the activations. This allows us to invoke atrous convolution whenever
        # applying the next residual unit would result in the activations
        # having stride larger than the target output_stride.
        current_stride = 1

        # The atrous convolution rate parameter.
        rate = 1

        layers = []
        for block in blocks:
            for i, unit in enumerate(block.args):
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError('The target output_stride cannot be '
                                     'reached.')
                # If we have reached the target output_stride, then we need to
                # employ atrous convolution with stride=1 and multiply the 
                # atrous rate by the current unit's stride for use subsequent
                # layers.
                if output_stride is not None and current_stride == output_stride:
                    layers += [block.unit_fn(rate=rate, **dict(unit, stride=1))]
                    rate *= unit.get('stride', 1)
                else:
                    layers += [block.unit_fn(rate=1, **unit)]
                    current_stride *= unit.get('stride', 1)
        if output_stride is not None and current_stride != output_stride:
            raise ValueError('The target ouput_stride cannot be reached.')

        # All instantiated units, flattened into one sequential container.
        self._blocks = torch.nn.Sequential(*layers)

    def forward(self, x):
        """
        Args:
            x: A tensor of shape [batch, height, width, channels].

        Returns:
            Output tensor with stride equal to the specified output_stride.
        """
        x = self._blocks(x)
        return x
class Xception(torch.nn.Module):
    """Generator for Xception models.

    This class generates a family of Xception models. See the xception_*()
    methods for specific model instantiations, obtained by selecting
    different block instantiations that produce Xception of various depths.
    """

    def __init__(self, blocks, num_classes=None, global_pool=True,
                 keep_prob=0.5, output_stride=None, scope=None):
        """Constructor.

        Args:
            blocks: A list of length equal to the number of Xception blocks.
                Each element is an Xception Block object describing the units
                in the block.
            num_classes: Number of predicted classes for classification tasks.
                If 0 or None, we return the features before the logit layer.
            global_pool: If True, we perform global average pooling before
                computing logits. Set to True for image classification, False
                for dense prediction.
            keep_prob: Keep probability used in the pre-logits dropout layer.
            output_stride: If None, the the output will be computed at the
                nominal network stride. If output_stride is not None, it
                specifies the requested ratio of input to output spatial
                resolution.
            scope: Optional variable_scope.

        Raises:
            ValueError: If the target output_stride is not valid.
        """
        super(Xception, self).__init__()
        self._scope = scope

        layers = []
        if output_stride is not None:
            if output_stride % 2 != 0:
                raise ValueError('The output_stride must be a multiple of 2.')
            # The root block below already downsamples by 2.
            output_stride /= 2

        # Root block function operated on inputs.
        layers += [Conv2dSame(3, 32, 3, stride=2),
                   Conv2dSame(32, 64, 3, stride=1)]

        # Extract features for entry_flow, middle_flow, and exit_flow.
        layers += [StackBlocksDense(blocks, output_stride)]

        if global_pool:
            # Global average pooling.
            layers += [torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))]
        if num_classes:
            layers += [torch.nn.Dropout2d(p=keep_prob, inplace=True),
                       torch.nn.Conv2d(blocks[-1].args[-1]['depth_list'][-1],
                                       num_classes, 1)]
        self._layers = torch.nn.Sequential(*layers)

    def forward(self, x):
        """
        Args:
            x: A tensor of shape [batch, height, width, channels].

        Returns:
            A (output, low_level_features) pair, where `output` has stride
            equal to the specified output_stride and `low_level_features`
            is the output of the first Xception block (used by decoder
            heads such as DeepLabV3+).
        """
        # Bug fix: previously the whole network was run once for `output`
        # and then the stem plus the first block were run a SECOND time to
        # obtain `low_level_features`.  That duplicated compute and, in
        # training mode, updated the affected BatchNorm running statistics
        # twice per forward pass.  Run every layer exactly once instead.
        x = self._layers[0](x)
        x = self._layers[1](x)
        blocks = self._layers[2]._blocks
        low_level_features = blocks[0](x)
        x = low_level_features
        for block in list(blocks)[1:]:
            x = block(x)
        # Remaining head layers (pooling / dropout / logits), if any.
        for layer in list(self._layers)[3:]:
            x = layer(x)
        return x, low_level_features

    @property
    def scope(self):
        return self._scope
def xception_block(scope,
                   in_channels,
                   depth_list,
                   skip_connection_type,
                   activation_fn_in_separable_conv,
                   regularize_depthwise,
                   num_units,
                   stride,
                   unit_rate_list=None):
    """Helper function for creating a Xception block.

    Args:
        scope: The scope of the block.
        in_channels: The number of input filters.
        depth_list: The depth of the bottleneck layer for each unit.
        skip_connection_type: Skip connection type for the residual path.
            Only supports 'conv', 'sum', or 'none'.
        activation_fn_in_separable_conv: Includes activation function in the
            separable convolution or not.
        regularize_depthwise: Whether or not apply L2-norm regularization on
            the depthwise convolution weights.
        num_units: The number of units in the block.
        stride: The stride of the block, implemented as a stride in the last
            unit. All other units have stride=1.
        unit_rate_list: A list of three integers, determining the unit rate in
            the corresponding xception block; defaults to _DEFAULT_MULTI_GRID
            when None.

    Returns:
        An xception block.
    """
    rates = _DEFAULT_MULTI_GRID if unit_rate_list is None else unit_rate_list
    # Every unit in the block shares the same configuration dictionary.
    unit_args = {
        'in_channels': in_channels,
        'depth_list': depth_list,
        'skip_connection_type': skip_connection_type,
        'activation_fn_in_separable_conv': activation_fn_in_separable_conv,
        'regularize_depthwise': regularize_depthwise,
        'stride': stride,
        'unit_rate_list': rates,
    }
    return Block(scope, XceptionModule, [unit_args] * num_units)
def Xception41(num_classes=None,
               global_pool=True,
               keep_prob=0.5,
               output_stride=None,
               regularize_depthwise=False,
               multi_grid=None,
               scope='xception_41'):
    """Xception-41 model.

    Builds the Xception-41 block specification (three entry-flow blocks,
    a middle flow of 8 residual units, and a two-block exit flow) and
    instantiates an `Xception` module from it.
    """
    blocks = [
        # Entry flow: three blocks, each halving the spatial resolution.
        xception_block('entry_flow/block1',
                       in_channels=64,
                       depth_list=[128, 128, 128],
                       skip_connection_type='conv',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=2),
        xception_block('entry_flow/block2',
                       in_channels=128,
                       depth_list=[256, 256, 256],
                       skip_connection_type='conv',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=2),
        xception_block('entry_flow/block3',
                       in_channels=256,
                       depth_list=[728, 728, 728],
                       skip_connection_type='conv',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=2),
        # Middle flow: 8 identity-skip residual units (vs 16 in Xception-65).
        xception_block('middle_flow/block1',
                       in_channels=728,
                       depth_list=[728, 728, 728],
                       skip_connection_type='sum',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=8,
                       stride=1),
        # Exit flow.
        xception_block('exit_flow/block1',
                       in_channels=728,
                       depth_list=[728, 1024, 1024],
                       skip_connection_type='conv',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=2),
        xception_block('exit_flow/block2',
                       in_channels=1024,
                       depth_list=[1536, 1536, 2048],
                       skip_connection_type='none',
                       activation_fn_in_separable_conv=True,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=1,
                       unit_rate_list=multi_grid),
    ]
    return Xception(blocks=blocks, num_classes=num_classes, 
                    global_pool=global_pool, keep_prob=keep_prob,
                    output_stride=output_stride, scope=scope)
def xception_41(num_classes=None,
                global_pool=True,
                keep_prob=0.5,
                output_stride=None,
                regularize_depthwise=False,
                multi_grid=None,
                scope='xception_41',
                pretrained=True,
                checkpoint_path='./pretrained/xception_41.pth'):
    """Xception-41 model, optionally initialized from a checkpoint.

    Args:
        num_classes: Number of predicted classes; 0/None omits the logits.
        global_pool: If True, add global average pooling before the logits.
        keep_prob: Keep probability of the pre-logits dropout layer.
        output_stride: Requested input/output spatial resolution ratio, or
            None for the nominal network stride.
        regularize_depthwise: Whether to regularize the depthwise weights.
        multi_grid: Optional list of three unit rates for the last block.
        scope: Optional scope name.
        pretrained: If True, load weights from `checkpoint_path`.
        checkpoint_path: Path of the pretrained checkpoint file.

    Returns:
        An `Xception` module.
    """
    xception = Xception41(num_classes=num_classes, global_pool=global_pool,
                          keep_prob=keep_prob, output_stride=output_stride,
                          # Bug fix: these two options were previously
                          # accepted but never forwarded (silently ignored).
                          regularize_depthwise=regularize_depthwise,
                          multi_grid=multi_grid,
                          scope=scope)
    if pretrained:
        _load_state_dict(xception, num_classes, checkpoint_path)
    return xception
def Xception65(num_classes=None,
               global_pool=True,
               keep_prob=0.5,
               output_stride=None,
               regularize_depthwise=False,
               multi_grid=None,
               scope='xception_65'):
    """Xception-65 model.

    Same layout as Xception-41 but with a middle flow of 16 residual
    units instead of 8.
    """
    blocks = [
        # Entry flow: three blocks, each halving the spatial resolution.
        xception_block('entry_flow/block1',
                       in_channels=64,
                       depth_list=[128, 128, 128],
                       skip_connection_type='conv',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=2),
        xception_block('entry_flow/block2',
                       in_channels=128,
                       depth_list=[256, 256, 256],
                       skip_connection_type='conv',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=2),
        xception_block('entry_flow/block3',
                       in_channels=256,
                       depth_list=[728, 728, 728],
                       skip_connection_type='conv',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=2),
        # Middle flow: 16 identity-skip residual units.
        xception_block('middle_flow/block1',
                       in_channels=728,
                       depth_list=[728, 728, 728],
                       skip_connection_type='sum',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=16,
                       stride=1),
        # Exit flow.
        xception_block('exit_flow/block1',
                       in_channels=728,
                       depth_list=[728, 1024, 1024],
                       skip_connection_type='conv',
                       activation_fn_in_separable_conv=False,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=2),
        xception_block('exit_flow/block2',
                       in_channels=1024,
                       depth_list=[1536, 1536, 2048],
                       skip_connection_type='none',
                       activation_fn_in_separable_conv=True,
                       regularize_depthwise=regularize_depthwise,
                       num_units=1,
                       stride=1,
                       unit_rate_list=multi_grid),
    ]
    return Xception(blocks=blocks, num_classes=num_classes, 
                    global_pool=global_pool, keep_prob=keep_prob,
                    output_stride=output_stride, scope=scope)
def xception_65(num_classes=None,
                global_pool=False,
                keep_prob=0.5,
                output_stride=None,
                regularize_depthwise=False,
                multi_grid=None,
                scope='xception_65',
                pretrained=True,
                checkpoint_path='./pretrained/xception_65.pth'):
    """Xception-65 model, optionally initialized from a checkpoint.

    Args:
        num_classes: Number of predicted classes; 0/None omits the logits.
        global_pool: If True, add global average pooling before the logits
            (defaults to False here for dense prediction use).
        keep_prob: Keep probability of the pre-logits dropout layer.
        output_stride: Requested input/output spatial resolution ratio, or
            None for the nominal network stride.
        regularize_depthwise: Whether to regularize the depthwise weights.
        multi_grid: Optional list of three unit rates for the last block.
        scope: Optional scope name.
        pretrained: If True, load weights from `checkpoint_path`.
        checkpoint_path: Path of the pretrained checkpoint file.

    Returns:
        An `Xception` module.
    """
    xception = Xception65(num_classes=num_classes, global_pool=global_pool,
                          keep_prob=keep_prob, output_stride=output_stride,
                          # Bug fix: these two options were previously
                          # accepted but never forwarded (silently ignored).
                          regularize_depthwise=regularize_depthwise,
                          multi_grid=multi_grid,
                          scope=scope)
    if pretrained:
        # Bug fix: honor the caller-supplied checkpoint_path; the default
        # path was previously hard-coded here regardless of the argument.
        _load_state_dict(xception, num_classes, checkpoint_path)
    return xception
def Xception71(num_classes=None,
               global_pool=True,
               keep_prob=0.5,
               output_stride=None,
               regularize_depthwise=False,
               multi_grid=None,
               scope='xception_71'):
    """Xception-71 model: builds the block list and returns the network."""
    # Per-block spec: (scope, in_channels, depth_list, skip_connection_type,
    # activation_fn_in_separable_conv, num_units, stride, unit_rate_list).
    specs = [
        ('entry_flow/block1', 64, [128, 128, 128], 'conv', False, 1, 2, None),
        ('entry_flow/block2', 128, [256, 256, 256], 'conv', False, 1, 1, None),
        ('entry_flow/block3', 256, [256, 256, 256], 'conv', False, 1, 2, None),
        ('entry_flow/block4', 256, [728, 728, 728], 'conv', False, 1, 1, None),
        ('entry_flow/block5', 728, [728, 728, 728], 'conv', False, 1, 2, None),
        # 16 residual middle-flow units.
        ('middle_flow/block1', 728, [728, 728, 728], 'sum', False, 16, 1, None),
        ('exit_flow/block1', 728, [728, 1024, 1024], 'conv', False, 1, 2, None),
        # Last block honors the caller-supplied multi-grid atrous rates.
        ('exit_flow/block2', 1024, [1536, 1536, 2048], 'none', True, 1, 1,
         multi_grid),
    ]
    blocks = [xception_block(name,
                             in_channels=cin,
                             depth_list=depths,
                             skip_connection_type=skip,
                             activation_fn_in_separable_conv=act,
                             regularize_depthwise=regularize_depthwise,
                             num_units=units,
                             stride=stride,
                             unit_rate_list=rates)
              for name, cin, depths, skip, act, units, stride, rates in specs]
    return Xception(blocks=blocks, num_classes=num_classes,
                    global_pool=global_pool, keep_prob=keep_prob,
                    output_stride=output_stride, scope=scope)
def xception_71(num_classes=None,
                global_pool=True,
                keep_prob=0.5,
                output_stride=None,
                regularize_depthwise=False,
                multi_grid=None,
                scope='xception_71',
                pretrained=True,
                checkpoint_path='./pretrained/xception_71.pth'):
    """Xception-71 model.

    Builds the backbone via ``Xception71`` and optionally loads pretrained
    weights from ``checkpoint_path``.
    """
    # Fix: forward regularize_depthwise/multi_grid to the builder
    # (previously accepted but silently dropped).
    xception = Xception71(num_classes=num_classes, global_pool=global_pool,
                          keep_prob=keep_prob, output_stride=output_stride,
                          regularize_depthwise=regularize_depthwise,
                          multi_grid=multi_grid, scope=scope)
    if pretrained:
        _load_state_dict(xception, num_classes, checkpoint_path)
    return xception
def _load_state_dict(model, num_classes, checkpoint_path):
"""Load pretrained weights."""
if os.path.exists(checkpoint_path):
state_dict = torch.load(checkpoint_path)
if num_classes is None or num_classes != 1001:
state_dict.pop('_layers.5.weight')
state_dict.pop('_layers.5.bias')
model.load_state_dict(state_dict, strict=False)
print('Load pretrained weights successfully.')
else:
raise ValueError('`checkpoint_path` does not exist.')
'''
-> The Atrous Spatial Pyramid Pooling
'''
def assp_branch(in_channels, out_channles, kernel_size, dilation):
    """One ASPP branch: atrous Conv2d -> BatchNorm2d -> ReLU.

    A 1x1 kernel needs no padding; for a 3x3 kernel, padding equal to the
    dilation preserves the spatial size.
    """
    if kernel_size == 1:
        padding = 0
    else:
        padding = dilation
    conv = nn.Conv2d(in_channels, out_channles, kernel_size,
                     padding=padding, dilation=dilation, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(out_channles), nn.ReLU(inplace=True))
class ASSP(nn.Module):
    """Atrous Spatial Pyramid Pooling (DeepLab v3).

    Four parallel atrous-conv branches plus an image-level (global average
    pooling) branch; the five 256-channel outputs are concatenated and
    projected back to 256 channels.
    """
    def __init__(self, in_channels, output_stride):
        super(ASSP, self).__init__()
        assert output_stride in [8, 16], 'Only output strides of 8 or 16 are suported'
        # Dilation rates for each supported output stride (doubled at OS 8).
        if output_stride == 16: dilations = [1, 6, 12, 18]
        elif output_stride == 8: dilations = [1, 12, 24, 36]
        self.aspp1 = assp_branch(in_channels, 256, 1, dilation=dilations[0])
        self.aspp2 = assp_branch(in_channels, 256, 3, dilation=dilations[1])
        self.aspp3 = assp_branch(in_channels, 256, 3, dilation=dilations[2])
        self.aspp4 = assp_branch(in_channels, 256, 3, dilation=dilations[3])
        # Image-level features: global average pool -> 1x1 conv -> (upsampled
        # back to the feature-map size in forward()).
        self.avg_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(in_channels, 256, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True))
        # Fuses the 5 concatenated branches (5 * 256 channels) down to 256.
        self.conv1 = nn.Conv2d(256*5, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(0.5)
        initialize_weights(self)
    def forward(self, x):
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        # Broadcast the pooled features back to the input's spatial size.
        x5 = F.interpolate(self.avg_pool(x), size=(x.size(2), x.size(3)), mode='bilinear', align_corners=True)
        x = self.conv1(torch.cat((x1, x2, x3, x4, x5), dim=1))
        x = self.bn1(x)
        x = self.dropout(self.relu(x))
        return x
'''
-> Decoder
'''
class Decoder(nn.Module):
    """DeepLab v3+ decoder.

    Projects the backbone's low-level features to 48 channels, upsamples
    the ASPP output to the low-level resolution, concatenates both, and
    predicts per-pixel class logits.
    """
    def __init__(self, low_level_channels, num_classes):
        super(Decoder, self).__init__()
        # 1x1 reduction of the low-level features to 48 channels.
        self.conv1 = nn.Conv2d(low_level_channels, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(48)
        self.relu = nn.ReLU(inplace=True)
        # Table 2, best performance with two 3x3 convs
        self.output = nn.Sequential(
            nn.Conv2d(48+256, 256, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes, 1, stride=1),
        )
        initialize_weights(self)
    def forward(self, x, low_level_features):
        low_level_features = self.conv1(low_level_features)
        low_level_features = self.relu(self.bn1(low_level_features))
        # Upsample ASPP features to the low-level spatial resolution.
        H, W = low_level_features.size(2), low_level_features.size(3)
        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        x = self.output(torch.cat((low_level_features, x), dim=1))
        return x
'''
-> Deeplab V3 +
'''
class DeepLab(BaseModel):
    """DeepLab v3+ (encoder-decoder with atrous separable convolution).

    Backbone (ResNet or Xception-65) -> ASPP -> decoder, with logits
    bilinearly upsampled back to the input resolution.
    """
    def __init__(self, num_classes, in_channels=3, backbone='xception', pretrained=True,
                output_stride=16, freeze_bn=False, freeze_backbone=False, **_):
        super(DeepLab, self).__init__()
        # Fix: the original `assert ('xception' or 'resnet' in backbone)`
        # always passed because the non-empty string 'xception' is truthy.
        assert 'xception' in backbone or 'resnet' in backbone, \
            "backbone must be an 'xception' or 'resnet' variant"
        if 'resnet' in backbone:
            self.backbone = ResNet(in_channels=in_channels, output_stride=output_stride,
                                   pretrained=pretrained)
            low_level_channels = 256
        else:
            # NOTE(review): the Xception path ignores `in_channels` (its stem
            # is hard-coded to RGB) — confirm callers only use 3-channel input.
            self.backbone = xception_65(output_stride=output_stride, pretrained=pretrained,
                                        global_pool=False,
                                        checkpoint_path='./pretrained/xception_65.pth')
            low_level_channels = 128
        self.ASSP = ASSP(in_channels=2048, output_stride=output_stride)
        self.decoder = Decoder(low_level_channels, num_classes)
        if freeze_bn:
            self.freeze_bn()
        if freeze_backbone:
            set_trainable([self.backbone], False)

    def forward(self, x):
        H, W = x.size(2), x.size(3)
        x, low_level_features = self.backbone(x)
        x = self.ASSP(x)
        x = self.decoder(x, low_level_features)
        # Recover the input resolution for per-pixel predictions.
        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        return x

    # Two functions to yield the parameters of the backbone
    # & Decoder / ASSP to use differentiable learning rates
    # FIXME: in xception, we use the parameters from xception and not aligned xception
    # better to have higher lr for this backbone
    def get_backbone_params(self):
        return self.backbone.parameters()

    def get_decoder_params(self):
        return chain(self.ASSP.parameters(), self.decoder.parameters())

    def freeze_bn(self):
        """Put every BatchNorm layer in eval mode (frozen running stats)."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.eval()
| 42.701336 | 157 | 0.542714 |
from base import BaseModel
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
from utils.helpers import initialize_weights,set_trainable
from itertools import chain
class ResNet(nn.Module):
    """Torchvision ResNet backbone adapted for DeepLab.

    forward() returns ``(features, low_level_features)`` where the low-level
    features are taken after ``layer1`` (used by the decoder). For small
    output strides the later stages are converted to dilated (atrous)
    convolutions instead of spatial striding.
    """
    def __init__(self, in_channels=3, output_stride=16, backbone='resnet101', pretrained=True):
        super(ResNet, self).__init__()
        model = getattr(models, backbone)(pretrained)
        # Rebuild the stem from scratch when pretrained weights cannot be
        # reused (non-RGB input) or are not requested.
        if not pretrained or in_channels != 3:
            self.layer0 = nn.Sequential(
                nn.Conv2d(in_channels, 64, 7, stride=2, padding=3, bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            )
            initialize_weights(self.layer0)
        else:
            # conv1 + bn1 + relu + maxpool of the torchvision model.
            self.layer0 = nn.Sequential(*list(model.children())[:4])
        self.layer1 = model.layer1
        self.layer2 = model.layer2
        self.layer3 = model.layer3
        self.layer4 = model.layer4
        # (stride, dilation) for layer3/layer4 so the overall downsampling
        # factor matches output_stride (16 or 8).
        if output_stride == 16: s3, s4, d3, d4 = (2, 1, 1, 2)
        elif output_stride == 8: s3, s4, d3, d4 = (1, 1, 2, 4)
        if output_stride == 8:
            # Replace layer3's striding with dilation.
            for n, m in self.layer3.named_modules():
                if 'conv1' in n and (backbone == 'resnet34' or backbone == 'resnet18'):
                    m.dilation, m.padding, m.stride = (d3,d3), (d3,d3), (s3,s3)
                elif 'conv2' in n:
                    m.dilation, m.padding, m.stride = (d3,d3), (d3,d3), (s3,s3)
                elif 'downsample.0' in n:
                    m.stride = (s3, s3)
            # NOTE(review): this layer4 rewrite only runs when
            # output_stride == 8, so the (s4, d4) values chosen for
            # output_stride == 16 are never applied; upstream ports apply it
            # unconditionally — confirm intent.
            for n, m in self.layer4.named_modules():
                if 'conv1' in n and (backbone == 'resnet34' or backbone == 'resnet18'):
                    m.dilation, m.padding, m.stride = (d4,d4), (d4,d4), (s4,s4)
                elif 'conv2' in n:
                    m.dilation, m.padding, m.stride = (d4,d4), (d4,d4), (s4,s4)
                elif 'downsample.0' in n:
                    m.stride = (s4, s4)
    def forward(self, x):
        x = self.layer0(x)
        x = self.layer1(x)
        # Decoder taps the activations right after layer1.
        low_level_features = x
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x, low_level_features
import collections
import os
import torch
# Default unit_rate_list (multi-grid atrous rates) when none is supplied.
_DEFAULT_MULTI_GRID = [1, 1, 1]
# Clamp bound used with bounded (ReLU6-style) activations.
_CLIP_CAP = 6
# Shared BatchNorm hyper-parameters for all Xception layers.
# NOTE(review): 0.9997 is the TensorFlow *decay*; PyTorch's `momentum` is
# 1 - decay, so 0.0003 may have been intended — confirm against training runs.
_BATCH_NORM_PARAMS = {
    'eps': 0.001,
    'momentum': 0.9997,
    'affine': True,
}
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    """A named tuple describing an Xception block.

    Fields: ``scope`` (name of the block), ``unit_fn`` (the unit constructor,
    e.g. ``XceptionModule``) and ``args`` (a list of kwargs dicts, one per
    unit in the block).
    """
def fixed_padding(inputs, kernel_size, rate=1):
    """Pads `inputs` so a subsequent strided conv matches TF 'SAME' output.

    The effective kernel size accounts for the dilation `rate`; when the
    total padding is odd, the extra pixel goes to the bottom/right.
    """
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    total_pad = effective_kernel - 1
    before = total_pad // 2
    after = total_pad - before
    return torch.nn.functional.pad(inputs, pad=(before, after, before, after))
class Conv2dSame(torch.nn.Module):
    """Conv2d + BatchNorm + ReLU with TensorFlow-style 'SAME' padding.

    For stride 1 symmetric built-in padding suffices; for stride > 1 the
    input is padded explicitly (possibly asymmetrically) in forward() via
    ``fixed_padding`` to reproduce TF 'SAME' output sizes.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, rate=1):
        super(Conv2dSame, self).__init__()
        self._kernel_size = kernel_size
        self._rate = rate  # atrous/dilation rate
        self._without_padding = stride == 1
        if self._without_padding:
            # Stride-1: symmetric padding keeps the spatial size.
            padding = (kernel_size - 1) * rate // 2
            self._conv = torch.nn.Conv2d(in_channels,
                                         out_channels,
                                         kernel_size=kernel_size,
                                         stride=1,
                                         dilation=rate,
                                         padding=padding,
                                         bias=False)
        else:
            # Strided: padding is applied explicitly in forward().
            self._conv = torch.nn.Conv2d(in_channels,
                                         out_channels,
                                         kernel_size=kernel_size,
                                         stride=stride,
                                         dilation=rate,
                                         bias=False)
        self._batch_norm = torch.nn.BatchNorm2d(out_channels,
                                                **_BATCH_NORM_PARAMS)
        self._relu = torch.nn.ReLU(inplace=True)
    def forward(self, x):
        if not self._without_padding:
            x = fixed_padding(x, self._kernel_size, self._rate)
        x = self._conv(x)
        x = self._batch_norm(x)
        x = self._relu(x)
        return x
class SeparableConv2dSame(torch.nn.Module):
    """Depthwise-separable conv with 'SAME' padding and per-stage BatchNorm.

    Depthwise conv (groups=in_channels) followed by a 1x1 pointwise conv;
    ``activation_fn`` (when given) is applied after each stage's BatchNorm.
    """
    def __init__(self, in_channels, out_channels, kernel_size,
                 depth_multiplier, stride, rate, use_explicit_padding=True,
                 activation_fn=None, regularize_depthwise=False, **kwargs):
        super(SeparableConv2dSame, self).__init__()
        self._kernel_size = kernel_size
        self._rate = rate
        # Explicit (fixed) padding is only needed for strided convs.
        self._without_padding = stride == 1 or not use_explicit_padding
        out_channels_depthwise = in_channels * depth_multiplier
        if self._without_padding:
            # Symmetric built-in padding preserves the spatial size.
            padding = (kernel_size - 1) * rate // 2
            self._conv_depthwise = torch.nn.Conv2d(in_channels,
                                                   out_channels_depthwise,
                                                   kernel_size=kernel_size,
                                                   stride=stride,
                                                   dilation=rate,
                                                   groups=in_channels,
                                                   padding=padding,
                                                   bias=False,
                                                   **kwargs)
        else:
            # Strided: pad explicitly in forward() via fixed_padding.
            self._conv_depthwise = torch.nn.Conv2d(in_channels,
                                                   out_channels_depthwise,
                                                   kernel_size=kernel_size,
                                                   stride=stride,
                                                   dilation=rate,
                                                   groups=in_channels,
                                                   bias=False,
                                                   **kwargs)
        self._batch_norm_depthwise = torch.nn.BatchNorm2d(
            out_channels_depthwise, **_BATCH_NORM_PARAMS)
        self._conv_pointwise = torch.nn.Conv2d(out_channels_depthwise,
                                               out_channels,
                                               kernel_size=1,
                                               stride=1,
                                               bias=False,
                                               **kwargs)
        self._batch_norm_pointwise = torch.nn.BatchNorm2d(
            out_channels, **_BATCH_NORM_PARAMS)
        # NOTE(review): `regularize_depthwise` is accepted but unused here —
        # confirm whether weight decay on the depthwise filters was intended.
        self._activation_fn = activation_fn
    def forward(self, x):
        if not self._without_padding:
            x = fixed_padding(x, self._kernel_size, self._rate)
        x = self._conv_depthwise(x)
        x = self._batch_norm_depthwise(x)
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        x = self._conv_pointwise(x)
        x = self._batch_norm_pointwise(x)
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        return x
class XceptionModule(torch.nn.Module):
    """One Xception unit: three separable convs plus a skip connection.

    ``skip_connection_type`` selects the shortcut: 'conv' (1x1 projection),
    'sum' (identity residual) or 'none'. When
    ``activation_fn_in_separable_conv`` is False the ReLU is applied
    *before* each separable conv (pre-activation) instead of inside it.
    """
    def __init__(self, in_channels, depth_list, skip_connection_type, stride,
                 unit_rate_list, rate=1, activation_fn_in_separable_conv=False,
                 regularize_depthwise=False, use_bounded_activation=False,
                 use_explicit_padding=True):
        super(XceptionModule, self).__init__()
        if len(depth_list) != 3:
            raise ValueError('Expect three elements in `depth_list`.')
        if len(unit_rate_list) != 3:
            raise ValueError('Expect three elements in `unit_rate_list`.')
        if skip_connection_type not in ['conv', 'sum', 'none']:
            raise ValueError('Unsupported skip connection type.')
        self._input_activation_fn = None
        if activation_fn_in_separable_conv:
            # Activation happens inside the separable convs.
            activation_fn = (torch.nn.ReLU6(inplace=False) if
                             use_bounded_activation else
                             torch.nn.ReLU(inplace=False))
        else:
            # Pre-activation: apply ReLU before each separable conv; in
            # bounded mode the in-conv activation clamps instead.
            if use_bounded_activation:
                activation_fn = lambda x: torch.clamp(x, -_CLIP_CAP, _CLIP_CAP)
                self._input_activation_fn = torch.nn.ReLU6(inplace=False)
            else:
                activation_fn = None
                self._input_activation_fn = torch.nn.ReLU(inplace=False)
        self._use_bounded_activation = use_bounded_activation
        self._output_activation_fn = None
        if use_bounded_activation:
            self._output_activation_fn = torch.nn.ReLU6(inplace=True)
        layers = []
        in_channels_ = in_channels
        # Three separable convs; only the last one carries the unit's stride.
        for i in range(3):
            if self._input_activation_fn is not None:
                layers += [self._input_activation_fn]
            layers += [
                SeparableConv2dSame(in_channels_,
                                    depth_list[i],
                                    kernel_size=3,
                                    depth_multiplier=1,
                                    regularize_depthwise=regularize_depthwise,
                                    rate=rate*unit_rate_list[i],
                                    stride=stride if i==2 else 1,
                                    activation_fn=activation_fn,
                                    use_explicit_padding=use_explicit_padding)]
            in_channels_ = depth_list[i]
        self._separable_conv_block = torch.nn.Sequential(*layers)
        self._skip_connection_type = skip_connection_type
        if skip_connection_type == 'conv':
            # 1x1 projection shortcut matching the residual's channels/stride.
            self._conv_skip_connection = torch.nn.Conv2d(in_channels,
                                                         depth_list[-1],
                                                         kernel_size=1,
                                                         stride=stride)
            self._batch_norm_shortcut = torch.nn.BatchNorm2d(
                depth_list[-1], **_BATCH_NORM_PARAMS)
    def forward(self, x):
        residual = self._separable_conv_block(x)
        if self._skip_connection_type == 'conv':
            shortcut = self._conv_skip_connection(x)
            shortcut = self._batch_norm_shortcut(shortcut)
            if self._use_bounded_activation:
                # Clamp both addends so their sum stays bounded.
                residual = torch.clamp(residual, -_CLIP_CAP, _CLIP_CAP)
                shortcut = torch.clamp(shortcut, -_CLIP_CAP, _CLIP_CAP)
            outputs = residual + shortcut
            if self._use_bounded_activation:
                outputs = self._output_activation_fn(outputs)
        elif self._skip_connection_type == 'sum':
            if self._use_bounded_activation:
                residual = torch.clamp(residual, -_CLIP_CAP, _CLIP_CAP)
                x = torch.clamp(x, -_CLIP_CAP, _CLIP_CAP)
            outputs = residual + x
            if self._use_bounded_activation:
                outputs = self._output_activation_fn(outputs)
        else:
            # 'none': plain feed-forward, no shortcut.
            outputs = residual
        return outputs
class StackBlocksDense(torch.nn.Module):
    """Stacks Xception blocks, switching to atrous mode at ``output_stride``.

    Mirrors TF deeplab's ``stack_blocks_dense``: while the accumulated
    stride is below ``output_stride`` each unit keeps its native stride;
    once reached, units run with stride 1 and a growing dilation ``rate``
    so the spatial resolution stops shrinking.

    Raises:
        ValueError: if the blocks' strides cannot produce ``output_stride``.
    """
    def __init__(self, blocks, output_stride=None):
        super(StackBlocksDense, self).__init__()
        current_stride = 1  # product of strides applied so far
        rate = 1            # accumulated atrous rate
        layers = []
        for block in blocks:
            for i, unit in enumerate(block.args):
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError('The target output_stride cannot be '
                                     'reached.')
                if output_stride is not None and current_stride == output_stride:
                    # Atrous mode: neutralize the unit's stride and grow the
                    # rate by what the stride would have been.
                    layers += [block.unit_fn(rate=rate, **dict(unit, stride=1))]
                    rate *= unit.get('stride', 1)
                else:
                    layers += [block.unit_fn(rate=1, **unit)]
                    current_stride *= unit.get('stride', 1)
        if output_stride is not None and current_stride != output_stride:
            # Fix: corrected the 'ouput_stride' typo in this error message.
            raise ValueError('The target output_stride cannot be reached.')
        self._blocks = torch.nn.Sequential(*layers)

    def forward(self, x):
        return self._blocks(x)
class Xception(torch.nn.Module):
    """Generic Xception network assembled from a list of ``Block``s.

    forward() returns ``(output, low_level_features)`` where the low-level
    features are the activations after the first unit of the first block
    (consumed by DeepLab's decoder). The stem is hard-coded to 3-channel
    (RGB) input.
    """
    def __init__(self, blocks, num_classes=None, global_pool=True,
                 keep_prob=0.5, output_stride=None, scope=None):
        super(Xception, self).__init__()
        self._scope = scope
        layers = []
        if output_stride is not None:
            if output_stride % 2 != 0:
                raise ValueError('The output_stride must be a multiple of 2.')
            # The stem below already downsamples by 2.
            output_stride /= 2
        # Root block function operated on inputs
        layers += [Conv2dSame(3, 32, 3, stride=2),
                   Conv2dSame(32, 64, 3, stride=1)]
        # Extract features for entry_flow, middle_flow, and exit_flow
        layers += [StackBlocksDense(blocks, output_stride)]
        if global_pool:
            # Global average pooling
            layers += [torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))]
        if num_classes:
            # Classifier head: dropout + 1x1 conv over the final depth.
            layers += [torch.nn.Dropout2d(p=keep_prob, inplace=True),
                       torch.nn.Conv2d(blocks[-1].args[-1]['depth_list'][-1],
                                       num_classes, 1)]
        self._layers = torch.nn.Sequential(*layers)

    def forward(self, x):
        # Fix: the original ran the whole network via self._layers(x) and
        # then re-ran the stem plus the first block a second time just to
        # obtain the low-level features (wasted compute and double
        # BatchNorm running-stat updates). Run everything exactly once.
        x = self._layers[0](x)
        x = self._layers[1](x)
        blocks = self._layers[2]._blocks
        low_level_features = blocks[0](x)
        out = low_level_features
        for unit in blocks[1:]:
            out = unit(out)
        for layer in self._layers[3:]:
            out = layer(out)
        return out, low_level_features

    @property
    def scope(self):
        return self._scope
def xception_block(scope,
                   in_channels,
                   depth_list,
                   skip_connection_type,
                   activation_fn_in_separable_conv,
                   regularize_depthwise,
                   num_units,
                   stride,
                   unit_rate_list=None):
    """Returns a ``Block`` of ``num_units`` identical XceptionModule units."""
    rates = _DEFAULT_MULTI_GRID if unit_rate_list is None else unit_rate_list
    unit_args = {
        'in_channels': in_channels,
        'depth_list': depth_list,
        'skip_connection_type': skip_connection_type,
        'activation_fn_in_separable_conv': activation_fn_in_separable_conv,
        'regularize_depthwise': regularize_depthwise,
        'stride': stride,
        'unit_rate_list': rates,
    }
    # One fresh dict per unit (the original repeated a single shared dict).
    return Block(scope, XceptionModule, [dict(unit_args) for _ in range(num_units)])
def Xception41(num_classes=None,
               global_pool=True,
               keep_prob=0.5,
               output_stride=None,
               regularize_depthwise=False,
               multi_grid=None,
               scope='xception_41'):
    """Xception-41 model: builds the block list (8 middle-flow units)."""
    # Per-block spec: (scope, in_channels, depth_list, skip_connection_type,
    # activation_fn_in_separable_conv, num_units, stride, unit_rate_list).
    specs = [
        ('entry_flow/block1', 64, [128, 128, 128], 'conv', False, 1, 2, None),
        ('entry_flow/block2', 128, [256, 256, 256], 'conv', False, 1, 2, None),
        ('entry_flow/block3', 256, [728, 728, 728], 'conv', False, 1, 2, None),
        ('middle_flow/block1', 728, [728, 728, 728], 'sum', False, 8, 1, None),
        ('exit_flow/block1', 728, [728, 1024, 1024], 'conv', False, 1, 2, None),
        # Last block honors the caller-supplied multi-grid atrous rates.
        ('exit_flow/block2', 1024, [1536, 1536, 2048], 'none', True, 1, 1,
         multi_grid),
    ]
    blocks = [xception_block(name,
                             in_channels=cin,
                             depth_list=depths,
                             skip_connection_type=skip,
                             activation_fn_in_separable_conv=act,
                             regularize_depthwise=regularize_depthwise,
                             num_units=units,
                             stride=stride,
                             unit_rate_list=rates)
              for name, cin, depths, skip, act, units, stride, rates in specs]
    return Xception(blocks=blocks, num_classes=num_classes,
                    global_pool=global_pool, keep_prob=keep_prob,
                    output_stride=output_stride, scope=scope)
def xception_41(num_classes=None,
                global_pool=True,
                keep_prob=0.5,
                output_stride=None,
                regularize_depthwise=False,
                multi_grid=None,
                scope='xception_41',
                pretrained=True,
                checkpoint_path='./pretrained/xception_41.pth'):
    """Xception-41: builds the backbone, optionally loading pretrained weights."""
    # Fix: forward regularize_depthwise/multi_grid to the builder
    # (previously accepted but silently dropped).
    xception = Xception41(num_classes=num_classes, global_pool=global_pool,
                          keep_prob=keep_prob, output_stride=output_stride,
                          regularize_depthwise=regularize_depthwise,
                          multi_grid=multi_grid, scope=scope)
    if pretrained:
        _load_state_dict(xception, num_classes, checkpoint_path)
    return xception
def Xception65(num_classes=None,
               global_pool=True,
               keep_prob=0.5,
               output_stride=None,
               regularize_depthwise=False,
               multi_grid=None,
               scope='xception_65'):
    """Xception-65 model: builds the block list (16 middle-flow units)."""
    # Per-block spec: (scope, in_channels, depth_list, skip_connection_type,
    # activation_fn_in_separable_conv, num_units, stride, unit_rate_list).
    specs = [
        ('entry_flow/block1', 64, [128, 128, 128], 'conv', False, 1, 2, None),
        ('entry_flow/block2', 128, [256, 256, 256], 'conv', False, 1, 2, None),
        ('entry_flow/block3', 256, [728, 728, 728], 'conv', False, 1, 2, None),
        ('middle_flow/block1', 728, [728, 728, 728], 'sum', False, 16, 1, None),
        ('exit_flow/block1', 728, [728, 1024, 1024], 'conv', False, 1, 2, None),
        # Last block honors the caller-supplied multi-grid atrous rates.
        ('exit_flow/block2', 1024, [1536, 1536, 2048], 'none', True, 1, 1,
         multi_grid),
    ]
    blocks = [xception_block(name,
                             in_channels=cin,
                             depth_list=depths,
                             skip_connection_type=skip,
                             activation_fn_in_separable_conv=act,
                             regularize_depthwise=regularize_depthwise,
                             num_units=units,
                             stride=stride,
                             unit_rate_list=rates)
              for name, cin, depths, skip, act, units, stride, rates in specs]
    return Xception(blocks=blocks, num_classes=num_classes,
                    global_pool=global_pool, keep_prob=keep_prob,
                    output_stride=output_stride, scope=scope)
def xception_65(num_classes=None,
                global_pool=False,
                keep_prob=0.5,
                output_stride=None,
                regularize_depthwise=False,
                multi_grid=None,
                scope='xception_65',
                pretrained=True,
                checkpoint_path='./pretrained/xception_65.pth'):
    """Xception-65: builds the backbone, optionally loading pretrained weights."""
    # Fix: forward regularize_depthwise/multi_grid (previously dropped).
    xception = Xception65(num_classes=num_classes, global_pool=global_pool,
                          keep_prob=keep_prob, output_stride=output_stride,
                          regularize_depthwise=regularize_depthwise,
                          multi_grid=multi_grid, scope=scope)
    if pretrained:
        # Fix: honor the caller-supplied checkpoint_path instead of a
        # hard-coded duplicate of the default.
        _load_state_dict(xception, num_classes, checkpoint_path)
    return xception
def Xception71(num_classes=None,
               global_pool=True,
               keep_prob=0.5,
               output_stride=None,
               regularize_depthwise=False,
               multi_grid=None,
               scope='xception_71'):
    """Xception-71 model: builds the block list and returns the network."""
    # Per-block spec: (scope, in_channels, depth_list, skip_connection_type,
    # activation_fn_in_separable_conv, num_units, stride, unit_rate_list).
    specs = [
        ('entry_flow/block1', 64, [128, 128, 128], 'conv', False, 1, 2, None),
        ('entry_flow/block2', 128, [256, 256, 256], 'conv', False, 1, 1, None),
        ('entry_flow/block3', 256, [256, 256, 256], 'conv', False, 1, 2, None),
        ('entry_flow/block4', 256, [728, 728, 728], 'conv', False, 1, 1, None),
        ('entry_flow/block5', 728, [728, 728, 728], 'conv', False, 1, 2, None),
        ('middle_flow/block1', 728, [728, 728, 728], 'sum', False, 16, 1, None),
        ('exit_flow/block1', 728, [728, 1024, 1024], 'conv', False, 1, 2, None),
        # Last block honors the caller-supplied multi-grid atrous rates.
        ('exit_flow/block2', 1024, [1536, 1536, 2048], 'none', True, 1, 1,
         multi_grid),
    ]
    blocks = [xception_block(name,
                             in_channels=cin,
                             depth_list=depths,
                             skip_connection_type=skip,
                             activation_fn_in_separable_conv=act,
                             regularize_depthwise=regularize_depthwise,
                             num_units=units,
                             stride=stride,
                             unit_rate_list=rates)
              for name, cin, depths, skip, act, units, stride, rates in specs]
    return Xception(blocks=blocks, num_classes=num_classes,
                    global_pool=global_pool, keep_prob=keep_prob,
                    output_stride=output_stride, scope=scope)
def xception_71(num_classes=None,
                global_pool=True,
                keep_prob=0.5,
                output_stride=None,
                regularize_depthwise=False,
                multi_grid=None,
                scope='xception_71',
                pretrained=True,
                checkpoint_path='./pretrained/xception_71.pth'):
    """Xception-71: builds the backbone, optionally loading pretrained weights."""
    # Fix: forward regularize_depthwise/multi_grid to the builder
    # (previously accepted but silently dropped).
    xception = Xception71(num_classes=num_classes, global_pool=global_pool,
                          keep_prob=keep_prob, output_stride=output_stride,
                          regularize_depthwise=regularize_depthwise,
                          multi_grid=multi_grid, scope=scope)
    if pretrained:
        _load_state_dict(xception, num_classes, checkpoint_path)
    return xception
def _load_state_dict(model, num_classes, checkpoint_path):
if os.path.exists(checkpoint_path):
state_dict = torch.load(checkpoint_path)
if num_classes is None or num_classes != 1001:
state_dict.pop('_layers.5.weight')
state_dict.pop('_layers.5.bias')
model.load_state_dict(state_dict, strict=False)
print('Load pretrained weights successfully.')
else:
raise ValueError('`checkpoint_path` does not exist.')
def assp_branch(in_channels, out_channles, kernel_size, dilation):
    """One ASPP branch: atrous Conv2d -> BatchNorm2d -> ReLU.

    A 1x1 kernel needs no padding; for a 3x3 kernel, padding equal to the
    dilation preserves the spatial size.
    """
    if kernel_size == 1:
        padding = 0
    else:
        padding = dilation
    conv = nn.Conv2d(in_channels, out_channles, kernel_size,
                     padding=padding, dilation=dilation, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(out_channles), nn.ReLU(inplace=True))
class ASSP(nn.Module):
    """Atrous Spatial Pyramid Pooling (DeepLab v3).

    Four parallel atrous-conv branches plus an image-level (global average
    pooling) branch; the five 256-channel outputs are concatenated and
    projected back to 256 channels.
    """
    def __init__(self, in_channels, output_stride):
        super(ASSP, self).__init__()
        assert output_stride in [8, 16], 'Only output strides of 8 or 16 are suported'
        # Dilation rates for each supported output stride (doubled at OS 8).
        if output_stride == 16: dilations = [1, 6, 12, 18]
        elif output_stride == 8: dilations = [1, 12, 24, 36]
        self.aspp1 = assp_branch(in_channels, 256, 1, dilation=dilations[0])
        self.aspp2 = assp_branch(in_channels, 256, 3, dilation=dilations[1])
        self.aspp3 = assp_branch(in_channels, 256, 3, dilation=dilations[2])
        self.aspp4 = assp_branch(in_channels, 256, 3, dilation=dilations[3])
        # Image-level features: global average pool -> 1x1 conv -> (upsampled
        # back to the feature-map size in forward()).
        self.avg_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(in_channels, 256, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True))
        # Fuses the 5 concatenated branches (5 * 256 channels) down to 256.
        self.conv1 = nn.Conv2d(256*5, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(0.5)
        initialize_weights(self)
    def forward(self, x):
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        # Broadcast the pooled features back to the input's spatial size.
        x5 = F.interpolate(self.avg_pool(x), size=(x.size(2), x.size(3)), mode='bilinear', align_corners=True)
        x = self.conv1(torch.cat((x1, x2, x3, x4, x5), dim=1))
        x = self.bn1(x)
        x = self.dropout(self.relu(x))
        return x
class Decoder(nn.Module):
    """DeepLab v3+ decoder.

    Projects the backbone's low-level features to 48 channels, upsamples
    the ASPP output to the low-level resolution, concatenates both, and
    predicts per-pixel class logits.
    """
    def __init__(self, low_level_channels, num_classes):
        super(Decoder, self).__init__()
        # 1x1 reduction of the low-level features to 48 channels.
        self.conv1 = nn.Conv2d(low_level_channels, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(48)
        self.relu = nn.ReLU(inplace=True)
        # Table 2, best performance with two 3x3 convs
        self.output = nn.Sequential(
            nn.Conv2d(48+256, 256, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes, 1, stride=1),
        )
        initialize_weights(self)
    def forward(self, x, low_level_features):
        low_level_features = self.conv1(low_level_features)
        low_level_features = self.relu(self.bn1(low_level_features))
        # Upsample ASPP features to the low-level spatial resolution.
        H, W = low_level_features.size(2), low_level_features.size(3)
        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        x = self.output(torch.cat((low_level_features, x), dim=1))
        return x
class DeepLab(BaseModel):
    """DeepLab v3+ (encoder-decoder with atrous separable convolution).

    Backbone (ResNet or Xception-65) -> ASPP -> decoder, with logits
    bilinearly upsampled back to the input resolution.
    """
    def __init__(self, num_classes, in_channels=3, backbone='xception', pretrained=True,
                output_stride=16, freeze_bn=False, freeze_backbone=False, **_):
        super(DeepLab, self).__init__()
        # Fix: the original `assert ('xception' or 'resnet' in backbone)`
        # always passed because the non-empty string 'xception' is truthy.
        assert 'xception' in backbone or 'resnet' in backbone, \
            "backbone must be an 'xception' or 'resnet' variant"
        if 'resnet' in backbone:
            self.backbone = ResNet(in_channels=in_channels, output_stride=output_stride,
                                   pretrained=pretrained)
            low_level_channels = 256
        else:
            # NOTE(review): the Xception path ignores `in_channels` (its stem
            # is hard-coded to RGB) — confirm callers only use 3-channel input.
            self.backbone = xception_65(output_stride=output_stride, pretrained=pretrained,
                                        global_pool=False,
                                        checkpoint_path='./pretrained/xception_65.pth')
            low_level_channels = 128
        self.ASSP = ASSP(in_channels=2048, output_stride=output_stride)
        self.decoder = Decoder(low_level_channels, num_classes)
        if freeze_bn:
            self.freeze_bn()
        if freeze_backbone:
            set_trainable([self.backbone], False)

    def forward(self, x):
        H, W = x.size(2), x.size(3)
        x, low_level_features = self.backbone(x)
        x = self.ASSP(x)
        x = self.decoder(x, low_level_features)
        # Recover the input resolution for per-pixel predictions.
        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        return x

    # Two functions to yield the parameters of the backbone
    # & Decoder / ASSP to use differentiable learning rates
    # FIXME: in xception, we use the parameters from xception and not aligned xception
    # better to have higher lr for this backbone
    def get_backbone_params(self):
        return self.backbone.parameters()

    def get_decoder_params(self):
        return chain(self.ASSP.parameters(), self.decoder.parameters())

    def freeze_bn(self):
        """Put every BatchNorm layer in eval mode (frozen running stats)."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.eval()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.