input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>dmccloskey/listDict
# system
from copy import copy
# Calculate utilities
from .listDict_dependencies import *
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
class listDict():
'''Utility functions for converting and extracting a list of
dictionaries into lists and arrays'''
def __init__(self,listDict_I=None,
dictList_I=None,
record_I=None,
dataFrame_I=None,
pivotTable_I=None):
self.data=None; # of type list, nparray, etc.
if listDict_I:
self.listDict=listDict_I;
else:
self.listDict=[];
if dictList_I:
self.dictList=dictList_I;
else:
self.dictList={};
if record_I: #sql record
self.record=record_I;
else:
self.record={};
if not dataFrame_I is None: #pandas data frame representation
self.dataFrame=dataFrame_I;
else:
self.dataFrame=None;
if pivotTable_I:#pandas pivot table representation
self.pivotTable=pivotTable_I;
else:
self.pivotTable=None;
def convert_listDict2dataMatrix(self,
row_label_I,column_label_I,value_label_I,
row_variables_I=[],
column_variables_I=[],
data_IO=[],
na_str_I=None,
filter_rows_I=[],
filter_columns_I=[],
order_rows_I=[],
order_columns_I=[],
order_rowsFromTemplate_I=[],
order_columnsFromTemplate_I=[],):
'''convert a list of dictionary rows to a numpy array
INPUT:
data_I = [{}]
row_label_I = column_id of the row labels
column_label_I = column_id of the column labels
value_label_I = column_id of the value label
OPTIONAL INPUT:
row_variables_I = list of keys to extract out with the rows
column_variables_I = list of keys to extract out with the columns
data_IO = pre-initialized data list
na_str_I = optional string or value to pre-initialize the output data with
filter_rows_I = list of row labels to include
filter_columns_I = list of column labels to include
order_rows_I = list of integers defining the order of the rows
order_columns_I = list of integers defining the order of the rows
order_rowsFromTemplate_I = list of row labels defining the order of the rows
order_columnsFromTemplate_I = list of row labels defining the order of the rows
OUTPUT:
data_O = nparray of shape(len(row_label_unique),len(column_label_unique))
row_labels_O = row labels of data_O
column_labels_O = column labels of data_O
OPTIONAL OUTPUT:
row_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(row_labels_O)
column_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(column_labels_O)
'''
data_O = [];
data_I = self.listDict;
# get unique rows and columns
nrows,row_labels_O = self.get_uniqueValues(row_label_I,filter_I=filter_rows_I);
ncolumns,column_labels_O = self.get_uniqueValues(column_label_I,filter_I=filter_columns_I);
# order rows and columns
row_labels_O,column_labels_O = self.order_rowAndColumnLabels(row_labels_O,column_labels_O,
order_rows_I=order_rows_I,
order_columns_I=order_columns_I,
order_rowsFromTemplate_I=order_rowsFromTemplate_I,
order_columnsFromTemplate_I=order_columnsFromTemplate_I,
);
# initialize the data matrix
data_O = self.initialize_dataMatrix(nrows,ncolumns,na_str_I);
# factor
row_variables_O = {};
if row_variables_I:
for cv in row_variables_I:
row_variables_O[cv]=[];
column_variables_O = {};
if column_variables_I:
for cv in column_variables_I:
column_variables_O[cv]=[];
#make the dataMatrixList
cnt = 0;
cnt_bool = True;
cnt2_bool = True;
for r_cnt,r in enumerate(row_labels_O):
cnt2_bool = True;
for c_cnt,c in enumerate(column_labels_O):
for d in data_I:
if d[column_label_I] == c and d[row_label_I] == r:
if d[value_label_I]:
data_O[r_cnt,c_cnt] = d[value_label_I];
if cnt_bool and column_variables_I:
for cv in column_variables_I:
column_variables_O[cv].append(d[cv]);
if cnt2_bool and row_variables_I:
for rv in row_variables_I:
row_variables_O[rv].append(d[rv]);
cnt2_bool = False;
break;
cnt = cnt+1
cnt_bool = False;
#return output based on input
if row_variables_I and column_variables_I:
return data_O,row_labels_O,column_labels_O,row_variables_O,column_variables_O;
elif row_variables_I:
return data_O,row_labels_O,column_labels_O,row_variables_O;
elif column_variables_I:
return data_O,row_labels_O,column_labels_O,column_variables_O;
else:
return data_O,row_labels_O,column_labels_O;
def convert_listDict2dataMatrixList(self,
row_label_I,column_label_I,value_label_I,
row_variables_I=[],
column_variables_I=[],
data_IO=[],
na_str_I="NA",
order_rows_I=[],
order_columns_I=[],
order_rowsFromTemplate_I=[],
order_columnsFromTemplate_I=[],
):
'''convert a list of dictionary rows to a numpy array
INPUT:
data_I = [{}]
row_label_I = column_id of the row labels
column_label_I = column_id of the column labels
value_label_I = column_id of the value label
OPTIONAL INPUT:
row_variables_I = list of keys to extract out with the rows
column_variables_I = list of keys to extract out with the columns
data_IO = pre-initialized data list
na_str_I = optional string or value to pre-initialize the output data with
order_rows_I = list of integers defining the order of the rows
order_columns_I = list of integers defining the order of the rows
order_rowsFromTemplate_I = list of row labels defining the order of the rows
order_columnsFromTemplate_I = list of row labels defining the order of the rows
OUTPUT:
data_O = list of values ordered according to (len(row_label_unique),len(column_label_unique))
row_labels_O = row labels of data_O
column_labels_O = column labels of data_O
OPTIONAL OUTPUT:
row_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(row_labels_O)
column_variables_O = {"row_variables_I[0]:[...],..."} where each list is of len(column_labels_O)
'''
data_O = [];
data_I = self.listDict;
# get unique rows and columns
nrows,row_labels_O = self.get_uniqueValues(row_label_I);
ncolumns,column_labels_O = self.get_uniqueValues(column_label_I);
# order rows and columns
row_labels_O,column_labels_O = self.order_rowAndColumnLabels(row_labels_O,column_labels_O);
# factor
row_variables_O = {};
if row_variables_I:
for cv in row_variables_I:
row_variables_O[cv]=[];
column_variables_O = {};
if column_variables_I:
for cv in column_variables_I:
column_variables_O[cv]=[];
# initialize the data list
data_O = self.initialize_dataMatrixList(nrows,ncolumns,na_str_I='NA');
#make the dataMatrixList
cnt = 0;
cnt_bool = True;
cnt2_bool = True;
for r in row_labels_O:
cnt2_bool = True;
for c in column_labels_O:
for d in data_I:
if d[column_label_I] == c and d[row_label_I] == r:
if d[value_label_I]:
data_O[cnt] = d[value_label_I];
if cnt_bool and column_variables_I:
for cv in column_variables_I:
column_variables_O[cv].append(d[cv]);
if cnt2_bool and row_variables_I:
for rv in row_variables_I:
row_variables_O[rv].append(d[rv]);
cnt2_bool = False;
break;
cnt = cnt+1
cnt_bool = False;
#return output based on input
if row_variables_I and column_variables_I:
return data_O,row_labels_O,column_labels_O,row_variables_O,column_variables_O;
elif row_variables_I:
return data_O,row_labels_O,column_labels_O,row_variables_O;
elif column_variables_I:
return data_O,row_labels_O,column_labels_O,column_variables_O;
else:
return data_O,row_labels_O,column_labels_O;
def order_rowAndColumnLabels(self,
row_labels_I,column_labels_I,
order_rows_I=[],
order_columns_I=[],
order_rowsFromTemplate_I=[],
order_columnsFromTemplate_I=[],):
'''Order rows and columns according to input
INPUT:
row_labels_I = list of unique row labels
column_labels_I = list of unique column labels
OUTPUT:
row_labels_O = list of ordered row labels
column_labels_O = list of ordered column labels
'''
row_labels_O,column_labels_O=row_labels_I,column_labels_I;
# order the rows and columns
if order_rows_I:
row_labels_O = self.order_labels(row_labels_I,order_rows_I);
if order_columns_I:
column_labels_O = self.order_labels(column_labels_I,order_columns_I);
if order_rowsFromTemplate_I:
row_labels_O = self.order_labelsFromTemplate(row_labels_I,order_rowsFromTemplate_I);
if order_columnsFromTemplate_I:
column_labels_O = self.order_labelsFromTemplate(column_labels_I,order_columnsFromTemplate_I);
return row_labels_O,column_labels_O;
def get_uniqueValues(self,key_I,filter_I=[]):
'''get the unique values for a column key
INPUT:
key_I = string, column key
filter_I = list of column values to not include in the output
OUTPUT:
nvalues_O = # of values
uniqueValues_O = list of unique values
'''
nvalues_O=0;
uniqueValues_O=[];
data_I = self.listDict;
# get all values
values = [];
for d in data_I:
values.append(d[key_I]);
# filter the values
if filter_I:
values = [x for x in values if x in filter_I];
# get the unique values
uniqueValues_O = sorted(set(values))
# count the values
nvalues_O = len(uniqueValues_O);
return nvalues_O,uniqueValues_O;
def order_labels(self,labels_I,order_I):
    '''Reorder labels by a pre-defined index permutation.
    INPUT:
    labels_I = list of strings
    order_I = list of integers defining the new order of the labels
    OUTPUT:
    labels_O = reordered list, or [] (with a warning printed) when the
    lengths of the two inputs disagree
    '''
    if len(labels_I) != len(order_I):
        print('length of labels and order do not match!')
        return []
    return [labels_I[idx] for idx in order_I]
def order_labelsFromTemplate(self,labels_I,template_I):
    '''Reorder labels to match the order they appear in a template.
    NOTES:
    The template may include values not present in the labels.
    ASSUMPTIONS:
    The template includes every value found in the labels.
    INPUT:
    labels_I = list of strings
    template_I = list of strings
    OUTPUT:
    labels_O = reordered list, or [] (with a warning printed) when the
    template is shorter than the labels or does not cover all of them
    '''
    if len(template_I) < len(labels_I):
        print('length of labels is less than the template!')
        return []
    # keep only template entries that are actual labels, in template order
    ordered = [entry for entry in template_I if entry in labels_I]
    if len(ordered) != len(labels_I):
        print('template does not contain all labels!')
        return []
    return ordered
def count_missingValues(self,values_I,na_str_I='NA'):
    '''Count the occurrences of the missing-value marker in a list.
    INPUT:
    values_I = list of values
    na_str_I = marker identifying a missing value
    OUTPUT:
    # of missing values found
    '''
    return sum(1 for value in values_I if value == na_str_I)
def initialize_dataMatrixList(self,nrows_I,ncolumns_I,na_str_I='NA'):
    '''Build a flat list of missing-value markers for an nrows x ncolumns grid.
    INPUT:
    nrows_I = int, # of rows of data
    ncolumns_I = int, # of columns of data
    na_str_I = marker identifying a missing value
    OUTPUT:
    list containing na_str_I repeated nrows_I*ncolumns_I times
    '''
    return [na_str_I] * (nrows_I * ncolumns_I)
def initialize_dataMatrix(self,nrows_I,ncolumns_I,na_str_I='NA'):
    '''Build an nrows x ncolumns matrix pre-filled with missing values.
    INPUT:
    nrows_I = int, # of rows of data
    ncolumns_I = int, # of columns of data
    na_str_I = fill value; when falsy, a zeros matrix is returned instead
    OUTPUT:
    matrix of shape (nrows_I, ncolumns_I) filled with na_str_I (or zeros)
    NOTE: npfull/npzeros come from the wildcard dependency import —
    presumably numpy.full/numpy.zeros; confirm against listDict_dependencies.
    '''
    if na_str_I:
        return npfull((nrows_I,ncolumns_I), na_str_I)
    return npzeros((nrows_I,ncolumns_I))
def extract_arrayFromListDict(self,key_I):
'''convert a list of dictionary rows to a numpy array
INPUT:
key_I = string, dictionary key to extract values from
OUTPUT:
data_O = numpy array of values
'''
data_I = | |
<filename>my_utils/languages.py
def get_content(content_type: str, lang: str):
    """Look up a localized content entry in the LANGUAGES table."""
    language_table = LANGUAGES[lang]
    return language_table[content_type]
LANGUAGES = {
'ru': {
'ERRORS_DESCRIPTIONS': {
'COG_DISABLED': 'Эта команда отключена на этом сервере!',
'COMMAND_DISABLED': 'Эта команда или группа команд была отключена на этом сервере!',
'TAG_NOT_FOUND': 'Тег не найден!',
'FORBIDDEN_TAG': 'Этот тег нельзя использовать!',
'NOT_TAG_OWNER': 'Вы не владелец тега!',
'UID_NOT_BINDED': 'У вас не привязан UID!',
'GI_ACCOUNT_NOT_FOUND': 'Аккаунт с таким UID не найден!',
'GI_DATA_NOT_PUBLIC': 'Профиль закрыт! Откройте профиль на [сайте]('
'https://www.hoyolab.com/genshin/accountCenter/gameRecord)',
'NOT_CONNECTED_TO_VOICE': 'You not connected to voice channel!',
'NOT_BOT_OWNER': 'Это команда доступна только владельцу бота!',
'BOT_MISS_PERMS': '**У бота недостаточно прав!**\nНеобходимые права: ',
'MISS_PERMS': '**У вас недостаточно прав!**\nНеобходимые права:',
'CHECK_FAILURE': 'Вы не можете использовать эту команду!',
'OTHER_ERRORS_TITLE': '❌ Упс... Произошла непредвиденная ошибка!',
'OTHER_ERRORS_DESCRIPTION': 'Этот баг был отправлен создателю\n *Ошибка:* ```\n{error}\n```',
'BOT_DONT_HAVE_PERMS': '**У бота недостаточно прав чтобы сделать это!**\nТребуется:',
'DONT_HAVE_PERMS': '**У вас недостаточно прав для использования этой команды!**\nТребуется:',
'FORBIDDEN': 'У бота недостаточно прав чтобы сделать это',
'BAD_ARGUMENT': 'Вы ввели неверно один из аргументов!'
},
'FUNC_RANDOM_NUMBER_OUT_CONTENT': 'Рандомное число: `{}`',
'FUNC_MEMBER_INFO': {
'MEMBER_STATUS': {
'online': '<:s_online:850792217031082051> В сети',
'dnd': '<:dnd:850792216943525936> Не беспокоить',
'idle': '<:s_afk:850792216732368937> Отошёл',
'offline': '<:s_offline:850792217262030969> Не в сети'
},
'ABOUT_TITLE': 'Информация о {}',
'GENERAL_INFO_TITLE': 'Общая информация:',
'FULL_NAME_TEXT': 'Полное имя',
'BADGES_TEXT': 'Значки:',
'DISCORD_REGISTRATION_TEXT': 'Дата регистрации в Discord:',
'JOINED_ON_SERVER_TEXT': 'Дата присоединения на сервер:',
'CURRENT_STATUS_TEXT': 'Текущий статус:',
'TOP_ROLE_TEXT': 'Высшая роль:',
'ROLES_TEXT': 'Роли:',
'LEVELING': {
'CURRENT_LEVEL_TEXT': '<:level:863677232239869964> **Уровень:** `{level}`',
'CURRENT_EXP_TEXT': '<:exp:863672576941490176> **Опыт:** `{exp}/{exp_to_next_level}` Всего: `{exp_amount}`',
'LEVELING_INFO_TITLE_TEXT': 'Уровневая информация',
'TOTAL_VOICE_TIME': '<:voice_time:863674908969926656> **Время в голосом канале:** `{voice_time:.2f}` час.'
}
},
'FUNC_PING': 'Задержка бота `{}` мс',
'INVITE_COMMAND': {
'INVITE_BUTTON_NO_PERMS': 'Без разрешений',
'INVITE_BUTTON_ADMIN': 'Администратор',
'INVITE_BUTTON_RECOMMENDED': 'Рекомендуемый',
'CLICK_TO_INVITE_TEXT': 'Нажмите на кнопку, чтобы пригласить бота',
},
'FUNC_ACTIVITIES': {
'NOT_CONNECTED_TO_CHANNEL_TEXT': 'Вы не подключены к голосовому каналу!',
'WRONG_CHANNEL_TEXT': 'Вы выбрали неверный канал! Выберите голосовой!',
'JOIN_TEXT': '**Присоединиться**',
'REQUESTED_BY_TEXT': 'Запрошено {}'
},
'GAMES_NAMES': {
'RPS': 'Камень Ножницы бумага',
'TTT': '<NAME>'
},
'FUNC_INVITE_TO_GAME': {
'SELF_INVITE': 'Вы не можете пригласить себя!',
'BOT_INVITE': 'Вы не можете пригласить бота!',
'INVITE_MESSAGE_CONTENT': '{}! {} приглашает тебя в игру {}',
'BUTTON_AGREE': 'Согласиться',
'BUTTON_DECLINE': 'Отказаться',
'AGREE_MESSAGE_CONTENT': 'Вы приняли приглашение!',
'DECLINE_MESSAGE_CONTENT': '{} отказался от игры!',
'TIMEOUT_MESSAGE_CONTENT': 'От {} нет ответа!'
},
'GAME_RPS': {
'RESULTS_TITLE': '` ИТОГИ ИГРЫ `',
'RESULTS_TEXT': '' \
'**Игроки:** {} и {} \n' \
'**Количество сыгранных игр:** {} \n' \
'**Счёт:** {}:{} \n' \
'**Победитель:** {}',
'RESULTS_GAME_NAME': '**Название игры: Камень ножницы бумага**',
'RESULTS_TIE': 'Ничья',
'MADE_MOVE_TEXT': 'Вы сделали свой ход',
'PLAYERS_TEXT': '**{}** VS **{}**',
'CURRENT_SCORE_TEXT': '**Счёт:** {}:{}\n' \
'**Игра:** {}/{}',
},
'GAME_TTT': {
'GAME_NAME': '<NAME>',
'RESULTS_TITLE': '` ИТОГИ ИГРЫ `',
'RESULTS_GAME_NAME': '**Игра: Крестики Нолики**',
'RESULTS_TEXT': '**Игроки: {player1} И {player2}**\n**Победитель:** {winner}',
'RESULTS_TIE': 'Ничья',
},
'FUNC_MODERATION_CHANGE_NICK_TEXT': 'Был изменён ник {} на `{}`',
'FUNC_MODERATION_MUTE_MEMBER': {
'CANNOT_MUTE_BOT_TEXT': 'Вы не можете замутить бота!',
'WAS_MUTED_TEXT': '{} был отправлен в мут!',
'TIME_TEXT': '**Время**: {amount} {time_format}\n',
'REASON_TEXT': '**Причина**: {reason}',
'MUTED_ROLE_CREATED_TEXT': 'Роль для мута `role_name` была создана!'
},
'FUNC_MODERATION_BAN_MEMBER': {
'CANNOT_BAN_BOT_TEXT': 'Вы не можете забанить бота!',
'WAS_BANNED_TEXT': '{member} был забанен!',
'REASON_TEXT': '**Причина**: {reason}',
'SERVER': '\n**Сервер:** {guild}'
},
'FUNC_MODERATION_KICK_MEMBER': {
'CANNOT_KICK_BOT_TEXT': 'Вы не можете кикнуть бота!',
'WAS_KICKED_TEXT': '{member} был кикнут!',
'REASON_TEXT': '**Причина**: {reason}',
'SERVER': '\n**Сервер:** {guild}'
},
'FUNC_MODERATION_CLEAR_MESSAGES': 'Успешно удалено `{}` сообщений!',
'PUBLIC_TAGS_COMMAND': {
'TAGS_PUBLIC': 'Теги теперь доступны всем!',
'TAGS_FOR_ADMINS': 'Теги теперь могут создавать только те, у кого есть право `Управлять сервером`!'
},
'EMBED_TAG_CONTROL': {
'NOT_SUPPORTED_TAG_TYPE': 'Этот тег не поддерживается!',
'EDIT_TAG_BUTTON': 'Редактировать',
'REMOVE_TAG_BUTTON': 'Удалить',
'EXIT_BUTTON': 'Выйти',
'SET_TITLE_BUTTON': 'Заголовок',
'SET_DESCRIPTION_BUTTON': 'Описание',
'GET_RAW_DESCRIPTION_BUTTON': 'Исходник',
'SAVE_TAG_BUTTON': 'Сохранить',
'TAG_TITLE_TEXT': 'Заголовок Тега',
'TAG_DESCRIPTION_TEXT': 'Описание Тега',
'INPUT_TEXT': 'Введите',
'SAVED_TAG_TEXT': '**Сохранено!**',
'REMOVED_TAG_TEXT': '**Тег удалён!**'
},
'FUNC_TAG_LIST': {
'TAGS_LIST_TEXT': 'Список тегов в {server}',
'NO_TAGS_TEXT': 'Список пуст!'
},
'TAG_ADD_COMMAND': {
'TAG_ALREADY_EXISTS_TEXT': 'Этот тег уже существует!',
'TAG_CREATED_TEXT': 'Тег `{tag_name}` успешно создан!'
},
'TAG_REMOVE_COMMAND': {
'TAG_REMOVED_TEXT': 'Тег `{tag_name}` успешно удалён!'
},
'TAG_RENAME_TAG': {
'TAG_RENAMED_TEXT': 'Тег `{tag_name}` переименован в `{new_tag_name}`!'
},
'MUSIC_PLAY_COMMAND': {
'ADDED_IN_QUEUE_TEXT': '`{}` Было добавлено о очередь',
'NOT_CONNECTED_TO_VOICE': 'Подключитесь к голосовому каналу с ботом!',
'PAUSE_BUTTON': 'Пауза',
'RESUME_BUTTON': 'Продолжить',
'STOP_BUTTON': 'Стоп',
'SKIP_BUTTON': 'Пропустить',
'TOGGLE_OFF_BUTTON': 'Выкл. повтор',
'TOGGLE_ON_BUTTON': 'Вкл. повтор',
'PLAYING_TEXT': 'Сейчас играет',
'NAME_TEXT': 'Название:',
'DURATION_TEXT': 'Продолжительность:',
'LIVE_TEXT': 'Прямая трансляция',
'WHO_ADDED_TEXT': 'Добавлено {}',
'SUCCESSFULLY': 'Успешно!'
},
'FUNC_RANDOM_ITEMS': {
'ITEMS_LIST': [
'Успокоительное', 'Термометр', 'Фотокамера', 'Направленный микрофон',
'Свеча', 'Благовоние', 'Зажигалка', 'Распятие', 'Соль',
'Штатив',
'Датчик ЭМП', 'Радиоприёмник', 'Блокнот', 'Лазерный проектор', 'Видеокамера',
'Слабый фонарик', 'УФ-фонарик', 'Сильный фонарик',
'Датчик движения', 'Неоновая палочка', 'Датчик звука',
'Камера с креплением на голову'
],
'SELECT_BUTTON': 'Выборка',
'EXCEPTION_BUTTON': 'Исключение',
'SELECT_ITEMS_TEXT': 'Выберите предметы',
'START_RANDOM_BUTTON': 'Рандом!',
'EXIT_BUTTON': 'Выйти',
'EMBED_TITLE': 'Рандомные предметы!',
'SECOND_MESSAGE_CONTENT': 'Здесь будет появляться предмет',
'SELECTED_ITEMS_TEXT': '**Выбранные предметы: **\n',
},
'GENSHIN_BIND_COMMAND': 'Вы привязали UID',
'GENSHIN_STATISTICS_COMMAND': {
'EMBED_WORLD_EXPLORATION_TITLE': 'Genshin Impact. Статистика мира',
'EXPLORED_TEXT': 'Исследовано',
'FROSTBEARING_TREE_LEVEL_TEXT': '\nУровень Дерева Вечной Мерзлоты: `{level}`',
'SACRED_SAKURA_LEVEL_TEXT': '\nУровень Благосклонности сакуры: `{level}`',
'REPUTATION_LEVEL_TEXT': '\nУровень репутации: `{level}`',
'ANEMOCULUS': 'Анемокулов',
'GEOCULUS': 'Геокулов',
'ELECTROCULUS': 'Электрокулов',
'COLLECTED_OCULUS_TEXT': 'Собрано окулов',
'COMMON_CHEST': 'Обычных',
'EXQUISITE_CHEST': 'Богатых',
'PRECIOUS_CHEST': 'Драгоценных',
'LUXURIOUS_CHEST': 'Роскошных',
'CHESTS_OPENED': 'Открыто сундуков',
'UNLOCKED_TELEPORTS': 'Открыто телепортов',
'UNLOCKED_DOMAINS': 'Открыто подземелий',
'MISC_INFO': 'Остальное'
},
'GENSHIN_CHARACTERS_LIST_COMMAND': {
'EMBED_GENSHIN_CHARACTERS_LIST_TITLE': 'Genshin Impact. Персонажи',
'CHARACTER_LEVEL': 'Уровень',
'CHARACTER_CONSTELLATION': 'Созвездие',
'CHARACTER_VISION': 'Глаз бога',
'CHARACTER_WEAPON': 'Оружие',
},
'GENSHIN_CHARACTERS_COMMAND': {
'INFORMATION_TEXT': '**Информация**',
'CHARACTER_LEVEL': 'Уровень',
'CHARACTER_CONSTELLATION': 'Созвездие',
'CHARACTER_VISION': 'Глаз бога',
'CHARACTER_FRIENDSHIP': 'Уровень дружбы',
'WEAPON_TEXT': '**Оружие**',
'WEAPON_NAME': 'Название',
'WEAPON_RARITY': 'Редкость',
'WEAPON_TYPE': 'Тип',
'WEAPON_LEVEL': 'Уровень',
'WEAPON_ASCENSION_LEVEL': 'Уровень восхождения',
'WEAPON_REFINEMENT_LEVEL': 'Уровень пробуждения',
'ARTIFACTS_TEXT': '**Артефакты**',
'ARTIFACT_NAME': 'Название',
'ARTIFACT_RARITY': 'Редкость',
'ARTIFACT_LEVEL': 'Уровень',
'GENSHIN_CHARACTER_VISION': {
'Anemo': '<:Element_Anemo:870989749534486538> `Анемо`',
'Pyro': '<:Element_Pyro:870989754454396998> `Пиро`',
'Hydro': '<:Element_Hydro:870989753649102909> `Гидро`',
'Electro': '<:Element_Electro:870989752801837056> `Электро`',
'Geo': '<:Element_Geo:870989753271603230> `Гео`',
'Dendro': '<:Element_Dendro:870989751908446250> `Дендро`',
'Cryo': '<:Element_Cryo:870989751312846868> `Крио`',
},
'GENSHIN_ARTIFACT_TYPE': {
'flower': '<:Icon_Flower_of_Life:871372154179059742> Цветок',
'feather': '<:Icon_Plume_of_Death:871372154510397470> Перо',
'hourglass': '<:Icon_Sands_of_Eon:871372154845933568> Часы',
'goblet': '<:Icon_Goblet_of_Eonothem:871372154346827776> Кубок',
'crown': '<:Icon_Circlet_of_Logos:871372154212605962> Корона',
}
},
'GENSHIN_INFO_COMMAND': {
'NICKNAME_TEXT': 'Ник в игре',
'ADVENTURE_RANK_TEXT': 'Ранг Приключений',
'ACHIEVEMENTS_TEXT': 'Достижений',
'CHARACTERS_TEXT': 'Персонажей',
'SPIRAL_ABYSS_TEXT': '<NAME>',
'PLAYER_INFO_TEXT': 'Информация об игроке',
},
'SET_EMBED_COLOR_COMMAND': {
'WRONG_COLOR': 'Неверный формат цвета!',
'SUCCESSFULLY_CHANGED': 'Цвет сообщений успешно изменён!'
},
'LEVELS': {
'FUNC_UPDATE_MEMBER': {
'NOTIFY_GUILD_CHANNEL': '{member} получил `{level}-й` уровень и повышение до {role}',
'NOTIFY_DM': 'Вы получили `{level}-й` уровень и повышение до {role}'
},
'FUNC_TOP_MEMBERS': {
'TITLES': '**ID | Участник | Текущий уровень**\n',
'TOP_MEMBERS_TEXT': 'Топ участников по уровню',
'REQUESTED_BY_TEXT': 'Запрошено'
}
},
'HELP_COMMAND': {
'INFORMATION_TEXT': 'Информация',
'INFORMATION_CONTENT_TEXT': '```Подсказка: [обязательный аргумент] (необязательный аргумент)```',
'REQUIRED_BY_TEXT': 'Запрошено {user}',
'PLUGINS_TEXT': 'Плагины',
'SELECT_MODULE_TEXT': 'Выберите плагин'
},
'STARBOARD_FUNCTIONS': {
'CHANNEL_WAS_SETUP_TEXT': 'Канал был установлен!',
'LIMIT_WAS_SETUP_TEXT': 'Лимит звёзд был установлен до `{limit}`!',
'STARBOARD_NOT_SETUP_TEXT': 'Канал или лимит звёзд не были установлены!',
'STARBOARD_ENABLED_TEXT': 'Starboard был `включен`!',
'STARBOARD_DISABLED_TEXT': 'Starboard был `выключен`!',
'JUMP_TO_ORIGINAL_MESSAGE_TEXT': 'Перейти к сообщению!',
'BLACKLIST_NO_OPTIONS_TEXT': 'Должно быть что-то из `member`, `role`, `channel`',
'EMPTY_BLACKLIST_TEXT': 'Чёрный список пуст!',
'BLACKLIST_ADDED_TEXT': 'Добавлено',
'BLACKLIST_REMOVED_TEXT': 'Удалено',
'BLACKLIST_TEXT': '⭐Starboard. Чёрный список',
'MEMBERS': 'Участники',
'CHANNELS': 'Каналы',
'ROLES': 'Роли'
},
'AUTOROLE_DROPDOWN': {
'ADDED_ROLES_TEXT': 'Добавлено: ',
'REMOVED_ROLES_TEXT': '\nУдалено: ',
'NO_OPTIONS_TEXT': 'Нет опций',
'CREATED_DROPDOWN_TEXT': 'Меню создано!',
'MESSAGE_WITHOUT_DROPDOWN_TEXT': 'Сообщение не имеет меню!',
'OPTIONS_OVERKILL_TEXT': 'Вы не можете добавить больше 25 опций в меню1',
'SELECT_ROLE_TEXT': 'Выберите роль',
'ROLE_ADDED_TEXT': 'Роль добавлена!',
'OPTION_NOT_FOUND_TEXT': 'Не удалось найти опцию с таким именем!',
'OPTIONS_LESS_THAN_1_TEXT': 'Опций в меню не может быть меньше 1',
'ROLE_REMOVED_TEXT': 'Роль удалена!',
'DROPDOWN_ENABLED_TEXT': 'Меню включено!',
'DROPDOWN_DISABLED_TEXT': 'Меню выключено!',
'DROPDOWN_SAVED_TEXT': 'Сохранено!',
'NOT_SAVED_DROPDOWNS': 'Нет сохранённих меню!',
'DROPDOWN_NOT_FOUND': 'Нет меню с таким именем!',
'DROPDOWN_LOADED_TEXT': 'Меню загружено!',
'DROPDOWN_LIST': 'Список меню с ролями'
},
'AUTOROLE_ON_JOIN': {
'ROLE_ADDED_TEXT': 'Роль {role} Добавлена!',
'ROLE_REMOVED_TEXT': 'Роль {role} Удалена!'
},
'TIME_FORMATS': {
'с': 'сек.',
'м': 'мин.',
'ч': 'час.',
'д': 'дн.',
's': 'сек.',
'm': 'мин.',
'h': 'час.',
'd': 'дн.',
},
'COMMAND_CONTROL': {
'COMMAND_DISABLED': 'Команда `{command_name}` была выключена!',
'COMMAND_ENABLED': 'Команда `{command_name}` была включена!'
}
},
'en': {
'ERRORS_DESCRIPTIONS': {
'COG_DISABLED': 'This command disabled on this server!',
'COMMAND_DISABLED': 'This command or group of commands was disabled on this server!',
'TAG_NOT_FOUND': 'Tag not found!',
'FORBIDDEN_TAG': 'This tag cannot be used!',
'NOT_TAG_OWNER': 'You not owner of this tag!',
'UID_NOT_BINDED': 'You didn\'t tie UID!',
'GI_ACCOUNT_NOT_FOUND': 'Account with this UID not found!',
'GI_DATA_NOT_PUBLIC': 'Profile is private! Open profile on [site](https://www.hoyolab.com/genshin/accountCenter/gameRecord)',
'NOT_CONNECTED_TO_VOICE': 'You not connected to voice channel!',
'NOT_BOT_OWNER': 'Only owner can use this command!',
'BOT_MISS_PERMS': 'Bot don\'t have permission for this!**\nRequired permissions: ',
'MISS_PERMS': 'You don\'t have permission for this!**\nRequired permissions:',
'CHECK_FAILURE': 'You can\'t use this command!',
'OTHER_ERRORS_TITLE': '❌ Oops... An unexpected error occurred!',
'OTHER_ERRORS_DESCRIPTION': 'This bug was sent to owner\n' \
'*Error:* \n' \
'```python ' \
'{error}' \
' ```',
'BOT_DONT_HAVE_PERMS': '**Bot don\'t have permission for this!**\nRequired permissions:',
'DONT_HAVE_PERMS': '**You don\'t have permission for this!**\nRequired permissions:',
'FORBIDDEN': 'Bot doesn\'t have permission for this!',
'BAD_ARGUMENT': 'One of arguments is wrong!',
},
'FUNC_RANDOM_NUMBER_OUT_CONTENT': 'Random number is `{}`',
'FUNC_MEMBER_INFO': {
'MEMBER_STATUS': {
'online': '<:s_online:850792217031082051> Online',
'dnd': '<:dnd:850792216943525936> Do not disturb',
| |
# 相手の駒配置を予測
# これは不完全情報ゲームにおいて動作するようにする
# 正体が不明な相手の駒をとりあえず-1としておく
# board→14R24R34R44R15B25B35B45B41u31u21u11u40u30u20u10u
# move
import numpy as np
import itertools
import time
from game import State
# from pv_mcts import predict
from pathlib import Path
from tensorflow.keras.models import load_model
from test import convert_func_use_in_guess
# model_path = "models/10000.pth"
default_gamma = 0.9
DN_INPUT_SHAPE = (6, 6, 4)
# おそらく不完全情報ガイスター(のstateのみ?)を定義してそれを更新して管理した方がよさげ
# 不完全情報ガイスターの盤面情報及びそれらの推測値
class II_State:
# クラス変数で駒順を定義
piece_name = [
"h",
"g",
"f",
"e",
"d",
"c",
"b",
"a",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
]
# 初期化
def __init__(
self,
real_my_piece_blue_set,
real_enemy_piece_blue_set=None,
see_through_piece_id=None,
wrong_see_through_piece_id=None,
all_piece=None,
enemy_estimated_num=None,
my_estimated_num=None,
enemy_piece_list=None,
my_piece_list=None,
living_piece_color=None,
):
# 全ての駒(hgfedcbaABCDEFGHの順になっている)
# 敵駒0~7,自駒8~15
if all_piece == None:
# numpyは基本的に型指定しない方が早い(指定すると裏で余計な処理するっぽい)
self.all_piece = np.zeros(16, dtype=np.int16)
# 初期配置を代入(各値は座標を示す)(脱出が88、死亡が99)
# 0~7は敵駒, 8~15は自駒
self.all_piece[0] = 1
self.all_piece[1] = 2
self.all_piece[2] = 3
self.all_piece[3] = 4
self.all_piece[4] = 7
self.all_piece[5] = 8
self.all_piece[6] = 9
self.all_piece[7] = 10
self.all_piece[8] = 25
self.all_piece[9] = 26
self.all_piece[10] = 27
self.all_piece[11] = 28
self.all_piece[12] = 31
self.all_piece[13] = 32
self.all_piece[14] = 33
self.all_piece[15] = 34
else:
self.all_piece = all_piece
if enemy_piece_list == None:
self.enemy_piece_list = [0, 1, 2, 3, 4, 5, 6, 7]
else:
self.enemy_piece_list = enemy_piece_list
if my_piece_list == None:
self.my_piece_list = [8, 9, 10, 11, 12, 13, 14, 15]
else:
self.my_piece_list = my_piece_list
# real_my_piece_blue_setは自分の青駒のIDのセット(引数必須)
self.real_my_piece_blue_set = set(real_my_piece_blue_set)
self.real_my_piece_red_set = (
set(self.my_piece_list) - self.real_my_piece_blue_set
)
# 敵の青駒のセット(デバッグ用)
self.real_enemy_piece_blue_set = set(real_enemy_piece_blue_set)
self.real_enemy_piece_red_set = (
set(self.enemy_piece_list) - self.real_enemy_piece_blue_set
)
# {敵青, 敵赤, 自青, 自赤}
if living_piece_color == None:
self.living_piece_color = [4, 4, 4, 4]
else:
self.living_piece_color = living_piece_color
# [[推測値A,(パターンAの青駒のtuple表現)],[推測値B,(パターンBの青駒のtuple表現),...]
if enemy_estimated_num == None:
# 盤面の推測値を作成(大きい程青らしく、小さい程赤らしい)
self.enemy_estimated_num = []
for enemy_blue in itertools.combinations(
set(self.enemy_piece_list), self.living_piece_color[0]
):
self.enemy_estimated_num.append([0, enemy_blue])
else:
self.enemy_estimated_num = enemy_estimated_num
if my_estimated_num == None:
# 盤面の推測値を作成(大きい程青らしく、小さい程赤らしい)
self.my_estimated_num = []
for my_blue in itertools.combinations(
set(self.my_piece_list), self.living_piece_color[0]
):
self.my_estimated_num.append([0, my_blue])
else:
self.my_estimated_num = my_estimated_num
if see_through_piece_id == None and wrong_see_through_piece_id == None:
self.see_through_piece_id = []
self.wrong_see_through_piece_id = []
elif wrong_see_through_piece_id == None: # 間違った推測のみnullだった場合
self.see_through_piece_id = see_through_piece_id
self.wrong_see_through_piece_id = []
shave_impossible_board_from_see_through(self) # ありえない世界を初期化段階で消す
elif see_through_piece_id == None:
self.see_through_piece_id = []
self.wrong_see_through_piece_id = wrong_see_through_piece_id
rebuilding_estimated_num(
self,
set(self.see_through_piece_id),
set(self.wrong_see_through_piece_id),
)
else: # どっちもnullでない
self.see_through_piece_id = see_through_piece_id
self.wrong_see_through_piece_id = wrong_see_through_piece_id
rebuilding_estimated_num(
self,
set(self.see_through_piece_id),
set(self.wrong_see_through_piece_id),
)
# ボードの初期配置はこんな感じ(小文字が敵の駒で大文字が自分の駒)
# 0 1 2 3 4 5
# 0 h g f e
# 1 d c b a
# 2
# 3
# 4 A B C D
# 5 E F G H
# 合法手のリストの取得
# NNはactionを与えると事前に学習した方策を返す。
# 赤のゴール(非合法なので知らない手)を与えると、そこを0にして返してくれるはず(エラーは吐かないはず???)
def legal_actions(self):
actions = []
# リストに自分の駒を全て追加
piece_coordinate_array = np.array([0] * 8)
index = 0
for i in range(8, 16):
piece_coordinate_array[index] = self.all_piece[i]
index += 1
np.sort(piece_coordinate_array)
# print("my:self.all_piece", piece_coordinate_array)
for piece_coordinate in piece_coordinate_array:
# 88以上は行動できないので省く(0~35)
if piece_coordinate < 36:
actions.extend(
self.piece_coordinate_to_actions(
piece_coordinate, piece_coordinate_array
)
)
# 0と5はゴールの選択肢を追加(赤駒でも問答無用)
if piece_coordinate == 0:
actions.extend([2]) # 0*4 + 2
if piece_coordinate == 5:
actions.extend([22]) # 5*4 + 2
return actions
# 相手目線の合法手のリストを返す
def enemy_legal_actions(self):
actions = []
piece_coordinate_array = np.array([0] * 8)
index = 0
for i in range(0, 8):
if self.all_piece[i] < 36:
piece_coordinate_array[index] = 35 - self.all_piece[i]
else:
piece_coordinate_array[index] = 99
index += 1
np.sort(piece_coordinate_array)
# print("enemy:self.all_piece", piece_coordinate_array)
for piece_coordinate in piece_coordinate_array:
# 88以上は行動できないので省く(0~35)
if piece_coordinate < 36:
actions.extend(
self.piece_coordinate_to_actions(
piece_coordinate, piece_coordinate_array
)
)
# 0と5はゴールの選択肢を追加(赤駒でも問答無用)
if piece_coordinate == 0:
actions.extend([2]) # 0*4 + 2
if piece_coordinate == 5:
actions.extend([22]) # 5*4 + 2
return actions
# encode a source square and a move direction as a single action id
def position_to_action(self, position, direction):
    """Return the action id for moving from `position` in `direction` (0-3)."""
    return direction + position * 4
def piece_coordinate_to_actions(self, piece_coordinate, piece_coordinate_array):
actions = []
x = piece_coordinate % 6
y = int(piece_coordinate / 6)
if y != 5 and not np.any(piece_coordinate_array == (piece_coordinate + 6)): # 下
actions.append(self.position_to_action(piece_coordinate, 0))
if x != 0 and not np.any(piece_coordinate_array == (piece_coordinate - 1)): # 左
actions.append(self.position_to_action(piece_coordinate, 1))
if y != 0 and not np.any(piece_coordinate_array == (piece_coordinate - 6)): # 上
actions.append(self.position_to_action(piece_coordinate, 2))
if x != 5 and not np.any(piece_coordinate_array == (piece_coordinate + 1)): # 右
actions.append(self.position_to_action(piece_coordinate, 3))
return actions
# 駒ごと(駒1つに着目した)の合法手のリストの取得
def legal_actions_pos(self, position, piece_index_list):
piece_list = []
for piece_index in piece_index_list:
piece_list.append(self.all_piece[piece_index])
actions = []
x = position % 6
y = int(position / 6)
# 下左上右の順に行動できるか検証し、できるならactionに追加
# ちなみにand演算子は左の値を評価して右の値を返すか決める(左の値がTrue系でなければ右の値は無視する)ので、はみ出し参照してIndexErrorにはならない(&だとなる)
if y != 5 and (position + 6) not in piece_list: # 下端でない and 下に自分の駒がいない
actions.append(self.position_to_action(position, 0))
if x != 0 and (position - 1) not in piece_list: # 左端でない and 左に自分の駒がいない
actions.append(self.position_to_action(position, 1))
if y != 0 and (position - 6) not in piece_list: # 上端でない and 上に自分の駒がいない
actions.append(self.position_to_action(position, 2))
if x != 5 and (position + 1) not in piece_list: # 右端でない and 右に自分の駒がいない
actions.append(self.position_to_action(position, 3))
# 青駒のゴール行動の可否は1ターンに1度だけ判定すれば良いので、例外的にlegal_actionsで処理する(ここでは処理しない)
return actions
# apply an action and transition to the next state (mutates self in place)
def next(self, action_num):
    """Apply action_num: resolve any capture on the target square, update the
    hypothesis bookkeeping (reduce_pattern / rebuilding_estimated_num — defined
    elsewhere in this module), then move the piece in self.all_piece.
    """
    # decode the action into source/target coordinates (external helper)
    coordinate_before, coordinate_after = action_to_coordinate(action_num)
    move_piece_index = np.where(self.all_piece == coordinate_before)[0][0]
    # if a piece occupies the target square, capture it (friendly pieces
    # included — the caller is responsible for only playing legal moves)
    if np.any(self.all_piece == coordinate_after):
        dead_piece_ID = np.where(self.all_piece == coordinate_after)[0][0]
        if dead_piece_ID < 8:  # the captured piece is an enemy piece
            # color_is_blue: whether the captured piece was blue
            color_is_blue = any(
                i == dead_piece_ID for i in self.real_enemy_piece_blue_set
            )
            reduce_pattern(dead_piece_ID, color_is_blue, self)
            # wrong see-through guesses require a full hypothesis rebuild
            if self.wrong_see_through_piece_id != []:
                rebuilding_estimated_num(
                    self,
                    set(self.see_through_piece_id),
                    set(self.wrong_see_through_piece_id),
                )
        else:  # the captured piece is one of our own
            color_is_blue = any(
                i == dead_piece_ID for i in self.real_my_piece_blue_set
            )
            reduce_pattern(dead_piece_ID, color_is_blue, self)
    self.all_piece[move_piece_index] = coordinate_after  # move the piece
# Return the estimated values (mainly for debugging).
def return_estimate_value(self):
    """Aggregate the color-pattern estimates into one value per enemy piece.

    Each entry of self.enemy_estimated_num is (weight, blue_id_iterable);
    every blue id contributes its pattern's weight.  Returns a float32
    numpy array of length 8.  A previously dead `if False:` debug-print
    branch has been removed (it could never execute).
    """
    estimate_value = np.array([0] * 8, dtype="f4")
    for elem in self.enemy_estimated_num:
        weight, blue_ids = elem[0], elem[1]
        indicator = [0] * 8
        # Mark only the ids estimated blue in this pattern.
        for blue_id in blue_ids:
            indicator[blue_id] = 1
        estimate_value = estimate_value + (
            np.array(indicator, dtype="f4") * weight
        )
    return estimate_value
# Text rendering of the board.
def __str__(self):
    """Render the 6x6 board: own blue/red pieces and enemy pieces.

    Piece ids 0-7 are the enemy's, 8-15 are ours; coordinates outside
    0..35 mean the piece is off the board (captured or at goal).
    """
    row = "|{}|{}|{}|{}|{}|{}|"
    hr = "\n-------------------------------\n"
    # Merge both sides onto one board: -1 enemy, 1 own blue, 2 own red.
    board = [0] * 36
    for enemy_coord in self.all_piece[0:8]:
        if 0 <= enemy_coord < 36:
            board[enemy_coord] = -1
    for blue_index in self.real_my_piece_blue_set:
        blue_coord = self.all_piece[blue_index]
        if 0 <= blue_coord < 36:
            board[blue_coord] = 1
    for red_index in self.real_my_piece_red_set:
        red_coord = self.all_piece[red_index]
        if 0 <= red_coord < 36:
            board[red_coord] = 2
    labels = {1: "自青", 2: "自赤", -1: "敵駒"}
    board_essence = [labels.get(cell, "  ") for cell in board]
    template = hr + row + hr + row + hr + row + hr + row + hr + row + hr + row + hr
    ii_str = template.format(*board_essence)
    ii_str += "\n" + str(self.living_piece_color)
    return ii_str
# Choose revealed pieces such that the full board pattern stays ambiguous.
def create_see_through_piece(enemy_blue_piece_set, through_num):
    """Pick `through_num` enemy piece ids whose colors are revealed.

    One blue and one red piece are always kept hidden so the complete
    color pattern can never be deduced — unless 7 or more colors are
    requested, in which case everything is effectively known.
    """
    # Knowing 7+ of the 8 colors determines all of them.
    if through_num >= 7:
        return set({0, 1, 2, 3, 4, 5, 6, 7})
    blues = enemy_blue_piece_set.copy()
    reds = set({0, 1, 2, 3, 4, 5, 6, 7}) - blues
    # Drop one of each color so the pattern stays undetermined.
    blues.remove(random.choice(list(blues)))
    reds.remove(random.choice(list(reds)))
    revealed = blues | reds
    # Shrink the revealed set down to exactly `through_num` pieces
    # (6 == len(revealed) at this point).
    for _ in range(6 - through_num):
        revealed.remove(random.choice(list(revealed)))
    return revealed
# Build a non-contradictory color estimate that may include wrong guesses.
def create_wrong_and_see_through_piece(
    enemy_blue_piece_set: set, correct_through_num: int, wrong_through_num: int
):
    """Split the 8 enemy piece ids into a correct and a wrong estimate set.

    Returns [correct_piece_set, wrong_piece_set] with
    len(wrong_piece_set) == wrong_through_num, drawn as evenly as possible
    from blue and red; returns None (after printing) when
    correct_through_num + wrong_through_num >= 9, which cannot happen with
    valid inputs.
    """
    blue_piece_set = enemy_blue_piece_set.copy()
    red_piece_set = set({0, 1, 2, 3, 4, 5, 6, 7}) - blue_piece_set
    est_num = correct_through_num + wrong_through_num
    if est_num >= 9:
        # More estimated pieces than exist — caller bug.
        print("普通にバグ")
        return
    if est_num >= 7:  # Knowing 7+ of the 8 colors is the same as knowing all.
        # NOTE(review): assigned but never used — the full blue/red sets
        # simply flow through unchanged below; confirm this is intended.
        estimated_piece_set = set({0, 1, 2, 3, 4, 5, 6, 7})
    else:
        # Remove one blue and one red so the pattern is never fully determined.
        blue_piece_set.remove(random.choice(list(blue_piece_set)))
        red_piece_set.remove(random.choice(list(red_piece_set)))
        # Draw the estimated pieces evenly from blue and red.
        while len(blue_piece_set) + len(red_piece_set) > est_num:
            if len(blue_piece_set) > len(red_piece_set):
                blue_piece_set.remove(random.choice(list(blue_piece_set)))
            elif len(blue_piece_set) < len(red_piece_set):
                red_piece_set.remove(random.choice(list(red_piece_set)))
            else:  # Equal amounts of blue and red: pick a side at random.
                if random.randint(0, 1) == 0:
                    blue_piece_set.remove(random.choice(list(blue_piece_set)))
                else:
                    red_piece_set.remove(random.choice(list(red_piece_set)))
    wrong_piece_set = set()
    cp_wrong_through_num = wrong_through_num
    # Odd wrong_through_num: move one extra piece from the larger side first,
    # then handle the rest in blue/red pairs.
    if cp_wrong_through_num % 2 == 1:
        cp_wrong_through_num -= 1
        if len(blue_piece_set) > len(red_piece_set):
            piece = random.choice(list(blue_piece_set))
            blue_piece_set.remove(piece)
            wrong_piece_set.add(piece)
        elif len(blue_piece_set) < len(red_piece_set):
            piece = random.choice(list(red_piece_set))
            red_piece_set.remove(piece)
            wrong_piece_set.add(piece)
        else:  # Equal amounts of blue and red: pick a side at random.
            if random.randint(0, 1) == 0:
                piece = random.choice(list(blue_piece_set))
                blue_piece_set.remove(piece)
                wrong_piece_set.add(piece)
            else:
                piece = random.choice(list(red_piece_set))
                red_piece_set.remove(piece)
                wrong_piece_set.add(piece)
    # Move one blue and one red per remaining pair into the wrong set.
    for _ in range(cp_wrong_through_num // 2):
        piece = random.choice(list(blue_piece_set))
        blue_piece_set.remove(piece)
        wrong_piece_set.add(piece)
        piece = random.choice(list(red_piece_set))
        red_piece_set.remove(piece)
        wrong_piece_set.add(piece)
    correct_piece_set = blue_piece_set | red_piece_set
    return [correct_piece_set, wrong_piece_set]
# stateの駒の色に応じたii_stateを作成する(初期のstateのみ使用可能)
def create_ii_state_from_state(
state, enemy_view=False, through_num=0, wrong_through_num=0
):
if enemy_view:
# 敵視点でii_stateを作成
pieces = state.enemy_pieces
enemy_pieces = state.pieces
else:
pieces = state.pieces
enemy_pieces = state.enemy_pieces
# 駒のIDと座標が紐づいたリストを手動作成(初期配置では座標番号25~28と31~34に駒が存在)
piece_id_list = [0] * 36
for i in range(4):
piece_id_list[25 + i] = 8 + i
for i in range(4):
piece_id_list[31 + i] = 12 + i
blue_piece_set = set({})
for index, piece_color in enumerate(pieces):
if piece_color == 1:
blue_piece_set.add(piece_id_list[index])
# 敵駒の処理も同様にする
enemy_piece_id_list = [0] * 36
for i in range(4):
enemy_piece_id_list[25 + i] = 8 + | |
self.multi_tenancy = multi_tenancy
def set_new_switch_flows(self, dpid, topology, vmac_manager):
    """Install every flow needed when a new (non-gateway) switch connects."""
    self.set_default_flows(dpid)
    self.set_interswitch_flows(dpid, topology, vmac_manager)
    if not topology.gateway_connected():
        # No gateway yet: defer inter-datacenter flows until one connects
        # (drained later by handle_waitinglist).
        self.interdcenter_waitinglist.append(dpid)
        return
    self.set_switch_dcenter_flows(dpid, topology, vmac_manager)
def handle_waitinglist(self, dpid_gw, topology, vmac_manager):
    """Install the deferred inter-datacenter flows on every queued
    non-gateway switch, then clear the waiting list.

    `dpid_gw` identifies the gateway whose arrival triggered this call;
    it is currently unused but kept for interface stability.
    """
    for waiting_dpid in self.interdcenter_waitinglist:
        self.set_switch_dcenter_flows(waiting_dpid, topology, vmac_manager)
    self.interdcenter_waitinglist = []
def set_new_gateway_flows(self, dpid_gw, topology, vmac_manager):
    """Install every flow needed when a new gateway switch connects.

    A gateway gets the same base setup as a regular switch, plus direct
    inter-datacenter forwarding; finally any switches that were waiting
    for a gateway get their deferred flows installed.
    """
    self.set_default_flows(dpid_gw)
    self.set_interswitch_flows(dpid_gw, topology, vmac_manager)
    self.set_gateway_dcenter_flows(dpid_gw, topology, vmac_manager)
    self.handle_waitinglist(dpid_gw, topology, vmac_manager)
def set_gateway_dcenter_flows(self, dpid_gw, topology, vmac_manager):
    """Install flows on a gateway switch towards every peer datacenter."""
    for dcenter, port_no in topology.gateway_to_dcenters[dpid_gw].items():
        # Match the peer datacenter's vmac prefix; forward out the port
        # that faces that datacenter.
        self.set_topology_flow(dpid_gw,
                               vmac_manager.create_dc_vmac(dcenter),
                               VmacManager.DCENTER_MASK, port_no)
def set_switch_dcenter_flows(self, dpid, topology, vmac_manager):
    """Install flows on a non-gateway switch towards other datacenters.

    Traffic for any datacenter reachable via a gateway leaves through the
    port facing that gateway.
    """
    for dpid_gw in topology.get_gateways():
        port_to_gateway = topology.get_fwd_port(dpid, dpid_gw)
        for dcenter in topology.gateway_to_dcenters[dpid_gw]:
            self.set_topology_flow(dpid,
                                   vmac_manager.create_dc_vmac(dcenter),
                                   VmacManager.DCENTER_MASK,
                                   port_to_gateway)
def set_interswitch_flows(self, dpid, topology, vmac_manager):
    """Install forwarding flows between the new switch and each neighbor.

    For every neighbor two flows are installed: one on the new switch
    towards the neighbor and the reverse one on the neighbor towards the
    new switch.
    """
    for peer_dpid, fwd_port in topology.get_neighbors(dpid).items():
        self.set_topology_flow(dpid,
                               vmac_manager.get_swc_vmac(peer_dpid),
                               VmacManager.SWITCH_MASK, fwd_port)
        own_vmac = vmac_manager.get_swc_vmac(dpid)
        reverse_port = topology.get_fwd_port(peer_dpid, dpid)
        self.set_topology_flow(peer_dpid, own_vmac,
                               VmacManager.SWITCH_MASK, reverse_port)
def set_topology_flow(self, dpid, mac, mask, port):
    """Install a unicast forwarding flow on switch `dpid`: packets whose
    destination matches (mac, mask) are output on `port`."""
    datapath = self.dpset.get(str_to_dpid(dpid))
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    # With multi-tenancy the tenant filter owns the primary table, so
    # forwarding lives in the secondary table instead.
    table_id = (FlowManager.SECONDARY_TABLE if self.multi_tenancy
                else FlowManager.PRIMARY_TABLE)
    output_actions = [parser.OFPActionOutput(int(port))]
    instructions = [
        parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                     output_actions)]
    match = parser.OFPMatch(eth_dst=(mac, mask))
    self.set_flow(datapath=datapath,
                  match=match,
                  table_id=table_id,
                  priority=i_priority.DATA_FWD_DCENTER,
                  flags=ofproto.OFPFF_SEND_FLOW_REM,
                  command=ofproto.OFPFC_ADD,
                  instructions=instructions)
    LOGGER.info("New forward flow: (switch=%s) -> (mac=%s, mask=%s)",
                dpid, mac, mask)
def set_gateway_bounce_flow(self, dpid, vmac_new, vmac_old, topology):
    """Install short-lived flows on all gateways that rewrite the stale
    `vmac_old` to `vmac_new` and bounce traffic back towards local `dpid`
    during live migration, avoiding needless inter-datacenter traffic."""
    for dpid_gw in topology.get_gateways():
        fwd_port = topology.get_fwd_port(dpid_gw, dpid)
        datapath_gw = self.dpset.get(str_to_dpid(dpid_gw))
        ofproto = datapath_gw.ofproto
        parser = datapath_gw.ofproto_parser
        # Rewrite the stale vmac, then forward towards the local switch.
        rewrite_and_forward = [parser.OFPActionSetField(eth_dst=vmac_new),
                               parser.OFPActionOutput(int(fwd_port))]
        instructions = [
            parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                         rewrite_and_forward)]
        # Expires after the ARP timeout, by which point caches are updated.
        self.set_flow(datapath=datapath_gw,
                      match=parser.OFPMatch(eth_dst=vmac_old),
                      table_id=FlowManager.PRIMARY_TABLE,
                      priority=i_priority.DATA_FWD_LOCAL,
                      flags=ofproto.OFPFF_SEND_FLOW_REM,
                      hard_timeout=CONF.arp_timeout,
                      command=ofproto.OFPFC_ADD,
                      instructions=instructions)
def set_drop_flow(self, dpid, table_id=0):
    """Install a catch-all flow that drops any packet matching no other
    flow in `table_id` (prevents forwarding loops)."""
    datapath = self.dpset.get(str_to_dpid(dpid))
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    # An empty apply-actions list means the packet is dropped.
    drop_instruction = [
        parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, [])]
    self.set_flow(datapath=datapath,
                  match=parser.OFPMatch(),
                  table_id=table_id,
                  priority=i_priority.NORMAL,
                  flags=ofproto.OFPFF_SEND_FLOW_REM,
                  command=ofproto.OFPFC_ADD,
                  instructions=drop_instruction)
def set_flow(self, datapath, match=None, table_id=0, command=None,
             priority=0, flags=0, hard_timeout=0, instructions=None):
    """Build an OFPFlowMod from the given fields and send it to `datapath`.

    All keyword arguments map directly onto OFPFlowMod fields.
    Fixed: the `instructions` default was a shared mutable list (`[]`);
    it is now None and replaced with a fresh empty list per call —
    behavior for existing callers is unchanged.
    """
    if instructions is None:
        instructions = []
    parser = datapath.ofproto_parser
    datapath.send_msg(
        parser.OFPFlowMod(
            datapath=datapath,
            match=match,
            table_id=table_id,
            command=command,
            priority=priority,
            flags=flags,
            hard_timeout=hard_timeout,
            instructions=instructions))
def set_default_flows(self, dpid):
    """Set up default flows on a connected switch.

    Default flows are categorized into two tables:
    Table 1: tenant filter
    Table 2: destination-based forwarding
    Installs: an ARP intercept, two DHCP intercepts (client and server
    ports), and a catch-all drop per table to prevent loops.
    """
    datapath = self.dpset.get(str_to_dpid(dpid))
    ofproto = datapath.ofproto
    ofproto_parser = datapath.ofproto_parser
    # Table 1 setup
    # Set up one flow for ARP messages:
    # intercept all ARP packets and send them to the controller.
    actions_arp = [ofproto_parser.OFPActionOutput(
        ofproto.OFPP_CONTROLLER,
        ofproto.OFPCML_NO_BUFFER)]
    instruction_arp = [datapath.ofproto_parser.OFPInstructionActions(
        ofproto.OFPIT_APPLY_ACTIONS,
        actions_arp)]
    match_arp = ofproto_parser.OFPMatch(eth_type=ether.ETH_TYPE_ARP)
    self.set_flow(datapath=datapath,
                  match=match_arp,
                  table_id=FlowManager.PRIMARY_TABLE,
                  priority=i_priority.ARP,
                  flags=ofproto.OFPFF_SEND_FLOW_REM,
                  command=ofproto.OFPFC_ADD,
                  instructions=instruction_arp)
    # Set up two flows for DHCP messages (shared controller-bound action).
    actions_dhcp = [ofproto_parser.OFPActionOutput(
        ofproto.OFPP_CONTROLLER,
        ofproto.OFPCML_NO_BUFFER)]
    instruction_dhcp = [datapath.ofproto_parser.OFPInstructionActions(
        ofproto.OFPIT_APPLY_ACTIONS,
        actions_dhcp)]
    # Matched by the UDP source port: client-originated vs server-originated.
    match_client = ofproto_parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,
                                           ip_proto=inet.IPPROTO_UDP,
                                           udp_src=i_dhcp.CLIENT_PORT)
    match_server = ofproto_parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,
                                           ip_proto=inet.IPPROTO_UDP,
                                           udp_src=i_dhcp.SERVER_PORT)
    # (1) Intercept all DHCP request packets and send to the controller.
    self.set_flow(datapath=datapath,
                  match=match_client,
                  table_id=FlowManager.PRIMARY_TABLE,
                  priority=i_priority.DHCP,
                  flags=ofproto.OFPFF_SEND_FLOW_REM,
                  command=ofproto.OFPFC_ADD,
                  instructions=instruction_dhcp)
    # (2) Intercept all DHCP reply packets and send to the controller.
    self.set_flow(datapath=datapath,
                  match=match_server,
                  table_id=FlowManager.PRIMARY_TABLE,
                  priority=i_priority.DHCP,
                  flags=ofproto.OFPFF_SEND_FLOW_REM,
                  command=ofproto.OFPFC_ADD,
                  instructions=instruction_dhcp)
    # To prevent loops, all non-matching packets are dropped.
    self.set_drop_flow(dpid)
    # Table 2 setup for multi-tenancy:
    # to prevent loops, all non-matching packets are dropped there too.
    if self.multi_tenancy:
        self.set_drop_flow(dpid, table_id=FlowManager.SECONDARY_TABLE)
def del_tenant_filter(self, dpid, mac):
    """Delete the tenant-filter flow for source `mac` on switch `dpid`.

    No-op when multi-tenancy is disabled.
    """
    if not self.multi_tenancy:
        return
    datapath = self.dpset.get(str_to_dpid(dpid))
    src_match = datapath.ofproto_parser.OFPMatch(eth_src=mac)
    self.set_flow(datapath=datapath,
                  match=src_match,
                  table_id=FlowManager.PRIMARY_TABLE,
                  command=datapath.ofproto.OFPFC_DELETE_STRICT)
def set_tenant_filter(self, dpid, vmac, mac):
    """Install a filter on switch `dpid` so guest `mac` may only send
    unicast traffic inside its own tenant.

    No-op when multi-tenancy is disabled.
    """
    if not self.multi_tenancy:
        return
    datapath = self.dpset.get(str_to_dpid(dpid))
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    # Match the guest's source MAC paired with its tenant's vmac prefix;
    # matching traffic proceeds to the forwarding table.
    tenant_match = parser.OFPMatch(eth_src=mac,
                                   eth_dst=(vmac,
                                            VmacManager.TENANT_MASK))
    goto_forwarding = [parser.OFPInstructionGotoTable(
        FlowManager.SECONDARY_TABLE)]
    self.set_flow(datapath=datapath,
                  match=tenant_match,
                  table_id=FlowManager.PRIMARY_TABLE,
                  priority=i_priority.DATA_FWD_TENANT,
                  flags=ofproto.OFPFF_SEND_FLOW_REM,
                  command=ofproto.OFPFC_ADD,
                  instructions=goto_forwarding)
def set_local_flow(self, dpid, vmac, mac, port, flow_add=True, timeout=0):
    """Install (or, when flow_add is False, strictly modify) a microflow
    on switch `dpid` towards a guest.

    The flow matches on destination `vmac`, rewrites it to `mac` (which
    may itself be another vmac) and outputs on `port`.  With
    multi-tenancy the same flow is mirrored into the secondary table.
    """
    datapath = self.dpset.get(str_to_dpid(dpid))
    ofproto = datapath.ofproto
    parser = datapath.ofproto_parser
    flow_cmd = ofproto.OFPFC_ADD if flow_add else ofproto.OFPFC_MODIFY_STRICT
    rewrite_actions = [parser.OFPActionSetField(eth_dst=mac),
                       parser.OFPActionOutput(int(port))]
    instructions = [
        parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                     rewrite_actions)]
    match = parser.OFPMatch(eth_dst=vmac)
    target_tables = [FlowManager.PRIMARY_TABLE]
    if self.multi_tenancy:
        target_tables.append(FlowManager.SECONDARY_TABLE)
    for table in target_tables:
        self.set_flow(datapath=datapath,
                      match=match,
                      table_id=table,
                      priority=i_priority.DATA_FWD_LOCAL,
                      flags=ofproto.OFPFF_SEND_FLOW_REM,
                      hard_timeout=timeout,
                      command=flow_cmd,
                      instructions=instructions)
class TenantManager(object):
    """Manage the mapping from guest MAC addresses to tenant ids.

    When no tenant information is configured, every MAC falls into
    DEFAULT_TENANT_ID (multi-tenancy effectively disabled).
    """
    DEFAULT_TENANT_ID = "1"

    def __init__(self, mac_to_tenant=None):
        # Default changed from a shared mutable {} to None to avoid the
        # mutable-default-argument pitfall; behavior is unchanged.
        self.mac_to_tenant = {} if mac_to_tenant is None else mac_to_tenant

    @classmethod
    def tenant_from_string(cls, tenant_info):
        """Create a TenantManager from a config string.

        `tenant_info` has the form "mac1,mac2;mac3": tenants are separated
        by ';' and MACs within a tenant by ','.  Tenant ids are assigned
        "1", "2", ... in order of appearance.
        """
        tenant_list = cls.parse_tenants(tenant_info)
        mac_to_tenant = {}
        for tenant_id, mac_tuple in enumerate(tenant_list, 1):
            for mac in mac_tuple:
                mac_to_tenant[mac] = str(tenant_id)
        return cls(mac_to_tenant)

    @staticmethod
    def parse_tenants(tenant_info, out_sep=';', in_sep=','):
        """Convert a config string to a list of MAC tuples (one per tenant).

        Fixed: this was an instance method, but tenant_from_string invoked
        it as `cls.parse_tenants(tenant_info)`, which passed the string as
        `self` and raised TypeError.  As a staticmethod both
        `cls.parse_tenants(s)` and `instance.parse_tenants(s)` work.
        """
        tenant_list = []
        if tenant_info == "":
            return tenant_list
        for tenant_str in tenant_info.split(out_sep):
            tenant_list.append(tuple(tenant_str.split(in_sep)))
        return tenant_list

    def get_tenant_id(self, mac):
        """Return the tenant id for `mac`; raises KeyError for an unknown
        MAC when tenants are configured, else the default tenant id."""
        if self.mac_to_tenant:
            return self.mac_to_tenant[mac]
        return TenantManager.DEFAULT_TENANT_ID
class RPCManager(object):
    """Manage RPC clients and issue RPC calls to peer datacenters."""

    # rpc ids wrap around at this bound
    MAX_ID = 65536

    def __init__(self, dcenter_to_info, self_dcenter='0'):
        # {peer_dc => (controller_ip, peer_gateway)}: neighbor datacenter info
        self.self_dcenter = self_dcenter
        self.dcenter_to_info = dcenter_to_info
        # {peer_dc => xmlrpc client}, populated in _setup_rpc_server_clients
        self.dcenter_to_rpc = {}
        self.rpc_id = 0
        self.arp_readers = []
        self.rpc_arp_readers = []

    @classmethod
    def rpc_from_config(cls, peer_dcenters, self_dcenter='0'):
        """Alternate constructor: parse the peer-datacenter config string."""
        dcenter_to_info = cls.parse_peer_dcenters(peer_dcenters)
        return cls(dcenter_to_info, self_dcenter)

    def update_arp_readers(self, arp_readers_str):
        """Record the configured ARP-reader addresses (parsed elsewhere)."""
        self.arp_readers = str_to_tuple(arp_readers_str)

    @staticmethod
    def parse_peer_dcenters(peer_dcenters, out_sep=';', in_sep=','):
        """Convert "dc,ip,gw;dc2,ip2,gw2" into {dc: (ip, gw)}."""
        peer_dcs_dic = {}
        if not peer_dcenters:
            return peer_dcs_dic
        for peer_dc in peer_dcenters.split(out_sep):
            peer_list = peer_dc.split(in_sep)
            peer_dcs_dic[peer_list[0]] = (peer_list[1], peer_list[2])
        return peer_dcs_dic

    def _setup_rpc_server_clients(self, inception_rpc):
        """Start the local RPC server and create clients to other
        controllers and to the registered ARP readers."""
        # RPC server
        host_addr = socket.gethostbyname(socket.gethostname())
        rpc_server = SimpleXMLRPCServer((host_addr, CONF.rpc_port),
                                        allow_none=True)
        rpc_server.register_introspection_functions()
        rpc_server.register_instance(inception_rpc)
        hub.spawn(rpc_server.serve_forever)
        # Create datacenter RPC clients
        for dcenter in self.dcenter_to_info:
            controller_ip, _ = self.dcenter_to_info[dcenter]
            LOGGER.info("New RPC channel: %s", controller_ip)
            rpc_client = ServerProxy("http://%s:%s" %
                                     (controller_ip, CONF.rpc_port))
            self.dcenter_to_rpc[dcenter] = rpc_client
        # Create ARP reader RPC clients
        for reader_ip in self.arp_readers:
            LOGGER.info("New ARP reader: %s", reader_ip)
            rpc_client = ServerProxy("http://%s:%s" %
                                     (reader_ip, CONF.rpc_port))
            self.rpc_arp_readers.append(rpc_client)

    def get_dcenters(self):
        """Return all datacenter ids, including this controller's own.

        Fixed: dict.keys() returns a view in Python 3 which has no
        .append(); materialize a list before appending.
        """
        peer_dcenters = list(self.dcenter_to_info.keys())
        peer_dcenters.append(self.self_dcenter)
        return peer_dcenters

    def do_rpc(self, func_name, arguments):
        """Broadcast an RPC call to every peer datacenter controller,
        tagging it with a wrapping per-manager id."""
        rpc_id = str(self.rpc_id)
        self.rpc_id = (self.rpc_id + 1) % self.MAX_ID
        for rpc_client in self.dcenter_to_rpc.values():
            rpc_client.do_rpc(func_name, rpc_id, arguments)

    def rpc_arp_learning(self, ip, vmac):
        """Push a learned (ip, vmac) binding to all registered ARP readers."""
        for rpc_client in self.rpc_arp_readers:
            rpc_client.update_local_arp(ip, vmac)
class ArpManager(object):
    """Maintain the bidirectional IP <=> MAC mapping."""

    def __init__(self):
        # Data stored in zookeeper
        self.ip_to_mac = {}
        # Local cache (reverse index)
        self.mac_to_ip = {}

    def update_mapping(self, ip, mac):
        """Record a new (ip, mac) binding; an already-known ip is ignored."""
        if ip not in self.ip_to_mac:
            self.ip_to_mac[ip] = mac
            self.mac_to_ip[mac] = ip
            LOGGER.info("Update: (ip=%s) => (mac=%s)", ip, mac)

    def learn_arp_mapping(self, ip, mac):
        """Learn a binding observed in traffic (first sighting wins)."""
        if ip not in self.ip_to_mac:
            self.update_mapping(ip, mac)

    def del_mapping(self, ip, mac):
        """Drop a binding from both indexes (KeyError if unknown)."""
        del self.ip_to_mac[ip]
        del self.mac_to_ip[mac]

    def get_ip(self, mac):
        return self.mac_to_ip[mac]

    def get_mac(self, ip):
        return self.ip_to_mac[ip]

    def mapping_exist(self, ip):
        return ip in self.ip_to_mac
class ZkManager(object):
"""Manage data storage and fetch in zookeeper"""
def __init__(self, inception, zk_storage=False):
    """Initialize zookeeper-backed storage for the controller.

    inception: the owning app; provides rpc_manager, arp_manager, etc.
    zk_storage: decide whether to use zookeeper (True) or not (False);
    when False this object does no setup at all.
    """
    self.zk_storage = zk_storage
    self.inception = inception
    if self.zk_storage:
        # Flag indicating master/slave role
        self.master_ctl = False
        # Zookeeper paths for persisted state and logs
        self.pos_path = "/pos"
        self.arp_path = "/arp"
        self.queue_path = "/queue"
        self.leader_path = "/election"
        self.pktin_path = "/log/packet_in"
        self.rpc_path = "/log/rpc"
        # Local cache of queued packet_in payloads keyed by digest
        self.digest_to_pktin = {}
        # Route kazoo's logging through our configured level/format
        zk_logger = logging.getLogger('kazoo')
        zk_log_level = log.LOG_LEVELS[CONF.zk_log_level]
        zk_logger.setLevel(zk_log_level)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(zk_log_level)
        console_handler.setFormatter(logging.Formatter(CONF.log_formatter))
        zk_logger.addHandler(console_handler)
        self.zk = client.KazooClient(hosts=CONF.zk_servers,
                                     logger=zk_logger)
        self.zk.start()
        self.zk.ensure_path(self.pos_path)
        self.zk.ensure_path(self.arp_path)
        self.zk.ensure_path(self.pktin_path)
        self.zk.ensure_path(self.rpc_path)
        # Distributed queue of packet_in events; consumed by the slave-side
        # watcher thread until this controller wins the election
        self.pkt_queue = self.zk.LockingQueue(self.queue_path)
        self.thread_pkt = hub.spawn(self.handle_pkt_queue)
        hub.spawn(self.run_for_leader)
def run_for_leader(self):
    """Join the zookeeper leader election; handle_role_upgrade runs if
    (and when) this controller is elected master."""
    LOGGER.info('Contending for leadership...')
    leader_election = self.zk.Election(self.leader_path)
    leader_election.run(self.handle_role_upgrade)
def handle_role_upgrade(self):
    """Promote this controller from slave to master.

    Stops the slave-side queue watcher, drains the remaining packet_in
    queue, loads persisted state, replays the failover log, then parks
    the greenlet to keep holding the leadership lock.
    """
    LOGGER.info("Upgrade to master")
    hub.kill(self.thread_pkt)
    # Drain packet_in messages left over from the slave phase.
    # (Was `self.pkt_queue.__len__() > 0` — use the len() builtin.)
    while len(self.pkt_queue) > 0:
        self.consume_pkt()
    dcenters = self.inception.rpc_manager.get_dcenters()
    self.init_dcenter(dcenters)
    self.load_data(arp_manager=self.inception.arp_manager,
                   switch_manager=self.inception.switch_manager,
                   vm_manager=self.inception.vm_manager,
                   vmac_manager=self.inception.vmac_manager,
                   tenant_manager=self.inception.tenant_manager)
    self.handle_failover_log()
    self.master_ctl = True
    # TODO: New leader election design
    # Block forever so the election callback (and thus leadership) is
    # never released.
    while True:
        time.sleep(1)
def consume_pkt(self):
"""Consume packet_in in queue and local cache"""
pkt_data = self.pkt_queue.get()
self.pkt_queue.consume()
pkt_digest = pkt_data.decode('Latin-1').encode('Latin-1')
self.digest_to_pktin.pop(pkt_digest, None)
LOGGER.info('Packet_in message consumed: %s', | |
# repo: cs6ting/pytorch_geometric_temporal
"""Tests for batch behaviour."""
import numpy as np
import networkx as nx
from torch_geometric_temporal.signal import temporal_signal_split
from torch_geometric_temporal.signal import StaticGraphTemporalSignalBatch
from torch_geometric_temporal.signal import DynamicGraphTemporalSignalBatch
from torch_geometric_temporal.signal import DynamicGraphStaticSignalBatch
from torch_geometric_temporal.signal import StaticHeteroGraphTemporalSignalBatch
from torch_geometric_temporal.signal import DynamicHeteroGraphTemporalSignalBatch
from torch_geometric_temporal.signal import DynamicHeteroGraphStaticSignalBatch
def get_edge_array(node_count, node_start):
    """Sample a random G(n, p=0.1) graph and return its edge list as an
    array of [u, v] pairs with node ids shifted by `node_start`."""
    shifted_edges = [
        [u + node_start, v + node_start]
        for u, v in nx.gnp_random_graph(node_count, 0.1).edges()
    ]
    return np.array(shifted_edges)
def generate_signal(snapshot_count, node_count, feature_count, graph_count):
    """Create random batched graph-signal components.

    Returns per-snapshot lists of edge indices, edge weights, node
    features, binary targets and batch-membership vectors for
    `graph_count` disjoint random graphs per snapshot.
    """
    edge_indices, edge_weights, features, targets, batches = [], [], [], [], []
    for _ in range(snapshot_count):
        node_start = 0
        edge_indices_s, edge_weights_s = [], []
        features_s, targets_s, batches_s = [], [], []
        for graph_id in range(graph_count):
            # Shift node ids so each graph in the batch is disjoint.
            edges = get_edge_array(node_count, node_start)
            edge_indices_s.append(edges)
            edge_weights_s.append(np.ones(edges.shape[0]))
            features_s.append(np.random.uniform(0, 1, (node_count, feature_count)))
            targets_s.append(
                np.array([np.random.choice([0, 1]) for _ in range(node_count)])
            )
            batches_s.append(np.array([graph_id for _ in range(node_count)]))
            node_start = node_start + node_count
        edge_indices.append(np.concatenate(edge_indices_s).T)
        edge_weights.append(np.concatenate(edge_weights_s))
        features.append(np.concatenate(features_s))
        targets.append(np.concatenate(targets_s))
        batches.append(np.concatenate(batches_s))
    return edge_indices, edge_weights, features, targets, batches
def generate_heterogeneous_signal(snapshot_count, node_count, feature_count, graph_count):
    """Create random batched heterogeneous graph-signal components.

    Uses an ('author', 'writes', 'paper') schema with `graph_count`
    disjoint graphs per snapshot; returns per-snapshot lists of edge-index
    dicts, edge-weight dicts, feature dicts, target dicts and batch dicts.
    """
    edge_index_dicts = []
    edge_weight_dicts = []
    feature_dicts = []
    target_dicts = []
    batch_dicts = []
    for _ in range(snapshot_count):
        node_start = 0
        edge_index_dict_s = {('author', 'writes', 'paper'): []}
        edge_weight_dict_s = {('author', 'writes', 'paper'): []}
        feature_dict_s = {'author': [],
                          'paper': []}
        target_dict_s = {'author': [],
                         'paper': []}
        batch_dict_s = {'author': [],
                        'paper': []}
        for i in range(graph_count):
            # Shift node ids so each graph in the batch is disjoint.
            edge_index_dict_s[('author', 'writes', 'paper')].append(get_edge_array(node_count, node_start))
            # One unit weight per sampled edge.
            edge_weight_dict_s[('author', 'writes', 'paper')].append(
                (np.ones(edge_index_dict_s[('author', 'writes', 'paper')][-1].shape[0]))
            )
            feature_dict_s['paper'].append(np.random.uniform(0, 1, (node_count, feature_count)))
            feature_dict_s['author'].append(np.random.uniform(0, 1, (node_count, feature_count)))
            target_dict_s['paper'].append(
                np.array([np.random.choice([0, 1]) for _ in range(node_count)])
            )
            target_dict_s['author'].append(
                np.array([np.random.choice([0, 1]) for _ in range(node_count)])
            )
            batch_dict_s['paper'].append(np.array([i for _ in range(node_count)]))
            batch_dict_s['author'].append(np.array([i for _ in range(node_count)]))
            node_start = node_start + node_count
        # NOTE(review): the comprehension variable is named `node_type`, but
        # for the two edge dicts below it actually iterates edge-type
        # triples — rename candidate.
        edge_index_dicts.append(
            {node_type: np.concatenate(edge_indices_s).T for node_type, edge_indices_s in edge_index_dict_s.items()}
        )
        edge_weight_dicts.append(
            {node_type: np.concatenate(edge_weights_s) for node_type, edge_weights_s in edge_weight_dict_s.items()}
        )
        feature_dicts.append(
            {node_type: np.concatenate(features_s) for node_type, features_s in feature_dict_s.items()}
        )
        target_dicts.append({node_type: np.concatenate(targets_s) for node_type, targets_s in target_dict_s.items()})
        batch_dicts.append({node_type: np.concatenate(batches_s) for node_type, batches_s in batch_dict_s.items()})
    return edge_index_dicts, edge_weight_dicts, feature_dicts, target_dicts, batch_dicts
def test_dynamic_graph_temporal_signal_real_batch():
    """Batched dynamic-graph dataset yields correctly shaped snapshots
    across repeated iteration."""
    snapshot_count, node_count, feature_count, graph_count = 250, 100, 32, 10
    signal = generate_signal(snapshot_count, node_count, feature_count, graph_count)
    dataset = DynamicGraphTemporalSignalBatch(*signal)
    total_nodes = graph_count * node_count
    for _ in range(15):
        for snapshot in dataset:
            assert snapshot.edge_index.shape[0] == 2
            assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
            assert snapshot.x.shape == (total_nodes, feature_count)
            assert snapshot.y.shape == (total_nodes,)
            assert snapshot.batch.shape == (total_nodes,)
def test_static_graph_temporal_signal_batch():
    """All-None inputs yield snapshots whose attributes are all None."""
    dataset = StaticGraphTemporalSignalBatch(
        None, None, [None, None], [None, None], None
    )
    for snapshot in dataset:
        for attr in ("edge_index", "edge_attr", "x", "y", "batch"):
            assert getattr(snapshot, attr) is None
def test_static_hetero_graph_temporal_signal_batch():
    """All-None inputs yield hetero snapshots with empty stores."""
    dataset = StaticHeteroGraphTemporalSignalBatch(
        None, None, [None, None], [None, None], None
    )
    for snapshot in dataset:
        for group in (snapshot.node_types, snapshot.node_stores,
                      snapshot.edge_types, snapshot.edge_stores):
            assert len(group) == 0
def test_dynamic_hetero_graph_static_signal_batch():
    """All-None inputs yield hetero snapshots with empty stores."""
    dataset = DynamicHeteroGraphStaticSignalBatch(
        [None], [None], None, [None], [None]
    )
    for snapshot in dataset:
        for group in (snapshot.node_types, snapshot.node_stores,
                      snapshot.edge_types, snapshot.edge_stores):
            assert len(group) == 0
def test_dynamic_hetero_graph_temporal_signal_batch():
    """All-None inputs yield hetero snapshots with empty stores."""
    dataset = DynamicHeteroGraphTemporalSignalBatch(
        [None, None], [None, None], [None, None], [None, None], [None, None]
    )
    for snapshot in dataset:
        for group in (snapshot.node_types, snapshot.node_stores,
                      snapshot.edge_types, snapshot.edge_stores):
            assert len(group) == 0
def test_dynamic_graph_temporal_signal_batch():
    """All-None inputs yield snapshots whose attributes are all None."""
    dataset = DynamicGraphTemporalSignalBatch(
        [None, None], [None, None], [None, None], [None, None], [None, None]
    )
    for snapshot in dataset:
        for attr in ("edge_index", "edge_attr", "x", "y", "batch"):
            assert getattr(snapshot, attr) is None
def test_static_graph_temporal_signal_typing_batch():
    """Array features/targets are exposed per snapshot; the rest stays None."""
    dataset = StaticGraphTemporalSignalBatch(
        None, None, [np.array([1])], [np.array([2])], None
    )
    for snapshot in dataset:
        for attr in ("edge_index", "edge_attr", "batch"):
            assert getattr(snapshot, attr) is None
        assert snapshot.x.shape == (1,)
        assert snapshot.y.shape == (1,)
def test_static_hetero_graph_temporal_signal_typing_batch():
    """Per-type node arrays are exposed; no batch key, no edge types."""
    dataset = StaticHeteroGraphTemporalSignalBatch(
        None, None, [{'author': np.array([1])}], [{'author': np.array([2])}], None
    )
    for snapshot in dataset:
        store = snapshot.node_stores[0]
        assert snapshot.node_types[0] == 'author'
        assert store['x'].shape == (1,)
        assert store['y'].shape == (1,)
        assert 'batch' not in dict(store)
        assert len(snapshot.edge_types) == 0
def test_dynamic_hetero_graph_static_signal_typing_batch():
    """Per-type node arrays are exposed; no batch key, no edge types."""
    dataset = DynamicHeteroGraphStaticSignalBatch(
        [None], [None], {'author': np.array([1])}, [{'author': np.array([2])}], [None]
    )
    for snapshot in dataset:
        store = snapshot.node_stores[0]
        assert snapshot.node_types[0] == 'author'
        assert store['x'].shape == (1,)
        assert store['y'].shape == (1,)
        assert 'batch' not in dict(store)
        assert len(snapshot.edge_types) == 0
def test_dynamic_hetero_graph_temporal_signal_typing_batch():
    """Per-type node arrays are exposed; no batch key, no edge types."""
    dataset = DynamicHeteroGraphTemporalSignalBatch(
        [None], [None], [{'author': np.array([1])}], [{'author': np.array([2])}], [None]
    )
    for snapshot in dataset:
        store = snapshot.node_stores[0]
        assert snapshot.node_types[0] == 'author'
        assert store['x'].shape == (1,)
        assert store['y'].shape == (1,)
        assert 'batch' not in dict(store)
        assert len(snapshot.edge_types) == 0
def test_dynamic_graph_static_signal_typing_batch():
    """All-None inputs yield snapshots whose attributes are all None."""
    dataset = DynamicGraphStaticSignalBatch([None], [None], None, [None], [None])
    for snapshot in dataset:
        for attr in ("edge_index", "edge_attr", "x", "y", "batch"):
            assert getattr(snapshot, attr) is None
def test_dynamic_graph_temporal_signal_batch_additional_attrs():
    """Extra keyword arrays are registered and exposed on snapshots."""
    dataset = DynamicGraphTemporalSignalBatch(
        [None], [None], [None], [None], [None],
        optional1=[np.array([1])], optional2=[np.array([2])],
    )
    assert dataset.additional_feature_keys == ["optional1", "optional2"]
    for snapshot in dataset:
        assert snapshot.optional1.shape == (1,)
        assert snapshot.optional2.shape == (1,)
def test_static_graph_temporal_signal_batch_additional_attrs():
    """Extra keyword arrays are registered and exposed on snapshots."""
    dataset = StaticGraphTemporalSignalBatch(
        None, None, [None], [None], None,
        optional1=[np.array([1])], optional2=[np.array([2])],
    )
    assert dataset.additional_feature_keys == ["optional1", "optional2"]
    for snapshot in dataset:
        assert snapshot.optional1.shape == (1,)
        assert snapshot.optional2.shape == (1,)
def test_static_hetero_graph_temporal_signal_batch_additional_attrs():
    """Per-type extra arrays appear in stores; None-valued extras do not."""
    dataset = StaticHeteroGraphTemporalSignalBatch(
        None, None, [None], [None], None,
        optional1=[{'author': np.array([1])}],
        optional2=[{'author': np.array([2])}],
        optional3=[None],
    )
    assert dataset.additional_feature_keys == ["optional1", "optional2", "optional3"]
    for snapshot in dataset:
        store = snapshot.node_stores[0]
        assert store['optional1'].shape == (1,)
        assert store['optional2'].shape == (1,)
        assert "optional3" not in dict(store)
def test_dynamic_hetero_graph_static_signal_batch_additional_attrs():
    """Per-type extra arrays appear in stores; None-valued extras do not."""
    dataset = DynamicHeteroGraphStaticSignalBatch(
        [None], [None], None, [None], [None],
        optional1=[{'author': np.array([1])}],
        optional2=[{'author': np.array([2])}],
        optional3=[None],
    )
    assert dataset.additional_feature_keys == ["optional1", "optional2", "optional3"]
    for snapshot in dataset:
        store = snapshot.node_stores[0]
        assert store['optional1'].shape == (1,)
        assert store['optional2'].shape == (1,)
        assert "optional3" not in dict(store)
def test_dynamic_hetero_graph_temporal_signal_batch_additional_attrs():
    """Per-type extra arrays appear in stores; None-valued extras do not."""
    dataset = DynamicHeteroGraphTemporalSignalBatch(
        [None], [None], [None], [None], [None],
        optional1=[{'author': np.array([1])}],
        optional2=[{'author': np.array([2])}],
        optional3=[None],
    )
    assert dataset.additional_feature_keys == ["optional1", "optional2", "optional3"]
    for snapshot in dataset:
        store = snapshot.node_stores[0]
        assert store['optional1'].shape == (1,)
        assert store['optional2'].shape == (1,)
        assert "optional3" not in dict(store)
def test_dynamic_graph_static_signal_batch_additional_attrs():
    """Extra kwargs are listed as additional feature keys and exposed per snapshot."""
    extras = {"optional1": [np.array([1])], "optional2": [np.array([2])]}
    dataset = DynamicGraphStaticSignalBatch([None], [None], None, [None], [None], **extras)
    assert dataset.additional_feature_keys == ["optional1", "optional2"]
    for snapshot in dataset:
        assert snapshot.optional1.shape == (1,)
        assert snapshot.optional2.shape == (1,)
def test_static_hetero_graph_temporal_signal_batch_edges():
    """Edge index/attr dicts are exposed through the snapshot edge store."""
    edge_index_dict = {("author", "writes", "paper"): np.array([[0, 1], [1, 0]])}
    edge_weight_dict = {("author", "writes", "paper"): np.array([[0.1], [0.1]])}
    feature_dicts = [
        {"author": np.array([[0], [0]]),
         "paper": np.array([[0], [0], [0]])},
        {"author": np.array([[0.1], [0.1]]),
         "paper": np.array([[0.1], [0.1], [0.1]])},
    ]
    dataset = StaticHeteroGraphTemporalSignalBatch(
        edge_index_dict, edge_weight_dict, feature_dicts, [None, None], None
    )
    for snapshot in dataset:
        edges = snapshot.edge_stores[0]
        assert edges['edge_index'].shape == (2, 2)
        assert edges['edge_attr'].shape == (2, 1)
        assert edges['edge_index'].shape[0] == edges['edge_attr'].shape[0]
def test_dynamic_hetero_graph_static_signal_batch_edges():
    """Edge index/attr dicts are exposed through the snapshot edge store."""
    edge_index_dicts = [{("author", "writes", "paper"): np.array([[0, 1], [1, 0]])}]
    edge_weight_dicts = [{("author", "writes", "paper"): np.array([[0.1], [0.1]])}]
    feature_dict = {"author": np.array([[0], [0]]),
                    "paper": np.array([[0], [0], [0]])}
    dataset = DynamicHeteroGraphStaticSignalBatch(
        edge_index_dicts, edge_weight_dicts, feature_dict, [None], [None]
    )
    for snapshot in dataset:
        edges = snapshot.edge_stores[0]
        assert edges['edge_index'].shape == (2, 2)
        assert edges['edge_attr'].shape == (2, 1)
        assert edges['edge_index'].shape[0] == edges['edge_attr'].shape[0]
def test_dynamic_hetero_graph_temporal_signal_batch_edges():
    """Edge index/attr dicts are exposed through the snapshot edge store."""
    edge_index_dicts = [{("author", "writes", "paper"): np.array([[0, 1], [1, 0]])}]
    edge_weight_dicts = [{("author", "writes", "paper"): np.array([[0.1], [0.1]])}]
    feature_dicts = [{"author": np.array([[0], [0]]),
                      "paper": np.array([[0], [0], [0]])}]
    dataset = DynamicHeteroGraphTemporalSignalBatch(
        edge_index_dicts, edge_weight_dicts, feature_dicts, [None], [None]
    )
    for snapshot in dataset:
        edges = snapshot.edge_stores[0]
        assert edges['edge_index'].shape == (2, 2)
        assert edges['edge_attr'].shape == (2, 1)
        assert edges['edge_index'].shape[0] == edges['edge_attr'].shape[0]
def test_static_hetero_graph_temporal_signal_batch_assigned():
    """Features, targets and batch ids are assigned to the node store; no edge types exist."""
    dataset = StaticHeteroGraphTemporalSignalBatch(
        None, None, [{'author': np.array([1])}], [{'author': np.array([2])}], {'author': np.array([1])}
    )
    for snapshot in dataset:
        assert snapshot.node_types[0] == 'author'
        store = snapshot.node_stores[0]
        for attr in ('x', 'y', 'batch'):
            assert store[attr].shape == (1,)
        assert len(snapshot.edge_types) == 0
def test_dynamic_hetero_graph_static_signal_batch_assigned():
    """Features, targets and batch ids are assigned to the node store; no edge types exist."""
    dataset = DynamicHeteroGraphStaticSignalBatch(
        [None], [None], {'author': np.array([1])}, [{'author': np.array([2])}], [{'author': np.array([1])}]
    )
    for snapshot in dataset:
        assert snapshot.node_types[0] == 'author'
        store = snapshot.node_stores[0]
        for attr in ('x', 'y', 'batch'):
            assert store[attr].shape == (1,)
        assert len(snapshot.edge_types) == 0
def test_dynamic_hetero_graph_temporal_signal_batch_assigned():
    """Features, targets and batch ids are assigned to the node store; no edge types exist."""
    dataset = DynamicHeteroGraphTemporalSignalBatch(
        [None], [None], [{'author': np.array([1])}], [{'author': np.array([2])}], [{'author': np.array([1])}]
    )
    for snapshot in dataset:
        assert snapshot.node_types[0] == 'author'
        store = snapshot.node_stores[0]
        for attr in ('x', 'y', 'batch'):
            assert store[attr].shape == (1,)
        assert len(snapshot.edge_types) == 0
def test_discrete_train_test_split_dynamic_batch():
    """Both halves of a temporal split iterate repeatably with consistent shapes."""
    snapshot_count, node_count, feature_count, graph_count = 250, 100, 32, 10
    edge_indices, edge_weights, features, targets, batches = generate_signal(
        snapshot_count, node_count, feature_count, graph_count
    )
    dataset = DynamicGraphTemporalSignalBatch(
        edge_indices, edge_weights, features, targets, batches
    )
    train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
    # Iterate each split twice to check the iterators are re-usable.
    for split in (test_dataset, train_dataset):
        for _ in range(2):
            for snapshot in split:
                assert snapshot.edge_index.shape[0] == 2
                assert snapshot.edge_index.shape[1] == snapshot.edge_attr.shape[0]
                assert snapshot.x.shape == (node_count * graph_count, feature_count)
                assert snapshot.y.shape == (node_count * graph_count,)
def test_train_test_split_static_graph_temporal_signal_batch():
snapshot_count = 250
node_count = 100
feature_count = 32
graph_count = 10
edge_indices, edge_weights, features, targets, batches = generate_signal(
snapshot_count, node_count, feature_count, graph_count
)
dataset = StaticGraphTemporalSignalBatch(
edge_indices[0], edge_weights[0], features, targets, batches[0]
)
train_dataset, test_dataset = temporal_signal_split(dataset, 0.8)
for _ in range(2):
for | |
import hashlib
import json
import os
from argparse import ArgumentParser, Namespace
from collections import defaultdict
from copy import deepcopy
from functools import partial
from typing import Dict, List, Optional, Type
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import transformers
from torch import Tensor
from torch.utils.data import ConcatDataset, DataLoader, RandomSampler
from transformers import AutoConfig, AutoModel, AutoTokenizer
import constant
import util
from dataset.base import Dataset
from enumeration import Schedule, Split, Task
from metric import Metric
from model.module import Identity, InputVariationalDropout, MeanPooling, Transformer
class Model(pl.LightningModule):
def __init__(self, hparams):
    """Build the tokenizer, encoder, projector, and bookkeeping state.

    `hparams` may be a Namespace or a plain dict (e.g. on checkpoint
    reload); dicts are converted before use. Statement order matters:
    the seed is fixed before any weights are created.
    """
    super(Model, self).__init__()
    self.optimizer = None
    self.scheduler = None
    # Prototype metric; subclasses set it, setup_metrics clones it per language.
    self._metric: Optional[Metric] = None
    self.metrics: Dict[str, Metric] = dict()
    # Datasets are loaded lazily by the *_dataloader hooks.
    self.trn_datasets: Optional[List[Dataset]] = None
    self.val_datasets: Optional[List[Dataset]] = None
    self.tst_datasets: Optional[List[Dataset]] = None
    self.padding: Dict[str, int] = {}
    self.base_dir: str = ""
    self._batch_per_epoch: int = -1  # computed on first batch_per_epoch access
    self._comparsion: Optional[str] = None  # (sic) comparison mode for model selection
    self._selection_criterion: Optional[str] = None
    if isinstance(hparams, dict):
        hparams = Namespace(**hparams)
    # self.hparams: Namespace = hparams
    self.save_hyperparameters(hparams)
    # Seed everything before building the model for reproducible init.
    pl.seed_everything(hparams.seed)
    self.tokenizer = AutoTokenizer.from_pretrained(hparams.pretrain)
    self.model = self.build_model()
    self.freeze_layers()
    # Per-layer scalar mixing weights (used by process_feature).
    self.weight = nn.Parameter(torch.zeros(self.num_layers))
    self.mapping = None
    if hparams.mapping:
        assert os.path.isfile(hparams.mapping)
        # Pre-trained, frozen per-language feature mapping (see map_feature).
        self.mapping = torch.load(hparams.mapping)
        util.freeze(self.mapping)
    self.projector = self.build_projector()
    self.dropout = InputVariationalDropout(hparams.input_dropout)
def build_model(self):
    """Load the pre-trained encoder, configured to expose all hidden states."""
    config = AutoConfig.from_pretrained(self.hparams.pretrain, output_hidden_states=True)
    return AutoModel.from_pretrained(self.hparams.pretrain, config=config)
def freeze_layers(self):
    """Freeze embeddings plus the bottom `freeze_layer` encoder layers (-1 = none, 0 = embeddings only)."""
    top = self.hparams.freeze_layer
    if top == -1:
        return
    if top >= 0:
        print("freeze embeddings")
        self.freeze_embeddings()
        for layer in range(1, top + 1):
            print(f"freeze layer {layer}")
            self.freeze_layer(layer)
def freeze_embeddings(self):
    """Freeze the embedding tables of the wrapped encoder (BERT/RoBERTa/XLM)."""
    model = self.model
    if isinstance(model, (transformers.BertModel, transformers.RobertaModel)):
        util.freeze(model.embeddings)
    elif isinstance(model, transformers.XLMModel):
        util.freeze(model.position_embeddings)
        # XLM keeps a separate language embedding table when multilingual.
        if model.n_langs > 1 and model.use_lang_emb:
            util.freeze(model.lang_embeddings)
        util.freeze(model.embeddings)
    else:
        raise ValueError("Unsupported model")
def freeze_layer(self, layer):
    """Freeze encoder layer `layer` (1-based; layer 0 is the embeddings)."""
    model = self.model
    if isinstance(model, (transformers.BertModel, transformers.RobertaModel)):
        util.freeze(model.encoder.layer[layer - 1])
    elif isinstance(model, transformers.XLMModel):
        # XLM stores each sub-module of a layer in parallel lists.
        for part in (model.attentions, model.layer_norm1, model.ffns, model.layer_norm2):
            util.freeze(part[layer - 1])
    else:
        raise ValueError("Unsupported model")
@property
def hidden_size(self):
    """Hidden dimension of the wrapped encoder."""
    if isinstance(self.model, (transformers.BertModel, transformers.RobertaModel)):
        return self.model.config.hidden_size
    if isinstance(self.model, transformers.XLMModel):
        return self.model.dim
    raise ValueError("Unsupported model")
@property
def num_layers(self):
    """Number of hidden-state layers, including the embedding output (hence +1)."""
    if isinstance(self.model, (transformers.BertModel, transformers.RobertaModel)):
        return self.model.config.num_hidden_layers + 1
    if isinstance(self.model, transformers.XLMModel):
        return self.model.n_layers + 1
    raise ValueError("Unsupported model")
@property
def batch_per_epoch(self):
    """Number of optimization batches per epoch (ceil of total size / batch size).

    Lazily loads the training datasets on first access and caches the count.
    """
    if self.trn_datasets is None:
        self.trn_datasets = self.prepare_datasets(Split.train)
    if self._batch_per_epoch < 0:
        total_datasize = sum(len(d) for d in self.trn_datasets)
        # Cast to int: np.ceil returns a numpy float, which would violate the
        # `_batch_per_epoch: int` annotation and make
        # `max_epochs * batch_per_epoch` (get_warmup_and_total_steps) a float.
        self._batch_per_epoch = int(np.ceil(total_datasize / self.hparams.batch_size))
    return self._batch_per_epoch
@property
def selection_criterion(self):
    """Metric name used for model selection; must have been set by a subclass."""
    criterion = self._selection_criterion
    assert criterion is not None
    return criterion
@property
def comparsion(self):
    """Comparison mode for the selection criterion; must have been set by a subclass.

    (Name keeps the historical misspelling for caller compatibility.)
    """
    mode = self._comparsion
    assert mode is not None
    return mode
def setup_metrics(self):
    """Clone the prototype metric once per language (train ∪ val ∪ test) and reset."""
    assert self._metric is not None
    hp = self.hparams
    all_langs = set(hp.trn_langs) | set(hp.val_langs) | set(hp.tst_langs)
    for lang in sorted(all_langs):
        self.metrics[lang] = deepcopy(self._metric)
    self.reset_metrics()
def reset_metrics(self):
    """Reset every per-language metric accumulator."""
    for per_lang_metric in self.metrics.values():
        per_lang_metric.reset()
def build_projector(self):
    """Instantiate the sentence projector selected by hparams.projector."""
    kind = self.hparams.projector
    if kind == "id":
        return Identity()
    if kind == "meanpool":
        return MeanPooling()
    if kind == "transformer":
        return Transformer(
            input_dim=self.hidden_size,
            hidden_dim=self.hparams.projector_trm_hidden_size,
            num_heads=self.hparams.projector_trm_num_heads,
            dropout=self.hparams.projector_dropout,
            num_layers=self.hparams.projector_trm_num_layers,
        )
    raise ValueError(kind)
def get_mask(self, sent: Tensor):
    """Attention mask for `sent`: 1 for real tokens, 0 for padding."""
    return (sent != self.tokenizer.pad_token_id).long()
def encode_sent(
    self,
    sent: Tensor,
    langs: Optional[List[str]] = None,
    segment: Optional[Tensor] = None,
    model: Optional[transformers.PreTrainedModel] = None,
    return_raw_hidden_states: bool = False,
):
    """Encode token ids into (mapped, mixed, dropped-out, projected) features.

    Args:
        sent: (batch, seq) token ids.
        langs: per-example language codes; required for XLM language
            embeddings and for the per-language feature mapping.
        segment: optional token-type ids.
        model: encoder to run; defaults to ``self.model``.
        return_raw_hidden_states: if True, return the encoder's raw
            hidden states without mapping/projection/dropout.
    """
    if model is None:
        model = self.model
    mask = self.get_mask(sent)
    # BUG FIX: the RoBERTa check previously inspected `self.model` instead of
    # the (possibly different) `model` argument.
    if isinstance(model, (transformers.BertModel, transformers.RobertaModel)):
        output = model(input_ids=sent, attention_mask=mask, token_type_ids=segment)
    elif isinstance(model, transformers.XLMModel):
        # BUG FIX: default to None; `lang_ids` was previously left unassigned
        # when `langs is None`, raising UnboundLocalError at the call below.
        lang_ids: Optional[torch.Tensor] = None
        if langs is not None:
            try:
                batch_size, seq_len = sent.shape
                lang_ids = torch.tensor(
                    [self.tokenizer.lang2id[lang] for lang in langs],
                    dtype=torch.long,
                    device=sent.device,
                )
                lang_ids = lang_ids.unsqueeze(1).expand(batch_size, seq_len)
            except KeyError as e:
                # Unknown language: fall back to no language embedding.
                print(f"KeyError with missing language {e}")
                lang_ids = None
        output = model(
            input_ids=sent,
            attention_mask=mask,
            langs=lang_ids,
            token_type_ids=segment,
        )
    else:
        raise ValueError("Unsupported model")
    if return_raw_hidden_states:
        return output["hidden_states"]
    hs = self.map_feature(output["hidden_states"], langs)
    hs = self.process_feature(hs)
    hs = self.dropout(hs)
    hs = self.projector(hs, mask)
    return hs
def map_feature(self, hidden_states: List[Tensor], langs):
    """Apply the per-language, per-layer feature mapping when one is configured."""
    if self.mapping is None:
        return hidden_states
    assert len(set(langs)) == 1, "a batch should contain only one language"
    lang = constant.LANGUAGE_TO_ISO639.get(langs[0], langs[0])
    if lang not in self.mapping:
        return hidden_states
    return [m(h) for h, m in zip(hidden_states, self.mapping[lang])]
def process_feature(self, hidden_states: List[Tensor]):
    """Return one hidden layer, or a softmax-weighted mixture of all layers."""
    if not self.hparams.weighted_feature:
        return hidden_states[self.hparams.feature_layer]
    stacked: Tensor = torch.stack(hidden_states)
    layer_weight = F.softmax(self.weight, dim=0).view(-1, 1, 1, 1)
    return (stacked * layer_weight).sum(dim=0)
def evaluation_step_helper(self, batch, prefix) -> Dict[str, Tensor]:
    """Compute per-batch evaluation outputs; implemented by task subclasses.

    `prefix` is "val" or "tst" and names the returned entries.
    """
    raise NotImplementedError
def validation_step(self, batch, batch_idx, dataloader_idx=0):
    # Lightning hook: defer to the task-specific helper with the "val" prefix.
    return self.evaluation_step_helper(batch, "val")
def test_step(self, batch, batch_idx, dataloader_idx=0):
    # Lightning hook: defer to the task-specific helper with the "tst" prefix.
    return self.evaluation_step_helper(batch, "tst")
def training_epoch_end(self, outputs):
    # Lightning hook: no per-epoch aggregation is needed for training.
    return
def aggregate_outputs(
    self, outputs: List[List[Dict[str, Tensor]]], langs: List[str], prefix: str
):
    """Log the mean of each step output per language, plus cross-language averages.

    Keys are de-prefixed of their language tag before averaging across languages.
    """
    assert prefix in ["val", "tst"]
    averaged = defaultdict(list)
    for lang, lang_outputs in zip(langs, outputs):
        for key in lang_outputs[0]:
            value = torch.stack([step[key] for step in lang_outputs]).mean()
            self.log(key, value)
            averaged[key.replace(f"{lang}_", "")].append(value)
    for key, values in averaged.items():
        self.log(key, torch.stack(values).mean())
def aggregate_metrics(self, langs: List[str], prefix: str):
    """Log each language's metric values and their cross-language means."""
    averaged = defaultdict(list)
    for lang in langs:
        for name, value in self.metrics[lang].get_metric().items():
            self.log(f"{prefix}_{lang}_{name}", value)
            averaged[name].append(value)
    for name, values in averaged.items():
        self.log(f"{prefix}_{name}", torch.stack(values).mean())
def validation_epoch_end(self, outputs):
    """Aggregate validation outputs and metrics across languages."""
    langs = self.hparams.val_langs
    # With a single dataloader Lightning passes a flat list; normalize the shape.
    if len(langs) == 1:
        outputs = [outputs]
    self.aggregate_outputs(outputs, langs, "val")
    self.aggregate_metrics(langs, "val")
    return
def test_epoch_end(self, outputs):
    """Aggregate test outputs and metrics across languages."""
    langs = self.hparams.tst_langs
    # With a single dataloader Lightning passes a flat list; normalize the shape.
    if len(langs) == 1:
        outputs = [outputs]
    self.aggregate_outputs(outputs, langs, "tst")
    self.aggregate_metrics(langs, "tst")
    return
def get_warmup_and_total_steps(self):
    """Resolve (warmup_steps, max_steps) from hparams.

    At most one of warmup_steps / warmup_portion may be set (!= -1);
    with neither set, warmup defaults to a single step.
    """
    hp = self.hparams
    if hp.max_steps is not None:
        max_steps = hp.max_steps
    else:
        max_steps = hp.max_epochs * self.batch_per_epoch
    assert not (hp.warmup_steps != -1 and hp.warmup_portion != -1)
    if hp.warmup_steps != -1:
        assert hp.warmup_steps > 0
        warmup_steps = hp.warmup_steps
    elif hp.warmup_portion != -1:
        assert 0 < hp.warmup_portion < 1
        warmup_steps = int(max_steps * hp.warmup_portion)
    else:
        warmup_steps = 1
    return warmup_steps, max_steps
def configure_optimizers(self):
    """Create AdamW with decay / no-decay parameter groups and the LR scheduler."""
    no_decay = ["bias", "LayerNorm.weight"]
    decay_params, no_decay_params = [], []
    for name, param in self.named_parameters():
        if any(marker in name for marker in no_decay):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    optimizer = torch.optim.AdamW(
        [
            {"params": decay_params, "weight_decay": self.hparams.weight_decay},
            {"params": no_decay_params, "weight_decay": 0.0},
        ],
        lr=self.hparams.learning_rate,
        betas=(0.9, self.hparams.adam_beta2),
        eps=self.hparams.adam_eps,
    )
    warmup_steps, max_steps = self.get_warmup_and_total_steps()
    schedule = self.hparams.schedule
    if schedule == Schedule.invsqroot:
        scheduler = util.get_inverse_square_root_schedule_with_warmup(
            optimizer, warmup_steps
        )
        interval = "step"
    elif schedule == Schedule.linear:
        scheduler = util.get_linear_schedule_with_warmup(
            optimizer, warmup_steps, max_steps
        )
        interval = "step"
    elif schedule == Schedule.reduceOnPlateau:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, factor=0.5, patience=0, min_lr=1e-6, mode="min"
        )
        interval = "epoch"
    else:
        raise ValueError(schedule)
    self.optimizer = optimizer
    self.scheduler = scheduler
    scheduler_dict = {"scheduler": scheduler, "interval": interval}
    if schedule == Schedule.reduceOnPlateau:
        # ReduceLROnPlateau requires a monitored quantity.
        scheduler_dict["monitor"] = "val_loss"
    return [optimizer], [scheduler_dict]
def _get_signature(self, params: Dict):
    """Hash a parameter dict into an md5 cache key.

    Tokenizers are hashed by their vocabulary; everything else by str().
    Returns (md5, signature) where signature maps each key to its hashed
    or stringified value.
    """
    def md5_helper(obj):
        return hashlib.md5(str(obj).encode()).hexdigest()

    signature = dict()
    for key, val in params.items():
        is_tokenizer = key == "tokenizer" and isinstance(
            val, transformers.PreTrainedTokenizer
        )
        if is_tokenizer:
            signature[key] = md5_helper(list(val.get_vocab().items()))
        else:
            signature[key] = str(val)
    return md5_helper(list(signature.items())), signature
def prepare_datasets(self, split: str) -> List[Dataset]:
    """Load one dataset per language for `split`; implemented by task
    subclasses (typically via prepare_datasets_helper)."""
    raise NotImplementedError
def prepare_datasets_helper(
    self,
    data_class: Type[Dataset],
    langs: List[str],
    split: str,
    max_len: int,
    **kwargs,
):
    """Instantiate one `data_class` dataset per language, with on-disk caching.

    Cache entries live under hparams.cache_path, keyed by an md5 of the
    construction parameters (see _get_signature). Languages whose data
    file is missing are skipped with a message.
    """
    datasets = []
    for lang in langs:
        filepath = data_class.get_file(self.hparams.data_dir, lang, split)
        if filepath is None:
            print(f"skipping {split} language: {lang}")
            continue
        params = {}
        params["task"] = self.hparams.task
        params["tokenizer"] = self.tokenizer
        params["filepath"] = filepath
        params["lang"] = lang
        params["split"] = split
        params["max_len"] = max_len
        if split == Split.train:
            # Sub-sampling options apply to training data only.
            params["subset_ratio"] = self.hparams.subset_ratio
            params["subset_count"] = self.hparams.subset_count
            params["subset_seed"] = self.hparams.subset_seed
        params.update(kwargs)
        # Signature is computed BEFORE "task" is removed, so the cache key
        # still distinguishes tasks; "task" is then dropped, presumably
        # because data_class does not accept it — confirm against Dataset.
        md5, signature = self._get_signature(params)
        del params["task"]
        cache_file = f"{self.hparams.cache_path}/{md5}"
        if self.hparams.cache_dataset and os.path.isfile(cache_file):
            print(f"load from cache {filepath} with {self.hparams.pretrain}")
            dataset = torch.load(cache_file)
        else:
            dataset = data_class(**params)
            if self.hparams.cache_dataset:
                print(f"save to cache {filepath} with {self.hparams.pretrain}")
                torch.save(dataset, cache_file)
                # Human-readable signature stored next to the cache entry.
                with open(f"{cache_file}.json", "w") as fp:
                    json.dump(signature, fp)
        datasets.append(dataset)
    return datasets
def train_dataloader(self):
    """Single loader over all training languages (concatenated when several)."""
    if self.trn_datasets is None:
        self.trn_datasets = self.prepare_datasets(Split.train)
    collate = partial(util.default_collate, padding=self.padding)
    if len(self.trn_datasets) == 1:
        dataset = self.trn_datasets[0]
        sampler = RandomSampler(dataset)
    else:
        dataset = ConcatDataset(self.trn_datasets)
        # mix_sampling interleaves languages freely; otherwise each batch
        # is drawn from a single constituent dataset.
        sampler = (
            RandomSampler(dataset)
            if self.hparams.mix_sampling
            else util.ConcatSampler(dataset, self.hparams.batch_size)
        )
    return DataLoader(
        dataset,
        batch_size=self.hparams.batch_size,
        sampler=sampler,
        pin_memory=True,
        drop_last=False,
        collate_fn=collate,
        num_workers=1,
    )
def val_dataloader(self):
    """One sequential loader per validation language."""
    if self.val_datasets is None:
        self.val_datasets = self.prepare_datasets(Split.dev)
    collate = partial(util.default_collate, padding=self.padding)
    loaders = []
    for val_dataset in self.val_datasets:
        loaders.append(
            DataLoader(
                val_dataset,
                batch_size=self.hparams.eval_batch_size,
                shuffle=False,
                pin_memory=True,
                drop_last=False,
                collate_fn=collate,
                num_workers=1,
            )
        )
    return loaders
def test_dataloader(self):
    """One sequential loader per test language."""
    if self.tst_datasets is None:
        self.tst_datasets = self.prepare_datasets(Split.test)
    collate = partial(util.default_collate, padding=self.padding)
    loaders = []
    for tst_dataset in self.tst_datasets:
        loaders.append(
            DataLoader(
                tst_dataset,
                batch_size=self.hparams.eval_batch_size,
                shuffle=False,
                pin_memory=True,
                drop_last=False,
                collate_fn=collate,
                num_workers=1,
            )
        )
    return loaders
@classmethod
def add_model_specific_args(cls, parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
# fmt: off
# shared
parser.add_argument("--task", required=True, choices=Task().choices(), type=str)
parser.add_argument("--data_dir", required=True, type=str)
parser.add_argument("--trn_langs", required=True, nargs="+", type=str)
parser.add_argument("--val_langs", required=True, nargs="+", type=str)
parser.add_argument("--tst_langs", default=[], nargs="*", type=str)
parser.add_argument("--max_trn_len", default=128, type=int)
parser.add_argument("--max_tst_len", default=128, type=int)
parser.add_argument("--subset_ratio", default=1.0, type=float)
parser.add_argument("--subset_count", | |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# %%
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import numpy as np
import pandas as pd
import statsmodels.api as sm
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale, StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error, r2_score, roc_auc_score, roc_curve, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
import numpy as np
from sklearn.neighbors import LocalOutlierFactor
from sklearn import neighbors
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn import preprocessing
from sklearn.preprocessing import scale
from sklearn.metrics import mean_squared_log_error
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
import random
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
# Candidate classifier classes to benchmark; they are instantiated lazily
# inside model_karsilastirma. CatBoostClassifier is excluded from the run.
models = [
    LogisticRegression, KNeighborsClassifier, SVC, MLPClassifier,
    DecisionTreeClassifier, RandomForestClassifier, GradientBoostingClassifier,
    XGBClassifier, LGBMClassifier
]  #,CatBoostClassifier
# Pandas display settings: show every column, at most 10 rows, 5-decimal floats.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 10)
pd.set_option('display.float_format', lambda x: '%.5f' % x)
# %% [markdown]
# ## Adding Functions
# %%
def degisken_tiplerine_ayirma(data, cat_th, car_th):
    """Classify the columns of *data* as categoric, numeric, or cardinal.

    Parameters
    ----------
    data: pandas.DataFrame
        Dataset whose columns will be classified.
    cat_th: int
        Numeric columns with fewer than this many unique values are
        treated as categoric.
    car_th: int
        Object columns with more than this many unique values are
        treated as cardinal.

    Returns
    -------
    (cat_deg, num_deg, car_deg): tuple of lists
        Categoric, numeric, and cardinal column names; together they
        cover every column exactly once.
    """
    num_but_cat, car_deg, num_deg, cat_deg = [], [], [], []
    for col in data.columns:
        n_unique = data[col].nunique()
        if data[col].dtypes != "O":
            # Low-cardinality numerics behave like categories.
            if n_unique < cat_th:
                num_but_cat.append(col)
            else:
                num_deg.append(col)
        else:
            if n_unique > car_th:
                car_deg.append(col)
            else:
                cat_deg.append(col)
    cat_deg = cat_deg + num_but_cat
    print(f"Dataset kolon/değişken sayısı: {data.shape[1]}")
    print(f"Dataset satır/veri sayısı: {data.shape[0]}")
    print("********************************************")
    print(f"Datasetin numeric değişken sayısı: {len(num_deg)}")
    print(f"Datasetin numeric değişkenler: {num_deg}")
    print("********************************************")
    print(f"Datasetin categoric değişken sayısı: {len(cat_deg)}")
    print(f"Datasetin categoric değişkenler: {cat_deg}")
    print("********************************************")
    print(f"Datasetin cardinal değişken sayısı: {len(car_deg)}")
    print(f"Datasetin cardinal değişkenler: {car_deg}")
    print("********************************************")
    return cat_deg, num_deg, car_deg
def categoric_ozet(data, degisken, plot=False, null_control=False):
    """Summarize a categoric column: level counts and their percentage share.

    Optionally draws a countplot and reports the column's null count.

    Parameters
    ----------
    data: pandas.DataFrame
        Dataset containing the categoric column.
    degisken: str
        Name of the categoric column.
    plot: bool
        If True, show a seaborn countplot of the column.
    null_control: bool
        If True, print the number of nulls in the column.

    Returns
    -------
    tablo: pandas.DataFrame
        Counts and percentage ratio per unique value.
    """
    # Build the summary once and print it — the original computed
    # value_counts() four times and constructed the same frame twice.
    counts = data[degisken].value_counts()
    tablo = pd.DataFrame({
        degisken: counts,
        "Ratio": 100 * counts / len(data)
    })
    print(tablo)
    print("##########################################")
    if plot:
        sns.countplot(x=data[degisken], data=data)
        plt.show(block=True)
    if null_control:
        print(f"Null veri sayısı: {data[degisken].isnull().sum()}")
    return tablo
def dataset_ozet(data, head=5):
    """Print a quick overview of *data*: shape, dtypes, head/tail rows,
    missing-value counts, selected quantiles, and describe() statistics.

    Parameters
    ----------
    data: pandas.DataFrame
        Dataset to summarize.
    head: int
        Number of rows shown by head()/tail().
    """
    print("##################### Shape #####################")
    print(f"Satır sayısı: {data.shape[0]}")
    print(f"Kolon sayısı: {data.shape[1]}")
    print("##################### Types #####################")
    print(data.dtypes)
    print("##################### Head #####################")
    print(data.head(head))
    print("##################### Tail #####################")
    print(data.tail(head))
    print("##################### NA Kontrolü #####################")
    print(data.isnull().sum())
    print("##################### Quantiles #####################")
    # NOTE(review): assumes all columns are numeric — DataFrame.quantile
    # raises on object columns in recent pandas; confirm before reuse.
    print(data.quantile([0, 0.05, 0.50, 0.95, 0.99, 1]).T)
    print("##################### Describe Tablosu #####################")
    print(data.describe().T)
def outlier_threshold(data, degisken):
    """Return (lower, upper) outlier limits for a column.

    Uses the 1% and 99% quantiles (not the classic 25%/75%) extended by
    1.5x their spread, so the limits are deliberately wide.
    """
    q_low = data[degisken].quantile(0.01)
    q_high = data[degisken].quantile(0.99)
    spread = q_high - q_low
    return q_low - 1.5 * spread, q_high + 1.5 * spread
def threshold_degisimi(data, degisken):
    """Winsorize a column in place: clamp values outside the outlier limits.

    Returns the (mutated) DataFrame for call-chaining convenience.
    """
    alt_limit, ust_limit = outlier_threshold(data, degisken)
    too_low = data[degisken] < alt_limit
    data.loc[too_low, degisken] = alt_limit
    too_high = data[degisken] > ust_limit
    data.loc[too_high, degisken] = ust_limit
    return data
def numeric_ozet(data, degisken, plot=False, null_control=False):
    """Summarize a numeric column via describe() at many quantiles.

    Optionally draws a histogram and reports the column's null count.
    (The previous docstring was copied from categoric_ozet and wrongly
    documented a return value; this function returns nothing.)

    Parameters
    ----------
    data: pandas.DataFrame
        Dataset containing the numeric column.
    degisken: str
        Name of the numeric column.
    plot: bool
        If True, show a 20-bin histogram of the column.
    null_control: bool
        If True, print the number of nulls in the column.

    Returns
    -------
    None
    """
    quantiles = [
        0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99
    ]
    print(data[degisken].describe(quantiles).T)
    if plot:
        data[degisken].hist(bins=20)
        plt.xlabel(degisken)
        plt.title(degisken)
        plt.show(block=True)
    print("##########################################")
    if null_control:
        print(f"Null veri sayısı: {data[degisken].isnull().sum()}")
def missing_values_table(dataframe, na_name=False):
    """Print count and percentage of missing values per affected column.

    When na_name is True, also return the list of affected column names.
    """
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().any()]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] *
             100).sort_values(ascending=False)
    missing_df = pd.concat(
        [n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio']
    )
    print(missing_df, end="\n")
    if na_name:
        return na_columns
def one_hot_encoder(dataframe, categorical_cols, drop_first=True):
    """Dummy-encode the given categorical columns (first level dropped by default)."""
    return pd.get_dummies(dataframe, columns=categorical_cols, drop_first=drop_first)
def model_karsilastirma(df, model, target):
    """Fit *model* (a classifier class) on an 85/15 split and return test accuracy."""
    features = df.drop(columns=target)
    labels = df[target]
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.15, random_state=42
    )
    fitted = model().fit(X_train, y_train)
    acc = accuracy_score(y_test, fitted.predict(X_test))
    print(model, "için sonuç doğruluk değeri:", acc)
    return acc
def target_analyser(dataframe, target, num_deg, cat_deg):
    """Print per-column summaries against the target.

    Categoric columns get count/ratio/target-mean tables; numeric
    columns get their mean per target class.
    """
    for col in dataframe.columns:
        if col in cat_deg:
            print(col, ":", len(dataframe[col].value_counts()))
            summary = pd.DataFrame({
                "COUNT": dataframe[col].value_counts(),
                "RATIO": dataframe[col].value_counts() / len(dataframe),
                "TARGET_MEAN": dataframe.groupby(col)[target].mean()
            })
            print(summary, end="\n\n\n")
        if col in num_deg:
            print(pd.DataFrame(
                {"TARGET_MEAN": dataframe.groupby(target)[col].mean()}),
                end="\n\n\n")
# %% [markdown]
# ## Some image
# 
# %%
# Load the UCI heart-disease dataset.
df = pd.read_csv("../input/heart-disease-uci/heart.csv")
df.head()
# %% [markdown]
# ## Some info
# * age: The person's age in years
# * sex: The person's sex (1 = male, 0 = female)
# * cp: The chest pain experienced (Value 1: typical angina, Value 2: atypical angina, Value 3: non-anginal pain, Value 4: asymptomatic)
# * trestbps: The person's resting blood pressure (mm Hg on admission to the hospital)
# * chol: The person's cholesterol measurement in mg/dl
# * fbs: The person's fasting blood sugar (> 120 mg/dl, 1 = true; 0 = false)
# * restecg: Resting electrocardiographic measurement (0 = normal, 1 = having ST-T wave abnormality, 2 = showing probable or definite left ventricular hypertrophy by Estes' criteria)
# * thalach: The person's maximum heart rate achieved
# * exang: Exercise induced angina (1 = yes; 0 = no)
# * oldpeak: ST depression induced by exercise relative to rest ('ST' relates to positions on the ECG plot. See more here)
# * slope: the slope of the peak exercise ST segment (Value 1: upsloping, Value 2: flat, Value 3: downsloping)
# * ca: The number of major vessels (0-3)
# * thal: A blood disorder called thalassemia (3 = normal; 6 = fixed defect; 7 = reversable defect)
# * target: Heart disease (0 = no, 1 = yes)
# %%
# Analysis of the dataset: overall summary and column typing
# (categoric threshold 10, cardinal threshold 20).
dataset_ozet(df)
cat_deg, num_deg, car_deg = degisken_tiplerine_ayirma(df, 10, 20)
# %%
# EDA of the dataset: per-column summaries with plots and null checks.
for i in cat_deg:
    categoric_ozet(df, i, True, True)
for i in num_deg:
    numeric_ozet(df, i, True, True)
# %%
# All columns analyzed against the target column.
target_analyser(df, "target", num_deg, cat_deg)
# %%
# Fill missing values with the column mean.
null_cols = missing_values_table(df, True)
for i in null_cols:
    df[i].fillna(df[i].transform("mean"), inplace=True)
# There are no missing values in this dataset, so the loop above is a no-op.
# %%
# Outlier processing: winsorize numeric columns to the 1%/99% limits.
for i in num_deg:
    df = threshold_degisimi(df, i)
# %%
# Data extraction: derive an age-bucket feature.
df.age.describe()
df.loc[(df["age"] < 40), 'NEW_AGE_CAT'] = 'Young'
df.loc[(df["age"] >= 40) & (df["age"] < 50), 'NEW_AGE_CAT'] = 'Middle Age'
df.loc[(df["age"] >= 50) & (df["age"] < 60), 'NEW_AGE_CAT'] = 'Pre-Old'
df.loc[(df["age"] >= | |
ii)
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(entityId, "entity_id", ii)
cObj.setValue(asymId, "asym_id", ii)
cObj.setValue(authAsymId, "auth_asym_id", ii)
cObj.setValue(confType, "type", ii)
#
cObj.setValue(str(hId), "feature_id", ii)
cObj.setValue(ssType, "name", ii)
#
cObj.setValue(begSeqId, "feature_positions_beg_seq_id", ii)
cObj.setValue(endSeqId, "feature_positions_end_seq_id", ii)
#
cObj.setValue("PDB entity", "reference_scheme", ii)
cObj.setValue(provCode, "provenance_source", ii)
cObj.setValue(provVer, "assignment_version", ii)
#
ii += 1
#
# ------------------
# Unassigned SS features
unassignedProvD = self.__ssU.getProtUnassignedSecStructProvenance(dataContainer)
unassignedRangeD = self.__ssU.getProtUnassignedSecStructFeatures(dataContainer)
for asymId, rTupL in unassignedRangeD.items():
if not rTupL:
continue
entityId = asymIdD[asymId]
authAsymId = asymAuthIdD[asymId]
cObj.setValue(ii + 1, "ordinal", ii)
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(entityId, "entity_id", ii)
cObj.setValue(asymId, "asym_id", ii)
cObj.setValue(authAsymId, "auth_asym_id", ii)
cObj.setValue("UNASSIGNED_SEC_STRUCT", "type", ii)
#
cObj.setValue(str(1), "feature_id", ii)
cObj.setValue("unassigned secondary structure", "name", ii)
#
cObj.setValue(";".join([str(rTup[0]) for rTup in rTupL]), "feature_positions_beg_seq_id", ii)
cObj.setValue(";".join([str(rTup[1]) for rTup in rTupL]), "feature_positions_end_seq_id", ii)
#
cObj.setValue("PDB entity", "reference_scheme", ii)
cObj.setValue(unassignedProvD["provenance"], "provenance_source", ii)
cObj.setValue(unassignedProvD["version"], "assignment_version", ii)
#
ii += 1
#
cisPeptideD = self.__ssU.getCisPeptides(dataContainer)
for cId, cL in cisPeptideD.items():
for (asymId, begSeqId, endSeqId, modelId, omegaAngle) in cL:
addPropTupL = []
entityId = asymIdD[asymId]
authAsymId = asymAuthIdD[asymId]
cObj.setValue(ii + 1, "ordinal", ii)
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(entityId, "entity_id", ii)
cObj.setValue(asymId, "asym_id", ii)
cObj.setValue(authAsymId, "auth_asym_id", ii)
cObj.setValue("CIS-PEPTIDE", "type", ii)
cObj.setValue(str(cId), "feature_id", ii)
cObj.setValue("cis-peptide", "name", ii)
#
cObj.setValue(begSeqId, "feature_positions_beg_seq_id", ii)
cObj.setValue(endSeqId, "feature_positions_end_seq_id", ii)
#
cObj.setValue("PDB entity", "reference_scheme", ii)
cObj.setValue("PDB", "provenance_source", ii)
cObj.setValue("V1.0", "assignment_version", ii)
tS = "cis-peptide bond in model %d with omega angle %.2f" % (modelId, omegaAngle)
cObj.setValue(tS, "description", ii)
#
addPropTupL.append(("OMEGA_ANGLE", omegaAngle))
cObj.setValue(";".join([str(tup[0]) for tup in addPropTupL]), "additional_properties_name", ii)
cObj.setValue(";".join([str(tup[1]) for tup in addPropTupL]), "additional_properties_values", ii)
#
#
ii += 1
#
targetSiteD = self.__commonU.getTargetSiteInfo(dataContainer)
ligandSiteD = self.__commonU.getLigandSiteInfo(dataContainer)
for tId, tL in targetSiteD.items():
aD = OrderedDict()
for tD in tL:
aD.setdefault(tD["asymId"], []).append((tD["compId"], tD["seqId"]))
for asymId, aL in aD.items():
entityId = asymIdD[asymId]
authAsymId = asymAuthIdD[asymId]
cObj.setValue(ii + 1, "ordinal", ii)
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(entityId, "entity_id", ii)
cObj.setValue(asymId, "asym_id", ii)
cObj.setValue(authAsymId, "auth_asym_id", ii)
cObj.setValue("BINDING_SITE", "type", ii)
cObj.setValue(str(tId), "feature_id", ii)
cObj.setValue("binding_site", "name", ii)
#
cObj.setValue(";".join([tup[0] for tup in aL]), "feature_positions_beg_comp_id", ii)
cObj.setValue(";".join([tup[1] for tup in aL]), "feature_positions_beg_seq_id", ii)
#
cObj.setValue("PDB entity", "reference_scheme", ii)
cObj.setValue("PDB", "provenance_source", ii)
cObj.setValue("V1.0", "assignment_version", ii)
if tId in ligandSiteD:
cObj.setValue(ligandSiteD[tId]["description"], "description", ii)
if ligandSiteD[tId]["siteLabel"]:
cObj.setValue(ligandSiteD[tId]["siteLabel"], "name", ii)
#
ii += 1
#
unObsPolyResRngD = self.__commonU.getUnobservedPolymerResidueInfo(dataContainer)
for (modelId, asymId, zeroOccFlag), rTupL in unObsPolyResRngD.items():
entityId = asymIdD[asymId]
authAsymId = asymAuthIdD[asymId]
cObj.setValue(ii + 1, "ordinal", ii)
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(entityId, "entity_id", ii)
cObj.setValue(asymId, "asym_id", ii)
cObj.setValue(authAsymId, "auth_asym_id", ii)
#
if zeroOccFlag:
cObj.setValue("ZERO_OCCUPANCY_RESIDUE_XYZ", "type", ii)
tS = "All atom coordinates for this residue are reported with zero-occupancy in model %s" % modelId
cObj.setValue(tS, "description", ii)
cObj.setValue("residue coordinates with zero occupancy", "name", ii)
else:
cObj.setValue("UNOBSERVED_RESIDUE_XYZ", "type", ii)
tS = "No coordinates for this residue are reported in model %s" % modelId
cObj.setValue(tS, "description", ii)
cObj.setValue("unmodeled residue", "name", ii)
#
cObj.setValue(str(1), "feature_id", ii)
#
cObj.setValue(";".join([str(rTup[0]) for rTup in rTupL]), "feature_positions_beg_seq_id", ii)
cObj.setValue(";".join([str(rTup[1]) for rTup in rTupL]), "feature_positions_end_seq_id", ii)
#
cObj.setValue("PDB entity", "reference_scheme", ii)
cObj.setValue("PDB", "provenance_source", ii)
cObj.setValue("V1.0", "assignment_version", ii)
#
ii += 1
unObsPolyAtomRngD = self.__commonU.getUnobservedPolymerAtomInfo(dataContainer)
for (modelId, asymId, zeroOccFlag), rTupL in unObsPolyAtomRngD.items():
entityId = asymIdD[asymId]
authAsymId = asymAuthIdD[asymId]
cObj.setValue(ii + 1, "ordinal", ii)
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(entityId, "entity_id", ii)
cObj.setValue(asymId, "asym_id", ii)
cObj.setValue(authAsymId, "auth_asym_id", ii)
#
if zeroOccFlag:
cObj.setValue("ZERO_OCCUPANCY_ATOM_XYZ", "type", ii)
tS = "Some atom coordinates in this residue are reported with zero-occupancy in model %s" % modelId
cObj.setValue(tS, "description", ii)
cObj.setValue("atom coordinates with zero occupancy", "name", ii)
else:
cObj.setValue("UNOBSERVED_ATOM_XYZ", "type", ii)
tS = "Some atom coordinates in this residue are not reported in model %s" % modelId
cObj.setValue(tS, "description", ii)
cObj.setValue("partially modeled residue", "name", ii)
#
cObj.setValue(str(1), "feature_id", ii)
#
cObj.setValue(";".join([str(rTup[0]) for rTup in rTupL]), "feature_positions_beg_seq_id", ii)
cObj.setValue(";".join([str(rTup[1]) for rTup in rTupL]), "feature_positions_end_seq_id", ii)
#
cObj.setValue("PDB entity", "reference_scheme", ii)
cObj.setValue("PDB", "provenance_source", ii)
cObj.setValue("V1.0", "assignment_version", ii)
#
ii += 1
npbD = self.__commonU.getBoundNonpolymersByInstance(dataContainer)
jj = 1
for asymId, rTupL in npbD.items():
for rTup in rTupL:
addPropTupL = []
if rTup.connectType in ["covalent bond"]:
fType = "HAS_COVALENT_LINKAGE"
fId = "COVALENT_LINKAGE_%d" % jj
elif rTup.connectType in ["metal coordination"]:
fType = "HAS_METAL_COORDINATION_LINKAGE"
fId = "METAL_COORDINATION_LINKAGE_%d" % jj
else:
continue
entityId = asymIdD[asymId]
authAsymId = asymAuthIdD[asymId]
cObj.setValue(ii + 1, "ordinal", ii)
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(entityId, "entity_id", ii)
cObj.setValue(asymId, "asym_id", ii)
cObj.setValue(authAsymId, "auth_asym_id", ii)
cObj.setValue(rTup.targetCompId, "comp_id", ii)
cObj.setValue(fId, "feature_id", ii)
cObj.setValue(fType, "type", ii)
#
# ("targetCompId", "connectType", "partnerCompId", "partnerAsymId", "partnerEntityType", "bondDistance", "bondOrder")
cObj.setValue(
";".join(["%s has %s with %s instance %s in model 1" % (rTup.targetCompId, rTup.connectType, rTup.partnerEntityType, rTup.partnerAsymId) for rTup in rTupL]),
"feature_value_details",
ii,
)
# ----
addPropTupL.append(("PARTNER_ASYM_ID", rTup.partnerAsymId))
if rTup.partnerCompId:
addPropTupL.append(("PARTNER_COMP_ID", rTup.partnerCompId))
if rTup.bondDistance:
addPropTupL.append(("PARTNER_BOND_DISTANCE", rTup.bondDistance))
cObj.setValue(";".join([str(tup[0]) for tup in addPropTupL]), "additional_properties_name", ii)
cObj.setValue(";".join([str(tup[1]) for tup in addPropTupL]), "additional_properties_values", ii)
# ----
cObj.setValue(";".join([rTup.partnerCompId if rTup.partnerCompId else "?" for rTup in rTupL]), "feature_value_comp_id", ii)
cObj.setValue(";".join([rTup.bondDistance if rTup.bondDistance else "?" for rTup in rTupL]), "feature_value_reported", ii)
cObj.setValue(";".join(["?" for rTup in rTupL]), "feature_value_reference", ii)
cObj.setValue(";".join(["?" for rTup in rTupL]), "feature_value_uncertainty_estimate", ii)
cObj.setValue(";".join(["?" for rTup in rTupL]), "feature_value_uncertainty_estimate_type", ii)
# ---
cObj.setValue("PDB", "provenance_source", ii)
cObj.setValue("V1.0", "assignment_version", ii)
#
ii += 1
jj += 1
# Glycosylation sites
jj = 1
for asymId, rTupL in npbD.items():
if instTypeD[asymId] not in ["polymer"]:
continue
for rTup in rTupL:
addPropTupL = []
if (rTup.connectType in ["covalent bond"]) and (rTup.role is not None) and (rTup.role not in [".", "?"]):
fType = rTup.role.upper() + "_SITE"
fId = "GLYCOSYLATION_SITE_%d" % jj
else:
continue
entityId = asymIdD[asymId]
authAsymId = asymAuthIdD[asymId]
cObj.setValue(ii + 1, "ordinal", ii)
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(entityId, "entity_id", ii)
cObj.setValue(asymId, "asym_id", ii)
cObj.setValue(authAsymId, "auth_asym_id", ii)
cObj.setValue(rTup.targetCompId, "comp_id", ii)
cObj.setValue(fId, "feature_id", ii)
cObj.setValue(fType, "type", ii)
#
# ("targetCompId", "connectType", "partnerCompId", "partnerAsymId", "partnerEntityType", "bondDistance", "bondOrder")
cObj.setValue(
";".join(["%s has %s site on %s instance %s in model 1" % (rTup.targetCompId, rTup.role, rTup.partnerEntityType, rTup.partnerAsymId) for rTup in rTupL]),
"feature_value_details",
ii,
)
# ----
addPropTupL.append(("PARTNER_ASYM_ID", rTup.partnerAsymId))
if rTup.partnerCompId:
addPropTupL.append(("PARTNER_COMP_ID", rTup.partnerCompId))
if rTup.bondDistance:
addPropTupL.append(("PARTNER_BOND_DISTANCE", rTup.bondDistance))
cObj.setValue(";".join([str(tup[0]) for tup in addPropTupL]), "additional_properties_name", ii)
cObj.setValue(";".join([str(tup[1]) for tup in addPropTupL]), "additional_properties_values", ii)
# ----
cObj.setValue(";".join([rTup.partnerCompId if rTup.partnerCompId else "?" for rTup in rTupL]), "feature_value_comp_id", ii)
cObj.setValue(";".join([rTup.bondDistance if rTup.bondDistance else "?" for rTup in rTupL]), "feature_value_reported", ii)
cObj.setValue(";".join(["?" for rTup in rTupL]), "feature_value_reference", ii)
cObj.setValue(";".join(["?" for rTup in rTupL]), "feature_value_uncertainty_estimate", ii)
cObj.setValue(";".join(["?" for rTup in rTupL]), "feature_value_uncertainty_estimate_type", ii)
# ---
cObj.setValue("PDB", "provenance_source", ii)
cObj.setValue("V1.0", "assignment_version", ii)
#
ii += 1
jj += 1
return True
except Exception as e:
logger.exception("%s %s failing with %s", dataContainer.getName(), catName, str(e))
return False
def addProtSecStructInfo(self, dataContainer, catName, **kwargs):
"""DEPRECATED METHOD - UNLINKED in Dictionary
Add category rcsb_prot_sec_struct_info.
"""
try:
# JDWJDW
logger.info("Starting with %r %r %r", dataContainer.getName(), catName, kwargs)
# Exit if source categories are missing
if not dataContainer.exists("entry") and not (dataContainer.exists("struct_conf") or dataContainer.exists("struct_sheet_range")):
return False
#
# Create the new target category rcsb_prot_sec_struct_info
if not dataContainer.exists(catName):
dataContainer.append(DataCategory(catName, attributeNameList=self.__dApi.getAttributeNameList(catName)))
sD = self.__commonU.getProtSecStructFeaturesAll(dataContainer)
# catName = rcsb_prot_sec_struct_info
cObj = dataContainer.getObj(catName)
#
xObj = dataContainer.getObj("entry")
entryId = xObj.getValue("id", 0)
#
for ii, asymId in enumerate(sD["helixCountD"]):
cObj.setValue(entryId, "entry_id", ii)
cObj.setValue(asymId, "label_asym_id", ii)
#
cObj.setValue(sD["helixCountD"][asymId], "helix_count", ii)
cObj.setValue(sD["sheetStrandCountD"][asymId], "beta_strand_count", ii)
cObj.setValue(sD["unassignedCountD"][asymId], "unassigned_count", ii)
#
cObj.setValue(",".join([str(t) for t in sD["helixLengthD"][asymId]]), "helix_length", ii)
cObj.setValue(",".join([str(t) for t in sD["sheetStrandLengthD"][asymId]]), "beta_strand_length", ii)
cObj.setValue(",".join([str(t) for t in sD["unassignedLengthD"][asymId]]), "unassigned_length", ii)
cObj.setValue("%.2f" % (100.0 * sD["helixFracD"][asymId]), "helix_coverage_percent", ii)
cObj.setValue("%.2f" % (100.0 * sD["sheetStrandFracD"][asymId]), "beta_strand_coverage_percent", ii)
cObj.setValue("%.2f" % (100.0 * sD["unassignedFracD"][asymId]), "unassigned_coverage_percent", ii)
cObj.setValue(",".join(sD["sheetSenseD"][asymId]), "beta_sheet_sense", ii)
cObj.setValue(",".join([str(t) for t in sD["sheetFullStrandCountD"][asymId]]), "beta_sheet_strand_count", ii)
cObj.setValue(sD["featureMonomerSequenceD"][asymId], "feature_monomer_sequence", ii)
cObj.setValue(sD["featureSequenceD"][asymId], "feature_sequence", ii)
return True
except Exception as e:
logger.exception("For %s %r failing with %s", dataContainer.getName(), catName, str(e))
return False
def addConnectionDetails(self, dataContainer, catName, **kwargs):
"""Build rcsb_struct_conn category -
Args:
dataContainer (object): mmcif.api.mmcif.api.DataContainer object instance
catName (str): category name
Returns:
bool: True for success or False otherwise
Example:
loop_
_rcsb_struct_conn.ordinal_id
_rcsb_struct_conn.id
_rcsb_struct_conn.conn_type
_rcsb_struct_conn.connect_target_label_comp_id
_rcsb_struct_conn.connect_target_label_asym_id
_rcsb_struct_conn.connect_target_label_seq_id
_rcsb_struct_conn.connect_target_label_atom_id
_rcsb_struct_conn.connect_target_label_alt_id
_rcsb_struct_conn.connect_target_auth_asym_id
_rcsb_struct_conn.connect_target_auth_seq_id
_rcsb_struct_conn.connect_target_symmetry
_rcsb_struct_conn.connect_partner_label_comp_id
_rcsb_struct_conn.connect_partner_label_asym_id
_rcsb_struct_conn.connect_partner_label_seq_id
_rcsb_struct_conn.connect_partner_label_atom_id
_rcsb_struct_conn.connect_partner_label_alt_id
_rcsb_struct_conn.connect_partner_symmetry
_rcsb_struct_conn.details
# - - - - data truncated for brevity - - - -
"""
try:
logger.debug("Starting with %r %r %r", dataContainer.getName(), catName, kwargs)
# Exit if source categories are missing
if not dataContainer.exists("entry") and not dataContainer.exists("struct_conn"):
return False
#
# Create the new target category rcsb_struct_conn
if not dataContainer.exists(catName):
dataContainer.append(DataCategory(catName, | |
<reponame>BrentG-1849260/PyVoxelizer
import sys
import os
import numpy as np
from ctypes import cdll, Structure, c_float
class Point3(Structure):
    """ctypes mirror of the native library's 3d point struct (three C floats)."""
    # Field order and types must match the C struct layout exactly.
    _fields_ = [
        ("x", c_float),
        ("y", c_float),
        ("z", c_float)
    ]
class Triangle3(Structure):
    """ctypes mirror of the native library's triangle struct (three Point3 vertices)."""
    # Field order must match the C struct layout exactly.
    _fields_ = [
        ("v1", Point3),
        ("v2", Point3),
        ("v3", Point3)
    ]
# Optional native acceleration: try to load a platform-specific shared
# library with the C implementation of the triangle/cube test.  When loading
# fails (or no matching platform/word size), triangle_lib stays None and the
# pure-Python fallback below is used.
triangle_lib = None
script_dir = os.path.dirname(os.path.realpath(__file__))
try:
    # 64-bit Linux (sys.maxsize == 2**63 - 1 identifies a 64-bit build)
    if sys.platform.startswith('linux') and sys.maxsize == 9223372036854775807:
        file_path_library = os.path.join(script_dir, 'triangleCube_linux64.so')
        if os.path.exists(file_path_library):
            triangle_lib = cdll.LoadLibrary(file_path_library)
    # 32-bit Windows builds (sys.maxsize == 2**31 - 1)
    # NOTE(review): the Windows branch loads a '.so' file — presumably the
    # repo ships the library under that name; confirm it is not meant to be
    # 'triangleCube_win32.dll'.
    elif sys.platform.startswith("win") and sys.maxsize == 2147483647:
        file_path_library = os.path.join(script_dir, 'triangleCube_win32.so')
        if os.path.exists(file_path_library):
            triangle_lib = cdll.LoadLibrary(file_path_library)
except OSError:
    triangle_lib = None
"""
Code conversion into python from:
'https://github.com/erich666/GraphicsGems/blob/master/gemsiii/triangleCube.c'
"""
# Return codes used throughout this module.
INSIDE = 0
OUTSIDE = 1
# Tolerance used by sign3 and the bounding-box test to absorb float noise.
EPS = 1e-5
# EPS = 0.0
# print(EPS)
def cross_product(a, b):
    """Return the cross product ``a x b`` of two 3d vectors as a tuple."""
    ax, ay, az = a[0], a[1], a[2]
    bx, by, bz = b[0], b[1], b[2]
    return (
        ay * bz - az * by,
        az * bx - ax * bz,
        ax * by - ay * bx)
def sign3(point):
    """Encode the signs of a 3d point's components as a 6-bit integer.

    Each component contributes a "negative side" bit when it is below EPS
    and a "positive side" bit when it is above -EPS; components within the
    EPS band set both of their bits.  Bits: x -> 4/32, y -> 2/16, z -> 1/8.
    """
    sign_code = 0
    x, y, z = point[0], point[1], point[2]
    if x < EPS:
        sign_code |= 4
    if x > -EPS:
        sign_code |= 32
    if y < EPS:
        sign_code |= 2
    if y > -EPS:
        sign_code |= 16
    if z < EPS:
        sign_code |= 1
    if z > -EPS:
        sign_code |= 8
    return sign_code
def lerp(alpha, a, b):
    """Linearly interpolate between *a* and *b*; alpha=0 gives a, alpha=1 gives b."""
    return a + (b - a) * alpha
class Triangle(object):
    """Mutable triangle holding three vertices.

    @type v1: numpy.ndarray
    @type v2: numpy.ndarray
    @type v3: numpy.ndarray
    """

    def __init__(self):
        """Create an empty triangle; vertices default to the placeholder 0."""
        self.v1 = 0
        self.v2 = 0
        self.v3 = 0

    def set(self, vertex_1, vertex_2, vertex_3):
        """Assign all three vertices at once.

        @type vertex_1: numpy.ndarray
        @type vertex_2: numpy.ndarray
        @type vertex_3: numpy.ndarray
        """
        self.v1, self.v2, self.v3 = vertex_1, vertex_2, vertex_3

    def min(self, index):
        """Return the smallest coordinate among the vertices at *index*."""
        return min(self.v1[index], self.v2[index], self.v3[index])

    def max(self, index):
        """Return the largest coordinate among the vertices at *index*."""
        return max(self.v1[index], self.v2[index], self.v3[index])
def vertexes_to_c_triangle(vertex_1, vertex_2, vertex_3):
    """Pack three vertex triples into a ctypes Triangle3 structure."""
    points = [Point3(v[0], v[1], v[2]) for v in (vertex_1, vertex_2, vertex_3)]
    return Triangle3(points[0], points[1], points[2])
def face_plane(point):
    """
    Which of the six face-plane(s) is point P outside of?

    Two bits per axis: 0x01/0x02 for x, 0x04/0x08 for y, 0x10/0x20 for z
    (low bit = beyond +0.5, high bit = beyond -0.5).

    @type point: numpy.ndarray | (float, float, float)
    """
    face_plane_code = 0
    for axis in range(3):
        if point[axis] >= .5:
            face_plane_code |= 0x01 << (2 * axis)
        if point[axis] < -.5:
            face_plane_code |= 0x02 << (2 * axis)
    return face_plane_code
def bevel_2d(point):
    """
    Which of the twelve edge plane(s) is point P outside of?

    Four planes per axis pair (xy, xz, yz) in the order ++, +-, -+, --;
    the comparison is >= when the first coefficient is positive and strict >
    when it is negative, matching the original C implementation.
    """
    edge_plane_code = 0
    bit = 0x001
    for i, j in ((0, 1), (0, 2), (1, 2)):
        for si, sj in ((1, 1), (1, -1), (-1, 1), (-1, -1)):
            value = si * point[i] + sj * point[j]
            if (value >= 1.0) if si == 1 else (value > 1.0):
                edge_plane_code |= bit
            bit <<= 1
    return edge_plane_code
def bevel_3d(point):
"""
Which of the eight corner plane(s) is point P outside of?
"""
corner_plane_code = 0
if (point[0] + point[1] + point[2]) >= 1.5:
corner_plane_code |= 0x01
if (point[0] + point[1] - point[2]) >= 1.5:
corner_plane_code |= 0x02
if (point[0] - point[1] + point[2]) >= 1.5:
corner_plane_code |= 0x04
if (point[0] - point[1] - point[2]) >= 1.5:
corner_plane_code |= 0x08
if (-point[0] + point[1] + point[2]) > 1.5:
corner_plane_code |= 0x10
if (-point[0] + point[1] - point[2]) > 1.5:
corner_plane_code |= 0x20
if (-point[0] - point[1] + point[2]) > 1.5:
corner_plane_code |= 0x40
if (-point[0] - point[1] - point[2]) > 1.5:
corner_plane_code |= 0x80
return corner_plane_code
def check_point(point_a, point_b, alpha, mask):
    """
    Test the point "alpha" of the way from P1 to P2.
    See whether it lies on a face of the cube, considering only the
    faces selected by "mask".
    """
    interpolated = tuple(
        lerp(alpha, point_a[axis], point_b[axis]) for axis in range(3))
    return face_plane(interpolated) & mask
def check_line(point_a, point_b, outcode_diff):
    """
    Compute the intersection of the P1 --> P2 segment with the cube face
    planes flagged in "outcode_diff", and test each hit point to see if it
    lies on the cube face (only face planes in "outcode_diff" are
    considered).  Returns INSIDE on the first hit, otherwise OUTSIDE.
    """
    # (outcode bit, axis, plane offset, face mask excluding that plane)
    face_checks = (
        (0x01, 0, 0.5, 0x3e),
        (0x02, 0, -0.5, 0x3d),
        (0x04, 1, 0.5, 0x3b),
        (0x08, 1, -0.5, 0x37),
        (0x10, 2, 0.5, 0x2f),
        (0x20, 2, -0.5, 0x1f),
    )
    for bit, axis, plane, mask in face_checks:
        if (bit & outcode_diff) == 0:
            continue
        alpha = (plane - point_a[axis]) / (point_b[axis] - point_a[axis])
        if check_point(point_a, point_b, alpha, mask) == INSIDE:
            return INSIDE
    return OUTSIDE
def point_triangle_intersection(p, t):
    """
    Test if 3D point is inside 3D triangle.

    @type p: list[float]
    @type t: Triangle
    """
    # Quick bounding-box rejection first, padded by EPS to absorb
    # floating point noise: if P is outside the triangle bbox there
    # cannot be an intersection.
    for axis in range(3):
        if p[axis] > t.max(axis) + EPS:
            return OUTSIDE
        if p[axis] < t.min(axis) - EPS:
            return OUTSIDE
    # For each triangle side, take the cross product of the side vector
    # with the vector from a side vertex to P.  The component signs of
    # that cross product indicate on which side of the edge P lies.
    sign_codes = []
    for vert_a, vert_b in ((t.v1, t.v2), (t.v2, t.v3), (t.v3, t.v1)):
        side_vec = np.subtract(vert_a, vert_b)
        to_point = np.subtract(vert_a, p)
        sign_codes.append(sign3(cross_product(side_vec, to_point)))
    # P is inside only if all three sign codes agree in at least one
    # component sign; it cannot be outside all three sides at once.
    if (sign_codes[0] & sign_codes[1] & sign_codes[2]) == 0:
        return OUTSIDE
    return INSIDE
def t_c_intersection(triangle):
"""
/**********************************************/
/* This is the main algorithm procedure. */
/* Triangle t is compared with a unit cube, */
/* centered on the origin. */
/* It returns INSIDE (0) or OUTSIDE(1) if t */
/* intersects or does not intersect the cube. */
/**********************************************/
@type triangle: Triangle
"""
# long v1_test,v2_test,v3_test;
# float d,denom;
# Point3 vect12,vect13,norm;
# Point3 hitpp,hitpn,hitnp,hitnn;
# /* First compare all three vertexes with all six face-planes */
# /* If any vertex is inside the cube, return immediately! */
v1_test = face_plane(triangle.v1)
v2_test = face_plane(triangle.v2)
v3_test = face_plane(triangle.v3)
if v1_test == INSIDE:
return INSIDE
if v2_test == INSIDE:
return INSIDE
if v3_test == INSIDE:
return INSIDE
# /* If all three vertexes were outside of one or more face-planes, */
# /* return immediately with a trivial rejection! */
if (v1_test & v2_test & v3_test) != INSIDE:
return OUTSIDE
# /* Now do the same trivial rejection test for the 12 edge planes */
v1_test |= bevel_2d(triangle.v1) << 8
v2_test |= bevel_2d(triangle.v2) << 8
v3_test |= bevel_2d(triangle.v3) << 8
if (v1_test & v2_test & v3_test) != INSIDE:
return OUTSIDE
# /* Now do the same trivial rejection test for the 8 corner planes */
| |
<gh_stars>0
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class VolumeAttachment(object):
"""
A base object for all types of attachments between a storage volume and an instance.
For specific details about iSCSI attachments, see
:class:`IScsiVolumeAttachment`.
For general information about volume attachments, see
`Overview of Block Volume Storage`__.
**Warning:** Oracle recommends that you avoid using any confidential information when you
supply string values using the API.
__ https://docs.cloud.oracle.com/Content/Block/Concepts/overview.htm
"""
#: A constant which can be used with the lifecycle_state property of a VolumeAttachment.
#: This constant has a value of "ATTACHING"
LIFECYCLE_STATE_ATTACHING = "ATTACHING"
#: A constant which can be used with the lifecycle_state property of a VolumeAttachment.
#: This constant has a value of "ATTACHED"
LIFECYCLE_STATE_ATTACHED = "ATTACHED"
#: A constant which can be used with the lifecycle_state property of a VolumeAttachment.
#: This constant has a value of "DETACHING"
LIFECYCLE_STATE_DETACHING = "DETACHING"
#: A constant which can be used with the lifecycle_state property of a VolumeAttachment.
#: This constant has a value of "DETACHED"
LIFECYCLE_STATE_DETACHED = "DETACHED"
def __init__(self, **kwargs):
"""
Initializes a new VolumeAttachment object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.core.models.IScsiVolumeAttachment`
* :class:`~oci.core.models.EmulatedVolumeAttachment`
* :class:`~oci.core.models.ParavirtualizedVolumeAttachment`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param attachment_type:
The value to assign to the attachment_type property of this VolumeAttachment.
:type attachment_type: str
:param availability_domain:
The value to assign to the availability_domain property of this VolumeAttachment.
:type availability_domain: str
:param compartment_id:
The value to assign to the compartment_id property of this VolumeAttachment.
:type compartment_id: str
:param device:
The value to assign to the device property of this VolumeAttachment.
:type device: str
:param display_name:
The value to assign to the display_name property of this VolumeAttachment.
:type display_name: str
:param id:
The value to assign to the id property of this VolumeAttachment.
:type id: str
:param instance_id:
The value to assign to the instance_id property of this VolumeAttachment.
:type instance_id: str
:param is_read_only:
The value to assign to the is_read_only property of this VolumeAttachment.
:type is_read_only: bool
:param is_shareable:
The value to assign to the is_shareable property of this VolumeAttachment.
:type is_shareable: bool
:param lifecycle_state:
The value to assign to the lifecycle_state property of this VolumeAttachment.
Allowed values for this property are: "ATTACHING", "ATTACHED", "DETACHING", "DETACHED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param time_created:
The value to assign to the time_created property of this VolumeAttachment.
:type time_created: datetime
:param volume_id:
The value to assign to the volume_id property of this VolumeAttachment.
:type volume_id: str
:param is_pv_encryption_in_transit_enabled:
The value to assign to the is_pv_encryption_in_transit_enabled property of this VolumeAttachment.
:type is_pv_encryption_in_transit_enabled: bool
"""
self.swagger_types = {
'attachment_type': 'str',
'availability_domain': 'str',
'compartment_id': 'str',
'device': 'str',
'display_name': 'str',
'id': 'str',
'instance_id': 'str',
'is_read_only': 'bool',
'is_shareable': 'bool',
'lifecycle_state': 'str',
'time_created': 'datetime',
'volume_id': 'str',
'is_pv_encryption_in_transit_enabled': 'bool'
}
self.attribute_map = {
'attachment_type': 'attachmentType',
'availability_domain': 'availabilityDomain',
'compartment_id': 'compartmentId',
'device': 'device',
'display_name': 'displayName',
'id': 'id',
'instance_id': 'instanceId',
'is_read_only': 'isReadOnly',
'is_shareable': 'isShareable',
'lifecycle_state': 'lifecycleState',
'time_created': 'timeCreated',
'volume_id': 'volumeId',
'is_pv_encryption_in_transit_enabled': 'isPvEncryptionInTransitEnabled'
}
self._attachment_type = None
self._availability_domain = None
self._compartment_id = None
self._device = None
self._display_name = None
self._id = None
self._instance_id = None
self._is_read_only = None
self._is_shareable = None
self._lifecycle_state = None
self._time_created = None
self._volume_id = None
self._is_pv_encryption_in_transit_enabled = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['attachmentType']
if type == 'iscsi':
return 'IScsiVolumeAttachment'
if type == 'emulated':
return 'EmulatedVolumeAttachment'
if type == 'paravirtualized':
return 'ParavirtualizedVolumeAttachment'
else:
return 'VolumeAttachment'
@property
def attachment_type(self):
"""
**[Required]** Gets the attachment_type of this VolumeAttachment.
The type of volume attachment.
:return: The attachment_type of this VolumeAttachment.
:rtype: str
"""
return self._attachment_type
@attachment_type.setter
def attachment_type(self, attachment_type):
"""
Sets the attachment_type of this VolumeAttachment.
The type of volume attachment.
:param attachment_type: The attachment_type of this VolumeAttachment.
:type: str
"""
self._attachment_type = attachment_type
@property
def availability_domain(self):
"""
**[Required]** Gets the availability_domain of this VolumeAttachment.
The availability domain of an instance.
Example: `Uocm:PHX-AD-1`
:return: The availability_domain of this VolumeAttachment.
:rtype: str
"""
return self._availability_domain
@availability_domain.setter
def availability_domain(self, availability_domain):
"""
Sets the availability_domain of this VolumeAttachment.
The availability domain of an instance.
Example: `Uocm:PHX-AD-1`
:param availability_domain: The availability_domain of this VolumeAttachment.
:type: str
"""
self._availability_domain = availability_domain
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this VolumeAttachment.
The OCID of the compartment.
:return: The compartment_id of this VolumeAttachment.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this VolumeAttachment.
The OCID of the compartment.
:param compartment_id: The compartment_id of this VolumeAttachment.
:type: str
"""
self._compartment_id = compartment_id
@property
def device(self):
"""
Gets the device of this VolumeAttachment.
The device name.
:return: The device of this VolumeAttachment.
:rtype: str
"""
return self._device
@device.setter
def device(self, device):
"""
Sets the device of this VolumeAttachment.
The device name.
:param device: The device of this VolumeAttachment.
:type: str
"""
self._device = device
@property
def display_name(self):
"""
Gets the display_name of this VolumeAttachment.
A user-friendly name. Does not have to be unique, and it cannot be changed.
Avoid entering confidential information.
Example: `My volume attachment`
:return: The display_name of this VolumeAttachment.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this VolumeAttachment.
A user-friendly name. Does not have to be unique, and it cannot be changed.
Avoid entering confidential information.
Example: `My volume attachment`
:param display_name: The display_name of this VolumeAttachment.
:type: str
"""
self._display_name = display_name
@property
def id(self):
"""
**[Required]** Gets the id of this VolumeAttachment.
The OCID of the volume attachment.
:return: The id of this VolumeAttachment.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this VolumeAttachment.
The OCID of the volume attachment.
:param id: The id of this VolumeAttachment.
:type: str
"""
self._id = id
@property
def instance_id(self):
"""
**[Required]** Gets the instance_id of this VolumeAttachment.
The OCID of the instance the volume is attached to.
:return: The instance_id of this VolumeAttachment.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""
Sets the instance_id of this VolumeAttachment.
The OCID of the instance the volume is attached to.
:param instance_id: The instance_id of this VolumeAttachment.
:type: str
"""
self._instance_id = instance_id
@property
def is_read_only(self):
"""
Gets the is_read_only of this VolumeAttachment.
Whether the attachment was created in read-only mode.
:return: The is_read_only of this VolumeAttachment.
:rtype: bool
"""
return self._is_read_only
@is_read_only.setter
def is_read_only(self, is_read_only):
"""
Sets the is_read_only of this VolumeAttachment.
Whether the attachment was created in read-only mode.
:param is_read_only: The is_read_only of this VolumeAttachment.
:type: bool
"""
self._is_read_only = is_read_only
@property
def is_shareable(self):
"""
Gets the is_shareable of this VolumeAttachment.
Whether the attachment should be created in shareable mode. If an attachment is created in shareable mode, then other instances can attach the same volume, provided that they also create their attachments in shareable mode. Only certain volume types can be attached in shareable mode. Defaults to false if not specified.
:return: The is_shareable of this VolumeAttachment.
:rtype: bool
"""
return self._is_shareable
@is_shareable.setter
def is_shareable(self, is_shareable):
"""
Sets the is_shareable of this VolumeAttachment.
Whether the attachment should be created in shareable mode. If an attachment is created in shareable mode, then other instances can attach the same volume, provided that they also create their attachments in shareable mode. Only certain volume types | |
<filename>sysinv/sysinv/sysinv/sysinv/puppet/kubernetes.py
#
# Copyright (c) 2018-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from __future__ import absolute_import
from eventlet.green import subprocess
import json
import keyring
import netaddr
import os
import random
import re
import tempfile
from oslo_log import log as logging
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import kubernetes
from sysinv.common import utils
from sysinv.common import device as dconstants
from sysinv import objects
from sysinv.puppet import base
from sysinv.puppet import interface
# Module-level logger for this puppet plugin.
LOG = logging.getLogger(__name__)
# Offset aligns with kubeadm DNS IP allocation scheme:
# kubenetes/cmd/kubeadm/app/constants/constants.go:GetDNSIP
# (the DNS service IP is the service subnet base address + this offset)
CLUSTER_SERVICE_DNS_IP_OFFSET = 10
# certificate keyring params: service/user under which the shared kubeadm
# certificate encryption key is persisted between runs
CERTIFICATE_KEY_SERVICE = "kubernetes"
CERTIFICATE_KEY_USER = "certificate-key"
# kubeadm configuration option pointing kubeadm at the admin kubeconfig
KUBECONFIG = "--kubeconfig=%s" % kubernetes.KUBERNETES_ADMIN_CONF
# kubernetes root CA certificate params: the cert-manager namespace and
# secret name holding the root CA cert/key
KUBE_ROOTCA_CERT_NS = 'deployment'
KUBE_ROOTCA_CERT_SECRET = 'system-kube-rootca-certificate'
class KubernetesPuppet(base.BasePuppet):
"""Class to encapsulate puppet operations for kubernetes configuration"""
ETCD_SERVICE_PORT = '2379'
def __init__(self, *args, **kwargs):
super(KubernetesPuppet, self).__init__(*args, **kwargs)
self._kube_operator = kubernetes.KubeOperator()
def get_system_config(self):
config = {}
config.update(
{'platform::kubernetes::params::enabled': True,
'platform::kubernetes::params::service_domain':
self._get_dns_service_domain(),
'platform::kubernetes::params::dns_service_ip':
self._get_dns_service_ip(),
'platform::kubernetes::params::upgrade_to_version':
self._get_kubernetes_upgrade_to_version(),
})
return config
def get_host_config(self, host):
config = {}
# Update node configuration for host
config.update(self._get_host_node_config(host))
# Retrieve labels for this host
config.update(self._get_host_label_config(host))
# Update cgroup resource controller parameters for this host
config.update(self._get_host_k8s_cgroup_config(host))
# Update PCI device plugin parameters for this host
config.update(self._get_host_pcidp_config(host))
# Generate the token and join command for this host.
config.update(self._get_host_join_command(host))
# Get the kubernetes version for this host
config.update(self._get_kubernetes_version(host))
# Get kubernetes certificates config for this host
config.update(self._get_host_k8s_certificates_config(host))
# Get the kubernetes version for this host
config.update(self._get_kubeadm_kubelet_version(host))
return config
def get_host_config_upgrade(self, host):
"""Updates the config for upgrade with updated kubernetes params
:param host: host object
"""
config = {}
# Generate the join command for this host
config.update(self._get_host_join_command(host))
# Get the kubernetes version
config.update(self._get_active_kubernetes_version())
LOG.info("get_host_config_upgrade kubernetes config=%s" % config)
return config
def get_secure_static_config(self):
"""Update the hiera configuration to add certificate-key"""
key = keyring.get_password(CERTIFICATE_KEY_SERVICE,
CERTIFICATE_KEY_USER)
if not key:
key = '{:064x}'.format(random.getrandbits(8 * 32))
keyring.set_password(CERTIFICATE_KEY_SERVICE,
CERTIFICATE_KEY_USER, key)
LOG.info('storing kubernetes_kubeadm_certificate_key')
config = {}
config.update({
'kubernetes::kubeadm::certificate-key': key,
})
return config
def get_secure_system_config(self):
"""Update the hiera configuration secure data"""
config = {}
cert, key = self._get_kubernetes_rootca_cert_key()
config.update({
'platform::kubernetes::params::rootca_cert': cert,
'platform::kubernetes::params::rootca_key': key,
})
secret_list = [constants.KUBE_ADMIN_CERT]
cert_data = self._get_kubernetes_components_cert_and_key(secret_list)
config.update({
'platform::kubernetes::params::admin_cert': cert_data[constants.KUBE_ADMIN_CERT][0],
'platform::kubernetes::params::admin_key': cert_data[constants.KUBE_ADMIN_CERT][1],
})
return config
@staticmethod
def _get_kubernetes_rootca_cert_key():
""""Get kubernetes root CA certficate secret from cert-manager"""
try:
kube_operator = kubernetes.KubeOperator()
secret = kube_operator.kube_get_secret(KUBE_ROOTCA_CERT_SECRET,
KUBE_ROOTCA_CERT_NS)
# The root CA cert/key are not stored in kubernetes yet
if not secret:
return 'undef', 'undef'
if hasattr(secret, 'data') and secret.data:
cert = secret.data.get('tls.crt', None)
key = secret.data.get('tls.key', None)
if cert and key:
return cert, key
raise Exception('Failed to get secret %s\\%s' % (
KUBE_ROOTCA_CERT_NS, KUBE_ROOTCA_CERT_SECRET))
except exception.KubeNotConfigured:
# During ansible bootstrap, kubernetes is not configured.
# Set the cert and key to 'undef'
return 'undef', 'undef'
@staticmethod
def _get_kubernetes_components_cert_and_key(secret_names):
""""Get kubernetes components certficates from secrets issued by
cert-manager Certificate resource.
"""
certificate_dict = {}
kube_operator = kubernetes.KubeOperator()
for secret_name in secret_names:
try:
secret = kube_operator.kube_get_secret(secret_name,
KUBE_ROOTCA_CERT_NS)
# The respective cert/key are not stored in kubernetes yet
if not secret:
certificate_dict[secret_name] = 'undef', 'undef'
if hasattr(secret, 'data') and secret.data:
cert = secret.data.get('tls.crt', None)
key = secret.data.get('tls.key', None)
if cert and key:
certificate_dict[secret_name] = cert, key
except exception.KubeNotConfigured:
# During ansible bootstrap, kubernetes is not configured.
# Set the cert and key to 'undef'
certificate_dict[secret_name] = 'undef', 'undef'
return certificate_dict
@staticmethod
def _get_active_kubernetes_version():
"""Get the active kubernetes version
"""
# During a platform upgrade, the version is still None
# when N+1 controller-1 is creating hieradata.
# The version is updated from the running kubernetes version.
config = {}
kube_operator = kubernetes.KubeOperator()
kube_version = kube_operator.kube_get_kubernetes_version()
config.update({
'platform::kubernetes::params::version': kube_version,
})
return config
def _get_host_join_command(self, host):
config = {}
if not utils.is_initial_config_complete():
return config
join_cmd = self._get_kubernetes_join_cmd(host)
config.update({'platform::kubernetes::params::join_cmd': join_cmd})
return config
def _get_kubernetes_join_cmd(self, host):
# The token expires after 24 hours and is needed for a reinstall.
# The puppet manifest handles the case where the node already exists.
try:
join_cmd_additions = ''
if host.personality == constants.CONTROLLER:
# Upload the certificates used during kubeadm join
# The cert key will be printed in the last line of the output
# We will create a temp file with the kubeadm config
# We need this because the kubeadm config could have changed
# since bootstrap. Reading the kubeadm config each time
# it is needed ensures we are not using stale data
fd, temp_kubeadm_config_view = tempfile.mkstemp(
dir='/tmp', suffix='.yaml')
with os.fdopen(fd, 'w') as f:
cmd = ['kubeadm', KUBECONFIG, 'config', 'view']
subprocess.check_call(cmd, stdout=f) # pylint: disable=not-callable
# We will use a custom key to encrypt kubeadm certificates
# to make sure all hosts decrypt using the same key
key = str(keyring.get_password(CERTIFICATE_KEY_SERVICE,
CERTIFICATE_KEY_USER))
with open(temp_kubeadm_config_view, "a") as f:
f.write("---\r\napiVersion: kubeadm.k8s.io/v1beta2\r\n"
"kind: InitConfiguration\r\ncertificateKey: "
"{}".format(key))
cmd = ['kubeadm', 'init', 'phase', 'upload-certs',
'--upload-certs', '--config',
temp_kubeadm_config_view]
subprocess.check_call(cmd) # pylint: disable=not-callable
join_cmd_additions = \
" --control-plane --certificate-key %s" % key
os.unlink(temp_kubeadm_config_view)
# Configure the IP address of the API Server for the controller host.
# If not set the default network interface will be used, which does not
# ensure it will be the Cluster IP address of this host.
host_cluster_ip = self._get_host_cluster_address(host)
join_cmd_additions += \
" --apiserver-advertise-address %s" % host_cluster_ip
cmd = ['kubeadm', KUBECONFIG, 'token', 'create', '--print-join-command',
'--description', 'Bootstrap token for %s' % host.hostname]
join_cmd = subprocess.check_output(cmd, universal_newlines=True) # pylint: disable=not-callable
join_cmd_additions += \
" --cri-socket /var/run/containerd/containerd.sock"
join_cmd = join_cmd.strip() + join_cmd_additions
LOG.info('get_kubernetes_join_cmd join_cmd=%s' % join_cmd)
except Exception:
LOG.exception("Exception generating bootstrap token")
raise exception.SysinvException(
'Failed to generate bootstrap token')
return join_cmd
def _get_etcd_endpoint(self):
addr = self._format_url_address(self._get_cluster_host_address())
protocol = "http"
url = "%s://%s:%s" % (protocol, str(addr), str(self.ETCD_SERVICE_PORT))
return url
def _get_pod_network_cidr(self):
return self._get_network_config(constants.NETWORK_TYPE_CLUSTER_POD)
def _get_pod_network_ipversion(self):
subnet = netaddr.IPNetwork(self._get_pod_network_cidr())
return subnet.version
def _get_cluster_service_subnet(self):
return self._get_network_config(constants.NETWORK_TYPE_CLUSTER_SERVICE)
def _get_network_config(self, networktype):
try:
network = self.dbapi.network_get_by_type(networktype)
except exception.NetworkTypeNotFound:
# network not configured
return {}
address_pool = self.dbapi.address_pool_get(network.pool_uuid)
subnet = str(address_pool.network) + '/' + str(address_pool.prefix)
return subnet
def _get_dns_service_domain(self):
# Setting this to a constant for now. Will be configurable later
return constants.DEFAULT_DNS_SERVICE_DOMAIN
def _get_dns_service_ip(self):
subnet = netaddr.IPNetwork(self._get_cluster_service_subnet())
return str(subnet[CLUSTER_SERVICE_DNS_IP_OFFSET])
def _get_kubernetes_upgrade_to_version(self):
try:
# Get the kubernetes upgrade record
kube_upgrade_obj = self.dbapi.kube_upgrade_get_one()
except exception.NotFound:
# No upgrade is in progress
return None
else:
return kube_upgrade_obj.to_version
def _get_kubernetes_version(self, host):
config = {}
# Get the kubernetes upgrade record for this host
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
self.context, host.id)
version = kube_host_upgrade_obj.target_version
if version is None:
# The target version is not set if an upgrade hasn't been started,
# so get the running kubernetes version.
try:
version = self._kube_operator.kube_get_kubernetes_version()
except Exception:
# During initial installation of the first controller,
# kubernetes may not be running yet. In that case, none of the
# puppet manifests being applied will need the kubernetes
# version.
LOG.warning("Unable to retrieve kubernetes version")
config.update({'platform::kubernetes::params::version': version})
return config
    def _get_kubeadm_kubelet_version(self, host):
        """Return hiera config with the kubeadm and kubelet versions for *host*.

        kubeadm and the default kubelet version are system-wide (from the
        database); during a kubernetes upgrade the kubelet version for this
        host follows the host's upgrade status instead.

        :param host: host object
        :returns: dict with kubeadm_version and kubelet_version parameters
        :raises KubeVersionUnavailable: when the versions cannot be read
        """
        config = {}
        kubeadm_version = None
        kubelet_version = None
        kube_upgrade_state = None
        # Grab the upgrade state if any.
        try:
            kube_upgrade_obj = objects.kube_upgrade.get_one(
                self.context)
            kube_upgrade_state = kube_upgrade_obj.state
        except exception.NotFound:
            # No upgrade in progress; fall through with state None.
            pass
        try:
            kube_version = self.dbapi.kube_cmd_version_get()
            # kubeadm version is system-wide
            kubeadm_version = kube_version.kubeadm_version
            # default kubelet version is system-wide
            kubelet_version = kube_version.kubelet_version
            # If there's a k8s upgrade in progress the kubelet version
            # is determined by the upgrade state of the host.
            if kube_upgrade_state:
                kube_host_upgrade = objects.kube_host_upgrade.get_by_host_id(
                    self.context, host.id)
                if kube_host_upgrade.status in [
                        kubernetes.KUBE_HOST_UPGRADING_KUBELET,
                        kubernetes.KUBE_HOST_UPGRADED_KUBELET]:
                    # Target versions are stored with a 'v' prefix; hiera
                    # expects the bare version string.
                    kubelet_version = kube_host_upgrade.target_version.lstrip('v')
            config.update({'platform::kubernetes::params::kubeadm_version': kubeadm_version})
            config.update({'platform::kubernetes::params::kubelet_version': kubelet_version})
        except Exception:
            # Any failure here (DB or upgrade record access) is fatal for
            # hieradata generation.
            LOG.exception("Exception getting kubeadm kubelet version")
            raise exception.KubeVersionUnavailable()
        return config
def _get_host_cluster_address(self, host):
"""Retrieve the named host address for the cluster host network"""
address = self._get_address_by_name(
host.hostname, constants.NETWORK_TYPE_CLUSTER_HOST)
return address.address
def _get_host_node_config(self, host):
node_ip = self._get_address_by_name(
host.hostname, constants.NETWORK_TYPE_MGMT).address
return {
'platform::kubernetes::params::node_ip': node_ip
}
def _get_host_label_config(self, host):
config = {}
labels = self.dbapi.label_get_by_host(host.uuid)
host_label_keys = []
for label in labels:
host_label_keys.append(label.label_key + "=" + label.label_value)
config.update(
{'platform::kubernetes::params::host_labels': host_label_keys})
return config
def _get_host_k8s_certificates_config(self, host):
config = {}
# kubernetes components certificate secrets
kube_apiserver_cert_secret = constants.KUBE_APISERVER_CERT.format(host.hostname)
kube_apiserver_kubelet_client_cert_secret = constants.KUBE_APISERVER_KUBELET_CERT.format(host.hostname)
kube_scheduler_cert_secret = constants.KUBE_SCHEDULER_CERT.format(host.hostname)
kube_controller_manager_cert_secret = constants.KUBE_CONTROLLER_MANAGER_CERT.format(host.hostname)
kube_kubelet_cert_secret = constants.KUBE_KUBELET_CERT.format(host.hostname)
secret_list = [kube_apiserver_cert_secret, kube_apiserver_kubelet_client_cert_secret,
kube_scheduler_cert_secret, kube_controller_manager_cert_secret,
kube_kubelet_cert_secret]
cert_data = self._get_kubernetes_components_cert_and_key(secret_list)
config.update({
'platform::kubernetes::params::apiserver_cert': cert_data[kube_apiserver_cert_secret][0],
'platform::kubernetes::params::apiserver_key': cert_data[kube_apiserver_cert_secret][1],
'platform::kubernetes::params::apiserver_kubelet_cert':
cert_data[kube_apiserver_kubelet_client_cert_secret][0],
'platform::kubernetes::params::apiserver_kubelet_key':
cert_data[kube_apiserver_kubelet_client_cert_secret][1],
'platform::kubernetes::params::scheduler_cert': cert_data[kube_scheduler_cert_secret][0],
'platform::kubernetes::params::scheduler_key': cert_data[kube_scheduler_cert_secret][1],
'platform::kubernetes::params::controller_manager_cert': cert_data[kube_controller_manager_cert_secret][0],
'platform::kubernetes::params::controller_manager_key': cert_data[kube_controller_manager_cert_secret][1],
'platform::kubernetes::params::kubelet_cert': cert_data[kube_kubelet_cert_secret][0],
'platform::kubernetes::params::kubelet_key': cert_data[kube_kubelet_cert_secret][1],
})
return config
def _get_host_k8s_cgroup_config(self, | |
<gh_stars>1-10
import pandas as pd
import numpy as np
import sklearn
from sklearn.ensemble import BaggingClassifier
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from sklearn.dummy import DummyClassifier
from sklearn import tree
from sklearn import svm
import pickle
import os
import util
import argparse
import matplotlib.pyplot as plt
from matplotlib import colors
from imblearn.ensemble import BalancedBaggingClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
os.chdir('.')  # NOTE(review): changing to '.' is a no-op — presumably a placeholder for a real working directory; confirm intent
EPSILON = 2.2e-16  # used to avoid division by zero errors that occur with bad data
def Find_Optimal_Cutoff(target, predicted):
    '''
    Rank probability cutoff points for a classification model by F-beta score.

    *Parameters:
        target : true binary labels, where rows are observations
        predicted : predicted probabilities, where rows are observations
    *Returns: list type, all candidate thresholds sorted by descending
        F-beta score (beta=0.5, favoring precision); best cutoff first
    '''
    precision, recall, threshold_precision_recall = metrics.precision_recall_curve(target, predicted)
    # precision_recall_curve returns one fewer threshold than (precision,
    # recall) points; pad with 1 so every point has a threshold.
    threshold_precision_recall = np.concatenate([threshold_precision_recall, [1]])
    i_pre = np.arange(len(precision))
    beta = 0.5  # larger than 1 => in favor of recall; smaller than 1 => in favor of precision
    # Suppress the expected 0/0 warnings; the NaNs are zeroed just below.
    with np.errstate(divide='ignore', invalid='ignore'):
        f1_scores = (1 + beta * beta) * (precision * recall) / (beta * beta * precision + recall)
    # 0/0 cases (no positive predictions) produce NaN; treat them as score 0.
    f1_scores[np.isnan(f1_scores)] = 0
    f1_values = pd.DataFrame(
        {
            'precision': pd.Series(precision, index=i_pre),
            'recall': pd.Series(recall, index=i_pre),
            'f1': pd.Series(f1_scores, index=i_pre),
            'threshold': pd.Series(threshold_precision_recall, index=i_pre)
        })
    # Sort by descending F-beta. argsort()[::-1] is kept (rather than
    # sort_values) to preserve the original tie-breaking order.
    # The previous ROC-based ranking (tpr + (1 - fpr)) was dead code and
    # has been removed.
    sorted_indices = f1_values.iloc[(f1_values.f1 - 0).argsort()[::-1]]
    return list(sorted_indices['threshold'])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bagging Cross Validation Blackbox function')
parser.add_argument('-r', '--resolution', default=1000, help='Input the resolution scale')
parser.add_argument('-p', '--park', help='Input the park name', required=True)
parser.add_argument('-c', '--category', default='All', help='Input the category')
parser.add_argument('-m', '--method', help='Input the training method')
parser.add_argument('-static', '--static', default=False, help='Predicting on all the static data or not (used for planning)')
parser.add_argument('-simple', '--simple', default=False, help='Not using ensemble method; i.e. only one threshold: 0')
parser.add_argument('-cutoff', '--cutoff', default=0, help='Input the cutoff threshold of patrol effort')
args = parser.parse_args()
resolution = int(args.resolution)
park = args.park
category = args.category
method = args.method
all_static = args.static
cutoff_threshold = float(args.cutoff)
simple_classification = args.simple
patrol_option = 0 # PEcase 0: current+past, 1: past, 2: current, 3: none
directory = './{0}_datasets/resolution/{1}m/input'.format(park, str(resolution))
if simple_classification:
output_directory = './{0}_datasets/resolution/{1}m/{2}/simple_output_{3}'.format(park, str(resolution), method, cutoff_threshold)
else:
output_directory = './{0}_datasets/resolution/{1}m/{2}/output_{3}'.format(park, str(resolution), method, cutoff_threshold)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.exists(output_directory + '/Prob/Final'):
os.makedirs(output_directory + '/Prob/Final')
if not os.path.exists(output_directory + '/Prob/Merged'):
os.makedirs(output_directory + '/Prob/Merged')
if all_static:
test_year, test_quarter = util.test_year_quarter_by_park(park)
if simple_classification:
selectedThresholds = [0]
else:
selectedThresholds = util.selected_threshold_by_park(park)
print('selected thresholds', selectedThresholds)
currentPatrolEffortList = util.selected_finer_threshold_by_park(park)
pastPatrolEffortList = util.selected_threshold_by_park(park)
evaluationResults_original = pd.DataFrame(index=['Threshold','bestCutOff', 'AUC','Precision', 'Recall', 'F1',
'L&L', 'max_L&L', 'L&L %',
'number of positive in training', 'number of all training',
'number of positive in validation', 'number of all validation',
'number of positive in testing', 'number of all testing', 'effective positive prediction'],
columns=[np.arange(0, len(selectedThresholds))])
evaluationResults = evaluationResults_original.copy(deep=True)
evaluationResults_constraint = evaluationResults_original.copy(deep=True)
X_all = pd.read_csv(directory + '/' + '{0}_X.csv'.format(category))
Y_all = pd.read_csv(directory + '/' + '{0}_Y.csv'.format(category))
X_all_copy = X_all.copy()
#X_all.drop(['ID_Global', 'Year', 'Quarter', 'ID_Spatial'], inplace=True, axis=1)
X_all.drop(['ID_Global', 'Year', 'Quarter', 'ID_Spatial', 'x', 'y'], inplace=True, axis=1)
X_all = X_all.values
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(X_all[:,1:])
X_all[:,1:] = scaler.transform(X_all[:,1:])
dataPointIndicator_all = Y_all[['ID_Global', 'Year', 'Quarter', 'ID_Spatial', 'x', 'y']]
Y_all.drop(['ID_Global', 'Year', 'Quarter', 'ID_Spatial', 'x', 'y'], inplace=True, axis=1)
Y_all = Y_all.values
Y_all = Y_all.ravel()
# ==================== separating testing set =======================
testing_year, testing_quarter = util.test_year_quarter_by_park(park)
cutoff_useful_threshold = cutoff_threshold
if testing_quarter is not None:
print('testing on year {0}, quarter {1}...'.format(testing_year, testing_quarter))
testing_indices = (dataPointIndicator_all['Year'] == testing_year) & (dataPointIndicator_all['Quarter'] == testing_quarter)
training_indices = -testing_indices
testing_indices = testing_indices & (X_all[:,0] >= cutoff_useful_threshold) # removing the testing data below the threshold
else:
print('testing on year {0}...'.format(testing_year))
testing_indices = (dataPointIndicator_all['Year'] == testing_year) & (X_all[:,0] >= cutoff_useful_threshold)
training_indices = -testing_indices
testing_indices = testing_indices & (X_all[:,0] >= cutoff_useful_threshold) # removing the testing data below the threshold
X_all_test = X_all[testing_indices]
Y_all_test = Y_all[testing_indices]
X_all_train = X_all[training_indices]
Y_all_train = Y_all[training_indices]
dataPointIndicator_test_all = dataPointIndicator_all[testing_indices]
print('X_all_test:', X_all_test.shape)
No_of_Split = 5
No_of_Iteration = 1
# ===================== shuffle training set ========================
X_all_train, Y_all_train = sklearn.utils.shuffle(X_all_train, Y_all_train)
# ======================== blackbox parsing =========================
indicatorList = ['ID_Spatial', 'x', 'y']
if all_static:
X_all_static = pd.read_csv(directory + '/' + 'allStaticFeat.csv')
X_all_static = X_all_static.rename(columns={'Var1': 'ID_Spatial'})
X_all_static['currentPatrolEffort'] = 0.0
X_all_static['pastPatrolEffort'] = 0.0
column_names = X_all_static.columns.tolist()
X_all_static = X_all_static[column_names[:3] + column_names[-2:] + column_names[3:-2]]
ID_spatial_2_patrol_effort = {}
for index, row in X_all_copy.iterrows():
ID_spatial = row['ID_Spatial']
patrol_effort = row['currentPatrolEffort']
year = row['Year']
quarter = row['Quarter']
if year == test_year and quarter == test_quarter:
ID_spatial_2_patrol_effort[ID_spatial] = patrol_effort
for index, row in X_all_static.iterrows():
ID_spatial = row['ID_Spatial']
if ID_spatial_2_patrol_effort.has_key(ID_spatial):
# print('ID spatial', ID_spatial)
X_all_static['pastPatrolEffort'][index] = ID_spatial_2_patrol_effort[ID_spatial]
else:
X_all_static['pastPatrolEffort'][index] = 0
# ================ remove current patrol and past patrol =============
if patrol_option == 1:
X_all_static.drop(['currentPatrolEffort'], inplace=True, axis=1)
elif patrol_option == 2:
X_all_static.drop(['pastPatrolEffort'], inplace=True, axis=1)
elif patrol_option == 3:
X_all_static.drop(['currentPatrolEffort', 'pastPatrolEffort'], inplace=True, axis=1)
Y_static_prob_predict_merged_list = []
Y_static_prob_predict_merged_attack_list = []
Y_static_prob_predict_merged_average = X_all_static.copy()[['ID_Spatial', 'x', 'y']]
Y_static_prob_predict_merged_attack_average = X_all_static.copy()[['ID_Spatial', 'x', 'y']]
for i in range(No_of_Split * No_of_Iteration):
Y_static_prob_predict_merged_list.append(X_all_static.copy()[['ID_Spatial', 'x', 'y']])
Y_static_prob_predict_merged_attack_list.append(X_all_static.copy()[['ID_Spatial', 'x', 'y']])
# ======================== cross-validating ==========================
count = 0
skf = StratifiedKFold(n_splits=No_of_Split)
for train_index, validate_index in skf.split(X_all_train, Y_all_train):
X_train_main, X_validate_main = X_all_train[train_index], X_all_train[validate_index]
Y_train_main, Y_validate_main = Y_all_train[train_index], Y_all_train[validate_index]
train_current_patrol = X_train_main[:,0]
validate_current_patrol = X_validate_main[:,0]
test_current_patrol = X_all_test[:,0]
if patrol_option == 0:
X_test_main = X_all_test
Y_test_main = Y_all_test
elif patrol_option == 1:
X_train_main = X_train_main[:,1:]
X_validate_main = X_validate_main[:,1:]
X_test_main = X_all_test[:,1:]
Y_test_main = Y_all_test
elif patrol_option == 2:
X_train_main = np.concatenate([X_train_main[:,0], X_train_main[:,2:]], axis=1)
X_validate_main = np.concatenate([X_validate_main[:,0], X_validate_main[:,2:]], axis=1)
X_test_main = np.concatenate([X_all_test[:,0], X_all_test[:,2:]], axis=1)
Y_test_main = Y_all_test
elif patrol_option == 3:
X_train_main = X_train_main[:,2:]
X_validate_main = X_validate_main[:,2:]
X_test_main = X_all_test[:,2:]
Y_test_main = Y_all_test
# ===================================================
#print (validate_index)
#print (len(validate_index))
dataPointIndicator_main = dataPointIndicator_all.iloc[validate_index]
count += 1
for jj in range(No_of_Iteration):
tmp_positive_sum = 0
print ('Dataset Number: ', count, ' Round of the iteration', jj)
colNumber = 0
for thrsh_number in range(len(selectedThresholds)):
thrsh = selectedThresholds[thrsh_number]
next_thrsh = 100 if thrsh_number == len(selectedThresholds)-1 else selectedThresholds[thrsh_number+1]
if thrsh == 0:
A_train = train_current_patrol >= thrsh
B_train = Y_train_main == 1
C_train = train_current_patrol < next_thrsh
# D_train = A_train * C_train + B_train # TODO
D_train = A_train + B_train # TODO
A_validate = validate_current_patrol >= thrsh
C_validate = validate_current_patrol < next_thrsh
if simple_classification:
D_validate = A_validate # & C_validate # TODO
else:
D_validate = A_validate # & C_validate # TODO
A_test = test_current_patrol >= thrsh
C_test = test_current_patrol < next_thrsh
D_test = A_test # & C_test # TODO
X_train = X_train_main[D_train]
Y_train = Y_train_main[D_train]
X_validate = X_validate_main[D_validate]
Y_validate = Y_validate_main[D_validate]
X_test = X_test_main[D_test]
Y_test = Y_test_main[D_test]
dataPointIndicator_validate = dataPointIndicator_main.iloc[D_validate]
dataPointIndicator_test = dataPointIndicator_test_all.iloc[D_test]
else:
A_train = train_current_patrol >= thrsh
B_train = Y_train_main[:] == 1
C_train = train_current_patrol < next_thrsh
# D_train = A_train * C_train + B_train # TODO
D_train = A_train + B_train # TODO
X_train = X_train_main[D_train]
Y_train = Y_train_main[D_train]
print (thrsh)
A_validate = validate_current_patrol >= thrsh
C_validate = validate_current_patrol < next_thrsh
if simple_classification:
D_validate = A_validate # & C_validate # TODO
else:
D_validate = A_validate # & C_validate # TODO
X_validate = X_validate_main[D_validate]
Y_validate = Y_validate_main[D_validate]
#print ('C_validate', C_validate)
A_test = test_current_patrol >= thrsh # WARNING: MAKE SURE NOT TO MAKE INFORMATION LEAKAGE
C_test = test_current_patrol < next_thrsh
D_test = A_test # & C_test # TODO
X_test = X_test_main[D_test]
Y_test = Y_test_main[D_test]
dataPointIndicator_validate = dataPointIndicator_main.iloc[D_validate]
dataPointIndicator_test = dataPointIndicator_test_all.iloc[D_test]
max_samples = 1.0 # ratio of sample size draw in the bagging
if method == 'dt':
clf = BaggingClassifier(base_estimator=tree.DecisionTreeClassifier(), n_estimators=50, max_samples=max_samples,
max_features=1.0, bootstrap=True, bootstrap_features=True,
oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0)
elif method == 'svm':
clf = BaggingClassifier(base_estimator=svm.SVC(), n_estimators=50, max_samples=max_samples,
max_features=1.0, bootstrap=True, bootstrap_features=True,
oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0)
elif method == 'gp':
max_samples = min(1000, len(X_train))
clf = BaggingClassifier(base_estimator=GaussianProcessClassifier(kernel=RBF(length_scale=1.00), optimizer=None), n_estimators=50, max_samples=max_samples,
max_features=1.0, bootstrap=True, bootstrap_features=True,
oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0)
#
elif method == 'balance-dt':
clf = BalancedBaggingClassifier(base_estimator=tree.DecisionTreeClassifier(), n_estimators=50, max_samples=max_samples,
max_features=1.0, bootstrap=True, bootstrap_features=True,
oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0)
elif method == 'balance-svm':
clf = BalancedBaggingClassifier(base_estimator=svm.SVC(), n_estimators=50, max_samples=max_samples,
max_features=1.0, bootstrap=True, bootstrap_features=True,
oob_score=False, warm_start=False, n_jobs=1, random_state=None, verbose=0)
elif method == 'balance-gp':
max_samples = min(1000, len(X_train))
clf = BalancedBaggingClassifier(base_estimator=GaussianProcessClassifier(kernel=RBF(length_scale=1.00), | |
# -*- coding: future_fstrings -*-
# Copyright 2018 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file parser for Fylm.
This module takes raw input source paths and cleans/analyzes them
to determine various properties used in the name correction
and TMDb lookup.
parser: the main class exported by this module.
"""
from __future__ import unicode_literals, print_function
from builtins import *
import os
import re
import fylmlib.config as config
import fylmlib.patterns as patterns
import fylmlib.formatter as formatter
from fylmlib.enums import Media
class parser:
"""Main class for film parser.
All methods are class methods, thus this class should never be instantiated.
"""
@classmethod
def get_title(cls, source_path):
    """Get title from full path of file or folder.

    Use regular expressions to strip, clean, and format a file
    or folder path into a more pleasant film title.

    Args:
        source_path: (str, utf-8) full path of file or folder.
    Returns:
        A clean and well-formed film title.
    """
    # Ensure source_path is a str (it may arrive as a Path-like object).
    source_path = str(source_path)
    folder = os.path.basename(os.path.dirname(source_path))
    file = os.path.basename(source_path)
    # Determine whether to use the file or its containing folder
    # to determine the title by parsing for year and resolution.
    title = folder if cls.get_year(folder) is not None or cls.get_resolution(folder) is not None else file
    # Remove the file extension.
    title = os.path.splitext(title)[0]
    # Strip "tag" prefixes from the title.
    for prefix in config.strip_prefixes:
        if title.lower().startswith(prefix.lower()):
            title = title[len(prefix):]
    # For a title that properly begins with 'The' (which was previously
    # converted to append ', The' instead), we need to put it back to its
    # original state both for lookup validation, and so that we don't
    # end up with multiple ', the' suffixes.
    if re.search(r', the', title, re.I):
        title = f"The {re.sub(r', the', '', title, flags=re.I)}"
    # Use the 'strip_from_title' regular expression to replace unwanted
    # characters in a title with a space.
    title = re.sub(patterns.strip_from_title, ' ', title)
    # If the title contains a known edition, strip it from the title. E.g.,
    # if we have Dinosaur.Special.Edition, we already know the edition, and
    # we don't need it to appear, duplicated, in the title. `_edition_map`
    # returns a (search_key, value) tuple; compute it once (the original
    # called it twice) and replace the search key (not the value).
    edition_search_key = cls._edition_map(source_path)[0]
    if edition_search_key is not None:
        title = re.sub(edition_search_key, '', title)
    # Strip all resolution and media tags from the title.
    title = re.sub(patterns.media, '', title)
    title = re.sub(patterns.resolution, '', title)
    # Typical naming patterns place the year as a delimiter between the title
    # and the rest of the file. Therefore we can assume we only care about
    # the first part of the string, and so we split on the year value, and keep
    # only the left-hand portion.
    title = title.split(str(cls.get_year(source_path)))[0]
    # Add back in . to titles or strings we know need to keep periods.
    # Looking at you, S.W.A.T and After.Life.
    for keep_period_str in config.keep_period:
        title = re.sub(re.compile(r'\b' + re.escape(keep_period_str) + r'\b', re.I), keep_period_str, title)
    # Remove extra whitespace from the edges of the title and remove repeating
    # whitespace.
    title = formatter.strip_extra_whitespace(title.strip())
    return title
@classmethod
def get_year(cls, source_path):
    """Get year from full path of file or folder.

    Use regular expressions to identify a year value between 1910 and 2159,
    taking the right-most match when several years appear (looking at you,
    2001: A Space Odyssey).

    Args:
        source_path: (str, utf-8) full path of file or folder.
    Returns:
        A 4-digit integer representing the release year, or None if
        no year could be determined.
    """
    # Ensure source_path is a str.
    source_path = str(source_path)
    folder = os.path.basename(os.path.dirname(source_path))
    file = os.path.basename(source_path)
    # Collect every candidate year in the combined folder/file string;
    # patterns.year matches 1910-2159 (2160 is excluded because of 2160p).
    candidates = list(re.finditer(patterns.year, f'{folder}/{file}'))
    if not candidates:
        return None
    # Keep the right-most match and pull out its named 'year' group.
    return int(candidates[-1].group('year'))
@classmethod
def get_edition(cls, source_path):
    """Get and correct special edition from full path of file or folder.

    Iterate a map of regular expressions used to detect and correct
    special editions, as configured in config.edition_map.

    Args:
        source_path: (str, utf-8) full path of file or folder.
    Returns:
        A corrected string representing the film's edition, or None.
    """
    folder = os.path.basename(os.path.dirname(source_path))
    file = os.path.basename(source_path)
    # _edition_map yields a (search_key, corrected_value) tuple; only the
    # corrected string value of the edition is of interest here.
    corrected_edition = cls._edition_map(f'{folder}/{file}')[1]
    return corrected_edition or None
@classmethod
def get_resolution(cls, source_path):
    """Get resolution from full path of file or folder.

    Use a regular expression to retrieve release resolutions from the
    source path (e.g. 720p, 1080p, or 2160p).

    Args:
        source_path: (str, utf-8) full path of file or folder.
    Returns:
        A corrected string representing the film's resolution, or None.
    """
    folder = os.path.basename(os.path.dirname(source_path))
    file = os.path.basename(source_path)
    # Search for any of the known qualities.
    match = re.search(patterns.resolution, f'{folder}/{file}')
    if match is None:
        return None
    resolution = match.group('resolution').lower()
    # Manual fix: normalize 4K releases to their progressive-scan name.
    if resolution == '4k':
        return '2160p'
    # Append a trailing 'p' when the tag lacks one (e.g. '1080' -> '1080p').
    return resolution if 'p' in resolution else resolution + 'p'
@classmethod
def get_media(cls, source_path) -> Media:
    """Get media from full path of file or folder.

    Use regular expressions to identify the original media of the file.

    Args:
        source_path: (str, utf-8) full path of file or folder.
    Returns:
        An enum representing the media found.
    """
    folder = os.path.basename(os.path.dirname(source_path))
    file = os.path.basename(source_path)
    match = re.search(patterns.media, f'{folder}/{file}')
    if match:
        # Check the named capture groups in priority order; the first
        # group that participated in the match wins.
        for group_name, media in (
                ('bluray', Media.BLURAY),
                ('webdl', Media.WEBDL),
                ('hdtv', Media.HDTV),
                ('dvd', Media.DVD),
                ('sdtv', Media.SDTV)):
            if match.group(group_name):
                return media
    return Media.UNKNOWN
@classmethod
def is_hdr(cls, source_path) -> bool:
    """Determine whether the media is an HDR file.

    Use regular expressions to identify whether the media is HDR or not.

    Args:
        source_path: (str, utf-8) full path of file or folder.
    Returns:
        A bool representing the HDR status of the media.
    """
    folder = os.path.basename(os.path.dirname(source_path))
    file = os.path.basename(source_path)
    hdr_match = re.search(patterns.hdr, f'{folder}/{file}')
    # bool() collapses "no match" and "group absent" to False.
    return bool(hdr_match and hdr_match.group('hdr'))
@classmethod
def is_proper(cls, source_path) -> bool:
    """Determine whether the media is a proper rip.

    Use regular expressions to identify whether the file is a proper or not.

    Args:
        source_path: (str, utf-8) full path of file or folder.
    Returns:
        A bool representing the proper state of the media.
    """
    folder = os.path.basename(os.path.dirname(source_path))
    file = os.path.basename(source_path)
    proper_match = re.search(patterns.proper, f'{folder}/{file}')
    # bool() collapses "no match" and "group absent" to False.
    return bool(proper_match and proper_match.group('proper'))
@classmethod
def get_part(cls, source_path):
    """Get part # from full path of file or folder.

    Use regular expressions to identify the part # of the file.

    Args:
        source_path: (str, utf-8) full path of file or folder.
    Returns:
        A string representing the part # of the title, or None, if no
        match is found.
    """
    folder = os.path.basename(os.path.dirname(source_path))
    file = os.path.basename(source_path)
    # Search for a matching part condition
    match = re.search(patterns.part, f'{folder}/{file}')
    # If a match exists, normalize it to uppercase.
    return match.group('part').upper() if match else None
@classmethod
def _edition_map(cls, source_path):
"""Internal method to search for special edition strings in a | |
frame for this location.
"""
confidence = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
timeOffset = _messages.StringField(2)
class GoogleCloudVideointelligenceV1p2beta1LabelSegment(_messages.Message):
  r"""Video segment level annotation results for label detection.

  Fields:
    confidence: Confidence that the label is accurate. Range: [0, 1].
    segment: Video segment where a label was detected.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  confidence = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  segment = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1VideoSegment', 2)
class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox(_messages.Message):
  r"""Normalized bounding box. The normalized vertex coordinates are relative
  to the original image. Range: [0, 1].

  Fields:
    bottom: Bottom Y coordinate.
    left: Left X coordinate.
    right: Right X coordinate.
    top: Top Y coordinate.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  bottom = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  left = _messages.FloatField(2, variant=_messages.Variant.FLOAT)
  right = _messages.FloatField(3, variant=_messages.Variant.FLOAT)
  top = _messages.FloatField(4, variant=_messages.Variant.FLOAT)
class GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly(_messages.Message):
  r"""Normalized bounding polygon for text (that might not be aligned with
  axis). Contains list of the corner points in clockwise order starting from
  top-left corner. For example, for a rectangular bounding box: When the text
  is horizontal it might look like: 0----1 | | 3
  ----2 When it's clockwise rotated 180 degrees around the top-left corner it
  becomes: 2----3 | | 1----0 and the vertex order
  will still be (0, 1, 2, 3). Note that values can be less than 0, or greater
  than 1 due to trignometric calculations for location of the box.

  Fields:
    vertices: Normalized vertices of the bounding polygon.
  """

  # Integer below is a protobuf field number (wire format); do not renumber.
  vertices = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1NormalizedVertex', 1, repeated=True)
class GoogleCloudVideointelligenceV1p2beta1NormalizedVertex(_messages.Message):
  r"""A vertex represents a 2D point in the image. NOTE: the normalized vertex
  coordinates are relative to the original image and range from 0 to 1.

  Fields:
    x: X coordinate.
    y: Y coordinate.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  x = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  y = _messages.FloatField(2, variant=_messages.Variant.FLOAT)
class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingAnnotation(_messages.Message):
  r"""Annotations corresponding to one tracked object.

  Fields:
    confidence: Object category's labeling confidence of this track.
    entity: Entity to specify the object category that this track is labeled
      as.
    frames: Information corresponding to all frames where this object track
      appears. Non-streaming batch mode: it may be one or multiple
      ObjectTrackingFrame messages in frames. Streaming mode: it can only be
      one ObjectTrackingFrame message in frames.
    segment: Non-streaming batch mode ONLY. Each object track corresponds to
      one video segment where it appears.
    trackId: Streaming mode ONLY. In streaming mode, we do not know the end
      time of a tracked object before it is completed. Hence, there is no
      VideoSegment info returned. Instead, we provide a unique identifiable
      integer track_id so that the customers can correlate the results of the
      ongoing ObjectTrackAnnotation of the same track_id over time.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  confidence = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  entity = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1Entity', 2)
  frames = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame', 3, repeated=True)
  segment = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1VideoSegment', 4)
  trackId = _messages.IntegerField(5)
class GoogleCloudVideointelligenceV1p2beta1ObjectTrackingFrame(_messages.Message):
  r"""Video frame level annotations for object detection and tracking. This
  field stores per frame location, time offset, and confidence.

  Fields:
    normalizedBoundingBox: The normalized bounding box location of this object
      track for the frame.
    timeOffset: The timestamp of the frame in microseconds.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  normalizedBoundingBox = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingBox', 1)
  timeOffset = _messages.StringField(2)
class GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative(_messages.Message):
  r"""Alternative hypotheses (a.k.a. n-best list).

  Fields:
    confidence: Output only. The confidence estimate between 0.0 and 1.0. A
      higher number indicates an estimated greater likelihood that the
      recognized words are correct. This field is set only for the top
      alternative. This field is not guaranteed to be accurate and users
      should not rely on it to be always provided. The default of 0.0 is a
      sentinel value indicating `confidence` was not set.
    transcript: Transcript text representing the words that the user spoke.
    words: Output only. A list of word-specific information for each
      recognized word. Note: When `enable_speaker_diarization` is true, you
      will see all the words from the beginning of the audio.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  confidence = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  transcript = _messages.StringField(2)
  words = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1WordInfo', 3, repeated=True)
class GoogleCloudVideointelligenceV1p2beta1SpeechTranscription(_messages.Message):
  r"""A speech recognition result corresponding to a portion of the audio.

  Fields:
    alternatives: May contain one or more recognition hypotheses (up to the
      maximum specified in `max_alternatives`). These alternatives are
      ordered in terms of accuracy, with the top (first) alternative being the
      most probable, as ranked by the recognizer.
    languageCode: Output only. The [BCP-47](https://www.rfc-
      editor.org/rfc/bcp/bcp47.txt) language tag of the language in this
      result. This language code was detected to have the most likelihood of
      being spoken in the audio.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  alternatives = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1SpeechRecognitionAlternative', 1, repeated=True)
  languageCode = _messages.StringField(2)
class GoogleCloudVideointelligenceV1p2beta1TextAnnotation(_messages.Message):
  r"""Annotations related to one detected OCR text snippet. This will contain
  the corresponding text, confidence value, and frame level information for
  each detection.

  Fields:
    segments: All video segments where OCR detected text appears.
    text: The detected text.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  segments = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1TextSegment', 1, repeated=True)
  text = _messages.StringField(2)
class GoogleCloudVideointelligenceV1p2beta1TextFrame(_messages.Message):
  r"""Video frame level annotation results for text annotation (OCR). Contains
  information regarding timestamp and bounding box locations for the frames
  containing detected OCR text snippets.

  Fields:
    rotatedBoundingBox: Bounding polygon of the detected text for this frame.
    timeOffset: Timestamp of this frame.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  rotatedBoundingBox = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1NormalizedBoundingPoly', 1)
  timeOffset = _messages.StringField(2)
class GoogleCloudVideointelligenceV1p2beta1TextSegment(_messages.Message):
  r"""Video segment level annotation results for text detection.

  Fields:
    confidence: Confidence for the track of detected text. It is calculated as
      the highest over all frames where OCR detected text appears.
    frames: Information related to the frames where OCR detected text appears.
    segment: Video segment where a text snippet was detected.
  """

  # Integers below are protobuf field numbers (wire format); do not renumber.
  confidence = _messages.FloatField(1, variant=_messages.Variant.FLOAT)
  frames = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1TextFrame', 2, repeated=True)
  segment = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1VideoSegment', 3)
class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationProgress(_messages.Message):
  r"""Annotation progress for a single video.

  Enums:
    FeatureValueValuesEnum: Specifies which feature is being tracked if the
      request contains more than one features.

  Fields:
    feature: Specifies which feature is being tracked if the request contains
      more than one features.
    inputUri: Video file location in [Google Cloud
      Storage](https://cloud.google.com/storage/).
    progressPercent: Approximate percentage processed thus far. Guaranteed to
      be 100 when fully processed.
    segment: Specifies which segment is being tracked if the request contains
      more than one segments.
    startTime: Time when the request was received.
    updateTime: Time of the most recent update.
  """

  class FeatureValueValuesEnum(_messages.Enum):
    r"""Specifies which feature is being tracked if the request contains more
    than one features.

    Values:
      FEATURE_UNSPECIFIED: Unspecified.
      LABEL_DETECTION: Label detection. Detect objects, such as dog or flower.
      SHOT_CHANGE_DETECTION: Shot change detection.
      EXPLICIT_CONTENT_DETECTION: Explicit content detection.
      SPEECH_TRANSCRIPTION: Speech transcription.
      TEXT_DETECTION: OCR text detection and tracking.
      OBJECT_TRACKING: Object detection and tracking.
    """
    # Enum numbers mirror the API's proto enum values; do not renumber.
    FEATURE_UNSPECIFIED = 0
    LABEL_DETECTION = 1
    SHOT_CHANGE_DETECTION = 2
    EXPLICIT_CONTENT_DETECTION = 3
    SPEECH_TRANSCRIPTION = 4
    TEXT_DETECTION = 5
    OBJECT_TRACKING = 6

  # Integers below are protobuf field numbers (wire format); do not renumber.
  feature = _messages.EnumField('FeatureValueValuesEnum', 1)
  inputUri = _messages.StringField(2)
  progressPercent = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  segment = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1VideoSegment', 4)
  startTime = _messages.StringField(5)
  updateTime = _messages.StringField(6)
class GoogleCloudVideointelligenceV1p2beta1VideoAnnotationResults(_messages.Message):
r"""Annotation results for a single video.
Fields:
error: If set, indicates an error. Note that for a single
`AnnotateVideoRequest` some videos may succeed and some may fail.
explicitAnnotation: Explicit content annotation.
frameLabelAnnotations: Label annotations on frame level. There is exactly
one element for each unique label.
inputUri: Video file location in [Google Cloud
Storage](https://cloud.google.com/storage/).
objectAnnotations: Annotations for list of objects detected and tracked in
video.
segment: Video segment on which the annotation is run.
segmentLabelAnnotations: Topical label annotations on video level or user
specified segment level. There is exactly one element for each unique
label.
segmentPresenceLabelAnnotations: Presence label annotations on video level
or user specified segment level. There is exactly one element for each
unique label. Compared to the existing topical
`segment_label_annotations`, this field presents more fine-grained,
segment-level labels detected in video content and is made available
only when the client sets `LabelDetectionConfig.model` to
"builtin/latest" in the request.
shotAnnotations: Shot annotations. Each shot is represented as a video
segment.
shotLabelAnnotations: Topical label annotations on shot level. There is
exactly one element for each unique label.
shotPresenceLabelAnnotations: Presence label annotations on shot level.
There is exactly one element for each unique label. Compared to the
existing topical `shot_label_annotations`, this field presents more
fine-grained, shot-level labels detected in video content and is made
available only when the client sets `LabelDetectionConfig.model` to
"builtin/latest" in the request.
speechTranscriptions: Speech transcription.
textAnnotations: OCR text detection and tracking. Annotations for list of
detected text snippets. Each will have list of frame information
associated with it.
"""
error = _messages.MessageField('GoogleRpcStatus', 1)
explicitAnnotation = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1ExplicitContentAnnotation', 2)
frameLabelAnnotations = _messages.MessageField('GoogleCloudVideointelligenceV1p2beta1LabelAnnotation', | |
"""
Implementation of window function estimation, following https://github.com/cosmodesi/GC_derivations,
and https://fr.overleaf.com/read/hpgbwqzmtcxn.
"""
import time
import numpy as np
from scipy import special
from pmesh.pm import ParticleMesh, RealField, ComplexField
from . import mpi
from .fftlog import PowerToCorrelation
from .utils import _make_array
from .fft_power import MeshFFTPower, get_real_Ylm, _transform_rslab, _get_real_dtype, _format_positions, _format_all_weights, project_to_basis, PowerSpectrumMultipoles, PowerSpectrumWedges, normalization
from .wide_angle import BaseMatrix, Projection, PowerSpectrumOddWideAngleMatrix
from .mesh import CatalogMesh, _get_mesh_attrs, _wrap_positions
def Si(x):
    r"""Return the sine integral :math:`\mathrm{Si}(x) = \int_0^x \sin(t)/t \, dt`."""
    # scipy.special.sici returns the pair (Si, Ci); only Si is needed here.
    sine_integral, _ = special.sici(x)
    return sine_integral
# Derivative of correlation function w.r.t. k-bins, precomputed with sympy; full, low-s or low-a limit.
# Each entry maps multipole order ell -> (full analytic expression, small-s series):
# the second element is a series expansion around s = 0, used where the full
# expression is numerically unstable (see get_correlation_function_tophat_derivative).
_registered_correlation_function_tophat_derivatives = {}
# ell = 0 (monopole)
_registered_correlation_function_tophat_derivatives[0] = (lambda s, a: (-a * np.cos(a * s) / s + np.sin(a * s) / s**2) / (2 * np.pi**2 * s),
                                                          lambda s, a: -a**9 * s**6 / (90720 * np.pi**2) + a**7 * s**4 / (1680 * np.pi**2) - a**5 * s**2 / (60 * np.pi**2) + a**3 / (6 * np.pi**2))
# ell = 1
_registered_correlation_function_tophat_derivatives[1] = (lambda s, a: ((-a * np.sin(a * s) - 2 * np.cos(a * s) / s) / s**2 + 2 / s**3) / (2 * np.pi**2),
                                                          lambda s, a: -a**10 * s**7 / (907200 * np.pi**2) + a**8 * s**5 / (13440 * np.pi**2) - a**6 * s**3 / (360 * np.pi**2) + a**4 * s / (24 * np.pi**2))
# ell = 2 (quadrupole); Si is the sine integral defined above
_registered_correlation_function_tophat_derivatives[2] = (lambda s, a: -(a * s * np.cos(a * s) - 4 * np.sin(a * s) + 3 * Si(a * s)) / (2 * np.pi**2 * s**3),
                                                          lambda s, a: -a**9 * s**6 / (136080 * np.pi**2) + a**7 * s**4 / (2940 * np.pi**2) - a**5 * s**2 / (150 * np.pi**2))
# ell = 3
_registered_correlation_function_tophat_derivatives[3] = (lambda s, a: -(8 / s**3 + (a * s**2 * np.sin(a * s) + 7 * s * np.cos(a * s) - 15 * np.sin(a * s) / a) / s**4) / (2 * np.pi**2),
                                                          lambda s, a: -a**10 * s**7 / (1663200 * np.pi**2) + a**8 * s**5 / (30240 * np.pi**2) - a**6 * s**3 / (1260 * np.pi**2))
# ell = 4 (hexadecapole)
_registered_correlation_function_tophat_derivatives[4] = (lambda s, a: (-a * s**3 * np.cos(a * s) + 11 * s**2 * np.sin(a * s) + 15 * s**2 * Si(a * s) / 2 + 105 * s * np.cos(a * s) / (2 * a) - 105 * np.sin(a * s) / (2 * a**2)) / (2 * np.pi**2 * s**5),
                                                          lambda s, a: -a**9 * s**6 / (374220 * np.pi**2) + a**7 * s**4 / (13230 * np.pi**2))
# ell = 5
_registered_correlation_function_tophat_derivatives[5] = (lambda s, a: (16 / s**3 + (-a * s**4 * np.sin(a * s) - 16 * s**3 * np.cos(a * s) + 105 * s**2 * np.sin(a * s) / a + 315 * s * np.cos(a * s) / a**2 - 315 * np.sin(a * s) / a**3) / s**6) / (2 * np.pi**2),
                                                          lambda s, a: -a**10 * s**7 / (5405400 * np.pi**2) + a**8 * s**5 / (166320 * np.pi**2))
def _get_attr_in_inst(obj, name, insts=(None,)):
# Search for ``name`` in instances of name ``insts`` of obj
for inst in insts:
if inst is None:
if hasattr(obj, name):
return getattr(obj, name)
else:
if hasattr(obj, inst) and hasattr(getattr(obj, inst), name):
return getattr(getattr(obj, inst), name)
def get_correlation_function_tophat_derivative(kedges, ell=0, k=None, **kwargs):
    r"""
    Return a list of callable corresponding to the derivative of the correlation function
    w.r.t. :math:`k`-bins.

    Parameters
    ----------
    kedges : array
        :math:`k`-edges of the :math:`k`-bins.
    ell : int, default=0
        Multipole order.
    k : array, default=None
        If ``None``, calculation will be analytic, which will work if ``ell`` in [0, 2, 4], or sympy package is installed
        (such analytic integration with sympy may take several seconds).
        If not ``None``, this is the :math:`k` log-spaced array for numerical FFTlog integration.
    kwargs : dict
        If ``k`` is not ``None``, other arguments for :class:`fftlog.PowerToCorrelation`.

    Returns
    -------
    toret : list
        List of callables, taking configuration-space separation ``s`` as input.
    """
    if k is None:
        # Analytic branch: use the precomputed expressions when available,
        # otherwise derive them symbolically with sympy.
        if ell in _registered_correlation_function_tophat_derivatives:
            fun, fun_lows = _registered_correlation_function_tophat_derivatives[ell]
        else:
            try:
                import sympy as sp
            except ImportError as exc:
                # Fixed error-message typo ("to for" -> "for").
                raise ImportError('Install sympy for analytic computation') from exc
            k, s, a = sp.symbols('k s a', real=True, positive=True)
            integrand = sp.simplify(k**2 * sp.expand_func(sp.jn(ell, k * s)))
            # i^ell; we take in the imaginary part of the odd power spectrum multipoles
            expr = (-1)**(ell // 2) / (2 * sp.pi**2) * sp.integrate(integrand, (k, 0, a))
            # Series expansion around s = 0, used where the full expression is unstable.
            expr_lows = sp.series(expr, x=s, x0=0, n=8).removeO()
            modules = ['numpy', {'Si': Si}]
            fun = sp.lambdify((s, a), expr, modules=modules)
            fun_lows = sp.lambdify((s, a), expr_lows, modules=modules)

        def _make_fun(kmin, kmax):
            # Use the small-argument series when an integration bound is ~0,
            # where the full expression is numerically ill-behaved.
            funa = fun_lows if np.abs(kmin) < 1e-4 else fun
            funb = fun_lows if np.abs(kmax) < 1e-4 else fun

            def _fun(s):
                toret = np.empty_like(s)
                # Small separations: the series expansion is more stable.
                mask = s < 1e-1
                toret[mask] = fun_lows(s[mask], kmax) - fun_lows(s[mask], kmin)
                toret[~mask] = funb(s[~mask], kmax) - funa(s[~mask], kmin)
                return toret

            return _fun

        toret = []
        for kmin, kmax in zip(kedges[:-1], kedges[1:]):
            toret.append(_make_fun(kmin, kmax))
        return toret

    # Numerical branch: FFTlog-transform a top-hat selection of each k-bin
    # and interpolate the resulting correlation function.
    fftlog = PowerToCorrelation(k, ell=ell, complex=False, **kwargs)

    def _make_fun(sep, fun):
        return lambda s: np.interp(s, sep, fun)

    toret = []
    for kmin, kmax in zip(kedges[:-1], kedges[1:]):
        tophat = np.zeros_like(k)
        tophat[(k >= kmin) & (k <= kmax)] = 1.
        sep, fun = fftlog(tophat)
        toret.append(_make_fun(sep, fun))
    return toret
class PowerSpectrumFFTWindowMatrix(BaseMatrix):
    """Window matrix, relating "theory" input to "observed" output."""

    def __init__(self, matrix, xin, xout, projsin, projsout, nmodes, wnorm=1., attrs=None, mpicomm=None):
        """
        Initialize :class:`PowerSpectrumFFTWindowMatrix`.

        Parameters
        ----------
        matrix : array
            2D array representing window matrix.
        xin : array, list
            List of input "theory" coordinates.
            If single array, assumed to be the same for all input projections ``projsin``.
        xout : list
            List of output "theory" coordinates.
            If single array, assumed to be the same for all output projections ``projsout``.
        projsin : list
            List of input "theory" projections.
        projsout : list
            List of output "observed" projections.
        nmodes : array
            Number of modes in each bin.
        wnorm : float, default=1.
            Window function normalization.
        attrs : dict, default=None
            Dictionary of other attributes.
        mpicomm : MPI communicator, default=None
            The MPI communicator, only used when saving (:meth:`save`) matrix.
        """
        super(PowerSpectrumFFTWindowMatrix, self).__init__(matrix, xin, xout, projsin, projsout, weightsout=nmodes, attrs=attrs)
        self.cvalue = self.value  # let us just keep the original value somewhere
        # Keep only the physically relevant part of each output projection:
        # even multipoles are real, odd multipoles imaginary.
        value = []
        nout = 0
        for iout, xout in enumerate(self.xout):
            # Column slice of the matrix corresponding to this output projection.
            slout = slice(nout, nout + len(xout))
            tmp = self.cvalue[:, slout]
            tmp = tmp.real if self.projsout[iout].ell % 2 == 0 else tmp.imag
            value.append(tmp)
            nout = slout.stop
        self.value = np.concatenate(value, axis=-1)
        self.wnorm = wnorm
        self.mpicomm = mpicomm

    @property
    def nmodes(self):
        """Number of modes in each output bin (alias for :attr:`weightsout`)."""
        return self.weightsout

    @nmodes.setter
    def nmodes(self, nmodes):
        self.weightsout = nmodes

    @classmethod
    def from_power(cls, power, xin, projin=(0, 0), **kwargs):
        """
        Create window function from input :class:`PowerSpectrumMultipoles`.

        Parameters
        ----------
        power : PowerSpectrumMultipoles
            Power spectrum measurement to convert into :class:`PowerSpectrumFFTWindowMatrix`.
        xin : float
            Input "theory" bin.
        projin : tuple, Projection, default=(0, 0)
            Input "theory" projection, i.e. (multipole, wide-angle order) tuple.

        Returns
        -------
        matrix : PowerSpectrumFFTWindowMatrix
        """
        xin = [np.asarray([xin])]
        projsin = [projin]
        ells = getattr(power, 'ells', [0])  # in case of PowerSpectrumWedges, only 0
        projsout = [Projection(ell=ell, wa_order=None) for ell in ells]
        xout = [np.squeeze(np.array([modes.ravel() for modes in power.modes]).T)] * len(projsout)  # modes are k for PowerSpectrumMultipoles, (k, mu) for PowerSpectrumWedges
        weights = [power.nmodes.ravel()] * len(projsout)
        matrix = np.atleast_2d(power.power.ravel())
        attrs = power.attrs.copy()
        attrs['edges'] = power.edges
        return cls(matrix, xin, xout, projsin, projsout, weights, wnorm=power.wnorm, attrs=attrs, **kwargs)

    def __getstate__(self):
        """Return this class state dictionary."""
        state = super(PowerSpectrumFFTWindowMatrix, self).__getstate__()
        # Persist the complex-valued matrix and the normalization on top of
        # the base-class state.
        for name in ['cvalue', 'wnorm']:
            state[name] = getattr(self, name)
        return state

    def resum_input_odd_wide_angle(self, **kwargs):
        """
        Resum odd wide-angle orders.
        Input ``kwargs`` will be passed to :attr:`PowerSpectrumOddWideAngleMatrix`.
        """
        projsin = [proj for proj in self.projsin if proj.wa_order == 0]
        if projsin == self.projsin: return
        # The theory wide-angle expansion uses first point line-of-sight
        matrix = PowerSpectrumOddWideAngleMatrix(self.xin[0], projsin=projsin, projsout=self.projsin, los='firstpoint', **kwargs)
        self.__dict__.update(self.join(matrix, self).__dict__)
class MeshFFTWindow(MeshFFTPower):
"""
Class that computes window function from input mesh(es), using global or local line-of-sight, see:
- https://github.com/cosmodesi/GC_derivations
- https://fr.overleaf.com/read/hpgbwqzmtcxn
Attributes
----------
poles : PowerSpectrumFFTWindowMatrix
Window matrix.
"""
def __init__(self, mesh1=None, mesh2=None, edgesin=None, projsin=None, power_ref=None, edges=None, ells=None, los=None, periodic=False, boxcenter=None,
compensations=None, wnorm=None, shotnoise=None, edgesin_type='smooth', **kwargs):
r"""
Initialize :class:`MeshFFTWindow`.
Parameters
----------
mesh1 : CatalogMesh, RealField, default=None
First mesh.
mesh2 : CatalogMesh, RealField, default=None
In case of cross-correlation, second mesh, with same | |
import time
import numpy as np
import scipy.integrate
import scipy.linalg
import ross
from ross.units import Q_, check_units
from .abs_defect import Defect
from .integrate_solver import Integrator
__all__ = [
"Rubbing",
]
class Rubbing(Defect):
"""Contains a rubbing model for applications on finite element models of rotative machinery.
The reference coordinate system is: z-axis through the shaft center; x-axis and y-axis in the sensors' planes
Parameters
----------
dt : float
Time step.
tI : float
Initial time.
tF : float
Final time.
deltaRUB : float
Distance between the housing and shaft surface.
kRUB : float
Contact stiffness.
cRUB : float
Contact damping.
miRUB : float
Friction coefficient.
posRUB : int
Node where the rubbing is occurring.
speed : float, pint.Quantity
Operational speed of the machine. Default unit is rad/s.
unbalance_magnitude : array
Array with the unbalance magnitude. The unit is kg.m.
unbalance_phase : array
Array with the unbalance phase. The unit is rad.
torque : bool
Set it as True to consider the torque provided by the rubbing, by default False.
print_progress : bool
Set it True, to print the time iterations and the total time spent, by default False.
Returns
-------
A force to be applied on the shaft.
References
----------
.. [1] <NAME>., <NAME>., &<NAME>.(2002). Linear and Nonlinear Rotordynamics: A Modern Treatment with Applications, pp. 215-222 ..
Examples
--------
>>> from ross.defects.rubbing import rubbing_example
>>> probe1 = (14, 0)
>>> probe2 = (22, 0)
>>> response = rubbing_example()
>>> results = response.run_time_response()
>>> fig = response.plot_dfft(probe=[probe1, probe2], range_freq=[0, 100], yaxis_type="log")
>>> # fig.show()
"""
@check_units
def __init__(
    self,
    dt,
    tI,
    tF,
    deltaRUB,
    kRUB,
    cRUB,
    miRUB,
    posRUB,
    speed,
    unbalance_magnitude,
    unbalance_phase,
    torque=False,
    print_progress=False,
):
    """Store the rubbing-defect parameters; see the class docstring for meanings and units."""
    self.dt = dt
    self.tI = tI
    self.tF = tF
    self.deltaRUB = deltaRUB
    self.kRUB = kRUB
    self.cRUB = cRUB
    self.miRUB = miRUB
    self.posRUB = posRUB
    self.speed = speed
    # Constant-speed run: initial and final speeds are the same value.
    self.speedI = speed
    self.speedF = speed
    # Global indices of the rubbing node's degrees of freedom
    # (6 DoF per node in this rotor model).
    self.DoF = np.arange((self.posRUB * 6), (self.posRUB * 6 + 6))
    self.torque = torque
    self.unbalance_magnitude = unbalance_magnitude
    self.unbalance_phase = unbalance_phase
    self.print_progress = print_progress
    # Each unbalance magnitude entry must pair with a phase entry.
    if len(self.unbalance_magnitude) != len(self.unbalance_phase):
        raise Exception(
            "The unbalance magnitude vector and phase must have the same size!"
        )
def run(self, rotor):
"""Calculates the shaft angular position and the unbalance forces at X / Y directions.
Parameters
----------
rotor : ross.Rotor Object
6 DoF rotor model.
"""
self.rotor = rotor
self.n_disk = len(self.rotor.disk_elements)
if self.n_disk != len(self.unbalance_magnitude):
raise Exception("The number of discs and unbalances must agree!")
self.ndof = rotor.ndof
self.iteration = 0
self.radius = rotor.df_shaft.iloc[self.posRUB].o_d / 2
self.ndofd = np.zeros(len(self.rotor.disk_elements))
for ii in range(self.n_disk):
self.ndofd[ii] = (self.rotor.disk_elements[ii].n) * 6
self.lambdat = 0.00001
# Faxial = 0
# TorqueI = 0
# TorqueF = 0
self.sA = (
self.speedI * np.exp(-self.lambdat * self.tF)
- self.speedF * np.exp(-self.lambdat * self.tI)
) / (np.exp(-self.lambdat * self.tF) - np.exp(-self.lambdat * self.tI))
self.sB = (self.speedF - self.speedI) / (
np.exp(-self.lambdat * self.tF) - np.exp(-self.lambdat * self.tI)
)
# sAT = (
# TorqueI * np.exp(-lambdat * self.tF) - TorqueF * np.exp(-lambdat * self.tI)
# ) / (np.exp(-lambdat * self.tF) - np.exp(-lambdat * self.tI))
# sBT = (TorqueF - TorqueI) / (
# np.exp(-lambdat * self.tF) - np.exp(-lambdat * self.tI)
# )
# self.SpeedV = sA + sB * np.exp(-lambdat * t)
# self.TorqueV = sAT + sBT * np.exp(-lambdat * t)
# self.AccelV = -lambdat * sB * np.exp(-lambdat * t)
# Determining the modal matrix
self.K = self.rotor.K(self.speed)
self.C = self.rotor.C(self.speed)
self.G = self.rotor.G()
self.M = self.rotor.M()
self.Kst = self.rotor.Kst()
V1, ModMat = scipy.linalg.eigh(
self.K,
self.M,
type=1,
turbo=False,
)
ModMat = ModMat[:, :12]
self.ModMat = ModMat
# Modal transformations
self.Mmodal = ((ModMat.T).dot(self.M)).dot(ModMat)
self.Cmodal = ((ModMat.T).dot(self.C)).dot(ModMat)
self.Gmodal = ((ModMat.T).dot(self.G)).dot(ModMat)
self.Kmodal = ((ModMat.T).dot(self.K)).dot(ModMat)
self.Kstmodal = ((ModMat.T).dot(self.Kst)).dot(ModMat)
y0 = np.zeros(24)
t_eval = np.arange(self.tI, self.tF + self.dt, self.dt)
# t_eval = np.arange(self.tI, self.tF, self.dt)
T = t_eval
self.angular_position = (
self.sA * T
- (self.sB / self.lambdat) * np.exp(-self.lambdat * T)
+ (self.sB / self.lambdat)
)
self.Omega = self.sA + self.sB * np.exp(-self.lambdat * T)
self.AccelV = -self.lambdat * self.sB * np.exp(-self.lambdat * T)
self.tetaUNB = np.zeros((len(self.unbalance_phase), len(self.angular_position)))
unbx = np.zeros(len(self.angular_position))
unby = np.zeros(len(self.angular_position))
FFunb = np.zeros((self.ndof, len(t_eval)))
self.forces_rub = np.zeros((self.ndof, len(t_eval)))
for ii in range(self.n_disk):
self.tetaUNB[ii, :] = (
self.angular_position + self.unbalance_phase[ii] + np.pi / 2
)
unbx = self.unbalance_magnitude[ii] * (self.AccelV) * (
np.cos(self.tetaUNB[ii, :])
) - self.unbalance_magnitude[ii] * ((self.Omega**2)) * (
np.sin(self.tetaUNB[ii, :])
)
unby = -self.unbalance_magnitude[ii] * (self.AccelV) * (
np.sin(self.tetaUNB[ii, :])
) - self.unbalance_magnitude[ii] * (self.Omega**2) * (
np.cos(self.tetaUNB[ii, :])
)
FFunb[int(self.ndofd[ii]), :] += unbx
FFunb[int(self.ndofd[ii] + 1), :] += unby
self.Funbmodal = (self.ModMat.T).dot(FFunb)
self.inv_Mmodal = np.linalg.pinv(self.Mmodal)
t1 = time.time()
x = Integrator(
self.tI,
y0,
self.tF,
self.dt,
self._equation_of_movement,
self.print_progress,
)
x = x.rk4()
t2 = time.time()
if self.print_progress:
print(f"Time spent: {t2-t1} s")
self.displacement = x[:12, :]
self.velocity = x[12:, :]
self.time_vector = t_eval
self.response = self.ModMat.dot(self.displacement)
    def _equation_of_movement(self, T, Y, i):
        """Calculates the displacement and velocity using state-space representation in the modal domain.

        Parameters
        ----------
        T : float
            Iteration time.
        Y : array
            Array of displacement and velocity, in the modal domain.
        i : int
            Iteration step.

        Returns
        -------
        new_Y : array
            Array of the new displacement and velocity, in the modal domain.
        """
        # First 12 entries are modal displacements, last 12 modal velocities.
        positions = Y[:12]
        velocity = Y[12:]  # velocity in space state
        # Back-project to physical coordinates to evaluate the rubbing force.
        positionsFis = self.ModMat.dot(positions)
        velocityFis = self.ModMat.dot(velocity)
        Frub, ft = self._rub(positionsFis, velocityFis, self.Omega[i])
        self.forces_rub[:, i] = ft
        # Project the physical rubbing force back into the modal basis.
        ftmodal = (self.ModMat.T).dot(ft)
        # proper equation of movement to be integrated in time
        # (vector .dot(inv_Mmodal) is valid because inv_Mmodal is symmetric)
        new_V_dot = (
            ftmodal
            + self.Funbmodal[:, i]
            - ((self.Cmodal + self.Gmodal * self.Omega[i])).dot(velocity)
            - ((self.Kmodal + self.Kstmodal * self.AccelV[i]).dot(positions))
        ).dot(self.inv_Mmodal)
        new_X_dot = velocity
        # Reassemble the state vector [displacements, velocities].
        new_Y = np.zeros(24)
        new_Y[:12] = new_X_dot
        new_Y[12:] = new_V_dot
        return new_Y
    def _rub(self, positionsFis, velocityFis, ang):
        """Evaluate the rubbing contact force vectors at the rubbing node.

        Parameters
        ----------
        positionsFis : array
            Physical displacements of all degrees of freedom.
        velocityFis : array
            Physical velocities of all degrees of freedom.
        ang : float
            Current rotational speed of the shaft.

        Returns
        -------
        tuple
            Result of ``_combine_forces`` applied to the stiffness, damping
            and friction force vectors.
        """
        self.F_k = np.zeros(self.ndof)
        self.F_c = np.zeros(self.ndof)
        self.F_f = np.zeros(self.ndof)
        # State vector: displacements followed by velocities.
        self.y = np.concatenate((positionsFis, velocityFis))
        ii = 0 + 6 * self.posRUB  # rubbing position
        self.radial_displ_node = np.sqrt(
            self.y[ii] ** 2 + self.y[ii + 1] ** 2
        )  # radial displacement
        self.radial_displ_vel_node = np.sqrt(
            self.y[ii + self.ndof] ** 2 + self.y[ii + 1 + self.ndof] ** 2
        )  # velocity
        # Angular position of the shaft center in the x-y plane.
        self.phi_angle = np.arctan2(self.y[ii + 1], self.y[ii])
        # Contact occurs only when the radial displacement exceeds the
        # clearance between shaft and housing.
        if self.radial_displ_node >= self.deltaRUB:
            self.F_k[ii] = self._stiffness_force(self.y[ii])
            self.F_k[ii + 1] = self._stiffness_force(self.y[ii + 1])
            self.F_c[ii] = self._damping_force(self.y[ii + self.ndof])
            self.F_c[ii + 1] = self._damping_force(self.y[ii + 1 + self.ndof])
            # Tangential velocity of the contact point.
            Vt = -self.y[ii + self.ndof + 1] * np.sin(self.phi_angle) + self.y[
                ii + self.ndof
            ] * np.cos(self.phi_angle)
            # The friction direction follows the sign of the relative sliding
            # velocity (Vt + ang * radius); no friction when it is zero.
            if Vt + ang * self.radius > 0:
                self.F_f[ii] = -self._tangential_force(self.F_k[ii], self.F_c[ii])
                self.F_f[ii + 1] = self._tangential_force(
                    self.F_k[ii + 1], self.F_c[ii + 1]
                )
                if self.torque:
                    self.F_f[ii + 5] = self._torque_force(
                        self.F_f[ii], self.F_f[ii + 1], self.y[ii]
                    )
            elif Vt + ang * self.radius < 0:
                self.F_f[ii] = self._tangential_force(self.F_k[ii], self.F_c[ii])
                self.F_f[ii + 1] = -self._tangential_force(
                    self.F_k[ii + 1], self.F_c[ii + 1]
                )
                if self.torque:
                    self.F_f[ii + 5] = self._torque_force(
                        self.F_f[ii], self.F_f[ii + 1], self.y[ii]
                    )
        return self._combine_forces(self.F_k, self.F_c, self.F_f)
def _stiffness_force(self, y):
"""Calculates the stiffness force
Parameters
----------
y : float
Displacement value.
Returns
-------
force : numpy.float64
Force magnitude.
"""
force = (
-self.kRUB
* (self.radial_displ_node - self.deltaRUB)
* y
/ abs(self.radial_displ_node)
)
return force
def _damping_force(self, y):
"""Calculates the damping force
Parameters
----------
y : float
Displacement value.
Returns
-------
force : numpy.float64
Force magnitude.
"""
force = (
-self.cRUB
* (self.radial_displ_vel_node)
* y
/ abs(self.radial_displ_vel_node)
)
return force
def _tangential_force(self, F_k, F_c):
"""Calculates the tangential force
Parameters
----------
y : float
Displacement value.
Returns
-------
force : numpy.float64
Force magnitude.
"""
force = self.miRUB * (abs(F_k + F_c))
return force
def _torque_force(self, F_f, F_fp, y):
"""Calculates the torque force
Parameters
----------
y : float
Displacement value.
Returns
-------
force : numpy.float64
Force magnitude.
"""
force = self.radius * (
np.sqrt(F_f**2 + F_fp**2) * y / abs(self.radial_displ_node)
)
return force
def _combine_forces(self, F_k, F_c, F_f):
"""Mounts the final force vector.
Parameters
----------
F_k : numpy.ndarray
Stiffness force vector.
F_c : numpy.ndarray
Damping force vector.
F_f : numpy.ndarray
Tangential force vector.
Returns
-------
Frub : numpy.ndarray
Final force vector for each degree of freedom.
FFrub : | |
# repository: tomatze/opendihu-webapp (10-100 stars)
from python_settings.python_settings import *
# each dict entry corresponds to a cpp-template
# each template-dict can have an ordered list with template_arguments (assuming there are no template_arguments if omitted)
# possible outer templates (runnables) have the key runnable set to True (assuming False if omitted)
# templates that are discretizable in time have discretizableInTime set to True (assuming False if omitted)
# "discretizableInTime" in template_arguments will get expanded to all classes, which have discretizableInTime == True
# templates that are a "TimeSteppingScheme" (e.g. all TimeSteppingScheme:: and OperatorSplitting::) have timeSteppingScheme set to True (assuming False if omitted)
# "timeSteppingScheme" in template_arguments will get expanded to all classes, which have timeSteppingScheme == True
# templates with optional template_arguments can have the key template_arguments_needed set to the minimal required argument count
# template_arguments_needed is assumed to be len(template_arguments) if template_arguments_needed is omitted
# e.g. in BasisFunction::LagrangeOfOrder and PrescribedValues
# the keyword "Integer" can be used in template_arguments where an integer is expected (e.g. in CellmlAdapter)
# lists of the form [ "Mesh::" ] get auto expanded to [ "Mesh::StructuredRegularFixedOfDimension", "Mesh::Str..", ... ]
# templates added so far:
# TODO add postprocessing Postprocessing::ParallelFiberEstimation
# Postprocessing::StreamlineTracer
# PreciceAdapter::ContractionDirichletBoundaryConditions
# PreciceAdapter::ContractionNeumannBoundaryConditions
# PreciceAdapter::PartitionedFibers
# PreciceAdapter::MuscleContraction MuscleContractionSolver FastMonodomainSolver
# SpatialDiscretization::HyperelasticitySolver
# Control::MultipleInstances
# Control::Coupling
# Control::LoadBalancing
# Control::MapDofs
# OperatorSplitting::
# CellmlAdapter
# ModelOrderReduction::POD
# ModelOrderReduction::LinearPart
# ModelOrderReduction::ExplicitEulerReduced
# ModelOrderReduction::ImplicitEulerReduced
# FunctionSpace::
# OutputWriter::OutputSurface
# PrescribedValues
# TimeSteppingScheme::
# TimeSteppingScheme::DynamicHyperelasticitySolver
# TimeSteppingScheme::StaticBidomainSolver
# TimeSteppingScheme::MultidomainSolver
# TimeSteppingScheme::MultidomainWithFatSolver
# TimeSteppingScheme::QuasiStaticNonlinearElasticitySolverFebio
# TimeSteppingScheme::NonlinearElasticitySolverFebio
# TimeSteppingScheme::QuasiStaticLinearElasticitySolver
# TimeSteppingScheme::QuasiStaticNonlinearElasticitySolverChaste
# SpatialDiscretization::FiniteElementMethod
# Mesh::
# BasisFunction::
# Quadrature::
# Equation::
# Dummy
# Settings entries shared by every PETSc solver block (linear and nonlinear).
solver_common = [
    SettingsDictEntry("solverType", '"gmres"', 'the KSPType of the solver, i.e. which solver to use', 'solver.html#solvertype'),
    SettingsDictEntry("preconditionerType", '"none"', 'the preconditioner type of PETSc to use', 'solver.html#preconditionertype'),
    SettingsDictEntry("relativeTolerance", '1e-5', 'the relative tolerance of the residuum after which the solver is converged', 'solver.html#relativetolerance'),
    # undocumented
    SettingsDictEntry("absoluteTolerance", '0', 'absolute tolerance of the residual of the linear solver'),
    SettingsDictEntry("maxIterations", '1e4', 'the maximum number of iterations after which the solver aborts and states divergence', 'solver.html#maxiterations'),
    SettingsDictEntry("dumpFilename", '""',
                      "if this is set to a non-empty string, the system matrix and right hand side vector will be dumped before every linear solve", 'solver.html#dumpfilename'),
    SettingsDictEntry("dumpFormat", '"default"', 'the format in which to export/dump data of matrices and vectors in the file', 'solver.html#dumpformat')
]
# Linear (KSP) solver: only the common settings.
solver_linear = SettingsSolver(
    solver_common
)
# Nonlinear (SNES) solver: common settings plus Newton-specific options.
solver_nonlinear = SettingsSolver(
    solver_common + [
        SettingsDictEntry("snesMaxFunctionEvaluations", '1e3', 'maximum number of function iterations', 'hyperelasticity.html#python-settings'),
        SettingsDictEntry("snesMaxIterations", '50', 'maximum number of iterations in the nonlinear solver', 'hyperelasticity.html#python-settings'),
        SettingsDictEntry("snesRelativeTolerance", '1e-10', 'relative tolerance of the nonlinear solver', 'hyperelasticity.html#python-settings'),
        SettingsDictEntry("snesLineSearchType", '"l2"', 'type of linesearch, possible values: "bt" "nleqerr" "basic" "l2" "cp" "ncglinear"', 'hyperelasticity.html#python-settings'),
        SettingsDictEntry("snesAbsoluteTolerance", '1e-10', 'absolute tolerance of the nonlinear solver', 'hyperelasticity.html#python-settings'),
        SettingsDictEntry("snesRebuildJacobianFrequency", '5', 'how often the jacobian should be recomputed, -1 indicates NEVER rebuild, 1 means rebuild every time the Jacobian is computed within a single nonlinear solve, 2 means every second time the Jacobian is built etc. -2 means rebuild at next chance but then never again', 'hyperelasticity.html#python-settings'),
    ]
)
# Reusable "OutputWriter" entry: a list of writer configurations that can be
# attached to any solver settings block below.
outputwriter = SettingsDictEntry("OutputWriter", SettingsList([
    SettingsListEntry(
        SettingsDict([
            SettingsDictEntry("format", '"Paraview"', 'one of Paraview, PythonFile, ExFile, MegaMol, PythonCallback', 'output_writer.html#outputwriter'),
            SettingsDictEntry("filename", '"out/filename"', 'the file name of the output file to write', 'output_writer.html#filename'),
            SettingsDictEntry("outputInterval", '1', 'the interval in which timesteps an actual file should be written', 'output_writer.html#outputinterval'),
            SettingsDictEntry("fileNumbering", '"incremental"', 'incremental or timeStepIndex', 'output_writer.html#filenumbering'),
            SettingsDictEntry("binary", 'True', 'whether to produce binary data files', 'output_writer.html#binary'),
            SettingsDictEntry("fixedFormat", 'True', None, 'output_writer.html#fixedformat'),
            SettingsDictEntry("combineFiles", 'False', None, 'output_writer.html#combinefiles'),
            # optional, format-specific entries (empty default selection)
            SettingsChoice([],[
                SettingsDictEntry("onlyNodalValues", 'True', None, None),
            ]),
            SettingsChoice([],[
                SettingsDictEntry("sphereSize", '"0.005*0.005*0.01"', 'ExFile: defines how spheres, used to visualize nodes, will be rendered. The format is x*y*z', 'output_writer.html#exfile'),
            ]),
            SettingsChoice([],[
                SettingsDictEntry("callback", 'callback', 'PythonCallback: python-function to call back to', 'output_writer.html#pythoncallback'),
            ]),
        ])
    )
]), 'specifies a list of output writers that can be used to output geometry field variables in various formats', 'output_writer.html#outputwriter')
# Time-step controls shared by all timestepping schemes.
timestepping_schemes_common = [
    # either the number of time steps or the time step width is given
    SettingsChoice([
        SettingsDictEntry("numberTimeSteps", '10', None, 'timestepping_schemes_ode.html#endtime-numbertimesteps-and-timestepwidth')
    ], [
        SettingsDictEntry("timeStepWidth", '0.001', None, 'timestepping_schemes_ode.html#endtime-numbertimesteps-and-timestepwidth')
    ]),
    SettingsChoice([], [
        SettingsDictEntry("logTimeStepWidthAsKey", '"timestep_width"', 'the time step width of this scheme will be stored under this key in logs/log.csv', 'timestepping_schemes_ode.html#logtimestepwidthaskey-lognumbertimestepsaskey-and-durationlogkey')
    ]),
    SettingsChoice([], [
        SettingsDictEntry("logNumberTimeStepsAsKey", '"timesteps_number"', 'the number of time steps of this scheme will be stored under this key in logs/log.csv', 'timestepping_schemes_ode.html#logtimestepwidthaskey-lognumbertimestepsaskey-and-durationlogkey')
    ]),
    SettingsChoice([], [
        SettingsDictEntry("durationLogKey", '"duration"', 'the total time that has passed for the computation will be stored under this key in logs/log.csv', 'timestepping_schemes_ode.html#logtimestepwidthaskey-lognumbertimestepsaskey-and-durationlogkey')
    ]),
    SettingsDictEntry("timeStepOutputInterval", '100', 'a positive integer value that specifies the interval in which timesteps are printed to standard output', 'timestepping_schemes_ode.html#timestepoutputinterval'),
]
# Settings shared by ODE timestepping schemes; extends the common controls.
timestepping_schemes_ode_common = timestepping_schemes_common + [
    SettingsDictEntry("endTime", '1', 'run() method performs the simulation for t∈[0,endTime]', 'timestepping_schemes_ode.html#endtime-numbertimesteps-and-timestepwidth'),
    SettingsDictEntry("initialValues", '[]', 'list of double values to use as initial values. The solution is set to these values upon initialization', 'timestepping_schemes_ode.html#initialvalues'),
    SettingsDictEntry("inputMeshIsGlobal", 'True', 'the degrees of freedom are interpreted in global numbering, if inputMeshIsGlobal is set to True, or in local numbering of the process, if inputMeshIsGlobal is False', 'timestepping_schemes_ode.html#dirichletboundaryconditions-and-inputmeshisglobal'),
    SettingsDictEntry("dirichletBoundaryConditions", '{}', 'dictionary with degrees of freedom as key and the value as value (i.e. {"dof": value, ...}', 'timestepping_schemes_ode.html#dirichletboundaryconditions-and-inputmeshisglobal'),
    SettingsDictEntry("dirichletOutputFilename", 'None', 'write Dirichlet Boundary conditions to .vtp file', 'boundary_conditions.html#dirichlet-output-filename'),
    SettingsDictEntry("checkForNanInf", 'False', 'check if the solution vector contains nan or +/-inf values, if yes, an error is printed. This is a time-consuming check'),
    # optional output writers
    SettingsChoice([], [
        outputwriter
    ]),
    # optional additional field variables exposed as connector slots
    SettingsChoice([], [
        SettingsDictEntry("nAdditionalFieldVariables", '0', 'number of additional field variables that will be created', 'timestepping_schemes_ode.html#nadditionalfieldvariables'),
        SettingsDictEntry("additionalSlotNames", '["connector_slot_1"]', 'list of strings, names for of connector slots for the additional field variables', 'timestepping_schemes_ode.html#additionalslotnames')
    ])
]
# Settings for operator-splitting schemes: two nested sub-solvers (Term1/Term2)
# with explicit connector-slot wiring between them.
operator_splitting_common = timestepping_schemes_ode_common + [
    SettingsDictEntry("connectedSlotsTerm1To2", '[0]', 'list of slots of term 2 that are connected to the slots of term 1', 'output_connector_slots.html#connectedslotsterm1to2-and-connectedslotsterm2to1'),
    SettingsDictEntry("connectedSlotsTerm2To1", '[0]', 'list of slots of term 1 that are connected to the slots of term 2', 'output_connector_slots.html#connectedslotsterm1to2-and-connectedslotsterm2to1'),
    # the child solvers are substituted for the placeholders at generation time
    SettingsDictEntry("Term1", SettingsDict([
        SettingsChildPlaceholder(0)
    ])),
    SettingsDictEntry("Term2", SettingsDict([
        SettingsChildPlaceholder(1)
    ]))
]
# Settings shared by the multidomain solvers (TimeSteppingScheme::MultidomainSolver
# and MultidomainWithFatSolver); extends the ODE timestepping settings.
multidomain_solver_common = timestepping_schemes_ode_common + [
    SettingsDictEntry("nCompartments", '1', 'number of compartments', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("compartmentRelativeFactors", '[]', 'list of lists of (the factors for all dofs), if "inputIsGlobal": True, this contains the global dofs', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("inputIsGlobal", 'True', 'if values and dofs correspond to the global numbering', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("am", '500.0', 'am parameter for every motor unit (ration of surface to volume of fibers)', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("cm", '0.58', 'cm parameter for every motor unit (capacitance of the cellular membrane)', 'multidomain_solver.html#python-settings'),
    # TODO maybe add a special SettingsSolver()
    SettingsDictEntry("solverName", '"multidomainLinearSolver"', 'reference to the solver used for the global linear system of the multidomain eq.', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("alternativeSolverName", '"multidomainAlternativeLinearSolver"', 'reference to the alternative solver, which is used when the normal solver diverges', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("subSolverType", '"gamg"', 'sub solver when block jacobi preconditioner is used', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("subPreconditionerType", '"none"', 'sub preconditioner when block jacobi preconditioner is used', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("gamgType", '"classical"', 'one of agg, geo, or classical', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("cycleType", '"cycleV"', 'either cycleV or cycleW', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("nLevels", '25', None, 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("hypreOptions", '"-pc_hypre_boomeramg_strong_threshold 0.7"', 'additional options if a hypre preconditioner is selected', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("theta", '0.5', 'weighting factor of implicit term in Crank-Nicolson scheme, 0.5 gives the classic, 2nd-order Crank-Nicolson scheme, 1.0 gives implicit euler', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("useLumpedMassMatrix", 'True', 'which formulation to use, the formulation with lumped mass matrix (True) is more stable but approximative, the other formulation (False) is exact but needs more iterations', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("useSymmetricPreconditionerMatrix", 'True', 'if the diagonal blocks of the system matrix should be used as preconditioner matrix', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("initialGuessNonzero", 'True', 'if the initial guess for the 3D system should be set as the solution of the previous timestep, this only makes sense for iterative solvers', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("enableFatComputation", 'True', 'disabling the computation of the fat layer is only for debugging and speeds up computation. If set to False, the respective matrix is set to the identity', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("showLinearSolverOutput", 'True', 'if convergence information of the linear solver in every timestep should be printed, this is a lot of output for fast computations', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("updateSystemMatrixEveryTimestep", 'False', 'if this multidomain solver will update the system matrix in every first timestep, us this only if the geometry changed, e.g. by contraction', 'multidomain_solver.html#python-settings'),
    SettingsDictEntry("recreateLinearSolverInterval", '0', 'how often the Petsc KSP object (linear solver) should be deleted and recreated. This is to remedy memory leaks in Petsc\'s implementation of some solvers. 0 means disabled.', 'multidomain_solver.html#python-settings')
]
hyperelasticity_common = [
SettingsDictEntry("materialParameters", '[]', 'list of material parameters, must match the number of parameters in the material', 'hyperelasticity.html#materialparameters'),
SettingsDictEntry("density", '1.0', 'density of the material', 'hyperelasticity.html#python-settings'),
SettingsDictEntry("displacementsScalingFactor", '1.0', 'scaling factor for displacements, only set to sth. other than 1 only to increase visual appearance for very small displacements', 'hyperelasticity.html#python-settings'),
SettingsDictEntry("residualNormLogFilename", '"residual_norm.txt"', 'log file where residual norm | |
self.tree_frame.columnconfigure(1, weight=0)
self.tree_frame.grid(
row=1, column=0, sticky=tk.NSEW, padx=(10, 5), pady=10
)
self.tree_files = my_widgets.My_Treeview(
master=self.tree_frame, columns=('Test', 'Ignore', 'Type', 'ID'),
selectmode='extended'
)
self.tree_files.bind('<Double-Button-1>', self.__ignore_handler)
self.tree_files.heading('#0', text='Test', anchor=tk.CENTER)
self.tree_files.heading('#1', text='Ignore', anchor=tk.CENTER)
self.tree_files.heading('#2', text='Type', anchor=tk.CENTER)
self.tree_files.column('#0', stretch=tk.YES, minwidth=50)
self.tree_files.column('#1', stretch=tk.YES, minwidth=50)
self.tree_files.column('#2', stretch=tk.YES, minwidth=50)
self.tree_files.grid(row=0, column=0, sticky=tk.NSEW)
self.scrollbar_vert = my_widgets.My_Scrollbar(
master=self.tree_frame, command=self.tree_files.yview,
orient=tk.VERTICAL, cursor='sb_v_double_arrow'
)
self.scrollbar_vert.grid(row=0, column=1, sticky=tk.NS, pady=1)
self.scrollbar_hor = my_widgets.My_Scrollbar(
master=self.tree_frame, command=self.tree_files.xview,
orient=tk.HORIZONTAL, cursor='sb_h_double_arrow'
)
self.scrollbar_hor.grid(row=1, column=0, sticky=tk.EW, padx=1)
self.tree_files.configure(yscrollcommand=self.scrollbar_vert.set)
self.tree_files.configure(xscrollcommand=self.scrollbar_hor.set)
self.buttons_frame = my_widgets.My_Frame(master=self)
self.buttons_frame.rowconfigure(0, weight=1)
self.buttons_frame.columnconfigure(0, weight=0)
self.buttons_frame.columnconfigure(1, weight=0)
self.buttons_frame.columnconfigure(2, weight=0)
self.buttons_frame.columnconfigure(3, weight=0)
self.buttons_frame.columnconfigure(4, weight=0)
self.buttons_frame.grid(row=0, column=0, sticky=tk.EW)
self.label_projects_selector = my_widgets.My_Label(
master=self.buttons_frame, text='Choose project: '
)
self.label_projects_selector.grid(
row=0, column=0, sticky=tk.NSEW, padx=10
)
self.combobox_projects = my_widgets.My_Combobox(
master=self.buttons_frame
)
self.combobox_projects.bind(
'<<ComboboxSelected>>', self.__on_combobox_selected
)
self.combobox_projects.grid(
row=0, column=1, sticky=tk.EW, padx=10, pady=(10, 5)
)
self.multithreading_var = tk.BooleanVar(master=self)
self.checkbutton_multithreading_testing = my_widgets.My_Checkbutton(
master=self.buttons_frame, text='Multithreading tests',
variable=self.multithreading_var
)
self.checkbutton_multithreading_testing.grid(
row=0, column=2, sticky=tk.NSEW, padx=(5, 10), pady=(5, 10)
)
self.separator = my_widgets.My_Separator(master=self.buttons_frame)
self.separator.grid(row=0, column=3, sticky=tk.NSEW)
self.button_start_test = my_widgets.My_Button(
master=self.buttons_frame, text='Start tests',
command=self.__start_testing
)
self.button_start_test.grid(
row=0, column=4, sticky=tk.NSEW, padx=(5, 10), pady=5
)
def __ignore_handler(self, event):
try:
item = self.tree_files.selection()[0]
self.tree_files.selection_remove(item)
except IndexError as ex:
self.master.logger.debug(
'Cannot select this item. {:s}'.format(str(ex))
)
else:
ignore = self.tree_files.item(item)['values'][0]
if(ignore == 'True'):
ignore = False
else:
ignore = True
self.__ignore_recursively(item=item, ignore=ignore)
self.__update_tree()
def __ignore_recursively(self, item, ignore):
_type = self.tree_files.item(item)['values'][1]
if(ignore is False):
self.tree_files.set_fg_not_ignore(key=item)
self.tree_files.item(item=item, values=(ignore, _type))
else:
self.tree_files.set_fg_ignore(key=item)
self.tree_files.item(item=item, values=(ignore, _type))
self.tree_files.item(item=item, values=(ignore, _type))
grand_items = self.tree_files.get_children(item=item)
for grand_item in grand_items:
self.__ignore_recursively(item=grand_item, ignore=ignore)
def __clear_tree(self):
self.tree_files.delete(*self.tree_files.get_children())
    @loading_cursor
    def __on_combobox_selected(self, event):
        """Load the project chosen in the combobox and rebuild the tree."""
        # Drop any previously displayed project before loading the new one.
        self.__clear_tree()
        self.master.logic.clear_testing_logic()
        project = self.master.logic.files_creator.load_project(
            project_name=self.combobox_projects.get()
        )
        self.master.logger.info('Loading project {:s}'.format(project['name']))
        self.__fill_tree(project=project)
        self.__update_tree()
    def __fill_tree(self, project):
        """Populate the treeview with the modules/classes/methods of *project*."""
        self.master.logger.info('Files and tests in project:')
        self.__add_modules(project=project, root=project['name'])
    def __add_modules(self, project, root):
        """Insert every test module of *project* as a top-level tree item.

        Item ids are '*'-joined paths of the form
        project*file-path*module, extended by the class/method helpers.
        """
        for test in project['tests']:
            _module = self.master.logic.detector.load_module(
                module_path=test['path']
            )
            self.master.logger.info('-{:s}'.format(_module.__name__))
            new_root = '{:s}*{:s}*{:s}'.format(
                root, test['path'], _module.__name__
            )
            # Remember the loaded module so the test run can retrieve it.
            self.master.logic.testing_modules_register[new_root] = _module
            self.tree_files.insert(
                parent='', index=tk.END, iid=new_root, text=_module.__name__,
                values=(False, 'Module'), tags=(new_root, )
            )
            self.__add_classes(_module=_module, root=new_root)
    def __add_classes(self, _module, root):
        """Insert the test classes of *_module* as children of the *root* item."""
        _classes = self.master.logic.detector.get_module_classes(
            _module=_module
        )
        for _class in _classes:
            # Only classes recognized as test classes are shown.
            if(self.master.logic.detector.is_test_class(_class=_class)):
                self.master.logger.info('--{:s}'.format(_class.__name__))
                new_root = '{:s}*{:s}'.format(root, _class.__name__)
                self.master.logic.testing_classes_register[new_root] = _class
                self.tree_files.insert(
                    parent=root, index=tk.END, iid=new_root,
                    text=_class.__name__, values=(False, 'Class'),
                    tags=(new_root, )
                )
                self.__add_methods(_class=_class, root=new_root)
    def __add_methods(self, _class, root):
        """Insert the test methods of *_class* as children of the *root* item."""
        _methods = self.master.logic.detector.get_class_methods(_class=_class)
        for _method in _methods:
            # Only methods recognized as test methods are shown.
            if(self.master.logic.detector.is_test_method(_method=_method)):
                self.master.logger.info('---{:s}'.format(_method.__name__))
                new_root = '{:s}*{:s}'.format(root, _method.__name__)
                self.master.logic.testing_methods_register[new_root] = _method
                self.tree_files.insert(
                    parent=root, index=tk.END, iid=new_root,
                    text=_method.__name__, values=(False, 'Method'),
                    tags=(new_root, )
                )
def __prepare_register_to_test(self, register):
objects_to_test = dict()
for key, obj in register.items():
if(self.tree_files.item(key)['values'][0] == 'False'):
objects_to_test[key] = obj
return objects_to_test
    def __start_testing(self):
        """Collect the non-ignored classes/methods and launch the test run."""
        # Reload the project files so the run uses up-to-date sources.
        self.master.logic.reload_project_files(
            project_name=self.combobox_projects.get()
        )
        classes_to_test = self.__prepare_register_to_test(
            register=self.master.logic.testing_classes_register
        )
        # Only the class objects are needed from here on, not their keys.
        classes_to_test = [val for key, val in classes_to_test.items()]
        methods_to_test = self.__prepare_register_to_test(
            register=self.master.logic.testing_methods_register
        )
        # prepare_tests combines classes and methods into runnable test cases.
        classes_to_test = self.master.logic.prepare_tests(
            _classes=classes_to_test, _methods=methods_to_test
        )
        self.master.logic.read_time_and_date()
        multithreading = self.multithreading_var.get()
        self.master.logic.start_testing(
            test_cases=classes_to_test, multithreading=multithreading
        )
        self.master.logic.copy_modules_and_classes()
        # Switch the UI to the results view.
        self.master.frame_main_menu.change_frame_results()
    def hide_frame(self):
        """Stop UI updates and discard the currently loaded project state."""
        self.__update = False
        self.__clear_tree()
        self.master.logic.clear_testing_logic()
    def show_frame(self):
        """Prepare the frame for display: refresh the project list and reset state."""
        self.combobox_projects.configure(
            values=self.master.logic.files_creator.load_projects_names()
        )
        self.__update_start_button()
        self.__update = True
        self.__listen_to_checkbuttons()
        # Clear the selection so no stale project remains selected.
        self.combobox_projects.set('')
class Frame_Results(my_widgets.My_Label_Frame_Main):
    def __init__(self, *args, **kwargs):
        """Create the RESULTS frame and initialize the result counters."""
        my_widgets.My_Label_Frame_Main.__init__(
            self, *args, text='RESULTS', **kwargs
        )
        # Total number of expected tests; set externally before the run.
        self.tests_count = None
        # Number of results consumed from the queue so far.
        self.counter = 0
        self.__partial = None
        # Counters
        self.__tests_passed = 0
        self.__tests_errors = 0
        self.__tests_failures = 0
        self.master.logger.info(
            'Creating {:s}...'.format(self.__class__.__name__)
        )
    def __listen_to_test_process(self):
        """Poll the result queue every 20 ms until all tests are reported."""
        if(not self.master.logic.is_queue_empty()):
            test_result = self.master.logic.queue_get()
            self.__add_method_result(result_data=test_result)
            self.counter += 1
            self.after(ms=20, func=self.__listen_to_test_process)
        else:
            if(self.counter < self.tests_count):
                # Queue is empty but tests are still running; keep polling.
                self.after(ms=20, func=self.__listen_to_test_process)
            else:
                # All results received: finalize the view and persist results.
                self.counter = 0
                self.__open_items_with_errors(
                    top_level_parents=self.tree_files.get_children()
                )
                self.master.logic.save_results()
                self.__sum_up_tests()
    def __sum_up_tests(self):
        """Show a message box summarizing passed/error/failure counts."""
        message = 'Testing complete [{:n} tests]\n'.format(self.tests_count)
        message += 'Tests passed: {:n}\n'.format(self.__tests_passed)
        message += 'Tests errors: {:n}\n'.format(self.__tests_errors)
        message += 'Tests failures: {:n}'.format(self.__tests_failures)
        messagebox.showinfo(title='TESTING RESULTS', message=message)
def _set_parameters(self):
self.rowconfigure(0, weight=0)
self.rowconfigure(1, weight=1)
self.rowconfigure(2, weight=0)
self.columnconfigure(0, weight=0)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=0)
    def _create_widgets(self):
        """Build the result widgets: header label, results tree, scrollbars, progressbar."""
        self.label_project = my_widgets.My_Label(master=self)
        self.label_project.grid(row=0, column=0, columnspan=3, sticky=tk.NSEW)
        # Tree displaying one row per test method with its outcome.
        self.tree_files = my_widgets.My_Treeview(
            master=self, columns=('Test', 'Type', 'Result', 'ID'),
            selectmode='extended'
        )
        # Double-click opens the detailed result window for a method.
        self.tree_files.bind('<Double-Button-1>', self.__show_test_result)
        self.tree_files.heading('#0', text='Test', anchor=tk.CENTER)
        self.tree_files.heading('#1', text='Type', anchor=tk.CENTER)
        self.tree_files.heading('#2', text='Result', anchor=tk.CENTER)
        self.tree_files.column('#0', stretch=tk.YES, minwidth=50)
        self.tree_files.column('#1', stretch=tk.YES, minwidth=50)
        self.tree_files.column('#2', stretch=tk.YES, minwidth=50)
        self.tree_files.grid(row=1, column=1, sticky=tk.NSEW)
        self.scrollbar_vert = my_widgets.My_Scrollbar(
            master=self, command=self.tree_files.yview,
            orient=tk.VERTICAL, cursor='sb_v_double_arrow'
        )
        self.scrollbar_vert.grid(row=1, column=2, sticky=tk.NS, pady=1)
        self.scrollbar_hor = my_widgets.My_Scrollbar(
            master=self, command=self.tree_files.xview,
            orient=tk.HORIZONTAL, cursor='sb_h_double_arrow'
        )
        self.scrollbar_hor.grid(row=2, column=1, sticky=tk.EW, padx=1)
        # Wire the scrollbars to the treeview viewport.
        self.tree_files.configure(yscrollcommand=self.scrollbar_vert.set)
        self.tree_files.configure(xscrollcommand=self.scrollbar_hor.set)
        # Progressbar advances by one step per received test result.
        self.progressbar = my_widgets.My_Progressbar(master=self, length=2)
        self.progressbar.grid(
            row=1, column=0, rowspan=2, sticky=tk.NS, padx=2, pady=2
        )
    def __show_test_result(self, event):
        """Open a detail window for the double-clicked test method.

        Bound to <Double-Button-1> on the tree.  Clicks that land outside a
        row are ignored (IndexError); clicks on module/class rows have no
        entry in methods_register (KeyError) and only log an info message.
        """
        try:
            item = self.tree_files.selection()[0]
            # Deselect right away so the row does not stay highlighted.
            self.tree_files.selection_remove(item)
        except IndexError as ex:
            self.master.logger.debug(
                'Cannot select this item. {:s}'.format(str(ex))
            )
        else:
            try:
                test_info = self.master.logic.methods_register[item]
            except KeyError as ex:
                # Item is a module or class node, not a test method.
                self.master.logger.info('Cannot choose anything than method')
            else:
                my_windows.Test_Info(master=self, method_info=test_info)
    def __add_modules(self):
        """Insert one expanded top-level tree node per project module, then
        populate each module with its classes."""
        for key in self.master.logic.modules_keys:
            name = self.master.logic.get_name_from_key(key=key)
            self.tree_files.insert(
                parent='', index=tk.END, iid=key, text=name,
                values=('Module', ''), tags=(key, ), open=True
            )
            self.__add_classes(root=key)
def __add_classes(self, root):
for key in self.master.logic.classes_keys:
if(root in key):
name = self.master.logic.get_name_from_key(key=key)
self.tree_files.insert(
parent=root, index=tk.END, iid=key, text=name,
values=('Class', ''), tags=(key, )
)
def __add_method_result(self, result_data):
for key in self.master.logic.methods_register:
result_key = '{:s}*{:s}*{:s}'.format(
result_data['module'], result_data['class'],
result_data['method']
)
if(result_key == key):
self.master.logic.methods_register[key] = result_data
new_root = '{:s}*{:s}'.format(
result_data['module'], result_data['class']
)
test_result = self.__get_result(
key=key, result=result_data['result'],
error=result_data['error_text'],
failure=result_data['failure_text']
)
self.tree_files.insert(
parent=new_root, index=tk.END, iid=key,
text=result_data['method'], values=('Method', test_result),
tags=(key, )
)
self.__change_color_due_result(
key=key, result=result_data['result'],
error=result_data['error_text'],
failure=result_data['failure_text']
)
self.progressbar.step(amount=1)
self.tree_files.update()
def __get_result(self, key, result, error=None, failure=None):
if(result is True):
return 'PASS'
else:
if(error is not None):
return 'ERROR'
elif(failure is not None):
return 'FAILURE'
def __change_color_due_result(self, key, result, error=None, failure=None):
if(result is True):
self.tree_files.set_positive(key=key)
self.__tests_passed += 1
else:
if(error is not None):
self.tree_files.set_error(key=key)
self.__tests_errors += 1
elif(failure is not None):
self.tree_files.set_failure(key=key)
self.__tests_failures += 1
def __open_items_with_errors(self, top_level_parents):
for parent in top_level_parents:
self.__start_opening(parent=parent)
    def __start_opening(self, parent):
        """Recursively expand *parent* if any direct child failed or errored.

        As soon as one child reports FAILURE or ERROR the parent is opened
        and the scan stops; otherwise the search descends into each child.
        """
        children = self.tree_files.get_children(parent)
        for child in children:
            # values[1] is the 'Result' column ('PASS'/'ERROR'/'FAILURE').
            result = self.tree_files.item(child)['values'][1]
            if(result == 'FAILURE' or result == 'ERROR'):
                self.tree_files.item(parent, open=True)
                break
            else:
                self.__start_opening(parent=child)
def __clear_tree(self):
self.tree_files.delete(*self.tree_files.get_children())
    def hide_frame(self):
        """Reset all run state before the frame is hidden.

        Clears the counters, the tree widget, and the logic layer's stored
        results so the next run starts from a clean slate.
        """
        self.tests_count = None
        self.counter = 0
        self.__partial = None
        self.__tests_passed = 0
        self.__tests_errors = 0
        self.__tests_failures = 0
        self.__clear_tree()
        # Drop any result state held by the logic layer as well.
        self.master.logic.clear_result_logic()
def show_frame(self):
try:
self.tests_count = self.master.logic.tests_amount
self.progressbar.configure(maximum=self.tests_count)
project_name = self.master.logic.get_project_name()
project_name = 'Project \'{:s}\' results:'.format(project_name)
self.label_project.configure(text=project_name)
self.__add_modules()
self.__listen_to_test_process()
except:
self.master.logger.info('Opening results without project...')
class Inner_Frame_Plot(my_widgets.My_Frame_Main):
    """Frame embedding a matplotlib figure plus its navigation toolbar."""
    def __init__(self, *args, **kwargs):
        self.fig = None
        self.ax = None
        # Build the figure first: the parent __init__ calls
        # _create_widgets(), which needs self.fig to exist.
        self.create_default_plot()
        my_widgets.My_Frame_Main.__init__(self, *args, **kwargs)
    def _set_parameters(self):
        pass
    def _create_widgets(self):
        """Embed self.fig into a Tk canvas and attach the toolbar."""
        self.canvas = FigureCanvasTkAgg(self.fig, master=self)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
        self.toolbar = NavigationToolbar2Tk(canvas=self.canvas, window=self)
        self.toolbar.config(background=my_widgets.toolbar_bg)
        self.toolbar.update()
        # Re-pack after the toolbar so the canvas stays above it.
        self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    def create_default_plot(self):
        """Create the placeholder cubic curve shown before any real data."""
        self.fig, self.ax = plt.subplots()
        self.fig.set_facecolor(my_widgets.fig_color)
        self.ax.set_facecolor(my_widgets.ax_color)
        xs = [i for i in range(-10, 11)]
        ys = [i**3 for i in range(-10, 11)]
        self.ax.plot(xs, ys)
        self.ax.set(xlabel='date', ylabel='results', title='Default plot')
        self.ax.grid()
    def add_plot(self, plot_info):
        """Replace the current figure with positive/negative result curves.

        *plot_info* maps a date string to a (positives, negatives) pair.
        """
        # Close the previous figure before replacing it: pyplot keeps a
        # global reference to every figure it creates, so dropping self.fig
        # without plt.close() leaks a figure on every call.
        if(self.fig is not None):
            plt.close(self.fig)
        self.fig, self.ax = plt.subplots()
        self.fig.set_facecolor(my_widgets.fig_color)
        self.ax.set_facecolor(my_widgets.ax_color)
        positives, negatives, dates = list(), list(), list()
        for date, results in plot_info.items():
            dates.append(
                self.master.master.logic.parse_str_to_date(date_str=date)
            )
            positives.append(results[0])
            negatives.append(results[1])
        self.ax.plot(dates, positives, '.-', label='Positive')
        self.ax.plot(dates, negatives, '.-', label='Negative')
        self.ax.legend()
        self.ax.set(xlabel='date', ylabel='results')
        self.ax.grid()
        # Tear down the old canvas/toolbar and rebuild them for the new fig.
        self.canvas.get_tk_widget().destroy()
        self.toolbar.destroy()
        self._create_widgets()
class Frame_Analysis(my_widgets.My_Label_Frame_Main):
def __init__(self, *args, **kwargs):
my_widgets.My_Label_Frame_Main.__init__(
self, *args, text='ANALYSIS', **kwargs
)
self.project = None
self.modules = list()
self.test_cases = list()
self.tests = list()
self.__update = False
self.master.logger.info(
'Creating {:s}...'.format(self.__class__.__name__)
)
def __listen_to_comboboxes(self):
sub_comboboxes = (
self.combobox_project.get(),
self.combobox_module.get(),
self.combobox_test_case.get(),
self.combobox_test.get(),
)
if(any(sub_comboboxes)):
self.button_generate.enable()
else:
self.button_generate.disable()
if(self.__update is True):
self.after(20, self.__listen_to_comboboxes)
def _set_parameters(self):
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=0)
self.columnconfigure(1, weight=1)
    def _create_widgets(self):
        """Build the analysis view: a navigation column of labelled
        comboboxes (project / module / test case / test) plus a Generate
        button on the left, and the plot frame on the right."""
        self.frame_navigation = my_widgets.My_Frame(master=self)
        self.frame_navigation.columnconfigure(0, weight=1)
        # All ten navigation rows keep their natural height.
        self.frame_navigation.rowconfigure(0, weight=0)
        self.frame_navigation.rowconfigure(1, weight=0)
        self.frame_navigation.rowconfigure(2, weight=0)
        self.frame_navigation.rowconfigure(3, weight=0)
        self.frame_navigation.rowconfigure(4, weight=0)
        self.frame_navigation.rowconfigure(5, weight=0)
        self.frame_navigation.rowconfigure(6, weight=0)
        self.frame_navigation.rowconfigure(7, weight=0)
        self.frame_navigation.rowconfigure(8, weight=0)
        self.frame_navigation.rowconfigure(9, weight=0)
        self.frame_navigation.grid(row=0, column=0, sticky=tk.NSEW)
        # Project selector.
        self.label_project = my_widgets.My_Label(
            master=self.frame_navigation, text='Project'
        )
        self.label_project.grid(
            row=0, column=0, sticky=tk.NSEW, padx=10, pady=(10, 0)
        )
        self.combobox_project = my_widgets.My_Combobox(
            master=self.frame_navigation
        )
        self.combobox_project.bind('<<ComboboxSelected>>', self.__load_project)
        self.combobox_project.grid(
            row=1, column=0, sticky=tk.EW, padx=10, pady=(0, 10)
        )
        # Module selector — disabled until a project is chosen.
        self.label_module = my_widgets.My_Label(
            master=self.frame_navigation, text='Module'
        )
        self.label_module.grid(
            row=2, column=0, sticky=tk.NSEW, padx=10, pady=(10, 0)
        )
        self.combobox_module = my_widgets.My_Combobox(
            master=self.frame_navigation
        )
        self.combobox_module.bind('<<ComboboxSelected>>', self.__load_module)
        self.combobox_module.disable()
        self.combobox_module.grid(
            row=3, column=0, sticky=tk.EW, padx=10, pady=(0, 10)
        )
        # Test-case selector — disabled until a module is chosen.
        self.label_test_case = my_widgets.My_Label(
            master=self.frame_navigation, text='Test case'
        )
        self.label_test_case.grid(
            row=4, column=0, sticky=tk.NSEW, padx=10, pady=(10, 0)
        )
        self.combobox_test_case = my_widgets.My_Combobox(
            master=self.frame_navigation
        )
        self.combobox_test_case.bind(
            '<<ComboboxSelected>>', self.__load_test_case
        )
        self.combobox_test_case.disable()
        self.combobox_test_case.grid(
            row=5, column=0, sticky=tk.EW, padx=10, pady=(0, 10)
        )
        # Individual test selector — disabled until a test case is chosen.
        self.label_test = my_widgets.My_Label(
            master=self.frame_navigation, text='Test'
        )
        self.label_test.grid(
            row=6, column=0, sticky=tk.NSEW, padx=10, pady=(10, 0)
        )
        self.combobox_test = my_widgets.My_Combobox(
            master=self.frame_navigation
        )
        self.combobox_test.disable()
        self.combobox_test.grid(
            row=7, column=0, sticky=tk.EW, padx=10, pady=(0, 10)
        )
        self.separator = my_widgets.My_Separator(master=self.frame_navigation)
        self.separator.grid(row=8, column=0, sticky=tk.NSEW, pady=20)
        self.button_generate = my_widgets.My_Button(
            master=self.frame_navigation, text='Generate',
            command=self.__generate
        )
        self.button_generate.grid(
            row=9, column=0, sticky=tk.NSEW, padx=10, pady=10
        )
        # Right-hand side: the embedded matplotlib plot.
        self.frame_plot = Inner_Frame_Plot(master=self)
        self.frame_plot.grid(
            row=0, column=1, sticky=tk.NSEW, padx=(0, 10), pady=10
        )
def __load_project(self, event):
self.project = self.master.logic.files_creator.load_project(
project_name=self.combobox_project.get()
)
self.modules.clear()
self.test_cases.clear()
self.combobox_test_case.set('')
self.combobox_test_case.configure(values=tuple())
self.combobox_test_case.disable()
self.tests.clear()
self.combobox_test.set('')
self.combobox_test.configure(values=tuple())
self.combobox_test.disable()
for element in self.project['elements']:
module_name = str()
module_path = str()
for _module_dict_key, _module_dict_val in element.items():
if(_module_dict_key == 'path'):
module_path = _module_dict_val
else:
module_name = _module_dict_key
module_key = '{:s}*{:s}*{:s}'.format(
self.project['name'], module_path, module_name
)
module_key | |
"""Generated message classes for genomics version v1.
Stores, processes, explores and shares genomic data. This API implements the
Global Alliance for Genomics and Health (GA4GH) v0.5.1 API as well as several
extensions.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudsdk.third_party.apitools.base.protorpclite import messages as _messages
from googlecloudsdk.third_party.apitools.base.py import encoding
package = 'genomics'
class Annotation(_messages.Message):
  """An annotation describes a region of reference genome. The value of an
  annotation may be one of several canonical types, supplemented by arbitrary
  info tags. An annotation is not inherently associated with a specific sample
  or individual (though a client could choose to use annotations in this way).
  Example canonical annotation types are `GENE` and `VARIANT`.
  Enums:
    TypeValueValuesEnum: The data type for this annotation. Must match the
      containing annotation set's type.
  Messages:
    InfoValue: A map of additional read alignment information. This must be of
      the form map<string, string[]> (string key mapping to a list of string
      values).
  Fields:
    annotationSetId: The annotation set to which this annotation belongs.
    end: The end position of the range on the reference, 0-based exclusive.
    id: The server-generated annotation ID, unique across all annotations.
    info: A map of additional read alignment information. This must be of the
      form map<string, string[]> (string key mapping to a list of string
      values).
    name: The display name of this annotation.
    referenceId: The ID of the Google Genomics reference associated with this
      range.
    referenceName: The display name corresponding to the reference specified
      by `referenceId`, for example `chr1`, `1`, or `chrX`.
    reverseStrand: Whether this range refers to the reverse strand, as opposed
      to the forward strand. Note that regardless of this field, the start/end
      position of the range always refer to the forward strand.
    start: The start position of the range on the reference, 0-based
      inclusive.
    transcript: A transcript value represents the assertion that a particular
      region of the reference genome may be transcribed as RNA. An alternative
      splicing pattern would be represented as a separate transcript object.
      This field is only set for annotations of type `TRANSCRIPT`.
    type: The data type for this annotation. Must match the containing
      annotation set's type.
    variant: A variant annotation, which describes the effect of a variant on
      the genome, the coding sequence, and/or higher level consequences at the
      organism level e.g. pathogenicity. This field is only set for
      annotations of type `VARIANT`.
  """
  class TypeValueValuesEnum(_messages.Enum):
    """The data type for this annotation. Must match the containing annotation
    set's type.
    Values:
      ANNOTATION_TYPE_UNSPECIFIED: <no description>
      GENERIC: A `GENERIC` annotation type should be used when no other
        annotation type will suffice. This represents an untyped annotation of
        the reference genome.
      VARIANT: A `VARIANT` annotation type.
      GENE: A `GENE` annotation type represents the existence of a gene at the
        associated reference coordinates. The start coordinate is typically
        the gene's transcription start site and the end is typically the end
        of the gene's last exon.
      TRANSCRIPT: A `TRANSCRIPT` annotation type represents the assertion that
        a particular region of the reference genome may be transcribed as RNA.
    """
    ANNOTATION_TYPE_UNSPECIFIED = 0
    GENERIC = 1
    VARIANT = 2
    GENE = 3
    TRANSCRIPT = 4
  @encoding.MapUnrecognizedFields('additionalProperties')
  class InfoValue(_messages.Message):
    """A map of additional read alignment information. This must be of the
    form map<string, string[]> (string key mapping to a list of string
    values).
    Messages:
      AdditionalProperty: An additional property for a InfoValue object.
    Fields:
      additionalProperties: Additional properties of type InfoValue
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a InfoValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2, repeated=True)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # NOTE: this class is autogenerated from the genomics v1 API discovery
  # document.  The integers below are protorpc field numbers (wire tags);
  # never renumber or reuse them by hand.
  annotationSetId = _messages.StringField(1)
  end = _messages.IntegerField(2)
  id = _messages.StringField(3)
  info = _messages.MessageField('InfoValue', 4)
  name = _messages.StringField(5)
  referenceId = _messages.StringField(6)
  referenceName = _messages.StringField(7)
  reverseStrand = _messages.BooleanField(8)
  start = _messages.IntegerField(9)
  transcript = _messages.MessageField('Transcript', 10)
  type = _messages.EnumField('TypeValueValuesEnum', 11)
  variant = _messages.MessageField('VariantAnnotation', 12)
class AnnotationSet(_messages.Message):
  """An annotation set is a logical grouping of annotations that share
  consistent type information and provenance. Examples of annotation sets
  include 'all genes from refseq', and 'all variant annotations from ClinVar'.
  Enums:
    TypeValueValuesEnum: The type of annotations contained within this set.
  Messages:
    InfoValue: A map of additional read alignment information. This must be of
      the form map<string, string[]> (string key mapping to a list of string
      values).
  Fields:
    datasetId: The dataset to which this annotation set belongs.
    id: The server-generated annotation set ID, unique across all annotation
      sets.
    info: A map of additional read alignment information. This must be of the
      form map<string, string[]> (string key mapping to a list of string
      values).
    name: The display name for this annotation set.
    referenceSetId: The ID of the reference set that defines the coordinate
      space for this set's annotations.
    sourceUri: The source URI describing the file from which this annotation
      set was generated, if any.
    type: The type of annotations contained within this set.
  """
  class TypeValueValuesEnum(_messages.Enum):
    """The type of annotations contained within this set.
    Values:
      ANNOTATION_TYPE_UNSPECIFIED: <no description>
      GENERIC: A `GENERIC` annotation type should be used when no other
        annotation type will suffice. This represents an untyped annotation of
        the reference genome.
      VARIANT: A `VARIANT` annotation type.
      GENE: A `GENE` annotation type represents the existence of a gene at the
        associated reference coordinates. The start coordinate is typically
        the gene's transcription start site and the end is typically the end
        of the gene's last exon.
      TRANSCRIPT: A `TRANSCRIPT` annotation type represents the assertion that
        a particular region of the reference genome may be transcribed as RNA.
    """
    ANNOTATION_TYPE_UNSPECIFIED = 0
    GENERIC = 1
    VARIANT = 2
    GENE = 3
    TRANSCRIPT = 4
  @encoding.MapUnrecognizedFields('additionalProperties')
  class InfoValue(_messages.Message):
    """A map of additional read alignment information. This must be of the
    form map<string, string[]> (string key mapping to a list of string
    values).
    Messages:
      AdditionalProperty: An additional property for a InfoValue object.
    Fields:
      additionalProperties: Additional properties of type InfoValue
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a InfoValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2, repeated=True)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # NOTE: autogenerated message; the integers are protorpc field numbers
  # (wire tags) — never renumber existing fields by hand.
  datasetId = _messages.StringField(1)
  id = _messages.StringField(2)
  info = _messages.MessageField('InfoValue', 3)
  name = _messages.StringField(4)
  referenceSetId = _messages.StringField(5)
  sourceUri = _messages.StringField(6)
  type = _messages.EnumField('TypeValueValuesEnum', 7)
class BatchCreateAnnotationsRequest(_messages.Message):
  """A BatchCreateAnnotationsRequest object.
  Fields:
    annotations: The annotations to be created. At most 4096 can be specified
      in a single request.
  """
  # Autogenerated; field number 1 is the protorpc wire tag.
  annotations = _messages.MessageField('Annotation', 1, repeated=True)
class BatchCreateAnnotationsResponse(_messages.Message):
  """A BatchCreateAnnotationsResponse object.
  Fields:
    entries: The resulting per-annotation entries, ordered consistently with
      the original request.
  """
  # Autogenerated; field number 1 is the protorpc wire tag.
  entries = _messages.MessageField('Entry', 1, repeated=True)
class Binding(_messages.Message):
  """Associates `members` with a `role`.
  Fields:
    members: Specifies the identities requesting access for a Cloud Platform
      resource. `members` can have the following values: * `allUsers`: A
      special identifier that represents anyone who is on the internet;
      with or without a Google account. * `allAuthenticatedUsers`: A special
      identifier that represents anyone who is authenticated with a Google
      account or a service account. * `user:{emailid}`: An email address that
      represents a specific Google account. For example, `<EMAIL>`
      or `<EMAIL>`. * `serviceAccount:{emailid}`: An email address
      that represents a service account. For example, `<EMAIL>-
      <EMAIL>`. * `group:{emailid}`: An email address
      that represents a Google group. For example, `<EMAIL>`. *
      `domain:{domain}`: A Google Apps domain name that represents all the
      users of that domain. For example, `google.com` or `example.com`.
    role: Role that is assigned to `members`. For example, `roles/viewer`,
      `roles/editor`, or `roles/owner`. Required
  """
  # Autogenerated IAM binding message; field numbers are wire tags.
  members = _messages.StringField(1, repeated=True)
  role = _messages.StringField(2)
class CallSet(_messages.Message):
"""A call set is a collection of variant calls, typically for one sample. It
belongs to a variant set. For more genomics resource definitions, see
[Fundamentals of Google Genomics](https://cloud.google.com/genomics
/fundamentals-of-google-genomics)
Messages:
InfoValue: A map of additional call set information. This must be of the
form map<string, string[]> (string key mapping to a list of string
values).
Fields:
created: The date this call set was created in milliseconds from the
epoch.
id: The server-generated call set ID, unique across all | |
<reponame>demosdemon/pyup
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from unittest import TestCase
from pyup.requirements import Requirement
from mock import patch, PropertyMock, Mock
from pyup.requirements import RequirementFile, RequirementsBundle
from .test_package import package_factory
from .test_pullrequest import pullrequest_factory
import requests_mock
import os
class RequirementUpdateContent(TestCase):
    """Tests for Requirement.update_content: rewriting a requirements-file
    line (pin, hashes, comments, environment markers, line endings) to the
    latest available version.  Version lookups are mocked throughout."""
    @patch("pyup.requirements.hashin.get_package_hashes")
    def test_update_with_hashes(self, get_hashes_mock):
        get_hashes_mock.return_value = {
            "hashes": [{"hash": "123"}, {"hash": "456"}]
        }
        with patch('pyup.requirements.Requirement.latest_version_within_specs',
                   new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "alembic==0.8.9 \\\n" \
                      " --hash=sha256:abcde"
            req_file = RequirementFile("req.txt", content)
            req = list(req_file.requirements)[0]
            self.assertEqual(
                Requirement.parse("alembic==0.8.9", 1),
                req
            )
            # Old hash lines are replaced by the freshly fetched ones.
            self.assertEqual(req.update_content(content), "alembic==1.4.2 \\\n"
                                                          " --hash=sha256:123 \\\n"
                                                          " --hash=sha256:456")
    @patch("pyup.requirements.hashin.get_package_hashes")
    def test_update_with_hashes_and_comment(self, get_hashes_mock):
        get_hashes_mock.return_value = {
            "hashes": [{"hash": "123"}, {"hash": "456"}]
        }
        with patch('pyup.requirements.Requirement.latest_version_within_specs',
                   new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "alembic==0.8.9 # yay \\\n" \
                      " --hash=sha256:abcde"
            req_file = RequirementFile("req.txt", content)
            req = list(req_file.requirements)[0]
            self.assertEqual(
                Requirement.parse("alembic==0.8.9", 1),
                req
            )
            # The trailing comment on the first line must survive the update.
            new_content = req.update_content(content)
            self.assertEqual(new_content, "alembic==1.4.2 # yay \\\n"
                                          " --hash=sha256:123 \\\n"
                                          " --hash=sha256:456")
    @patch("pyup.requirements.hashin.get_package_hashes")
    def test_update_with_hashes_and_comment_and_env_markers(self, get_hashes_mock):
        get_hashes_mock.return_value = {
            "hashes": [{"hash": "123"}, {"hash": "456"}]
        }
        with patch('pyup.requirements.Requirement.latest_version_within_specs',
                   new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "alembic==0.8.9; sys_platform != 'win32' # yay \\\n" \
                      " --hash=sha256:abcde"
            req_file = RequirementFile("req.txt", content)
            req = list(req_file.requirements)[0]
            self.assertEqual(
                Requirement.parse("alembic==0.8.9; sys_platform != 'win32'", 1),
                req
            )
            # Environment marker AND comment both survive the update.
            self.assertEqual(req.update_content(content), "alembic==1.4.2; sys_platform != 'win32' # yay \\\n"
                                                          " --hash=sha256:123 \\\n"
                                                          " --hash=sha256:456")
    def test_update_with_environment_markers(self):
        with patch('pyup.requirements.Requirement.latest_version_within_specs',
                   new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "uvloop==0.6.5; sys_platform != 'win32'"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "uvloop==1.4.2; sys_platform != 'win32'")
    def test_update_with_environment_markers_and_comment(self):
        with patch('pyup.requirements.Requirement.latest_version_within_specs',
                   new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "uvloop==0.6.5; sys_platform != 'win32' # and here's some comment"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "uvloop==1.4.2; sys_platform != 'win32' # and here's some comment")
    def test_update_content_with_extras(self):
        # The [security] extras specifier must be kept on the updated line.
        with patch('pyup.requirements.Requirement.latest_version_within_specs', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "requests[security]==1.4.1"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "requests[security]==1.4.2")
    def test_update_content_tabbed(self):
        # Tabs before the comment are normalised to two spaces on update.
        with patch('pyup.requirements.Requirement.latest_version_within_specs', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "bla==1.4.1\t\t# some foo"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "bla==1.4.2  # some foo")
            content = "bla==1.4.1\t\t# pyup: <1.4.2"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "bla==1.4.2  # pyup: <1.4.2")
    def test_something_else(self):
        # Local version labels (PEP 440 '+tmf') do not confuse the update.
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="0.13.1"):
            content = "some-package==0.12.2+tmf"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "some-package==0.13.1")
    def test_line_endings(self):
        # Mixed \r\n and \n line endings must be preserved per line.
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.2.3"):
            with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
                       return_value=package_factory("Foo", [])):
                content = """\r\n\r\nWerkzeug\r\ndjango-template-repl\nbpython\nsome-fooo \n"""
                r = RequirementFile("foo.txt", content)
                self.assertEqual(r.requirements[0].name, "Werkzeug")
                self.assertEqual(r.requirements[1].name, "django-template-repl")
                self.assertEqual(r.requirements[2].name, "bpython")
                self.assertEqual(r.requirements[3].name, "some-fooo")
                self.assertTrue("Werkzeug==1.2.3\r\n" in r.requirements[0].update_content(content))
                self.assertTrue(
                    "django-template-repl==1.2.3\n" in r.requirements[1].update_content(content))
                self.assertTrue(
                    "bpython==1.2.3" in r.requirements[2].update_content(content))
                self.assertTrue(
                    "some-fooo==1.2.3 \n" in r.requirements[3].update_content(content))
    def test_update_content_simple_pinned(self):
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "Django==1.4.1"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "Django==1.4.2")
        # Case of the package name is preserved as written.
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "django==1.4.1"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "django==1.4.2")
    def test_latest_version_within_specs_called(self):
        # update_content must consult latest_version_within_specs, not
        # latest_version directly.
        with patch('pyup.requirements.Requirement.latest_version_within_specs',
                   new_callable=PropertyMock, return_value="1.4.2") as mocked:
            content = "django==1.4.1"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "django==1.4.2")
            mocked.assert_called_with()
    def test_update_content_simple_unpinned(self):
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "django"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "django==1.4.2")
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "Django"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "Django==1.4.2")
    def test_update_content_simple_unpinned_with_comment(self):
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "django # newest django release"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "django==1.4.2 # newest django release")
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "Django #django"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content), "Django==1.4.2 #django")
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.4.2"):
            content = "Django #django #yay this has really cool comments ######"
            req = Requirement.parse(content, 0)
            self.assertEqual(req.update_content(content),
                             "Django==1.4.2 #django #yay this has really cool comments ######")
    def test_update_content_with_package_in_comments(self):
        # The package name appearing inside template comments must not be
        # rewritten — only the actual requirement line is.
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="192.168.127.12"):
            content = 'raven==5.8.1\n' \
                      '{%- endif %}\n\n' \
                      '{% if cookiecutter.use_newrelic == "y" -%}\n' \
                      '# Newrelic agent for performance monitoring\n' \
                      '# -----------------------------------------\n' \
                      'newrelic\n' \
                      '{%- endif %}\n\n'
            req = Requirement.parse("newrelic", 0)
            updated_content = 'raven==5.8.1\n' \
                              '{%- endif %}\n\n' \
                              '{% if cookiecutter.use_newrelic == "y" -%}\n' \
                              '# Newrelic agent for performance monitoring\n' \
                              '# -----------------------------------------\n' \
                              'newrelic==2.58.1.44\n' \
                              '{%- endif %}\n\n'
            self.assertEqual(req.update_content(content), updated_content)
    def test_update_content_with_dubious_package_name(self):
        # 'raven' must not match the prefix of 'ravenclient'.
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="192.168.127.12"):
            content = 'raven\n' \
                      'ravenclient'
            req = Requirement.parse("raven", 0)
            updated_content = 'raven==2.58.1.44\n' \
                              'ravenclient'
            self.assertEqual(req.update_content(content), updated_content)
    def test_update_content_ranged(self):
        # A range specifier ('>=0.2') is replaced with an exact pin.
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.5.6"):
            content = 'raven>=0.2\n' \
                      'ravenclient'
            req = Requirement.parse("raven>=0.2", 0)
            updated_content = 'raven==1.5.6\n' \
                              'ravenclient'
            self.assertEqual(req.update_content(content), updated_content)
    def test_update_content_unfinished_line(self):
        # A trailing newline with no further content is preserved.
        with patch('pyup.requirements.Requirement.latest_version', new_callable=PropertyMock,
                   return_value="1.5.6"):
            content = 'raven==0.2\n'
            req = Requirement.parse("raven==0.2", 0)
            updated_content = 'raven==1.5.6\n'
            self.assertEqual(req.update_content(content), updated_content)
class RequirementTestCase(TestCase):
def test_is_outdated(self):
with patch('pyup.requirements.Requirement.latest_version_within_specs',
new_callable=PropertyMock, return_value=None):
r = Requirement.parse("Django", 0)
self.assertEqual(r.is_outdated, False)
def test_equals(self):
self.assertEqual(
Requirement.parse("Django==1.5", 0),
Requirement.parse("Django==1.5", 0)
)
def test_not_equals(self):
self.assertNotEqual(
Requirement.parse("Django==1.5", 0),
Requirement.parse("Django==1.6", 0)
)
def test_filter(self):
r = Requirement.parse("Django==1.7.6", 0)
self.assertEqual(r.filter, False)
r = Requirement.parse("Django==1.7.6 # pyup: < 1.7.8", 0)
self.assertEqual(r.filter, [("<", "1.7.8")])
req = Requirement.parse("some-package==1.9.3 # rq.filter: <1.10 #some comment here", 0)
self.assertEqual(req.filter, [("<", "1.10")])
r = Requirement.parse("django==1.7.1 # pyup: <1.7.6", 0)
r = Requirement.parse("Django==1.7.6 # pyup: < 1.7.8, > 1.7.2", 0)
self.assertEqual(
sorted(r.filter, key=lambda r: r[1]),
sorted([("<", "1.7.8"), (">", "1.7.2")], key=lambda r: r[1])
)
def test_tabbed(self):
req = Requirement.parse("Django==1.5\t\t#some-comment", 0)
self.assertEqual(req.is_pinned, True)
self.assertEqual(req.version, "1.5")
def test_is_pinned(self):
req = Requirement.parse("Django", 0)
self.assertEqual(req.is_pinned, False)
req = Requirement.parse("Django==1.4,>1.5", 0)
self.assertEqual(req.is_pinned, False)
req = Requirement.parse("Django===1.4", 0)
self.assertEqual(req.is_pinned, False)
req = Requirement.parse("Django<=1.4,>=1.33", 0)
self.assertEqual(req.is_pinned, False)
req = Requirement.parse("Django==1.4", 0)
self.assertEqual(req.is_pinned, True)
def test_is_loose(self):
req = Requirement.parse("Django", 0)
self.assertEqual(req.is_loose, True)
req = Requirement.parse("Django==1.4,>1.5", 0)
self.assertEqual(req.is_loose, False)
req = Requirement.parse("Django===1.4", 0)
self.assertEqual(req.is_loose, False)
req = Requirement.parse("Django<=1.4,>=1.33", 0)
self.assertEqual(req.is_loose, False)
req = Requirement.parse("Django==1.4", 0)
self.assertEqual(req.is_loose, False)
def test_package_filter_present(self):
req = Requirement.parse("Django", 0)
self.assertEqual(req.filter, False)
req = Requirement.parse("Django #rq.filter:", 0)
self.assertEqual(req.filter, False)
req = Requirement.parse("Django #rq.filter: >=1.4,<1.5", 0)
self.assertEqual(
sorted(req.filter, key=lambda i: i[0]),
sorted([('>=', '1.4'), ('<', '1.5')], key=lambda i: i[0])
)
req = Requirement.parse("Django #rq.filter:!=1.2", 0)
self.assertEqual(req.filter, [('!=', '1.2')])
req = Requirement.parse("Django #rq.filter:foo", 0)
self.assertEqual(req.filter, False)
req = Requirement.parse("bliss #rq.filter:", 0)
self.assertEqual(req.filter, False)
req = Requirement.parse("Django", 0)
self.assertEqual(req.filter, False)
req = Requirement.parse("Django #pyup:", 0)
self.assertEqual(req.filter, False)
req = Requirement.parse("Django #pyup: >=1.4,<1.5", 0)
self.assertEqual(
sorted(req.filter, key=lambda i: i[0]),
sorted([('>=', '1.4'), ('<', '1.5')], key=lambda i: i[0])
)
req = Requirement.parse("Django #pyup:!=1.2", 0)
self.assertEqual(req.filter, [('!=', '1.2')])
req = Requirement.parse("Django #pyup:foo", 0)
self.assertEqual(req.filter, False)
req = Requirement.parse("bliss #pyup:", 0)
self.assertEqual(req.filter, False)
def test_get_latest_version_within_specs(self):
    """get_latest_version_within_specs returns the newest version the specs allow."""
    # Contradictory specs (==1.2 combined with !=1.2) match nothing -> None.
    unsatisfiable = Requirement.get_latest_version_within_specs(
        (("==", "1.2"), ("!=", "1.2")),
        ["1.2", "1.3", "1.4", "1.5"],
    )
    self.assertEqual(unsatisfiable, None)

    # An exact pin selects exactly that version, ignoring newer releases.
    pinned = Requirement.get_latest_version_within_specs(
        (("==", "1.2.1"),),
        ["1.2.0", "1.2.1", "1.2.2", "1.3"],
    )
    self.assertEqual(pinned, "1.2.1")
def test_latest_version_within_specs(self):
    """latest_version_within_specs honors filters and pre-release pins."""
    # Patch the PyPI-backed `package` property so no network access happens;
    # package_factory supplies a canned version list.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory("bliss",
                                            versions=["1.9rc1", "1.9", "1.8.1", "1.8", "1.7",
                                                      "1.6"])):
        # Unpinned, empty filter: pre-releases are excluded -> 1.9, not 1.9rc1.
        req = Requirement.parse("bliss #rq.filter:", 0)
        self.assertEqual(req.latest_version_within_specs, "1.9")

        # Pinning to a pre-release enables pre-releases in the search.
        req = Requirement.parse("bliss==1.8rc1 #rq.filter:", 0)
        self.assertEqual(req.prereleases, True)
        self.assertEqual(req.latest_version_within_specs, "1.9rc1")

        # A filter range restricts the candidates.
        req = Requirement.parse("bliss #rq.filter: >=1.7,<1.9", 0)
        self.assertEqual(req.latest_version_within_specs, "1.8.1")
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory("gevent",
                                            versions=['1.1rc1', '1.1b6', '1.1b5', '1.1b4',
                                                      '1.1b3', '1.1b2', '1.1b1', '1.1a2',
                                                      '1.1a1', '1.0.2', '1.0.1', ])):
        # Pinned to a beta: the newer release candidate is considered latest.
        req = Requirement.parse("gevent==1.1b6", 0)
        self.assertEqual(req.latest_version_within_specs, "1.1rc1")
        self.assertEqual(req.latest_version, "1.1rc1")
def test_version_unpinned(self):
    """For an unpinned requirement, `version` is the latest suitable release."""
    # Each block patches the PyPI-backed `package` property with a canned
    # version list so no network access happens.

    # Latest stable release wins.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django", versions=["1.9", "1.8"])):
        r = Requirement.parse("Django", 0)
        self.assertEqual(r.version, "1.9")

    # Pre-releases are skipped when the requirement is not itself a pre-release pin.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django", versions=["1.9rc1", "1.9", "1.8"])):
        r = Requirement.parse("Django", 0)
        self.assertEqual(r.version, "1.9")

    # Version list order does not matter; highest version is selected.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django",
                                            versions=["1.9.1", "1.8", "1.9rc1"])):
        r = Requirement.parse("django", 0)
        self.assertEqual(r.version, "1.9.1")

    # A legacy rq.filter range restricts which versions are candidates.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(
                   name="django",
                   versions=["1.4.3", "1.5", "1.4.2", "1.4.1", ])):
        r = Requirement.parse("Django # rq.filter: >=1.4,<1.5", 0)
        self.assertEqual(r.version, "1.4.3")

    # The newer pyup marker behaves the same as rq.filter.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(
                   name="django",
                   versions=["1.4.3", "1.5", "1.4.2", "1.4.1", ])):
        r = Requirement.parse("Django # pyup: >=1.4,<1.5", 0)
        self.assertEqual(r.version, "1.4.3")

    # An exclusion filter removes the latest release from consideration.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django", versions=["1.8.1", "1.8"])):
        r = Requirement.parse("Django # rq.filter: !=1.8.1", 0)
        self.assertEqual(r.version, "1.8")
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django", versions=["1.8.1", "1.8"])):
        r = Requirement.parse("Django # pyup: !=1.8.1", 0)
        self.assertEqual(r.version, "1.8")

    # Unparsable filter text is ignored; resolution proceeds as if unfiltered.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django",
                                            versions=["1.9rc1", "1.9.1", "1.8", ])):
        r = Requirement.parse("django # rq.filter: bogus", 0)
        self.assertEqual(r.version, "1.9.1")
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django",
                                            versions=["1.9rc1", "1.9.1", "1.8", ])):
        r = Requirement.parse("django # pyup: bogus", 0)
        self.assertEqual(r.version, "1.9.1")
def test_version_pinned(self):
    """For a pinned requirement, `version` is the pinned version itself."""
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django", versions=["1.8", "1.9"])):
        r = Requirement.parse("Django==1.9", 0)
        self.assertEqual(r.version, "1.9")
    # Fix: the original passed name="django==1.9rc1" to package_factory --
    # a requirement *spec* pasted where the bare package *name* belongs.
    # Every sibling test passes a plain name; the pin still wins either way.
    with patch('pyup.requirements.Requirement.package', new_callable=PropertyMock,
               return_value=package_factory(name="django",
                                            versions=["1.8", "1.9rc1", "1.9rc2"])):
        # The pin is returned even though it is absent from the release list.
        r = Requirement.parse("Django==1.9.2.rc14 # rq.filter != 1.44", 0)
        self.assertEqual(r.version, "1.9.2.rc14")
def test_prereleases(self):
    """prereleases is True only when the requirement pins a pre-release version."""
    # rc / beta / alpha pins all count as pre-release requirements.
    for line in ("Django==1.9rc1", "Django==1.9-b1", "Django==1.9-alpha1"):
        self.assertEqual(Requirement.parse(line, 0).prereleases, True)
    # Unpinned, range, and exclusion specs are not pre-release requirements.
    for line in ("Django", "Django>=1.5,<=1.6", "Django!=1.9"):
        self.assertEqual(Requirement.parse(line, 0).prereleases, False)
def test_name(self):
    """The parsed name preserves the exact casing used on the requirement line."""
    for line, expected in (("Django==1.9rc1", "Django"),
                           ("django==1.9-b1", "django")):
        self.assertEqual(Requirement.parse(line, 0).name, expected)
@requests_mock.mock()
def test_package_found(self, requests):
    """The package is fetched lazily from PyPI and cached on first access."""
    # Serve a canned PyPI JSON payload so the fetch is fully offline.
    with open(os.path.dirname(os.path.realpath(__file__)) + "/data/django.json") as f:
        requests.get("https://pypi.python.org/pypi/Django/json", text=f.read())
        r = Requirement.parse("Django==1.9rc1", 0)
        # Nothing fetched yet: access is lazy.
        self.assertEqual(r._fetched_package, False)
        self.assertEqual(r._package, None)
        # this triggers the fetch
        self.assertNotEqual(r.package, None)
        # After the first access the result is cached on the instance.
        self.assertEqual(r._fetched_package, True)
        self.assertNotEqual(r._package, None)
@requests_mock.mock()
def test_package_not_found(self, requests):
    """A 404 from PyPI leaves `package` as None, but still marks it fetched."""
    requests.get("https://pypi.python.org/pypi/Fango/json", text="404", status_code=404)
    r = Requirement.parse("Fango", 0)
    # Nothing fetched yet: access is lazy.
    self.assertEqual(r._fetched_package, False)
    self.assertEqual(r._package, None)
    # this triggers the fetch
    self.assertEqual(r.package, None)
    # The failed lookup is cached too, so PyPI is not hit again.
    self.assertEqual(r._fetched_package, True)
    self.assertEqual(r._package, None)
def test_is_insecure(self):
    """is_insecure is not implemented yet; accessing it must raise."""
    req = Requirement.parse("Django", 0)
    with self.assertRaises(NotImplementedError):
        req.is_insecure
@requests_mock.mock()
def test_needs_update(self, requests):
with open(os.path.dirname(os.path.realpath(__file__)) | |
"""distutils.dist
Provides the Distribution class, which represents the module distribution
being built/installed/distributed.
"""
zaimportuj sys
zaimportuj os
zaimportuj re
z email zaimportuj message_from_file
spróbuj:
zaimportuj warnings
wyjąwszy ImportError:
warnings = Nic
z distutils.errors zaimportuj *
z distutils.fancy_getopt zaimportuj FancyGetopt, translate_longopt
z distutils.util zaimportuj check_environ, strtobool, rfc822_escape
z distutils zaimportuj log
z distutils.debug zaimportuj DEBUG
# Regex to define acceptable Distutils command names. This is not *quite*
# the same as a Python NAME -- I don't allow leading underscores. The fact
# that they're very similar is no coincidence; the default naming scheme is
# to look for a Python module named after the command.
command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
klasa Distribution:
    """The core of the Distutils.  Most of the work hiding behind 'setup'
    is really done within a Distribution instance, which farms the work out
    to the Distutils commands specified on the command line.

    Setup scripts will almost never instantiate Distribution directly,
    unless the 'setup()' function is totally inadequate to their needs.
    However, it is conceivable that a setup script might wish to subclass
    Distribution for some specialized purpose, and then pass the subclass
    to 'setup()' as the 'distclass' keyword argument.  If so, it is
    necessary to respect the expectations that 'setup' has of Distribution.
    See the code for 'setup()', in core.py, for details.
    """
    # NOTE(review): this file is a machine-translated Polish dialect of
    # CPython's distutils/dist.py -- 'klasa', 'dla', 'w', and 'Nic' stand
    # for 'class', 'for', 'in', and 'None'.  It only parses under the
    # translated-Python interpreter, not standard CPython.  Code tokens
    # (including the translated words inside help strings below) are left
    # untouched; only comments/docstrings are edited here.

    # 'global_options' describes the command-line options that may be
    # supplied to the setup script prior to any actual commands.
    # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
    # these global options.  This list should be kept to a bare minimum,
    # since every global option is also valid as a command option -- and we
    # don't want to pollute the commands with too many options that they
    # have minimal control over.
    # The fourth entry for verbose means that it can be repeated.
    global_options = [
        ('verbose', 'v', "run verbosely (default)", 1),
        ('quiet', 'q', "run quietly (turns verbosity off)"),
        ('dry-run', 'n', "don't actually do anything"),
        ('help', 'h', "show detailed help message"),
        ('no-user-cfg', Nic,
         'ignore pydistutils.cfg w your home directory'),
    ]

    # 'common_usage' is a short (2-3 line) string describing the common
    # usage of the setup script.
    common_usage = """\
Common commands: (see '--help-commands' dla more)

  setup.py build      will build the package underneath 'build/'
  setup.py install    will install the package
"""

    # options that are not propagated to the commands
    display_options = [
        ('help-commands', Nic,
         "list all available commands"),
        ('name', Nic,
         "print package name"),
        ('version', 'V',
         "print package version"),
        ('fullname', Nic,
         "print <package name>-<version>"),
        ('author', Nic,
         "print the author's name"),
        ('author-email', Nic,
         "print the author's email address"),
        ('maintainer', Nic,
         "print the maintainer's name"),
        ('maintainer-email', Nic,
         "print the maintainer's email address"),
        ('contact', Nic,
         "print the maintainer's name jeżeli known, inaczej the author's"),
        ('contact-email', Nic,
         "print the maintainer's email address jeżeli known, inaczej the author's"),
        ('url', Nic,
         "print the URL dla this package"),
        ('license', Nic,
         "print the license of the package"),
        ('licence', Nic,
         "alias dla --license"),
        ('description', Nic,
         "print the package description"),
        ('long-description', Nic,
         "print the long package description"),
        ('platforms', Nic,
         "print the list of platforms"),
        ('classifiers', Nic,
         "print the list of classifiers"),
        ('keywords', Nic,
         "print the list of keywords"),
        ('provides', Nic,
         "print the list of packages/modules provided"),
        ('requires', Nic,
         "print the list of packages/modules required"),
        ('obsoletes', Nic,
         "print the list of packages/modules made obsolete")
        ]
    # Long-option names of the display options, normalized for attribute use.
    display_option_names = [translate_longopt(x[0]) dla x w display_options]

    # negative options are options that exclude other options
    negative_opt = {'quiet': 'verbose'}

    # -- Creation/initialization methods -------------------------------
def __init__(self, attrs=Nic):
"""Construct a new Distribution instance: initialize all the
attributes of a Distribution, oraz then use 'attrs' (a dictionary
mapping attribute names to values) to assign some of those
attributes their "real" values. (Any attributes nie mentioned w
'attrs' will be assigned to some null value: 0, Nic, an empty list
albo dictionary, etc.) Most importantly, initialize the
'command_obj' attribute to the empty dictionary; this will be
filled w przy real command objects by 'parse_command_line()'.
"""
# Default values dla our command-line options
self.verbose = 1
self.dry_run = 0
self.help = 0
dla attr w self.display_option_names:
setattr(self, attr, 0)
# Store the distribution meta-data (name, version, author, oraz so
# forth) w a separate object -- we're getting to have enough
# information here (and enough command-line options) that it's
# worth it. Also delegate 'get_XXX()' methods to the 'metadata'
# object w a sneaky oraz underhanded (but efficient!) way.
self.metadata = DistributionMetadata()
dla basename w self.metadata._METHOD_BASENAMES:
method_name = "get_" + basename
setattr(self, method_name, getattr(self.metadata, method_name))
# 'cmdclass' maps command names to klasa objects, so we
# can 1) quickly figure out which klasa to instantiate when
# we need to create a new command object, oraz 2) have a way
# dla the setup script to override command classes
self.cmdclass = {}
# 'command_packages' jest a list of packages w which commands
# are searched for. The factory dla command 'foo' jest expected
# to be named 'foo' w the module 'foo' w one of the packages
# named here. This list jest searched z the left; an error
# jest podnieśd jeżeli no named package provides the command being
# searched for. (Always access using get_command_packages().)
self.command_packages = Nic
# 'script_name' oraz 'script_args' are usually set to sys.argv[0]
# oraz sys.argv[1:], but they can be overridden when the caller jest
# nie necessarily a setup script run z the command-line.
self.script_name = Nic
self.script_args = Nic
# 'command_options' jest where we store command options between
# parsing them (z config files, the command-line, etc.) oraz when
# they are actually needed -- ie. when the command w question jest
# instantiated. It jest a dictionary of dictionaries of 2-tuples:
# command_options = { command_name : { option : (source, value) } }
self.command_options = {}
# 'dist_files' jest the list of (command, pyversion, file) that
# have been created by any dist commands run so far. This jest
# filled regardless of whether the run jest dry albo not. pyversion
# gives sysconfig.get_python_version() jeżeli the dist file jest
# specific to a Python version, 'any' jeżeli it jest good dla all
# Python versions on the target platform, oraz '' dla a source
# file. pyversion should nie be used to specify minimum albo
# maximum required Python versions; use the metainfo dla that
# instead.
self.dist_files = []
# These options are really the business of various commands, rather
# than of the Distribution itself. We provide aliases dla them w
# Distribution jako a convenience to the developer.
self.packages = Nic
self.package_data = {}
self.package_dir = Nic
self.py_modules = Nic
self.libraries = Nic
self.headers = Nic
self.ext_modules = Nic
self.ext_package = Nic
self.include_dirs = Nic
self.extra_path = Nic
self.scripts = Nic
self.data_files = Nic
self.password = ''
# And now initialize bookkeeping stuff that can't be supplied by
# the caller at all. 'command_obj' maps command names to
# Command instances -- that's how we enforce that every command
# klasa jest a singleton.
self.command_obj = {}
# 'have_run' maps command names to boolean values; it keeps track
# of whether we have actually run a particular command, to make it
# cheap to "run" a command whenever we think we might need to -- if
# it's already been done, no need dla expensive filesystem
# operations, we just check the 'have_run' dictionary oraz carry on.
# It's only safe to query 'have_run' dla a command klasa that has
# been instantiated -- a false value will be inserted when the
# command object jest created, oraz replaced przy a true value when
# the command jest successfully run. Thus it's probably best to use
# '.get()' rather than a straight lookup.
self.have_run = {}
# Now we'll use the attrs dictionary (ultimately, keyword args from
# the setup script) to possibly override any albo all of these
# distribution options.
jeżeli attrs:
# Pull out | |
# -*- coding: utf-8 -*-
# Python2 Compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import json
import conda_content_trust.metadata_construction as cct_metadata_construction
import conda_content_trust.common as cct_common
import conda_content_trust.signing as cct_signing
import conda_content_trust.root_signing as cct_root_signing
import conda_content_trust.authentication as cct_authentication
# When True, the demo pauses between steps and waits for the user to hit
# enter; when False, it runs straight through, only printing the prompts.
PRESENT_SLOWLY = False

# Hex-encoded ed25519 public keys and GPG key fingerprints for the two
# root keys used in the demo.  '<KEY>' values are placeholders that were
# redacted from the published copy of this script -- TODO confirm real
# values before running.
ROOT_PUBKEY_HEX = '<KEY>'
ROOT_PUBKEY_GPG_FINGERPRINT = '917adb684e2e9fb5ed4e59909ddd19a1268b62d0'
ROOT_PUBKEY_2_HEX = 'a59cea0987ee9046d68d2d011e919eb9278e3f478cca77f5204d65191ff8d7a5'
ROOT_PUBKEY_2_GPG_FINGERPRINT = '<KEY>'
# _OLD_ROOT_PUBKEY_HEX = '<KEY>'
# _OLD_ROOT_PUBKEY_GPG_FINGERPRINT = '<KEY>'
# _OLD_ROOT_PUBKEY_2_HEX = 'd16d07f038e49de3b3bd8661523ef0948181e3109902a9c739beeb69628940c4'
# _OLD_ROOT_PUBKEY_2_GPG_FINGERPRINT = '<KEY>'

# Keypairs (hex-encoded) for the key-manager and package-manager roles.
KEYMGR_PRIVATE_HEX = '<KEY>'
KEYMGR_PUBLIC_HEX = '<KEY>'
PKGMGR_PUBLIC_HEX = '<KEY>'
PKGMGR_PRIVATE_HEX = '<KEY>'

# Output paths for the metadata files the demo produces.
ROOT_FNAME_V1 = 'demo/1.root.json'  # Note that this will be overwritten.
ROOT_FNAME_V2 = 'demo/2.root.json'  # Note that this will be overwritten.
ROOT_FNAME_V3 = 'demo/3.root.json'  # Note that this will be overwritten.
KEYMGR_FNAME = 'demo/key_mgr.json'  # Note that this will be overwritten.

# In Python2, input() performs evaluation and raw_input() does not.  In
# Python3, input() does not perform evaluation and there is no raw_input().
# So... use raw_input in Python2, and input in Python3.
try:
    _input_func = raw_input
except NameError:
    _input_func = input
# Step by step demo or an uninterrupted run, based on PRESENT_SLOWLY.
def input_func(s):
    """Display prompt *s*; block for user input only when PRESENT_SLOWLY is set.

    In the non-interactive case the return value is print()'s None, matching
    how callers discard the result (`junk = input_func(...)`).
    """
    if not PRESENT_SLOWLY:
        return print(s)
    return _input_func(s)
def main():
    """Run the four-part conda-content-trust demo: root signing/chaining,
    key_mgr creation, key_mgr verification, and package-signature
    verification."""
    junk = input_func(
        '\n\n\n\nFirst: a demo of root metadata creation, verification, '
        'updating, and root chaining. We create an initial version of '
        'the root metadata and sign it, then create a second version, '
        'with an additional root key and requiring both, and then create '
        'third version scaling back key requirements to any one of the '
        'two. We verify each with the prior (root chaining).\n')
    root_v1, root_v2, root_v3 = demo_root_signing_and_verifying_and_chaining()

    # This one uses existing files, if preferred, and just does the chaining
    # test.
    # demo_root_chaining(root_v1, root_v2) # redundant test for my dev purposes

    # To load metadata from a file
    # key_mgr = cct_common.load_metadata_from_file('test_key_mgr.json')

    # If loading a key from file, for example....
    # with open(name + '.pri', 'rb') as fobj:
    #     private_bytes = fobj.read()

    junk = input_func(
        '\n\n\nSecond: a demo of the creation and signing of the key '
        'manager role (key_mgr), a role root delegates to.')
    key_mgr = demo_create_and_sign_key_mgr()

    junk = input_func(
        '\n\n\nThird: a demo of the verification of the key manager '
        'metadata using trusted root metadata.')
    # NOTE(review): key_mgr is verified against root v2 here, not the
    # latest v3 -- presumably any trusted root version works; confirm.
    demo_verify_key_mgr_using_root(key_mgr, root_v2)

    junk = input_func(
        '\n\n\nFourth: a demo of verification of an individual package '
        'signature using the now-trusted key manager metadata.')
    demo_verify_pkg_sig_via_key_mgr(key_mgr)
def demo_create_and_sign_key_mgr():
    """Build, sign, and write the key_mgr delegation metadata.

    The key_mgr role delegates package signing to the pkg_mgr key
    (threshold 1).  The signed envelope is written to KEYMGR_FNAME and
    also returned as a dict.
    """
    prikey_keymgr = cct_common.PrivateKey.from_hex(KEYMGR_PRIVATE_HEX)
    # pubkey_keymgr = cct_common.PublicKey.from_bytes(KEYMGR_PUBLIC_BYTES)
    # print('public test key for keymgr: ' + pubkey_keymgr.to_hex())
    # print('private test key for keymgr: ' + prikey_keymgr.to_hex())

    key_mgr = cct_metadata_construction.build_delegating_metadata(
        metadata_type='key_mgr',  # 'root' or 'key_mgr'
        delegations={'pkg_mgr': {
            'pubkeys': [PKGMGR_PUBLIC_HEX],
            'threshold': 1}},
        version=1,
        # timestamp default: now
        # expiration default: now plus root expiration default duration
    )

    key_mgr = cct_signing.wrap_as_signable(key_mgr)

    # sign dictionary in place
    cct_signing.sign_signable(key_mgr, prikey_keymgr)

    # Persist the canonical serialized form (bytes) for later verification.
    with open(KEYMGR_FNAME, 'wb') as fobj:
        fobj.write(cct_common.canonserialize(key_mgr))

    return key_mgr
def demo_verify_key_mgr_using_root(key_mgr_metadata, root_metadata):
    """Verify key_mgr metadata against the 'key_mgr' delegation in trusted root.

    Raises ValueError when the root metadata lacks the expected delegation
    structure; signature failures propagate from verify_delegation.
    """
    # Validate the structure of the root metadata before trusting its contents.
    cct_common.checkformat_signable(root_metadata)

    signed_portion = root_metadata['signed']
    if 'delegations' not in signed_portion:
        raise ValueError('Expected "delegations" entry in root metadata.')

    delegations = signed_portion['delegations']
    cct_common.checkformat_delegations(delegations)
    if 'key_mgr' not in delegations:
        raise ValueError(
            'Missing expected delegation to "key_mgr" in root metadata.')
    cct_common.checkformat_delegation(delegations['key_mgr'])

    # Check the key_mgr signatures against the keys root delegated to it.
    cct_authentication.verify_delegation(
        'key_mgr', key_mgr_metadata, root_metadata)

    print('\n-- Success: key mgr metadata verified based on root metadata.')
def demo_root_signing_and_verifying_and_chaining():
    """Create, sign, and chain-verify three versions of root metadata.

    v1: one root key, threshold 1.
    v2: two root keys, threshold 2 (both must sign).
    v3: two root keys, threshold 1 (either may sign).
    Each version is signed via GPG (YubiKey) and each new version is
    verified against the previous one (root chaining).

    Returns:
        (signed_root_v1, signed_root_v2, signed_root_v3) metadata dicts.
    """
    # Build sample root metadata.  ('metadata' -> 'md')
    root_md = cct_metadata_construction.build_root_metadata(
        root_pubkeys=[ROOT_PUBKEY_HEX],
        root_threshold=1,
        root_version=1,
        key_mgr_pubkeys=[KEYMGR_PUBLIC_HEX],
        key_mgr_threshold=1)

    # Wrap the metadata in a signing envelope.
    root_md = cct_signing.wrap_as_signable(root_md)
    root_md_serialized_unsigned = cct_common.canonserialize(root_md)

    print('\n-- Unsigned root metadata version 1 generated.\n')

    # # This is the part of the data over which signatures are constructed.
    # root_md_serialized_portion_to_sign = cct_common.canonserialize(
    #     root_md['signed'])

    # TODO: ✅ Format-validate constructed root metadata using checkformat
    # function.

    if not os.path.exists('demo'):
        os.mkdir('demo')

    # Write unsigned sample root metadata.
    with open(ROOT_FNAME_V1, 'wb') as fobj:
        fobj.write(root_md_serialized_unsigned)
    print('\n-- Unsigned root metadata version 1 written.\n')

    # Sign sample root metadata.
    junk = input_func(
        'Preparing to request root signature. Please plug in your '
        'YubiKey and prepare to put in your user PIN in a GPG dialog box. '
        ' When the YubiKey is plugged in and you are READY TO ENTER your '
        'pin, hit enter to begin.')

    # This overwrites the file with a signed version of the file.
    # Fix: the original issued this exact call twice back to back,
    # prompting for the YubiKey PIN a second time for no reason; the
    # redundant duplicate has been removed.
    cct_root_signing.sign_root_metadata_via_gpg(
        ROOT_FNAME_V1, ROOT_PUBKEY_GPG_FINGERPRINT)
    junk = input_func('\n-- Root metadata v1 signed. Next: load signed root v1.\n')

    # Load untrusted signed root metadata.
    signed_root_md = cct_common.load_metadata_from_file(ROOT_FNAME_V1)
    junk = input_func('\n-- Signed root metadata v1 loaded. Next: verify signed root v1\n')

    # Verify untrusted signed root metadata.  (Normally, one uses the prior
    # version of root, but here we're bootstrapping for the demo.  We'll verify
    # with a prior version lower down in this demo.)
    cct_authentication.verify_signable(
        signed_root_md, [ROOT_PUBKEY_HEX], 1, gpg=True)
    junk = input_func('\n-- Root metadata v1 fully verified. Next: build root metadata v2.\n')

    # Build sample second version of root metadata.  In this case, let's try
    # adding another authorized key and requiring signatures from both keys.
    root_md2 = cct_metadata_construction.build_root_metadata(
        root_pubkeys=[ROOT_PUBKEY_HEX, ROOT_PUBKEY_2_HEX],
        root_threshold=2,
        root_version=2,
        key_mgr_pubkeys=[KEYMGR_PUBLIC_HEX],
        key_mgr_threshold=1)

    # Wrap the version 2 metadata in a signing envelope, canonicalize it, and
    # serialize it to write to disk.
    root_md2 = cct_signing.wrap_as_signable(root_md2)
    root_md2 = cct_common.canonserialize(root_md2)

    # Write unsigned sample root metadata.
    with open(ROOT_FNAME_V2, 'wb') as fobj:
        fobj.write(root_md2)
    junk = input_func('\n-- Unsigned root metadata version 2 generated and written. Next: sign root v2\n')

    # This overwrites the file with a signed version of the file.
    # We'll sign with both keys specified (threshold is 2).
    cct_root_signing.sign_root_metadata_via_gpg(
        ROOT_FNAME_V2, ROOT_PUBKEY_GPG_FINGERPRINT)
    cct_root_signing.sign_root_metadata_via_gpg(
        ROOT_FNAME_V2, ROOT_PUBKEY_2_GPG_FINGERPRINT)
    junk = input_func('\n-- Root metadata v2 signed. Next: load and verify signed root v2 based on root v1 (root chaining).\n')

    # Load the now-signed version from disk.
    signed_root_md2 = cct_common.load_metadata_from_file(ROOT_FNAME_V2)

    # Test root chaining (verifying v2 using v1)
    cct_authentication.verify_root(signed_root_md, signed_root_md2)
    print(
        '\n-- Root metadata v2 fully verified based directly on Root '
        'metadata v1 (root chaining success)\n')
    print('\n-- Success. :)\n')

    # Build sample third version of root metadata.  In this case, let's reduce
    # the number of required keys to one.
    root_md3 = cct_metadata_construction.build_root_metadata(
        root_pubkeys=[ROOT_PUBKEY_HEX, ROOT_PUBKEY_2_HEX],
        root_threshold=1,
        root_version=3,
        key_mgr_pubkeys=[KEYMGR_PUBLIC_HEX],
        key_mgr_threshold=1)

    # Wrap the version 3 metadata in a signing envelope, canonicalize it, and
    # serialize it to write to disk.
    root_md3 = cct_signing.wrap_as_signable(root_md3)
    root_md3 = cct_common.canonserialize(root_md3)

    # Write unsigned sample root metadata.
    with open(ROOT_FNAME_V3, 'wb') as fobj:
        fobj.write(root_md3)
    # Fix: these two prompts said "version 2"/"v2" -- copy-paste from the
    # v2 section above; this section builds and verifies v3.
    junk = input_func('\n-- Unsigned root metadata version 3 generated and written. Next: sign root v3\n')

    # This overwrites the file with a signed version of the file.
    # Threshold is 1, but we sign with both keys anyway, as the original did.
    cct_root_signing.sign_root_metadata_via_gpg(
        ROOT_FNAME_V3, ROOT_PUBKEY_GPG_FINGERPRINT)
    cct_root_signing.sign_root_metadata_via_gpg(
        ROOT_FNAME_V3, ROOT_PUBKEY_2_GPG_FINGERPRINT)
    junk = input_func('\n-- Root metadata v3 signed. Next: load and verify signed root v3 based on root v2 (root chaining).\n')

    # Load the now-signed version from disk.
    signed_root_md3 = cct_common.load_metadata_from_file(ROOT_FNAME_V3)

    # Test root chaining (verifying v3 using v2)
    cct_authentication.verify_root(signed_root_md2, signed_root_md3)
    print(
        '\n-- Root metadata v3 fully verified based directly on Root '
        'metadata v2 (root chaining success)\n')
    print('\n-- Success. :)\n')

    return signed_root_md, signed_root_md2, signed_root_md3
def demo_root_chaining_w_files(trusted_root_fname, new_untrusted_root_fname):
    """Verify a new root metadata file against an already-trusted root file.

    Just does the chaining part from
    demo_root_signing_and_verifying_and_chaining, but using metadata files
    instead of metadata dictionaries.
    """
    # TODO: Contemplate the safest way to hold this metadata in conda during
    #       execution.  I gather that much of what conda does with env
    #       variables, for example, can be compromised by random packages
    #       adding environment variables?
    trusted_root = cct_common.load_metadata_from_file(trusted_root_fname)
    untrusted_root = cct_common.load_metadata_from_file(new_untrusted_root_fname)

    # Use that to verify the next root.
    # Fix: the original called bare verify_root(), a name not defined in
    # this module -- calling this function raised NameError.  The function
    # lives in cct_authentication (as used elsewhere in this file).
    cct_authentication.verify_root(trusted_root, untrusted_root)
def demo_verify_pkg_sig_via_key_mgr(key_mgr):
packages = {
"pytorch-1.2.0-cuda92py27hd3e106c_0.tar.bz2": {
"build": "cuda92py27hd3e106c_0",
"build_number": 0,
"depends": [
"_pytorch_select 0.2",
"blas 1.0 mkl",
"cffi",
"cudatoolkit 9.2.*",
"cudnn >=7.3.0,<=8.0a0",
"future",
"libgcc-ng >=7.3.0",
"libstdcxx-ng >=7.3.0",
"mkl >=2019.4,<2021.0a0",
"mkl-service >=2,<3.0a0",
"ninja",
"numpy >=1.11.3,<2.0a0",
"python >=2.7,<2.8.0a0"
],
"license": "BSD 3-Clause",
"license_family": "BSD",
"md5": "793c6af90ed62c964e28b046e0b071c6",
"name": "pytorch",
"sha256": "a53f772a224485df7436d4b2aa2c5d44e249e2fb43eee98831eeaaa51a845697",
"size": 282176733,
"subdir": "linux-64",
"timestamp": 1566783471689,
"version": "1.2.0"
}}
print('\n\n\nHere is a sample package entry from repodata.json:')
from pprint import pprint
pprint(packages)
junk = input_func('\n\nNext: sign it with the pkg_mgr key.')
signable = cct_signing.wrap_as_signable(
packages['pytorch-1.2.0-cuda92py27hd3e106c_0.tar.bz2'])
# Sign in place.
cct_signing.sign_signable(
signable,
cct_common.PrivateKey.from_hex('f3cdab14740066fb277651ec4f96b9f6c3e3eb3f812269797b9656074cd52133'))
print('Signed envelope around this pytorch package metadata:\n\n')
pprint(signable)
junk = input_func(
'\n\nNext: verify the signature based on what the now-trusted '
'key manager role told us to expect.\n')
# Some argument validation for the key manager role.
cct_common.checkformat_signable(key_mgr)
if 'delegations' not | |
<filename>tests/contrib/gp/test_models.py<gh_stars>1-10
from __future__ import absolute_import, division, print_function
import logging
from collections import defaultdict, namedtuple
import pytest
import torch
import pyro
import pyro.distributions as dist
import pyro.optim as optim
from pyro.contrib.gp.kernels import Cosine, Matern32, RBF, WhiteNoise
from pyro.contrib.gp.likelihoods import Gaussian
from pyro.contrib.gp.models import (GPLVM, GPRegression, SparseGPRegression,
VariationalGP, VariationalSparseGP)
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.mcmc.hmc import HMC
from pyro.infer.mcmc.mcmc import MCMC
from pyro.params import param_with_module_name
from tests.common import assert_equal
# Configure the pyro logger used during GP optimization in these tests.
logging.basicConfig(format='%(levelname)s %(message)s')
logger = logging.getLogger('pyro')
logger.setLevel(logging.INFO)

# One parametrized case: a GP model class plus its constructor inputs.
# Regression models receive a noise tensor in the 'likelihood' slot;
# variational models receive a Gaussian likelihood object.
T = namedtuple("TestGPModel", ["model_class", "X", "y", "kernel", "likelihood"])
# Two training points, each with 3 input features.
X = torch.tensor([[1., 5., 3.], [4., 3., 7.]])
# 1-D targets (single output function).
y1D = torch.tensor([2., 1.])
# 2-D targets (a batch of output functions).
y2D = torch.tensor([[1., 2.], [3., 3.], [1., 4.], [-1., 1.]])
kernel = RBF(input_dim=3, variance=torch.tensor(3.), lengthscale=torch.tensor(2.))
noise = torch.tensor(1e-6)
likelihood = Gaussian(noise)

# Every model class is exercised with both 1-D and 2-D targets.
TEST_CASES = [
    T(
        GPRegression,
        X, y1D, kernel, noise
    ),
    T(
        GPRegression,
        X, y2D, kernel, noise
    ),
    T(
        SparseGPRegression,
        X, y1D, kernel, noise
    ),
    T(
        SparseGPRegression,
        X, y2D, kernel, noise
    ),
    T(
        VariationalGP,
        X, y1D, kernel, likelihood
    ),
    T(
        VariationalGP,
        X, y2D, kernel, likelihood
    ),
    T(
        VariationalSparseGP,
        X, y1D, kernel, likelihood
    ),
    T(
        VariationalSparseGP,
        X, y2D, kernel, likelihood
    ),
]

# Human-readable pytest ids such as "GPRegression_y1D".
TEST_IDS = [t[0].__name__ + "_y{}D".format(str(t[2].dim()))
            for t in TEST_CASES]
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_model(model_class, X, y, kernel, likelihood):
    """With y=None the model prior has zero mean and the expected variance."""
    # Sparse variants additionally take inducing points (reuse X here).
    if model_class in (SparseGPRegression, VariationalSparseGP):
        gp = model_class(X, None, kernel, X, likelihood)
    else:
        gp = model_class(X, None, kernel, likelihood)

    loc, var = gp.model()
    if model_class in (VariationalGP, VariationalSparseGP):
        # Variational models: zero-mean, unit-variance prior.
        assert_equal(loc.norm().item(), 0)
        assert_equal(var, torch.ones(var.shape[-1]).expand(var.shape))
    else:
        # Regression models: zero-mean prior with the kernel's diagonal variance.
        assert_equal(loc.norm().item(), 0)
        assert_equal(var, kernel(X).diag())
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_forward(model_class, X, y, kernel, likelihood):
    """Check prediction shapes and basic consistency of gp(Xnew)."""
    # Sparse variants additionally take inducing points (reuse X here).
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        gp = model_class(X, y, kernel, X, likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)

    # test shape
    Xnew = torch.tensor([[2.0, 3.0, 1.0]])
    loc0, cov0 = gp(Xnew, full_cov=True)
    loc1, var1 = gp(Xnew, full_cov=False)
    assert loc0.dim() == y.dim()
    assert loc0.shape[-1] == Xnew.shape[0]
    # test latent shape: leading dims of predictions follow the targets.
    assert loc0.shape[:-1] == y.shape[:-1]
    assert cov0.shape[:-2] == y.shape[:-1]
    assert cov0.shape[-1] == cov0.shape[-2]
    assert cov0.shape[-1] == Xnew.shape[0]
    # Mean must not depend on full_cov; the full covariance's diagonal must
    # equal the diagonal-only variance.
    assert_equal(loc0, loc1)
    n = Xnew.shape[0]
    cov0_diag = torch.stack([mat.diag() for mat in cov0.view(-1, n, n)]).reshape(var1.shape)
    assert_equal(cov0_diag, var1)

    # test trivial forward: Xnew = X
    loc, cov = gp(X, full_cov=True)
    if model_class is VariationalGP or model_class is VariationalSparseGP:
        # Variational models predict from the (untrained) variational
        # distribution: zero mean, identity covariance.
        assert_equal(loc.norm().item(), 0)
        assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape))
    else:
        # Regression models interpolate the training targets exactly
        # (noise here is negligible: 1e-6).
        assert_equal(loc, y)
        assert_equal(cov.norm().item(), 0)

    # test same input forward: Xnew[0,:] = Xnew[1,:] = ...
    # Identical inputs must yield identical predictions.
    Xnew = torch.tensor([[2.0, 3.0, 1.0]]).expand(10, 3)
    loc, cov = gp(Xnew, full_cov=True)
    loc_diff = loc - loc[..., :1].expand(y.shape[:-1] + (10,))
    assert_equal(loc_diff.norm().item(), 0)
    cov_diff = cov - cov[..., :1, :1].expand(y.shape[:-1] + (10, 10))
    assert_equal(cov_diff.norm().item(), 0)

    # test noise kernel forward: kernel = WhiteNoise
    # A pure white-noise kernel predicts zero mean and diagonal covariance.
    gp.kernel = WhiteNoise(input_dim=3, variance=torch.tensor(10.))
    loc, cov = gp(X, full_cov=True)
    assert_equal(loc.norm().item(), 0)
    assert_equal(cov, torch.eye(cov.shape[-1]).expand(cov.shape) * 10)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_forward_with_empty_latent_shape(model_class, X, y, kernel, likelihood):
    """With latent_shape=(), predictions carry no leading latent dimensions."""
    # regression models don't use latent_shape, no need for test
    if model_class in (GPRegression, SparseGPRegression):
        return
    if model_class is VariationalGP:
        gp = model_class(X, y, kernel, likelihood, latent_shape=torch.Size([]))
    else:  # VariationalSparseGP also needs inducing points
        gp = model_class(X, y, kernel, X, likelihood, latent_shape=torch.Size([]))

    Xnew = torch.tensor([[2.0, 3.0, 1.0]])
    mean_full, cov_full = gp(Xnew, full_cov=True)
    mean_diag, var_diag = gp(Xnew, full_cov=False)

    # Trailing dimensions follow the number of test points.
    assert mean_full.shape[-1] == Xnew.shape[0]
    assert cov_full.shape[-1] == cov_full.shape[-2]
    assert cov_full.shape[-1] == Xnew.shape[0]
    # No leading latent batch dimensions.
    assert mean_full.shape[:-1] == torch.Size([])
    assert cov_full.shape[:-2] == torch.Size([])
    # Diagonal-only mode agrees with the full covariance.
    assert_equal(mean_full, mean_diag)
    assert_equal(cov_full.diag(), var_diag)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
@pytest.mark.init(rng_seed=0)
def test_inference(model_class, X, y, kernel, likelihood):
    """Training on GP-sampled data should recover the generating kernel."""
    # skip variational GP models because variance/lengthscale highly
    # depend on variational parameters
    if model_class is VariationalGP or model_class is VariationalSparseGP:
        return
    elif model_class is GPRegression:
        # Fresh kernel with default hyperparameters to be learned.
        gp = model_class(X, y, RBF(input_dim=3), likelihood)
    else:  # model_class is SparseGPRegression
        gp = model_class(X, y, RBF(input_dim=3), X, likelihood)
        # fix inducing points because variance/lengthscale highly depend on it
        gp.fix_param("Xu")

    # Sample many targets from a GP with the *known* kernel, then fit.
    generator = dist.MultivariateNormal(torch.zeros(X.shape[0]), kernel(X))
    target_y = generator(sample_shape=torch.Size([1000])).detach()
    gp.set_data(X, target_y)

    gp.optimize(optim.Adam({"lr": 0.01}), num_steps=1000)

    # The learned kernel's Gram matrix should approximate the generator's.
    y_cov = gp.kernel(X)
    target_y_cov = kernel(X)
    assert_equal(y_cov, target_y_cov, prec=0.1)
@pytest.mark.init(rng_seed=0)
def test_inference_sgpr():
    """SparseGPRegression must fit a noisy sine to within tolerance."""
    num_points = 1000
    X = dist.Uniform(torch.zeros(num_points), torch.ones(num_points) * 5).sample()
    y = 0.5 * torch.sin(3 * X) + dist.Normal(torch.zeros(num_points),
                                             torch.ones(num_points) * 0.5).sample()
    inducing_points = torch.arange(0, 5.5, 0.5)
    sgpr = SparseGPRegression(X, y, RBF(input_dim=1), inducing_points)
    sgpr.optimize(optim.Adam({"lr": 0.01}), num_steps=1000)

    Xnew = torch.arange(0, 5.05, 0.05)
    loc, _ = sgpr(Xnew, full_cov=False)
    target = 0.5 * torch.sin(3 * Xnew)
    assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
@pytest.mark.init(rng_seed=0)
def test_inference_vsgp():
    """VariationalSparseGP must fit a noisy sine to within tolerance."""
    num_points = 1000
    X = dist.Uniform(torch.zeros(num_points), torch.ones(num_points) * 5).sample()
    y = 0.5 * torch.sin(3 * X) + dist.Normal(torch.zeros(num_points),
                                             torch.ones(num_points) * 0.5).sample()
    inducing_points = torch.arange(0, 5.5, 0.5)
    vsgp = VariationalSparseGP(X, y, RBF(input_dim=1), inducing_points, Gaussian())
    vsgp.optimize(optim.Adam({"lr": 0.03}), num_steps=1000)

    Xnew = torch.arange(0, 5.05, 0.05)
    loc, _ = vsgp(Xnew, full_cov=False)
    target = 0.5 * torch.sin(3 * Xnew)
    assert_equal((loc - target).abs().mean().item(), 0, prec=0.06)
@pytest.mark.init(rng_seed=0)
def test_inference_whiten_vsgp():
    """VariationalSparseGP with whitening must fit a noisy sine."""
    num_points = 1000
    X = dist.Uniform(torch.zeros(num_points), torch.ones(num_points) * 5).sample()
    y = 0.5 * torch.sin(3 * X) + dist.Normal(torch.zeros(num_points),
                                             torch.ones(num_points) * 0.5).sample()
    inducing_points = torch.arange(0, 5.5, 0.5)
    vsgp = VariationalSparseGP(X, y, RBF(input_dim=1), inducing_points,
                               Gaussian(), whiten=True)
    vsgp.optimize(optim.Adam({"lr": 0.01}), num_steps=1000)

    Xnew = torch.arange(0, 5.05, 0.05)
    loc, _ = vsgp(Xnew, full_cov=False)
    target = 0.5 * torch.sin(3 * Xnew)
    assert_equal((loc - target).abs().mean().item(), 0, prec=0.07)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_inference_with_empty_latent_shape(model_class, X, y, kernel, likelihood):
    """One optimization step must run with an explicitly empty latent_shape."""
    # Regression models have no latent_shape parameter (it defaults to ()).
    if model_class in (GPRegression, SparseGPRegression):
        return
    if model_class is VariationalGP:
        gp = model_class(X, y, kernel, likelihood, latent_shape=torch.Size([]))
    else:  # VariationalSparseGP
        gp = model_class(X, y, kernel, X, likelihood, latent_shape=torch.Size([]))
    gp.optimize(num_steps=1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_inference_with_whiten(model_class, X, y, kernel, likelihood):
    """One optimization step must run with whitened variational parameters."""
    # Regression models have no whiten option.
    if model_class in (GPRegression, SparseGPRegression):
        return
    if model_class is VariationalGP:
        gp = model_class(X, y, kernel, likelihood, whiten=True)
    else:  # VariationalSparseGP
        gp = model_class(X, y, kernel, X, likelihood, whiten=True)
    gp.optimize(num_steps=1)
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_hmc(model_class, X, y, kernel, likelihood):
    """Smoke-test HMC sampling over kernel hyperparameter priors.

    Runs a short MCMC chain and logs posterior means. No numerical
    assertions are made; the test only checks that sampling completes.
    """
    # Sparse models take the inducing inputs (here X itself) as an extra arg.
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        gp = model_class(X, y, kernel, X, likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)
    # Put uniform priors on the hyperparameters so HMC has latents to sample.
    kernel.set_prior("variance", dist.Uniform(torch.tensor(0.5), torch.tensor(1.5)))
    kernel.set_prior("lengthscale", dist.Uniform(torch.tensor(1.0), torch.tensor(3.0)))

    hmc_kernel = HMC(gp.model, step_size=1)
    mcmc_run = MCMC(hmc_kernel, num_samples=10)
    post_trace = defaultdict(list)
    for trace, _ in mcmc_run._traces():
        variance_name = param_with_module_name(kernel.name, "variance")
        post_trace["variance"].append(trace.nodes[variance_name]["value"])
        lengthscale_name = param_with_module_name(kernel.name, "lengthscale")
        post_trace["lengthscale"].append(trace.nodes[lengthscale_name]["value"])
        # Variational models additionally expose their latent function values.
        if model_class is VariationalGP:
            f_name = param_with_module_name(gp.name, "f")
            post_trace["f"].append(trace.nodes[f_name]["value"])
        if model_class is VariationalSparseGP:
            u_name = param_with_module_name(gp.name, "u")
            post_trace["u"].append(trace.nodes[u_name]["value"])
    # Log posterior means for manual inspection.
    for param in post_trace:
        param_mean = torch.mean(torch.stack(post_trace[param]), 0)
        logger.info("Posterior mean - {}".format(param))
        logger.info(param_mean)
def test_inference_deepGP():
    """Chain two GPs (GPR1 feeding GPR2) and take one SVI step.

    Uses the module-level X, y2D, kernel and likelihood fixtures.
    """
    gp1 = GPRegression(X, None, kernel, name="GPR1")
    # Draw the latent function of the first GP; it becomes the input
    # (and initial inducing points) of the second GP.
    Z, _ = gp1.model()
    gp2 = VariationalSparseGP(Z, y2D, Matern32(input_dim=3), Z.clone(),
                              likelihood, name="GPR2")

    def model():
        # Re-sample the intermediate layer on every model invocation.
        Z, _ = gp1.model()
        gp2.set_data(Z, y2D)
        gp2.model()

    def guide():
        gp1.guide()
        gp2.guide()

    svi = SVI(model, guide, optim.Adam({}), Trace_ELBO())
    svi.step()
@pytest.mark.parametrize("model_class, X, y, kernel, likelihood", TEST_CASES, ids=TEST_IDS)
def test_gplvm(model_class, X, y, kernel, likelihood):
    """GPLVM wrapper: one optimization step and a forward pass must run."""
    if model_class in (SparseGPRegression, VariationalSparseGP):
        gp = model_class(X, y, kernel, X, likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)
    gplvm = GPLVM(gp)
    gplvm.optimize(num_steps=1)  # smoke-test inference
    gplvm(Xnew=X)                # smoke-test forward
def _pre_test_mean_function():
    """Build a toy dataset with a known linear trend plus a sinusoid.

    Returns (X, y, Xnew, ynew, kernel, trend) where trend is a pyro-
    parameterized linear mean function a*x + b whose true values are
    a=2, b=3 (checked later by _post_test_mean_function).
    """
    def f(x):
        return 2 * x + 3 + 5 * torch.sin(7 * x)

    # Use float endpoints: torch.arange(100) would produce an integer
    # tensor, and torch.sin is not defined for integer dtypes.
    X = torch.arange(100.0)
    y = f(X)
    Xnew = torch.arange(100.0, 150.0)
    ynew = f(Xnew)

    kernel = Cosine(input_dim=1)

    def trend(x):
        a = pyro.param("a", torch.tensor(0.))
        b = pyro.param("b", torch.tensor(1.))
        return a * x + b

    return X, y, Xnew, ynew, kernel, trend
def _mape(y_true, y_pred):
return ((y_pred - y_true) / y_true).abs().mean()
def _post_test_mean_function(model, Xnew, y_true):
    """Assert the fitted trend parameters and out-of-sample accuracy.

    The data was generated with trend a*x + b where a=2, b=3 (see
    _pre_test_mean_function), so the learned pyro params must be close.
    """
    assert_equal(pyro.param("a").item(), 2, prec=0.02)
    assert_equal(pyro.param("b").item(), 3, prec=0.02)
    # Out-of-sample predictions within 2% mean absolute percentage error.
    y_pred, _ = model(Xnew)
    assert_equal(_mape(y_true, y_pred).item(), 0, prec=0.02)
def test_mean_function_GPR():
    """GPRegression with a parametric mean function recovers the trend."""
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    gpr = GPRegression(X, y, kernel, mean_function=mean_fn)
    gpr.optimize(optim.Adam({"lr": 0.01}))
    _post_test_mean_function(gpr, Xnew, ynew)
def test_mean_function_SGPR():
    """SparseGPRegression (default approximation) with a mean function."""
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    inducing = X[::20].clone()  # every 20th training input
    sgpr = SparseGPRegression(X, y, kernel, inducing, mean_function=mean_fn)
    sgpr.optimize(optim.Adam({"lr": 0.01}))
    _post_test_mean_function(sgpr, Xnew, ynew)
def test_mean_function_SGPR_DTC():
    """SparseGPRegression with the DTC approximation and a mean function."""
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    inducing = X[::20].clone()
    sgpr = SparseGPRegression(X, y, kernel, inducing, mean_function=mean_fn,
                              approx="DTC")
    sgpr.optimize(optim.Adam({"lr": 0.01}))
    _post_test_mean_function(sgpr, Xnew, ynew)
def test_mean_function_SGPR_FITC():
    """SparseGPRegression with the FITC approximation and a mean function."""
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    inducing = X[::20].clone()
    sgpr = SparseGPRegression(X, y, kernel, inducing, mean_function=mean_fn,
                              approx="FITC")
    sgpr.optimize(optim.Adam({"lr": 0.01}))
    _post_test_mean_function(sgpr, Xnew, ynew)
def test_mean_function_VGP():
    """VariationalGP with a Gaussian likelihood and a mean function."""
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    vgp = VariationalGP(X, y, kernel, Gaussian(), mean_function=mean_fn)
    vgp.optimize(optim.Adam({"lr": 0.01}))
    _post_test_mean_function(vgp, Xnew, ynew)
def test_mean_function_VGP_whiten():
    """VariationalGP with whitening enabled and a mean function."""
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    vgp = VariationalGP(X, y, kernel, Gaussian(), mean_function=mean_fn,
                        whiten=True)
    # Whitening permits a larger learning rate.
    vgp.optimize(optim.Adam({"lr": 0.1}))
    _post_test_mean_function(vgp, Xnew, ynew)
def test_mean_function_VSGP():
    """VariationalSparseGP with a mean function."""
    X, y, Xnew, ynew, kernel, mean_fn = _pre_test_mean_function()
    inducing = X[::20].clone()
    vsgp = VariationalSparseGP(X, y, kernel, inducing, Gaussian(),
                               mean_function=mean_fn)
    vsgp.optimize(optim.Adam({"lr": 0.02}))
    _post_test_mean_function(vsgp, Xnew, ynew)
def | |
struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
self . rloc . afi = socket . ntohs ( oOo00Oo0o00oo )
if ( len ( packet ) < self . rloc . addr_length ( ) ) : return ( None )
packet = self . rloc . unpack_address ( packet [ 2 : : ] )
if ( packet == None ) : return ( None )
self . rloc . mask_len = self . rloc . host_mask_len ( )
if 28 - 28: iII111i % OoO0O00 . OoO0O00 / IiII * Oo0Ooo * iII111i
if 49 - 49: I1IiiI / I1Ii111 * iII111i + I1IiiI % oO0o % ooOoO0o
if 27 - 27: OoO0O00 / iII111i . I1ii11iIi11i
if 71 - 71: OoO0O00 . i11iIiiIii . iIii1I11I1II1 + I1IiiI - o0oOOo0O0Ooo
if 34 - 34: iII111i
if 6 - 6: OoO0O00 . OoOoOO00 + I1ii11iIi11i
if ( self . rloc . is_null ( ) ) : return ( packet )
if 24 - 24: OoO0O00 . Ii1I
IiIi1I1i1iII = self . rloc_name
if ( IiIi1I1i1iII ) : IiIi1I1i1iII = blue ( self . rloc_name , False )
if 86 - 86: I11i % I1Ii111 . I11i * IiII + IiII + II111iiii
if 66 - 66: oO0o / O0 - OoOoOO00
if 69 - 69: iIii1I11I1II1 * OoO0O00 / OoooooooOO % I1ii11iIi11i . I1IiiI % I11i
if 40 - 40: i11iIiiIii % oO0o / OOooOOo
if 85 - 85: OoO0O00 % O0 . Ii1I . iII111i . iII111i
if 90 - 90: o0oOOo0O0Ooo - Oo0Ooo / ooOoO0o / i1IIi - Ii1I
IiI1IIII = self . keys [ 1 ] if self . keys else None
if ( IiI1IIII == None ) :
if ( o0OoOo0o0OOoO0 . remote_public_key == None ) :
OO0o0o0oo = bold ( "No remote encap-public-key supplied" , False )
lprint ( " {} for {}" . format ( OO0o0o0oo , IiIi1I1i1iII ) )
o0OoOo0o0OOoO0 = None
else :
OO0o0o0oo = bold ( "New encap-keying with new state" , False )
lprint ( " {} for {}" . format ( OO0o0o0oo , IiIi1I1i1iII ) )
o0OoOo0o0OOoO0 . compute_shared_key ( "encap" )
if 43 - 43: i11iIiiIii - OoooooooOO % ooOoO0o
if 55 - 55: oO0o % Oo0Ooo % IiII
if 65 - 65: IiII * IiII
if 60 - 60: ooOoO0o
if 92 - 92: O0 % IiII
if 15 - 15: O0 % i1IIi - OOooOOo . IiII
if 1 - 1: I1IiiI
if 40 - 40: o0oOOo0O0Ooo % I11i % O0
if 88 - 88: o0oOOo0O0Ooo - oO0o
if 73 - 73: II111iiii
if ( IiI1IIII ) :
if ( o0OoOo0o0OOoO0 . remote_public_key == None ) :
o0OoOo0o0OOoO0 = None
oOOo0o000o = bold ( "Remote encap-unkeying occurred" , False )
lprint ( " {} for {}" . format ( oOOo0o000o , IiIi1I1i1iII ) )
elif ( IiI1IIII . compare_keys ( o0OoOo0o0OOoO0 ) ) :
o0OoOo0o0OOoO0 = IiI1IIII
lprint ( " Maintain stored encap-keys for {}" . format ( IiIi1I1i1iII ) )
if 7 - 7: O0 / OoO0O00
else :
if ( IiI1IIII . remote_public_key == None ) :
OO0o0o0oo = "New encap-keying for existing state"
else :
OO0o0o0oo = "Remote encap-rekeying"
if 90 - 90: iII111i % oO0o / iIii1I11I1II1
lprint ( " {} for {}" . format ( bold ( OO0o0o0oo , False ) ,
IiIi1I1i1iII ) )
IiI1IIII . remote_public_key = o0OoOo0o0OOoO0 . remote_public_key
IiI1IIII . compute_shared_key ( "encap" )
o0OoOo0o0OOoO0 = IiI1IIII
if 52 - 52: I1IiiI / o0oOOo0O0Ooo
if 20 - 20: I1Ii111 . I1IiiI - iIii1I11I1II1 / iII111i
self . keys = [ None , o0OoOo0o0OOoO0 , None , None ]
if 46 - 46: I1Ii111 . i11iIiiIii
else :
if 89 - 89: OoO0O00 - OOooOOo - i1IIi - OoO0O00 % iIii1I11I1II1
if 52 - 52: o0oOOo0O0Ooo * O0 + I1ii11iIi11i
if 83 - 83: I11i + OOooOOo - OoooooooOO
if 7 - 7: IiII % ooOoO0o / OoooooooOO / o0oOOo0O0Ooo + OoO0O00 - OoO0O00
packet = packet [ ii1iII1i1iiIi : : ]
if 15 - 15: i1IIi + OOooOOo / Ii1I
return ( packet )
if 51 - 51: OOooOOo + O0
if 91 - 91: i11iIiiIii + o0oOOo0O0Ooo % OoO0O00 / oO0o - i1IIi
def decode(self, packet, nonce):
    """Parse one RLOC record from `packet`; return the remaining bytes.

    Wire layout: priority, weight, mpriority, mweight (one byte each),
    then a 16-bit flags field and a 16-bit address family, followed by
    the RLOC address itself.  Returns None if the packet is too short.
    """
    record_format = "BBBBHH"
    record_size = struct.calcsize(record_format)
    if len(packet) < record_size:
        return None

    self.priority, self.weight, self.mpriority, self.mweight, flags, afi = \
        struct.unpack(record_format, packet[:record_size])

    # Flags and AFI arrive in network byte order.
    flags = socket.ntohs(flags)
    afi = socket.ntohs(afi)
    self.local_bit = bool(flags & 0x0004)
    self.probe_bit = bool(flags & 0x0002)
    self.reach_bit = bool(flags & 0x0001)

    if afi == LISP_AFI_LCAF:
        # LCAF-encoded RLOC: hand the AFI bytes back to the LCAF parser.
        packet = self.decode_lcaf(packet[record_size - 2::], nonce)
    else:
        self.rloc.afi = afi
        packet = self.rloc.unpack_address(packet[record_size::])

    self.rloc.mask_len = self.rloc.host_mask_len()
    return packet
if 87 - 87: Ii1I * I1Ii111 + iIii1I11I1II1 * o0oOOo0O0Ooo * iIii1I11I1II1 . I11i
if 66 - 66: Ii1I / OoO0O00 . O0 . I11i % OoooooooOO / OOooOOo
def end_of_rlocs(self, packet, rloc_count):
    """Skip over `rloc_count` RLOC records; return the remaining packet.

    Returns None as soon as any record fails to decode.
    """
    for _ in range(rloc_count):
        packet = self.decode(packet, None)
        if packet is None:
            return None
    return packet
if 94 - 94: OoOoOO00 - I11i + Ii1I + OoOoOO00 + II111iiii
if 61 - 61: IiII + Ii1I / oO0o . OoooooooOO + iII111i
if 29 - 29: OOooOOo
if 69 - 69: oO0o % OoooooooOO * iII111i
if 58 - 58: oO0o / i11iIiiIii . OoOoOO00 % O0 / iIii1I11I1II1
if 50 - 50: I1Ii111 . I11i / O0 . I11i
if 91 - 91: i11iIiiIii . I1ii11iIi11i + I11i
if 67 - 67: I1ii11iIi11i * I1Ii111 * I1IiiI / I11i - IiII + oO0o
if 11 - 11: O0 + i1IIi / o0oOOo0O0Ooo * OoO0O00
if 64 - 64: i1IIi % IiII . ooOoO0o . iIii1I11I1II1 + OoO0O00 - iIii1I11I1II1
if 52 - 52: II111iiii - IiII
if 91 - 91: iIii1I11I1II1 + iII111i . I11i % i11iIiiIii - i11iIiiIii + I1IiiI
if 75 - 75: I1ii11iIi11i / I1IiiI - iIii1I11I1II1 / OoO0O00 * OOooOOo
if 73 - 73: OoooooooOO % IiII / I1Ii111 * I11i + i1IIi % i11iIiiIii
if 91 - 91: i11iIiiIii
if 6 - 6: O0 - iIii1I11I1II1 + I1Ii111 . o0oOOo0O0Ooo * i11iIiiIii
if 53 - 53: OOooOOo / I1IiiI / oO0o * OOooOOo / i1IIi - I1Ii111
if 71 - 71: O0 + Oo0Ooo % oO0o - o0oOOo0O0Ooo
if 82 - 82: iIii1I11I1II1
if 64 - 64: ooOoO0o + I1IiiI % OOooOOo + II111iiii
if 46 - 46: I1IiiI
if 72 - 72: iII111i
if 100 - 100: I1IiiI
if 55 - 55: i1IIi % IiII
if 44 - 44: | |
import pygame
from pygame.locals import *
from obstacles.floor_collision import FloorCollision
from obstacles.teleporter import Teleport
from Points import Points
from player.player import Player
from pygame import sprite
from AI.enemy import Gumba
from AI.enemy import Koopatroops
from AI.enemy import Paratroops
from AI.enemy import Piranhaplant
from obstacles.bricks import Bricks
from obstacles.bricks import BrickPieces
from obstacles.platform import Platform
from obstacles.firebar import Firebar
from items.coins import Coins
from custom import developer_tool as dt
from items.mushroom import Magic
from items.mushroom import Oneup
from items.fire_flower import Fireflower
from items.starman import Starman
from obstacles.pipe import Pipe
from obstacles.flag_pole import Flag_Pole
class GameScreen:
""" Game Screen runs the game. """
def __init__(self, hub, level_name="1-1-1"):
    """Initialize the playable level screen.

    :param hub: central game object providing the main screen surface,
        controller, camera, game mode and level definitions.
    :param level_name: key into hub.game_levels selecting the level to load.
    """
    # Components needed to communicate with the other game screens
    self.hub = hub
    self.screen = hub.main_screen
    self.controller = hub.controller
    self.camera = hub.camera
    self.gamemode = hub.gamemode
    self.level_name = level_name
    # Next whole-second tick for the level timer (ms since pygame init + 1s)
    self.time_seconds = pygame.time.get_ticks() + 1000
    # Bounce physics (applied when the player stomps something)
    self.counter_bounce = 0
    self.bounce_max_height = 100
    self.bounce_velocity = 35
    # Set up the level background
    self.bg_image = pygame.image.load(self.hub.game_levels[self.level_name]["background_image"])
    self.bg_rect = self.bg_image.get_rect()
    self.prep_bg_image()
    # Background ("world") collision group. Does not include brick collision.
    self.background_collisions = sprite.Group()
    # Teleporters that move the player to a given destination
    self.teleporter_group = sprite.Group()
    # Floating score popups
    self.point_group = sprite.Group()
    # Single-player group, used to respawn the player if needed
    self.player_group = sprite.GroupSingle()
    # Fireballs thrown by the player
    self.player_fireball_group = sprite.Group()
    # Walking enemies (gumbas, koopas, ...)
    self.enemy_group = sprite.Group()
    # Piranha plants (handled separately from walking enemies)
    self.plant_group = sprite.Group()
    # Power-up groups
    self.magic_mushroom_group = sprite.Group()
    self.oneup_mushroom_group = sprite.Group()
    self.fireflower_group = sprite.Group()
    self.starman_group = sprite.Group()
    # Red or green koopa shells (idle, kickable)
    self.shells_group = sprite.Group()
    # Moving projectiles (kicked shells, fire, bullet bills)
    self.projectile_group = sprite.Group()
    # Enemies currently playing their death animation
    self.death_group = sprite.Group()
    # Level geometry groups
    self.brick_group = sprite.Group()
    self.platform_group = sprite.Group()
    self.brickpieces_group = sprite.Group()
    self.coin_group = sprite.Group()
    self.pipe_group = sprite.Group()
    self.flagpole_group = sprite.Group()
    self.firebar_group = sprite.Group()
    # Populate all groups from the level's JSON definition
    self.spawn_objects(hub)
def run(self):
    """Advance the game by one frame: handle events, update state, redraw."""
    for phase in (self.run_event, self.run_update, self.run_draw):
        phase()
def run_event(self):
    """Poll pygame events: quit, movement keys, and developer tools."""
    for event in pygame.event.get():
        if event.type == QUIT:
            self.hub.exit_game()
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                self.hub.exit_game()
            if event.key == K_SPACE:
                # Jump key; jump_pressed tracks that the key is held
                self.controller.jump = True
                self.controller.jump_pressed = True
            if event.key == K_LEFT or event.key == K_a:
                # Move left; cancel any opposing right movement
                self.controller.move_left = True
                self.controller.move_right = False
            if event.key == K_RIGHT or event.key == K_d:
                # Move right; cancel any opposing left movement
                self.controller.move_right = True
                self.controller.move_left = False
            if event.key == K_UP or event.key == K_w:
                # Enter level if standing inside a teleporter
                self.controller.up = True
            if event.key == K_9:
                # Go back to the main menu
                self.hub.screen_selector = 1
            if event.key == K_8:
                # Developer tool: print the player's x coordinates
                dt.get_coordinates(self.player_group, self.camera)
            if event.key == K_7:
                # Developer tool: toggle grid coordinate overlay
                self.controller.toggle_grid = not self.controller.toggle_grid
            if event.key == K_6:
                # Developer tool: toggle mouse coordinate overlay
                self.controller.toggle_mouse_coordinates = not self.controller.toggle_mouse_coordinates
            if event.key == K_1:
                # Developer tool: set measurement point A
                dt.set_point_a(self.controller, self.camera)
            if event.key == K_2:
                # Developer tool: set measurement point B
                dt.set_point_b(self.controller, self.camera)
            if event.key == K_3:
                # Developer tool: report location/size between points A and B
                dt.print_description(self.controller)
            if event.key == K_LSHIFT:
                # Throw a fireball (if the player has the power-up)
                self.player_group.sprite.throw()
        if event.type == KEYUP:
            if event.key == K_SPACE:
                # Stop the jump as soon as the key is released
                self.controller.jump = False
                self.controller.jump_pressed = False
            if event.key == K_LEFT or event.key == K_a:
                self.controller.move_left = False
            if event.key == K_RIGHT or event.key == K_d:
                self.controller.move_right = False
            if event.key == K_UP or event.key == K_w:
                self.controller.up = False
        if event.type == MOUSEBUTTONDOWN:
            if self.controller.developer_mode:
                # Developer tool: teleport the player to the mouse position
                dt.move_player(self.camera, self.player_group)
            else:
                self.player_group.sprite.throw()
def run_update(self):
    """Update all game objects for one frame.

    Player, camera and world collision always update; the rest of the
    world is paused while hub.modeFreeze is set (e.g. during power-up
    animations).
    """
    self.update_player_group()
    self.update_player_fireball_group()
    self.update_teleporter_group()
    self.update_camera()
    self.update_world_collision()
    # NOTE(review): the freeze guard's exact extent was ambiguous in the
    # original formatting; it appears to cover every remaining update —
    # confirm against project history.
    if not self.hub.modeFreeze:
        self.update_platform_group()
        self.update_enemy_group()
        self.update_death_group()
        self.update_projectile_group()
        self.update_shell_group()
        self.update_brick_group()
        self.update_brickpieces_group()
        self.update_coin_group()
        self.update_mushroom_group()
        self.update_fireflower_group()
        self.update_starman_group()
        self.update_firebar_group()
        self.update_pipe_group()
        self.update_point_group()
        self.update_flagpole_group()
        self.update_timer()
def run_draw(self):
    """Draw the frame back-to-front: background, actors, items, overlays."""
    # Debug collision boxes (world and teleporter)
    self.draw_world_collision_group()
    self.draw_teleporter_group()
    # Clear, then paint the level background first so everything else
    # is drawn on top of it
    self.screen.fill((0, 0, 0))
    self.screen.blit(self.bg_image, self.bg_rect)
    # Enemies behind the player, player on top
    self.draw_enemy_group()
    self.draw_player_group()
    self.draw_player_fireball_group()
    # Dying enemies and shells
    self.draw_death_group()
    self.draw_shell_group()
    self.draw_projectile_group()
    # Level geometry
    self.draw_brick_group()
    self.draw_brickpieces_group()
    self.draw_platform_group()
    # Collectibles and power-ups
    self.draw_coin_group()
    self.draw_mushroom_group()
    self.draw_fireflower_group()
    self.draw_starman_group()
    self.draw_pipe_group()
    self.draw_flagpole_group()
    # Score popups and hazards
    self.draw_point_group()
    self.draw_firebar_group()
    if self.controller.toggle_grid:
        # Developer tool: grid coordinate overlay
        dt.draw_debug_line(self.screen, self.player_group, self.camera)
    if self.controller.toggle_mouse_coordinates:
        # Developer tool: mouse coordinates over the cursor
        dt.draw_mouse_coordinates(self.screen, self.camera)
def prep_bg_image(self):
    """Scale the background and pin it to the bottom-left of the screen."""
    scaled_size = (self.bg_rect.width * 3 + 50, self.bg_rect.height * 3 + 50)
    self.bg_image = pygame.transform.scale(self.bg_image, scaled_size)
    self.bg_rect = self.bg_image.get_rect()
    self.bg_rect.bottomleft = self.screen.get_rect().bottomleft
def update_world_collision(self):
""" update world collisions"""
if self.player_group.sprite.mario_motion_state is not "dying":
for firebar in self.firebar_group:
if pygame.sprite.collide_mask(self.player_group.sprite, firebar):
# print("Mario hit Firbar")
self.player_group.sprite.get_smaller()
# Platform Collision with player
for platform in self.platform_group:
if platform.rect.colliderect(self.player_group.sprite.rect):
if self.player_group.sprite.rect.bottom <= platform.rect.top + 25:
platform.state = self.hub.FALL
self.player_group.sprite.rect.bottom = platform.rect.top
self.player_group.sprite.reset_jump()
self.player_group.sprite.reset_bounce()
# check if the player hits the left wall
elif self.player_group.sprite.rect.right < platform.rect.left + 20:
self.player_group.sprite.rect.right = platform.rect.left
# check if the player hits the right wall
elif self.player_group.sprite.rect.left > platform.rect.right - 20:
self.player_group.sprite.rect.left = platform.rect.right
else:
self.player_group.sprite.counter_jump = self.player_group.sprite.jump_max_height
else:
platform.state = self.hub.RESTING
# Remove collisions that are no longer needed
for collision in self.background_collisions:
past_screen = collision.update()
if past_screen:
self.background_collisions.remove(collision)
# Brick Collision with player
for brick in self.brick_group:
if brick.rect.colliderect(self.player_group.sprite.rect):
if self.player_group.sprite.rect.bottom <= brick.rect.top + 10:
self.player_group.sprite.rect.bottom = brick.rect.top
self.player_group.sprite.reset_jump()
self.player_group.sprite.reset_bounce()
# check if the player hits the left wall
elif self.player_group.sprite.rect.right < brick.rect.left + 20:
self.player_group.sprite.rect.right = brick.rect.left
# check if the player hits the right wall
elif self.player_group.sprite.rect.left > brick.rect.right - 20:
self.player_group.sprite.rect.left = brick.rect.right
else:
self.player_group.sprite.counter_jump = self.player_group.sprite.jump_max_height
if brick.state == self.hub.RESTING:
brick.state = self.hub.BUMPED
if brick.coin_total > 0 and (brick.insides == 'coins' or brick.insides == 'coin'):
self.coin_group.add(Coins(hub=self.hub, x=brick.rect.x + 10 + self.camera.world_offset_x,
y=brick.rect.y - 50, name="Coin"+str(brick.coin_total),
state="floating"))
elif brick.insides == 'star':
self.starman_group.add(Starman(hub=self.hub,
x=brick.rect.x + 10 + self.camera.world_offset_x,
y=brick.rect.y-5, name="Star"))
elif brick.insides == 'rshroom':
self.magic_mushroom_group.add(Magic(hub=self.hub,
x=brick.rect.x + self.camera.world_offset_x,
y=brick.rect.y-5))
elif brick.insides == 'gshroom':
self.oneup_mushroom_group.add(Oneup(hub=self.hub,
x=brick.rect.x + self.camera.world_offset_x,
y=brick.rect.y-5))
elif brick.insides == 'flower':
self.fireflower_group.add(Fireflower(hub=self.hub,
x=brick.rect.x + self.camera.world_offset_x,
y=brick.rect.y-5, name="Flower"))
else:
self.brickpieces_group.add(
BrickPieces(hub=self.hub, x=brick.rect.x + self.camera.world_offset_x,
y=brick.rect.y - 25, velx=-5, vely=-12, theme=brick.theme),
BrickPieces(hub=self.hub, x=brick.rect.x + 25 + self.camera.world_offset_x,
y=brick.rect.y - 25, velx=5, vely=-12, theme=brick.theme),
BrickPieces(hub=self.hub, x=brick.rect.x + self.camera.world_offset_x,
y=brick.rect.y + 25, velx=-5, vely=-5, theme=brick.theme),
BrickPieces(hub=self.hub, x=brick.rect.x + 25 + self.camera.world_offset_x,
y=brick.rect.y + 25, velx=5, vely=-5, theme=brick.theme)
)
# Coin Collision with player
for coin in self.coin_group:
if coin.rect.colliderect(self.player_group.sprite.rect) and coin.state == "resting":
coin.kill()
self.hub.gamemode.score += 200
self.point_group.add(Points(self.hub, self.point_group, "200pts", coin.rect.centerx, coin.rect.centery))
self.hub.gamemode.coins += 1
self.hub.sound_board.coin.play()
# Enemy collision with player
for shell in self.shells_group:
if shell.rect.colliderect(self.player_group.sprite.rect):
shell.move = self.player_group.sprite.mario_facing_direction
shell.state = self.hub.SLIDE
if shell.move == self.hub.LEFT:
shell.rect.right = self.player_group.sprite.rect.left
else:
shell.rect.left = self.player_group.sprite.rect.right
if self.player_group.sprite.rect.bottom < shell.rect.top + 20:
self.player_group.sprite.reset_bounce()
self.player_group.sprite.bounce()
shell.kill()
self.projectile_group.add(shell)
for enemy in self.enemy_group:
if enemy.rect.colliderect(self.player_group.sprite.rect):
if self.player_group.sprite.rect.bottom < enemy.rect.top + 20:
if enemy.name == "piranhaplant":
self.player_group.sprite.get_smaller()
# print("<NAME>")
else:
self.player_group.sprite.reset_bounce()
self.player_group.sprite.bounce()
enemy.state = self.hub.STOMPED
enemy.isstomped = True
enemy.death_timer = pygame.time.get_ticks()
enemy.kill()
self.point_group.add(Points(self.hub, self.point_group, "100pts",
enemy.rect.centerx + self.camera.world_offset_x, enemy.rect.centery))
if enemy.name == "paratroop":
self.enemy_group.add(Koopatroops(hub=self.hub, x=enemy.rect.x + self.camera.world_offset_x
, y=enemy.rect.y + 50
, color=2))
elif enemy.name == "koopatroop":
self.shells_group.add(enemy)
else:
self.death_group.add(enemy)
# self.player_group.sprite.is_jumping = False
else:
# If Mario collides in x direction
if self.player_group.sprite.rect.right < enemy.rect.left + 20:
self.player_group.sprite.get_smaller()
elif self.player_group.sprite.rect.left > enemy.rect.right - 20:
self.player_group.sprite.get_smaller()
for mushroom in self.magic_mushroom_group:
if mushroom.rect.colliderect(self.player_group.sprite.rect):
for player in self.player_group:
player.get_bigger()
mushroom.kill()
for mushroom in self.oneup_mushroom_group:
if mushroom.rect.colliderect(self.player_group.sprite.rect):
mushroom.kill()
self.gamemode.lives += | |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import collections
import signal
import socket
import time
import msgpack
import requests
import simplejson
import sys
import six
from paramiko.ssh_exception import NoValidConnectionsError
from aetros.utils import invalid_json_values, prepend_signal_handler, create_ssh_stream, is_debug, is_debug2, \
thread_join_non_blocking
from threading import Thread, Lock
from aetros.const import __version__
class ApiClient:
    """Thin wrapper around the AETROS HTTP API.

    Normalizes any non-string ``json`` keyword argument by round-tripping
    it through simplejson (filtering invalid values and preserving key
    order) and appends the API token to every request URL.
    """

    def __init__(self, api_host, api_key):
        self.host = api_host
        self.api_key = api_key

    @staticmethod
    def _normalize_json_kwarg(kwargs):
        """Round-trip the ``json`` kwarg through simplejson, in place.

        Shared by get/post/put; replaces invalid values via
        invalid_json_values and keeps key order with an OrderedDict hook.
        """
        json_chunk = kwargs.get('json')
        if json_chunk and not isinstance(json_chunk, str):
            kwargs['json'] = simplejson.loads(
                simplejson.dumps(json_chunk, default=invalid_json_values),
                object_pairs_hook=collections.OrderedDict)

    def get(self, url, params=None, **kwargs):
        self._normalize_json_kwarg(kwargs)
        return requests.get(self.get_url(url), params=params, **kwargs)

    def post(self, url, data=None, **kwargs):
        self._normalize_json_kwarg(kwargs)
        return requests.post(self.get_url(url), data=data, **kwargs)

    def put(self, url, data=None, **kwargs):
        self._normalize_json_kwarg(kwargs)
        return requests.put(self.get_url(url), data=data, **kwargs)

    def get_url(self, affix):
        """Build the full API URL for ``affix``, appending the auth token."""
        url = 'http://%s/api/%s' % (self.host, affix)
        if self.api_key:
            # Append with '&' if the affix already carries a query string.
            url += ('&' if '?' in url else '?') + 'token=' + self.api_key
        return url
class BackendClient:
def __init__(self, config, event_listener, logger):
    """Set up per-channel connection bookkeeping (no I/O happens here).

    :param config: dict with at least a 'host' entry.
    :param event_listener: object with a fire(event_name) method.
    :param logger: standard logger used for debug/connection messages.
    """
    self.config = config
    self.host = config['host']
    self.go_offline_on_first_failed_attempt = True

    self.event_listener = event_listener
    self.logger = logger
    self.message_id = 0
    self.sync_status = {}

    self.api_key = None
    self.job_id = None

    # All of the following dicts are keyed by channel name ('' = main).
    self.queues = {}
    self.ssh_stream = {}
    self.ssh_channel = {}
    self.thread_read_instances = {}
    self.thread_write_instances = {}
    self.stop_on_empty_queue = {}
    self.channel_closed = {}

    # Transfer statistics
    self.bytes_sent = 0
    self.bytes_total = 0
    self.bytes_speed = 0

    self.lock = Lock()
    self.channel_lock = {}
    self.queue_lock = {}

    self.connection_errors = 0
    self.connection_tries = 0
    self.in_connecting = {}
    self.write_speeds = []
    self.read_speeds = []

    # Indicates whether we are offline or not, means not connected to the
    # internet and should not establish a connection to Aetros.
    self.online = None

    # Whether the client is active and should do things.
    self.active = False
    self.expect_close = False
    self.external_stopped = False

    # The connection is authenticated against the server and ready to send stuff.
    self.registered = {}

    # The actual connection is established.
    self.connected = {}
    self.was_connected_once = {}
    self.connected_since = {}

    # NOTE(review): msgpack's Unpacker(encoding=...) parameter is deprecated
    # in newer msgpack releases (raw=False replaces it) — confirm the pinned
    # msgpack version before upgrading.
    self.read_unpacker = msgpack.Unpacker(encoding='utf-8')
def on_sigint(self, sig, frame):
    """SIGINT handler: mark the shutdown as intentional.

    When the connection subsequently breaks, we must not reconnect.
    """
    self.expect_close = True
def start(self, channels=None):
    """Activate the client and open the main ('') channel.

    Blocks until the main channel has either registered or failed, then
    starts the remaining `channels` on success. Returns whether the main
    channel registered (None if the client was already active).
    """
    if self.active:
        return

    self.logger.debug('Client start')
    self.active = True

    # Install our SIGINT handler in front of any existing one, so Ctrl+C
    # flags the close as intentional (see on_sigint).
    prepend_signal_handler(signal.SIGINT, self.on_sigint)

    # Reset all per-channel bookkeeping from any previous run.
    self.queues = {}
    self.thread_read_instances = {}
    self.thread_write_instances = {}
    self.stop_on_empty_queue = {}
    self.connected = {}
    self.registered = {}
    self.ssh_stream = {}
    self.was_connected_once = {}

    self.start_channel('')

    registered = False
    while True:
        # Wait until the reader thread sets registered to True or False.
        if self.registered[''] is not None:
            registered = self.registered['']
            break
        time.sleep(0.1)

    if not channels:
        channels = ['']

    if registered:
        # The main (='') channel is registered; start all other channels now.
        for channel in channels:
            if channel != '':
                self.start_channel(channel)

    return registered
def start_channel(self, channel):
    """Initialise per-channel state and spawn its reader/writer threads."""
    # Fresh connection bookkeeping for this channel.
    self.queues[channel] = []
    self.ssh_stream[channel] = None
    self.ssh_channel[channel] = None
    self.connected[channel] = None
    self.registered[channel] = None
    self.connected_since[channel] = 0
    self.was_connected_once[channel] = False
    self.stop_on_empty_queue[channel] = False
    self.channel_lock[channel] = Lock()
    self.queue_lock[channel] = Lock()
    self.in_connecting[channel] = False
    self.channel_closed[channel] = False

    # One daemon thread reads from the channel, another writes to it.
    # Each is registered in its instance dict before being started.
    for registry, target in ((self.thread_read_instances, self.thread_read),
                             (self.thread_write_instances, self.thread_write)):
        worker = Thread(target=target, args=[channel])
        worker.daemon = True
        registry[channel] = worker
        worker.start()
    def on_connect(self, reconnect, channel):
        """Hook invoked by connect() right after a channel comes up.

        Subclasses override this to perform server-side registration; the
        return value is stored as the channel's registration result
        (truthy = registered). The base implementation does nothing.

        :param reconnect: True when this channel had been connected before
        :param channel: channel name ('' is the main channel)
        """
        pass
def go_offline(self):
if self.online is False:
return
self.event_listener.fire('offline')
self.online = False
    def connect(self, channel):
        """
        Establish the SSH stream and 'stream' exec channel for *channel*.

        In the write-thread we detect that no connection is living anymore and try always again.
        Up to the 3rd connection try, we report to the user. We keep trying but in silence.
        Also, when more than 10 connection tries are detected, every further
        attempt is delayed by an extra 10 seconds.

        :param channel: channel name ('' is the main channel)
        :return: True when the channel is connected afterwards (also when
                 already connected or offline), False otherwise
        """
        # back off once the failure count is high; see docstring
        if self.connection_tries > 10:
            time.sleep(10)

        # re-entrancy guard: only one connect() per channel at a time
        if self.in_connecting[channel]:
            return False

        self.in_connecting[channel] = True
        self.logger.debug('[%s] Wanna connect ...' % (channel, ))

        try:
            # nothing to do when already connected or deliberately offline
            if self.is_connected(channel) or self.online is False:
                if self.is_connected(channel):
                    self.logger.debug('[%s] Already connected' % (channel, ))
                if self.online is False:
                    self.logger.debug('[%s] self.online=False' % (channel, ))
                return True

            # reset channel state under the lock before dialing out
            self.channel_lock[channel].acquire()
            self.connected[channel] = None
            self.registered[channel] = None
            self.ssh_stream[channel] = False
            self.ssh_channel[channel] = False
            messages = None
            stderrdata = ''

            try:
                # always true here (just reset to False above); kept as a guard
                if not self.ssh_stream[channel]:
                    self.logger.debug('[%s] Open ssh connection' % (channel, ))
                    self.ssh_stream[channel] = create_ssh_stream(self.config, exit_on_failure=False)

                self.logger.debug('[%s] open channel' % (channel, ))
                self.ssh_channel[channel] = self.ssh_stream[channel].get_transport().open_session()
                self.ssh_channel[channel].exec_command('stream')
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception as e:
                self.connected[channel] = False
                self.registered[channel] = False
                self.logger.debug('[%s] connection failed: %s' % (channel, str(e)))
                return False
            finally:
                self.channel_lock[channel].release()

            # the server must greet us with at least one message,
            # otherwise treat the connection as failed
            if self.ssh_channel[channel]:
                messages = self.wait_for_at_least_one_message(channel)

            if not messages:
                stderrdata = self.ssh_channel[channel].recv_stderr().decode("utf-8").strip()
                self.connected[channel] = False
                self.registered[channel] = False
            else:
                self.logger.debug('[%s] opened and received %d messages' % (channel, len(messages)))
                self.connected[channel] = True
                # subclass hook performs the actual registration handshake
                self.registered[channel] = self.on_connect(self.was_connected_once[channel], channel)
                self.connected_since[channel] = time.time()

                if channel == '' and self.registered[channel] and self.was_connected_once[channel]:
                    self.logger.info("Successfully reconnected.")

            if not self.registered[channel]:
                # make sure to close channel and connection first
                try:
                    self.ssh_channel[channel] and self.ssh_channel[channel].close()
                except: pass

                try:
                    self.ssh_stream[channel] and self.ssh_stream[channel].close()
                except: pass

                self.logger.debug("[%s] Client: registration failed. stderrdata: %s" % (channel, stderrdata))
                self.connected[channel] = False

                # NOTE(review): close() is called unguarded here although the
                # stream may be False/already closed; any resulting exception
                # other than KeyboardInterrupt/SystemExit propagates to the
                # outer handler and skips the connection_tries increment.
                try:
                    self.logger.debug('[%s] Client: ssh_tream close due to registration failure' % (channel, ))
                    self.ssh_stream[channel].close()
                except (KeyboardInterrupt, SystemExit):
                    raise

                self.connection_tries += 1
                if not self.was_connected_once[channel] and self.go_offline_on_first_failed_attempt:
                    # initial try needs to be online, otherwise we go offline
                    self.go_offline()

                if stderrdata:
                    # refused/denied get their own messaging below
                    if 'Connection refused' not in stderrdata and 'Permission denied' not in stderrdata:
                        self.logger.error(stderrdata)

                if 'Permission denied' in stderrdata:
                    if self.connection_tries < 3:
                        self.logger.warning("Access denied. Did you setup your SSH public key correctly "
                                            "and saved it in your AETROS Trainer user account?")

                    # auth failure is not recoverable by retrying: shut down
                    self.close()
                    sys.exit(1)

                self.connection_error(channel, "Connection error during connecting to %s: %s" % (self.host, str(stderrdata)))
            else:
                self.was_connected_once[channel] = True
        except Exception as error:
            self.connection_error(channel, error)
        finally:
            self.in_connecting[channel] = False

        return self.is_connected(channel)
# def debug(self):
# sent = len(filter(lambda x: x['_sent'], self.queue))
# sending = len(filter(lambda x: x['_sending'], self.queue))
# open = len(filter(lambda x: not x['_sending'], self.queue))
# self.logger.debug("%d sent, %d in sending, %d open " % (sent, sending, open))
def end(self):
self.expect_close = True
for channel in six.iterkeys(self.ssh_channel):
self.send_message({'type': 'end'}, channel)
self.wait_for_close()
    def connection_error(self, channel, error=None):
        """Handle a broken connection on *channel*.

        Closes the SSH channel/stream so a later connect() can recover,
        clears the connection flags and — for the main channel only — logs
        the error and fires the 'disconnect' event. Expected closes
        (self.expect_close) are silently ignored.

        :param channel: channel name ('' is the main channel)
        :param error: optional exception or message describing the failure
        """
        if not self.active:
            # we don't care when we're not active
            return

        # give it some free time
        time.sleep(0.1)

        # make sure ssh connection is closed, so we can recover
        try:
            if self.ssh_channel[channel]:
                self.ssh_channel[channel].close()
        except (KeyboardInterrupt, SystemExit):
            raise

        try:
            if self.ssh_stream[channel]:
                self.logger.debug('[%s] Client: ssh_stream close due to connection error' % (channel,))
                self.ssh_stream[channel].close()
        except (KeyboardInterrupt, SystemExit):
            raise

        if self.expect_close:
            # we expected the close, so ignore the error
            return

        # needs to be set before logger.error, since they can call send_message again
        self.connected[channel] = False
        self.registered[channel] = False

        if socket is None:
            # python interpreter is already dying, so quit
            return

        if channel != '':
            # we don't care about the file channel,
            # it will reconnect anyway
            return

        since = 0
        if self.connected_since[channel]:
            since = time.time() - self.connected_since[channel]

        message = "[%s] Connection error (connected for %d seconds) " % (channel, since)
        if error:
            import traceback
            self.logger.debug(traceback.format_exc())

            # py2-style exceptions expose .message; fall back to str(error)
            if hasattr(error, 'message'):
                self.logger.error(message + ": " + str(error.message))
            else:
                self.logger.error(message + ": " + str(error))

            if 'No authentication methods available' in str(error):
                self.logger.error("Make sure you have authenticated your machine correctly using "
                                  "'aetros authenticate'.")
        else:
            self.logger.error(message)

        self.event_listener.fire('disconnect')
        self.connection_errors += 1
    def thread_write(self, channel):
        """Writer loop for *channel* (runs in its own daemon thread).

        While the client is active it drains the channel's queue head-first,
        sending each message over the SSH channel. When the connection drops
        it triggers connect() for a reconnect, and it terminates once
        stop_on_empty_queue is set and the queue is drained (or the channel
        is gone).

        :param channel: channel name ('' is the main channel)
        """
        while self.active:
            if self.online is not False:
                if self.is_connected(channel) and self.is_registered(channel) and self.queues[channel]:
                    # peek at the queue head; it is only removed once '_sent'
                    message = self.queues[channel][0]
                    try:
                        # NOTE(review): 'sent' is written below but never read
                        sent = []
                        if message['_sending'] and not message['_sent']:
                            # stale in-flight marker from a dropped connection: retry
                            message['_sending'] = False
                        if not self.is_connected(channel) or not self.is_registered(channel):
                            # additional check to make sure there's no race condition
                            self.logger.debug('[%s] break while sending' % (channel,))
                            break
                        if not message['_sending'] and not message['_sent']:
                            self.send_message(message, channel)
                            sent.append(message)
                        # NOTE(review): if remove() raised, the lock would leak;
                        # a try/finally (or 'with') would be safer — confirm intent
                        self.queue_lock[channel].acquire()
                        if message in self.queues[channel]:
                            if message['_sent']:
                                self.queues[channel].remove(message)
                        self.queue_lock[channel].release()
                    except Exception as e:
                        self.logger.debug('[%s] Closed write thread: exception. %d messages left'
                                          % (channel, len(self.queues[channel]), ))
                        self.connection_error(channel, e)
                else:
                    # nothing to do right now; avoid busy-spinning
                    time.sleep(0.1)
                if self.stop_on_empty_queue[channel]:
                    # shutdown requested: exit once drained or disconnected
                    if len(self.queues[channel]) == 0 or not self.is_connected(channel) or \
                            not self.is_registered(channel):
                        self.logger.debug('[%s] Closed write thread: ended. %d messages left'
                                          % (channel, len(self.queues[channel]),))
                        return
            # reconnect loop: only while active and not deliberately closing
            if self.active and not self.is_connected(channel) and not self.expect_close:
                if not self.connect(channel):
                    time.sleep(1)
        self.logger.debug('[%s] Closed write thread: disconnect. %d messages left' % (channel, len(self.queues[channel]), ))
def thread_read(self, channel):
while self.active:
if self.online is not False:
if self.is_connected(channel) and self.is_registered(channel):
try:
# this blocks if we have data
messages = self.read(channel)
if messages is not None:
self.logger.debug("[%s] Client: handle message: %s" % (channel, str(messages)))
self.handle_messages(channel, messages)
if self.stop_on_empty_queue[channel]:
return
except Exception as e:
self.logger.debug('[%s] Closed read thread: exception' % | |
2): (0, 1),
(8, 21, -4, 3): (0, 1),
(8, 21, -4, 4): (0, 1),
(8, 21, -4, 5): (0, 1),
(8, 21, -3, -5): (0, 1),
(8, 21, -3, -4): (0, 1),
(8, 21, -3, -3): (0, 0),
(8, 21, -3, -2): (0, 1),
(8, 21, -3, -1): (0, 1),
(8, 21, -3, 0): (0, 1),
(8, 21, -3, 1): (0, 1),
(8, 21, -3, 2): (0, 1),
(8, 21, -3, 3): (1, 1),
(8, 21, -3, 4): (1, 1),
(8, 21, -3, 5): (1, 0),
(8, 21, -2, -5): (-1, 1),
(8, 21, -2, -4): (-1, 1),
(8, 21, -2, -3): (-1, 0),
(8, 21, -2, -2): (-1, 1),
(8, 21, -2, -1): (1, 1),
(8, 21, -2, 0): (1, 1),
(8, 21, -2, 1): (1, 1),
(8, 21, -2, 2): (1, 1),
(8, 21, -2, 3): (1, 1),
(8, 21, -2, 4): (0, 1),
(8, 21, -2, 5): (0, 1),
(8, 21, -1, -5): (-1, 1),
(8, 21, -1, -4): (-1, 1),
(8, 21, -1, -3): (-1, 1),
(8, 21, -1, -2): (-1, 1),
(8, 21, -1, -1): (1, 1),
(8, 21, -1, 0): (1, 1),
(8, 21, -1, 1): (1, 1),
(8, 21, -1, 2): (0, 1),
(8, 21, -1, 3): (0, 1),
(8, 21, -1, 4): (-1, 1),
(8, 21, -1, 5): (-1, 1),
(8, 21, 0, -5): (1, 1),
(8, 21, 0, -4): (1, 1),
(8, 21, 0, -3): (1, 1),
(8, 21, 0, -2): (1, 1),
(8, 21, 0, -1): (1, 1),
(8, 21, 0, 0): (0, 1),
(8, 21, 0, 1): (0, 1),
(8, 21, 0, 2): (-1, 1),
(8, 21, 0, 3): (-1, 1),
(8, 21, 0, 4): (-1, 0),
(8, 21, 0, 5): (-1, -1),
(8, 21, 1, -5): (1, 1),
(8, 21, 1, -4): (1, 1),
(8, 21, 1, -3): (1, 0),
(8, 21, 1, -2): (1, 1),
(8, 21, 1, -1): (0, 1),
(8, 21, 1, 0): (-1, 1),
(8, 21, 1, 1): (-1, 1),
(8, 21, 1, 2): (-1, 1),
(8, 21, 1, 3): (-1, 1),
(8, 21, 1, 4): (-1, 0),
(8, 21, 1, 5): (-1, -1),
(8, 21, 2, -5): (0, 1),
(8, 21, 2, -4): (0, 1),
(8, 21, 2, -3): (0, 0),
(8, 21, 2, -2): (0, 1),
(8, 21, 2, -1): (0, 1),
(8, 21, 2, 0): (-1, 1),
(8, 21, 2, 1): (-1, 1),
(8, 21, 2, 2): (-1, 1),
(8, 21, 2, 3): (-1, 1),
(8, 21, 2, 4): (0, 1),
(8, 21, 2, 5): (0, 1),
(8, 21, 3, -5): (0, 1),
(8, 21, 3, -4): (0, 1),
(8, 21, 3, -3): (0, 0),
(8, 21, 3, -2): (0, 1),
(8, 21, 3, -1): (0, 1),
(8, 21, 3, 0): (0, 1),
(8, 21, 3, 1): (0, 1),
(8, 21, 3, 2): (0, 1),
(8, 21, 3, 3): (0, 1),
(8, 21, 3, 4): (0, 1),
(8, 21, 3, 5): (0, 1),
(8, 21, 4, -5): (0, 1),
(8, 21, 4, -4): (0, 1),
(8, 21, 4, -3): (0, 0),
(8, 21, 4, -2): (0, 1),
(8, 21, 4, -1): (0, 1),
(8, 21, 4, 0): (0, 1),
(8, 21, 4, 1): (0, 1),
(8, 21, 4, 2): (0, 1),
(8, 21, 4, 3): (0, 1),
(8, 21, 4, 4): (0, 1),
(8, 21, 4, 5): (0, 1),
(8, 21, 5, -5): (0, 1),
(8, 21, 5, -4): (0, 1),
(8, 21, 5, -3): (0, 0),
(8, 21, 5, -2): (0, 1),
(8, 21, 5, -1): (0, 1),
(8, 21, 5, 0): (0, 1),
(8, 21, 5, 1): (0, 1),
(8, 21, 5, 2): (0, 1),
(8, 21, 5, 3): (0, 1),
(8, 21, 5, 4): (0, 1),
(8, 21, 5, 5): (0, 1),
(8, 22, -5, -5): (0, 1),
(8, 22, -5, -4): (0, 0),
(8, 22, -5, -3): (0, 1),
(8, 22, -5, -2): (0, 1),
(8, 22, -5, -1): (0, 1),
(8, 22, -5, 0): (0, 1),
(8, 22, -5, 1): (0, 1),
(8, 22, -5, 2): (0, 1),
(8, 22, -5, 3): (0, 1),
(8, 22, -5, 4): (0, 1),
(8, 22, -5, 5): (0, 1),
(8, 22, -4, -5): (0, 1),
(8, 22, -4, -4): (0, 0),
(8, 22, -4, -3): (0, 1),
(8, 22, -4, -2): (0, 1),
(8, 22, -4, -1): (0, 1),
(8, 22, -4, 0): (0, 1),
(8, 22, -4, 1): (0, 1),
(8, 22, -4, 2): (0, 1),
(8, 22, -4, 3): (0, 1),
(8, 22, -4, 4): (0, 1),
(8, 22, -4, 5): (0, 1),
(8, 22, -3, -5): (0, 1),
(8, 22, -3, -4): (0, 0),
(8, 22, -3, -3): (0, 1),
(8, 22, -3, -2): (0, 1),
(8, 22, -3, -1): (0, 1),
(8, 22, -3, 0): (0, 1),
(8, 22, -3, 1): (0, 1),
(8, 22, -3, 2): (1, 1),
(8, 22, -3, 3): (1, 1),
(8, 22, -3, 4): (1, 1),
(8, 22, -3, 5): (1, 0),
(8, 22, -2, -5): (-1, 1),
(8, 22, -2, -4): (-1, 0),
(8, 22, -2, -3): (-1, 1),
(8, 22, -2, -2): (-1, 1),
(8, 22, -2, -1): (-1, 1),
(8, 22, -2, 0): (1, 1),
(8, 22, -2, 1): (1, 1),
(8, 22, -2, 2): (1, 1),
(8, 22, -2, 3): (1, 1),
(8, 22, -2, 4): (0, 1),
(8, 22, -2, 5): (0, 1),
(8, 22, -1, -5): (-1, 1),
(8, 22, -1, -4): (-1, 1),
(8, 22, -1, -3): (-1, 1),
(8, 22, -1, -2): (1, 1),
(8, 22, -1, -1): (1, 1),
(8, 22, -1, 0): (1, 1),
(8, 22, -1, 1): (1, 1),
(8, 22, -1, 2): (0, 1),
(8, 22, -1, 3): (0, 1),
(8, 22, -1, 4): (-1, 1),
(8, 22, -1, 5): (-1, 1),
(8, 22, 0, -5): (1, 1),
(8, 22, 0, -4): (1, 1),
(8, 22, 0, -3): (1, 1),
(8, 22, 0, -2): (1, 1),
(8, 22, 0, -1): (1, 1),
(8, 22, 0, 0): (0, 1),
(8, 22, 0, 1): (0, 1),
(8, 22, 0, 2): (-1, 1),
(8, 22, 0, 3): (-1, 1),
(8, 22, 0, 4): (-1, 0),
(8, 22, 0, 5): (-1, -1),
(8, 22, 1, -5): (1, 1),
(8, 22, 1, -4): (1, 0),
(8, 22, 1, -3): (1, 1),
(8, 22, 1, -2): (1, 1),
(8, 22, 1, -1): (0, 1),
(8, 22, 1, 0): (-1, 1),
(8, 22, 1, 1): (-1, 1),
(8, 22, 1, 2): (-1, 1),
(8, 22, 1, 3): (-1, 0),
(8, 22, 1, 4): (1, 1),
(8, 22, 1, 5): (1, 0),
(8, 22, 2, -5): (0, 1),
(8, 22, 2, -4): (0, 0),
(8, 22, 2, -3): (0, 1),
(8, 22, 2, -2): (0, 1),
(8, 22, 2, -1): (0, 1),
(8, 22, 2, 0): (-1, 1),
(8, 22, 2, 1): (-1, 1),
(8, 22, 2, 2): (-1, 1),
(8, 22, 2, 3): (0, 1),
(8, 22, 2, 4): (0, 1),
(8, 22, 2, 5): (0, 1),
(8, 22, 3, -5): (0, 1),
(8, 22, 3, -4): (0, 0),
(8, 22, 3, -3): (0, 1),
(8, 22, 3, -2): (0, 1),
(8, 22, 3, -1): (0, 1),
(8, 22, 3, 0): (0, 1),
(8, 22, 3, 1): (0, 1),
(8, 22, 3, 2): (0, 1),
(8, 22, 3, 3): (0, 1),
(8, 22, 3, 4): (0, 1),
(8, 22, 3, 5): (0, 1),
(8, 22, 4, -5): (0, 1),
(8, 22, 4, -4): (0, 0),
(8, 22, 4, -3): (0, 1),
(8, 22, 4, -2): (0, 1),
(8, 22, 4, -1): (0, 1),
(8, 22, 4, 0): (0, 1),
(8, 22, 4, 1): (0, 1),
(8, 22, 4, 2): (0, 1),
(8, 22, 4, 3): (0, 1),
(8, 22, 4, 4): (0, 1),
(8, 22, 4, 5): (0, 1),
(8, 22, 5, -5): (0, | |
<reponame>oliver306/TREE<gh_stars>0
import networkx as nx
import copy
import random
import sys
import matplotlib.pyplot as plt
from matplotlib import cm
import Colorizer
from datetime import datetime
from pathlib import Path
import pickle
import numpy as np
import csv
from kResiliencePaths import kResiliencePaths
import math
from timeit import default_timer as timer
def kResilienceTrees(s, d, fails, g, version="multiple", file_name=None, draw=False, unranked=True, treeChoice="shortest"):
    """Build failure-resilient tree structures between s and d and route on them.

    Computes edge-disjoint s-d paths, grows them into one or multiple trees
    (depending on *version*), prunes branches not leading to d, ranks the
    trees and finally simulates routing around the failed edges in *fails*.

    :param s: source node
    :param d: destination node
    :param fails: iterable of failed edges (u, v)
    :param g: input networkx graph (not modified; a deep copy is used)
    :param version: "multiple" builds one tree per path, otherwise one big tree
    :param file_name: base name for drawing output (only used when draw=True)
    :param draw: when True, emit Colorizer visualisations of each stage
    :param unranked: when True, additionally route without tree ranking
    :param treeChoice: ranking strategy passed to getTreeOrder
    :return: (hops, sp_length, overallNodeAdditions, hops_unranked, g_copy,
              routed_paths, rankings, tree_order_ranked, timeEDP_ms, overallTime_ms)

    NOTE(review): the early-exit below returns a 4-tuple while the normal
    path returns a 10-tuple; callers must handle both arities — verify.
    """
    sp_length = getTrueShortestPathLength(g.copy(), s, d, fails)
    # we will use a copy of the input graph
    g_copy = copy.deepcopy(g)
    # At the beginning, all the nodes and the edges of the graph are numbered with '0'
    # i.e. they don't belong to any structure yet
    nx.set_edge_attributes(g_copy,"0","attr")
    nx.set_node_attributes(g_copy,"0","attr")
    # The source and the destination node have their own separate attributes ('s' and 'd' respectively)
    g_copy.nodes[s]["attr"] = "s"
    g_copy.nodes[s]["label"] = "s"
    g_copy.nodes[d]["attr"] = "d"
    g_copy.nodes[d]["label"] = "d"
    try:
        startEDP = timer()
        edge_disjoint_paths = list(nx.edge_disjoint_paths(g_copy,s,d))
    except:
        # if an error is thrown, it means that the source node and the destination node belong
        # to different graph components; the destination node cannot be reached from the source node anymore
        # (bare except also swallows KeyboardInterrupt — pre-existing behavior)
        return (True,0,0,[])
    # sort paths from shortest to longest (reverse=False)
    # NOTE(review): the original comment said "long to short", which
    # contradicts reverse=False — the code is taken as ground truth here
    edge_disjoint_paths.sort(key=lambda x: len(x), reverse=False)
    # number the paths: path 1 is the shortest, higher numbers are longer paths
    no_path = 1
    for path in edge_disjoint_paths:
        for i in range(0,len(path)-1):
            # d keeps its 'd' attribute; interior nodes/edges get the path number
            if g_copy.nodes[path[i+1]]["attr"] != "d":
                g_copy.nodes[path[i+1]]["attr"] = str(no_path)
                g_copy[path[i]][path[i+1]]["attr"] = str(no_path)
        no_path += 1
    endEDP = timer()
    timeEDP = endEDP - startEDP
    # mark the failed edges on the working copy
    for fail in fails:
        g_copy[fail[0]][fail[1]]["failed"] = True
    if draw:
        Colorizer.colorizeGraph(g_copy.copy(), paths=len(edge_disjoint_paths), file_name=file_name + "-" + version + "-paths")
    startTreeBuilding = timer()
    if version == "multiple":
        makeMultipleTrees(g_copy, edge_disjoint_paths)
    else:
        makeOneTree(g_copy, edge_disjoint_paths, reverse=True) #reverse here, so we make tree from LONGEST path
    endTreeBuilding = timer()
    timeTree = endTreeBuilding - startTreeBuilding
    d_incidents = set()
    # remove incident edges of D from all structures
    for d_edge in g_copy.edges(d):
        d_incidents.add(d_edge[1])
        g_copy[d_edge[0]][d_edge[1]]['attr'] = "-1"
    s_incidents = set()
    for s_edge in g_copy.edges(s):
        s_incidents.add(s_edge[1])
    if draw:
        Colorizer.colorizeGraph(g_copy.copy(), paths=len(edge_disjoint_paths), file_name=file_name + "-" + version + "-noPP-" + version)
    startTreeProcessing = timer()
    # prune branches that do not lead to d; counts nodes added beyond the EDPs
    trees_changed, overallNodeAdditions = postProcessTree(g_copy, s, d_incidents, s_incidents, edge_disjoint_paths, version=version)
    endTreeProcessing = timer()
    timeProcessing = endTreeProcessing - startTreeProcessing
    overallTime = (timeEDP + timeTree + timeProcessing) * 1000
    """print("Version:",version)
    print("Nodes:",g.number_of_nodes())
    print("Edges:",g.number_of_edges())
    print("Time:",overallTime)
    print("Time EDP:",timeEDP * 1000)
    print("--------------------------------------------")"""
    if draw:
        Colorizer.colorizeGraph(g_copy.copy(), paths=len(edge_disjoint_paths), file_name=file_name + "-" + version + "-PP-" + version)
    #rankings = rankTree(g_copy, s, d, d_incidents, trees_changed)
    rankings = rankTree(g_copy, s, d, d_incidents) #causes crash when trees neighbors S as well
    hops, routed_paths, tree_order_ranked = routeTrees(g_copy.copy(), s, d, d_incidents, fails, rankings, treeChoice=treeChoice)
    if unranked:
        # second routing pass ignoring the ranking, for comparison
        hops_unranked, routed_paths_unranked, tree_order_unranked = routeTrees(g_copy.copy(), s, d, d_incidents, fails, rankings, unranked=True)
    else:
        hops_unranked = -1
    if draw:
        Colorizer.colorizeGraph(g_copy.copy(), paths=len(edge_disjoint_paths), hops=hops, file_name=file_name + "-PProuted-" + version, routed_paths=routed_paths, sp_length=sp_length)
    return hops, sp_length, overallNodeAdditions, hops_unranked, g_copy, routed_paths, rankings, tree_order_ranked, timeEDP * 1000, overallTime
#makes as one tree as large as possible from graph g
def makeOneTree(g_copy, edge_disjoint_paths, reverse=False):
    """Grow each edge-disjoint path into as large a tree as possible.

    Starting from every interior node of a path, repeatedly claims unused
    ('0'-attributed) edges and nodes for that path's tree id by breadth-first
    expansion. Edges touching the destination ('d') are claimed as well, but
    d itself keeps its attribute. Mutates *g_copy* in place.

    :param g_copy: working graph whose nodes/edges carry an "attr" ownership tag
    :param edge_disjoint_paths: list of s-d paths (lists of nodes)
    :param reverse: when True, process paths in reverse order and number
                    trees downwards from len(edge_disjoint_paths)
    """
    if reverse:
        edge_disjoint_paths.reverse()
    tree_id = len(edge_disjoint_paths) if reverse else 1

    for path in edge_disjoint_paths:
        added = 0
        for idx in range(1, len(path) - 1):
            # BFS frontier seeded with this interior path node
            frontier = [path[idx]]
            pos = 0
            while pos < len(frontier):
                for u, v in list(g_copy.edges(frontier[pos])):
                    # only consider edges not yet owned by any structure
                    if g_copy.get_edge_data(u, v).get("attr") != "0":
                        continue
                    if g_copy.nodes[v]["attr"] == "0":
                        # claim both the edge and the fresh node for this tree
                        g_copy[u][v]["attr"] = str(tree_id)
                        g_copy.nodes[v]["attr"] = str(tree_id)
                        frontier.append(v)
                        added += 1
                    # we also give an attribute to the incident edges of the
                    # destination node; its neighbors become the tree leaves
                    if g_copy.nodes[v]["attr"] == "d":
                        g_copy[u][v]["attr"] = str(tree_id)
                pos += 1
        tree_id = tree_id - 1 if reverse else tree_id + 1
        if DEBUG:
            print("added", added, "nodes to tree", tree_id)
#makes as many trees as possible from graph g
def makeMultipleTrees(g_copy, edge_disjoint_paths):
    """Grow one separate tree per edge-disjoint path.

    Like makeOneTree, but a node is only claimed when none of its incident
    edges already belongs to the current tree (keeping the structure a tree)
    and when it is neither the source nor the destination. Mutates *g_copy*
    in place.

    :param g_copy: working graph whose nodes/edges carry an "attr" ownership tag
    :param edge_disjoint_paths: list of s-d paths (lists of nodes)
    """
    tree_id = 1
    for path in edge_disjoint_paths:
        added = 0
        for idx in range(1, len(path) - 1):
            # BFS frontier seeded with this interior path node
            frontier = [path[idx]]
            pos = 0
            while pos < len(frontier):
                for u, v in list(g_copy.edges(frontier[pos])):
                    # only consider edges not yet owned by any structure
                    if g_copy.get_edge_data(u, v).get("attr") != "0":
                        continue
                    # attributes of all edges incident to the candidate node
                    incident_attrs = [g_copy[e[0]][e[1]]["attr"] for e in g_copy.edges(v)]
                    if str(tree_id) not in incident_attrs and g_copy[u][v]["attr"] == "0" and \
                            g_copy.nodes[v]["attr"] != "s" and g_copy.nodes[v]["attr"] != "d":
                        g_copy[u][v]["attr"] = str(tree_id)
                        g_copy.nodes[v]["attr"] = str(tree_id)
                        frontier.append(v)
                        added += 1
                    # we also give an attribute to the incident edges of the
                    # destination node; its neighbors become the tree leaves
                    if g_copy.nodes[v]["attr"] == "d":
                        g_copy[u][v]["attr"] = str(tree_id)
                pos += 1
        if DEBUG:
            print("added", added, "nodes to tree", tree_id)
        tree_id += 1
#route along tree structures
#route along tree structures
def routeTrees(g, s, d, d_incidents, fails, rankings, unranked=False, treeChoice="shortest"):
    """Simulate DFS routing along the tree structures until d is reached.

    Tries the trees in the order given by getTreeOrder; within a tree it
    walks the (optionally ranked) DFS edge order, skipping failed edges,
    and stops as soon as a neighbor of d with an intact last hop is found.

    :param g: graph whose edges carry the tree-ownership "attr" tags
    :param s: source node
    :param d: destination node
    :param d_incidents: set of neighbors of d
    :param fails: failed edges; membership is checked in both orientations
                  and for both str and int node representations
    :param rankings: per-tree ranking produced by rankTree
    :param unranked: when True, ignore the ranking inside each tree
    :param treeChoice: strategy passed to getTreeOrder
    :return: (hops or -1 when d unreachable, list of traversed edges,
              tree order that was tried)
    """
    hops = 0
    # collect the tree ids present in the graph (attr > 0)
    trees_attributes = []
    for node1,node2,data in g.edges(data=True):
        if data['attr'] not in trees_attributes and int(data['attr']) > 0:
            trees_attributes.append(data['attr'])
    #trees_attributes = [str(el2) for el2 in sorted([int(el1) for el1 in trees_attributes], reverse=True)]
    trees_attributes = getTreeOrder(rankings, treeChoice=treeChoice)
    routed_paths = []
    found = False
    for attr in trees_attributes:
        if found:
            break
        T = nx.Graph() # we reconstruct the tree
        for node1,node2,data in g.edges(data=True):
            if data['attr'] == attr:
                T.add_edge(node1,node2)
        # skip trees the source is not part of
        if s not in list(T.nodes):
            continue
        dfs_edge_order_list = list(nx.dfs_labeled_edges(T, s))
        # NOTE(review): removing from the list while iterating it skips the
        # element after each removal — pre-existing behavior, confirm intent
        for n1,n2,label in dfs_edge_order_list:
            if label == "nontree" or n1 == n2: # we also remove self-loops
                dfs_edge_order_list.remove((n1,n2,label))
        hops_current_tree = 0
        if not unranked and attr in rankings:
            # reorder the DFS walk according to this tree's ranking
            dfs_edge_order_list = rankDfs(T, s, d, dfs_edge_order_list, rankings[attr])
        final_dfs = removeFails(dfs_edge_order_list, fails)
        for n1,n2,label in final_dfs:
            routed_paths.append((n1,n2))
            hops_current_tree += 1
            # last hop to d must itself be intact (checked in all encodings)
            if n1 in d_incidents and (str(n1),str(d)) not in fails and (str(d),str(n1)) not in fails and (int(n1),int(d)) not in fails and (int(d),int(n1)) not in fails:
                routed_paths.append((n1, d))
                hops_current_tree += 1
                found = True
                break
            elif n2 in d_incidents and (str(n2), str(d)) not in fails and (str(d), str(n2)) not in fails and (int(n2), int(d)) not in fails and (int(d), int(n2)) not in fails:
                routed_paths.append((n2, d))
                hops_current_tree += 1
                found = True
                break
        hops += hops_current_tree
    if DEBUG:
        if unranked:
            print("UNRANKED: TOOK", hops, "HOPS")
        else:
            print("RANKED: TOOK", hops, "HOPS")
    return hops if found else -1, routed_paths, trees_attributes
#prune created tree branches that do not lead to d
def postProcessTree(g, s, d_incidents, s_incidents, edge_disjoint_paths, version=None, tree_attr=None):
# we will test if we can still reach the destination node; for this we will analyze each subgraph (tree) until we will find the destination node
trees_attributes = []
trees_changed = set()
if tree_attr is None:
for node1, node2, data in g.edges(data=True):
if data['attr'] not in trees_attributes:
if int(data['attr']) > 0:
trees_attributes.append(data['attr'])
else:
trees_attributes.append(tree_attr)
overallNodeAdditions = 0
for attr in trees_attributes:
T = nx.Graph() # we reconstruct the tree
for node1, node2, data in g.edges(data=True):
if data['attr'] == attr:
T.add_edge(node1, node2)
if s in list(T.nodes):
dfs_edge_order_list = list(nx.dfs_labeled_edges(T, s))
for n1,n2,label in dfs_edge_order_list:
if label == "nontree" or n1 == n2: # we also remove self-loops
dfs_edge_order_list.remove((n1,n2,label))
#edges_to_remove.add((node1, node2))
#print(dfs_edge_order_list)
good_branch_nodes = set()
visited_nodes = set()
visited_nodes.add(dfs_edge_order_list[0][0])
delete_mode = False
for i in range(len(dfs_edge_order_list)):
n1,n2,label = dfs_edge_order_list[i]
if label == "forward":
visited_nodes.add(n2)
elif label == "reverse":
visited_nodes.remove(n2)
if label == "forward" or n2 in good_branch_nodes:
delete_mode = False
if delete_mode:
if DEBUG:
print("edge {},{} set to 0".format(n1, n2))
g[n1][n2]["attr"] = "0"
g.nodes[n2]["attr"] = "0"
if i < len(dfs_edge_order_list) - 1:
n1_next, n2_next, label_next = dfs_edge_order_list[i+1]
if label == "forward" and label_next == "reverse" and str(n2) not in d_incidents and int(n2) not in d_incidents: #and n2 not in s_incidents:
delete_mode = True
elif str(n2) in d_incidents or int(n2) in d_incidents: #or n2 in s_incidents:
[good_branch_nodes.add(el) for el in visited_nodes]
if edge_disjoint_paths is not None:
cnt = 0
for path in edge_disjoint_paths:
if n2 not in path:
cnt += 1
else:
break
if cnt == len(edge_disjoint_paths):
overallNodeAdditions += 1
trees_changed.add(attr)
if DEBUG:
print("found node", n2, "that was not originally present in edge disjoint paths")
return trees_changed, | |
P HAVE OTH DIAG TESTEXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF14 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2014", max_length=2)
FFTOT15 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2014", max_length=2)
OPXP14X = models.CharField("TOT EXP FOR EVENT (OPFXP14X + OPDXP14X)", max_length=8)
OPTC14X = models.CharField("TOTAL CHG FOR EVENT (OPFTC14X+OPDTC14X)", max_length=9)
OPFSF14X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR14X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD14X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV14X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA14X = models.CharField("FAC AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=8)
OPFTR14X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=7)
OPFOF14X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=7)
OPFSL14X = models.CharField("FACILITY AMT PD, STATELOC GOV (IMPUTED)", max_length=7)
OPFWC14X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR14X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU14X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=8)
OPFOT14X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP14X = models.CharField("FACILITY SUM PAYMENTS OPFSF14X-OPFOT14X", max_length=8)
OPFTC14X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF14X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR14X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=8)
OPDMD14X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV14X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA14X = models.CharField("DR AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=7)
OPDTR14X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF14X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL14X = models.CharField("DOCTOR AMT PD, STATELOC GOV (IMPUTED)", max_length=6)
OPDWC14X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR14X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU14X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=7)
OPDOT14X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP14X = models.CharField("DOCTOR SUM PAYMENTS OPDSF14X-OPDOT14X", max_length=8)
OPDTC14X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT14F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2014", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2014", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2014", max_length=1)
    # Methods
    def __str__(self):
        """String for representing a OutpatientVisits14 object.

        Uses the person identifier (DUID + PID) as the display value.
        """
        return f"{self.DUPERSID}"
class OutpatientVisits13(models.Model):
    """Defines the OutpatientVisits Model for 2013, derived from the model class.

    One row per MEPS outpatient visit event in 2013. All columns are stored
    as fixed-width CharFields mirroring the source data file; the max_length
    values match the column widths of the 2013 release.
    """
    # Metadata
    class Meta:
        """ Set parameters for admin app"""
        ordering = ["DUPERSID"]
        verbose_name_plural = "OutpatientVisits13"
    # --- identifiers ---
    DUID = models.CharField("DWELLING UNIT ID", max_length=5)
    PID = models.CharField("PERSON NUMBER", max_length=3)
    DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
    EVNTIDX = models.CharField("EVENT ID", max_length=12)
    EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
    FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
    PANEL = models.CharField("PANEL NUMBER", max_length=2)
    MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
    # --- visit date and provider ---
    OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
    OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
    SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
    SEEDOC = models.CharField("DID P TALK TO MD THIS VISITPHONE CALL", max_length=2)
    DRSPLTY = models.CharField("OPAT DOCTOR S SPECIALTY", max_length=2)
    MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
    VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
    VSTRELCN = models.CharField("THIS VSTPHONE CALL RELATED TO SPEC COND", max_length=2)
    # --- services performed during the visit ---
    LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
    SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
    XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
    MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
    MRI = models.CharField("THIS VISIT DID P HAVE AN MRICATSCAN", max_length=2)
    EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
    EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
    RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
    ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
    THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
    OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TESTEXAM", max_length=2)
    SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
    MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
    # --- clinical classification codes ---
    OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
    OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
    OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
    OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
    # --- flat fee bundle ---
    FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
    FFTOT14 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2013", max_length=2)
    # --- total expenditures and charges ---
    OPXP13X = models.CharField("TOT EXP FOR EVENT (OPFXP13X + OPDXP13X)", max_length=8)
    OPTC13X = models.CharField("TOTAL CHG FOR EVENT (OPFTC13X+OPDTC13X)", max_length=9)
    # --- facility payments by source (imputed) ---
    OPFSF13X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=7)
    OPFMR13X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
    OPFMD13X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
    OPFPV13X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
    OPFVA13X = models.CharField("FAC AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=8)
    OPFTR13X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=7)
    OPFOF13X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=7)
    OPFSL13X = models.CharField("FACILITY AMT PD, STATELOC GOV (IMPUTED)", max_length=8)
    OPFWC13X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
    OPFOR13X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
    OPFOU13X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
    OPFOT13X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
    OPFXP13X = models.CharField("FACILITY SUM PAYMENTS OPFSF13X-OPFOT13X", max_length=8)
    OPFTC13X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
    # --- doctor payments by source (imputed) ---
    OPDSF13X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
    OPDMR13X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
    OPDMD13X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
    OPDPV13X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
    OPDVA13X = models.CharField("DR AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=7)
    OPDTR13X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
    OPDOF13X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
    OPDSL13X = models.CharField("DOCTOR AMT PD, STATELOC GOV (IMPUTED)", max_length=7)
    OPDWC13X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
    OPDOR13X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
    OPDOU13X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=6)
    OPDOT13X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
    OPDXP13X = models.CharField("DOCTOR SUM PAYMENTS OPDSF13X-OPDOT13X", max_length=8)
    OPDTC13X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
    # --- imputation status and survey weights ---
    IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
    PERWT13F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2013", max_length=12)
    VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2013", max_length=4)
    VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2013", max_length=1)
    # Methods
    def __str__(self):
        """String for representing a OutpatientVisits13 object"""
        return f"{self.DUPERSID}"
class OutpatientVisits12(models.Model):
""" Defines the OutpatientVisits Model for 2012, derived from the model class. """
    # Metadata
    class Meta:
        """ORM/admin options: default queryset ordering and the plural
        display name shown in the Django admin."""
        ordering = ["DUPERSID"]  # order querysets by person ID
        verbose_name_plural = "OutpatientVisits12"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISITPHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VSTPHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUGALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPYCOUNSELING", max_length=2)
OTHSHOT = models.CharField("THIS VISIT DID P HAVE OTHER SHOT", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P | |
# Trezor interaction script
from ..hwwclient import HardwareWalletClient
from ..errors import ActionCanceledError, BadArgumentError, DeviceAlreadyInitError, DeviceAlreadyUnlockedError, DeviceConnectionError, DEVICE_NOT_INITIALIZED, DeviceNotReadyError, UnavailableActionError, common_err_msgs, handle_errors
from .trezorlib.client import TrezorClient as Trezor
from .trezorlib.debuglink import TrezorClientDebugLink
from .trezorlib.exceptions import Cancelled
from .trezorlib.transport import enumerate_devices, get_transport
from .trezorlib.ui import echo, PassphraseUI, mnemonic_words, PIN_CURRENT, PIN_NEW, PIN_CONFIRM, PIN_MATRIX_DESCRIPTION, prompt
from .trezorlib import tools, syscoin, device
from .trezorlib import messages as proto
from ..base58 import get_xpub_fingerprint, to_address, xpub_main_2_test, get_xpub_fingerprint_hex
from ..serializations import CTxOut, ser_uint256
from .. import bech32
from usb1 import USBErrorNoDevice
from types import MethodType
import base64
import logging
import sys
py_enumerate = enumerate # Need to use the enumerate built-in but there's another function already named that
# Only handles up to 15 of 15
def parse_multisig(script):
    """Parse a raw m-of-n OP_CHECKMULTISIG script.

    Returns ``(True, MultisigRedeemScriptType)`` when *script* is a standard
    multisig script built from 33-byte (compressed) pubkeys, otherwise
    ``(False, None)``.
    """
    failure = (False, None)

    # OP_1 .. OP_15 encode as 0x51 .. 0x5f, i.e. the small int plus 80.
    required = script[0] - 80
    if not 1 <= required <= 15:
        return failure

    # Collect each 33-byte pubkey, wrapped in an HDNodePathType with a dummy
    # (all-zero) HD node as the device protocol expects.
    nodes = []
    pos = 1
    while script[pos] == 33:
        pubkey = script[pos + 1:pos + 34]
        dummy_node = proto.HDNodeType(
            depth=0,
            fingerprint=0,
            child_num=0,
            chain_code=bytes(32),
            public_key=pubkey,
        )
        nodes.append(proto.HDNodePathType(node=dummy_node, address_n=[]))
        pos += 34

    # The script must end with OP_n matching the pubkey count, followed by
    # OP_CHECKMULTISIG (0xae == 174).
    total = script[pos] - 80
    if total != len(nodes):
        return failure
    if script[pos + 1] != 174:
        return failure

    multisig = proto.MultisigRedeemScriptType(m=required, signatures=[b''] * total, pubkeys=nodes)
    return (True, multisig)
def trezor_exception(f):
    """Decorator translating low-level errors raised by *f* into hwi errors.

    ValueError -> BadArgumentError, trezorlib Cancelled -> ActionCanceledError,
    USBErrorNoDevice -> DeviceConnectionError.  All other exceptions propagate
    unchanged.
    """
    from functools import wraps  # stdlib; local import keeps the module surface unchanged

    @wraps(f)  # preserve f's __name__/__doc__ for introspection and error text
    def func(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except ValueError as e:
            raise BadArgumentError(str(e))
        except Cancelled:
            raise ActionCanceledError('{} canceled'.format(f.__name__))
        except USBErrorNoDevice:
            raise DeviceConnectionError('Device disconnected')
    return func
def interactive_get_pin(self, code=None):
    """Prompt for a PIN on the console, looping until a numeric entry is given.

    *code* selects which PIN is being requested (current / new / confirmation);
    anything else falls back to the generic "PIN" wording.
    """
    descriptions = {
        PIN_CURRENT: "current PIN",
        PIN_NEW: "new PIN",
        PIN_CONFIRM: "new PIN again",
    }
    desc = descriptions.get(code, "PIN")
    echo(PIN_MATRIX_DESCRIPTION)
    while True:
        pin = prompt("Please enter {}".format(desc), hide_input=True)
        if pin.isdigit():
            return pin
        echo("Non-numerical PIN provided, please try again")
# This class extends the HardwareWalletClient for Trezor specific things
class TrezorClient(HardwareWalletClient):
def __init__(self, path, password=''):
super(TrezorClient, self).__init__(path, password)
self.simulator = False
if path.startswith('udp'):
logging.debug('Simulator found, using DebugLink')
transport = get_transport(path)
self.client = TrezorClientDebugLink(transport=transport)
self.simulator = True
self.client.set_passphrase(password)
else:
self.client = Trezor(transport=get_transport(path), ui=PassphraseUI(password))
# if it wasn't able to find a client, throw an error
if not self.client:
raise IOError("no Device")
self.password = password
self.type = 'Trezor'
def _check_unlocked(self):
self.client.init_device()
if self.client.features.pin_protection and not self.client.features.pin_cached:
raise DeviceNotReadyError('{} is locked. Unlock by using \'promptpin\' and then \'sendpin\'.'.format(self.type))
# Must return a dict with the xpub
# Retrieves the public key at the specified BIP 32 derivation path
@trezor_exception
def get_pubkey_at_path(self, path):
self._check_unlocked()
try:
expanded_path = tools.parse_path(path)
except ValueError as e:
raise BadArgumentError(str(e))
output = syscoin.get_public_node(self.client, expanded_path)
if self.is_testnet:
return {'xpub': xpub_main_2_test(output.xpub)}
else:
return {'xpub': output.xpub}
# Must return a hex string with the signed transaction
# The tx must be in the psbt format
@trezor_exception
def sign_tx(self, tx):
self._check_unlocked()
# Get this devices master key fingerprint
master_key = syscoin.get_public_node(self.client, [0])
master_fp = get_xpub_fingerprint(master_key.xpub)
# Do multiple passes for multisig
passes = 1
p = 0
while p < passes:
# Prepare inputs
inputs = []
to_ignore = [] # Note down which inputs whose signatures we're going to ignore
for input_num, (psbt_in, txin) in py_enumerate(list(zip(tx.inputs, tx.tx.vin))):
txinputtype = proto.TxInputType()
# Set the input stuff
txinputtype.prev_hash = ser_uint256(txin.prevout.hash)[::-1]
txinputtype.prev_index = txin.prevout.n
txinputtype.sequence = txin.nSequence
# Detrermine spend type
scriptcode = b''
if psbt_in.non_witness_utxo:
utxo = psbt_in.non_witness_utxo.vout[txin.prevout.n]
txinputtype.script_type = proto.InputScriptType.SPENDADDRESS
scriptcode = utxo.scriptPubKey
txinputtype.amount = psbt_in.non_witness_utxo.vout[txin.prevout.n].nValue
elif psbt_in.witness_utxo:
utxo = psbt_in.witness_utxo
# Check if the output is p2sh
if psbt_in.witness_utxo.is_p2sh():
txinputtype.script_type = proto.InputScriptType.SPENDP2SHWITNESS
else:
txinputtype.script_type = proto.InputScriptType.SPENDWITNESS
scriptcode = psbt_in.witness_utxo.scriptPubKey
txinputtype.amount = psbt_in.witness_utxo.nValue
# Set the script
if psbt_in.witness_script:
scriptcode = psbt_in.witness_script
elif psbt_in.redeem_script:
scriptcode = psbt_in.redeem_script
def ignore_input():
txinputtype.address_n = [0x80000000]
txinputtype.multisig = None
txinputtype.script_type = proto.InputScriptType.SPENDWITNESS
inputs.append(txinputtype)
to_ignore.append(input_num)
# Check for multisig
is_ms, multisig = parse_multisig(scriptcode)
if is_ms:
# Add to txinputtype
txinputtype.multisig = multisig
if psbt_in.non_witness_utxo:
if utxo.is_p2sh:
txinputtype.script_type = proto.InputScriptType.SPENDMULTISIG
else:
# Cannot sign bare multisig, ignore it
ignore_input()
continue
elif not is_ms and psbt_in.non_witness_utxo and not utxo.is_p2pkh:
# Cannot sign unknown spk, ignore it
ignore_input()
continue
elif not is_ms and psbt_in.witness_utxo and psbt_in.witness_script:
# Cannot sign unknown witness script, ignore it
ignore_input()
continue
# Find key to sign with
found = False
our_keys = 0
for key in psbt_in.hd_keypaths.keys():
keypath = psbt_in.hd_keypaths[key]
if keypath[0] == master_fp and key not in psbt_in.partial_sigs:
if not found:
txinputtype.address_n = keypath[1:]
found = True
our_keys += 1
# Determine if we need to do more passes to sign everything
if our_keys > passes:
passes = our_keys
if not found:
# This input is not one of ours
ignore_input()
continue
# append to inputs
inputs.append(txinputtype)
# address version byte
if self.is_testnet:
p2pkh_version = b'\x41'
p2sh_version = b'\xc4'
bech32_hrp = 'tsys'
else:
p2pkh_version = b'\x3F'
p2sh_version = b'\x05'
bech32_hrp = 'sys'
# prepare outputs
outputs = []
for i, out in py_enumerate(tx.tx.vout):
txoutput = proto.TxOutputType()
txoutput.amount = out.nValue
txoutput.script_type = proto.OutputScriptType.PAYTOADDRESS
if out.is_p2pkh():
txoutput.address = to_address(out.scriptPubKey[3:23], p2pkh_version)
elif out.is_p2sh():
txoutput.address = to_address(out.scriptPubKey[2:22], p2sh_version)
else:
wit, ver, prog = out.is_witness()
if wit:
txoutput.address = bech32.encode(bech32_hrp, ver, prog)
else:
raise BadArgumentError("Output is not an address")
# Add the derivation path for change, but only if there is exactly one derivation path
psbt_out = tx.outputs[i]
if len(psbt_out.hd_keypaths) == 1:
_, keypath = next(iter(psbt_out.hd_keypaths.items()))
if keypath[0] == master_fp:
wit, ver, prog = out.is_witness()
if out.is_p2pkh():
txoutput.address_n = keypath[1:]
txoutput.address = None
elif wit:
txoutput.script_type = proto.OutputScriptType.PAYTOWITNESS
txoutput.address_n = keypath[1:]
txoutput.address = None
elif out.is_p2sh() and psbt_out.redeem_script:
wit, ver, prog = CTxOut(0, psbt_out.redeem_script).is_witness()
if wit and len(prog) == 20:
txoutput.script_type = proto.OutputScriptType.PAYTOP2SHWITNESS
txoutput.address_n = keypath[1:]
txoutput.address = None
# append to outputs
outputs.append(txoutput)
# Prepare prev txs
prevtxs = {}
for psbt_in in tx.inputs:
if psbt_in.non_witness_utxo:
prev = psbt_in.non_witness_utxo
t = proto.TransactionType()
t.version = prev.nVersion
t.lock_time = prev.nLockTime
for vin in prev.vin:
i = proto.TxInputType()
i.prev_hash = ser_uint256(vin.prevout.hash)[::-1]
i.prev_index = vin.prevout.n
i.script_sig = vin.scriptSig
i.sequence = vin.nSequence
t.inputs.append(i)
for vout in prev.vout:
o = proto.TxOutputBinType()
o.amount = vout.nValue
o.script_pubkey = vout.scriptPubKey
t.bin_outputs.append(o)
logging.debug(psbt_in.non_witness_utxo.hash)
prevtxs[ser_uint256(psbt_in.non_witness_utxo.sha256)[::-1]] = t
# Sign the transaction
tx_details = proto.SignTx()
tx_details.version = tx.tx.nVersion
tx_details.lock_time = tx.tx.nLockTime
if self.is_testnet:
signed_tx = syscoin.sign_tx(self.client, "Testnet", inputs, outputs, tx_details, prevtxs)
else:
signed_tx = syscoin.sign_tx(self.client, "Syscoin", inputs, outputs, tx_details, prevtxs)
# Each input has one signature
for input_num, (psbt_in, sig) in py_enumerate(list(zip(tx.inputs, signed_tx[0]))):
if input_num in to_ignore:
continue
for pubkey in psbt_in.hd_keypaths.keys():
fp = psbt_in.hd_keypaths[pubkey][0]
if fp == master_fp and pubkey not in psbt_in.partial_sigs:
psbt_in.partial_sigs[pubkey] = sig + b'\x01'
break
p += 1
return {'psbt': tx.serialize()}
# Must return a base64 encoded string with the signed message
# The message can be any string
@trezor_exception
def sign_message(self, message, keypath):
self._check_unlocked()
path = tools.parse_path(keypath)
result = syscoin.sign_message(self.client, 'Syscoin', path, message)
return {'signature': base64.b64encode(result.signature).decode('utf-8')}
# Display address of specified type on the device. Only supports single-key based addresses.
@trezor_exception
def display_address(self, keypath, p2sh_p2wpkh, bech32):
self._check_unlocked()
expanded_path = tools.parse_path(keypath)
address = syscoin.get_address(
self.client,
"Testnet" if self.is_testnet else "Syscoin",
expanded_path,
show_display=True,
script_type=proto.InputScriptType.SPENDWITNESS if bech32 else (proto.InputScriptType.SPENDP2SHWITNESS if p2sh_p2wpkh else proto.InputScriptType.SPENDADDRESS)
)
return {'address': address}
# Setup a new device
@trezor_exception
def setup_device(self, label='', passphrase=''):
self.client.init_device()
if not self.simulator:
# Use interactive_get_pin
self.client.ui.get_pin = MethodType(interactive_get_pin, self.client.ui)
if self.client.features.initialized:
raise DeviceAlreadyInitError('Device is already initialized. Use wipe first and try again')
device.reset(self.client, passphrase_protection=bool(self.password))
return {'success': True}
# Wipe this device
@trezor_exception
def wipe_device(self):
self._check_unlocked()
device.wipe(self.client)
return {'success': True}
# Restore device from mnemonic or xprv
@trezor_exception
def restore_device(self, label=''):
self.client.init_device()
if not self.simulator:
# Use interactive_get_pin
self.client.ui.get_pin = MethodType(interactive_get_pin, self.client.ui)
device.recover(self.client, label=label, input_callback=mnemonic_words(), passphrase_protection=bool(self.password))
return {'success': True}
# Begin backup process
def backup_device(self, label='', passphrase=''):
raise UnavailableActionError('The {} does not support creating a backup via software'.format(self.type))
# Close the device
@trezor_exception
def close(self):
self.client.close()
# Prompt for a pin on device
@trezor_exception
def prompt_pin(self):
self.client.open()
self.client.init_device()
if not self.client.features.pin_protection:
raise DeviceAlreadyUnlockedError('This device does not need a PIN')
if self.client.features.pin_cached:
raise DeviceAlreadyUnlockedError('The PIN has already been sent to this device')
print('Use \'sendpin\' to provide the number positions for the PIN as displayed on your device\'s | |
query.message.delete()
try:
prev_message = bot.get_messages(query.from_user.id, int(query.message.message_id) - 1)
if prev_message.text:
print("before contains:", prev_message.text)
if str(prev_message.text).__contains__("Take your audio searching to the speed"):
print("after contains:", prev_message.text)
text = language_handler("welcome", lang_code, query.from_user.first_name)
exception_handler(prev_message.edit_text(text))
# apl[1].edit_message_text(query.from_user.id, int(query.message.message_id)-1, "new text")
print(bot.get_messages(query.from_user.id, int(query.message.message_id) - 1))
except Exception as e:
print("from editing welcome message: ", e)
pass
print(query)
elif query.data in joined_status:
if query.data == "joined":
try:
# user_data = es.get(index="user", id=query.from_user.id)["_source"]
if user_data["lang_code"] == "fa":
user = exception_handler(app.get_chat_member(chromusic_fa_id, query.from_user.id))
else:
user = exception_handler(app.get_chat_member(chromusic_id, query.from_user.id))
es.update("user", id=query.from_user.id, body={
"script":
{
"inline": "ctx._source.role = params.role;"
"ctx._source.limited = params.limited;",
"lang": "painless",
"params": {
"role": "subscriber",
"limited": False
}
}
}, ignore=409)
# text = "ok you're right"
user_data = es.get("user", id=query.from_user.id)["_source"]
text = language_handler("has_joined", user_data["lang_code"], user_data["first_name"])
# text = language_handler("not_joined", user_data["lang_code"], user_data["first_name"])
except:
user_data = es.get("user", id=query.from_user.id)["_source"]
text = language_handler("not_joined", user_data["lang_code"], user_data["first_name"])
# text = "you're not joined :("
exception_handler(bot.answer_callback_query(
query.id,
text=text,
# f"{query.data} language registered for you.\n\nYou can always change it using /lang command",
show_alert=True
))
else:
exception_handler(bot.answer_callback_query(
query.id,
text=""
# f"{query.data} language registered for you.\n\nYou can always change it using /lang command",
# show_alert=True
))
elif str(query.data).__contains__("get_list"):
playlist_id = str(query.data).split(" ")[1]
results_list = es.get(index="playlist", id=playlist_id)["_source"]
back_text = language_handler("back_to_the_bot", lang_code)
results = []
for index, file_id in enumerate(results_list["list"]):
res = es.get(index="audio_files", id=file_id)
# file = app.get_chat(res["_source"]["chat_id"])["username"]
# print(file)
# audio_file = app.get_messages(res["_source"]["chat_id"], res["_source"]["message_id"])
# caption = file_retrieve_handler(audio_file)
# captions.append(caption)
results.append(res)
print("\n\nresults\n", res)
text = language_handler("playlist_result_list_handler", lang_code, results, results_list["title"])
print("text", text)
exception_handler(bot.answer_callback_query(
query.id,
text=''
# f"{query.data} language registered for you.\n\nYou can always change it using /lang command",
# show_alert=True
))
exception_handler(bot.send_message(query.from_user.id, text=text, parse_mode="HTML"))
# exception_handler(
# bot.answer_inline_query(query.id, results=results,
# cache_time=10, switch_pm_text=back_text, switch_pm_parameter="back_to_the_bot"))
elif str(query.data).__contains__("delete"):
print(query)
operation = str(query.data).split(" ")[0]
playlist_id = str(query.data).split(" ")[1]
if operation == "delete":
result = es.get(index="playlist", id=playlist_id)
func = "playlist"
text = language_handler("delete_playlist_validation_text", lang_code, func)
markup_list = language_handler("delete_playlist_validation_keyboard", lang_code, playlist_id, func)
# exception_handler(bot.send_message(chat_id=query.from_user.id,
# text=f"<b>{text}</b>",
# reply_markup=InlineKeyboardMarkup(markup_list),
# parse_mode='HTML'))
exception_handler(query.edit_message_text(
text=f"<b>{text}</b>",
reply_markup=InlineKeyboardMarkup(markup_list),
parse_mode='HTML'))
elif operation == "ydelete":
# try:
playlist_id = str(query.data).split(" ")[1]
try:
file_retrieve_id = str(query.data).split(" ")[2]
# is_audio_file = True
res = es.update(index="playlist", id=playlist_id, body={
"script": {
"source": "if (ctx._source.list.contains(params.file_id)) "
"{ctx._source.list.remove(ctx._source.list.indexOf(params.file_id))} "
"else {ctx.op = 'none'}",
"lang": "painless",
"params": {
"file_id": file_retrieve_id
}
}
}, ignore=409)
text = language_handler("file_deleted_from_playlist", user_data["lang_code"])
exception_handler(query.answer(
text=text,
show_alert=True))
bot.delete_messages(user.id, query.message.message_id)
except:
is_audio_file = False
res = es.delete(index="playlist", id=playlist_id)
text = language_handler("playlist_deleted_text", lang_code)
exception_handler(query.answer(
text=f"{text}",
show_alert=True))
bot.delete_messages(user.id, query.message.message_id)
elif operation == "ndelete":
playlist_files = es.get(index="playlist", id=playlist_id)["_source"]
single_playlist_markup_list = language_handler("single_playlist_markup_list", user_data["lang_code"],
playlist_id)
single_playlist_text = language_handler("single_playlist_text", user_data["lang_code"], playlist_files)
exception_handler(query.edit_message_text(text=single_playlist_text,
reply_markup=InlineKeyboardMarkup(single_playlist_markup_list),
parse_mode='HTML'))
elif operation == "adelete":
results_list = es.get(index="playlist", id=playlist_id)
back_text = language_handler("back_to_the_bot", lang_code)
results = []
for _audio_file_id in results_list["_source"]["list"]:
results.append(es.get(index="audio_files", id=_audio_file_id))
print("result list:", results_list)
print("results:", results)
text = language_handler("delete_audio_file_text", lang_code) # , results,
delete_audio_guide_text = language_handler("delete_audio_guide_text", lang_code)
exception_handler(bot.answer_callback_query(
query.id,
text=delete_audio_guide_text,
# f"{query.data} language registered for you.\n\nYou can always change it using /lang command",
show_alert=True
))
# results_list["_source"]["title"])
da_markup_keyborad = language_handler("delete_audio_murkup_keyboard", lang_code, playlist_id, results)
print("da_markup_keyborad", da_markup_keyborad)
exception_handler(query.edit_message_text(text=text, parse_mode="HTML",
reply_markup=InlineKeyboardMarkup(da_markup_keyborad)))
elif operation == "afdelete":
playlist_id = str(query.data).split(" ")[1]
audio_file_id = str(query.data).split(" ")[2]
_message_id = query.message.message_id
print("got delete query: ", query)
func = "audio_file"
text = language_handler("delete_playlist_validation_text", lang_code, func)
markup_list = language_handler("delete_playlist_validation_keyboard", lang_code, playlist_id, func,
audio_file_id)
# exception_handler(bot.send_message(chat_id=query.from_user.id,
# text=f"<b>{text}</b>",
# reply_markup=InlineKeyboardMarkup(markup_list),
# parse_mode='HTML'))
exception_handler(query.edit_message_text(
text=f"<b>{text}</b>",
reply_markup=InlineKeyboardMarkup(markup_list),
parse_mode='HTML'))
elif str(query.data).__contains__("edit"):
_query = str(query.data).split(" ")[0]
playlist_id = str(query.data).split(" ")[1]
if _query == "editpl":
try:
print(query)
playlist_id = str(query.data).split(" ")[1]
playlist = es.get(index="playlist", id=playlist_id)
print(playlist)
text = language_handler("edit_playlist_text", lang_code, playlist)
markup_list = language_handler("edit_playlist_keyboard", lang_code, playlist_id)
# exception_handler(bot.send_message(chat_id=query.from_user.id,
# text=f"<b>{text}</b>",
# reply_markup=InlineKeyboardMarkup(markup_list),
# parse_mode='HTML'))
exception_handler(query.edit_message_text(
text=f"{text}",
reply_markup=InlineKeyboardMarkup(markup_list),
parse_mode='HTML'))
except Exception as e:
print("exception from edit playlist: ", e)
elif str(query.data).__contains__("showplaylist"):
show_playlist(query, user_data)
elif str(query.data).__contains__("showmyplaylists"):
playlist_id = str(query.data).split(" ")[1]
playlist_files = es.get(index="playlist", id=playlist_id)["_source"]
markup_list = language_handler("playlists_buttons", user_data["lang_code"])
mylists_menu_text = language_handler("mylists_menu_text", user_data["lang_code"])
print(playlist_files)
exception_handler(query.edit_message_text(text=mylists_menu_text,
reply_markup=InlineKeyboardMarkup(markup_list),
parse_mode='HTML'))
elif str(query.data) == "home":
home_markup_keyboard = language_handler("home_markup_keyboard", user_data["lang_code"])
home_keyboard_text = language_handler("home_keyboard_text", user_data["lang_code"])
exception_handler(query.edit_message_text(text=home_keyboard_text,
reply_markup=InlineKeyboardMarkup(home_markup_keyboard),
parse_mode='HTML'))
elif str(query.data) == "help":
help_markup_keyboard = language_handler("help_markup_keyboard", user_data["lang_code"])
help_keyboard_text = language_handler("help_keyboard_text", user_data["lang_code"])
exception_handler(query.edit_message_text(text=help_keyboard_text,
reply_markup=InlineKeyboardMarkup(help_markup_keyboard),
parse_mode='HTML'))
elif str(query.data).__contains__("edit"):
_query = str(query.data).split(" ")[0]
playlist_id = str(query.data).split(" ")[1]
if _query == "editpl":
try:
print(query)
playlist_id = str(query.data).split(" ")[1]
playlist = es.get(index="playlist", id=playlist_id)
print(playlist)
text = language_handler("edit_playlist_text", lang_code, playlist)
markup_list = language_handler("edit_playlist_keyboard", lang_code, playlist_id)
# exception_handler(bot.send_message(chat_id=query.from_user.id,
# text=f"<b>{text}</b>",
# reply_markup=InlineKeyboardMarkup(markup_list),
# parse_mode='HTML'))
exception_handler(query.edit_message_text(
text=f"{text}",
reply_markup=InlineKeyboardMarkup(markup_list),
parse_mode='HTML'))
except Exception as e:
print("exception from edit playlist: ", e)
elif str(query.data).__contains__("showplaylist"):
show_playlist(query, user_data)
elif str(query.data).__contains__("showmyplaylists"):
# try:
playlist_id = str(query.data).split(" ")[1]
playlist_files = es.get(index="playlist", id=playlist_id)["_source"]
# single_playlist_markup_list = language_handler("single_playlist_markup_list", user_data["lang_code"], playlist_id, query.message.message_id)
# single_playlist_text = language_handler("single_playlist_text", user_data["lang_code"], playlist_files)
markup_list = language_handler("playlists_buttons", user_data["lang_code"])
mylists_menu_text = language_handler("mylists_menu_text", user_data["lang_code"])
print(playlist_files)
# exception_handler(bot.send_message(chat_id=user.id,
# text=mylists_menu_text,
# reply_markup=InlineKeyboardMarkup(markup_list),
# parse_mode='HTML'))
exception_handler(query.edit_message_text(text=mylists_menu_text,
reply_markup=InlineKeyboardMarkup(markup_list),
parse_mode='HTML'))
elif str(query.data) == "home":
home_markup_keyboard = language_handler("home_markup_keyboard", user_data["lang_code"])
home_keyboard_text = language_handler("home_keyboard_text", user_data["lang_code"])
# exception_handler(bot.send_message(chat_id=user.id,
# text=mylists_menu_text,
# reply_markup=InlineKeyboardMarkup(markup_list),
# parse_mode='HTML'))
exception_handler(query.edit_message_text(text=home_keyboard_text,
reply_markup=InlineKeyboardMarkup(home_markup_keyboard),
parse_mode='HTML'))
elif str(query.data) == "help":
help_markup_keyboard = language_handler("help_markup_keyboard", user_data["lang_code"])
help_keyboard_text = language_handler("help_keyboard_text", user_data["lang_code"])
# exception_handler(bot.send_message(chat_id=user.id,
# text=mylists_menu_text,
# reply_markup=InlineKeyboardMarkup(markup_list),
# parse_mode='HTML'))
exception_handler(query.edit_message_text(text=help_keyboard_text,
reply_markup=InlineKeyboardMarkup(help_markup_keyboard),
parse_mode='HTML'))
return True
def show_playlist(query, user_data):
    """
    Render a single playlist as an edited message with its action keyboard:
    1. Audio files list (as an inline list)
    2. Get list (as a text message)
    3. Edit
    4. Delete
    5. Home
    6. Back
    :param query: Query containing the "show playlist" data
    :param user_data: User data within database
    :return: True on success; False otherwise
    """
    try:
        query.answer("Back to the playlist ...")
        playlist_id = str(query.data).split(" ")[1]
        playlist = es.get(index="playlist", id=playlist_id)["_source"]
        lang = user_data["lang_code"]
        keyboard = language_handler("single_playlist_markup_list", lang, playlist_id)
        text = language_handler("single_playlist_text", lang, playlist)
        print(playlist)
        exception_handler(query.edit_message_text(
            text=text,
            reply_markup=InlineKeyboardMarkup(keyboard),
            parse_mode='HTML'))
        return True
    except Exception as e:
        print("from showplaylist:", e)
        return False
@bot.on_message(Filters.command(["users", "promote", "reset_channel", "index"]))
def users_log(bot, message):
"""
Some useful functionalities and options for the owner/admin of the bot:
1. "users": Generates a summary log of the database status only for the admin/owner of the bot. This is a
static function and not a formal part of the bot (meant just for simplification; otherwise you can use Kibana).
2. "promote": promotes the rank of a channel in the indexer waiting list
3. "reset_channel": Reset the indexing information of a channel in the database
4. "index": Index a channel immediately without waiting in the indexer queue
:param bot: Telegram bot object
:param message: Telegram message object
:return: True on success
"""
user = message.from_user
user_data = es.get(index="user", id=user.id)["_source"]
if user_data["role"] == "owner":
if message.command[0] == "users":
res = es.count(index="audio", body={
"query": {
"match_all": {}
}
})
audio_files = es.count(index="audio_files", body={
"query": {
"match_all": {}
}
})
users_count = es.count(index="user", body={
"query": {
"match_all": {}
}
})
uen = es.count(index="user", body={
"query": {
"match": {
"lang_code": "en"
}
}
})
uhi = es.count(index="user", body={
"query": {
"match": {
"lang_code": "hi"
}
}
})
uru = es.count(index="user", body={
"query": {
"match": {
"lang_code": "ru"
}
}
})
ufa = es.count(index="user", body={
"query": {
"match": {
"lang_code": "fa"
}
}
})
uar = es.count(index="user", body={
"query": {
"match": {
"lang_code": "ar"
}
}
})
channels = es.count(index="channel", body={
"query": {
"match_all": {}
}
})
imp0 = es.count(index="channel", body={
"query": {
"match": {
"importance": 0
}
}
})
imp1 = es.count(index="channel", body={
"query": {
"match": {
"importance": 1
}
}
})
imp2 = es.count(index="channel", body={
"query": {
"match": {
"importance": 2
}
}
})
imp3 = es.count(index="channel", body={
"query": {
"match": {
"importance": 3
}
}
})
imp4 = es.count(index="channel", body={
"query": {
"match": {
"importance": 4
}
}
})
imp5 = es.count(index="channel", body={
"query": {
"match": {
"importance": 5
}
}
})
to_index = es.count(index="to_index", body={
"query": {
"match_all": {}
}
})
future_channel = es.count(index="future_channel", body={
"query": {
"match_all": {}
}
})
channel_buffer = es.count(index="channel_buffer", body={
"query": {
"match_all": {}
}
})
user_lists = helpers.scan(
client=es,
query={"query": {"match_all": {}}},
size=10000,
scroll='2m',
index="user_lists"
)
# print("audio files:", res)
audio_count = res["count"]
audio_files_count = audio_files["count"]
users_count = users_count["count"]
# print("channels:", channels)
channel_count = channels["count"]
imp0_count = imp0["count"]
imp1_count = imp1["count"]
imp2_count = imp2["count"]
imp3_count = imp3["count"]
imp4_count = imp4["count"]
imp5_count = imp5["count"]
uen_count = uen["count"]
uhi_count = uhi["count"]
uru_count = uru["count"]
ufa_count = ufa["count"]
uar_count = uar["count"]
# print("to_index:", to_index)
to_index_count = | |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, cast, Dict, Iterable, Mapping, Optional, Sequence, Type
from ..analysis.run_metrics import get_final_youngest_desired_bytes_for_process
from ..analysis.clr import get_clr
from ..commonlib.bench_file import (
BenchFile,
BenchOptions,
Benchmark,
BenchmarkAndName,
Config,
ConfigsVaryBy,
CoreclrAndName,
CoreclrSpecifier,
GCPerfSimArgs,
get_this_machine,
PartialConfigAndName,
SingleTestCombination,
TestConfigContainer,
TestKind,
)
from ..commonlib.get_built import Built, get_built, get_built_tests_dir, get_current_git_commit_hash
from ..commonlib.collection_util import (
combine_mappings,
find_only_matching,
make_mapping,
map_mapping_values,
unique,
)
from ..commonlib.command import Command, CommandFunction, CommandKind, CommandsMapping
from ..commonlib.host_info import HostInfo, read_this_machines_host_info
from ..commonlib.option import non_null, optional_to_iter
from ..commonlib.parse_and_serialize import load_yaml, write_yaml_file
from ..commonlib.type_utils import argument, with_slots
from ..commonlib.util import (
assert_dir_exists,
assert_file_exists,
bytes_to_mb,
mb_to_bytes,
mb_to_gb,
remove_str_end,
try_parse_single_tag_from_xml_document,
walk_files_recursive,
)
from .run_single_test import check_env, run_single_test_temporary, SingleTest
def to_benchmark(
    args: GCPerfSimArgs,
    min_seconds: Optional[int] = None,
    max_seconds: Optional[int] = None,
    only_configs: Optional[Sequence[str]] = None,
) -> Benchmark:
    """Wrap a set of GCPerfSim arguments in a ``Benchmark`` description.

    ``executable`` is left as ``None`` — the test runner supplies the
    GCPerfSim binary; only the serialized argument string is stored here.
    """
    return Benchmark(
        executable=None,
        arguments=args.to_str(),
        min_seconds=min_seconds,
        max_seconds=max_seconds,
        only_configs=only_configs,
    )
@with_slots
@dataclass(frozen=True)
class _CoreclrRepositorySpecifier:
    """Like CoreclrSpecifier, but always specifies the repository root and not just CORE_ROOT."""
    # Path to the root of a coreclr repository checkout.
    path: Path
    # Git commit hash of the checkout; filled in lazily (see _parse_coreclrs).
    commit_hash: Optional[str] = None
    def to_coreclr_specifier(self) -> CoreclrSpecifier:
        """Convert to the general ``CoreclrSpecifier`` used by the bench file."""
        return CoreclrSpecifier(repo_path=self.path, commit_hash=self.commit_hash)
def _to_coreclr_specifiers(
    coreclrs: Mapping[str, _CoreclrRepositorySpecifier]
) -> Mapping[str, CoreclrSpecifier]:
    """Convert every repository specifier in the mapping to a ``CoreclrSpecifier``."""
    return map_mapping_values(lambda c: c.to_coreclr_specifier(), coreclrs)
@with_slots
@dataclass(frozen=True)
class _ArgsForGenerate:
    """Everything a benchfile generator callback needs about the local setup."""
    # Build artifacts for the coreclrs being benchmarked.
    built: Built
    # Named coreclr repository checkouts (from 'coreclrs.yaml').
    coreclrs: Mapping[str, _CoreclrRepositorySpecifier]
    # Hardware/OS description of the machine generating the benchfile.
    host_info: HostInfo
    @property
    def coreclr_specifiers(self) -> Mapping[str, CoreclrSpecifier]:
        """The coreclrs converted to the form stored in a ``BenchFile``."""
        return _to_coreclr_specifiers(self.coreclrs)
@with_slots
@dataclass(frozen=True)
class _CommandLineArgsForGenerate:
    """Command-line arguments common to all 'generate a benchfile' commands."""
    path: Path = argument(name_optional=True, doc="Path to write the output benchfile to.")
    coreclrs: Path = argument(doc="Path to 'coreclrs.yaml'")
    overwrite: bool = argument(
        default=False, doc="If true, allow the output path to already exist."
    )
def _generate_helper(
    args: _CommandLineArgsForGenerate, generator: Callable[[_ArgsForGenerate], BenchFile]
) -> None:
    """Run ``generator`` with the parsed coreclrs/host info and write its BenchFile to ``args.path``.

    Refuses to clobber an existing file unless ``--overwrite`` was passed.
    """
    assert (
        not args.path.exists() or args.overwrite
    ), f"{args.path} already exists, did you mean to '--overwrite'?"
    coreclrs = _parse_coreclrs(args.coreclrs)
    # get_built may trigger a build of the coreclrs before we generate anything.
    built = get_built(_to_coreclr_specifiers(coreclrs))
    content = generator(_ArgsForGenerate(built, coreclrs, read_this_machines_host_info()))
    write_yaml_file(args.path, content, overwrite=args.overwrite)
def _parse_coreclrs(path: Path) -> Mapping[str, _CoreclrRepositorySpecifier]:
    """Load 'coreclrs.yaml', filling in the current git commit hash where missing."""
    # https://github.com/python/mypy/issues/4717
    t = cast(
        Type[Mapping[str, _CoreclrRepositorySpecifier]], Mapping[str, _CoreclrRepositorySpecifier]
    )
    coreclrs: Mapping[str, _CoreclrRepositorySpecifier] = load_yaml(t, path)

    def _with_commit_hash(spec: _CoreclrRepositorySpecifier) -> _CoreclrRepositorySpecifier:
        # Validate the repository directory exists before using it.
        repo_path = assert_dir_exists(spec.path)
        sha = spec.commit_hash
        if sha is None:
            sha = get_current_git_commit_hash(spec.path)
        return _CoreclrRepositorySpecifier(repo_path, sha)

    return {name: _with_commit_hash(spec) for name, spec in coreclrs.items()}
def _no_extra_args(
    cb: Callable[[_ArgsForGenerate], BenchFile]
) -> Callable[[_CommandLineArgsForGenerate], None]:
    """Adapt a generator that needs no extra CLI arguments into a command handler."""

    def run(cli_args: _CommandLineArgsForGenerate) -> None:
        _generate_helper(cli_args, cb)

    return run
def _generate_benchyaml_for_coreclr_unit_tests(args: _ArgsForGenerate) -> BenchFile:
    """Generate a benchfile that runs the coreclr GC unit tests under server and workstation GC."""
    return BenchFile(
        comment=None,
        configs_vary_by=None,
        coreclrs=args.coreclr_specifiers,
        # Have a very generous time range
        options=BenchOptions(default_min_seconds=0, default_max_seconds=600),
        common_config=Config(complus_gcconcurrent=True),
        configs={
            "server": Config(complus_gcserver=True),
            "workstation": Config(complus_gcserver=False),
        },
        benchmarks=to_benchmarks_dict(_find_coreclr_unit_tests(args)),
    )
def _args_with_defaults(
    tc: int, tlgb: float = 0.5, lohar: int = 0, sohsi: int = 0, sohpi: int = 0, lohpi: int = 0
) -> GCPerfSimArgs:
    """GCPerfSimArgs with a fixed total allocation of 500 (tagb) and common defaults.

    Only thread count (tc) is required; the rest default to a small-live-set,
    no-pinning, SOH-only workload.
    """
    return GCPerfSimArgs(
        tc=tc, tagb=500, tlgb=tlgb, lohar=lohar, sohsi=sohsi, sohpi=sohpi, lohpi=lohpi
    )
def _gcperfsim_benchmarks() -> Mapping[str, Benchmark]:
    """The standard named GCPerfSim workloads (varying threads, LOH rate, survival, pinning)."""
    # NOTE(review): several names do not match their arguments —
    # "TC8SOHOnly" passes lohar=5, "TC8LOH10" passes lohar=5 (not 10), and
    # "TC8LOH10P10"/"TC8LOH10P50"/"TC8LOH10P100" are inconsistent about lohpi.
    # Confirm whether the names or the argument values are authoritative.
    perfsim_configs = {
        "TC4SOHOnlySohsi0": _args_with_defaults(tc=4),
        "TC4SOHOnlySohsi15": _args_with_defaults(tc=4, sohsi=15),
        "TC4SOHOnlySohsi30": _args_with_defaults(tc=4, sohsi=30),
        "TC8SOHOnly": _args_with_defaults(tc=8, lohar=5, sohsi=30),
        "TC4LOH5": _args_with_defaults(tc=4, lohar=5, sohsi=30),
        "TC8LOH10": _args_with_defaults(tc=8, lohar=5, sohsi=30),
        "TC8LOH20": _args_with_defaults(tc=8, lohar=20, sohsi=30),
        "TC8LOH10P10": _args_with_defaults(tc=8, lohar=10, sohsi=30, sohpi=10, lohpi=100),
        "TC8LOH10P50": _args_with_defaults(tc=8, lohar=10, sohsi=30, sohpi=50),
        "TC8LOH10P100": _args_with_defaults(tc=8, lohar=10, sohsi=30, sohpi=100, lohpi=100),
    }
    return {k: to_benchmark(v) for k, v in perfsim_configs.items()}
def _generate_benchyaml_for_gcperfsim(args: _ArgsForGenerate) -> BenchFile:
    """Generate a benchfile running the standard GCPerfSim workloads with default options."""
    return BenchFile(
        comment=None,
        configs_vary_by=None,
        coreclrs=args.coreclr_specifiers,
        options=_default_options(args),
        benchmarks=_gcperfsim_benchmarks(),
    )
# Sequence of evenly spaced ints in range [lo, hi] with exactly n_elements
def _int_range_with_n_elements(lo: int, hi: int, n_elements: int) -> Sequence[int]:
assert hi > lo and 2 <= n_elements <= ((hi - lo) + 1)
return [round(x) for x in _float_range_with_n_elements(lo, hi, n_elements)]
# both lo and hi are inclusive
def _float_range_with_n_elements(lo: float, hi: float, n_elements: int) -> Sequence[float]:
assert n_elements > 1
assert lo < hi, f"Expected {lo} < {hi}"
step = (hi - lo) / (n_elements - 1)
return [lo + i * step for i in range(n_elements)]
def _float_range_around(v: float) -> Sequence[float]:
    """Eight evenly spaced values bracketing v: v/4, v/2, 3v/4, v, 5v/4, 3v/2, 7v/4, 2v."""
    N = 4
    return _float_range_with_n_elements(v / N, v * 2, N * 2)
def range_inclusive(lo: int, hi: int, step: int = 1) -> Iterable[int]:
    """Like ``range`` but with ``hi`` included; ``hi`` must be reachable from ``lo`` by ``step``."""
    assert lo < hi and (hi - lo) % step == 0
    stop = hi + 1
    return range(lo, stop, step)
@with_slots
@dataclass(frozen=True)
class _NheapsArgs(_CommandLineArgsForGenerate):
    """CLI arguments for the nheaps benchfile generator."""
    # Not really optional, but non-optional fields can't follow optional fields from superclass
    live_gb: Optional[int] = argument(default=None, doc="-tlgb value")
    tc: Optional[int] = argument(default=None, doc="-tc value")
def _survive_benchmarks() -> Mapping[str, Benchmark]:
    """Two contrasting workloads: zero survival vs. a high-survival test kind."""
    return map_mapping_values(
        to_benchmark,
        {
            "nosurvive": _args_with_defaults(tc=8, tlgb=0, sohsi=0),
            # tagb is arbitrary — highSurvival runs for totalMins, not by allocation amount
            "hisurvive": GCPerfSimArgs(
                tc=8, tagb=0, tlgb=0.5, totalMins=1, testKind=TestKind.highSurvival
            ),
        },
    )
# Memory budget base for the high-survival container tests (container memory_mb = 2x this).
# NOTE(review): name suggests a tlgb (live GB) value but it is consumed as MB — confirm units.
_TLGB_HIGH_SURVIVE = 400
def _normal_bench_file(
    args: _ArgsForGenerate,
    common_config: Config,
    configs: Mapping[str, Config],
    benchmarks: Mapping[str, Benchmark],
    comment: Optional[str] = None,
    configs_vary_by: Optional[ConfigsVaryBy] = None,
) -> BenchFile:
    """Assemble a ``BenchFile`` with this machine's coreclrs and the default options."""
    return BenchFile(
        comment=comment,
        configs_vary_by=configs_vary_by,
        coreclrs=args.coreclr_specifiers,
        options=_default_options(args),
        common_config=common_config,
        configs=configs,
        benchmarks=benchmarks,
    )
def _survive_bench_file(
    args: _ArgsForGenerate, common_config: Config, configs: Mapping[str, Config]
) -> BenchFile:
    """A normal bench file whose benchmarks are the no-survive/high-survive pair."""
    return _normal_bench_file(args, common_config, configs, _survive_benchmarks())
def _generate_gen0size_nosurvive(args: _ArgsForGenerate) -> BenchFile:
    """Generate a benchfile sweeping gen0 size around each coreclr's measured default.

    First measures the default minimum gen0 size per coreclr, then creates 16
    configs spanning half the smallest default to twice the largest.
    """
    common_config = Config(complus_gcserver=True, complus_gcconcurrent=False)
    def get_config(gen0size_bytes: int, heap_count: Optional[int]) -> Config:
        return Config(complus_gcgen0size=gen0size_bytes, complus_gcheapcount=heap_count)
    min_seconds = 10 # With no survival we don't need a very long test
    benchmarks: Mapping[str, Benchmark] = {
        f"nosurvive_{n_threads}threads": to_benchmark(
            GCPerfSimArgs(tc=n_threads, tlgb=0, sohsi=0, tagb=300), min_seconds=min_seconds
        )
        for n_threads in (4, args.host_info.n_logical_processors)
    }
    defaults = {
        coreclr: _measure_default_gen0_min_bytes_for_coreclr(args.built, coreclr)
        for coreclr in args.coreclrs.keys()
    }
    sizes = _float_range_with_n_elements(
        min(defaults.values()) / 2, max(defaults.values()) * 2, n_elements=16
    )
    configs = {
        f"{bytes_to_mb(gen0size_bytes)}mb_gen0size": get_config(round(gen0size_bytes), heap_count)
        for gen0size_bytes in sizes
        for heap_count in (None,) # (4, 8)
    }
    return _normal_bench_file(
        args,
        common_config,
        configs,
        benchmarks,
        configs_vary_by=ConfigsVaryBy(name="gen0size", default_values=defaults),
    )
def _measure_default_gen0_min_bytes_for_coreclr(built: Built, coreclr_name: str) -> int:
    """Run a short no-survival test and return the measured default min gen0 size in bytes."""
    # Get this lazily to ensure we only load DLLs after they have been built
    # NOTE: FinalYoungestDesired should approach the min gen0 size when there is no survival.
    # (With high survival it approaches the max gen0 size.)
    # Allocate a large amount (tagb=100) with no survival — enough to get a good
    # measure without taking forever.
    benchmark = to_benchmark(GCPerfSimArgs(tc=1, tlgb=0, sohsi=0, tagb=100), min_seconds=10)
    coreclr = non_null(built.coreclrs[coreclr_name])
    proc = run_single_test_temporary(
        get_clr(),
        built,
        SingleTest(
            test=SingleTestCombination(
                machine=get_this_machine(),
                coreclr=CoreclrAndName(coreclr_name, CoreclrSpecifier(core_root=coreclr.core_root)),
                config=PartialConfigAndName(
                    "a", Config(complus_gcserver=True, complus_gcconcurrent=False)
                ),
                benchmark=BenchmarkAndName("nosurvive", benchmark),
            ),
            coreclr=coreclr,
            test_exe=built.gcperfsim_dll,
            options=BenchOptions(default_iteration_count=1),
            default_env=check_env(),
        ),
    )
    return get_final_youngest_desired_bytes_for_process(proc)
def _nheaps_configs_vary_by(args: _ArgsForGenerate) -> ConfigsVaryBy:
    """Vary-by descriptor for heap count; the default is the machine's physical core count."""
    df = args.host_info.n_physical_processors
    return ConfigsVaryBy(
        name="nheaps (workstation=-1)", default_values={k: df for k in args.coreclrs.keys()}
    )
def _generate_benchyaml_for_container_nheaps(args: _ArgsForGenerate) -> BenchFile:
    """Generate a benchfile varying thread-pool worker threads (1-8) inside a memory-capped container."""
    common_config = Config(complus_gcserver=True, complus_gcconcurrent=False)
    configs = {
        str(threads): Config(
            complus_threadpool_forcemaxworkerthreads=threads,
            container=TestConfigContainer(memory_mb=_TLGB_HIGH_SURVIVE * 2),
        )
        for threads in range(1, 8 + 1)
    }
    return BenchFile(
        comment=None,
        configs_vary_by=_nheaps_configs_vary_by(args),
        coreclrs=args.coreclr_specifiers,
        options=_default_options(args),
        common_config=common_config,
        configs=configs,
        benchmarks=_survive_benchmarks(),
    )
def _get_container_memory_test() -> Benchmark:
    """The short fixed workload used by the container-memory experiments."""
    return to_benchmark(GCPerfSimArgs(tc=4, tagb=100, tlgb=1, sohsi=0), min_seconds=8)
def _measure_unconstrained_memory_usage_mb(built: Built, coreclr_name: str, n_heaps: int) -> float:
    """Run the container-memory test with NO container and return peak heap size in MB.

    The peak is taken as the max HeapSizeBeforeMB over all GCs in the run; it is
    used as the baseline when choosing container memory limits.
    """
    coreclr = non_null(built.coreclrs[coreclr_name])
    proc = run_single_test_temporary(
        get_clr(),
        built,
        SingleTest(
            test=SingleTestCombination(
                machine=get_this_machine(),
                coreclr=CoreclrAndName(coreclr_name, CoreclrSpecifier(core_root=coreclr.core_root)),
                config=PartialConfigAndName(
                    "a",
                    Config(
                        complus_gcserver=True,
                        complus_gcconcurrent=False,
                        complus_gcheapcount=n_heaps,
                        # no container
                    ),
                ),
                benchmark=BenchmarkAndName("nosurvive", _get_container_memory_test()),
            ),
            coreclr=coreclr,
            test_exe=built.gcperfsim_dll,
            options=BenchOptions(default_iteration_count=1),
            default_env=check_env(),
        ),
    )
    res = max(gc.HeapSizeBeforeMB for gc in proc.gcs)
    print(f"Used {res}MB (max HeapSizeBeforeMB)")
    return res
@with_slots
@dataclass(frozen=True)
class _ContainerMemoryLimitsExtra(_CommandLineArgsForGenerate):
    """CLI arguments for the container-memory-limits benchfile generator."""
    # Not really optional
    n_heaps: Optional[int] = argument(default=None, doc="Value for complus_gcheapcount")
def _generate_for_container_memory_limits(cmd_args: _ContainerMemoryLimitsExtra) -> None:
    """Generate a benchfile sweeping container memory limits around measured unconstrained usage."""
    def f(args: _ArgsForGenerate) -> BenchFile:
        n_heaps = non_null(cmd_args.n_heaps)
        # First, run the test outside of a container and measure the memory usage
        defaults = {
            coreclr: _measure_unconstrained_memory_usage_mb(args.built, coreclr, n_heaps)
            for coreclr in args.coreclrs.keys()
        }
        # Tests will probably fail in the lower end of this range -- test runner should handle it
        mem_limits_mb = _float_range_with_n_elements(
            min(defaults.values()) / 4, max(defaults.values()) * 1.2, 10
        )
        common_config = Config(
            complus_gcserver=True, complus_gcconcurrent=False, complus_gcheapcount=n_heaps
        )
        configs = {
            f"mem_limit_{mem_limit_mb}mb": Config(
                container=TestConfigContainer(memory_mb=mem_limit_mb)
            )
            for mem_limit_mb in mem_limits_mb
        }
        return BenchFile(
            comment=None,
            configs_vary_by=ConfigsVaryBy(name="container_memory_mb", default_values=defaults),
            coreclrs=args.coreclr_specifiers,
            options=_default_options(args),
            common_config=common_config,
            configs=configs,
            benchmarks={"nosurvive": _get_container_memory_test()},
        )
    _generate_helper(cmd_args, f)
def _generate_containers_no_survival_no_live_data(args: _ArgsForGenerate) -> BenchFile:
    """Generate a benchfile varying heap count (1-48) in a small 128MB container, zero survival."""
    common_config = Config(
        complus_gcserver=True,
        complus_gcconcurrent=False,
        container=TestConfigContainer(memory_mb=128),
    )
    n_heapses = (1, 2, 4, 8, 16, 24, 32, 48)
    configs = {f"{n_heaps}_heaps": Config(complus_gcheapcount=n_heaps) for n_heaps in n_heapses}
    benchmarks = {
        "nosurvive": to_benchmark(
            GCPerfSimArgs(tc=args.host_info.n_logical_processors, tagb=20, tlgb=0, sohsi=0),
            min_seconds=5,
        )
    }
    return BenchFile(
        comment=None,
        configs_vary_by=ConfigsVaryBy("n_heaps", default_values=None),
        coreclrs=args.coreclr_specifiers,
        options=_default_options(args),
        common_config=common_config,
        configs=configs,
        benchmarks=benchmarks,
    )
def _generate_containers_low_survival_temporary_data(args: _ArgsForGenerate) -> BenchFile:
    """Generate a benchfile varying heap count (1-48) in a 200MB container with a small live set."""
    common_config = Config(
        complus_gcserver=True,
        complus_gcconcurrent=False,
        container=TestConfigContainer(memory_mb=200),
    )
    n_heapses = (1, 2, 4, 8, 16, 24, 32, 48)
    configs = {f"{n_heaps}_heaps": Config(complus_gcheapcount=n_heaps) for n_heaps in n_heapses}
    benchmarks = {
        "low_survival_temp_data": to_benchmark(
            GCPerfSimArgs(
                # 50 MB of live data, expressed in GB as tlgb expects
                tc=args.host_info.n_logical_processors, tagb=20, tlgb=mb_to_gb(50), sohsi=50
            ),
            min_seconds=5,
        )
    }
    return BenchFile(
        comment=None,
        configs_vary_by=ConfigsVaryBy("n_heaps", default_values=None),
        coreclrs=args.coreclr_specifiers,
        options=_default_options(args),
        common_config=common_config,
        configs=configs,
        benchmarks=benchmarks,
    )
def _generate_containers_varying_live_mb(args: _ArgsForGenerate) -> BenchFile:
common_config = Config(
complus_gcserver=True,
complus_gcconcurrent=False,
container=TestConfigContainer(memory_mb=200),
)
| |
<reponame>python-happybase/aiohappybase
"""
HappyBase tests.
"""
import gc
import random
import logging
import asyncio as aio
from functools import partial
from typing import AsyncGenerator, Tuple, List
import pytest
from thriftpy2.thrift import TException
from aiohappybase import (
Table,
Connection,
ConnectionPool,
NoConnectionsAvailable,
)
from aiohappybase.pool import current_task # Easiest way to get the right one
from aiohappybase.table import Data
TABLE_PREFIX = b'happybase_tests_tmp'
connection_kwargs = {'table_prefix': TABLE_PREFIX}
@pytest.fixture
async def conn() -> Connection:
    """Fixture: an open Connection (with the test table prefix), closed on teardown."""
    async with Connection(**connection_kwargs) as conn:
        assert conn is not None
        yield conn
@pytest.fixture
async def table(conn: Connection, table_name: bytes) -> Table:
    """Fixture: a freshly created table with three column families, deleted on teardown."""
    cfs = {
        'cf1': {},
        'cf2': None,
        'cf3': {'max_versions': 1},
    }
    table = await conn.create_table(table_name, families=cfs)
    assert table is not None
    yield table
    await conn.delete_table(table_name, disable=True)
class TestAPI:
    @pytest.mark.asyncio
    def test_autoconnect(self):
        """autoconnect=True should leave the underlying thrift transport open."""
        # NOTE(review): sync def despite the asyncio marker — presumably
        # autoconnect opens synchronously; confirm this is intentional.
        conn = Connection(**connection_kwargs, autoconnect=True)
        assert conn.client._iprot.trans.is_open()
        conn.close()
    @pytest.mark.asyncio
    async def test_double_close(self):
        """Closing an already-closed connection must not raise."""
        conn = Connection(**connection_kwargs)
        await conn.open()
        conn.close()
        conn.close()  # No error on second close
    @pytest.mark.asyncio
    async def test_no_close_warning(self, caplog):
        """Garbage-collecting an unclosed connection should log a warning."""
        conn = Connection(**connection_kwargs)
        await conn.open()
        client = conn.client  # Save so we can close later
        with caplog.at_level(level=logging.WARNING):
            del conn
            gc.collect()  # force finalization so the warning fires deterministically
            assert "was not closed" in caplog.text
        client.close()
    @pytest.mark.asyncio
    def test_connection_invalid_table_prefix(self):
        """A non-string table_prefix must raise TypeError."""
        with pytest.raises(TypeError):
            Connection(table_prefix=1)  # noqa Not a string
    @pytest.mark.asyncio
    def test_connection_invalid_table_prefix_sep(self):
        """A non-string table_prefix_separator must raise TypeError."""
        with pytest.raises(TypeError):
            Connection(table_prefix_separator=1)  # noqa Not a string
    @pytest.mark.asyncio
    def test_connection_invalid_transport(self):
        """An unknown transport name must raise ValueError."""
        with pytest.raises(ValueError):
            Connection(transport='millennium')
    @pytest.mark.asyncio
    def test_connection_invalid_protocol(self):
        """An unknown protocol name must raise ValueError."""
        with pytest.raises(ValueError):
            Connection(protocol='falcon')
    @pytest.mark.asyncio
    def test_connection_invalid_compat(self):
        """An unknown compatibility version must raise ValueError."""
        with pytest.raises(ValueError):
            Connection(compat='0.1.invalid.version')
    @pytest.mark.asyncio
    @pytest.mark.usefixtures('table')
    async def test_enabling(self, conn: Connection, table_name: bytes):
        """Round-trip table disable/enable and verify is_table_enabled tracks it."""
        assert await conn.is_table_enabled(table_name)
        await conn.disable_table(table_name)
        assert not await conn.is_table_enabled(table_name)
        await conn.enable_table(table_name)
        assert await conn.is_table_enabled(table_name)
    @pytest.mark.asyncio
    @pytest.mark.usefixtures('table')
    async def test_compaction(self, conn: Connection, table_name: bytes):
        """Minor and major compaction requests should both succeed."""
        await conn.compact_table(table_name)
        await conn.compact_table(table_name, major=True)
    @pytest.mark.asyncio
    async def test_prefix(self, conn: Connection):
        """Table-name prefixing: applied by default, skippable, and type-checked."""
        assert TABLE_PREFIX + b'_' == conn._table_name('')
        assert TABLE_PREFIX + b'_foo' == conn._table_name('foo')
        assert conn.table('foobar').name == TABLE_PREFIX + b'_foobar'
        assert conn.table('foobar', use_prefix=False).name == b'foobar'
        # A connection with no prefix leaves names untouched.
        c = Connection(autoconnect=False)  # sync has it set to True
        assert b'foo' == c._table_name('foo')
        with pytest.raises(TypeError):
            Connection(table_prefix=123)  # noqa
        with pytest.raises(TypeError):
            Connection(table_prefix_separator=2.1)  # noqa
    @pytest.mark.asyncio
    async def test_stringify(self, conn: Connection, table: Table):
        """str() and repr() of connections and tables must not raise."""
        str(conn)
        repr(conn)
        str(table)
        repr(table)
    @pytest.mark.asyncio
    @pytest.mark.usefixtures('table')
    async def test_table_listing(self, conn: Connection, table_name: bytes):
        """tables() returns a list that includes the fixture table."""
        names = await conn.tables()
        assert isinstance(names, list)
        assert table_name in names
    @pytest.mark.asyncio
    async def test_table_regions(self, table: Table):
        """regions() returns a list."""
        assert isinstance(await table.regions(), list)
    @pytest.mark.asyncio
    async def test_invalid_table_create(self, conn: Connection):
        """create_table rejects empty (ValueError) and non-dict (TypeError) families."""
        create_table = partial(conn.create_table, 'sometable')
        with pytest.raises(ValueError):
            await create_table(families={})
        for fam in [0, []]:
            with pytest.raises(TypeError):
                await create_table(families=fam)  # noqa
    @pytest.mark.asyncio
    async def test_families(self, table: Table):
        """families() maps bytes names to descriptor dicts with name/max_versions keys."""
        families = await table.families()
        for name, fdesc in families.items():
            assert isinstance(name, bytes)
            assert isinstance(fdesc, dict)
            assert 'name' in fdesc
            assert isinstance(fdesc['name'], bytes)
            assert 'max_versions' in fdesc
    @pytest.mark.asyncio
    async def test_put(self, table: Table):
        """put() accepts multi-column data, with and without explicit timestamps."""
        await table.put(b'r1', {b'cf1:c1': b'v1',
                                b'cf1:c2': b'v2',
                                b'cf2:c3': b'v3'})
        await table.put(b'r1', {b'cf1:c4': b'v2'}, timestamp=2345678)
        await table.put(b'r1', {b'cf1:c4': b'v2'}, timestamp=1369168852994)
    @pytest.mark.asyncio
    async def test_atomic_counters(self, table: Table):
        """Atomic counter get/set/inc/dec semantics, including negative deltas."""
        row = b'row-with-counter'
        column = b'cf1:counter'
        get = partial(table.counter_get, row, column)
        inc = partial(table.counter_inc, row, column)
        dec = partial(table.counter_dec, row, column)
        assert 0 == await get()
        assert 10 == await inc(10)
        assert 10 == await get()
        await table.counter_set(row, column, 0)
        assert 1 == await inc()
        assert 4 == await inc(3)
        assert 4 == await get()
        await table.counter_set(row, column, 3)
        assert 3 == await get()
        assert 8 == await inc(5)
        assert 6 == await inc(-2)
        assert 5 == await dec()
        assert 3 == await dec(2)
        assert 10 == await dec(-7)
@pytest.mark.asyncio
async def test_batch(self, table: Table):
with pytest.raises(TypeError):
table.batch(timestamp='invalid') # noqa
b = table.batch()
await b.put(b'row1', {b'cf1:col1': b'value1',
b'cf1:col2': b'value2'})
await b.put(b'row2', {b'cf1:col1': b'value1',
b'cf1:col2': b'value2',
b'cf1:col3': b'value3'})
await b.delete(b'row1', [b'cf1:col4'])
await b.delete(b'another-row')
await b.close()
table.batch(timestamp=1234567)
await b.put(b'row1', {b'cf1:col5': b'value5'})
await b.close()
with pytest.raises(ValueError):
table.batch(batch_size=0)
with pytest.raises(TypeError):
table.batch(transaction=True, batch_size=10)
    @pytest.mark.asyncio
    async def test_batch_context_managers(self, table: Table):
        """Batch as async context manager: auto-send, transactions, and batch_size flushing."""
        async with table.batch() as b:
            await b.put(b'row4', {b'cf1:col3': b'value3'})
            await b.put(b'row5', {b'cf1:col4': b'value4'})
            await b.put(b'row', {b'cf1:col1': b'value1'})
            await b.delete(b'row', [b'cf1:col4'])
            await b.put(b'row', {b'cf1:col2': b'value2'})
        async with table.batch(timestamp=87654321) as b:
            await b.put(b'row', {b'cf1:c3': b'somevalue',
                                 b'cf1:c5': b'anothervalue'})
            await b.delete(b'row', [b'cf1:c3'])
        # transaction=True: an exception inside the block discards the batch.
        with pytest.raises(ValueError):
            async with table.batch(transaction=True) as b:
                await b.put(b'fooz', {b'cf1:bar': b'baz'})
                raise ValueError
        assert {} == await table.row(b'fooz', [b'cf1:bar'])
        # transaction=False: the batch is sent even though the block raised.
        with pytest.raises(ValueError):
            async with table.batch(transaction=False) as b:
                await b.put(b'fooz', {b'cf1:bar': b'baz'})
                raise ValueError
        assert {b'cf1:bar': b'baz'} == await table.row(b'fooz', [b'cf1:bar'])
        async with table.batch(batch_size=5) as b:
            for i in range(10):
                await b.put(
                    f'row-batch1-{i:03}'.encode('ascii'),
                    {b'cf1:': str(i).encode('ascii')},
                )
        async with table.batch(batch_size=20) as b:
            for i in range(95):
                await b.put(
                    f'row-batch2-{i:03}'.encode('ascii'),
                    {b'cf1:': str(i).encode('ascii')},
                )
        assert 95 == await self._tbl_scan_len(table, row_prefix=b'row-batch2-')
        async with table.batch(batch_size=20) as b:
            for i in range(95):
                await b.delete(f'row-batch2-{i:03}'.encode('ascii'))
        assert 0 == await self._tbl_scan_len(table, row_prefix=b'row-batch2-')
    @pytest.mark.asyncio
    async def test_batch_order(self, table: Table):
        """Mutations in a batch apply in order: the last put to a cell wins."""
        row = b'row-test-batch-order'
        col = b'cf1:col'
        async with table.batch() as b:
            for i in range(5):
                await b.put(row, {col: str(i).encode()})
        assert (await table.row(row))[col] == b'4'
    @pytest.mark.asyncio
    async def test_batch_delete_put_same_row(self, table: Table):
        """A delete followed by a put of the same row in one batch keeps the put."""
        # See https://github.com/python-happybase/happybase/issues/224
        row = b'row-test-batch-delete-put'
        col = b'cf1:col'
        val = b'val'
        await table.put(row, {col: b''})
        async with table.batch() as b:
            await b.delete(row)
            await b.put(row, {col: val})
        result = await table.row(row)
        assert col in result
        assert result[col] == val
    @pytest.mark.asyncio
    async def test_batch_counters(self, table: Table):
        """Counter ops in a batch are coalesced locally and only sent on close/flush."""
        row = b'row-with-counter'
        col1 = b'cf1:counter1'
        col2 = b'cf1:counter2'
        get = partial(table.counter_get, row)
        async def check_cols(c1: int, c2: int):
            # Assert the server-side values of both counter columns.
            for col, val in [(col1, c1), (col2, c2)]:
                assert await get(col) == val
        async with table.batch() as b:
            inc = partial(b.counter_inc, row)
            dec = partial(b.counter_dec, row)
            await inc(col1, 1)  # c1 == 1, c2 == 0
            await inc(col1, 2)  # c1 == 3, c2 == 0
            await dec(col2, 2)  # c1 == 3, c2 == -2
            await dec(col1, 1)  # c1 == 2, c2 == -2
            await inc(col2, 5)  # c1 == 2, c2 == 3
            # Make sure nothing was sent yet
            await check_cols(0, 0)
        await check_cols(2, 3)
        for c in [col1, col2]:
            await table.counter_set(row, c, 0)
        await check_cols(0, 0)
        async with table.batch(batch_size=2) as b:
            inc = partial(b.counter_inc, row)
            await inc(col1, 1)
            await check_cols(0, 0)  # Not sent yet
            await inc(col1, 1)
            await check_cols(0, 0)  # Same column modified twice, not sent
            await inc(col2, 1)  # Forces send since batch count >= 2
            await check_cols(2, 1)
    @pytest.mark.asyncio
    async def test_row(self, table: Table):
        """row() argument validation, column filtering, timestamps, and include_timestamp."""
        row = table.row
        put = table.put
        row_key = b'row-test'
        with pytest.raises(TypeError):
            await row(row_key, 123)  # noqa
        with pytest.raises(TypeError):
            await row(row_key, timestamp='invalid')  # noqa
        await put(row_key, {b'cf1:col1': b'v1old'}, timestamp=1234)
        await put(row_key, {b'cf1:col1': b'v1new'}, timestamp=3456)
        await put(row_key, {b'cf1:col2': b'v2', b'cf2:col1': b'v3'})
        await put(row_key, {b'cf2:col2': b'v4'}, timestamp=1234)
        exp = {b'cf1:col1': b'v1new',
               b'cf1:col2': b'v2',
               b'cf2:col1': b'v3',
               b'cf2:col2': b'v4'}
        assert exp == await row(row_key)
        # Whole-family filter
        exp = {b'cf1:col1': b'v1new', b'cf1:col2': b'v2'}
        assert exp == await row(row_key, [b'cf1'])
        # Explicit column list
        exp = {b'cf1:col1': b'v1new', b'cf2:col2': b'v4'}
        assert exp == await row(row_key, list(exp))
        # timestamp filter returns the newest value at or before that timestamp
        exp = {b'cf1:col1': b'v1old', b'cf2:col2': b'v4'}
        assert exp == await row(row_key, timestamp=2345)
        assert {} == await row(row_key, timestamp=123)
        res = await row(row_key, include_timestamp=True)
        assert len(res) == 4
        assert b'v1new' == res[b'cf1:col1'][0]
        assert isinstance(res[b'cf1:col1'][1], int)
    @pytest.mark.asyncio
    async def test_rows(self, table: Table):
        """rows() fetches multiple row keys, honoring the timestamp filter."""
        row_keys = [b'rows-row1', b'rows-row2', b'rows-row3']
        data_old = {b'cf1:col1': b'v1old', b'cf1:col2': b'v2old'}
        data_new = {b'cf1:col1': b'v1new', b'cf1:col2': b'v2new'}
        with pytest.raises(TypeError):
            await table.rows(row_keys, object())  # noqa
        with pytest.raises(TypeError):
            await table.rows(row_keys, timestamp='invalid')  # noqa
        for row_key in row_keys:
            await table.put(row_key, data_old, timestamp=4000)
        for row_key in row_keys:
            await table.put(row_key, data_new)
        assert {} == dict(await table.rows([]))
        rows = dict(await table.rows(row_keys))
        for row_key in row_keys:
            assert row_key in rows
            assert data_new == rows[row_key]
        rows = dict(await table.rows(row_keys, timestamp=5000))
        for row_key in row_keys:
            assert row_key in rows
            assert data_old == rows[row_key]
    @pytest.mark.asyncio
    async def test_cells(self, table: Table):
        """cells() versioning: argument validation, version limits, timestamps."""
        row_key = b'cell-test'
        col = b'cf1:col1'
        await table.put(row_key, {col: b'old'}, timestamp=1234)
        await table.put(row_key, {col: b'new'})
        with pytest.raises(TypeError):
            await table.cells(row_key, col, versions='invalid')  # noqa
        with pytest.raises(TypeError):
            await table.cells(
                row_key, col,
                versions=3,
                timestamp='invalid',  # noqa
            )
        with pytest.raises(ValueError):
            await table.cells(row_key, col, versions=0)
        results = await table.cells(row_key, col, versions=1)
        assert len(results) == 1
        assert b'new' == results[0]
        # Versions are returned newest first.
        results = await table.cells(row_key, col)
        assert len(results) == 2
        assert b'new' == results[0]
        assert b'old' == results[1]
        results = await table.cells(
            row_key, col,
            timestamp=2345,
            include_timestamp=True,
        )
        assert len(results) == 1
        assert b'old' == results[0][0]
        assert 1234 == results[0][1]
@pytest.mark.asyncio
async def test_scan(self, table: Table):
with pytest.raises(TypeError):
await self._scan_list(table, row_prefix='foobar', row_start='xyz')
if table.connection.compat == '0.90':
with pytest.raises(NotImplementedError):
await self._scan_list(table, filter='foo')
if table.connection.compat < '0.96':
with pytest.raises(NotImplementedError):
await self._scan_list(table, sorted_columns=True)
with pytest.raises(ValueError):
await self._scan_list(table, | |
if ( iIIiI11II != None and O00O0 . itr_rlocs != [ ] ) :
I1i11111i = O00O0 . itr_rlocs [ 0 ]
else :
if ( deid . is_ipv4 ( ) ) :
I1i11111i = II1ii1ii
elif ( deid . is_ipv6 ( ) ) :
I1i11111i = I11ii1i1i
else :
I1i11111i = II1ii1ii
if 64 - 64: i1IIi / O0 - oO0o
if 7 - 7: IiII . IiII * Ii1I
if 1 - 1: i11iIiiIii
if 91 - 91: I1ii11iIi11i . OoO0O00 / OoO0O00 / I1ii11iIi11i + iII111i
if 20 - 20: o0oOOo0O0Ooo . I1Ii111 + O0
if 99 - 99: O0 / IiII . oO0o
iI1IIII1ii1 = O00O0 . encode ( iIIiI11II , i1i111 )
O00O0 . print_map_request ( )
if 18 - 18: OoooooooOO * OoO0O00 * I1Ii111
if 12 - 12: i11iIiiIii / iIii1I11I1II1 . I11i % I1Ii111 * ooOoO0o % ooOoO0o
if 13 - 13: i1IIi . ooOoO0o . ooOoO0o
if 24 - 24: iIii1I11I1II1
if 72 - 72: i11iIiiIii + o0oOOo0O0Ooo % ooOoO0o * I1ii11iIi11i . i1IIi
if 59 - 59: OoooooooOO - OoooooooOO - o0oOOo0O0Ooo + i1IIi % I1Ii111
if ( iIIiI11II != None ) :
if ( rloc . is_rloc_translated ( ) ) :
IiiiI11I1 = lisp_get_nat_info ( iIIiI11II , rloc . rloc_name )
if ( IiiiI11I1 and len ( lisp_sockets ) == 4 ) :
lisp_encapsulate_rloc_probe ( lisp_sockets , iIIiI11II ,
IiiiI11I1 , iI1IIII1ii1 )
return
if 74 - 74: IiII * iIii1I11I1II1 - I1IiiI
if 62 - 62: o0oOOo0O0Ooo
if 54 - 54: iIii1I11I1II1 / OoooooooOO + o0oOOo0O0Ooo . i1IIi - OoooooooOO
OoOOoooO000 = iIIiI11II . print_address_no_iid ( )
iiIi1I = lisp_convert_4to6 ( OoOOoooO000 )
lisp_send ( lisp_sockets , iiIi1I , LISP_CTRL_PORT , iI1IIII1ii1 )
return
if 70 - 70: Ii1I / OoOoOO00 * Oo0Ooo
if 32 - 32: I1Ii111 . OoOoOO00 % OoooooooOO + I1Ii111 * OoO0O00
if 84 - 84: OoOoOO00
if 80 - 80: oO0o
if 59 - 59: iIii1I11I1II1 / IiII % I1ii11iIi11i + OoO0O00 - I11i % OOooOOo
if 92 - 92: iII111i
OOoOO = None if lisp_i_am_rtr else seid
if ( lisp_decent_pull_xtr_configured ( ) ) :
IIiIII1IIi = lisp_get_decent_map_resolver ( deid )
else :
IIiIII1IIi = lisp_get_map_resolver ( None , OOoOO )
if 47 - 47: Oo0Ooo . I1ii11iIi11i * I1IiiI
if ( IIiIII1IIi == None ) :
lprint ( "Cannot find Map-Resolver for source-EID {}" . format ( green ( seid . print_address ( ) , False ) ) )
if 46 - 46: I1Ii111 / I11i
return
if 13 - 13: I1ii11iIi11i + II111iiii * IiII * OoooooooOO + O0 * O0
IIiIII1IIi . last_used = lisp_get_timestamp ( )
IIiIII1IIi . map_requests_sent += 1
if ( IIiIII1IIi . last_nonce == 0 ) : IIiIII1IIi . last_nonce = O00O0 . nonce
if 15 - 15: Oo0Ooo % I11i * O0
if 61 - 61: I1ii11iIi11i - ooOoO0o / OoOoOO00 % OOooOOo * i1IIi . IiII
if 27 - 27: I1ii11iIi11i % iII111i . Oo0Ooo * iIii1I11I1II1
if 40 - 40: I11i
if ( seid == None ) : seid = I1i11111i
lisp_send_ecm ( lisp_sockets , iI1IIII1ii1 , seid , lisp_ephem_port , deid ,
IIiIII1IIi . map_resolver )
if 58 - 58: o0oOOo0O0Ooo / OOooOOo . oO0o % ooOoO0o
if 33 - 33: I1IiiI * I1ii11iIi11i . OoO0O00 - I1Ii111 . OoO0O00
if 79 - 79: ooOoO0o
if 90 - 90: OOooOOo
lisp_last_map_request_sent = lisp_get_timestamp ( )
if 4 - 4: OoOoOO00 - I1Ii111 . i1IIi - IiII . ooOoO0o + II111iiii
if 56 - 56: I1ii11iIi11i / i1IIi + I11i % Oo0Ooo
if 86 - 86: O0 * II111iiii
if 75 - 75: iIii1I11I1II1 - Oo0Ooo - OoOoOO00 % I1ii11iIi11i . II111iiii
IIiIII1IIi . resolve_dns_name ( )
return
if 11 - 11: I1ii11iIi11i - I1ii11iIi11i . ooOoO0o * Oo0Ooo + I1Ii111
if 59 - 59: iII111i - OOooOOo - OoO0O00 . I1IiiI % o0oOOo0O0Ooo + iII111i
if 10 - 10: iIii1I11I1II1 - Ii1I
if 84 - 84: iII111i
if 21 - 21: i11iIiiIii
if 30 - 30: OoO0O00 + OoooooooOO
if 98 - 98: I1ii11iIi11i % I1IiiI
if 9 - 9: o0oOOo0O0Ooo / I1Ii111 % i1IIi - OOooOOo % I1IiiI / I1ii11iIi11i
def lisp_send_info_request ( lisp_sockets , dest , port , device_name ) :
if 66 - 66: IiII
if 56 - 56: oO0o + OoooooooOO
if 75 - 75: O0 % Ii1I
if 47 - 47: OoooooooOO - OoooooooOO + OoO0O00 / iIii1I11I1II1
i1ii = lisp_info ( )
i1ii . nonce = lisp_get_control_nonce ( )
if ( device_name ) : i1ii . hostname += "-" + device_name
if 7 - 7: oO0o
OoOOoooO000 = dest . print_address_no_iid ( )
if 89 - 89: i11iIiiIii / o0oOOo0O0Ooo / I1ii11iIi11i % iII111i . OoooooooOO - iIii1I11I1II1
if 63 - 63: Ii1I % I1Ii111 + O0 * OoO0O00 . oO0o
if 34 - 34: I1IiiI . I1ii11iIi11i . O0 - OoOoOO00 - i11iIiiIii / iII111i
if 63 - 63: OOooOOo
if 84 - 84: i11iIiiIii * iIii1I11I1II1 % I11i % iII111i + OoooooooOO . o0oOOo0O0Ooo
if 78 - 78: o0oOOo0O0Ooo . iII111i + O0 / I1ii11iIi11i + I1ii11iIi11i + II111iiii
if 96 - 96: iIii1I11I1II1 * II111iiii . iIii1I11I1II1
if 13 - 13: Ii1I - OoOoOO00 . Ii1I
if 7 - 7: Ii1I - I11i / I1ii11iIi11i + iII111i
if 47 - 47: I11i * IiII / oO0o - OoooooooOO . OoooooooOO / I11i
if 73 - 73: Ii1I . IiII % IiII
if 56 - 56: I1Ii111 + iII111i + iII111i
if 99 - 99: o0oOOo0O0Ooo % I1ii11iIi11i / Oo0Ooo . O0 + OoO0O00 * OoOoOO00
if 48 - 48: iIii1I11I1II1 + O0 * I11i * i11iIiiIii . Ii1I / i1IIi
if 48 - 48: i1IIi % iIii1I11I1II1 + I1IiiI - OoOoOO00 % I11i . I1Ii111
if 66 - 66: I1Ii111 * i11iIiiIii + I1IiiI % II111iiii
iiIi1iIi1i = False
if ( device_name ) :
i1i1 = lisp_get_host_route_next_hop ( OoOOoooO000 )
if 53 - 53: IiII / II111iiii / oO0o % O0 / I1Ii111
if 91 - 91: oO0o * OoOoOO00 + O0 % Oo0Ooo
if 62 - 62: iIii1I11I1II1 - i11iIiiIii % iIii1I11I1II1 . ooOoO0o / OOooOOo * OoOoOO00
if 45 - 45: OOooOOo - OOooOOo % iII111i - IiII . O0
if 6 - 6: iIii1I11I1II1 * II111iiii / O0 % IiII - I1Ii111
if 64 - 64: ooOoO0o
if 28 - 28: i11iIiiIii - IiII * I1ii11iIi11i + IiII * iII111i
if 75 - 75: o0oOOo0O0Ooo * OoOoOO00 % I1ii11iIi11i + OOooOOo . II111iiii
if 12 - 12: ooOoO0o
if ( port == LISP_CTRL_PORT and i1i1 != None ) :
while ( True ) :
time . sleep ( .01 )
i1i1 = lisp_get_host_route_next_hop ( OoOOoooO000 )
if ( i1i1 == None ) : break
if 83 - 83: I1Ii111 % ooOoO0o + OoooooooOO
if 50 - 50: i11iIiiIii % I1IiiI * iII111i / Ii1I
if 12 - 12: iII111i / OoO0O00 - II111iiii + Oo0Ooo
ooO0oO00O = lisp_get_default_route_next_hops ( )
for Ooooo , I1i1i1iIIiI11 in ooO0oO00O :
if ( Ooooo != device_name ) : continue
if 22 - 22: I11i / i11iIiiIii * II111iiii
if 52 - 52: o0oOOo0O0Ooo . O0 % I11i . iIii1I11I1II1 % iIii1I11I1II1 / I1Ii111
if 18 - 18: Ii1I * I1ii11iIi11i % I11i
if 50 - 50: Ii1I . I1ii11iIi11i + iIii1I11I1II1 * i11iIiiIii . iII111i
if 47 - 47: o0oOOo0O0Ooo * oO0o % I1ii11iIi11i
if 59 - 59: IiII
if ( i1i1 != I1i1i1iIIiI11 ) :
if ( i1i1 != None ) :
lisp_install_host_route ( OoOOoooO000 , i1i1 , False )
if 22 - 22: i11iIiiIii . oO0o * OoOoOO00 . OoooooooOO
lisp_install_host_route ( OoOOoooO000 , I1i1i1iIIiI11 , True )
iiIi1iIi1i = True
if 100 - 100: I1Ii111 + O0
break
if 69 - 69: | |
'.format(language=language)
if copyright != None:
cmd += '--copyright-notice {copyright} '.format(copyright=copyright)
cmd += '-i "{path}"?par={par} -i "{audio_path}"?encoder-delay={delay}, -o "{output}"'.format(path=path, par=par, audio_path=audio_path, delay=delay, output=output)
print(cmd)
os.system('"%s"' % cmd)
return output
##################################################################################################
### Function : Demuxer
### Author : ema
### Version : v0.1
### Release : 2018.01.28
##################################################################################################
### Demux a clip of some track(a/v).
###
### paths [dict]
### ------------------
### Clip file list.
###
### audio_track [int, Default: 1]
### ------------------
### Demux number of audio track.
###
### video_track [int, Default: None]
### ------------------
### Demux number of video track.
###
### demux_bin [str, Default: 'ffmpeg']
### ------------------
### Demuxer(FFmpeg) exec file path.
###
### example:
### ------------------
### qvs.Demuxer('H:/qvs/*.mp4', audio_track=0)
##################################################################################################
def Demuxer(paths, audio_track=0, video_track=None, demux_bin='ffmpeg'):
    """Demux one track (audio or video) out of each clip with FFmpeg.

    paths       : str/list/dict of clip file paths ('*'/'?' wildcards supported).
    audio_track : index of the audio track to demux, or None to skip audio.
                  When not None it takes precedence over video_track.
    video_track : index of the video track to demux, or None.
    demux_bin   : FFmpeg executable name/path.

    Returns the list of demuxed output file paths.
    """
    def _demux_track(input, stream_filter, track_type, track_num):
        # Stream-copy a single track; output keeps the source container extension.
        output = '{path}_{track_type}{track_num}.{ext}'.format(path=RemoveFileExt(input), track_type=track_type, track_num=track_num, ext=input.split('.')[-1])
        cmd = '{demux_bin} -i "{input}" {filter} copy -y -map 0:{track_type}:{track_num} "{output}"'.format(demux_bin=FindFirstFilePath(demux_bin), input=input, filter=stream_filter, track_type=track_type, track_num=track_num, output=output)
        print('[cmd]{cmd}'.format(cmd=cmd))
        # FIX: the audio branch previously ran os.system(cmd) unquoted while the
        # video branch (and every other wrapper in this file) quoted the whole
        # command; unified on the quoted form.
        os.system('"%s"' % cmd)
        return output
    paths = ToDict(paths)
    # Expand the first wildcard pattern into the matching file list
    # (original semantics: one pattern replaces the whole list).
    for path in paths:
        if (path.find('*') != -1) or (path.find('?') != -1):
            paths = GetEach(path)
            break
    outputs = []
    for path in paths:
        if audio_track is not None:
            outputs.append(_demux_track(path, '-vn -sn -c:a', 'a', str(audio_track)))
        elif video_track is not None:
            outputs.append(_demux_track(path, '-an -sn -c:v', 'v', str(video_track)))
    return outputs
##################################################################################################
### Function : TsDemuxer
### Author : ema
### Version : v0.1
### Release : 2018.01.28
##################################################################################################
### Demux a clip of some track(a/v).
### Only supports for *.ts.
###
### paths [dict]
### ------------------
### Clip file list.
###
### audio_track [int, Default: 1]
### ------------------
### Demux number of audio track.
###
### video_track [int, Default: None]
### ------------------
### Demux number of video track.
###
### demux_bin [str, Default: 'tsdemux']
### ------------------
### Demuxer(tsdemux) exec file path.
###
### example:
### ------------------
### qvs.TsDemuxer('H:/qvs/*.ts', encode=1)
##################################################################################################
def TsDemuxer(paths, encode=1, demux_bin='tsdemux'):
    """Demux audio/video tracks from *.ts clips with tsdemux.

    paths     : str/list/dict of clip paths ('*'/'?' wildcards supported).
    encode    : output mode — 1:'aac', 2:'m2v+aac', 3:'wav', 4:'m2v+wav';
                out-of-range values fall back to 1.
    demux_bin : tsdemux executable name/path.

    Returns the list of input paths that were processed.  (tsdemux chooses
    its own output file names, so the exact demuxed paths are not known
    here; the original code appended an undefined name `output`, which
    raised NameError on every call.)
    """
    paths = ToDict(paths)
    # Expand the first wildcard pattern into the matching file list.
    for path in paths:
        if (path.find('*') != -1) or (path.find('?') != -1):
            paths = GetEach(path)
            break
    # Clamp the encode mode once, then map it to tsdemux's encode keyword.
    if (encode > 4) or (encode < 1):
        encode = 1
    encode_dict = {1: 'aac', 2: 'm2v+aac', 3: 'wav', 4: 'm2v+wav'}
    encode_type = encode_dict[encode]
    outputs = []
    for input in paths:
        cmd = '{demux_bin} -i "{input}" -encode Demux({encode})'.format(demux_bin=FindFirstFilePath(demux_bin), input=input, encode=encode_type)
        print('[cmd]{cmd}'.format(cmd=cmd))
        os.system('"%s"' % cmd)
        # BUG FIX: was `outputs.append(output)` with `output` never defined.
        outputs.append(input)
    return outputs
##################################################################################################
### Function : EacDemuxer
### Author : ema
### Version : v0.1a
### Release : 2018.02.14
##################################################################################################
### Demux a clip of some track(a/v).
### Only supports for *.vob, *.m2ts, *.ts, *.mkv.
###
### paths [dict]
### ------------------
### Clip file list.
###
### track [int, Default: 2]
### ------------------
### Demux number of track in all tracks.
###
### track_list [bint, Default: False]
### ------------------
### Show all tracks in cui.
### It will not demux anything when true.
###
### out_ext [int, Default: None]
### ------------------
### Set the output file ext.
###
### demux_bin [str, Default: 'eac3to']
### ------------------
### Demuxer(eac3to) exec file path.
###
### log [bint, Default: False]
### ------------------
### Auto remove the log file(*Log.txt).
###
### ret_delay [bint, Default: False]
### ------------------
### Return the first path delay value which is in paths.
###
### example:
### ------------------
### qvs.EacDemuxer('H:/qvs/*.ts', track=2, out_ext=None)
##################################################################################################
def EacDemuxer(paths, track=2, track_list=False, out_ext=None, demux_bin='eac3to', log=False, ret_delay=False):
    """Demux one track from each clip with eac3to (*.vob/*.m2ts/*.ts/*.mkv).

    First runs `eac3to <input>` to list the tracks, parsing the chosen
    track's format and any delay (in ms) from the listing; then demuxes
    that track to a file whose name embeds the delay.  With
    track_list=True it only prints the listing and demuxes nothing.
    With ret_delay=True it returns the first parsed delay value instead.
    Returns the list of demuxed output paths.
    """
    paths = ToDict(paths)
    check_paths = False
    # Expand a wildcard pattern ('*' or '?') into the matching file list.
    for path in paths:
        if (path.find('*') != -1) | (path.find('?') != -1):
            _paths = GetEach(path)
            check_paths = True
            break
    if check_paths:
        paths = _paths
    outputs = []
    for path in paths:
        #Gain the real format and the delay(ms) value.
        input = path
        cmd = '{demux_bin} "{input}"'.format(demux_bin=FindFirstFilePath(demux_bin), input=input)
        # eac3to is a Windows tool; it expects backslash path separators.
        cmd = cmd.replace('/', '\\')
        print('[cmd]{cmd}'.format(cmd=cmd))
        result = os.popen(cmd)
        res = result.read()
        delay = None
        n = 0
        # Scan the track listing line by line; `n` counts lines so the
        # requested `track` line can be picked out and parsed.
        for line in res.splitlines():
            if track_list:
                print(line.strip())
            else:
                if n == track:
                    line = line.strip()
                    line_index = line.find(':')
                    print('[track]{line}'.format(line=line[line_index - 1 : len(line)]))
                    # The format name is the first comma-separated field
                    # after the "N:" prefix (e.g. "2: AC3, 5.1, ...").
                    _format = line.split(',')[0]
                    format = _format[_format.find(' ') : len(_format)]
                    format = format.strip().lower()
                    print('[format]{format}'.format(format=format))
                    # Any field ending in "ms" carries the track delay.
                    for text in line.split(','):
                        text = text.strip()
                        if text.find('ms') != -1:
                            delay = text[0 : len(text) - 2]
                    if delay != None:
                        print('[delay]{delay}'.format(delay=delay))
                        if ret_delay:
                            return delay
                n += 1
        if track_list:
            continue
        #Demux the track.
        # NOTE(review): once set here, out_ext persists for the remaining
        # paths in this call — presumably intentional; verify.
        if out_ext == None:
            # Map eac3to's format description to an output extension.
            format_dict = {
                'h264/avc' : 'h264',
                'mpegh/iso/hevc' : 'hevc',
                'raw/pcm' : 'wav',
                'chapters' : 'txt',
            }
            if format in format_dict.keys():
                ext = format_dict[format]
            else:
                ext = format.lower()
            out_ext = ext
        # Embed the parsed delay in the output name, eac3to-style.
        if delay == None:
            output = '{path} track{track}.{ext}'.format(path=RemoveFileExt(input), track=str(track), ext=out_ext)
        else:
            output = '{path} track{track} DELAY {delay}ms.{ext}'.format(path=RemoveFileExt(input), delay=delay, track=str(track), ext=out_ext)
        cmd = '{demux_bin} "{input}" {track}: "{output}"'.format(demux_bin=FindFirstFilePath(demux_bin), input=input, track=track, output=output)
        cmd = cmd.replace('/', '\\')
        print('[cmd]{cmd}'.format(cmd=cmd))
        os.system('"%s"' % cmd)
        #Remove the log file.
        if log is False:
            try:
                os.remove('{path} - Log.txt'.format(path=RemoveFileExt(output)))
            except:
                pass
        outputs.append(output)
    return outputs
##################################################################################################
### Function : AudioEnc
### Author : ema
### Version : v0.1
### Release : 2018.01.28
##################################################################################################
### Demux a clip of some track(a/v).
###
### paths [dict]
### ------------------
### Clip file list.
###
### bitrate [int, Default: 160]
### ------------------
### Audio bitrate(kbps).
###
### enc [int, Default: 1]
### ------------------
### 1:aac -> bitrate(v)
### 2:flac -> bitrate(x)
### 3:alac -> bitrate(x)
###
### enc_bin [str, Default: 'qaac']
### ------------------
### Encoder(qaac) exec file path.
###
### enc_flac_bin [str, Default: 'flac']
### ------------------
### Encoder(flac) exec file path.
###
### enc_alac_bin [str, Default: 'refalac']
### ------------------
### Encoder(refalac) exec file path.
###
### pipe_bin [str, Default: 'ffmpeg']
### ------------------
### Pipe(FFmpeg) exec file path.
###
### example:
### ------------------
### qvs.AudioEnc('H:/qvs/*.m2ts', bitrate=160, enc=1)
##################################################################################################
def AudioEnc(paths, bitrate=160, enc=1, enc_bin='qaac', enc_flac_bin='flac', enc_alac_bin='refalac', pipe_bin='ffmpeg'):
paths = ToDict(paths)
check_paths = False
for path in paths:
if (path.find('*') != -1) | (path.find('?') != -1):
_paths = GetEach(path)
check_paths = True
break
if check_paths:
paths = _paths
enc_dict = {1:'aac', 2:'flac', 3:'alac'}
if (enc > 3) | (enc < 1):
enc = 1
enc_type = enc_dict[enc]
outputs = []
for path in paths:
input = path
output = '{path}.{enc_type}'.format(path=RemoveFileExt(input), enc_type=enc_type)
pipe_cmd = '{pipe_bin} -i "{input}" -vn -sn -v 0 -c:a pcm_s16le -f wav pipe: |'.format(pipe_bin=FindFirstFilePath(pipe_bin), input=input)
if enc == 1:
cmd = '{enc_bin} -q 2 --ignorelength -c {bitrate} - -o "{output}"'.format(enc_bin=pipe_cmd+FindFirstFilePath(enc_bin), bitrate=str(bitrate), output=output)
elif enc == 2:
cmd = '{enc_bin} -f --ignore-chunk-sizes -5 - -o "{output}"'.format(enc_bin=pipe_cmd+FindFirstFilePath(enc_flac_bin), output=output)
elif enc == 3:
cmd = '{enc_bin} -f --ignorelength - -o "{output}"'.format(enc_bin=pipe_cmd+FindFirstFilePath(enc_alac_bin), output=output)
print('[cmd]{cmd}'.format(cmd=cmd))
os.system('"%s"' % cmd)
outputs.append(output)
return outputs
##################################################################################################
### Function : AudioTrimFF
### Function : AudioTrimFFSingle
### Function : AudioTrims
### Author : ema
### Version : v0.1
### Release : 2018.02.04
##################################################################################################
### Trim a audio clip losslessly with ffmpeg.
###
### path [str]
### ------------------
### Audio clip file path.
###
### clip [clip, Default: None]
### ------------------
### Video clip (for get fps value).
###
### codec [str, Default: 'aac']
### ------------------
### Ext of output file.
###
### delay [int, Default: None]
### ------------------
### Audio clip fixed delay value.
###
### fps [str, Default: None]
### ------------------
### Convert frame to timecode with fps value.
###
### enc_bin [str, Default: 'ffmpeg']
### ------------------
### Enc(FFmpeg) exec file path.
###
### example1:
### ------------------
### clip = qvs.D2VSource('Video.ts')
### frame = [[100,200], [1000,2000]]
### qvs.AudioTrimFF('Audio DELAY -99ms.aac', frame=frame, clip=clip, delay=-99)
###
### example2:
### ------------------
### frame = [[100,200], [1000,2000]]
### qvs.AudioTrimFF('Audio DELAY -99ms.aac', frame=frame, fps=29.970, delay=-99)
##################################################################################################
def AudioTrimFF(path, frame=[], clip=None, codec='aac', delay=None, fps=None, enc_bin='ffmpeg'):
    """Losslessly trim an audio file to the given frame ranges and concat them.

    path  : audio file path; a "DELAY <n>ms" tag in the name is parsed as
            the source delay.
    frame : list of [start_frame, end_frame] pairs to keep.
    clip  : video clip used to derive fps when fps is not given.
    codec : output file extension.
    delay : overrides the delay parsed from the file name.
    fps   : frames-per-second used to convert frame numbers to timecodes.
    Returns the concatenated output file path.
    """
    input = path
    if not os.path.isfile(input):
        raise ValueError('AudioTrimFF: param of path is not a file path.')
    # Parse a "DELAY <n>ms" tag embedded in the file name, if present.
    index_delay = input.lower().find('delay')
    index_ms = input.lower().find('ms')
    delay_orgin = None
    if (index_delay != -1) & (index_ms != -1):
        try:
            delay_orgin = int(input[(index_delay + 5):index_ms].strip())
        except:
            pass
    # Explicit delay wins; else the parsed tag; else 0.
    delay = Default(delay, Default(delay_orgin, 0))
    # Cut each requested frame range into its own numbered split file.
    outputs = []
    n = 1
    for f in frame:
        start_frame = f[0]
        end_frame = f[1]
        outputs.append(AudioTrimFFSingle(path=path, start_frame=start_frame, end_frame=end_frame, clip=clip, codec=codec, delay=delay, fps=fps, enc_bin=FindFirstFilePath(enc_bin), n=n))
        n += 1
    # Join the splits with ffmpeg's concat protocol ("a|b|c").
    concat = ''
    for o in outputs:
        concat = '{concat}|{o}'.format(concat=concat, o=o)
    concat = concat[1:len(concat)]
    # Rewrite the DELAY tag in the output name to the delay left over
    # after the applied correction.
    if delay_orgin != None:
        input = input[0:(index_delay + 5)] + ' ' + str(delay_orgin-delay) + input[index_ms:len(input)]
    output = '{path}_split.{codec}'.format(path=RemoveFileExt(input), n=str(n), codec=codec)
    cmd = '{enc_bin} -i "concat:{concat}" -c copy "{output}" -y'.format(enc_bin=FindFirstFilePath(enc_bin), concat=concat, output=output)
    print('[cmd]{cmd}'.format(cmd=cmd))
    os.system('"%s"' % cmd)
    # Clean up the intermediate split files.
    for o in outputs:
        try:
            os.remove(o)
        except:
            pass
    return output
def AudioTrimFFSingle(path, start_frame, end_frame, clip=None, codec='aac', delay=0, fps=None, enc_bin='ffmpeg', n=1):
    """Losslessly cut one [start_frame, end_frame] span out of an audio file.

    Frame numbers are converted to timecodes using fps; an explicitly
    passed fps takes precedence over the clip's fps.  Returns the path of
    the written split file ("<name>_split<n>.<codec>").
    """
    if clip is not None:
        clip_fps = clip.fps_num / clip.fps_den
    elif fps is None:
        raise ValueError('AudioTrim: clip or fps is none!')
    else:
        clip_fps = None
    fps = Default(fps, clip_fps)
    start_tc = ConvertToTimecode(start_frame, fps, delay)
    end_tc = ConvertToTimecode(end_frame, fps, delay)
    output = '{path}_split{n}.{codec}'.format(path=RemoveFileExt(path), n=str(n), codec=codec)
    cmd = '{enc_bin} -i "{input}" -vn -acodec copy -ss {start_tc} -to {end_tc} "{output}" -y'.format(enc_bin=enc_bin, input=path, start_tc=start_tc, end_tc=end_tc, output=output)
    print('[cmd]{cmd}'.format(cmd=cmd))
    os.system('"%s"' % cmd)
    return output
def AudioTrims(clip=None, path=None, frame=[], codec=None, delay=None, fps=None, enc_bin='ffmpeg', ret_output=False):
    """Trim an audio file over several frame ranges via AudioTrimFF.

    The output codec is taken from the input file's extension.  Returns
    the output path when ret_output is True, otherwise the (unmodified)
    clip so the call can sit inside a filter chain.
    """
    if path is None:
        raise ValueError(path)
    _folder, _basename, ext = PathSplit(path)
    output = AudioTrimFF(path=path, frame=frame, clip=clip, codec=ext.replace('.', ''), delay=delay, fps=fps, enc_bin=enc_bin)
    return output if ret_output else clip
##################################################################################################
### Function : AudioTrimAVS
### Author : ema
### Version : v0.1
### Release : 2018.02.05
##################################################################################################
### AudioTrim (from Avisynth v2.60) trims a clip based on time, not on frames.
### This is most useful for audio-only clips, where "frames" have no meaning anyway,
### and you may want to edit with finer precision than whole frames (at 30fps, 1 frame=33.3ms).
### Return the output avisynth script path.
###
### path [str]
### ------------------
### Audio clip file path.
###
### clip [clip, Default: None]
### ------------------
### Video clip (for get fps value).
###
### fps [str, Default: None]
### ------------------
### Convert frame to timecode with fps value.
###
### delay [int, Default: None]
### ------------------
### Audio clip fixed delay value.
###
### plugin_path [str, Default: 'LSMASHSource.dll']
### ------------------
### The plugin path of avisynth for audio source.
###
### func_name [str, Default: 'LWLibavAudioSource']
### ------------------
### The function name of avisynth for audio source.
###
### example:
### ------------------
### frame = [[933,40642],[44239,51282]]
### path = qvs.AudioTrimAVS('Audio DELAY -173ms.aac', frame=frame, fps=29.970)
### qvs.AudioEnc(path, bitrate=160, enc=1)
##################################################################################################
def AudioTrimAVS(path, frame=[], clip=None, fps=None, delay=None, plugin_path='LSMASHSource.dll', func_name='LWLibavAudioSource'):
def _ToTime(frame, fps, delay=0):
frame_delay = int(fps * delay / 1000)
frame -= frame_delay
if frame < 0:
frame = 0
time = frame / fps
return time
input = path
if not clip is None:
clip_fps = clip.fps_num / clip.fps_den
else:
if fps is None:
raise ValueError('AudioTrim: clip or fps is none!')
clip_fps = None
fps = Default(fps, clip_fps)
index_delay = input.lower().find('delay')
index_ms = input.lower().find('ms')
delay_orgin = None
if (index_delay != -1) & (index_ms != -1):
try:
delay_orgin = int(input[(index_delay + 5):index_ms].strip())
except:
pass
delay = Default(delay, Default(delay_orgin, 0))
audio_trim_list | |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""High-level wrapper for datastore queries.
The fundamental API here overloads the 6 comparison operators to represent
filters on property values, and supports AND and OR operations (implemented as
functions -- Python's 'and' and 'or' operators cannot be overloaded, and the
'&' and '|' operators have a priority that conflicts with the priority of
comparison operators).
For example::
class Employee(Model):
name = StringProperty()
age = IntegerProperty()
rank = IntegerProperty()
@classmethod
def demographic(cls, min_age, max_age):
return cls.query().filter(AND(cls.age >= min_age,
cls.age <= max_age))
@classmethod
def ranked(cls, rank):
return cls.query(cls.rank == rank).order(cls.age)
for emp in Employee.demographic(42, 50):
print emp.name, emp.age, emp.rank
The 'in' operator cannot be overloaded, but is supported through the IN()
method. For example::
Employee.query().filter(Employee.rank.IN([4, 5, 6]))
Sort orders are supported through the order() method; unary minus is
overloaded on the Property class to represent a descending order::
Employee.query().order(Employee.name, -Employee.age)
Besides using AND() and OR(), filters can also be combined by repeatedly
calling .filter()::
query1 = Employee.query() # A query that returns all employees
query2 = query1.filter(Employee.age >= 30) # Only those over 30
query3 = query2.filter(Employee.age < 40) # Only those in their 30s
A further shortcut is calling .filter() with multiple arguments; this implies
AND()::
query1 = Employee.query() # A query that returns all employees
query3 = query1.filter(Employee.age >= 30,
Employee.age < 40) # Only those in their 30s
And finally you can also pass one or more filter expressions directly to the
.query() method::
query3 = Employee.query(Employee.age >= 30,
Employee.age < 40) # Only those in their 30s
Query objects are immutable, so these methods always return a new Query object;
the above calls to filter() do not affect query1. On the other hand, operations
that are effectively no-ops may return the original Query object.
Sort orders can also be combined this way, and .filter() and .order() calls may
be intermixed::
query4 = query3.order(-Employee.age)
query5 = query4.order(Employee.name)
query6 = query5.filter(Employee.rank == 5)
Again, multiple .order() calls can be combined::
query5 = query3.order(-Employee.age, Employee.name)
The simplest way to retrieve Query results is a for-loop::
for emp in query3:
print emp.name, emp.age
Some other methods to run a query and access its results::
:meth:`Query.iter`() # Return an iterator; same as iter(q) but more
flexible.
:meth:`Query.fetch`(N) # Return a list of the first N results
:meth:`Query.get`() # Return the first result
:meth:`Query.count`(N) # Return the number of results, with a maximum of N
:meth:`Query.fetch_page`(N, start_cursor=cursor) # Return (results, cursor,
has_more)
All of the above methods take a standard set of additional query options,
either in the form of keyword arguments such as keys_only=True, or as
QueryOptions object passed with options=QueryOptions(...). The most important
query options are:
- keys_only: bool, if set the results are keys instead of entities.
- limit: int, limits the number of results returned.
- offset: int, skips this many results first.
- start_cursor: Cursor, start returning results after this position.
- end_cursor: Cursor, stop returning results after this position.
- batch_size: int, hint for the number of results returned per RPC.
- prefetch_size: int, hint for the number of results in the first RPC.
- produce_cursors: bool, return Cursor objects with the results.
All of the above methods except for iter() have asynchronous variants as well,
which return a Future; to get the operation's ultimate result, yield the Future
(when inside a tasklet) or call the Future's get_result() method (outside a
tasklet)::
:meth:`Query.fetch_async`(N)
:meth:`Query.get_async`()
:meth:`Query.count_async`(N)
:meth:`Query.fetch_page_async`(N, start_cursor=cursor)
Finally, there's an idiom to efficiently loop over the Query results in a
tasklet, properly yielding when appropriate::
it = query1.iter()
while (yield it.has_next_async()):
emp = it.next()
print emp.name, emp.age
"""
import functools
import inspect
import logging
from google.cloud.ndb import context as context_module
from google.cloud.ndb import _datastore_api
from google.cloud.ndb import _datastore_query
from google.cloud.ndb import _gql
from google.cloud.ndb import exceptions
from google.cloud.ndb import model
from google.cloud.ndb import _options
from google.cloud.ndb import tasklets
__all__ = [
"QueryOptions",
"PropertyOrder",
"RepeatedStructuredPropertyPredicate",
"ParameterizedThing",
"Parameter",
"ParameterizedFunction",
"Node",
"FalseNode",
"ParameterNode",
"FilterNode",
"PostFilterNode",
"ConjunctionNode",
"DisjunctionNode",
"AND",
"OR",
"Query",
"gql",
]
# Comparison-operator tokens used when building filter nodes.
_EQ_OP = "="
_NE_OP = "!="
_IN_OP = "in"
_LT_OP = "<"
_GT_OP = ">"
# Full set of operators accepted in filter expressions.
_OPS = frozenset([_EQ_OP, _NE_OP, _LT_OP, "<=", _GT_OP, ">=", _IN_OP])
# Module-level logger for query diagnostics.
_log = logging.getLogger(__name__)
class PropertyOrder(object):
    """Sort order for a single property in query results.

    Args:
        name (str): Name of the model property to order by.
        reverse (bool): True for descending order, False (default) for
            ascending.
    """

    __slots__ = ["name", "reverse"]

    def __init__(self, name, reverse=False):
        self.name = name
        self.reverse = reverse

    def __repr__(self):
        return "PropertyOrder(name='{}', reverse={})".format(self.name, self.reverse)

    def __neg__(self):
        # Unary minus flips ascending <-> descending, mirroring -Model.prop.
        return type(self)(name=self.name, reverse=not self.reverse)
class RepeatedStructuredPropertyPredicate:
    """A predicate for querying repeated structured properties.
    Called by ``model.StructuredProperty._compare``. This is used to handle
    queries of the form::
        Squad.query(Squad.members == Member(name="Joe", age=24, rank=5))
    This query should find any squad with a member named "Joe" whose age is 24
    and rank is 5.
    Datastore, on its own, can find all squads with a team member named Joe, or
    a team member whose age is 24, or whose rank is 5, but it can't be queried
    for all 3 in a single subentity. This predicate must be applied client
    side, therefore, to limit results to entities where all the keys match for
    a single subentity.
    Arguments:
        name (str): Name of the repeated structured property being queried
            (e.g. "members").
        match_keys (list[str]): Property names to check on the subentities
            being queried (e.g. ["name", "age", "rank"]).
        entity_pb (google.cloud.datastore_v1.proto.entity_pb2.Entity): A
            partial entity protocol buffer containing the values that must
            match in a subentity of the repeated structured property. Should
            contain a value for each key in ``match_keys``.
    """

    __slots__ = ["name", "match_keys", "match_values"]

    def __init__(self, name, match_keys, entity_pb):
        self.name = name
        self.match_keys = match_keys
        # Pre-extract the value protobufs to compare against, in key order.
        self.match_values = [entity_pb.properties[key] for key in match_keys]

    def __call__(self, entity_pb):
        # Return True if any single subentity matches ALL match_keys.
        prop_pb = entity_pb.properties.get(self.name)
        if prop_pb:
            # Native representation: the repeated property holds an array of
            # embedded entities; compare each one's values in key order.
            subentities = prop_pb.array_value.values
            for subentity in subentities:
                properties = subentity.entity_value.properties
                values = [properties.get(key) for key in self.match_keys]
                if values == self.match_values:
                    return True
        else:
            # Backwards compatibility. Legacy NDB, rather than using
            # Datastore's ability to embed subentities natively, used dotted
            # property names.
            prefix = self.name + "."
            subentities = ()
            # Reassemble subentity dicts by zipping the parallel arrays of
            # each "name.subprop" property back together by index.
            for prop_name, prop_pb in entity_pb.properties.items():
                if not prop_name.startswith(prefix):
                    continue
                subprop_name = prop_name.split(".", 1)[1]
                if not subentities:
                    # First dotted property seen: one dict per array element.
                    subentities = [
                        {subprop_name: value}
                        for value in prop_pb.array_value.values
                    ]
                else:
                    # Subsequent properties fill in the existing dicts.
                    for subentity, value in zip(
                        subentities, prop_pb.array_value.values
                    ):
                        subentity[subprop_name] = value
            for subentity in subentities:
                values = [subentity.get(key) for key in self.match_keys]
                if values == self.match_values:
                    return True
        return False
class ParameterizedThing:
    """Common base for :class:`Parameter` and :class:`ParameterizedFunction`.

    Exists solely so callers can perform a single :func:`isinstance` check.
    """

    def __eq__(self, other):
        # Subclasses must supply equality semantics.
        raise NotImplementedError

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is not NotImplemented:
            result = not result
        return result
class Parameter(ParameterizedThing):
"""Represents a bound variable in a GQL query.
``Parameter(1)`` corresponds to a slot labeled ``:1`` in a GQL query.
``Parameter('something')`` corresponds to a slot labeled ``:something``.
The value must be set (bound) separately.
Args:
key (Union[str, int]): The parameter key.
Raises:
TypeError: If the ``key`` is not a string or integer.
"""
__slots__ = ("_key",)
def __init__(self, key):
if not isinstance(key, (int, str)):
raise TypeError(
"Parameter key must be an integer or string, not {}".format(
key
)
)
self._key = key
def __repr__(self):
return "{}({!r})".format(type(self).__name__, self._key)
def __eq__(self, other):
if not isinstance(other, Parameter):
return NotImplemented
return self._key == other._key
@property
def key(self):
"""Retrieve the key."""
return self._key
def resolve(self, bindings, used):
"""Resolve the current parameter from the parameter bindings.
Args:
bindings (dict): A mapping of parameter bindings.
used (Dict[Union[str, int], bool]): A mapping of already used
parameters. This will be modified if the current parameter
is in ``bindings``.
Returns:
Any: The bound value for the current parameter.
Raises:
.BadArgumentError: If the current parameter is | |
# ========================================================================================
#
# Description:
#
# Emulates execution of VM code.
#
# Attribution:
#
# Code by www.jk-quantized.com
#
# Redistribution and use of this code in source and binary forms must retain
# the above attribution notice and this condition.
#
# ========================================================================================
'''
Purpose:
Emulation at a usable execution speed.
Description:
Faster than emulating binary code.
Infact, does not emulate nor use the Hack Computer's architecture.
Instead it executes the VM code using your machine's processor's architecture.
While running binary code is cycle accurate,
it is too slow in emulation (see cpuEmulator.py).
I'm sure there are optimizations to be made that can improve the performance
of the binary/CPU emulation. If you have any ideas, be sure to share
them because the best case scenario is for the binary emulator to execute at a
usable speed. Till then, this exists as an inbetween.
'''
# TODO - add stepping, GUI debugger (registers etc)
# Imports --------------------------
# Built ins
import re
import time
import yappi
# Hack computer
import Components
from commonHelpers import *
from .pythonNBitArithmetic import *
# Configure computer ---------------
# VMX file containing all necessary program code
programPath = ''                 # VMX file containing all necessary program code
debugPath = 'Debug/VMEmulator/'  # Folder where logs go
debugMode = False
runYappiProfile = False          # Enable yappi profiling of the run
# Setup computer -------------------
nBits = Components.N_BITS        # Machine word width, from the hardware components
ALU = NBitArithmetic( nBits )    # Arithmetic helper operating on nBits-wide words
PC = 0                           # Program counter (index into ROM)
PC_prev = 0                      # PC of the previously executed instruction
PC_jump = False                  # Set when the last instruction redirected PC
RAM = [ 0 ] * ( 2 ** 16 )        # 64K words of main memory
ROM = [] # Pseudo ROM, loaded with VM code
clock = None
io = None
startTime = None
sysHalt = None                   # presumably the ROM address of Sys.halt, set at load time — verify
yieldToExternal = False # Suspend tick
# Memory-map boundaries, taken from the shared Components constants.
static_segment_start = Components.STATIC_START
static_segment_end = Components.STATIC_END
stack_segment_start = Components.STACK_END
heap_segment_start = Components.HEAP_START
heap_segment_end = Components.HEAP_END
# Setup pointers -------------------
# RAM addresses of the VM's bookkeeping registers (standard Hack memory map).
SP = 0
LCL = 1
ARG = 2
THIS = 3
THAT = 4
TEMP = 5
# GP = 13
STATIC = 16
# IO Helpers ------------------------
class RAMWrapper():
    """Adapter giving a plain list the read/write interface of a RAM chip."""

    def __init__(self, ram):
        # Backing store: a plain Python list indexed by address.
        self.ram = ram

    def read(self, address):
        """Return the word stored at address (combinational read)."""
        return self.ram[address]

    def write(self, clk, x, write, address):
        """Latch x into address, but only when the clock is high and write is asserted."""
        if clk == 1 and write == 1:
            self.ram[address] = x
# VM Helpers ------------------------
# Operation keyword sets; set membership is marginally faster to test than a list.
unaryOps = set( [ 'not', 'neg' ] )
binaryOps = set( [ 'and', 'or', 'add', 'sub', 'xor', 'lsl', 'lsr', 'mul', 'div' ] )
comparisonOps = set( [ 'eq', 'gt', 'lt', 'gte', 'lte', 'ne' ] )
operations = unaryOps | binaryOps | comparisonOps
addressLookup = {}  # presumably maps labels/function names to ROM indices — not used in this chunk; verify
staticLookup = {}   # maps a static variable's symbol (cmd[3] in push/pop) to its RAM address
# VM instructions -------------------
def executeInstruction( cmd ):
	'''Decode one parsed VM command tuple and invoke its handler.

	   cmd[ 0 ] is the command name; remaining entries are its arguments.
	   Raises for anything that is not a known VM command.
	'''
	kind = cmd[ 0 ]
	if kind == 'push':
		push( cmd[ 1 ], cmd[ 2 ], cmd )
		return
	if kind == 'pop':
		pop( cmd[ 1 ], cmd[ 2 ], cmd )
		return
	if kind in operations:
		operation( kind )
		return
	if kind == 'goto':
		goto( cmd[ 1 ] )
		return
	if kind == 'if-goto':
		ifgoto( cmd[ 1 ] )
		return
	if kind == 'call':
		call( cmd[ 1 ], cmd[ 2 ] )
		return
	if kind == 'return':
		ret()
		return
	if kind == 'label':
		label( cmd[ 1 ] )
		return
	if kind == 'function':
		function( cmd[ 1 ], cmd[ 2 ] )
		return
	raise Exception( "Don't know how to execute the command - {}".format( cmd ) )
def push( seg, index, cmd ):
	'''Push one value onto the VM stack.

	   Resolves the source segment to a value, writes it at the current
	   stack top, then advances the stack pointer.
	'''
	top = RAM[ SP ]
	if seg == 'constant':
		value = index
	elif seg == 'pointer':
		value = RAM[ THIS ] if index == 0 else RAM[ THAT ]
	elif seg == 'static':
		value = RAM[ staticLookup[ cmd[ 3 ] ] ]
	elif seg == 'temp':
		value = RAM[ TEMP + index ]
	elif seg == 'argument':
		value = RAM[ RAM[ ARG ] + index ]
	elif seg == 'local':
		value = RAM[ RAM[ LCL ] + index ]
	elif seg == 'this':
		value = RAM[ RAM[ THIS ] + index ]
	elif seg == 'that':
		value = RAM[ RAM[ THAT ] + index ]
	else:
		raise Exception( 'Unknown segment - {}'.format( seg ) )
	RAM[ top ] = value
	# Update SP
	RAM[ SP ] += 1
	# if RAM[ SP ] >= heap_segment_start:
	# 	raiseException( 'Stack overflow' )
def pop( seg, index, cmd ):
	'''Pop the top of the VM stack into the addressed segment slot.'''
	top = RAM[ SP ] - 1
	value = RAM[ top ]
	# Resolve the destination RAM address, then store once
	if seg == 'pointer':
		dest = THIS if index == 0 else THAT
	elif seg == 'static':
		dest = staticLookup[ cmd[ 3 ] ]
	elif seg == 'temp':
		dest = TEMP + index
	elif seg == 'argument':
		dest = RAM[ ARG ] + index
	elif seg == 'local':
		dest = RAM[ LCL ] + index
	elif seg == 'this':
		dest = RAM[ THIS ] + index
	elif seg == 'that':
		dest = RAM[ THAT ] + index
	else:
		raise Exception( 'Unknown segment - {}'.format( seg ) )
	RAM[ dest ] = value
	# Update SP
	RAM[ SP ] -= 1
def operation( op ):
	'''Apply the stack operation named by op via the software ALU.

	   Unary ops rewrite the top cell in place; binary and comparison
	   ops consume the top two cells and leave one result.
	'''
	if op in unaryOps:
		top = RAM[ SP ] - 1
		if op == 'not':
			RAM[ top ] = ALU._not( RAM[ top ] )
		else:
			RAM[ top ] = ALU._neg( RAM[ top ] )
		return
	addr_a = RAM[ SP ] - 2
	addr_b = RAM[ SP ] - 1
	a = RAM[ addr_a ]
	b = RAM[ addr_b ]
	if op in binaryOps:
		if op == 'add': result = ALU._add( a, b )
		elif op == 'sub': result = ALU._sub( a, b )
		elif op == 'and': result = ALU._and( a, b )
		elif op == 'or': result = ALU._or( a, b )
		elif op == 'xor': result = ALU._xor( a, b )
		elif op == 'lsl': result = ALU._lsl( a, b )
		elif op == 'lsr': result = ALU._lsr( a, b )
		elif op == 'mul': result = ALU._mul( a, b )
		else: result = ALU._div( a, b )
		RAM[ addr_a ] = result
		# Update SP
		RAM[ SP ] -= 1
	elif op in comparisonOps:
		if op == 'eq': hit = ALU._eq( a, b )
		elif op == 'ne': hit = ALU._ne( a, b )
		elif op == 'gt': hit = ALU._gt( a, b )
		elif op == 'gte': hit = ALU._gte( a, b )
		elif op == 'lt': hit = ALU._lt( a, b )
		else: hit = ALU._lte( a, b )
		# All ones on success so that a following 'not' yields all zeros
		RAM[ addr_a ] = negativeOne if hit else 0
		# Update SP
		RAM[ SP ] -= 1
def goto( loc ):
	'''Unconditional jump: point PC at the address recorded for loc.'''
	global PC, PC_jump
	PC = addressLookup[ loc ]
	PC_jump = True
def ifgoto( loc ):
	'''Conditional jump: pop the stack top and jump when it is non-zero.'''
	global PC, PC_jump
	top = RAM[ SP ] - 1
	if RAM[ top ] != 0:
		PC = addressLookup[ loc ]
		PC_jump = True
	# Update SP
	RAM[ SP ] -= 1
def call( fxName, nArgs ):
	'''Call a VM function: save the caller's frame and jump to fxName.'''
	frame = RAM[ SP ]
	# Saved frame layout: return address, then LCL, ARG, THIS, THAT
	RAM[ frame ] = PC + 1
	for offset, ptr in enumerate( ( LCL, ARG, THIS, THAT ), 1 ):
		RAM[ frame + offset ] = RAM[ ptr ]
	# ARG -> first argument (pushed before the call), computed from the
	# pre-call stack pointer
	RAM[ ARG ] = RAM[ SP ] - nArgs
	# LCL and SP both start just past the saved frame
	newBase = frame + 5
	RAM[ LCL ] = newBase
	RAM[ SP ] = newBase
	# Goto function
	goto( fxName )
def ret():
	'''Return from the current VM function.

	   Copies the return value into the caller's arg0 slot, restores the
	   caller's segment pointers from the saved frame, and jumps back to
	   the saved return address.
	'''
	global PC, PC_jump, yieldToExternal
	frame = RAM[ LCL ]
	# Grab the return address first: with zero arguments the return-value
	# write below lands on the very cell that holds it
	retAddr = RAM[ frame - 5 ]
	# Return value goes where arg0 lived; caller's stack resumes past it
	callerTop = RAM[ ARG ]
	RAM[ callerTop ] = RAM[ RAM[ SP ] - 1 ]
	RAM[ SP ] = callerTop + 1
	# Restore the caller's saved segment pointers
	RAM[ THAT ] = RAM[ frame - 1 ]
	RAM[ THIS ] = RAM[ frame - 2 ]
	RAM[ ARG ] = RAM[ frame - 3 ]
	RAM[ LCL ] = RAM[ frame - 4 ]
	# Jump to return position
	PC = retAddr
	PC_jump = True
	yieldToExternal = False # temp...
def label( loc ): pass # No-op at runtime; labels are presumably resolved into addressLookup at load time — confirm
def function( fxName, nLocals ):
	'''Enter a VM function: zero its locals and run any OS wrapper.'''
	global yieldToExternal
	# Initialize the function's locals to zero and grow the stack past them
	base = RAM[ LCL ]
	for offset in range( nLocals ):
		RAM[ base + offset ] = 0
	RAM[ SP ] += nLocals
	# Native python implementations of OS routines, when present, run now
	# and suspend normal ticking until they hand control back
	if fxName in OSWrappers:
		yieldToExternal = True
		OSWrappers[ fxName ]()
# OS Wrappers -----------------------
# Sys ---
def Sys_wait():
# Retrieve args ---
argBase = RAM[ ARG ]
duration = RAM[ argBase ]
# Subroutine body ---
'''
if ( duration <= 0 ) {
Sys.error( 1 );
// Sys.raiseException( 'Sys.wait duration must be greater than zero' );
}
'''
if duration <= 0:
print( 'ERROR: Sys.wait duration must be greater than zero' )
# Halt program
haltOnError()
return
# print( 'About to sleep for | |
<reponame>abael/eli5
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from functools import partial
import re
from singledispatch import singledispatch
from typing import Any, Dict, List, Tuple
import numpy as np # type: ignore
import scipy.sparse as sp # type: ignore
from xgboost import ( # type: ignore
XGBClassifier,
XGBRegressor,
Booster,
DMatrix
)
from eli5.base import (
FeatureWeight, FeatureImportances, Explanation, TargetExplanation)
from eli5.explain import explain_weights, explain_prediction
from eli5.sklearn.text import add_weighted_spans
from eli5.sklearn.utils import (
add_intercept, get_feature_names, get_X, handle_vec, predict_proba)
from eli5.utils import (
argsort_k_largest_positive, get_target_display_names, mask, is_sparse_vector)
from eli5._decision_path import DECISION_PATHS_CAVEATS
from eli5._feature_weights import get_top_features
# Caveats appended to the decision-path descriptions below; this module-level
# name deliberately extends (and shadows) the imported
# eli5._decision_path.DECISION_PATHS_CAVEATS.
DECISION_PATHS_CAVEATS = """
Feature weights are calculated by following decision paths in trees
of an ensemble. Each leaf has an output score, and expected scores can also be
assigned to parent nodes. Contribution of one feature on the decision path
is how much expected score changes from parent to child. Weights of all features
sum to the output score of the estimator.
""" + DECISION_PATHS_CAVEATS
# Description used by explain_weights_xgboost.
DESCRIPTION_XGBOOST = """
XGBoost feature importances; values are numbers 0 <= x <= 1;
all values sum to 1.
"""
# Descriptions used by explain_prediction_xgboost, chosen by task type.
DESCRIPTION_CLF_MULTICLASS = """
Features with largest coefficients per class.
""" + DECISION_PATHS_CAVEATS
DESCRIPTION_CLF_BINARY = """
Features with largest coefficients.
""" + DECISION_PATHS_CAVEATS
DESCRIPTION_REGRESSION = DESCRIPTION_CLF_BINARY
@explain_weights.register(XGBClassifier)
@explain_weights.register(XGBRegressor)
@singledispatch
def explain_weights_xgboost(xgb,
                            vec=None,
                            top=20,
                            target_names=None,  # ignored
                            targets=None,  # ignored
                            feature_names=None,
                            feature_re=None,
                            feature_filter=None,
                            importance_type='gain',
                            ):
    """
    Return an explanation of an XGBoost estimator (via scikit-learn wrapper
    XGBClassifier or XGBRegressor) as feature importances.
    See :func:`eli5.explain_weights` for description of
    ``top``, ``feature_names``,
    ``feature_re`` and ``feature_filter`` parameters.
    ``target_names`` and ``targets`` parameters are ignored.
    Parameters
    ----------
    importance_type : str, optional
        A way to get feature importance. Possible values are:
        - 'gain' - the average gain of the feature when it is used in trees
          (default)
        - 'weight' - the number of times a feature is used to split the data
          across all trees
        - 'cover' - the average coverage of the feature when it is used in trees
    """
    # Normalized importance vector (sums to 1), one entry per booster feature.
    coef = _xgb_feature_importances(xgb, importance_type=importance_type)
    num_features = coef.shape[-1]
    feature_names = get_feature_names(
        xgb, vec, feature_names=feature_names, num_features=num_features)
    # Apply user-supplied name/regex filters; flt_indices maps the kept
    # features back into the importance vector.
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re)
    if flt_indices is not None:
        coef = coef[flt_indices]
    # Keep only the `top` largest strictly-positive importances.
    indices = argsort_k_largest_positive(coef, top)
    names, values = feature_names[indices], coef[indices]
    return Explanation(
        feature_importances=FeatureImportances(
            [FeatureWeight(*x) for x in zip(names, values)],
            # Count of non-zero importances that were cut off by `top`.
            remaining=np.count_nonzero(coef) - len(indices),
        ),
        description=DESCRIPTION_XGBOOST,
        estimator=repr(xgb),
        method='feature importances',
        is_regression=isinstance(xgb, XGBRegressor),
    )
@explain_prediction.register(XGBClassifier)
@explain_prediction.register(XGBRegressor)
@singledispatch
def explain_prediction_xgboost(
        xgb, doc,
        vec=None,
        top=None,
        top_targets=None,
        target_names=None,
        targets=None,
        feature_names=None,
        feature_re=None,
        feature_filter=None,
        vectorized=False,
        ):
    """ Return an explanation of XGBoost prediction (via scikit-learn wrapper
    XGBClassifier or XGBRegressor) as feature weights.
    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.
    ``vec`` is a vectorizer instance used to transform
    raw features to the input of the estimator ``xgb``
    (e.g. a fitted CountVectorizer instance); you can pass it
    instead of ``feature_names``.
    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    estimator. Set it to False if you're passing ``vec``,
    but ``doc`` is already vectorized.
    Method for determining feature importances follows an idea from
    http://blog.datadive.net/interpreting-random-forests/.
    Feature weights are calculated by following decision paths in trees
    of an ensemble.
    Each leaf has an output score, and expected scores can also be assigned
    to parent nodes.
    Contribution of one feature on the decision path is how much expected score
    changes from parent to child.
    Weights of all features sum to the output score of the estimator.
    """
    # NOTE(review): xgb.booster() is the old accessor (newer xgboost renamed
    # it to get_booster) — this module targets the older API throughout.
    num_features = len(xgb.booster().feature_names)
    vec, feature_names = handle_vec(
        xgb, doc, vec, vectorized, feature_names, num_features=num_features)
    if feature_names.bias_name is None:
        # XGBoost estimators do not have an intercept, but here we interpret
        # them as having an intercept
        feature_names.bias_name = '<BIAS>'
    X = get_X(doc, vec, vectorized=vectorized)
    if sp.issparse(X):
        # Work around XGBoost issue:
        # https://github.com/dmlc/xgboost/issues/1238#issuecomment-243872543
        X = X.tocsc()
    # Class probabilities (None for regressors / estimators without predict_proba).
    proba = predict_proba(xgb, X)
    # Per-target (score, feature-weight array) pairs from decision paths.
    scores_weights = _prediction_feature_weights(xgb, X, feature_names)
    # Single explained row, with a bias column appended by add_intercept.
    x, = add_intercept(X)
    # Display entries equal to xgb.missing (and absent sparse entries) as NaN.
    x = _missing_values_set_to_nan(x, xgb.missing, sparse_missing=True)
    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)
    is_multiclass = _xgb_n_targets(xgb) > 1
    is_regression = isinstance(xgb, XGBRegressor)
    names = xgb.classes_ if not is_regression else ['y']
    display_names = get_target_display_names(names, target_names, targets,
                                             top_targets, proba)
    res = Explanation(
        estimator=repr(xgb),
        method='decision paths',
        # Pick the description matching (is_regression, is_multiclass).
        description={
            (False, False): DESCRIPTION_CLF_BINARY,
            (False, True): DESCRIPTION_CLF_MULTICLASS,
            (True, False): DESCRIPTION_REGRESSION,
        }[is_regression, is_multiclass],
        is_regression=is_regression,
        targets=[],
    )
    # Apply the feature filter to one target's weights and rank the top ones.
    def get_score_feature_weights(_label_id):
        _score, _feature_weights = scores_weights[_label_id]
        _x = x
        if flt_indices is not None:
            _x = mask(_x, flt_indices)
            _feature_weights = mask(_feature_weights, flt_indices)
        return _score, get_top_features(
            feature_names, _feature_weights, top, _x)
    if is_multiclass:
        for label_id, label in display_names:
            score, feature_weights = get_score_feature_weights(label_id)
            target_expl = TargetExplanation(
                target=label,
                feature_weights=feature_weights,
                score=score,
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        # Binary / regression: a single target; proba[1] is the probability
        # of the positive class when probabilities are available.
        score, feature_weights = get_score_feature_weights(0)
        target_expl = TargetExplanation(
            target=display_names[-1][1],
            feature_weights=feature_weights,
            score=score,
            proba=proba[1] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)
    return res
def _prediction_feature_weights(xgb, X, feature_names):
    """ For each target, return a (score, feature-weight array) pair for
    this prediction, following an idea from
    http://blog.datadive.net/interpreting-random-forests/
    """
    # XGBClassifier does not have pred_leaf argument, so use booster
    booster = xgb.booster()  # type: Booster
    leaf_ids, = booster.predict(DMatrix(X, missing=xgb.missing),
                                pred_leaf=True)
    tree_dumps = booster.get_dump(with_stats=True)
    assert len(tree_dumps) == len(leaf_ids)
    weights_for = partial(_target_feature_weights,
                          feature_names=feature_names)
    n_targets = _xgb_n_targets(xgb)
    if n_targets == 1:
        return [weights_for(leaf_ids, tree_dumps)]
    # For multiclass, XGBoost stores dumps and leaf_ids interleaved in one
    # flat array, so take every n_targets-th entry for each target.
    return [
        weights_for(leaf_ids[target_idx::n_targets],
                    tree_dumps[target_idx::n_targets])
        for target_idx in range(n_targets)
    ]
# Matches xgboost's default feature names: "f" followed by the feature index.
# Raw string: the previous '^f(\d+)$' literal relied on a deprecated invalid
# escape sequence; compiled once here instead of on every path step.
_FEATURE_NAME_RE = re.compile(r'^f(\d+)$')


def _target_feature_weights(leaf_ids, tree_dumps, feature_names):
    """ Return (score, feature_weights) for one target.
    ``score`` is the raw output of the ensemble (sum of leaf values before
    the loss-specific link function); ``feature_weights`` assigns to each
    feature the total change in expected score along the decision paths.
    """
    feature_weights = np.zeros(len(feature_names))
    # All trees in XGBoost give equal contribution to the prediction:
    # it is equal to sum of "leaf" values in leafs
    # before applying loss-specific function
    # (e.g. logistic for "binary:logistic" loss).
    score = 0
    for text_dump, leaf_id in zip(tree_dumps, leaf_ids):
        leaf = _indexed_leafs(_parse_tree_dump(text_dump))[leaf_id]
        score += leaf['leaf']
        # Rebuild the root -> leaf path by walking "parent" links upwards.
        path = [leaf]
        while 'parent' in path[-1]:
            path.append(path[-1]['parent'])
        path.reverse()
        # Check how each split changes the expected "leaf" value.
        for node, child in zip(path, path[1:]):
            # NOTE(review): assumes booster features use default "fN" names;
            # a custom feature_map would make this match fail — confirm.
            f_num_match = _FEATURE_NAME_RE.search(node['split'])
            idx = int(f_num_match.group(1))
            feature_weights[idx] += child['leaf'] - node['leaf']
        # Root "leaf" value is interpreted as bias.
        feature_weights[feature_names.bias_idx] += path[0]['leaf']
    return score, feature_weights
def _indexed_leafs(parent):
    """ Return a leaf nodeid -> node dictionary with
    "parent" and "leaf" (average child "leaf" value) added to all nodes.
    """
    children = parent.get('children')
    if not children:
        # A node without children is itself a leaf
        return {parent['nodeid']: parent}
    index = {}
    for child in children:
        child['parent'] = parent
        if 'leaf' in child:
            index[child['nodeid']] = child
        else:
            index.update(_indexed_leafs(child))
    # Expected value of this internal node: cover-weighted child average
    parent['leaf'] = _parent_value(children)
    return index
def _parent_value(children):
""" Value of the parent node: a weighted sum of child values.
"""
covers = np.array([child['cover'] for child in children])
covers /= np.sum(covers)
leafs = np.array([child['leaf'] for child in children])
return np.sum(leafs * covers)
def _xgb_n_targets(xgb):
    """ Number of separately-scored targets for an XGBoost estimator:
    1 for regressors and binary classifiers, ``n_classes_`` otherwise.
    Raises TypeError for anything that is not an XGBoost sklearn wrapper.
    """
    if isinstance(xgb, XGBClassifier):
        return 1 if xgb.n_classes_ == 2 else xgb.n_classes_
    elif isinstance(xgb, XGBRegressor):
        return 1
    else:
        # Name the offending type instead of raising a bare TypeError
        raise TypeError('expected XGBClassifier or XGBRegressor, got {!r}'
                        .format(type(xgb)))
def _xgb_feature_importances(xgb, importance_type):
    """ Importance vector over all booster features (missing scores are
    treated as 0), normalized to sum to 1.
    """
    booster = xgb.booster()
    scores = booster.get_score(importance_type=importance_type)
    importances = np.array(
        [scores.get(name, 0.) for name in booster.feature_names],
        dtype=np.float32)
    return importances / importances.sum()
def _parse_tree_dump(text_dump):
    """ Parse text tree dump (one item of a list returned by Booster.get_dump())
    into json format that will be used by next XGBoost release.
    """
    root = None
    ancestors = []  # type: List[Dict]
    for line in text_dump.split('\n'):
        if not line:
            continue
        depth, node = _parse_dump_line(line)
        if depth == 0:
            assert not ancestors
            root = node
        elif depth > len(ancestors):
            raise ValueError('Unexpected dump structure')
        else:
            # Pop back to this node's parent, then attach the node to it
            del ancestors[depth:]
            ancestors[-1].setdefault('children', []).append(node)
        ancestors.append(node)
    return root
def _parse_dump_line(line):
# type: (str) -> Tuple[int, Dict[str, Any]]
branch_match = re.match(
'^(\t*)(\d+):\[(\w+)<([^\]]+)\] '
'yes=(\d+),no=(\d+),missing=(\d+),'
'gain=([^,]+),cover=(.+)$', line)
if branch_match:
tabs, node_id, feature, condition, yes, no, missing, gain, cover = \
branch_match.groups()
depth = len(tabs)
return depth, {
'depth': depth,
'nodeid': int(node_id),
'split': feature,
'split_condition': float(condition),
'yes': int(yes),
'no': int(no),
'missing': int(missing),
'gain': float(gain),
'cover': float(cover),
}
leaf_match = re.match('^(\t*)(\d+):leaf=([^,]+),cover=(.+)$', line)
if leaf_match:
tabs, node_id, value, cover = leaf_match.groups()
depth = len(tabs)
return depth, {
'nodeid': int(node_id),
'leaf': float(value),
'cover': float(cover),
}
raise ValueError('Line in unexpected format: {}'.format(line))
def _missing_values_set_to_nan(values, missing_value, sparse_missing):
""" Return a copy of values where missing values (equal to missing_value)
are replaced to nan according. If sparse_missing is True,
entries missing in a sparse matrix will also be set to nan.
Sparse matrices will be converted to dense format.
| |
import collections
import gc
import inspect
import math
from multiprocessing.spawn import import_main_path
import os
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
Counter,
Dict,
List,
Optional,
Tuple,
Union,
)
from torch import optim
# Integrations must be imported before ML frameworks:
from transformers.integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from tqdm import tqdm
from transformers.data.data_collator import (
DataCollator,
DataCollatorWithPadding,
default_data_collator,
)
from transformers.file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from transformers.modeling_utils import PreTrainedModel, unwrap_model
from transformers.optimization import Adafactor, AdamW, get_scheduler
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers import Trainer
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from transformers.training_args import ParallelMode, TrainingArguments
from transformers.utils import logging
from transformers.utils.modeling_auto_mapping import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
)
from transformers import Trainer
from transformers.integrations import WandbCallback, rewrite_logs
import wandb
# Guarded optional imports and module-level defaults, mirroring the preamble
# of transformers' own trainer module: each feature is imported only when its
# availability check passes.
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
    from transformers.utils.notebook import NotebookProgressCallback
    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
    from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
    # torch >= 1.6 ships native AMP (torch.cuda.amp)
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
if is_datasets_available():
    import datasets
if is_torch_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
    import fairscale
    from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler
    # Fully-sharded DDP only exists in fairscale >= 0.3
    if version.parse(fairscale.__version__) >= version.parse("0.3"):
        from fairscale.nn.data_parallel import (
            FullyShardedDataParallel as FullyShardedDDP,
        )
        from fairscale.nn.wrap import auto_wrap
    else:
        FullyShardedDDP = None
if is_sagemaker_distributed_available():
    # SageMaker's distributed backend replaces torch.distributed here
    import smdistributed.dataparallel.torch.distributed as dist
    from smdistributed.dataparallel.torch.parallel.distributed import (
        DistributedDataParallel as DDP,
    )
else:
    import torch.distributed as dist
if is_training_run_on_sagemaker():
    logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
    import optuna
logger = logging.get_logger(__name__)
class WandbCallbackThreadFix(WandbCallback):
    """WandbCallback variant that forces ``wandb.init`` to use the "fork"
    start method (see ``setup``) and that supports logging entries without
    the global step (see ``on_log``).
    """
    # NOTE(review): unlike upstream WandbCallback.setup, this signature takes
    # ``reinit`` explicitly; on_log below calls it with reinit=False.
    def setup(self, args, state, model, reinit, **kwargs):
        """
        Setup the optional Weights & Biases (`wandb`) integration.
        One can subclass and override this method to customize the setup if needed. Find more information `here
        <https://docs.wandb.ai/integrations/huggingface>`__. You can also override the following environment variables:
        Environment:
            WANDB_LOG_MODEL (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to log model as artifact at the end of training.
            WANDB_WATCH (:obj:`str`, `optional` defaults to :obj:`"gradients"`):
                Can be :obj:`"gradients"`, :obj:`"all"` or :obj:`"false"`. Set to :obj:`"false"` to disable gradient
                logging or :obj:`"all"` to log gradients and parameters.
            WANDB_PROJECT (:obj:`str`, `optional`, defaults to :obj:`"huggingface"`):
                Set this to a custom string to store results in a different project.
            WANDB_DISABLED (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to disable wandb entirely. Set `WANDB_DISABLED=true` to disable.
        """
        if self._wandb is None:
            return
        self._initialized = True
        if state.is_world_process_zero:
            logger.info(
                'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"'
            )
            # Run config: training args merged with (and overriding) model config
            combined_dict = {**args.to_sanitized_dict()}
            if hasattr(model, "config") and model.config is not None:
                model_config = model.config.to_dict()
                combined_dict = {**model_config, **combined_dict}
            trial_name = state.trial_name
            init_args = {}
            if trial_name is not None:
                # HP-search trial: trial name becomes the run, run_name the group
                run_name = trial_name
                init_args["group"] = args.run_name
            else:
                run_name = args.run_name
            # The deviation this subclass exists for: force the "fork" start
            # method when initializing wandb — presumably to avoid a
            # thread-related init issue (see class name); confirm upstream.
            init_args["settings"] = wandb.Settings(start_method="fork")
            self._wandb.init(
                project=os.getenv("WANDB_PROJECT", "huggingface"),
                config=combined_dict,
                name=run_name,
                reinit=reinit,
                **init_args,
            )
            # keep track of model topology and gradients, unsupported on TPU
            if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false":
                self._wandb.watch(
                    model,
                    log=os.getenv("WANDB_WATCH", "gradients"),
                    log_freq=max(100, args.logging_steps),
                )
    def on_log(self, args, state, control, model=None, logs=None, **kwargs):
        if self._wandb is None:
            return
        if not self._initialized:
            self.setup(args, state, model, reinit=False)
        # A single-entry logs dict is treated as a pre-formatted payload
        # (e.g. a wandb Table) and logged as-is — confirm against callers.
        is_table = len(logs) == 1
        if state.is_world_process_zero:
            if is_table:
                self._wandb.log(logs)
            else:
                # "use_global_step" is a control key, not a metric: pop it
                # before rewriting, and log without a step when it is False.
                use_global_step = logs.pop("use_global_step", True)
                logs = rewrite_logs(logs)
                if use_global_step:
                    self._wandb.log(logs, step=state.global_step)
                else:
                    self._wandb.log(logs)
class MuxTrainer(Trainer):
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (
None,
None,
),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(
f"No `TrainingArguments` passed, using `output_dir={output_dir}`."
)
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError(
"`Trainer` requires either a `model` or `model_init` argument"
)
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if (
hasattr(model, "is_parallelizable")
and model.is_parallelizable
and model.model_parallel
):
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError(
"Using sharded DDP only works in distributed training."
)
elif not is_fairscale_available():
raise ImportError(
"Sharded DDP training requires fairscale: `pip install fairscale`."
)
elif (
ShardedDDPOption.SIMPLE not in args.sharded_ddp
and FullyShardedDDP is None
):
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or (args.deepspeed and args.do_train)
or (args.fp16_full_eval and not args.do_train)
or (
self.sharded_ddp
in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3]
)
):
self.place_model_on_device = False
default_collator = (
default_data_collator
if tokenizer is None
else DataCollatorWithPadding(tokenizer)
)
self.data_collator = (
data_collator if data_collator is not None else default_collator
)
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (
self.optimizer is not None or self.lr_scheduler is not None
):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
# default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(
# self.args.report_to
# )
default_callbacks = DEFAULT_CALLBACKS + [WandbCallbackThreadFix]
callbacks = (
default_callbacks if callbacks is None else default_callbacks + callbacks
)
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(
PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK
)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(
getattr(self.data_collator, "collate_batch", None)
):
raise ValueError(
"The `data_collator` should be a simple callable (function, class with `__call__`)."
)
if args.max_steps > 0:
logger.info(
"max_steps is given, it will override any value given in num_train_epochs"
)
# Enforce rules on using datasets with no __len__
if (
train_dataset is not None
and not isinstance(train_dataset, collections.abc.Sized)
and args.max_steps <= 0
):
raise ValueError(
"train_dataset does not implement __len__, max_steps has to be specified"
)
if eval_dataset is not None and not isinstance(
eval_dataset, collections.abc.Sized
):
raise ValueError("eval_dataset must implement __len__")
self._signature_columns = None
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend | |
<reponame>opnfv/samplevnf
#!/usr/bin/python
##
## Copyright (c) 2020 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
import sys
import time
import copy
from math import ceil
from statistics import mean
from past.utils import old_div
from rapid_log import RapidLog
from rapid_log import bcolors
from rapid_test import RapidTest
inf = float("inf")  # sentinel: fixed_rate tests disable thresholds by setting them to +inf
class FlowSizeTest(RapidTest):
"""
Class to manage the flowsizetesting
"""
    def __init__(self, test_param, lat_percentile, runtime, testname,
            environment_file, gen_machine, sut_machine, background_machines):
        # Base class handles generic test bookkeeping (parameters, runtime,
        # logging); the machines are kept on this instance.
        super().__init__(test_param, runtime, testname, environment_file)
        self.gen_machine = gen_machine  # traffic generator machine
        self.sut_machine = sut_machine  # system under test machine
        self.background_machines = background_machines  # extra traffic sources
        self.test['lat_percentile'] = lat_percentile
        if self.test['test'] == 'TST009test':
            # This test implements some of the testing as defined in
            # https://docbox.etsi.org/ISG/NFV/open/Publications_pdf/Specs-Reports/NFV-TST%20009v3.2.1%20-%20GS%20-%20NFVI_Benchmarks.pdf
            # TST009_n: number of discrete rate steps searched over.
            self.test['TST009_n'] = int(ceil(old_div(
                self.test['maxframespersecondallingress'],
                self.test['stepsize'])))
            self.test['TST009'] = True
            # L / R: left and right bounds of the binary search, as indices
            # into the step-rate table TST009_S built below.
            self.test['TST009_L'] = 0
            self.test['TST009_R'] = self.test['TST009_n'] - 1
            self.test['TST009_S']= []
            for m in range(0, self.test['TST009_n']):
                self.test['TST009_S'].append((m+1) * self.test['stepsize'])
        elif self.test['test'] == 'fixed_rate':
            # Fixed-rate runs never fail on thresholds: disable them all.
            for key in['drop_rate_threshold','lat_avg_threshold',
                    'lat_perc_threshold','lat_max_threshold']:
                self.test[key] = inf
def new_speed(self, speed,size,success):
if self.test['test'] == 'fixed_rate':
return (self.test['startspeed'])
elif self.test['test'] == 'increment_till_fail':
return (speed + self.test['step'])
elif 'TST009' in self.test.keys():
if success:
self.test['TST009_L'] = self.test['TST009_m'] + 1
else:
self.test['TST009_R'] = max(self.test['TST009_m'] - 1,
self.test['TST009_L'])
self.test['TST009_m'] = int (old_div((self.test['TST009_L'] +
self.test['TST009_R']),2))
return (self.get_percentageof10Gbps(self.test['TST009_S'][self.test['TST009_m']],size))
else:
if success:
self.test['minspeed'] = speed
else:
self.test['maxspeed'] = speed
return (old_div((self.test['minspeed'] + self.test['maxspeed']),2.0))
def get_start_speed_and_init(self, size):
if self.test['test'] == 'fixed_rate':
return (self.test['startspeed'])
elif self.test['test'] == 'increment_till_fail':
return (self.test['startspeed'])
elif 'TST009' in self.test.keys():
self.test['TST009_L'] = 0
self.test['TST009_R'] = self.test['TST009_n'] - 1
self.test['TST009_m'] = int(old_div((self.test['TST009_L'] +
self.test['TST009_R']), 2))
return (self.get_percentageof10Gbps(self.test['TST009_S'][self.test['TST009_m']],size))
else:
self.test['minspeed'] = 0
self.test['maxspeed'] = self.test['startspeed']
return (self.test['startspeed'])
def resolution_achieved(self):
if self.test['test'] == 'fixed_rate':
return (True)
elif 'TST009' in self.test.keys():
return (self.test['TST009_L'] == self.test['TST009_R'])
else:
return ((self.test['maxspeed'] - self.test['minspeed']) <= self.test['accuracy'])
def warm_up(self):
# Running at low speed to make sure the ARP messages can get through.
# If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
# Note however that if we would run the test steps during a very long time, the ARP would expire in the switch.
# PROX will send a new ARP request every seconds so chances are very low that they will all fail to get through
imix = self.test['warmupimix']
FLOWSIZE = self.test['warmupflowsize']
WARMUPSPEED = self.test['warmupspeed']
WARMUPTIME = self.test['warmuptime']
RapidLog.info(("Warming up during {} seconds..., packet size = {},"
" flows = {}, speed = {}").format(WARMUPTIME, imix, FLOWSIZE,
WARMUPSPEED))
self.gen_machine.set_generator_speed(WARMUPSPEED)
self.set_background_speed(self.background_machines, WARMUPSPEED)
self.gen_machine.set_udp_packet_size(imix)
self.set_background_size(self.background_machines, imix)
_ = self.gen_machine.set_flows(FLOWSIZE)
self.set_background_flows(self.background_machines, FLOWSIZE)
self.gen_machine.start()
self.start_background_traffic(self.background_machines)
time.sleep(WARMUPTIME)
self.stop_background_traffic(self.background_machines)
self.gen_machine.stop()
def run(self):
result_details = {'Details': 'Nothing'}
TestResult = 0
end_data = {}
iteration_prefix = {}
self.warm_up()
for imix in self.test['imixs']:
size = mean(imix)
self.gen_machine.set_udp_packet_size(imix)
if self.background_machines:
backgroundinfo = ('{}Running {} x background traffic not '
'represented in the table{}').format(bcolors.FLASH,
len(self.background_machines),bcolors.ENDC)
else:
backgroundinfo = '{}{}'.format(bcolors.FLASH,bcolors.ENDC)
self.set_background_size(self.background_machines, imix)
RapidLog.info('+' + '-' * 188 + '+')
RapidLog.info(("| UDP, {:>5} bytes, different number of flows by "
"randomizing SRC & DST UDP port. {:116.116}|").
format(round(size), backgroundinfo))
RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
'+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
'-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ '-' * 7 + '+' + '-' * 4 + '+')
RapidLog.info(('| Flows | Speed requested | Gen by core | Sent by'
' NIC | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f}'
' Pcentil| Max. Lat.| Sent | Received | Lost | Total'
' Lost|L.Ratio|Time|').format(self.test['lat_percentile']*100))
RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
'+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
'-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ '-' * 7 + '+' + '-' * 4 + '+')
for flow_number in self.test['flows']:
attempts = 0
self.gen_machine.reset_stats()
if self.sut_machine:
self.sut_machine.reset_stats()
flow_number = self.gen_machine.set_flows(flow_number)
self.set_background_flows(self.background_machines, flow_number)
end_data['speed'] = None
speed = self.get_start_speed_and_init(size)
while True:
attempts += 1
endwarning = False
print('{} flows: Measurement ongoing at speed: {}%'.format(
str(flow_number), str(round(speed, 2))), end=' \r')
sys.stdout.flush()
iteration_data = self.run_iteration(
float(self.test['runtime']),flow_number,size,speed)
if iteration_data['r'] > 1:
retry_warning = '{} {:1} retries needed{}'.format(
bcolors.WARNING, iteration_data['r'],
bcolors.ENDC)
else:
retry_warning = ''
# Drop rate is expressed in percentage. lat_used is a ratio
# (0 to 1). The sum of these 2 should be 100%.
# If the sum is lower than 95, it means that more than 5%
# of the latency measurements where dropped for accuracy
# reasons.
if (iteration_data['drop_rate'] +
iteration_data['lat_used'] * 100) < 95:
lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
'{}').format(bcolors.WARNING,
iteration_data['lat_used'] * 100,
bcolors.ENDC)
else:
lat_warning = ''
iteration_prefix = {'speed' : bcolors.ENDC,
'lat_avg' : bcolors.ENDC,
'lat_perc' : bcolors.ENDC,
'lat_max' : bcolors.ENDC,
'abs_drop_rate' : bcolors.ENDC,
'drop_rate' : bcolors.ENDC}
if self.test['test'] == 'fixed_rate':
end_data = copy.deepcopy(iteration_data)
end_prefix = copy.deepcopy(iteration_prefix)
if lat_warning or retry_warning:
endwarning = '| | {:177.177} |'.format(
retry_warning + lat_warning)
success = True
# TestResult = TestResult + iteration_data['pps_rx']
# fixed rate testing result is strange: we just report
# the pps received
# The following if statement is testing if we pass the
# success criteria of a certain drop rate, average latency
# and maximum latency below the threshold.
# The drop rate success can be achieved in 2 ways: either
# the drop rate is below a treshold, either we want that no
# packet has been lost during the test.
# This can be specified by putting 0 in the .test file
elif ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped']==self.test['drop_rate_threshold']==0)) and (iteration_data['lat_avg']< self.test['lat_avg_threshold']) and (iteration_data['lat_perc']< self.test['lat_perc_threshold']) and (iteration_data['lat_max'] < self.test['lat_max_threshold']):
if (old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))>0.01:
iteration_prefix['speed'] = bcolors.WARNING
if iteration_data['abs_tx_fail'] > 0:
gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(self.get_pps(speed,size), iteration_data['pps_tx'], iteration_data['abs_tx_fail']) + bcolors.ENDC
else:
gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(self.get_pps(speed,size), iteration_data['pps_tx']) + bcolors.ENDC
else:
iteration_prefix['speed'] = bcolors.ENDC
gen_warning = ''
end_data = copy.deepcopy(iteration_data)
end_prefix = copy.deepcopy(iteration_prefix)
if lat_warning or gen_warning or retry_warning:
endwarning = '| | {:186.186} |'.format(retry_warning + lat_warning + gen_warning)
success = True
success_message=' SUCCESS'
RapidLog.debug(self.report_result(-attempts, size,
iteration_data, iteration_prefix) + success_message +
retry_warning + lat_warning + gen_warning)
else:
success_message=' FAILED'
if ((iteration_data['abs_dropped']>0) and (self.test['drop_rate_threshold'] ==0)):
iteration_prefix['abs_drop_rate'] = bcolors.FAIL
if (iteration_data['drop_rate'] < self.test['drop_rate_threshold']):
iteration_prefix['drop_rate'] = bcolors.ENDC
else:
iteration_prefix['drop_rate'] = bcolors.FAIL
if (iteration_data['lat_avg']< self.test['lat_avg_threshold']):
iteration_prefix['lat_avg'] = bcolors.ENDC
else:
iteration_prefix['lat_avg'] = bcolors.FAIL
if (iteration_data['lat_perc']< self.test['lat_perc_threshold']):
iteration_prefix['lat_perc'] = bcolors.ENDC
else:
iteration_prefix['lat_perc'] = bcolors.FAIL
if (iteration_data['lat_max']< self.test['lat_max_threshold']):
iteration_prefix['lat_max'] = bcolors.ENDC
else:
iteration_prefix['lat_max'] = bcolors.FAIL
if ((old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))<0.001):
iteration_prefix['speed'] = bcolors.ENDC
else:
iteration_prefix['speed'] = bcolors.FAIL
success = False
RapidLog.debug(self.report_result(-attempts, size,
iteration_data, iteration_prefix) +
success_message + retry_warning + lat_warning)
speed = self.new_speed(speed, size, success)
if self.test['test'] == 'increment_till_fail':
if not success:
break
elif self.resolution_achieved():
break
if end_data['speed'] is None:
end_data = iteration_data
end_prefix = iteration_prefix
RapidLog.info('|{:>7} | {:<177} |'.format("FAILED","Speed 0 or close to 0, data for last failed step below:"))
RapidLog.info(self.report_result(flow_number, size,
end_data, end_prefix))
if end_data['avg_bg_rate']:
tot_avg_rx_rate = end_data['pps_rx'] + (end_data['avg_bg_rate'] * len(self.background_machines))
endtotaltrafficrate = '| | Total amount of traffic received by all generators during | |
project_id: int = None
) -> schemas.Project:
project_record = self._get_project_record(session, name, project_id)
return self._transform_project_record_to_schema(session, project_record)
def delete_project(
self,
session: Session,
name: str,
deletion_strategy: schemas.DeletionStrategy = schemas.DeletionStrategy.default(),
):
logger.debug(
"Deleting project from DB", name=name, deletion_strategy=deletion_strategy
)
self._delete(session, Project, name=name)
def list_projects(
self,
session: Session,
owner: str = None,
format_: mlrun.api.schemas.ProjectsFormat = mlrun.api.schemas.ProjectsFormat.full,
labels: List[str] = None,
state: mlrun.api.schemas.ProjectState = None,
names: typing.Optional[typing.List[str]] = None,
) -> schemas.ProjectsOutput:
query = self._query(session, Project, owner=owner, state=state)
if labels:
query = self._add_labels_filter(session, query, Project, labels)
if names:
query = query.filter(Project.name.in_(names))
project_records = query.all()
project_names = [project_record.name for project_record in project_records]
projects = []
# calculating the project summary data is done by doing cross project queries (and not per project) so we're
# building it outside of the loop
if format_ == mlrun.api.schemas.ProjectsFormat.summary:
projects = self.generate_projects_summaries(session, project_names)
else:
for project_record in project_records:
if format_ == mlrun.api.schemas.ProjectsFormat.name_only:
projects = project_names
# leader format is only for follower mode which will format the projects returned from here
elif format_ in [
mlrun.api.schemas.ProjectsFormat.full,
mlrun.api.schemas.ProjectsFormat.leader,
]:
projects.append(
self._transform_project_record_to_schema(
session, project_record
)
)
else:
raise NotImplementedError(
f"Provided format is not supported. format={format_}"
)
return schemas.ProjectsOutput(projects=projects)
    def _get_project_resources_counters(self, session: Session):
        """Return per-project resource counters, cached for a configurable TTL.

        Returns a 5-tuple of dicts keyed by project name:
        (function count, feature-set count, model-artifact count,
        failed/aborted runs in the last 24h, currently-running runs).
        The result is cached in self._cache and recomputed only after the
        TTL (config.httpdb.projects.counters_cache_ttl) expires.
        """
        now = datetime.now()
        # Recompute only when there is no TTL yet or the cached TTL has passed.
        if (
            not self._cache["project_resources_counters"]["ttl"]
            or self._cache["project_resources_counters"]["ttl"] < now
        ):
            logger.debug(
                "Project resources counter cache expired. Calculating",
                ttl=self._cache["project_resources_counters"]["ttl"],
            )

            import mlrun.artifacts

            # Count distinct function names per project (one SQL group-by).
            functions_count_per_project = (
                session.query(Function.project, func.count(distinct(Function.name)))
                .group_by(Function.project)
                .all()
            )
            project_to_function_count = {
                result[0]: result[1] for result in functions_count_per_project
            }
            # Count distinct feature-set names per project (one SQL group-by).
            feature_sets_count_per_project = (
                session.query(FeatureSet.project, func.count(distinct(FeatureSet.name)))
                .group_by(FeatureSet.project)
                .all()
            )
            project_to_feature_set_count = {
                result[0]: result[1] for result in feature_sets_count_per_project
            }
            # The kind filter is applied post the query to the DB (manually in python code), so counting should be that
            # way as well, therefore we're doing it here, and can't do it with sql as the above
            # We're using the "latest" which gives us only one version of each artifact key, which is what we want to
            # count (artifact count, not artifact versions count)
            model_artifacts = self._find_artifacts(
                session, None, "latest", kind=mlrun.artifacts.model.ModelArtifact.kind
            )
            project_to_models_count = collections.defaultdict(int)
            for model_artifact in model_artifacts:
                project_to_models_count[model_artifact.project] += 1
            runs = self._find_runs(session, None, "*", None)
            project_to_recent_failed_runs_count = collections.defaultdict(int)
            project_to_running_runs_count = collections.defaultdict(int)
            # we want to count unique run names, and not all occurrences of all runs, therefore we're keeping set of
            # names and only count new names
            project_to_recent_failed_run_names = collections.defaultdict(set)
            project_to_running_run_names = collections.defaultdict(set)
            runs = runs.all()
            for run in runs:
                run_json = run.struct
                # A run counts as "running" while its state is non-terminal.
                if self._is_run_matching_state(
                    run,
                    run_json,
                    mlrun.runtimes.constants.RunStates.non_terminal_states(),
                ):
                    if (
                        run_json.get("metadata", {}).get("name")
                        and run_json["metadata"]["name"]
                        not in project_to_running_run_names[run.project]
                    ):
                        project_to_running_run_names[run.project].add(
                            run_json["metadata"]["name"]
                        )
                        project_to_running_runs_count[run.project] += 1
                # Error/aborted runs are "recent failures" only when started
                # within the last 24 hours.
                if self._is_run_matching_state(
                    run,
                    run_json,
                    [
                        mlrun.runtimes.constants.RunStates.error,
                        mlrun.runtimes.constants.RunStates.aborted,
                    ],
                ):
                    one_day_ago = datetime.now() - timedelta(hours=24)
                    if run.start_time and run.start_time >= one_day_ago:
                        if (
                            run_json.get("metadata", {}).get("name")
                            and run_json["metadata"]["name"]
                            not in project_to_recent_failed_run_names[run.project]
                        ):
                            project_to_recent_failed_run_names[run.project].add(
                                run_json["metadata"]["name"]
                            )
                            project_to_recent_failed_runs_count[run.project] += 1
            self._cache["project_resources_counters"]["result"] = (
                project_to_function_count,
                project_to_feature_set_count,
                project_to_models_count,
                project_to_recent_failed_runs_count,
                project_to_running_runs_count,
            )
            ttl_time = datetime.now() + timedelta(
                seconds=humanfriendly.parse_timespan(
                    config.httpdb.projects.counters_cache_ttl
                )
            )
            self._cache["project_resources_counters"]["ttl"] = ttl_time
        return self._cache["project_resources_counters"]["result"]
def generate_projects_summaries(
self, session: Session, projects: List[str]
) -> List[mlrun.api.schemas.ProjectSummary]:
(
project_to_function_count,
project_to_feature_set_count,
project_to_models_count,
project_to_recent_failed_runs_count,
project_to_running_runs_count,
) = self._get_project_resources_counters(session)
project_summaries = []
for project in projects:
project_summaries.append(
mlrun.api.schemas.ProjectSummary(
name=project,
functions_count=project_to_function_count.get(project, 0),
feature_sets_count=project_to_feature_set_count.get(project, 0),
models_count=project_to_models_count.get(project, 0),
runs_failed_recent_count=project_to_recent_failed_runs_count.get(
project, 0
),
runs_running_count=project_to_running_runs_count.get(project, 0),
)
)
return project_summaries
def _update_project_record_from_project(
self, session: Session, project_record: Project, project: schemas.Project
):
project.metadata.created = project_record.created
project_dict = project.dict()
# TODO: handle taking out the functions/workflows/artifacts out of the project and save them separately
project_record.full_object = project_dict
project_record.description = project.spec.description
project_record.source = project.spec.source
project_record.state = project.status.state
labels = project.metadata.labels or {}
update_labels(project_record, labels)
self._upsert(session, project_record)
    def _patch_project_record_from_project(
        self,
        session: Session,
        name: str,
        project_record: Project,
        project: dict,
        patch_mode: schemas.PatchMode,
    ):
        """Deep-merge a partial project dict into the stored record and persist it.

        patch_mode selects the mergedeep strategy (e.g. additive vs replace).
        The merged object is validated through schemas.Project before storing.
        """
        # The creation timestamp is owned by the DB record, never by the patch.
        project.setdefault("metadata", {})["created"] = project_record.created
        strategy = patch_mode.to_mergedeep_strategy()
        project_record_full_object = project_record.full_object
        # mergedeep.merge mutates project_record_full_object in place.
        mergedeep.merge(project_record_full_object, project, strategy=strategy)

        # If a bad kind value was passed, it will fail here (return 422 to caller)
        project = schemas.Project(**project_record_full_object)
        self.store_project(
            session, name, project,
        )

        project_record.full_object = project_record_full_object
        self._upsert(session, project_record)
def is_project_exists(self, session: Session, name: str, **kwargs):
project_record = self._get_project_record(
session, name, raise_on_not_found=False
)
if not project_record:
return False
return True
def _get_project_record(
self,
session: Session,
name: str = None,
project_id: int = None,
raise_on_not_found: bool = True,
) -> Project:
if not any([project_id, name]):
raise mlrun.errors.MLRunInvalidArgumentError(
"One of 'name' or 'project_id' must be provided"
)
project_record = self._query(
session, Project, name=name, id=project_id
).one_or_none()
if not project_record:
if not raise_on_not_found:
return None
raise mlrun.errors.MLRunNotFoundError(
f"Project not found: name={name}, project_id={project_id}"
)
return project_record
def verify_project_has_no_related_resources(self, session: Session, name: str):
artifacts = self._find_artifacts(session, name, "*")
self._verify_empty_list_of_project_related_resources(
name, artifacts, "artifacts"
)
logs = self._list_logs(session, name)
self._verify_empty_list_of_project_related_resources(name, logs, "logs")
runs = self._find_runs(session, None, name, []).all()
self._verify_empty_list_of_project_related_resources(name, runs, "runs")
schedules = self.list_schedules(session, project=name)
self._verify_empty_list_of_project_related_resources(
name, schedules, "schedules"
)
functions = self._list_project_functions(session, name)
self._verify_empty_list_of_project_related_resources(
name, functions, "functions"
)
feature_sets = self.list_feature_sets(session, name).feature_sets
self._verify_empty_list_of_project_related_resources(
name, feature_sets, "feature_sets"
)
feature_vectors = self.list_feature_vectors(session, name).feature_vectors
self._verify_empty_list_of_project_related_resources(
name, feature_vectors, "feature_vectors"
)
    def delete_project_related_resources(self, session: Session, name: str):
        """Delete every resource owned by project *name* (not the project record itself).

        Order matters only loosely here; each call removes one resource kind.
        """
        self.del_artifacts(session, project=name)
        self._delete_logs(session, name)
        self.del_runs(session, project=name)
        self.delete_schedules(session, name)
        self._delete_functions(session, name)
        self._delete_feature_sets(session, name)
        self._delete_feature_vectors(session, name)
        # resources deletion should remove their tags and labels as well, but doing another try in case there are
        # orphan resources
        self._delete_resources_tags(session, name)
        self._delete_resources_labels(session, name)
@staticmethod
def _verify_empty_list_of_project_related_resources(
project: str, resources: List, resource_name: str
):
if resources:
raise mlrun.errors.MLRunPreconditionFailedError(
f"Project {project} can not be deleted since related resources found: {resource_name}"
)
    def _get_record_by_name_tag_and_uid(
        self, session, cls, project: str, name: str, tag: str = None, uid: str = None,
    ):
        """Resolve a single record of *cls* by name plus tag and/or uid.

        :returns: (computed_tag, object_tag_uid, record) where computed_tag is
            the tag actually used ("latest" when none given), object_tag_uid is
            the uid resolved through the tag (None when uid was given directly),
            and record is the matching row or None. Returns (None, None, None)
            when the tag does not resolve to any uid.
        """
        query = self._query(session, cls, name=name, project=project)
        computed_tag = tag or "latest"
        object_tag_uid = None
        # An explicit uid wins; otherwise (tag given, or neither tag nor uid)
        # resolve the uid through the tag, defaulting to "latest".
        if tag or not uid:
            object_tag_uid = self._resolve_class_tag_uid(
                session, cls, project, name, computed_tag
            )
            if object_tag_uid is None:
                return None, None, None
            uid = object_tag_uid
        if uid:
            query = query.filter(cls.uid == uid)
        return computed_tag, object_tag_uid, query.one_or_none()
def _get_feature_set(
self, session, project: str, name: str, tag: str = None, uid: str = None,
):
(
computed_tag,
feature_set_tag_uid,
db_feature_set,
) = self._get_record_by_name_tag_and_uid(
session, FeatureSet, project, name, tag, uid
)
if db_feature_set:
feature_set = self._transform_feature_set_model_to_schema(db_feature_set)
# If connected to a tag add it to metadata
if feature_set_tag_uid:
feature_set.metadata.tag = computed_tag
return feature_set
else:
return None
def get_feature_set(
self, session, project: str, name: str, tag: str = None, uid: str = None,
) -> schemas.FeatureSet:
feature_set = self._get_feature_set(session, project, name, tag, uid)
if not feature_set:
feature_set_uri = generate_object_uri(project, name, tag)
raise mlrun.errors.MLRunNotFoundError(
f"Feature-set not found {feature_set_uri}"
)
return feature_set
def _get_records_to_tags_map(self, session, cls, project, tag, name=None):
# Find object IDs by tag, project and feature-set-name (which is a like query)
tag_query = self._query(session, cls.Tag, project=project, name=tag)
if name:
tag_query = tag_query.filter(
generate_query_predicate_for_name(cls.Tag.obj_name, name)
)
# Generate a mapping from each object id (note: not uid, it's the DB ID) to its associated tags.
obj_id_tags = {}
for row in tag_query:
if row.obj_id in obj_id_tags:
obj_id_tags[row.obj_id].append(row.name)
else:
obj_id_tags[row.obj_id] = [row.name]
return obj_id_tags
def _generate_records_with_tags_assigned(
self, object_record, transform_fn, obj_id_tags, default_tag=None
):
# Using a similar mechanism here to assign tags to feature sets as is used in list_functions. Please refer
# there for some comments explaining the logic.
results = []
if default_tag:
results.append(transform_fn(object_record, default_tag))
else:
object_tags = obj_id_tags.get(object_record.id, [])
if len(object_tags) == 0 and not object_record.uid.startswith(
unversioned_tagged_object_uid_prefix
):
new_object = transform_fn(object_record)
results.append(new_object)
else:
for object_tag in object_tags:
results.append(transform_fn(object_record, object_tag))
return results
@staticmethod
def _generate_feature_set_digest(feature_set: schemas.FeatureSet):
return schemas.FeatureSetDigestOutput(
metadata=feature_set.metadata,
spec=schemas.FeatureSetDigestSpec(
entities=feature_set.spec.entities, features=feature_set.spec.features,
),
)
    def _generate_feature_or_entity_list_query(
        self,
        session,
        query_class,
        project: str,
        feature_set_keys,
        name: str = None,
        tag: str = None,
        labels: List[str] = None,
    ):
        """Build the (FeatureSet, query_class) join query used by the feature/entity listings.

        :param query_class: Feature or Entity model class (must have .name and a Tag)
        :param feature_set_keys: feature-set DB ids pre-filtered by *tag*;
            only applied when tag is given
        """
        # Query the actual objects to be returned
        query = (
            session.query(FeatureSet, query_class)
            .filter_by(project=project)
            .join(query_class)
        )

        if name:
            # name is matched against the feature/entity, not the feature set
            query = query.filter(
                generate_query_predicate_for_name(query_class.name, name)
            )

        if labels:
            query = self._add_labels_filter(session, query, query_class, labels)

        if tag:
            query = query.filter(FeatureSet.id.in_(feature_set_keys))

        return query
def list_features(
self,
session,
project: str,
name: str = None,
tag: str = None,
entities: List[str] = None,
labels: List[str] = None,
) -> schemas.FeaturesOutput:
# We don't filter by feature-set name here, as the name parameter refers to features
feature_set_id_tags = self._get_records_to_tags_map(
session, FeatureSet, project, tag, name=None
)
query = self._generate_feature_or_entity_list_query(
session, Feature, project, | |
<gh_stars>0
"""This file outlines the interaction between snek and Discord's Gateway API."""
import asyncio
import collections
import logging
import random
import sys
import time
import zlib
from types import TracebackType
from typing import TypeVar, TYPE_CHECKING
from aiohttp import WSMsgType
from dis_snek.api import events
from dis_snek.client.const import logger_name
from dis_snek.client.errors import WebSocketClosed
from dis_snek.client.utils.input_utils import OverriddenJson
from dis_snek.client.utils.serializer import dict_filter_none
from dis_snek.models.discord.enums import Status
from dis_snek.models.discord.enums import WebSocketOPCodes as OPCODE
from dis_snek.models.discord.snowflake import to_snowflake
from dis_snek.models.snek.cooldowns import CooldownSystem
if TYPE_CHECKING:
from .state import ConnectionState
from dis_snek.models.discord.snowflake import Snowflake_Type
__all__ = ["WebsocketClient"]
log = logging.getLogger(logger_name)
SELF = TypeVar("SELF", bound="WebsocketClient")
class GatewayRateLimit:
    """Throttles outgoing gateway messages to stay inside Discord's send limit."""

    def __init__(self) -> None:
        self.lock = asyncio.Lock()
        # docs state 120 calls per 60 seconds, this is set conservatively to 110 per 60 seconds.
        calls_per_interval = 110
        interval_seconds = 60
        self.cooldown_system = CooldownSystem(
            1, interval_seconds / calls_per_interval
        )

    # hacky way to throttle how frequently we send messages to the gateway
    async def rate_limit(self) -> None:
        async with self.lock:
            while not self.cooldown_system.acquire_token():
                await asyncio.sleep(self.cooldown_system.get_cooldown_time())
class WebsocketClient:
"""
Abstraction over one gateway connection.
Multiple `WebsocketClient` instances can be used to implement same-process sharding.
Attributes:
buffer: A buffer to hold incoming data until its complete
sequence: The sequence of this connection
session_id: The session ID of this connection
"""
__slots__ = (
"state",
"ws",
"shard",
"_zlib",
"rl_manager",
"chunk_cache",
"_trace",
"heartbeat_interval",
"sequence",
"session_id",
"latency",
"_race_lock",
"_closed",
"_keep_alive",
"_kill_bee_gees",
"_last_heartbeat",
"_acknowledged",
"_close_gateway",
"_entered",
)
    def __init__(self, state: "ConnectionState", shard: tuple[int, int]) -> None:
        self.state = state
        self.ws = None
        self.shard = shard  # (shard id, total shard count) per the identify payload

        self.rl_manager = GatewayRateLimit()
        self.chunk_cache = {}

        self._trace = []
        self.heartbeat_interval = None  # seconds; filled in from the HELLO payload
        self.sequence = None  # last gateway sequence number seen
        self.session_id = None

        # rolling window of the last 10 heartbeat round-trip times (seconds)
        self.latency = collections.deque(maxlen=10)

        # This lock needs to be held to send something over the gateway, but is also held when
        # reconnecting. That way there's no race conditions between sending and reconnecting.
        self._race_lock = asyncio.Lock()
        # Then this event is used so that receive() can wait for the reconnecting to complete.
        self._closed = asyncio.Event()

        self._keep_alive = None  # task running run_bee_gees(), created in __aenter__
        self._kill_bee_gees = asyncio.Event()
        self._last_heartbeat = 0
        self._acknowledged = asyncio.Event()
        self._acknowledged.set()  # Initialize it as set
        self._close_gateway = asyncio.Event()

        # Sanity check, it is extremely important that an instance isn't reused.
        self._entered = False
    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        """The event loop, owned by the client - the websocket never creates its own."""
        return self.state.client.loop
    async def __aenter__(self: SELF) -> SELF:
        """Connect to the gateway, read the hello payload, start heartbeating, identify."""
        if self._entered:
            raise RuntimeError("An instance of 'WebsocketClient' cannot be re-used!")
        self._entered = True
        # Fresh decompressor per connection: the zlib stream is connection-scoped.
        self._zlib = zlib.decompressobj()
        self.ws = await self.state.client.http.websocket_connect(self.state.gateway_url)

        # The first payload carries the heartbeat interval (milliseconds).
        hello = await self.receive(force=True)
        self.heartbeat_interval = hello["d"]["heartbeat_interval"] / 1000
        # Mark the connection usable before the heartbeater/identify need it.
        self._closed.set()

        self._keep_alive = asyncio.create_task(self.run_bee_gees())

        await self._identify()

        return self
    async def __aexit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, traceback: TracebackType | None
    ) -> None:
        """Stop the heartbeat task and close the underlying WebSocket."""
        # Technically should not be possible in any way, but might as well be safe worst-case.
        self._close_gateway.set()

        try:
            if self._keep_alive is not None:
                self._kill_bee_gees.set()
                try:
                    # Even if we get cancelled that is fine, because then the keep-alive
                    # handler will also be cancelled since we're waiting on it.
                    await self._keep_alive  # Wait for the keep-alive handler to finish
                finally:
                    self._keep_alive = None
        finally:
            if self.ws is not None:
                # We could be cancelled here, it is extremely important that we close the
                # WebSocket either way, hence the try/except.
                try:
                    await self.ws.close(code=1000)
                finally:
                    self.ws = None
@property
def average_latency(self) -> float:
"""Get the average latency of the connection."""
if self.latency:
return sum(self.latency) / len(self.latency)
else:
return float("inf")
    async def send(self, data: str, bypass=False) -> None:
        """
        Send data to the gateway.

        Parameters:
            data: The data to send
            bypass: Should the rate limit be ignored for this send (used for heartbeats)
        """
        log.debug(f"Sending data to gateway: {data}")

        # Hold the race lock so a concurrent reconnect() can't swap the socket
        # out from under us mid-send.
        async with self._race_lock:
            if self.ws is None:
                raise RuntimeError
            if not bypass:
                await self.rl_manager.rate_limit()

            await self.ws.send_str(data)
async def send_json(self, data: dict, bypass=False) -> None:
"""
Send json data to the gateway.
Parameters:
data: The data to send
bypass: Should the rate limit be ignored for this send (used for heartbeats)
"""
serialized = OverriddenJson.dumps(data)
await self.send(serialized, bypass)
    async def receive(self, force: bool = False) -> dict:
        """
        Receive a full event payload from the WebSocket.

        Parameters:
            force:
                Whether to force the receiving, ignoring safety measures such as the read-lock.
                This option also means that exceptions are raised when a reconnection would normally
                be tried.
        """
        buffer = bytearray()

        while True:
            if not force:
                # If we are currently reconnecting in another task, wait for it to complete.
                await self._closed.wait()

            resp = await self.ws.receive()

            if resp.type == WSMsgType.CLOSE:
                log.debug(f"Disconnecting from gateway! Reason: {resp.data}::{resp.extra}")
                if resp.data >= 4000:
                    # This should propagate to __aexit__() which will forcefully shutdown everything
                    # and cleanup correctly.
                    raise WebSocketClosed(resp.data)

                if force:
                    raise RuntimeError("Discord unexpectedly wants to close the WebSocket during force receive!")

                # Close code 1000 is a clean close: start over rather than resume.
                await self.reconnect(code=resp.data, resume=resp.data != 1000)
                continue

            elif resp.type is WSMsgType.CLOSED:
                if force:
                    raise RuntimeError("Discord unexpectedly closed the underlying socket during force receive!")

                if not self._closed.is_set():
                    # Because we are waiting for the even before we receive, this shouldn't be
                    # possible - the CLOSING message should be returned instead. Either way, if this
                    # is possible after all we can just wait for the event to be set.
                    await self._closed.wait()
                else:
                    # This is an odd corner-case where the underlying socket connection was closed
                    # unexpectedly without communicating the WebSocket closing handshake. We'll have
                    # to reconnect ourselves.
                    await self.reconnect(resume=True)

            elif resp.type is WSMsgType.CLOSING:
                if force:
                    raise RuntimeError("WebSocket is unexpectedly closing during force receive!")

                # This happens when the keep-alive handler is reconnecting the connection even
                # though we waited for the event before hand, because it got to run while we waited
                # for data to come in. We can just wait for the event again.
                await self._closed.wait()
                continue

            if isinstance(resp.data, bytes):
                buffer.extend(resp.data)

            if resp.data is None:
                continue

            # A payload is only complete once the zlib flush suffix arrives.
            if len(resp.data) < 4 or resp.data[-4:] != b"\x00\x00\xff\xff":
                # message isn't complete yet, wait
                continue

            msg = self._zlib.decompress(buffer)
            msg = msg.decode("utf-8")
            msg = OverriddenJson.loads(msg)

            return msg
    async def reconnect(self, *, resume: bool = False, code: int = 1012) -> None:
        """Tear down the current socket and open a new one, optionally resuming the session."""
        # Holding the race lock blocks send() (and other reconnects) while the
        # socket is being swapped; _closed makes receive() wait too.
        async with self._race_lock:
            self._closed.clear()

            if self.ws is not None:
                await self.ws.close(code=code)

            self.ws = None
            # New connection means a new zlib stream; old decompressor state is useless.
            self._zlib = zlib.decompressobj()

            self.ws = await self.state.client.http.websocket_connect(self.state.gateway_url)

            hello = await self.receive(force=True)
            self.heartbeat_interval = hello["d"]["heartbeat_interval"] / 1000

            if not resume:
                await self._identify()
            else:
                await self._resume_connection()

            self._closed.set()
            self._acknowledged.set()
    async def run_bee_gees(self) -> None:
        """Top-level heartbeat task: run the heartbeater and log if it dies."""
        try:
            await self._start_bee_gees()
        except Exception:
            # NOTE(review): close() is defined outside this view - confirm it is
            # synchronous, since it is deliberately not awaited here.
            self.close()
            log.error("The heartbeater raised an exception!", exc_info=True)
    async def _start_bee_gees(self) -> None:
        """Heartbeat loop: send a heartbeat every interval until told to stop.

        Reconnects (with resume) when a previous heartbeat was never acknowledged,
        which indicates a zombied connection.
        """
        if self.heartbeat_interval is None:
            raise RuntimeError

        # Random initial delay of 0..interval/2 so shards don't heartbeat in lockstep;
        # if the kill event fires during the wait, exit before sending anything.
        try:
            await asyncio.wait_for(self._kill_bee_gees.wait(), timeout=self.heartbeat_interval * random.uniform(0, 0.5))
        except asyncio.TimeoutError:
            pass
        else:
            return

        log.debug(f"Sending heartbeat every {self.heartbeat_interval} seconds")
        while not self._kill_bee_gees.is_set():
            if not self._acknowledged.is_set():
                log.warning(
                    f"Heartbeat has not been acknowledged for {self.heartbeat_interval} seconds,"
                    " likely zombied connection. Reconnect!"
                )

                await self.reconnect(resume=True)

            # Cleared here, set again by the HEARTBEAT_ACK handler.
            self._acknowledged.clear()
            await self.send_heartbeat()
            self._last_heartbeat = time.perf_counter()

            try:
                # wait for next iteration, accounting for latency
                await asyncio.wait_for(self._kill_bee_gees.wait(), timeout=self.heartbeat_interval)
            except asyncio.TimeoutError:
                continue
            else:
                return
    async def run(self) -> None:
        """Start receiving events from the websocket.

        Races a receive() against the close signal each iteration;
        DISPATCH events are handled in fire-and-forget tasks while other
        opcodes are awaited inline (they may trigger a reconnect).
        """
        while True:
            stopping = asyncio.create_task(self._close_gateway.wait())
            receiving = asyncio.create_task(self.receive())
            done, _ = await asyncio.wait({stopping, receiving}, return_when=asyncio.FIRST_COMPLETED)
            if receiving in done:
                # Note that we check for a received message first, because if both completed at
                # the same time, we don't want to discard that message.
                msg = await receiving
                stopping.cancel()
            else:
                # This has to be the stopping task, which we join into the current task (even
                # though that doesn't give any meaningful value in the return).
                await stopping
                receiving.cancel()
                return
            op = msg.get("op")
            data = msg.get("d")
            seq = msg.get("s")
            event = msg.get("t")
            # NOTE(review): a truthiness check skips seq == 0 as well as
            # None -- confirm sequence numbers start at 1.
            if seq:
                self.sequence = seq
            if op == OPCODE.DISPATCH:
                # Dispatch events concurrently; they never reconnect us.
                asyncio.create_task(self.dispatch_event(data, seq, event))
                continue
            # This may try to reconnect the connection so it is best to wait
            # for it to complete before receiving more - that way there's less
            # possible race conditions to consider.
            await self.dispatch_opcode(data, op)
async def dispatch_opcode(self, data, op) -> None:
match op:
case OPCODE.HEARTBEAT:
return await self.send_heartbeat()
case OPCODE.HEARTBEAT_ACK:
self.latency.append(time.perf_counter() - self._last_heartbeat)
if self._last_heartbeat != 0 and self.latency[-1] >= 15:
log.warning(
f"High Latency! shard ID {self.shard[0]} heartbeat took {self.latency[-1]:.1f}s to be acknowledged!"
)
else:
log.debug(f"❤ Heartbeat acknowledged after {self.latency[-1]:.5f} seconds")
return self._acknowledged.set()
case OPCODE.RECONNECT:
log.info("Gateway requested reconnect. Reconnecting...")
return | |
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, datetime)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {datetime} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: datetime})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_real_complex_to_timedelta_no_na(self):
in_data = self.real_complex
out_data = [timedelta(seconds=c.real) for c in self.real_complex]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, timedelta)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: timedelta})
assert_frame_equal(result, out_df)
def test_coerce_from_real_complex_to_timedelta_with_na(self):
in_data = self.real_complex + [None]
out_data = ([timedelta(seconds=c.real) for c in self.real_complex] +
[None])
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, timedelta)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: timedelta})
assert_frame_equal(result, out_df)
def test_coerce_from_imaginary_complex_to_timedelta_no_na(self):
in_data = self.imag_complex
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {timedelta} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, timedelta)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {timedelta} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: timedelta})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_imaginary_complex_to_timedelta_with_na(self):
in_data = self.imag_complex + [None]
# series
in_series = pd.Series(in_data)
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce series "
f"values to {timedelta} without losing information (head: "
f"{list(in_series.head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_series, timedelta)
self.assertEqual(str(err.exception), err_msg)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
err_msg = (f"[datatube.dtype.coerce_dtypes] cannot coerce column "
f"{repr(self.col_name)} to {timedelta} without losing "
f"information (head: {list(in_df[self.col_name].head())})")
with self.assertRaises(ValueError) as err:
coerce_dtypes(in_df, {self.col_name: timedelta})
self.assertEqual(str(err.exception), err_msg)
def test_coerce_from_complex_to_object_no_na(self):
in_series = pd.Series(self.imag_complex)
out_series = in_series.astype(np.dtype("O"))
# series
result = coerce_dtypes(in_series, object)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_series})
out_df = pd.DataFrame({self.col_name: out_series})
result = coerce_dtypes(in_df, {self.col_name: object})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_to_object_wth_na(self):
in_series = pd.Series(self.imag_complex + [None])
out_series = in_series.astype(np.dtype("O"))
# series
result = coerce_dtypes(in_series, object)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_series})
out_df = pd.DataFrame({self.col_name: out_series})
result = coerce_dtypes(in_df, {self.col_name: object})
assert_frame_equal(result, out_df)
class CoerceStringDtypeTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        """Build small deterministic fixtures shared by every test.

        ``random`` is seeded so the generated floats (and everything
        derived from them) are reproducible. The statements are
        order-sensitive: each ``random.random()`` call consumes the seeded
        stream, so reordering would change the fixtures.
        """
        random.seed(12345)
        size = 3
        cls.integers = [-1 * size // 2 + i + 1 for i in range(size)]
        # ^ = [..., -1, 0, 1, ...]
        cls.floats = [i + random.random() for i in cls.integers]
        # ^ = [..., -1+e, 0+e, 1+e, ...]
        cls.complex = [complex(f, f) for f in cls.floats]
        # ^ = [..., complex(-1+e,-1+e), complex(0+e,0+e), complex(1+e,1+e), ...]
        cls.characters = [chr((i % 26) + ord("a")) for i in range(size)]
        # ^ = ["a", "b", "c", ..., "a", "b", "c", ...]
        cls.booleans = [bool((i + 1) % 2) for i in range(size)]
        # ^ = [True, False, True, False, ...]
        cls.naive_datetimes = [datetime.utcfromtimestamp(f) for f in cls.floats]
        # ^ = [..., utc time -1+e, utc time 0+e, utc_time 1+e, ...] (no tz)
        cls.aware_datetimes = [datetime.fromtimestamp(f, tz=timezone.utc)
                               for f in cls.floats]
        # ^ = [..., utc time -1+e, utc time 0+e, utc_time 1+e, ...] (with tz)
        # Alternating aware/naive datetimes (even indices aware, odd naive).
        cls.aware_naive_datetimes = []
        for index, f in enumerate(cls.floats):
            if index % 2:  # naive
                cls.aware_naive_datetimes.append(datetime.utcfromtimestamp(f))
            else:  # aware
                val = datetime.fromtimestamp(f, tz=timezone.utc)
                cls.aware_naive_datetimes.append(val)
        # ^ = [aware, naive, aware, naive, aware, ...]
        # Aware datetimes cycling through every pytz timezone by index.
        cls.mixed_timezones = []
        for index, f in enumerate(cls.floats):
            tz_name = pytz.all_timezones[index % len(pytz.all_timezones)]
            tz = pytz.timezone(tz_name)
            val = datetime.fromtimestamp(f, tz=tz)
            cls.mixed_timezones.append(val)
        # ^ = ["Africa/Abidjan", "Africa/Accra", "Africa/Addis_Ababa", ...]
        cls.timedeltas = [timedelta(seconds=f) for f in cls.floats]
        # ^ = [..., -1+e seconds, 0+e seconds, 1+e seconds, ...]
        # Column name used for all single-column DataFrame fixtures.
        cls.col_name = "strings"
def test_coerce_from_integer_string_to_integer_no_na(self):
in_data = [str(i) for i in self.integers]
out_data = self.integers
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, int)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: int})
assert_frame_equal(result, out_df)
def test_coerce_from_integer_string_to_integer_with_na(self):
in_data = [str(i) for i in self.integers] + [None]
out_data = self.integers + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, int)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: int})
assert_frame_equal(result, out_df)
def test_coerce_from_float_string_to_float_no_na(self):
in_data = [str(f) for f in self.floats]
out_data = self.floats
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, float)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: float})
assert_frame_equal(result, out_df)
def test_coerce_from_float_string_to_float_with_na(self):
in_data = [str(f) for f in self.floats] + [None]
out_data = self.floats + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, float)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: float})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_string_to_complex_no_na(self):
in_data = [str(c) for c in self.complex]
out_data = self.complex
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, complex)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: complex})
assert_frame_equal(result, out_df)
def test_coerce_from_complex_string_to_complex_with_na(self):
in_data = [str(c) for c in self.complex] + [None]
out_data = self.complex + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, complex)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: complex})
assert_frame_equal(result, out_df)
def test_coerce_from_character_string_to_string_no_na(self):
in_data = self.characters
out_data = in_data.copy()
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, str)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: str})
assert_frame_equal(result, out_df)
def test_coerce_from_character_string_to_string_with_na(self):
in_data = self.characters + [None]
out_data = in_data.copy()
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, str)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: str})
assert_frame_equal(result, out_df)
def test_coerce_from_boolean_string_to_boolean_no_na(self):
in_data = [str(b) for b in self.booleans]
out_data = self.booleans
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, bool)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: bool})
assert_frame_equal(result, out_df)
def test_coerce_from_boolean_string_to_boolean_with_na(self):
in_data = [str(b) for b in self.booleans] + [None]
out_data = self.booleans + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, bool)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: bool})
assert_frame_equal(result, out_df)
def test_coerce_from_naive_datetime_string_to_datetime_no_na(self):
in_data = [str(d) for d in self.naive_datetimes]
out_data = self.naive_datetimes
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_naive_datetime_string_to_datetime_with_na(self):
in_data = [str(d) for d in self.naive_datetimes] + [None]
out_data = self.naive_datetimes + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_naive_ISO_8601_string_to_datetime_no_na(self):
in_data = [d.isoformat() for d in self.naive_datetimes]
out_data = self.naive_datetimes
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_naive_ISO_8601_string_to_datetime_with_na(self):
in_data = [d.isoformat() for d in self.naive_datetimes] + [None]
out_data = self.naive_datetimes + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_aware_datetime_string_to_datetime_no_na(self):
in_data = [str(d) for d in self.aware_datetimes]
out_data = self.aware_datetimes
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_aware_datetime_string_to_datetime_with_na(self):
in_data = [str(d) for d in self.aware_datetimes] + [None]
out_data = self.aware_datetimes + [None]
# series
in_series = pd.Series(in_data)
out_series = pd.Series(out_data)
result = coerce_dtypes(in_series, datetime)
assert_series_equal(result, out_series)
# dataframe
in_df = pd.DataFrame({self.col_name: in_data})
out_df = pd.DataFrame({self.col_name: out_data})
result = coerce_dtypes(in_df, {self.col_name: datetime})
assert_frame_equal(result, out_df)
def test_coerce_from_aware_ISO_8601_string_to_datetime_no_na(self):
in_data = [d.isoformat() | |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Services descriptor definitions.
Contains message definitions and functions for converting
service classes into transmittable message format.
Describing an Enum instance, Enum class, Field class or Message class will
generate an appropriate descriptor object that describes that class.
This message can itself be used to transmit information to clients wishing
to know the description of an enum value, enum, field or message without
needing to download the source code. This format is also compatible with
other, non-Python languages.
The descriptors are modeled to be binary compatible with
https://github.com/google/protobuf
NOTE: The names of types and fields are not always the same between these
descriptors and the ones defined in descriptor.proto. This was done in order
to make source code files that use these descriptors easier to read. For
example, it is not necessary to prefix TYPE to all the values in
FieldDescriptor.Variant as is done in descriptor.proto
FieldDescriptorProto.Type.
Example:
class Pixel(messages.Message):
x = messages.IntegerField(1, required=True)
y = messages.IntegerField(2, required=True)
color = messages.BytesField(3)
# Describe Pixel class using message descriptor.
fields = []
field = FieldDescriptor()
field.name = 'x'
field.number = 1
field.label = FieldDescriptor.Label.REQUIRED
field.variant = FieldDescriptor.Variant.INT64
fields.append(field)
field = FieldDescriptor()
field.name = 'y'
field.number = 2
field.label = FieldDescriptor.Label.REQUIRED
field.variant = FieldDescriptor.Variant.INT64
fields.append(field)
field = FieldDescriptor()
field.name = 'color'
field.number = 3
field.label = FieldDescriptor.Label.OPTIONAL
field.variant = FieldDescriptor.Variant.BYTES
fields.append(field)
message = MessageDescriptor()
message.name = 'Pixel'
message.fields = fields
# Describing is the equivalent of building the above message.
message == describe_message(Pixel)
Public Classes:
EnumValueDescriptor: Describes Enum values.
EnumDescriptor: Describes Enum classes.
FieldDescriptor: Describes field instances.
FileDescriptor: Describes a single 'file' unit.
FileSet: Describes a collection of file descriptors.
MessageDescriptor: Describes Message classes.
Public Functions:
describe_enum_value: Describe an individual enum-value.
describe_enum: Describe an Enum class.
describe_field: Describe a Field definition.
describe_file: Describe a 'file' unit from a Python module or object.
describe_file_set: Describe a file set from a list of modules or objects.
describe_message: Describe a Message definition.
"""
import codecs
import types
import six
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import util
# Public API of this module: the descriptor message types plus the
# describe_* helper functions documented in the module docstring.
__all__ = [
    'EnumDescriptor',
    'EnumValueDescriptor',
    'FieldDescriptor',
    'MessageDescriptor',
    'FileDescriptor',
    'FileSet',
    'DescriptorLibrary',
    'describe_enum',
    'describe_enum_value',
    'describe_field',
    'describe_message',
    'describe_file',
    'describe_file_set',
    'describe',
    'import_descriptor_loader',
]
# NOTE: MessageField is missing because message fields cannot have
# a default value at this time.
# TODO(rafek): Support default message values.
#
# Map to functions that convert default values of fields of a given type
# to a string. The function must return a value that is compatible with
# FieldDescriptor.default_value and therefore a unicode string.
# Serializers: field class -> function rendering that field's default value
# as a unicode string suitable for FieldDescriptor.default_value.
_DEFAULT_TO_STRING_MAP = {
    messages.IntegerField: six.text_type,
    messages.FloatField: six.text_type,
    messages.BooleanField: lambda value: value and u'true' or u'false',
    # NOTE(review): codecs.escape_encode / escape_decode are undocumented
    # CPython-internal APIs -- confirm availability on target interpreters.
    messages.BytesField: lambda value: codecs.escape_encode(value)[0],
    messages.StringField: lambda value: value,
    messages.EnumField: lambda value: six.text_type(value.number),
}
# Parsers: the inverse of _DEFAULT_TO_STRING_MAP, turning the string form
# produced above back into a field's native default value.
_DEFAULT_FROM_STRING_MAP = {
    messages.IntegerField: int,
    messages.FloatField: float,
    messages.BooleanField: lambda value: value == u'true',
    messages.BytesField: lambda value: codecs.escape_decode(value)[0],
    messages.StringField: lambda value: value,
    messages.EnumField: int,
}
class EnumValueDescriptor(messages.Message):
    """Enum value descriptor.

    Fields:
      name: Name of enumeration value.
      number: Number of enumeration value.
    """
    # TODO(rafek): Why are these listed as optional in descriptor.proto.
    # Harmonize?
    name = messages.StringField(1, required=True)
    # INT32 variant keeps the wire format aligned with descriptor.proto,
    # which declares enum value numbers as int32.
    number = messages.IntegerField(2,
                                   required=True,
                                   variant=messages.Variant.INT32)
class EnumDescriptor(messages.Message):
    """Enum class descriptor.

    Fields:
      name: Name of Enum without any qualification.
      values: Values defined by Enum class.
    """
    name = messages.StringField(1)
    values = messages.MessageField(EnumValueDescriptor, 2, repeated=True)
class FieldDescriptor(messages.Message):
    """Field definition descriptor.

    Enums:
      Variant: Wire format hint sub-types for field.
      Label: Values for optional, required and repeated fields.

    Fields:
      name: Name of field.
      number: Number of field.
      variant: Variant of field.
      type_name: Type name for message and enum fields.
      default_value: String representation of default value.
    """
    Variant = messages.Variant  # pylint:disable=invalid-name

    class Label(messages.Enum):
        """Field label."""
        OPTIONAL = 1
        REQUIRED = 2
        REPEATED = 3
    name = messages.StringField(1, required=True)
    # Field tag numbers (1, 3, 4, 5, 6, 7 -- 2 intentionally unused) mirror
    # descriptor.proto's FieldDescriptorProto for binary compatibility.
    number = messages.IntegerField(3,
                                   required=True,
                                   variant=messages.Variant.INT32)
    label = messages.EnumField(Label, 4, default=Label.OPTIONAL)
    variant = messages.EnumField(Variant, 5)
    type_name = messages.StringField(6)
    # For numeric types, contains the original text representation of
    # the value.
    # For booleans, "true" or "false".
    # For strings, contains the default text contents (not escaped in any
    # way).
    # For bytes, contains the C escaped value. All bytes < 128 are that are
    # traditionally considered unprintable are also escaped.
    default_value = messages.StringField(7)
class MessageDescriptor(messages.Message):
    """Message definition descriptor.

    Fields:
      name: Name of Message without any qualification.
      fields: Fields defined for message.
      message_types: Nested Message classes defined on message.
      enum_types: Nested Enum classes defined on message.
    """
    name = messages.StringField(1)
    fields = messages.MessageField(FieldDescriptor, 2, repeated=True)
    # Self-referential field: nested messages are described by the same
    # descriptor type, hence the fully-qualified string reference.
    message_types = messages.MessageField(
        'apitools.base.protorpclite.descriptor.MessageDescriptor', 3,
        repeated=True)
    enum_types = messages.MessageField(EnumDescriptor, 4, repeated=True)
class FileDescriptor(messages.Message):
    """Description of file containing protobuf definitions.

    Fields:
      package: Fully qualified name of package that definitions belong to.
      message_types: Message definitions contained in file.
      enum_types: Enum definitions contained in file.
    """
    # Tag numbers (2, 4, 5) mirror descriptor.proto's FileDescriptorProto.
    package = messages.StringField(2)
    # TODO(rafek): Add dependency field
    message_types = messages.MessageField(MessageDescriptor, 4, repeated=True)
    enum_types = messages.MessageField(EnumDescriptor, 5, repeated=True)
class FileSet(messages.Message):
    """A collection of FileDescriptors.

    Fields:
      files: Files in file-set.
    """
    files = messages.MessageField(FileDescriptor, 1, repeated=True)
def describe_enum_value(enum_value):
    """Build descriptor for an Enum instance.

    Args:
        enum_value: Enum value to provide descriptor for.

    Returns:
        Initialized EnumValueDescriptor describing the Enum instance.
    """
    descriptor = EnumValueDescriptor()
    descriptor.name = six.text_type(enum_value.name)
    descriptor.number = enum_value.number
    return descriptor
def describe_enum(enum_definition):
    """Build descriptor for an Enum class.

    Args:
        enum_definition: Enum class to provide descriptor for.

    Returns:
        Initialized EnumDescriptor describing the Enum class.
    """
    descriptor = EnumDescriptor()
    # Only the unqualified class name is recorded.
    descriptor.name = enum_definition.definition_name().split('.')[-1]
    described = [
        describe_enum_value(enum_definition.lookup_by_number(number))
        for number in enum_definition.numbers()]
    if described:
        descriptor.values = described
    return descriptor
def describe_field(field_definition):
    """Build descriptor for a Field instance.

    Args:
        field_definition: Field instance to provide descriptor for.

    Returns:
        Initialized FieldDescriptor describing the Field instance.
    """
    descriptor = FieldDescriptor()
    descriptor.name = field_definition.name
    descriptor.number = field_definition.number
    descriptor.variant = field_definition.variant
    # Enum and message fields carry the fully qualified name of their type.
    if isinstance(field_definition, messages.EnumField):
        descriptor.type_name = field_definition.type.definition_name()
    if isinstance(field_definition, messages.MessageField):
        descriptor.type_name = (
            field_definition.message_type.definition_name())
    default = field_definition.default
    if default is not None:
        to_string = _DEFAULT_TO_STRING_MAP[type(field_definition)]
        descriptor.default_value = to_string(default)
    # Derive the label from the field's cardinality flags.
    if field_definition.repeated:
        label = FieldDescriptor.Label.REPEATED
    elif field_definition.required:
        label = FieldDescriptor.Label.REQUIRED
    else:
        label = FieldDescriptor.Label.OPTIONAL
    descriptor.label = label
    return descriptor
def describe_message(message_definition):
    """Build descriptor for a Message class.

    Args:
        message_definition: Message class to provide descriptor for.

    Returns:
        Initialized MessageDescriptor describing the Message class.
    """
    descriptor = MessageDescriptor()
    # Only the unqualified class name is recorded.
    descriptor.name = message_definition.definition_name().split('.')[-1]
    ordered_fields = sorted(message_definition.all_fields(),
                            key=lambda v: v.number)
    if ordered_fields:
        descriptor.fields = [describe_field(f) for f in ordered_fields]
    # __messages__ / __enums__ only exist when the class nests definitions.
    try:
        nested_messages = message_definition.__messages__
    except AttributeError:
        pass
    else:
        descriptor.message_types = [
            describe_message(getattr(message_definition, name))
            for name in nested_messages]
    try:
        nested_enums = message_definition.__enums__
    except AttributeError:
        pass
    else:
        descriptor.enum_types = [
            describe_enum(getattr(message_definition, name))
            for name in nested_enums]
    return descriptor
def describe_file(module):
    """Build a file descriptor from a specified Python module.

    Args:
        module: Python module to describe.

    Returns:
        Initialized FileDescriptor describing the module.
    """
    descriptor = FileDescriptor()
    # Normalize an empty package name to None.
    descriptor.package = util.get_package_for_module(module) or None
    message_descriptors = []
    enum_descriptors = []
    # Walk the module's top-level attributes looking for message and enum
    # definitions; each one found is itself described.
    for name in sorted(dir(module)):
        value = getattr(module, name)
        if not isinstance(value, type):
            continue
        if issubclass(value, messages.Message):
            message_descriptors.append(describe_message(value))
        elif issubclass(value, messages.Enum):
            enum_descriptors.append(describe_enum(value))
    if message_descriptors:
        descriptor.message_types = message_descriptors
    if enum_descriptors:
        descriptor.enum_types = enum_descriptors
    return descriptor
def describe_file_set(modules):
    """Build a file set from the specified Python modules.

    Args:
        modules: Iterable of Python modules to describe.

    Returns:
        Initialized FileSet describing the modules.
    """
    descriptor = FileSet()
    described = [describe_file(module) for module in modules]
    if described:
        descriptor.files = described
    return descriptor
def describe(value):
"""Describe any value as a descriptor.
Helper function for describing any object with an appropriate descriptor
object.
Args:
value: Value to describe as a descriptor.
Returns:
Descriptor message class if object is describable as a descriptor, else
None.
"""
if isinstance(value, types.ModuleType):
return describe_file(value)
elif isinstance(value, messages.Field):
return describe_field(value)
elif isinstance(value, messages.Enum):
return describe_enum_value(value)
elif isinstance(value, | |
<reponame>PENGUINLIONG/tinyspv
"""
Generate machine readable JSON referring to the SPIR-V specification.
@PENGUINLIONG
"""
from html.parser import HTMLParser
import json
from collections import defaultdict
import re
import os
from typing import List
# The SPIR-V spec HTML is UTF-8; pass the encoding explicitly so parsing
# does not depend on the platform's default locale encoding.
with open("specs/unified1/SPIRV.html", encoding="utf-8") as f:
    spec = f.read()
class HeadlineParser:
    """Tracks the current <h2>/<h3>/<h4> headline hierarchy while parsing.

    Implements the HTMLParser handle_* callbacks so it can be mixed into
    a parser; records the most recent section, subsection and
    subsubsection titles, printing each as it is seen.
    """

    def __init__(self):
        self.should_update_cur_sec = False
        self.should_update_cur_subsec = False
        self.should_update_cur_subsubsec = False
        self.cur_sec = ""
        self.cur_subsec = ""
        self.cur_subsubsec = ""
        # Buffer for text inside a headline tag; None while outside one.
        self.gather_text = None

    def handle_starttag(self, tag, attrs):
        # Begin buffering text only when entering a headline element.
        if tag in ("h2", "h3", "h4"):
            self.gather_text = ""

    def handle_data(self, data):
        if self.gather_text is not None:
            self.gather_text += data

    def handle_endtag(self, tag):
        if tag == "h2":
            self.cur_sec = self.gather_text.strip()
            # A new section invalidates the deeper headline levels.
            self.cur_subsec = ""
            self.cur_subsubsec = ""
            self.gather_text = None
            print(self.cur_sec)
        elif tag == "h3":
            self.cur_subsec = self.gather_text.strip()
            # A new subsection invalidates the deeper headline level.
            self.cur_subsubsec = ""
            self.gather_text = None
            print(" ", self.cur_subsec)
        elif tag == "h4":
            self.cur_subsubsec = self.gather_text.strip()
            self.gather_text = None
            print("  ", self.cur_subsubsec)

    def get_triplet(self):
        """Return the current (section, subsection, subsubsection) titles."""
        return (self.cur_sec, self.cur_subsec, self.cur_subsubsec)
class TableParser:
    """Collects column definitions and body rows from an HTML table.

    Implements the HTMLParser handle_* callbacks so it can be mixed into a
    parser. ``col_defs`` holds the <thead> header cells; ``rows`` holds one
    list of cell strings per <tbody> row. A cell with ``colspan=N``
    contributes N entries (N-1 empty strings followed by its text).
    """

    def __init__(self):
        self.should_update_col_defs = False
        self.should_update_rows = False
        self.col_defs = []
        self.rows = []
        self.gather_text = ""

    def handle_starttag(self, tag, attrs):
        # BUGFIX: HTMLParser delivers `attrs` as a list of (name, value)
        # pairs, so the previous `"colspan" in attrs` test never matched
        # and colspan expansion was dead. Normalizing to a dict handles
        # both the pair-list and an already-dict argument.
        attrs = dict(attrs)
        if tag == "thead":
            self.should_update_col_defs = True
        elif tag == "tbody":
            self.should_update_rows = True
        elif tag == "tr" and self.should_update_rows:
            self.rows += [[]]
        elif tag == "td" and self.should_update_rows:
            colspan = int(attrs["colspan"]) if "colspan" in attrs else 1
            self.rows[-1] += [""] * (colspan - 1)
            self.gather_text = ""
        elif tag == "th" and self.should_update_col_defs:
            colspan = int(attrs["colspan"]) if "colspan" in attrs else 1
            self.col_defs += [""] * (colspan - 1)
            self.gather_text = ""

    def handle_data(self, data):
        self.gather_text += data

    def handle_endtag(self, tag):
        if tag == "thead":
            self.should_update_col_defs = False
        elif tag == "tbody":
            self.should_update_rows = False
        elif tag == "th" and self.should_update_col_defs:
            self.col_defs += [self.gather_text.strip()]
        elif tag == "td" and self.should_update_rows:
            self.rows[-1] += [self.gather_text.strip()]
def split_and_strip(sep, s):
    """Split *s* on *sep*, strip each segment, and drop empty segments."""
    stripped = (segment.strip() for segment in s.split(sep))
    return [segment for segment in stripped if segment]
def decompose_item_desc(desc) -> dict:
    """Parse an item-description cell into a structured dict.

    The first line carries the op name and optional parenthesized aliases;
    later lines either name enabling extensions or contribute to the free
    text description. Keys produced: Name, Aliases, SeeExtensions,
    Description (each only when present).
    """
    result = {}
    lines = split_and_strip('\n', desc)
    header = re.match(r"(\w+) *(?:\(((?:\w+,? *)+)\))?", lines[0])
    assert header, f"invalid op name pattern '{lines[0]}'"
    result["Name"] = header[1]
    if header[2] is not None:
        aliases = split_and_strip(',', header[2])
        if aliases:
            result["Aliases"] = aliases
    single_prefix = "See extension "
    plural_prefix = "See extensions "
    remainder = []
    for line in lines[1:]:
        if line.startswith(single_prefix):
            result["SeeExtensions"] = [line[len(single_prefix):].strip()]
        elif line.startswith(plural_prefix):
            result["SeeExtensions"] = [x.strip() for x in line[len(plural_prefix):].split(",")]
        else:
            # There might be other descriptions but these are safe to ignore.
            remainder.append(line)
    if remainder:
        result["Description"] = '\n'.join(remainder)
    return result
def decompose_item_meta(meta) -> dict:
    """Parse an item's metadata cell into version bounds and capabilities.

    Keys produced (each only when present): Reserved, MissingBefore,
    MissingAfter, SeeExtensions, EnablingCapabilities.
    """
    result = {}
    before_prefix = "Missing before version "
    after_prefix = "Missing after version "
    ext_prefix = "Also see extension: "
    exts_prefix = "Also see extensions: "
    for line in split_and_strip('\n', meta):
        if line == "Reserved.":
            result["Reserved"] = True
        elif line.startswith(before_prefix) and line.endswith("."):
            result["MissingBefore"] = line[len(before_prefix):-1]
        elif line.startswith(after_prefix) and line.endswith("."):
            result["MissingAfter"] = line[len(after_prefix):-1]
        elif line.startswith(ext_prefix):
            result["SeeExtensions"] = [line[len(ext_prefix):].strip()]
        elif line.startswith(exts_prefix):
            result["SeeExtensions"] = split_and_strip(',', line[len(exts_prefix):])
        else:
            # Any remaining line lists the capabilities enabling this item.
            result["EnablingCapabilities"] = split_and_strip(',', line)
    return result
def title_keep_upper(txt) -> str:
    """Return *txt* with its first character upper-cased.

    Unlike ``str.title()``/``str.capitalize()``, the rest of the string is
    left untouched, so words that are already (partially) upper-case keep
    their casing.
    """
    # str.upper() is a no-op on characters that are already upper-case and
    # on non-letters, so no explicit isupper() check is needed; slicing
    # with [:1] also makes the empty string safe.
    return txt[:1].upper() + txt[1:]
def decompose_operand_list(operand_ty, operand_desc) -> List[dict]:
    """Decompose a variadic ("listed") operand specification.

    ``operand_ty`` is the comma-separated type listing, which must end with
    the ellipsis character ``\\u2026``; ``operand_desc`` is the matching
    (possibly absent) description listing.  Returns a list of operand
    dicts, each flagged with ``"Listed": True``.

    Raises RuntimeError when the description does not follow any of the
    known patterns.
    """
    def listed(ty, desc=None):
        # Build one listed-operand entry, keeping the historical key order
        # (Type, [Description], Listed).
        if desc is None:
            return {"Type": title_keep_upper(ty), "Listed": True}
        return {"Type": title_keep_upper(ty), "Description": desc, "Listed": True}
    # Extract the repeating units in type specifications. This is the
    # referential operand count.
    segs = split_and_strip(',', operand_ty)
    raw_segs = segs
    assert segs[-1] == '\u2026', "a operand listing must end with \\u2026"
    assert (len(segs) - 1) % 2 == 0, "a operand listing should have a length of multiple of two"
    # The type listing spells the repeating unit out twice before the
    # ellipsis; keep a single copy of the unit.
    segs = segs[:(len(segs) - 1) // 2]
    if operand_desc is None:
        # There is no description for the operand list.
        return [listed(seg) for seg in segs]
    # Now extract the descriptions. There are various ways a listed operand
    # being described. We will deal with each case respectively.
    desc_segs = split_and_strip(',', operand_desc)
    if desc_segs[-1] == '\u2026':
        desc_segs = desc_segs[:-1]
    if len(desc_segs) > 1:
        if any(c.isdigit() for c in desc_segs[0]):
            # The description enumerates the operand list by an ordinate
            # number, like `Operand 1, Operand 2, \u2026` in `OpExtInst`.
            order = next(c for c in desc_segs[0] if c.isdigit())
            next_order = str(int(order) + 1)
            if desc_segs[0].replace(order, next_order).lower() == desc_segs[1].lower():
                desc = desc_segs[0].replace(f" {order}", "")
                assert desc_segs[0] != desc, "ordinate number elimination seems failed"
                # BUG FIX: previously every entry used `segs[0]`; each
                # repeated unit must use its own type segment.
                return [listed(seg, desc) for seg in segs]
        if len(desc_segs) == len(raw_segs) - 1:
            # Each of the description matches with the type specification in
            # the same segment position, like `Variable, Parent, \u2026` in
            # `OpPhi`.
            return [listed(ty, desc) for ty, desc in zip(segs, desc_segs)]
    if len(desc_segs) == 1 and operand_desc[-1] != '\u2026':
        # A single description either describes the sequence as a whole
        # (`Indexes` in `OpAccessChain`, or `literal, label <id>, \u2026` in
        # `OpSwitch`) or is a cross-reference phrase (`See Decoration.` in
        # `OpDecoration`); in both cases it is attached verbatim to every
        # repeated type segment, so the two former branches are merged here.
        return [listed(seg, desc_segs[0]) for seg in segs]
    raise RuntimeError("unrecognized operand list pattern")
def decompose_operand(operand) -> List[dict]:
    """Parse a single operand table cell into a list of operand dicts."""
    # Not using `startswith` because of `OpDecorateString`.
    optional = "Optional" in operand
    if optional:
        operand = operand.replace("Optional", "")
    raw_lines = split_and_strip('\n', operand)
    # Re-join lines that belong to one multi-line operand listing: a line
    # ending in ',' continues on the next one. See `OpSwitch`.
    joined = [""]
    for piece in raw_lines:
        joined[-1] += piece + " "
        if not piece.endswith(','):
            joined.append("")
    assert joined[-1] == ""
    lines = [part.strip() for part in joined[:-1]]
    assert len(lines) <= 2, f"unexpected operand description row {lines}"
    if ',' in lines[0]:
        # A comma-separated type listing means a variadic operand list.
        trailing_desc = lines[1] if len(lines) == 2 else None
        return decompose_operand_list(lines[0], trailing_desc)
    entry = {}
    is_listed = lines[0] == "Literals"
    entry["Type"] = "Literal" if is_listed else lines[0]
    if len(lines) == 2:
        entry["Description"] = lines[1]
    if is_listed:
        entry["Listed"] = True
    elif optional:
        entry["Optional"] = optional
    return [entry]
def table2enum(table: TableParser, subsec):
out = []
ncol_def = len(table.col_defs)
# Some enumerations are not literals but are referred to by result IDs
# so there might be a '<id>' suffix at the end of `subsec`. Be aware of
# this, we don't use `endswith` here.
assert table.col_defs[0] in subsec, \
"subsection headline mismatch with the first column header of the enumeration table"
# Keep the order, the checks on `ncol_def` shold be done in descending
# order.
if ncol_def >= 3:
assert table.col_defs[1] == "Extra Operands", \
"column 2 must be the extra operands"
assert table.col_defs[2] == "Enabling Capabilities", \
"column 3 must be the enabling capabilities"
for row in table.rows:
assert len(row) >= ncol_def
extra = []
for operand in row[2:-1]:
operand = operand.strip()
# Ignore trailing empty extra operands.
if len(operand) > 0:
operand = decompose_operand(operand)
extra += operand
elem = decompose_item_desc(row[1])
if len(extra) > 0:
elem["ExtraOperands"] = extra
meta = decompose_item_meta(row[-1])
elem.update(meta)
elem["Value"] = row[0]
out += [elem]
elif ncol_def >= 2:
def override_caps_en2imply(meta):
"""
Override enabling capabilities to implicitly declares. This is used
for the `Capability` enum.
"""
out = {}
for k, v in meta.items():
if k == "Enabling Capabilities":
out["Implicitly Declares"] = v
else:
out[k] = v
return out
# General cases for other literal number specifications.
for row in table.rows:
assert len(row) == ncol_def + 1
assert table.col_defs[1] == "Enabling Capabilities" or \
table.col_defs[1] == "Implicitly Declares", \
"unsupported capability column"
elem = decompose_item_desc(row[1])
meta = decompose_item_meta(row[2])
if table.col_defs[1] == "Implicitly Declares":
meta = override_caps_en2imply(meta)
elem.update(meta)
elem["Value"] = row[0]
out | |
from pymatgen import PeriodicSite
from Voronoi_sites import add_voronoi
from empty_polyhedra_util import *
__author__ = 'Tina'
"""
Created March 17, 2015
Takes code with all Voronoi points inserted into structure (as cif file from Voronoi_sites) as input
These points are indicated by Rn, an element not present in any of the structures
-Outputted as struct_with_voronoi.cif
Check to see if sites correspond to the center of an empty polyhedra (and removes Voronoi points/Rn elements from
sites which are not empty polyhedra)
Remaining instances of Rn are those corresponding to an empty polyhedron (not necessarily at center)
"""
# remove duplicates of voronois
# remove voronois that have fewer than 4 neighboring oxygen atoms
def step1(all_voronois, structure, radius, anions, min_neighbors):
    """
    Remove sites with Voronoi points that have fewer than min_neighbors neighboring anions
    (duplicates of Voronoi sites are dropped as well)
    :param all_voronois: (List of Sites) list of all sites in structure1 containing Voronoi points
    :param structure: (Structure) target structure
    :param radius: (float) maximum radius to look out to for neighbors
    :param anions: (List of Strings) list of species which we consider anions
    :param min_neighbors: (int) minimum number of neighbors a point must have to still be considered
    :return: (List of Sites) new list of sites with Voronoi points to keep (Voronoi sites with fewer
        than min_neighbors neighboring anions removed)
    """
    new_voronois = []
    for site in all_voronois:
        # take out duplicates of voronois already kept
        if site in new_voronois:
            continue
        # count anion neighbors strictly inside the cutoff radius;
        # get_neighbors yields (site, distance) pairs
        # (the previous version also collected cation neighbors here but
        # never used them; that dead accumulation has been removed)
        anion_count = 0
        for entry in structure.get_neighbors(site, radius):
            if entry[0].species_string in anions and entry[1] < radius:
                anion_count += 1
        # include only those sites surrounded by at least min_neighbors anions
        if anion_count >= min_neighbors:
            new_voronois.append(site)
    return new_voronois
def step3(voronoi_points, structure, radius, anions, num_matches):
    """
    Removes Voronoi points from the list voronoi_points in the given structure which are basically
    the same as another Voronoi point (i.e. share the same shell of neighboring anions).
    NOTE: the input list voronoi_points is pruned in place and then returned.
    Args:
    :param voronoi_points: (List of Sites) list of sites in structure containing candidate Voronoi points
        (empty polyhedra centers)
    :param structure: (Structure) target Structure
    :param radius: (float) distance to which we check for anions
    :param anions: (List of Strings) list of species which we consider anions
    :param num_matches: (int) number of matches between neighboring anions to consider a Voronoi site the same as
        another
    :return: remaining Voronoi points
    """
    # map an integer index to each candidate point; indices, not sites, are
    # tracked in keys_viewed / to_remove below
    key = {}
    similar_voronoi = {}  # sets of voronoi points that are the same
    keys_viewed = []  # indices already processed as the reference point num1
    to_remove = []  # indices judged equivalent to an earlier point
    for x in range(0, len(voronoi_points)):
        key[x] = voronoi_points[x]
    for num1 in key.keys():
        print "voronoi point being analyzed:", num1
        similar_voronoi[num1] = []
        if num1 in to_remove:
            continue
        for num2 in key.keys():
            if num1 == num2:
                continue
            if num2 in keys_viewed or num2 in to_remove:
                continue
            # two points are "the same" when their anion coordination shells
            # match exactly (check_match) or in at least num_matches members
            # (check_matches)
            neighbors1 = get_anion_neighbors(key[num1], structure, radius, anions)
            neighbors2 = get_anion_neighbors(key[num2], structure, radius, anions)
            if check_match(neighbors1, neighbors2) or check_matches(neighbors1, neighbors2, num_matches):
                similar_voronoi[num1].append(num2)
                to_remove.append(num2)
            else:
                # No direct match: if num1 sits near a unit-cell boundary
                # (fractional coordinate within 0.1 of 0 or 1), build periodic
                # images of it shifted by one lattice vector and compare those
                # against num2 as well, so duplicates across the boundary are
                # still caught.
                # PeriodicSite(atoms_n_occu, coords, lattice)
                x = 0
                y = 0
                z = 0
                reflections = []
                to_reflect = key[num1]
                # choose the shift direction per axis: +1 near 0, -1 near 1
                if 0.0 <= to_reflect.frac_coords[0] <= 0.1:
                    x = 1
                if 0.9 <= to_reflect.frac_coords[0] <= 1.0:
                    x = -1
                if 0.0 <= to_reflect.frac_coords[1] <= 0.1:
                    y = 1
                if 0.9 <= to_reflect.frac_coords[1] <= 1.0:
                    y = -1
                if 0.0 <= to_reflect.frac_coords[2] <= 0.1:
                    z = 1
                if 0.9 <= to_reflect.frac_coords[2] <= 1.0:
                    z = -1
                # build images for the face, edge and corner combinations of
                # the non-zero offsets
                if not x == 0:
                    reflections.append(PeriodicSite(to_reflect.species_string, to_reflect.frac_coords + [x*1.0, 0, 0],
                                                    to_reflect.lattice))
                    if not y == 0:
                        reflections.append(PeriodicSite(to_reflect.species_string,
                                                        to_reflect.frac_coords + [x*1.0, y*1.0, 0], to_reflect.lattice))
                    if not z == 0:
                        reflections.append(PeriodicSite(to_reflect.species_string,
                                                        to_reflect.frac_coords + [x*1.0, 0, z*1.0], to_reflect.lattice))
                if not y == 0:
                    reflections.append(PeriodicSite(to_reflect.species_string, to_reflect.frac_coords + [0, y*1.0, 0],
                                                    to_reflect.lattice))
                    if not z == 0:
                        reflections.append(PeriodicSite(to_reflect.species_string,
                                                        to_reflect.frac_coords + [0, y*1.0, z*1.0], to_reflect.lattice))
                if not z == 0:
                    reflections.append(PeriodicSite(to_reflect.species_string, to_reflect.frac_coords + [0, 0, z*1.0],
                                                    to_reflect.lattice))
                # NOTE(review): this corner image is added whenever x and y
                # are non-zero even if z == 0, which then duplicates the
                # (x, y, 0) image above; it looks like the condition should
                # also require z != 0 -- confirm before changing.
                if not x == 0 and not y == 0:
                    reflections.append(PeriodicSite(to_reflect.species_string,
                                                    to_reflect.frac_coords + [x*1.0, y*1.0, z*1.0],
                                                    to_reflect.lattice))
                if len(reflections) > 0:
                    # temporarily extend the structure with the periodic images
                    # so get_anion_neighbors can see their coordination shells
                    temp_species = []
                    temp_frac_coords = []
                    for site in structure.sites:
                        temp_species.append(site.species_string)
                        temp_frac_coords.append(site.frac_coords)
                    for reflection in reflections:
                        temp_species.append(reflection.species_string)
                        temp_frac_coords.append(reflection.frac_coords)
                    temp_struct = Structure(structure.lattice, temp_species, temp_frac_coords)
                    for reflection in reflections:
                        temp_neighbors1 = get_anion_neighbors(reflection, temp_struct, radius, anions)
                        if check_match(temp_neighbors1, neighbors2) or \
                                check_matches(temp_neighbors1, neighbors2, num_matches):
                            similar_voronoi[num1].append(num2)
                            to_remove.append(num2)
        keys_viewed.append(num1)
    # prune the equivalent points; voronois_copy aliases (does not copy) the
    # input list, so the caller's list is mutated
    voronois_copy = voronoi_points
    for item in to_remove:
        if key[item] in voronois_copy:
            voronois_copy.remove(key[item])
    return voronois_copy
def step2(voronoi_points, structure, radius, anions, voronoi_point, min_neighbors):
    """
    Remove sites with Voronoi points that sit on the bonds between anions
    :param voronoi_points: (List of Sites) list of sites with candidate Voronoi points
    :param structure: (Structure) target structure
    :param radius: (float) distance to which we check for anions
    :param anions: (List of Strings) list of species which we consider anions
    :param voronoi_point: (String) species string that marks Voronoi points
    :param min_neighbors: (int) minimum number of anion neighbors a point needs to be kept
    :return: the (mutated in place) input list with rejected points removed
    """
    big_cations = ['K', 'Ba', 'Sr']
    rejected = []
    for candidate in voronoi_points:
        for nbr in structure.get_neighbors(candidate, radius):
            species = nbr[0].species_string
            # only real cation neighbors inside the cutoff are of interest
            if species in anions or species == voronoi_point or nbr[1] >= radius:
                continue
            cv_distance = nbr[1]
            if species in big_cations:
                # big cations look for their anions out to a fixed 3.5 shell
                # and use the cation-anion distance itself as the threshold
                cation_anion_distances = [n2[1] for n2 in structure.get_neighbors(nbr[0], 3.5)
                                          if n2[0].species_string in anions and n2[1] < radius]
                threshold_factor = 1.0
            else:
                cation_anion_distances = [n2[1] for n2 in structure.get_neighbors(nbr[0], radius)
                                          if n2[0].species_string in anions and n2[1] < radius]
                threshold_factor = 1.1 * (2 ** 0.5) * 0.5
            # I can see a small area within which this might not work, but I
            # don't think that area happens in actual materials
            for co_distance in cation_anion_distances:
                if cv_distance <= co_distance * threshold_factor:
                    rejected.append(candidate)
    # also reject points left with too few anion neighbors
    for candidate in voronoi_points:
        if len(get_anion_neighbors(candidate, structure, radius, anions)) < min_neighbors:
            rejected.append(candidate)
    # trim the caller's list in place (rejected may hold duplicates, hence
    # the membership guard)
    remaining = voronoi_points
    for item in rejected:
        if item in remaining:
            remaining.remove(item)
    return remaining
if __name__ == '__main__':
"""
voronoipoint = 'Rn'
radius = 3.0
minneighbors = 4
anions = ['O2-', 'O', 'F-', 'F', 'Cl-', 'Cl', 'I-', 'I', 'Br-', 'Br', 'S2-', 'S', 'N', 'N3-']
numMatches = 4
print "accessing structure with voronois points"
task_ids = []
toexclude = []
allstructures = {}
taskidsList = open('structures_with_voronoi/taskid.txt', 'r')
for line in taskidsList:
splits = line.split("\t")
for task_id in splits:
task_ids.append(int(task_id.strip().replace(".cif", "")))
print task_ids
print len(task_ids)
print "Loading structure files"
counter = 1
for id in task_ids:
print "Loading file", counter
allstructures[id] = Structure.from_file("structures_with_voronoi/%s.cif"%(id))
counter += 1
structuresWithVoronoi = {}
counter = 1
for id in allstructures.keys():
print counter, ":", id
structure = allstructures[id]
# find all voronoi sites in the structure
print "splitting sites into voronoi and non-voronoi"
allvoronois, othersites = splitSites(structure)
# process potential empty polyhedral center sites
print "processing structure, step 1"
voronois = step1(allvoronois, structure)
# create intermediate structure2
print "creating structure2"
structure2 = createStructure(voronois, othersites, structure)
structure2.to(filename="structures_completed/%s_2.cif" % id)
if len(voronois) <= 1000:
#counter += 1
continue
print "processing structure, step 2 (may take a few minutes)"
# find polyhedral of voronoi, organizing them by pairing those with the same set of peripheral anions
# match each voronoi point in voronois to a number - dictionary
# create a method like getCationPolyhedral that finds the polyhedral for the specific site
# check polyhedral in the same way
# use the number from 2nd step to choose which sites to remove
voronois = step3(voronois, structure2, radius, anions)
print "creating structure3"
structure3 = createStructure(voronois, othersites, structure2)
structure3.to(filename="structures_completed/%s_3.cif"%(id))
print "processing structure, step 3"
# add criterion based on C-V and C-O distances
# if C-O < 2.5, if C-V distance > C-O distance, then remove
# for cation closest to V with all nearby oxygen
voronois = step2(voronois, structure3, radius, anions)
print "creating structure4"
structure4 = createStructure(voronois, othersites, structure3)
structure4.to(filename="structures_completed/%s.cif"%(id))
structuresWithVoronoi[id] = structure4
counter += 1
"""
"""
structure1 = Structure.from_file("LiCoO2.cif")
structure2 = add_voronoi(structure1)
voronoi_species = 'Rn'
max_radius = 2.5
min_num_neighbors = 4
anions_list = | |
the Tcl/Tk libraries
przerwij
# Now check dla the header files
jeżeli tklib oraz tcllib:
# Check dla the include files on Debian oraz {Free,Open}BSD, where
# they're put w /usr/include/{tcl,tk}X.Y
dotversion = version
jeżeli '.' nie w dotversion oraz "bsd" w host_platform.lower():
# OpenBSD oraz FreeBSD use Tcl/Tk library names like libtcl83.a,
# but the include subdirs are named like .../include/tcl8.3.
dotversion = dotversion[:-1] + '.' + dotversion[-1]
tcl_include_sub = []
tk_include_sub = []
dla dir w inc_dirs:
tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
tk_include_sub += [dir + os.sep + "tk" + dotversion]
tk_include_sub += tcl_include_sub
tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)
jeżeli (tcllib jest Nic albo tklib jest Nic albo
tcl_includes jest Nic albo tk_includes jest Nic):
self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
zwróć
# OK... everything seems to be present dla Tcl/Tk.
include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
dla dir w tcl_includes + tk_includes:
jeżeli dir nie w include_dirs:
include_dirs.append(dir)
# Check dla various platform-specific directories
jeżeli host_platform == 'sunos5':
include_dirs.append('/usr/openwin/include')
added_lib_dirs.append('/usr/openwin/lib')
albo_inaczej os.path.exists('/usr/X11R6/include'):
include_dirs.append('/usr/X11R6/include')
added_lib_dirs.append('/usr/X11R6/lib64')
added_lib_dirs.append('/usr/X11R6/lib')
albo_inaczej os.path.exists('/usr/X11R5/include'):
include_dirs.append('/usr/X11R5/include')
added_lib_dirs.append('/usr/X11R5/lib')
inaczej:
# Assume default location dla X11
include_dirs.append('/usr/X11/include')
added_lib_dirs.append('/usr/X11/lib')
# If Cygwin, then verify that X jest installed before proceeding
jeżeli host_platform == 'cygwin':
x11_inc = find_file('X11/Xlib.h', [], include_dirs)
jeżeli x11_inc jest Nic:
zwróć
# Check dla BLT extension
jeżeli self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT8.0'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT8.0')
albo_inaczej self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT')
# Add the Tcl/Tk libraries
libs.append('tk'+ version)
libs.append('tcl'+ version)
jeżeli host_platform w ['aix3', 'aix4']:
libs.append('ld')
# Finally, link przy the X11 libraries (nie appropriate on cygwin)
jeżeli host_platform != "cygwin":
libs.append('X11')
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)] + defs,
include_dirs = include_dirs,
libraries = libs,
library_dirs = added_lib_dirs,
)
self.extensions.append(ext)
# XXX handle these, but how to detect?
# *** Uncomment oraz edit dla PIL (TkImaging) extension only:
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
# *** Uncomment oraz edit dla TOGL extension only:
# -DWITH_TOGL togl.c \
# *** Uncomment these dla TOGL extension only:
# -lGL -lGLU -lXext -lXmu \
    def configure_ctypes_darwin(self, ext):
        # NOTE(review): control-flow keywords in this file are machine-
        # translated to Polish (dla=for, w=in, zwróć=return, Prawda=True);
        # the code is preserved as-is.
        # Darwin (OS X) uses preconfigured libffi files from
        # the Modules/_ctypes/libffi_osx directory instead of running the
        # libffi configure step.
        srcdir = sysconfig.get_config_var('srcdir')
        ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                     '_ctypes', 'libffi_osx'))
        sources = [os.path.join(ffi_srcdir, p)
                   dla p w ['ffi.c',
                            'x86/darwin64.S',
                            'x86/x86-darwin.S',
                            'x86/x86-ffi_darwin.c',
                            'x86/x86-ffi64.c',
                            'powerpc/ppc-darwin.S',
                            'powerpc/ppc-darwin_closure.S',
                            'powerpc/ppc-ffi_darwin.c',
                            'powerpc/ppc64-darwin_closure.S',
                            ]]
        # Add .S (preprocessed assembly) to C compiler source extensions.
        self.compiler.src_extensions.append('.S')
        include_dirs = [os.path.join(ffi_srcdir, 'include'),
                        os.path.join(ffi_srcdir, 'powerpc')]
        ext.include_dirs.extend(include_dirs)
        ext.sources.extend(sources)
        # always reports success ("Prawda" == True)
        zwróć Prawda
    def configure_ctypes(self, ext):
        # NOTE(review): keywords are machine-translated Polish (jeżeli=if,
        # nie=not, albo=or, z ... zaimportuj=from ... import,
        # przy ... jako=with ... as, zwróć=return); code preserved as-is.
        # Configure the bundled libffi (unless the system copy is used) and
        # fold the resulting flags and sources into the _ctypes extension.
        jeżeli nie self.use_system_libffi:
            jeżeli host_platform == 'darwin':
                zwróć self.configure_ctypes_darwin(ext)
            srcdir = sysconfig.get_config_var('srcdir')
            ffi_builddir = os.path.join(self.build_temp, 'libffi')
            ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                         '_ctypes', 'libffi'))
            ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')
            z distutils.dep_util zaimportuj newer_group
            config_sources = [os.path.join(ffi_srcdir, fname)
                              dla fname w os.listdir(ffi_srcdir)
                              jeżeli os.path.isfile(os.path.join(ffi_srcdir, fname))]
            # rerun libffi's configure only when its sources are newer than
            # the generated fficonfig.py (or when --force was given)
            jeżeli self.force albo newer_group(config_sources,
                                     ffi_configfile):
                z distutils.dir_util zaimportuj mkpath
                mkpath(ffi_builddir)
                config_args = [arg dla arg w sysconfig.get_config_var("CONFIG_ARGS").split()
                               jeżeli (('--host=' w arg) albo ('--build=' w arg))]
                jeżeli nie self.verbose:
                    config_args.append("-q")
                # Pass empty CFLAGS because we'll just append the resulting
                # CFLAGS to Python's; -g or -O2 is to be avoided.
                cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
                      % (ffi_builddir, ffi_srcdir, " ".join(config_args))
                res = os.system(cmd)
                jeżeli res albo nie os.path.exists(ffi_configfile):
                    print("Failed to configure _ctypes module")
                    zwróć Nieprawda
            fficonfig = {}
            # fficonfig.py is generated by libffi's configure; executing it
            # populates the fficonfig dict with flag and source lists
            przy open(ffi_configfile) jako f:
                exec(f.read(), globals(), fficonfig)
            # Add .S (preprocessed assembly) to C compiler source extensions.
            self.compiler.src_extensions.append('.S')
            include_dirs = [os.path.join(ffi_builddir, 'include'),
                            ffi_builddir,
                            os.path.join(ffi_srcdir, 'src')]
            extra_compile_args = fficonfig['ffi_cflags'].split()
            ext.sources.extend(os.path.join(ffi_srcdir, f) dla f w
                               fficonfig['ffi_sources'])
            ext.include_dirs.extend(include_dirs)
            ext.extra_compile_args.extend(extra_compile_args)
        zwróć Prawda
    def detect_ctypes(self, inc_dirs, lib_dirs):
        # NOTE(review): keywords are machine-translated Polish (jeżeli=if,
        # albo_inaczej=elif, jest=is, Nic=None, dopóki=while, przerwij=break,
        # Nieprawda/Prawda=False/True); code preserved as-is.
        # Register the _ctypes and _ctypes_test extensions, preferring a
        # system libffi when --with-system-ffi was configured.
        self.use_system_libffi = Nieprawda
        include_dirs = []
        extra_compile_args = []
        extra_link_args = []
        sources = ['_ctypes/_ctypes.c',
                   '_ctypes/callbacks.c',
                   '_ctypes/callproc.c',
                   '_ctypes/stgdict.c',
                   '_ctypes/cfield.c']
        depends = ['_ctypes/ctypes.h']
        jeżeli host_platform == 'darwin':
            sources.append('_ctypes/malloc_closure.c')
            sources.append('_ctypes/darwin/dlfcn_simple.c')
            extra_compile_args.append('-DMACOSX')
            include_dirs.append('_ctypes/darwin')
            # XXX Is this still needed?
            ## extra_link_args.extend(['-read_only_relocs', 'warning'])
        albo_inaczej host_platform == 'sunos5':
            # XXX This shouldn't be necessary; it appears that some
            # of the assembler code is non-PIC (i.e. it has relocations
            # when it shouldn't. The proper fix would be to rewrite
            # the assembler code to be PIC.
            # This only works with GCC; the Sun compiler likely refuses
            # this option. If you want to compile ctypes with the Sun
            # compiler, please research a proper solution, instead of
            # finding some -z option for the Sun compiler.
            extra_link_args.append('-mimpure-text')
        albo_inaczej host_platform.startswith('hp-ux'):
            extra_link_args.append('-fPIC')
        ext = Extension('_ctypes',
                        include_dirs=include_dirs,
                        extra_compile_args=extra_compile_args,
                        extra_link_args=extra_link_args,
                        libraries=[],
                        sources=sources,
                        depends=depends)
        ext_test = Extension('_ctypes_test',
                             sources=['_ctypes/_ctypes_test.c'])
        self.extensions.extend([ext, ext_test])
        jeżeli nie '--with-system-ffi' w sysconfig.get_config_var("CONFIG_ARGS"):
            zwróć
        jeżeli host_platform == 'darwin':
            # OS X 10.5 comes with libffi.dylib; the include files are
            # in /usr/include/ffi
            inc_dirs.append('/usr/include/ffi')
        ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
        jeżeli nie ffi_inc albo ffi_inc[0] == '':
            ffi_inc = find_file('ffi.h', [], inc_dirs)
        jeżeli ffi_inc jest nie Nic:
            ffi_h = ffi_inc[0] + '/ffi.h'
            # scan ffi.h for its guard macro to confirm the header is usable
            przy open(ffi_h) jako fp:
                dopóki 1:
                    line = fp.readline()
                    jeżeli nie line:
                        ffi_inc = Nic
                        przerwij
                    jeżeli line.startswith('#define LIBFFI_H'):
                        przerwij
        ffi_lib = Nic
        jeżeli ffi_inc jest nie Nic:
            dla lib_name w ('ffi_convenience', 'ffi_pic', 'ffi'):
                jeżeli (self.compiler.find_library_file(lib_dirs, lib_name)):
                    ffi_lib = lib_name
                    przerwij
        jeżeli ffi_inc oraz ffi_lib:
            ext.include_dirs.extend(ffi_inc)
            ext.libraries.append(ffi_lib)
            self.use_system_libffi = Prawda
    def _decimal_ext(self):
        # NOTE(review): keywords are machine-translated Polish (jeżeli=if,
        # albo_inaczej=elif, inaczej=else, oraz=and, albo=or, nie=not, w=in,
        # podnieś=raise, zwróć=return); code preserved as-is.
        # Build the _decimal extension, either against a system libmpdec or
        # against the bundled Modules/_decimal/libmpdec sources, picking the
        # machine configuration from the host toolchain.
        extra_compile_args = []
        undef_macros = []
        jeżeli '--with-system-libmpdec' w sysconfig.get_config_var("CONFIG_ARGS"):
            include_dirs = []
            libraries = [':libmpdec.so.2']
            sources = ['_decimal/_decimal.c']
            depends = ['_decimal/docstrings.h']
        inaczej:
            srcdir = sysconfig.get_config_var('srcdir')
            include_dirs = [os.path.abspath(os.path.join(srcdir,
                                                         'Modules',
                                                         '_decimal',
                                                         'libmpdec'))]
            libraries = []
            sources = [
                '_decimal/_decimal.c',
                '_decimal/libmpdec/basearith.c',
                '_decimal/libmpdec/constants.c',
                '_decimal/libmpdec/context.c',
                '_decimal/libmpdec/convolute.c',
                '_decimal/libmpdec/crt.c',
                '_decimal/libmpdec/difradix2.c',
                '_decimal/libmpdec/fnt.c',
                '_decimal/libmpdec/fourstep.c',
                '_decimal/libmpdec/io.c',
                '_decimal/libmpdec/memory.c',
                '_decimal/libmpdec/mpdecimal.c',
                '_decimal/libmpdec/numbertheory.c',
                '_decimal/libmpdec/sixstep.c',
                '_decimal/libmpdec/transpose.c',
                ]
            depends = [
                '_decimal/docstrings.h',
                '_decimal/libmpdec/basearith.h',
                '_decimal/libmpdec/bits.h',
                '_decimal/libmpdec/constants.h',
                '_decimal/libmpdec/convolute.h',
                '_decimal/libmpdec/crt.h',
                '_decimal/libmpdec/difradix2.h',
                '_decimal/libmpdec/fnt.h',
                '_decimal/libmpdec/fourstep.h',
                '_decimal/libmpdec/io.h',
                '_decimal/libmpdec/memory.h',
                '_decimal/libmpdec/mpdecimal.h',
                '_decimal/libmpdec/numbertheory.h',
                '_decimal/libmpdec/sixstep.h',
                '_decimal/libmpdec/transpose.h',
                '_decimal/libmpdec/typearith.h',
                '_decimal/libmpdec/umodarith.h',
                ]
        # per-machine macro sets for libmpdec's configuration
        config = {
            'x64': [('CONFIG_64','1'), ('ASM','1')],
            'uint128': [('CONFIG_64','1'), ('ANSI','1'), ('HAVE_UINT128_T','1')],
            'ansi64': [('CONFIG_64','1'), ('ANSI','1')],
            'ppro': [('CONFIG_32','1'), ('PPRO','1'), ('ASM','1')],
            'ansi32': [('CONFIG_32','1'), ('ANSI','1')],
            'ansi-legacy': [('CONFIG_32','1'), ('ANSI','1'),
                            ('LEGACY_COMPILER','1')],
            'universal': [('UNIVERSAL','1')]
        }
        cc = sysconfig.get_config_var('CC')
        sizeof_size_t = sysconfig.get_config_var('SIZEOF_SIZE_T')
        machine = os.environ.get('PYTHON_DECIMAL_WITH_MACHINE')
        jeżeli machine:
            # Override automatic configuration to facilitate testing.
            define_macros = config[machine]
        albo_inaczej host_platform == 'darwin':
            # Universal here means: build with the same options Python
            # was built with.
            define_macros = config['universal']
        albo_inaczej sizeof_size_t == 8:
            jeżeli sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X64'):
                define_macros = config['x64']
            albo_inaczej sysconfig.get_config_var('HAVE_GCC_UINT128_T'):
                define_macros = config['uint128']
            inaczej:
                define_macros = config['ansi64']
        albo_inaczej sizeof_size_t == 4:
            ppro = sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X87')
            jeżeli ppro oraz ('gcc' w cc albo 'clang' w cc) oraz \
                nie 'sunos' w host_platform:
                # solaris: problems with register allocation.
                # icc >= 11.0 works as well.
                define_macros = config['ppro']
                extra_compile_args.append('-Wno-unknown-pragmas')
            inaczej:
                define_macros = config['ansi32']
        inaczej:
            podnieś DistutilsError("_decimal: unsupported architecture")
        # Workarounds for toolchain bugs:
        jeżeli sysconfig.get_config_var('HAVE_IPA_PURE_CONST_BUG'):
            # Some versions of gcc miscompile inline asm:
            # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46491
            # http://gcc.gnu.org/ml/gcc/2010-11/msg00366.html
            extra_compile_args.append('-fno-ipa-pure-const')
        jeżeli sysconfig.get_config_var('HAVE_GLIBC_MEMMOVE_BUG'):
            # _FORTIFY_SOURCE wrappers for memmove and bcopy are incorrect:
            # http://sourceware.org/ml/libc-alpha/2010-12/msg00009.html
            undef_macros.append('_FORTIFY_SOURCE')
        # Faster version without thread local contexts:
        jeżeli nie sysconfig.get_config_var('WITH_THREAD'):
            define_macros.append(('WITHOUT_THREADS', 1))
        # Increase warning level for gcc:
        jeżeli 'gcc' w cc:
            # probe whether the compiler accepts the extra warning flags
            cmd = ("echo '' | %s -Wextra -Wno-missing-field-initializers -E - "
                   "> /dev/null 2>&1" % cc)
            ret = os.system(cmd)
            jeżeli ret >> 8 == 0:
                extra_compile_args.extend(['-Wextra',
                                           '-Wno-missing-field-initializers'])
        # Uncomment for extra functionality:
        #define_macros.append(('EXTRA_FUNCTIONALITY', 1))
        ext = Extension (
            '_decimal',
            include_dirs=include_dirs,
            libraries=libraries,
            define_macros=define_macros,
            undef_macros=undef_macros,
            extra_compile_args=extra_compile_args,
            sources=sources,
            depends=depends
        )
        zwróć ext
klasa PyBuildInstall(install):
    # NOTE(review): 'klasa' is machine-translated Polish for 'class';
    # code preserved as-is.
    # Suppress the warning about installation into the lib_dynload
    # directory, which is not in sys.path when running Python during
    # installation:
    def initialize_options (self):
        install.initialize_options(self)
        self.warn_dir=0
    # Customize subcommands to not install an egg-info file for Python
    sub_commands = [('install_lib', install.has_lib),
                    ('install_headers', install.has_headers),
                    ('install_scripts', install.has_scripts),
                    ('install_data', install.has_data)]
klasa PyBuildInstallLib(install_lib):
# Do exactly what install_lib does but make sure correct access modes get
# set on installed directories oraz files. All installed files przy get
# mode 644 unless they are a shared library w which case they will get
# mode 755. All installed directories will get mode 755.
# this jest works dla EXT_SUFFIX too, which ends przy SHLIB_SUFFIX
shlib_suffix = sysconfig.get_config_var("SHLIB_SUFFIX")
def install(self):
outfiles = install_lib.install(self)
self.set_file_modes(outfiles, 0o644, 0o755)
self.set_dir_modes(self.install_dir, 0o755)
zwróć outfiles
def set_file_modes(self, files, defaultMode, sharedLibMode):
jeżeli nie self.is_chmod_supported(): zwróć
jeżeli nie files: zwróć
| |
"""Callbacks used by the :mod:`responses` module for mocking out API requests."""
import json
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
from requests_toolbelt.multipart import decoder
from tests.constants import *
def get_content_disposition_name(headers):
    """Extract the field name from a Content-Disposition header.

    For a header like ``Content-Disposition: form-data; name="field"`` this
    returns ``field`` with the surrounding quotes stripped.
    """
    header_text = headers[b'Content-Disposition'].decode('utf-8')
    # second ';'-separated parameter is the name="..." pair
    name_param = header_text.split(';')[1]
    quoted_value = name_param.split('=')[1]
    return quoted_value.strip('"')
def generic_callback(request):
    """A callback to test the _generic_get and _generic_post methods."""
    form = parse_qs(getattr(request, 'body', None))
    form = {field: values[0] for field, values in form.items()}
    echoed = form.get('payload-test')
    # echo the payload-test field back, or False when it was not supplied
    resp_body = {
        'status': 1,
        'payload-test': echoed if echoed is not None else False,
    }
    return 200, list(), json.dumps(resp_body)
def messages_callback(request):
    """A callback to mock the `/messages.json` endpoint.

    Accepts either urlencoded or multipart request bodies, validates the
    fields the real API validates (message, token, user, and the
    expire/retry requirements for emergency priority 2), and returns a
    ``(status_code, headers, json_body)`` tuple for `responses`.
    """
    resp_body = {
        'request': TEST_REQUEST_ID
    }
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    if getattr(request, 'headers', {}).get('content-type') == 'application/x-www-form-urlencoded':
        req_body = getattr(request, 'body', None)
        qs = parse_qs(req_body)
        # parse_qs returns lists of values; keep only the first of each
        qs = {k: v[0] for k, v in qs.items()}
    else:
        # make this like MultipartDecoder.from_response expects
        request.content = getattr(request, 'body', None)
        multipart_data = decoder.MultipartDecoder.from_response(request)
        qs = {}
        for part in multipart_data.parts:
            header_name = get_content_disposition_name(part.headers)
            if header_name != 'attachment':
                qs[header_name] = part.text
            else:
                # the attachment part is kept as raw bytes
                qs[header_name] = part.content
    if qs.get('message') is None:
        resp_body['message'] = 'cannot be blank'
        resp_body['status'] = 0
        resp_body['errors'] = ['message cannot be blank']
    elif qs.get('token') != TEST_TOKEN:
        resp_body['token'] = 'invalid'
        resp_body['status'] = 0
        resp_body['errors'] = ['application token is invalid']
    elif qs.get('user') != TEST_USER and qs.get('user') != TEST_GROUP:  # allow TEST_USER or TEST_GROUP
        resp_body['user'] = 'invalid'
        resp_body['status'] = 0
        resp_body['errors'] = ['user identifier is not a valid user, group, or subscribed user key']
    elif qs.get('priority') in ('2', 2):
        # BUG FIX: form values arrive as strings, so the old `== 2`
        # comparison never matched and this whole branch was dead code.
        if qs.get('expire') is None:
            resp_body['expire'] = 'must be supplied with priority=2'
            resp_body['status'] = 0
            resp_body['errors'] = ['expire must be supplied with priority=2']
        elif qs.get('retry') is None:
            resp_body['retry'] = 'must be supplied with priority=2'
            resp_body['status'] = 0
            resp_body['errors'] = ['retry must be supplied with priority=2']
        else:
            # BUG FIX: status was never set on this path, which would have
            # raised a KeyError below once the branch became reachable.
            resp_body['status'] = 1
            resp_body['receipt'] = TEST_RECEIPT_ID
    else:
        resp_body['status'] = 1
    return 200 if resp_body['status'] == 1 else 400, headers, json.dumps(resp_body)
def sounds_callback(request):
    """A callback to mock the `/sounds.json` endpoint."""
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    resp_body = {'request': TEST_REQUEST_ID}
    # flatten parse_qs's value lists down to their first element
    params = {key: values[0] for key, values in parse_qs(getattr(request, 'body', None)).items()}
    if params.get('token') == TEST_TOKEN:
        resp_body['status'] = 1
        resp_body['sounds'] = SOUNDS
    else:
        resp_body['token'] = 'invalid'
        resp_body['status'] = 0
        resp_body['errors'] = ['application token is invalid']
    status_code = 200 if resp_body['status'] == 1 else 400
    return status_code, headers, json.dumps(resp_body)
def validate_callback(request):
    """A callback to mock the `/users/validate.json` endpoint.

    The application token is checked first; user/device validation only runs
    for a valid token.  (Previously the user checks ran unconditionally and
    could overwrite the invalid-token failure with a status=1 success.)
    """
    resp_body = {
        'request': TEST_REQUEST_ID
    }
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    req_body = getattr(request, 'body', None)
    qs = parse_qs(req_body)
    # parse_qs maps each key to a list; keep only the first value
    qs = {k: v[0] for k, v in qs.items()}
    if qs.get('token') != TEST_TOKEN:
        resp_body['token'] = 'invalid'
        resp_body['status'] = 0
        resp_body['errors'] = ['application token is invalid']
    else:
        user = qs.get('user')
        if user == TEST_USER:
            device = qs.get('device')
            if device and device.lower() not in TEST_DEVICES:
                resp_body['device'] = 'invalid for this user'
                resp_body['status'] = 0
                resp_body['errors'] = ['device name is not valid for this user']
            else:
                resp_body['status'] = 1
                resp_body['group'] = 0
                resp_body['devices'] = TEST_DEVICES
        elif user == TEST_GROUP:
            resp_body['status'] = 1
            resp_body['group'] = 1
            resp_body['devices'] = []
        else:
            resp_body['user'] = 'invalid'
            resp_body['status'] = 0
            resp_body['errors'] = ['user identifier is not a valid user, group, or subscribed user key']
    return 200 if resp_body['status'] == 1 else 400, headers, json.dumps(resp_body)
def receipt_callback(request):
    r"""A callback to mock the /receipts/{receipt}.json endpoint.
    Best used like so::
        url_re = re.compile(r'https://api\.pushover\.net/1/receipts/r[a-zA-Z0-9]*\.json')
        responses.add_callback(
            responses.GET,
            url_re,
            callback=receipt_callback,
            content_type='application/json'
        )
    in order to capture all calls to the endpoint.
    """
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    resp_body = {'request': TEST_REQUEST_ID}
    params = {key: values[0] for key, values in parse_qs(getattr(request, 'body', None)).items()}
    # the receipt id is embedded in a url of the form /1/receipts/{receipt}.json
    receipt_id = request.path_url.split('/')[-1].split('.')[0]
    if params.get('token') != TEST_TOKEN:
        resp_body.update(token='invalid', status=0, errors=['application token is invalid'])
    elif receipt_id != TEST_RECEIPT_ID:
        resp_body.update(receipt='not found', status=0,
                         errors=['receipt not found; may be invalid or expired'])
    else:
        resp_body.update(
            status=1,
            acknowledged=1,
            acknowledged_at=100,
            acknowledged_by=TEST_USER,
            acknowledged_by_device=TEST_DEVICES[0],
            last_delivered_at=100,
            expired=1,
            expires_at=100,
            called_back=0,
            called_back_at=100,
        )
    return (200 if resp_body['status'] == 1 else 400), headers, json.dumps(resp_body)
def receipt_cancel_callback(request):
    r"""A callback to mock the /receipts/{receipt}/cancel.json endpoint.
    Best used like so::
        url_re = re.compile(r'https://api\.pushover\.net/1/receipts/r[a-zA-Z0-9]*/cancel\.json')
        responses.add_callback(
            responses.GET,
            url_re,
            callback=receipt_cancel_callback,
            content_type='application/json'
        )
    in order to capture all calls to the endpoint.
    """
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    resp_body = {'request': TEST_REQUEST_ID}
    params = {key: values[0] for key, values in parse_qs(getattr(request, 'body', None)).items()}
    # the receipt id is embedded in a url of the form /1/receipts/{receipt}/cancel.json
    receipt_id = request.path_url.split('/')[-2]
    if params.get('token') != TEST_TOKEN:
        resp_body.update(token='invalid', status=0, errors=['application token is invalid'])
    elif receipt_id != TEST_RECEIPT_ID:
        resp_body.update(receipt='not found', status=0,
                         errors=['receipt not found; may be invalid or expired'])
    else:
        resp_body['status'] = 1
    return (200 if resp_body['status'] == 1 else 400), headers, json.dumps(resp_body)
def subscription_migrate_callback(request):
    """A callback to mock the `/subscriptions/migrate.json` endpoint."""
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    resp_body = {'request': TEST_REQUEST_ID}
    params = {key: values[0] for key, values in parse_qs(getattr(request, 'body', None)).items()}
    if params.get('token') != TEST_TOKEN:
        resp_body.update(token='invalid', status=0, errors=['application token is invalid'])
    elif params.get('subscription') != TEST_SUBSCRIPTION_CODE:
        resp_body.update(subscription='invalid', status=0, errors=['subscription token is invalid'])
    elif params.get('user') != TEST_USER:
        resp_body.update(user='is not a valid user', status=0,
                         errors=['user key is not valid for any active user'])
    else:
        resp_body.update(status=1, subscribed_user_key=TEST_SUBSCRIBED_USER_KEY)
    return (200 if resp_body['status'] == 1 else 400), headers, json.dumps(resp_body)
def groups_callback(request):
    """A callback to mock the `/groups/{group_key}.json` endpoint."""
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    resp_body = {'request': TEST_REQUEST_ID}
    params = {key: values[0] for key, values in parse_qs(getattr(request, 'body', None)).items()}
    # the group key is embedded in a url of the form /1/groups/{group_key}.json
    group_key = request.path_url.split('/')[-1].split('.')[0]
    if params.get('token') != TEST_TOKEN:
        resp_body.update(token='invalid', status=0, errors=['application token is invalid'])
    elif group_key != TEST_GROUP:
        resp_body.update(group='not found', status=0,
                         errors=['group not found or you are not authorized to edit it'])
    else:
        resp_body['status'] = 1
        resp_body['name'] = TEST_GROUP_NAME
        # one enabled membership entry per test device
        resp_body['users'] = [
            {'user': TEST_USER, 'device': device, 'memo': '', 'disabled': False}
            for device in TEST_DEVICES[:2]
        ]
    return (200 if resp_body['status'] == 1 else 400), headers, json.dumps(resp_body)
def groups_add_user_callback(request):
    """A callback to mock the `/groups/{group_id}/add_user.json` endpoint."""
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    resp_body = {'request': TEST_REQUEST_ID}
    params = {key: values[0] for key, values in parse_qs(getattr(request, 'body', None)).items()}
    # the group id is embedded in a url of the form /1/groups/{group_id}/add_user.json
    group_id = request.path_url.split('/')[-2]
    if params.get('token') != TEST_TOKEN:
        resp_body.update(token='invalid', status=0, errors=['application token is invalid'])
    elif group_id != TEST_GROUP:
        resp_body.update(group='not found', status=0,
                         errors=['group not found or you are not authorized to edit it'])
    elif params.get('user') != TEST_USER:
        resp_body.update(user='invalid', status=0, errors=['user key is invalid'])
    else:
        resp_body['status'] = 1
    return (200 if resp_body['status'] == 1 else 400), headers, json.dumps(resp_body)
def groups_delete_user_callback(request):
    """A callback to mock the `/groups/{group_id}/delete_user.json` endpoint."""
    headers = {'X-Request-Id': TEST_REQUEST_ID}
    resp_body = {'request': TEST_REQUEST_ID}
    params = {key: values[0] for key, values in parse_qs(getattr(request, 'body', None)).items()}
    # the group id is embedded in a url of the form /1/groups/{group_id}/delete_user.json
    group_id = request.path_url.split('/')[-2]
    if params.get('token') != TEST_TOKEN:
        resp_body.update(token='invalid', status=0, errors=['application token is invalid'])
    elif group_id != TEST_GROUP:
        resp_body.update(group='not found', status=0,
                         errors=['group not found or you are not authorized to edit it'])
    elif params.get('user') != TEST_USER:
        resp_body.update(user='invalid', status=0, errors=['user is not a member of this group'])
    else:
        resp_body['status'] = 1
    return (200 if resp_body['status'] == 1 else 400), headers, json.dumps(resp_body)
def groups_disable_user_callback(request):
"""A callback to mock the `/groups/{group_id}/disable_user.json` endpoint."""
resp_body = {
'request': TEST_REQUEST_ID
}
headers = {'X-Request-Id': TEST_REQUEST_ID}
req_body = getattr(request, 'body', None)
qs = parse_qs(req_body)
qs = {k: v[0] for k, v in qs.items()}
if qs.get('token') != TEST_TOKEN:
resp_body['token'] = 'invalid'
resp_body['status'] = 0
resp_body['errors'] = ['application token is invalid']
elif | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehuman.org/
**Code Home Page:** http://code.google.com/p/makehuman/
**Authors:** <NAME>
**Copyright(c):** MakeHuman Team 2001-2011
**Licensing:** GPL3 (see also http://sites.google.com/site/makehumandocs/licensing)
**Coding Standards:** See http://sites.google.com/site/makehumandocs/developers-guide
Abstract
--------
MakeHuman to Collada (MakeHuman eXchange format) exporter. Collada files can be loaded into
Blender by collada_import.py.
TO DO
"""
import module3d
import aljabr
import mh
import files3d
import mh2bvh
import os, time
import shutil
import mh2proxy
import export_config
import mhx_globals as the
import read_rig
#
# Size of end bones = 1 mm
#
Delta = [0,0.01,0]
#
# exportCollada(human, filename, options):
#
def exportCollada(human, filename, options):
    """Export *human* as a Collada (.dae) file.

    human:    the MakeHuman human object to export
    filename: output path without the .dae extension
    options:  dict of export options (rotate90X, rotate90Z, pngTexture,
              scale, ...) stashed in the global `the.Options`
    """
    time1 = time.clock()
    the.Config = export_config.exportConfig(human, True, [])
    the.Config.separatefolder = True
    the.Rotate90X = options["rotate90X"]
    the.Rotate90Z = options["rotate90Z"]
    the.Config.pngTexture = options["pngTexture"]
    the.Options = options
    outfile = export_config.getOutFileFolder(filename+".dae", the.Config)
    try:
        fp = open(outfile, 'w')
        print("Writing Collada file", outfile)
    except IOError:
        # fix: previously execution fell through after the message and
        # crashed on the undefined fp; also narrow the bare except
        print("Unable to open file for writing", outfile)
        return
    (name,ext) = os.path.splitext(os.path.basename(outfile))
    exportDae(human, name, fp)
    fp.close()
    time2 = time.clock()
    print("Wrote Collada file in %g s:" % (time2-time1), outfile)
    return
#
# findInHierarchy(bone, hier):
#
def findInHierarchy(bone, hier):
    """Depth-first search for *bone* in a hierarchy of (bone, children) pairs.

    Returns the matching (bone, children) pair, or [] when not found.
    """
    for pair in hier:
        (name, children) = pair
        if name == bone:
            return pair
        found = findInHierarchy(bone, children)
        if found:
            return found
    return []
#
# flatten(hier, bones):
#
def flatten(hier, bones):
    """Append every bone name in *hier* to *bones*, depth-first."""
    for pair in hier:
        bones.append(pair[0])
        flatten(pair[1], bones)
    return
#
#
#
def rotateLoc(loc, scale):
    """Scale *loc* and apply the global 90-degree X/Z rotations; returns (x, y, z)."""
    x = scale * loc[0]
    y = scale * loc[1]
    z = scale * loc[2]
    if the.Rotate90X:
        # rotate 90 degrees about X: (x, y, z) -> (x, -z, y)
        x, y, z = x, -z, y
    if the.Rotate90Z:
        # rotate 90 degrees about Z: (x, y, z) -> (-y, x, z)
        x, y, z = -y, x, z
    return (x, y, z)
#
# boneOK(flags, bone, parent):
#
# Bones that are re-attached to a different parent in the exported skeleton;
# maps bone name -> replacement parent name (see boneOK below).
Reparents = {
    'UpArm_L' : 'Clavicle_L',
    'UpArm_R' : 'Clavicle_R',
    'UpLeg_L' : 'Hip_L',
    'UpLeg_R' : 'Hip_R',
    'UpArmTwist_L' : 'Clavicle_L',
    'UpArmTwist_R' : 'Clavicle_R',
    'UpLegTwist_L' : 'Hip_L',
    'UpLegTwist_R' : 'Hip_R',
}
# Twist bones whose vertex weights are folded into the mapped master bone
# (see fixTwistWeights); boneOK returns None for them, so they are skipped.
TwistBones = {
    'UpArmTwist_L' : 'UpArm_L',
    'UpArmTwist_R' : 'UpArm_R',
    'LoArmTwist_L' : 'LoArm_L',
    'LoArmTwist_R' : 'LoArm_R',
    'UpLegTwist_L' : 'UpLeg_L',
    'UpLegTwist_R' : 'UpLeg_R',
}
# Bones omitted from the export entirely (boneOK returns None for these).
SkipBones = [ 'Rib_L', 'Rib_R', 'Stomach_L', 'Stomach_R', 'Scapula_L', 'Scapula_R']
def boneOK(flags, bone, parent):
    """Return the export parent for *bone*, or None when the bone is dropped.

    The root bone gets the literal string 'None'; twist and skip bones are
    dropped; reparented bones get their mapped parent; deform bones (F_DEF
    flag) and the special 'HipsInv' bone keep their original parent.
    """
    if bone == the.Root:
        return 'None'
    if bone in TwistBones or bone in SkipBones:
        return None
    if bone in Reparents:
        return Reparents[bone]
    if (flags & F_DEF) or bone == 'HipsInv':
        return parent
    return None
#
# readSkinWeights(weights, tmplName):
#
# VertexGroup Breathe
# wv 2543 0.148938 ;
#
def readSkinWeights(weights, tmplName):
    """Parse vertex-group weights from the template file *tmplName* into *weights*.

    File format (see module comment above)::
        VertexGroup <name>
        wv <vertex-index> <weight> ;

    Each group name is mapped to a list of (vertex-index, weight) string pairs.
    Prints a message and returns without raising when the file cannot be opened.
    """
    try:
        # 'r' instead of the removed 'rU' mode; the original `tmpl == None`
        # test after open() was dead code because open() raises on failure
        tmpl = open(tmplName, "r")
    except IOError:
        print("Cannot open template "+tmplName)
        return
    grp = None  # current vertex-group list; set by the first VertexGroup line
    for line in tmpl:
        lineSplit = line.split()
        if len(lineSplit) == 0:
            pass
        elif lineSplit[0] == 'VertexGroup':
            grp = []
            weights[lineSplit[1]] = grp
        elif lineSplit[0] == 'wv':
            grp.append((lineSplit[1], lineSplit[2]))
    tmpl.close()
    return
def fixTwistWeights(fp, weights):
    """Merge each twist bone's vertex weights into its master bone.

    For every (twist, master) pair in TwistBones, the two weight lists are
    concatenated and sorted so entries for the same vertex become adjacent;
    duplicated vertices are collapsed into one entry with the weights summed.
    The merged list replaces weights[master]; weights[twist] is left in place.
    A debug dump is written to *fp*.
    """
    for (twist, bone) in TwistBones.items():
        wts = weights[twist] + weights[bone]
        # entries are (vertex, weight) pairs; sorting groups duplicate vertices
        # NOTE(review): readSkinWeights stores vertices/weights as strings, so
        # this sorts lexicographically and w0+w1 would concatenate -- presumably
        # the weights here are numeric by this point; confirm against callers
        wts.sort()
        nwts = []
        n = 1
        weights[bone] = nwts
        while n < len(wts):
            (v0, w0) = wts[n-1]
            (v1, w1) = wts[n]
            if v0 == v1:
                # same vertex from both lists: emit one entry with summed weight
                nwts.append((v0, w0+w1))
                n += 2
            else:
                nwts.append((v0, w0))
                n += 1
        # NOTE(review): when the final element is not merged with its
        # predecessor, wts[-1] is never appended -- looks like an off-by-one;
        # confirm before relying on the last vertex's weight
        fp.write("\n%s\n%s\n%s\n" % (twist, weights[twist], weights[bone]))
    return
#
# writeBone(fp, bone, orig, extra, pad, stuff):
#
def writeBone(fp, bone, orig, extra, pad, stuff):
    """Recursively write the joint <node> for *bone* (a (name, children) pair),
    translating each head relative to its parent's head position *orig*."""
    (name, children) = bone
    head = stuff.rigHead[name]
    offset = aljabr.vsub(head, orig)
    printNode(fp, name, offset, extra, pad)
    for child in children:
        writeBone(fp, child, head, '', pad+' ', stuff)
    fp.write('\n%s </node>' % pad)
    return
def printNode(fp, name, vec, extra, pad):
    """Write a single Collada JOINT <node> element for *name* at offset *vec*."""
    # print(name, vec)
    if name:
        sidStr = 'sid="%s"' % name
        identStr = 'id="%s" name="%s"' % (name, name)
    else:
        sidStr = ''
        identStr = ''
    fp.write('\n')
    fp.write('%s <node %s %s type="JOINT" %s>\n' % (pad, extra, sidStr, identStr))
    fp.write('%s <translate sid="translate"> ' % pad)
    # unit is unused here; unpacked to avoid shadowing the name parameter
    (scale, unit) = the.Options["scale"]
    x, y, z = rotateLoc(vec, scale)
    fp.write("%.4f %.4f %.4f " % (x, y, z))
    fp.write('</translate>\n')
    fp.write('%s <rotate sid="rotateZ">0 0 1 0.0</rotate>\n' % pad)
    fp.write('%s <rotate sid="rotateY">0 1 0 0.0</rotate>\n' % pad)
    fp.write('%s <rotate sid="rotateX">1 0 0 0.0</rotate>\n' % pad)
    fp.write('%s <scale sid="scale">1.0 1.0 1.0</scale>' % pad)
#
# getArmatureFromRigFile(fileName, obj):
#
def getArmatureFromRigFile(fileName, obj):
    """Read a .rig file and build the armature data for *obj*.

    Returns (heads, tails, hierarchy, bones, weights) where hierarchy is a
    nested list of (bone, children) pairs, bones is the flattened bone list,
    and heads/tails map bone names to positions.  Sets the.Root to the first
    parentless bone.  Raises NameError when a bone's parent is missing or no
    root bone exists.
    """
    (locations, armature, weights) = read_rig.readRigFile(fileName, obj)
    hier = []
    heads = {}
    tails = {}
    the.Root = None
    for (bone, head, tail, roll, parent, options) in armature:
        heads[bone] = head
        tails[bone] = tail
        if parent == '-':
            # parentless bone: a new top-level entry in the hierarchy
            hier.append((bone, []))
            if not the.Root:
                the.Root = bone
        else:
            parHier = findInHierarchy(parent, hier)
            if not parHier:
                # fix: previously a bare except around the tuple unpack hid
                # the actual failure mode (findInHierarchy returning [])
                raise NameError("Did not find %s parent %s" % (bone, parent))
            (p, children) = parHier
            children.append((bone, []))
    if not the.Root:
        raise NameError("No root bone found in rig file %s" % fileName)
    # newHier = addInvBones(hier, heads, tails)
    newHier = hier
    bones = []
    flatten(newHier, bones)
    return (heads, tails, newHier, bones, weights)
#
# addInvBones(hier, heads, tails):
#
def addInvBones(hier, heads, tails):
    """Insert an inverted end bone ("<bone>Inv") wherever a bone's tail does
    not coincide with its single child's head, keeping bone chains connected.

    Mutates *heads*/*tails* with the inserted bones and returns the rebuilt
    hierarchy of (bone, children) pairs.
    """
    newHier = []
    for (bone, children) in hier:
        newChildren = addInvBones(children, heads, tails)
        n = len(children)
        if n == 1:
            (child, subChildren) = children[0]
            # fix: vsub/vlen were called unqualified (NameError at runtime);
            # the module is imported as "import aljabr"
            offs = aljabr.vsub(tails[bone], heads[child])
        if n > 1 or (n == 1 and aljabr.vlen(offs) > 1e-4):
            boneInv = bone+"Inv"
            heads[boneInv] = tails[bone]
            #tails[boneInv] = heads[bone]
            tails[boneInv] = aljabr.vadd(tails[bone], Delta)
            newHier.append( (bone, [(boneInv, newChildren)]) )
        else:
            newHier.append( (bone, newChildren) )
    return newHier
#
# class CStuff
#
class CStuff:
    """Container for one exportable piece (human body or proxy mesh):
    armature data, mesh data, and material/texture settings."""

    def __init__(self, name, proxy):
        """Name the stuff after *name*'s basename; when *proxy* is given,
        copy its type/material/texture."""
        self.name = os.path.basename(name)
        # everything else starts unset until setBones/setMesh are called
        for attr in ('type', 'bones', 'rawWeights', 'verts', 'vnormals',
                     'uvValues', 'faces', 'weights', 'targets',
                     'vertexWeights', 'skinWeights', 'material', 'texture'):
            setattr(self, attr, None)
        if proxy:
            self.type = proxy.type
            self.material = proxy.material
            self.texture = proxy.texture

    def __repr__(self):
        return "<CStuff %s %s mat %s tex %s>" % (self.name, self.type, self.material, self.texture)

    def setBones(self, amt):
        """Attach armature data from a (heads, tails, hier, bones, weights) tuple."""
        (self.rigHead, self.rigTail, self.rigHier,
         self.bones, self.rawWeights) = amt

    def copyBones(self, rig):
        """Share the armature data already attached to another CStuff."""
        for attr in ('rigHead', 'rigTail', 'rigHier', 'bones', 'rawWeights'):
            setattr(self, attr, getattr(rig, attr))

    def setMesh(self, mesh):
        """Attach mesh data from a (verts, vnormals, uvValues, faces, weights, targets) tuple."""
        (self.verts, self.vnormals, self.uvValues,
         self.faces, self.weights, self.targets) = mesh
        return
#
# filterMesh(mesh1, obj, groups, deleteVerts):
#
def filterMesh(mesh1, obj, deleteGroups, deleteVerts):
    """Strip helper/eyebrow/lash/deleted geometry from *mesh1*.

    mesh1 is a (verts, vnormals, uvValues, faces, weights, targets) tuple;
    *obj* supplies the vertices/faces/face groups, *deleteGroups* and
    *deleteVerts* name additional geometry to drop.  Returns a new mesh tuple
    with surviving vertices and uvs renumbered and faces, weights and morph
    targets remapped to the new indices.
    """
    (verts1, vnormals1, uvValues1, faces1, weights1, targets1) = mesh1
    # mark everything as kept initially
    killVerts = {}
    killUvs = {}
    killFaces = {}
    for v in obj.verts:
        killVerts[v.idx] = False
    for f in obj.faces:
        killFaces[f.idx] = False
        for vt in f.uv:
            killUvs[vt] = False
    for vn in deleteVerts:
        killVerts[vn] = True
    # kill whole face groups depending on the export options
    for fg in obj.faceGroups:
        if (((not the.Options["helpers"]) and
             (("joint" in fg.name) or ("helper" in fg.name))) or
            ((not the.Options["eyebrows"]) and
             (("eyebrown" in fg.name) or ("cornea" in fg.name))) or
            ((not the.Options["lashes"]) and
             ("lash" in fg.name)) or
            mh2proxy.deleteGroup(fg.name, deleteGroups)):
            print(" kill %s" % fg.name)
            for f in fg.faces:
                killFaces[f.idx] = True
                for v in f.verts:
                    killVerts[v.idx] = True
                for vt in f.uv:
                    killUvs[vt] = True
    # renumber surviving vertices; nv maps old index -> new index
    n = 0
    nv = {}
    verts2 = []
    for m,v in enumerate(verts1):
        if not killVerts[m]:
            verts2.append(v)
            nv[m] = n
            n += 1
    vnormals2 = []
    for m,vn in enumerate(vnormals1):
        if not killVerts[m]:
            vnormals2.append(vn)
    # renumber surviving uvs; nuv maps old index -> new index
    n = 0
    uvValues2 = []
    nuv = {}
    for m,uv in enumerate(uvValues1):
        if not killUvs[m]:
            uvValues2.append(uv)
            nuv[m] = n
            n += 1
    # rebuild faces with remapped vertex/uv indices
    faces2 = []
    for fn,f in enumerate(faces1):
        if not killFaces[fn]:
            f2 = []
            for c in f:
                v2 = nv[c[0]]
                uv2 = nuv[c[1]]
                f2.append([v2, uv2])
            faces2.append(f2)
    # remap bone weights to the new vertex numbering
    if weights1:
        weights2 = {}
        for (b, wts1) in weights1.items():
            wts2 = []
            for (v1,w) in wts1:
                if not killVerts[v1]:
                    wts2.append((nv[v1],w))
            weights2[b] = wts2
    else:
        weights2 = weights1
    # remap morph targets to the new vertex numbering
    if targets1:
        targets2 = []
        for (name, morphs1) in targets1:
            morphs2 = []
            for (v1,dx) in morphs1:
                if not killVerts[v1]:
                    morphs2.append((nv[v1],dx))
            # fix: append takes a single argument; the original two-argument
            # call raised TypeError whenever targets were present
            targets2.append((name, morphs2))
    else:
        targets2 = targets1
    return (verts2, vnormals2, uvValues2, faces2, weights2, targets2)
#
# exportDae(human, name, fp):
#
def exportDae(human, name, fp):
cfg = export_config.exportConfig(human, True)
obj = human.meshData
rigfile = "data/rigs/%s.rig" % the.Options["daerig"]
print("Using rig file %s" % rigfile)
amt = getArmatureFromRigFile(rigfile, obj)
#rawTargets = loadShapeKeys("shared/mhx/templates/shapekeys-facial25.mhx")
rawTargets = []
(the.Stuff, stuffs) = setupStuff(name, obj, amt, rawTargets, cfg)
date = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime())
if the.Rotate90X:
upaxis = 'Z_UP'
else:
upaxis = 'Y_UP'
(scale, unit) = the.Options["scale"]
fp.write('<?xml version="1.0" encoding="utf-8"?>\n' +
'<COLLADA version="1.4.0" xmlns="http://www.collada.org/2005/11/COLLADASchema">\n' +
' <asset>\n' +
' <contributor>\n' +
' <author>www.makehuman.org</author>\n' +
' </contributor>\n' +
' <created>%s</created>\n' % date +
' <modified>%s</modified>\n' % date +
' <unit meter="%.4f" name="%s"/>\n' % (0.1/scale, unit) +
' <up_axis>%s</up_axis>\n' % upaxis+
' </asset>\n' +
' <library_images>\n')
for stuff in stuffs:
writeImages(obj, fp, stuff, human)
fp.write(
' </library_images>\n' +
' <library_effects>\n')
for | |
<reponame>Wassaf-Shahzad/micromasters<filename>discussions/api_test.py
"""Tests for discussions API"""
# pylint: disable=redefined-outer-name
from django.core.exceptions import ImproperlyConfigured
from django.db.models.signals import post_save
from elasticsearch_dsl import Search
from factory.django import mute_signals
from open_discussions_api.constants import ROLE_STAFF
import pytest
from requests.exceptions import HTTPError
from requests import Response
from rest_framework import status as statuses
from courses.factories import ProgramFactory
from dashboard.factories import ProgramEnrollmentFactory
from discussions import api
from discussions.exceptions import (
ChannelAlreadyExistsException,
ChannelCreationException,
ContributorSyncException,
DiscussionUserSyncException,
ModeratorSyncException,
SubscriberSyncException,
)
from discussions.factories import (
ChannelFactory,
ChannelProgramFactory,
DiscussionUserFactory,
)
from discussions.models import (
Channel,
ChannelProgram,
DiscussionUser,
)
from profiles.factories import (
ProfileFactory,
UserFactory,
)
from roles.factories import RoleFactory
from roles.roles import Staff
from search.models import (
PercolateQuery,
PercolateQueryMembership,
)
# Marks applied to every test in this module: mock out Elasticsearch, use the
# mocked on_commit fixture (presumably stubbing transaction.on_commit --
# confirm in conftest), and enable database access.
pytestmark = [
    pytest.mark.usefixtures('mocked_elasticsearch'),
    pytest.mark.usefixtures('mocked_on_commit'),
    pytest.mark.django_db,
]
# pylint: disable=too-many-locals, unused-argument
@pytest.fixture
def mock_staff_client(mocker):
    """Fixture yielding the mocked staff client instance used by discussions.api"""
    patched_getter = mocker.patch('discussions.api.get_staff_client')
    return patched_getter.return_value
@pytest.mark.parametrize("secret, base_url, username", [
    (None, 'base_url', 'username'),
    ('secret', None, 'username'),
    ('secret', 'base_url', None),
])
def test_get_staff_client_config_errors(settings, secret, base_url, username):
    """Assert that get_staff_client raises config errors when any setting is missing"""
    for setting_name, value in (
            ('OPEN_DISCUSSIONS_JWT_SECRET', secret),
            ('OPEN_DISCUSSIONS_BASE_URL', base_url),
            ('OPEN_DISCUSSIONS_API_USERNAME', username),
    ):
        setattr(settings, setting_name, value)
    with pytest.raises(ImproperlyConfigured):
        api.get_staff_client()
def test_get_staff_client_config_valid(settings):
    """Test that get_staff_client returns a configured client"""
    settings.OPEN_DISCUSSIONS_JWT_SECRET = 'secret'
    settings.OPEN_DISCUSSIONS_BASE_URL = 'base_url'
    settings.OPEN_DISCUSSIONS_API_USERNAME = 'username'
    client = api.get_staff_client()
    assert client.roles == [ROLE_STAFF]
def test_create_or_update_discussion_user_no_username(mocker):
    """Test that create_or_update_discussion_user creates if we don't have a username"""
    mocked_create = mocker.patch('discussions.api.create_discussion_user')
    mocked_update = mocker.patch('discussions.api.update_discussion_user')
    with mute_signals(post_save):
        profile = ProfileFactory.create()
    assert DiscussionUser.objects.count() == 0
    api.create_or_update_discussion_user(profile.user_id)
    mocked_create.assert_called_once()
    mocked_update.assert_not_called()
    assert DiscussionUser.objects.count() == 1
@pytest.mark.parametrize('enable_update', [True, False])
def test_create_or_update_discussion_user_has_username(mocker, enable_update, settings):
    """Test that create_or_update_discussion_user updates if we have a username"""
    settings.FEATURES['OPEN_DISCUSSIONS_USER_UPDATE'] = enable_update
    mocked_create = mocker.patch('discussions.api.create_discussion_user')
    mocked_update = mocker.patch('discussions.api.update_discussion_user')
    with mute_signals(post_save):
        profile = ProfileFactory.create()
    DiscussionUser.objects.create(user=profile.user, username='username')
    api.create_or_update_discussion_user(profile.user_id)
    mocked_create.assert_not_called()
    expected_update_calls = 1 if enable_update else 0
    assert mocked_update.call_count == expected_update_calls
    assert DiscussionUser.objects.count() == 1
def test_create_discussion_user(mock_staff_client):
    """Verify create_discussion_user makes the correct API calls"""
    mock_response = mock_staff_client.users.create.return_value
    mock_response.status_code = 201
    mock_response.json.return_value = {'username': 'username'}
    with mute_signals(post_save):
        profile = ProfileFactory.create()
    discussion_user = DiscussionUser.objects.create(user=profile.user)
    api.create_discussion_user(discussion_user)
    assert discussion_user.username == 'username'
    expected_profile = {
        'name': profile.full_name,
        'image': profile.image.url if profile.image else None,
        'image_small': profile.image_small.url if profile.image_small else None,
        'image_medium': profile.image_medium.url if profile.image_medium else None,
        'email_optin': profile.email_optin,
    }
    mock_staff_client.users.create.assert_called_once_with(
        profile.user.username,
        email=profile.user.email,
        profile=expected_profile,
    )
def test_create_discussion_user_error(mock_staff_client):
    """Verify create_discussion_user handles non 2xx status codes"""
    mock_staff_client.users.create.return_value.raise_for_status.side_effect = HTTPError
    with mute_signals(post_save):
        profile = ProfileFactory.create()
    discussion_user = DiscussionUser.objects.create(user=profile.user)
    with pytest.raises(DiscussionUserSyncException) as exc:
        api.create_discussion_user(discussion_user)
    expected_message = "Error creating discussion user for {}".format(profile.user.username)
    assert str(exc.value) == expected_message
def test_update_discussion_user(mock_staff_client):
    """Verify update_discussion_user makes the correct API calls"""
    mock_response = mock_staff_client.users.update.return_value
    mock_response.status_code = 200
    mock_response.json.return_value = {'username': 'username'}
    with mute_signals(post_save):
        profile = ProfileFactory.create()
    discussion_user = DiscussionUser.objects.create(user=profile.user, username='username')
    api.update_discussion_user(discussion_user)
    expected_profile = {
        'name': profile.full_name,
        'image': profile.image.url if profile.image else None,
        'image_small': profile.image_small.url if profile.image_small else None,
        'image_medium': profile.image_medium.url if profile.image_medium else None,
    }
    mock_staff_client.users.update.assert_called_once_with(
        discussion_user.username,
        uid=discussion_user.user.username,
        email=profile.user.email,
        profile=expected_profile,
    )
def test_update_discussion_user_with_email_optin(mock_staff_client):
    """Verify update_discussion_user passes email_optin when allowed"""
    mock_response = mock_staff_client.users.update.return_value
    mock_response.status_code = 200
    mock_response.json.return_value = {'username': 'username'}
    with mute_signals(post_save):
        profile = ProfileFactory.create()
    discussion_user = DiscussionUser.objects.create(user=profile.user, username='username')
    api.update_discussion_user(discussion_user, allow_email_optin=True)
    expected_profile = {
        'name': profile.full_name,
        'image': profile.image.url if profile.image else None,
        'image_small': profile.image_small.url if profile.image_small else None,
        'image_medium': profile.image_medium.url if profile.image_medium else None,
        'email_optin': profile.email_optin,
    }
    mock_staff_client.users.update.assert_called_once_with(
        discussion_user.username,
        uid=discussion_user.user.username,
        email=profile.user.email,
        profile=expected_profile,
    )
def test_update_discussion_user_no_update(mock_staff_client):
    """Verify update_discussion_user skips the API call when nothing changed"""
    with mute_signals(post_save):
        profile = ProfileFactory.create()
    discussion_user = DiscussionUser.objects.create(
        user=profile.user,
        username='user1',
        last_sync=profile.updated_on,
    )
    api.update_discussion_user(discussion_user)
    mock_staff_client.users.update.assert_not_called()
def test_update_discussion_user_error(mock_staff_client):
    """Verify update_discussion_user handles non-2xx status codes"""
    mock_staff_client.users.update.return_value.raise_for_status.side_effect = HTTPError
    with mute_signals(post_save):
        profile = ProfileFactory.create()
    discussion_user = DiscussionUser.objects.create(user=profile.user, username='username')
    with pytest.raises(DiscussionUserSyncException) as exc:
        api.update_discussion_user(discussion_user)
    expected_message = "Error updating discussion user for {}".format(profile.user.username)
    assert str(exc.value) == expected_message
def test_add_to_channel(mock_staff_client):
    """add_to_channel should add user as contributor and subscriber"""
    api.add_to_channel('channel', 'username')
    channels = mock_staff_client.channels
    channels.add_contributor.assert_called_once_with('channel', 'username')
    channels.add_subscriber.assert_called_once_with('channel', 'username')
def test_add_to_channel_failed_contributor(mock_staff_client):
    """add_to_channel should raise an exception if it fails to add a contributor"""
    add_contributor = mock_staff_client.channels.add_contributor
    add_contributor.return_value.raise_for_status.side_effect = HTTPError
    with pytest.raises(ContributorSyncException) as ex:
        api.add_to_channel('channel', 'user')
    assert ex.value.args[0] == 'Error adding contributor user to channel channel'
    mock_staff_client.channels.add_subscriber.assert_not_called()
def test_add_to_channel_failed_subscriber(mock_staff_client):
    """add_to_channel should raise an exception if it fails to add a subscriber"""
    mock_staff_client.channels.add_subscriber.return_value.raise_for_status.side_effect = HTTPError
    with pytest.raises(SubscriberSyncException) as ex:
        api.add_to_channel('channel', 'username')
    expected_message = 'Error adding subscriber {user} to channel {channel}'.format(
        user='username',
        channel='channel',
    )
    assert ex.value.args[0] == expected_message
    mock_staff_client.channels.add_contributor.assert_called_once_with('channel', 'username')
    mock_staff_client.channels.add_subscriber.assert_called_once_with('channel', 'username')
@pytest.mark.parametrize("contributor_status_code,subscriber_status_code", [
    (statuses.HTTP_200_OK, statuses.HTTP_200_OK),
    (statuses.HTTP_404_NOT_FOUND, statuses.HTTP_404_NOT_FOUND),
    (statuses.HTTP_409_CONFLICT, statuses.HTTP_404_NOT_FOUND),
])
def test_remove_from_channel(mock_staff_client, contributor_status_code, subscriber_status_code):
    """remove_from_channel should remove a user's contributor and subscriber status"""
    # NOTE(review): the parametrized status codes are never applied to the
    # mocked responses -- confirm whether they were meant to be set on the
    # remove_contributor/remove_subscriber return values
    api.remove_from_channel('channel', 'username')
    channels = mock_staff_client.channels
    channels.remove_contributor.assert_called_once_with('channel', 'username')
    channels.remove_subscriber.assert_called_once_with('channel', 'username')
@pytest.mark.parametrize("status_code", [
    statuses.HTTP_400_BAD_REQUEST,
    statuses.HTTP_401_UNAUTHORIZED,
    statuses.HTTP_403_FORBIDDEN,
    statuses.HTTP_500_INTERNAL_SERVER_ERROR,
    statuses.HTTP_505_HTTP_VERSION_NOT_SUPPORTED
])
def test_remove_from_channel_failed_contributor(mock_staff_client, status_code):
    """
    remove_from_channel should raise an exception if it fails to remove a user's contributor status,
    depending on the status code
    """
    response = mock_staff_client.channels.remove_contributor.return_value
    response.ok = False
    response.status_code = status_code
    response.raise_for_status.side_effect = HTTPError
    with pytest.raises(ContributorSyncException) as ex:
        api.remove_from_channel('channel', 'user')
    assert ex.value.args[0] == 'Unable to remove a contributor user from channel channel'
    mock_staff_client.channels.remove_contributor.assert_called_once_with('channel', 'user')
    mock_staff_client.channels.remove_subscriber.assert_called_once_with('channel', 'user')
@pytest.mark.parametrize("status_code", [
    statuses.HTTP_400_BAD_REQUEST,
    statuses.HTTP_401_UNAUTHORIZED,
    statuses.HTTP_403_FORBIDDEN,
    statuses.HTTP_409_CONFLICT,
    statuses.HTTP_500_INTERNAL_SERVER_ERROR,
    statuses.HTTP_505_HTTP_VERSION_NOT_SUPPORTED
])
def test_remove_from_channel_failed_subscriber(mock_staff_client, status_code):
    """
    remove_from_channel should raise an exception if it fails to remove a user's subscriber status,
    depending on the status code
    """
    mock_staff_client.channels.remove_contributor.return_value.ok = True
    response = mock_staff_client.channels.remove_subscriber.return_value
    response.ok = False
    response.status_code = status_code
    response.raise_for_status.side_effect = HTTPError
    with pytest.raises(SubscriberSyncException) as ex:
        api.remove_from_channel('channel', 'username')
    assert ex.value.args[0] == 'Unable to remove a subscriber username from channel channel'
    mock_staff_client.channels.remove_subscriber.assert_called_once_with('channel', 'username')
    mock_staff_client.channels.remove_contributor.assert_not_called()
def test_get_membership_ids_needing_sync(patched_users_api):
    """
    Tests that get_membership_ids_needing_sync only returns ids for the correct records
    """
    user1 = UserFactory.create()
    user2 = UserFactory.create()
    user3 = UserFactory.create()
    with mute_signals(post_save):
        user3.profile.delete()
    member_channels = [ChannelFactory.create() for _ in range(4)]
    nonmember_channels = [ChannelFactory.create() for _ in range(3)]

    def make_memberships(user, channels, needs_update, is_member):
        # one membership per channel with the given flags
        return [
            PercolateQueryMembership.objects.create(
                user=user, query=channel.query, needs_update=needs_update, is_member=is_member,
            )
            for channel in channels
        ]

    # needs_update=True with an intact profile: these should show up in results
    expected_present = (
        make_memberships(user1, member_channels, True, True) +
        make_memberships(user1, nonmember_channels, True, False)
    )
    # needs_update=False, or the user has no profile: these shouldn't show up
    expected_absent = (
        make_memberships(user2, member_channels, False, True) +
        make_memberships(user2, nonmember_channels, False, False) +
        make_memberships(user3, member_channels, True, True) +
        make_memberships(user3, nonmember_channels, True, False)
    )
    results = api.get_membership_ids_needing_sync()
    for membership in expected_present:
        assert membership.id in results
    for membership in expected_absent:
        assert membership.id not in results
def test_ordering_get_membership_ids_needing_sync(patched_users_api):
    """Test that get_membership_ids_needing_sync returns ordered list based on is_member (True before False)
    and updated_on (most recent first)."""
    users = [UserFactory.create() for _ in range(4)]
    channel = ChannelFactory.create()
    # First two users are members, last two are not; creation order matters
    # because the API sorts most-recently-updated first within each group.
    member_flags = [True, True, False, False]
    created_memberships = [
        PercolateQueryMembership.objects.create(
            user=user, query=channel.query, needs_update=True, is_member=is_member)
        for user, is_member in zip(users, member_flags)
    ]
    # Expected: is_member=True group first, each group newest-first.
    expected_order = (
        [m.id for m in reversed(created_memberships[:2])] +
        [m.id for m in reversed(created_memberships[2:])]
    )
    results = api.get_membership_ids_needing_sync()
    assert expected_order == list(results)
def test_sync_channel_memberships(mocker, patched_users_api):
    """
    sync_user_to_channels should add or remove the user's membership from channels, not touching channels where
    the user is a moderator of at least one program
    """
    user = UserFactory.create()
    # member here means the user matches the percolate query of the channel
    member_channels = [ChannelFactory.create() for _ in range(4)]
    nonmember_channels = [ChannelFactory.create() for _ in range(3)]
    # first channel of members and first channel of nonmembers are skipped since user is staff
    channels_to_add = member_channels[1:]
    channels_to_remove = nonmember_channels[1:]
    # User is a staff of some channels and not of others.
    # Note that a staff user may or may not match the percolate query or a channel
    staff_programs = [
        ChannelProgramFactory.create(channel=member_channels[0]).program,
        ChannelProgramFactory.create(channel=nonmember_channels[0]).program,
    ]
    non_staff_programs = [
        ChannelProgramFactory.create(channel=channel).program
        for channel in (channels_to_add + channels_to_remove)
    ]
    # Memberships flagged for sync: is_member=True should be added, False removed.
    memberships_to_add = [
        PercolateQueryMembership.objects.create(user=user, query=channel.query, needs_update=True, is_member=True)
        for channel in member_channels
    ]
    memberships_to_remove = [
        PercolateQueryMembership.objects.create(user=user, query=channel.query, needs_update=True, is_member=False)
        for channel in nonmember_channels
    ]
    for program in staff_programs:
        with mute_signals(post_save):
            RoleFactory.create(program=program, user=user, role=Staff.ROLE_ID)
    # Enroll the user in all programs. This isn't technically required but it's unrealistic to have a query
    # matching a user if they are not enrolled in the program.
    for program in staff_programs + non_staff_programs:
        ProgramEnrollmentFactory.create(program=program, user=user)
    # One percolate query per channel
    assert PercolateQuery.objects.count() == len(member_channels) + len(nonmember_channels)
    # Stub out the four discussion-service API calls so the sync can be asserted on below.
    add_subscriber_stub = mocker.patch(
        'discussions.api.add_subscriber_to_channel',
        autospec=True,
    )
    add_contributor_stub = mocker.patch(
        'discussions.api.add_contributor_to_channel',
        autospec=True,
    )
    remove_subscriber_stub = mocker.patch(
        'discussions.api.remove_subscriber_from_channel',
        autospec=True,
    )
    remove_contributor_stub = mocker.patch(
        'discussions.api.remove_contributor_from_channel',
        autospec=True,
    )
    # Exercise the code under test.
    api.sync_channel_memberships(api.get_membership_ids_needing_sync())
    created_stub, _ = patched_users_api
    created_stub.assert_any_call(user.discussion_user)
    # Staff channels (first of each list) must have been skipped.
    assert add_subscriber_stub.call_count == len(channels_to_add)
    assert add_contributor_stub.call_count == len(channels_to_add)
    assert remove_subscriber_stub.call_count == len(channels_to_remove)
    assert remove_contributor_stub.call_count == len(channels_to_remove)
    # All processed memberships should be marked as synced.
    for membership in memberships_to_add + memberships_to_remove:
        membership.refresh_from_db()
        assert membership.needs_update is False
    for channel in channels_to_add:
        add_subscriber_stub.assert_any_call(channel.name, user.discussion_user.username)
        add_contributor_stub.assert_any_call(channel.name, user.discussion_user.username)
    for channel in channels_to_remove:
        remove_contributor_stub.assert_any_call(channel.name, user.discussion_user.username)
        remove_subscriber_stub.assert_any_call(channel.name, user.discussion_user.username)
def test_sync_channel_memberships_api_error(mocker, patched_users_api):
"""
sync_user_to_channels should not fail | |
"""Functions to calculate electron probe"""
import numpy as np
import pyTEMlib.image_tools
import scipy.ndimage as ndimage
def make_gauss(size_x, size_y, width=1.0, x0=0.0, y0=0.0, intensity=1.0):
    """Make a Gaussian shaped probe.

    Returns a (size_x, size_y) array centered at (x0, y0) whose values sum
    to `intensity`. `width` is the Gaussian standard deviation in pixels.
    """
    half_x = size_x / 2
    half_y = size_y / 2
    x, y = np.mgrid[-half_x:half_x, -half_y:half_y]
    r_sq = (x - x0) ** 2 + (y - y0) ** 2
    g = np.exp(-r_sq / 2.0 / width ** 2)
    # Normalize so the probe integrates to the requested intensity.
    return g / g.sum() * intensity
def make_lorentz(size_x, size_y, gamma=1.0, x0=0., y0=0., intensity=1.):
    """Make a Lorentzian shaped probe.

    Returns a (size_x, size_y) array (sizes rounded down to even extents)
    centered at (x0, y0) whose values sum to `intensity`. `gamma` is the
    Lorentzian scale parameter in pixels.
    """
    half_x = np.floor(size_x / 2)
    half_y = np.floor(size_y / 2)
    x, y = np.mgrid[-half_x:half_x, -half_y:half_y]
    r_sq = (x - x0) ** 2 + (y - y0) ** 2
    g = gamma / (2 * np.pi) / np.power(r_sq + gamma ** 2, 1.5)
    # Normalize so the probe integrates to the requested intensity.
    return g / g.sum() * intensity
def zero_loss_peak_weight():
    """Return a tabulated zero-loss-peak profile used as a Cc weighting function.

    Returns:
        x: np.ndarray of 29 energy values from -0.5 to 0.9 (presumably eV -- confirm)
        y: list of 29 relative intensities, peak-normalized to 1.0
    """
    # US100 zero_loss peak for Cc of aberrations
    x = np.linspace(-0.5, 0.9, 29)
    y = [0.0143, 0.0193, 0.0281, 0.0440, 0.0768, 0.1447, 0.2785, 0.4955, 0.7442, 0.9380, 1.0000, 0.9483, 0.8596,
         0.7620, 0.6539, 0.5515, 0.4478, 0.3500, 0.2683, 0.1979, 0.1410, 0.1021, 0.0752, 0.0545, 0.0401, 0.0300,
         0.0229, 0.0176, 0.0139]
    return x, y
def make_chi(phi, theta, aberrations):
    """Evaluate the aberration phase function chi(phi, theta) up to fifth order.

    Parameters
    ----------
    phi, theta: np.ndarray
        azimuthal and polar angles (radians) of the reciprocal-space grid
    aberrations: dict
        aberration coefficients C{n}{m}[a|b] plus 'wavelength'; any missing
        coefficient is inserted into the dict as 0. (side effect kept from
        the original implementation)

    Returns
    -------
    np.ndarray with the same shape as `theta` (radians)
    """
    max_order = 5
    chi = np.zeros(theta.shape)
    for n in range(max_order + 1):
        # Radial part of the n-th order term.
        radial_term = np.power(theta, n + 1) / (n + 1)
        azimuthal_sum = np.zeros(theta.shape)
        # m runs over the allowed azimuthal orders of the same parity as n+1.
        for m in range((n + 1) % 2, n + 2, 2):
            if m > 0:
                # setdefault inserts 0. for missing coefficients, matching
                # the original behavior of mutating `aberrations`.
                c_a = aberrations.setdefault(f'C{n}{m}a', 0.)
                c_b = aberrations.setdefault(f'C{n}{m}b', 0.)
                azimuthal_sum = azimuthal_sum + c_a * np.cos(m * phi) + c_b * np.sin(m * phi)
            else:
                azimuthal_sum = azimuthal_sum + aberrations.setdefault(f'C{n}{m}', 0.)
        chi = chi + radial_term * azimuthal_sum * 2 * np.pi / aberrations['wavelength']
    return chi
def get_chi(ab, size_x, size_y, verbose=False):
    """ Get aberration function chi without defocus spread
    # Internally reciprocal lattice vectors in 1/nm or rad.
    # All calculations of chi in angles.
    # All aberration coefficients in nm

    Parameters
    ----------
    ab: dict
        aberration dictionary; must contain 'convergence_angle' (mrad),
        'acceleration_voltage' (V) and 'FOV' (nm); 'wavelength' (nm) is
        added to it as a side effect
    size_x, size_y: int
        number of pixels of the reciprocal-space grid
    verbose: bool
        if True, print acceleration voltage and wavelength

    Returns
    -------
    chi: np.ndarray of shape (size_x, size_y), phase in radians
    aperture: np.ndarray of shape (size_x, size_y), 1. inside the
        convergence semi-angle, 0. outside
    """
    aperture_angle = ab['convergence_angle'] / 1000.0  # in rad
    wavelength = pyTEMlib.image_tools.get_wavelength(ab['acceleration_voltage'])
    if verbose:
        print(f"Acceleration voltage {ab['acceleration_voltage'] / 1000:}kV => wavelength {wavelength * 1000.:.2f}pm")
    ab['wavelength'] = wavelength
    # Reciprocal plane in 1/nm
    dk = 1 / ab['FOV']
    k_x = np.array(dk * (-size_x / 2. + np.arange(size_x)))
    k_y = np.array(dk * (-size_y / 2. + np.arange(size_y)))
    t_x_v, t_y_v = np.meshgrid(k_x, k_y)
    # define reciprocal plane in angles
    # NOTE(review): arctan2(x, y) swaps the usual (y, x) argument order --
    # presumably deliberate for the phi convention used in make_chi; confirm.
    phi = np.arctan2(t_x_v, t_y_v)
    theta = np.arctan2(np.sqrt(t_x_v ** 2 + t_y_v ** 2), 1 / wavelength)
    # calculate chi
    chi = make_chi(phi, theta, ab)
    # Aperture function: cut off everything beyond the convergence semi-angle
    mask = theta >= aperture_angle
    aperture = np.ones((size_x, size_y), dtype=float)
    aperture[mask] = 0.
    return chi, aperture
def print_abberrations(ab):
    """Display an HTML table of the aberration coefficients in `ab`.

    Args:
        ab: dict with aberration coefficients in nm (C10, C12a/b, ...,
            C56a/b, Cc) and 'acceleration_voltage' in V.

    Note: requires an IPython/Jupyter environment (uses IPython.display).
    """
    from IPython.display import HTML, display
    output = '<html><body>'
    output += f"Abberrations [nm] for acceleration voltage: {ab['acceleration_voltage']/1e3:.0f} kV"
    output += '<table>'
    output += f"<tr><td> C10 </td><td> {ab['C10']:.1f} </tr>"
    output += f"<tr><td> C12a </td><td> {ab['C12a']:20.1f} <td> C12b </td><td> {ab['C12b']:20.1f} </tr>"
    output += f"<tr><td> C21a </td><td> {ab['C21a']:.1f} <td> C21b </td><td> {ab['C21b']:.1f} "
    output += f" <td> C23a </td><td> {ab['C23a']:.1f} <td> C23b </td><td> {ab['C23b']:.1f} </tr>"
    output += f"<tr><td> C30 </td><td> {ab['C30']:.1f} </tr>"
    output += f"<tr><td> C32a </td><td> {ab['C32a']:20.1f} <td> C32b </td><td> {ab['C32b']:20.1f} "
    output += f"<td> C34a </td><td> {ab['C34a']:20.1f} <td> C34b </td><td> {ab['C34b']:20.1f} </tr>"
    output += f"<tr><td> C41a </td><td> {ab['C41a']:.3g} <td> C41b </td><td> {ab['C41b']:.3g} "
    # BUG FIX: the C43b cell previously printed ab['C41b'] (copy-paste error).
    output += f" <td> C43a </td><td> {ab['C43a']:.3g} <td> C43b </td><td> {ab['C43b']:.3g} "
    output += f" <td> C45a </td><td> {ab['C45a']:.3g} <td> C45b </td><td> {ab['C45b']:.3g} </tr>"
    output += f"<tr><td> C50 </td><td> {ab['C50']:.3g} </tr>"
    output += f"<tr><td> C52a </td><td> {ab['C52a']:20.1f} <td> C52b </td><td> {ab['C52b']:20.1f} "
    output += f"<td> C54a </td><td> {ab['C54a']:20.1f} <td> C54b </td><td> {ab['C54b']:20.1f} "
    output += f"<td> C56a </td><td> {ab['C56a']:20.1f} <td> C56b </td><td> {ab['C56b']:20.1f} </tr>"
    output += f"<tr><td> Cc </td><td> {ab['Cc']:.3g} </tr>"
    output += '</table></body></html>'
    display(HTML(output))
def get_ronchigram(size, ab, scale='mrad'):
    """ Get Ronchigram

    Simulates a Ronchigram of a weak random phase object for the aberrations
    in `ab`. As a side effect stores the plot extent and axis label in
    ab['ronchi_extent'] and ab['ronchi_label'].

    Parameters
    ----------
    size: int
        image size in pixels (the image is square)
    ab: dict
        aberration dictionary (see `get_chi`); must contain 'FOV'
    scale: str
        '1/nm' gives the extent in reciprocal nm; anything else gives mrad

    Returns
    -------
    ronchigram: np.ndarray of shape (size, size), simulated intensity
    """
    size_x = size_y = size
    chi, A_k = get_chi(ab, size_x, size_y)
    # Weak phase object: smoothed uniform random noise (output is therefore
    # not deterministic between calls).
    v_noise = np.random.rand(size_x, size_y)
    smoothing = 5
    phi_r = ndimage.gaussian_filter(v_noise, sigma=(smoothing, smoothing), order=0)
    sigma = 6  # 6 for carbon and thin
    q_r = np.exp(-1j * sigma * phi_r)
    # q_r = 1-phi_r * sigma
    # Aberrated probe in reciprocal space, transformed to real space,
    # multiplied by the object transmission, and transformed back.
    T_k = A_k * (np.exp(-1j * chi))
    t_r = (np.fft.ifft2(np.fft.fftshift(T_k)))
    psi_k = np.fft.fftshift(np.fft.fft2(q_r * t_r))
    ronchigram = np.absolute(psi_k * np.conjugate(psi_k))
    # Plot extent: half the reciprocal field of view, in 1/nm or mrad.
    fov_reciprocal = 1 / ab['FOV'] * size_x / 2
    if scale == '1/nm':
        extent = [-fov_reciprocal, fov_reciprocal, -fov_reciprocal, fov_reciprocal]
        ylabel = 'reciprocal distance [1/nm]'
    else:
        fov_mrad = fov_reciprocal * ab['wavelength'] * 1000
        extent = [-fov_mrad, fov_mrad, -fov_mrad, fov_mrad]
        ylabel = 'reciprocal distance [mrad]'
    ab['ronchi_extent'] = extent
    ab['ronchi_label'] = ylabel
    return ronchigram
def get_chi_2(ab, u, v):
chi1 = ab['C10'] * (u ** 2 + v ** 2) / 2 \
+ ab['C12a'] * (u ** 2 - v ** 2) / 2 \
- ab['C12b'] * u * v
chi2 = ab['C21a'] * (u ** 3 + u * v ** 2) / 3 \
- ab['C21b'] * (u ** 2 * v + v ** 3) / 3 \
+ ab['C23a'] * (u ** 3 - 3 * u * v ** 2) / 3 \
- ab['C23b'] * (3 * u ** 2 * v - v ** 3) / 3
chi3 = ab['C30'] * (u ** 4 + 2 * u ** 2 * v ** 2 + v ** 4) / 4 \
+ ab['C32a'] * (u ** 4 - v ** 4) / 4 \
- ab['C32b'] * (u ** 3 * v + u * v ** 3) / 2 \
+ ab['C34a'] * (u ** 4 - 6 * u ** 2 * v ** 2 + v ** 4) / 4 \
- ab['C34b'] * (4 * u ** 3 * v - 4 * u * v ** 3) / 4
chi4 = ab['C41a'] * (u ** 5 + 2 * u ** 3 * v ** 2 + u * v ** 4) / 5 \
- ab['C41b'] * (u ** 4 * v + 2 * u ** 2 * v ** 3 + v ** 5) / 5 \
+ ab['C43a'] * (u ** 5 - 2 * u ** 3 * v ** 2 - 3 * u * v ** 4) / 5 \
- ab['C43b'] * (3 * u ** 4 * v + 2 * u ** 2 * v ** 3 - v ** 5) / 5 \
+ ab['C45a'] * (u ** 5 - 10 * u ** 3 * v ** 2 + 5 * u * v ** 4) / 5 \
- ab['C45b'] * (5 * u ** 4 * v - 10 * u ** 2 * v ** 3 + v ** 5) / 5
chi5 = ab['C50'] * (u ** 6 + 3 * u ** 4 * v ** 2 + 3 * u ** 2 * v ** 4 + v ** 6) / 6 \
+ ab['C52a'] * (u ** 6 + u ** 4 * v ** 2 - u ** 2 * v ** 4 - v ** 6) / 6 \
- ab['C52b'] * (2 * u ** 5 * v + 4 * u ** 3 * v ** 3 + 2 * u * v ** 5) / 6 \
+ ab['C54a'] * (u ** 6 - 5 * u ** 4 * v ** 2 - 5 * u ** 2 * v ** 4 + v ** 6) / 6 \
- ab['C54b'] * (4 * u ** 5 * v - 4 * u * v ** 5) / 6 \
+ ab['C56a'] * (u ** 6 - 15 * u ** 4 * v ** 2 + 15 * u ** 2 * v ** 4 - v ** 6) / 6 \
- ab['C56b'] * (6 | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['TemplateArgs', 'Template']
@pulumi.input_type
class TemplateArgs:
    def __init__(__self__, *,
                 code: pulumi.Input[str],
                 cloud_config: Optional[pulumi.Input[str]] = None,
                 default_username: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 image_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 short_description: Optional[pulumi.Input[str]] = None,
                 volume_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Template resource.
        :param pulumi.Input[str] code: This is a unique, alphanumerical, short, human readable code for the template.
        :param pulumi.Input[str] cloud_config: Commonly referred to as 'user-data', this is a customisation script that is run after
               the instance is first booted. We recommend using cloud-config as it's a great distribution-agnostic
               way of configuring cloud servers. If you put `$INITIAL_USER` in your script, this will automatically
               be replaced by the initial user chosen when creating the instance, `$INITIAL_PASSWORD` will be
               replaced with the random password generated by the system, `$HOSTNAME` is the fully qualified
               domain name of the instance and `$SSH_KEY` will be the content of the SSH public key.
               (this is technically optional, but you won't really be able to use instances without it -
               see our learn guide on templates for more information).
        :param pulumi.Input[str] default_username: The default username to suggest that the user creates
        :param pulumi.Input[str] description: A multi-line description of the template, in Markdown format
        :param pulumi.Input[str] image_id: This is the Image ID of any default template or the ID of another template
               either owned by you or global (optional; but must be specified if no volume_id is specified).
        :param pulumi.Input[str] name: This is a short human readable name for the template
        :param pulumi.Input[str] short_description: A one line description of the template
        :param pulumi.Input[str] volume_id: This is the ID of a bootable volume, either owned by you or global
               (optional; but must be specified if no image_id is specified)
        """
        pulumi.set(__self__, "code", code)
        if cloud_config is not None:
            pulumi.set(__self__, "cloud_config", cloud_config)
        if default_username is not None:
            pulumi.set(__self__, "default_username", default_username)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if image_id is not None:
            pulumi.set(__self__, "image_id", image_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if short_description is not None:
            pulumi.set(__self__, "short_description", short_description)
        if volume_id is not None:
            pulumi.set(__self__, "volume_id", volume_id)

    @property
    @pulumi.getter
    def code(self) -> pulumi.Input[str]:
        """
        This is a unique, alphanumerical, short, human readable code for the template.
        """
        return pulumi.get(self, "code")

    @code.setter
    def code(self, value: pulumi.Input[str]):
        pulumi.set(self, "code", value)

    @property
    @pulumi.getter(name="cloudConfig")
    def cloud_config(self) -> Optional[pulumi.Input[str]]:
        """
        Commonly referred to as 'user-data', this is a customisation script that is run after
        the instance is first booted. We recommend using cloud-config as it's a great distribution-agnostic
        way of configuring cloud servers. If you put `$INITIAL_USER` in your script, this will automatically
        be replaced by the initial user chosen when creating the instance, `$INITIAL_PASSWORD` will be
        replaced with the random password generated by the system, `$HOSTNAME` is the fully qualified
        domain name of the instance and `$SSH_KEY` will be the content of the SSH public key.
        (this is technically optional, but you won't really be able to use instances without it -
        see our learn guide on templates for more information).
        """
        return pulumi.get(self, "cloud_config")

    @cloud_config.setter
    def cloud_config(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cloud_config", value)

    @property
    @pulumi.getter(name="defaultUsername")
    def default_username(self) -> Optional[pulumi.Input[str]]:
        """
        The default username to suggest that the user creates
        """
        return pulumi.get(self, "default_username")

    @default_username.setter
    def default_username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_username", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A multi-line description of the template, in Markdown format
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="imageId")
    def image_id(self) -> Optional[pulumi.Input[str]]:
        """
        This is the Image ID of any default template or the ID of another template
        either owned by you or global (optional; but must be specified if no volume_id is specified).
        """
        return pulumi.get(self, "image_id")

    @image_id.setter
    def image_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        This is a short human readable name for the template
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="shortDescription")
    def short_description(self) -> Optional[pulumi.Input[str]]:
        """
        A one line description of the template
        """
        return pulumi.get(self, "short_description")

    @short_description.setter
    def short_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "short_description", value)

    @property
    @pulumi.getter(name="volumeId")
    def volume_id(self) -> Optional[pulumi.Input[str]]:
        """
        This is the ID of a bootable volume, either owned by you or global
        (optional; but must be specified if no image_id is specified)
        """
        return pulumi.get(self, "volume_id")

    @volume_id.setter
    def volume_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "volume_id", value)
@pulumi.input_type
class _TemplateState:
def __init__(__self__, *,
cloud_config: Optional[pulumi.Input[str]] = None,
code: Optional[pulumi.Input[str]] = None,
default_username: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
short_description: Optional[pulumi.Input[str]] = None,
volume_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Template resources.
:param pulumi.Input[str] cloud_config: Commonly referred to as 'user-data', this is a customisation script that is run after
the instance is first booted. We recommend using cloud-config as it's a great distribution-agnostic
way of configuring cloud servers. If you put `$INITIAL_USER` in your script, this will automatically
be replaced by the initial user chosen when creating the instance, `$INITIAL_PASSWORD` will be
replaced with the random password generated by the system, `$HOSTNAME` is the fully qualified
domain name of the instance and `$SSH_KEY` will be the content of the SSH public key.
(this is technically optional, but you won't really be able to use instances without it -
see our learn guide on templates for more information).
:param pulumi.Input[str] code: This is a unqiue, alphanumerical, short, human readable code for the template.
:param pulumi.Input[str] default_username: The default username to suggest that the user creates
:param pulumi.Input[str] description: A multi-line description of the template, in Markdown format
:param pulumi.Input[str] image_id: This is the Image ID of any default template or the ID of another template
either owned by you or global (optional; but must be specified if no volume_id is specified).
:param pulumi.Input[str] name: This is a short human readable name for the template
:param pulumi.Input[str] short_description: A one line description of the template
:param pulumi.Input[str] volume_id: This is the ID of a bootable volume, either owned by you or global
(optional; but must be specified if no image_id is specified)
"""
if cloud_config is not None:
pulumi.set(__self__, "cloud_config", cloud_config)
if code is not None:
pulumi.set(__self__, "code", code)
if default_username is not None:
pulumi.set(__self__, "default_username", default_username)
if description is not None:
pulumi.set(__self__, "description", description)
if image_id is not None:
pulumi.set(__self__, "image_id", image_id)
if name is not None:
pulumi.set(__self__, "name", name)
if short_description is not None:
pulumi.set(__self__, "short_description", short_description)
if volume_id is not None:
pulumi.set(__self__, "volume_id", volume_id)
@property
@pulumi.getter(name="cloudConfig")
def cloud_config(self) -> Optional[pulumi.Input[str]]:
"""
Commonly referred to as 'user-data', this is a customisation script that is run after
the instance is first booted. We recommend using cloud-config as it's a great distribution-agnostic
way of configuring cloud servers. If you put `$INITIAL_USER` in your script, this will automatically
be replaced by the initial user chosen when creating the instance, `$INITIAL_PASSWORD` will be
replaced with the random password generated by the system, `$HOSTNAME` is the fully qualified
domain name of the instance and `$SSH_KEY` will be the content of the SSH public key.
(this is technically optional, but you won't really be able to use instances without it -
see our learn guide on templates for more information).
"""
return pulumi.get(self, "cloud_config")
@cloud_config.setter
def cloud_config(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cloud_config", value)
@property
@pulumi.getter
def code(self) -> Optional[pulumi.Input[str]]:
"""
This is a unqiue, alphanumerical, short, human readable code for the template.
"""
return pulumi.get(self, "code")
@code.setter
def code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "code", value)
@property
@pulumi.getter(name="defaultUsername")
def default_username(self) -> Optional[pulumi.Input[str]]:
"""
The default username to | |
import re
from enum import IntEnum
from typing import Tuple, Optional
from datetime import datetime, timedelta, timezone
from flask import redirect, url_for, request, current_app, g, make_response
from flask.views import MethodView
from flask_smorest import Blueprint, abort
from swpt_lib.utils import u64_to_i64
from swpt_lib.swpt_uris import parse_account_uri
from swpt_debtors.schemas import DebtorSchema, TransferSchema, \
TransfersListSchema, TransferCreationRequestSchema, \
TransfersList, TransferCancelationRequestSchema, DebtorReservationRequestSchema, \
DebtorReservationSchema, DebtorsListSchema, ObjectReferencesPageSchema, \
DebtorActivationRequestSchema, DebtorDeactivationRequestSchema, DebtorConfigSchema
from swpt_debtors.models import MIN_INT64
from swpt_debtors import specs
from swpt_debtors import procedures
# HTTP methods that can not modify server state; supervisors are restricted
# to these in `ensure_debtor_permissions`.
READ_ONLY_METHODS = ['GET', 'HEAD', 'OPTIONS']
class UserType(IntEnum):
    """Type of the authenticated user, determined from the 'X-Swpt-User-Id' header."""
    SUPERUSER = 1
    SUPERVISOR = 2
    DEBTOR = 3
class UserIdPatternMatcher:
    """Matches user ID strings against per-user-type regexes from the app config.

    Compiled patterns are cached in `self._regex_patterns`, keyed by the regex
    string itself, so a changed config value is recompiled automatically.
    """

    # Maps each user type to the Flask config key holding its subject regex.
    PATTERN_CONFIG_KEYS = {
        UserType.SUPERUSER: 'APP_SUPERUSER_SUBJECT_REGEX',
        UserType.SUPERVISOR: 'APP_SUPERVISOR_SUBJECT_REGEX',
        UserType.DEBTOR: 'APP_DEBTOR_SUBJECT_REGEX',
    }

    def __init__(self):
        self._regex_patterns = {}

    def get_pattern(self, user_type: UserType) -> re.Pattern:
        """Return the compiled regex for `user_type`, compiling and caching on first use."""
        pattern_config_key = self.PATTERN_CONFIG_KEYS[user_type]
        regex = current_app.config[pattern_config_key]
        regex_patterns = self._regex_patterns
        regex_pattern = regex_patterns.get(regex)
        if regex_pattern is None:
            regex_pattern = regex_patterns[regex] = re.compile(regex)
        return regex_pattern

    def match(self, user_id: str) -> Tuple[UserType, Optional[int]]:
        """Return (user_type, debtor_id) for `user_id`; abort with 403 on no match.

        For DEBTOR users, group(1) of the regex match is the u64-encoded
        debtor ID, converted to a signed 64-bit int; otherwise debtor_id is None.
        """
        for user_type in UserType:
            pattern = self.get_pattern(user_type)
            m = pattern.match(user_id)
            if m:
                debtor_id = u64_to_i64(int(m.group(1))) if user_type == UserType.DEBTOR else None
                return user_type, debtor_id
        abort(403)
# Shared module-level matcher instance (caches compiled regexes across requests).
user_id_pattern_matcher = UserIdPatternMatcher()
def parse_swpt_user_id_header() -> Tuple[UserType, Optional[int]]:
    """Determine the user's type and (for debtors) debtor ID from 'X-Swpt-User-Id'.

    A missing header is treated as a superuser request (presumably the reverse
    proxy always sets the header for external requests -- verify deployment).
    Sets `g.superuser` as a side effect; aborts with 403 on an unrecognized
    user ID (via the matcher).
    """
    user_id = request.headers.get('X-Swpt-User-Id')
    if user_id is None:
        user_type = UserType.SUPERUSER
        debtor_id = None
    else:
        user_type, debtor_id = user_id_pattern_matcher.match(user_id)
    g.superuser = user_type == UserType.SUPERUSER
    return user_type, debtor_id
def ensure_admin():
    """Before-request hook: reject debtor users with 403 (admin endpoints only)."""
    user_type, _ = parse_swpt_user_id_header()
    if user_type == UserType.DEBTOR:
        abort(403)
def ensure_debtor_permissions():
    """Before-request hook enforcing per-debtor access rules; sets `g.debtor_id`."""
    # NOTE: Debtors can access and modify only their own resources.
    # Supervisors can activate new debtors, and have read-only access
    # to all debtors's resources. Superusers are allowed everything.
    user_type, debtor_id = parse_swpt_user_id_header()
    # The default makes routes without a 'debtorId' path parameter pass this check.
    if user_type == UserType.DEBTOR and debtor_id != request.view_args.get('debtorId', debtor_id):
        abort(403)
    if user_type == UserType.SUPERVISOR and request.method not in READ_ONLY_METHODS:
        abort(403)
    g.debtor_id = debtor_id
def calc_reservation_deadline(created_at: datetime) -> datetime:
    """Return the moment at which an inactive debtor reservation expires."""
    return created_at + timedelta(days=current_app.config['APP_INACTIVE_DEBTOR_RETENTION_DAYS'])
def calc_checkup_datetime(debtor_id: int, initiated_at: datetime) -> datetime:
    """Return the suggested time to check up on a transfer initiated at `initiated_at`.

    The checkup moment is "now" plus the larger of the transfer's current age
    and the configured average finalization delay.

    NOTE(review): `debtor_id` is unused here -- presumably kept for signature
    compatibility with the schema context; confirm before changing.
    """
    current_ts = datetime.now(tz=timezone.utc)
    current_delay = current_ts - initiated_at
    average_delay = timedelta(seconds=current_app.config['APP_TRANSFERS_FINALIZATION_AVG_SECONDS'])
    return current_ts + max(current_delay, average_delay)
# Serialization context handed to the marshmallow schemas: maps resource type
# names to endpoint names (for URL generation) plus two datetime helpers.
context = {
    'Debtor': 'debtors.DebtorEndpoint',
    'DebtorConfig': 'debtors.DebtorConfigEndpoint',
    'TransfersList': 'transfers.TransfersListEndpoint',
    'Transfer': 'transfers.TransferEndpoint',
    'SaveDocument': 'documents.SaveDocumentEndpoint',
    'RedirectToDebtorsInfo': 'documents.RedirectToDebtorsInfoEndpoint',
    'calc_reservation_deadline': calc_reservation_deadline,
    'calc_checkup_datetime': calc_checkup_datetime,
}
# Administrative endpoints (listing, reservation, activation of debtors);
# the `ensure_admin` hook rejects debtor users.
admin_api = Blueprint(
    'admin',
    __name__,
    url_prefix='/debtors',
    description="View debtors list, create new debtors.",
)
admin_api.before_request(ensure_admin)
@admin_api.route('/.debtor-reserve')
class RandomDebtorReserveEndpoint(MethodView):
    @admin_api.arguments(DebtorReservationRequestSchema)
    @admin_api.response(DebtorReservationSchema(context=context))
    @admin_api.doc(operationId='reserveRandomDebtor',
                   security=specs.SCOPE_ACTIVATE,
                   responses={409: specs.CONFLICTING_DEBTOR})
    def post(self, debtor_reservation_request):
        """Reserve an auto-generated debtor ID.

        **Note:** The reserved debtor ID will be a random valid
        debtor ID.
        """
        # Retry a bounded number of times on the (unlikely) event that a
        # randomly generated ID already exists; give up with a 500 after that.
        for _ in range(100):
            debtor_id = procedures.generate_new_debtor_id()
            try:
                debtor = procedures.reserve_debtor(debtor_id, verify_correctness=False)
                break
            except procedures.DebtorExists:  # pragma: no cover
                pass
        else:  # pragma: no cover
            abort(500, message='Can not generate a valid debtor ID.')
        return debtor
@admin_api.route('/.list')
class DebtorsListEndpoint(MethodView):
    @admin_api.response(DebtorsListSchema, example=specs.DEBTORS_LIST_EXAMPLE)
    @admin_api.doc(operationId='getDebtorsList', security=specs.SCOPE_ACCESS_READONLY)
    def get(self):
        """Return a paginated list of links to all activated debtors."""
        # The 'first' link points the client at the enumerate endpoint,
        # starting from the smallest possible 64-bit debtor ID.
        return {
            'uri': url_for('admin.DebtorsListEndpoint'),
            'items_type': 'ObjectReference',
            'first': url_for('admin.DebtorEnumerateEndpoint', debtorId=MIN_INT64),
        }
@admin_api.route('/<i64:debtorId>/enumerate', parameters=[specs.DEBTOR_ID])
class DebtorEnumerateEndpoint(MethodView):
    @admin_api.response(ObjectReferencesPageSchema(context=context), example=specs.DEBTOR_LINKS_EXAMPLE)
    @admin_api.doc(operationId='getDebtorsPage', security=specs.SCOPE_ACCESS_READONLY)
    def get(self, debtorId):
        """Return a collection of activated debtors.

        The returned object will be a fragment (a page) of a paginated
        list. The paginated list contains references to all activated
        debtors on the server. The returned fragment, and all the
        subsequent fragments, will be sorted by debtor ID, starting
        from the `debtorID` specified in the path. The sorting order
        is implementation-specific.

        **Note:** To obtain references to all activated debtors, the
        client should start with the debtor ID that precedes all other
        IDs in the sorting order.
        """
        # Page size comes from configuration; `next_debtor_id` is None on the
        # last page, in which case the 'next' link is omitted.
        n = int(current_app.config['APP_DEBTORS_PER_PAGE'])
        debtor_ids, next_debtor_id = procedures.get_debtor_ids(start_from=debtorId, count=n)
        debtor_uris = [{'uri': url_for('debtors.DebtorEndpoint', debtorId=debtor_id)} for debtor_id in debtor_ids]
        if next_debtor_id is None:
            # The last page does not have a 'next' link.
            return {
                'uri': request.full_path,
                'items': debtor_uris,
            }
        return {
            'uri': request.full_path,
            'items': debtor_uris,
            'next': url_for('admin.DebtorEnumerateEndpoint', debtorId=next_debtor_id),
        }
@admin_api.route('/<i64:debtorId>/reserve', parameters=[specs.DEBTOR_ID])
class DebtorReserveEndpoint(MethodView):
    @admin_api.arguments(DebtorReservationRequestSchema)
    @admin_api.response(DebtorReservationSchema(context=context))
    @admin_api.doc(operationId='reserveDebtor',
                   security=specs.SCOPE_ACTIVATE,
                   responses={409: specs.CONFLICTING_DEBTOR})
    def post(self, debtor_reservation_request, debtorId):
        """Try to reserve a specific debtor ID.

        **Note:** The reserved debtor ID will be the same as the
        `debtorId` specified in the path.

        ---
        Will fail if the debtor already exists.
        """
        try:
            debtor = procedures.reserve_debtor(debtorId)
        except procedures.DebtorExists:
            # 409 Conflict: the requested ID is already taken.
            abort(409)
        except procedures.InvalidDebtor:  # pragma: no cover
            abort(500, message='The node is not responsible for this debtor.')
        return debtor
@admin_api.route('/<i64:debtorId>/activate', parameters=[specs.DEBTOR_ID])
class DebtorActivateEndpoint(MethodView):
    @admin_api.arguments(DebtorActivationRequestSchema)
    @admin_api.response(DebtorSchema(context=context))
    @admin_api.doc(operationId='activateDebtor',
                   security=specs.SCOPE_ACTIVATE,
                   responses={409: specs.CONFLICTING_DEBTOR})
    def post(self, debtor_activation_request, debtorId):
        """Activate a debtor."""
        reservation_id = debtor_activation_request.get('optional_reservation_id')
        try:
            # Without an explicit reservation ID, reserve-then-activate in one go.
            if reservation_id is None:
                reservation_id = procedures.reserve_debtor(debtorId).reservation_id
                assert reservation_id is not None
            debtor = procedures.activate_debtor(debtorId, reservation_id)
        except procedures.DebtorExists:
            abort(409)
        except procedures.InvalidReservationId:
            abort(422, errors={'json': {'reservationId': ['Invalid ID.']}})
        except procedures.InvalidDebtor:  # pragma: no cover
            abort(500, message='The node is not responsible for this debtor.')
        return debtor
@admin_api.route('/<i64:debtorId>/deactivate', parameters=[specs.DEBTOR_ID])
class DebtorDeactivateEndpoint(MethodView):
    @admin_api.arguments(DebtorDeactivationRequestSchema)
    @admin_api.response(code=204)
    @admin_api.doc(operationId='deactivateDebtor', security=specs.SCOPE_DEACTIVATE)
    def post(self, debtor_deactivation_request, debtorId):
        """Deactivate a debtor."""
        # Deactivation is destructive, so it is restricted to superusers
        # (supervisors pass `ensure_admin` but are rejected here).
        if not g.superuser:
            abort(403)
        procedures.deactivate_debtor(debtorId)
# Debtor-facing endpoints; the `ensure_debtor_permissions` hook limits debtors
# to their own resources and supervisors to read-only access.
debtors_api = Blueprint(
    'debtors',
    __name__,
    url_prefix='/debtors',
    description="View public information about debtors.",
)
debtors_api.before_request(ensure_debtor_permissions)
@debtors_api.route('/.debtor')
class RedirectToDebtorEndpoint(MethodView):
    @debtors_api.response(code=204)
    @debtors_api.doc(operationId='redirectToDebtor',
                     security=specs.SCOPE_ACCESS_READONLY,
                     responses={204: specs.DEBTOR_DOES_NOT_EXIST,
                                303: specs.DEBTOR_EXISTS})
    def get(self):
        """Redirect to the debtor's record."""
        # `g.debtor_id` is set by the before-request hook; it is None for
        # superuser/supervisor requests, in which case no redirect happens
        # (falling through returns None -- presumably rendered as the declared
        # 204 empty response by flask-smorest; confirm).
        debtorId = g.debtor_id
        if debtorId is not None:
            location = url_for('debtors.DebtorEndpoint', _external=True, debtorId=debtorId)
            return redirect(location, code=303)
@debtors_api.route('/<i64:debtorId>/', parameters=[specs.DEBTOR_ID])
class DebtorEndpoint(MethodView):
    @debtors_api.response(DebtorSchema(context=context))
    @debtors_api.doc(operationId='getDebtor', security=specs.SCOPE_ACCESS_READONLY)
    def get(self, debtorId):
        """Return debtor."""
        # NOTE(review): aborts with 403 when the debtor does not exist or is
        # inactive, while DebtorConfigEndpoint.get aborts with 404 for the
        # same condition -- confirm whether the asymmetry is intentional.
        return procedures.get_active_debtor(debtorId) or abort(403)
@debtors_api.route('/<i64:debtorId>/config', parameters=[specs.DEBTOR_ID])
class DebtorConfigEndpoint(MethodView):
    @debtors_api.response(DebtorConfigSchema(context=context))
    @debtors_api.doc(operationId='getDebtorConfig', security=specs.SCOPE_ACCESS_READONLY)
    def get(self, debtorId):
        """Return debtors's configuration."""
        return procedures.get_active_debtor(debtorId) or abort(404)

    @debtors_api.arguments(DebtorConfigSchema)
    @debtors_api.response(DebtorConfigSchema(context=context))
    @debtors_api.doc(operationId='updateDebtorConfig',
                     security=specs.SCOPE_ACCESS_MODIFY,
                     responses={403: specs.FORBIDDEN_OPERATION,
                                409: specs.UPDATE_CONFLICT})
    def patch(self, debtor_config, debtorId):
        """Update debtor's configuration."""
        try:
            # `latest_update_id` implements optimistic concurrency control;
            # a stale value is reported as 409 below.
            config = procedures.update_debtor_config(
                debtor_id=debtorId,
                config_data=debtor_config['config_data'],
                latest_update_id=debtor_config['latest_update_id'],
                max_actions_per_month=current_app.config['APP_MAX_TRANSFERS_PER_MONTH'],
            )
        except procedures.TooManyManagementActions:
            abort(403)
        except procedures.DebtorDoesNotExist:
            abort(404)
        except procedures.UpdateConflict:
            abort(409, errors={'json': {'latestUpdateId': ['Incorrect value.']}})
        return config
# Credit-issuing transfer endpoints; permission-checked on every request.
transfers_api = Blueprint(
    'transfers',
    __name__,
    url_prefix='/debtors',
    description="Make credit-issuing transfers.",
)
transfers_api.before_request(ensure_debtor_permissions)
@transfers_api.route('/<i64:debtorId>/transfers/', parameters=[specs.DEBTOR_ID])
class TransfersListEndpoint(MethodView):
    @transfers_api.response(TransfersListSchema(context=context))
    @transfers_api.doc(operationId='getTransfersList', security=specs.SCOPE_ACCESS_READONLY)
    def get(self, debtorId):
        """Return the debtor's list of initiated transfers."""
        try:
            transfer_uuids = procedures.get_debtor_transfer_uuids(debtorId)
        except procedures.DebtorDoesNotExist:
            abort(404)
        return TransfersList(debtor_id=debtorId, items=transfer_uuids)
    @transfers_api.arguments(TransferCreationRequestSchema)
    @transfers_api.response(TransferSchema(context=context), code=201, headers=specs.LOCATION_HEADER)
    @transfers_api.doc(operationId='createTransfer',
                       security=specs.SCOPE_ACCESS_MODIFY,
                       responses={303: specs.TRANSFER_EXISTS,
                                  403: specs.FORBIDDEN_OPERATION,
                                  409: specs.TRANSFER_CONFLICT})
    def post(self, transfer_creation_request, debtorId):
        """Initiate a credit-issuing transfer."""
        # Verify the recipient.
        recipient_uri = transfer_creation_request['recipient_identity']['uri']
        try:
            recipient_debtor_id, recipient = parse_account_uri(recipient_uri)
        except ValueError:
            abort(422, errors={'json': {'recipient': {'uri': ['The URI can not be recognized.']}}})
        # A debtor may only issue credit into accounts of its own currency.
        if recipient_debtor_id != debtorId:
            abort(422, errors={'json': {'recipient': {'uri': ['Invalid recipient account.']}}})
        uuid = transfer_creation_request['transfer_uuid']
        location = url_for('transfers.TransferEndpoint', _external=True, debtorId=debtorId, transferUuid=uuid)
        try:
            transfer = procedures.initiate_running_transfer(
                debtor_id=debtorId,
                transfer_uuid=uuid,
                amount=transfer_creation_request['amount'],
                recipient_uri=recipient_uri,
                recipient=recipient,
                transfer_note_format=transfer_creation_request['transfer_note_format'],
                transfer_note=transfer_creation_request['transfer_note'],
                max_actions_per_month=current_app.config['APP_MAX_TRANSFERS_PER_MONTH'],
            )
        except (procedures.TooManyManagementActions, procedures.TooManyRunningTransfers):
            abort(403)
        except procedures.DebtorDoesNotExist:
            abort(404)
        except procedures.TransfersConflict:
            abort(409)
        except procedures.TransferExists:
            # Idempotent creation: the same UUID was used before -- point
            # the client at the existing transfer instead of failing.
            return redirect(location, code=303)
        return transfer, {'Location': location}
@transfers_api.route('/<i64:debtorId>/transfers/<uuid:transferUuid>', parameters=[specs.DEBTOR_ID, specs.TRANSFER_UUID])
class TransferEndpoint(MethodView):
    @transfers_api.response(TransferSchema(context=context))
    @transfers_api.doc(operationId='getTransfer', security=specs.SCOPE_ACCESS_READONLY)
    def get(self, debtorId, transferUuid):
        """Return a transfer."""
        return procedures.get_running_transfer(debtorId, transferUuid) or abort(404)
    @transfers_api.arguments(TransferCancelationRequestSchema)
    @transfers_api.response(TransferSchema(context=context))
    @transfers_api.doc(operationId='cancelTransfer',
                       security=specs.SCOPE_ACCESS_MODIFY,
                       responses={403: specs.TRANSFER_CANCELLATION_FAILURE})
    def post(self, cancel_transfer_request, debtorId, transferUuid):
        """Try to cancel a transfer.
        **Note:** This is an idempotent operation.
        """
        try:
            transfer = procedures.cancel_running_transfer(debtorId, transferUuid)
        except procedures.ForbiddenTransferCancellation: # pragma: no cover
            # The transfer has advanced too far to be cancelled.
            abort(403)
        except procedures.TransferDoesNotExist:
            abort(404)
        return transfer
    @transfers_api.response(code=204)
    @transfers_api.doc(operationId='deleteTransfer', security=specs.SCOPE_ACCESS_MODIFY)
    def delete(self, debtorId, transferUuid):
        """Delete a transfer.
        Before deleting a transfer, client implementations should
        ensure that at least 5 days (120 hours) have passed since the
        transfer was initiated (see the `initiatedAt` field). Also, it
        is recommended successful transfers to stay on the server at
        least a few weeks after their finalization.
        Note that deleting a running (not finalized) transfer does not
        cancel it. To ensure that a running transfer has not been
        successful, it must be canceled before deletion.
        """
        try:
            procedures.delete_running_transfer(debtorId, transferUuid)
        except procedures.TransferDoesNotExist:
            # Idempotent delete: a missing transfer is a successful no-op.
            pass
# Public-document endpoints. Note: unlike the other blueprints, there is
# no blanket before_request here -- handlers that need permissions call
# `ensure_debtor_permissions` themselves.
documents_api = Blueprint(
    'documents',
    __name__,
    url_prefix='/debtors',
    description="Maintains an ever-growing set of public documents.",
)
@documents_api.route('/<i64:debtorId>/public', parameters=[specs.DEBTOR_ID])
class RedirectToDebtorsInfoEndpoint(MethodView):
    @documents_api.response(code=302)
    @documents_api.doc(operationId='redirectToDebtorsInfo', responses={302: specs.DEBTOR_INFO_EXISTS})
    def get(self, debtorId):
        """Redirect to the debtor's public info document.
        The user will be redirected to the info URL specified in the
        debtor's configuration. If no URL is specified in the
        configuration, a `404` error code will be returned.
        """
        debtor = procedures.get_active_debtor(debtorId) or abort(404)
        location = debtor.debtor_info_iri or abort(404)
        response = redirect(location, code=302)
        # Allow intermediaries/clients to cache the redirect for 24h.
        response.headers['Cache-Control'] = 'max-age=86400'
        return response
@documents_api.route('/<i64:debtorId>/documents/', parameters=[specs.DEBTOR_ID])
class SaveDocumentEndpoint(MethodView):
    @documents_api.response(code=201, headers=specs.LOCATION_HEADER)
    @documents_api.doc(operationId='saveDocument',
                       security=specs.SCOPE_ACCESS_MODIFY,
                       requestBody=specs.DOCUMENT_CONTENT,
                       responses={201: specs.DOCUMENT_CONTENT,
                                  403: specs.FORBIDDEN_OPERATION,
                                  413: specs.DOCUMENT_IS_TOO_BIG})
    def post(self, debtorId):
        """Save a document.
        The body of the request should contain the document to be
        saved. The document can be of any type, as long as the type is
        correctly specified by the `Content-Type` header in the
        request.
        """
        # This blueprint has no before_request hook, so the permission
        # check must be done explicitly here.
        ensure_debtor_permissions()
        # NOTE(review): request.content_length may be None (no
        # Content-Length header), which would raise here -- confirm the
        # WSGI setup guarantees the header.
        if request.content_length > current_app.config['APP_DOCUMENT_MAX_CONTENT_LENGTH']:
            abort(413)
        content_type = request.content_type or 'text/html; charset=utf-8'
        content = request.get_data() or b''
        try:
            document = procedures.save_document(
                debtor_id=debtorId,
                content_type=content_type,
                content=content,
                max_saves_per_year=current_app.config['APP_DOCUMENT_MAX_SAVES_PER_YEAR'],
            )
        except procedures.TooManySavedDocuments:
            abort(403)
        except procedures.DebtorDoesNotExist:
            abort(404)
        location = url_for(
            'documents.DocumentEndpoint',
            _external=True,
            debtorId=debtorId,
            documentId=document.document_id,
        )
        # Echo the saved content back, as the 201 response body.
        return make_response(content, 201, {'Content-Type': content_type, 'Location': location})
@documents_api.route('/<i64:debtorId>/documents/<i64:documentId>/public', parameters=[specs.DEBTOR_ID, specs.DOC_ID])
class DocumentEndpoint(MethodView):
@documents_api.response(code=200)
@documents_api.doc(operationId='getDocument', responses={200: specs.DOCUMENT_CONTENT})
def get(self, debtorId, documentId):
"""Return a saved document.
The returned document can be of any type. The | |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InvalidOrder
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
import json
import sys
from datetime import datetime
class tprexchange(Exchange):
    def describe(self):
        """Return the static ccxt exchange descriptor.

        Merges this exchange's capabilities, timeframes, URL templates,
        private API routes, required credentials, and error mappings
        over the base :class:`Exchange` description.
        """
        return self.deep_extend(super(tprexchange, self).describe(), {
            'id': 'tprexchange',
            'name': 'TPR Exchange',
            # 'countries': ['US'],
            # 'rateLimit': 500,
            'version': 'v1',
            'certified': False,
            'has': {
                'loadMarkets': True,
                'cancelAllOrders': False,
                'cancelOrder': True,
                'cancelOrders': False,
                'CORS': False,
                'createDepositAddress': False,
                'createLimitOrder': False,
                'createMarketOrder': False,
                'createOrder': True,
                'deposit': False,
                'editOrder': 'emulated',
                'fetchBalance': True,
                'fetchBidsAsks': False,
                'fetchClosedOrders': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': False,
                'fetchDeposits': False,
                'fetchFundingFees': False,
                'fetchL2OrderBook': False,
                'fetchLedger': False,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrderBooks': False,
                'fetchOrders': True,
                'fetchOrderTrades': False,
                'fetchStatus': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': False,
                'fetchTrades': True,
                'fetchTradingFee': False,
                'fetchTradingFees': False,
                'fetchTradingLimits': False,
                'fetchTransactions': False,
                'fetchWithdrawals': False,
                'privateAPI': True,
                'publicAPI': False,
                'signIn': True,
                'withdraw': False,
                'getMarketPrice': True,
            },
            # Timeframe labels mapped to their length in minutes (strings).
            'timeframes': {
                '1m': '1',
                '1h': '60',
                '1d': '1440',
                '1w': '10080',
                '1mn': '43200',
            },
            'urls': {
                'logo': '',
                'api': '{hostname}',
                'www': '',
                'doc': '',
                'fees': '',
                'referral': '',
            },
            'api': {
                'private': {
                    'get': [
                    ],
                    'post': [
                        'ucenter/api-login',
                        'ucenter/member/balance',
                        'market/symbol-thumb',
                        'market/coins-info',
                        'market/symbol-info',
                        'exchange/order/add',
                        'exchange/order/find',
                        'exchange/order/all',
                        'exchange/order/apicancel',
                        'exchange/order/trades',
                        'exchange/order/my-trades',
                        'exchange/exchange-coin/base-symbol',
                    ],
                    'delete': [
                    ],
                },
                'feed': {
                    'get': [
                    ],
                },
            },
            'fees': {
                'trading': {
                },
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': False,
            },
            'precisionMode': SIGNIFICANT_DIGITS,
            'options': {
                'createMarketBuyOrderRequiresPrice': False,
            },
            # Server error strings mapped to ccxt exception classes.
            'exceptions': {
                'exact': {
                    'Invalid cost': InvalidOrder,  # {"message":"Invalid cost","_links":{"self":{"href":"/orders","templated":false}}}
                    'Invalid order ID': InvalidOrder,  # {"message":"Invalid order ID","_links":{"self":{"href":"/orders/4a151805-d594-4a96-9d64-e3984f2441f7","templated":false}}}
                    'Invalid market !': BadSymbol,  # {"message":"Invalid market !","_links":{"self":{"href":"/markets/300/order-book","templated":false}}}
                },
                'broad': {
                    'Failed to convert argument': BadRequest,
                },
            },
        })
    def parse_ticker(self, response):
        """Build a ccxt-style ticker dict from a raw list of order records.

        Scans all orders once, deriving high/low from every order, best
        bid/ask from *open* orders, and VWAP/close/volumes from orders whose
        ``info.status`` is ``COMPLETED``.

        :param response: list of order dicts (as returned by fetch_orders)
        :returns: ticker dict, or ``[]`` when `response` is empty
        """
        if len(response) == 0:
            # NOTE(review): empty input yields a list, non-empty a dict --
            # callers must handle both shapes.
            return []
        symbol = response[0].get('symbol')
        high = 0
        bidVolume = 0
        askVolume = 0
        vwap = 0
        vwapCost = 0
        vwapVolume = 0
        open_ = 'None'
        close = 0
        last = close
        previousClose = 'None'
        change = 'None'
        percentage = 'None'
        average = 'None'
        baseVolume = 0
        quoteVolume = 0
        time = 0
        # Millisecond timestamps: 24 hours ago, and "now".
        lastDayTime = int((datetime.now().timestamp() - 86400) * 1000)
        currentTimestamp = int(datetime.now().timestamp() * 1000)
        currentDatetime = str(datetime.fromtimestamp(currentTimestamp * 0.001))
        low = response[0].get('price')
        bid = 0
        ask = sys.maxsize
        openSellOrdersCount = 0
        for order in response:
            price = order.get('price')
            amount = order.get('amount')
            timestamp = order.get('timestamp')
            if high < price:
                high = price
            if low > price:
                low = price
            if order.get('status') == 'open':
                if order.get('side') == 'buy':
                    # Best bid is the highest open buy price.
                    if bid < price:
                        bid = price
                    if bidVolume < amount:
                        bidVolume = amount
            if order.get('status') == 'open':
                if order.get('side') == 'sell':
                    openSellOrdersCount += 1
                    # Best ask is the lowest open sell price.
                    if ask > price:
                        ask = price
                    if askVolume < amount:
                        askVolume = amount
            if order.get('info').get('status') == 'COMPLETED':
                vwapCost += price * amount
                vwapVolume += amount
                if time < timestamp:
                    time = timestamp
                    close = price
                if timestamp > lastDayTime:
                    # NOTE(review): quoteVolume accumulates amounts and
                    # baseVolume accumulates prices -- this looks swapped
                    # relative to ccxt conventions; confirm intent.
                    quoteVolume += amount
                    baseVolume += price
        if vwapVolume != 0:
            vwap = vwapCost / vwapVolume
        if openSellOrdersCount == 0:
            # No open asks: report 0 rather than the sys.maxsize sentinel.
            ask = 0
        last = close
        result = {
            'symbol': symbol,
            'info': response,
            'timestamp': currentTimestamp,
            'datetime': currentDatetime,
            'high': high,
            'low': low,
            'bid': bid,
            'bidVolume': bidVolume,
            'ask': ask,
            'askVolume': askVolume,
            'vwap': vwap,
            'open': open_,
            'close': close,
            'last': last,
            'previousClose': previousClose,
            'change': change,
            'percentage': percentage,
            'average': average,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
        }
        return result
def fetch_ticker(self, symbol, since=None, limit=None):
response = self.fetch_orders(symbol, since, limit)
# Response example:
# {
# 'symbol': 'BTC/USDT',
# 'info': [...],
# 'timestamp': 1615386851976,
# 'datetime': '2021-03-10 16:34:11.976000',
# 'high': 50.0,
# 'low': 1.0,
# 'bid': 30.0,
# 'bidVolume': 15.0,
# 'ask': 40.0,
# 'askVolume': 25.0,
# 'vwap': 11.0,
# 'open': 'None',
# 'close': 20.0,
# 'last': 20.0,
# 'previousClose': 'None',
# 'change': 'None',
# 'percentage': 'None',
# 'average': 'None',
# 'baseVolume': 60.0,
# 'quoteVolume': 30.0
# }
return self.parse_ticker(response)
def fetch_tickers(self, since=None, limit=None):
# Response example:
# [
# {
# 'symbol': 'BTC/USDT',
# 'info': [...],
# 'timestamp': 1615386851976,
# 'datetime': '2021-03-10 16:34:11.976000',
# 'high': 50.0,
# 'low': 1.0,
# 'bid': 30.0,
# 'bidVolume': 15.0,
# 'ask': 40.0,
# 'askVolume': 25.0,
# 'vwap': 11.0,
# 'open': 'None',
# 'close': 20.0,
# 'last': 20.0,
# 'previousClose': 'None',
# 'change': 'None',
# 'percentage': 'None',
# 'average': 'None',
# 'baseVolume': 60.0,
# 'quoteVolume': 30.0
# },
# ...
# ]
result = []
symbols = self.fetch_markets()
for symblol in symbols:
response = self.fetch_orders(symblol.get('symbol'), since, limit)
ticker = self.parse_ticker(response)
if len(ticker) != 0:
result.append(ticker)
return result
def fetch_order_book(self, symbol, limit, since=0):
# Response example:
# {
# 'bids':
# [
# [20.0, 10.0, 'E161538482263642'], // [price, amount, orderId]
# [30.0, 15.0, 'E161538482271646']
# ],
# 'asks':
# [
# [40.0, 20.0, 'E161538482278825'],
# [50.0, 25.0, 'E161538482286085']
# ],
# 'timestamp': 1615390711695,
# 'datetime': '2021-03-10 17:38:31.695000',
# 'nonce': 1615390711695
# }
orders = self.fetch_open_orders(symbol, since, limit)
bids = []
asks = []
for order in orders:
temp = []
temp.append(order.get('price'))
temp.append(order.get('amount'))
temp.append(order.get('id'))
if order.get('side') == 'buy':
bids.append(temp)
else:
asks.append(temp)
currentTimestamp = int(datetime.now().timestamp() * 1000)
currentDatetime = str(datetime.fromtimestamp(currentTimestamp * 0.001))
result = {
'bids': bids,
'asks': asks,
'timestamp': currentTimestamp,
'datetime': currentDatetime,
'nonce': currentTimestamp,
}
return result
def parse_markets(self, response):
listData = []
for value in response:
tmp = {
"id": value.get("coinSymbol"),
"symbol": value.get("symbol"),
"base": value.get("coinSymbol"),
"quote": value.get("baseSymbol"),
"baseId": value.get("coinSymbol"),
"quoteId": value.get("baseSymbol"),
"type": value.get("publishType"),
"active": value.get("enable"),
"precision": {
"amount": value.get("coinScale"),
"price": value.get("baseCoinScale"),
},
"limits": {
"amount": {"min": value.get("minVolume"), "max": value.get("maxVolume")},
"price": {"min": value.get("minSellPrice"), "max": value.get("maxBuyPrice")},
"cost": {"min": value.get("minVolume") * value.get("minSellPrice"), "max": value.get("maxVolume") * value.get("maxBuyPrice")},
},
"taker": value.get("fee"),
"maker": value.get("fee"),
"info": value,
}
listData.append(tmp)
return listData
def add_frame(self, timeFrameStart, timeFrameEnd, timeframe, highestPrice, lowestPrice, amount, result, openPrice, closePrice):
frame = []
frame.append(timeFrameStart)
frame.append(openPrice)
frame.append(highestPrice)
frame.append(lowestPrice)
frame.append(closePrice)
frame.append(amount)
result.append(frame)
    def parse_ohlcv(self, response, since, timeframe):
        """Aggregate completed orders into fixed-width OHLCV candles.

        Walks ``response['content']`` (assumed sorted by time -- TODO
        confirm) and buckets each order into consecutive frames of
        `timeframe` seconds starting at `since` (milliseconds).

        :param response: dict with a ``content`` list of orders
        :param since: start timestamp in milliseconds
        :param timeframe: frame width in seconds
        :returns: list of ``[timestamp, open, high, low, close, volume]``
        """
        highestPrice = 0
        lowestPrice = sys.maxsize
        price = 0
        amount = 0
        timeFrameStart = since
        # Frame end: start (converted to seconds) plus width, back to ms.
        timeFrameEnd = int((since * 0.001 + timeframe) * 1000)
        result = []
        i = 0
        orders = response.get('content')
        isOpenPrice = True
        openPrice = 0
        closePrice = 0
        while i < len(orders):
            if isOpenPrice == True:
                # First order seen in the current frame sets the open price.
                openPrice = orders[i].get('price')
                isOpenPrice = False
            time = orders[i].get('time')
            if time >= timeFrameStart and time <= timeFrameEnd:
                # Order falls inside the current frame: fold it in.
                price = orders[i].get('price')
                closePrice = price
                if highestPrice < price:
                    highestPrice = price
                if lowestPrice > price:
                    lowestPrice = price
                amount += orders[i].get('amount')
                i += 1
                if i == len(orders):
                    # Last order consumed: flush the final (partial) frame.
                    self.add_frame(timeFrameStart, timeFrameEnd, timeframe, highestPrice, lowestPrice, amount, result, openPrice, closePrice)
            else:
                # Order lies beyond the current frame: close this frame out.
                if lowestPrice == sys.maxsize:
                    # Empty frame: emit zeros and retry this order against
                    # the next frame (i is rewound, then advanced below).
                    lowestPrice = 0
                    openPrice = 0
                    closePrice = 0
                    i -= 1
                # NOTE(review): when the frame was NON-empty, the boundary
                # order appears to be skipped (i advances without the order
                # being counted) -- possible off-by-one, confirm.
                self.add_frame(timeFrameStart, timeFrameEnd, timeframe, highestPrice, lowestPrice, amount, result, openPrice, closePrice)
                timeFrameStart = timeFrameEnd + 1
                timeFrameEnd = int((timeFrameEnd * 0.001 + timeframe) * 1000)
                amount = 0
                highestPrice = 0
                lowestPrice = sys.maxsize
                isOpenPrice = True
                i += 1
        return result
# timeframe variants:
# 1m (one minute);
# 1h (one hour);
# 1d (one day - 24 hours)
# 1w (one week - 7 days)
# 1mn (one mounth - 30 days)
def fetch_ohlcv(self, symbol, timeframe=None, since=0, limit=None, params={}):
# Response example:
# [
# [
# 1504541580000, // UTC timestamp in milliseconds, integer
# 4235.4, // (O)pen price, float
# 4240.6, // (H)ighest price, float
# 4230.0, // (L)owest price, float
# 4230.7, // (C)losing price, float
# 37.72941911 // (V)olume (in terms of the base currency), float
# ],
# ...
# ]
inputDataCheck = False
for frame in self.timeframes:
if frame == timeframe:
inputDataCheck = True
break
if inputDataCheck == False:
return {'error': 'Incorrect timeframe'}
tFrame = int(self.timeframes.get(timeframe)) * 60
default_order_amount_limit = 100
params['status'] = 'COMPLETED'
if 'page' in params:
params['pageNo'] = self.safe_string(params, 'page')
else:
params['pageNo'] = 0
if since is None:
since | |
# Source: SpaceTeam/space-event-trace -- space_trace/views.py
from datetime import date, datetime, timedelta
from functools import wraps
from io import StringIO
from traceback import format_exception
import csv
from typing import List, Tuple
import flask
from flask import session, redirect, url_for, request, flash, abort
from flask.helpers import make_response
from flask.templating import render_template
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from werkzeug.exceptions import InternalServerError
from space_trace import app, db
from space_trace.certificates import (
detect_and_attach_vaccine_cert,
)
from space_trace.models import User, Visit, Seat
def maybe_load_user(f):
    """Decorator: put the logged-in user (or ``None``) into ``flask.g.user``.

    Unlike :func:`require_login`, anonymous visitors are allowed through;
    ``flask.g.user`` is simply ``None`` for them.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        loaded = None
        if "username" in session:
            loaded = User.query.filter(User.email == session["username"]).first()
        flask.g.user = loaded
        return f(*args, **kwargs)
    return wrapper
def require_login(f):
    """Decorator: redirect to the login page unless a known user is logged in.

    On success the user object is made available as ``flask.g.user``.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if "username" not in session:
            return redirect(url_for("login"))
        current = User.query.filter(User.email == session["username"]).first()
        if current is None:
            # Stale session (e.g. user record removed): clear it and
            # force a fresh login.
            session.pop("username", None)
            return redirect(url_for("login"))
        flask.g.user = current
        return f(*args, **kwargs)
    return wrapper
def require_admin(f):
    """Decorator: allow only admins (implies :func:`require_login`).

    Non-admins are flashed a warning and sent back to the home page.
    """
    @wraps(f)
    @require_login
    def wrapper(*args, **kwargs):
        if flask.g.user.is_admin():
            return f(*args, **kwargs)
        flash("You are not an admin, what were you thinking?", "danger")
        return redirect(url_for("home"))
    return wrapper
def require_vaccinated(f):
    """Decorator: require a valid vaccination certificate.

    Assumes ``flask.g.user`` is already populated (stack this after
    :func:`require_login`); users without a valid certificate are sent
    to the certificate-upload page.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if flask.g.user.is_vaccinated():
            return f(*args, **kwargs)
        flash("You need to upload a vaccination certificate.", "info")
        return redirect(url_for("cert"))
    return wrapper
def get_active_visit(user: User) -> Visit:
    """Return the user's recorded visit, or ``None`` if there is none.

    NOTE(review): despite the name, this returns the *first* visit row
    for the user with no time filter -- presumably visits are purged per
    event/day; confirm against the data model.
    """
    # Fix: `db.and_()` with a single clause was redundant -- a plain
    # filter produces the identical query.
    return Visit.query.filter(Visit.user == user.id).first()
@app.get("/")
@require_login
@require_vaccinated
def home():
    """Landing page: today's visit (if any), the assigned seat, and an
    expiry warning when the vaccination certificate runs out soon."""
    user: User = flask.g.user
    remaining = user.vaccinated_till - date.today()
    if remaining < timedelta(days=21):
        # Escalate the banner color during the final week of validity.
        severity = "warning" if remaining > timedelta(days=7) else "danger"
        flash(
            "Your vaccination certificate will expire "
            f"in {remaining.days} days.",
            severity,
        )
    return render_template(
        "visit.html",
        user=user,
        visit=get_active_visit(user),
        seat=Seat.query.filter(Seat.user == user.id).first(),
    )
@app.post("/")
@require_login
@require_vaccinated
def add_visit():
    """Register today's visit for the current user and assign a free seat."""
    # Check if the event has even started
    starttime = datetime.fromisoformat(app.config["EVENT_START"])
    if datetime.now() < starttime:
        flash(f"Event registration will start at: {starttime}", "danger")
        return redirect(url_for("home"))
    user: User = flask.g.user
    # Don't enter a visit if there is already one for today
    visit = get_active_visit(user)
    if visit is not None:
        flash("You are already registered for today", "warning")
        return redirect(url_for("home"))
    # Create a new visit
    visit = Visit(datetime.now(), user.id)
    db.session.add(visit)
    # Pick a free seat: `Seat.user == None` is a deliberate SQLAlchemy
    # NULL comparison (do not "fix" to `is None`).
    seat = (
        db.session.query(Seat)
        .filter(Seat.user == None)
        .order_by(Seat.row, Seat.number.desc())
        .first()
    )
    if seat is None:
        # Returning before commit() discards the pending visit as well.
        flash("There are no seats left", "danger")
        return redirect(url_for("home"))
    db.session.query(Seat).filter(Seat.id == seat.id).update({"user": user.id})
    db.session.commit()
    return redirect(url_for("home"))
@app.get("/cert")
@require_login
def cert():
    """Certificate page, indicating whether a still-valid cert is on file."""
    user: User = flask.g.user
    has_valid_cert = (
        user.vaccinated_till is not None
        and user.vaccinated_till > date.today()
    )
    return render_template("cert.html", user=user, is_vaccinated=has_valid_cert)
@app.post("/cert")
@require_login
def upload_cert():
    """Accept an uploaded vaccination certificate and attach it to the user."""
    user: User = flask.g.user
    vaccine_file = request.files["vaccineFile"]
    # If the user does not select a file, the browser submits an
    # empty file without a filename.
    if vaccine_file.filename == "":
        flash("You must select a vaccine file", "danger")
        return redirect(url_for("cert"))
    try:
        # NOTE(review): after the guard above, `new_vaccine` is always
        # True -- the flag looks like a leftover from a two-file form.
        new_vaccine = vaccine_file.filename != ""
        if new_vaccine:
            # Parses the certificate and mutates user.vaccinated_till.
            detect_and_attach_vaccine_cert(vaccine_file, user)
        # Update the user
        db.session.query(User).filter(User.id == user.id).update(
            {
                "vaccinated_till": user.vaccinated_till,
            }
        )
        db.session.commit()
        user = User.query.filter(User.id == user.id).first()
    except Exception as e:
        # Best-effort error reporting: show the parser's message to the user.
        if hasattr(e, "message"):
            message = e.message
        else:
            message = str(e)
        flash(message, "danger")
        return redirect(request.url)
    message_vaccinated = (
        "a valid vaccination certificate" if new_vaccine else ""
    )
    flash(
        f"Successfully uploaded {message_vaccinated} 😀",
        "success",
    )
    return redirect(url_for("home"))
@app.post("/cert-delete")
@require_login
def delete_cert():
    """Remove the current user's stored vaccination certificate."""
    user: User = flask.g.user
    if user.vaccinated_till is None:
        flash("You don't have a certificate to delete", "danger")
    else:
        db.session.query(User).filter(User.id == user.id).update(
            {"vaccinated_till": None}
        )
        db.session.commit()
        flash("Successfully deleted your certificate", "success")
    return redirect(url_for("cert"))
@app.post("/whitelist-user")
@require_login
@require_admin
def whitelist_user():
    """Admin action: mark a user as vaccinated until tomorrow.

    Only extends the validity -- a certificate already valid beyond
    tomorrow is left untouched (and reported as such).
    """
    whitelist_id = int(request.form["whitelistId"])
    whitelisted_till = date.today() + timedelta(days=1)
    # The extra filter makes this a no-op when the existing certificate
    # already lasts longer than the whitelist period.
    db.session.query(User).filter(User.id == whitelist_id).filter(
        User.vaccinated_till < whitelisted_till
    ).update({"vaccinated_till": whitelisted_till})
    db.session.commit()
    user = db.session.query(User).filter(User.id == whitelist_id).first()
    if user.vaccinated_till == whitelisted_till:
        flash(
            f"Successfully whitelisted {user.full_name()} until {whitelisted_till}",
            "success",
        )
    else:
        flash(
            f"{user.full_name()} already has a vaccine certificate valid till: {user.vaccinated_till}",
            "warning",
        )
    return redirect(url_for("admin"))
@app.get("/admin")
@require_admin
def admin():
    """Admin dashboard: all users plus a per-hour check-in histogram."""
    users = sorted(User.query.all(), key=lambda u: u.email)
    # Count visits grouped by the hour of day they were checked in.
    hourly_counts = db.session.query(
        db.func.strftime("%H", Visit.timestamp), db.func.count(Visit.id)
    ).group_by(db.func.strftime("%H", Visit.timestamp))
    checkin_per_hour = {
        "labels": [f"{hour:02d}" for hour in range(24)],
        "data": [0 for _ in range(24)],
    }
    for hour, count in hourly_counts:
        checkin_per_hour["data"][int(hour)] = count
    return render_template(
        "admin.html",
        user=flask.g.user,
        users=users,
        checkin_per_hour=checkin_per_hour,
        now=datetime.now(),
    )
# TODO: this should be a subroute of admin
@app.get("/contacts.csv")
@require_admin
def contacts_csv():
    """Export, as CSV, everyone who visited between startDate and endDate."""
    format = "%Y-%m-%d"
    start = datetime.strptime(request.args.get("startDate"), format)
    end = datetime.strptime(request.args.get("endDate"), format)
    if start > end:
        flash("End date cannot be before start date.", "warning")
        return redirect("admin")
    # This may look weired but we need to do a bit of arithmetic with both
    # timestamps. At the moment both timestamps point to the
    # start of the day (0:00) but end should point to the last minute so if
    # both point to the same day one whole day gets selected.
    # Actually start should point to 12h before that because members that only
    # logged in 12h before are still considered as in the HQ.
    start = start - timedelta(hours=12)
    end = end + timedelta(hours=24)
    # Get all email addresses of people logged in that time period
    users = (
        db.session.query(User)
        .filter(User.id == Visit.user)
        .filter(db.and_(Visit.timestamp > start, Visit.timestamp < end))
        .all()
    )
    if len(users) == 0:
        flash("No members were in the HQ at that time 👍", "success")
        return redirect("admin")
    # TODO: the convertion from User to csv should be its own function
    # Convert the mails to names
    # (assumes every email is of the form first.last@domain -- TODO confirm)
    names = []
    for user in users:
        first, last = user.email.split("@")[0].split(".")
        names.append((first, last))
    # Convert to a csv
    si = StringIO()
    cw = csv.writer(si)
    cw.writerow(["first name", "last name"])
    for name in names:
        cw.writerow(name)
    output = make_response(si.getvalue())
    output.headers["Content-Disposition"] = "attachment; filename=export.csv"
    output.headers["Content-type"] = "text/csv"
    return output
# TODO: this should be a subroute of admin
@app.get("/smart-contacts.csv")
@require_admin
def smart_contacts_csv():
    """Export, as CSV, all users who overlapped (±12h) with an infected
    user's visits since startDate."""
    format = "%Y-%m-%d"
    start = datetime.strptime(request.args.get("startDate"), format)
    infected_id = int(request.args.get("infectedId"))
    # This may look weired but we need to do a bit of arithmetic with both
    # timestamps. At the moment both timestamps point to the
    # start of the day (0:00) but end should point to the last minute so if
    # both point to the same day one whole day gets selected.
    # Actually start should point to 12h before that because members that only
    # logged in 12h before are still considered as in the HQ.
    start = start - timedelta(hours=12)
    # Get all contacts of the infected
    # (the former `: User` annotations were wrong -- these are Visit aliases)
    visit1 = db.aliased(Visit)
    visit2 = db.aliased(Visit)
    # visit1: visits of the infected user; visit2: any visit within a
    # 12-hour window around such a visit.
    query = (
        db.session.query(User)
        .filter(visit1.user == infected_id)
        .filter(visit1.timestamp > start)
        .filter(visit2.timestamp > db.func.date(visit1.timestamp, "-12 hours"))
        .filter(visit2.timestamp < db.func.date(visit1.timestamp, "+12 hours"))
        .filter(User.id == visit2.user)
        .filter(User.id != infected_id)
    )
    users = query.all()
    if len(users) == 0:
        flash("No members were in the HQ at that time 👍", "success")
        return redirect("admin")
    # Convert the mails to names
    # (assumes every email is of the form first.last@domain -- TODO confirm)
    names = []
    for user in users:
        first, last = user.email.split("@")[0].split(".")
        names.append((first, last))
    # Convert to a csv
    si = StringIO()
    cw = csv.writer(si)
    cw.writerow(["first name", "last name"])
    for name in names:
        cw.writerow(name)
    output = make_response(si.getvalue())
    output.headers["Content-Disposition"] = "attachment; filename=export.csv"
    output.headers["Content-type"] = "text/csv"
    return output
@app.get("/help")
@maybe_load_user
def help():
    """Static help page; works for anonymous and logged-in users alike."""
    return render_template("help.html", user=flask.g.user)
def zip_users_seats(users, seats) -> List[Tuple[User, Seat]]:
    """Pair every user with their seat (matched via ``seat.user == user.id``).

    Raises ``KeyError`` if some user has no seat among `seats`.
    """
    seat_by_user_id = {seat.user: seat for seat in seats}
    return [(user, seat_by_user_id[user.id]) for user in users]
@app.get("/statistic")
@maybe_load_user
def statistic():
    """Public statistics page; logged-in users also see who is present."""
    users = db.session.query(User).filter(User.id == Visit.user).all()
    # Fix: `Seat.id is not None` was evaluated by *Python* against the
    # column object (always True), so the filter was a constant. Use
    # SQLAlchemy's `isnot` so the NULL check actually runs in SQL.
    seats = db.session.query(Seat).filter(Seat.id.isnot(None)).all()
    active_users = None
    if flask.g.user is not None:
        # Only authenticated visitors may see the attendee list.
        zipped = zip_users_seats(users, seats)
        active_users = []
        for user, seat in zipped:
            seat_text = f"row {seat.row}, number {seat.number}"
            active_users.append(
                (user.first_name(), user.last_name(), seat_text)
            )
        active_users = sorted(active_users, key=lambda n: n[0])
    return render_template(
        "statistic.html",
        user=flask.g.user,
        active_users=active_users,
        active_visits=len(users),
    )
def init_saml_auth(req):
    """Create a SAML auth object configured from the app's ``SAML_PATH``."""
    return OneLogin_Saml2_Auth(req, custom_base_path=app.config["SAML_PATH"])
def prepare_flask_request(request):
    """Translate a Flask request into the dict python3-saml expects.

    If the server sits behind proxies or balancers, the HTTP_X_FORWARDED
    fields would have to be consulted here instead.
    """
    is_https = request.scheme == "https"
    return {
        "https": "on" if is_https else "off",
        "http_host": request.host,
        "script_name": request.path,
        "get_data": request.args.copy(),
        "post_data": request.form.copy(),
    }
@app.get("/login")
def login():
    """Show the login page."""
    return render_template("login.html")
@app.post("/login")
def add_login():
    """Start an SSO login and redirect the browser to the identity provider."""
    auth = init_saml_auth(prepare_flask_request(request))
    # TODO: Move this to config
    return_to = "https://gv.tust.at/"
    sso_redirect_url = auth.login(return_to)
    # Remember the request id so the SAML response can be validated later.
    session["AuthNRequestID"] = auth.get_last_request_id()
    return redirect(sso_redirect_url)
@app.route("/saml", methods=["POST", "GET"])
def saml_response():
req = prepare_flask_request(request)
auth = init_saml_auth(req)
errors = []
request_id = None
if "AuthNRequestID" in session:
request_id = session["AuthNRequestID"]
auth.process_response(request_id=request_id)
errors = auth.get_errors()
if len(errors) != 0:
flash(f"An error occured during login: {errors}", "danger")
return redirect(url_for("login"))
if "AuthNRequestID" in session:
del session["AuthNRequestID"]
username = str(auth.get_nameid())
user = User.query.filter(User.email == username).first()
if user is None:
firstname = username.split(".", 1)[0].capitalize()
user = User(firstname, username)
db.session.add(user)
db.session.commit()
session["username"] = username
session.permanent = True
self_url = OneLogin_Saml2_Utils.get_self_url(req)
if "RelayState" in request.form and self_url != request.form["RelayState"]:
# To avoid 'Open Redirect' attacks, before execute the redirection
# confirm the value of the request.form['RelayState'] | |
the example, the before string is "The phone number is" ( can be blank )
:param after: In the example, the after string is "how cool is that?" ( defaults to blank )
:param data: Data to be spoken as
:param content: see https://msdn.microsoft.com/en-us/library/system.speech.synthesis.sayas(v=vs.110).aspx )
Example:
ai.SayAs(r"The phone number is","18001239874","Telephone","how cool is that?")
"""
try:
fcn = '[Speak("{} [SayAs("{}","{}")]. {}")]'.format(before, data, content, after)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in SayAs function")
return
def SetSpeechVolume(self, vol):
""" Sets system wide speech volume
:param vol: 0 - 100
"""
try:
fcn = '[SetSpeechVolume("{}")]'.format(vol)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in SetSpeechVolume function")
return
def SetSpeechVoice(self, name):
""" Set active speaker
:param name: Name of tts speaker ( ex: <NAME> ) case sensitive
"""
try:
fcn = '[SetSpeechVoice("{}")]'.format(name)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in SetSpeechVoice function")
return
def SetSpeechConfig(self, name, vol, rate):
""" Config a certain speakers parameters
:param name: Name of tts speaker ( ex: <NAME> ) case sensitive
:param vol: Volume 0 - 100
:param rate: Speech rate -10 - 10
"""
try:
fcn = '[SetSpeechConfig("{}", "{}", "{}")]'.format(name, vol, rate)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in SetSpeechConfig function")
return
def SpeakEx(self, phrase, name, vol, rate, delay, sys_or_voice_vol="voice"):
""" Speaks using specified values
:param phrase: Phrase to be spoken
:param name: Name of tts speaker ( ex: <NAME> ) case sensitive
:param vol: 0 - 100
:param rate: Fast - Slow
:param delay: Initial delay
:param sys_or_voice_vol: Use system wide volume or ai' set volume
"""
try:
fcn = '[SpeakEx("{}", "{}", "{}", "{}", "{}", "{}")]'.format(phrase, name, vol, rate,
delay, sys_or_voice_vol)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in SpeakEx function")
return
def StopVoiceByName(self, name, ResponseOnSuccess):
""" Stop the current speaker by AI name
:param name: Name of ai ( case sensitive )
:param ResponseOnSuccess: What the ai says after being silenced
"""
try:
fcn = '[StopVoiceByName("{}", "{}")]'.format(name, ResponseOnSuccess)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in StopVoiceByName function")
return
def StopVoiceByIdentifier(self, identifier, ResponseOnSuccess):
""" Stops voice by Identifier, which is only set in tasks atm - uses tasks name
:param identifier: Task name
:param ResponseOnSuccess: AIs response
"""
try:
fcn = '[StopVoiceByIdentifier("{}", "{}")]'.format(identifier, ResponseOnSuccess)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in StopVoiceByIdentifier function")
return
def SpeakExSysVolSync(self, phrase, VoiceVolume, VoiceRate, phraseDelay, VoiceName):
""" Synchronous speech
:param phrase: Phrase to be spoken
:param VoiceVolume: 0 - 100
:param VoiceRate: -10 - 10
:param phraseDelay: Delay between next speech ( ex: 0.8 )
:param VoiceName: Ai name ( case sensitive )
"""
try:
fcn = '[SpeakExSysVolSync("{}", "{}", "{}", "{}", "{}")]'.format(phrase, VoiceVolume, VoiceRate,
phraseDelay, VoiceName)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in SpeakExSysVolSync function")
return
def SpeakExSysVolAsync(self, phrase, VoiceVolume, VoiceRate, phraseDelay, VoiceName):
""" Asynchronous speech
:param phrase: Phrase to be spoken
:param VoiceVolume: 0 - 100
:param VoiceRate: -10 - 10
:param phraseDelay: Delay between next speech ( ex: 0.8 )
:param VoiceName: Ai name ( case sensitive )
"""
try:
fcn = '[SpeakExSysVolAsync("{}", "{}", "{}", "{}", "{}")]'.format(phrase, VoiceVolume, VoiceRate,
phraseDelay, VoiceName)
self._get_request(fcn)
except Exception as e:
print(e)
print("Exception in SpeakExSysVolAsync function")
return
def _get_xml(self, var_name='Pytron'):
""" Checks xml file for incoming commands sent from links using the [Set("var", "value")] function
and returns them.
:param variable: Name of the variable in the UserVariable.xml file.
:return: Returns the value of the variable.
"""
try:
self.variable = var_name
_path = self._XML_PATH + r'\UserVariables.xml'
tree = ET.parse(_path)
root = tree.getroot()
response = False
for variable in root.findall('Variable'):
n = variable.find('Name').text
if n == var_name:
v = variable.find('Value')
t = v.text
response = t
break
return response
except Exception as e:
print("Exception in _get_xml function")
print(e)
return
def _clear_xml(self, var_name='Pytron'):
""" Clears xml value with pythons standard xml library ET
:param var_name: Variable name in xml file
"""
try:
_path = self._XML_PATH + r'\UserVariables.xml'
tree = ET.parse(_path)
root = tree.getroot()
for variable in root.findall('Variable'):
n = variable.find('Name').text
if n == var_name:
v = variable.find('Value')
v.clear()
break
tree.write(_path)
return
except Exception as e:
print("Exception in _clear_xml function")
print(e)
return
def _check_for_input(self):
""" Check dictation file for changes """
with open(self._SCRIPTS_PATH + "\dictation.txt", 'r') as f:
for line in f:
dictation = line
f.close()
if dictation:
self._write_history(dictation)
self._clear_input()
return dictation
else:
return False
def _clear_input(self):
""" Deletes any entries in dictation file. Also used to create new file on init. -private """
with open(self._SCRIPTS_PATH + '\dictation.txt', 'w+') as f:
f.close()
def _get_request(self, fcn):
""" Speak to Links with a get request using urllib
:param fcn: The function call for Links ( must be a valid links function )
"""
try:
ip = self.ip
port = self.port
key = self.key
url = 'http://{}:{}/?action={}&key={}&output=json'.format(ip, port, fcn, key)
# r = requests.get(url)
r = urllib.urlopen(url)
if r.code == 200:
j = r.readlines()
# print(j)
response = ast.literal_eval(j[3].strip())
result = response['response']
# print result
err = (response['error'])
if len(err) > 0:
print("v" * 20)
z = err + "\n" + result
print(z)
print("^" * 20)
print("\n")
return False
else:
# print(result)
return result
elif r.code:
err = 'Error {}'.format(r.status_code)
print(err)
return err
else:
print("Something went terribly wrong.....")
return False
except Exception as e:
print(e)
print("Exception in _get_request function. \n"
"***Check your ip, port and key settings!*** \n"
"Also, your shoes are untied..")
return False
def _write_history(self, text):
""" Appends history.txt with detected user input -private
:param text:
"""
try:
secs = time()
time_stamp = datetime.datetime.fromtimestamp(secs).strftime('%Y-%m-%d %H:%M:%S')
history = "{}: {}".format(time_stamp, text)
with open(self._SCRIPTS_PATH + '\history.txt', 'a') as f:
f.write(history)
f.close()
except Exception as e:
print(e)
print("Exception in _write_history function")
def write_commands_to_file(self, commands):
""" Writes a list of commands to a file in rst format.
:param commands: List of commands returned bt GetGrammarList function
:return: Returns True or Exceptions
"""
try:
data = commands
print type(data)
with open(self._SCRIPTS_PATH + r'\command_list.txt', 'w') as f:
f.writelines(data)
f.close()
return True
except Exception as e:
return e
@staticmethod
def strip_non_ascii(string):
""" Can be used to remove non-ascii characters from a string.
:param string: String with non-ascii characters
:return: Cleaned up string
"""
stripped = (c for c in string if 0 < ord(c) < 127)
return ''.join(stripped)
@staticmethod
def strip_bad_chars(string):
""" Cleans up some speech elements.. Replaces some things that may otherwise
mess up the url request to Links.. ( this function is ugly and needs a bag on its head )
:type string: String with characters to be replaced
"""
bad_chars = '[]=^_' # Characters to remove outright
a = "".join(c for c in string if c not in bad_chars)
a = a.replace("~~", " approximately ") # Helps format the way certain sites return math values
a = a.replace(";", ",") # Gets rid of semi-colons
return a
# Used for manual testing against a live Links server; most scenarios are
# left commented out so individual features can be exercised one at a time.
if __name__ == '__main__':
    ai = Client()
    ai.talk('Hello')
    # Round-trip a spoken command and print the server's reply.
    temp = ai.CallCommand("whats the temperature")
    print(temp)
    # print 'here'
    # print(ai.GetWord("test_greetings", "hello", "converted value"))
    # print(ai.GetWord("RSSFeeds", "CNN", "url"))
    # print(ai.GetWord("RSSFeeds", "New York Times", "url"))
    # ai.Set('Pytron', 'This is the first test')
    # Read back the 'Pytron' user variable from the server.
    test = ai.Get('Pytron')
    print("This is the first test: ", test)
    # ai.talk(test)
    # ai.Set('Pytron', 'This is the second test')
    # test = ai.Get('Pytron')
    # print("This is the second test: ", test)
    # ai.talk(test)
    # test = ai.CallCommand("Greetings")
    # print test
    # ai.emulate_speech("open windows explorer")
    # print(ai.CallCommand("what time is it"))
    # confirm = ai.GetConfirmation(confirm='Say yes or no to continue', on_yes="You said yes", on_no="You said no")
    # if confirm:
    #     print("WORKS!")
    # if not confirm:
    #     print("NOT CONFIRM!")
    # if confirm is None:
    #     print("CONFIRM IS NONE!")
    # print("Confirm :", confirm)
    # x = ai.GetGrammarList()
    # ai.write_commands_to_file(x)
"""
Changelog- v.0.3.9
- Fixed some troublesome local variables.
Changelog- v.0.3.8
- Brought back urllib. Standard library is all we need.
- Tweaked Client a bit.
Changelog- v.0.3.7
- Fixed error on Client initialization
Changelog- v.0.3.6
- Tweaked CallCommand function. Now returns the response from Links.
- Docstrings added for new functions
- Shelved urllib in exchange for the Requests | |
g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyAlarm(args, parsed_globals):
    """CLI action: call the CLS ModifyAlarm API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].ModifyAlarmRequest()
    model.from_json_string(json.dumps(args))
    result = client.ModifyAlarm(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteMachineGroup(args, parsed_globals):
    """CLI action: call the CLS DeleteMachineGroup API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DeleteMachineGroupRequest()
    model.from_json_string(json.dumps(args))
    result = client.DeleteMachineGroup(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteLogset(args, parsed_globals):
    """CLI action: call the CLS DeleteLogset API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DeleteLogsetRequest()
    model.from_json_string(json.dumps(args))
    result = client.DeleteLogset(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeShippers(args, parsed_globals):
    """CLI action: call the CLS DescribeShippers API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeShippersRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeShippers(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteConfigFromMachineGroup(args, parsed_globals):
    """CLI action: call the CLS DeleteConfigFromMachineGroup API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DeleteConfigFromMachineGroupRequest()
    model.from_json_string(json.dumps(args))
    result = client.DeleteConfigFromMachineGroup(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateLogset(args, parsed_globals):
    """CLI action: call the CLS CreateLogset API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].CreateLogsetRequest()
    model.from_json_string(json.dumps(args))
    result = client.CreateLogset(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAlarms(args, parsed_globals):
    """CLI action: call the CLS DescribeAlarms API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeAlarmsRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeAlarms(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAlarmNotices(args, parsed_globals):
    """CLI action: call the CLS DescribeAlarmNotices API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeAlarmNoticesRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeAlarmNotices(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateTopic(args, parsed_globals):
    """CLI action: call the CLS CreateTopic API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].CreateTopicRequest()
    model.from_json_string(json.dumps(args))
    result = client.CreateTopic(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribePartitions(args, parsed_globals):
    """CLI action: call the CLS DescribePartitions API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribePartitionsRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribePartitions(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteAlarmNotice(args, parsed_globals):
    """CLI action: call the CLS DeleteAlarmNotice API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DeleteAlarmNoticeRequest()
    model.from_json_string(json.dumps(args))
    result = client.DeleteAlarmNotice(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateAlarm(args, parsed_globals):
    """CLI action: call the CLS CreateAlarm API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].CreateAlarmRequest()
    model.from_json_string(json.dumps(args))
    result = client.CreateAlarm(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteAlarm(args, parsed_globals):
    """CLI action: call the CLS DeleteAlarm API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DeleteAlarmRequest()
    model.from_json_string(json.dumps(args))
    result = client.DeleteAlarm(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyConfig(args, parsed_globals):
    """CLI action: call the CLS ModifyConfig API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].ModifyConfigRequest()
    model.from_json_string(json.dumps(args))
    result = client.ModifyConfig(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUploadLog(args, parsed_globals):
    """CLI action: call the CLS UploadLog API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].UploadLogRequest()
    model.from_json_string(json.dumps(args))
    result = client.UploadLog(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyLogset(args, parsed_globals):
    """CLI action: call the CLS ModifyLogset API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].ModifyLogsetRequest()
    model.from_json_string(json.dumps(args))
    result = client.ModifyLogset(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateConfig(args, parsed_globals):
    """CLI action: call the CLS CreateConfig API with *args* as the request body."""
    g_param = parse_global_arg(parsed_globals)
    # Credentials and HTTP settings come from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token],
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy],
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].ClsClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].CreateConfigRequest()
    model.from_json_string(json.dumps(args))
    result = client.CreateConfig(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3 returns bytes
    FormatOutput.output("action", json_obj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeMachineGroupConfigs(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ClsClient(cred, g_param[OptionsDefine.Region], | |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# WILL NEED UPDATING TO NEW ELEMENT SCHEME
"""Main module for basic geometric manipulation of non-shapely objects, but
objects such as points and arrays used in drawing."""
import math
from collections.abc import Iterable, Mapping
from typing import List, Tuple, Union
import numpy as np
import shapely
import shapely.wkt
from numpy import array
from numpy.linalg import norm
from shapely.geometry import MultiPolygon, Polygon
from .. import logger
from .. import is_component
from . import BaseGeometry
__all__ = [
'get_poly_pts', 'get_all_component_bounds', 'get_all_geoms',
'flatten_all_filter', 'remove_colinear_pts', 'array_chop',
'vec_unit_planar', 'Vector'
]
#########################################################################
# Shapely Geometry Basic Coordinates
def get_poly_pts(poly: Polygon):
    """Return the exterior coordinates of a Shapely polygon, dropping the
    duplicated closing point.

    Args:
        poly (shapely.Polygon): Shapely polygon

    Returns:
        np.array: Sequence of coordinates.
    """
    exterior = np.array(poly.exterior.coords)
    return exterior[:-1]  # last point repeats the first; drop it
def get_all_geoms(obj, func=lambda x: x, root_name='components'):
    """Get a dict of dict of all shapely objects, from components, dict, etc.

    Used to compute the bounding box.

    Args:
        obj (dict, list, element, component): Object to get from.
        func (function): Function to use if mapping. Defaults to (lambda x: x).
        root_name (str): Name to prepend in the flattening. Defaults to 'components'.

    Returns:
        dict: Dictionary of geometries keyed by name; a bare shapely object
        when `obj` is itself a geometry; None for unrecognized input.
    """

    def new_name(name):
        # Prefix the name with the root unless the root is empty.
        return root_name + '.' + name if not (root_name == '') else name

    # Check what we have
    if is_component(obj):
        # Is it a metal component? Then traverse its components
        return obj.get_all_geom()  # dict of shapely geoms
    elif isinstance(obj, BaseGeometry):
        # Is it the shapely object? This is the bottom of the search tree, then return
        return obj
    elif isinstance(obj, Mapping):
        # BUG FIX: this was a *set* comprehension that discarded the names
        # (and would raise TypeError on unhashable dict values), returning
        # a set instead of the documented dict. Key by name, as the legacy
        # implementation did.
        return {
            name: get_all_geoms(sub_obj, root_name=new_name(name))
            for name, sub_obj in obj.items()
        }
    else:
        logger.debug(
            f'warning: {root_name} was not an object or dict or the right handle'
        )
        return None
def flatten_all_filter(components: dict, filter_obj=None):
    """Recursively flatten a nested dict of shapely objects into a list.

    Args:
        components (dict): Dictionary of components
        filter_obj (class): If given, keep only instances of this class.
            Defaults to None (keep everything).

    Returns:
        list: The flattened values.
    """
    assert isinstance(components, dict)
    flattened = []
    for name, obj in components.items():
        if isinstance(obj, dict):
            # Recurse into nested dictionaries.
            flattened.extend(flatten_all_filter(obj, filter_obj))
        elif filter_obj is None or isinstance(obj, filter_obj):
            flattened.append(obj)
        else:
            # Filtered out: report the skipped entry.
            print('flatten_all_filter: ', name)
    return flattened
def get_all_component_bounds(components: dict, filter_obj=Polygon):
    """Compute the total bounding box over a dict of components.

    Args:
        components (dict): Dictionary of components
        filter_obj (Polygon): Only instances of this class contribute to
            the bounds

    Returns:
        tuple: (x_min, y_min, x_max, y_max)
    """
    assert isinstance(components, dict)
    geoms = flatten_all_filter(get_all_geoms(components), filter_obj=filter_obj)
    (x_min, y_min, x_max, y_max) = MultiPolygon(geoms).bounds
    return (x_min, y_min, x_max, y_max)
def round_coordinate_sequence(geom_ref, precision):
    """Round the vertices of a geometry's coordinate sequences in place
    (the line itself, or exterior and all interior rings).

    Args:
        geom_ref (shapely.geometry): A shapely geometry; should not be a MultiPoly
        precision (int): The decimal precision to round to (eg. 3 -> 0.001)

    Returns:
        shapely.geometry: The same geometry with rounded coordinates
    """
    if isinstance(geom_ref, shapely.geometry.linestring.LineString):
        geom_ref.coords = np.around(geom_ref.coords[:], precision).tolist()
    else:
        geom_ref.exterior.coords = np.around(geom_ref.exterior.coords[:],
                                             precision).tolist()
        for ring_idx in range(len(geom_ref.interiors)):
            geom_ref.interiors[ring_idx].coords = np.around(
                geom_ref.interiors[ring_idx].coords[:], precision).tolist()
    return geom_ref
#########################################################################
# POINT LIST FUNCTIONS
def check_duplicate_list(your_list):
    """Return True when your_list contains at least one repeated element."""
    seen = set()
    for item in your_list:
        if item in seen:
            return True
        seen.add(item)
    return False
def array_chop(vec, zero=0, rtol=0, machine_tol=100):
    """Replace entries of vec lying within tolerance of `zero` by exactly 0.

    Args:
        vec (array): Array to chop
        zero (double): Value to compare entries against. Defaults to 0.
        rtol (double): Relative tolerance. Defaults to 0.
        machine_tol (double): Absolute tolerance, in multiples of the
            float64 machine epsilon. Defaults to 100.

    Returns:
        array: Chopped copy of the input
    """
    arr = np.array(vec)  # copies, so the caller's data is untouched
    abs_tol = machine_tol * np.finfo(float).eps
    near_zero = np.isclose(arr, zero, rtol=rtol, atol=abs_tol)
    arr[near_zero] = 0
    return arr
def remove_colinear_pts(points):
    """Remove colinear points and identical consecutive points.

    Args:
        points (array): Array of points

    Returns:
        ndarray: A copy of the input array without colinear points
    """
    # Pass 1: for every triple (i-2, i-1, i), drop the middle point when the
    # two direction vectors are identical or have zero angle between them.
    remove_idx = []
    for i in range(2, len(points)):
        v1 = array(points[i - 2]) - array(points[i - 1])
        v2 = array(points[i - 1]) - array(points[i - 0])
        if Vector.are_same(v1, v2):
            remove_idx += [i - 1]
        elif Vector.angle_between(v1, v2) == 0:
            remove_idx += [i - 1]
    points = np.delete(points, remove_idx, axis=0)
    # Pass 2: remove consecutive duplicates (zero distance between neighbors).
    remove_idx = []
    for i in range(1, len(points)):
        if norm(points[i] - points[i - 1]) == 0:
            remove_idx += [i]
    points = np.delete(points, remove_idx, axis=0)
    return points
#########################################################################
# Points, Lines and Areas functions
def intersect(p1x, p1y, p2x, p2y, x0, y0):
    """Test whether the horizontal ray {y = y0, x >= x0} crosses the segment
    from (p1x, p1y) to (p2x, p2y).

    Args:
        p1x (float): x coordinate of point 1 of segment
        p1y (float): y coordinate of point 1 of segment
        p2x (float): x coordinate of point 2 of segment
        p2y (float): y coordinate of point 2 of segment
        x0 (float): x coordinate anchoring the intersection ray
        y0 (float): y coordinate anchoring the intersection ray

    Returns:
        boolean int: (1) if intersecting, (0) if not intersecting
    """
    if p1x != p2x and p1y != p2y:
        # Slanted segment: find the x where the segment's line meets y = y0.
        slope = (p2y - p1y) / (p2x - p1x)
        x_hit = p1x + (y0 - p1y) / slope
        y_in_span = np.min([p1y, p2y]) <= y0 <= np.max([p1y, p2y])
        return 1 if (x_hit >= x0 and y_in_span) else 0
    ans = 0
    if p1x == p2x:  # vertical segment
        hit = x0 <= p1x and np.min([p1y, p2y]) <= y0 <= np.max([p1y, p2y])
        ans = 1 if hit else 0
    if p1y == p2y:  # horizontal segment: counted whenever y0 lies on its line
        ans = 1 if y0 == p1y else 0
    return ans
def in_or_out(xs, ys, x0, y0):
    """Count how many polygon edges the horizontal ray from (x0, y0) crosses;
    an odd count means inside, even means outside."""
    total = 0
    for start, stop in zip(range(len(xs) - 1), range(1, len(xs))):
        total += intersect(xs[start], ys[start], xs[stop], ys[stop], x0, y0)
    return total
#########################################################################
# Vector functions
def vec_unit_planar(vector: np.array):
    """Make the planar 2D (x,y) part of a vector to be unit mag. Return a
    vector whose XY components are now a unit vector. I.e., normalizes only in
    the XY plane, leaves the Z plane alone.

    Args:
        vector (np.array): Input 2D or 3D

    Returns:
        np.array: Same dimension 2D or 3D

    Raises:
        Exception: The input was not a 2 or 3 vector
    """
    vector = array_chop(vector)  # get rid of near zero crap
    if len(vector) == 2:
        _norm = norm(vector)
        if not bool(_norm):  # zero length vector: return unchanged, just warn
            logger.debug(f'Warning: zero vector length')
            return vector
        return vector / _norm
    elif len(vector) == 3:
        # Normalize only the XY part; z passes through untouched.
        v2 = vec_unit_planar(vector[:2])
        return np.append(v2, vector[2])
    else:
        raise Exception('You did not give a 2 or 3 vec')
def to_vec3D(list_of_2d_pts: List[Tuple], z=0) -> np.ndarray:
    """Append a constant z component to every 2D point.

    Args:
        list_of_2d_pts (List[Tuple]): List of 2D points
        z (int, optional): z-value to append to each point. Defaults to 0.

    Returns:
        np.ndarray: (n, 3) array of 3D points
    """
    return np.array([[*point, z] for point in list_of_2d_pts])
# Type alias: a 2D vector supplied either as a plain Python list or a numpy array.
Vec2D = Union[list, np.ndarray]
class Vector:
"""Utility functions to call on 2D vectors, which can | |
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import hashlib
import time
from PIL import Image
import imagehash
from sklearn import metrics
# Config the matplotlib backend as plotting inline in IPython
# matplotlib inline
def download_progress_hook(count, blockSize, totalSize):
    """A hook to report the progress of a download. This is mostly intended for users with
    slow internet connections. Reports every 5% change in download progress.
    """
    global last_percent_reported
    percent = int(count * blockSize * 100 / totalSize)
    if percent == last_percent_reported:
        return
    # Print the number at every 5% milestone, a dot otherwise.
    marker = ("%s%%" % percent) if percent % 5 == 0 else "."
    sys.stdout.write(marker)
    sys.stdout.flush()
    last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
    """Download a file if not present, and make sure it's the right size.

    Args:
        filename: archive name, appended to the module-level `url` and stored
            under `data_root`
        expected_bytes: expected size on disk, used to verify the download
        force: re-download even if the file already exists

    Returns:
        Path of the verified local file.

    Raises:
        Exception: if the downloaded file's size does not match expected_bytes.
    """
    dest_filename = os.path.join(data_root, filename)
    if force or not os.path.exists(dest_filename):
        print('Attempting to download:', filename)
        filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
        print('\nDownload Complete!')
    statinfo = os.stat(dest_filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified', dest_filename)
    else:
        raise Exception(
            'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
    return dest_filename
def maybe_extract(filename, force=False):
    """Extract the tarball next to it (unless already extracted) and return
    the sorted list of per-class folders.

    Raises:
        Exception: if the number of extracted folders != num_classes.
    """
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
    if os.path.isdir(root) and not force:
        # You may override by setting force=True.
        print('%s already present - Skipping extraction of %s.' % (root, filename))
    else:
        print('Extracting data for %s. This may take a while. Please wait.' % root)
        tar = tarfile.open(filename)
        sys.stdout.flush()
        tar.extractall(data_root)
        tar.close()
    # One subfolder per class (letter), in sorted order so labels are stable.
    data_folders = [
        os.path.join(root, d) for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))]
    if len(data_folders) != num_classes:
        raise Exception(
            'Expected %d folders, one per class. Found %d instead.' % (
                num_classes, len(data_folders)))
    print(data_folders)
    return data_folders
def load_letter(folder, min_num_images):
    """Load the data for a single letter label.

    Reads every image in `folder`, normalizes pixel values to roughly
    [-0.5, 0.5], and stacks them into one float32 tensor. Unreadable files
    are skipped.

    Raises:
        Exception: if fewer than min_num_images images could be loaded, or an
            image has an unexpected shape.
    """
    image_files = os.listdir(folder)
    # Pre-allocate for the worst case; trimmed down to num_images below.
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            # Center and scale: (v - depth/2) / depth -> approx [-0.5, 0.5].
            image_data = (ndimage.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except IOError as e:
            # Corrupt / unreadable images are expected in this dataset.
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (num_images, min_num_images))
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    """Pickle each class folder's images to '<folder>.pickle'.

    Existing pickles are kept unless force is True. Returns the list of
    pickle file paths (one per class folder), whether or not they were
    (re)created.
    """
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            # You may override by setting force=True.
            print('%s already present - Skipping pickling.' % set_filename)
            continue
        print('Pickling %s.' % set_filename)
        dataset = load_letter(folder, min_num_images_per_class)
        try:
            with open(set_filename, 'wb') as f:
                pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print('Unable to save data to', set_filename, ':', e)
    return dataset_names
# Module-level configuration shared by the helpers above.
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None  # progress-hook state: last percentage printed
data_root = '.' # Change me to store data elsewhere
num_classes = 10
np.random.seed(133)  # make the shuffles below reproducible
image_size = 28  # Pixel width and height.
pixel_depth = 255.0  # Number of levels per pixel.
# Fetch, verify (byte count), unpack, and per-class pickle both archives.
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
# Problem 1
# Let's take a peek at some of the data to make sure it looks sensible.
# Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.
def problem1(folder_path):
    """Display one randomly chosen sample image from each class subfolder
    of folder_path (Problem 1 sanity check).

    Fixes the original indexing bug: it picked an index with
    random.randint(0, len(d)) where d is the *directory path string*, so the
    bound was the path's character length (and inclusive), not the number of
    image files — an IndexError waiting to happen.
    """
    for entry in os.listdir(folder_path):
        d = os.path.join(folder_path, entry)
        if os.path.isdir(d):
            images = os.listdir(d)
            if not images:
                continue  # empty class folder: nothing to show
            img = mpimg.imread(os.path.join(d, random.choice(images)))
            plt.imshow(img)
            plt.show()
# Problem 1 check: show one random sample per class from each archive.
problem1('notMNIST_large')
problem1('notMNIST_small')
# Problem 2
# Let's verify that the data still looks good. Displaying a sample of the labels and images from the ndarray.
# Hint: you can use matplotlib.pyplot.
def problem2(folder_path):
    """Display the first image of every per-class pickle in folder_path
    (Problem 2 sanity check).

    Uses a context manager so the pickle file handle is always closed (the
    original open() was never closed).
    """
    for file in os.listdir(folder_path):
        if file.endswith(".pickle"):
            with open(os.path.join(folder_path, file), 'rb') as f:
                figx = pickle.load(f)
            plt.imshow(figx[0, :, :])
            plt.title(file)
            plt.show()
# Problem 2 check: eyeball the first image of each pickled class.
problem2('notMNIST_large')
problem2('notMNIST_small')
# Problem 3
# Another check: we expect the data to be balanced across classes. Verify that.
def problem3(folder_path):
    """Print the number of images in every per-class pickle in folder_path,
    to verify the classes are balanced (Problem 3).

    Uses a context manager so the pickle file handle is always closed (the
    original open() was never closed).
    """
    for file in os.listdir(folder_path):
        if file.endswith(".pickle"):
            with open(os.path.join(folder_path, file), 'rb') as f:
                figx = pickle.load(f)
            print(os.path.join(folder_path, file), len(figx), ' images')
# Problem 3 check: confirm the classes are roughly balanced.
problem3('notMNIST_large')
problem3('notMNIST_small')
def make_arrays(nb_rows, img_size):
    """Allocate an uninitialized (nb_rows, img_size, img_size) float32 image
    tensor plus an int32 label vector; return (None, None) when nb_rows is 0."""
    if not nb_rows:
        return None, None
    dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
    labels = np.ndarray(nb_rows, dtype=np.int32)
    return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
    """Merge the per-class pickles into balanced train/validation arrays.

    Each class contributes train_size//num_classes training samples and
    valid_size//num_classes validation samples, drawn after shuffling that
    class's images in place.

    Args:
        pickle_files: list of per-class pickle paths, index = label
        train_size: total number of training samples to draw
        valid_size: total number of validation samples (0 = no validation set)

    Returns:
        (valid_dataset, valid_labels, train_dataset, train_labels); the
        validation pair is (None, None) when valid_size is 0.
    """
    num_classes = len(pickle_files)
    valid_dataset, valid_labels = make_arrays(valid_size, image_size)
    train_dataset, train_labels = make_arrays(train_size, image_size)
    vsize_per_class = valid_size // num_classes
    tsize_per_class = train_size // num_classes
    # Write cursors into the output arrays; advanced once per class.
    start_v, start_t = 0, 0
    end_v, end_t = vsize_per_class, tsize_per_class
    end_l = vsize_per_class+tsize_per_class
    for label, pickle_file in enumerate(pickle_files):
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)
                # let's shuffle the letters to have random validation and training set
                np.random.shuffle(letter_set)
                if valid_dataset is not None:
                    # First vsize_per_class shuffled images go to validation.
                    valid_letter = letter_set[:vsize_per_class, :, :]
                    valid_dataset[start_v:end_v, :, :] = valid_letter
                    valid_labels[start_v:end_v] = label
                    start_v += vsize_per_class
                    end_v += vsize_per_class
                # Next tsize_per_class images go to training.
                train_letter = letter_set[vsize_per_class:end_l, :, :]
                train_dataset[start_t:end_t, :, :] = train_letter
                train_labels[start_t:end_t] = label
                start_t += tsize_per_class
                end_t += tsize_per_class
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise
    return valid_dataset, valid_labels, train_dataset, train_labels
# Final split sizes.
train_size = 200000
valid_size = 10000
test_size = 10000
# Train+validation come from the large archive; test from the small one.
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
    train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
def randomize(dataset, labels):
    """Shuffle dataset and labels together with one shared permutation,
    preserving the image/label pairing."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
# Shuffle each split so the classes are no longer grouped by label.
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
# Problem 4
# Convince yourself that the data is still good after shuffling!
def problem4(data, labels):
    """Spot-check the shuffled data: show 8 random images, each titled with
    its letter label (Problem 4)."""
    letter_for = {i: chr(ord('A') + i) for i in range(10)}
    # plot some images to make sure that the labels correspond to images
    picks = random.sample(range(len(labels)), 8)
    for plot_idx, sample_idx in enumerate(picks):
        plt.subplot(2, 4, plot_idx + 1)
        plt.axis('off')
        plt.title(letter_for[labels[sample_idx]])
        plt.imshow(data[sample_idx])
    plt.show()
# Problem 4 check: verify labels still match images after shuffling.
problem4(train_dataset, train_labels)
problem4(test_dataset, test_labels)
problem4(valid_dataset, valid_labels)
# Persist all six arrays into a single pickle for the later assignments.
# Uses a `with` block so the file handle is closed even when pickling fails
# (the original leaked the handle on any exception before f.close()).
pickle_file = os.path.join(data_root, 'notMNIST.pickle')
try:
    save = {
        'train_dataset': train_dataset,
        'train_labels': train_labels,
        'valid_dataset': valid_dataset,
        'valid_labels': valid_labels,
        'test_dataset': test_dataset,
        'test_labels': test_labels,
    }
    with open(pickle_file, 'wb') as f:
        pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
    print('Unable to save data to', pickle_file, ':', e)
    raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
# Problem5
# By construction, this dataset might contain a lot of overlapping samples,
# including training data that's also contained in the validation and test set!
# Overlap between training and test can skew the results if you expect to use your model in
# an environment where there is never an overlap, but are actually ok if you expect to see
# training samples recur when you use it. Measure how much overlap there is between training,
# validation and test samples.
# Optional questions:
# What about near duplicates between datasets? (images that are almost identical)
# Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments.
def problem5(dataset1, dataset2):
    """Measure exact overlap between two image datasets (Problem 5).

    Returns a dict mapping each index i in dataset1 to a one-element list
    [j], where j is the first index in dataset2 whose image is identical.
    Indices with no identical counterpart are omitted.
    """
    overlap = {}
    for i, img_a in enumerate(dataset1):
        first_match = next(
            (j for j, img_b in enumerate(dataset2) if np.array_equal(img_a, img_b)),
            None)
        if first_match is not None:
            overlap[i] = [first_match]
    return overlap
# print('Overlap between train and test dataset is: ', problem5(train_dataset,test_dataset))
# print('Overlap between train and validation dataset is: ', problem5(train_dataset,valid_dataset))
def image_array_to_diff_hash(ndarr):
    """Return the perceptual difference hash (dhash) of one normalized image
    as a hex string, for near-duplicate detection."""
    # Invert the load-time normalization (v - depth/2) / depth back to 0-255.
    with_pixel_value = ndarr * pixel_depth + pixel_depth/2
    restored_image = Image.fromarray(np.uint8(with_pixel_value))
    return str(imagehash.dhash(restored_image))
def hash_dataset(images):
    """Difference-hash every image in the collection."""
    return list(map(image_array_to_diff_hash, images))
# Near-duplicate detection: hash every image in each split.
all_valid_hashes = hash_dataset(valid_dataset)
all_train_hashes = hash_dataset(train_dataset)
all_test_hashes = hash_dataset(test_dataset)
# For each sample, does its hash also appear in the other split(s)?
valid_in_train = np.in1d(all_valid_hashes, all_train_hashes)
test_in_train = np.in1d(all_test_hashes, all_train_hashes)
test_in_valid = np.in1d(all_test_hashes, all_valid_hashes)
# Sanitized splits: keep only samples that do not leak from train/validation.
valid_keep = ~valid_in_train
test_keep = ~(test_in_train | test_in_valid)
valid_dataset_clean = valid_dataset[valid_keep]
valid_labels_clean = valid_labels[valid_keep]
test_dataset_clean = test_dataset[test_keep]
test_labels_clean = test_labels[test_keep]
print("valid -> train overlap: %d samples" % valid_in_train.sum())
print("test -> train overlap: %d samples" % test_in_train.sum())
print("test -> valid overlap: %d samples" % test_in_valid.sum())
# Problem 6
# Let's get an idea of what an off-the-shelf classifier can give you on this data.
# It's always good to check that there is something to learn, and that it's a problem | |
<reponame>icebreaker/dotfiles<filename>gnome/gnome2/gedit/plugins.symlink/tm_autocomplete.py
# -*- coding: utf-8 -*-
#
# Gedit Plugin for TextMate style autocompletion. Tap Esc to cycle through
# completions.
#
# Copyright © 2010, <NAME> <<EMAIL>>
#
# Thanks to <NAME> <<EMAIL>> for the proximity based search
# code, and most recent match promotion
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
__version__ = '1.0.4'
__author__ = '<NAME>'
import gedit
import gtk
import re
import gconf
# The default trigger: a (keyval, mod) pair — plain Escape, no modifiers.
DEFAULT_TRIGGER = (gtk.keysyms.Escape, 0)
def uniq_order_preserved(v):
    """Return v's elements with duplicates removed, first-seen order kept."""
    seen = set()
    result = []
    for item in v:
        if item in seen:
            continue
        seen.add(item)
        result.append(item)
    return result
def zip_no_truncation(v, w):
    """Interleave v and w as [v0, w0, v1, w1, ...]; once the shorter list is
    exhausted, the longer one's remaining items follow in order."""
    result = []
    for a, b in zip(v, w):
        result.append(a)
        result.append(b)
    shorter = min(len(v), len(w))
    result.extend(v[shorter:])
    result.extend(w[shorter:])
    return result
class AutoCompleter(object):
    """Class that actually does the autocompletion.

    One instance represents a completion session: it indexes candidate words
    on construction (see reindex) and insert_next_completion then cycles
    through the matches, replacing the previously inserted one each time.
    """
    # Treat '_' as part of a word when locating word boundaries.
    IgnoreUnderscore = True
    # Where candidate words may be harvested from.
    ValidScopes = ('document', 'window', 'application')
    # Supported orderings for the candidate list.
    ValidOrders = ('alphabetical', 'proximity')
    # Class-wide: the match most recently accepted by the user. When
    # `promote` is on it is moved to the front of later candidate lists.
    LastAcceptedMatch = None
    __slots__ = (
        'doc',      # The document autocomplete was initiated on
        'word',     # Word being completed
        'matches',  # List of potential autocompletions
        'index',    # Index of the next autocompletion to suggest
        'iter_s',   # GtkTextIterator pointing to the start of word being completed
        'iter_i',   # GtkTextIterator pointing to insertion point
        'iter_e',   # GtkTextIterator pointing to end of last insertion
        'scope',    # Search scope (document|application|window)
        'order',    # Result list ordering (proximity|alphabetical)
        'promote',  # Promote last accepted match
    )

    def __init__(self, doc, scope='document', order='alphabetical',
                 promote=False):
        """Create an autocompleter for the document. Indexes the words in the
        current scope and builds a list of matches for the current cursor
        position. Calling insert_next_completion will cycle through the
        matches, replacing the last match inserted (if any).

        If order is 'alphabetical' then the autocompletion list is ordered
        alphabetically. If order is 'proximity' then the autocompletion list
        is ordered based on distance from the cursor in the current document,
        with the other open documents being ordered alphabetically.
        """
        self.scope = scope
        self.order = order
        self.promote = promote
        self.reindex(doc)

    def _get_iter_for_beginning_of_word_at(self, iter1):
        """Returns a GtkTextIter pointing to the start of the current word."""
        if not self.IgnoreUnderscore:
            # Just use pango's word start facility
            result = iter1.copy()
            result.backward_word_start()
        else:
            # Walk back over alphabetic chars and underscores by hand.
            i = iter1.copy()
            while not i.starts_sentence() and i.backward_char():
                ch = i.get_char()
                if ch.isalpha() or ch == '_':
                    continue
                else:
                    i.forward_char()
                    break
            result = i
        return result

    def _can_autocomplete_at(self, iter1):
        """Returns true if autocompletion can be done at the given iterator."""
        if iter1.ends_word() or iter1.inside_word():
            return True
        if self.IgnoreUnderscore:
            # Also allow completing right after an underscore.
            i = iter1.copy()
            if not i.starts_sentence() and i.backward_char() and i.get_char() == '_':
                return True
        return False

    def _get_current_doc_words_sorted_by_proximity(self, regex):
        """Returns the words in the current document sorted by distance from
        the cursor, nearest first.
        """
        fwd_text = self.doc.get_text(self.iter_i, self.doc.get_end_iter())
        bck_text = self.doc.get_text(self.doc.get_start_iter(), self.iter_s)
        fwd_words = regex.findall(fwd_text)
        bck_words = regex.findall(bck_text)
        bck_words.reverse()
        # Interleave the nearest word behind with the nearest word ahead.
        all_words = zip_no_truncation(bck_words, fwd_words)
        return uniq_order_preserved(all_words)

    def _get_current_doc_words(self, regex):
        """Returns an unsorted list of words in the current document. The given
        regex is used to match the words.
        """
        iter1 = self.doc.get_start_iter()
        iter2 = self.doc.get_end_iter()
        text = self.doc.get_text(iter1, iter2)
        words = set(regex.findall(text))
        return list(words)

    def _get_other_doc_words(self, regex):
        """Returns an unsorted list of words in the non-current documents based
        on the selected scope. The given regex is used to match the words.
        """
        if self.scope == 'application':
            # Index all documents open in any gedit window
            docs = gedit.app_get_default().get_documents()
        elif self.scope == 'window':
            # Index all documents in this gedit window
            docs = gedit.app_get_default().get_active_window().get_documents()
        else:
            # No other documents in use
            docs = []
        words = set()
        for doc in docs:
            if doc != self.doc:
                text = doc.get_text(doc.get_start_iter(), doc.get_end_iter())
                words.update(regex.findall(text))
        return list(words)

    def _create_regex_for_prefix(self, prefix):
        """Compiles a regular expression that matches words beginning with the
        given prefix. If the prefix is empty, a match-any-word regular
        expression is created.
        """
        # \w+ requires at least one character after the prefix, so the prefix
        # itself is never offered as its own completion.
        return re.compile(r'\b' + prefix + r'\w+\b')

    def _get_candidate_matches(self, doc, prefix):
        """Returns all words in the scoped documents that extend the prefix,
        ordered per self.order, duplicates removed."""
        regex = self._create_regex_for_prefix(prefix)
        if self.order == 'alphabetical':
            # Alphabetical sort
            words = self._get_current_doc_words(regex)
            other = self._get_other_doc_words(regex)
            words.extend(other)
            words.sort()
        else:
            # Proximity sort in current doc, alphabetical in others
            words = self._get_current_doc_words_sorted_by_proximity(regex)
            other = self._get_other_doc_words(regex)
            other.sort()
            words.extend(other)
        return uniq_order_preserved(words)

    def _should_promote_last_accepted(self, prefix):
        """Whether the last accepted match should be moved to the front of
        the candidate list for this prefix."""
        last = AutoCompleter.LastAcceptedMatch
        # BUGFIX: the original tested len(prefix) > len(last), which can never
        # hold together with last.startswith(prefix) (a prefix cannot be
        # longer than the string it prefixes), so promotion never happened.
        # The intent: promote when the last accepted match strictly extends
        # the current prefix.
        return (last is not None and self.promote and
                len(last) > len(prefix) and last.startswith(prefix))

    def reindex(self, doc):
        """Compile a list of candidate words for autocompletion."""
        self.doc = doc
        self.word = None
        self.matches = []
        self.index = 0
        self.iter_e = None
        self.iter_i = doc.get_iter_at_mark(doc.get_insert())
        if self._can_autocomplete_at(self.iter_i):
            self.iter_s = self._get_iter_for_beginning_of_word_at(self.iter_i)
            self.iter_e = self.iter_i.copy()
            self.word = doc.get_text(self.iter_s, self.iter_i)
            self.matches = self._get_candidate_matches(doc, self.word)
            # Membership guard: the last accepted match may have been deleted
            # from every open document since it was accepted.
            if (self._should_promote_last_accepted(self.word) and
                    self.LastAcceptedMatch in self.matches):
                self.matches.remove(self.LastAcceptedMatch)
                self.matches.insert(0, self.LastAcceptedMatch)
        return len(self.matches) > 0

    def has_completions(self):
        """Returns true if we can do autocompletion."""
        return 0 <= self.index < len(self.matches)

    def insert_next_completion(self):
        """Insert the next autocompletion into the document and move the cursor
        to the end of the completion. The previous autocompletion is removed.
        """
        insert_ok = self.has_completions()
        if insert_ok:
            self.doc.begin_user_action()
            # Store insertion offset
            insertion_point = self.iter_i.get_offset()
            # Remove previous completions
            if not self.iter_i.equal(self.iter_e):
                self.doc.delete(self.iter_i, self.iter_e)
                self.iter_i = self.doc.get_iter_at_offset(insertion_point)
            # Insert new completion (only the part beyond the typed prefix)
            match = self.matches[self.index]
            completion = match[len(self.word):]
            self.doc.insert(self.iter_i, completion, len(completion))
            AutoCompleter.LastAcceptedMatch = match
            # Update iterators
            self.iter_i = self.doc.get_iter_at_offset(insertion_point)
            self.iter_e = self.iter_i.copy()
            self.iter_s = self.iter_i.copy()
            self.iter_e.forward_chars(len(completion))
            self.iter_s.backward_chars(len(match))
            # Move cursor
            self.doc.place_cursor(self.iter_e)
            # Next completion (wrap around at the end of the list)
            self.index = self.index + 1 if self.index + 1 < len(self.matches) else 0
            self.doc.end_user_action()
        return insert_ok
class AutoCompletionPlugin(gedit.Plugin):
"""TextMate style autocompletion plugin for Gedit"""
# Where our configuration data is held
ConfigRoot = '/apps/gedit-2/plugins/tm_autocomplete'
    def __init__(self):
        """Initialise plugin state with the default trigger and settings."""
        self.autocompleter = None          # active completion session, if any
        self.trigger = DEFAULT_TRIGGER     # (keyval, modifiers) pair
        self.scope = 'document'            # where candidate words come from
        self.order = 'proximity'           # candidate-list ordering
        self.promote_last_accepted = True  # put last accepted match first
        gedit.Plugin.__init__(self)
    def activate(self, window):
        """Gedit callback: plugin was enabled for the given window."""
        self.gconf_activate()
        self.update_ui(window)
    def deactivate(self, window):
        """Gedit callback: disconnect our view handlers and gconf hooks."""
        for view in window.get_views():
            for handler_id in getattr(view, 'autocomplete_handlers', []):
                view.disconnect(handler_id)
            setattr(view, 'autocomplete_handlers_attached', False)
        self.autocompleter = None
        self.gconf_deactivate()
    def update_ui(self, window):
        """Gedit callback: attach key/button handlers to the active view,
        at most once per view (tracked via an attribute on the view)."""
        view = window.get_active_view()
        doc = window.get_active_document()
        if isinstance(view, gedit.View) and doc:
            if not getattr(view, 'autocomplete_handlers_attached', False):
                setattr(view, 'autocomplete_handlers_attached', True)
                self.autocompleter = None
                id1 = view.connect('key-press-event', self.on_key_press, doc)
                id2 = view.connect('button-press-event', self.on_button_press, doc)
                setattr(view, 'autocomplete_handlers', (id1, id2))
    def is_autocomplete_trigger(self, event):
        """Return True when the key event matches the configured trigger."""
        keyval, modifiers = self.trigger
        if modifiers and (modifiers & event.state) == 0:
            # Required modifiers not depressed
            return False
        return event.keyval == keyval
    def on_key_press(self, view, event, doc):
        """Key handler: the trigger key starts/continues cycling completions;
        any other key ends the session. Returns True to swallow the event."""
        if self.is_autocomplete_trigger(event):
            if not self.autocompleter:
                # New session: index candidate words at the cursor position.
                self.autocompleter = AutoCompleter(doc, self.scope, self.order,
                                                   self.promote_last_accepted)
            if self.autocompleter and self.autocompleter.has_completions():
                self.autocompleter.insert_next_completion()
            else:
                self.autocompleter = None
            # Swallow the trigger keystroke.
            return True
        elif self.autocompleter:
            self.autocompleter = None
        return False
def on_button_press(self, view, event, doc):
if self.autocompleter:
self.autocompleter = None
return False
def set_scope(self, scope):
if scope != self.scope and scope in AutoCompleter.ValidScopes:
self.scope = scope
self.autocompleter = None
return True
return False
def set_order(self, order):
if order != self.order and order in AutoCompleter.ValidOrders:
self.order = order
self.autocompleter = None
return True
return False
def set_promote_last_accepted(self, promote_last_accepted):
if self.promote_last_accepted != promote_last_accepted:
self.promote_last_accepted = promote_last_accepted
self.autocompleter = None
return True
return False
def set_trigger(self, trigger):
if isinstance(trigger, str):
try:
self.trigger = gtk.accelerator_parse(trigger)
except:
self.trigger = DEFAULT_TRIGGER
elif isinstance(trigger, tuple):
self.trigger = trigger
else:
self.trigger = DEFAULT_TRIGGER
    def get_trigger_name(self):
        """Return the accelerator name of the current trigger (e.g. 'Escape')."""
        keyval, modifiers = self.trigger
        return gtk.accelerator_name(keyval, modifiers or 0)
    def gconf_activate(self):
        """Start watching our gconf subtree, seed defaults, and load settings."""
        self.gconf_client = gconf.client_get_default()
        self.gconf_client.add_dir(self.ConfigRoot, gconf.CLIENT_PRELOAD_NONE)
        self.notify_id = self.gconf_client.notify_add(
            self.ConfigRoot, self.gconf_event)
        self.gconf_set_defaults(self.gconf_client)
        self.gconf_configure(self.gconf_client)
    def gconf_deactivate(self):
        """Stop watching gconf and release the client reference."""
        self.gconf_client.notify_remove(self.notify_id)
        del self.notify_id
        del self.gconf_client
def gconf_key_for(self, name):
return '/'.join([self.ConfigRoot, name])
def gconf_set_defaults(self, client):
def set_string_default(name, value):
key = self.gconf_key_for(name)
if client.get(key) is None:
client.set_string(key, value)
def set_bool_default(name, value):
key = self.gconf_key_for(name)
if client.get(key) is None:
client.set_bool(key, value)
set_string_default('scope', | |
"outstunt",
"outsulks",
"outsware",
"outswear",
"outsweep",
"outswept",
"outswims",
"outswing",
"outswore",
"outsworn",
"outswung",
"outtalks",
"outtasks",
"outtells",
"outthank",
"outthink",
"outthrew",
"outthrob",
"outthrow",
"outtower",
"outtrade",
"outtrick",
"outtrots",
"outtrump",
"outturns",
"outvalue",
"outvaunt",
"outvoice",
"outvoted",
"outvotes",
"outvying",
"outwaits",
"outwalks",
"outwaste",
"outwatch",
"outwears",
"outweary",
"outweeps",
"outwhirl",
"outwiled",
"outwiles",
"outwills",
"outwinds",
"outworks",
"outwrite",
"outwrote",
"outyells",
"outyelps",
"outyield",
"ovalness",
"ovariole",
"ovaritis",
"ovenbird",
"ovenlike",
"ovenware",
"overable",
"overacts",
"overaged",
"overages",
"overarch",
"overarms",
"overawed",
"overawes",
"overbake",
"overbear",
"overbeat",
"overbets",
"overbids",
"overbill",
"overbite",
"overblew",
"overblow",
"overboil",
"overbold",
"overbook",
"overbore",
"overborn",
"overbred",
"overburn",
"overbusy",
"overbuys",
"overcall",
"overcold",
"overcook",
"overcool",
"overcram",
"overcrop",
"overcure",
"overcuts",
"overdare",
"overdear",
"overdeck",
"overdoer",
"overdoes",
"overdogs",
"overdraw",
"overdrew",
"overdubs",
"overdyed",
"overdyer",
"overdyes",
"overeasy",
"overeats",
"overedit",
"overfast",
"overfear",
"overfeed",
"overfill",
"overfish",
"overflew",
"overfond",
"overfoul",
"overfree",
"overfund",
"overgild",
"overgilt",
"overgird",
"overgirt",
"overglad",
"overgoad",
"overgrew",
"overgrow",
"overhand",
"overhard",
"overhate",
"overheap",
"overhear",
"overheld",
"overhigh",
"overhold",
"overholy",
"overhope",
"overhung",
"overhunt",
"overhype",
"overidle",
"overjoys",
"overjust",
"overkeen",
"overkind",
"overlade",
"overlain",
"overlate",
"overleap",
"overlend",
"overlent",
"overlets",
"overlewd",
"overlies",
"overlive",
"overlong",
"overloud",
"overlove",
"overlush",
"overmans",
"overmany",
"overmeek",
"overmelt",
"overmild",
"overmilk",
"overmine",
"overmuch",
"overnear",
"overneat",
"overnice",
"overpack",
"overpast",
"overpays",
"overpert",
"overplan",
"overplot",
"overplus",
"overpump",
"overrank",
"overrash",
"overrate",
"overrich",
"overrife",
"overripe",
"overrode",
"overrude",
"overruff",
"oversale",
"oversalt",
"oversave",
"overseed",
"oversell",
"oversets",
"oversewn",
"oversews",
"overshoe",
"overshot",
"oversick",
"overside",
"overslip",
"overslow",
"oversoak",
"oversoft",
"oversoon",
"oversoul",
"overspin",
"overstay",
"overstep",
"overstir",
"oversuds",
"oversups",
"oversure",
"overtalk",
"overtame",
"overtart",
"overtask",
"overthin",
"overtips",
"overtire",
"overtoil",
"overtops",
"overtrim",
"overurge",
"overuses",
"overvote",
"overwarm",
"overwary",
"overweak",
"overwear",
"overween",
"overwets",
"overwide",
"overwily",
"overwind",
"overwise",
"overword",
"overwore",
"overworn",
"overzeal",
"ovicidal",
"ovicides",
"oviducal",
"oviducts",
"oviposit",
"ovoidals",
"ovulated",
"ovulates",
"owlishly",
"oxalated",
"oxalates",
"oxalises",
"oxazepam",
"oxazines",
"oxbloods",
"oxhearts",
"oxidable",
"oxidases",
"oxidasic",
"oxidated",
"oxidates",
"oxidised",
"oxidiser",
"oxidises",
"oxidizes",
"oximeter",
"oxpecker",
"oxtongue",
"oxyacids",
"oxygenic",
"oxyphile",
"oxyphils",
"oxysalts",
"oxysomes",
"oxytocic",
"oxytones",
"oystered",
"oysterer",
"ozonated",
"ozonates",
"ozonides",
"ozonised",
"ozonises",
"ozonized",
"ozonizer",
"ozonizes",
"pabulums",
"pachadom",
"pachalic",
"pachisis",
"pachouli",
"pachucos",
"pacified",
"pacifies",
"packable",
"packeted",
"packness",
"packsack",
"pactions",
"paddings",
"padishah",
"padrones",
"padshahs",
"paduasoy",
"paeanism",
"paesanos",
"pagandom",
"paganise",
"paganish",
"paganist",
"paganize",
"pageboys",
"pagefuls",
"paginate",
"pagurian",
"pagurids",
"pahlavis",
"pahoehoe",
"pailfuls",
"paillard",
"pailsful",
"painches",
"paintier",
"paisanas",
"paisanos",
"paisleys",
"pajamaed",
"palatals",
"palavers",
"palazzos",
"paleface",
"paleness",
"paleosol",
"palestra",
"paletots",
"paleways",
"palewise",
"palfreys",
"palikars",
"palimony",
"palinode",
"palladia",
"palladic",
"palleted",
"pallette",
"palliate",
"pallidly",
"palliest",
"palliums",
"palmated",
"palmette",
"palmfuls",
"palmiest",
"palmists",
"palmitin",
"palmlike",
"palmtops",
"palmyras",
"palookas",
"palpably",
"palpated",
"palpates",
"palpator",
"palpebra",
"palships",
"palsying",
"paltered",
"palterer",
"paltrier",
"paltrily",
"paludism",
"pampeans",
"pamperer",
"pamperos",
"panacean",
"panaceas",
"panaches",
"panatela",
"panbroil",
"pancaked",
"pancetta",
"pandanus",
"pandects",
"pandered",
"panderer",
"pandoors",
"pandoras",
"pandores",
"pandours",
"pandowdy",
"panduras",
"pandying",
"paneless",
"panetela",
"panfried",
"panfries",
"pangenes",
"pangolin",
"pangrams",
"panhuman",
"panicled",
"panicles",
"panicums",
"panmixes",
"panmixia",
"panmixis",
"pannikin",
"panochas",
"panoches",
"panoptic",
"panpipes",
"pansophy",
"pantalet",
"pantiled",
"pantiles",
"pantofle",
"pantoums",
"pantsuit",
"papacies",
"papadams",
"papadoms",
"papadums",
"paperers",
"papering",
"paphians",
"papillae",
"papillar",
"papistic",
"papistry",
"papooses",
"pappadam",
"pappiest",
"pappoose",
"papricas",
"paprikas",
"papulose",
"papyrian",
"papyrine",
"parachor",
"paraders",
"paradors",
"paradrop",
"parafoil",
"paraform",
"paragoge",
"paragons",
"parakite",
"paralyse",
"parament",
"paramour",
"paranoea",
"paranoic",
"parapets",
"paraquet",
"parasail",
"parasang",
"parashah",
"parashot",
"paravane",
"parawing",
"parazoan",
"parbaked",
"parbakes",
"parboils",
"parceled",
"parcener",
"parchesi",
"parching",
"parchisi",
"parclose",
"pardners",
"pardoner",
"parecism",
"pareiras",
"parented",
"parergon",
"paretics",
"parfaits",
"parflesh",
"parfocal",
"pargeted",
"pargings",
"parhelia",
"parhelic",
"parietes",
"parities",
"parkades",
"parkette",
"parkings",
"parklike",
"parkways",
"parlando",
"parlante",
"parlayed",
"parleyed",
"parleyer",
"parodied",
"parodist",
"parolees",
"paroling",
"paronyms",
"paroquet",
"parosmia",
"parotids",
"parotoid",
"paroxysm",
"parquets",
"parridge",
"parriers",
"parritch",
"parroket",
"parroted",
"parroter",
"parrying",
"parsable",
"parsleys",
"parslied",
"parsnips",
"parsonic",
"partaken",
"partaker",
"partakes",
"parterre",
"partials",
"partible",
"partiers",
"partings",
"partitas",
"partizan",
"partlets",
"partyers",
"parvenue",
"parvenus",
"parvises",
"parvolin",
"paschals",
"pashadom",
"pashalic",
"pashalik",
"pasquils",
"passably",
"passades",
"passados",
"passaged",
"passbook",
"passerby",
"passible",
"passings",
"passkeys",
"passless",
"passuses",
"pasterns",
"pasteups",
"pasticci",
"pastiest",
"pastille",
"pastinas",
"pastises",
"pastitso",
"pastless",
"pastness",
"pastored",
"pastorly",
"pastrami",
"pastromi",
"pastural",
"pastured",
"pasturer",
"patagial",
"patagium",
"patamars",
"patchers",
"patchier",
"patchily",
"patellae",
"patellas",
"patentor",
"pathless",
"pathoses",
"patinaed",
"patinate",
"patining",
"patinize",
"patootie",
"patriate",
"patronal",
"patronly",
"patroons",
"pattamar",
"pattened",
"pattered",
"patterer",
"pattypan",
"patulent",
"patulous",
"pauldron",
"paunched",
"paunches",
"paupered",
"paviours",
"pavisers",
"pavisses",
"pavlovas",
"pavonine",
"pawkiest",
"pawnable",
"pawnages",
"pawnshop",
"paxwaxes",
"paybacks",
"paygrade",
"pazazzes",
"peacenik",
"peachers",
"peachier",
"peaching",
"peacoats",
"peacocky",
"peafowls",
"peakiest",
"peakless",
"peaklike",
"pearlash",
"pearlers",
"pearlier",
"pearling",
"pearlite",
"pearmain",
"peartest",
"pearwood",
"peascods",
"peasecod",
"peatiest",
"pebblier",
"pebbling",
"peccable",
"peccancy",
"peccavis",
"peckiest",
"pecorini",
"pecorino",
"pectases",
"pectates",
"pectines",
"pectized",
"pectizes",
"peculate",
"peculium",
"pedagogs",
"pedalers",
"pedalfer",
"pedalier",
"pedalled",
"pedaller",
"pedantry",
"pedately",
"peddlery",
"pederast",
"pedicabs",
"pedicels",
"pedicled",
"pedicles",
"pediform",
"pediment",
"pedipalp",
"pedocals",
"pedology",
"peduncle",
"peebeens",
"peekapoo",
"peelable",
"peelings",
"peephole",
"peerages",
"peesweep",
"peetweet",
"pegboard",
"pegboxes",
"peignoir",
"pekepoos",
"pelagial",
"pelagics",
"pelerine",
"pelisses",
"pellagra",
"pelletal",
"pelleted",
"pellicle",
"pellmell",
"pellucid",
"pelorian",
"pelorias",
"pelotons",
"peltasts",
"peltered",
"peltless",
"peltries",
"pelvises",
"pembinas",
"pemicans",
"pemmican",
"pemoline",
"penalise",
"penality",
"penanced",
"penances",
"penciler",
"pendents",
"pendular",
"penicils",
"penknife",
"penlight",
"penlites",
"pennames",
"pennated",
"pennines",
"pennoned",
"penoches",
"penology",
"penoncel",
"penpoint",
"pensters",
"penstock",
"pentanes",
"pentanol",
"pentarch",
"pentenes",
"pentodes",
"pentomic",
"pentosan",
"pentoses",
"penuches",
"penuchis",
"penuchle",
"penuckle",
"penuries",
"peonages",
"peonisms",
"peoplers",
"peopling",
"peperoni",
"peploses",
"peplumed",
"pepluses",
"peponida",
"peponium",
"pepperer",
"peppiest",
"pepsines",
"peptalks",
"peptidic",
"peptized",
"peptizer",
"peptizes",
"peptones",
"peptonic",
"peracids",
"percales",
"percepts",
"perchers",
"perching",
"percoids",
"perdured",
"perdures",
"peregrin",
"pereions",
"pereopod",
"perfecta",
"perfects",
"perfumer",
"perfuses",
"perianth",
"periapts",
"periblem",
"pericarp",
"pericope",
"periderm",
"peridial",
"peridium",
"peridots",
"perigeal",
"perigean",
"perigees",
"perigons",
"perigyny",
"periling",
"perillas",
"perilled",
"perilune",
"perineum",
"periodid",
"periotic",
"peripety",
"peripter",
"periques",
"perisarc",
"perishes",
"periwigs",
"perjured",
"perjurer",
"perjures",
"perkiest",
"perlites",
"perlitic",
"permeant",
"permuted",
"permutes",
"peroneal",
"perorate",
"peroxids",
"perpends",
"perpents",
"persalts",
"perspire",
"perspiry",
"pertness",
"perturbs",
"perusals",
"perusers",
"pervader",
"peskiest",
"pestered",
"pesterer",
"pesthole",
"pestiest",
"pestling",
"petabyte",
"petaline",
"petalled",
"petalody",
"petaloid",
"petalous",
"petcocks",
"petechia",
"petering",
"petiolar",
"petioled",
"petioles",
"petnaper",
"petrales",
"petrolic",
"petronel",
"petrosal",
"pettable",
"pettedly",
"pettiest",
"pettifog",
"pettings",
"pettling",
"petulant",
"petunias",
"petuntse",
"petuntze",
"pewterer",
"peytrals",
"peytrels",
"pfennige",
"pfennigs",
"phaetons",
"phalange",
"phallism",
"phallist",
"phantast",
"pharming",
"pharoses",
"phaseout",
"phasmids",
"phattest",
"phellems",
"phelonia",
"phenates",
"phenazin",
"phenetic",
"phenetol",
"phenixes",
"phenylic",
"phereses",
"pheresis",
"philabeg",
"philibeg",
"philomel",
"philters",
"philtred",
"philtres",
"philtrum",
"phimoses",
"phimosis",
"phimotic",
"phonated",
"phonates",
"phoneyed",
"phoniest",
"phonying",
"phorates",
"phoronid",
"phosgene",
"phosphid",
"phosphin",
"photoing",
"photomap",
"photopia",
"photopic",
"phratral",
"phratric",
"phreaked",
"phreaker",
"phreatic",
"phthalic",
"phthalin",
"phthises",
"phthisic",
"phthisis",
"phylaxis",
"phyleses",
"phylesis",
"phyletic",
"phyllary",
"phyllite",
"phyllode",
"phylloid",
"phyllome",
"phytanes",
"phytonic",
"piacular",
"piaffers",
"piaffing",
"pianisms",
"piasabas",
"piasavas",
"piassaba",
"piassava",
"piasters",
"piastres",
"pibrochs",
"picachos",
"picadors",
"picaroon",
"piccolos",
"piciform",
"pickadil",
"pickaxed",
"pickaxes",
"pickeers",
"picketed",
"picketer",
"pickiest",
"picklock",
"pickoffs",
"picloram",
"picnicky",
"picogram",
"picoline",
"picolins",
"picomole",
"picotees",
"picoting",
"picowave",
"picquets",
"picrated",
"picrates",
"picrites",
"picritic",
"piddlers",
"piddling",
"piddocks",
"piebalds",
"piecings",
"piecrust",
"piedfort",
"pieforts",
"pieholes",
"pieplant",
"piercers",
"pierrots",
"pietisms",
"pietists",
"piffling",
"pigboats",
"piggiest",
"pignolia",
"pignolis",
"pigskins",
"pigsneys",
"pigstick",
"pigsties",
"pigweeds",
"pilaster",
"pilchard",
"pileated",
"pileless",
"pilewort",
"pilfered",
"pilferer",
"piliform",
"pillaged",
"pillager",
"pillages",
"pillared",
"pillions",
"pillowed",
"pilosity",
"pilsener",
"pilsners",
"pimentos",
"pimiento",
"pimplier",
"pinaster",
"pinballs",
"pinbones",
"pinchbug",
"pincheck",
"pinchers",
"pindling",
"pineland",
"pinelike",
"pineries",
"pinesaps",
"pinfolds",
"pingrass",
"pinheads",
"pinholes",
"pinioned",
"pinitols",
"pinkened",
"pinkeyes",
"pinkings",
"pinkness",
"pinkroot",
"pinnaces",
"pinnated",
"pinniped",
"pinnulae",
"pinnular",
"pinnules",
"pinocles",
"pinprick",
"pintadas",
"pintados",
"pintails",
"pintanos",
"pintsize",
"pinwales",
"pinweeds",
"pinworks",
"pinworms",
"pipeages",
"pipefish",
"pipefuls",
"pipeless",
"pipelike",
"piperine",
"pipestem",
"pipetted",
"pipiness",
"pipingly",
"piquance",
"piquancy",
"piracies",
"piraguas",
"pirarucu",
"piriform",
"pirogies",
"pirogues",
"piroques",
"piroshki",
"pirozhki",
"pirozhok",
"piscator",
"piscinae",
"piscinal",
"piscinas",
"pishoges",
"pishogue",
"pisiform",
"pismires",
"pisolite",
"pisolith",
"pissants",
"pissoirs",
"pistache",
"pistoled",
"pistoles",
"pitahaya",
"pitapats",
"pitchier",
"pitchily",
"pitchman",
"pitchmen",
"pitchout",
"pitheads",
"pithiest",
"pithless",
"pitiable",
"pitiably",
"pitiless",
"pittance",
"pittings",
"pivotman",
"pivotmen",
"pixieish",
"pixiness",
"pizazzes",
"pizzazes",
"pizzazzy",
"pizzelle",
"placable",
"placably",
"placated",
"placater",
"placates",
"placeman",
"placemen",
"placidly",
"plackets",
"placoids",
"plafonds",
"plagiary",
"plaguers",
"plaguily",
"plainest",
"plaining",
"plaister",
"plaiters",
"plaiting",
"planaria",
"planches",
"planchet",
"planform",
"plangent",
"planking",
"plankter",
"planless",
"planosol",
"plantlet",
"planulae",
"planular",
"plashers",
"plashier",
"plashing",
"plasmins",
"plasmoid",
"plasmons",
"plastery",
"plastids",
"plastral",
"plastron",
"plastrum",
"platanes",
"plateaux",
"plateful",
"platiest",
"platinas",
"platings",
"platinic",
"platting",
"plaudits",
"plausive",
"playacts",
"playdate",
"playdays",
"playdown",
"playgoer",
"playless",
"playlets",
"playlike",
"playpens",
"playsuit",
"playwear",
"pleached",
"pleaches",
"pleaders",
"pleasers",
"pleaters",
"pleather",
"pleating",
"plebeian",
"plectron",
"plectrum",
"pledgees",
"pledgeor",
"pledgers",
"pledgets",
"pledgors",
"plenches",
"plenisms",
"plenists",
"plenties",
"pleonasm",
"pleopods",
"plessors",
"pleuston",
"plexuses",
"pliantly",
"plicated",
"plighted",
"plighter",
"plimsole",
"plimsoll",
"plimsols",
"plinkers",
"plinking",
"pliofilm",
"pliotron",
"pliskies",
"plodders",
"ploidies",
"plonking",
"plopping",
"plosions",
"plosives",
"plotless",
"plotline",
"plottage",
"plottier",
"plotties",
"plotzing",
"plougher",
"plowable",
"plowback",
"plowboys",
"plowhead",
"plowland",
"pluckers",
"pluckier",
"pluckily",
"pluggers",
"plugless",
"plugolas",
"plugugly",
"plumaged",
"plumages",
"plumbago",
"plumbery",
"plumbism",
"plumbous",
"plumbums",
"plumelet",
"plumiest",
"plumiped",
"plumlike",
"plummest",
"plummier",
"plumpens",
"plumpest",
"plumping",
"plumpish",
"plumular",
"plumules",
"plunders",
"plungers",
"plunkers",
"plunkier",
"plunking",
"plurally",
"plushest",
"plushier",
"plushily",
"plussage",
"plutonic",
"pluvials",
"pluviose",
"pluvious",
"plyingly",
"plywoods",
"poaceous",
"poachier",
"poblanos",
"pochards",
"pocketer",
"pockiest",
"pockmark",
"pocosens",
"pocosins",
"pocosons",
"podagral",
"podagras",
"podagric",
"podestas",
"podgiest",
"podocarp",
"podomere",
"podsolic",
"podzolic",
"poechore",
"poetised",
"poetiser",
| |
<filename>common_utils.py
!pip install kaggle
import os
def get_size(path=None):
    """Return the total size in bytes of *path* (a file or directory tree).

    path: file or directory to measure. Defaults to the current working
        directory *at call time* — the previous default ``os.getcwd()`` was
        evaluated once at import time, silently freezing the directory.

    Returns the size in bytes (0 for special files such as sockets/FIFOs).
    Symbolic links are skipped so link targets are not double-counted.
    """
    if path is None:
        path = os.getcwd()
    print("Calculating Size: ", path)
    total_size = 0
    # if path is directory--
    if os.path.isdir(path):
        print("Path type : Directory/Folder")
        for dirpath, dirnames, filenames in os.walk(path):
            for f in filenames:
                fp = os.path.join(dirpath, f)
                # skip if it is symbolic link
                if not os.path.islink(fp):
                    total_size += os.path.getsize(fp)
    # if path is a file---
    elif os.path.isfile(path):
        print("Path type : File")
        total_size = os.path.getsize(path)
    else:
        print("Path Type : Special File (Socket, FIFO, Device File)")
        total_size = 0
    bytesize = total_size
    print(bytesize, 'bytes')
    print(bytesize / 1024, 'kilobytes')
    print(bytesize / (1024 * 1024), 'megabytes')
    # fixed output typo: was 'gegabytes'
    print(bytesize / (1024 * 1024 * 1024), 'gigabytes')
    return total_size
x=get_size("/content/examples")
import os
os.makedirs("/content/.kaggle/")
import json
token = {"username":"farhanhaikhan","key":"<KEY>"}
with open('/content/.kaggle/kaggle.json', 'a+') as file:
json.dump(token, file)
import shutil
os.makedirs("/.kaggle/")
src="/content/.kaggle/kaggle.json"
des="/.kaggle/kaggle.json"
shutil.copy(src,des)
os.makedirs("/root/.kaggle/")
!cp /content/.kaggle/kaggle.json ~/.kaggle/kaggle.json
!kaggle config set -n path -v /content
#https://towardsdatascience.com/setting-up-kaggle-in-google-colab-ebb281b61463
!kaggle competitions download -c digit-recognizer
!kaggle datasets download -d tawsifurrahman/covid19-radiography-database
!unzip -q covid19-radiography-database.zip -d /content/dataset
src="/content/Dataset.zip"
des="/content/DATA/"
get_ipython().system('unzip -q {} -d {}'.format(src,des))
import os
def create_dir(dir):
    """Ensure directory *dir* exists and return its path.

    Prints a notice only when the directory had to be created.
    """
    missing = not os.path.exists(dir)
    if missing:
        os.makedirs(dir)
        print("Created Directory : ", dir)
    return dir
import os
import zipfile
def zipdir(path, ziph):
    """Archive every file in the tree rooted at *path*.

    ziph: an already-open ``zipfile.ZipFile`` handle; entries keep their
        full on-disk paths as archive names.
    """
    for folder, _subdirs, names in os.walk(path):
        for name in names:
            ziph.write(os.path.join(folder, name))
def zipper(dir_path, zip_path):
    """Zip the directory tree *dir_path* into the archive *zip_path*.

    Uses a context manager so the archive is closed (and its central
    directory flushed) even if zipdir() raises part-way through — the
    previous version leaked the handle on error.
    """
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        zipdir(dir_path, zipf)
zipper('/content/MAIN/Train',"Zipped_Data.zip")
MAIN="/content/DATA"
SLAVE="/content/final-dataset/"
create_dir(MAIN)
#we want directories such as
#DATA-> Train , Val-> Covid, Normal ,Viral_Pneumonia
TRAIN_PATH = "/content/DATA/Train"
VAL_PATH = "/content/DATA/Val"
create_dir(TRAIN_PATH)
create_dir(VAL_PATH)
from sklearn.model_selection import train_test_split
import random
def distribute(SRC_PATH="/content/final-dataset", TRAIN_PATH="/content/DATA/Train",
               VAL_PATH="/content/DATA/Val", current_class="abc",
               max_val=0, split_frac=0.8):
    """Split one class folder into train/val folders by copying images.

    SRC_PATH / TRAIN_PATH / VAL_PATH: dataset roots; the class subfolder
        name is appended to each.
    current_class: class subfolder to distribute.
    max_val: when non-zero, randomly trim the class to at most this many
        images before splitting.
    split_frac: fraction of images that go to training (default 0.8,
        i.e. the original 80/20 split — this parameter was previously
        accepted but ignored).
    """
    SRC_CLASS_PATH = os.path.join(SRC_PATH, current_class)
    TRAIN_CLASS_PATH = os.path.join(TRAIN_PATH, current_class)
    VAL_CLASS_PATH = os.path.join(VAL_PATH, current_class)
    ITEMS = os.listdir(SRC_CLASS_PATH)
    random.seed(43)  # seed BEFORE sampling so the trim is reproducible
    if max_val != 0:
        # random.sample draws WITHOUT replacement; the previous
        # random.choices() call could pick the same image twice.
        # Cap at the number of images that actually exist.
        ITEMS = random.sample(ITEMS, min(max_val, len(ITEMS)))
        print("Length of ", current_class, " trimmed to ", len(ITEMS))
    print("Length of ", current_class, " ", len(ITEMS))
    x = ITEMS
    y = range(len(x))
    # honor split_frac (previously hard-coded to test_size=0.2)
    xTrain, xTest, yTrain, yTest = train_test_split(
        x, y, test_size=1.0 - split_frac, random_state=43)
    TRAIN_IMGS = xTrain
    VAL_IMGS = xTest
    print("Train : Val Lists ", len(TRAIN_IMGS), " : ", len(VAL_IMGS))
    for each_img in TRAIN_IMGS:
        src = os.path.join(SRC_CLASS_PATH, each_img)
        des = os.path.join(TRAIN_CLASS_PATH, each_img)
        shutil.copy(src, des)
    for each_img in VAL_IMGS:
        src = os.path.join(SRC_CLASS_PATH, each_img)
        des = os.path.join(VAL_CLASS_PATH, each_img)
        shutil.copy(src, des)
category_classes=["COVID-19","NORMAL","Viral Pneumonia"]
for each_class in category_classes:
create_dir(os.path.join(TRAIN_PATH,each_class))
create_dir(os.path.join(VAL_PATH,each_class))
distribute(current_class=each_class,max_val=375)
#75/375 is 20%
#trim all datasets to 300:75 ratio
#other augmentation techniques : CVD_DATASET
#to delete some data
#shutil.rmtree("/content/DATA")
#COVID ZIPPER
from google.colab import drive
drive.mount('/content/drive')
import shutil
import os
def create_dir(dir):
    """Make sure directory *dir* exists; return the same path."""
    if os.path.exists(dir):
        return dir
    os.makedirs(dir)
    print("Created Directory : ", dir)
    return dir
src="/content/drive/My Drive/NEW_DATA/FINAL_AUG_DATA"
des="/content/MAIN"
shutil.copytree(src,des)
def zipper(dir_path, zip_path):
    """Write a deflate-compressed zip archive of *dir_path* to *zip_path*."""
    archive = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
    zipdir(dir_path, archive)
    archive.close()
import os
import zipfile
def zipdir(path, ziph):
    """Recursively add every file below *path* to the open archive *ziph*."""
    for root, _dirs, files in os.walk(path):
        for fname in files:
            full = os.path.join(root, fname)
            ziph.write(full)
def zipper(dir_path, zip_path):
    # Open the target archive, pour the directory tree in, then close it.
    handle = zipfile.ZipFile(zip_path, mode='w', compression=zipfile.ZIP_DEFLATED)
    zipdir(dir_path, handle)
    handle.close()
zipper('/content/MAIN/Train',"'Zipped_Dataset_Train.zip'")
zipf = zipfile.ZipFile('Zipped_Dataset_Train.zip', 'w', zipfile.ZIP_DEFLATED)
zipdir('/content/MAIN/Train', zipf)
zipf.close()
zipf = zipfile.ZipFile('Zipped_Dataset_Val.zip', 'w', zipfile.ZIP_DEFLATED)
zipdir('/content/MAIN/Val', zipf)
zipf.close()
import os, fnmatch
def find(pattern, path):
    """Return full paths of all files under *path* whose basename matches
    the fnmatch-style *pattern*."""
    matches = []
    for root, _dirs, files in os.walk(path):
        matches.extend(os.path.join(root, fname)
                       for fname in files
                       if fnmatch.fnmatch(fname, pattern))
    return matches
RES="/content/drive/My Drive/NEW_DATA/"
zip_file=find('*Zipped_Dataset_Val.zip', '/content/')
print(zip_file[0])
src=zip_file[0]
des=os.path.join(RES,"Zipped_Dataset_Val.zip")
shutil.copy(src,des)
zip_file2=find('*Zipped_Dataset_Train.zip', '/content/')
print(zip_file2[0])
src=zip_file2[0]
des=os.path.join(RES,"Zipped_Dataset_Train.zip")
shutil.copy(src,des)
def get_size(start_path='.'):
    """Total size in bytes of all regular files under *start_path*.

    Symbolic links are skipped so link targets aren't counted twice.
    """
    total = 0
    for folder, _dirs, names in os.walk(start_path):
        for name in names:
            full = os.path.join(folder, name)
            if not os.path.islink(full):
                total += os.path.getsize(full)
    return total
dir="/content/drive/My Drive/NEW_DATA/FINAL_AUG_DATA"
bytesize=get_size(dir)
print(bytesize, 'bytes')
print(bytesize/(1024*1024), 'megabytes')
print(bytesize/(1024*1024*1024), 'gegabytes')
#download the dataset
#main_runner
#https://towardsdatascience.com/setting-up-kaggle-in-google-colab-ebb281b61463
#setting up kaggle in your colab
import os
os.makedirs("/content/.kaggle/")
import json
token = {"username":"farhanhaikhan","key":"<KEY>"}
with open('/content/.kaggle/kaggle.json', 'a+') as file:
json.dump(token, file)
import shutil
os.makedirs("/.kaggle/")
src="/content/.kaggle/kaggle.json"
des="/.kaggle/kaggle.json"
shutil.copy(src,des)
os.makedirs("/root/.kaggle/")
!cp /content/.kaggle/kaggle.json ~/.kaggle/kaggle.json
!kaggle config set -n path -v /content
#https://towardsdatascience.com/setting-up-kaggle-in-google-colab-ebb281b61463
!pip install kaggle
!kaggle datasets download -d tawsifurrahman/covid19-radiography-database
!unzip -q covid19-radiography-database.zip -d /content/dataset
from PIL import Image
directory="/content/dataset/COVID-19 Radiography Database/COVID-19/"
output="/content/dataset_compressed/COVID-19 Radiography Database/COVID-19/"
os.makedirs(output)
list_files=os.listdir(directory)
for each in list_files:
full_path=os.path.join(directory,each)
foo = Image.open(full_path)
x,y=foo.size
x=round(x*0.8)
y=round(y*0.8)
foo = foo.resize((x,y),Image.ANTIALIAS)
foo.save(os.path.join(output,each),optimize=True,quality=95)
"""
The normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse ‘‘interstitial’’ pattern in both lungs.
"""
#auc roc curve
#intersection
#https://github.com/ieee8023/covid-chestxray-dataset
#https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia
#merge all notebooks in a directory!
import os, fnmatch
import io
import sys
import nbformat
from nbformat import v4 as nbf
#from IPython import nbformat #is deprecated
#finds files in a directory corresponding to a regex query
def find(pattern, path):
    """Walk *path* and collect full paths of files matching the fnmatch
    *pattern*."""
    hits = []
    for root, dirs, files in os.walk(path):
        hits += [os.path.join(root, name)
                 for name in files if fnmatch.fnmatch(name, pattern)]
    return hits
#code to print a list vertically :)
def print_list(lst):
    """Print each element of *lst* on its own line."""
    for item in lst:
        print(item)
#function to merge multiple notebooks into one file
def merge_notebooks(filenames,output="output_finale.ipynb",start=True,end=True):
    """Concatenate several .ipynb files into one notebook written to *output*.

    filenames: iterable of notebook paths, merged in the given order.
    output: path of the merged notebook to write.
    start / end: when true, insert a markdown banner cell before / after
        each source notebook's cells naming the file it came from.
    """
    merged = nbf.new_notebook()
    count=0
    for fname in filenames:
        count+=1
        with io.open(fname, 'r', encoding='utf-8') as f:
            print("Reading Notebook",count," : ",fname)
            # as_version=4 upgrades older notebook formats on read
            nb = nbformat.read(f, as_version=4)
        if start:
            start_text = """## Start of notebook """+str(os.path.basename(fname))
            start_cells = [nbformat.v4.new_markdown_cell(start_text)]
            merged.cells.extend(start_cells)
        merged.cells.extend(nb.cells)
        print("Appending to Output Notebook",count," : ",fname)
        if end:
            end_text = """## End of notebook """+str(os.path.basename(fname))
            end_cells = [nbformat.v4.new_markdown_cell(end_text)]
            merged.cells.extend(end_cells)
    # Ensure the metadata has a 'name' field, then tag it as a merge product.
    if not hasattr(merged.metadata, 'name'):
        merged.metadata.name = ''
    merged.metadata.name += "_merged"
    print("Merging to Output Notebook : ",output)
    with io.open(output, 'w', encoding='utf-8') as f:
        nbformat.write(merged, f)
    #print("Merged to Output Notebook : ",os.path.join(os.getcwd(),output))
    print("Merged to Output Notebook : ",output)
    #print(nbformat.writes(merged))
    #nbformat.writes(merged)
#sorted_list=sorted([notebook_files], key=str.lower)
#sort by basename
notebook_files=find('*.ipynb', '/content/')
notebook_files.sort(key=lambda x: os.path.basename(x))
print(len(notebook_files),"Notebook Files Found :")
print_list(notebook_files)
merge_notebooks(filenames=notebook_files,output="merged_output_notebook.ipynb")
#answer on stackoverflow 4 line indent code
filename="/content/db_code.txt"
with open(filename) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.rstrip() for x in content]
for e in content:
print(str(" ")+e)
#finds files in a directory corresponding to a regex query
import os, fnmatch
def find(pattern, path):
    """Recursively search *path*; return files whose names fnmatch *pattern*."""
    found = []
    for top, _subdirs, filenames in os.walk(path):
        for filename in filter(lambda n: fnmatch.fnmatch(n, pattern), filenames):
            found.append(os.path.join(top, filename))
    return found
zip_file=find('*covid19-radiography-database.zip', '/content/')
print(zip_file)
#scrape a table from a webpage
import requests
import pandas as pd
url = "http://www.inwea.org/wind-energy-in-india/wind-power-potential/"
html = requests.get(url).content
df_list = pd.read_html(html)
print(len(df_list))
df1,df2 = df_list
#Estimation of installable wind power potential at 80 m level,Estimation of installable wind power potential at 100 m level
df1.to_csv('small_data.csv')
df2.to_csv('big_data.csv')
df.to_csv('50-80.csv', index = False, header=True)
import os, fnmatch
import sqlite3
import pandas as pd
#creates a directory without throwing an error
def create_dir(dir):
    """Create *dir* if needed, report what happened, and return the path."""
    if os.path.exists(dir):
        print("Directory already existed : ", dir)
    else:
        os.makedirs(dir)
        print("Created Directory : ", dir)
    return dir
#finds files in a directory corresponding to a regex query
def find(pattern, path):
    """Return a list of file paths beneath *path* matching the glob-style
    *pattern*."""
    return [
        os.path.join(root, name)
        for root, _dirs, files in os.walk(path)
        for name in files
        if fnmatch.fnmatch(name, pattern)
    ]
#convert sqlite databases(.db,.sqlite) to pandas dataframe(excel with each table as a different sheet or individual csv sheets)
def save_db(dbpath=None, excel_path=None, csv_path=None,
            extension="*.sqlite", csvs=True, excels=True):
    """Export every table of a SQLite database to CSVs and/or one Excel file.

    dbpath: path to the database; when None the first file matching
        *extension* under the current directory is used (via find()).
    excel_path: output .xlsx path (default: Saved_Dataframes_<name>/
        Excel_Multiple_Sheets.xlsx next to the database).
    csv_path: output directory for per-table CSV files (default: the same
        Saved_Dataframes_<name> folder).
    csvs / excels: which output formats to produce; at least one must be True.

    Returns 0 on success, -1 when both output formats are disabled.
    """
    if not (csvs or excels):
        print("Atleast one of the parameters need to be true: csvs or excels")
        return -1
    # locate the database when no explicit path was given
    if dbpath is None:
        files = find(extension, os.getcwd())
        if len(files) > 1:
            print("Multiple files found! Selecting the first one found!")
            print("To locate your file, set dbpath=<yourpath>")
        dbpath = files[0]
    print("Reading database file from location :", dbpath)
    # derive the output folder "Saved_Dataframes_<dbname>" next to the db
    external_folder, base_name = os.path.split(os.path.abspath(dbpath))
    file_name = os.path.splitext(base_name)[0]
    main_path = os.path.join(external_folder, "Saved_Dataframes_" + file_name)
    os.makedirs(main_path, exist_ok=True)
    if excel_path is None:
        excel_path = os.path.join(main_path, "Excel_Multiple_Sheets.xlsx")
    if csv_path is None:
        csv_path = main_path
    db = sqlite3.connect(dbpath)
    try:
        cursor = db.cursor()
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [row[0] for row in cursor.fetchall()]
        cursor.close()
        print(len(tables), "Tables found :")
        writer = None
        if excels:
            # NOTE: the engine module is the lowercase 'xlsxwriter'; the
            # previous code tried 'import XlsxWriter' (plus a notebook-only
            # '!pip install' line that is a SyntaxError in plain Python),
            # which always failed.
            writer = pd.ExcelWriter(excel_path, engine='xlsxwriter')
        try:
            # single export loop replaces the three duplicated branches
            for i, table_name in enumerate(tables, start=1):
                # table names come from sqlite_master; quote them anyway
                table = pd.read_sql_query('SELECT * from "%s"' % table_name, db)
                if excels:
                    print("Parsing Excel Sheet ", i, " : ", table_name)
                    table.to_excel(writer, sheet_name=table_name, index=False)
                if csvs:
                    print("Parsing CSV File ", i, " : ", table_name)
                    table.to_csv(os.path.join(csv_path, table_name + '.csv'),
                                 index_label='index')
        finally:
            if writer is not None:
                writer.close()
    finally:
        db.close()
    return 0
save_db();
#mount the drive where we have the downloaded trained weights
from google.colab import drive
drive.mount('/content/drive',force_remount=True)
from google.colab import drive
drive.mount('/gdrive')
#common utility functions used everywhere
from IPython.display import FileLink
FileLink(r'/kaggle/input/lung-segmentation-unet/best_model.h5')
#creates a directory without throwing an error
def create_dir(dir):
    """Idempotently create directory *dir*, announce the outcome, return it."""
    already_there = os.path.exists(dir)
    if already_there:
        print("Directory already existed : ", dir)
    else:
        os.makedirs(dir)
        print("Created Directory : ", dir)
    return dir
#counts the number of files in a directory
def count_no(dir):
    """Return the number of entries directly inside directory *dir*."""
    return len(os.listdir(dir))
def ListDiff(li1, li2):
    """Return the elements of *li1* absent from *li2* (set semantics:
    duplicates collapse and order is not preserved)."""
    return list(set(li1).difference(li2))
import os
import zipfile
def zipdir(path, ziph):
    """Add every file in the tree rooted at *path* to open ZipFile *ziph*."""
    for current_root, _dirs, filenames in os.walk(path):
        for entry in filenames:
            ziph.write(os.path.join(current_root, entry))
zipf = zipfile.ZipFile('Zipped_Dataset.zip', 'w', zipfile.ZIP_DEFLATED)
zipdir('/content/FINAL_AUG_DATA', zipf)
zipf.close()
get_ipython().system('unzip -q {} -d /content/kaggle_dir'.format(zip_file[0]))
print("Done Unzipping!")
#main_runner
#https://towardsdatascience.com/setting-up-kaggle-in-google-colab-ebb281b61463
#setting up kaggle in your colab
import os
os.makedirs("/content/.kaggle/")
import json
token = {"username":"farhanhaikhan","key":"<KEY>"}
with open('/content/.kaggle/kaggle.json', 'a+') as file:
json.dump(token, file)
import shutil
os.makedirs("/.kaggle/")
src="/content/.kaggle/kaggle.json"
des="/.kaggle/kaggle.json"
shutil.copy(src,des)
os.makedirs("/root/.kaggle/")
!cp /content/.kaggle/kaggle.json ~/.kaggle/kaggle.json
!kaggle config set -n path -v{/content}
#############################
#download google drive file to kaggle_dir
#turn on the internet first
import gdown
url = 'https://drive.google.com/uc?id=1UP_Gv9D0nqTHaK7haudwPqjshW_XYEFz'
output = 'dataset.zip'
gdown.download(url, output, quiet=False)
####################
def print_dict(new_dict):
    """Print every key/value pair of *new_dict*, one per line."""
    for key, value in new_dict.items():
        print(key, " :", value)
############################
!unzip -q /kaggle/working/dataset.zip -d /kaggle/working/data_aug
#############################
import shutil
shutil.rmtree("/kaggle/working/data_aug")
##############################
import shutil
src=""
des=""
shutil.copy(src,des)
###########################
import os
"""def get_size(start_path = '.'):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in | |
<reponame>nicolargo/clize
# clize - function decorator for creating commands
# Copyright (C) 2011, 2012 by <NAME> <<EMAIL>>
# Copyright (C) 2012 by <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -*- coding: utf-8 -*
from __future__ import print_function, unicode_literals
from functools import wraps, partial
from collections import namedtuple, OrderedDict
import re
from textwrap import TextWrapper
import sys
import os
import inspect
from gettext import gettext as _, ngettext as _n
__version__ = '2.3'
__all__ = ['run', 'clize', 'ArgumentError']
# --- Python 2/3 compatibility shims ------------------------------------
if not hasattr(inspect, 'FullArgSpec'):
    # we are on python2, make functions to emulate python3 ran on
    # a py2-style function
    FullArgSpec = namedtuple(
        'FullArgSpec',
        (
            'args', 'varargs', 'varkw', 'defaults',
            'kwonlyargs', 'kwonlydefaults', 'annotations'
        )
    )
    def getfullargspec(func):
        # Wrap the py2 getargspec() result in a py3-shaped FullArgSpec;
        # py2 has no keyword-only args or annotations, hence the
        # empty-list / None / empty-dict tail.
        argspec = inspect.getargspec(func)
        return FullArgSpec(
            argspec.args,
            argspec.varargs,
            argspec.keywords,
            argspec.defaults,
            [], None, {}
        )
else:
    getfullargspec = inspect.getfullargspec
try:
    basestring
except NameError:
    # Python 3: unify the string types and make decode() a no-op.
    basestring = str
    unicode = str
    decode = lambda s: s
else:
    # Python 2: command-line bytes are decoded from UTF-8.
    decode = lambda s: s.decode('utf8')
class ArgumentError(TypeError):
    """Raised when a command line cannot be parsed against a command.

    Callers populate ``args`` as ``(message, name, command)``; ``__str__``
    renders the message followed by the command's usage line.
    """
    def __str__(self):
        # Prefix the message (when non-empty) to the one-line usage text.
        # NOTE(review): ``help`` here is presumably this module's own help
        # function defined elsewhere in the file, not the builtin — confirm.
        return str(((self.args[0] + '\n') if self.args[0] else '')
            + help(self.args[2], self.args[1],
                just_do_usage=True, do_print=False))
# Describes one command-line parameter of a wrapped function.
Option = namedtuple('Option', [
    'source',          # argument name on the wrapped function
    'names',           # CLI names/aliases (first entry is canonical)
    'default',         # default value, or None when required
    'type',            # coercion callable applied to the raw string
    'help',            # help text shown in usage output
    'optional',        # True when the option may be omitted
    'positional',      # True when consumed positionally
    'takes_argument',  # number of arguments the option consumes
    'catchall',        # True for *args-style collectors
])


def make_flag(source, names, default=False, type=bool, help='',
              takes_argument=0):
    """Build an optional, non-positional Option (a command-line flag)."""
    return Option(
        source=source,
        names=names,
        default=default,
        type=type,
        help=help,
        optional=True,
        positional=False,
        takes_argument=takes_argument,
        catchall=False,
    )
# A fully-parsed command: doc paragraphs plus its positional/optional args.
Command = namedtuple(
    'Command',
    (
        'description',
        'footnotes',
        'posargs',
        'options',
    )
)

# A dispatcher over several subcommands.
SuperCommand = namedtuple(
    'SuperCommand',
    (
        'description',
        'footnotes',
        'subcommands',
    )
)

# Matches a docstring paragraph of the form "argname: description".
# Raw string fix: '\w' in a plain literal is an invalid escape sequence
# (DeprecationWarning on Python 3.6+, slated to become an error).
argdesc = re.compile(r'^(\w+): (.*)$', re.DOTALL)
def read_docstring(fn):
    """Parse *fn*'s docstring into (description, opts_help, footnotes).

    Paragraphs of the form "name: text" become entries of *opts_help*;
    ordinary paragraphs land in *description* until the first such entry
    appears and in *footnotes* afterwards.
    """
    description, footnotes = [], []
    opts_help = {}
    doc = inspect.getdoc(fn)
    for paragraph in (doc.split('\n\n') if doc else ()):
        match = argdesc.match(paragraph)
        if match is not None:
            name, text = match.groups()
            opts_help[name] = text
        elif opts_help:
            footnotes.append(paragraph)
        else:
            description.append(paragraph)
    return description, opts_help, footnotes
def annotation_aliases(annotations):
    """Keep only the space-free strings (CLI aliases) from *annotations*."""
    return tuple(
        item for item in annotations
        if isinstance(item, str) and ' ' not in item
    )
def read_annotations(annotations, source):
    """Normalise a parameter annotation into (aliases, flags, coercion).

    annotations: one annotation or an iterable of them; each item may be an
        AnnotationFlag subclass, a space-free alias string, or a callable
        used to coerce the raw argument string.
    source: the parameter name, used only in error messages.

    Returns ``(alias_tuple, flag_tuple, coerce_callable_or_None)``; raises
    ValueError for strings containing spaces, a second coercion callable,
    or anything else unrecognised.
    """
    alias = []
    flags = []
    coerce = None
    try:
        iter(annotations)
    except TypeError:
        # A single bare annotation: treat it as a one-element tuple.
        annotations = (annotations,)
    else:
        # Strings are iterable but must not be split into characters.
        if isinstance(annotations, basestring):
            annotations = (annotations,)
    for i, annotation in enumerate(annotations):
        if (
            isinstance(annotation, type)
            and issubclass(annotation, AnnotationFlag)
        ):
            flags.append(annotation)
        elif isinstance(annotation, basestring):
            if ' ' not in annotation:
                alias.append(annotation)
            else:
                raise ValueError(
                    "Aliases may not contain spaces. "
                    "Put argument descriptions in the docstring."
                )
        elif callable(annotation):
            # Only one coercion function is allowed per parameter.
            if coerce is not None:
                raise ValueError(
                    "Coercion function already encountered before "
                    "index {0} of annotation on {1}: {2!r}"
                    .format(i, source, annotation)
                )
            coerce = annotation
        else:
            raise ValueError(
                "Don't know how to interpret index {0} of "
                "annotation on {1}: {2!r}"
                .format(i, source, annotation)
            )
    return tuple(alias), tuple(flags), coerce
def read_argument(
    i, argname, argspec, opts_help,
    alias, force_positional,
    coerce, use_kwoargs
):
    """Build an Option describing one parameter of the target function.

    i is the parameter's index in argspec.args, or None for a keyword-only
    parameter.  opts_help maps names to docstring help text, alias maps
    names to extra option names, coerce maps names to conversion callables,
    and force_positional lists names that must be treated positionally.
    """
    annotations = argspec.annotations.get(argname, ())
    alias_, flags_, coerce_ = read_annotations(annotations, argname)
    # A coercion from the annotation wins over one from the coerce mapping.
    if not coerce_:
        coerce_ = coerce.get(argname, coerce_)
    try:
        if i is None:
            # Keyword-only parameter: its default (if any) lives in
            # kwonlydefaults (KeyError/TypeError when absent).
            default = argspec.kwonlydefaults[argname]
        else:
            # Positional parameter: defaults align with the *tail* of
            # argspec.args, hence the negative index (IndexError/TypeError
            # when this parameter has no default).
            default = argspec.defaults[-len(argspec.args) + i]
    except (KeyError, IndexError, TypeError):
        # No default -> required argument; parse as text unless a coercion
        # function was supplied.
        default = None
        optional = False
        type_ = coerce_ or unicode
    else:
        optional = True
        if not coerce_:
            # Infer the type from the default value; None defaults fall
            # back to plain text.
            type_ = unicode if default is None else type(default)
        else:
            type_ = coerce_
    # In kwoargs mode every declared-positional parameter stays positional;
    # otherwise only required parameters do.
    positional = i is not None if use_kwoargs else not optional
    if (
        argname in force_positional
        or clize.POSITIONAL in flags_
    ):
        if use_kwoargs:
            raise ValueError("Cannot use clize.POSITIONAL along with keyword-only arguments.")
        positional = True
    option = Option(
        source=argname,
        # Underscores become dashes on the command line; extra names come
        # from the alias mapping and from string annotations.
        names=
        (argname.replace('_', '-'),)
        + alias.get(argname, ())
        + alias_,
        default=default,
        type=type_,
        help=opts_help.get(argname, ''),
        optional=optional,
        positional=positional,
        # bool options are flags: they consume no following argument.
        takes_argument=int(type_ != bool),
        catchall=False,
    )
    return option
def read_arguments(fn, alias, force_positional, require_excess, coerce, use_kwoargs=None):
    """Inspect fn and build (Command, argspec) describing its CLI interface."""
    argspec = getfullargspec(fn)
    description, opts_help, footnotes = read_docstring(fn)
    # Unless told otherwise, the presence of keyword-only parameters
    # switches kwoargs mode on.
    if use_kwoargs is None:
        use_kwoargs = bool(argspec.kwonlyargs)
    posargs = []
    options = []
    if force_positional and use_kwoargs:
        raise ValueError("Cannot use force_positional along with keyword-only arguments.")
    for index, argname in enumerate(argspec.args):
        opt = read_argument(
            index, argname, argspec, opts_help,
            alias, force_positional,
            coerce, use_kwoargs)
        if not opt.positional:
            options.append(opt)
            continue
        # Positional arguments must go required-first.
        if not opt.optional and posargs and posargs[-1].optional:
            raise ValueError(
                "Cannot have required argument \"{0}\" after an optional argument."
                .format(argname)
            )
        posargs.append(opt)
    for argname in argspec.kwonlyargs:
        options.append(read_argument(
            None, argname, argspec, opts_help,
            alias, force_positional,
            coerce, use_kwoargs))
    if argspec.varargs:
        # *args becomes a trailing catchall positional.
        if require_excess and posargs and posargs[-1].optional:
            raise ValueError("Cannot require excess arguments with optional arguments.")
        posargs.append(
            Option(
                source=argspec.varargs,
                names=(argspec.varargs.replace('_', '-'),),
                default=None,
                type=unicode,
                help=opts_help.get(argspec.varargs, ''),
                optional=not require_excess,
                positional=True,
                takes_argument=False,
                catchall=True,
            )
        )
    return (
        Command(
            description=tuple(description), footnotes=tuple(footnotes),
            posargs=posargs, options=options),
        argspec
    )
def get_arg_name(arg):
    """Return arg's usage-line name: '...'-suffixed for catchall arguments,
    bracketed when optional.

    (Replaces the fragile ``cond and a or b`` idiom with real conditional
    expressions; behavior is unchanged.)
    """
    name = arg.names[0] + ('...' if arg.catchall else '')
    return '[' + name + ']' if arg.optional else name
def get_type_name(func):
    """Uppercased metavar for a conversion callable ('STR' for the text types)."""
    if func in (unicode, str):
        return 'STR'
    return func.__name__.upper()
def get_option_names(option):
    """Format option's names for help output.

    Short flags ('-x') come first, then long ones ('--xx'); positional
    arguments keep bare names.  A '=TYPE' metavar is appended when the
    option takes a typed value, and '...' marks positional catchalls.
    """
    shorts = []
    longs = []
    for name in option.names:
        if option.positional:
            longs.append(name)
        elif len(name) == 1:
            shorts.append('-' + name)
        else:
            longs.append('--' + name)
    rendered = shorts + longs
    needs_metavar = (
        (not option.positional and option.type != bool)
        or (option.positional and option.type != unicode)
    )
    if needs_metavar:
        rendered[-1] += '=' + get_type_name(option.type)
    if option.positional and option.catchall:
        rendered[-1] += '...'
    return ', '.join(rendered)
# Width used for wrapping help text: one column narrower than the terminal
# (but never below 50), or 70 when the terminal size cannot be determined
# (no tty, or a Python without os.get_terminal_size).
try:
    _columns = os.get_terminal_size().columns
except (AttributeError, OSError):
    terminal_width = 70  # fair terminal dice roll
else:
    terminal_width = max(50, _columns - 1)
def get_default_for_printing(default):
    """repr() of a default value, minus the Python 2 u'' prefix on unicode."""
    text = repr(default)
    if isinstance(default, unicode) and text.startswith('u'):
        return text[1:]
    return text
def print_arguments(arguments, width=None):
    """Render one aligned help line per argument.

    width is a *minimum* name-column width: it is always widened to fit the
    longest name among `arguments` (None means start from 0).

    (Fixed ``width == None`` to the idiomatic ``width is None``; behavior
    is unchanged.)
    """
    if width is None:
        width = 0
    for arg in arguments:
        width = max(width, len(get_option_names(arg)))
    help_wrapper = TextWrapper(
        width=terminal_width,
        initial_indent=' ' * (width + 5),
        subsequent_indent=' ' * (width + 5),
    )
    # The wrapper indents every line of the wrapped help text; the first
    # line's indent is sliced off ([width + 4:]) because the format string
    # already places the text after the padded name column.
    return ('\n'.join(
        ' ' * 2 + '{0:<{width}} {1}'.format(
            get_option_names(arg),
            help_wrapper.fill(
                arg.help +
                (
                    _('(default: {0})').format(arg.default)
                    if arg.default not in (None, False)
                    else _('(required)')
                    if not (arg.optional or arg.positional)
                    else ''
                )
            )[width + 4:]
            if arg.help else '',
            width=width,
        ) for arg in arguments))
def format_p(tw, p):
    """Wrap paragraph p with wrapper tw, leaving pre-indented (literal)
    paragraphs untouched."""
    if p.startswith(' '):
        return p
    return tw.fill(p)
def help(name, command, just_do_usage=False, do_print=True, **kwargs):
    """Build (and by default print) the help text for a command.

    name: the program/command name shown on the usage line.
    command: a Command or SuperCommand namedtuple; which sections are
        rendered depends on which fields the namedtuple has.
    just_do_usage: stop after the usage line.
    do_print: also print the text before returning it.

    Returns the rendered help text.  (Deliberately shadows the `help`
    builtin: this module's CLI help renderer.)
    """
    ret = ""
    # Usage line: SuperCommands take a sub-command word; "[OPTIONS]" shows
    # when options exist (or the namedtuple has no options field at all);
    # positional argument names are listed for plain Commands.
    ret += (_('Usage: {name}{options} {args}').format(
        name=name + (' command' if 'subcommands' in command._fields
                     else ''),
        options=(
            _(' [OPTIONS]')
            if 'options' not in command._fields
            or command.options
            else ''),
        args=(
            ' '.join(get_arg_name(arg) for arg in command.posargs)
            if 'posargs' in command._fields else ''
        ),
    ))
    if just_do_usage:
        if do_print:
            print(ret)
        return ret
    tw = TextWrapper(width=terminal_width)
    # Leading '' yields the blank line separating usage from description.
    ret += '\n\n'.join(
        format_p(tw, p) for p in ('',) + command.description) + '\n'
    if 'subcommands' in command._fields and command.subcommands:
        ret += '\n' + _('Available commands:') + '\n'
        ret += print_arguments(command.subcommands) + '\n'
    if 'posargs' in command._fields and command.posargs:
        ret += '\n' + _('Positional arguments:') + '\n'
        ret += print_arguments(command.posargs) + '\n'
    if 'options' in command._fields and command.options:
        ret += '\n' + _('Options:') + '\n'
        ret += print_arguments(command.options) + '\n'
    if 'subcommands' in command._fields and command.subcommands:
        ret += '\n' + tw.fill(_(
            "See '{0} command --help' for more information "
            "on a specific command.").format(name)) + '\n'
    if command.footnotes:
        ret += '\n' + '\n\n'.join(format_p(tw, p) for p in command.footnotes)
        ret += '\n'
    if do_print:
        print(ret)
    return ret
def get_option(name, list):
    """Return the first option in `list` whose names include `name`.

    Raises KeyError carrying `name` when no option matches (the bare
    ``raise KeyError`` previously gave no hint which name was missing).
    The second parameter keeps its historical name `list` for
    backward compatibility even though it shadows the builtin.
    """
    for option in list:
        if name in option.names:
            return option
    raise KeyError(name)
def coerce_option(val, option, key, command, name):
    """Convert the raw string `val` with option.type.

    On ValueError, raises ArgumentError identifying the offending option.
    NOTE: the parameter order here is (..., key, command, name) — callers
    must pass command before name.
    """
    try:
        return option.type(val)
    except ValueError:
        # Re-render the key with its dash prefix for the error message.
        key = (len(key) == 1 and '-' + key) or ('--' + key)
        raise ArgumentError(_("{0} needs an argument of type {1}")
                            .format(key, option.type.__name__.upper()),
                            name, command
                            )
def set_arg_value(val, option, key, params, name, command):
    """Record the parsed value for `option`.

    When option.source is callable it is invoked as a handler (and its
    return value propagated); otherwise the value is coerced and stored in
    `params` under the source key.
    """
    if callable(option.source):
        return option.source(name=name, command=command,
                             val=val, params=params)
    # Bug fix: coerce_option's signature is (val, option, key, command,
    # name) — the arguments were previously passed as (..., name, command),
    # swapping them inside the error path.
    params[option.source] = coerce_option(
        val, option, key, command, name)
def get_following_arguments(i, option, input, key, command, name):
if i + option.takes_argument >= len(input):
raise | |
<filename>scripts/discovery_tests_with_setup.py
# Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/'
#
# To run all tests, you can do 'python -m testtools.run tests'. To list the
# available tests, you can do 'python -m testtools.run -l tests'.
# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD
#
import os
import signal
from novaclient import client as mynovaclient
from novaclient import exceptions as novaException
import unittest
import fixtures
import testtools
import traceback
import traffic_tests
from contrail_test_init import *
from vn_test import *
from quantum_test import *
from vnc_api_test import *
from nova_test import *
from vm_test import *
from connections import ContrailConnections
from floating_ip import *
from policy_test import *
from multiple_vn_vm_test import *
from contrail_fixtures import *
from tcutils.wrappers import preposttest_wrapper
import uuid
#from analytics_tests import *
class TestDiscoveryFixture(testtools.TestCase, fixtures.TestWithFixtures):
# @classmethod
def setUp(self):
super(TestDiscoveryFixture, self).setUp()
if 'PARAMS_FILE' in os.environ:
self.ini_file = os.environ.get('PARAMS_FILE')
else:
self.ini_file = 'params.ini'
self.inputs = self.useFixture(ContrailTestInit(self.ini_file))
self.connections = ContrailConnections(self.inputs)
self.quantum_fixture = self.connections.quantum_fixture
self.nova_fixture = self.connections.nova_fixture
self.vnc_lib = self.connections.vnc_lib
self.logger = self.inputs.logger
self.agent_inspect = self.connections.agent_inspect
self.cn_inspect = self.connections.cn_inspect
self.analytics_obj = self.connections.analytics_obj
self.ds_obj = self.connections.ds_verification_obj
# end setUpClass
    def cleanUp(self):
        """Tear-down hook; delegates to the superclass.

        NOTE(review): testtools.TestCase does not itself define cleanUp —
        presumably this resolves via the fixtures machinery; confirm it is
        actually invoked by the runner.
        """
        super(TestDiscoveryFixture, self).cleanUp()
    # end cleanUp
    def runTest(self):
        # Placeholder so runners that require a default test method can
        # instantiate the class; real tests are the test_* methods.
        pass
    # end runTest
@preposttest_wrapper
def test_all_publishers_registered_to_discovery_service(self):
'''
Description:Validate all services are registered to discovery service
Steps:
1.Gets expected services to be published to discovery from testbed.py
2.Gets actually published services to discovery from <ip>:5998/services.json
3.Find out any diff between expected and actual list of publishers - fails test case if there is any diff
4.Checkes all the published services are up from discovery - fails if any of them down
Maintainer: <EMAIL>
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_registered_services_to_discovery_service(
ip)
return True
@preposttest_wrapper
def test_agent_gets_control_nodes_from_discovery(self):
'''
Description:Validate agents subscribed to control node service
Steps:
1.Get all xmpp-clients from connected to a xmpp server from discovery
2.From introspect of each of those xmpp-clients,verify if that client connected to the same xmpp server and connection established- fails otherwise
Maintainer: <EMAIL>
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_bgp_connection(ip)
return True
@preposttest_wrapper
def test_agents_connected_to_dns_service(self):
''' Validate agents subscribed to dns service
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_agents_connected_to_dns_service(ip)
return True
@preposttest_wrapper
def test_agents_connected_to_collector_service(self):
'''
Description: Validate agents subscribed to collector service
1.Verify all agents subscribed to collector service from discovery - fails otherwise
Maintainer: <EMAIL>
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_agents_connected_to_collector_service(ip)
return True
@preposttest_wrapper
def test_dns_agents_connected_to_collector_service(self):
''' Validate dns agents subscribed to collector service
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_dns_agent_connected_to_collector_service(
ip)
return True
@preposttest_wrapper
def test_control_nodes_connected_to_collector_service(self):
''' Validate control nodes subscribed to collector service
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_control_nodes_connected_to_collector_service(
ip)
return True
@preposttest_wrapper
def test_control_nodes_subscribed_to_ifmap_service(self):
'''
Description: Validate control nodes subscribed to ifmap service
1.Verify that control-node subscribed to ifmap server and the get the ifmap server info from discovery - fails otherwise
2.Go to control node introspect to verify if control node actually connected to that ifmap - fails otherwise
Maintainer: <EMAIL>
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_control_nodes_subscribed_to_ifmap_service(
ip)
return True
@preposttest_wrapper
def test_dns_agents_subscribed_to_ifmap_service(self):
''' Validate dns agents subscribed to ifmap service
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_dns_agent_subscribed_to_ifmap_service(ip)
return True
@preposttest_wrapper
def test_ApiServer_subscribed_to_collector_service(self):
''' Validate apiserver subscribed to collector service
'''
for ip in self.inputs.cfgm_ips:
self.logger.info("Verifying for ip %s" % (ip))
assert self.ds_obj.verify_ApiServer_subscribed_to_collector_service(
ip)
return True
@preposttest_wrapper
def test_Schema_subscribed_to_collector_service(self):
''' Validate schema subscribed to collector service
'''
assert self.ds_obj.verify_Schema_subscribed_to_collector_service()
return True
@preposttest_wrapper
def itest_cross_verification_objects_in_all_discovery(self):
''' cross verification objects in all discovery
'''
assert self.ds_obj.cross_verification_objects_in_all_discovery()
return True
@preposttest_wrapper
def test_ServiceMonitor_subscribed_to_collector_service(self):
''' Validate service monitor subscribed to collector service
'''
assert self.ds_obj.verify_ServiceMonitor_subscribed_to_collector_service(
)
return True
@preposttest_wrapper
def test_control_node_restart_and_validate_status_of_the_service(self):
''' Validate restart of control node services
'''
result = True
svc_lst = []
svc_lst = self.ds_obj.get_all_control_services(self.inputs.cfgm_ip)
for elem in svc_lst:
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_touple=elem) == 'up'):
self.logger.info("Service %s is up" % (elem,))
result = result and True
else:
self.logger.warn("Service %s is down" % (elem,))
result = result and False
svc_lst.remove(elem)
# Stopping the control node service
for elem in svc_lst:
ip = elem[0]
self.logger.info("Stopping service %s.." % (elem,))
self.inputs.stop_service('contrail-control', [ip])
time.sleep(20)
for elem in svc_lst:
ip = elem[0]
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_touple=elem) == 'up'):
self.logger.warn("Service %s is still up" % (elem,))
result = result and False
else:
self.logger.info("Service %s is down" % (elem,))
result = result and True
# Starting the control node service
for elem in svc_lst:
ip = elem[0]
self.logger.info("Starting service %s.." % (elem,))
self.inputs.start_service('contrail-control', [ip])
time.sleep(6)
for elem in svc_lst:
ip = elem[0]
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_touple=elem) == 'up'):
self.logger.info(
"Service %s came up after service was started" % (elem,))
result = result and True
else:
self.logger.info(
"Service %s is down even after service was started" % (elem,))
result = result and False
assert result
return True
    @preposttest_wrapper
    def test_agent_restart(self):
        """Validate discovery's in-use accounting across an agent stop/start.

        Shrinks the discovery TTL window (ttl_min=5 / ttl_max=10) so
        subscription changes are visible quickly, then for each compute
        node stops and restarts contrail-vrouter, checking that the
        xmpp-server subscription's in_use count drops by one while the
        agent is down and recovers once it is back.  The TTL settings are
        reverted at the end.  (Python 2 code: iteritems / print statement.)
        """
        assert self.ds_obj.verify_bgp_connection()
        result = True
        # Lower the discovery TTLs so subscription changes show up fast.
        cmd = 'cd /etc/contrail;sed -i \'/ttl_min.*=.*/c\\ttl_min = 5\' discovery.conf'
        for ip in self.inputs.cfgm_ips:
            self.inputs.run_cmd_on_server(
                ip, cmd, username='root', password='<PASSWORD>')
        cmd = 'cd /etc/contrail;sed -i \'/ttl_max.*=.*/c\\ttl_max = 10\' discovery.conf'
        for ip in self.inputs.cfgm_ips:
            self.inputs.run_cmd_on_server(
                ip, cmd, username='root', password='<PASSWORD>')
        for ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-discovery', [ip])
        time.sleep(2)
        assert self.analytics_obj.verify_cfgm_uve_module_state(
            self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
        # Bounce all agents once so they re-subscribe under the new TTLs.
        for ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter', [ip])
        time.sleep(20)
        for ip in self.inputs.compute_ips:
            in_use_initial = {}
            in_use_after_stop = {}
            in_use_after_start = {}
            lst_in_use = []
            lst_svc_id = []
            t = {}
            svc_id = []
            # Baseline: in_use per xmpp-server subscription of this agent.
            svc_id = self.ds_obj.get_subscribed_service_id(
                self.inputs.cfgm_ip, client=(ip, 'VRouterAgent'), service='xmpp-server')
            for service in svc_id:
                t = self.ds_obj.get_service_status_by_service_id(
                    self.inputs.cfgm_ip, service_id=service)
                in_use_initial[service] = t['in_use']
                self.logger.info(
                    "%s service id in use before agent %s restart: %s" %
                    (service, ip, t['in_use']))
            compute_node_process = ['contrail-vrouter']
            for process in compute_node_process:
                try:
                    # Stop the agent and wait past the TTL so discovery
                    # notices the dropped subscription.
                    self.inputs.stop_service(process, [ip])
                    time.sleep(50)
                    for service in svc_id:
                        t = self.ds_obj.get_service_status_by_service_id(
                            self.inputs.cfgm_ip, service_id=service)
                        in_use_after_stop[service] = t['in_use']
                        self.logger.info(
                            "%s service id in use after agent %s restart: %s" %
                            (service, ip, t['in_use']))
                    # Each subscription's in_use must drop by exactly one.
                    for k, v in in_use_after_stop.iteritems():
                        for k1, v1 in in_use_initial.iteritems():
                            if (k1 == k):
                                if (int(v1) - int(v) == 1):
                                    self.logger.info(
                                        "in-use decremented for %s service-id after %s agent stopped" % (k1, ip))
                                    result = result and True
                                else:
                                    self.logger.warn(
                                        "in-use not decremented for %s service-id after %s agent stopped" % (k1, ip))
                                    result = result and False
                except Exception as e:
                    print e
                finally:
                    # Always restart the agent, even if verification failed.
                    self.inputs.start_service(process, [ip])
                    time.sleep(10)
            # Re-read the subscriptions after restart; in_use must recover.
            svc_id = self.ds_obj.get_subscribed_service_id(
                self.inputs.cfgm_ip, client=(ip, 'VRouterAgent'), service='xmpp-server')
            for service in svc_id:
                t = self.ds_obj.get_service_status_by_service_id(
                    self.inputs.cfgm_ip, service_id=service)
                in_use_after_start[service] = t['in_use']
                self.logger.info(
                    "%s service id in use after agent %s restart: %s" %
                    (service, ip, t['in_use']))
            for k, v in in_use_after_start.iteritems():
                for k1, v1 in in_use_after_stop.iteritems():
                    if (k1 == k):
                        if (int(v) - int(v1) == 1):
                            self.logger.info(
                                "in-use incremented for %s service-id after %s agent started" % (k1, ip))
                            result = result and True
                        else:
                            self.logger.warn(
                                "in-use not incremented for %s service-id after %s agent started" % (k1, ip))
                            result = result and False
            self.logger.info(
                "************ END for %s *************" % (ip))
        # reverting back the changes in discovery.conf
        cmd = 'cd /etc/contrail;sed -i \'/ttl_min.*=.*/c\\ttl_min = 300\' discovery.conf'
        for ip in self.inputs.cfgm_ips:
            self.inputs.run_cmd_on_server(
                ip, cmd, username='root', password='<PASSWORD>')
        cmd = 'cd /etc/contrail;sed -i \'/ttl_max.*=.*/c\\ttl_max = 1800\' discovery.conf'
        for ip in self.inputs.cfgm_ips:
            self.inputs.run_cmd_on_server(
                ip, cmd, username='root', password='<PASSWORD>')
        for ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-discovery', [ip])
        time.sleep(2)
        assert self.analytics_obj.verify_cfgm_uve_module_state(
            self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
        assert self.ds_obj.verify_bgp_connection()
        assert result
        # Let subscriptions settle back under the restored (longer) TTLs.
        time.sleep(300)
        return True
@preposttest_wrapper
def test_change_parameters_in_discovery_conf(self):
''' Validate parameters in discovery.conf
-ttl_min
-ttl_max
-hc_max_miss
-policy
'''
# Changing the hc_max_miss=5 and verifying that the services are down
# after 25 sec
try:
cmd = 'cd /etc/contrail;sed -i \'/hc_max_miss.*=.*/c\hc_max_miss = 10\' discovery.conf'
for ip in self.inputs.cfgm_ips:
self.inputs.run_cmd_on_server(
ip, cmd, username='root', password='<PASSWORD>')
self.inputs.restart_service('contrail-discovery', [ip])
assert self.analytics_obj.verify_cfgm_uve_module_state(
self.inputs.collector_ips[0], self.inputs.cfgm_names[0], 'contrail-discovery')
result = True
svc_lst = []
svc_lst = self.ds_obj.get_all_control_services(self.inputs.cfgm_ip)
for elem in svc_lst:
if (self.ds_obj.get_service_status(self.inputs.cfgm_ip, service_touple=elem) == 'up'):
self.logger.info("Service %s is up" % (elem,))
result = result and True
else:
self.logger.warn("Service %s is down" % (elem,))
result = result and False
| |
z: z[0] * z[1])([m120, m220])
multiplied_out.append(mul20)
# Indices 9-15 of the element-wise product between each image-feature slice
# and its attention weight (replaces seven hand-unrolled copies; the index
# is bound via a lambda default argument to avoid late-binding bugs).
for idx in range(9, 16):
    img_slice = Lambda(lambda x, i=idx: x[:, i])(image_y)
    att_slice = Lambda(lambda y, i=idx: y[:, i])(pi)
    multiplied_out.append(Lambda(lambda z: z[0] * z[1])([img_slice, att_slice]))
pivi = Concatenate(axis=1)(multiplied_out)  # N*16*64
print(pivi.shape)
pivi = Reshape((16, 64))(pivi)
# Sum the 16 attended slices, then add the text feature (residual update).
attended_slices = [Lambda(lambda v, i=idx: v[:, i])(pivi) for idx in range(16)]
sum1 = Add()(attended_slices)
u = Lambda(lambda a: a[0] + a[1])([text_x, sum1])
#------------------------------------------------------------------------------------------------------------------------#
# Second attention hop: project u, score the 16 image regions against it,
# re-attend the image features, and add the text feature again.
hq = Dense(32)(u)
hq = Reshape((1, 32))(hq)
# Per-region attention score: Dense(1)(tanh(h_i + h_q)).  Replaces sixteen
# hand-unrolled copies; fixes the original's NameError where region 7 used
# the undefined `out23` instead of `ut23`.  Indices are bound via lambda
# default arguments to avoid late-binding bugs.
# NOTE(review): softmax over a single unit is constant 1.0 — a sigmoid, or
# one softmax across the 16 regions, was probably intended; preserved as-is.
hi_outputs = []
for idx in range(16):
    ut = Lambda(lambda x, i=idx: x[:, i])(hi)
    ut = Add()([ut, hq])
    ut = Activation('tanh')(ut)
    ut = Dense(1, activation='softmax')(ut)
    hi_outputs.append(ut)
pi = Concatenate(axis=1)(hi_outputs)  # N*16*1
image_y = Permute((2, 1))(image_x)  # N*32*16 -- TODO confirm shape
print(image_y.shape)
# Element-wise product of each image slice with its attention weight.
multiplied_out = []
for idx in range(16):
    img_slice = Lambda(lambda x, i=idx: x[:, i])(image_y)
    att_slice = Lambda(lambda y, i=idx: y[:, i])(pi)
    multiplied_out.append(Lambda(lambda z: z[0] * z[1])([img_slice, att_slice]))
pivi = Concatenate(axis=1)(multiplied_out)  # N*16*64
print(pivi.shape)
pivi = Reshape((16, 64))(pivi)
# Sum the 16 attended slices, then add the text feature (residual update).
attended_slices = [Lambda(lambda v, i=idx: v[:, i])(pivi) for idx in range(16)]
sum1 = Add()(attended_slices)
u = Lambda(lambda a: a[0] + a[1])([text_x, sum1])
output = Dense(num_labels, activation='softmax', | |
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
RfxDevices
==========
@authors: <NAME> (<NAME>)
@copyright: 2019
@license: GNU GPL
"""
from MDSplus import mdsExceptions, Device, Data, Int64, Int64Array, Uint64, Event, Float32
from MDSplus.mdsExceptions import DevCOMM_ERROR
from MDSplus.mdsExceptions import DevBAD_PARAMETER
from threading import Thread
from ctypes import CDLL, c_int, byref, c_byte, c_ulonglong, c_ubyte
from time import sleep
import sys
import numpy as np
import select
try:
import psycopg2
except:
pass
class SIG_SNAPSHOT(Device):
    """Signal snapshot device.

    Periodically averages up to 512 input signals over per-signal
    [START, END] windows centered on each time in TIMES, stores the results
    under SIGNAL_n:SNAPSHOTS (see AsynchStore), and can push them to a
    PostgreSQL database (rdb_store).

    NOTE(review): the original docstring described the NI 6683 timing card;
    that text appears to be a copy-paste leftover and did not match this
    code — confirm against the device's documentation.
    """
    # Fixed configuration nodes: database access parameters, the snapshot
    # reference times and the number of active signals.
    parts = [{'path':':COMMENT', 'type':'text'},
             {'path':':DATABASE', 'type':'text'},
             {'path':':DB_SERVER', 'type':'text'},
             {'path':':DB_USER', 'type':'text'},
             {'path':':DB_PASSWD', 'type':'text'},
             {'path':':TABLE', 'type':'text'},
             {'path':':TIMES', 'type':'numeric'},
             {'path':':NUM_SIGNALS', 'type':'numeric', 'value': 0},
             {'path':':UPDATE_SECS', 'type':'numeric', 'value': 1}]
    # Per-signal subtree: input expression, averaging algorithm/window and
    # the output snapshots node. START/END are offsets relative to TIMES.
    for sigIdx in range(512):
        parts.append({'path':'.SIGNAL_'+str(sigIdx+1), 'type':'structure'})
        parts.append({'path':'.SIGNAL_'+str(sigIdx+1)+':COL_NAME', 'type':'text'})
        parts.append({'path':'.SIGNAL_'+str(sigIdx+1)+':ALGORITHM', 'type':'text', 'value':'MEAN'})
        parts.append({'path':'.SIGNAL_'+str(sigIdx+1)+':PARAMS', 'type':'numeric'})
        parts.append({'path':'.SIGNAL_'+str(sigIdx+1)+':START', 'type':'numeric', 'value' : -1})
        parts.append({'path':'.SIGNAL_'+str(sigIdx+1)+':END', 'type':'numeric', 'value' : 1})
        parts.append({'path':'.SIGNAL_'+str(sigIdx+1)+':IN_SIGNAL', 'type':'numeric'})
        parts.append({'path':'.SIGNAL_'+str(sigIdx+1)+':SNAPSHOTS', 'type':'signal'})
    # Dispatch actions wired to the start_store/stop_store/rdb_store methods.
    parts.append({'path':':START_STORE','type':'action',
        'valueExpr':"Action(Dispatch('PXI_SERVER','PON',51,None),Method(None,'start_store',head))",
        'options':('no_write_shot',)})
    parts.append({'path':':STOP_STORE','type':'action',
        'valueExpr':"Action(Dispatch('PXI_SERVER','PPC',50,None),Method(None,'stop_store',head))",
        'options':('no_write_shot',)})
    parts.append({'path':':RDB_STORE','type':'action',
        'valueExpr':"Action(Dispatch('PXI_SERVER','PPC',50,None),Method(None,'rdb_store',head))",
        'options':('no_write_shot',)})
    # One background AsynchStore worker per device nid (class-level registry).
    workerDict = {}
class AsynchStore(Thread):
    """Background worker that periodically averages the configured input
    signals over their pending [start, end] windows and appends the result,
    one row per snapshot time, to SIGNAL_n:SNAPSHOTS."""

    def configure(self, device):
        """Read the device configuration into the worker.

        Loads the snapshot times, number of signals, poll period and the
        per-signal input/output nodes and window times.
        Raises DevCOMM_ERROR on missing/invalid configuration.
        """
        self.device = device
        try:
            self.times = device.times.data()
        except:
            Data.execute('DevLogErr($1,$2)', device.getNid(), 'Error reading snapshot times')
            raise DevCOMM_ERROR
        try:
            self.numSignals = device.num_signals.data()
        except:
            Data.execute('DevLogErr($1,$2)', device.getNid(), 'Error reading Number of signals')
            raise DevCOMM_ERROR
        try:
            self.updateSecs = device.update_secs.data()
        except:
            self.updateSecs = 1  # sensible default poll period
        self.inSignals = []
        self.snapshots = []
        self.endTimes = []
        self.startTimes = []
        self.isEmpty = []
        self.lastTimeServed = []
        for sigIdx in range(self.numSignals):
            print('signal_'+str(sigIdx + 1)+'_in_signal')
            self.inSignals.append(getattr(device, 'signal_'+str(sigIdx + 1)+'_in_signal'))
            self.snapshots.append(getattr(device, 'signal_'+str(sigIdx + 1)+'_snapshots'))
            self.lastTimeServed.append(self.times[0] - 1)
            try:
                # START/END are offsets; shift them to absolute times
                self.startTimes.append(getattr(device, 'signal_'+str(sigIdx + 1)+'_start').data() + self.times)
                self.endTimes.append(getattr(device, 'signal_'+str(sigIdx + 1)+'_end').data() + self.times)
            except:
                Data.execute('DevLogErr($1,$2)', device.getNid(), 'Error reading start or end for signal '+str(sigIdx+1))
                raise DevCOMM_ERROR
            self.isEmpty.append(True)
        self.stopReq = False

    def computeMean(self, samples, times, startTime, endTime):
        """Mean of samples whose timestamps lie in [startTime, endTime]."""
        return np.mean(samples[np.logical_and(times >= startTime, times <= endTime)])

    def computeMeanVECCHIA(self, samples, times, startTime, endTime):
        """Legacy ("vecchia" = old) loop-based mean over [startTime, endTime].

        Returns [] when no sample falls inside the window, otherwise a
        1-element array with the mean along axis 0.
        """
        summed = None
        nMean = 0
        nSamples = len(samples)
        if nSamples > len(times):
            nSamples = len(times)
        for idx in range(nSamples):
            if times[idx] >= startTime and times[idx] <= endTime:
                # BUG FIX: was `summed == None`, which is an element-wise
                # comparison on numpy arrays (ambiguous truth value for
                # multi-channel slices); use identity test instead.
                if summed is None:
                    # BUG FIX: copy the slice; `+=` on the bare slice view
                    # mutated the caller's samples array in place.
                    summed = samples[idx:idx+1].copy()
                else:
                    summed += samples[idx:idx+1]
                nMean += 1
            if times[idx] > endTime:
                break  # times are assumed monotonic: nothing later can match
        if summed is None:
            return []
        return summed / nMean

    def check(self):
        """One snapshot pass.

        For every signal whose acquired data now covers the next pending
        window(s), append the window mean to its SNAPSHOTS segment.
        Returns True when every window of every signal has been stored.
        """
        allDone = True
        minStart = 1E6  # sentinel: "no pending window found"
        for sigIdx in range(len(self.inSignals)):
            if len(self.startTimes[sigIdx]) > 0 and self.startTimes[sigIdx][0] < minStart:
                minStart = self.startTimes[sigIdx][0]
        if minStart == 1E6:  # no left times
            return True
        print('MIN START: '+str(minStart))
        for sigIdx in range(len(self.inSignals)):
            inSignal = self.inSignals[sigIdx]
            snapshots = self.snapshots[sigIdx]
            startTimes = self.startTimes[sigIdx]
            endTimes = self.endTimes[sigIdx]
            if len(startTimes) == 0:  # this signal is done
                continue
            print('Signal '+str(sigIdx))
            print('START TIMES: ' + str(startTimes))
            print('END TIMES: ' + str(endTimes))
            try:
                currData = inSignal.data()
                currTimes = inSignal.getDimensionAt(0).data()
                signalEnd = currTimes[-1]
            except:
                # the signal may simply have no data yet; retry next pass
                print('READOUT OF ' + inSignal.getFullPath()+' FAILED')
                continue
            print('SIGNAL '+ inSignal.getPath() + ' TERMINATES AT '+ str(signalEnd))
            # defend against data/time length mismatches
            if currData.shape[0] > currTimes.shape[0]:
                currData = currData[:currTimes.shape[0]]
            if currData.shape[0] < currTimes.shape[0]:
                currTimes = currTimes[:currData.shape[0]]
            # consume every window fully covered by the data read so far
            while len(endTimes) > 0 and endTimes[0] <= signalEnd:
                print('CONTEXT START : ' + str(startTimes[0]))
                print('CONTEXT END : ' + str(endTimes[0]))
                roi = currData[np.logical_and(currTimes >= startTimes[0], currTimes <= endTimes[0])]
                if roi.size > 0:
                    meanData = np.mean(roi, 0)
                    if self.isEmpty[sigIdx]:
                        # first datum for this signal: open a segment sized
                        # for one row per snapshot time
                        self.isEmpty[sigIdx] = False
                        shape = list(currData.shape)
                        shape[0] = len(self.times)
                        print('SHAPE: ' + str(shape))
                        print('DATA SHAPE: ' + str(currData.shape))
                        print(self.times)
                        snapshots.beginSegment(self.times[0], self.times[-1], self.times, np.zeros(shape, currData.dtype))
                    print('MEAN: '+ str(meanData) + str(currData.shape) + str(currTimes.shape))
                    try:
                        snapshots.putSegment(meanData)
                        print('PUT SEGMENT FATTA')
                    except:
                        print('PUT SEGMENT FALITA')
                # advance to the next window even when the ROI was empty,
                # otherwise an empty window would loop forever
                startTimes = startTimes[1:]
                endTimes = endTimes[1:]
                inSignal.getTree().setTimeContext(None, None, None)
            self.startTimes[sigIdx] = startTimes
            self.endTimes[sigIdx] = endTimes
        for sigIdx in range(len(self.inSignals)):
            if len(self.endTimes[sigIdx]) > 0:
                allDone = False
            # BUG FIX: this loop previously reset the time context through the
            # stale `inSignal` left over from the loop above; reset it on the
            # tree of the signal actually being inspected.
            self.inSignals[sigIdx].getTree().setTimeContext(None, None, None)
        return allDone

    def run(self):
        """Poll loop: sleep updateSecs seconds (checking stopReq each second),
        then run check(); exits when all windows are stored or on stop, with a
        final check() flushing pending windows on stop."""
        while not self.stopReq:
            for i in range(self.updateSecs):
                sleep(1)
                if self.stopReq:
                    break
            if self.stopReq:
                self.check()
                return
            allDone = self.check()
            print('ALL DONE: ' + str(allDone))
            if allDone or self.stopReq:
                return

    def stop(self):
        """Request termination; run() honors it within about a second."""
        self.stopReq = True
################End class AsynchStore
def start_store(self):
    """Start (or restart) the background AsynchStore worker for this device.

    Clears previously stored snapshots, stops a worker left over from an
    earlier start (if any), then configures and launches a new daemon worker
    registered in SIG_SNAPSHOT.workerDict under this device's nid.
    """
    nSignals = self.num_signals.data()
    # Clear output signals first
    for sigIdx in range(nSignals):
        getattr(self, 'signal_'+str(sigIdx + 1)+'_snapshots').deleteData()
    # Stop a previously registered worker, if it is still running.
    try:
        worker = SIG_SNAPSHOT.workerDict[self.nid]
        if worker.is_alive():  # isAlive() was removed in Python 3.9
            worker.stop()      # BUG FIX: was `worker.stop` — attribute
                               # access only, the stop was never requested
            worker.join()
    except KeyError:
        pass  # no worker registered yet for this nid
    worker = self.AsynchStore()
    SIG_SNAPSHOT.workerDict[self.nid] = worker
    worker.configure(self.copy())
    worker.daemon = True
    worker.start()
def stop_store(self):
    """Stop this device's background worker, flushing pending snapshots.

    Raises KeyError if start_store was never called for this nid
    (unchanged from the original behavior).
    :return: 1 on completion
    """
    worker = SIG_SNAPSHOT.workerDict[self.nid]
    if worker.is_alive():  # BUG FIX: isAlive() was removed in Python 3.9
        print("SIG_SNAPSHOT stop_worker")
        worker.stop()
        worker.join()
    return 1
def store(self):
    """Synchronous one-shot snapshot: clear the output nodes, then run a
    single check() pass on a freshly configured (but not started) worker.
    :return: 1 on completion
    """
    numSignals = self.num_signals.data()
    # wipe any previously stored snapshot segments
    for idx in range(numSignals):
        node = getattr(self, 'signal_' + str(idx + 1) + '_snapshots')
        node.deleteData()
    worker = self.AsynchStore()
    worker.configure(self.copy())
    worker.check()
    return 1
def rdb_store(self):
try:
times = self.times.data()
except:
return 1
if len(times) < 1:
return 1
signals = []
names = []
paths = []
numSignals = self.num_signals.data()
for sigIdx in range(numSignals):
try:
signals.append(getattr(self, 'signal_'+str(sigIdx + 1)+'_snapshots').data())
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'No snapshot acquired for signal '+str(sigIdx+1))
signals.append([])
paths.append("")
names.append("")
continue
paths.append(getattr(self, 'signal_'+str(sigIdx + 1)+'_snapshots').getFullPath())
try:
names.append(getattr(self, 'signal_'+str(sigIdx + 1)+'_col_name').data().lower())
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot read column name for signal '+ str(sigIdx+1))
raise DevCOMM_ERROR
try:
host = self.db_server.data()
except:
host = ''
try:
database = self.database.data()
user = self.db_user.data()
password = <PASSWORD>.db_<PASSWORD>()
table = self.table.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot read database access configuration')
raise DevCOMM_ERROR
try:
conn = psycopg2.connect(host=host,database=database, user=user, password=password)
except err:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot connect to database server '+host+'; '+str(err))
raise DevCOMM_ERROR
cursor = conn.cursor()
for sigIdx in range(len(signals)):
if names[sigIdx] == '':
continue
createCommand = 'CREATE TABLE IF NOT EXISTS '+names[sigIdx].upper()+'_table (id SERIAL PRIMARY KEY, experiment VARCHAR, shot INTEGER, sig_idx INTEGER, snap_time REAL'
createCommand += ','+names[sigIdx]+'_path VARCHAR,' + names[sigIdx] + ' REAL'
if len(signals[sigIdx].shape) == 2:
createCommand += '[]'
createCommand += ')'
print(createCommand)
try:
cursor.execute(createCommand)
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot create table: ')
raise DevCOMM_ERROR
for timeIdx in range(len(times)):
for sigIdx in range(len(signals)):
if names[sigIdx] == '':
continue
if len(signals[sigIdx]) <= timeIdx:
continue #do not update empty signals
checkQuery='SELECT experiment from '+ names[sigIdx]+'_table WHERE shot = '+ str(self.getTree().shot) + ' AND sig_idx = '+str(sigIdx) +' AND snap_time = '+str(times[timeIdx])
try:
cursor.execute(checkQuery)
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot make select query: ')
raise DevCOMM_ERROR
exps = cursor.fetchall()
if len(exps) == 0:
insertCommand = 'INSERT INTO ' + names[sigIdx]+'_table (experiment, shot, sig_idx, snap_time) VALUES(\''+self.getTree().name+'\','
insertCommand += str(self.getTree().shot)+', '+str(sigIdx)+', '+str(times[timeIdx])+')'
print(insertCommand)
try:
cursor.execute(insertCommand)
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot insert in table('+insertCommand+'): ')
print(sys.exc_info()[0])
raise DevCOMM_ERROR
insertCommand = 'UPDATE '+ names[sigIdx]+'_table SET '+names[sigIdx]+'_path = \''+ paths[sigIdx]+'\','+names[sigIdx]+' = '
if len(signals[sigIdx].shape) == 1:
if len(signals[sigIdx]) > timeIdx:
insertCommand += str(signals[sigIdx][timeIdx])
else:
insertCommand += '0'
elif len(signals[sigIdx].shape) == 2:
# insertCommand += ',ARRAY['+str(signals[sigIdx][timeIdx][0])
print(signals[sigIdx].shape)
insertCommand += 'ARRAY['+str(signals[sigIdx][timeIdx][0])
for i in range(1, len(signals[sigIdx][timeIdx])):
insertCommand += ','+str(signals[sigIdx][timeIdx][i])
insertCommand += ']'
insertCommand += ' WHERE experiment = \''+self.getTree().name+'\' AND shot = '+str(self.getTree().shot)+' AND sig_idx = '+str(sigIdx) +' AND snap_time = '+str(times[timeIdx])
print(insertCommand)
try:
cursor.execute(insertCommand)
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), | |
# Copyright (c) 2018 <NAME> at the University of Rochester
# This file is part of the Hoomd-Tensorflow plugin developed by <NAME>
import tensorflow as tf
import numpy as np
from os import path
import pickle
import hoomd
## \internal
# \brief load the TensorFlow variables from a checkpoint
#
# Adds variables from model_directory corresponding to names
# into the TensorFlow graph, optionally loading from a checkpoint
# other than the most recently saved one, or setting variable values
# with a feed_dict
def load_variables(model_directory, names, checkpoint=-1, feed_dict=None):
    """Load TensorFlow tensor values from a saved model.

    Imports the meta-graph found in ``model_directory``, restores a
    checkpoint (latest by default, a numbered one when ``checkpoint`` is an
    int, or an explicit path when it is a string) and evaluates the
    requested tensors.

    :param model_directory: directory containing ``model.meta`` and checkpoints
    :param names: tensor names to evaluate; ``:0`` is appended when missing
    :param checkpoint: -1 for latest, an int for a numbered checkpoint,
        or a checkpoint path string
    :param feed_dict: optional placeholder values used during evaluation
    :return: dict mapping both ``name:0`` and bare ``name`` to the values
    """
    # BUG FIX: mutable default argument ({}) replaced with a None sentinel.
    if feed_dict is None:
        feed_dict = {}
    # just in case
    tf.reset_default_graph()
    # load graph
    tf.train.import_meta_graph(path.join('{}/'.format(
        model_directory), 'model.meta'), import_scope='')
    # add colons if missing
    tf_names = [n + ':0' if len(n.split(':')) == 1 else n for n in names]
    run_dict = {n: tf.get_default_graph(
        ).get_tensor_by_name(n) for n in tf_names}
    with tf.Session() as sess:
        saver = tf.train.Saver()
        if checkpoint == -1:
            # restore the most recent checkpoint in the model directory
            checkpoint = tf.train.latest_checkpoint(model_directory)
            saver.restore(sess, checkpoint)
        elif type(checkpoint) == int:
            # restore a specific numbered checkpoint
            checkpoint_str = '{}{}model-{}'.format(model_directory,
                                                   path.sep, checkpoint)
            saver.restore(sess, checkpoint_str)
        else:
            # checkpoint is an explicit path
            saver.restore(sess, checkpoint)
        result = sess.run(run_dict, feed_dict=feed_dict)
    # expose each value under both 'name:0' and the bare 'name'
    combined_result = {}
    for k, v in result.items():
        combined_result[k] = v
        combined_result[k.split(':')[0]] = v
    return combined_result
## \internal
# \brief computes the U(r) for a given TensorFlow model
def compute_pairwise_potential(model_directory, r,
                               potential_tensor_name,
                               checkpoint=-1, feed_dict=None):
    R""" Compute the pairwise potential at r for the given model.

    Parameters
    ----------
    model_directory
        The model directory
    r
        A 1D grid of points at which to compute the potential.
    potential_tensor_name
        The tensor containing potential energy.
    checkpoint
        Which checkpoint to load. Default is -1, which loads latest checkpoint.
        An integer indicates loading
        from the model directory. If you pass a string, it is interpreted
        as a path.
    feed_dict
        Allows you to add any other placeholder values that need to be added
        to compute potential in your model. Default None (no extras).

    Returns
    -------
    A tuple ``(potential, forces)``: a 1D numpy array of potentials for the
    pairwise distances in r, and the (un-evaluated) TensorFlow tensor of
    summed pairwise forces built from the potential gradient.
    """
    # BUG FIX: mutable default argument ({}) replaced with a None sentinel.
    if feed_dict is None:
        feed_dict = {}
    # just in case
    tf.reset_default_graph()
    # load graph
    tf.train.import_meta_graph(path.join('{}/'.format(
        model_directory), 'model.meta'), import_scope='')
    with open('{}/graph_info.p'.format(model_directory), 'rb') as f:
        model_params = pickle.load(f)
    if ':' not in potential_tensor_name:
        potential_tensor_name = potential_tensor_name + ':0'
    potential_tensor = tf.get_default_graph(
        ).get_tensor_by_name(potential_tensor_name)
    nlist_tensor = tf.get_default_graph(
        ).get_tensor_by_name(model_params['nlist'])
    # build a minimal two-particle neighbor list; the separation enters via
    # column 1 (set to +/- ri inside the loop below)
    NN = model_params['NN']
    np_nlist = np.zeros((2, NN, 4))
    potential = np.empty(len(r))
    # force graph: gradient of the potential w.r.t. the neighbor list,
    # doubled (factor 2.0 — pairs appear in both particles' lists)
    nlist_forces = tf.gradients(potential_tensor, nlist_tensor)[0]
    nlist_forces = tf.identity(tf.math.multiply(tf.constant(2.0),
                                                nlist_forces),
                               name='nlist-pairwise-force'
                               '-gradient-raw')
    zeros = tf.zeros(tf.shape(nlist_forces))
    # zero out non-finite entries (e.g. divergences at r = 0)
    nlist_forces = tf.where(tf.is_finite(nlist_forces),
                            nlist_forces, zeros,
                            name='nlist-pairwise-force-gradient')
    nlist_reduce = tf.reduce_sum(nlist_forces, axis=1,
                                 name='nlist-force-gradient')
    forces = nlist_reduce
    with tf.Session() as sess:
        saver = tf.train.Saver()
        if checkpoint == -1:
            # get latest
            checkpoint_str = model_directory
            checkpoint = tf.train.latest_checkpoint(checkpoint_str)
            saver.restore(sess, checkpoint)
            checkpoint = 'latest'
        elif type(checkpoint) == int:
            # get specific checkpoint number
            checkpoint_str = '{}/model-{}'.format(model_directory, checkpoint)
            checkpoint = tf.train.load_checkpoint(checkpoint_str)
            saver.restore(sess, checkpoint_str)
        else:
            checkpoint_str = checkpoint
            checkpoint = tf.train.load_checkpoint(checkpoint_str)
            saver.restore(sess, checkpoint_str)
        for i, ri in enumerate(r):
            np_nlist[0, 0, 1] = ri
            np_nlist[1, 0, 1] = -ri
            # run including passed in feed_dict
            result = sess.run(potential_tensor, feed_dict={
                **feed_dict, nlist_tensor: np_nlist})
            potential[i] = result[0]
    return potential, forces
## \internal
# \brief Maps molecule-wise indices to particle-wise indices
def find_molecules(system):
    R""" Given a hoomd system, this will return a mapping
    from molecule index to particle index

    Builds the bond adjacency list once and then flood-fills each
    connected component, so the cost is O(N + B) instead of the original
    O(N * B) rescan of every bond per visited particle. The output is
    identical: each molecule's particle list is sorted ascending and the
    molecules are sorted by their minimum particle index.

    Parameters
    ---------
    system
        The molecular system in HOOMD (uses ``system.particles`` for the
        particle count and ``system.bonds`` with ``.a``/``.b`` indices).
    """
    N = len(system.particles)
    # adjacency list: one pass over the bonds (copy indices out for speed)
    adjacency = [[] for _ in range(N)]
    for bond in system.bonds:
        adjacency[bond.a].append(bond.b)
        adjacency[bond.b].append(bond.a)
    mapping = []
    unmapped = set(range(N))
    print('Finding molecules...', end='')
    while unmapped:
        print('\rFinding molecules...{:.2%}'.format(1 - len(unmapped) / N),
              end='')
        # seed a new molecule with any unvisited particle
        seed = unmapped.pop()
        molecule = [seed]
        stack = [seed]
        # depth-first traversal of the bond graph (handles branching)
        while stack:
            pi = stack.pop()
            for neighbor in adjacency[pi]:
                if neighbor in unmapped:
                    unmapped.remove(neighbor)
                    molecule.append(neighbor)
                    stack.append(neighbor)
        mapping.append(molecule)
    print('')
    # sort it to be ascending in min atom index in molecule
    for m in mapping:
        m.sort()
    mapping.sort(key=lambda x: min(x))
    return mapping
## \internal
# \brief Finds mapping operators for coarse-graining
def sparse_mapping(molecule_mapping, molecule_mapping_index,
                   system=None):
    R""" This will create the necessary indices and values for
    defining a sparse tensor in
    tensorflow that is a mass-weighted M x N mapping operator.

    Parameters
    -----------
    molecule_mapping
        This is a list of L x M matrices, where M is the number
        of atoms in the molecule and L is the number of coarse-grain
        sites that should come out of the mapping.
        There should be one matrix per molecule.
        The ordering of the atoms should follow
        what is defined in the output from find_molecules
    molecule_mapping_index
        This is the output from find_molecules.
    system
        The hoomd system. This is used to get mass values
        for the mapping, if you would like to
        weight by mass of the atoms.

    Returns
    -------
    A sparse tensorflow tensor of dimension M x N, where M is the total
    number of coarse-grain sites and N is the number of atoms.
    (The original docstring said N x N; the dense_shape is [M, N].)
    """
    import numpy as np
    assert type(molecule_mapping[0]) == np.ndarray
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24
    # (AttributeError); accept any integer dtype instead.
    assert np.issubdtype(molecule_mapping[0].dtype, np.integer)
    # get system size
    N = sum([len(m) for m in molecule_mapping_index])
    M = sum([m.shape[0] for m in molecule_mapping])
    # create indices
    indices = []
    values = []
    total_i = 0
    for mmi, mm in zip(molecule_mapping_index, molecule_mapping):
        idx = []
        vs = []
        masses = [0 for _ in range(mm.shape[0])]
        # iterate over CG particles
        for i in range(mm.shape[0]):
            # iterate over atoms
            for j in range(mm.shape[1]):
                # check if non-zero
                if mm[i, j] > 0:
                    # index -> CG particle, atom index
                    idx.append([i + total_i, mmi[j]])
                    if system is not None:
                        vs.append(system.particles[mmi[j]].mass)
                    else:
                        vs.append(1.)
        # now scale values by masses
        if system is not None:
            # accumulate the total mass of each CG site
            for i in range(len(idx)):
                masses[idx[i][0] - total_i] += vs[i]
            # make sure no CG site ended up massless
            assert sum([m == 0 for m in masses]) == 0
            for i in range(len(idx)):
                vs[i] /= masses[idx[i][0] - total_i]
        # all done
        indices.extend(idx)
        values.extend(vs)
        total_i += len(masses)
    return tf.SparseTensor(indices=indices, values=values, dense_shape=[M, N])
def eds_bias(cv, set_point, period, learning_rate=1, cv_scale=1, name='eds'):
    """Experiment-directed-simulation style bias coupling on a collective variable.

    Maintains running statistics (mean, sum of squared deviations) of ``cv``
    over a window of ``period`` calls — the first half of each period is
    burn-in — and once per period applies one Adam step to the coupling
    ``alpha`` so that the mean of ``cv`` is driven toward ``set_point``.

    :param cv: scalar tensor, the collective variable to bias
    :param set_point: target mean value for cv
    :param period: number of calls per statistics/update cycle
    :param learning_rate: Adam learning rate for the alpha update
    :param cv_scale: scaling applied to the alpha gradient
    :param name: prefix for the TF variable names
    :return: tensor that evaluates to alpha (with the update side effects)
    """
    # set-up variables
    mean = tf.get_variable('{}.mean'.format(name), initializer=0.0, trainable=False)
    ssd = tf.get_variable('{}.ssd'.format(name), initializer=0.0, trainable=False)
    n = tf.get_variable('{}.n'.format(name), initializer=0, trainable=False)
    alpha = tf.get_variable('{}.a'.format(name), initializer=0.0)
    # Reset the statistics at the start of each period: keep them while
    # n != 0, zero them when n == 0.
    # BUG FIX: `n == 0` compared the Variable object itself (Python identity,
    # constant False in TF1), so the mask was a constant and the intended
    # reset never tracked n; use an element-wise tf comparison.
    reset_mask = tf.cast(tf.not_equal(n, 0), tf.float32)
    reset_mean = mean.assign(mean * reset_mask)
    # BUG FIX: this op previously did `mean.assign(ssd * reset_mask)`,
    # clobbering the running mean instead of resetting ssd.
    reset_ssd = ssd.assign(ssd * reset_mask)
    # update statistics
    # do we update? - masked so the first half of the period is burn-in
    with tf.control_dependencies([reset_mean, reset_ssd]):
        update_mask = tf.cast(n > period // 2, tf.float32)
        delta = (cv - mean) * update_mask
        update_mean = mean.assign_add(delta / tf.cast(tf.maximum(1, n - period // 2), tf.float32))
        update_ssd = ssd.assign_add(delta * (cv - mean))
    # update grad once per period (at n == period - 1)
    with tf.control_dependencies([update_mean, update_ssd]):
        update_mask = tf.cast(tf.equal(n, period - 1), tf.float32)
        # NOTE(review): `ssd / period // 2` floor-divides the quotient by 2;
        # the intent was probably `/ (period // 2)` — confirm before changing.
        gradient = update_mask * - 2 * (cv - set_point) * ssd / period // 2 / cv_scale
        optimizer = tf.train.AdamOptimizer(learning_rate)
        update_alpha = tf.cond(tf.equal(n, period - 1),
                               lambda: optimizer.apply_gradients([(gradient, alpha)]),
                               lambda: tf.no_op())
    # update n. Should reset at period
    update_n = n.assign((n + 1) % period)
    with tf.control_dependencies([update_alpha, update_n]):
        alpha_dummy = tf.identity(alpha)
    return alpha_dummy
## \internal
# \brief Finds the center of mass of a set of particles
def center_of_mass(positions, mapping, system, name='center-of-mass'):
    R"""Compute the mapped center of mass of the given positions (N x 3)
    using the mapping operator (M x N), respecting periodic boundaries.
    Returns the mapped (coarse-grained) particle positions.

    Parameters
    ----------
    positions
        The tensor of particle positions
    mapping
        The coarse-grain mapping used to produce the particles in system
    system
        The system of particles (supplies the box dimensions)
    """
    # Circular-mean trick for periodic boundary conditions, adapted for a
    # -L to L box (hoomd box dimension is 2 * L); see
    # https://en.wikipedia.org/wiki/Center_of_mass
    # #Systems_with_periodic_boundary_conditions
    box_dim = [system.box.Lx, system.box.Ly, system.box.Lz]
    # map each coordinate onto an angle on the unit circle
    angles = positions / box_dim * 2 * np.pi
    mapped_cos = tf.sparse.matmul(mapping, tf.math.cos(angles))
    mapped_sin = tf.sparse.matmul(mapping, tf.math.sin(angles))
    mean_angle = tf.math.atan2(mapped_sin, mapped_cos)
    # convert the mean angle back into a box coordinate
    return tf.identity(mean_angle / np.pi / 2 * box_dim, name=name)
## \internal
# \brief Calculates the neihgbor list | |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
__docformat__ = 'reStructuredText'
import logging
import re
import sys
import grequests
import datetime
import pkg_resources
from volttron.platform.agent.base_weather import BaseWeatherAgent
from volttron.platform.agent import utils
from volttron.utils.docs import doc_inherit
from volttron.platform import jsonapi
# requests should be imported after grequests
# as grequests monkey patches ssl and requests imports ssl
# TODO do we need the requests at all.. TODO test with RMQ
import requests
__version__ = "2.0.0"
utils.setup_logging()
# module-level logger used by the agent and helper functions below
_log = logging.getLogger(__name__)
# RPC service name for the hourly forecast endpoint.
SERVICE_HOURLY_FORECAST = "get_hourly_forecast"
# "lat,long": optional sign, 1-3 integer digits, optional 1-4 decimals.
# NOTE(review): `\.` inside a non-raw string relies on Python passing
# unknown escapes through (DeprecationWarning on newer versions); a raw
# string r"..." would be cleaner — confirm before changing.
LAT_LONG_REGEX = re.compile(
    "^-?[0-9]{1,3}(\.[0-9]{1,4})?,( |t?)-?[0-9]{1,3}(\.[0-9]{1,4})?$")
# Observation station id: 'K' or 'k' followed by three letters (e.g. KSEA).
STATION_REGEX = re.compile("^[Kk][a-zA-Z]{3}$")
# Weather forecast office code: exactly three uppercase letters.
WFO_REGEX = re.compile("^[A-Z]{3}$")
def weather_agent(config_path, **kwargs):
    """
    Factory for the WeatherDotGov agent.

    :param config_path: either an already-parsed config dictionary or a
        string path to a config file to load.
    :param kwargs: keyword arguments passed through to the agent.
    :return: an instance of the WeatherDotGov Agent
    """
    if isinstance(config_path, dict):
        config_dict = config_path
    else:
        # load and parse the configuration file from disk
        config_dict = utils.load_config(config_path)
    _log.debug("config_dict before init: {}".format(config_dict))
    # merge file-based configuration into the constructor arguments
    utils.update_kwargs_with_config(kwargs, config_dict)
    return WeatherDotGovAgent(**kwargs)
class WeatherDotGovAgent(BaseWeatherAgent):
    """
    Concrete implementation of the base weather agent for querying the
    NOAA/weather.gov weather api. Supports current observations (by
    station id) and hourly forecasts (by lat/long or gridpoint); hourly
    historical data is not provided by this api and is removed in __init__.
    """
def __init__(self, **kwargs):
    """Initialize the agent, set request headers and drop the
    hourly-historical service, which weather.gov does not offer."""
    super(WeatherDotGovAgent, self).__init__(**kwargs)
    # ask the api for plain JSON responses in English
    self.headers = {"Accept": "application/json",
                    "Accept-Language": "en-US"
                    }
    self.remove_service("get_hourly_historical")
def get_update_interval(self, service_name):
    """
    Get the timedelta between api service updates.
    :param service_name: name of service stored in api_services
    :return: datetime.timedelta object representing the time between
    the api's service updates, or None for unrecognized services
    """
    # both supported weather.gov services refresh hourly
    update_intervals = {
        "get_current_weather": datetime.timedelta(hours=1),
        "get_hourly_forecast": datetime.timedelta(hours=1),
    }
    return update_intervals.get(service_name)
def get_point_name_defs_file(self):
    """
    Provides the point-name mapping csv shipped with the package.
    :return: file path of the csv mapping service point names to
    standard point names (the base agent builds the dict from it)
    """
    # returning resource file instead of stream, as csv.DictReader require file path or file like object opened in
    # text mode.
    return pkg_resources.get_resource_filename(__name__, "data/name_mapping.csv")
def get_location_string(self, location):
    """
    Convert a location dictionary into the string form used in request
    urls, trying lat/long, then station, then gridpoint formats.
    :param location: location dictionary formatted as for a specific
    request.
    :return: string representation of location dictionary for request url.
    :raises ValueError: when no known location format matches
    """
    # guard-clause dispatch: first matching format wins
    if location.get('lat') and location.get('long'):
        return self.get_lat_long_str(location)
    if location.get('station'):
        return self.get_station_str(location)
    if location.get("wfo") and location.get("x") and location.get("y"):
        return self.get_gridpoints_str(location)
    raise ValueError("Invalid location {}".format(location))
def get_api_description(self, service_name):
    """
    Provides the api description string for a given api service.
    Primarily used during concrete agent startup.
    :param service_name: name of the api service
    :return: string describing the function of the api endpoint, along with
    rpc call usage for the weather agent.
    :raises RuntimeError: for services weather.gov does not implement
    """
    # BUG FIX: the comparisons used `is` (object identity), which only
    # worked via CPython string interning and emits SyntaxWarning on
    # Python >= 3.8; use equality.
    if service_name == "get_current_weather":
        return "Provides current weather observations by station via RPC " \
               "(Requires {'station': <station id>}"
    elif service_name == "get_hourly_forecast":
        return "Provides <hours> (optional) hours of forecast " \
               "predictions by lat/long or gridpoint location " \
               "via RPC (Requires {'wfo': <wfo string>, 'x': <x " \
               "coordinate>, 'y': <y coordinate>} or" \
               "{'lat': <latitude>, 'long': <longitude>}"
    else:
        raise RuntimeError(
            "Service {} is not implemented by weather.gov.".format(
                service_name))
@staticmethod
def get_lat_long_str(location_dict):
    """
    Converts a location dictionary using lat/long format into string
    format to be used in a request url, e.g. "39.7392,-104.9903".
    :param location_dict: location dictionary for the upcoming request.
    Expects lat/long
    :return: url formatted location string
    """
    # Missing keys render as "None"; format validation happens separately
    # in validate_location_formats.
    return "{},{}".format(location_dict.get("lat"),
                          location_dict.get("long"))
@staticmethod
def get_station_str(location_dict):
    """
    Converts a location dictionary using station format into string
    format to be used in a request url.
    :param location_dict: location dictionary for the upcoming request.
    Expects station id (e.g. "KSEA")
    :return: url formatted location string (the raw station id)
    """
    return location_dict.get("station")
@staticmethod
def get_gridpoints_str(location_dict):
    """
    Converts a location dictionary using gridpoints format into string
    format to be used in a request url, e.g. "TOP/31,80".
    :param location_dict: location dictionary for the upcoming request.
    Expects gridpoint format (wfo office code plus x/y grid coordinates)
    :return: url formatted location string
    """
    return "{}/{},{}".format(location_dict.get("wfo"),
                             location_dict.get("x"), location_dict.get("y"))
def validate_location(self, service_name, location):
    """
    Intermediate method for validating location dicts passed by rpc
    calls. Validity depends on the service being requested.
    :param service_name: name of the api service which the location
    dictionary is intended to be used for.
    :param location: location dictionary to validate for the api service
    :return: boolean indicating whether the location/service combination
    is valid for the weather api.
    """
    # current weather requires a station id; forecasts accept gridpoint
    # or lat/long locations
    if service_name == "get_current_weather":
        accepted = ("station",)
    else:
        accepted = ("gridpoints", "lat/long")
    return self.validate_location_formats(accepted, location)
def validate_location_formats(self, accepted_formats, location):
    """
    Regular expression comparison to validate the various location
    dictionary formats
    :param accepted_formats: string representations of the acceptable
    location formats for an api service
    :param location: location dictionary to validate for the api service
    :return: boolean representing the validity of the location
    """
    if ("lat/long" in accepted_formats) and (
            location.get('lat') and location.get('long')):
        location_string = self.get_lat_long_str(location)
        if LAT_LONG_REGEX.match(location_string):
            return True
        # BUG FIX: previously fell through and returned None here
        return False
    elif ("station" in accepted_formats) and (location.get('station')):
        location_string = self.get_station_str(location)
        if STATION_REGEX.match(location_string):
            return True
        else:
            _log.debug("station did not match regex")
            return False
    elif ("gridpoints" in accepted_formats) and (
            location.get("wfo") and location.get("x") and location.get(
            "y")):
        if WFO_REGEX.match(location.get("wfo")) and (
                1 <= len(str(location.get("x"))) <= 3) and \
                (1 <= len(str(location.get("y"))) <= 3):
            return True
        else:
            return False
    # BUG FIX: no accepted format matched; previously returned None
    # implicitly — return an explicit (equally falsy) False.
    return False
@staticmethod
def generate_response_error(url, response_code):
    """
    Raise a descriptive RuntimeError based on the HTTP response code
    returned by a service.
    :param url: actual url used for requesting data from weather.gov
    :param response_code: HTTP response code returned by a service
    following a request
    :raises RuntimeError: always — one message per status-code class
    """
    # collapse the code to its hundreds class (2xx, 3xx, ...)
    series = int(response_code / 100)
    if series == 2:
        # 2xx with no usable payload
        raise RuntimeError(
            "Remote API returned no data(code:{}, url:{})".format(
                response_code, url))
    if series == 3:
        raise RuntimeError(
            "Remote API redirected request, "
            "but redirect failed (code:{}, url:{})".format(response_code,
                                                           url))
    if series == 4:
        raise RuntimeError(
            "Invalid request ({}) Remote API returned "
            " Code {}".format(url, response_code))
    if series == 5:
        raise RuntimeError(
            "Remote API returned invalid response "
            "(code:{}, url:{})".format(response_code, url))
    raise RuntimeError(
        "API request failed with unexpected response "
        "code (code:{}, url:{})".format(response_code, url))
@doc_inherit
def query_current_weather(self, location):
    """
    Returns the latest weather observation provided by the api via an
    http request.
    :param location: accepts station id only ({"station": "Kxxx"});
    any other format raises ValueError
    :return: time of data observation as a timestamp string,
    data dictionary containing weather data points
    """
    if location.get('station'):
        formatted_location = self.get_location_string(location)
        url = "https://api.weather.gov/stations/{}/" \
              "observations/latest".format(formatted_location)
    else:
        raise ValueError('Invalid location. Expected format is:'
                         '{"station":"station_id_value"}')
    # single async GET mapped synchronously with a 5 second timeout
    grequest = [grequests.get(url, verify=requests.certs.where(),
                              headers=self.headers, timeout=5)]
    gresponse = grequests.map(grequest)[0]
    if gresponse is None:
        raise RuntimeError("get request did not return any "
                           "response")
    try:
        response = jsonapi.loads(gresponse.content)
        properties = response["properties"]
        observation_time = properties["timestamp"]
        return observation_time, properties
    except ValueError:
        # body was not JSON: translate the HTTP status into a RuntimeError.
        # NOTE(review): a JSON body missing "properties"/"timestamp" raises
        # an uncaught KeyError here — confirm whether that is intended.
        self.generate_response_error(url, gresponse.status_code)
@doc_inherit
def query_forecast_service(self, service, location, quantity, forecast_start):
"""
Returns forecast weather from Weather.gov for requested forecast service
:param service: forecast service to query, Weather.gov provides only
hourly
:param location: currently | |
: 'ardent'
, '12425' : 'are' , '12426' : 'area' , '12431' : 'arena' , '12432' : 'ares' , '12433' : 'argive'
, '12434' : 'argo' , '12435' : 'argon' , '12436' : 'argot' , '12441' : 'argue' , '12442' : 'argus'
, '12443' : 'arhat' , '12444' : 'arid' , '12445' : 'aries' , '12446' : 'arise' , '12451' : 'ark'
, '12452' : 'arlen' , '12453' : 'arlene' , '12454' : 'arm' , '12455' : 'armco' , '12456' : 'army'
, '12461' : 'arnold' , '12462' : 'aroma' , '12463' : 'arose' , '12464' : 'arpa' , '12465' : 'array'
, '12466' : 'arrear' , '12511' : 'arrow' , '12512' : 'arson' , '12513' : 'art' , '12514' : 'artery'
, '12515' : 'arthur' , '12516' : 'artie' , '12521' : 'arty' , '12522' : 'aruba' , '12523' : 'arum'
, '12524' : 'aryl' , '12525' : 'as' , '12526' : 'ascend' , '12531' : 'ash' , '12532' : 'ashen'
, '12533' : 'asher' , '12534' : 'ashley' , '12535' : 'ashy' , '12536' : 'asia' , '12541' : 'aside'
, '12542' : 'ask' , '12543' : 'askew' , '12544' : 'asleep' , '12545' : 'aspen' , '12546' : 'aspire'
, '12551' : 'ass' , '12552' : 'assai' , '12553' : 'assam' , '12554' : 'assay' , '12555' : 'asset'
, '12556' : 'assort' , '12561' : 'assure' , '12562' : 'aster' , '12563' : 'astm' , '12564' : 'astor'
, '12565' : 'astral' , '12566' : 'at' , '12611' : 'at&t' , '12612' : 'ate' , '12613' : 'athens'
, '12614' : 'atlas' , '12615' : 'atom' , '12616' : 'atomic' , '12621' : 'atone' , '12622' : 'atop'
, '12623' : 'attic' , '12624' : 'attire' , '12625' : 'au' , '12626' : 'aubrey' , '12631' : 'audio'
, '12632' : 'audit' , '12633' : 'aug' , '12634' : 'auger' , '12635' : 'augur' , '12636' : 'august'
, '12641' : 'auk' , '12642' : 'aunt' , '12643' : 'aura' , '12644' : 'aural' , '12645' : 'auric'
, '12646' : 'austin' , '12651' : 'auto' , '12652' : 'autumn' , '12653' : 'av' , '12654' : 'avail'
, '12655' : 'ave' , '12656' : 'aver' , '12661' : 'avert' , '12662' : 'avery' , '12663' : 'aviate'
, '12664' : 'avid' , '12665' : 'avis' , '12666' : 'aviv' , '13111' : 'avoid' , '13112' : 'avon'
, '13113' : 'avow' , '13114' : 'aw' , '13115' : 'await' , '13116' : 'awake' , '13121' : 'award'
, '13122' : 'aware' , '13123' : 'awash' , '13124' : 'away' , '13125' : 'awe' , '13126' : 'awful'
, '13131' : 'awl' , '13132' : 'awn' , '13133' : 'awoke' , '13134' : 'awry' , '13135' : 'ax'
, '13136' : 'axe' , '13141' : 'axes' , '13142' : 'axial' , '13143' : 'axiom' , '13144' : 'axis'
, '13145' : 'axle' , '13146' : 'axon' , '13151' : 'ay' , '13152' : 'aye' , '13153' : 'ayers'
, '13154' : 'az' , '13155' : 'aztec' , '13156' : 'azure' , '13161' : 'b' , '13162' : "b's"
, '13163' : 'ba' , '13164' : 'babe' , '13165' : 'babel' , '13166' : 'baby' , '13211' : 'bach'
, '13212' : 'back' , '13213' : 'backup' , '13214' : 'bacon' , '13215' : 'bad' , '13216' : 'bade'
, '13221' : 'baden' , '13222' : 'badge' , '13223' : 'baffle' , '13224' : 'bag' , '13225' : 'baggy'
, '13226' : 'bah' , '13231' : 'bahama' , '13232' : 'bail' , '13233' : 'baird' , '13234' : 'bait'
, '13235' : 'bake' , '13236' : 'baku' , '13241' : 'bald' , '13242' : 'baldy' , '13243' : 'bale'
, '13244' : 'bali' , '13245' : 'balk' , '13246' : 'balkan' , '13251' : 'balky' , '13252' : 'ball'
, '13253' : 'balled' , '13254' : 'ballot' , '13255' : 'balm' , '13256' : 'balmy' , '13261' : 'balsa'
, '13262' : 'bam' , '13263' : 'bambi' , '13264' : 'ban' , '13265' : 'banal' , '13266' : 'band'
, '13311' : 'bandit' , '13312' : 'bandy' , '13313' : 'bane' , '13314' : 'bang' , '13315' : 'banish'
, '13316' : 'banjo' , '13321' : 'bank' , '13322' : 'banks' , '13323' : 'bantu' , '13324' : 'bar'
, '13325' : 'barb' , '13326' : 'bard' , '13331' : 'bare' , '13332' : 'barfly' , '13333' : 'barge'
, '13334' : 'bark' , '13335' : 'barley' , '13336' : 'barn' , '13341' : 'barnes' , '13342' : 'baron'
, '13343' : 'barony' , '13344' : 'barr' , '13345' : 'barre' , '13346' : 'barry' , '13351' : 'barter'
, '13352' : 'barth' , '13353' : 'barton' , '13354' : 'basal' , '13355' : 'base' , '13356' : 'basel'
, '13361' : 'bash' , '13362' : 'basic' , '13363' : 'basil' , '13364' : 'basin' , '13365' : 'basis'
, '13366' : 'bask' , '13411' : 'bass' , '13412' : 'bassi' , '13413' : 'basso' , '13414' : 'baste'
, '13415' : 'bat' , '13416' : 'batch' , '13421' : 'bate' , '13422' : 'bater' , '13423' : 'bates'
, '13424' : 'bath' , '13425' : 'bathe' , '13426' : 'batik' , '13431' : 'baton' , '13432' : 'bator'
, '13433' : 'batt' , '13434' : 'bauble' , '13435' : 'baud' , '13436' : 'bauer' , '13441' : 'bawd'
, '13442' : 'bawdy' , '13443' : 'bawl' , '13444' : 'baxter' , '13445' : 'bay' , '13446' : 'bayda'
, '13451' : 'bayed' , '13452' : 'bayou' , '13453' : 'bazaar' , '13454' : 'bb' , '13455' : 'bbb'
, '13456' : 'bbbb' , '13461' : 'bc' , '13462' : 'bcd' , '13463' : 'bd' , '13464' : 'be'
, '13465' : 'beach' , '13466' : 'bead' , '13511' : 'beady' , '13512' : 'beak' , '13513' : 'beam'
, '13514' : 'bean' , '13515' : 'bear' , '13516' : 'beard' , '13521' : 'beast' , '13522' : 'beat'
, '13523' : 'beau' , '13524' : 'beauty' , '13525' : 'beaux' , '13526' : 'bebop' , '13531' : 'becalm'
, '13532' : 'beck' , '13533' : 'becker' , '13534' : 'becky' , '13535' : 'bed' , '13536' : 'bedim'
, '13541' : 'bee' , '13542' : 'beebe' , '13543' : 'beech' , '13544' : 'beef' , '13545' : 'beefy'
, '13546' : 'been' , '13551' : 'beep' , '13552' : 'beer' , '13553' : 'beet' , '13554' : 'befall'
, '13555' : 'befit' , '13556' : 'befog' , '13561' : 'beg' , '13562' : 'began' , '13563' : 'beget'
, '13564' : 'beggar' , '13565' : 'begin' , '13566' : 'begun' , '13611' : 'behind' , '13612' : 'beige'
, '13613' : 'being' , '13614' : 'beirut' , '13615' : 'bel' , '13616' : 'bela' , '13621' : 'belch'
, '13622' : 'belfry' , '13623' : 'belie' , '13624' : 'bell' , '13625' : 'bella' , '13626' : 'belle'
, '13631' : 'belly' , '13632' : 'below' , '13633' : 'belt' , '13634' : 'bema' , '13635' : 'beman'
, '13636' : 'bemoan' , '13641' : 'ben' , '13642' : 'bench' , '13643' : 'bend' , '13644' : 'bender'
, '13645' : 'benny' , '13646' : 'bent' , '13651' : 'benz' , '13652' : 'berea' , '13653' : 'bereft'
, '13654' : 'beret' , '13655' : 'berg' , '13656' : 'berlin' , '13661' : 'bern' , '13662' : 'berne'
, '13663' : 'bernet' , '13664' : 'berra' , '13665' : 'berry' , '13666' : 'bert' , '14111' : 'berth'
, '14112' : 'beryl' , '14113' : 'beset' , '14114' : 'bess' , '14115' : 'bessel' , '14116' : 'best'
, '14121' : 'bestir' , '14122' : 'bet' , '14123' : 'beta' , '14124' : 'betel' , '14125' : 'beth'
, '14126' : 'bethel' , '14131' : 'betsy' , '14132' : 'bette' , '14133' : 'betty' , '14134' : 'bevel'
, '14135' : 'bevy' , '14136' : 'beware' , '14141' : 'bey' , '14142' : 'bezel' , '14143' : 'bf'
, '14144' : 'bg' , '14145' | |
= average_precision_score(truth, scores)
fpr, tpr, thrs = roc_curve(truth, scores)
area = roc_auc_score(truth, scores)
dt[rr_file] = truth
ds[rr_file] = scores
metrics[rr_file] = [fpr, tpr, thrs, area, pr]
plt.figure()
lw = 2
for rr_file in rr_files:
i = rr_files.index(rr_file)
[fpr, tpr, thrs, area, pr] = metrics[rr_file]
plt.plot(fpr, tpr, lw=lw, label='{} (AUC-ROC={}, AUPR={})'.format(labels[i], format(area, '.3f'),
format(pr, '.3f')))
plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right", prop={'size': 8})
if save:
plt.savefig(save, dpi=300)
plt.show()
def canpredict_denovo(self, method='count', threshold=0.0, topX=10, ind_id=None, proteins=None,
minimize=None, consensus=True, cmpd_set='all', save=''):
"""!
This function is used for predicting putative therapeutics for an indication
of interest by summing/counting the number of interactions above a certain input interaction
threshold for all proteins or a specified subset of proteins. An indication can be specified to
mark drugs associated with that indication in the output. The threshold will vary based on the
values of the input matrix. Method can be 'count' (score1), which ranks compounds based on the
number of interactions above the threshold, 'sum' (score2), which ranks the compounds based on the
highest total sum for interaction scores above the threshold (these two are highly correlated but can
differ for larger sets of proteins or lower thresholds), 'min', which first ranks by 'count' then re-ranks
based on the summed interactions with the proteins in the input 'minimize' list - this list should contain
proteins IDs towards which the user wants low interaction scores - or 'diff', which ranks by the difference of
sums and the summed scores from off-targets in 'minimize'. A fifth option is 'targets', which inspects
and outputs the top protein interactions on an individual basis without summing/counting per drug (the
output format differs from the other two options). If indication_proteins flag is used for
the CANDO object instantiation, the proteins associated with the input indication will automatically
be used. Otherwise, the 'proteins=' input can be used. The output can be saved to a file specified
by 'save='. If ind_id is used, compounds associated with the indication will be included and marked
in the output for comparison.
@param method str: 'sum', 'count', or 'targets'
@param threshold float: a interaction score cutoff to use (ignores values for sum/count less than threshold)
@param topX int: top number of predicted Compounds to be printed/saved
@param ind_id str: an indication id for marking drug output/ specifying protein set
@param proteins List str: list of protein IDs to use from the matrix
@param minimize List str: list of protein IDs to treat as 'off targets' to avoid, ranking
@param consensus bool: if True, only compounds with score1 >= 2 will be printed
@param cmpd_set str: specify the compound set to use ('all', 'approved', or 'other')
@param save str: name of a file to save results
@return Returns None
"""
if ind_id:
ind = self.get_indication(ind_id)
c_dct = {}
top_hits = []
min_hits = []
if self.indication_proteins and ind_id:
indices = []
for p in ind.proteins:
indices.append(self.protein_id_to_index[p.id_])
elif proteins:
indices = []
for p in proteins:
if type(p) is str:
indices.append(self.protein_id_to_index[p])
elif type(p) is int:
indices.append(p)
elif type(p) is Protein:
indices.append(self.protein_id_to_index[p.id_])
else:
indices = range(len(self.proteins))
if minimize is None:
minimize = []
for c in self.compounds:
ss = 0.0
count = 0
min_ss = 0.0
min_count = 0
for pi in indices:
si = float(c.sig[pi])
p = self.proteins[pi]
if si >= threshold:
if p.id_ in minimize:
min_ss += si
min_count += 1
top_hits.append((p.id_, c, si, False))
else:
ss += si
count += 1
top_hits.append((p.id_, c, si, True))
if ind_id:
already_approved = ind in c.indications
else:
already_approved = False # Not relevant since there is no indication
c_dct[c.id_] = [ss, count, already_approved, min_ss, min_count]
if method == 'sum':
sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0], x[1][1]))[::-1]
elif method == 'count':
sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][1], x[1][0]))[::-1]
elif method == 'min':
sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][1], x[1][3]*-1))[::-1]
elif method == 'diff':
sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0] - x[1][3]))[::-1]
elif method == 'targets':
sp = sorted(top_hits, key=lambda x: x[2])[::-1]
print('target \tscore\toff_target\tid\tapproved\tname')
if save:
fo = open(save, 'w')
fo.write('target \tscore\toff_target\tid\tapproved\tname\n')
for s in sp:
co = s[1]
if cmpd_set == 'approved':
if co.status == 'approved' or (co in ind.compounds):
pass
else:
continue
st = '{}\t{}\t{}\t{}\t{}\t{}'.format(s[0].ljust(8), round(s[2], 3), co.id_,
str(s[3]).lower().ljust(10),
(str(co.status == 'approved').lower()).ljust(8), co.name)
print(st)
fo.write(st + '\n')
return
else:
sorted_x = []
print('Please enter a valid ranking method -- quitting.')
quit()
if save:
fo = open(save, 'w')
fo.write('rank\tscore1\tscore2\toffhits\tdiff\tid\tapproved\tname\n')
print("Printing the {} highest predicted compounds...\n".format(topX))
i = 0
print('rank\tscore1\tscore2\toffhits\tdiff\tid\tapproved\tname')
for p in enumerate(sorted_x):
if i >= topX != -1:
break
else:
if consensus and p[1][1][1] <= 1:
if i == 0:
print('\n\tFAILED - there are no compounds with score1 >= 2 -- change the\n'
'\targuments to include "consensus=False" to print results with\n'
'\tscore1 == 1, or lower the threshold.\n')
break
co = self.get_compound(p[1][0])
if cmpd_set == 'approved':
if co.status != 'approved':
if ind_id:
if co in ind.compounds:
pass
else:
continue
else:
continue
if p[1][1][2]:
diff = str(round(p[1][1][0] - p[1][1][3], 3))[0:7].ljust(7)
st = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(i + 1, p[1][1][1], str(round(p[1][1][0], 3))[0:7],
str(round(p[1][1][3], 3))[0:7].ljust(7), diff, co.id_,
(str(co.status == 'approved').lower() + '+').ljust(8),
co.name)
else:
diff = str(round(p[1][1][0] - p[1][1][3], 3))[0:7].ljust(7)
st = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(i + 1, p[1][1][1], str(round(p[1][1][0], 3))[0:7],
str(round(p[1][1][3], 3))[0:7].ljust(7), diff, co.id_,
(str(co.status == 'approved').lower()).ljust(8),
co.name)
print(st)
i += 1
if save:
fo.write(st + '\n')
return
def canpredict_compounds(self, ind_id, n=10, topX=10, consensus=True, keep_associated=False, cmpd_set='all',
save=''):
"""!
This function is used for predicting putative therapeutics for an indication
of interest using a homology-based approach. Input an ind_id id and for each of the
associated compounds, it will generate the similar compounds (based on distance) and add
them to a dictionary with a value of how many times it shows up (enrichment). If a
compound not approved for the indication of interest keeps showing
up, that means it is similar in signature to the drugs that are
ALREADY approved for the indication, so it may be a target for repurposing.
Control how many similar compounds to consider with the argument 'n'. In the output, 'score1'
refers to the number of times the compound shows up in the top 'n' drugs associated with
the indication and 'score2' is the average of the ranks for 'score1' (note: 'score2' <= 'n').
@param ind_id str: Indication id
@param n int: top number of similar Compounds to be used for each Compound associated with the given Indication
@param topX int: top number of predicted Compounds to be printed
@param consensus bool: if True, only compounds with at least 2 votes will be printed
@param keep_associated bool: Print Compounds that are already approved/associated for the Indication
@param cmpd_set str: specify the compound set to use ('all', 'approved', or 'other')
@param save str: name of a file to save results
@return Returns None
"""
if int(topX) == -1:
topX = len(self.compounds)-1
if int(n) == -1:
n = len(self.compounds)-1
i = self.indication_ids.index(ind_id)
ind = self.indications[i]
print("{0} compounds found for {1} --> {2}".format(len(ind.compounds), ind.id_, ind.name))
if self.pathways:
if self.indication_pathways:
self.quantify_pathways(ind)
else:
self.quantify_pathways()
for c in ind.compounds:
if c.similar_computed:
continue
if self.pathways:
self.generate_similar_sigs(c, aux=True, sort=True)
elif self.indication_proteins:
self.generate_similar_sigs(c, sort=True, proteins=ind.proteins)
else:
self.generate_similar_sigs(c, sort=True)
print("Generating compound predictions using top{} most similar compounds...\n".format(n))
c_dct = {}
for c in ind.compounds:
c2_i = 0
c_count = 0
while c_count < n:
c2 = c.similar[c2_i]
if c2[0].status != 'approved' and cmpd_set == 'approved':
c2_i += 1
continue
if c2[1] == 0.0:
c2_i += 1
continue
already_approved = ind in c2[0].indications
k = c2[0].id_
if k not in c_dct:
c_dct[k] = [1, already_approved, c_count]
else:
c_dct[k][0] += 1
c_dct[k][2] += c_count
c2_i += 1
c_count += 1
sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0], (-1 * (x[1][2] / x[1][0]))))[::-1]
i = 0
if save:
fo = open(save, 'w')
fo.write('rank\tscore1\tscore2\tprobability\tid\tapproved\tname\n')
else:
print('rank\tscore1\tscore2\tprobability\tid\tapproved\tname')
hg_dct = {}
for p in enumerate(sorted_x):
if i >= topX != -1:
| |
cluster, security group, snapshot or parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.
See also: AWS API Documentation
:example: response = client.describe_events(
SourceIdentifier='string',
SourceType='cluster'|'cluster-parameter-group'|'cluster-security-group'|'cluster-snapshot',
StartTime=datetime(2015, 1, 1),
EndTime=datetime(2015, 1, 1),
Duration=123,
MaxRecords=123,
Marker='string'
)
:type SourceIdentifier: string
:param SourceIdentifier: The identifier of the event source for which events will be returned. If this parameter is not specified, then all sources are included in the response.
Constraints:
If SourceIdentifier is supplied, SourceType must also be provided.
Specify a cluster identifier when SourceType is cluster .
Specify a cluster security group name when SourceType is cluster-security-group .
Specify a cluster parameter group name when SourceType is cluster-parameter-group .
Specify a cluster snapshot identifier when SourceType is cluster-snapshot .
:type SourceType: string
:param SourceType: The event source to retrieve events for. If no value is specified, all events are returned.
Constraints:
If SourceType is supplied, SourceIdentifier must also be provided.
Specify cluster when SourceIdentifier is a cluster identifier.
Specify cluster-security-group when SourceIdentifier is a cluster security group name.
Specify cluster-parameter-group when SourceIdentifier is a cluster parameter group name.
Specify cluster-snapshot when SourceIdentifier is a cluster snapshot identifier.
:type StartTime: datetime
:param StartTime: The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.
Example: 2009-07-08T18:00Z
:type EndTime: datetime
:param EndTime: The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.
Example: 2009-07-08T18:00Z
:type Duration: integer
:param Duration: The number of minutes prior to the time of the request for which to retrieve events. For example, if the request is sent at 18:00 and you specify a duration of 60, then only events which have occurred after 17:00 will be returned.
Default: 60
:type MaxRecords: integer
:param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.
Default: 100
Constraints: minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeEvents request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
:rtype: dict
:return: {
'Marker': 'string',
'Events': [
{
'SourceIdentifier': 'string',
'SourceType': 'cluster'|'cluster-parameter-group'|'cluster-security-group'|'cluster-snapshot',
'Message': 'string',
'EventCategories': [
'string',
],
'Severity': 'string',
'Date': datetime(2015, 1, 1),
'EventId': 'string'
},
]
}
:returns:
(string) --
"""
pass
def describe_hsm_client_certificates(HsmClientCertificateIdentifier=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
    """
    Returns information about the specified HSM client certificate. If no certificate ID is specified, returns information about all the HSM certificates owned by your AWS customer account.
    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM client certificates that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM client certificates that have any combination of those values are returned.
    If both tag keys and values are omitted from the request, HSM client certificates are returned regardless of whether they have tag keys or values associated with them.
    See also: AWS API Documentation
    :example: response = client.describe_hsm_client_certificates(
        HsmClientCertificateIdentifier='string',
        MaxRecords=123,
        Marker='string',
        TagKeys=[
            'string',
        ],
        TagValues=[
            'string',
        ]
    )
    :type HsmClientCertificateIdentifier: string
    :param HsmClientCertificateIdentifier: The identifier of a specific HSM client certificate for which you want information. If no identifier is specified, information is returned for all HSM client certificates owned by your AWS customer account.
    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.
    Default: 100
    Constraints: minimum 20, maximum 100.
    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmClientCertificates request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
    :type TagKeys: list
    :param TagKeys: A tag key or keys for which you want to return all matching HSM client certificates that are associated with the specified key or keys. For example, suppose that you have HSM client certificates that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag keys associated with them.
    (string) --
    :type TagValues: list
    :param TagValues: A tag value or values for which you want to return all matching HSM client certificates that are associated with the specified tag value or values. For example, suppose that you have HSM client certificates that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the HSM client certificates that have either or both of these tag values associated with them.
    (string) --
    :rtype: dict
    :return: {
        'Marker': 'string',
        'HsmClientCertificates': [
            {
                'HsmClientCertificateIdentifier': 'string',
                'HsmClientCertificatePublicKey': 'string',
                'Tags': [
                    {
                        'Key': 'string',
                        'Value': 'string'
                    },
                ]
            },
        ]
    }
    """
    # Auto-generated documentation-only stub: the body is intentionally empty;
    # the signature and docstring mirror the Redshift DescribeHsmClientCertificates API.
    pass
def describe_hsm_configurations(HsmConfigurationIdentifier=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
"""
Returns information about the specified Amazon Redshift HSM configuration. If no configuration ID is specified, returns information about all the HSM configurations owned by your AWS customer account.
If you specify both tag keys and tag values in the same request, Amazon Redshift returns all HSM connections that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all HSM connections that have any combination of those values are returned.
If both tag keys and values are omitted from the request, HSM connections are returned regardless of whether they have tag keys or values associated with them.
See also: AWS API Documentation
:example: response = client.describe_hsm_configurations(
HsmConfigurationIdentifier='string',
MaxRecords=123,
Marker='string',
TagKeys=[
'string',
],
TagValues=[
'string',
]
)
:type HsmConfigurationIdentifier: string
:param HsmConfigurationIdentifier: The identifier of a specific Amazon Redshift HSM configuration to be described. If no identifier is specified, information is returned for all HSM configurations owned by your AWS customer account.
:type MaxRecords: integer
:param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.
Default: 100
Constraints: minimum 20, maximum 100.
:type Marker: string
:param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeHsmConfigurations request exceed | |
'Memorizer': 'User'
}
# Maps model names to the names of other models they can be joined to for
# queries. The values of the join models are the attributes of the original
# model that the joins are actually made on, e.g., outerjoin(model.Form.tags)
models2joins = {
'Form': {
'File': 'files',
'Translation': 'translations',
'Tag': 'tags',
'Collection': 'collections',
'Memorizer': 'memorizers'
},
'File': {
'Tag': 'tags',
'Form': 'forms',
'Collection': 'collections'
},
'Collection': {
'Form': 'forms',
'File': 'files',
'Tag': 'tags'
}
}
############################################################################
# Model getters
############################################################################
def _get_model_name(self, model_name):
"""Always return model_name; store an error if model_name is invalid."""
if model_name not in self.schema:
self._add_to_errors(model_name, u'Searching on the %s model is not permitted' % model_name)
return model_name
    def _get_model(self, model_name, add_to_joins=True):
        """Return the (possibly aliased) model class named by model_name, or None.

        Records an error when the OLD has no such model. When add_to_joins is
        True and model_name differs from the model being searched, the model is
        aliased and queued in self.joins so the query builder can join on it.
        """
        try:
            model = getattr(old_model, self.model_aliases.get(model_name, model_name))
        except AttributeError:
            model = None
            self._add_to_errors(model_name, u"The OLD has no model %s" % model_name)
        # Store any implicit joins in self.joins to await addition to the query
        # in self._add_joins_to_query. Using sqlalchemy.orm's aliased to alias
        # models/tables is what permits filters on multiple -to-many relations.
        # Aliasing File while searching Form.files, for example, permits us to
        # retrieve all forms that are associated to file 71 and file 74.
        if add_to_joins and model_name != self.model_name:
            join_models = self.models2joins.get(self.model_name, {})
            if model_name in join_models:
                join_collection_name = join_models[model_name]
                join_collection = getattr(getattr(old_model, self.model_name),
                                      join_collection_name)
                model = aliased(model)
                self.joins.append((model, join_collection))
            else:
                self._add_to_errors(model_name,
                    u"Searching the %s model by joining on the %s model is not possible" % (
                        self.model_name, model_name))
        return model
def _get_attribute_model_name(self, attribute_name, model_name):
"""Returns the name of the model X that stores the data for the attribute
A of model M, e.g., the attribute_model_name for model_name='Form' and
attribute_name='enterer' is 'User'.
"""
attribute_dict = self._get_attribute_dict(attribute_name, model_name)
try:
return attribute_dict['foreign_model']
except KeyError:
self._add_to_errors(u'%s.%s' % (model_name, attribute_name),
u'The %s attribute of the %s model does not represent a many-to-one relation.' % (
attribute_name, model_name))
except:
pass # probably a TypeError, meaning model_name.attribute_name is invalid; would have already been caught
############################################################################
# Attribute getters
############################################################################
def _get_attribute_name(self, attribute_name, model_name):
"""Return attribute_name or cache an error if attribute_name is not in
self.schema[model_name].
"""
attribute_dict = self._get_attribute_dict(attribute_name, model_name, True)
return attribute_name
def _get_attribute_dict(self, attribute_name, model_name, report_error=False):
"""Return the dict needed to validate a given attribute of a given model,
or return None. Propagate an error (optionally) if the attribute_name is
invalid.
"""
attribute_dict = self.schema.get(model_name, {}).get(
attribute_name, None)
if attribute_dict is None and report_error:
self._add_to_errors('%s.%s' % (model_name, attribute_name),
u'Searching on %s.%s is not permitted' % (model_name, attribute_name))
return attribute_dict
def _get_attribute(self, attribute_name, model, model_name):
try:
attribute = self._collate_attribute(getattr(model, attribute_name))
except AttributeError: # model can be None
attribute = None
self._add_to_errors('%s.%s' % (model_name, attribute_name),
u"There is no attribute %s of %s" % (attribute_name, model_name))
return attribute
def _collate_attribute(self, attribute):
"""Append a MySQL COLLATE utf8_bin expression after the column name, if
appropriate. This allows regexp and like searches to be case-sensitive.
An example SQLA query would be Session.query(model.Form).filter(
collate(model.Form.transcription, 'utf8_bin').like(u'a%'))
Previously there was a condition on collation that the relation_name be in
('like', 'regexp'). This condition was removed because MySQL does case-
insensitive equality searches too!
"""
if self.RDBMSName == 'mysql' and attribute is not None:
try:
attribute_type = attribute.property.columns[0].type
except AttributeError:
attribute_type = None
if isinstance(attribute_type, self.SQLAlchemyStringTypes):
attribute = collate(attribute, 'utf8_bin')
return attribute
############################################################################
# Relation getters
############################################################################
def _get_relation_name(self, relation_name, model_name, attribute_name):
"""Return relation_name or its alias; propagate an error if relation_name is invalid."""
relation_dict = self._get_relation_dict(relation_name, model_name, attribute_name, True)
try:
return relation_dict.get('alias', relation_name)
except AttributeError: # relation_dict can be None
return None
def _get_relation_dict(self, relation_name, model_name, attribute_name, report_error=False):
attribute_relations = self._get_attribute_relations(attribute_name, model_name)
try:
relation_dict = attribute_relations.get(relation_name, None)
except AttributeError:
relation_dict = None
if relation_dict is None and report_error:
self._add_to_errors('%s.%s.%s' % (model_name, attribute_name, relation_name),
u"The relation %s is not permitted for %s.%s" % (relation_name, model_name, attribute_name))
return relation_dict
def _get_attribute_relations(self, attribute_name, model_name):
    """Return the dict of relations that are valid for ``attribute_name``.

    Relational attributes (those configured with a 'foreign_model') only
    support the equality relations; all other attributes support the full
    relation set.  Returns ``None`` when the attribute itself is invalid.
    """
    attribute_dict = self._get_attribute_dict(attribute_name, model_name)
    if attribute_dict is None:
        return None
    if attribute_dict.get('foreign_model'):
        return self.equality_relations
    return self.relations
def _get_relation(self, relation_name, attribute, attribute_name, model_name):
    """Return the SQLAlchemy relation method named ``relation_name`` on
    ``attribute`` (e.g. ``model.Form.transcription.like``); record an error
    and return ``None`` when it does not exist.
    """
    try:
        if relation_name == 'regexp':
            # 'regexp' is not a standard column operator, so it is built via
            # the generic op() operator factory.
            op = getattr(attribute, 'op')
            relation = op('regexp')
        else:
            relation = getattr(attribute, relation_name)
    except AttributeError:  # attribute can be None
        relation = None
        self._add_to_errors('%s.%s.%s' % (model_name, attribute_name, relation_name),
            u"There is no relation '%s' of '%s.%s'" % (relation_name, model_name, attribute_name))
    return relation
############################################################################
# Value getters
############################################################################
def _normalize(self, value):
    """Unicode-normalize ``value``.

    Strings are normalized directly; lists have each string element
    normalized; any other type passes through unchanged.
    """
    def _norm(candidate):
        # Only plain byte/unicode strings are normalized.
        if type(candidate) in (str, unicode):
            return normalize(candidate)
        return candidate
    if type(value) is list:
        return [_norm(element) for element in value]
    return _norm(value)
def _get_value_converter(self, attribute_name, model_name):
    """Return the bound value-converter method configured for this attribute,
    or ``None`` when the attribute is invalid or has no converter.
    """
    attribute_dict = self._get_attribute_dict(attribute_name, model_name)
    if attribute_dict is None:
        return None
    converter_name = attribute_dict.get('value_converter', '')
    return getattr(self, converter_name, None)
def _get_value(self, value, model_name, attribute_name, relation_name):
    """Unicode-normalize (NFD) the search ``value`` and run it through the
    attribute's value converter, if one is configured.  Lists are converted
    element-wise.
    """
    # NFD-normalize search patterns; we might want to parameterize this.
    value = self._normalize(value)
    converter = self._get_value_converter(attribute_name, model_name)
    if converter is None:
        return value
    if type(value) is list:
        return [converter(item) for item in value]
    return converter(value)
############################################################################
# Filter expression getters
############################################################################
def _get_invalid_filter_expression_message(self, model_name, attribute_name,
                                           relation_name, value):
    """Return the user-facing error message for a malformed filter expression."""
    expression = u'%s.%s.%s(%s)' % (
        model_name, attribute_name, relation_name, repr(value))
    return u'Invalid filter expression: %s' % expression
def _get_invalid_model_attribute_errors(self, relation, value, model_name,
        attribute_name, relation_name, attribute, attribute_model_name,
        attribute_model_attribute_name):
    """Return a (possibly empty) list of ``(error_key, message)`` tuples for
    Model.attribute errors already present in ``self.errors``.

    A non-empty result lets ``_get_filter_expression`` bail out before
    building ``relation(value)`` or ``attribute.has(relation(value))``,
    avoiding a (costly) RuntimeError.
    """
    found = []
    if attribute_model_name:
        key = '%s.%s' % (attribute_model_name, attribute_model_attribute_name)
        if self.errors.get(key) == u'Searching on the %s is not permitted' % key:
            found.append((
                '%s.%s.%s' % (attribute_model_name,
                              attribute_model_attribute_name, relation_name),
                self._get_invalid_filter_expression_message(
                    attribute_model_name, attribute_model_attribute_name,
                    relation_name, value)))
    key = '%s.%s' % (model_name, attribute_name)
    if self.errors.get(key) == u'Searching on %s is not permitted' % key:
        found.append((
            '%s.%s.%s' % (model_name, attribute_name, relation_name),
            self._get_invalid_filter_expression_message(
                model_name, attribute_name, relation_name, value)))
    return found
def _get_meta_relation(self, attribute, model_name, attribute_name):
    """Return ``attribute.has`` for scalar relations or ``attribute.any`` for
    collection relations, according to schema[model_name][attribute_name]['type'].
    An unrecognized type raises KeyError, as before.
    """
    meta_relation_names = {'scalar': 'has', 'collection': 'any'}
    relation_type = self.schema[model_name][attribute_name]['type']
    return getattr(attribute, meta_relation_names[relation_type])
def _get_filter_expression(self, relation, value, model_name, attribute_name,
                           relation_name, attribute=None, attribute_model_name=None,
                           attribute_model_attribute_name=None):
    """Attempt to return ``relation(value)``, catching and storing errors as
    needed.

    If only the first 5 args are provided, this is a [mod, attr, rel, val]
    search; if all 8 are provided, it is a
    [mod, attr, attr_mod_attr, rel, val] search, in which case the relation
    is wrapped in has()/any() on the relational attribute.

    Returns the SQLAlchemy filter expression, or ``None`` (with errors
    recorded in ``self.errors``) when the expression cannot be built.

    Fix: the three ``except ExcType, e:`` clauses used Python-2-only syntax;
    they now use ``except ExcType as e:`` (valid on Python 2.6+ and 3.x)
    with identical behavior.
    """
    invalid_model_attribute_errors = self._get_invalid_model_attribute_errors(
        relation, value, model_name, attribute_name, relation_name, attribute,
        attribute_model_name, attribute_model_attribute_name)
    if invalid_model_attribute_errors:
        # Known-invalid Model.attribute: record the errors and skip building
        # the expression (avoids catching a costly RuntimeError).
        for error_key, error_message in invalid_model_attribute_errors:
            self._add_to_errors(error_key, error_message)
        return None
    try:
        if attribute_model_name:
            # 8-arg form: wrap relation(value) in has()/any() on the
            # relational attribute.
            meta_relation = self._get_meta_relation(attribute, model_name, attribute_name)
            return meta_relation(relation(value))
        return relation(value)
    except AttributeError:
        # has()/any() missing: the attribute is not a relation.
        self._add_to_errors('%s.%s' % (model_name, attribute_name),
            u'The %s.%s attribute does not represent a many-to-one relation.' % (
                model_name, attribute_name))
    except TypeError:
        self._add_to_errors('%s.%s.%s' % (model_name, attribute_name, relation_name),
            self._get_invalid_filter_expression_message(model_name,
                attribute_name, relation_name, value))
    except InvalidRequestError as e:
        self.errors['InvalidRequestError'] = e.__unicode__()
    except OperationalError as e:
        self.errors['OperationalError'] = e.__unicode__()
    except RuntimeError as e:
        self.errors['RuntimeError'] = e.__unicode__()
    return None
def _get_simple_filter_expression(self, *args):
"""Build an SQLAlchemy filter expression. Examples:
1. ['Form', 'transcription', '=', 'abc'] =>
model.Form.transcription.__eq__('abc')
2. ['Form', 'enterer', 'first_name', 'like', 'J%'] =>
Session.query(model.Form)\
.filter(model.Form.enterer.has(model.User.first_name.like(u'J%')))
3. ['Tag', 'name', 'like', '%abc%'] (when searching the Form model) =>
aliased_tag = aliased(model.Tag)
Session.query(model.Form)\
.filter(aliased_tag.name.like(u'%abc%'))\
.outerjoin(aliased_tag, model.Form.tags)
4. ['Form', 'tags', 'name', 'like', '%abc%'] =>
Session.query(model.Form)\
.filter(model.Form.tags.any(model.Tag.name.like(u'%abc%')))
"""
model_name = self._get_model_name(args[0])
attribute_name = self._get_attribute_name(args[1], model_name)
if len(args) == 4:
model = self._get_model(model_name)
relation_name = self._get_relation_name(args[2], model_name, attribute_name)
value = self._get_value(args[3], model_name, attribute_name, relation_name)
attribute = self._get_attribute(attribute_name, model, model_name)
relation = self._get_relation(relation_name, attribute, attribute_name, model_name)
return self._get_filter_expression(relation, value, model_name, attribute_name, relation_name)
else:
attribute_model_name = self._get_attribute_model_name(attribute_name, model_name)
attribute_model_attribute_name = self._get_attribute_name(args[2], attribute_model_name)
relation_name = self._get_relation_name(args[3], attribute_model_name, attribute_model_attribute_name)
value = self._get_value(args[4], attribute_model_name, attribute_model_attribute_name, relation_name)
model = self._get_model(model_name, False)
attribute = self._get_attribute(attribute_name, model, model_name)
attribute_model = self._get_model(attribute_model_name, False)
attribute_model_attribute = self._get_attribute(attribute_model_attribute_name, attribute_model, attribute_model_name)
relation = self._get_relation(relation_name, attribute_model_attribute, attribute_model_attribute_name, attribute_model_name)
return self._get_filter_expression(relation, value, model_name, attribute_name, relation_name,
attribute, attribute_model_name, | |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from numpy.linalg import LinAlgError, inv, solve, norm
from numpy import dot, exp
from numpy.random import beta
from scipy.integrate import trapz
import scipy.stats as stats
import pandas as pd
from lifelines.plotting import plot_estimate, plot_regressions
from lifelines.utils import survival_table_from_events, inv_normal_cdf, \
epanechnikov_kernel, StatError, coalesce
from lifelines.progress_bar import progress_bar
class BaseFitter(object):
    """Common base for the estimator classes; provides a repr that reflects
    whether the fitter has been fit yet.
    """

    def __repr__(self):
        classname = self.__class__.__name__
        try:
            # event_observed only exists after fit() has been called.
            n_observations = self.event_observed.shape[0]
            n_censored = (1 - self.event_observed).sum()
            return """<lifelines.%s: fitted with %d observations, %d censored>""" % (
                classname, n_observations, n_censored)
        except AttributeError:
            return """<lifelines.%s>""" % classname
class NelsonAalenFitter(BaseFitter):
    """
    Class for fitting the Nelson-Aalen estimate for the cumulative hazard.
    NelsonAalenFitter(alpha=0.95, nelson_aalen_smoothing=True)
    alpha: The alpha value associated with the confidence intervals.
    nelson_aalen_smoothing: If the event times are naturally discrete (like discrete years, minutes, etc.)
      then it is advisable to turn this parameter to False. See [1], pg.84.
    """

    def __init__(self, alpha=0.95, nelson_aalen_smoothing=True):
        self.alpha = alpha
        self.nelson_aalen_smoothing = nelson_aalen_smoothing
        # Bind the increment/variance functions once so fit() does not have
        # to re-check the smoothing flag.
        if self.nelson_aalen_smoothing:
            self._variance_f = self._variance_f_smooth
            self._additive_f = self._additive_f_smooth
        else:
            self._variance_f = self._variance_f_discrete
            self._additive_f = self._additive_f_discrete

    def fit(self, durations, event_observed=None, timeline=None, entry=None,
            label='NA-estimate', alpha=None, ci_labels=None):
        """
        Parameters:
          durations: an array, or pd.Series, of length n -- duration subject was observed for
          timeline: return the best estimate at the values in timelines (positively increasing)
          event_observed: an array, or pd.Series, of length n -- True if the the death was observed, False if the event
             was lost (right-censored). Defaults all True if event_observed==None
          entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
             useful for left-truncated observations, i.e the birth event was not observed.
             If None, defaults to all 0 (all birth events observed.)
          label: a string to name the column of the estimate.
          alpha: the alpha value in the confidence intervals. Overrides the initializing
             alpha for this call to fit only.
          ci_labels: add custom column names to the generated confidence intervals
             as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
        Returns:
          self, with new properties like 'cumulative_hazard_'.
        """
        # preprocess_inputs / _additive_estimate are module-level helpers
        # (defined elsewhere in this module).
        v = preprocess_inputs(durations, event_observed, timeline, entry)
        self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v
        cumulative_hazard_, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
                                                                self._additive_f, self._variance_f, False)
        # estimates
        self.cumulative_hazard_ = pd.DataFrame(cumulative_hazard_, columns=[label])
        self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha if alpha else self.alpha, ci_labels)
        self._cumulative_sq = cumulative_sq_
        # estimation functions (bound closures produced by module helpers)
        self.predict = _predict(self, "cumulative_hazard_", label)
        self.subtract = _subtract(self, "cumulative_hazard_")
        self.divide = _divide(self, "cumulative_hazard_")
        # plotting
        self.plot = plot_estimate(self, "cumulative_hazard_")
        self.plot_cumulative_hazard = self.plot
        self.plot_hazard = plot_estimate(self, 'hazard_')
        return self

    def _bounds(self, cumulative_sq_, alpha, ci_labels):
        """Return a DataFrame of log-transformed confidence bands for the
        cumulative hazard.  Also stores the labels on self.ci_labels for reuse
        by smoothed_hazard_confidence_intervals_.
        """
        alpha2 = inv_normal_cdf(1 - (1 - alpha) / 2)
        df = pd.DataFrame(index=self.timeline)
        name = self.cumulative_hazard_.columns[0]
        if ci_labels is None:
            # NOTE(review): default labels use self.alpha even when a per-fit
            # alpha override was passed in -- confirm this is intended.
            ci_labels = ["%s_upper_%.2f" % (name, self.alpha), "%s_lower_%.2f" % (name, self.alpha)]
        assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
        self.ci_labels = ci_labels
        df[ci_labels[0]] = self.cumulative_hazard_.values * \
            np.exp(alpha2 * np.sqrt(cumulative_sq_) / self.cumulative_hazard_.values)
        df[ci_labels[1]] = self.cumulative_hazard_.values * \
            np.exp(-alpha2 * np.sqrt(cumulative_sq_) / self.cumulative_hazard_.values)
        return df

    def _variance_f_smooth(self, population, deaths):
        # Variance increment for the smoothed estimator: sum over the deaths
        # of 1/(N-i)^2, computed row-wise.
        df = pd.DataFrame({'N': population, 'd': deaths})
        return df.apply(lambda N_d: np.sum((1. / (N_d[0] - i) ** 2 for i in range(int(N_d[1])))), axis=1)

    def _variance_f_discrete(self, population, deaths):
        # Variance increment for naturally discrete event times.
        return 1. * (population - deaths) * deaths / population ** 3

    def _additive_f_smooth(self, population, deaths):
        # Hazard increment for the smoothed estimator: sum over the deaths
        # of 1/(N-i), computed row-wise.
        df = pd.DataFrame({'N': population, 'd': deaths})
        return df.apply(lambda N_d: np.sum((1. / (N_d[0] - i) for i in range(int(N_d[1])))), axis=1)

    def _additive_f_discrete(self, population, deaths):
        # Hazard increment d/N for discrete times; infinities (N == 0) are
        # zeroed out.
        return (1. * deaths / population).replace([np.inf], 0)

    def smoothed_hazard_(self, bandwidth):
        """
        Parameters:
          bandwidth: the bandwidth used in the Epanechnikov kernel.
        Returns:
          a DataFrame of the smoothed hazard
        """
        timeline = self.timeline
        cumulative_hazard_name = self.cumulative_hazard_.columns[0]
        hazard_name = "smoothed-" + cumulative_hazard_name
        hazard_ = self.cumulative_hazard_.diff().fillna(self.cumulative_hazard_.iloc[0])
        # C masks the timeline points where the estimate actually jumps.
        C = (hazard_[cumulative_hazard_name] != 0.0).values
        return pd.DataFrame( 1./(2*bandwidth)*np.dot(epanechnikov_kernel(timeline[:, None], timeline[C][None, :], bandwidth), hazard_.values[C,:]),
                             columns=[hazard_name], index=timeline)

    def smoothed_hazard_confidence_intervals_(self, bandwidth, hazard_=None):
        """
        Parameters:
          bandwidth: the bandwidth to use in the Epanechnikov kernel.
          hazard_: a computed (n,) numpy array of estimated hazard rates. If None, uses naf.smoothed_hazard_

        Requires fit() to have run (uses self._cumulative_sq and self.ci_labels).
        """
        if hazard_ is None:
            hazard_ = self.smoothed_hazard_(bandwidth).values[:, 0]
        timeline = self.timeline
        alpha2 = inv_normal_cdf(1 - (1 - self.alpha) / 2)
        # NOTE(review): mutates self._cumulative_sq in place.
        self._cumulative_sq.iloc[0] = 0
        var_hazard_ = self._cumulative_sq.diff().fillna(self._cumulative_sq.iloc[0])
        C = (var_hazard_.values != 0.0)  # only consider the points with jumps
        std_hazard_ = np.sqrt(1./(2*bandwidth**2)*np.dot(epanechnikov_kernel(timeline[:, None], timeline[C][None, :], bandwidth)**2, var_hazard_.values[C]))
        values = {
            self.ci_labels[0]: hazard_ * np.exp(alpha2 * std_hazard_ / hazard_),
            self.ci_labels[1]: hazard_ * np.exp(-alpha2 * std_hazard_ / hazard_)
        }
        return pd.DataFrame(values, index=timeline)
class KaplanMeierFitter(BaseFitter):
    """
    Class for fitting the Kaplan-Meier estimate for the survival function.
    KaplanMeierFitter(alpha=0.95)
    alpha: The alpha value associated with the confidence intervals.
    """

    def __init__(self, alpha=0.95):
        self.alpha = alpha

    def fit(self, durations, event_observed=None, timeline=None, entry=None, label='KM-estimate',
            alpha=None, left_censorship=False, ci_labels=None):
        """
        Parameters:
          durations: an array, or pd.Series, of length n -- duration subject was observed for
          timeline: return the best estimate at the values in timelines (positively increasing)
          event_observed: an array, or pd.Series, of length n -- True if the the death was observed, False if the event
             was lost (right-censored). Defaults all True if event_observed==None
          entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
             useful for left-truncated observations, i.e the birth event was not observed.
             If None, defaults to all 0 (all birth events observed.)
          label: a string to name the column of the estimate.
          alpha: the alpha value in the confidence intervals. Overrides the initializing
             alpha for this call to fit only.
          left_censorship: True if durations and event_observed refer to left censorship events. Default False
          ci_labels: add custom column names to the generated confidence intervals
             as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
        Returns:
          self, with new properties like 'survival_function_'.
        """
        # if the user is interested in left-censorship, we return the
        # cumulative_density_, not survival_function_
        estimate_name = 'survival_function_' if not left_censorship else 'cumulative_density_'
        v = preprocess_inputs(durations, event_observed, timeline, entry)
        self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v
        log_survival_function, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
                                                                   self._additive_f, self._additive_var,
                                                                   left_censorship)
        if entry is not None:
            # a serious problem with KM is that when the sample size is small
            # and there are too few early truncation times, the number of
            # patients at risk and the number of deaths may coincide.
            # we adjust for this using the Breslow-Fleming-Harrington estimator
            n = self.event_table.shape[0]
            net_population = (self.event_table['entrance'] - self.event_table['removed']).cumsum()
            if net_population.iloc[:int(n / 2)].min() == 0:
                # NOTE(review): Series.argmin returns an index *label* here,
                # which is then formatted with %.1f -- assumes a numeric
                # timeline index; confirm.
                ix = net_population.iloc[:int(n / 2)].argmin()
                raise StatError("""There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BFH estimator.""" % ix)
        # estimation
        setattr(self, estimate_name, pd.DataFrame(np.exp(log_survival_function), columns=[label]))
        self.__estimate = getattr(self, estimate_name)
        self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha if alpha else self.alpha, ci_labels)
        self.median_ = median_survival_times(self.__estimate)
        # estimation methods (bound closures produced by module helpers)
        self.predict = _predict(self, estimate_name, label)
        self.subtract = _subtract(self, estimate_name)
        self.divide = _divide(self, estimate_name)
        # plotting functions
        self.plot = plot_estimate(self, estimate_name)
        setattr(self, "plot_" + estimate_name, self.plot)
        return self

    def _bounds(self, cumulative_sq_, alpha, ci_labels):
        """Exponential Greenwood confidence bands for the survival function.
        See http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdf
        """
        alpha2 = inv_normal_cdf((1. + alpha) / 2.)
        df = pd.DataFrame(index=self.timeline)
        name = self.__estimate.columns[0]
        v = np.log(self.__estimate.values)
        if ci_labels is None:
            # NOTE(review): default labels use self.alpha even when a per-fit
            # alpha override was supplied -- confirm this is intended.
            ci_labels = ["%s_upper_%.2f" % (name, self.alpha), "%s_lower_%.2f" % (name, self.alpha)]
        assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
        df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + alpha2 * np.sqrt(cumulative_sq_) / v))
        df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - alpha2 * np.sqrt(cumulative_sq_) / v))
        return df

    def _additive_f(self, population, deaths):
        # log of the KM increment.  NOTE(review): np.seterr changes global
        # numpy error state for the rest of the process.
        np.seterr(invalid='ignore')
        return (np.log(population - deaths) - np.log(population))

    def _additive_var(self, population, deaths):
        # Greenwood variance increment; infinite terms (population == deaths)
        # are zeroed out.
        np.seterr(divide='ignore')
        return (1. * deaths / (population * (population - deaths))).replace([np.inf], 0)
class BreslowFlemingHarringtonFitter(BaseFitter):
"""
Class for fitting the Breslow-Fleming-Harrington estimate for the survival function. This estimator
is a biased estimator of the survival function but is more stable when the popualtion is small and
there are too few early truncation times, it may happen that is the number of patients at risk and
the number of deaths is the same.
Mathematically, the NAF estimator is the negative logarithm of the BFH estimator.
BreslowFlemingHarringtonFitter(alpha=0.95)
alpha: The alpha value associated with the confidence intervals.
"""
def __init__(self, alpha=0.95):
self.alpha = alpha
def fit(self, durations, event_observed=None, timeline=None, entry=None,
label='BFH-estimate', alpha=None, ci_labels=None):
"""
Parameters:
duration: an array, or pd.Series, of length | |
import random as r
import math
#import matplotlib.pyplot as plotter
import numpy
import scipy
from scipy import stats
# Rideshare service simulation model that includes rider choice
# Author: <NAME>
# SOURCES AND DERIVATIONS FROM V2:
# In 2019 and 2020, there were 5 million Uber drivers and 18.7 million trips per day, on average. (Source 1)
# This means that each driver makes an average of 3.74 trips per day. So for a minimum of 20 riders per driver,
# we will say the probability a rider needs a ride is 0.187
# For 1000 drivers, we expect to see approx. 3740 rides per day, 187000 rides over 50 days.
# Running the sim while counting the number of rides with this parameter shows that it works.
# For those 5 million drivers, Uber claims to have 103 million average monthly users. (Source 1)
# This means an average of 20.6 riders per driver. We will generate 20.6*1000 riders and scatter them
# randomly about the board.
# In 2017, 36.1 % of Uber drivers were female. (source 1)
# In 2018, Uber reported 3045 sexual assaults in 1.3 billion rides (Source 1)
# Assuming this rate of "assaults per ride" still holds, we expect to see about 0.438 assaults in the fifty days of
# our simulation. Since that's absolutely tiny, we are going to scale it up by multiplying this "assaults per ride"
# parameter by 1000. Thus, we expect to see about 432 assaults per 50-day sim, on average.
# The probability of an assault happening on a ride is assumed to be equal to the probability that at least one of the
# riders is malicious AND that an assault happens. The parameter to be adjusted in order to tune the model to match reality
# is the proportion of malicious people in the model. (While this joint probability is going to be 2000 times as high as
# real life, we cannot say for certain if our model has 2000 times as many malicious people as real life.)
# In a study, 98.1% of female rape victims and 92.5% of female non-rape sex crime victims reported male perpetrators. (Source 2)
# We will average this to say ~95% of female sexual assault victims will report male perpetrators. This means mTw ~ 19x wTw
# For male sexual assault victims, the sex of the perpetrator varied widely based on the type of violence. (ex: rape vs. groping)
# This makes things difficult, as ultimately our preferred sex will have to come down to a guess. We have 4 unknowns, and only
# 3 equations.
# Ultimately, we went with mTw = 0.95, which makes mTm=0.05, wTm=0.95, wTw=0.05
# With some calculations from the CDC estimates (Source 2), we see that the probability a victim of sexual violence is a man is 0.2626.
# This was used with our previous guesses to calculate the true proportions of malicious people.
# Of malicious people, men are 76.56% and women are 23.55%.
# Using conditional probability, we can create a formula for the proportions of men and women who are malicious.
# From tuning model v1, we reached a probability that a ride with a malicious person ends in an assault is 0.491. We will fix this
# value in place, and tune this model by varying the proportion of people who are malicious.
# NEW ADDITIONS
# When a rider needs a ride, they may indicate a preferred sex. If the driver is not that sex, then
# the driver will not give them a ride unless nobody else is available.
# 40% of non-malicious male riders will indicate a preferred sex; 50% of the time it will be women
# and 50% of the time it will be men.
# 60% of non-malicious female riders will indicate a preferred sex; 80% of the time it will be
# women, and 20% of the time it will be men.
# These numbers are sheer guesswork, under the assumption that men do not feel the same safety concerns
# as women and are less likely to care who picks them up.
# Source 1: http://web.archive.org/web/20210423034332/https://www.businessofapps.com/data/uber-statistics/, accessed 3 May 2021
# Source 2: https://www.cdc.gov/violenceprevention/pdf/nisvs_report2010-a.pdf, accessed 3 May 2021
class Board:
    """World state for the rideshare simulation: holds the driver and rider
    populations, steps the simulation one day at a time, and tallies rides
    and assaults per day.
    """

    # ADJUSTABLE VARIABLES
    expectedRides = 187000   # AVERAGE NUMBER OF RIDES EXPECTED OVER THE COURSE OF THE SIMULATION
    expectedAssaults = 438   # AVERAGE NUMBER OF ASSAULTS EXPECTED OVER THE COURSE OF THE SIMULATION
    numDrivers = 1000        # NUMBER OF DRIVERS IN THE SIMULATION
    numDays = 50             # NUMBER OF DAYS THE SIMULATION RUNS FOR
    probMalicious = 0.005    # PROBABILITY A DRIVER OR RIDER IS MALICIOUS
    probAssault = 0.5        # PROBABILITY OF AN ASSAULT DURING A RIDE WITH A MALICIOUS PERSON
    assaultsPerRide = 0.002648  # AVERAGE NUMBER OF ASSAULTS PER RIDE, APPROX. 2000 TIMES REAL LIFE.
    ridersPer = 20.6         # NUMBER OF RIDERS GENERATED PER DRIVER
    mTw = 0.95               # PROBABILITY A MALICIOUS MAN TARGETS WOMEN
    wTm = 0.95               # PROBABILITY A MALICIOUS WOMAN TARGETS MEN
    pMM = 0.7656             # PROBABILITY A MALICIOUS PERSON IS A MAN
    mPreference = 0.4        # PROBABILITY A NON-MALICIOUS MAN HAS A PREFERRED DRIVER SEX
    mPw = 0.5                # PROBABILITY A NON-MALICIOUS MAN PREFERS FEMALE DRIVERS
    wPreference = 0.6        # PROBABILITY A WOMAN HAS A PREFERRED DRIVER SEX
    wPw = 0.8                # PROBABILITY A NON-MALICIOUS WOMAN PREFERS FEMALE DRIVERS

    def __init__(self):
        """Derive population probabilities, generate drivers and riders, and
        prime day-0 activity.
        """
        self.mTm = 1 - self.mTw  # PROBABILITY A MALICIOUS MAN TARGETS MEN
        self.wTw = 1 - self.wTm  # PROBABILITY A MALICIOUS WOMAN TARGETS WOMEN
        self.probMaliciousMan = self.probMalicious*self.pMM*2  # PROBABILITY A MAN IS MALICIOUS
        self.probMaliciousWoman = self.probMalicious*(1-self.pMM)*2  # PROBABILITY A WOMAN IS MALICIOUS
        self.setDrivers = set()       # SET OF DRIVERS IN THE SIMULATION
        self.setRiders = set()        # SET OF RIDERS IN THE SIMULATION
        self.day = 0                  # GETTER FOR CURRENT DAY
        self.assaults = []            # TRACKS ASSAULTS BY DAY
        self.rides = []               # TRACKS TOTAL RIDES BY DAY
        self.activeRiders = set()     # SET OF RIDERS WHO NEED A RIDE THAT DAY
        self.activeDrivers = set()    # SET OF DRIVERS WHO CAN STILL GIVE A RIDE THAT DAY
        self.driversToRemove = set()  # SET OF DRIVERS NOT ACTIVE AFTER EACH BATCH OF RIDES
        # Generate drivers.  NOTE(review): Driver.__init__ also adds itself to
        # board.setDrivers, so the add() here is redundant (harmless for a set).
        for i in range(self.numDrivers):
            self.setDrivers.add(Driver(self))
        # Generate riders, scattered uniformly over the 10x10 board.
        for i in range(int(self.ridersPer*self.numDrivers)):
            rx = r.uniform(0, 10)
            ry = r.uniform(0, 10)
            self.setRiders.add(Rider(self, rx, ry))
        # Must run after all riders exist (see Driver.findRidersInRange).
        for driver in (self.setDrivers):
            driver.findRidersInRange(self)
        # Prime day-0 state: decide which riders need a ride today.
        for rider in self.setRiders:
            active = rider.nextDay()
            if (active):
                self.activeRiders.add(rider)
        for driver in self.setDrivers:
            driver.nextDay()
        print("simulation setup complete")

    # Runs the simulation
    def runSim(self):
        """Run the simulation for numDays days, matching drivers to riders
        each day until one side is exhausted.
        """
        for day in range(self.numDays):
            self.assaults.append(0)
            self.rides.append(0)
            self.day = day
            self.activeDrivers = self.setDrivers.copy()
            while (len(self.activeDrivers) > 0 and len(self.activeRiders) > 0):
                for driver in self.activeDrivers:
                    riderToRemove = driver.giveRide(self)
                    if (riderToRemove is None):
                        # Driver found no rider this pass; retire them for the day.
                        self.driversToRemove.add(driver)
                    else:
                        # NOTE(review): assumes giveRide never returns a rider
                        # already removed by another driver in this same pass --
                        # confirm, otherwise set.remove raises KeyError.
                        self.activeRiders.remove(riderToRemove)
                # Removal is deferred so the set is not mutated mid-iteration.
                for driver in self.driversToRemove:
                    self.activeDrivers.remove(driver)
                self.driversToRemove.clear()
            self.activeRiders.clear()  # Reset for next day
            self.activeDrivers.clear()
            for rider in self.setRiders:
                active = rider.nextDay()
                if (active):
                    self.activeRiders.add(rider)
            for driver in self.setDrivers:
                driver.nextDay()
            # print("Day " + str(day + 1) + " completed")
class Driver:
#ADJUSTABLE VARIABLES
probMale = 0.639 #PROBABILITY THE DRIVER IS MALE
radius = 1 #RADIUS THE DRIVER CAN GIVE RIDES IN
def __init__(self, board):
    """Place a driver at a random board location and roll sex, malice, and
    (if malicious) target-sex traits from the board's population parameters.
    """
    self.ridesGiven = 0  # NUMBER OF RIDES GIVEN THAT DAY
    xcoord = r.uniform(0, 10)
    ycoord = r.uniform(0, 10)
    self.male = False          # INDICATES THE SEX OF THE DRIVER
    self.targetWomen = None    # IF MALICIOUS, INDICATES TARGET SEX
    self.coords = (xcoord, ycoord)  # COORDINATES OF THE DRIVER
    self.ridersInRange = set()      # SET OF THE RIDERS IN RANGE OF THE DRIVER
    self.activeInRange = []         # LIST OF ACTIVE RIDERS IN RANGE
    self.isMalicious = False        # MALICIOUS INDICATOR
    # Self-registration: construction has a side effect on the board.
    # (Board.__init__ also adds each new Driver, so this is redundant there.)
    board.setDrivers.add(self)
    if (r.random() < self.probMale):
        self.male = True
        if (r.random() < board.probMaliciousMan):
            self.isMalicious = True
            # Malicious men target women with probability mTw.
            if (r.random() < board.mTw):
                self.targetWomen = True
            else:
                self.targetWomen = False
    else:
        self.male = False
        if (r.random() < board.probMaliciousWoman):
            self.isMalicious = True
            # Malicious women target women with probability wTw.
            if (r.random() < board.wTw):
                self.targetWomen = True
            else:
                self.targetWomen = False
#Populates the driver's ridersInRange set.
#Must be called AFTER all of the riders have been generated.
def findRidersInRange (self, board):
for rider in board.setRiders:
x = rider.coords[0] - | |
<reponame>huicheese/Django-test3<filename>django/contrib/gis/tests/distapp/tests.py<gh_stars>0
import os, unittest
from decimal import Decimal
from django.db.models import Q
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import GEOSGeometry, Point, LineString
from django.contrib.gis.measure import D # alias for Distance
from django.contrib.gis.tests.utils import oracle, postgis, spatialite, no_oracle, no_spatialite
from models import AustraliaCity, Interstate, SouthTexasInterstate, \
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode
from data import au_cities, interstates, stx_interstates, stx_cities, stx_zips
class DistanceTest(unittest.TestCase):
    """Tests for GeoDjango distance lookups and the distance() GeoQuerySet
    method across projected and geodetic coordinate systems.
    """

    # A point we are testing distances with -- using a WGS84 coordinate that
    # will be implicitly transformed to the coordinate system of the field,
    # EPSG:32140 (Texas South Central, w/units in meters).
    stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
    # Another one for Australia
    au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
    """Return the alphabetically sorted list of .name values from a queryset."""
    return sorted(city.name for city in qs)
def test01_init(self):
    "Initialization of distance models."
    # Loading up the cities (shared fixture data from the data module).
    def load_cities(city_model, data_tup):
        for name, x, y in data_tup:
            city_model(name=name, point=Point(x, y, srid=4326)).save()

    def load_interstates(imodel, data_tup):
        for name, wkt in data_tup:
            imodel(name=name, path=wkt).save()
    load_cities(SouthTexasCity, stx_cities)
    load_cities(SouthTexasCityFt, stx_cities)
    load_cities(AustraliaCity, au_cities)
    self.assertEqual(9, SouthTexasCity.objects.count())
    self.assertEqual(9, SouthTexasCityFt.objects.count())
    self.assertEqual(11, AustraliaCity.objects.count())
    # Loading up the South Texas Zip Codes (NAD83, srid 4269); the same
    # polygons go into both the projected and geographic zipcode models.
    for name, wkt in stx_zips:
        poly = GEOSGeometry(wkt, srid=4269)
        SouthTexasZipcode(name=name, poly=poly).save()
        CensusZipcode(name=name, poly=poly).save()
    self.assertEqual(4, SouthTexasZipcode.objects.count())
    self.assertEqual(4, CensusZipcode.objects.count())
    # Loading up the Interstates.
    load_interstates(Interstate, interstates)
    load_interstates(SouthTexasInterstate, stx_interstates)
    self.assertEqual(1, Interstate.objects.count())
    self.assertEqual(1, SouthTexasInterstate.objects.count())
@no_spatialite
def test02_dwithin(self):
    "Testing the `dwithin` lookup type."
    # Distances -- all should be equal (except for the
    # degree/meter pair in au_dists, that's somewhat approximate).
    tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
    au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
    # Expected cities for Australia and Texas.
    tx_cities = ['Downtown Houston', 'Southside Place']
    # NOTE(review): this local shadows the `au_cities` fixture tuple imported
    # from the data module at the top of the file.
    au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
    # Performing distance queries on two projected coordinate systems one
    # with units in meters and the other in units of U.S. survey feet.
    for dist in tx_dists:
        # Tuples carry (meters, survey-feet) values for the two models.
        if isinstance(dist, tuple): dist1, dist2 = dist
        else: dist1 = dist2 = dist
        qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
        qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
        for qs in qs1, qs2:
            self.assertEqual(tx_cities, self.get_names(qs))
    # Now performing the `dwithin` queries on a geodetic coordinate system.
    for dist in au_dists:
        # On PostGIS, Distance objects are invalid for geodetic dwithin.
        if isinstance(dist, D) and not oracle: type_error = True
        else: type_error = False
        if isinstance(dist, tuple):
            # Tuples carry (degrees, meters); Oracle takes meters.
            if oracle: dist = dist[1]
            else: dist = dist[0]
        # Creating the query set.
        qs = AustraliaCity.objects.order_by('name')
        if type_error:
            # A TypeError should be raised on PostGIS when trying to pass
            # Distance objects into a DWithin query using a geodetic field.
            self.assertRaises(TypeError, AustraliaCity.objects.filter, point__dwithin=(self.au_pnt, dist))
        else:
            self.assertEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test03a_distance_method(self):
    "Testing the `distance` GeoQuerySet method on projected coordinate systems."
    # The point for La Grange, TX.
    lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
    # Reference distances in meters and in U.S. survey feet, obtained with
    # the raw SQL statements below.
    # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) FROM distapp_southtexascity;
    m_distances = [147075.069813, 139630.198056, 140888.552826,
                   138809.684197, 158309.246259, 212183.594374,
                   70870.188967, 165337.758878, 139196.085105]
    # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) FROM distapp_southtexascityft;
    # Oracle 11 thinks this is not a projected coordinate system, so it's
    # not tested there.
    ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
                    455411.438904354, 519386.252102563, 696139.009211594,
                    232513.278304279, 542445.630586414, 456679.155883207]

    # Exercise different parameter variations and models with different
    # projected coordinate systems.
    dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
    dist2 = SouthTexasCity.objects.distance(lagrange)  # GEOSGeometry parameter
    if spatialite or oracle:
        dist_qs = [dist1, dist2]
    else:
        dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt)  # EWKT string parameter
        dist4 = SouthTexasCityFt.objects.distance(lagrange)
        dist_qs = [dist1, dist2, dist3, dist4]

    # Original query done on PostGIS; the AlmostEqual tolerance has to be
    # loosened for Oracle.
    tol = 2 if oracle else 5
    # Each distance queryset must yield the expected distances.
    for qs in dist_qs:
        for i, city in enumerate(qs):
            self.assertAlmostEqual(m_distances[i], city.distance.m, tol)
            self.assertAlmostEqual(ft_distances[i], city.distance.survey_ft, tol)
@no_spatialite
def test03b_distance_method(self):
    "Testing the `distance` GeoQuerySet method on geodetic coordinate systems."
    # Oracle's geodetic math differs slightly; loosen the tolerance there.
    if oracle: tol = 2
    else: tol = 5
    # Now testing geodetic distance aggregation.
    hillsdale = AustraliaCity.objects.get(name='Hillsdale')
    if not oracle:
        # PostGIS is limited to distance queries only to/from point geometries,
        # so a ValueError is raised if something else is put in.
        self.assertRaises(ValueError, AustraliaCity.objects.distance, 'LINESTRING(0 0, 1 1)')
        self.assertRaises(ValueError, AustraliaCity.objects.distance, LineString((0, 0), (1, 1)))
    # Got the reference distances using the raw SQL statements:
    # SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
    spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115, 90847.435881812, 217402.811862568, 709599.234619957, 640011.483583758, 7772.00667666425, 1047861.7859506, 1165126.55237647]
    # SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
    sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184, 90804.4414289463, 217712.63666124, 709131.691061906, 639825.959074112, 7786.80274606706, 1049200.46122281, 1162619.7297006]
    # Testing with spheroid distances first.
    qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
    for i, c in enumerate(qs):
        self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
    if postgis:
        # PostGIS uses sphere-only distances by default, testing these as well.
        qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
        for i, c in enumerate(qs):
            self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
def test03c_distance_method(self):
    "Testing the `distance` GeoQuerySet method used with `transform` on a geographic field."
    # On PostGIS, distances normally can't be computed from a geometry
    # field that is not a PointField.
    self.assertRaises(ValueError, CensusZipcode.objects.distance, self.stx_pnt)

    # Use a Polygon (the centroid of 77005 buffered to 100m). Polygons
    # aren't allowed in geographic distance queries normally, but here the
    # field has been transformed to a non-geographic system first.
    zipcode = SouthTexasZipcode.objects.get(name='77005')
    # Reference query:
    # SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
    dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]

    # One buffer is already in the SRID passed to `transform()` (so it needs
    # no transformation SQL); the other must be transformed. Both should
    # produce identical results.
    buf1 = zipcode.poly.centroid.buffer(100)
    buf2 = buf1.transform(4269, clone=True)
    ref_zips = ['77002', '77025', '77401']
    for buf in (buf1, buf2):
        qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf)
        self.assertEqual(ref_zips, self.get_names(qs))
        for i, zc in enumerate(qs):
            self.assertAlmostEqual(zc.distance.m, dists_m[i], 5)
def test04_distance_lookups(self):
    "Testing the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types."
    # Cities within a 20km 'donut' that has a 7km-radius 'hole' (so Downtown
    # Houston and Southside Place are excluded, as shown in `test02_dwithin`).
    qs1 = SouthTexasCity.objects.filter(
        point__distance_gte=(self.stx_pnt, D(km=7))
    ).filter(
        point__distance_lte=(self.stx_pnt, D(km=20))
    )
    # The units on SpatiaLite can't be determined from its PROJ.4 string,
    # and Oracle 11 incorrectly thinks the system is not projected.
    if spatialite or oracle:
        dist_qs = (qs1,)
    else:
        qs2 = SouthTexasCityFt.objects.filter(
            point__distance_gte=(self.stx_pnt, D(km=7))
        ).filter(
            point__distance_lte=(self.stx_pnt, D(km=20))
        )
        dist_qs = (qs1, qs2)
    expected = ['Bellaire', 'Pearland', 'West University Place']
    for qs in dist_qs:
        self.assertEqual(expected, self.get_names(qs))

    # Distance queries using Polygons instead of a Point.
    zipcode = SouthTexasZipcode.objects.get(name='77005')
    nearby = SouthTexasZipcode.objects.exclude(name='77005').filter(
        poly__distance_lte=(zipcode.poly, D(m=275)))
    self.assertEqual(['77025', '77401'], self.get_names(nearby))
    # With a little more distance, 77002 should be included as well.
    nearby = SouthTexasZipcode.objects.exclude(name='77005').filter(
        poly__distance_lte=(zipcode.poly, D(m=300)))
    self.assertEqual(['77002', '77025', '77401'], self.get_names(nearby))
@no_spatialite
def test05_geodetic_distance_lookups(self):
"Testing distance lookups on geodetic coordinate systems."
if not oracle:
# Oracle doesn't have this limitation -- PostGIS only allows geodetic
# distance queries from Points to PointFields.
mp = GEOSGeometry('MULTIPOINT(0 0, 5 23)')
self.assertRaises(TypeError,
AustraliaCity.objects.filter(point__distance_lte=(mp, D(km=100))))
# Too many params (4 in this case) should raise a ValueError.
self.assertRaises(ValueError,
AustraliaCity.objects.filter, point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4'))
# Not enough params should raise a ValueError.
self.assertRaises(ValueError,
AustraliaCity.objects.filter, point__distance_lte=('POINT(5 23)',))
# Getting all cities w/in 550 miles of Hobart.
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
# Cities that are either really close or really far from Wollongong --
# and using different units of distance.
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles.
# Normal geodetic distance lookup (uses `distance_sphere` on PostGIS.
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
# instead (we should get the same results b/c accuracy variance won't matter
# in this test case).
if postgis:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | | |
from __future__ import absolute_import, division, print_function
from libtbx import easy_run
small_pdb = '''
CRYST1 67.764 73.374 89.188 90.00 90.00 90.00 P 21 21 21
SCALE1 0.014757 0.000000 0.000000 0.00000
SCALE2 0.000000 0.013629 0.000000 0.00000
SCALE3 0.000000 0.000000 0.011212 0.00000
ATOM 1 CD1 LEU B 72 8.667 -35.470 -5.077 1.00 59.79 C
ANISOU 1 CD1 LEU B 72 6414 8428 7875 523 1603 1997 C
ATOM 2 OD2 ASP B 73 12.724 -38.374 -11.812 1.00 88.34 O
ANISOU 2 OD2 ASP B 73 10138 12571 10858 -103 489 1759 O
ATOM 3 NH2 ARG B 75 14.028 -38.071 -14.297 1.00 37.51 N
ANISOU 3 NH2 ARG B 75 3656 6350 4245 -76 315 1819 N
ATOM 4 CE LYS B 82 17.480 -35.482 -12.533 1.00 53.69 C
ANISOU 4 CE LYS B 82 6220 7461 6719 236 729 1552 C
ATOM 5 NZ LYS B 82 16.277 -34.849 -13.106 1.00 47.71 N
ANISOU 5 NZ LYS B 82 5220 6893 6016 349 794 1830 N
TER
ATOM 6 C6 DG C 5 13.380 -35.454 -16.997 1.00 55.38 C
ANISOU 6 C6 DG C 5 7038 7537 6465 -2349 -2046 1030 C
ATOM 7 O6 DG C 5 12.844 -35.974 -16.003 1.00 55.39 O
ANISOU 7 O6 DG C 5 6490 7702 6853 -2340 -2093 1187 O
ATOM 8 N1 DG C 5 12.788 -34.348 -17.615 1.00 55.50 N
ANISOU 8 N1 DG C 5 7219 7595 6272 -2261 -2308 1153 N
ATOM 9 C5 DG C 6 15.752 -32.181 -16.614 1.00 49.45 C
ANISOU 9 C5 DG C 6 6580 6784 5425 -1809 -942 838 C
ATOM 10 C6 DG C 6 14.837 -32.426 -15.573 1.00 47.58 C
ANISOU 10 C6 DG C 6 5799 6807 5471 -1736 -1178 1041 C
ATOM 11 O6 DG C 6 14.820 -33.371 -14.798 1.00 48.06 O
ANISOU 11 O6 DG C 6 5544 6935 5781 -1777 -1175 1056 O
ATOM 12 N1 DG C 6 13.878 -31.427 -15.468 1.00 46.77 N
ANISOU 12 N1 DG C 6 5555 6852 5363 -1576 -1370 1262 N
ATOM 13 C2 DG C 6 13.795 -30.332 -16.288 1.00 48.97 C
ANISOU 13 C2 DG C 6 6199 7043 5364 -1495 -1408 1290 C
ATOM 14 N2 DG C 6 12.777 -29.485 -16.060 1.00 49.05 N
ANISOU 14 N2 DG C 6 5979 7190 5466 -1303 -1608 1541 N
ATOM 15 N3 DG C 6 14.631 -30.103 -17.301 1.00 51.62 N
ANISOU 15 N3 DG C 6 7118 7128 5367 -1581 -1217 1106 N
ATOM 16 O2 DC C 7 14.973 -27.621 -14.169 1.00 47.56 O
ANISOU 16 O2 DC C 7 5780 6947 5345 -996 -605 1243 O
ATOM 17 C3' DA C 16 37.852 -9.655 4.024 1.00183.91 C
ANISOU 17 C3' DA C 16 24314 16850 28714 -7056 -9451 -4932 C
ATOM 18 O3' DA C 16 38.675 -8.488 3.955 1.00187.48 O
ANISOU 18 O3' DA C 16 24548 17238 29446 -7688 -8925 -5266 O
TER
ATOM 19 C5' DG D 10 11.714 -32.153 -4.599 1.00 52.39 C
ANISOU 19 C5' DG D 10 5649 7656 6599 612 672 1410 C
ATOM 20 C4' DG D 10 11.196 -31.311 -5.747 1.00 52.11 C
ANISOU 20 C4' DG D 10 5418 7630 6751 490 384 1385 C
ATOM 21 O4' DG D 10 12.222 -31.189 -6.760 1.00 49.39 O
ANISOU 21 O4' DG D 10 5315 7201 6250 233 30 1112 O
ATOM 22 C3' DG D 10 9.952 -31.848 -6.451 1.00 54.58 C
ANISOU 22 C3' DG D 10 5206 7973 7559 275 284 1638 C
ATOM 23 O3' DG D 10 9.016 -30.796 -6.576 1.00 58.75 O
ANISOU 23 O3' DG D 10 5490 8567 8267 512 297 1794 O
ATOM 24 C2' DG D 10 10.477 -32.307 -7.809 1.00 51.83 C
ANISOU 24 C2' DG D 10 4978 7529 7186 -143 -178 1447 C
ATOM 25 C1' DG D 10 11.640 -31.352 -8.025 1.00 49.44 C
ANISOU 25 C1' DG D 10 5132 7167 6485 -32 -268 1161 C
ATOM 26 N9 DG D 10 12.684 -31.840 -8.943 1.00 47.12 N
ANISOU 26 N9 DG D 10 5146 6716 6042 -316 -476 956 N
ATOM 27 C8 DG D 10 13.307 -33.070 -8.914 1.00 46.20 C
ANISOU 27 C8 DG D 10 5133 6515 5904 -515 -409 937 C
ATOM 28 N7 DG D 10 14.254 -33.199 -9.818 1.00 44.53 N
ANISOU 28 N7 DG D 10 5246 6119 5554 -679 -524 774 N
ATOM 29 C5 DG D 10 14.280 -31.969 -10.477 1.00 43.30 C
ANISOU 29 C5 DG D 10 5225 5914 5314 -597 -675 667 C
ATOM 30 C6 DG D 10 15.112 -31.506 -11.556 1.00 41.62 C
ANISOU 30 C6 DG D 10 5395 5462 4955 -668 -744 507 C
ATOM 31 O6 DG D 10 15.979 -32.137 -12.188 1.00 41.25 O
ANISOU 31 O6 DG D 10 5636 5194 4842 -819 -674 445 O
ATOM 32 N1 DG D 10 14.797 -30.192 -11.935 1.00 41.60 N
ANISOU 32 N1 DG D 10 5469 5445 4892 -509 -832 459 N
ATOM 33 C2 DG D 10 13.813 -29.418 -11.332 1.00 43.73 C
ANISOU 33 C2 DG D 10 5468 5915 5233 -286 -849 563 C
ATOM 34 N2 DG D 10 13.646 -28.173 -11.808 1.00 43.93 N
ANISOU 34 N2 DG D 10 5658 5867 5167 -113 -891 518 N
ATOM 35 N3 DG D 10 13.041 -29.841 -10.321 1.00 45.36 N
ANISOU 35 N3 DG D 10 5302 6335 5597 -199 -748 732 N
ATOM 36 C4 DG D 10 13.317 -31.120 -9.954 1.00 44.89 C
ANISOU 36 C4 DG D 10 5171 6288 5599 -373 -668 771 C
ATOM 37 P DC D 11 7.464 -31.076 -6.856 1.00 64.47 P
ANISOU 37 P DC D 11 5497 9365 9635 459 284 2173 P
ATOM 38 OP1 DC D 11 6.694 -30.199 -5.955 1.00 68.29 O
ANISOU 38 OP1 DC D 11 5821 9892 10233 961 768 2440 O
ATOM 39 OP2 DC D 11 7.221 -32.542 -6.838 1.00 66.22 O
ANISOU 39 OP2 DC D 11 5442 9536 10183 92 297 2280 O
ATOM 40 O5' DC D 11 7.294 -30.564 -8.369 1.00 64.31 O
ANISOU 40 O5' DC D 11 5423 9353 9659 251 -357 2068 O
ATOM 41 C5' DC D 11 7.627 -29.216 -8.713 1.00 63.28 C
ANISOU 41 C5' DC D 11 5632 9211 9200 523 -437 1929 C
ATOM 42 C4' DC D 11 8.011 -29.086 -10.181 1.00 62.67 C
ANISOU 42 C4' DC D 11 5821 9041 8950 261 -1009 1721 C
ATOM 43 O4' DC D 11 9.255 -29.758 -10.428 1.00 58.49 O
ANISOU 43 O4' DC D 11 5760 8365 8097 -24 -1057 1430 O
ATOM 44 C3' DC D 11 7.016 -29.681 -11.186 1.00 67.45 C
ANISOU 44 C3' DC D 11 6000 9677 9951 -27 -1557 1885 C
ATOM 45 O3' DC D 11 6.256 -28.657 -11.805 1.00 72.04 O
ANISOU 45 O3' DC D 11 6413 10328 10632 230 -1830 2033 O
ATOM 46 C2' DC D 11 7.890 -30.400 -12.212 1.00 64.48 C
ANISOU 46 C2' DC D 11 6150 9097 9252 -428 -1934 1593 C
ATOM 47 C1' DC D 11 9.306 -30.020 -11.805 1.00 58.64 C
ANISOU 47 C1' DC D 11 5982 8251 8047 -301 -1549 1328 C
ATOM 48 N1 DC D 11 10.301 -31.101 -12.087 1.00 54.72 N
ANISOU 48 N1 DC D 11 5853 7578 7362 -630 -1553 1136 N
ATOM 49 C2 DC D 11 11.214 -30.932 -13.141 1.00 52.51 C
ANISOU 49 C2 DC D 11 6165 7054 6734 -727 -1703 915 C
ATOM 50 O2 DC D 11 11.240 -29.851 -13.751 1.00 52.40 O
ANISOU 50 O2 DC D 11 6386 6974 6548 -537 -1809 863 O
ATOM 51 N3 DC D 11 12.076 -31.957 -13.436 1.00 51.07 N
ANISOU 51 N3 DC D 11 6319 6673 6412 -983 -1636 793 N
ATOM 52 C4 DC D 11 12.018 -33.112 -12.738 1.00 51.92 C
ANISOU 52 C4 DC D 11 6202 6832 6694 -1150 -1478 871 C
ATOM 53 N4 DC D 11 12.895 -34.084 -13.036 1.00 50.99 N
ANISOU 53 N4 DC D 11 6456 6495 6424 -1347 -1360 775 N
ATOM 54 C5 DC D 11 11.056 -33.313 -11.693 1.00 53.75 C
ANISOU 54 C5 DC D 11 5848 7303 7273 -1078 -1345 1085 C
ATOM 55 C6 DC D 11 10.213 -32.306 | |
contains:
destination:
description:
- Conceptually, this is the range of IP addresses that a packet originating from the instance
can go to.
- "Allowed values:"
- " * IP address range in CIDR notation. For example: `192.168.1.0/24`"
- " * The `cidrBlock` value for a L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/), if you're
setting up a security list rule for traffic destined for a particular `Service` through
a service gateway. For example: `oci-phx-objectstorage`."
returned: on success
type: string
sample: destination_example
destination_type:
description:
- Type of destination for the rule. The default is `CIDR_BLOCK`.
- "Allowed values:"
- " * `CIDR_BLOCK`: If the rule's `destination` is an IP address range in CIDR notation."
- " * `SERVICE_CIDR_BLOCK`: If the rule's `destination` is the `cidrBlock` value for a
L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/) (the rule is for traffic destined for a
particular `Service` through a service gateway)."
returned: on success
type: string
sample: CIDR_BLOCK
icmp_options:
description:
- "Optional and valid only for ICMP. Use to specify a particular ICMP type and code
as defined in
L(ICMP Parameters,http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml).
If you specify ICMP as the protocol but omit this object, then all ICMP types and
codes are allowed. If you do provide this object, the type is required and the code is optional.
To enable MTU negotiation for ingress internet traffic, make sure to allow type 3 (\\"Destination
Unreachable\\") code 4 (\\"Fragmentation Needed and Don't Fragment was Set\\"). If you need to specify
multiple codes for a single type, create a separate security list rule for each."
returned: on success
type: complex
contains:
code:
description:
- The ICMP code (optional).
returned: on success
type: int
sample: 56
type:
description:
- The ICMP type.
returned: on success
type: int
sample: 56
is_stateless:
description:
- A stateless rule allows traffic in one direction. Remember to add a corresponding
stateless rule in the other direction if you need to support bidirectional traffic. For
example, if egress traffic allows TCP destination port 80, there should be an ingress
rule to allow TCP source port 80. Defaults to false, which means the rule is stateful
and a corresponding rule is not necessary for bidirectional traffic.
returned: on success
type: bool
sample: true
protocol:
description:
- "The transport protocol. Specify either `all` or an IPv4 protocol number as
defined in
L(Protocol Numbers,http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml).
Options are supported only for ICMP (\\"1\\"), TCP (\\"6\\"), and UDP (\\"17\\")."
returned: on success
type: string
sample: protocol_example
tcp_options:
description:
- Optional and valid only for TCP. Use to specify particular destination ports for TCP rules.
If you specify TCP as the protocol but omit this object, then all destination ports are allowed.
returned: on success
type: complex
contains:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
udp_options:
description:
- Optional and valid only for UDP. Use to specify particular destination ports for UDP rules.
If you specify UDP as the protocol but omit this object, then all destination ports are allowed.
returned: on success
type: complex
contains:
destination_port_range:
description:
- An inclusive range of allowed destination ports. Use the same number for the min and max
to indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
source_port_range:
description:
- An inclusive range of allowed source ports. Use the same number for the min and max to
indicate a single port. Defaults to all ports if not specified.
returned: on success
type: complex
contains:
max:
description:
- The maximum port number. Must not be lower than the minimum port number. To specify
a single port number, set both the min and max to the same value.
returned: on success
type: int
sample: 56
min:
description:
- The minimum port number. Must not be greater than the maximum port number.
returned: on success
type: int
sample: 56
description:
description:
- An optional description of your choice for the rule.
returned: on success
type: string
sample: description_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The security list's Oracle Cloud ID (OCID).
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
ingress_security_rules:
description:
- Rules for allowing ingress IP packets.
returned: on success
type: complex
contains:
icmp_options:
description:
- "Optional and valid only for ICMP. Use to specify a particular ICMP type and code
as defined in
L(ICMP Parameters,http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml).
If you specify ICMP as the protocol but omit this object, then all ICMP types and
codes are allowed. If you do provide this object, the type is required and the code is optional.
To enable MTU negotiation for ingress internet traffic, make sure to allow type 3 (\\"Destination
Unreachable\\") code 4 (\\"Fragmentation Needed and Don't Fragment was Set\\"). If you need to specify
multiple codes for a single type, create a separate security list rule for each."
returned: on success
type: complex
contains:
code:
description:
- The ICMP code (optional).
returned: on success
type: int
sample: 56
type:
description:
- The ICMP type.
returned: on success
type: int
sample: 56
is_stateless:
description:
- A stateless rule allows traffic in one direction. Remember to add a corresponding
stateless rule in the other direction if you need to support bidirectional traffic. For
example, if ingress traffic allows TCP destination port 80, there should be an egress
rule to allow TCP source port 80. Defaults to false, which means the rule is stateful
and a corresponding rule is not necessary for bidirectional traffic.
returned: on success
type: bool
sample: true
protocol:
description:
- "The transport protocol. Specify either `all` or an IPv4 protocol number as
defined in
L(Protocol Numbers,http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml).
Options are supported only for ICMP (\\"1\\"), TCP (\\"6\\"), and UDP (\\"17\\")."
returned: on success
type: string
sample: protocol_example
source:
description:
- Conceptually, this is the range of IP addresses that a packet coming into the instance
can come from.
- "Allowed values:"
- " * IP address range in CIDR notation. For example: `192.168.1.0/24`"
- " * The `cidrBlock` value for a L(Service,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/Service/), if you're
setting up a security list rule for traffic coming from a particular `Service` through
a service gateway. For example: `oci-phx-objectstorage`."
returned: on success
| |
cdf['y']
cluster = cdf['cluster']
customdata = cdf['id']
if self.cluster_wf.is_gpu_enabled():
x_data = x_data.to_array()
y_data = y_data.to_array()
cluster = cluster.to_array()
customdata = customdata.to_array()
df_size = cupy.asnumpy(df_size)
df_shape = cupy.asnumpy(df_shape)
scatter_trace = go.Scattergl({
'x': x_data,
'y': y_data,
'text': cluster,
'customdata': customdata,
'name': 'Cluster ' + str(cluster_id),
'mode': 'markers',
'marker': {
'size': df_size,
'symbol': df_shape,
'color': self.cluster_colors[int(cluster_id) % len(self.cluster_colors)],
},
})
if moi_present:
# save to add later. This is to ensure the scatter is on top
scatter_traces.append(scatter_trace)
else:
fig.add_trace(scatter_trace)
for scatter_trace in scatter_traces:
fig.add_trace(scatter_trace)
# Change the title to indicate type of H/W in use
f_color = 'green' if self.cluster_wf.is_gpu_enabled() else 'blue'
fig.update_layout(
showlegend=True, clickmode='event', height=main_fig_height,
title='Clusters', dragmode='select',
title_font_color=f_color,
annotations=[
dict(x=0.5, y=-0.07, showarrow=False, text='x',
xref="paper", yref="paper"),
dict(x=-0.05, y=0.5, showarrow=False, text="y",
textangle=-90, xref="paper", yref="paper")])
del ldf
return fig, northstar_cluster
def start(self, host=None, port=5000):
    """Run the Dash server (blocking) on the given host and port.

    The reloader and debug mode are disabled so the app runs as a single
    process suitable for embedding.
    """
    server_kwargs = {
        'debug': False,
        'use_reloader': False,
        'host': host,
        'port': port,
    }
    return self.app.run_server(**server_kwargs)
def href_ify(self, molregno):
    """Wrap *molregno* in a link to its ChEMBL compound report card (new tab)."""
    url = 'https://www.ebi.ac.uk/chembl/compound_report_card/' + str(molregno)
    return html.A(molregno, href=url, target='_blank')
def construct_molecule_detail(self, selected_points, display_properties,
                              page, pageSize=10, chembl_ids=None):
    """Build an HTML table of molecule details (structure image, SMILES, properties).

    Two entry modes:
      * chembl_ids given  -> fetch properties by ChEMBL ID (no pagination applied);
      * selected_points   -> fetch by molregno for the current page of the
        plot selection (pageSize rows per page).

    Returns (table, all_props) where all_props is a list of
    {'label', 'value'} dicts for the displayable property columns, or
    (None, None) when there is nothing to show.
    """
    # Create Table header
    table_headers = [
        html.Th("Chemical Structure", style={'width': '30%', 'fontSize': '150%', 'text-align': 'center'}),
        html.Th("SMILES", style={'maxWidth': '100px', 'fontSize': '150%', 'text-align': 'center'})]
    for prop in display_properties:
        if prop in PROP_DISP_NAME:
            table_headers.append(html.Th(PROP_DISP_NAME[prop], style={'fontSize': '150%', 'text-align': 'center'}))

    if chembl_ids:
        # One extra column for the ChEMBL ID.
        table_headers.append(html.Th('ChEMBL', style={'fontSize': '150%', 'text-align': 'center'}))
    else:
        # Two spacer columns matching the two action buttons added per row below.
        table_headers.append(html.Th("", style={'width': '10px'}))
        table_headers.append(html.Th("", style={'width': '10px'}))
    prop_recs = [html.Tr(table_headers, style={'background': 'lightgray'})]

    if chembl_ids:
        # NOTE: `fetch_props_by_chemble` is the data-layer's (misspelled) API name.
        props, selected_molecules = self.chem_data.fetch_props_by_chemble(chembl_ids)
    elif selected_points:
        # Only the molregnos on the requested page are fetched.
        selected_molregno = []
        for point in selected_points['points'][((page - 1) * pageSize): page * pageSize]:
            if 'customdata' in point:
                molregid = point['customdata']
                selected_molregno.append(molregid)
        props, selected_molecules = self.chem_data.fetch_props_by_molregno(
            selected_molregno)
    else:
        return None, None

    # Property columns that have a display name registered.
    all_props = []
    for k in props:
        if k in PROP_DISP_NAME:
            all_props.append({"label": PROP_DISP_NAME[k], "value": k})

    for selected_molecule in selected_molecules:
        td = []
        # assumes record layout: index 0 = molregno, index 1 = ChEMBL id -- TODO confirm
        # against the chem_data fetch_* implementations.
        selected_chembl_id = selected_molecule[1]
        smiles = selected_molecule[props.index('canonical_smiles')]

        # Render the 2D structure as an inline base64 PNG via RDKit.
        m = Chem.MolFromSmiles(smiles)
        drawer = Draw.rdMolDraw2D.MolDraw2DCairo(500, 125)
        drawer.SetFontSize(1.0)
        drawer.DrawMolecule(m)
        drawer.FinishDrawing()
        img_binary = "data:image/png;base64," + \
            base64.b64encode(drawer.GetDrawingText()).decode("utf-8")

        td.append(html.Td(html.Img(src=img_binary)))
        td.append(html.Td(smiles, style={'wordWrap': 'break-word'}))
        for key in display_properties:
            if key in PROP_DISP_NAME:
                td.append(html.Td(selected_molecule[props.index(key)],
                                  style={'text-align': 'center'}))

        molregno = selected_molecule[0]
        if chembl_ids:
            td.append(html.Td(selected_chembl_id))
        else:
            # Action buttons; their pattern-matching ids are handled by callbacks
            # elsewhere in this class.
            td.append(html.Td(
                dbc.Button('Add as MoI',
                           id={'role': 'bt_star_candidate',
                               'chemblId': selected_chembl_id,
                               'molregno': str(molregno)
                               },
                           n_clicks=0)
            ))
            td.append(html.Td(
                dbc.Button('Add for Interpolation',
                           id={'role': 'bt_add_candidate',
                               'chemblId': selected_chembl_id,
                               'molregno': str(molregno)
                               },
                           style={'margin-right': '6px'},
                           n_clicks=0)
            ))

        prop_recs.append(html.Tr(td, style={'fontSize': '125%'}))

    return html.Table(prop_recs, style={'width': '100%', 'border': '1px solid lightgray'}), all_props
def constuct_layout(self):
    """Build the app's full Dash layout.

    Structure: a main scatter graph plus a control sidebar (clustering and
    generation tabs), two initially-hidden result sections, a set of hidden
    divs used as callback plumbing, and an error modal.

    NOTE(review): the method name is misspelled ("constuct") but is part of
    the public interface -- renaming would break callers.
    """
    # TODO: avoid calling self.cluster_wf.df_embedding
    fig, _ = self.create_graph(self.cluster_wf.df_embedding)

    return html.Div([
        # Top row: main figure + control sidebar.
        html.Div(className='row', children=[
            dcc.Graph(id='main-figure', figure=fig,
                      className='nine columns',
                      style={'verticalAlign': 'text-top'}),
            html.Div([
                dcc.Markdown("""**Molecule(s) of Interest**"""),
                dcc.Markdown("Please enter ChEMBL ID(s) separated by commas."),
                html.Div(className='row', children=[
                    dcc.Input(id='north_star', type='text', debounce=True, className='nine columns'),
                    dbc.Button('Highlight',
                               id='bt_north_star', n_clicks=0,
                               className='three columns'),
                ], style={'marginLeft': 0, 'marginBottom': 18, }),
                dcc.Tabs([
                    # Tab 1: clustering workflow controls.
                    dcc.Tab(label='Cluster Molecules', children=[
                        dcc.Markdown("""**Select Workflow**""", style={'marginTop': 18, }),
                        html.Div(className='row', children=[
                            html.Div(children=[
                                dcc.Dropdown(id='sl_wf',
                                             multi=False,
                                             options=[{'label': 'GPU KMeans-UMAP - Single and Multiple GPUs',
                                                       'value': 'cuchem.wf.cluster.gpukmeansumap.GpuKmeansUmapHybrid'},
                                                      {'label': 'GPU KMeans-UMAP',
                                                       'value': 'cuchem.wf.cluster.gpukmeansumap.GpuKmeansUmap'},
                                                      {'label': 'GPU KMeans-Random Projection - Single GPU',
                                                       'value': 'cuchem.wf.cluster.gpurandomprojection.GpuWorkflowRandomProjection'},
                                                      {'label': 'CPU KMeans-UMAP',
                                                       'value': 'cuchem.wf.cluster.cpukmeansumap.CpuKmeansUmap'}, ],
                                             value=self.cluster_wf_cls,
                                             clearable=False),
                            ], className='nine columns'),
                            dbc.Button('Apply',
                                       id='bt_apply_wf', n_clicks=0,
                                       className='three columns'),
                        ], style={'marginLeft': 0, 'marginTop': 6, }),
                        dcc.Markdown("""**Cluster Selection**""", style={'marginTop': 18, }),
                        dcc.Markdown("Set number of clusters", style={'marginTop': 12, }),
                        dcc.Input(id='sl_nclusters', value=self.n_clusters),
                        dcc.Markdown("Click a point to select a cluster.", style={'marginTop': 12, }),
                        html.Div(className='row', children=[
                            dcc.Input(id='selected_clusters', type='text', className='nine columns'),
                            dbc.Button('Recluster',
                                       id='bt_recluster_clusters', n_clicks=0,
                                       className='three columns'),
                        ], style={'marginLeft': 0}),
                        dcc.Markdown("""**Selection Points**""", style={'marginTop': 18, }),
                        dcc.Markdown("""Choose the lasso or rectangle tool in the graph's menu
                        bar and then select points in the graph.
                        """, style={'marginTop': 12, }),
                        dbc.Button('Recluster Selection', id='bt_recluster_points', n_clicks=0),
                        html.Div(children=[html.Div(id='selected_point_cnt'), ]),
                        dbc.Button('Reload', id='bt_reset', n_clicks=0, style={'marginLeft': 0, 'marginTop': 18, }),
                    ]),
                    # Tab 2: generative-model controls.
                    dcc.Tab(label='Generate Molecules', children=[
                        dcc.Markdown("""**Select Generative Model**""", style={'marginTop': 18, }),
                        html.Div(children=[
                            dcc.Dropdown(id='sl_generative_wf', multi=False,
                                         options=[{'label': 'CDDD Model',
                                                   'value': 'cuchem.wf.generative.Cddd'},
                                                  {'label': 'MegaMolBART Model',
                                                   'value': 'cuchem.wf.generative.MegatronMolBART'},
                                                  ],
                                         value=self.generative_wf_cls,
                                         clearable=False),
                        ]),
                        dcc.RadioItems(
                            id='rd_generation_type',
                            options=[
                                {'label': 'Interpolate between two molecules', 'value': 'INTERPOLATE'},
                                {'label': 'Sample around one molecule', 'value': 'SAMPLE'},
                            ],
                            value='INTERPOLATE',
                            style={'marginTop': 18},
                            inputStyle={'display': 'inline-block', 'marginLeft': 6, 'marginRight': 6},
                            labelStyle={'display': 'block', 'marginLeft': 6, 'marginRight': 6}
                        ),
                        html.Div(className='row', children=[
                            dcc.Markdown("Number of molecules to generate",
                                         style={'marginLeft': 10, 'marginTop': 12, 'width': '250px'}),
                            dcc.Input(id='n2generate', value=10),
                        ], style={'marginLeft': 0}),
                        html.Div(className='row', children=[
                            dcc.Markdown("Scaled sampling radius (int, start with 1)",
                                         style={'marginLeft': 10, 'marginTop': 12, 'width': '250px'}),
                            dcc.Input(id='scaled_radius', value=1),
                        ], style={'marginLeft': 0, 'marginTop': '6px'}),
                        dcc.Markdown(children="""**Please Select Two**""",
                                     id="mk_selection_msg",
                                     style={'marginTop': 18}),
                        dcc.Checklist(
                            id='ckl_candidate_mol_id',
                            options=[],
                            value=[],
                            inputStyle={'display': 'inline-block', 'marginLeft': 6, 'marginRight': 6},
                            labelStyle={'display': 'block', 'marginLeft': 6, 'marginRight': 6}
                        ),
                        html.Div(className='row', children=[
                            dbc.Button('Generate', id='bt_generate', n_clicks=0, style={'marginRight': 12}),
                            dbc.Button('Reset', id='bt_reset_candidates', n_clicks=0),
                        ], style={'marginLeft': 0}),
                    ]),
                ]),
                html.Div(className='row', children=[
                    html.Label([
                        "Select molecular property for color gradient",
                        dcc.Dropdown(id='sl_prop_gradient', multi=False, clearable=True,
                                     options=[{"label": PROP_DISP_NAME[p], "value": p} for p in IMP_PROPS], ),
                    ], style={'marginTop': 18, 'marginLeft': 18})],
                ),
            ], className='three columns', style={'marginLeft': 18, 'marginTop': 90, 'verticalAlign': 'text-top', }),
        ]),

        # Result sections; both start hidden and are toggled by callbacks.
        html.Div(className='row', children=[
            html.Div(id='section_generated_molecules', children=[
                html.Div(className='row', children=[
                    html.A('Export to SDF',
                           id='download-link',
                           download="rawdata.sdf",
                           href="/cheminfo/downloadSDF",
                           target="_blank",
                           n_clicks=0,
                           style={'fontSize': '150%'}
                           ),
                    html.Div(id='msg_generated_molecules', children=[],
                             style={'color': 'red', 'fontWeight': 'bold', 'marginLeft': 12, 'fontSize': '150%'}),
                ], style={'marginLeft': 0, 'marginBottom': 18, }),
                html.Div(id='table_generated_molecules', children=[], style={'width': '100%'})
            ], style={'display': 'none', 'width': '100%'}),
            html.Div(id='section_selected_molecules', children=[
                html.Div(className='row', children=[
                    html.Div(id='section_display_properties', children=[
                        html.Label([
                            "Select Molecular Properties",
                            dcc.Dropdown(id='sl_mol_props', multi=True,
                                         options=[
                                             {'label': 'alogp', 'value': 'alogp'}],
                                         value=['alogp']),
                        ])],
                        className='nine columns'),
                    # Pagination controls for the selected-molecules table.
                    html.Div(children=[
                        dbc.Button("<", id="bt_page_prev",
                                   style={"height": "25px"}),
                        html.Span(children=1, id='current_page',
                                  style={"paddingLeft": "6px"}),
                        html.Span(children=' of 1', id='total_page',
                                  style={"paddingRight": "6px"}),
                        dbc.Button(">", id="bt_page_next",
                                   style={"height": "25px"})
                    ],
                        className='three columns',
                        style={'verticalAlign': 'text-bottom', 'text-align': 'right'}
                    ),
                ], style={'margin': 12}),
                html.Div(id='tb_selected_molecules', children=[], style={'width': '100%'})
            ], style={'display': 'none', 'width': '100%'}),
        ], style={'margin': 12}),

        # Hidden divs used as callback inputs/outputs (signal plumbing).
        html.Div(id='refresh_main_fig', style={'display': 'none'}),
        html.Div(id='northstar_cluster', style={'display': 'none'}),
        html.Div(id='recluster_error', style={'display': 'none'}),
        html.Div(id='mol_selection_error', style={'display': 'none'}),
        html.Div(id='show_selected_mol', style={'display': 'none'}),
        html.Div(id='show_generated_mol', style={'display': 'none'}),
        html.Div(id='genration_candidates', style={'display': 'none'}),
        html.Div(id='refresh_moi_prop_table', style={'display': 'none'}),
        html.Div(id='interpolation_error', style={'display': 'none'}),

        # Modal used by handle_error to surface error messages.
        html.Div(className='row', children=[
            dbc.Modal([
                dbc.ModalHeader("Error"),
                dbc.ModalBody(
                    html.Div(id='error_msg', style={'color': 'red'}),
                ),
                dbc.ModalFooter(
                    dbc.Button("Close", id="bt_close_err", className="ml-auto")
                ),
            ], id="md_error"),
        ]),
    ])
def handle_error(self, recluster_error, interpolation_error, bt_close_err):
    """Populate or dismiss the error modal.

    Returns a ``(message, is_open)`` pair for the modal; raises
    ``PreventUpdate`` when no relevant event triggered the callback.
    """
    component, event = self._fetch_event_data()

    # The close button clears the message and hides the modal.
    if component == 'bt_close_err' and event == 'n_clicks':
        return '', False

    # Map each error-holding hidden div to the message it carries.
    error_sources = {
        'interpolation_error': interpolation_error,
        'recluster_error': recluster_error,
    }
    if event == 'children' and component in error_sources:
        message = error_sources[component]
        if message is not None:
            return message, True

    raise dash.exceptions.PreventUpdate
@report_ui_error(6)
def handle_molecule_selection(self, mf_selected_data, selected_columns,
                              prev_click, next_click, refresh_moi_prop_table,
                              north_star, current_page, show_selected_mol,
                              sl_prop_gradient):
    """Build the paginated molecule-detail table for the current selection.

    Triggered by plot selection, property-column changes, the pagination
    buttons, or a refresh of the molecule-of-interest property table.
    Returns (table rows, all property options, current page, page-count
    label, show-table counter, no_update); raises PreventUpdate for
    events this callback does not handle.
    """
    comp_id, event_type = self._fetch_event_data()
    module_details = None
    chembl_ids = None
    # Code to support pagination
    if comp_id == 'bt_page_prev' and event_type == 'n_clicks':
        # Already on the first page - nothing to do.
        if current_page == 1:
            raise dash.exceptions.PreventUpdate
        current_page -= 1
    elif comp_id == 'bt_page_next' and event_type == 'n_clicks':
        # Only advance when the selection holds enough points for another page.
        if len(mf_selected_data['points']) < PAGE_SIZE * (current_page + 1):
            raise dash.exceptions.PreventUpdate
        current_page += 1
    elif north_star and \
            ((comp_id == 'refresh_moi_prop_table' and event_type == 'children')):
        # Show the north-star molecules (comma-separated ChEMBL ids).
        chembl_ids = north_star.split(",")
    elif (comp_id == 'main-figure' and event_type == 'selectedData') or \
            (comp_id == 'sl_mol_props' and event_type == 'value'):
        # Fall through: rebuild the table from the current selection.
        pass
    else:
        raise dash.exceptions.PreventUpdate
    # Always include the gradient property so the table can render it.
    if selected_columns and sl_prop_gradient:
        if sl_prop_gradient not in selected_columns:
            selected_columns.append(sl_prop_gradient)
    module_details, all_props = self.construct_molecule_detail(
        mf_selected_data, selected_columns, current_page,
        pageSize=PAGE_SIZE, chembl_ids=chembl_ids)
    if module_details is None and all_props is None:
        return dash.no_update, dash.no_update, dash.no_update, \
            dash.no_update, dash.no_update, dash.no_update,
    if chembl_ids:
        # Explicit id list: pagination does not apply.
        last_page = ''
    else:
        last_page = ' of ' + str(len(mf_selected_data['points']) // PAGE_SIZE)
    # Bump the counter so the client knows to (re)show the table.
    if show_selected_mol is None:
        show_selected_mol = 0
    show_selected_mol += 1
    return module_details, all_props, current_page, last_page, show_selected_mol, dash.no_update
def handle_data_selection(self, mf_click_data, mf_selected_data,
                          bt_cluster_clicks, bt_point_clicks,
                          northstar_cluster,
                          curr_clusters):
    """Track which clusters/points are selected on the main scatter plot.

    Handles four trigger sources: clicking a cluster (toggles it in the
    comma-separated cluster list), lasso/box selection of points, updates
    pushed through the hidden ``northstar_cluster`` div, and the two
    re-cluster buttons.

    Returns:
        tuple: ``(selected_clusters, selected_point_cnt)`` where
        ``selected_clusters`` is a comma-separated cluster-id string and
        ``selected_point_cnt`` a human-readable point count (may be '').

    Raises:
        dash.exceptions.PreventUpdate: for irrelevant events or when no
            selection data is available.
    """
    comp_id, event_type = self._fetch_event_data()
    selected_clusters = ''
    selected_point_cnt = ''

    if comp_id == 'main-figure' and event_type == 'clickData':
        # Event - On selecting cluster on the main scatter plot:
        # toggle the clicked cluster(s) in the current selection.
        clusters = []
        if curr_clusters:
            clusters = list(map(int, curr_clusters.split(",")))

        points = mf_click_data['points']
        for point in points:
            cluster = point['text']
            if cluster in clusters:
                clusters.remove(cluster)
            else:
                clusters.append(cluster)
        selected_clusters = ','.join(map(str, clusters))
    elif comp_id == 'main-figure' and event_type == 'selectedData':
        # Event - On selection on the main scatterplot
        if not mf_selected_data:
            raise dash.exceptions.PreventUpdate
        points = mf_selected_data['points']
        selected_point_cnt = str(len(points)) + ' points selected'
        # FIX: removed a dead computation -- the original also built the set
        # of clusters the selected points belong to but never used it.
        selected_clusters = northstar_cluster
    elif comp_id == 'northstar_cluster' and event_type == 'children':
        selected_clusters = northstar_cluster
    elif (comp_id == 'bt_recluster_clusters' and event_type == 'n_clicks') \
            or (comp_id == 'bt_recluster_points' and event_type == 'n_clicks'):
        selected_clusters = northstar_cluster
    else:
        raise dash.exceptions.PreventUpdate

    return selected_clusters, selected_point_cnt
def handle_mark_north_star(self, bt_north_star_click, north_star):
comp_id, event_type = self._fetch_event_data()
if event_type != 'n_clicks' or dash.callback_context.triggered[0]['value'] == 0:
raise dash.exceptions.PreventUpdate
selected_north_star = []
selected_north_star_mol_reg_id = []
if north_star:
selected_north_star = north_star.split(",")
selected_north_star_mol_reg_id = [
str(row[0]) for row in self.chem_data.fetch_molregno_by_chemblId(selected_north_star)]
comp_detail = json.loads(comp_id)
selected_chembl_id = comp_detail['chemblId']
if selected_chembl_id not in | |
features
"""
# UNCOMMENT var below if standard library combinations is used
#allowed_c = set(allowed_c)
spectrum = zip(mz_list,intensity_list)
dists_mz = []
dists_mz_intens = []
prev_analyzed = set()
#Make deepcopy since we are going to change the spectra!
spec_one = list(copy.deepcopy(spectrum))
spec_two = list(copy.deepcopy(spectrum))
#Iterate over the peaks and measure the distance in m/z between all combinations
for peak_one in spec_one:
if len(spec_two) == 1: continue
spec_two = spec_two[1:]
for peak_two in spec_two:
dist_mz = abs(peak_one[0]-peak_two[0])
if dist_mz > max_dist: break
dists_mz.append(dist_mz)
dists_mz_intens.append(peak_one[1]+peak_two[1])
# UNCOMMENT code below if standard library combinations is used
#for c in combinations(spectrum,2):
# dist_mz = abs(c[0][0]-c[1][0])
# if c[0][0] in prev_analyzed: continue
# if dist_mz > max_dist:
# prev_analyzed.add(c[0][0])
# continue
# if len(allowed_c) != 0:
# if dist_mz not in allowed_c: continue
# dists_mz.append(dist_mz)
# dists_mz_intens.append(c[0][1]+c[1][1])
#Digitize the delta m/z; assign bins for all delta m/z s
index_bins = np.digitize(dists_mz,feats)
#Iterate over assigned bins and sum the intensity for possible existing values
for index,intens in zip(index_bins,dists_mz_intens):
feat_matrix[instance_index,index-1] += intens
feat_matrix[instance_index,feat_matrix.shape[1]-1] += pseudocount
return(feat_matrix)
def read_msp(infile_name, feat_lim_file="",
             sum_feats=False, selected_features=None,
             max_dist=275, step_size=0.005, feat_bins=None,
             top_peaks=50, windowed_mode=False):
    """
    Read an MSP file and put the features into a matrix.

    Parameters
    ----------
    infile_name : str
        Path to the input MSP file.
    feat_lim_file : str
        Old variable: name of a file that lists the allowed features.
    sum_feats : bool
        Old variable used to sum features of the two classes (unused here).
    selected_features : list or None
        Old variable for selected features; use feat_bins.
    max_dist : int
        Maximum distance between peaks.
    step_size : float
        Size between the m/z values for bins.
    feat_bins : list or None
        Bins to use for features; defaults to
        ``np.arange(0, max_dist + step_size, step_size)``.
    top_peaks : int
        Number of peaks to select based on the intensity.
    windowed_mode : bool
        Use windowed mode when selecting the highest-intensity peaks.

    Returns
    -------
    matrix
        A sparse matrix (csr) with the filled-in features.
    list
        Bins used for the features.
    list
        Identifiers of all entries in the MSP file.
    int
        Number of analyzed MSP entries.
    """
    print(" Reading the MSP file ... ")
    # FIX: avoid mutable default arguments (shared across calls).
    if selected_features is None:
        selected_features = []
    if feat_bins is None:
        feat_bins = []
    if len(feat_lim_file) > 0:
        # FIX: close the feature-limit file (the original leaked the handle).
        with open(feat_lim_file) as limits:
            selected_features = [float(f.strip()) for f in limits.readlines()]
    counter = 0
    temp_entry = []
    instance_names = []
    num_instances = num_instances_msp(infile_name)
    if len(feat_bins) == 0:
        feat_bins = np.arange(0, max_dist + step_size, step_size)
    # Initialize the feature matrix; must be lil since csr is slow when mutating values!
    feat_matrix = scipy.sparse.lil_matrix((num_instances, len(feat_bins)), dtype=np.float32)
    # Iterate over the file; a "Name: " header starts a new entry, at which
    # point the buffered previous entry is parsed and folded into the matrix.
    with open(infile_name) as infile:
        for line in infile:
            if line.startswith("Name: "):
                if len(temp_entry) == 0:
                    # Very first entry of the file: just start buffering.
                    temp_entry.append(line.strip())
                    continue
                # For this entry get identifier, m/z, intensities.
                identifier, mz_list, intensity_list = parse_msp(temp_entry, top=top_peaks, windowed_mode=windowed_mode)
                instance_names.append(identifier)
                # Fill in the feature matrix.
                feat_matrix = get_feats(mz_list, intensity_list, feat_matrix, counter, feat_bins,
                                        allowed_c=selected_features, max_dist=max_dist)
                # Make sure the current line is still used for the next entry.
                temp_entry = [line]
                # Best-effort GUI progress update; silently skipped when headless.
                perc = round((float(counter) / num_instances) * 1000, 1)
                if perc.is_integer():
                    try:
                        gui_object.progress_bar.setProperty("value", perc / 10.0)
                    except Exception:
                        pass
                counter += 1
            else:
                temp_entry.append(line.strip())
    # If nothing was buffered (empty input); return what we have.
    if len(temp_entry) == 0:
        return (feat_matrix.asformat("csr"), feat_bins, instance_names, counter)
    # Analyse the last record; since we do not know when the spectra ends.
    identifier, mz_list, intensity_list = parse_msp(temp_entry, top=top_peaks, windowed_mode=windowed_mode)
    instance_names.append(identifier)
    # BUGFIX: pass max_dist for the final record too; the original fell back
    # to get_feats' default here, inconsistently with the in-loop call.
    feat_matrix = get_feats(mz_list, intensity_list, feat_matrix, counter, feat_bins,
                            allowed_c=selected_features, max_dist=max_dist)
    print(" Total number of entries read: %s " % (counter))
    counter += 1
    return (feat_matrix.asformat("csr"), feat_bins, instance_names, counter)
def num_instances_msp(infile_name):
    """
    Count the number of entries in the MSP file.

    Parameters
    ----------
    infile_name : str
        Path to the input MSP file.

    Returns
    -------
    int
        Number of MSP entries (one per "Name: " header line).
    """
    num_instances = 0
    # FIX: use a context manager so the handle is closed (original leaked it).
    with open(infile_name) as infile:
        for line in infile:
            # Every MSP entry starts with a "Name: " header.
            if line.startswith("Name: "):
                num_instances += 1
    return num_instances
def read_mgf(infile_name, feat_lim_file="",
             sum_feats=False, selected_features=None,
             max_dist=275, step_size=0.005, feat_bins=None,
             top_peaks=50, windowed_mode=False):
    """
    Read an MGF file and put the features into a matrix.

    Parameters
    ----------
    infile_name : str
        Path to the input MGF file.
    feat_lim_file : str
        Old variable: name of a file that lists the allowed features.
    sum_feats : bool
        Old variable used to sum features of the two classes (unused here).
    selected_features : list or None
        Old variable for selected features; use feat_bins.
    max_dist : int
        Maximum distance between peaks.
    step_size : float
        Size between the m/z values for bins.
    feat_bins : list or None
        Bins to use for features; defaults to
        ``np.arange(0, max_dist + step_size, step_size)``.
    top_peaks : int
        Number of peaks to select based on the intensity.
    windowed_mode : bool
        Use windowed mode when selecting the highest-intensity peaks.

    Returns
    -------
    matrix
        A sparse matrix (csr) with the filled-in features.
    list
        Bins used for the features.
    list
        Identifiers of all entries in the MGF file.
    int
        Number of analyzed MGF entries.
    """
    # FIX: avoid mutable default arguments (shared across calls).
    if selected_features is None:
        selected_features = []
    if feat_bins is None:
        feat_bins = []
    if len(feat_lim_file) > 0:
        # BUGFIX: the original opened the hard-coded "selected_features.txt"
        # here instead of feat_lim_file (read_msp uses feat_lim_file).
        with open(feat_lim_file) as limits:
            selected_features = [float(f.strip()) for f in limits.readlines()]
    counter = 0
    temp_entry = []
    instance_names = []
    num_instances = num_instances_mgf(infile_name)
    if len(feat_bins) == 0:
        feat_bins = np.arange(0, max_dist + step_size, step_size)
    # Initialize the feature matrix; must be lil since csr is slow when mutating values!
    feat_matrix = scipy.sparse.lil_matrix((num_instances, len(feat_bins)), dtype=np.float32)
    # Iterate over the file; entries are delimited by BEGIN IONS/END IONS.
    with open(infile_name) as infile:
        for line in infile:
            if line.startswith("END IONS"):
                # For this entry get identifier, m/z, intensities.
                identifier, mz_list, intensity_list = parse_mgf(temp_entry, top=top_peaks, windowed_mode=windowed_mode)
                instance_names.append(identifier)
                # Fill in the feature matrix.
                feat_matrix = get_feats(mz_list, intensity_list, feat_matrix, counter, feat_bins,
                                        allowed_c=selected_features, max_dist=max_dist)
                counter += 1
                temp_entry = []
                continue
            if line.startswith("BEGIN IONS"):
                continue
            temp_entry.append(line)
    return (feat_matrix.asformat("csr"), feat_bins, instance_names, counter)
def num_instances_mgf(infile_name):
    """
    Count the number of entries in the MGF file.

    Parameters
    ----------
    infile_name : str
        Path to the input MGF file.

    Returns
    -------
    int
        Number of MGF entries (one per "BEGIN IONS" line).
    """
    num_instances = 0
    # FIX: use a context manager so the handle is closed (original leaked it).
    with open(infile_name) as infile:
        for line in infile:
            # Every MGF entry opens with a "BEGIN IONS" marker.
            if line.startswith("BEGIN IONS"):
                num_instances += 1
    return num_instances
def train_xgb(X, y, n_jobs=32, n_iter_search=20):
    """
    Train an XGBoost model with hyper-parameter optimization.

    Parameters
    ----------
    X : matrix
        Feature matrix; every instance is coupled to the matching y-value.
    y : vector
        Class labels, coupled row-wise to X.
    n_jobs : int
        Number of parallel jobs used by the random search.
    n_iter_search : int
        Number of hyper-parameter settings sampled.

    Returns
    -------
    object
        Trained XGBoost model (best estimator, refit on all data).
    object
        The fitted RandomizedSearchCV object (cross-validation results).
    """
    xgb_handle = xgb.XGBClassifier()

    # Define distributions to sample from for hyper parameter optimization.
    one_to_left = st.beta(10, 1)
    from_zero_positive = st.expon(0, 50)
    param_dist = {
        "n_estimators": st.randint(3, 80),
        "max_depth": st.randint(3, 40),
        "learning_rate": st.uniform(0.05, 0.4),
        "colsample_bytree": one_to_left,
        "subsample": one_to_left,
        "gamma": st.uniform(0, 10),
        "reg_alpha": from_zero_positive,
        "min_child_weight": from_zero_positive
    }

    # FIX: dropped the no-op self-assignment `n_iter_search = n_iter_search`.
    # Stratified 10-fold CV with a fixed seed for reproducibility.
    random_search = RandomizedSearchCV(xgb_handle, param_distributions=param_dist,
                                       n_iter=n_iter_search, verbose=10, scoring="roc_auc",
                                       n_jobs=n_jobs, cv=StratifiedKFold(n_splits=10, shuffle=True, random_state=42))
    random_search_res_xgb = random_search.fit(X, y)

    # Get the best model that was retrained on all data.
    xgb_model = random_search_res_xgb.best_estimator_
    return (xgb_model, random_search_res_xgb)
def train_xgb_lim(X, y, params_dist, out_dir="res/"):
    """
    Cross-validate an XGBoost classifier with fixed hyper-parameters.

    Builds an XGBClassifier from `params_dist` (unpacked as keyword
    arguments), computes out-of-fold class probabilities with
    cross_val_predict, saves a ROC curve to ``<out_dir>roc.png`` and
    returns the cross-validated probability predictions.

    Parameters
    ----------
    X : matrix
        Feature matrix; every instance is coupled to the matching y-value.
    y : vector
        Class labels, coupled row-wise to X.
    params_dist : dict
        The hyperparameters to use.
    out_dir : str
        Directory used for output.

    Returns
    -------
    list
        The cross-validated predictions.
    """
    classifier = xgb.XGBClassifier(**params_dist)
    # Probabilities (not labels) are needed for the ROC curve.
    oof_probabilities = cross_val_predict(classifier, X, y, method="predict_proba")
    roc_path = out_dir + "roc.png"
    plot_roc(X, y, oof_probabilities[:, 1], fname=roc_path)
    return oof_probabilities
def plot_feat_imp(feats_index, feat_names, X, y, top_imp=10, out_dir="res/"):
    """
    Plot the most important features in boxplots, separated on class (y).

    Parameters
    ----------
    feats_index : list
        Indexes of the features coupled to the X matrix.
    feat_names : list
        Names of the features coupled to specific indices.
    X : matrix
        Feature matrix; every instance is coupled to the matching y-value.
    y : vector
        Class labels, coupled row-wise to X.
    top_imp : int
        Plot this number of top features.
    out_dir : str
        Directory used for output; one PNG per plotted feature.

    Returns
    -------
    """
    # FIX: densify once up front; the original called X.todense() (an
    # expensive full conversion) twice inside the loop for every feature.
    X_dense = X.todense()
    positives = X_dense[y == 1, :]
    negatives = X_dense[y == 0, :]
    for fi in feats_index[0:top_imp]:
        # One boxplot per class for this feature.
        plt.boxplot([list(positives[:, fi]), list(negatives[:, fi])])
        plt.title(feat_names[fi])
        plt.savefig(out_dir + "%s_feat_groups.png" % (feat_names[fi]), bbox_inches='tight')
        plt.close()
def plot_train_distr(xgb_model,X,y,out_dir="res/"):
"""
Plot probability distributions for the input matrix.
Parameters
----------
xgb_model : object
Trained XGBoost model
X : matrix
Matrix with all the features, every instance should be coupled to the y-value
y : vector
Vector with the class, every value should be coupled to an x-vector with features
out_dir : str
String value that points to a directory used for output
Returns
-------
"""
#Get the predicted probabilities | |
color, 'alpha': 0.7, 'label': 'beginning'},
# 'axvline': {'x': 0, 'linestyle': 'dashed', 'color': 'gray', 'linewidth': 0.5},
# 'axhline': {'y': 0, 'linestyle': 'dashed', 'color': 'gray', 'linewidth': 0.5}
}
if it == avail_its[-1]:
plot_dic['axvline'] = {'x': 0, 'linestyle': 'dashed', 'color': 'gray', 'linewidth': 0.5}
plot_dic['axhline'] = {'y': 0, 'linestyle': 'dashed', 'color': 'gray', 'linewidth': 0.5}
o_plot.set_plot_dics.append(plot_dic)
#
o_plot.main()
else:
print_colored_string(["task:", "plot dens modes phase", "it:", str(it), "t:", "{:.1f}".format(t),
"fname:", plotfname, "mmodes:", "[1]", ":", "skipping"],
["blue", "green", "blue", "green", "blue", "green", "blue",
"green", "blue", "green", "", "blue"])
except IOError:
print_colored_string(["task:", "plot dens modes phase", "it:", str(it), "t:", "{:.1f}".format(t),
"fname:", plotfname, "mmodes:", "[1]", ":", "missing file"],
["blue", "green", "blue", "green", "blue", "green", "blue",
"green", "blue", "green", "", "red"])
except KeyboardInterrupt:
exit(1)
except:
print_colored_string(["task:", "plot dens modes phase", "it:", str(it), "t:", "{:.1f}".format(t),
"fname:", plotfname, "mmodes:", "[1]", ":", "failed"],
["blue", "green", "blue", "green", "blue", "green", "blue",
"green", "blue", "green", "", "red"])
def plot_density_modes(dmclass, resdir, rewrite=False):
    """Plot the m=1 and m=2 density-mode magnitudes versus time.

    Reads the precomputed density-modes file in `resdir` through `dmclass`
    and writes a single log-scale line plot next to it. The task is
    skipped when the figure already exists and `rewrite` is False.
    """
    plotfname = __d3densitymodesfame__.replace(".h5", ".png")
    path = resdir
    # fpath = path + fname
    # Point the density-modes reader at the data file inside `resdir`.
    dmclass.gen_set['fname'] = path + __d3densitymodesfame__  # "density_modes_lap15.h5"
    fpath = path + plotfname
    # Configure the shared plotting driver.
    o_plot = PLOT_MANY_TASKS()
    o_plot.gen_set["figdir"] = path
    o_plot.gen_set["type"] = "cartesian"
    o_plot.gen_set["figsize"] = (4.2, 3.6)  # <->, |]
    o_plot.gen_set["figname"] = plotfname
    o_plot.gen_set["sharex"] = False
    o_plot.gen_set["sharey"] = False
    o_plot.set_plot_dics = []
    # o_plot.set_plot_dics.append(densmode_m0)
    try:
        # Recompute when asked to rewrite, or when the figure is missing.
        if (os.path.isfile(fpath) and rewrite) or not os.path.isfile(fpath):
            if os.path.isfile(fpath): os.remove(fpath)
            print_colored_string(["task:", "plot dens modes", "fname:", plotfname, "mmodes:", "[1,2]", ":", "computing"],
                                 ["blue", "green", "blue", "green", "blue", "green", "", "green"])
            #
            # m=1 mode magnitude over time.
            mags = dmclass.get_data(1, "int_phi_r")
            times = dmclass.get_grid("times")
            densmode_m1 = {
                'task': 'line', 'ptype': 'cartesian',
                'xarr': times * 1e3, 'yarr': mags,
                'position': (1, 1),
                'v_n_x': 'times', 'v_n_y': 'int_phi_r abs',
                'mode': 1, 'norm_to_m': 0,
                'ls': '-', 'color': 'black', 'lw': 1., 'ds': 'default', 'alpha': 1.,
                'label': r'$m=1$', 'ylabel': r'$C_m/C_0$ Magnitude', 'xlabel': r'time [ms]',
                'xmin': None, 'xmax': None, 'ymin': 1e-4, 'ymax': 1e0,
                'xscale': None, 'yscale': 'log', 'legend': {},
                'fancyticks': True, 'minorticks': True,
                'fontsize': 14,
                'labelsize': 14,
            }
            # m=2 mode magnitude over time (dotted line, carries the legend).
            mags = dmclass.get_data(2, "int_phi_r")
            times = dmclass.get_grid("times")
            densmode_m2 = {
                'task': 'line', 'ptype': 'cartesian',
                'xarr': times * 1e3, 'yarr': mags,
                'position': (1, 1),
                'v_n_x': 'times', 'v_n_y': 'int_phi_r abs',
                'mode': 2, 'norm_to_m': 0,
                'ls': ':', 'color': 'black', 'lw': 0.8, 'ds': 'default', 'alpha': 1.,
                'label': r'$m=2$', 'ylabel': r'$C_m/C_0$ Magnitude', 'xlabel': r'time [ms]',
                'xmin': None, 'xmax': None, 'ymin': 1e-4, 'ymax': 1e0,
                'xscale': None, 'yscale': 'log',
                'fancyticks': True, 'minorticks': True,
                'legend': {'loc': 'best', 'ncol': 1, 'fontsize': 14},
                'fontsize': 14,
                'labelsize': 14,
            }
            #
            o_plot.set_plot_dics.append(densmode_m1)
            o_plot.set_plot_dics.append(densmode_m2)
            o_plot.main()
        else:
            print_colored_string(["task:", "plot dens modes", "fname:", plotfname, "mmodes:", "[1,2]", ":", "skipping"],
                                 ["blue", "green", "blue", "green", "blue", "green", "", "blue"])
    except IOError:
        print_colored_string(["task:", "plot dens modes", "fname:", plotfname, "mmodes:", "[1,2]", ":", "missing input efile"],
                             ["blue", "green", "blue", "green", "blue", "green", "", "red"])
    except KeyboardInterrupt:
        exit(1)
    except:
        # Catch-all so one failing plot does not abort the whole pipeline.
        print_colored_string(["task:", "plot dens modes", "fname:", plotfname, "mmodes:", "[1,2]", ":", "failed"],
                             ["blue", "green", "blue", "green", "blue", "green", "", "red"])
def collate_mass(ittime, masks, resdir, rewrite=False):
    """Collate per-iteration disk-mass files into one table per mask.

    For every mask, reads ``<resdir><it>/<mask>/mass.txt`` for each
    iteration folder found in `resdir` and writes
    ``<resdir>/<mask>_mass.txt`` with columns: iteration, time [s],
    mass [Msun]. Iterations whose file is missing get a NaN mass.

    Parameters
    ----------
    ittime : object
        Provides ``get_time_for_it(it, "profiles", "prof")`` -> time [s].
    masks : iterable of str
        Mask sub-directory names to collate.
    resdir : str
        Root of the per-iteration 3D results.
        NOTE(review): the per-iteration path is built without a '/'
        separator, so `resdir` presumably ends with '/' -- confirm.
    rewrite : bool
        Overwrite an existing collated file when True.
    """
    parfilepath = resdir
    for mask in masks:
        ffpath = parfilepath + '/' + mask + '_' + 'mass.txt'
        try:
            if (os.path.isfile(ffpath) and rewrite) or not os.path.isfile(ffpath):
                if os.path.isfile(ffpath):
                    os.remove(ffpath)
                print_colored_string(["task:", "collate", ":", "saving"],
                                     ["blue", "green", "", "green"])
                list_iterations = get_list_iterations_from_res_3d(resdir)
                it_arr = []
                time_arr = []
                data_arr = []
                for it in list_iterations:
                    fpath = parfilepath + str(int(it)) + '/' + mask + '/' + "mass.txt"
                    time_ = ittime.get_time_for_it(it, "profiles", "prof")
                    time_arr.append(time_)
                    it_arr.append(it)
                    if os.path.isfile(fpath):
                        # BUGFIX: np.float is deprecated and removed in
                        # NumPy >= 1.24; the builtin float is equivalent here.
                        data_ = float(np.loadtxt(fpath, unpack=True))
                        data_arr.append(data_)
                    else:
                        data_arr.append(np.nan)
                it_arr = np.array(it_arr, dtype=int)
                time_arr = np.array(time_arr, dtype=float)
                data_arr = np.array(data_arr, dtype=float)
                if len(it_arr) > 0:
                    # One row per iteration: it, time, mass.
                    x = np.vstack((it_arr, time_arr, data_arr)).T
                    np.savetxt(ffpath, x, header="1:it 2:time[s] 3:mass[Msun]", fmt='%i %0.5f %0.5f')
                else:
                    Printcolor.yellow("No disk mass found")
                # (A large commented-out plotting block used to live here;
                # recover it from version control if it is ever needed.)
            else:
                print_colored_string(["task:", "collate", ":", "skipping"],
                                     ["blue", "green", "", "blue"])
        except IOError:
            print_colored_string(["task:", "collate", ":", "IOError"],
                                 ["blue", "green", "", "red"])
        except KeyboardInterrupt:
            exit(1)
        except Exception:
            # Report but keep collating the remaining masks.
            print_colored_string(["task:", "collate", ":", "failed"],
                                 ["blue", "green", "", "red"])
# _, itnuprofs, timenuprofs = self.get_ittime("profiles", "prof")
# _, itnuprofs, timenuprofs = self.get_ittime("nuprofiles", "nuprof")
#
# fpath = self.profpath + str(it) + self.nuprof_name + ".h5"
#
#
# # for corr plot
# self.list_iterations = Paths.get_list_iterations_from_res_3d(resdir)
# listdir = self.set_rootdir + str(it)
#
# fpath = resdir + "density_modes.h5",
#
# path = self.set_rootdir + str(it) + '/'
# fname = "profile" + '.' + plane + ".h5"
# fpath = path + fname
# broken
def get_list_iterations_from_res_3d(prodfir):
    """
    Check `prodfir` for purely numeric sub-folders (iterations) and return
    their sorted list.

    Parameters
    ----------
    prodfir : str
        Directory containing one sub-folder per 3D-output iteration.

    Returns
    -------
    list of int
        Sorted iteration numbers.

    Raises
    ------
    IOError
        If `prodfir` does not exist.
    NameError
        If `prodfir` contains no entries at all.
    ValueError
        If no purely numeric folder names were found.
    """
    if not os.path.isdir(prodfir):
        raise IOError("no {} directory found".format(prodfir))
    itdirs = os.listdir(prodfir)
    if len(itdirs) == 0:
        raise NameError("No iteration-folders found in the {}".format(prodfir))
    # Keep only names that are pure (optionally signed) integers; the
    # original wrapped this in a redundant np.array/np.sort/list round-trip.
    list_iterations = sorted(int(itdir) for itdir in itdirs if re.match("^[-+]?[0-9]+$", itdir))
    if len(list_iterations) == 0:
        raise ValueError("Error extracting the iterations")
    return list_iterations
def compute_methods_with_interpolation(
        glob_outdir,
        glob_fpaths,
        glob_its,
        glob_times,
        glob_tasklist,
        glob_masks,
        glob_symmetry,
        glob_overwrite
):
    """Run the profile tasks that need data interpolated onto an auxiliary grid.

    Dispatches on `glob_tasklist`: "mjenclosed" (cylindrical grid),
    "vtk" (cartesian grid + VTK export) and "densmodeint" (polar grid
    density modes). No masks are applied by these methods.

    NOTE(review): `glob_v_ns` is referenced below but is not a parameter --
    presumably a module-level global; confirm before refactoring.
    """
    outdir = glob_outdir
    outdir += __rootoutdir__
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    # methods that required inteprolation [No masks used!]
    if "mjenclosed" in glob_tasklist:
        # Cylindrical grid for enclosed mass / angular-momentum integrals.
        new_type = {'type': 'cyl', 'n_r': 75, 'n_phi': 64, 'n_z': 100}
        o_grid = CYLINDRICAL_GRID(grid_info=new_type)
        o_d3int = INTMETHODS_STORE(grid_object=o_grid, flist=glob_fpaths,
                                   itlist=glob_its, timesteplist=glob_times, symmetry=glob_symmetry)
        # NOTE(review): rewrite=False ignores glob_overwrite here -- confirm intended.
        d3_interpolate_mjenclosed(o_d3int, glob_its, glob_masks, outdir, rewrite=False)
    if "vtk" in glob_tasklist:
        # Cartesian grid for VTK export of the interpolated variables.
        o_grid = CARTESIAN_GRID()
        o_d3int = INTMETHODS_STORE(grid_object=o_grid, flist=glob_fpaths,
                                   itlist=glob_its, timesteplist=glob_times, symmetry=glob_symmetry)
        d3_int_data_to_vtk(o_d3int, glob_its, glob_v_ns, outdir, rewrite=False)
        for it in glob_its:
            sys.stdout.flush()  # it, v_n_s, outdir, overwrite=False, private_dir="vtk"
            o_d3int.save_vtk_file(it, glob_v_ns, outdir=outdir, overwrite=False, private_dir="vtk/")
            sys.stdout.flush()
    if "densmodeint" in glob_tasklist:
        # Polar grid for the integral density-mode decomposition.
        o_grid = POLAR_GRID()
        o_d3int = INTMETHODS_STORE(grid_object=o_grid, flist=glob_fpaths,
                                   itlist=glob_its, timesteplist=glob_times, symmetry=glob_symmetry)
        o_d3int.enforce_xy_grid = True
        d3_dens_modes_int(o_d3int, outdir=outdir, rewrite=glob_overwrite)
def compute_methods_with_original_data(
        glob_outdir,
        glob_fpaths,
        glob_its,
        glob_times,
        glob_tasklist,
        glob_masks,
        glob_symmetry,
        glob_overwrite
):
    """Run the profile tasks that work on the original (non-interpolated) data.

    Per iteration and mask, dispatches "slice", "corr", "hist" and "mass"
    tasks, freeing the per-iteration storage afterwards; then runs the
    file-collating tasks ("collate_mass").

    NOTE(review): `glob_v_ns` and `glob_planes` are referenced but are not
    parameters -- presumably module-level globals; confirm before refactoring.
    """
    outdir = glob_outdir
    outdir += __rootoutdir__
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    # Methods that do not require interpolation [masks select reflevels/lapse].
    # BUGFIX: timesteplist was passed glob_its (iteration numbers) instead of
    # glob_times, inconsistent with compute_methods_with_interpolation.
    d3corr_class = MAINMETHODS_STORE(flist=glob_fpaths, itlist=glob_its,
                                     timesteplist=glob_times, symmetry=glob_symmetry)
    # Tasks for each iteration.
    for it in glob_its:
        _outdir = outdir + str(it) + '/'
        if not os.path.isdir(_outdir):
            os.mkdir(_outdir)
        for task in glob_tasklist:
            if task == "slice":
                d3_to_d2_slice_for_it(it, d3corr_class, glob_planes, _outdir, rewrite=glob_overwrite)
            for mask in glob_masks:
                __outdir = _outdir + mask + '/'
                if not os.path.isdir(__outdir):
                    os.mkdir(__outdir)
                if task == "corr":
                    d3_corr_for_it(it, d3corr_class, mask, glob_v_ns, __outdir, rewrite=glob_overwrite)
                if task == "hist":
                    d3_hist_for_it(it, d3corr_class, mask, glob_v_ns, __outdir, rewrite=glob_overwrite)
                if task == "mass":
                    d3_mass_for_it(it, d3corr_class, mask, __outdir, rewrite=glob_overwrite)
        # Free per-iteration storage to bound memory use across iterations.
        d3corr_class.delete_for_it(it=it, except_v_ns=[], rm_masks=True, rm_comp=True, rm_prof=False)
        sys.stdout.flush()
        print("\n")
    # Collate tasks.
    for task in glob_tasklist:
        if task == "collate_mass":
            # NOTE(review): `ittime` is not defined in this function's scope;
            # presumably a module-level object -- confirm before relying on it.
            collate_mass(ittime, glob_masks, outdir, rewrite=glob_overwrite)
# methods that require all iterations | |
from paida.paida_core.PAbsorber import *
from paida.paida_core.IFunctionCatalog import *
from paida.paida_core.IFunction import *
from paida.paida_core.PUtilities import _Shlex
from paida.paida_core.PExceptions import *
import os
class _EOFException(Exception):
    """Internal control-flow exception -- presumably raised to signal
    end-of-input during script parsing (the raising site is outside this chunk)."""
    pass
class _ItemObject:
_zeros = ['0.0', '-0.0', '(0.0)', '-(0.0)']
def __init__(self, name0, name1):
self._0 = name0
self._1 = name1
def __pos__(self):
return _ItemObject(self._0, self._1)
def __neg__(self):
_0 = '-(%s)' % (self._0)
_1 = '-(%s)' % (self._1)
return _ItemObject(_0, _1)
def __add__(self, other):
_0 = '(%s + %s)' % (self._0, other._0)
if self._1 in self._zeros:
if other._1 in self._zeros:
_1 = '0.0'
else:
_1 = '(%s)' % (other._1)
else:
if other._1 in self._zeros:
_1 = '(%s)' % (self._1)
else:
_1 = '(%s + %s)' % (self._1, other._1)
return _ItemObject(_0, _1)
def __sub__(self, other):
_0 = '(%s - %s)' % (self._0, other._0)
if self._1 in self._zeros:
if other._1 in self._zeros:
_1 = '0.0'
else:
_1 = '-(%s)' % (other._1)
else:
if other._1 in self._zeros:
_1 = '(%s)' % (self._1)
else:
_1 = '(%s - %s)' % (self._1, other._1)
return _ItemObject(_0, _1)
def __mul__(self, other):
_0 = '(%s * %s)' % (self._0, other._0)
if self._1 in self._zeros:
if other._1 in self._zeros:
_1 = '0.0'
else:
_1 = '(%s * %s)' % (self._0, other._1)
else:
if other._1 in self._zeros:
_1 = '(%s * %s)' % (self._1, other._0)
else:
_1 = '(%s * %s + %s * %s)' % (self._1, other._0, self._0, other._1)
return _ItemObject(_0, _1)
def __div__(self, other):
_0 = '(%s / %s)' % (self._0, other._0)
if self._1 in self._zeros:
if other._1 in self._zeros:
_1 = '0.0'
else:
_1 = '(-(%s * %s) / (%s)**2)' % (self._0, other._1, other._0)
else:
if other._1 in self._zeros:
_1 = '(%s / %s)' % (self._1, other._0)
else:
_1 = '(%s / %s - %s * %s / (%s)**2)' % (self._1, other._0, self._0, other._1, other._0)
return _ItemObject(_0, _1)
def __pow__(self, other):
_0 = '(%(s0)s**%(o0)s)' % {'s0': self._0, 'o0': other._0}
if self._1 in self._zeros:
if other._1 in self._zeros:
_1 = '0.0'
else:
_1 = '(%(s0)s**%(o0)s * log(%(s0)s) * %(o1)s)' % {'s0': self._0, 's1': self._1, 'o0': other._0, 'o1': other._1}
else:
if other._1 in self._zeros:
_1 = '(%(o0)s * %(s0)s**(%(o0)s - 1) * %(s1)s)' % {'s0': self._0, 's1': self._1, 'o0': other._0, 'o1': other._1}
else:
_1 = '(%(s0)s**%(o0)s * log(%(s0)s) * %(o1)s + %(o0)s * %(s0)s**(%(o0)s - 1) * %(s1)s)' % {'s0': self._0, 's1': self._1, 'o0': other._0, 'o1': other._1}
return _ItemObject(_0, _1)
class IFunctionFactory:
_zeros = ['0.0', '-0.0', '(0.0)', '-(0.0)']
    def __init__(self, tree):
        """Seed the function catalog with the predefined fit models.

        Registers 'G' (Gaussian), 'GG' (double Gaussian), 'E'
        (exponential) and 'EE' (double exponential).  For each model the
        gradExpr/grad2Expr keyword arguments are comma-joined lists of
        analytic first/second derivative expressions with respect to the
        parameters, flattened row-major (grad2Expr is the full Hessian,
        n*n entries for n parameters).
        """
        self._tree = tree
        ### Make a catalog.
        self._catalog = IFunctionCatalog()
        ### Predefined gaussian.
        G = self.createFunctionFromScript('G',
                                          1,
                                          'amplitude * exp(-0.5 * ((x[0] - mean) / sigma)**2)',
                                          'amplitude,mean,sigma',
                                          'Gaussian',
                                          gradExpr = ','.join(['exp(-0.5 * ((x[0] - mean) / sigma)**2)',
                                                               'amplitude * exp(-0.5 * ((x[0] - mean) / sigma)**2) * (x[0] - mean) / sigma**2',
                                                               'amplitude * exp(-0.5 * ((x[0] - mean) / sigma)**2) * (x[0] - mean)**2 / sigma**3']),
                                          grad2Expr = ','.join(['0.0',
                                                                'exp(-0.5 * ((x[0] - mean) / sigma)**2) * (x[0] - mean) / sigma**2',
                                                                'exp(-0.5 * ((x[0] - mean) / sigma)**2) * (x[0] - mean)**2 / sigma**3',
                                                                'exp(-0.5 * ((x[0] - mean) / sigma)**2) * (x[0] - mean) / sigma**2',
                                                                'amplitude * exp(-0.5 * ((x[0] - mean) / sigma)**2) * (((x[0] - mean) / sigma)**2 - 1.0) / sigma**2',
                                                                'amplitude * exp(-0.5 * ((x[0] - mean) / sigma)**2) * ((x[0] - mean)**3 / sigma**5 - 2.0 * (x[0] - mean) / sigma**3)',
                                                                'exp(-0.5 * ((x[0] - mean) / sigma)**2) * (x[0] - mean)**2 / sigma**3',
                                                                'amplitude * exp(-0.5 * ((x[0] - mean) / sigma)**2) * ((x[0] - mean)**3 / sigma**5 - 2.0 * (x[0] - mean) / sigma**3)',
                                                                'amplitude * exp(-0.5 * ((x[0] - mean) / sigma)**2) * ((x[0] - mean)**4 / sigma**6 - 3.0 * (x[0] - mean)**2 / sigma**4)']),
                                          inner = True)
        if self._catalog.add('G', G) == False:
            ### Catalogging failed.
            # NOTE(review): RuntimeException is not a Python builtin --
            # presumably defined elsewhere in this module; confirm, else
            # this raises NameError instead.
            raise RuntimeException()
        ### Predefined double gaussian.
        GG = self.createFunctionFromScript('GG',
                                           1,
                                           'amplitude0 * exp(-0.5 * ((x[0] - mean0) / sigma0)**2) + amplitude1 * exp(-0.5 * ((x[0] - mean1) / sigma1)**2)',
                                           'amplitude0,mean0,sigma0,amplitude1,mean1,sigma1',
                                           'Double Gaussian',
                                           gradExpr = ','.join(['exp(-0.5 * ((x[0] - mean0) / sigma0)**2)',
                                                                'amplitude0 * exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * (x[0] - mean0) / sigma0**2',
                                                                'amplitude0 * exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * (x[0] - mean0)**2 / sigma0**3',
                                                                'exp(-0.5 * ((x[0] - mean1) / sigma1)**2)',
                                                                'amplitude1 * exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * (x[0] - mean1) / sigma1**2',
                                                                'amplitude1 * exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * (x[0] - mean1)**2 / sigma1**3']),
                                           # 6x6 Hessian, row-major; the two Gaussians share no
                                           # parameters, so the cross blocks are all zero.
                                           grad2Expr = ','.join(['0.0',
                                                                 'exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * (x[0] - mean0) / sigma0**2',
                                                                 'exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * (x[0] - mean0)**2 / sigma0**3',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 'exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * (x[0] - mean0) / sigma0**2',
                                                                 'amplitude0 * exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * (((x[0] - mean0) / sigma0)**2 - 1.0) / sigma0**2',
                                                                 'amplitude0 * exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * ((x[0] - mean0)**3 / sigma0**5 - 2.0 * (x[0] - mean0) / sigma0**3)',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 'exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * (x[0] - mean0)**2 / sigma0**3',
                                                                 'amplitude0 * exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * ((x[0] - mean0)**3 / sigma0**5 - 2.0 * (x[0] - mean0) / sigma0**3)',
                                                                 'amplitude0 * exp(-0.5 * ((x[0] - mean0) / sigma0)**2) * ((x[0] - mean0)**4 / sigma0**6 - 3.0 * (x[0] - mean0)**2 / sigma0**4)',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 'exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * (x[0] - mean1) / sigma1**2',
                                                                 'exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * (x[0] - mean1)**2 / sigma1**3',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 'exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * (x[0] - mean1) / sigma1**2',
                                                                 'amplitude1 * exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * (((x[0] - mean1) / sigma1)**2 - 1.0) / sigma1**2',
                                                                 'amplitude1 * exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * ((x[0] - mean1)**3 / sigma1**5 - 2.0 * (x[0] - mean1) / sigma1**3)',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 'exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * (x[0] - mean1)**2 / sigma1**3',
                                                                 'amplitude1 * exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * ((x[0] - mean1)**3 / sigma1**5 - 2.0 * (x[0] - mean1) / sigma1**3)',
                                                                 'amplitude1 * exp(-0.5 * ((x[0] - mean1) / sigma1)**2) * ((x[0] - mean1)**4 / sigma1**6 - 3.0 * (x[0] - mean1)**2 / sigma1**4)']),
                                           inner = True)
        if self._catalog.add('GG', GG) == False:
            ### Catalogging failed.
            raise RuntimeException()
        ### Predefined exponential.
        E = self.createFunctionFromScript('E',
                                          1,
                                          'amplitude * exp(exponent * x[0])', 'amplitude,exponent',
                                          'Exponential',
                                          gradExpr = ','.join(['exp(exponent * x[0])',
                                                               'amplitude * exp(exponent * x[0]) * x[0]']),
                                          grad2Expr = ','.join(['0.0',
                                                                'exp(exponent * x[0]) * x[0]',
                                                                'exp(exponent * x[0]) * x[0]',
                                                                'amplitude * exp(exponent * x[0]) * x[0]**2']),
                                          inner = True)
        if self._catalog.add('E', E) == False:
            ### Catalogging failed.
            raise RuntimeException()
        ### Predefined double exponential.
        EE = self.createFunctionFromScript('EE',
                                           1,
                                           'amplitude0 * exp(exponent0 * x[0]) + amplitude1 * exp(exponent1 * x[0])',
                                           'amplitude0,exponent0,amplitude1,exponent1',
                                           'Double Exponential',
                                           gradExpr = ','.join(['exp(exponent0 * x[0])',
                                                                'amplitude0 * exp(exponent0 * x[0]) * x[0]',
                                                                'exp(exponent1 * x[0])',
                                                                'amplitude1 * exp(exponent1 * x[0]) * x[0]']),
                                           # 4x4 Hessian, row-major; cross blocks zero as above.
                                           grad2Expr = ','.join(['0.0',
                                                                 'exp(exponent0 * x[0]) * x[0]',
                                                                 '0.0',
                                                                 '0.0',
                                                                 'exp(exponent0 * x[0]) * x[0]',
                                                                 'amplitude0 * exp(exponent0 * x[0]) * x[0]**2',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 '0.0',
                                                                 'exp(exponent1 * x[0]) * x[0]',
                                                                 '0.0',
                                                                 '0.0',
                                                                 'exp(exponent1 * x[0]) * x[0]',
                                                                 'amplitude1 * exp(exponent1 * x[0]) * x[0]**2']),
                                           inner = True)
        if self._catalog.add('EE', EE) == False:
            ### Catalogging failed.
            raise RuntimeException()
### Polynomial will be created on demand by calling self._createPolynomial().
### Any degree of polynomial will be accepted!
def _createPolynomial(self, degree, parameterNamePrefix, inner):
name = 'P%d' % degree
parameters = 'p0'
expression = 'p0'
gradExpr = '1.0'
grad2Expr = '0.0'
for i in range(degree):
ip1 = i + 1
parameters += ',p%d' % ip1
expression += ' + p%d * x[0]**%d' % (ip1, ip1)
gradExpr += ',x[0]**%d' % ip1
grad2Expr += ',0.0' * ((degree + 1)**2 - 1)
function = self.createFunctionFromScript(name, 1, expression, parameters, 'Polynomial%d' % degree, gradExpr = gradExpr, grad2Expr = grad2Expr, parameterNamePrefix = parameterNamePrefix, inner = True)
if inner == False:
if self._catalog.add(name, function) == False:
### Catalogging failed.
raise RuntimeException()
return function
def _sin(self, data):
if data._0 in self._zeros:
_0 = '0.0'
_1 = '0.0'
elif data._1 in self._zeros:
_0 = 'sin(%s)' % (data._0)
_1 = '0.0'
elif data._2 in self._zeros:
_0 = 'sin(%s)' % (data._0)
_1 = '(cos(%s) * %s)' % (data._0, data._1)
else:
_0 = 'sin(%s)' % (data._0)
_1 = '(cos(%s) * %s)' % (data._0, data._1)
return _ItemObject(_0, _1)
def _cos(self, data):
if data._0 in self._zeros:
_0 = '1.0'
_1 = '0.0'
elif data._1 in self._zeros:
_0 = 'cos(%s)' % (data._0)
_1 = '0.0'
elif data._2 in self._zeros:
_0 = 'cos(%s)' % (data._0)
_1 = '(-sin(%s) * %s)' % (data._0, data._1)
else:
_0 = 'cos(%s)' % (data._0)
_1 = '(-sin(%s) * %s)' % (data._0, data._1)
return _ItemObject(_0, _1)
def _tan(self, data):
if data._0 in self._zeros:
_0 = '0.0'
_1 = '0.0'
elif data._1 in self._zeros:
_0 = 'tan(%s)' % (data._0)
_1 = '0.0'
elif data._1 in self._zeros:
_0 = 'tan(%s)' % (data._0)
_1 = '(%s / cos(%s)**2)' % (data._1, data._0)
else:
_0 = 'tan(%s)' % (data._0)
_1 = '(%s / cos(%s)**2)' % (data._1, data._0)
return _ItemObject(_0, _1)
def _exp(self, data):
if data._0 in self._zeros:
_0 = '1.0'
_1 = '0.0'
elif data._1 in self._zeros:
_0 = 'exp(%s)' % (data._0)
_1 = '0.0'
elif data._1 in self._zeros:
_0 = 'exp(%s)' % (data._0)
_1 = '(exp(%s) * %s)' % (data._0, data._1)
else:
_0 = 'exp(%s)' % (data._0)
_1 = '(exp(%s) * %s)' % (data._0, data._1)
return _ItemObject(_0, _1)
def _log(self, data):
if data._0 in self._zeros:
raise ValueError, 'Called log(0.0).'
elif data._1 in self._zeros:
_0 = 'log(%s)' | |
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
    # A one-level MultiIndex is still a MultiIndex, not collapsed to Index.
    result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
                        codes=[[0, 1, 2, 3]], names=['first'])
    assert isinstance(result, MultiIndex)
    expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
    tm.assert_index_equal(result.levels[0], expected)
    assert result.names == ['first']
def test_constructor_no_levels():
    # Empty and missing levels/codes raise distinct, specific errors.
    msg = "non-zero number of levels/codes"
    with pytest.raises(ValueError, match=msg):
        MultiIndex(levels=[], codes=[])
    msg = "Must pass both levels and codes"
    with pytest.raises(TypeError, match=msg):
        MultiIndex(levels=[])
    with pytest.raises(TypeError, match=msg):
        MultiIndex(codes=[])
def test_constructor_nonhashable_names():
    # GH 20527 -- unhashable (list) names must be rejected everywhere
    # they can be set: constructor, rename() and set_names().
    levels = [[1, 2], ['one', 'two']]
    codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
    names = (['foo'], ['bar'])
    msg = r"MultiIndex\.name must be a hashable type"
    with pytest.raises(TypeError, match=msg):
        MultiIndex(levels=levels, codes=codes, names=names)
    # With .rename()
    mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
                    codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
                    names=('foo', 'bar'))
    renamed = [['foor'], ['barr']]
    with pytest.raises(TypeError, match=msg):
        mi.rename(names=renamed)
    # With .set_names()
    with pytest.raises(TypeError, match=msg):
        mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
    # Integrity checks: levels/codes count mismatch, out-of-range codes,
    # unequal code lengths and codes below -1 must all raise ValueError
    # with their specific messages.
    codes = [np.array([1]), np.array([2]), np.array([3])]
    levels = ["a"]
    msg = "Length of levels and codes must be the same"
    with pytest.raises(ValueError, match=msg):
        MultiIndex(levels=levels, codes=codes)
    length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
                    " NOTE: this index is in an inconsistent state")
    label_error = r"Unequal code lengths: \[4, 2\]"
    code_value_error = r"On level 0, code value \(-2\) < -1"
    # important to check that it's looking at the right thing.
    with pytest.raises(ValueError, match=length_error):
        MultiIndex(levels=[['a'], ['b']],
                   codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
    with pytest.raises(ValueError, match=label_error):
        MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
    # external API
    with pytest.raises(ValueError, match=length_error):
        idx.copy().set_levels([['a'], ['b']])
    with pytest.raises(ValueError, match=label_error):
        idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
    # test set_codes with verify_integrity=False
    # the setting should not raise any value error
    idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
                         verify_integrity=False)
    # code value smaller than -1
    with pytest.raises(ValueError, match=code_value_error):
        MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
    # GH26408
    # test if codes are re-assigned value -1 for levels
    # with missing values (NaN, NaT, None)
    result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
                        codes=[[0, -1, 1, 2, 3, 4]])
    expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
                          codes=[[-1, -1, -1, -1, 3, 4]])
    tm.assert_index_equal(result, expected)
    result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
                        codes=[[0, -1, 1, 2, 3, 4]])
    expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
                          codes=[[-1, -1, 1, -1, 3, -1]])
    tm.assert_index_equal(result, expected)
    # verify set_levels and set_codes
    result = MultiIndex(
        levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
        [[np.nan, 's', pd.NaT, 128, None]])
    tm.assert_index_equal(result, expected)
    result = MultiIndex(
        levels=[[np.nan, 's', pd.NaT, 128, None]],
        codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
        [[0, -1, 1, 2, 3, 4]])
    tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
    # GH23752 -- 'labels' was renamed to 'codes'; both the keyword and
    # the attribute must emit FutureWarning.
    with tm.assert_produces_warning(FutureWarning):
        MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
                   labels=[[0, 1, 2, 3]], names=['first'])
    with tm.assert_produces_warning(FutureWarning):
        idx.labels
def test_copy_in_constructor():
    # copy=True must snapshot levels and codes: mutating the input
    # arrays afterwards may not leak into the constructed index.
    levels = np.array(["a", "b", "c"])
    codes = np.array([1, 1, 2, 0, 0, 1, 1])
    val = codes[0]
    mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
                    copy=True)
    assert mi.codes[0][0] == val
    codes[0] = 15
    assert mi.codes[0][0] == val
    val = levels[0]
    levels[0] = "PANDA"
    assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
    # Round-trip: rebuild idx from its materialized per-level arrays.
    arrays = [np.asarray(lev).take(level_codes)
              for lev, level_codes in zip(idx.levels, idx.codes)]
    # list of arrays as input
    result = MultiIndex.from_arrays(arrays, names=idx.names)
    tm.assert_index_equal(result, idx)
    # infer correctly
    result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
                                     ['a', 'b']])
    assert result.levels[0].equals(Index([Timestamp('20130101')]))
    assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
    # GH 18434 -- from_arrays must accept an iterator of arrays but
    # reject non-iterable input with a TypeError.
    arrays = [np.asarray(lev).take(level_codes)
              for lev, level_codes in zip(idx.levels, idx.codes)]
    # iterator as input
    result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
    tm.assert_index_equal(result, idx)
    # invalid iterator input
    msg = "Input must be a list / sequence of array-likes."
    with pytest.raises(TypeError, match=msg):
        MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
    arrays = tuple(tuple(np.asarray(lev).take(level_codes))
                   for lev, level_codes in zip(idx.levels, idx.codes))
    # tuple of tuples as input
    result = MultiIndex.from_arrays(arrays, names=idx.names)
    tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
    # tz-aware datetime levels must survive both Index and Series input.
    idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
                         tz='US/Eastern')
    idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
                         tz='Asia/Tokyo')
    result = pd.MultiIndex.from_arrays([idx1, idx2])
    tm.assert_index_equal(result.get_level_values(0), idx1)
    tm.assert_index_equal(result.get_level_values(1), idx2)
    result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
    tm.assert_index_equal(result2.get_level_values(0), idx1)
    tm.assert_index_equal(result2.get_level_values(1), idx2)
    tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
    # timedelta levels must survive both Index and Series input.
    idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
    idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
    result = pd.MultiIndex.from_arrays([idx1, idx2])
    tm.assert_index_equal(result.get_level_values(0), idx1)
    tm.assert_index_equal(result.get_level_values(1), idx2)
    result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
    tm.assert_index_equal(result2.get_level_values(0), idx1)
    tm.assert_index_equal(result2.get_level_values(1), idx2)
    tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
    # period levels must survive both Index and Series input.
    idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
    idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
    result = pd.MultiIndex.from_arrays([idx1, idx2])
    tm.assert_index_equal(result.get_level_values(0), idx1)
    tm.assert_index_equal(result.get_level_values(1), idx2)
    result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
    tm.assert_index_equal(result2.get_level_values(0), idx1)
    tm.assert_index_equal(result2.get_level_values(1), idx2)
    tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
    # Mixed datetimelike level dtypes (tz-aware, naive, timedelta,
    # period) must each keep their own dtype per level.
    idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
                         tz='US/Eastern')
    idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
    idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
    idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
    result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
    tm.assert_index_equal(result.get_level_values(0), idx1)
    tm.assert_index_equal(result.get_level_values(1), idx2)
    tm.assert_index_equal(result.get_level_values(2), idx3)
    tm.assert_index_equal(result.get_level_values(3), idx4)
    result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
                                         pd.Series(idx2),
                                         pd.Series(idx3),
                                         pd.Series(idx4)])
    tm.assert_index_equal(result2.get_level_values(0), idx1)
    tm.assert_index_equal(result2.get_level_values(1), idx2)
    tm.assert_index_equal(result2.get_level_values(2), idx3)
    tm.assert_index_equal(result2.get_level_values(3), idx4)
    tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
    # GH13743 -- categorical levels (ordered and unordered) must be
    # preserved for Index, Series and raw Categorical values input.
    idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
                               ordered=False)
    idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
                               ordered=True)
    result = pd.MultiIndex.from_arrays([idx1, idx2])
    tm.assert_index_equal(result.get_level_values(0), idx1)
    tm.assert_index_equal(result.get_level_values(1), idx2)
    result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
    tm.assert_index_equal(result2.get_level_values(0), idx1)
    tm.assert_index_equal(result2.get_level_values(1), idx2)
    result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
    tm.assert_index_equal(result3.get_level_values(0), idx1)
    tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
    # Zero arrays raise; one or more empty arrays build an empty index.
    # 0 levels
    msg = "Must pass non-zero number of levels/codes"
    with pytest.raises(ValueError, match=msg):
        MultiIndex.from_arrays(arrays=[])
    # 1 level
    result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
    assert isinstance(result, MultiIndex)
    expected = Index([], name='A')
    tm.assert_index_equal(result.levels[0], expected)
    # N levels
    for N in [2, 3]:
        arrays = [[]] * N
        names = list('ABC')[:N]
        result = MultiIndex.from_arrays(arrays=arrays, names=names)
        expected = MultiIndex(levels=[[]] * N, codes=[[]] * N,
                              names=names)
        tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_sequence_of_arrays', [
    1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'],
    (1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'),
    [(1,), 2], [1, (2,)], [('a',), 'b'],
    ((1,), 2), (1, (2,)), (('a',), 'b')
])
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
    # Scalars, strings and mixed scalar/array sequences are all rejected.
    msg = "Input must be a list / sequence of array-likes"
    with pytest.raises(TypeError, match=msg):
        MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize('idx1, idx2', [
    ([1, 2, 3], ['a', 'b']),
    ([], ['a', 'b']),
    ([1, 2, 3], [])
])
def test_from_arrays_different_lengths(idx1, idx2):
    # see gh-13599
    msg = '^all arrays must be same length$'
    with pytest.raises(ValueError, match=msg):
        MultiIndex.from_arrays([idx1, idx2])
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
    # An empty list carries no level-count information and must raise.
    msg = 'Cannot infer number of levels from empty list'
    with pytest.raises(TypeError, match=msg):
        MultiIndex.from_tuples([])
    expected = MultiIndex(levels=[[1, 3], [2, 4]],
                          codes=[[0, 1], [0, 1]],
                          names=['a', 'b'])
    # input tuples
    result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
    tm.assert_index_equal(result, expected)
def test_from_tuples_iterator():
    # GH 18434
    # input iterator for tuples
    expected = MultiIndex(levels=[[1, 3], [2, 4]],
                          codes=[[0, 1], [0, 1]],
                          names=['a', 'b'])
    result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
    tm.assert_index_equal(result, expected)
    # input non-iterables
    msg = 'Input must be a list / sequence of tuple-likes.'
    with pytest.raises(TypeError, match=msg):
        MultiIndex.from_tuples(0)
def test_from_tuples_empty():
    # GH 16777 -- empty input with names builds an empty two-level index.
    result = MultiIndex.from_tuples([], names=['a', 'b'])
    expected = MultiIndex.from_arrays(arrays=[[], []],
                                      names=['a', 'b'])
    tm.assert_index_equal(result, expected)
def test_from_tuples_index_values(idx):
    # Round-trip through from_tuples preserves the flat tuple values.
    result = MultiIndex.from_tuples(idx)
    assert (result.values == idx.values).all()
def test_tuples_with_name_string():
    # GH 15110 and GH 14848 -- a scalar name for tuple input must raise.
    li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
    msg = "Names should be list-like for a MultiIndex"
    with pytest.raises(ValueError, match=msg):
        pd.Index(li, name='abc')
    with pytest.raises(ValueError, match=msg):
        pd.Index(li, name='a')
def test_from_tuples_with_tuple_label():
    # GH 15457 -- a tuple may itself be a label inside a level.
    expected = pd.DataFrame([[2, 1, 2], [4, (1, 2), 3]],
                            columns=['a', 'b', 'c']).set_index(['a', 'b'])
    idx = pd.MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=('a', 'b'))
    result = pd.DataFrame([2, 3], columns=['c'], index=idx)
    tm.assert_frame_equal(expected, result)
# ----------------------------------------------------------------------------
# from_product
# ----------------------------------------------------------------------------
def test_from_product_empty_zero_levels():
    # 0 levels
    msg = "Must pass non-zero number of levels/codes"
    with pytest.raises(ValueError, match=msg):
        MultiIndex.from_product([])
def test_from_product_empty_one_level():
    result = MultiIndex.from_product([[]], names=['A'])
    expected = pd.Index([], name='A')
    tm.assert_index_equal(result.levels[0], expected)
@pytest.mark.parametrize('first, second', [
    ([], []),
    (['foo', 'bar', 'baz'], []),
    ([], ['a', 'b', 'c']),
])
def test_from_product_empty_two_levels(first, second):
    # Any empty factor makes the product empty, but levels are kept.
    names = ['A', 'B']
    result = MultiIndex.from_product([first, second], names=names)
    expected = MultiIndex(levels=[first, second],
                          codes=[[], []], names=names)
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('N', list(range(4)))
def test_from_product_empty_three_levels(N):
    # GH12258
    names = ['A', 'B', 'C']
    lvl2 = list(range(N))
    result = MultiIndex.from_product([[], lvl2, []], names=names)
    expected = MultiIndex(levels=[[], lvl2, []],
                          codes=[[], [], []], names=names)
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_input', [
    1,
    [1],
    [1, 2],
    [[1], 2],
    'a',
    ['a'],
    ['a', 'b'],
    [['a'], 'b'],
])
def test_from_product_invalid_input(invalid_input):
    # Scalars and mixed scalar/iterable sequences are rejected.
    msg = (r"Input must be a list / sequence of iterables|"
           "Input must be list-like")
    with pytest.raises(TypeError, match=msg):
        MultiIndex.from_product(iterables=invalid_input)
def test_from_product_datetimeindex():
    # Timestamps survive the cartesian product as tuple elements.
    dt_index = date_range('2000-01-01', periods=2)
    mi = pd.MultiIndex.from_product([[1, 2], dt_index])
    etalon = construct_1d_object_array_from_listlike([
        (1, pd.Timestamp('2000-01-01')),
        (1, pd.Timestamp('2000-01-02')),
        (2, pd.Timestamp('2000-01-01')),
        (2, pd.Timestamp('2000-01-02')),
    ])
    tm.assert_numpy_array_equal(mi.values, etalon)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('f', [
lambda x: x,
lambda x: pd.Series(x),
lambda x: x.values
])
def test_from_product_index_series_categorical(ordered, | |
/ self.zeta[link][k] for k in range(condition, self.nij[link])])
if not self.full_links:
zero_out = (self.node_ts[link[0]] >= self.Tau[link])
sum_1 = np.sum((np.exp(-(mu+phi) * t_diff) - 1)[zero_out])
sum_2 = np.sum(np.multiply(t_diff,np.exp(-(mu+phi) * t_diff))[zero_out])
vals_mu += [phi / ((mu+phi) ** 2) * sum_1 - mu / (mu+phi) * sum_2 + psi_sum - mu * psi_derivative_sum]
vals_phi += [- mu / ((mu+phi) ** 2) * sum_1 - 1 / (mu+phi) * sum_2 - mu * psi_derivative_sum]
else:
vals_mu += [psi_sum - mu * psi_derivative_sum]
vals_phi += [- mu * psi_derivative_sum]
## Repeat for mu_prime and phi_prime
condition_prime = (self.nij[link] != len(self.A_bar_prime[link])) * (self.equal_start[link] if self.discrete else 1)
t_bar_prime = self.A_bar_prime[link]
t_diff_prime = np.diff(np.append(self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]],self.T))
psi_prime_sum = np.sum([np.exp(-(mu_prime+phi_prime) * t_bar_prime[k-condition_prime]) / self.zeta[link][k] for k in range(condition_prime, self.nij[link])])
psi_prime_derivative_sum = np.sum([np.exp(-(mu_prime+phi_prime) * t_bar_prime[k-condition_prime]) * t_bar_prime[k-condition_prime] / self.zeta[link][k] for k in range(condition_prime, self.nij[link])])
if not self.full_links:
zero_out_prime = ((self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]]) >= self.Tau[link])
sum_1_prime = np.sum((np.exp(-(mu_prime+phi_prime) * t_diff_prime) - 1)[zero_out_prime])
sum_2_prime = np.sum(np.multiply(t_diff_prime,np.exp(-(mu_prime+phi_prime) * t_diff_prime))[zero_out_prime])
res_mu = [phi_prime / ((mu_prime+phi_prime) ** 2) * sum_1_prime - mu_prime / (mu_prime+phi_prime) * sum_2_prime + psi_prime_sum - mu_prime * psi_prime_derivative_sum]
res_phi = [- mu_prime / ((mu_prime+phi_prime) ** 2) * sum_1_prime - 1 / (mu_prime+phi_prime) * sum_2_prime - mu_prime * psi_prime_derivative_sum]
else:
res_mu = [psi_prime_sum - mu_prime * psi_prime_derivative_sum]
res_phi = [- mu_prime * psi_prime_derivative_sum]
## If the graph is directed, add to vals_parameter_prime, otherwise (undirected graph) to vals_parameter
if self.directed:
vals_mu_prime += res_mu
vals_phi_prime += res_phi
else:
vals_mu += res_mu
vals_phi += res_phi
if self.interactions:
## Update rows and columns
if self.D > 1:
rows_int += [link[0]]*self.D
cols_int += [link[1]]*self.D
dims_int += list(range(self.D))
if not self.directed:
rows_int += [link[1]]*self.D
cols_int += [link[0]]*self.D
dims_int += list(range(self.D))
## Updates for gamma
gamma = self.gamma[link[0]]
gamma_prime = self.gamma_prime[link[1]] if self.directed else self.gamma[link[1]]
if self.D == 1:
vals_gamma += [gamma_prime * (- ((self.T - self.Tau[link]) if not self.full_links else 0) + \
np.sum([1.0 / self.zeta[link][k] for k in range(self.nij[link])]))]
res_gamma = [gamma * (- ((self.T - self.Tau[link]) if not self.full_links else 0) + \
np.sum([1.0 / self.zeta[link][k] for k in range(self.nij[link])]))]
else:
vals_gamma += list(gamma_prime * (- ((self.T - self.Tau[link]) if not self.full_links else 0) + \
np.sum([1.0 / self.zeta[link][k] for k in range(self.nij[link])])))
res_gamma = list(gamma * (- ((self.T - self.Tau[link]) if not self.full_links else 0) + \
np.sum([1.0 / self.zeta[link][k] for k in range(self.nij[link])])))
if self.directed:
vals_gamma_prime += res_gamma
else:
vals_gamma += res_gamma
if not self.poisson_int:
## Obtain the parameters
nu = self.nu[link[0]]
nu_prime = self.nu_prime[link[1]] if self.directed else self.nu[link[1]]
theta = self.theta[link[0]]
theta_prime = self.theta_prime[link[1]] if self.directed else self.theta[link[1]]
q = self.equal_start[link] if self.discrete else 1
if self.hawkes_int:
## Psi summations
psi_sum = np.sum([self.psi_tilde[link][k] / self.zeta[link][k] for k in range(q,self.nij[link])],axis=0)
psi_derivative_sum = np.sum([self.psi_tilde_derivative[link][k] / self.zeta[link][k] for k in range(q,self.nij[link])],axis=0)
psi_prime_derivative_sum = np.sum([self.psi_tilde_derivative_prime[link][k] / self.zeta[link][k] for k in range(q,self.nij[link])],axis=0)
## Updates for nu and theta
t_diff = self.T - np.array(self.A[link])
if self.D == 1:
sum_1 = np.sum(np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff) - 1)
sum_2 = np.sum(np.multiply(t_diff,np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff)))
vals_nu += [nu_prime / (nu+theta) * (theta / (nu+theta) / (nu_prime+theta_prime) * sum_1 - nu * sum_2) + nu_prime * (psi_sum + nu * psi_derivative_sum)]
vals_theta += [- nu * nu_prime / (nu+theta) * (sum_1 / (nu + theta) / (nu_prime + theta_prime) + sum_2) + nu * psi_derivative_sum]
## Repeat for nu_prime and theta_prime
res_nu = [nu / (nu_prime+theta_prime) * (theta_prime / (nu_prime+theta_prime) / (nu+theta) * sum_1 - nu_prime * sum_2) + nu * (psi_sum + nu_prime * psi_prime_derivative_sum)]
res_theta = [- nu_prime * nu / (nu_prime+theta_prime) * (sum_1 / (nu_prime + theta_prime) / (nu + theta) + sum_2) + nu_prime * psi_prime_derivative_sum]
else:
sum_1 = np.sum([np.exp(-(nu+theta) * (nu_prime+theta_prime) * tt) - 1 for tt in t_diff], axis=0)
sum_2 = np.sum([tt * np.exp(-(nu+theta) * (nu_prime+theta_prime) * tt) for tt in t_diff], axis=0)
vals_nu += list(nu_prime / (nu+theta) * (theta / (nu+theta) / (nu_prime+theta_prime) * sum_1 - nu * sum_2) + nu_prime * (psi_sum + nu * psi_derivative_sum))
vals_theta += list(- nu * nu_prime / (nu+theta) * (sum_1 / (nu + theta) / (nu_prime + theta_prime) + sum_2) + nu * psi_derivative_sum)
## Repeat for nu_prime and theta_prime
res_nu = list(nu / (nu_prime+theta_prime) * (theta_prime / (nu_prime+theta_prime) / (nu+theta) * sum_1 - nu_prime * sum_2) + nu * (psi_sum + nu_prime * psi_prime_derivative_sum))
res_theta = list(- nu_prime * nu / (nu_prime+theta_prime) * (sum_1 / (nu_prime + theta_prime) / (nu + theta) + sum_2) + nu_prime * psi_prime_derivative_sum)
else:
## Updates for nu and theta
t_diff = np.append(self.A_diff[link], self.T - self.A[link][-1])
## Calculate the updates
if self.D == 1:
sum_1 = np.sum(np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff) - 1)
sum_2 = np.sum(np.multiply(t_diff, np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff)))
psi_sum = np.sum([np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff[k-q]) / self.zeta[link][k] for k in range(q, self.nij[link])])
psi_derivative_sum = np.sum([t_diff[k-q] * np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff[k-q]) / self.zeta[link][k] for k in range(q, self.nij[link])])
## Calculate the updates
vals_nu += [nu_prime / (nu+theta) * (theta / (nu+theta) / (nu_prime+theta_prime) * sum_1 - nu * sum_2) + nu_prime * (psi_sum - nu * (nu_prime+theta_prime) * psi_derivative_sum)]
vals_theta += [- nu * nu_prime / (nu+theta) * (sum_1 / (nu + theta) / (nu_prime + theta_prime) + sum_2) - nu * nu_prime * (nu_prime + theta_prime) * psi_derivative_sum]
## Repeat for nu_prime and theta_prime
res_nu = [nu / (nu_prime+theta_prime) * (theta_prime / (nu_prime+theta_prime) / (nu+theta) * sum_1 - nu_prime * sum_2) + nu * (psi_sum - nu_prime * (nu+theta) * psi_derivative_sum)]
res_theta = [- nu_prime * nu / (nu_prime+theta_prime) * (sum_1 / (nu_prime + theta_prime) / (nu + theta) + sum_2) - nu * nu_prime * (nu + theta) * psi_derivative_sum]
else:
sum_1 = np.sum([np.exp(-(nu+theta) * (nu_prime+theta_prime) * tt) - 1 for tt in t_diff], axis=0)
sum_2 = np.sum([tt * np.exp(-(nu+theta) * (nu_prime+theta_prime) * tt) for tt in t_diff],axis=0)
psi_sum = np.sum([np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff[k-q]) / self.zeta[link][k] for k in range(q, self.nij[link])], axis=0)
psi_derivative_sum = np.sum([t_diff[k-q] * np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff[k-q]) / self.zeta[link][k] for k in range(q, self.nij[link])], axis=0)
vals_nu += list(nu_prime / (nu+theta) * (theta / (nu+theta) / (nu_prime+theta_prime) * sum_1 - nu * sum_2) + nu_prime * (psi_sum - nu * (nu_prime+theta_prime) * psi_derivative_sum))
vals_theta += list(- nu * nu_prime / (nu+theta) * (sum_1 / (nu + theta) / (nu_prime + theta_prime) + sum_2) - nu * nu_prime * (nu_prime + theta_prime) * psi_derivative_sum)
## Repeat for nu_prime and theta_prime
res_nu = list(nu / (nu_prime+theta_prime) * (theta_prime / (nu_prime+theta_prime) / (nu+theta) * sum_1 - nu_prime * sum_2) + nu * (psi_sum - nu_prime * (nu+theta) * psi_derivative_sum))
res_theta = list(- nu_prime * nu / (nu_prime+theta_prime) * (sum_1 / (nu_prime + theta_prime) / (nu + theta) + sum_2) - nu * nu_prime * (nu + theta) * psi_derivative_sum)
## If the graph is directed, add to vals_parameter_prime, otherwise (undirected graph) to vals_parameter
if self.directed:
vals_nu_prime += res_nu
vals_theta_prime += res_theta
else:
vals_nu += res_nu
vals_theta += res_theta
if verbose:
prop += self.nij[link]
print("\r+++ Percentage of processed links +++ {:0.2f}%".format(prop / self.m * 100), end="")
if verbose:
print("")
## Calculate the gradients for the main effects
if self.main_effects:
## For undirected graphs, sum along the columns (by construction of the matrices)
Z = coo_matrix((vals, (rows, cols)), shape=(self.n1 if self.bipartite else self.n, self.n2 if self.bipartite else self.n))
## Baseline parameters
self.grad_alpha = np.array(Z.sum(axis=1)).flatten()
if self.full_links:
self.grad_alpha -= self.T * (self.n2 if self.bipartite else self.n)
if self.directed:
self.grad_beta = np.array(Z.sum(axis=0)).flatten()
if self.full_links:
self.grad_beta -= self.T * (self.n1 if self.bipartite else self.n)
if not self.poisson_me:
## Excitation function parameters
Z = coo_matrix((vals_mu, (rows, cols)), shape=(self.n1 if self.bipartite else self.n, self.n2 if self.bipartite else self.n))
self.grad_mu = np.array(Z.sum(axis=1)).flatten()
if self.full_links:
self.grad_mu += mu_component * (self.n2 if self.bipartite else self.n)
Z = coo_matrix((vals_phi, (rows, cols)), shape=(self.n1 if self.bipartite else self.n, self.n2 if self.bipartite else self.n))
self.grad_phi = np.array(Z.sum(axis=1)).flatten()
if self.full_links:
self.grad_phi += phi_component * (self.n2 if self.bipartite else self.n)
## Parameters of the directed graph
if self.directed:
Z = coo_matrix((vals_mu_prime, (rows, cols)), shape=(self.n1 if self.bipartite else self.n, self.n2 if self.bipartite else self.n))
self.grad_mu_prime = np.array(Z.sum(axis=0)).flatten()
if self.full_links:
self.grad_mu_prime += mu_prime_component * (self.n1 if self.bipartite else self.n)
Z = coo_matrix((vals_phi_prime, (rows, cols)), shape=(self.n1 if self.bipartite else self.n, self.n2 if self.bipartite else self.n))
self.grad_phi_prime = np.array(Z.sum(axis=0)).flatten()
if self.full_links:
self.grad_phi_prime += phi_prime_component * (self.n1 if self.bipartite else self.n)
if verbose:
print("+++ Updating the parameters for the main effects +++")
## Calculate the gradients for the interactions
if self.interactions:
if self.D == 1:
## Baseline parameters
Z = coo_matrix((vals_gamma, (rows, cols)), shape=(self.n1 if self.bipartite else self.n, self.n2 if self.bipartite else self.n))
self.grad_gamma = np.array(Z.sum(axis=1)).flatten()
if self.full_links:
self.grad_gamma -= self.T * np.sum(self.gamma_prime if self.directed else self.gamma)
if not self.poisson_int:
## Excitation function
Z = coo_matrix((vals_nu, (rows, cols)), shape=(self.n1 if self.bipartite else self.n, self.n2 if self.bipartite else self.n))
self.grad_nu = np.array(Z.sum(axis=1)).flatten()
Z = coo_matrix((vals_theta, (rows, cols)), shape=(self.n1 if self.bipartite else self.n, self.n2 if self.bipartite else self.n))
self.grad_theta = np.array(Z.sum(axis=1)).flatten()
## Parameters of the directed | |
*need* to implement:
* doRead(maxage)
Subclasses *can* implement:
* doReset()
* doPoll(n, maxage)
Subclasses *can* override:
* doStatus(maxage)
* valueInfo()
"""
parameters = {
'fmtstr': Param('Format string for the device value', type=str,
default='%.3f', settable=True),
'unit': Param('Unit of the device main value', type=str,
mandatory=True, settable=True),
'maxage': Param('Maximum age of cached value and status (zero '
'to never use cached values, or None to cache '
'them indefinitely)', unit='s', fmtstr='%.1f',
default=12, settable=True,
type=none_or(floatrange(0, 24 * 3600))),
'pollinterval': Param('Polling interval for value and status (or None '
'to disable polling)', unit='s', fmtstr='%.1f',
default=5, settable=True,
type=none_or(floatrange(0.5, 24 * 3600))),
'warnlimits': Param('Range in which the device value should be '
'in normal operation; warnings may be triggered '
'when it is outside', settable=True, chatty=True,
unit='main', fmtstr='main',
type=none_or(tupleof(anytype, anytype))),
}
def init(self):
self._info_errcount = 0
# value in simulation mode
self._sim_intercept = self._mode == SIMULATION and self.hardware_access
self._sim_old_value = None
self._sim_value = 0 # no way to configure a useful default...
self._sim_min = None
self._sim_max = None
self._sim_started = None
self._sim_preset = {}
Device.init(self)
def _sim_getMinMax(self):
"""Return info about the value range this device had in a simulation.
The return value is a list of tuples ``(value name, last value, minimum
value, maximum value)``. By default this has one entry, where "value
name" is the device name.
"""
if self._sim_min is not None:
return [(self.name, self.format(self._sim_value),
self.format(self._sim_min), self.format(self._sim_max))]
else:
return []
def _sim_setValue(self, pos):
self._sim_old_value = self._sim_value
self._sim_value = pos
if isinstance(pos, number_types):
if self._sim_min is None:
self._sim_min = pos
self._sim_min = min(pos, self._sim_min)
if self._sim_max is None:
self._sim_max = pos
self._sim_max = max(pos, self._sim_max)
def _setMode(self, mode):
sim_intercept = mode == SIMULATION and self.hardware_access
if sim_intercept:
# save the last known value
try:
self._sim_value = self.read() # cached value is ok here
self.log.debug('last value before simulation mode is %r',
self._sim_value)
except Exception as err:
self.log.warning('error reading last value', exc=err)
self._sim_intercept = sim_intercept
Device._setMode(self, mode)
def __call__(self, *values):
"""Allow dev() as shortcut for read."""
if values:
# give a nicer error message than "TypeError: takes 1 argument"
raise UsageError(self, 'not a moveable device')
return self.read()
def valueInfo(self):
"""Describe the values read by this device.
Return a tuple of :class:`~nicos.core.params.Value` instances
describing the values that :meth:`read` returns.
This must be overridden by every Readable that returns more than one
value in a list. For example, a slit that returns a width and height
would define ::
def valueInfo(self):
return (Value(self.name + '.width', unit=self.unit),
Value(self.name + '.height', unit=self.unit))
By default, this returns a Value that indicates one return value with
the proper unit and format string of the device.
"""
return Value(self.name, unit=self.unit, fmtstr=self.fmtstr),
@usermethod
def read(self, maxage=None):
"""Read the (possibly cached) main value of the device.
.. method:: doRead(maxage=0)
This method must be implemented to read the actual device value from
the device. It is only called if the last cached value is out of
date, or no cache is available.
The *maxage* parameter should be given to read() calls of subdevices.
"""
if self._sim_intercept:
return self._sim_value
val = self._getFromCache('value', self.doRead, maxage)
if self._mode == SIMULATION:
self._sim_setValue(val)
return val
    @usermethod
    def status(self, maxage=None):
        """Return the (possibly cached) status of the device.

        The status is a tuple of one of the integer constants defined in the
        :mod:`nicos.core.status` module, and textual extended info.

        .. method:: doStatus(maxage=0)

           This method can be implemented to get actual device status from
           the device.  It is only called if the last cached value is out of
           date, or no cache is available.

           If no ``doStatus()`` is implemented, ``status()`` tries to
           determine the status via `nicos.core.utils.multiStatus` of the
           attached devices.  If that is not possible, it returns
           ``status.UNKNOWN, 'doStatus not implemented'``.

           The *maxage* parameter should be given to status() calls of
           subdevices.
        """
        # simulated devices are always "ok"
        if self._sim_intercept:
            return (status.OK, 'simulated ok')
        return self._getFromCache('status', self._combinedStatus, maxage)
    def _combinedStatus(self, maxage=0):
        """Return the status of the device combined from hardware status and
        movement status determined by NICOS such as timeouts.

        The default implementation just returns the hardware status, except
        that the warnlimits of the device are checked, and the status is
        changed to WARN if they are exceeded.
        """
        # never let a failing doStatus() propagate -- map it to ERROR
        try:
            stvalue = self.doStatus(maxage)
        except NicosError as err:
            stvalue = (status.ERROR, str(err))
        except Exception as err:
            stvalue = (status.ERROR, 'unhandled %s: %s' %
                       (err.__class__.__name__, err))
        # guard against doStatus() returning a bogus status constant
        if stvalue[0] not in status.statuses:
            stvalue = (status.UNKNOWN,
                       'status constant %r is unknown' % stvalue[0])
        # only an OK status is downgraded by limit checks; warnings and
        # errors from the hardware are kept as-is
        if stvalue[0] == status.OK:
            value = None
            wl = self.warnlimits
            if wl:
                value = self.read(maxage)
                # a limit of None disables that side of the check
                if wl[0] is not None and value < wl[0]:
                    stvalue = status.WARN, \
                        statusString(stvalue[1], 'below warn limit (%s)' %
                                     self.format(wl[0], unit=True))
                elif wl[1] is not None and value > wl[1]:
                    stvalue = status.WARN, \
                        statusString(stvalue[1], 'above warn limit (%s)' %
                                     self.format(wl[1], unit=True))
            if isinstance(self, HasLimits):
                if value is None:
                    value = self.read(maxage)
                ul = self.userlimits
                # take precision into account in case we drive exactly to the
                # user limit but the device overshoots a little
                prec = self.precision if isinstance(self, HasPrecision) else 0
                if value < ul[0] - prec:
                    stvalue = status.WARN, \
                        statusString(stvalue[1], 'below user limit (%s)' %
                                     self.format(ul[0], unit=True))
                elif value > ul[1] + prec:
                    stvalue = status.WARN, \
                        statusString(stvalue[1], 'above user limit (%s)' %
                                     self.format(ul[1], unit=True))
        return stvalue
def doStatus(self, maxage=0):
if self._adevs:
return multiStatus(self._adevs, maxage)
return (status.UNKNOWN, 'doStatus not implemented')
    def poll(self, n=0, maxage=0):
        """Get status and value directly from the device and put both values
        into the cache. For continuous polling, *n* should increase by one
        with every call to *poll*.

        .. method:: doPoll(n, maxage)

           If present, this method is called to perform additional polling,
           e.g. on parameters that can be changed from outside the NICOS
           system. The *n* parameter can be used to perform the polling less
           frequently than the polling of value and status.

           If doPoll returns a (status, value) tuple, they are used instead of
           calling doStatus and doRead again.

        .. automethod:: _pollParam
        """
        # without a cache (or while simulating) polling degenerates into a
        # plain status/read pair
        if self._sim_intercept or self._cache is None:
            return (self.status(), self.read())
        ret = None
        if hasattr(self, 'doPoll'):
            try:
                ret = self.doPoll(n, maxage)
            except Exception:
                # polling errors are logged, never raised to the poller
                self.log.warning('error in doPoll', exc=1)
        if ret is not None and ret[0] is not None:
            # doPoll supplied both status and value: store them directly
            ct = currenttime()
            self._cache.put(self, 'status', ret[0], ct, self.maxage)
            self._cache.put(self, 'value', ret[1], ct, self.maxage)
            return ret[0], ret[1]
        # updates shall always get through to the cache
        # self._cache.invalidate(self, 'value')
        # self._cache.invalidate(self, 'status')
        return self.status(maxage), self.read(maxage)
@usermethod
def reset(self):
"""Reset the device hardware. Returns the new status afterwards.
This operation is forbidden in slave mode, and a no-op for hardware
devices in simulation mode.
.. method:: doReset()
This method is called if implemented. Otherwise, this is a no-op.
"""
if self._mode == SLAVE:
raise ModeError('reset not possible in slave mode')
elif self._sim_intercept:
return status.OK, ''
if isinstance(self, HasTimeout):
self._setROParam('_timesout', None)
# reset should not trigger timeoutAction()
self._timeoutActionCalled = True
if hasattr(self, 'doReset'):
self.doReset()
# make sure, status is propagated to the cache after a reset
if self._cache:
self._cache.invalidate(self, 'status')
return self.status(0)
def format(self, value, unit=False):
"""Format a value from :meth:`read` into a human-readable string.
The device unit is not included unless *unit* is true.
This is done using Python string formatting (the ``%`` operator) with
the :attr:`fmtstr` parameter value as the format string.
"""
if isinstance(value, list):
value = tuple(value)
try:
ret = self.fmtstr % value
except (TypeError, ValueError):
ret = str(value)
if unit and self.unit:
return ret + ' ' + self.unit
return ret
def info(self):
"""Automatically add device main value and status."""
ret = []
try:
val = self.read()
ret.append(('value', val, self.format(val), self.unit, 'general'))
except Exception as err:
self._info_errcount += 1
# only display the message for the first 5 times and then
# every 20 measurements. always display if in debugmode
if self._info_errcount <= 5 or self._info_errcount % 20 == 0:
self.log.warning('error reading', exc=err)
else:
self.log.debug('error reading', exc=err)
ret.append(('value', None, 'Error: %s' % err, '', 'general'))
else:
self._info_errcount = 0
try:
st = self.status()
except Exception | |
0:
src_ix = np.insert(src_ix, 0, src_ix[0] - 1)
# check right edge
intervals = np.append(intervals, dt_trg[i + 1] - dt_src[src_ix[-1]])
if src_ix[-1] > (len(dt_src) - 1):
src_ix = np.append(src_ix, src_ix[-1] + 1)
# convert to seconds
intervals = np.array([_tdelta2seconds(interval)
for interval in intervals])
# compute weights
weights = intervals / width
# compute weighted mean
trg[i] = np.dot(np.transpose(src[src_ix]), weights)
return trg
@deprecated(deprecated_in="0.11.3", removed_in="1.0.0",
            current_version=short_version)
def average_over_time_windows(src, dt_src, dt_trg, maxdist=3600,
                              helper_interval=300, **ipargs):
    """UNDER DEVELOPMENT: Computes the average of a time series over given
    time windows.

    This function computes the average values of an irregular time series
    ``src`` within given time windows ``dt_trg``. The datetimes of the
    original time series are given by ``dt_src``. The idea of this function
    is to create regular helper timesteps at an interval length given by
    ``helper_interval``. The values of ``src`` are then interpolated to these
    helper time steps, and the resulting helper values are finally averaged
    over the given target time windows.

    Parameters
    ----------
    src : :class:`numpy:numpy.ndarray`
        Array of shape (..., original number of time steps,...)
        This is the time series data which should be aggregated. The number
        of time steps corresponds to the length of the time dimension.
    dt_src : :class:`numpy:numpy.ndarray`
        Array of datetime objects
        Must be of length *original number of time steps + 1* because dt_src
        defines the limits of the intervals corresponding to the time steps.
    dt_trg : :class:`numpy:numpy.ndarray`
        Array of datetime objects
        Must be of length *number of output time steps + 1* analogously to
        dt_src.
    maxdist : int
        Maximum distance (seconds) of a helper step to the nearest source
        step; helper values farther away are masked invalid (NaN).
    helper_interval : int
        Spacing (seconds) of the regular helper grid.

    Returns
    -------
    output : :class:`numpy:numpy.ndarray`
        Array of shape (..., len(dt_trg) - 1, ...)
    """
    # Convert input time steps to numpy arrays
    dt_src, dt_trg = np.array(dt_src), np.array(dt_trg)
    trg_secs = np.array([mktime(tstep.timetuple()) for tstep in dt_trg])
    src_secs = np.array([mktime(tstep.timetuple()) for tstep in dt_src])
    # regular helper grid spanning the whole target period
    helper_secs = np.arange(trg_secs[0], trg_secs[-1], helper_interval)
    # Interpolate to helper points; values outside the source range are NaN
    f = interpolate.interp1d(src_secs, src, axis=0, bounds_error=False)
    helpers = f(helper_secs)
    # distance of every helper step to its nearest source step
    tree = cKDTree(src_secs.reshape((-1, 1)))
    dists, ix = tree.query(helper_secs.reshape((-1, 1)), k=1)
    # deal with edges (in case of extrapolation, we apply nearest neighbour)
    # BUG FIX: np.where returns a new array -- the result must be assigned,
    # otherwise the nearest-neighbour edge fill is silently discarded.
    helpers = np.where(np.isnan(helpers), src[ix], helpers)
    # mask out points which are too far from the next source point
    helpers[np.where(dists > maxdist)[0]] = np.nan
    # Create a new container for the target data
    trg_shape = list(src.shape)
    trg_shape[0] = len(dt_trg) - 1
    trg = np.repeat(np.nan, _shape2size(trg_shape)).reshape(trg_shape)
    for i in range(len(dt_trg) - 1):
        # source limits lying completely INSIDE the i-th target window
        # NOTE(review): these indices are derived from dt_src but index into
        # ``helpers`` (length of the helper grid); this looks inconsistent --
        # confirm before relying on it (function is marked under development).
        helper_ix = np.where(np.logical_and(dt_src >= dt_trg[i],
                                            dt_src <= dt_trg[i + 1]))[0]
        trg[i] = np.mean(helpers[helper_ix], axis=0)
    return trg
@deprecated(deprecated_in="0.11.3", removed_in="1.0.0",
            current_version=short_version)
def _get_func(funcname):
    """Look up and return the numpy function named *funcname*.

    Parameters
    ----------
    funcname : string

    Raises
    ------
    AttributeError
        If numpy has no attribute of that name.
    """
    try:
        return getattr(np, funcname)
    except AttributeError:
        raise AttributeError('<' + funcname +
                             '> is not a valid function in numpy...')
def _shape2size(shape):
    """Return the number of array elements implied by *shape*.

    The size is the product of all dimension extents; an empty shape
    yields 1.
    """
    size = 1
    for extent in shape:
        size *= extent
    return size
def from_to(tstart, tend, tdelta):
    """Return a list of timesteps from <tstart> to <tend> spaced <tdelta>.

    The start time is always included; further steps are appended as long as
    they do not exceed <tend>.

    Parameters
    ----------
    tstart : datetime isostring ("%Y-%m-%d %H:%M:%S",
        e.g. 2000-01-01 15:34:12) or datetime object
    tend : datetime isostring ("%Y-%m-%d %H:%M:%S",
        e.g. 2000-01-01 15:34:12) or datetime object
    tdelta : integer representing time interval in SECONDS

    Returns
    -------
    output : list of datetime.datetime objects
    """
    # isinstance (instead of an exact type comparison) also accepts
    # datetime subclasses such as pandas.Timestamp
    if not isinstance(tstart, dt.datetime):
        tstart = dt.datetime.strptime(tstart, "%Y-%m-%d %H:%M:%S")
    if not isinstance(tend, dt.datetime):
        tend = dt.datetime.strptime(tend, "%Y-%m-%d %H:%M:%S")
    step = dt.timedelta(seconds=tdelta)
    tsteps = [tstart]
    tmptime = tstart + step
    while tmptime <= tend:
        tsteps.append(tmptime)
        tmptime += step
    return tsteps
@deprecated(deprecated_in="0.11.3", removed_in="1.0.0",
            current_version=short_version)
def _tdelta2seconds(tdelta):
    """Convert a dt.timedelta object to seconds.

    Note: microseconds are not included in the result.

    Parameters
    ----------
    tdelta : a dt.timedelta object
    """
    return 86400 * tdelta.days + tdelta.seconds
@deprecated(deprecated_in="0.11.3", removed_in="1.0.0",
            current_version=short_version)
def _get_tdelta(tstart, tend, as_secs=False):
    """Return the difference ``tend - tstart``.

    Strings are parsed as "%Y-%m-%d %H:%M:%S"; the result is a timedelta,
    or whole seconds when *as_secs* is true.
    """
    if not isinstance(tstart, dt.datetime):
        tstart = dt.datetime.strptime(tstart, "%Y-%m-%d %H:%M:%S")
    if not isinstance(tend, dt.datetime):
        tend = dt.datetime.strptime(tend, "%Y-%m-%d %H:%M:%S")
    delta = tend - tstart
    return _tdelta2seconds(delta) if as_secs else delta
@deprecated(deprecated_in="0.11.3", removed_in="1.0.0",
            current_version=short_version)
def iso2datetime(iso):
    """Converts an ISO formatted time string to a datetime object.

    Parameters
    ----------
    iso : string
        time string

    Raises
    ------
    ValueError
        If *iso* matches neither "%Y-%m-%dT%H:%M:%S.%f" nor
        "%Y-%m-%dT%H:%M:%S"; a diagnostic message is printed first.
    """
    # in case the argument has been parsed to datetime before
    # (isinstance also accepts datetime subclasses)
    if isinstance(iso, dt.datetime):
        return iso
    # sometimes isoformat separates date and time by a white space
    iso = iso.replace(" ", "T")
    try:
        return dt.datetime.strptime(iso, "%Y-%m-%dT%H:%M:%S.%f")
    except (ValueError, TypeError):
        pass
    # BUG FIX: in the old code the broad "except Exception" could never
    # catch failures of this second strptime (it was raised inside the
    # first except handler), so the diagnostic below was unreachable.
    try:
        return dt.datetime.strptime(iso, "%Y-%m-%dT%H:%M:%S")
    except Exception:
        print("Could not convert argument <%r> to datetime. "
              "Probably not an isostring. See following traceback:" % iso)
        raise
@deprecated(deprecated_in="0.11.3", removed_in="1.0.0",
            current_version=short_version)
def timestamp2index(ts, delta, refts, **kwargs):
    """Calculates the array index for a certain time in an equidistant
    time-series given the reference time (where the index would be 0)
    and the time discretization.

    If any of the input parameters contains timezone information, all others
    also need to contain timezone information.

    Parameters
    ----------
    ts : str or datetime-object
        The timestamp to determine the index for.
        Strings are converted to datetime via :func:`iso2datetime`.
    delta : str or timedelta object
        The discretization of the time series (the amount of time that
        elapsed between indices).
        As a string it must look like "keyword1=value1,keyword2=value2",
        where the keywords are understood by the timedelta constructor
        (days, hours, minutes, seconds) and the values are integers.
    refts : str or datetime-object
        The reference timestamp corresponding to index 0.
        Strings are converted to datetime via :func:`iso2datetime`.

    Returns
    -------
    index : integer
        The index of a discrete time series array of the given parameters.

    Example
    -------
    >>> import datetime as dt
    >>> timestr1, timestr2 = '2008-06-01T00:00:00', '2007-01-01T00:00:00'
    >>> timestamp2index(timestr1, 'minutes=5', timestr2)
    148896
    >>> timestamp2index(timestr1, 'hours=1,minutes=5',timestr2)
    11453
    >>> timestamp2index(timestr1, dt.timedelta(hours=1, minutes=5), timestr2)
    11453
    """
    _ts = ts if isinstance(ts, dt.datetime) else iso2datetime(ts)
    _refts = refts if isinstance(refts, dt.datetime) else iso2datetime(refts)
    if isinstance(delta, dt.timedelta):
        _delta = delta
    else:
        # parse "keyword1=value1,keyword2=value2" into timedelta kwargs
        pairs = (item.split('=') for item in delta.split(','))
        _delta = dt.timedelta(**{key: int(value) for key, value in pairs})
    return int(_tdelta2seconds(_ts - _refts) / _tdelta2seconds(_delta))
def _idvalid(data, isinvalid=None, minval=None, maxval=None):
    """Return the indices of valid entries in *data*.

    NaN and Inf always count as invalid; further sentinel values can be
    passed via *isinvalid*, and *minval*/*maxval* bound the valid range.

    Parameters
    ----------
    data : :class:`numpy:numpy.ndarray` of floats
    isinvalid : list of what is considered an invalid value
    """
    if isinvalid is None:
        isinvalid = [-99., 99, -9999., -9999]
    invalid = ~np.isfinite(data)
    for sentinel in isinvalid:
        invalid = np.logical_or(invalid, data == sentinel)
    if minval is not None:
        invalid = np.logical_or(invalid, data < minval)
    if maxval is not None:
        invalid = np.logical_or(invalid, data > maxval)
    return np.where(np.logical_not(invalid))[0]
def meshgridN(*arrs):
    """N-dimensional meshgrid over the given 1-D coordinate arrays.

    Each returned array has shape ``(len(arrs[0]), ..., len(arrs[-1]))``
    with the i-th input varying along axis i; this is exactly numpy's
    meshgrid with matrix ('ij') indexing.
    """
    return tuple(np.meshgrid(*arrs, indexing='ij'))
def gridaspoints(*arrs):
"""Creates an N-dimensional grid form arrs and | |
"""
/******************************************************************************
* $Id$
*
* Project: libLAS - http://liblas.org - A BSD library for LAS format data.
* Purpose: Python ctypes function calls
* Author: <NAME>, <EMAIL>
*
******************************************************************************
* Copyright (c) 2009, <NAME>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Martin Isenburg or Iowa Department
* of Natural Resources nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
****************************************************************************/
"""
import atexit
import os
import re
import sys
import ctypes
from ctypes.util import find_library
# NOTE(review): presumably PyDLL is unusable under PyPy's ctypes -- confirm
if 'pypy' not in sys.executable:
    from ctypes import PyDLL
class LASException(Exception):
    """Raised for libLAS-related errors reported by the C library."""
def check_return(result, func, cargs):
    """ctypes errcheck hook for LASError-returning calls.

    A zero result means success; anything else raises LASException with
    the library's last error message.
    """
    if result == 0:
        return True
    msg = 'LASError in "%s": %s' % (func.__name__,
                                    las.LASError_GetLastErrorMsg())
    las.LASError_Reset()
    raise LASException(msg)
def check_void(result, func, cargs):
    """ctypes errcheck hook for void* returns; NULL means failure."""
    if bool(result):
        return result
    msg = 'LASError in "%s": %s' % (func.__name__,
                                    las.LASError_GetLastErrorMsg())
    las.LASError_Reset()
    raise LASException(msg)
def check_void_done(result, func, cargs):
    """ctypes errcheck hook for void* returns that may legitimately be NULL.

    Failure is signalled via the library's error count rather than the
    return value itself.
    """
    if not las.LASError_GetErrorCount():
        return result
    msg = 'LASError in "%s": %s' % (func.__name__,
                                    las.LASError_GetLastErrorMsg())
    las.LASError_Reset()
    raise LASException(msg)
def check_value(result, func, cargs):
    """ctypes errcheck hook for plain value returns.

    Failure is detected via the library's error count; on success the
    raw result is passed through.
    """
    if las.LASError_GetErrorCount() == 0:
        return result
    msg = 'LASError in "%s": %s' % (func.__name__,
                                    las.LASError_GetLastErrorMsg())
    las.LASError_Reset()
    raise LASException(msg)
def check_value_free(result, func, cargs):
    """ctypes errcheck hook for string returns.

    Failure is detected via the library's error count; on success the
    NUL-terminated C string is copied into a Python bytes object.
    """
    if las.LASError_GetErrorCount() != 0:
        msg = 'LASError in "%s": %s' % (func.__name__,
                                        las.LASError_GetLastErrorMsg())
        las.LASError_Reset()
        raise LASException(msg)
    return ctypes.string_at(result)[:]
def free_returned_char_p(result, func, cargs):
    """ctypes errcheck hook: copy a returned C string, then free it.

    libLAS hands back strings that the caller must release through
    ``LASString_Free``; the bytes are copied first so the Python value
    stays valid after the buffer is released.

    (Removed an unused ``size = ctypes.c_int()`` local from the original.)
    """
    # copy the string before the underlying buffer is released
    retvalue = ctypes.string_at(result)
    pdata = ctypes.cast(result, ctypes.POINTER(ctypes.c_char_p))
    las.LASString_Free(pdata)
    return retvalue
# Optional numpy support; HAS_NUMPY lets later code check availability.
try:
    from numpy import array, ndarray
    HAS_NUMPY = True
except ImportError:
    HAS_NUMPY = False
# Locate and load the libLAS C shared library ("las") plus a matching
# free() implementation for releasing memory allocated by the library.
if os.name == 'nt':
    # stolen from Shapely
    # http://trac.gispython.org/projects/PCL/browser/Shapely/trunk/shapely/geos.py
    lib_name = 'liblas_c.dll'
    try:
        # NOTE(review): the missing path separator looks odd, but abspath
        # appears to normalize "...os.py../../../DLLs" to Python's DLLs
        # directory -- confirm on a Windows install before changing.
        local_dlls = os.path.abspath(os.__file__ + "../../../DLLs")
        original_path = os.environ['PATH']
        # prepend the DLLs directory so dependent DLLs can be located
        os.environ['PATH'] = "%s;%s" % (local_dlls, original_path)
        las = ctypes.CDLL(lib_name)
        def free(m):
            # release memory via the MSVC runtime; failures are ignored
            try:
                free = ctypes.cdll.msvcrt.free(m)
            except WindowsError:
                pass
    except (ImportError, WindowsError):
        raise
elif os.name == 'posix':
    platform = os.uname()[0]
    if platform == 'Darwin':
        lib_name = 'liblas_c.dylib'
        free = ctypes.CDLL(find_library('libc')).free
    else:
        # try the newer SONAME first, fall back to the older one below
        lib_name = 'liblas_c.so.3'
        lib_name_alt = 'liblas_c.so.2'
        free = ctypes.CDLL(find_library('c')).free
    try:
        las = ctypes.CDLL(lib_name)
    except:
        las = ctypes.CDLL(lib_name_alt)
else:
    raise LASException('Unsupported OS "%s"' % os.name)
def get_version():
    """Return the liblas library version string (as bytes, per the errcheck)."""
    return las.LAS_GetVersion()
version = get_version()
# ctypes prototypes: declare restype/argtypes for each C entry point and
# attach the errcheck helpers above so liblas errors surface as LASException.
las.LAS_IsGDALEnabled.restype = ctypes.c_int
las.LAS_IsLibGeoTIFFEnabled.restype = ctypes.c_int
# POINTER(c_char) (not c_char_p) so the errcheck receives the raw pointer it
# must copy and free.
las.LAS_GetVersion.restype = ctypes.POINTER(ctypes.c_char)
las.LAS_GetVersion.errcheck = free_returned_char_p
las.LASError_GetLastErrorNum.restype = ctypes.c_int
las.LASError_GetLastErrorMsg.restype = ctypes.POINTER(ctypes.c_char)
las.LASError_GetLastErrorMsg.errcheck = free_returned_char_p
las.LASError_GetLastErrorMethod.restype = ctypes.POINTER(ctypes.c_char)
las.LASError_GetLastErrorMethod.errcheck = free_returned_char_p
las.LASError_GetErrorCount.restype = ctypes.c_int
las.LASReader_Create.argtypes = [ctypes.c_char_p]
las.LASReader_Create.restype = ctypes.c_void_p
las.LASReader_Create.errcheck = check_void
las.LASReader_CreateWithHeader.argtypes = [ctypes.c_char_p, ctypes.c_void_p]
las.LASReader_CreateWithHeader.restype = ctypes.c_void_p
las.LASReader_CreateWithHeader.errcheck = check_void
las.LASReader_GetNextPoint.restype = ctypes.c_void_p
las.LASReader_GetNextPoint.argtypes = [ctypes.c_void_p]
las.LASReader_GetNextPoint.errcheck = check_void_done
las.LASReader_GetPointAt.restype = ctypes.c_void_p
las.LASReader_GetPointAt.argtypes = [ctypes.c_void_p, ctypes.c_ulong]
las.LASReader_GetPointAt.errcheck = check_void_done
las.LASReader_Seek.argtypes = [ctypes.c_void_p, ctypes.c_ulong]
las.LASReader_Seek.errcheck = check_return
las.LASReader_SetSRS.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
las.LASReader_SetSRS.errcheck = check_return
las.LASReader_SetInputSRS.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
las.LASReader_SetInputSRS.errcheck = check_return
las.LASReader_SetOutputSRS.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
las.LASReader_SetOutputSRS.errcheck = check_return
las.LASReader_GetSummaryXML.restype = ctypes.POINTER(ctypes.c_char)
las.LASReader_GetSummaryXML.argtypes = [ctypes.c_void_p]
las.LASReader_GetSummaryXML.errcheck = free_returned_char_p
las.LASReader_Destroy.argtypes = [ctypes.c_void_p]
las.LASReader_Destroy.errcheck = check_void_done
las.LASReader_Destroy.restype = None
las.LASPoint_GetX.restype = ctypes.c_double
las.LASPoint_GetX.argtypes = [ctypes.c_void_p]
las.LASPoint_GetX.errcheck = check_value
las.LASPoint_SetX.restype = ctypes.c_int
las.LASPoint_SetX.argtypes = [ctypes.c_void_p, ctypes.c_double]
las.LASPoint_SetX.errcheck = check_return
las.LASPoint_GetRawX.restype = ctypes.c_long
las.LASPoint_GetRawX.argtypes = [ctypes.c_void_p]
las.LASPoint_GetRawX.errcheck = check_value
las.LASPoint_SetRawX.restype = ctypes.c_int
las.LASPoint_SetRawX.argtypes = [ctypes.c_void_p, ctypes.c_long]
las.LASPoint_SetRawX.errcheck = check_return
las.LASPoint_GetY.restype = ctypes.c_double
las.LASPoint_GetY.argtypes = [ctypes.c_void_p]
las.LASPoint_GetY.errcheck = check_value
las.LASPoint_SetY.restype = ctypes.c_int
las.LASPoint_SetY.argtypes = [ctypes.c_void_p, ctypes.c_double]
las.LASPoint_SetY.errcheck = check_return
las.LASPoint_GetRawY.restype = ctypes.c_long
las.LASPoint_GetRawY.argtypes = [ctypes.c_void_p]
las.LASPoint_GetRawY.errcheck = check_value
las.LASPoint_SetRawY.restype = ctypes.c_int
las.LASPoint_SetRawY.argtypes = [ctypes.c_void_p, ctypes.c_long]
las.LASPoint_SetRawY.errcheck = check_return
las.LASPoint_GetZ.restype = ctypes.c_double
las.LASPoint_GetZ.argtypes = [ctypes.c_void_p]
las.LASPoint_GetZ.errcheck = check_value
las.LASPoint_SetZ.restype = ctypes.c_int
las.LASPoint_SetZ.argtypes = [ctypes.c_void_p, ctypes.c_double]
las.LASPoint_SetZ.errcheck = check_return
las.LASPoint_GetRawZ.restype = ctypes.c_long
las.LASPoint_GetRawZ.argtypes = [ctypes.c_void_p]
las.LASPoint_GetRawZ.errcheck = check_value
las.LASPoint_SetRawZ.restype = ctypes.c_int
las.LASPoint_SetRawZ.argtypes = [ctypes.c_void_p, ctypes.c_long]
las.LASPoint_SetRawZ.errcheck = check_return
las.LASPoint_GetIntensity.restype = ctypes.c_short
las.LASPoint_GetIntensity.argtypes = [ctypes.c_void_p]
las.LASPoint_GetIntensity.errcheck = check_value
las.LASPoint_SetIntensity.restype = ctypes.c_int
las.LASPoint_SetIntensity.argtypes = [ctypes.c_void_p, ctypes.c_short]
las.LASPoint_SetIntensity.errcheck = check_return
las.LASPoint_GetReturnNumber.restype = ctypes.c_ushort
las.LASPoint_GetReturnNumber.argtypes = [ctypes.c_void_p]
las.LASPoint_GetReturnNumber.errcheck = check_value
las.LASPoint_SetReturnNumber.restype = ctypes.c_int
las.LASPoint_SetReturnNumber.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
las.LASPoint_SetReturnNumber.errcheck = check_return
las.LASPoint_GetNumberOfReturns.restype = ctypes.c_ushort
las.LASPoint_GetNumberOfReturns.argtypes = [ctypes.c_void_p]
las.LASPoint_GetNumberOfReturns.errcheck = check_value
las.LASPoint_SetNumberOfReturns.restype = ctypes.c_int
las.LASPoint_SetNumberOfReturns.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
las.LASPoint_SetNumberOfReturns.errcheck = check_return
las.LASPoint_GetScanDirection.restype = ctypes.c_ushort
las.LASPoint_GetScanDirection.argtypes = [ctypes.c_void_p]
las.LASPoint_GetScanDirection.errcheck = check_value
las.LASPoint_SetScanDirection.restype = ctypes.c_int
las.LASPoint_SetScanDirection.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
las.LASPoint_SetScanDirection.errcheck = check_return
las.LASPoint_GetFlightLineEdge.restype = ctypes.c_ushort
las.LASPoint_GetFlightLineEdge.argtypes = [ctypes.c_void_p]
las.LASPoint_GetFlightLineEdge.errcheck = check_value
las.LASPoint_SetFlightLineEdge.restype = ctypes.c_int
las.LASPoint_SetFlightLineEdge.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
las.LASPoint_SetFlightLineEdge.errcheck = check_return
las.LASPoint_GetScanFlags.restype = ctypes.c_ubyte
las.LASPoint_GetScanFlags.argtypes = [ctypes.c_void_p]
las.LASPoint_GetScanFlags.errcheck = check_value
las.LASPoint_SetScanFlags.restype = ctypes.c_int
las.LASPoint_SetScanFlags.argtypes = [ctypes.c_void_p, ctypes.c_ubyte]
las.LASPoint_SetScanFlags.errcheck = check_return
las.LASPoint_GetClassification.restype = ctypes.c_ubyte
las.LASPoint_GetClassification.argtypes = [ctypes.c_void_p]
las.LASPoint_GetClassification.errcheck = check_value
las.LASPoint_SetClassification.restype = ctypes.c_int
las.LASPoint_SetClassification.argtypes = [ctypes.c_void_p, ctypes.c_ubyte]
las.LASPoint_SetClassification.errcheck = check_return
las.LASPoint_GetTime.restype = ctypes.c_double
las.LASPoint_GetTime.argtypes = [ctypes.c_void_p]
las.LASPoint_GetTime.errcheck = check_value
las.LASPoint_SetTime.restype = ctypes.c_int
las.LASPoint_SetTime.argtypes = [ctypes.c_void_p, ctypes.c_double]
las.LASPoint_SetTime.errcheck = check_return
las.LASPoint_GetScanAngleRank.restype = ctypes.c_int8
las.LASPoint_GetScanAngleRank.argtypes = [ctypes.c_void_p]
las.LASPoint_GetScanAngleRank.errcheck = check_value
las.LASPoint_SetScanAngleRank.restype = ctypes.c_int
las.LASPoint_SetScanAngleRank.argtypes = [ctypes.c_void_p, ctypes.c_int8]
las.LASPoint_SetScanAngleRank.errcheck = check_return
las.LASPoint_GetUserData.restype = ctypes.c_ubyte
las.LASPoint_GetUserData.argtypes = [ctypes.c_void_p]
las.LASPoint_GetUserData.errcheck = check_value
las.LASPoint_SetUserData.restype = ctypes.c_int
las.LASPoint_SetUserData.argtypes = [ctypes.c_void_p, ctypes.c_ubyte]
las.LASPoint_SetUserData.errcheck = check_return
# LASPoint_SetPointSourceId takes (point, uint16) and returns a LASError
# status int, like every other LASPoint_Set* prototype in this module; the
# original declaration mistakenly reused the getter's signature (no value
# argument, c_uint16 restype, check_value errcheck).
las.LASPoint_SetPointSourceId.restype = ctypes.c_int
las.LASPoint_SetPointSourceId.argtypes = [ctypes.c_void_p, ctypes.c_uint16]
las.LASPoint_SetPointSourceId.errcheck = check_return
las.LASPoint_GetPointSourceId.restype = ctypes.c_uint16
las.LASPoint_GetPointSourceId.argtypes = [ctypes.c_void_p]
las.LASPoint_GetPointSourceId.errcheck = check_value
las.LASPoint_GetXML.restype = ctypes.POINTER(ctypes.c_char)
las.LASPoint_GetXML.argtypes = [ctypes.c_void_p]
las.LASPoint_GetXML.errcheck = free_returned_char_p
las.LASPoint_GetData.argtypes = [ctypes.c_void_p,
ctypes.POINTER(ctypes.c_ubyte)]
las.LASPoint_GetData.errcheck = check_value
las.LASPoint_GetData.restype = ctypes.c_int
las.LASPoint_SetData.argtypes = [ctypes.c_void_p,
ctypes.POINTER(ctypes.c_ubyte)]
las.LASPoint_SetData.errcheck = check_value
las.LASPoint_SetData.restype = ctypes.c_int
las.LASPoint_Create.restype = ctypes.c_void_p
las.LASPoint_Create.errcheck = check_void
las.LASPoint_Copy.restype = ctypes.c_void_p
las.LASPoint_Copy.argtypes = [ctypes.c_void_p]
las.LASPoint_Copy.errcheck = check_void
las.LASReader_GetHeader.restype = ctypes.c_void_p
las.LASReader_GetHeader.argtypes = [ctypes.c_void_p]
las.LASReader_GetHeader.errcheck = check_void
las.LASHeader_Destroy.argtypes = [ctypes.c_void_p]
las.LASHeader_Destroy.errcheck = check_void_done
las.LASHeader_Destroy.restype = None
las.LASHeader_Copy.restype = ctypes.c_void_p
las.LASHeader_Copy.argtypes = [ctypes.c_void_p]
las.LASHeader_Copy.errcheck = check_void
las.LASHeader_Create.restype = ctypes.c_void_p
las.LASHeader_Create.errcheck = check_void
las.LASHeader_GetFileSignature.argtypes = [ctypes.c_void_p]
las.LASHeader_GetFileSignature.errcheck = check_value_free
las.LASHeader_GetFileSignature.restype = ctypes.c_char_p
las.LASHeader_GetFileSourceId.restype = ctypes.c_ushort
las.LASHeader_GetFileSourceId.argtypes = [ctypes.c_void_p]
las.LASHeader_GetFileSourceId.errcheck = check_value
# Both setters take (header, uint16 value) and return a LASError status int,
# matching the other LASHeader_Set* prototypes; the originals omitted the
# value argument and declared a c_short return with the value errcheck.
las.LASHeader_SetFileSourceId.restype = ctypes.c_int
las.LASHeader_SetFileSourceId.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
las.LASHeader_SetFileSourceId.errcheck = check_return
las.LASHeader_SetReserved.restype = ctypes.c_int
las.LASHeader_SetReserved.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
las.LASHeader_SetReserved.errcheck = check_return
las.LASHeader_GetReserved.restype = ctypes.c_short
las.LASHeader_GetReserved.argtypes = [ctypes.c_void_p]
las.LASHeader_GetReserved.errcheck = check_value
las.LASHeader_GetProjectId.argtypes = [ctypes.c_void_p]
las.LASHeader_GetProjectId.errcheck = check_value_free
las.LASHeader_GetProjectId.restype = ctypes.c_char_p
las.LASHeader_GetVersionMajor.restype = ctypes.c_ubyte
las.LASHeader_GetVersionMajor.argtypes = [ctypes.c_void_p]
las.LASHeader_GetVersionMajor.errcheck = check_value
las.LASHeader_SetVersionMajor.restype = ctypes.c_int
las.LASHeader_SetVersionMajor.argtypes = [ctypes.c_void_p, ctypes.c_ubyte]
las.LASHeader_SetVersionMajor.errcheck = check_return
las.LASHeader_GetVersionMinor.restype = ctypes.c_ubyte
las.LASHeader_GetVersionMinor.argtypes = [ctypes.c_void_p]
las.LASHeader_GetVersionMinor.errcheck = check_value
las.LASHeader_SetVersionMinor.restype = ctypes.c_int
las.LASHeader_SetVersionMinor.argtypes = [ctypes.c_void_p, ctypes.c_ubyte]
las.LASHeader_SetVersionMinor.errcheck = check_return
las.LASHeader_GetSystemId.argtypes = [ctypes.c_void_p]
las.LASHeader_GetSystemId.errcheck = check_value_free
las.LASHeader_GetSystemId.restype = ctypes.c_char_p
las.LASHeader_SetSystemId.restype = ctypes.c_int
las.LASHeader_SetSystemId.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
las.LASHeader_SetSystemId.errcheck = check_return
las.LASHeader_GetSoftwareId.argtypes = [ctypes.c_void_p]
las.LASHeader_GetSoftwareId.errcheck = check_value_free
las.LASHeader_GetSoftwareId.restype = ctypes.c_char_p
las.LASHeader_SetSoftwareId.restype = ctypes.c_int
las.LASHeader_SetSoftwareId.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
las.LASHeader_SetSoftwareId.errcheck = check_return
las.LASHeader_GetCreationDOY.restype = ctypes.c_ushort
las.LASHeader_GetCreationDOY.argtypes = [ctypes.c_void_p]
las.LASHeader_GetCreationDOY.errcheck = check_value
las.LASHeader_SetCreationDOY.restype = ctypes.c_int
las.LASHeader_SetCreationDOY.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
las.LASHeader_SetCreationDOY.errcheck = check_return
las.LASHeader_GetCreationYear.restype = ctypes.c_ushort
las.LASHeader_GetCreationYear.argtypes = [ctypes.c_void_p]
las.LASHeader_GetCreationYear.errcheck = check_value
las.LASHeader_SetCreationYear.restype = ctypes.c_int
las.LASHeader_SetCreationYear.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
las.LASHeader_SetCreationYear.errcheck = check_return
las.LASHeader_GetHeaderSize.restype = ctypes.c_ushort
las.LASHeader_GetHeaderSize.argtypes = [ctypes.c_void_p]
las.LASHeader_GetHeaderSize.errcheck = check_value
las.LASHeader_GetDataOffset.restype = ctypes.c_ulong
las.LASHeader_GetDataOffset.argtypes = [ctypes.c_void_p]
las.LASHeader_GetDataOffset.errcheck = check_value
las.LASHeader_SetDataOffset.restype = ctypes.c_int
las.LASHeader_SetDataOffset.argtypes = [ctypes.c_void_p, ctypes.c_int]
las.LASHeader_SetDataOffset.errcheck = check_return
las.LASHeader_GetHeaderPadding.restype = ctypes.c_ulong
las.LASHeader_GetHeaderPadding.argtypes = [ctypes.c_void_p]
las.LASHeader_GetHeaderPadding.errcheck = check_value
las.LASHeader_SetHeaderPadding.restype = ctypes.c_int
las.LASHeader_SetHeaderPadding.argtypes = [ctypes.c_void_p, ctypes.c_int]
las.LASHeader_SetHeaderPadding.errcheck = check_return
las.LASHeader_GetRecordsCount.restype = ctypes.c_ulong
las.LASHeader_GetRecordsCount.argtypes = [ctypes.c_void_p]
las.LASHeader_GetRecordsCount.errcheck = check_value
las.LASHeader_GetDataFormatId.restype = ctypes.c_ubyte
las.LASHeader_GetDataFormatId.argtypes = [ctypes.c_void_p]
las.LASHeader_GetDataFormatId.errcheck = check_value
las.LASHeader_SetDataFormatId.restype = ctypes.c_int
las.LASHeader_SetDataFormatId.argtypes = [ctypes.c_void_p, ctypes.c_int]
las.LASHeader_SetDataFormatId.errcheck = check_return
las.LASHeader_GetDataRecordLength.restype = ctypes.c_ushort
las.LASHeader_GetDataRecordLength.argtypes = [ctypes.c_void_p]
las.LASHeader_GetDataRecordLength.errcheck = check_value
las.LASHeader_GetPointRecordsCount.restype = ctypes.c_ulong
las.LASHeader_GetPointRecordsCount.argtypes = [ctypes.c_void_p]
las.LASHeader_GetPointRecordsCount.errcheck = check_value
las.LASHeader_SetPointRecordsCount.restype = ctypes.c_int
las.LASHeader_SetPointRecordsCount.argtypes = [ctypes.c_void_p, ctypes.c_ulong]
las.LASHeader_SetPointRecordsCount.errcheck = check_return
las.LASHeader_GetPointRecordsByReturnCount.restype = ctypes.c_ulong
las.LASHeader_GetPointRecordsByReturnCount.argtypes = [ctypes.c_void_p,
ctypes.c_int]
las.LASHeader_GetPointRecordsByReturnCount.errcheck = check_value
las.LASHeader_SetPointRecordsByReturnCount.restype = ctypes.c_int
las.LASHeader_SetPointRecordsByReturnCount.argtypes = [ctypes.c_void_p,
ctypes.c_int,
ctypes.c_ulong]
las.LASHeader_SetPointRecordsByReturnCount.errcheck = check_return
las.LASHeader_GetScaleX.restype = ctypes.c_double
las.LASHeader_GetScaleX.argtypes = [ctypes.c_void_p]
las.LASHeader_GetScaleX.errcheck = check_value
las.LASHeader_GetScaleY.restype = ctypes.c_double
las.LASHeader_GetScaleY.argtypes = [ctypes.c_void_p]
las.LASHeader_GetScaleY.errcheck = check_value
las.LASHeader_GetScaleZ.restype = ctypes.c_double
las.LASHeader_GetScaleZ.argtypes = [ctypes.c_void_p]
las.LASHeader_GetScaleZ.errcheck = check_value
las.LASHeader_SetScale.restype = ctypes.c_int
las.LASHeader_SetScale.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double,
ctypes.c_double]
las.LASHeader_SetScale.errcheck = check_return
las.LASHeader_GetOffsetX.restype = ctypes.c_double
las.LASHeader_GetOffsetX.argtypes = [ctypes.c_void_p]
las.LASHeader_GetOffsetX.errcheck = check_value
las.LASHeader_GetOffsetY.restype = ctypes.c_double
las.LASHeader_GetOffsetY.argtypes = [ctypes.c_void_p]
las.LASHeader_GetOffsetY.errcheck = check_value
las.LASHeader_GetOffsetZ.restype = ctypes.c_double
las.LASHeader_GetOffsetZ.argtypes = [ctypes.c_void_p]
las.LASHeader_GetOffsetZ.errcheck = check_value
las.LASHeader_SetOffset.restype = ctypes.c_int
las.LASHeader_SetOffset.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double,
ctypes.c_double]
las.LASHeader_SetOffset.errcheck = check_return
las.LASHeader_GetMinX.restype = ctypes.c_double
las.LASHeader_GetMinX.argtypes = [ctypes.c_void_p]
las.LASHeader_GetMinX.errcheck = check_value
las.LASHeader_GetMinY.restype = ctypes.c_double
las.LASHeader_GetMinY.argtypes = [ctypes.c_void_p]
las.LASHeader_GetMinY.errcheck = check_value
las.LASHeader_GetMinZ.restype = ctypes.c_double
las.LASHeader_GetMinZ.argtypes = [ctypes.c_void_p]
las.LASHeader_GetMinZ.errcheck = check_value
las.LASHeader_SetMin.restype = ctypes.c_int
las.LASHeader_SetMin.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double,
ctypes.c_double]
las.LASHeader_SetMin.errcheck = check_return
las.LASHeader_GetMaxX.restype = ctypes.c_double
las.LASHeader_GetMaxX.argtypes = [ctypes.c_void_p]
las.LASHeader_GetMaxX.errcheck = check_value
las.LASHeader_GetMaxY.restype = ctypes.c_double
las.LASHeader_GetMaxY.argtypes = [ctypes.c_void_p]
las.LASHeader_GetMaxY.errcheck = check_value
las.LASHeader_GetMaxZ.restype = ctypes.c_double
las.LASHeader_GetMaxZ.argtypes = [ctypes.c_void_p]
las.LASHeader_GetMaxZ.errcheck = check_value
las.LASHeader_SetMax.restype = ctypes.c_int
las.LASHeader_SetMax.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double,
ctypes.c_double]
las.LASHeader_SetMax.errcheck = check_return
las.LASHeader_GetVLR.argtypes = [ctypes.c_void_p, ctypes.c_int]
las.LASHeader_GetVLR.errcheck = check_void
las.LASHeader_GetVLR.restype = ctypes.c_void_p
las.LASHeader_DeleteVLR.argtypes = [ctypes.c_void_p, ctypes.c_int]
las.LASHeader_DeleteVLR.errcheck = check_return
las.LASHeader_AddVLR.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
las.LASHeader_AddVLR.errcheck = check_return
las.LASHeader_AddVLR.restype = ctypes.c_int
las.LASHeader_GetXML.restype = ctypes.POINTER(ctypes.c_char)
las.LASHeader_GetXML.argtypes = [ctypes.c_void_p]
las.LASHeader_GetXML.errcheck = free_returned_char_p
las.LASWriter_Create.restype = ctypes.c_void_p
las.LASWriter_Create.argtypes = [ctypes.c_char_p,
ctypes.c_void_p,
ctypes.c_int]
las.LASWriter_Create.errcheck = check_void
las.LASWriter_WritePoint.restype = ctypes.c_int
las.LASWriter_WritePoint.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
las.LASWriter_WritePoint.errcheck = check_return
las.LASWriter_WriteHeader.restype = ctypes.c_int
las.LASWriter_WriteHeader.argtypes = [ctypes.c_void_p,
ctypes.c_void_p]
las.LASWriter_WriteHeader.errcheck = check_return
las.LASWriter_SetSRS.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
las.LASWriter_SetSRS.errcheck = | |
<filename>deep_polygoggles.py
# coding: utf-8
# # Porting Tensorflow tutorial "Deep MNIST for Experts" to polygoggles
#
# based on https://www.tensorflow.org/versions/r0.7/tutorials/mnist/pros/index.html
# In[21]:
import math
import tensorflow as tf
import datasets
import make_polygon_pngs
def get_data_sets_and_params(use_MNIST_instead_of_our_data=False):
    """Return (data_sets, params) for either MNIST or generated polygon data.

    With the flag set, the standard MNIST digits are downloaded/loaded;
    otherwise a fresh collection of polygon images is rendered to disk and
    read back as data sets.
    """
    if use_MNIST_instead_of_our_data:
        params = {
            'width': 28,
            'height': 28,
            'num_training_steps': 20000,
            'batch_size': 50,
        }
        from tensorflow.examples.tutorials.mnist import input_data
        data_sets = input_data.read_data_sets('MNIST_data', one_hot=True)
    else:
        params = {
            'width': 70,
            'height': 70,
            'num_training_steps': 1000,
            'batch_size': 50,
            'training_images': 5000,
            'test_images': 1000,
            'allow_rotation': True,
        }
        collection_dir = make_polygon_pngs.make_collection(
            params['width'],
            params['height'],
            params['training_images'],
            params['test_images'],
            allow_rotation=params['allow_rotation'])
        data_sets = datasets.read_data_sets(collection_dir)
    return data_sets, params
def run_regression(data_sets):
    """Train and evaluate a single-layer softmax regression on data_sets.

    Fix: the original read module-level ``width`` and ``height`` globals
    that are never defined in this module (NameError on first call); the
    flattened input size is now inferred from the data itself.

    Returns the test-set accuracy as a float (MNIST should reach ~0.91).
    """
    sess = tf.InteractiveSession()
    # Images arrive pre-flattened (one row per example); labels are one-hot.
    flat_size = data_sets.train.images.shape[1]
    num_labels = data_sets.train.labels.shape[1]
    x = tf.placeholder(tf.float32, shape=[None, flat_size])
    y_ = tf.placeholder(tf.float32, shape=[None, num_labels])
    W = tf.Variable(tf.zeros([flat_size, num_labels]))
    b = tf.Variable(tf.zeros([num_labels]))
    sess.run(tf.initialize_all_variables())
    # Softmax linear model: class probabilities from one affine transform.
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    # Cost: cross-entropy between the target and the model's prediction.
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    # Steepest gradient descent, step length 0.01, on the cross entropy.
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
    # Running train_step applies one gradient-descent update per minibatch.
    for i in range(1000):
        batch = data_sets.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    # Mean of the booleans cast to float is the fraction predicted correctly,
    # e.g. [True, False, True, True] -> [1, 0, 1, 1] -> 0.75.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    accuracy = accuracy.eval(feed_dict={x: data_sets.test.images,
                                        y_: data_sets.test.labels})
    print("Accuracy: %.5f" % accuracy)
    return accuracy
def weight_variable(shape):
    """Return a weight Variable of `shape`, seeded with truncated-normal
    noise (stddev 0.1) for symmetry breaking."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Return a bias Variable of `shape`, filled with 0.1 (slightly positive
    to avoid dead ReLU neurons)."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of input x with kernel W: stride 1 in every dimension
    and SAME (zero) padding, so output spatial size equals input size."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """Plain max pooling over non-overlapping 2x2 blocks (halves each
    spatial dimension; SAME padding handles odd sizes)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def run_multilayer_convolutional_networkd():
"""
# # Build a Multilayer Convolutional Network
#
# Getting .91 accuracy on MNIST is bad. It's almost embarrassingly bad. In this section, we'll fix that, jumping from a very simple model to something moderately sophisticated: a small convolutional neural network. This will get us to around 99.2% accuracy -- not state of the art, but respectable.
#
# ## Weight Initialization
#
# To create this model, we're going to need to create a lot of weights and biases. One should generally initialize weights with a small amount of noise for symmetry breaking, and to prevent 0 gradients. Since we're using ReLU neurons, it is also good practice to initialize them with a slightly positive initial bias to avoid "dead neurons." Instead of doing this repeatedly while we build the model, let's create two handy functions to do it for us.
"""
# ## Convolution and Pooling
#
# TensorFlow also gives us a lot of flexibility in convolution and pooling operations.
# How do we handle the boundaries? What is our stride size? In this example, we're always going
# to choose the vanilla version. Our convolutions uses a stride of one and are zero padded so
# that the output is the same size as the input.
# Our pooling is plain old max pooling over 2x2 blocks. To keep our code cleaner,
# let's also abstract those operations into functions.
# ## First Convolutional Layer
#
# We can now implement our first layer. It will consist of convolution, followed by max pooling. The convolutional will compute 32 features for each 5x5 patch. Its weight tensor will have a shape of [5, 5, 1, 32]. The first two dimensions are the patch size, the next is the number of input channels, and the last is the number of output channels. We will also have a bias vector with a component for each output channel.
# In[16]:
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# To apply the layer, we first reshape x to a 4d tensor, with the second and third dimensions corresponding to image width and height, and the final dimension corresponding to the number of color channels.
# In[17]:
x_image = tf.reshape(x, [-1, width, height,1]) # XXX not sure which is width and which is height
# In[18]:
# We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool.
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# ## Second Convolutional Layer
#
# In order to build a deep network, we stack several layers of this type. The second layer will have 64 features for each 5x5 patch.
# In[19]:
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# ## Densely Connected Layer
#
# Now that the image size has been reduced to 7x7, we add a fully-connected layer with 1024 neurons to allow processing on the entire image. We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight matrix, add a bias, and apply a ReLU.
# XXX where is the 7x7 coming from?
#
# when bumping to width, height of 50 each:
#
# InvalidArgumentError: Input to reshape is a tensor with 540800 values, but the requested shape requires a multiple of 3136
#
# 7 x 7 x 64 = 3136
#
#
# 540800 / 64. = 8450
#
# 13 x 13 x 50 x 64 = 540800
#
#
#
#
#
# On MNIST, if I change the densely connected layer to fail (change the 7x7x64 to 7x7x65 in both W_fcl and h_pool2_flat
# for example, then I get the following error as soon as start to train:
#
# InvalidArgumentError: Input to reshape is a tensor with 156800 values, but the requested shape requires a multiple of 3185
#
# note 3185 = 7x7x65
#
# 156800 = 7 * 7 * 64 * 50
#
# 50 is batch size
#
# ##### with width & height = 70:
# Input to reshape is a tensor with 1036800 values, but the requested shape requires a multiple of 10816
#
# ##### with width & height = 150:
# Input to reshape is a tensor with 4620800 values, but the requested shape requires a multiple of 20736
# In[23]:
def get_size_reduced_to_from_input_tensor_size(input_tensor_size):
size_reduced_to_squared = input_tensor_size / 64. / batch_size # last divide is 50., pretty sure it's batch size
return math.sqrt(size_reduced_to_squared)
print(get_size_reduced_to_from_input_tensor_size(4620800))
print(get_size_reduced_to_from_input_tensor_size(1036800))
# In[24]:
if use_MNIST_instead_of_our_data:
size_reduced_to = 7
else:
# for width & height = 50, size_reduced_to seems to be 13
# for width & height = 70, size_reduced_to seems to be 18
# for width & height = 150, size_reduced_to seems to be 38
size_reduced_to = | |
<filename>font_icons/management/commands/loadfontawesome5_free.py
from django.core.management.base import BaseCommand
from ...models import FontIconModel
class Command(BaseCommand):
help = "Load all free fontawesome icons"
def handle(self, *args, **options):
FontIconModel.objects.bulk_create([
FontIconModel(style="fas", icon_name="fa-address-book"),
FontIconModel(style="fas", icon_name="fa-address-card"),
FontIconModel(style="fas", icon_name="fa-adjust"),
FontIconModel(style="fas", icon_name="fa-air-freshener"),
FontIconModel(style="fas", icon_name="fa-align-center"),
FontIconModel(style="fas", icon_name="fa-align-justify"),
FontIconModel(style="fas", icon_name="fa-align-left"),
FontIconModel(style="fas", icon_name="fa-align-right"),
FontIconModel(style="fas", icon_name="fa-allergies"),
FontIconModel(style="fas", icon_name="fa-ambulance"),
FontIconModel(style="fas", icon_name="fa-american-sign-language-interpreting"),
FontIconModel(style="fas", icon_name="fa-anchor"),
FontIconModel(style="fas", icon_name="fa-angle-double-down"),
FontIconModel(style="fas", icon_name="fa-angle-double-left"),
FontIconModel(style="fas", icon_name="fa-angle-double-right"),
FontIconModel(style="fas", icon_name="fa-angle-double-up"),
FontIconModel(style="fas", icon_name="fa-angle-down"),
FontIconModel(style="fas", icon_name="fa-angle-left"),
FontIconModel(style="fas", icon_name="fa-angle-right"),
FontIconModel(style="fas", icon_name="fa-angle-up"),
FontIconModel(style="fas", icon_name="fa-angry"),
FontIconModel(style="fas", icon_name="fa-ankh"),
FontIconModel(style="fas", icon_name="fa-apple-alt"),
FontIconModel(style="fas", icon_name="fa-archive"),
FontIconModel(style="fas", icon_name="fa-archway"),
FontIconModel(style="fas", icon_name="fa-arrow-alt-circle-down"),
FontIconModel(style="fas", icon_name="fa-arrow-alt-circle-left"),
FontIconModel(style="fas", icon_name="fa-arrow-alt-circle-right"),
FontIconModel(style="fas", icon_name="fa-arrow-alt-circle-up"),
FontIconModel(style="fas", icon_name="fa-arrow-circle-down"),
FontIconModel(style="fas", icon_name="fa-arrow-circle-left"),
FontIconModel(style="fas", icon_name="fa-arrow-circle-right"),
FontIconModel(style="fas", icon_name="fa-arrow-circle-up"),
FontIconModel(style="fas", icon_name="fa-arrow-down"),
FontIconModel(style="fas", icon_name="fa-arrow-left"),
FontIconModel(style="fas", icon_name="fa-arrow-right"),
FontIconModel(style="fas", icon_name="fa-arrow-up"),
FontIconModel(style="fas", icon_name="fa-arrows-alt"),
FontIconModel(style="fas", icon_name="fa-arrows-alt-h"),
FontIconModel(style="fas", icon_name="fa-arrows-alt-v"),
FontIconModel(style="fas", icon_name="fa-assistive-listening-systems"),
FontIconModel(style="fas", icon_name="fa-asterisk"),
FontIconModel(style="fas", icon_name="fa-at"),
FontIconModel(style="fas", icon_name="fa-atlas"),
FontIconModel(style="fas", icon_name="fa-atom"),
FontIconModel(style="fas", icon_name="fa-audio-description"),
FontIconModel(style="fas", icon_name="fa-award"),
FontIconModel(style="fas", icon_name="fa-baby"),
FontIconModel(style="fas", icon_name="fa-baby-carriage"),
FontIconModel(style="fas", icon_name="fa-backspace"),
FontIconModel(style="fas", icon_name="fa-backward"),
FontIconModel(style="fas", icon_name="fa-bacon"),
FontIconModel(style="fas", icon_name="fa-balance-scale"),
FontIconModel(style="fas", icon_name="fa-ban"),
FontIconModel(style="fas", icon_name="fa-band-aid"),
FontIconModel(style="fas", icon_name="fa-barcode"),
FontIconModel(style="fas", icon_name="fa-bars"),
FontIconModel(style="fas", icon_name="fa-baseball-ball"),
FontIconModel(style="fas", icon_name="fa-basketball-ball"),
FontIconModel(style="fas", icon_name="fa-bath"),
FontIconModel(style="fas", icon_name="fa-battery-empty"),
FontIconModel(style="fas", icon_name="fa-battery-full"),
FontIconModel(style="fas", icon_name="fa-battery-half"),
FontIconModel(style="fas", icon_name="fa-battery-quarter"),
FontIconModel(style="fas", icon_name="fa-battery-three-quarters"),
FontIconModel(style="fas", icon_name="fa-bed"),
FontIconModel(style="fas", icon_name="fa-beer"),
FontIconModel(style="fas", icon_name="fa-bell"),
FontIconModel(style="fas", icon_name="fa-bell-slash"),
FontIconModel(style="fas", icon_name="fa-bezier-curve"),
FontIconModel(style="fas", icon_name="fa-bible"),
FontIconModel(style="fas", icon_name="fa-bicycle"),
FontIconModel(style="fas", icon_name="fa-binoculars"),
FontIconModel(style="fas", icon_name="fa-biohazard"),
FontIconModel(style="fas", icon_name="fa-birthday-cake"),
FontIconModel(style="fas", icon_name="fa-blender"),
FontIconModel(style="fas", icon_name="fa-blender-phone"),
FontIconModel(style="fas", icon_name="fa-blind"),
FontIconModel(style="fas", icon_name="fa-blog"),
FontIconModel(style="fas", icon_name="fa-bold"),
FontIconModel(style="fas", icon_name="fa-bolt"),
FontIconModel(style="fas", icon_name="fa-bomb"),
FontIconModel(style="fas", icon_name="fa-bone"),
FontIconModel(style="fas", icon_name="fa-bong"),
FontIconModel(style="fas", icon_name="fa-book"),
FontIconModel(style="fas", icon_name="fa-book-dead"),
FontIconModel(style="fas", icon_name="fa-book-medical"),
FontIconModel(style="fas", icon_name="fa-book-open"),
FontIconModel(style="fas", icon_name="fa-book-reader"),
FontIconModel(style="fas", icon_name="fa-bookmark"),
FontIconModel(style="fas", icon_name="fa-bowling-ball"),
FontIconModel(style="fas", icon_name="fa-box"),
FontIconModel(style="fas", icon_name="fa-box-open"),
FontIconModel(style="fas", icon_name="fa-boxes"),
FontIconModel(style="fas", icon_name="fa-braille"),
FontIconModel(style="fas", icon_name="fa-brain"),
FontIconModel(style="fas", icon_name="fa-bread-slice"),
FontIconModel(style="fas", icon_name="fa-briefcase"),
FontIconModel(style="fas", icon_name="fa-briefcase-medical"),
FontIconModel(style="fas", icon_name="fa-broadcast-tower"),
FontIconModel(style="fas", icon_name="fa-broom"),
FontIconModel(style="fas", icon_name="fa-brush"),
FontIconModel(style="fas", icon_name="fa-bug"),
FontIconModel(style="fas", icon_name="fa-building"),
FontIconModel(style="fas", icon_name="fa-bullhorn"),
FontIconModel(style="fas", icon_name="fa-bullseye"),
FontIconModel(style="fas", icon_name="fa-burn"),
FontIconModel(style="fas", icon_name="fa-bus"),
FontIconModel(style="fas", icon_name="fa-bus-alt"),
FontIconModel(style="fas", icon_name="fa-business-time"),
FontIconModel(style="fas", icon_name="fa-calculator"),
FontIconModel(style="fas", icon_name="fa-calendar"),
FontIconModel(style="fas", icon_name="fa-calendar-alt"),
FontIconModel(style="fas", icon_name="fa-calendar-check"),
FontIconModel(style="fas", icon_name="fa-calendar-day"),
FontIconModel(style="fas", icon_name="fa-calendar-minus"),
FontIconModel(style="fas", icon_name="fa-calendar-plus"),
FontIconModel(style="fas", icon_name="fa-calendar-times"),
FontIconModel(style="fas", icon_name="fa-calendar-week"),
FontIconModel(style="fas", icon_name="fa-camera"),
FontIconModel(style="fas", icon_name="fa-camera-retro"),
FontIconModel(style="fas", icon_name="fa-campground"),
FontIconModel(style="fas", icon_name="fa-candy-cane"),
FontIconModel(style="fas", icon_name="fa-cannabis"),
FontIconModel(style="fas", icon_name="fa-capsules"),
FontIconModel(style="fas", icon_name="fa-car"),
FontIconModel(style="fas", icon_name="fa-car-alt"),
FontIconModel(style="fas", icon_name="fa-car-battery"),
FontIconModel(style="fas", icon_name="fa-car-crash"),
FontIconModel(style="fas", icon_name="fa-car-side"),
FontIconModel(style="fas", icon_name="fa-caret-down"),
FontIconModel(style="fas", icon_name="fa-caret-left"),
FontIconModel(style="fas", icon_name="fa-caret-right"),
FontIconModel(style="fas", icon_name="fa-caret-square-down"),
FontIconModel(style="fas", icon_name="fa-caret-square-left"),
FontIconModel(style="fas", icon_name="fa-caret-square-right"),
FontIconModel(style="fas", icon_name="fa-caret-square-up"),
FontIconModel(style="fas", icon_name="fa-caret-up"),
FontIconModel(style="fas", icon_name="fa-carrot"),
FontIconModel(style="fas", icon_name="fa-cart-arrow-down"),
FontIconModel(style="fas", icon_name="fa-cart-plus"),
FontIconModel(style="fas", icon_name="fa-cash-register"),
FontIconModel(style="fas", icon_name="fa-cat"),
FontIconModel(style="fas", icon_name="fa-certificate"),
FontIconModel(style="fas", icon_name="fa-chair"),
FontIconModel(style="fas", icon_name="fa-chalkboard"),
FontIconModel(style="fas", icon_name="fa-chalkboard-teacher"),
FontIconModel(style="fas", icon_name="fa-charging-station"),
FontIconModel(style="fas", icon_name="fa-chart-area"),
FontIconModel(style="fas", icon_name="fa-chart-bar"),
FontIconModel(style="fas", icon_name="fa-chart-line"),
FontIconModel(style="fas", icon_name="fa-chart-pie"),
FontIconModel(style="fas", icon_name="fa-check"),
FontIconModel(style="fas", icon_name="fa-check-circle"),
FontIconModel(style="fas", icon_name="fa-check-double"),
FontIconModel(style="fas", icon_name="fa-check-square"),
FontIconModel(style="fas", icon_name="fa-cheese"),
FontIconModel(style="fas", icon_name="fa-chess"),
FontIconModel(style="fas", icon_name="fa-chess-bishop"),
FontIconModel(style="fas", icon_name="fa-chess-board"),
FontIconModel(style="fas", icon_name="fa-chess-king"),
FontIconModel(style="fas", icon_name="fa-chess-knight"),
FontIconModel(style="fas", icon_name="fa-chess-pawn"),
FontIconModel(style="fas", icon_name="fa-chess-queen"),
FontIconModel(style="fas", icon_name="fa-chess-rook"),
FontIconModel(style="fas", icon_name="fa-chevron-circle-down"),
FontIconModel(style="fas", icon_name="fa-chevron-circle-left"),
FontIconModel(style="fas", icon_name="fa-chevron-circle-right"),
FontIconModel(style="fas", icon_name="fa-chevron-circle-up"),
FontIconModel(style="fas", icon_name="fa-chevron-down"),
FontIconModel(style="fas", icon_name="fa-chevron-left"),
FontIconModel(style="fas", icon_name="fa-chevron-right"),
FontIconModel(style="fas", icon_name="fa-chevron-up"),
FontIconModel(style="fas", icon_name="fa-child"),
FontIconModel(style="fas", icon_name="fa-church"),
FontIconModel(style="fas", icon_name="fa-circle"),
FontIconModel(style="fas", icon_name="fa-circle-notch"),
FontIconModel(style="fas", icon_name="fa-city"),
FontIconModel(style="fas", icon_name="fa-clinic-medical"),
FontIconModel(style="fas", icon_name="fa-clipboard"),
FontIconModel(style="fas", icon_name="fa-clipboard-check"),
FontIconModel(style="fas", icon_name="fa-clipboard-list"),
FontIconModel(style="fas", icon_name="fa-clock"),
FontIconModel(style="fas", icon_name="fa-clone"),
FontIconModel(style="fas", icon_name="fa-closed-captioning"),
FontIconModel(style="fas", icon_name="fa-cloud"),
FontIconModel(style="fas", icon_name="fa-cloud-download-alt"),
FontIconModel(style="fas", icon_name="fa-cloud-meatball"),
FontIconModel(style="fas", icon_name="fa-cloud-moon"),
FontIconModel(style="fas", icon_name="fa-cloud-moon-rain"),
FontIconModel(style="fas", icon_name="fa-cloud-rain"),
FontIconModel(style="fas", icon_name="fa-cloud-showers-heavy"),
FontIconModel(style="fas", icon_name="fa-cloud-sun"),
FontIconModel(style="fas", icon_name="fa-cloud-sun-rain"),
FontIconModel(style="fas", icon_name="fa-cloud-upload-alt"),
FontIconModel(style="fas", icon_name="fa-cocktail"),
FontIconModel(style="fas", icon_name="fa-code"),
FontIconModel(style="fas", icon_name="fa-code-branch"),
FontIconModel(style="fas", icon_name="fa-coffee"),
FontIconModel(style="fas", icon_name="fa-cog"),
FontIconModel(style="fas", icon_name="fa-cogs"),
FontIconModel(style="fas", icon_name="fa-coins"),
FontIconModel(style="fas", icon_name="fa-columns"),
FontIconModel(style="fas", icon_name="fa-comment"),
FontIconModel(style="fas", icon_name="fa-comment-alt"),
FontIconModel(style="fas", icon_name="fa-comment-dollar"),
FontIconModel(style="fas", icon_name="fa-comment-dots"),
FontIconModel(style="fas", icon_name="fa-comment-medical"),
FontIconModel(style="fas", icon_name="fa-comment-slash"),
FontIconModel(style="fas", icon_name="fa-comments"),
FontIconModel(style="fas", icon_name="fa-comments-dollar"),
FontIconModel(style="fas", icon_name="fa-compact-disc"),
FontIconModel(style="fas", icon_name="fa-compass"),
FontIconModel(style="fas", icon_name="fa-compress"),
FontIconModel(style="fas", icon_name="fa-compress-arrows-alt"),
FontIconModel(style="fas", icon_name="fa-concierge-bell"),
FontIconModel(style="fas", icon_name="fa-cookie"),
FontIconModel(style="fas", icon_name="fa-cookie-bite"),
FontIconModel(style="fas", icon_name="fa-copy"),
FontIconModel(style="fas", icon_name="fa-copyright"),
FontIconModel(style="fas", icon_name="fa-couch"),
FontIconModel(style="fas", icon_name="fa-credit-card"),
FontIconModel(style="fas", icon_name="fa-crop"),
FontIconModel(style="fas", icon_name="fa-crop-alt"),
FontIconModel(style="fas", icon_name="fa-cross"),
FontIconModel(style="fas", icon_name="fa-crosshairs"),
FontIconModel(style="fas", icon_name="fa-crow"),
FontIconModel(style="fas", icon_name="fa-crown"),
FontIconModel(style="fas", icon_name="fa-crutch"),
FontIconModel(style="fas", icon_name="fa-cube"),
FontIconModel(style="fas", icon_name="fa-cubes"),
FontIconModel(style="fas", icon_name="fa-cut"),
FontIconModel(style="fas", icon_name="fa-database"),
FontIconModel(style="fas", icon_name="fa-deaf"),
FontIconModel(style="fas", icon_name="fa-democrat"),
FontIconModel(style="fas", icon_name="fa-desktop"),
FontIconModel(style="fas", icon_name="fa-dharmachakra"),
FontIconModel(style="fas", icon_name="fa-diagnoses"),
FontIconModel(style="fas", icon_name="fa-dice"),
FontIconModel(style="fas", icon_name="fa-dice-d20"),
FontIconModel(style="fas", icon_name="fa-dice-d6"),
FontIconModel(style="fas", icon_name="fa-dice-five"),
FontIconModel(style="fas", icon_name="fa-dice-four"),
FontIconModel(style="fas", icon_name="fa-dice-one"),
FontIconModel(style="fas", icon_name="fa-dice-six"),
FontIconModel(style="fas", icon_name="fa-dice-three"),
FontIconModel(style="fas", icon_name="fa-dice-two"),
FontIconModel(style="fas", icon_name="fa-digital-tachograph"),
FontIconModel(style="fas", icon_name="fa-directions"),
FontIconModel(style="fas", icon_name="fa-divide"),
FontIconModel(style="fas", icon_name="fa-dizzy"),
FontIconModel(style="fas", icon_name="fa-dna"),
FontIconModel(style="fas", icon_name="fa-dog"),
FontIconModel(style="fas", icon_name="fa-dollar-sign"),
FontIconModel(style="fas", icon_name="fa-dolly"),
FontIconModel(style="fas", icon_name="fa-dolly-flatbed"),
FontIconModel(style="fas", icon_name="fa-donate"),
FontIconModel(style="fas", icon_name="fa-door-closed"),
FontIconModel(style="fas", icon_name="fa-door-open"),
FontIconModel(style="fas", icon_name="fa-dot-circle"),
FontIconModel(style="fas", icon_name="fa-dove"),
FontIconModel(style="fas", icon_name="fa-download"),
FontIconModel(style="fas", icon_name="fa-drafting-compass"),
FontIconModel(style="fas", icon_name="fa-dragon"),
FontIconModel(style="fas", icon_name="fa-draw-polygon"),
FontIconModel(style="fas", icon_name="fa-drum"),
FontIconModel(style="fas", icon_name="fa-drum-steelpan"),
FontIconModel(style="fas", icon_name="fa-drumstick-bite"),
FontIconModel(style="fas", icon_name="fa-dumbbell"),
FontIconModel(style="fas", icon_name="fa-dumpster"),
FontIconModel(style="fas", icon_name="fa-dumpster-fire"),
FontIconModel(style="fas", icon_name="fa-dungeon"),
FontIconModel(style="fas", icon_name="fa-edit"),
FontIconModel(style="fas", icon_name="fa-egg"),
FontIconModel(style="fas", icon_name="fa-eject"),
FontIconModel(style="fas", icon_name="fa-ellipsis-h"),
FontIconModel(style="fas", icon_name="fa-ellipsis-v"),
FontIconModel(style="fas", icon_name="fa-envelope"),
FontIconModel(style="fas", icon_name="fa-envelope-open"),
FontIconModel(style="fas", icon_name="fa-envelope-open-text"),
FontIconModel(style="fas", icon_name="fa-envelope-square"),
FontIconModel(style="fas", icon_name="fa-equals"),
FontIconModel(style="fas", icon_name="fa-eraser"),
FontIconModel(style="fas", icon_name="fa-ethernet"),
FontIconModel(style="fas", icon_name="fa-euro-sign"),
FontIconModel(style="fas", icon_name="fa-exchange-alt"),
FontIconModel(style="fas", icon_name="fa-exclamation"),
FontIconModel(style="fas", icon_name="fa-exclamation-circle"),
FontIconModel(style="fas", icon_name="fa-exclamation-triangle"),
FontIconModel(style="fas", icon_name="fa-expand"),
FontIconModel(style="fas", icon_name="fa-expand-arrows-alt"),
FontIconModel(style="fas", icon_name="fa-external-link-alt"),
FontIconModel(style="fas", icon_name="fa-external-link-square-alt"),
FontIconModel(style="fas", icon_name="fa-eye"),
FontIconModel(style="fas", icon_name="fa-eye-dropper"),
FontIconModel(style="fas", icon_name="fa-eye-slash"),
FontIconModel(style="fas", icon_name="fa-fast-backward"),
FontIconModel(style="fas", icon_name="fa-fast-forward"),
FontIconModel(style="fas", icon_name="fa-fax"),
FontIconModel(style="fas", icon_name="fa-feather"),
FontIconModel(style="fas", icon_name="fa-feather-alt"),
FontIconModel(style="fas", icon_name="fa-female"),
FontIconModel(style="fas", icon_name="fa-fighter-jet"),
FontIconModel(style="fas", icon_name="fa-file"),
FontIconModel(style="fas", icon_name="fa-file-alt"),
FontIconModel(style="fas", icon_name="fa-file-archive"),
FontIconModel(style="fas", icon_name="fa-file-audio"),
FontIconModel(style="fas", icon_name="fa-file-code"),
FontIconModel(style="fas", icon_name="fa-file-contract"),
FontIconModel(style="fas", icon_name="fa-file-csv"),
FontIconModel(style="fas", icon_name="fa-file-download"),
FontIconModel(style="fas", icon_name="fa-file-excel"),
FontIconModel(style="fas", icon_name="fa-file-export"),
FontIconModel(style="fas", icon_name="fa-file-image"),
FontIconModel(style="fas", icon_name="fa-file-import"),
FontIconModel(style="fas", icon_name="fa-file-invoice"),
FontIconModel(style="fas", icon_name="fa-file-invoice-dollar"),
FontIconModel(style="fas", icon_name="fa-file-medical"),
FontIconModel(style="fas", icon_name="fa-file-medical-alt"),
FontIconModel(style="fas", icon_name="fa-file-pdf"),
FontIconModel(style="fas", icon_name="fa-file-powerpoint"),
FontIconModel(style="fas", icon_name="fa-file-prescription"),
FontIconModel(style="fas", icon_name="fa-file-signature"),
FontIconModel(style="fas", icon_name="fa-file-upload"),
FontIconModel(style="fas", icon_name="fa-file-video"),
FontIconModel(style="fas", icon_name="fa-file-word"),
FontIconModel(style="fas", icon_name="fa-fill"),
FontIconModel(style="fas", icon_name="fa-fill-drip"),
FontIconModel(style="fas", icon_name="fa-film"),
FontIconModel(style="fas", icon_name="fa-filter"),
FontIconModel(style="fas", icon_name="fa-fingerprint"),
FontIconModel(style="fas", icon_name="fa-fire"),
FontIconModel(style="fas", icon_name="fa-fire-alt"),
FontIconModel(style="fas", icon_name="fa-fire-extinguisher"),
FontIconModel(style="fas", icon_name="fa-first-aid"),
FontIconModel(style="fas", icon_name="fa-fish"),
FontIconModel(style="fas", icon_name="fa-fist-raised"),
FontIconModel(style="fas", icon_name="fa-flag"),
FontIconModel(style="fas", icon_name="fa-flag-checkered"),
FontIconModel(style="fas", icon_name="fa-flag-usa"),
FontIconModel(style="fas", icon_name="fa-flask"),
FontIconModel(style="fas", icon_name="fa-flushed"),
FontIconModel(style="fas", icon_name="fa-folder"),
FontIconModel(style="fas", icon_name="fa-folder-minus"),
FontIconModel(style="fas", icon_name="fa-folder-open"),
FontIconModel(style="fas", icon_name="fa-folder-plus"),
FontIconModel(style="fas", icon_name="fa-font"),
FontIconModel(style="fas", icon_name="fa-football-ball"),
FontIconModel(style="fas", icon_name="fa-forward"),
FontIconModel(style="fas", icon_name="fa-frog"),
FontIconModel(style="fas", icon_name="fa-frown"),
FontIconModel(style="fas", icon_name="fa-frown-open"),
FontIconModel(style="fas", icon_name="fa-funnel-dollar"),
FontIconModel(style="fas", icon_name="fa-futbol"),
FontIconModel(style="fas", icon_name="fa-gamepad"),
FontIconModel(style="fas", icon_name="fa-gas-pump"),
FontIconModel(style="fas", icon_name="fa-gavel"),
FontIconModel(style="fas", icon_name="fa-gem"),
FontIconModel(style="fas", icon_name="fa-genderless"),
FontIconModel(style="fas", icon_name="fa-ghost"),
FontIconModel(style="fas", icon_name="fa-gift"),
FontIconModel(style="fas", icon_name="fa-gifts"),
FontIconModel(style="fas", icon_name="fa-glass-cheers"),
FontIconModel(style="fas", icon_name="fa-glass-martini"),
FontIconModel(style="fas", icon_name="fa-glass-martini-alt"),
FontIconModel(style="fas", icon_name="fa-glass-whiskey"),
FontIconModel(style="fas", icon_name="fa-glasses"),
FontIconModel(style="fas", icon_name="fa-globe"),
FontIconModel(style="fas", icon_name="fa-globe-africa"),
FontIconModel(style="fas", icon_name="fa-globe-americas"),
FontIconModel(style="fas", icon_name="fa-globe-asia"),
FontIconModel(style="fas", icon_name="fa-globe-europe"),
FontIconModel(style="fas", icon_name="fa-golf-ball"),
FontIconModel(style="fas", icon_name="fa-gopuram"),
FontIconModel(style="fas", icon_name="fa-graduation-cap"),
FontIconModel(style="fas", icon_name="fa-greater-than"),
FontIconModel(style="fas", icon_name="fa-greater-than-equal"),
FontIconModel(style="fas", icon_name="fa-grimace"),
FontIconModel(style="fas", icon_name="fa-grin"),
FontIconModel(style="fas", icon_name="fa-grin-alt"),
FontIconModel(style="fas", icon_name="fa-grin-beam"),
FontIconModel(style="fas", icon_name="fa-grin-beam-sweat"),
FontIconModel(style="fas", icon_name="fa-grin-hearts"),
FontIconModel(style="fas", icon_name="fa-grin-squint"),
FontIconModel(style="fas", icon_name="fa-grin-squint-tears"),
FontIconModel(style="fas", icon_name="fa-grin-stars"),
FontIconModel(style="fas", icon_name="fa-grin-tears"),
FontIconModel(style="fas", icon_name="fa-grin-tongue"),
FontIconModel(style="fas", icon_name="fa-grin-tongue-squint"),
FontIconModel(style="fas", icon_name="fa-grin-tongue-wink"),
FontIconModel(style="fas", icon_name="fa-grin-wink"),
FontIconModel(style="fas", icon_name="fa-grip-horizontal"),
FontIconModel(style="fas", icon_name="fa-grip-lines"),
FontIconModel(style="fas", icon_name="fa-grip-lines-vertical"),
FontIconModel(style="fas", icon_name="fa-grip-vertical"),
FontIconModel(style="fas", icon_name="fa-guitar"),
FontIconModel(style="fas", icon_name="fa-h-square"),
FontIconModel(style="fas", icon_name="fa-hamburger"),
FontIconModel(style="fas", icon_name="fa-hammer"),
FontIconModel(style="fas", icon_name="fa-hamsa"),
FontIconModel(style="fas", icon_name="fa-hand-holding"),
FontIconModel(style="fas", icon_name="fa-hand-holding-heart"),
FontIconModel(style="fas", icon_name="fa-hand-holding-usd"),
FontIconModel(style="fas", icon_name="fa-hand-lizard"),
FontIconModel(style="fas", icon_name="fa-hand-middle-finger"),
FontIconModel(style="fas", icon_name="fa-hand-paper"),
FontIconModel(style="fas", icon_name="fa-hand-peace"),
FontIconModel(style="fas", icon_name="fa-hand-point-down"),
FontIconModel(style="fas", icon_name="fa-hand-point-left"),
FontIconModel(style="fas", icon_name="fa-hand-point-right"),
FontIconModel(style="fas", icon_name="fa-hand-point-up"),
FontIconModel(style="fas", icon_name="fa-hand-pointer"),
FontIconModel(style="fas", icon_name="fa-hand-rock"),
FontIconModel(style="fas", icon_name="fa-hand-scissors"),
FontIconModel(style="fas", icon_name="fa-hand-spock"),
FontIconModel(style="fas", icon_name="fa-hands"),
FontIconModel(style="fas", icon_name="fa-hands-helping"),
FontIconModel(style="fas", icon_name="fa-handshake"),
FontIconModel(style="fas", icon_name="fa-hanukiah"),
FontIconModel(style="fas", icon_name="fa-hard-hat"),
FontIconModel(style="fas", icon_name="fa-hashtag"),
FontIconModel(style="fas", icon_name="fa-hat-wizard"),
FontIconModel(style="fas", icon_name="fa-haykal"),
FontIconModel(style="fas", icon_name="fa-hdd"),
FontIconModel(style="fas", icon_name="fa-heading"),
FontIconModel(style="fas", icon_name="fa-headphones"),
FontIconModel(style="fas", icon_name="fa-headphones-alt"),
FontIconModel(style="fas", icon_name="fa-headset"),
FontIconModel(style="fas", icon_name="fa-heart"),
FontIconModel(style="fas", icon_name="fa-heart-broken"),
FontIconModel(style="fas", icon_name="fa-heartbeat"),
FontIconModel(style="fas", icon_name="fa-helicopter"),
FontIconModel(style="fas", icon_name="fa-highlighter"),
FontIconModel(style="fas", icon_name="fa-hiking"),
FontIconModel(style="fas", icon_name="fa-hippo"),
FontIconModel(style="fas", icon_name="fa-history"),
FontIconModel(style="fas", icon_name="fa-hockey-puck"),
FontIconModel(style="fas", icon_name="fa-holly-berry"),
FontIconModel(style="fas", icon_name="fa-home"),
FontIconModel(style="fas", icon_name="fa-horse"),
FontIconModel(style="fas", icon_name="fa-horse-head"),
FontIconModel(style="fas", icon_name="fa-hospital"),
FontIconModel(style="fas", icon_name="fa-hospital-alt"),
FontIconModel(style="fas", icon_name="fa-hospital-symbol"),
FontIconModel(style="fas", icon_name="fa-hot-tub"),
FontIconModel(style="fas", icon_name="fa-hotdog"),
FontIconModel(style="fas", icon_name="fa-hotel"),
FontIconModel(style="fas", icon_name="fa-hourglass"),
FontIconModel(style="fas", icon_name="fa-hourglass-end"),
FontIconModel(style="fas", icon_name="fa-hourglass-half"),
FontIconModel(style="fas", icon_name="fa-hourglass-start"),
FontIconModel(style="fas", icon_name="fa-house-damage"),
FontIconModel(style="fas", icon_name="fa-hryvnia"),
FontIconModel(style="fas", icon_name="fa-i-cursor"),
FontIconModel(style="fas", icon_name="fa-ice-cream"),
FontIconModel(style="fas", icon_name="fa-icicles"),
FontIconModel(style="fas", icon_name="fa-id-badge"),
FontIconModel(style="fas", icon_name="fa-id-card"),
FontIconModel(style="fas", icon_name="fa-id-card-alt"),
FontIconModel(style="fas", icon_name="fa-igloo"),
FontIconModel(style="fas", icon_name="fa-image"),
FontIconModel(style="fas", icon_name="fa-images"),
FontIconModel(style="fas", icon_name="fa-inbox"),
FontIconModel(style="fas", icon_name="fa-indent"),
FontIconModel(style="fas", icon_name="fa-industry"),
FontIconModel(style="fas", icon_name="fa-infinity"),
FontIconModel(style="fas", icon_name="fa-info"),
FontIconModel(style="fas", icon_name="fa-info-circle"),
FontIconModel(style="fas", icon_name="fa-italic"),
FontIconModel(style="fas", icon_name="fa-jedi"),
FontIconModel(style="fas", icon_name="fa-joint"),
FontIconModel(style="fas", icon_name="fa-journal-whills"),
FontIconModel(style="fas", icon_name="fa-kaaba"),
FontIconModel(style="fas", icon_name="fa-key"),
FontIconModel(style="fas", icon_name="fa-keyboard"),
FontIconModel(style="fas", icon_name="fa-khanda"),
FontIconModel(style="fas", icon_name="fa-kiss"),
FontIconModel(style="fas", icon_name="fa-kiss-beam"),
FontIconModel(style="fas", icon_name="fa-kiss-wink-heart"),
FontIconModel(style="fas", icon_name="fa-kiwi-bird"),
FontIconModel(style="fas", icon_name="fa-landmark"),
FontIconModel(style="fas", icon_name="fa-language"),
FontIconModel(style="fas", icon_name="fa-laptop"),
FontIconModel(style="fas", icon_name="fa-laptop-code"),
FontIconModel(style="fas", icon_name="fa-laptop-medical"),
FontIconModel(style="fas", icon_name="fa-laugh"),
FontIconModel(style="fas", icon_name="fa-laugh-beam"),
FontIconModel(style="fas", icon_name="fa-laugh-squint"),
FontIconModel(style="fas", icon_name="fa-laugh-wink"),
FontIconModel(style="fas", icon_name="fa-layer-group"),
FontIconModel(style="fas", icon_name="fa-leaf"),
FontIconModel(style="fas", icon_name="fa-lemon"),
FontIconModel(style="fas", icon_name="fa-less-than"),
FontIconModel(style="fas", icon_name="fa-less-than-equal"),
FontIconModel(style="fas", icon_name="fa-level-down-alt"),
FontIconModel(style="fas", icon_name="fa-level-up-alt"),
FontIconModel(style="fas", icon_name="fa-life-ring"),
FontIconModel(style="fas", icon_name="fa-lightbulb"),
FontIconModel(style="fas", icon_name="fa-link"),
FontIconModel(style="fas", icon_name="fa-lira-sign"),
FontIconModel(style="fas", icon_name="fa-list"),
FontIconModel(style="fas", icon_name="fa-list-alt"),
FontIconModel(style="fas", icon_name="fa-list-ol"),
FontIconModel(style="fas", icon_name="fa-list-ul"),
FontIconModel(style="fas", icon_name="fa-location-arrow"),
FontIconModel(style="fas", icon_name="fa-lock"),
FontIconModel(style="fas", icon_name="fa-lock-open"),
FontIconModel(style="fas", icon_name="fa-long-arrow-alt-down"),
FontIconModel(style="fas", icon_name="fa-long-arrow-alt-left"),
FontIconModel(style="fas", icon_name="fa-long-arrow-alt-right"),
FontIconModel(style="fas", icon_name="fa-long-arrow-alt-up"),
FontIconModel(style="fas", icon_name="fa-low-vision"),
FontIconModel(style="fas", icon_name="fa-luggage-cart"),
FontIconModel(style="fas", icon_name="fa-magic"),
FontIconModel(style="fas", icon_name="fa-magnet"),
FontIconModel(style="fas", icon_name="fa-mail-bulk"),
FontIconModel(style="fas", icon_name="fa-male"),
FontIconModel(style="fas", icon_name="fa-map"),
FontIconModel(style="fas", icon_name="fa-map-marked"),
FontIconModel(style="fas", icon_name="fa-map-marked-alt"),
FontIconModel(style="fas", icon_name="fa-map-marker"),
FontIconModel(style="fas", icon_name="fa-map-marker-alt"),
FontIconModel(style="fas", icon_name="fa-map-pin"),
FontIconModel(style="fas", icon_name="fa-map-signs"),
FontIconModel(style="fas", icon_name="fa-marker"),
FontIconModel(style="fas", icon_name="fa-mars"),
FontIconModel(style="fas", icon_name="fa-mars-double"),
FontIconModel(style="fas", icon_name="fa-mars-stroke"),
FontIconModel(style="fas", icon_name="fa-mars-stroke-h"),
FontIconModel(style="fas", icon_name="fa-mars-stroke-v"),
FontIconModel(style="fas", icon_name="fa-mask"),
FontIconModel(style="fas", icon_name="fa-medal"),
FontIconModel(style="fas", icon_name="fa-medkit"),
FontIconModel(style="fas", icon_name="fa-meh"),
FontIconModel(style="fas", icon_name="fa-meh-blank"),
FontIconModel(style="fas", icon_name="fa-meh-rolling-eyes"),
FontIconModel(style="fas", icon_name="fa-memory"),
FontIconModel(style="fas", icon_name="fa-menorah"),
FontIconModel(style="fas", icon_name="fa-mercury"),
FontIconModel(style="fas", icon_name="fa-meteor"),
FontIconModel(style="fas", icon_name="fa-microchip"),
FontIconModel(style="fas", icon_name="fa-microphone"),
FontIconModel(style="fas", icon_name="fa-microphone-alt"),
FontIconModel(style="fas", icon_name="fa-microphone-alt-slash"),
FontIconModel(style="fas", icon_name="fa-microphone-slash"),
FontIconModel(style="fas", icon_name="fa-microscope"),
FontIconModel(style="fas", icon_name="fa-minus"),
FontIconModel(style="fas", icon_name="fa-minus-circle"),
FontIconModel(style="fas", icon_name="fa-minus-square"),
FontIconModel(style="fas", icon_name="fa-mitten"),
FontIconModel(style="fas", icon_name="fa-mobile"),
FontIconModel(style="fas", icon_name="fa-mobile-alt"),
FontIconModel(style="fas", icon_name="fa-money-bill"),
FontIconModel(style="fas", icon_name="fa-money-bill-alt"),
FontIconModel(style="fas", icon_name="fa-money-bill-wave"),
FontIconModel(style="fas", icon_name="fa-money-bill-wave-alt"),
FontIconModel(style="fas", icon_name="fa-money-check"),
FontIconModel(style="fas", icon_name="fa-money-check-alt"),
FontIconModel(style="fas", icon_name="fa-monument"),
FontIconModel(style="fas", icon_name="fa-moon"),
FontIconModel(style="fas", icon_name="fa-mortar-pestle"),
FontIconModel(style="fas", icon_name="fa-mosque"),
FontIconModel(style="fas", icon_name="fa-motorcycle"),
FontIconModel(style="fas", icon_name="fa-mountain"),
FontIconModel(style="fas", icon_name="fa-mouse-pointer"),
FontIconModel(style="fas", icon_name="fa-mug-hot"),
FontIconModel(style="fas", icon_name="fa-music"),
FontIconModel(style="fas", icon_name="fa-network-wired"),
FontIconModel(style="fas", icon_name="fa-neuter"),
FontIconModel(style="fas", icon_name="fa-newspaper"),
FontIconModel(style="fas", icon_name="fa-not-equal"),
FontIconModel(style="fas", icon_name="fa-notes-medical"),
FontIconModel(style="fas", icon_name="fa-object-group"),
FontIconModel(style="fas", icon_name="fa-object-ungroup"),
FontIconModel(style="fas", icon_name="fa-oil-can"),
FontIconModel(style="fas", icon_name="fa-om"),
FontIconModel(style="fas", icon_name="fa-otter"),
FontIconModel(style="fas", icon_name="fa-outdent"),
FontIconModel(style="fas", icon_name="fa-pager"),
FontIconModel(style="fas", icon_name="fa-paint-brush"),
FontIconModel(style="fas", icon_name="fa-paint-roller"),
FontIconModel(style="fas", icon_name="fa-palette"),
FontIconModel(style="fas", icon_name="fa-pallet"),
FontIconModel(style="fas", icon_name="fa-paper-plane"),
FontIconModel(style="fas", icon_name="fa-paperclip"),
FontIconModel(style="fas", icon_name="fa-parachute-box"),
FontIconModel(style="fas", icon_name="fa-paragraph"),
FontIconModel(style="fas", icon_name="fa-parking"),
FontIconModel(style="fas", icon_name="fa-passport"),
FontIconModel(style="fas", icon_name="fa-pastafarianism"),
FontIconModel(style="fas", icon_name="fa-paste"),
FontIconModel(style="fas", icon_name="fa-pause"),
FontIconModel(style="fas", icon_name="fa-pause-circle"),
FontIconModel(style="fas", icon_name="fa-paw"),
FontIconModel(style="fas", icon_name="fa-peace"),
FontIconModel(style="fas", icon_name="fa-pen"),
FontIconModel(style="fas", icon_name="fa-pen-alt"),
FontIconModel(style="fas", icon_name="fa-pen-fancy"),
FontIconModel(style="fas", icon_name="fa-pen-nib"),
FontIconModel(style="fas", icon_name="fa-pen-square"),
FontIconModel(style="fas", icon_name="fa-pencil-alt"),
FontIconModel(style="fas", icon_name="fa-pencil-ruler"),
FontIconModel(style="fas", icon_name="fa-people-carry"),
FontIconModel(style="fas", icon_name="fa-pepper-hot"),
FontIconModel(style="fas", icon_name="fa-percent"),
FontIconModel(style="fas", icon_name="fa-percentage"),
FontIconModel(style="fas", icon_name="fa-person-booth"),
FontIconModel(style="fas", icon_name="fa-phone"),
FontIconModel(style="fas", icon_name="fa-phone-slash"),
FontIconModel(style="fas", icon_name="fa-phone-square"),
FontIconModel(style="fas", icon_name="fa-phone-volume"),
FontIconModel(style="fas", icon_name="fa-piggy-bank"),
FontIconModel(style="fas", icon_name="fa-pills"),
FontIconModel(style="fas", icon_name="fa-pizza-slice"),
FontIconModel(style="fas", icon_name="fa-place-of-worship"),
FontIconModel(style="fas", icon_name="fa-plane"),
FontIconModel(style="fas", icon_name="fa-plane-arrival"),
FontIconModel(style="fas", icon_name="fa-plane-departure"),
FontIconModel(style="fas", icon_name="fa-play"),
FontIconModel(style="fas", icon_name="fa-play-circle"),
FontIconModel(style="fas", icon_name="fa-plug"),
FontIconModel(style="fas", icon_name="fa-plus"),
FontIconModel(style="fas", icon_name="fa-plus-circle"),
FontIconModel(style="fas", icon_name="fa-plus-square"),
FontIconModel(style="fas", icon_name="fa-podcast"),
FontIconModel(style="fas", icon_name="fa-poll"),
FontIconModel(style="fas", icon_name="fa-poll-h"),
FontIconModel(style="fas", icon_name="fa-poo"),
FontIconModel(style="fas", icon_name="fa-poo-storm"),
FontIconModel(style="fas", icon_name="fa-poop"),
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 13:37:56 2020
@author: karliskanders
Last updated on 01/04/2020
"""
import leidenalg as la
import pandas as pd
import numpy as np
from sklearn.metrics.cluster import adjusted_mutual_info_score as ami_score
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot as plt
import seaborn as sns
import igraph as ig
from time import time
import os
class ConsensusClustering:
"""
Class for determining stable clustering of data by using a 3-step process.
First, an ensemble of clustering results is generated by repeatedly applying
a clustering algorithm many times (step 1).
Then, the ensemble is used to define new edge weights between the graph nodes
based on the data point co-clustering occurrences. These weights are then used
to generate another "consensus ensemble", which in practice is very stable and
exhibits only minor variations between different clustering runs (step 2).
To decide which one of the partitions among the "consensus ensemble" should
be designated as the final consensus partition, we use adjusted mutual information
to compare all partitions within the consensus ensemble, and choose the one
which agrees the best with all of the other partitions (step 3).
Presently, we use the Leiden community detection algorithm for clustering the
graph into communities. However, this class can be easily adapted to use other
graph-based clustering algorithms.
The consensus clustering approach used here is an adapted version of the
intuitively simple but well-performing "Ensemble Clustering for Graphs" method
by <NAME> (see https://arxiv.org/abs/1809.05578).
"""
def __init__(
self, graph, N=20, N_consensus=10, verbose=True, seed=None, edge_bootstrap=False
):
"""
Parameters
----------
graph (igraph.Graph):
Graph object that will be used for finding graph communities.
N (int):
Ensemble size for the first clustering step (normally use 500-1000).
N_consensus (int):
Ensemble size for the consensus clustering step.
verbose (boolean):
Determines whether user is informed about the progress regarding the
intermediate steps of the clustering procedure.
seed (int):
Seed for the random number generator; useful for reproducing the exact
same clustering result. This seed is then used to generate all other
random seeds for each repeated clustering run.
edge_bootstrap (boolean):
Determines whether edge bootstrapping is used for generating the
clustering ensemble.
"""
self.graph = graph
self.directed = graph.is_directed()
self.N = N
self.N_consensus = N_consensus
self.v = verbose
self.edge_bootstrap = edge_bootstrap
self.w_min = 0.05
# Hard-coded parameter for consensus clustering (step 2) from Poulin &
# Theberge publication
self._ensemble = None
# List of lists containing the ensemble of step 1 clustering results
self._consensus_ensemble = None
# List of lists containing the ensemble of step 2 clustering results
self._COOC = None
# Clustering co-occurrence matrix
self._consensus_partition = None
# Final consensus clustering partition
# Manage random seeds
if type(seed) != type(None):
print("Setting random seeds...")
np.random.seed(seed)
self.ensemble_seeds = np.random.randint(100000000, size=N)
self.consensus_ensemble_seed = np.random.randint(100000000)
else:
self.ensemble_seeds = None
self.consensus_ensemble_seed = None
@property
def ensemble(self):
"""
List of clustering results (pertaining to step 1 of the clustering
procedure), where each clustering result is a list of integers. These
integers correspond to cluster labels.
"""
if self._ensemble is None:
# Generate ensemble of self.N partitions
self._ensemble = self.create_ensemble(self.N, weights="weight")
# self.clustering_AMI, _ = self.ensemble_AMI(self.ensemble, v=self.v)
return self._ensemble
@property
def COOC(self):
"""
Co-clustering occurrence matrix: element (i,j) of this matrix indicates
how many times nodes i and j were clustered together.
"""
if self._COOC is None:
# Calculate the co-occurrence matrix from the ensemble
self._COOC = self.cooccurrence_matrix(self.ensemble)
return self._COOC
@property
def consensus_ensemble(self):
"""
List of consensus clustering results (pertaining to step 2 of the clustering
procedure) where each clustering result is a list of integers. These
integers correspond to cluster labels.
"""
if self._consensus_ensemble is None:
# Use the co-occurrence matrix values for consensus clustering weights
A = (self.COOC != 0).astype(int)
if self.v:
print("Using co-occurrence matrix to do consensus clustering...")
# Create a new graph and find communities in this new graph
g_cooc = build_graph(self.COOC / self.N, kNN=A)
clust_cooc = ConsensusClustering(
g_cooc, N=self.N_consensus, seed=self.consensus_ensemble_seed
)
self._consensus_ensemble = clust_cooc.create_ensemble()
return self._consensus_ensemble
def load_ensemble(self, ensemble, consensus=False):
"""
This method can be used to load an external ensemble. For example,
you might have stored an ensemble of clustering results from a previous
analysis and would now like to recalculate the consensus partition.
Parameters
----------
ensemble (list of lists of int):
List of clustering results, where each clustering result is a list
of integers. These integers correspond to cluster labels.
consensus (boolean):
Determines whether the ensemble should be treated as the initial
ensemble (from step 1) or the consensus ensemble (from step 2).
"""
if not consensus:
self._ensemble = ensemble
else:
self._consensus_ensemble = ensemble
def create_ensemble(self, N=None, weights="weight"):
    """
    Generate an ensemble of clustering partitions by repeatedly applying
    the Leiden community detection algorithm.

    Parameters
    ----------
    N (int OR None):
        Ensemble size for the first clustering step. If None, use the
        class property self.N.
    weights (string OR None):
        Edge property to use for the community detection.

    Returns
    -------
    ensemble (list of lists of int):
        List of clustering results, where each clustering result is a list
        of integer cluster labels (one per node).
    """
    if N is None:
        N = self.N
    ensemble = []
    if self.v:
        print(f"Generating an ensemble with {N} partitions...")
    for i in range(N):
        # Choose random seed for this clustering run (None lets leidenalg pick)
        if self.ensemble_seeds is not None:
            ensemble_seed = self.ensemble_seeds[i]
        else:
            ensemble_seed = None
        # Bootstrapping by removing edges (truthiness test replaces the
        # unidiomatic `== True` comparison)
        if self.edge_bootstrap:
            graph_ = self.graph.copy()
            rand_numbers = np.random.rand(len(graph_.es))
            edge_weights = graph_.es[weights]
            # Normalise the edge weights between 0 and 1
            edge_weights = np.array(edge_weights) / np.max(edge_weights)
            # Remove edges based on a probability that is proportional to their weight
            # (one might want to parameterise this further to tweak the edge removal)
            id_to_delete = np.where(rand_numbers > edge_weights)[0]
            graph_.delete_edges(list(id_to_delete))
        else:
            graph_ = self.graph
        # Community detection with the Leiden algorithm
        p = la.find_partition(
            graph_,
            weights=weights,
            partition_type=la.ModularityVertexPartition,
            seed=ensemble_seed,
        )
        ensemble.append(p.membership)
        if self.v:
            print("x", end="")  # lightweight progress indicator
    if self.v:
        print("")
    return ensemble
@staticmethod
def cooccurrence_matrix(ensemble):
"""
Create the co-clustering occurrence matrix (also called 'cooccurrence matrix');
This can be quite slow for large graphs with ~10K nodes and probably could
be optimized, e.g., with numba.
Parameters
----------
ensemble (list of lists of int):
List of clustering results, where each clustering result is a list
of integers. These integers correspond to cluster labels.
"""
n = len(ensemble[0])
COOC = np.zeros((n, n))
# For each clustering result in the ensemble
for i, p in enumerate(ensemble):
membership = p
# Use pandas to find node pairs with the same cluster labels
membership_df = pd.DataFrame(
data={"id": list(range(len(membership))), "cluster": membership}
)
cooc = membership_df.merge(right=membership_df, on="cluster")
cooc = cooc[cooc.id_x < cooc.id_y]
# For each node pair with the same cluster labels, add 1 to the
# co-clustering occurrence matrix
COOC[cooc.id_x.values, cooc.id_y.values] += 1
COOC = COOC + np.triu(COOC).T
return COOC
@property
def consensus_partition(self):
"""
Final consensus partition of the clustering procedure
"""
if self._consensus_partition is None:
self.consensus_communities()
return self._consensus_partition
def consensus_communities(self):
    """
    Find the consensus clustering partition (steps 2-3 of the procedure)
    and store it in self._consensus_partition, together with summary
    attributes self.n (cluster count) and self.sizes (cluster sizes).
    """
    # Step 2: measure the stability of the consensus ensemble; accessing
    # self.consensus_ensemble generates it lazily if it does not exist yet.
    self.consensus_AMI, ami_matrix = self.ensemble_AMI(
        self.consensus_ensemble, v=self.v
    )
    # Step 3: designate "the most agreeable" partition - the one with the
    # highest mean AMI against all others - as the final consensus partition.
    avg_agreement = np.mean(ami_matrix, axis=1)
    best_idx = np.argsort(avg_agreement)[-1]
    self._consensus_partition = self.consensus_ensemble[best_idx]
    # Summarise the final consensus clustering partition
    summary = self.describe_partition(self._consensus_partition, self.v)
    self.n = summary["n"]
    self.sizes = summary["sizes"]
@staticmethod
def describe_partition(partition, verbose=True):
"""
Describes the number of clusters and the number of nodes in each cluster
"""
partition = np.array(partition)
clusters = np.unique(partition)
n = len(clusters)
sizes = [0] * n
for c in range(n):
sizes[c] = np.sum(partition == c)
if verbose:
print(f"Clustering with {len(partition)} nodes and {n} clusters.")
return {"n": n, "sizes": sizes}
@staticmethod
def ensemble_AMI(P, v=True):
"""
Calculates pairwise adjusted mutual information (AMI) scores across
the clustering ensemble.
Parameters
----------
P (list of lists of int):
Clustering ensemble, i.e., a list of | |
bar
statsStatusBar['value'] = 0
statuslbl.config(text='Loading')
# Disable stats button
statsButton.config(state=tk.DISABLED)
# Update window
window.update()
# Get status inputs
segmentBy = segmentByBox.get()
increment = incrementBox.get()
yval = yValsBox.get()
# Set height tag for column name
if('hae' in yval.lower()):
ht_tag = 'HAE'
else:
ht_tag = 'MSL'
# endIf
# Only compute stats when HAE or MSL heights are on the y-axis
if('height' in yval.lower()):
# Get file numbers to plot
indsToPlotTuple = [gtNumPlotBox.current()]
# Test if inputs are valid
segmentByValid = segmentBy!=''
incrementValid = increment!=''
indsToPlotTupleValid = indsToPlotTuple!=()
atl03DF_allValid = len(atl03DF_all)>=1
# Test if all inputs are valid
allValid = segmentByValid and incrementValid and indsToPlotTupleValid and atl03DF_allValid
# Continue code if all inputs are valid, else send message box error
if(allValid):
if(len(indsToPlotTuple)==1):
# Get correct data frame to use from user
dfNum = indsToPlotTuple[0]
atl03DF = atl03DF_all.copy()
atl03DF = atl03DF[dfNum]
# Get the segment key to bin data by
if('Segment ID' == segmentBy):
segmentKey = 'Segment ID'
elif('Time (sec)' == segmentBy):
segmentKey = 'Time (sec)'
elif('Latitude (deg)' == segmentBy):
segmentKey = 'Latitude (deg)'
elif('UTM Northing (m)' == segmentBy):
segmentKey = 'UTM Northing (m)'
elif('Along-Track (m)' == segmentBy):
segmentKey = 'Along-Track (m)'
# endIf
# Convert increment to float
increment = float(increment)
# Create aggregate list for binning function
agg_list = ['ATL03;atl03_ground_min (m ' + ht_tag + ');' + yval + ';min;[1]',
'ATL03;atl03_ground_max (m ' + ht_tag + ');' + yval + ';max100;[1]',
'ATL03;atl03_ground_median (m ' + ht_tag + ');' + yval + ';median;[1]',
'ATL03;atl03_ground_mean (m ' + ht_tag + ');' + yval + ';mean;[1]',
'ATL03;atl03_ground_std (m ' + ht_tag + ');' + yval + ';std;[1]',
'ATL03;atl03_all_canopy_min (m ' + ht_tag + ');' + yval + ';min;[2,3]',
'ATL03;atl03_all_canopy_max (m ' + ht_tag + ');' + yval + ';max100;[2,3]',
'ATL03;atl03_all_canopy_median (m ' + ht_tag + ');' + yval + ';median;[2,3]',
'ATL03;atl03_all_canopy_mean (m ' + ht_tag + ');' + yval + ';mean;[2,3]',
'ATL03;atl03_all_canopy_std (m ' + ht_tag + ');' + yval + ';std;[2,3]',
'ATL03;atl03_all_height_min (m ' + ht_tag + ');' + yval + ';min;[1,2,3]',
'ATL03;atl03_all_height_max (m ' + ht_tag + ');' + yval + ';max100;[1,2,3]',
'ATL03;atl03_all_height_median (m ' + ht_tag + ');' + yval + ';median;[1,2,3]',
'ATL03;atl03_all_height_mean (m ' + ht_tag + ');' + yval + ';mean;[1,2,3]',
'ATL03;atl03_all_height_std (m ' + ht_tag + ');' + yval + ';std;[1,2,3]']
try:
# Set Add Stats listbox in Stats Section
addStatsBox.set('')
window.update()
# Bin data into dataframe
atl03DF_binned = get_bin_df(atl03DF, segmentKey, increment, agg_list)
# Pull subset of data into smaller dataframe
statsDF_orig = atl03DF_binned.copy()
statsDF_orig = statsDF_orig[statsDF_orig.columns[[0,2,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]]]
# Rename 'beg_id' column name to Segment By name
newColName = 'segment_start_' + segmentBy.lower()
statsDF_orig.rename(columns={'beg_id':newColName}, inplace=True)
# Rename 'end_id' column name to Segment By name
newColName = 'segment_end_' + segmentBy.lower()
statsDF_orig.rename(columns={'end_id':newColName}, inplace=True)
# Interpolate stats dataframe midpoints
statsDF = interpStats(atl03DF, statsDF_orig, ht_tag)
# Update status bar
statsStatusBar['value'] = 100
statuslbl.config(text='Complete')
# Set Add Stats listbox in Stats Section
addStatsTuple = ('Ground Min','Ground Max','Ground Median','Ground Mean','Ground Mean + 3*Std','Ground Mean - 3*Std',
'All Canopy Min','All Canopy Max','All Canopy Median','All Canopy Mean','All Canopy Mean + 3*Std','All Canopy Mean - 3*Std',
'All Height Min','All Height Max','All Height Median','All Height Mean','All Height Mean + 3*Std','All Height Mean - 3*Std')
addStatsBox['values'] = addStatsTuple
addStatsBox.current(0)
# Update window
window.update()
except:
messagebox.showinfo('Error','Could not compute stats. Please check inputs.')
# endTry
else:
messagebox.showinfo('Error','Please select only 1 file to compute stats.')
# endIf
else:
messagebox.showinfo('Error','Missing data to compute stats.')
# endIf
else:
messagebox.showinfo('Error','Can only compute stats with HAE or MSL Heights in Y Axis above.')
# endIf
# Enable button
statsButton.config(state=tk.NORMAL)
window.update()
# endDef
# Add stats to figure callback
def addStatsCallback():
    """Overlay the user-selected statistic onto the current plot.

    Reads the ground-track, x/y parameter, and statistic selections from the
    GUI combo boxes and forwards them (with the computed statsDF) to
    addStatsToPlot. Shows an error dialog if any required data is missing.
    """
    try:
        # Get file numbers to plot
        indsToPlotTuple = [gtNumPlotBox.current()]
        # Get x parameter to plot
        xParam = plotList[xValsBox.current()]
        # Selected statistic name (e.g. 'Ground Min')
        yParam = addStatsBox.get()
        # Get y-axis plot parameter
        yVar = plotList[yValsBox.current()]
        # Height reference selected for the y axis (HAE/MSL)
        yHt = yValsBox.get()
        # Add stats to figure
        addStatsToPlot(indsToPlotTuple,xParam,yParam,yVar,yHt,statsDF)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
        # not swallowed; any data/plotting error still shows a message box.
        messagebox.showinfo('Error','Missing data to plot stats.')
    # endTry
# endDef
# Write stats callback button
def writeStatsToCsv():
    """Prompt the user for an output path and export the stats DataFrame to CSV.

    Silently does nothing if stats have not been computed yet (statsDF is
    undefined or empty).
    """
    try:
        if(len(statsDF)>0):
            # Disable button while exporting (bug fix: this previously set
            # state=tk.NORMAL, so the button was never actually disabled)
            writeStatsButton.config(state=tk.DISABLED)
            window.update()
            # Try to write .csv file
            try:
                # Get dataframe for file
                csvDF = statsDF
                # Prompt user to save CSV file somewhere
                files = [('CSV File', '*.csv')]
                initialDir = 'C:/'
                outputCsvName = asksaveasfile(initialdir = initialDir, \
                                              initialfile = 'outputData', title = 'Save CSV File', \
                                              filetypes = files, defaultextension = files)
                # Write CSV file
                writeCsv(csvDF, outputCsvName)
            except Exception:
                messagebox.showinfo('Error','Could not write .csv file.')
            # end Try
            # Re-enable button after the export attempt
            writeStatsButton.config(state=tk.NORMAL)
            window.update()
        # endIf
    except Exception:
        # statsDF does not exist until stats have been computed; ignore then.
        pass
    # endTry
# endDef
# Write CSV Parameter Button Callback
def writeCsv(csvDF, outputCsvName, verbose=True):
    """Write a DataFrame to a CSV file.

    Parameters
    ----------
    csvDF : pandas.DataFrame
        Data to write.
    outputCsvName : str or file object
        Destination path, or an open file handle (e.g. from asksaveasfile).
    verbose : bool
        If True, print progress and show message boxes on completion/failure.
    """
    # Try to write CSV file
    try:
        if(verbose):
            print('Writing to CSV file...')
        # endIf
        # 'lineterminator' is the current keyword name; the old
        # 'line_terminator' alias was renamed in pandas 1.5 and removed in 2.0.
        csvDF.to_csv(outputCsvName, index=False, lineterminator='\n')
        if(verbose):
            print('File Complete!')
        # endIf
        # Send completion message
        if(verbose):
            messagebox.showinfo('Success','File Complete!')
        # endIf
    except Exception:
        # Send error message (narrowed from a bare 'except:')
        if(verbose):
            messagebox.showinfo('Error','Could not write .csv file.')
        # endIf
    # endTry
# endDef
### Stats Data Panel label
# Container frame for the "Compute and Plot Stats" section on tab 3
statsLabelframe = tk.LabelFrame(tab3, width=545, height=175, text='Compute and Plot Stats', font=('Arial Bold', 14))
statsLabelframe.place(x=15, y=290)
### "Segment by:" section
# Combo box selecting which column the stats are binned by
lbl = tk.Label(statsLabelframe, text='Segment Stats by:', font=('Arial', 12), anchor = 'w', justify='left')
lbl.place(x=10, y=20)
segmentByBox = ttk.Combobox(statsLabelframe, width=20)
segmentByBox.place(x=10, y=50)
segmentByBox.bind("<<ComboboxSelected>>", segmentByCallback)
### "Increment" section
# Entry box for the bin size; incrementText shows the unit next to it
lbl = tk.Label(statsLabelframe, text='Segment Increment:', font=('Arial', 12), anchor = 'w', justify='left')
lbl.place(x=10, y=85)
incrementBox = tk.Entry(statsLabelframe, width=14)
incrementBox.place(x=10, y=115)
incrementText = tk.Label(statsLabelframe, text='', font=('Arial', 12), anchor = 'w', justify='left')
incrementText.place(x=100, y=112)
### Compute Stats button
statsButton = tk.Button(statsLabelframe, text='Compute Stats', font=('Arial Bold', 16), width = 13, command=computeStats)
statsButton.place(x=180, y=30)
### Compute Stats Status Bar
# Progress bar driven by computeStats (0 while loading, 100 when complete)
statuslbl = tk.Label(statsLabelframe, text=' Status:', font=('Arial Bold', 10))
statuslbl.place(x=270, y=0)
statsStatus = int()
statsStatusBar = Progressbar(statsLabelframe, variable=statsStatus, length=20)
statsStatusBar['value'] = 0
statsStatusBar.place(x=340, y=0)
### "Add Stats to Plot:" section
# Combo box of statistic names; populated by computeStats after binning
lbl = tk.Label(statsLabelframe, text='Add Stats to Plot:', font=('Arial', 12), anchor = 'w', justify='left')
lbl.place(x=180, y=85)
addStatsBox = ttk.Combobox(statsLabelframe, width=26)
addStatsBox.place(x=180, y=115)
#addStatsBox.bind("<<ComboboxSelected>>", addStatsCallback)
### Export Stats button
lbl = tk.Label(statsLabelframe, text='Export Stats to CSV:', font=('Arial', 12), anchor = 'w', justify='left')
lbl.place(x=385, y=0)
writeStatsButton = tk.Button(statsLabelframe, text='Export', font=('Arial Bold', 16), width = 10, command=writeStatsToCsv)
writeStatsButton.place(x=385, y=30)
### Add Stats button
addStatsButton = tk.Button(statsLabelframe, text='Plot Stats', font=('Arial Bold', 16), width = 10, command=addStatsCallback)
addStatsButton.place(x=385, y=95)
###############################################################################
#
# TAB 3: PLOT DATA - ATL08 DATA
#
###############################################################################
### Plot ATL08 Data Panel label
dataLayersLabelframe = tk.LabelFrame(tab3, width=545, height=270, text='Add Layers to Plot', font=('Arial Bold', 14))
dataLayersLabelframe.place(x=580, y=10)
# ATL08 Plot text
lbl = tk.Label(dataLayersLabelframe, text='ATL08 Data to Add:', font=('Arial', 12), anchor = 'w', justify='left')
lbl.place(x=10, y=15)
# ATL08 Y Axis Combo Box
#lbl = tk.Label(atl08DataLabelframe, text='Y Axis:', font=('Arial', 12), anchor = 'w', justify='left')
#lbl.place(x=10, y=100)
yValsBox_atl08 = Combobox(dataLayersLabelframe, width=30)
yValsBox_atl08.place(x=10, y=45)
# Initialize ATL08 plot lists: attribute names read off the ATL08 data
# objects by plotAtl08 (HAE heights first, then their MSL counterparts)
plotList_atl08 = ('maxCanopy', 'teBestFit', 'teMedian',
'maxCanopyMsl', 'teBestFitMsl', 'teMedianMsl')
# Plot ATL08 Button Callback
def plotAtl08():
    """Overlay the selected ATL08 parameter on the current ATL03 plot.

    Reads the ground-track and x/y parameter selections from the GUI combo
    boxes, filters out ATL08 fill values, and forwards the data to
    getPlot_atl08. Shows an error dialog on missing data or bad inputs.
    """
    # Try plot code
    try:
        if(len(atl08Data)>0):
            # Ground-track index selected by the user
            gtNumToPlot = gtNumPlotBox.current()
            # Get x,y combo box number selections
            xVarNum = xValsBox.current()
            yVarNum = yValsBox_atl08.current()
            # Pull the selected attributes directly; getattr replaces the
            # previous string-building + eval, which was fragile and unsafe
            xData = getattr(atl08Data[gtNumToPlot], plotList[xVarNum])
            yData = getattr(atl08Data[gtNumToPlot], plotList_atl08[yVarNum])
            fileName = atl03Data[gtNumToPlot].atl03FileName
            gtNum = atl03Data[gtNumToPlot].gtNum
            # Remove bad data (fill values) from ATL08
            indsToKeep = yData<=1e20
            xData = xData[indsToKeep]
            yData = yData[indsToKeep]
            # Get labels
            xLabel = xlabel_textBox.get()
            yLabel = ylabel_textBox.get()
            title = fileName + ' (' + gtNum + ')'
            yName = plotList_atl08[yVarNum]
            # Call getPlot function
            getPlot_atl08(xData, yData, xLabel, yLabel, title, yName)
        else:
            messagebox.showinfo('Error','No ATL08 data to plot.')
        # endIf
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
        # not swallowed
        messagebox.showinfo('Error','Cannot plot data. Please check inputs.')
    # endTry
# endDef
# Plot ATL08 Button: triggers plotAtl08 to overlay ATL08 data on the plot
btn = tk.Button(dataLayersLabelframe, text='Add ATL08', font=('Arial Bold', 16), width = 15, command=plotAtl08)
btn.place(x=320, y=25)
###############################################################################
#
# TAB 3: PLOT DATA - REFERENCE DATA
#
###############################################################################
#### Plot Reference Data Panel label
#truthPlotLabelframe = tk.LabelFrame(tab3, width=545, height=130, text='Add Reference Data', font=('Arial Bold', | |
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Admin",
"toMany": False,
"type": "html",
},
"id": {
"actions": [
{
"name": "delete_selected",
"prettyName": "Delete selected in admins",
}
],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "ID",
"toMany": False,
"type": "number",
},
"inlineadmin_set": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.InlineAdmin",
"prettyName": "Inlineadmin set",
"toMany": True,
"type": None,
},
"name": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Name",
"toMany": False,
"type": "string",
},
"normal_set": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.Normal",
"prettyName": "Normal set",
"toMany": True,
"type": None,
},
},
"sortedFields": ["id", "admin", "inlineadmin_set", "name", "normal_set"],
},
"core.InlineAdmin": {
"defaultFilters": [],
"fields": {
"id": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "ID",
"toMany": False,
"type": "number",
},
"in_admin": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.InAdmin",
"prettyName": "In admin",
"toMany": False,
"type": None,
},
"name": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Name",
"toMany": False,
"type": "string",
},
"normal_set": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.Normal",
"prettyName": "Normal set",
"toMany": True,
"type": None,
},
},
"sortedFields": ["id", "in_admin", "name", "normal_set"],
},
"core.Normal": {
"defaultFilters": [],
"fields": {
"admin": {
"actions": [
{
"name": "delete_selected",
"prettyName": "Delete selected normals",
}
],
"canPivot": True,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Admin",
"toMany": False,
"type": "html",
},
"id": {
"actions": [
{
"name": "delete_selected",
"prettyName": "Delete selected normals",
}
],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "ID",
"toMany": False,
"type": "number",
},
"in_admin": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.InAdmin",
"prettyName": "In admin",
"toMany": False,
"type": None,
},
"inline_admin": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.InlineAdmin",
"prettyName": "Inline admin",
"toMany": False,
"type": None,
},
"name": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Name",
"toMany": False,
"type": "string",
},
"not_in_admin": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "Not in admin",
"toMany": False,
"type": "number",
},
},
"sortedFields": [
"id",
"admin",
"in_admin",
"inline_admin",
"name",
"not_in_admin",
],
},
"core.Producer": {
"defaultFilters": [],
"fields": {
"address": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.Address",
"prettyName": "Address",
"toMany": False,
"type": None,
},
"admin": {
"actions": [
{
"name": "delete_selected",
"prettyName": "Delete selected producers",
}
],
"canPivot": True,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Admin",
"toMany": False,
"type": "html",
},
"frank": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Frank",
"toMany": False,
"type": "html",
},
"id": {
"actions": [
{
"name": "delete_selected",
"prettyName": "Delete selected producers",
}
],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "ID",
"toMany": False,
"type": "number",
},
"name": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Name",
"toMany": False,
"type": "string",
},
"product_set": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.Product",
"prettyName": "Product set",
"toMany": True,
"type": None,
},
},
"sortedFields": ["id", "admin", "address", "frank", "name", "product_set"],
},
"core.Product": {
"defaultFilters": [
{"lookup": "a_lookup", "pathStr": "a_field", "value": "a_value"},
{"lookup": "not_equals", "pathStr": "name", "value": "not a thing"},
{"lookup": "a_lookup", "pathStr": "a_field", "value": "true"},
],
"fields": {
"_underscore": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": " underscore",
"toMany": False,
"type": "number",
},
"admin": {
"actions": [
{"name": "an_action", "prettyName": "An action"},
{
"name": "delete_selected",
"prettyName": "Delete selected products",
},
],
"canPivot": True,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Admin",
"toMany": False,
"type": "html",
},
"annotated": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Annotated",
"toMany": False,
"type": "string",
},
"boat": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "Boat",
"toMany": False,
"type": "number",
},
"calculated_boolean": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Calculated boolean",
"toMany": False,
"type": "boolean",
},
"created_time": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": "datetime",
"prettyName": "Created time",
"toMany": False,
"type": "datetime",
},
"date": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": "asc",
"model": "date",
"prettyName": "Date",
"toMany": False,
"type": "date",
},
"default_sku": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.SKU",
"prettyName": "Default sku",
"toMany": False,
"type": None,
},
"duration": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "duration",
"prettyName": "Duration",
"toMany": False,
"type": "duration",
},
"extra_inline": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Extra inline",
"toMany": False,
"type": "string",
},
"extra_model": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Extra model",
"toMany": False,
"type": "string",
},
"fake": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "unknown",
"prettyName": "Fake",
"toMany": False,
"type": "unknown",
},
"funky": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Funky",
"toMany": False,
"type": "html",
},
"id": {
"actions": [
{"name": "an_action", "prettyName": "An action"},
{
"name": "delete_selected",
"prettyName": "Delete selected products",
},
],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "ID",
"toMany": False,
"type": "number",
},
"image": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "url",
"prettyName": "Image",
"toMany": False,
"type": "url",
},
"is_onsale": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Is onsale",
"toMany": False,
"type": "html",
},
"lambda": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": None,
"prettyName": "Lambda",
"toMany": False,
"type": "html",
},
"model_not_in_admin": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "Model not in admin",
"toMany": False,
"type": "number",
},
"name": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Name",
"toMany": False,
"type": "string",
},
"number_choice": {
"actions": [],
"canPivot": True,
"choices": ["A", "B"],
"concrete": True,
"defaultSort": None,
"model": "numberchoice",
"prettyName": "Number choice",
"toMany": False,
"type": "numberchoice",
},
"only_in_list_view": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Only in list view",
"toMany": False,
"type": "string",
},
"onsale": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "boolean",
"prettyName": "Onsale",
"toMany": False,
"type": "boolean",
},
"other_annotation": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Other annotation",
"toMany": False,
"type": "string",
},
"producer": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.Producer",
"prettyName": "Producer",
"toMany": False,
"type": None,
},
"size": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "number",
"prettyName": "Size",
"toMany": False,
"type": "number",
},
"size_unit": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Size unit",
"toMany": False,
"type": "string",
},
"sku_set": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.SKU",
"prettyName": "Sku set",
"toMany": True,
"type": None,
},
"stealth_annotation": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "string",
"prettyName": "Stealth annotation",
"toMany": False,
"type": "string",
},
"string_choice": {
"actions": [],
"canPivot": True,
"choices": ["A", "B"],
"concrete": True,
"defaultSort": None,
"model": "stringchoice",
"prettyName": "String choice",
"toMany": False,
"type": "stringchoice",
},
"tags": {
"actions": [],
"canPivot": False,
"choices": [],
"concrete": False,
"defaultSort": None,
"model": "core.Tag",
"prettyName": "Tags",
"toMany": True,
"type": None,
},
"url": {
"actions": [],
"canPivot": True,
"choices": [],
"concrete": True,
"defaultSort": None,
"model": "url",
"prettyName": "Url",
"toMany": False,
"type": "url",
},
},
"sortedFields": [
"id",
"admin",
"_underscore",
"annotated",
"boat",
"calculated_boolean",
"created_time",
"date",
"default_sku",
| |
must be given')
if E_file is None and not isinstance(self.E, IdentityOperator):
raise ValueError('E is not identity, E_file must be given')
from pymor.tools.io import save_matrix
A, B, C, D, E = self.to_matrices()
for mat, file in [(A, A_file), (B, B_file), (C, C_file), (D, D_file), (E, E_file)]:
if mat is None:
continue
save_matrix(file, mat)
@classmethod
def from_mat_file(cls, file_name, cont_time=True,
                  state_id='STATE', solver_options=None, error_estimator=None,
                  visualizer=None, name=None):
    """Create |LTIModel| from matrices stored in a .mat file.

    Parameters
    ----------
    file_name
        The name of the .mat file (extension .mat does not need to be included) containing A, B,
        C, and optionally D and E.
    cont_time
        `True` if the system is continuous-time, otherwise `False`.
    state_id
        Id of the state space.
    solver_options
        The solver options to use to solve the Lyapunov equations.
    error_estimator
        An error estimator for the problem. This can be any object with an
        `estimate_error(U, mu, model)` method. If `error_estimator` is not `None`, an
        `estimate_error(U, mu)` method is added to the model which will call
        `error_estimator.estimate_error(U, mu, self)`.
    visualizer
        A visualizer for the problem. This can be any object with a `visualize(U, model, ...)`
        method. If `visualizer` is not `None`, a `visualize(U, *args, **kwargs)` method is added
        to the model which forwards its arguments to the visualizer's `visualize` method.
    name
        Name of the system.

    Returns
    -------
    lti
        The |LTIModel| with operators A, B, C, D, and E.

    Raises
    ------
    ValueError
        If the .mat file does not contain all of the matrices A, B and C.
    """
    import scipy.io as spio
    mat_dict = spio.loadmat(file_name)
    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    missing = [key for key in ('A', 'B', 'C') if key not in mat_dict]
    if missing:
        raise ValueError(f'mat file is missing the matrices {missing}')
    A = mat_dict['A']
    B = mat_dict['B']
    C = mat_dict['C']
    # D and E are optional; .get returns None when the key is absent
    D = mat_dict.get('D')
    E = mat_dict.get('E')
    return cls.from_matrices(A, B, C, D, E, cont_time=cont_time,
                             state_id=state_id, solver_options=solver_options,
                             error_estimator=error_estimator, visualizer=visualizer, name=name)
def to_mat_file(self, file_name):
"""Save operators as matrices to .mat file.
Parameters
----------
file_name
The name of the .mat file (extension .mat does not need to be included).
"""
import scipy.io as spio
A, B, C, D, E = self.to_matrices()
mat_dict = {'A': A, 'B': B, 'C': C}
if D is not None:
mat_dict['D'] = D
if E is not None:
mat_dict['E'] = E
spio.savemat(file_name, mat_dict)
@classmethod
def from_abcde_files(cls, files_basename, cont_time=True,
state_id='STATE', solver_options=None, error_estimator=None,
visualizer=None, name=None):
"""Create |LTIModel| from matrices stored in .[ABCDE] files.
Parameters
----------
files_basename
The basename of files containing A, B, C, and optionally D and E.
cont_time
`True` if the system is continuous-time, otherwise `False`.
state_id
Id of the state space.
solver_options
The solver options to use to solve the Lyapunov equations.
error_estimator
An error estimator for the problem. This can be any object with an
`estimate_error(U, mu, model)` method. If `error_estimator` is not `None`, an
`estimate_error(U, mu)` method is added to the model which will call
`error_estimator.estimate_error(U, mu, self)`.
visualizer
A visualizer for the problem. This can be any object with a `visualize(U, model, ...)`
method. If `visualizer` is not `None`, a `visualize(U, *args, **kwargs)` method is added
to the model which forwards its arguments to the visualizer's `visualize` method.
name
Name of the system.
Returns
-------
lti
The |LTIModel| with operators A, B, C, D, and E.
"""
from pymor.tools.io import load_matrix
import os.path
A = load_matrix(files_basename + '.A')
B = load_matrix(files_basename + '.B')
C = load_matrix(files_basename + '.C')
D = load_matrix(files_basename + '.D') if os.path.isfile(files_basename + '.D') else None
E = load_matrix(files_basename + '.E') if os.path.isfile(files_basename + '.E') else None
return cls.from_matrices(A, B, C, D, E, cont_time=cont_time,
state_id=state_id, solver_options=solver_options,
error_estimator=error_estimator, visualizer=visualizer, name=name)
def to_abcde_files(self, files_basename):
"""Save operators as matrices to .[ABCDE] files in Matrix Market format.
Parameters
----------
files_basename
The basename of files containing the operators.
"""
from pathlib import Path
from pymor.tools.io.matrices import _mmwrite
A, B, C, D, E = self.to_matrices()
_mmwrite(Path(files_basename + '.A'), A)
_mmwrite(Path(files_basename + '.B'), B)
_mmwrite(Path(files_basename + '.C'), C)
if D is not None:
_mmwrite(Path(files_basename + '.D'), D)
if E is not None:
_mmwrite(Path(files_basename + '.E'), E)
def __add__(self, other):
"""Add an |LTIModel|."""
if not isinstance(other, LTIModel):
return NotImplemented
assert self.cont_time == other.cont_time
assert self.D.source == other.D.source
assert self.D.range == other.D.range
A = BlockDiagonalOperator([self.A, other.A])
B = BlockColumnOperator([self.B, other.B])
C = BlockRowOperator([self.C, other.C])
D = self.D + other.D
if isinstance(self.E, IdentityOperator) and isinstance(other.E, IdentityOperator):
E = IdentityOperator(BlockVectorSpace([self.solution_space, other.solution_space]))
else:
E = BlockDiagonalOperator([self.E, other.E])
return self.with_(A=A, B=B, C=C, D=D, E=E)
def __sub__(self, other):
"""Subtract an |LTIModel|."""
return self + (-other)
def __neg__(self):
"""Negate the |LTIModel|."""
return self.with_(C=-self.C, D=-self.D)
def __mul__(self, other):
"""Postmultiply by an |LTIModel|."""
if not isinstance(other, LTIModel):
return NotImplemented
assert self.cont_time == other.cont_time
assert self.D.source == other.D.range
A = BlockOperator([[self.A, self.B @ other.C],
[None, other.A]])
B = BlockColumnOperator([self.B @ other.D, other.B])
C = BlockRowOperator([self.C, self.D @ other.C])
D = self.D @ other.D
E = BlockDiagonalOperator([self.E, other.E])
return self.with_(A=A, B=B, C=C, D=D, E=E)
    @cached
    def poles(self, mu=None):
        """Compute system poles.

        .. note::
            Assumes the system is small enough to use a dense eigenvalue solver.

        Parameters
        ----------
        mu
            |Parameter values| for which to compute the system poles.

        Returns
        -------
        One-dimensional |NumPy array| of system poles.
        """
        if not isinstance(mu, Mu):
            mu = self.parameters.parse(mu)
        assert self.parameters.assert_compatible(mu)
        A = self.A.assemble(mu=mu)
        E = self.E.assemble(mu=mu)
        # The dense conversion below happens unconditionally; for systems at
        # or above the sparse size threshold we additionally warn, since
        # densifying a large sparse operator can be very expensive.
        if self.order >= sparse_min_size():
            if not isinstance(A, NumpyMatrixOperator) or A.sparse:
                self.logger.warning('Converting operator A to a NumPy array.')
            if not isinstance(E, IdentityOperator):
                if not isinstance(E, NumpyMatrixOperator) or E.sparse:
                    self.logger.warning('Converting operator E to a NumPy array.')
        A = to_matrix(A, format='dense')
        # E=None selects the standard (non-generalized) eigenvalue problem.
        E = None if isinstance(E, IdentityOperator) else to_matrix(E, format='dense')
        return spla.eigvals(A, E)
    @cached
    def gramian(self, typ, mu=None):
        """Compute a Gramian.

        Parameters
        ----------
        typ
            The type of the Gramian:

            - `'c_lrcf'`: low-rank Cholesky factor of the controllability Gramian,
            - `'o_lrcf'`: low-rank Cholesky factor of the observability Gramian,
            - `'c_dense'`: dense controllability Gramian,
            - `'o_dense'`: dense observability Gramian.

        .. note::
            For `'c_lrcf'` and `'o_lrcf'` types, the method assumes the system is asymptotically
            stable.
            For `'c_dense'` and `'o_dense'` types, the method assumes there are no two system
            poles which add to zero.
        mu
            |Parameter values|.

        Returns
        -------
        If typ is `'c_lrcf'` or `'o_lrcf'`, then the Gramian factor as a |VectorArray| from
        `self.A.source`.
        If typ is `'c_dense'` or `'o_dense'`, then the Gramian as a |NumPy array|.
        """
        if not self.cont_time:
            # Lyapunov-equation-based Gramians below are only implemented for
            # continuous-time systems.
            raise NotImplementedError
        assert typ in ('c_lrcf', 'o_lrcf', 'c_dense', 'o_dense')
        if not isinstance(mu, Mu):
            mu = self.parameters.parse(mu)
        assert self.parameters.assert_compatible(mu)
        A = self.A.assemble(mu)
        B = self.B
        C = self.C
        # Keep E as None for the identity case so the solvers can use the
        # standard (non-generalized) Lyapunov formulation.
        E = self.E.assemble(mu) if not isinstance(self.E, IdentityOperator) else None
        options_lrcf = self.solver_options.get('lyap_lrcf') if self.solver_options else None
        options_dense = self.solver_options.get('lyap_dense') if self.solver_options else None
        # Dispatch: controllability variants use B with trans=False,
        # observability variants use C with trans=True.
        if typ == 'c_lrcf':
            return solve_lyap_lrcf(A, E, B.as_range_array(mu=mu),
                                   trans=False, options=options_lrcf)
        elif typ == 'o_lrcf':
            return solve_lyap_lrcf(A, E, C.as_source_array(mu=mu),
                                   trans=True, options=options_lrcf)
        elif typ == 'c_dense':
            return solve_lyap_dense(to_matrix(A, format='dense'),
                                    to_matrix(E, format='dense') if E else None,
                                    to_matrix(B, format='dense'),
                                    trans=False, options=options_dense)
        elif typ == 'o_dense':
            return solve_lyap_dense(to_matrix(A, format='dense'),
                                    to_matrix(E, format='dense') if E else None,
                                    to_matrix(C, format='dense'),
                                    trans=True, options=options_dense)
@cached
def _hsv_U_V(self, mu=None):
"""Compute Hankel singular values and vectors.
.. note::
Assumes the system is asymptotically stable.
Parameters
----------
mu
|Parameter values|.
Returns
-------
hsv
One-dimensional |NumPy array| of singular values.
Uh
|NumPy array| of left singular vectors.
Vh
|NumPy array| of right singular vectors.
"""
if not isinstance(mu, Mu):
mu = self.parameters.parse(mu)
assert self.parameters.assert_compatible(mu)
cf = self.gramian('c_lrcf', mu=mu)
of = self.gramian('o_lrcf', mu=mu)
U, hsv, Vh = spla.svd(self.E.apply2(of, cf, mu=mu), lapack_driver='gesvd')
return hsv, U.T, Vh
def hsv(self, mu=None):
"""Hankel singular values.
.. note::
Assumes the system is asymptotically stable.
Parameters
----------
mu
|Parameter values|.
Returns
-------
sv
One-dimensional |NumPy array| of singular values.
"""
return self._hsv_U_V(mu=mu)[0]
@cached
def h2_norm(self, mu=None):
"""Compute the H2-norm of the |LTIModel|.
.. note::
Assumes the system is asymptotically stable.
Parameters
----------
mu
|Parameter values|.
Returns
-------
norm
H_2-norm.
"""
if not self.cont_time:
raise NotImplementedError
if not isinstance(mu, Mu):
mu = self.parameters.parse(mu)
D_norm2 = np.sum(self.D.as_range_array(mu=mu).norm2())
if D_norm2 != 0:
self.logger.warning('The D operator is not exactly zero '
f'(squared Frobenius norm is {D_norm2}).')
assert self.parameters.assert_compatible(mu)
if self.dim_input <= self.dim_output:
cf = self.gramian('c_lrcf', mu=mu)
return np.sqrt(self.C.apply(cf, mu=mu).norm2().sum())
else:
of = self.gramian('o_lrcf', mu=mu)
return np.sqrt(self.B.apply_adjoint(of, mu=mu).norm2().sum())
@cached
def hinf_norm(self, mu=None, | |
<reponame>butlertron/deployfish<filename>deployfish/aws/systems_manager.py
import re
from deployfish.aws import get_boto3_session
# Matches names like "cluster.service.*" used for wildcard parameter lookups.
# Raw string: '\.' and '\*' are invalid escape sequences in a normal string
# literal (DeprecationWarning today, SyntaxError in future Python versions).
WILDCARD_RE = re.compile(r'^(?P<prefix>.+\..+\.)\*(?P<remainder>.*)')
class BaseParameter(object):
    """
    This class represents a parameter in the AWS Systems Manager Parameter Store.

    Subclasses decide how the full parameter name is built (``prefix`` +
    ``key``) and how the parameter data is loaded.

    NOTE(review): ``__init__`` assigns ``self.name``, but this base class only
    defines a read-only ``name`` property.  Instantiating ``BaseParameter``
    directly therefore raises ``AttributeError``; it only works through
    subclasses that add a ``name`` setter (e.g. ``UnboundParameter``) --
    confirm whether direct instantiation is ever intended.
    """

    def __init__(self, name, kms_key_id=None):
        """
        :param name: the full (possibly dotted) name of the parameter
        :param kms_key_id: (optional) KMS key id/ARN; setting one marks the
            parameter as a SecureString
        """
        self.ssm = get_boto3_session().client('ssm')
        self._defaults(kms_key_id=kms_key_id)
        self.name = name

    def _defaults(self, kms_key_id=None):
        """Reset all cached state to its 'not loaded yet' defaults."""
        # CPM: Should we be checking for proper key format here?
        self.kms_key_id = kms_key_id
        self._value = None
        self._is_secure = False
        self._prefix = ''
        self._aws_parameter = {}

    @property
    def prefix(self):
        # The (possibly empty) namespace prepended to the key.
        return self._prefix

    @property
    def name(self):
        """
        Return the full name of the parameter as we will store it in AWS.

        :rtype: string
        """
        return self.prefix + self.key

    @property
    def value(self):
        """
        Return the value of the parameter, lazily pulled from the AWS data
        if it has been loaded.

        :rtype: string
        """
        if not self._value:
            if self._aws_parameter:
                self._value = self._aws_parameter['Value']
        return self._value

    @property
    def aws_value(self):
        """
        Return the value exactly as stored in AWS.

        :raises ValueError: if the parameter has not been loaded from AWS
        """
        if self._aws_parameter:
            return self._aws_parameter['Value']
        else:
            raise ValueError

    @property
    def key(self):
        """
        Return the parameter name stripped of its prefix.

        :rtype: string
        """
        return self._key

    @property
    def is_secure(self):
        """
        Return ``True`` if we want the value for this parameter to be encrypted
        in AWS, or if we've set a KMS Key ID for it.

        :rtype: boolean
        """
        if self._aws_parameter:
            self._is_secure = self._aws_parameter['Type'] == "SecureString"
        if self.kms_key_id:
            self._is_secure = True
        return self._is_secure

    @property
    def exists(self):
        """
        Return ``True`` if the parameter exists in AWS, ``False`` otherwise.

        :rtype: boolean
        """
        return self._aws_parameter != {}

    def _render_read(self):
        """
        Create a dict of keyword parameters suitable for passing to
        ``boto3.client('ssm').get_parameters()``.

        :rtype: dict
        """
        d = {}
        d['Names'] = [self.name]
        d['WithDecryption'] = True
        return d

    def _from_aws(self):
        """
        Set the value of ``self._aws_parameter`` to the data for that parameter from AWS, if
        that parameter exists in AWS, otherwise set ``self._aws_parameter`` to ``{}``.
        """
        self._aws_parameter = {}
        response = self.ssm.get_parameters(**self._render_read())
        if response['Parameters']:
            self._aws_parameter = response['Parameters'][0]

    def _render_write(self):
        """
        Create a dict of keyword parameters suitable for passing to
        ``boto3.client('ssm').put_parameter()``.

        :rtype: dict
        """
        d = {}
        d['Name'] = self.name
        d['Value'] = self.value
        d['Overwrite'] = True
        if self.is_secure:
            d['Type'] = 'SecureString'
            if self.kms_key_id:
                d['KeyId'] = self.kms_key_id
        else:
            d['Type'] = 'String'
        return d

    def save(self, overwrite=False):
        """
        Save this parameter to AWS. If ``overwrite`` is False and the parameter
        already exists in AWS, raise ``ValueError`` instead of clobbering it.
        """
        if self.exists and not overwrite:
            raise ValueError('{} exists in AWS: not overwriting'.format(self.name))
        self.ssm.put_parameter(**self._render_write())
        self._from_aws()

    def display(self, key, value):
        """
        Return a human readable display of the key value pair.

        :param key:
        :param value:
        :return: string
        """
        base = "{}: {}".format(key, value)
        if self.is_secure:
            base += " [SECURE:{}]".format(self.kms_key_id)
        return base

    def __str__(self):
        return self.display(self.name, self.value)

    def __cmp__(self, other):
        """
        Python 2 style three-way comparison by full parameter name.

        The previous implementation called the builtin ``cmp``, which was
        removed in Python 3 and raised ``NameError`` when this method was
        invoked.  Implement the same contract without it.
        """
        return (self.name > other.name) - (self.name < other.name)

    def __lt__(self, other):
        return self.name < other.name

    def __gt__(self, other):
        return self.name > other.name

    def __ge__(self, other):
        return self.name >= other.name

    def __le__(self, other):
        return self.name <= other.name

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return self.name != other.name
class UnboundParameter(BaseParameter):
    """
    A Parameter Store entry that is not bound to any ECS service or task.

    Unlike the base class, prefix/name/key/value are all writable; changing
    any of the naming attributes triggers a reload of the parameter data
    from AWS.
    """

    @BaseParameter.prefix.setter
    def prefix(self, value):
        # Normalize None to the empty prefix, then refresh from AWS.
        self._prefix = '' if value is None else value
        self._from_aws()

    @BaseParameter.name.setter
    def name(self, name):
        if not name:
            raise ValueError('UnboundParameter.name cannot be empty.')
        # Split "<prefix>.<key>" on the last dot; a dot-free name is all key.
        head, _, tail = name.rpartition('.')
        self._key = tail
        self._prefix = head + '.' if head else ''
        self._from_aws()

    @BaseParameter.value.setter
    def value(self, new_value):
        self._value = new_value

    @BaseParameter.key.setter
    def key(self, value):
        """
        Set the prefix-free parameter name.

        :param value: string
        """
        if not value:
            raise ValueError('UnboundParameter.key cannot be empty.')
        self._key = value
        self._from_aws()

    def display(self, key, value):
        """
        Return a human readable display of the key value pair, flagging
        parameters that are not present in AWS.

        :param key:
        :param value:
        :return: string
        """
        text = super(UnboundParameter, self).display(key, value)
        return text if self._aws_parameter else text + " [NOT IN AWS]"
class ClusterServicePrefixMixin(object):
    """Mixin that namespaces parameter names as ``<cluster>.<service>.``."""

    @property
    def prefix(self):
        return "%s.%s." % (self.cluster, self.service)
class Parameter(ClusterServicePrefixMixin, BaseParameter):
    """
    This class represents a parameter in the AWS Systems Manager Parameter Store
    that is bound to an ECS service: its full name is
    ``<cluster>.<service>.<KEY>``, unless the parameter is flagged as
    ``external`` in deployfish.yml, in which case the name from the yml
    definition is used verbatim.
    """

    def __init__(self, service, cluster, aws=None, yml=None):
        """
        :param service: name of the ECS service this parameter belongs to
        :param cluster: name of the ECS cluster running the service
        :param aws: (optional) the parameter data as already returned by AWS;
            if falsy, the parameter is looked up in AWS by name
        :param yml: (optional) the parameter definition string from deployfish.yml
        """
        # The old signature used the mutable default ``aws={}``: a single
        # shared dict across every call, a classic Python pitfall.  ``None``
        # behaves identically here (both are falsy for _from_aws).
        self.ssm = get_boto3_session().client('ssm')
        self.service = service
        self.cluster = cluster
        self._defaults()
        self.is_external = False
        self.__from_yml(yml)
        self._from_aws(aws)

    def _defaults(self):
        """Reset cached state; the key is unknown until parsed or loaded."""
        super(Parameter, self)._defaults()
        self._key = None

    @property
    def name(self):
        # External parameters use their yml-defined name verbatim; bound
        # parameters get the "<cluster>.<service>." prefix from the mixin.
        if self.is_external:
            return self._key
        else:
            return super(Parameter, self).name

    @property
    def key(self):
        """
        Return the parameter key as it is in AWS.

        :rtype: string
        """
        if not self._key:
            if self._aws_parameter:
                self._key = self._aws_parameter['Name'][len(self.prefix):]
        # strip the external prefix
        if self.is_external:
            key = self._key.split('.')[-1]
            return key
        return self._key

    @property
    def is_secure(self):
        """
        Return ``True`` if we want the value for this parameter to be encrypted
        in AWS, ``False`` otherwise.

        :rtype: boolean
        """
        # Without a yml definition, trust whatever type AWS reports;
        # otherwise keep the flag parsed out of the yml key.
        if not self.__yml:
            if self._aws_parameter:
                self._is_secure = self._aws_parameter['Type'] == "SecureString"
        return self._is_secure

    @property
    def should_exist(self):
        """
        Return ``True`` if we want this parameter to exist in AWS. This means that
        we have a parameter definition for this parameter in our ``deployfish.yml``
        file.

        This will always return ``True`` for external parameters.

        :rtype: boolean
        """
        if self.is_external:
            return True
        return self.__yml is not None

    @property
    def needs_update(self):
        """
        Return ``True`` if the value portion of our parameter definition differs
        from what is currently in AWS, ``False`` otherwise.

        This will always be ``False`` for external parameters.

        :rtype: boolean
        """
        if self.is_external:
            return False
        if not self._aws_parameter:
            return True
        else:
            return self.value != self._aws_parameter['Value']

    def _split(self, definition):
        """
        In our YAML parameter definition line, split the key part from the value part.

        :param definition: a parameter definition from our deployfish.yml
        :type definition: string

        :rtype: 2-tuple of strings
        """
        key = definition
        value = None
        delimiter_loc = definition.find('=')
        if delimiter_loc > 0:
            key = definition[:delimiter_loc]
            # '=' at the very end of the line means "empty value"
            if len(definition) > delimiter_loc + 1:
                value = definition[delimiter_loc + 1:].strip('"')
            else:
                value = ""
        return (key, value)

    def _parse_key(self, key):
        """
        Parse a key from a parameter definition that looks like one of the following:

            KEY
            KEY:secure
            KEY:secure:arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
            KEY:external
            KEY:external:secure
            KEY:external:secure:arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
        """
        i = 0
        while key is not None:
            segments = key.split(':', 1)
            segment = segments[0]
            if len(segments) > 1:
                key = segments[1]
            else:
                key = None
            if i == 0:
                # first segment is always the bare key name
                self._key = segment
            elif segment == 'external':
                self.is_external = True
            elif segment == 'secure':
                self._is_secure = True
            elif segment == 'arn':
                # everything from 'arn:' onward is the KMS key ARN
                self.kms_key_id = 'arn:{}'.format(key)
                break
            i += 1

    def __from_yml(self, yml=None):
        """
        Parse a parameter definition string and set some instance properties based on it.

        If ``yml`` is not ``None``, it will be a string that looks like one of the following examples:

            KEY=value
            KEY:secure=value
            KEY:secure:arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab=value
            KEY:external
            KEY:external:secure
            KEY:external:secure:arn:aws:kms:us-west-2:111122223333:key/<KEY>

        :param yml: (optional) a string describing a parameter to be stored in
                    AWS Systems Manager Parameter Store
        :type yml: string

        :raises ValueError: if a non-external definition has no value
        """
        if yml:
            self.__yml = yml
            key, value = self._split(yml)
            self._value = value
            self._parse_key(key)
            if not self._value and not self.is_external:
                raise ValueError('parameter "{}" in deployfish.yml has no value'.format(self._key))
        else:
            self.__yml = None

    def _from_aws(self, aws=None):
        """
        Store ``aws`` as the parameter data, or, if it is falsy, load the
        parameter named by ``self.name`` from AWS.
        """
        self._aws_parameter = aws
        if not aws:
            super(Parameter, self)._from_aws()

    def save(self):
        """
        If the value still exists in the config, save it, otherwise remove the parameter
        from AWS.
        """
        if self.should_exist:
            if self.needs_update and not self.is_external:
                self.ssm.put_parameter(**self._render_write())
        elif self.exists:
            self.ssm.delete_parameter(Name=self.name)

    def display(self, key, value):
        """
        Return a human readable display of the key value pair.

        :param key:
        :param value:
        :return: string
        """
        base = super(Parameter, self).display(key, value)
        if self.is_external:
            base += " [EXTERNAL]"
        return base

    def __str__(self):
        return self.display(self.key, self.value)
class UnboundParameterFactory(object):
@staticmethod
def new(name):
"""
Returns a list of UnboundParameters matching ``name``. If ``name`` ends with "*",
this could be a long list of parameters. If there is no "*" in name, there
will be just one Parameter in the list.
:param name: the name to search for in AWS SSM Parameter Store
:return: list of Parameter objects
"""
m = WILDCARD_RE.search(name)
if m:
# This is a wildcard search
filter_option = "BeginsWith"
filter_values = [m.group('prefix')]
else:
# Get a single parameter
filter_option = "Equals"
filter_values = [name]
ssm = get_boto3_session().client('ssm')
paginator = ssm.get_paginator('describe_parameters')
response_iterator = paginator.paginate(
ParameterFilters=[{
'Key': 'Name',
'Option': filter_option,
'Values': filter_values
}],
PaginationConfig={
'MaxItems': 100,
'PageSize': 50
}
)
parms = []
for r in response_iterator:
parms.extend(r['Parameters'])
return [UnboundParameter(parm['Name'], | |
contains fields with a default. If 'self' is a new
# line in a one2many field, 'names' also contains the one2many's inverse
# field, and that field may not be in nametree.
todo = list(unique(itertools.chain(names, nametree))) if first_call else list(names)
done = set()
# mark fields to do as modified to trigger recomputations
protected = [self._fields[name] for name in names]
with self.env.protecting(protected, record):
record.modified(todo)
for name in todo:
field = self._fields[name]
if field.inherited:
# modifying an inherited field should modify the parent
# record accordingly; because we don't actually assign the
# modified field on the record, the modification on the
# parent record has to be done explicitly
parent = record[field.related[0]]
parent[name] = record[name]
result = {'warnings': OrderedSet()}
# process names in order
while todo:
# apply field-specific onchange methods
for name in todo:
if field_onchange.get(name):
record._onchange_eval(name, field_onchange[name], result)
done.add(name)
# determine which fields to process for the next pass
todo = [
name
for name in nametree
if name not in done and snapshot0.has_changed(name)
]
if not env.context.get('recursive_onchanges', True):
todo = []
# make the snapshot with the final values of record
snapshot1 = Snapshot(record, nametree)
# determine values that have changed by comparing snapshots
self.invalidate_cache()
result['value'] = snapshot1.diff(snapshot0, force=first_call)
# format warnings
warnings = result.pop('warnings')
if len(warnings) == 1:
title, message, type = warnings.pop()
if not type:
type = 'dialog'
result['warning'] = dict(title=title, message=message, type=type)
elif len(warnings) > 1:
# concatenate warning titles and messages
title = _("Warnings")
message = '\n\n'.join([warn_title + '\n\n' + warn_message for warn_title, warn_message, warn_type in warnings])
result['warning'] = dict(title=title, message=message, type='dialog')
return result
def _get_placeholder_filename(self, field=None):
""" Returns the filename of the placeholder to use,
set on web/static/src/img by default, or the
complete path to access it (eg: module/path/to/image.png).
"""
return 'placeholder.png'
    def _populate_factories(self):
        """ Generates a factory for the different fields of the model.

        ``factory`` is a generator of values (dict of field values).

        Factory skeleton::

            def generator(iterator, field_name, model_name):
                for counter, values in enumerate(iterator):
                    # values.update(dict())
                    yield values

        See :mod:`odoo.tools.populate` for population tools and applications.

        :returns: list of pairs(field_name, factory) where `factory` is a generator function.
        :rtype: list(tuple(str, generator))

        .. note::
            It is the responsibility of the generator to handle the field_name correctly.
            The generator could generate values for multiple fields together. In this case,
            the field_name should more be a "field_group" (should begin with a "_"), covering
            the different fields updated by the generator (e.g. "_address" for a generator
            updating multiple address fields).
        """
        # Default: no factories. Models opt in to population by overriding
        # this method; _populate() returns an empty recordset otherwise.
        return []
    @property
    def _populate_sizes(self):
        """ Return a dict mapping symbolic sizes (``'small'``, ``'medium'``, ``'large'``) to integers,
        giving the minimal number of records that :meth:`_populate` should create.

        The default population sizes are:

        * ``small`` : 10
        * ``medium`` : 100
        * ``large`` : 1000
        """
        # Overridable per model; expensive models may want smaller numbers.
        return {
            'small': 10, # minimal representative set
            'medium': 100, # average database load
            'large': 1000, # maxi database load
        }
    @property
    def _populate_dependencies(self):
        """ Return the list of models which have to be populated before the current one.

        E.g. a model whose factories need records of another model (required
        many2one targets) should list that model's name here.

        :rtype: list
        """
        return []
def _populate(self, size):
""" Create records to populate this model.
:param str size: symbolic size for the number of records: ``'small'``, ``'medium'`` or ``'large'``
"""
batch_size = 1000
min_size = self._populate_sizes[size]
record_count = 0
create_values = []
complete = False
field_generators = self._populate_factories()
if not field_generators:
return self.browse() # maybe create an automatic generator?
records_batches = []
generator = populate.chain_factories(field_generators, self._name)
while record_count <= min_size or not complete:
values = next(generator)
complete = values.pop('__complete')
create_values.append(values)
record_count += 1
if len(create_values) >= batch_size:
_logger.info('Batch: %s/%s', record_count, min_size)
records_batches.append(self.create(create_values))
create_values = []
if create_values:
records_batches.append(self.create(create_values))
return self.concat(*records_batches)
import collections.abc

# The `collections.Set`/`collections.Sequence` aliases were removed in
# Python 3.10 (deprecated since 3.3); register against `collections.abc`.
collections.abc.Set.register(BaseModel)
# not exactly true as BaseModel doesn't have __reversed__, index or count
collections.abc.Sequence.register(BaseModel)
class RecordCache(MutableMapping):
    """ A mapping from field names to values, to read and update the cache of a record. """
    __slots__ = ['_record']

    def __init__(self, record):
        assert len(record) == 1, "Unexpected RecordCache(%s)" % record
        self._record = record

    def _field(self, name):
        """Resolve ``name`` to the field object of the record's model."""
        return self._record._fields[name]

    def __contains__(self, name):
        """Return whether the record has a cached value for field ``name``."""
        record = self._record
        return record.env.cache.contains(record, self._field(name))

    def __getitem__(self, name):
        """Return the cached value of field ``name`` for the record."""
        record = self._record
        return record.env.cache.get(record, self._field(name))

    def __setitem__(self, name, value):
        """Assign the cached value of field ``name`` for the record."""
        record = self._record
        record.env.cache.set(record, self._field(name), value)

    def __delitem__(self, name):
        """Remove the cached value of field ``name`` for the record."""
        record = self._record
        record.env.cache.remove(record, self._field(name))

    def __iter__(self):
        """Iterate over the field names with a cached value."""
        record = self._record
        for field in record.env.cache.get_fields(record):
            yield field.name

    def __len__(self):
        """Return the number of fields with a cached value."""
        count = 0
        for _name in self:
            count += 1
        return count
# Alias: abstract models are the base behaviour; `Model` and `TransientModel`
# below specialize it with persistence flags.
AbstractModel = BaseModel
class Model(AbstractModel):
    """ Main super-class for regular database-persisted Odoo models.

    Odoo models are created by inheriting from this class::

        class user(Model):
            ...

    The system will later instantiate the class once per database (on
    which the class' module is installed).
    """
    # These four flags are what distinguish Model from AbstractModel and
    # TransientModel; subclasses normally never change them directly.
    _auto = True # automatically create database backend
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _abstract = False # not abstract
    _transient = False # not transient
class TransientModel(Model):
    """ Model super-class for transient records, meant to be temporarily
    persistent, and regularly vacuum-cleaned.

    A TransientModel has a simplified access rights management, all users can
    create new records, and may only access the records they created. The
    superuser has unrestricted access to all TransientModel records.
    """
    _auto = True # automatically create database backend
    _register = False # not visible in ORM registry, meant to be python-inherited only
    _abstract = False # not abstract
    _transient = True # transient

    @api.autovacuum
    def _transient_vacuum(self):
        """Clean the transient records.

        Old records are unlinked whenever the ``_transient_max_count`` or
        ``_transient_max_hours`` conditions (if any) are reached.

        Example with both max_hours and max_count active: suppose
        max_hours = 0.2 (12 minutes), max_count = 20, and 55 rows in the
        table -- 10 created/changed in the last 5 minutes, 12 more between
        5 and 10 minutes ago, the rest older than 12 minutes.

        - age-based vacuum keeps the 22 rows touched in the last 12 minutes
        - count-based vacuum then wipes another 12 rows (not just 2,
          otherwise each insertion would immediately hit the maximum again)
        - the 10 rows touched within the last 5 minutes are NEVER deleted
        """
        max_hours = self._transient_max_hours
        if max_hours:
            # Age-based expiration
            self._transient_clean_rows_older_than(max_hours * 60 * 60)
        max_count = self._transient_max_count
        if max_count:
            # Count-based expiration
            self._transient_clean_old_rows(max_count)

    def _transient_clean_old_rows(self, max_count):
        """Trim the table back when it holds more than ``max_count`` rows."""
        self._cr.execute('SELECT count(*) FROM "{}"'.format(self._table))
        [count] = self._cr.fetchone()
        if count > max_count:
            # delegate to the age-based cleaner with its 5-minute floor
            self._transient_clean_rows_older_than(300)

    def _transient_clean_rows_older_than(self, seconds):
        """Unlink rows whose last write is older than ``seconds`` seconds."""
        # Never delete rows used in last 5 minutes
        seconds = max(seconds, 300)
        query = """
            SELECT id FROM "{}"
            WHERE COALESCE(write_date, create_date, (now() AT TIME ZONE 'UTC'))::timestamp
                < (now() AT TIME ZONE 'UTC') - interval %s
        """.format(self._table)
        self._cr.execute(query, ["%s seconds" % seconds])
        stale_ids = [row[0] for row in self._cr.fetchall()]
        self.sudo().browse(stale_ids).unlink()
def itemgetter_tuple(items):
    """Like :func:`operator.itemgetter`, but always returning a tuple.

    ``operator.itemgetter`` returns a bare value when given a single item;
    this wrapper always returns an n-tuple where n = len(items), including
    the 0-tuple and 1-tuple cases.
    """
    arity = len(items)
    if arity == 0:
        return lambda a: ()
    if arity == 1:
        return lambda gettable: (gettable[items[0]],)
    return operator.itemgetter(*items)
def convert_pgerror_not_null(model, fields, info, e):
    """Translate a NOT NULL psycopg2 error into a user-facing message dict."""
    column = e.diag.column_name
    if e.diag.table_name != model._table:
        # The violation happened in another table (e.g. an inherited model);
        # we have no field metadata for it, so fall back to the column name.
        return {'message': _(u"Missing required value for the field '%s'") % (column)}
    field_meta = fields[column]
    return {
        'message': _(u"Missing required value for the field '%s' (%s)") % (field_meta['string'], column),
        'field': column,
    }
def convert_pgerror_unique(model, fields, info, e):
# new cursor since we're probably in an error handler in a blown
# transaction which may not have | |
from vmaf.tools.misc import run_process
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
import os
import re
import unittest
from vmaf.config import VmafConfig
from vmaf import ExternalProgram, required
REMOVE_LOG = 1  # set to 0 to keep the per-test log files on disk for debugging
def read_log(log_filename, type):
    """Parse per-frame feature scores from a vmaf_feature log file.

    Matching lines have the form ``<type>: <frame_idx> <score>``; frame
    indices are expected to appear in order starting from 0.

    :param log_filename: path of the log file to parse
    :param type: feature name to extract (e.g. ``"adm"``, ``"adm_num_scale0"``)
    :return: 2-tuple (mean score, list of per-frame scores)
    """
    # Compile the pattern once instead of re-formatting and re-matching it
    # for every line of the log.
    pattern = re.compile(r"{type}: ([0-9]+) ([0-9.-]+)".format(type=type))
    scores = []
    idx = 0
    with open(log_filename, 'rt') as log_file:
        # iterate the file lazily rather than materializing it via readlines()
        for line in log_file:
            mo = pattern.match(line)
            if mo:
                cur_idx = int(mo.group(1))
                assert cur_idx == idx
                scores.append(float(mo.group(2)))
                idx += 1
    score = sum(scores) / float(len(scores))
    return score, scores
class FeatureTest(unittest.TestCase):
    """End-to-end tests of the vmaf_feature executable on an 8-bit yuv420p pair.

    Each test shells out to the executable, parses the log it writes, and
    compares per-feature average scores against golden values.  The original
    version repeated the command template and the read_log/assertAlmostEqual
    pair dozens of times; that boilerplate is factored into the private
    helpers `_run_feature` and `_assert_scores` (golden values unchanged).
    """

    LOG_FILENAME = VmafConfig.workdir_path("logFeatureTest")
    REF_YUV = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    DIS_YUV = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    YUV_FMT = "yuv420p"
    YUV_WIDTH = 576
    YUV_HEIGHT = 324

    def setUp(self):
        # Remove any stale base log left over from a previous run.
        if os.path.exists(self.LOG_FILENAME):
            os.remove(self.LOG_FILENAME)

    def tearDown(self):
        if os.path.exists(self.LOG_FILENAME):
            os.remove(self.LOG_FILENAME)
        if REMOVE_LOG:
            # Also remove the per-test suffixed logs (e.g. "..._adm").
            (logPath, logFilePrefix) = os.path.split(self.LOG_FILENAME)
            filenames = [filename for filename in os.listdir(logPath) if filename.startswith(logFilePrefix)]
            for filename in filenames:
                os.remove(os.path.join(logPath, filename))

    def _run_feature(self, feature, log_suffix):
        """Run `vmaf_feature <feature>` on the fixture pair; return the log path."""
        log = self.LOG_FILENAME + log_suffix
        cmd = "{vmaf} {feature} {fmt} {ref} {dis} {w} {h} > {log}".format(
            vmaf=required(ExternalProgram.vmaf_feature), feature=feature,
            fmt=self.YUV_FMT, ref=self.REF_YUV, dis=self.DIS_YUV,
            w=self.YUV_WIDTH, h=self.YUV_HEIGHT, log=log
        )
        run_process(cmd, shell=True)
        return log

    def _assert_scores(self, log, expectations):
        """Check the mean score for each (type, expected, places) triple."""
        for score_type, expected, places in expectations:
            score, _scores = read_log(log, score_type)
            self.assertAlmostEqual(score, expected, places=places)

    def test_adm(self):
        log = self._run_feature("adm", "_adm")
        self._assert_scores(log, [
            ("adm", 0.9345877708333336, 4),
            ("adm_num", 371.8354140624999, 4),
            ("adm_den", 397.8337897291667, 4),
            ("adm_num_scale0", 45.5277493125, 4),
            ("adm_den_scale0", 50.143851375000004, 4),
            ("adm_num_scale1", 66.58064533333334, 4),
            ("adm_den_scale1", 74.47438285416666, 4),
            ("adm_num_scale2", 105.56477879166668, 4),
            ("adm_den_scale2", 113.49725852083333, 4),
            ("adm_num_scale3", 154.16224066666666, 4),
            ("adm_den_scale3", 159.71829710416668, 4),
        ])

    def test_ansnr(self):
        log = self._run_feature("ansnr", "_ansnr")
        self._assert_scores(log, [
            ("ansnr", 23.5095715208, 4),
            ("anpsnr", 34.164776875, 4),
        ])

    def test_motion(self):
        log = self._run_feature("motion", "_motion")
        self._assert_scores(log, [("motion", 4.04982535417, 4)])

    def test_motion2(self):
        # the "motion" feature run also emits motion2 scores
        log = self._run_feature("motion", "_motion2")
        self._assert_scores(log, [("motion2", 3.8953518541666665, 4)])

    def test_vif(self):
        log = self._run_feature("vif", "_vif")
        # vif also checks individual per-frame scores, not just the mean
        score, scores = read_log(log, "vif")
        self.assertAlmostEqual(score, 0.4460930625000001, places=4)
        self.assertAlmostEqual(scores[0], 0.580304, places=4)
        self.assertAlmostEqual(scores[1], 0.492477, places=4)
        self._assert_scores(log, [
            ("vif_num", 712650.023478, 0),
            ("vif_den", 1597314.95249, 0),
            ("vif_num_scale0", 468101.509766, 0),
            ("vif_num_scale1", 184971.572266, 1),
            ("vif_num_scale2", 47588.8323567, 0),
            ("vif_num_scale3", 11988.1090902, 1),
            ("vif_den_scale0", 1287822.80208, 0),
            ("vif_den_scale1", 241255.067708, 1),
            ("vif_den_scale2", 55149.8169759, 2),
            ("vif_den_scale3", 13087.2657267, 2),
        ])

    def test_all(self):
        log = self._run_feature("all", "_all")
        self._assert_scores(log, [
            ("vif", 0.4460930625, 4),
            ("motion", 4.04982535417, 4),
            ("motion2", 3.8953518541666665, 4),
            ("ansnr", 23.509571520833337, 4),
            ("adm", 0.9345877708333336, 4),
            ("adm_num", 371.8354140624999, 4),
            ("adm_den", 397.8337897291667, 4),
            ("vif_num", 712650.023478, 0),
            ("vif_den", 1597314.95249, 0),
            ("anpsnr", 34.164776874999994, 4),
            ("vif_num_scale0", 468101.509766, 0),
            ("vif_num_scale1", 184971.572266, 1),
            ("vif_num_scale2", 47588.8323567, 0),
            ("vif_num_scale3", 11988.1090902, 1),
            ("vif_den_scale0", 1287822.80208, 0),
            ("vif_den_scale1", 241255.067708, 1),
            ("vif_den_scale2", 55149.8169759, 2),
            ("vif_den_scale3", 13087.2657267, 2),
            ("adm_den_scale0", 50.143851375000004, 4),
            ("adm_num_scale1", 66.58064533333334, 4),
            ("adm_den_scale1", 74.47438285416666, 4),
            ("adm_num_scale2", 105.56477879166668, 4),
            ("adm_den_scale2", 113.49725852083333, 4),
            ("adm_num_scale3", 154.16224066666666, 4),
            ("adm_den_scale3", 159.71829710416668, 4),
        ])
class FeatureTestYuv422p10le(unittest.TestCase):
LOG_FILENAME = VmafConfig.workdir_path("logFeatureTestYuv422p10le")
REF_YUV = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv422p10le.yuv")
DIS_YUV = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv422p10le.yuv")
YUV_FMT = "yuv422p10le"
YUV_WIDTH = 576
YUV_HEIGHT = 324
def setUp(self):
if os.path.exists(self.LOG_FILENAME):
os.remove(self.LOG_FILENAME)
def tearDown(self):
if os.path.exists(self.LOG_FILENAME):
os.remove(self.LOG_FILENAME)
if (REMOVE_LOG):
(logPath, logFilePrefix) = os.path.split(self.LOG_FILENAME)
filenames = [filename for filename in os.listdir(logPath) if filename.startswith(logFilePrefix)]
for filename in filenames:
os.remove(os.path.join(logPath, filename))
def test_adm(self):
ADM_LOG = self.LOG_FILENAME + '_adm'
cmd = "{vmaf} adm {fmt} {ref} {dis} {w} {h} > {log}".format(
vmaf=required(ExternalProgram.vmaf_feature), fmt=self.YUV_FMT, ref=self.REF_YUV, dis=self.DIS_YUV,
w=self.YUV_WIDTH, h=self.YUV_HEIGHT, log=ADM_LOG
)
run_process(cmd, shell=True)
score, scores = read_log(ADM_LOG, "adm")
self.assertAlmostEqual(score, 0.9345877708333336, places=4)
score, scores = read_log(ADM_LOG, "adm_num")
self.assertAlmostEqual(score, 371.8354140624999, places=4)
score, scores = read_log(ADM_LOG, "adm_den")
self.assertAlmostEqual(score, 397.8337897291667, places=4)
score, scores = read_log(ADM_LOG, "adm_den_scale0")
self.assertAlmostEqual(score, 50.143851375000004, places=4)
score, scores = read_log(ADM_LOG, "adm_num_scale1")
self.assertAlmostEqual(score, 66.58064533333334, places=4)
score, scores = read_log(ADM_LOG, "adm_den_scale1")
self.assertAlmostEqual(score, 74.47438285416666, places=4)
score, scores = read_log(ADM_LOG, "adm_num_scale2")
self.assertAlmostEqual(score, 105.56477879166668, places=4)
score, scores = read_log(ADM_LOG, "adm_den_scale2")
self.assertAlmostEqual(score, 113.49725852083333, places=4)
score, scores = read_log(ADM_LOG, "adm_num_scale3")
self.assertAlmostEqual(score, 154.16224066666666, places=4)
score, scores = read_log(ADM_LOG, "adm_den_scale3")
self.assertAlmostEqual(score, 159.71829710416668, places=4)
def test_ansnr(self):
ANSNR_LOG = self. LOG_FILENAME + '_ansnr'
cmd = "{vmaf} ansnr {fmt} {ref} {dis} {w} {h} > {log}".format(
vmaf=required(ExternalProgram.vmaf_feature), fmt=self.YUV_FMT, ref=self.REF_YUV, dis=self.DIS_YUV,
w=self.YUV_WIDTH, h=self.YUV_HEIGHT, log=ANSNR_LOG
)
run_process(cmd, shell=True)
score, scores = read_log(ANSNR_LOG, "ansnr")
self.assertAlmostEqual(score, 23.5095715208, places=4)
score, scores = read_log(ANSNR_LOG, "anpsnr")
self.assertAlmostEqual(score, 34.1902860625, places=4)
def test_motion(self):
MOTION_LOG = self.LOG_FILENAME + '_motion'
cmd = "{vmaf} motion {fmt} {ref} {dis} {w} {h} > {log}".format(
vmaf=required(ExternalProgram.vmaf_feature), fmt=self.YUV_FMT, ref=self.REF_YUV, dis=self.DIS_YUV,
w=self.YUV_WIDTH, h=self.YUV_HEIGHT, log=MOTION_LOG
)
run_process(cmd, shell=True)
score, scores = read_log(MOTION_LOG, "motion")
self.assertAlmostEqual(score, 4.04982535417, places=4)
def test_motion2(self):
MOTION_LOG = self.LOG_FILENAME + '_motion2'
cmd = "{vmaf} motion {fmt} {ref} {dis} {w} {h} > {log}".format(
vmaf=required(ExternalProgram.vmaf_feature), fmt=self.YUV_FMT, ref=self.REF_YUV, dis=self.DIS_YUV,
w=self.YUV_WIDTH, h=self.YUV_HEIGHT, log=MOTION_LOG
)
run_process(cmd, shell=True)
score, scores = read_log(MOTION_LOG, "motion2")
self.assertAlmostEqual(score, 3.8953518541666665, places=4)
def test_vif(self):
VIF_LOG = self.LOG_FILENAME + '_vif'
cmd = "{vmaf} vif {fmt} {ref} {dis} {w} {h} > {log}".format(
vmaf=required(ExternalProgram.vmaf_feature), fmt=self.YUV_FMT, ref=self.REF_YUV, dis=self.DIS_YUV,
w=self.YUV_WIDTH, h=self.YUV_HEIGHT, log=VIF_LOG
)
run_process(cmd, shell=True)
score, scores = read_log(VIF_LOG, "vif")
self.assertAlmostEqual(score, 0.4460930625, places=4)
self.assertAlmostEqual(scores[0], 0.580304, places=4)
self.assertAlmostEqual(scores[1], 0.492477, places=4)
score, scores = read_log(VIF_LOG, "vif_num")
self.assertAlmostEqual(score, 712650.023478, places=0)
score, scores = read_log(VIF_LOG, "vif_den")
self.assertAlmostEqual(score, 1597314.95249, places=0)
score, scores = read_log(VIF_LOG, "vif_num_scale0")
self.assertAlmostEqual(score, 468101.509766, places=0)
score, scores = read_log(VIF_LOG, "vif_num_scale1")
self.assertAlmostEqual(score, 184971.572266, places=1)
score, scores = read_log(VIF_LOG, "vif_num_scale2")
self.assertAlmostEqual(score, 47588.8323567, places=0)
score, scores = read_log(VIF_LOG, "vif_num_scale3")
self.assertAlmostEqual(score, 11988.1090902, places=1)
score, scores = read_log(VIF_LOG, "vif_den_scale0")
self.assertAlmostEqual(score, 1287822.80208, places=0)
score, scores = read_log(VIF_LOG, "vif_den_scale1")
self.assertAlmostEqual(score, 241255.067708, places=1)
score, scores = read_log(VIF_LOG, "vif_den_scale2")
self.assertAlmostEqual(score, 55149.8169759, places=2)
score, scores = read_log(VIF_LOG, "vif_den_scale3")
self.assertAlmostEqual(score, 13087.2657267, places=2)
def test_all(self):
ALL_LOG = self.LOG_FILENAME + "_all"
cmd = "{vmaf} all {fmt} {ref} {dis} {w} {h} > {log}".format(
vmaf=required(ExternalProgram.vmaf_feature), fmt=self.YUV_FMT, ref=self.REF_YUV, dis=self.DIS_YUV,
w=self.YUV_WIDTH, h=self.YUV_HEIGHT, log=ALL_LOG
)
run_process(cmd, shell=True)
score, scores = read_log(ALL_LOG, "vif")
self.assertAlmostEqual(score, 0.4460930625, places=4)
score, scores = read_log(ALL_LOG, "motion")
self.assertAlmostEqual(score, 4.04982535417, places=4)
score, scores = read_log(ALL_LOG, "motion2")
self.assertAlmostEqual(score, 3.8953518541666665, places=4)
score, scores = read_log(ALL_LOG, "ansnr")
self.assertAlmostEqual(score, 23.5095715208, places=4)
score, scores = read_log(ALL_LOG, "adm")
self.assertAlmostEqual(score, 0.9345877708333336, places=4)
score, scores = read_log(ALL_LOG, "adm_num")
self.assertAlmostEqual(score, 371.8354140624999, places=4)
score, scores = read_log(ALL_LOG, "adm_den")
self.assertAlmostEqual(score, 397.8337897291667, places=4)
score, scores = read_log(ALL_LOG, "vif_num")
self.assertAlmostEqual(score, 712650.023478, places=0)
score, scores = read_log(ALL_LOG, "vif_den")
self.assertAlmostEqual(score, 1597314.95249, places=0)
score, scores = read_log(ALL_LOG, "anpsnr")
self.assertAlmostEqual(score, 34.1902860625, places=4)
score, scores = read_log(ALL_LOG, "vif_num_scale0")
self.assertAlmostEqual(score, 468101.509766, places=0)
score, scores = read_log(ALL_LOG, "vif_num_scale1")
self.assertAlmostEqual(score, 184971.572266, places=1)
score, scores = read_log(ALL_LOG, "vif_num_scale2")
self.assertAlmostEqual(score, 47588.8323567, places=0)
score, scores = read_log(ALL_LOG, "vif_num_scale3")
self.assertAlmostEqual(score, 11988.1090902, places=1)
score, scores = read_log(ALL_LOG, "vif_den_scale0")
self.assertAlmostEqual(score, 1287822.80208, places=0)
score, scores = read_log(ALL_LOG, "vif_den_scale1")
self.assertAlmostEqual(score, 241255.067708, places=1)
score, scores = read_log(ALL_LOG, "vif_den_scale2")
self.assertAlmostEqual(score, 55149.8169759, places=2)
score, scores = read_log(ALL_LOG, "vif_den_scale3")
self.assertAlmostEqual(score, 13087.2657267, places=2)
score, scores | |
<reponame>vathes/canonical-imaging<filename>djimaging/processing.py
import datajoint as dj
import scanreader
import numpy as np
import pathlib
from datetime import datetime
from uuid import UUID
from .imaging import schema, Scan, ScanInfo, Channel, PhysicalFile
from .utils import dict_to_hash
from djutils.templates import required, optional
from img_loaders import suite2p
# ===================================== Lookup =====================================
@schema
class ProcessingMethod(dj.Lookup):
    """Lookup of supported processing pipelines."""
    definition = """
    processing_method: char(8)
    """
    contents = zip(['suite2p', 'caiman'])
@schema
class ProcessingParamSet(dj.Lookup):
    """Parameter sets for a processing method, deduplicated by content hash."""
    definition = """
    paramset_idx: smallint
    ---
    -> ProcessingMethod
    paramset_desc: varchar(128)
    param_set_hash: uuid
    unique index (param_set_hash)
    params: longblob # dictionary of all applicable parameters
    """

    @classmethod
    def insert_new_params(cls, processing_method: str, paramset_idx: int, paramset_desc: str, params: dict):
        """Insert a new parameter set unless an identical one already exists.

        If the same `params` content (by hash) already exists under the same
        `paramset_idx`, do nothing; if it exists under a different index, raise
        DataJointError rather than storing a duplicate.
        """
        param_dict = {'processing_method': processing_method,
                      'paramset_idx': paramset_idx,
                      'paramset_desc': paramset_desc,
                      'params': params,
                      'param_set_hash': UUID(dict_to_hash(params))}
        q_param = cls & {'param_set_hash': param_dict['param_set_hash']}
        if q_param:  # If the specified param-set already exists
            # BUGFIX: the table has no 'param_set_name' attribute; fetch1 on it
            # raised. Compare the existing paramset_idx instead.
            existing_idx = q_param.fetch1('paramset_idx')
            if existing_idx == paramset_idx:  # same content under the same index: job done
                return
            else:  # same content under a different index: human error, trying to add the same paramset twice
                raise dj.DataJointError(
                    'The specified param-set already exists - paramset_idx: {}'.format(existing_idx))
        else:
            cls.insert1(param_dict)
@schema
class CellCompartment(dj.Lookup):
    """Lookup of cell compartments that can be imaged."""
    definition = """ # cell compartments that can be imaged
    cell_compartment : char(16)
    """
    contents = zip(['axon', 'soma', 'bouton'])
@schema
class MaskType(dj.Lookup):
    """Lookup of possible classifications for a segmented mask."""
    definition = """ # possible classifications for a segmented mask
    mask_type : varchar(16)
    """
    contents = zip(['soma', 'axon', 'dendrite', 'neuropil', 'artefact', 'unknown'])
# ===================================== Trigger a processing routine =====================================
@schema
class ProcessingTask(dj.Manual):
    """Manual pairing of a Scan with a parameter set: one row per processing job."""
    definition = """
    -> Scan
    -> ProcessingParamSet
    """
@schema
class Processing(dj.Computed):
    """Run (or ingest the results of) one ProcessingTask.

    Currently only ingestion of pre-existing Suite2p output is implemented;
    triggering the analysis itself is a TODO (see `make`).
    """
    definition = """
    -> ProcessingTask
    ---
    proc_completion_time : datetime # time of generation of this set of processed, segmented results
    proc_start_time=null : datetime # execution time of this processing task (not available if analysis triggering is NOT required)
    proc_curation_time=null : datetime # time of lastest curation (modification to the file) on this result set
    """

    class ProcessingOutputFile(dj.Part):
        # One row per file produced by this processing run.
        definition = """
        -> master
        -> PhysicalFile
        """

    @staticmethod
    @optional
    def _get_caiman_dir(processing_task_key: dict) -> str:
        """
        Retrieve the CaImAn output directory for a given ProcessingTask
        :param processing_task_key: a dictionary of one ProcessingTask
        :return: a string for full path to the resulting CaImAn output directory
        """
        return None

    @staticmethod
    @optional
    def _get_suite2p_dir(processing_task_key: dict) -> str:
        """
        Retrieve the Suite2p output directory for a given ProcessingTask
        :param processing_task_key: a dictionary of one ProcessingTask
        :return: a string for full path to the resulting Suite2p output directory
        """
        return None

    # Run processing only on Scan with ScanInfo inserted
    @property
    def key_source(self):
        return ProcessingTask & ScanInfo

    def make(self, key):
        # ----
        # trigger suite2p or caiman here
        # ----
        method = (ProcessingParamSet * ProcessingTask & key).fetch1('processing_method')
        if method == 'suite2p':
            # multi-ROI ScanImage data is not supported by this ingestion path
            # NOTE(review): f-string below has no placeholders — presumably a leftover prefix
            if (ScanInfo & key).fetch1('nrois') > 0:
                raise NotImplementedError(f'Suite2p ingestion error - Unable to handle ScanImage multi-ROI scanning mode yet')
            data_dir = pathlib.Path(Processing._get_suite2p_dir(key))
            if data_dir.exists():
                # Output directory already populated: ingest the existing results.
                s2p_loader = suite2p.Suite2p(data_dir)
                key = {**key, 'proc_completion_time': s2p_loader.creation_time, 'proc_curation_time': s2p_loader.curation_time}
                self.insert1(key)
                # Insert file(s)
                # paths are stored relative to the root data directory, POSIX-style
                root = pathlib.Path(PhysicalFile._get_root_data_dir())
                files = data_dir.glob('*') # works for Suite2p, maybe something more file-specific for CaImAn
                files = [pathlib.Path(f).relative_to(root).as_posix() for f in files if f.is_file()]
                PhysicalFile.insert(zip(files), skip_duplicates=True)
                self.ProcessingOutputFile.insert([{**key, 'file_path': f} for f in files], ignore_extra_fields=True)
            else:
                # Analysis triggering not implemented yet; start_time is currently unused.
                start_time = datetime.now()
                # trigger Suite2p here
                # wait for completion, then insert with "completion_time", "start_time", no "curation_time"
                return
        else:
            raise NotImplementedError('Unknown method: {}'.format(method))
# ===================================== Motion Correction =====================================
@schema
class MotionCorrection(dj.Imported):
    """Ingest motion-correction results (rigid and non-rigid) per Processing run."""
    definition = """
    -> Processing
    ---
    -> Channel.proj(mc_channel='channel') # channel used for motion correction in this processing task
    """

    class RigidMotionCorrection(dj.Part):
        definition = """
        -> master
        -> ScanInfo.Field
        ---
        outlier_frames : longblob # mask with true for frames with outlier shifts (already corrected)
        y_shifts : longblob # (pixels) y motion correction shifts
        x_shifts : longblob # (pixels) x motion correction shifts
        y_std : float # (pixels) standard deviation of y shifts
        x_std : float # (pixels) standard deviation of x shifts
        z_drift=null : longblob # z-drift over frame of this Field (plane)
        """

    class NonRigidMotionCorrection(dj.Part):
        """ Piece-wise rigid motion correction - tile the FOV into multiple 2D blocks/patches"""
        definition = """
        -> master
        -> ScanInfo.Field
        ---
        outlier_frames : longblob # mask with true for frames with outlier shifts (already corrected)
        block_height : int # (px)
        block_width : int # (px)
        block_count_y : int # number of blocks tiled in the y direction
        block_count_x : int # number of blocks tiled in the x direction
        z_drift=null : longblob # z-drift over frame of this Field (plane)
        """

    class Block(dj.Part):
        definition = """ # FOV-tiled blocks used for non-rigid motion correction
        -> master.NonRigidMotionCorrection
        block_id : int
        ---
        block_y : longblob # (y_start, y_end) in pixel of this block
        block_x : longblob # (x_start, x_end) in pixel of this block
        y_shifts : longblob # (pixels) y motion correction shifts for every frame
        x_shifts : longblob # (pixels) x motion correction shifts for every frame
        y_std : float # (pixels) standard deviation of y shifts
        x_std : float # (pixels) standard deviation of x shifts
        """

    class Summary(dj.Part):
        definition = """ # summary images for each field and channel after corrections
        -> master
        -> ScanInfo.Field
        ---
        ref_image : longblob # image used as alignment template
        average_image : longblob # mean of registered frames
        correlation_image=null : longblob # correlation map (computed during cell detection)
        max_proj_image=null : longblob # max of registered frames
        """

    def make(self, key):
        """Load suite2p per-plane outputs and insert motion-correction records."""
        method = (ProcessingParamSet * ProcessingTask & key).fetch1('processing_method')
        if method == 'suite2p':
            data_dir = pathlib.Path(Processing._get_suite2p_dir(key))
            s2p_loader = suite2p.Suite2p(data_dir)
            # field (plane) keys sorted by depth so suite2p plane index maps onto them
            field_keys = (ScanInfo.Field & key).fetch('KEY', order_by='field_z')
            align_chn = s2p_loader.planes[0].alignment_channel
            self.insert1({**key, 'mc_channel': align_chn})
            # ---- iterate through all s2p plane outputs ----
            for plane, s2p in s2p_loader.planes.items():
                mc_key = (ScanInfo.Field * ProcessingTask & key & field_keys[plane]).fetch1('KEY')
                # -- rigid motion correction --
                rigid_mc = {'y_shifts': s2p.ops['yoff'],
                            'x_shifts': s2p.ops['xoff'],
                            'y_std': np.nanstd(s2p.ops['yoff']),
                            'x_std': np.nanstd(s2p.ops['xoff']),
                            'outlier_frames': s2p.ops['badframes']}
                self.RigidMotionCorrection.insert1({**mc_key, **rigid_mc})
                # -- non-rigid motion correction --
                if s2p.ops['nonrigid']:
                    nonrigid_mc = {'block_height': s2p.ops['block_size'][0],
                                   'block_width': s2p.ops['block_size'][1],
                                   'block_count_y': s2p.ops['nblocks'][0],
                                   'block_count_x': s2p.ops['nblocks'][1],
                                   'outlier_frames': s2p.ops['badframes']}
                    # BUGFIX: suite2p stores the y block extents in ops['yblock'] and the
                    # x extents in ops['xblock']; they were previously zipped in the
                    # opposite order, storing x extents as block_y and vice versa.
                    nr_blocks = [{**mc_key, 'block_id': b_id,
                                  'block_y': b_y, 'block_x': b_x,
                                  'y_shifts': bshift_y, 'x_shifts': bshift_x,
                                  'y_std': np.nanstd(bshift_y), 'x_std': np.nanstd(bshift_x)}
                                 for b_id, (b_y, b_x, bshift_y, bshift_x)
                                 in enumerate(zip(s2p.ops['yblock'], s2p.ops['xblock'],
                                                  s2p.ops['yoff1'].T, s2p.ops['xoff1'].T))]
                    self.NonRigidMotionCorrection.insert1({**mc_key, **nonrigid_mc})
                    self.Block.insert(nr_blocks)
                # -- summary images --
                img_dict = {'ref_image': s2p.ref_image,
                            'average_image': s2p.mean_image,
                            'correlation_image': s2p.correlation_map,
                            'max_proj_image': s2p.max_proj_image}
                self.Summary.insert1({**mc_key, **img_dict})
        else:
            raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
# ===================================== Segmentation =====================================
@schema
class Segmentation(dj.Computed):
    """Ingest segmentation masks produced by the processing pipeline."""
    definition = """ # Different mask segmentations.
    -> MotionCorrection
    """

    class Mask(dj.Part):
        definition = """ # A mask produced by segmentation.
        -> master
        mask : smallint
        ---
        -> Channel.proj(seg_channel='channel') # channel used for the segmentation
        -> ScanInfo.Field # the field this ROI comes from
        mask_npix : int # number of pixels in ROIs
        mask_center_x : int # center x coordinate in pixels
        mask_center_y : int # center y coordinate in pixels
        mask_xpix : longblob # x coordinates in pixels
        mask_ypix : longblob # y coordinates in pixels
        mask_weights : longblob # weights of the mask at the indices above in column major (Fortran) order
        """

    def make(self, key):
        """Load suite2p per-plane outputs; insert masks and (directly) cell classifications."""
        method = (ProcessingParamSet * ProcessingTask & key).fetch1('processing_method')
        if method == 'suite2p':
            data_dir = pathlib.Path(Processing._get_suite2p_dir(key))
            s2p_loader = suite2p.Suite2p(data_dir)
            # fields sorted by depth so suite2p plane index maps onto them
            field_keys = (ScanInfo.Field & key).fetch('KEY', order_by='field_z')
            # ---- iterate through all s2p plane outputs ----
            masks, cells = [], []
            for plane, s2p in s2p_loader.planes.items():
                seg_key = (ScanInfo.Field * ProcessingTask & key & field_keys[plane]).fetch1('KEY')
                mask_count = len(masks)  # increment mask id from all "plane"
                for mask_idx, (is_cell, cell_prob, mask_stat) in enumerate(zip(s2p.iscell, s2p.cell_prob, s2p.stat)):
                    # suite2p 'med' is (y, x); npix/xpix/ypix/lam come straight from stat
                    masks.append({**seg_key, 'mask': mask_idx + mask_count, 'seg_channel': s2p.segmentation_channel,
                                  'mask_npix': mask_stat['npix'],
                                  'mask_center_x': mask_stat['med'][1],
                                  'mask_center_y': mask_stat['med'][0],
                                  'mask_xpix': mask_stat['xpix'],
                                  'mask_ypix': mask_stat['ypix'],
                                  'mask_weights': mask_stat['lam']})
                    if is_cell:
                        cells.append({**seg_key, 'mask_classification_method': 'suite2p_default_classifier',
                                      'mask': mask_idx + mask_count, 'mask_type': 'soma', 'confidence': cell_prob})
            self.insert1(key)
            self.Mask.insert(masks, ignore_extra_fields=True)
            # MaskClassification is a Computed table with no make(); populate it
            # directly here from the suite2p classifier output.
            if cells:
                MaskClassification.insert1({**key, 'mask_classification_method': 'suite2p_default_classifier'}, allow_direct_insert=True)
                MaskClassification.MaskType.insert(cells, ignore_extra_fields=True, allow_direct_insert=True)
        else:
            raise NotImplementedError('Unknown/unimplemented method: {}'.format(method))
@schema
class MaskClassificationMethod(dj.Lookup):
    """Lookup of mask-classification methods."""
    definition = """
    mask_classification_method: varchar(32)
    """
    contents = zip(['suite2p_default_classifier'])
@schema
class MaskClassification(dj.Computed):
    """Classification of segmented masks.

    No `make` is defined here: rows are inserted directly by
    Segmentation.make with allow_direct_insert=True.
    """
    definition = """
    -> Segmentation
    -> MaskClassificationMethod
    """

    class MaskType(dj.Part):
        # Per-mask classification with a confidence value.
        definition = """
        -> master
        -> Segmentation.Mask
        ---
        -> MaskType
        confidence: float
        """
# ===================================== Activity Trace =====================================
@schema
class Fluorescence(dj.Computed):
definition = """ # fluorescence traces before spike extraction or filtering
-> Segmentation
"""
class Trace(dj.Part):
definition = """
-> master
-> Segmentation.Mask
-> Channel.proj(fluo_channel='channel') # the channel that this trace comes from
---
fluorescence : | |
color=None,
aggregate='count',
x_annot=None,
y_annot=None,
width=None,
height=None,
title=None,
legend='bottom',
pan_zoom=None,
use_container_width=True,
):
"""Calculate and draw a time histogram.
Parameters
----------
data : DataFrame
date: str
Column name to use for the date.
x_unit : str
Vega-Lite time unit to use for the x axis, such as 'seconds', 'minutes', 'hours', 'day' (day
of week), 'date' (day of month), 'week', 'month', 'year'.
See https://vega.github.io/vega-lite/docs/timeunit.html
y_unit : str
Vega-Lite time unit to use for the y axis, such as 'seconds', 'minutes', 'hours', 'day' (day
of week), 'date' (day of month), 'week', 'month', 'year'.
See https://vega.github.io/vega-lite/docs/timeunit.html
color : str or dict or None
Column name to use for chart colors, or Vega-Lite dict for the color encoding.
May be a literal value, like "#223344" or "green".
If using aggregate is not None, this is the column that will be aggregated.
None means the default color will be used, or that the aggregation function does not require
a column.
aggregate : str or None
The Vega-Lite aggregation operation to use for this histogram. Defaults to 'count'.
Common operations are 'count', 'distinct', 'sum', 'mean', 'median', 'max', 'min',
'valid', and 'missing'.
See https://vega.github.io/vega-lite/docs/aggregate.html#ops.
x_annot : dict or list or None
Annotations to draw on top the chart, tied to specific X-axis values.
Can be specified as a dict or a list:
- list style: [x_value_1, x_value_2, ...]
- dict style: {x_value_1: label_1, x_value_2: label_2, ...}
y_annot : dict or list or None
Annotations to draw on top the chart, tied to specific Y-axis values.
Can be specified as a dict or a list:
- list style: [y_value_1, y_value_2, ...]
- dict style: {y_value_1: label_1, y_value_2: label_2, ...}
width : number or None
Chart width in pixels or None for default. See also, use_container_width.
height : number or None
Chart height in pixels, or None for default.
title : str or None
Chart title, or None for no title.
legend : str or None
Legend orientation: 'top', 'left', 'bottom', 'right', etc. See Vega-Lite docs
for more. To hide, use None.
pan_zoom : str or None
Specify the method for panning and zooming the chart, if any. Allowed values:
- 'both': drag canvas to pan, use scroll with mouse to zoom.
- 'pan': drag canvas to pan.
- 'zoom': scroll with mouse to zoom.
- 'minimap': Not supported for histograms.
- None: chart will not be pannable/zoomable.
use_container_width : bool
If True, sets the chart to use all available space. This takes precedence over the width
parameter.
"""
meta = _(
data=data,
width=width,
height=height,
title=title,
)
spec = _(
mark=_(type='rect', tooltip=True),
encoding=_(
x=_(field=date, type='ordinal', timeUnit=x_unit, title=None, axis=_(tickBand='extent')),
y=_(field=date, type='ordinal', timeUnit=y_unit, title=None, axis=_(tickBand='extent')),
color=_clean_encoding(data, color, aggregate=aggregate, legend=legend)
),
selection=_get_selection(pan_zoom),
)
spec = _add_annotations(spec, x_annot, y_annot)
spec.update(meta)
st.vega_lite_chart(spec, use_container_width=use_container_width)
def xy_hist(
    data,
    x,
    y,
    color=None,
    aggregate='count',
    x_bin=True,
    y_bin=True,
    x_annot=None,
    y_annot=None,
    width=None,
    height=None,
    title=None,
    legend='bottom',
    pan_zoom=None,
    use_container_width=True,
):
    """Calculate and draw an x-y (2D) histogram as a binned rect chart.

    Parameters
    ----------
    data : DataFrame
        The table to chart.
    x : str or dict
        Column name for the x axis, or a Vega-Lite dict for the x encoding.
        Altair-style shorthands like "foo:T" (temporal) are supported.
        See https://vega.github.io/vega-lite/docs/encoding.html#position-datum-def
        and https://altair-viz.github.io/user_guide/encoding.html#encoding-data-types.
    y : str or list of str or dict
        Column name for the y axis, or a Vega-Lite dict for the y encoding.
        A list of column names draws several series by melting your
        wide-format table into long format behind the scenes; if your table
        is already long-format, use the color parameter instead.
        Altair-style shorthands are supported as for x.
    color : str or dict or None
        Column name to aggregate for the color channel, a Vega-Lite color
        encoding dict, or a literal value like "#223344" or "green".
        None means the default color, or that the aggregation operation does
        not require a column. Altair-style shorthands are supported.
    aggregate : str or None
        Vega-Lite aggregation operation; defaults to 'count'. Common choices:
        'count', 'distinct', 'sum', 'mean', 'median', 'max', 'min', 'valid',
        'missing'. See https://vega.github.io/vega-lite/docs/aggregate.html#ops.
    x_bin : dict or None
        Custom binning properties for the x axis; None uses the defaults.
        See https://vega.github.io/vega-lite/docs/bin.html#bin-parameters
    y_bin : dict or None
        Custom binning properties for the y axis; None uses the defaults.
        See https://vega.github.io/vega-lite/docs/bin.html#bin-parameters
    x_annot : dict or list or None
        Annotations tied to specific X-axis values, as [x1, x2, ...] or
        {x1: label1, x2: label2, ...}.
    y_annot : dict or list or None
        Annotations tied to specific Y-axis values, same shapes as x_annot.
    width : number or None
        Chart width in pixels, or None for default (see use_container_width).
    height : number or None
        Chart height in pixels, or None for default.
    title : str or None
        Chart title, or None for no title.
    legend : str or None
        Legend orientation ('top', 'left', 'bottom', 'right', ...); None hides it.
    pan_zoom : str or None
        Pan/zoom behavior: 'both', 'pan', 'zoom', or None (no pan/zoom).
        'minimap' is not supported for histograms.
    use_container_width : bool
        If True, the chart uses all available space, overriding width.
    """
    chart_meta = _(
        data=data,
        width=width,
        height=height,
        title=title,
    )
    chart_spec = _(
        mark=_(type='rect', tooltip=True),
        encoding=_(
            x=_clean_encoding(data, x, bin=x_bin),
            y=_clean_encoding(data, y, bin=y_bin),
            color=_clean_encoding(data, color, aggregate=aggregate, legend=legend),
        ),
        selection=_get_selection(pan_zoom),
    )
    # Annotation layers are applied first; the meta keys (data/size/title)
    # are merged onto the resulting spec afterwards, as in the sibling helpers.
    chart_spec = _add_annotations(chart_spec, x_annot, y_annot)
    chart_spec.update(chart_meta)
    st.vega_lite_chart(chart_spec, use_container_width=use_container_width)
def hist(
data,
x,
y=None,
aggregate='count',
bin=None,
x_annot=None,
y_annot=None,
width=None,
height=None,
title=None,
legend='bottom',
pan_zoom=None,
use_container_width=True,
):
"""Calculate and draw a histogram.
Parameters
----------
x : str or dict
Column name to use for the x axis, or Vega-Lite dict for the x encoding.
See https://vega.github.io/vega-lite/docs/encoding.html#position-datum-def.
Also supports Altair-style shorthands, like "foo:T" for temporal. See
https://altair-viz.github.io/user_guide/encoding.html#encoding-data-types.
y : str or dict or None
Column to be aggregated. See aggregate parameter.
None means the aggregation operation does not require a column.
See https://vega.github.io/vega-lite/docs/encoding.html#position-datum-def.
Also supports Altair-style shorthands, like "foo:T" for temporal. See
https://altair-viz.github.io/user_guide/encoding.html#encoding-data-types.
aggregate : str or None
The Vega-Lite aggregation operation to use for this histogram. Defaults to 'count'.
Common operations are 'count', 'distinct', 'sum', 'mean', 'median', 'max', 'min',
'valid', and 'missing'.
See https://vega.github.io/vega-lite/docs/aggregate.html#ops.
bin : dict or None
Allows you to customize the binning properties for the histogram.
If None, uses the default binning properties.
See https://vega.github.io/vega-lite/docs/bin.html#bin-parameters.
x_annot : dict or list or None
Annotations to draw on top the chart, tied to specific X-axis values.
Can be specified as a dict or a list:
- list style: [x_value_1, x_value_2, ...]
- dict style: {x_value_1: label_1, x_value_2: label_2, ...}
y_annot : dict or list or None
Annotations to draw on top the chart, tied to specific Y-axis values.
Can be specified as a dict or a list:
- list style: [y_value_1, y_value_2, ...]
- dict style: {y_value_1: label_1, y_value_2: label_2, ...}
width : number or None
Chart width in pixels or None for default. See also, use_container_width.
height : number or None
Chart height in pixels, or None for default.
title : str or None
Chart title, or None for no title.
legend : str or None
Legend orientation: 'top', 'left', 'bottom', | |
    def __init__(self, plist, blist, belse):
        # Parallel lists: plist[i] is the predicate guarding body blist[i];
        # belse is the else-body (falsy when the statement has no else).
        self.plist = plist
        self.blist = blist
        self.belse = belse
    def visit(self, a):
        """Double-dispatch into visitor `a` (this node is an if-statement)."""
        return a.visitIf(self)
## class TImport(object):
## def __init__(self, vec):
## self.vec = vec
##
## def visit(self, a):
## return a.visitImport(self)
class TRaise(object):
    """AST node for a `raise <expr>` statement."""

    def __init__(self, ex):
        # Expression whose value is raised.
        self.ex = ex

    def visit(self, a):
        """Double-dispatch into visitor `a`."""
        return a.visitRaise(self)
class TGlobal(object):
    """AST node for a `global <name>` declaration."""

    def __init__(self, var):
        # Name being declared global in the enclosing scope.
        self.var = var

    def visit(self, a):
        """Double-dispatch into visitor `a`."""
        return a.visitGlobal(self)
class TReturn(object):
    """AST node for a `return` statement."""

    def __init__(self, retval):
        # Expression for the returned value (may be a None-literal node).
        self.retval = retval

    def visit(self, a):
        """Double-dispatch into visitor `a`."""
        return a.visitReturn(self)
class TPrint(object):
    """AST node for a `print` statement."""

    def __init__(self, f, vec):
        # f: optional target file expression (the `print >> f` form).
        # vec: list of expressions to print.
        self.f = f
        self.vec = vec

    def visit(self, a):
        """Double-dispatch into visitor `a`."""
        return a.visitPrint(self)
class TAssert(object):
    """AST node for an `assert <pred>, <msg>` statement."""

    def __init__(self, pred, msg):
        self.pred = pred  # condition expression
        self.msg = msg    # optional message expression

    def visit(self, a):
        """Double-dispatch into visitor `a`."""
        return a.visitAssert(self)
class TBlock(object):
    """AST node for a suite: an ordered sequence of statements."""

    def __init__(self, vec):
        # Statements, in source order.
        self.vec = vec

    def visit(self, a):
        """Double-dispatch into visitor `a`."""
        return a.visitBlock(self)
#endif
class Compiler(object):
    def __init__(self, parentCompiler, argVars, localVars, globalOverrides, tclass, isDunderInit):
        """Per-function compiler state.

        argVars: ordered parameter names; localVars: names assigned in the
        body; globalOverrides: names declared `global`; tclass: enclosing
        class node when compiling a method (else falsy); isDunderInit: True
        when compiling a class's __init__.
        """
        self.parentCompiler = parentCompiler
        self.tclass = tclass
        self.isDunderInit = isDunderInit
        ## argVars = [] if argVars is None else argVars
        ## localVars = set() if localVars is None else localVars
        self.argVars = argVars
        # Locals exclude params and globals; sorted for deterministic slots.
        self.localVars = sorted(localVars - set(argVars) - set(globalOverrides))
        print >> E, 'Compiler init:', 'parent', parentCompiler, 'argVars', self.argVars, 'localVars', self.localVars, 'globalOverrides', globalOverrides, 'tclass', tclass, 'isDunderInit', isDunderInit
        # Six header bytes; slots BC_NUM_ARGS/LOCALS/TEMPS get filled in by
        # OpList2Bytecodes. NOTE(review): the 255 slots look reserved -- confirm.
        self.ops = [0, 0, 0, 255, 255, 255]
        self.interns = {}  # s -> (intern index, patch offsets)
        self.globals = {}  # name -> (global slot, patch offsets)
        self.classes = {}
        self.funcs = {}
        self.tempVars = [] # not used yet
        self.continue_to = None
        self.break_patches = None
        self.try_blocks_pending = 0
def AddIntern(self, s):
## self.interns :: s -> (i, patches)
if is_in(s, self.interns):
i, patches = self.interns[s]
return i
z = len(self.interns)
self.interns[s] = (z, [])
return z
def PatchIntern(self, s, patch):
self.AddIntern(s)
i, patches = self.interns[s]
patches.append(patch)
return i
def AddGlobal(self, name):
## Add with intern number.
## self.globals :: name -> (j, patches)
self.AddIntern(name) # Will need it at Output time.
if is_in(name, self.globals):
j, patches = self.globals[name]
return j
z = len(self.globals)
self.globals[name] = (z, [])
return z
def PatchGlobal(self, name, patch):
j, patches = self.globals[name]
patches.append(patch)
return j
    def visitExprAndDrop(self, t):
        """Compile an expression statement: evaluate it, then discard the value."""
        t.x.visit(self)
        self.ops.append('Drop')
    def visitAssign(self, t):
        """Compile `x = y`: push the RHS value, then store into the LHS target."""
        t.y.visit(self)
        self.assignTo(t.x)
def assignToIdent(self, a):
var = a.x
if var=='_':
self.ops.append('Drop')
elif is_in(var, self.argVars):
self.ops.append('ArgPut')
self.ops.append(self.argVars.index(var))
elif is_in(var, self.localVars):
self.ops.append('LocalPut')
self.ops.append(self.localVars.index(var))
else:
self.AddGlobal(var)
self.ops.append('GlobalPut')
g = self.PatchGlobal(var, len(self.ops))
self.ops.append(g)
def assignToMember(self, a):
if type(a.x) == TIdent and a.x.x == 'self' and self.tclass:
self.ops.append('SelfMemberPut')
self.ops.append(sorted(self.tclass.fields).index(a.member))
else:
a.x.visit(self)
self.ops.append('MemberPut')
isn = self.PatchIntern(a.member, len(self.ops))
self.ops.append(isn)
def assignTo(self, a):
if type(a) is TIdent:
self.assignToIdent(a)
elif type(a) is TGetItem:
a.coll.visit(self)
a.key.visit(self)
self.ops.append('PutItem')
elif type(a) is TMember:
self.assignToMember(a)
elif type(a) is TTuple:
self.ops.append('Explode')
self.ops.append(len(a.vec))
for b in a.vec:
self.assignTo(b)
else:
raise Exception('assignTo: bad lhs: %s' % a)
def visitFunCall(self, t):
## fn, xlist
for x in reversed(t.xlist): # Iterate in reverse.
x.visit(self)
if type(t.fn) == TMember:
t.fn.x.visit(self)
self.ops.append('CallMeth')
isn = self.PatchIntern(t.fn.member, len(self.ops))
self.ops.append(isn)
self.ops.append(len(t.xlist) + 1) # +1 for self.
else:
self.visitFunCallMore(t)
def visitFunCallMore(self, t):
if type(t.fn) == TIdent and t.fn.x == 'len' and len(t.xlist) == 1:
self.ops.append('Len')
elif type(t.fn) == TIdent and t.fn.x == 'chr' and len(t.xlist) == 1:
self.ops.append('Chr')
elif type(t.fn) == TIdent and t.fn.x == 'ord' and len(t.xlist) == 1:
self.ops.append('Ord')
elif type(t.fn) == TIdent and t.fn.x == 'range' and len(t.xlist) == 1:
self.ops.append('Range')
elif type(t.fn) == TIdent and t.fn.x == 'int' and len(t.xlist) == 1:
self.ops.append('Int')
elif type(t.fn) == TIdent and t.fn.x == 'str' and len(t.xlist) == 1:
self.ops.append('Str')
elif type(t.fn) == TIdent and t.fn.x == 'repr' and len(t.xlist) == 1:
self.ops.append('Repr')
elif type(t.fn) == TIdent and t.fn.x == 'sorted' and len(t.xlist) == 1:
self.ops.append('Sorted')
elif type(t.fn) == TIdent and t.fn.x == 'ogc' and len(t.xlist) == 0:
self.ops.append('GC')
elif type(t.fn) == TIdent and t.fn.x == 'shell' and len(t.xlist) == 0:
self.ops.append('ForkShellAndWait')
else:
self.visitFunCallMoreMore(t)
def visitFunCallMoreMore(self, t):
if type(t.fn) == TIdent and t.fn.x == 'open' and len(t.xlist) == 2:
self.ops.append('Open')
else:
t.fn.visit(self)
self.ops.append('Call')
self.ops.append(len(t.xlist))
    def visitIf(self, t):
        """Compile an if/elif/else chain with forward branch patching.

        Per (predicate, body) pair: test the predicate, skip the body when
        false, and after a taken body branch to the common end label.
        """
        endmarks = []
        for p, b in zip(t.plist, t.blist):
            pass ### TODO -- optimize
            p.visit(self)
            self.ops.append('BranchIfFalse')
            skipmark = len(self.ops)  # operand slot: patched to next clause
            self.ops.append(0)
            b.visit(self)
            self.ops.append('Branch')
            endmarks.append(len(self.ops))  # patched to the chain's end
            self.ops.append(0)
            self.ops[skipmark] = len(self.ops)
        if t.belse:
            t.belse.visit(self)
        end = len(self.ops)
        for m in endmarks:
            self.ops[m] = end
    def visitAndOr(self, t):
        """Compile short-circuit and/or.

        The short-circuit result is pushed first; any operand that triggers
        the short-circuit branches past the final Not, leaving that value on
        the stack. Falling through every operand inverts it instead.
        """
        patches = []
        ## Push the short-circuit value: True for or, False for and.
        self.ops.append('LitInt')
        self.ops.append(1 if t.op=='or' else 0)
        for e in t.vec:
            e.visit(self)
            self.ops.append('BranchIfTrue' if t.op=='or' else 'BranchIfFalse')
            patches.append(len(self.ops)) # request patching.
            self.ops.append(0) # to be patched.
        ## Invert the short-circuit value, if we reach the end.
        self.ops.append('Not')
        ## Patch short-circuit branches to here at the end.
        for p in patches:
            self.ops[p] = len(self.ops)
    def visitCond(self, t):
        """Compile `yes if cond else no` using two forward branch patches."""
        t.cond.visit(self)
        self.ops.append('BranchIfFalse')
        patch_to_no = len(self.ops)  # jump target: the `no` arm
        self.ops.append(0)
        t.yes.visit(self)
        self.ops.append('Branch')
        patch_to_end = len(self.ops)  # jump target: past the `no` arm
        self.ops.append(0)
        self.ops[patch_to_no] = len(self.ops)
        t.no.visit(self)
        self.ops[patch_to_end] = len(self.ops)
def visitUnaryOp(self, t):
t.x.visit(self)
if t.op == 'not':
self.ops.append('Not')
elif t.op == '-':
self.ops.append('Negate')
else:
raise Exception('bad_unary: %s' % t.op)
def visitBinaryOp(self, t):
t.x.visit(self)
t.y.visit(self)
if t.op == '+':
self.ops.append('Plus')
elif t.op == '-':
self.ops.append('Minus')
elif t.op == '*':
self.ops.append('Times')
elif t.op == '%':
self.ops.append('Mod')
elif t.op == '/':
self.ops.append('Div')
elif t.op == '==':
self.ops.append('EQ')
elif t.op == '!=':
self.ops.append('NE')
elif t.op == '<':
self.ops.append('LT')
elif t.op == '>':
self.ops.append('GT')
elif t.op == '<=':
self.ops.append('LE')
elif t.op == '>=':
self.ops.append('GE')
else:
self.visitBinaryOpMore(t)
def visitBinaryOpMore(self, t):
if t.op == 'is':
self.ops.append('Is')
elif t.op == '<<':
self.ops.append('ShiftLeft')
elif t.op == '>>':
self.ops.append('ShiftRight')
elif t.op == '&':
self.ops.append('BitAnd')
elif t.op == '|':
self.ops.append('BitOr')
elif t.op == '^':
self.ops.append('BitXor')
else:
raise Exception('visitBinaryOp: bad %s' % t.op)
    def visitMember(self, t):
        """Compile attribute access, with fast paths for sys.* and self.*."""
        if type(t.x) is TIdent and t.x.x == 'sys':
            # The three sys streams compile to dedicated opcodes.
            if t.member == 'stdin':
                self.ops.append('SpecialStdin')
            elif t.member == 'stdout':
                self.ops.append('SpecialStdout')
            elif t.member == 'stderr':
                self.ops.append('SpecialStderr')
            else:
                raise Exception('bad_sys')
        elif type(t.x) is TIdent and t.x.x == 'self' and self.tclass:
            print >>E, 'C FF', self.tclass.fields
            print >>E, 'M', t.member
            self.ops.append('SelfMemberGet')
            # NOTE(review): the get index is doubled here, but assignToMember's
            # SelfMemberPut index is not -- confirm the operand encodings differ.
            self.ops.append(sorted(self.tclass.fields).index(t.member)*2)
        else:
            t.x.visit(self)
            self.ops.append('MemberGet')
            # Operand slot is patched with the interned member name.
            isn = self.PatchIntern(t.member, len(self.ops))
            self.ops.append(isn)
    def visitGetItem(self, t):
        """Compile `coll[key]`: push collection, then key, then the index op."""
        t.coll.visit(self)
        t.key.visit(self)
        self.ops.append('GetItem')
def visitTuple(self, t):
for e in t.vec:
e.visit(self)
self.ops.append('NewTuple')
self.ops.append(len(t.vec))
def visitList(self, t):
for e in t.vec:
e.visit(self)
self.ops.append('NewList')
self.ops.append(len(t.vec))
def visitDict(self, t):
for k, v in t.dic:
k.visit(self)
v.visit(self)
self.ops.append('NewDict')
self.ops.append(len(t.dic))
def visitIdent(self, t):
var = t.x
if type(var) is str:
if is_in(var, self.argVars):
self.ops.append('ArgGet')
self.ops.append(self.argVars.index(var))
elif is_in(var, self.localVars):
self.ops.append('LocalGet')
self.ops.append(self.localVars.index(var))
else:
self.AddGlobal(var)
self.ops.append('GlobalGet')
g = self.PatchGlobal(var, len(self.ops))
self.ops.append(g)
else:
raise Exception('visitIdent: bad var: %s %s' % (type(var), var))
    def visitStr(self, t):
        """Push a string literal via its intern-table index (patched later)."""
        self.ops.append('LitStr')
        isn = self.PatchIntern(t.x, len(self.ops))
        self.ops.append(isn)
def visitSpecial(self, t):
if t.x=='True':
self.ops.append('SpecialTrue')
elif t.x=='False':
self.ops.append('SpecialFalse')
elif t.x=='None':
self.ops.append('SpecialNone')
## elif t.x=='Stdin':
## self.ops.append('SpecialStdin')
## elif t.x=='Stdout':
## self.ops.append('SpecialStdout')
## elif t.x=='Stderr':
## self.ops.append('SpecialStderr')
else:
raise t
def visitInt(self, t):
if 0 <= t.x and t.x <= 255:
self.ops.append('LitInt')
self.ops.append(255 & t.x)
else:
hi, lo = 255&(t.x>>8), 255&t.x
self.ops.append('LitInt2')
self.ops.append(hi)
self.ops.append(lo)
    def OpList2Bytecodes(self, ops):
        """Fill in the header slots and flatten the op list into bytes.

        `ops` mixes ints (operand bytes), 1-char strings (raw bytes), and
        opcode names (looked up in BytecodeNumbers). Returns a list of
        1-char strings.
        """
        ops[BC_NUM_ARGS] = len(self.argVars)
        ops[BC_NUM_LOCALS] = len(self.localVars)
        ops[BC_NUM_TEMPS] = len(self.tempVars)
        z = []
        for x in ops:
            if type(x) is int:
                z.append(chr(255 & x))
            elif type(x) is str and len(x) == 1:
                z.append(x)
            elif type(x) is str:
                z.append(chr(BytecodeNumbers[x]))
            else:
                raise Exception('bad item: %s %s' % (type(x), x))
        # NOTE(review): 250 looks like a target-VM size limit -- confirm;
        # overlong code is only warned about, not rejected.
        if len(z) > 250:
            print >>E, 'WARNING: Bytecodes too long: %d' % len(z)
        return z
    def OutputCodePack(self, w):
        """Serialize this compiled function (and nested funcs/classes) to w.

        Order matters: bytecode, interns, globals, then nested packs.
        """
        print >>E, '(((((((((('
        P.put_str(w, T.CodePack_bytecode, self.OpList2Bytecodes(self.ops))
        i_strs = self.OutputInterns(w)
        self.OutputGlobals(w)
        self.OutputFuncPacks(w, i_strs)
        self.OutputClassPacks(w, i_strs)
        P.put_finish_message(w)
        print >>E, '))))))))))'
def OutputInterns(self, w):
## sort by interns by number i, and write to protobuf in that order.
i_vec = []
for s, (i, patches) in self.interns.items():
i_vec.append((i, s, patches))
i_vec = sorted(i_vec) # Sorted by i
i_strs = []
for i, s, patches in i_vec:
assert i == len(i_strs)
i_strs.append(s)
P.put_start_message(w, T.CodePack_interns)
P.put_str(w, T.InternPack_s, s)
for e in patches:
P.put_int(w, T.InternPack_patch, e)
P.put_finish_message(w)
return i_strs
def OutputGlobals(self, w):
## sort by globals by number j, and write to protobuf in that order.
g_vec = []
for name, (j, patches) in self.globals.items():
g_vec.append((j, name, patches))
for j, name, patches in sorted(g_vec):
P.put_start_message(w, T.CodePack_globals)
P.put_int(w, T.GlobalPack_name_i, self.AddIntern(name))
for e in patches:
P.put_int(w, T.GlobalPack_patch, e)
P.put_finish_message(w)
def OutputFuncPacks(self, w, i_strs):
## funcpacks
for name, fc in sorted(self.funcs.items()):
print >>E, 'Func Pack: ((((( name=', name
P.put_start_message(w, T.CodePack_funcpacks)
P.put_int(w, T.FuncPack_name_i, i_strs.index(name))
P.put_start_message(w, T.FuncPack_pack)
fc.OutputCodePack(w)
P.put_finish_message(w) # finish CodePack_funcpacks
print >>E, 'Func Pack: | |
<reponame>PoporoDev/poporo<filename>tests/functional_tests/mining_pop.py
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
import time
import threading
from decimal import Decimal
from queue import Queue
from test_framework.test_framework import BitMoneroTestFramework
from test_framework.messages import COIN
from test_framework.monero_node import Daemon, Wallet
from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until, BidCalculator, set_node_times, \
sync_blocks, pvk_from_address
def dec_bid_amount_when_grow_old(bids, which_btc_height):
    '''
    Simulate each bid's effective amount changing as its block ages.

    :param bids: list of bid dicts; each entry's 'amount' is adjusted in place
    :param which_btc_height: BTC height at which the bids are being evaluated
    :return: None (mutates `bids`)

    NOTE(review): despite the "dec" in the name, the amount is *increased* by
    (120 - height) * COIN * 10 -- confirm the aging formula against the daemon.
    '''
    for bid in bids:
        bid['amount'] = bid['amount'] + (120 - which_btc_height) * COIN * 10
def gcb(node):
    # Generate-callback hook. The early return deliberately disables the
    # debug dump below; remove it to re-enable sync-info logging.
    return
    print(node.daemon.sync_info())
    # time.sleep(1)
class BidThread(threading.Thread):
    """Background worker: floods 20 bids, then waits for the main thread to
    hand over a mock timestamp (via the queue) and mines one block on node1."""
    def __init__(self, node, node1, q, pkv, pkv1, miner_sec_key, miner_sec_key1,info):
        threading.Thread.__init__(self)
        # create a new connection to the node, we can't use the same
        # connection from two threads
        # self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
        # self.node1 = get_rpc_proxy(node1.url, 1, timeout=600, coveragedir=node1.coverage_dir)
        self.node = Daemon(idx=node.index)
        self.node1 = Daemon(idx=node1.index)
        self.wallet = Wallet(idx=10)
        self.q = q  # delivers the mock timestamp from the main thread
        self.pkv = pkv
        self.pkv1 = pkv1
        self.miner0_sec_key = miner_sec_key
        self.miner1_sec_key = miner_sec_key1
        self.info = info  # logging callable supplied by the test
    def run(self):
        # Place 20 bids at the current height, half a second apart.
        for i in range(20):
            height = self.node.get_height()['height']
            self.node.bid('100', height, self.pkv)
            time.sleep(0.5)
        print('all bid is done.wait for reach height')
        mock_time = self.q.get()  # blocks until the main thread is ready
        for i in range(30, 1, -1):
            print('node1 will generate in {} seconds'.format(i))
            time.sleep(1)
        print('thread wakeup! height is {}'.format(self.node1.get_height()['height']))
        # set_node_times([self.node,self.node1],mock_time)
        self.node.setmocktime(mock_time)
        self.node1.setmocktime(mock_time)
        self.info('thead mocktime:{}'.format(mock_time))
        # Try wallets 10..19 until one of them succeeds in mining a block.
        for wallet_id in range(10,20):
            w = Wallet(idx=wallet_id)
            addr = w.get_address()['address']
            prikey = w.query_key('view_key').key
            print('addr:',addr,'prikey:',prikey)
            try:
                print(self.node1.generateblocks(addr, miner_sec_key=prikey))
                break
            except Exception as e:
                print('error:{}'.format(e))
                continue
        for i in range(50, 1, -1):
            print('block will be create in {} seconds'.format(i))
            time.sleep(1)
        # set_node_times([self.node, self.node1], mock_time + 1000)
        time.sleep(5)
        print('thread generate done')
class PopMiningleTest(BitMoneroTestFramework):
    def set_test_params(self):
        """Configure the harness: 2 BTC nodes with the legacy generate RPC,
        2 Monero nodes forking to PoP at height 1000 (bids start at BTC 203)."""
        super(PopMiningleTest, self).set_test_params()
        self.setup_clean_chain = False
        self.ltcbidstart = 280  # XMR height to pre-mine to before bidding
        # Presumably maps an XMR height to the BTC height whose bids apply
        # (popforkheight=1000, btcbidstart=203) -- TODO confirm.
        self.calc_bid = BidCalculator(1000, 203)
        self.extra_args = [["-deprecatedrpc=generate"], ["-deprecatedrpc=generate"]]
        self.num_wallets = [10, 10]
        self.xmr_rpctimeout = 60 * 10  # mining RPCs here can be very slow
        self.monero_extra_args = [
            ['--popforkheight', '1000', '--btcbidstart', '203', '--out-peers', '1024',
             '--block-sync-size', '1', '--db-sync-mode', 'safe:sync', '--reorg-notify',
             '/usr/bin/python3 /home/weigun/reorg.py %s %h %d %n'],
            ['--popforkheight', '1000', '--btcbidstart', '203', '--out-peers', '1024',
             '--block-sync-size', '1', '--db-sync-mode', 'safe:sync', '--reorg-notify',
             '/usr/bin/python3 /home/weigun/reorg.py %s %h %d %n']]
    def add_options(self, parser):
        """Register --startbidnum (default 100).

        NOTE(review): the default is the int 100, but a CLI-supplied value
        arrives as a str, so `start_bid_num == 100` in test_for_amount only
        matches the default -- confirm that is intended.
        """
        super(PopMiningleTest, self).add_options(parser)
        parser.add_argument("--startbidnum", dest="start_bid_num", default=100, help="set start bid num")
    def run_test(self):
        """Scenario driver: fund wallets, exercise bid validation, mine
        through the PoP fork, then run the mining/fork/transfer sub-tests."""
        # self.xnode1 = self.xnode0
        # self.xmrnodes[1] = self.xnode0
        for n in self.xmrnodes:
            print(n.getblockcount())
        self.prepare()
        keys = self.xnode0.wallet.query_key('view_key')
        balance = self.node0.getbalance()
        xmr_balance = self.xnode0.wallet.get_balance()
        print(balance, xmr_balance)
        for n in (self.nodes + self.xmrnodes):
            print(n.getblockcount(), n.getblockhash(1), n.getblockhash(2))
        self.test_for_amount()
        # return
        self.test_for_blockheight()
        for i in range(200,204):
            print('>>>>>>>>>>>>>>{}:'.format(i),
                  self.node0.getbid(i, 'myV5V3oQWJgzzxwp5suwSJM7HgU1zfsmRj', 0))
        self.log.info('before cur xmr height:{},{}'.format(self.xnode0.getblockcount(), self.xnode1.getblockcount()))
        self.combi_generate(800 - self.xnode0.getblockcount())  # mine toward popforkheight
        self.log.info('cur xmr height:{},{}'.format(self.xnode0.getblockcount(), self.xnode1.getblockcount()))
        self.log.info('cur xmr besthash:{},{}'.format(self.xnode0.getbestblockhash(), self.xnode1.getbestblockhash()))
        # time.sleep(120)
        need_blocks = 1000 - self.xnode0.getblockcount()
        self.combi_generate(need_blocks, 1, 1, callback=lambda: gcb(self.xnode0))  # mine up to popforkheight
        # return
        self.log.info('cur xmr height:{},{}'.format(self.xnode0.getblockcount(), self.xnode1.getblockcount()))
        self.log.info('cur xmr besthash:{},{}'.format(self.xnode0.getbestblockhash(), self.xnode1.getbestblockhash()))
        self.test_block_will_generate_by_max_bid()
        self.test_max_bid_growing_old_can_still_mining()
        self.log.info('start test transfer')
        # self.test_transfer()
        self.test_break_generate_when_recv_new_block()
        self.log.info('start test fork without btc fork')
        self.test_fork()
        self.log.info('start test fork with btc fork')
        self.test_fork(True)
        self.log.info('start test transfer')
        self.test_transfer()
        self.test_generate_unit_1w()
    def prepare(self):
        '''
        Mine up to the predetermined start height and spread funds across
        all twenty test wallets, verifying each ends with a balance.
        '''
        blocks = self.ltcbidstart - self.xnode0.getblockcount()
        self.log.info('xmr prepare {} blocks'.format(blocks))
        self.miner0 = self.xnode0.getnewaddress()
        self.miner1 = self.xnode1.getnewaddress()
        print('pkv:', pvk_from_address(self.miner0), self.miner0)
        print('pkv1:', pvk_from_address(self.miner1), self.miner1)
        # (disabled) first mine one block with each wallet
        # for n in self.xmrnodes:
        #     for i in range(10):
        #         assert 'error' not in n.generate(1,wallet_index=i)
        blocks = self.ltcbidstart - self.xnode0.getblockcount()
        self.xnode0.generate(blocks - 1, self.miner0)
        self.xnode0.wallet.refresh()
        # Spread XMR across the 20 test wallets.
        reward = int(self.xnode0.getbalance() / 100)
        assert reward > 0
        print('hard_fork_info:', self.xnode0.daemon.hard_fork_info())
        self.log.info('before send,total balance:{}'.format(self.xnode0.getbalance()))
        for i in range(1, 20):
            if i < 10:
                addr = self.xnode0.getnewaddress(i)
            else:
                addr = self.xnode1.getnewaddress(i - 10)
            dst = {
                'address': addr,
                'amount': 10000, }
            self.log.info('send {} to wallet{},addr {}'.format(reward, i, addr))
            self.log.info('remain balance:{}'.format(self.xnode0.getbalance()))
            result = self.xnode0.wallet0.transfer([dst])
            print(result)
            self.sync_all([self.xmrnodes])
            # print(self.xnode1.wallet9.get_transfer_by_txid(result.tx_hash))
            self.xnode1.generate(1)
            self.sync_all([self.xmrnodes])
            # print(self.xnode1.wallet9.get_transfer_by_txid(result.tx_hash))
        # must be refresh tx so the balance can be update
        # check balance
        for node_index in range(2):
            n = self.xmrnodes[node_index]
            # print('hard_fork_info:',n.daemon.hard_fork_info())
            for wallet_index in range(10):
                wallet = getattr(n, 'wallet{}'.format(wallet_index))
                refresh_ret = wallet.refresh()
                self.log.info('wallet{} check balance...'.format(wallet_index))
                def must_has_balance():
                    return n.getbalance(wallet_index=wallet_index) > 0
                wait_until(must_has_balance, timeout=30)
        for n in self.nodes:
            self.sync_all()
            n.generate(1)
        print(self.xnode0.getblockcount())
def test_for_amount(self):
node = self.xnode0
bnode = self.node0
height = node.getblockcount()
pkv = pvk_from_address(self.miner0)
pkv1 = pvk_from_address(self.miner1)
node.daemon.bid('0', height, pkv).assert_message('Invalid amount for send')
node.daemon.bid('-1', height, pkv).assert_message('Amount out of range')
node.daemon.bid(1, height, pkv).assert_message('json not found')
node.daemon.bid(str(1e512), height, pkv).assert_message('Parse error')
node.daemon.bid('0.00099999', height, pkv).assert_message('Transaction amount too small')
node.daemon.bid(str(1e100), height, pkv).assert_message('Invalid amount')
bid_num = 100 if self.options.start_bid_num == 100 else 4
self.log.info('start bid num:{}'.format(bid_num))
for i in range(bid_num - 1):
if bid_num == 4:
if i < 2:
ret = self.xnode1.daemon.bid(str(20 - i), height, pkv1)
else:
ret = self.xnode0.daemon.bid('5', height, pkv)
else:
if i < 20:
if i < 10:
addr = self.xnode0.getnewaddress(wallet_index=i)
tmp_pkv = pvk_from_address(addr)
# print(i, addr, tmp_pkv)
if i == 8:
ret = self.xnode0.daemon.bid(str(16.5), height, tmp_pkv)
elif i == 9:
ret = self.xnode0.daemon.bid(str(15.5), height, tmp_pkv)
else:
ret = self.xnode0.daemon.bid(str(20 - i), height, tmp_pkv)
else:
addr = self.xnode1.getnewaddress(wallet_index=i - 10)
tmp_pkv = pvk_from_address(addr)
if i == 11:
ret = self.xnode1.daemon.bid(str(17.5), height, tmp_pkv)
elif i == 12:
ret = self.xnode1.daemon.bid(str(18.5), height, tmp_pkv)
else:
ret = self.xnode1.daemon.bid(str(20 - i), height, tmp_pkv)
else:
print(i, self.miner0, pkv)
ret = self.xnode0.daemon.bid('5', height, pkv)
print(i, addr, tmp_pkv)
assert ret.data.result is not None
assert self.xnode1.daemon.bid('1000', height, pkv1).data.result is not None
self.sync_all()
# make sure two transactions are in mempool and can be packaged to block
assert_equal(len(self.node0.getrawmempool()), len(self.node1.getrawmempool()))
self.node0.generate(1)
self.sync_all()
btc_bid_height = self.node0.getblockcount()
self.log.info('xmr height {},bid at btc {} succ'.format(self.xnode0.getblockcount(), btc_bid_height))
bids = self.node1.getbid(btc_bid_height, 'myV5V3oQWJgzzxwp5suwSJM7HgU1zfsmRj', 0)
assert_equal(len(bids['bids']), bid_num)
self.sync_all()
assert_equal(len(self.node0.getrawmempool()), 0)
assert_equal(len(self.node1.getrawmempool()), 0)
assert_equal(self.node0.getblockcount(), self.node1.getblockcount())
# check balance
self.log.info('check balance...')
for n in self.xmrnodes:
for i in range(10):
print(n.getbalance(wallet_index=i))
    def test_for_blockheight(self):
        """Bid with nonsensical heights: negative and zero are tolerated
        (just printed), non-integer heights are rejected as 'json not found'."""
        node = self.xnode0
        bnode = self.node0
        pkv = pvk_from_address(self.miner0)
        pkv1 = pvk_from_address(self.miner1)
        print(node.daemon.bid('1', -1, pkv))
        print(node.daemon.bid('1', 0, pkv))
        node.daemon.bid('1', '100.236', pkv).assert_message('json not found')
        node.daemon.bid('1', 'abc', pkv).assert_message('json not found')
        node.daemon.bid('1', str(1000000000000000000000000000000000000000), pkv).assert_message('json not found')
    def test_generate_unit_1w(self):
        """Mining should keep working indefinitely; extend the chain by 100."""
        self.combi_generate(100)  # should be able to keep mining forever
    def test_block_will_generate_by_max_bid(self):
        """The next XMR block must be minted by the highest bid, and the same
        address may not mine two blocks in a row."""
        node0_total_balance = self.xnode0.getbalance()
        node1_total_balance = self.xnode1.getbalance()
        self.generate(1, 1)
        try:
            self.generate(1, 1)
        except AssertionError:
            self.log.info('node1 generate should failed,because of can not repeat mining in same address')
        self.sync_all([self.xmrnodes])
        # node0 should mining over 2min because bid is not the max
        # ming 101 blocks make 1002 can spend
        print(self.node0.getblockcount())
        # self.generate(0, 1)
        self.generate2(0, 1)
        pkv = pvk_from_address(self.miner0)
        pkv1 = pvk_from_address(self.miner1)
        # Spread XMR across the 20 test wallets.
        print('hard_fork_info:', self.xnode0.daemon.hard_fork_info())
        self.log.info('before send,total balance:{}'.format(self.xnode0.getbalance()))
        for i in range(1, 20):
            if i < 10:
                addr = self.xnode0.getnewaddress(i)
            else:
                addr = self.xnode1.getnewaddress(i - 10)
            dst = {
                'address': addr,
                'amount': 100, }
            self.log.info('remain balance:{}'.format(self.xnode0.getbalance()))
            print(self.xnode0.wallet0.transfer([dst]))
        def callback():
            # One-shot closure: at BTC height 347 place an extra bid and
            # mine one BTC block, exactly once.
            once = False
            def warp():
                nonlocal once
                if not once:
                    print('------------', self.calc_bid(self.xnode0.getblockcount()), self.node0.getblockcount())
                    if self.node0.getblockcount() == 347:#347:318
                        print('once')
                        self.xnode0.daemon.bid('5', self.xnode0.getblockcount(), pkv)
                        self.sync_all([self.xmrnodes])
                        self.sync_all()
                        self.generate(0, 1, bitchain=True)
                        self.sync_all()
                        once = True
            return warp
        self.combi_generate(100, 1, callback=callback())
        # print('diff:', self.xnode0.getbalance() - node0_total_balance)
        # assert self.xnode0.getbalance() > node0_total_balance
    def test_max_bid_growing_old_can_still_mining(self):
        """An aged maximum bid must still be able to win block production
        after its amount decays relative to younger bids."""
        ltc_height = self.xnode0.getblockcount()
        btc_height = self.node0.getblockcount()
        should_get_bid_height = self.calc_bid(ltc_height)
        print(ltc_height, btc_height, should_get_bid_height)
        pkv = pvk_from_address(self.miner0)
        pkv1 = pvk_from_address(self.miner1)
        # Pre-plant 19 bids at the current height.
        for i in range(19):
            i = i - 10 if i > 9 else i
            addr = self.xnode1.getnewaddress(wallet_index=i)
            tmp_pkv = pvk_from_address(addr)
            self.xnode1.daemon.bid(str(max(5 - i, 0.1)), ltc_height, tmp_pkv)
        self.sync_all([self.xmrnodes])
        self.sync_all()
        self.generate(0, 10, bitchain=True) # avoid btc blocks not enough,then header not accept
        self.sync_all()
        assert_equal(len(self.node1.getrawmempool()), 0)
        bid_data_height = btc_height + 1
        bids = self.get_bid_info(bid_data_height)
        assert_equal(len(bids), 19)
        # After planting, raise the XMR height until
        # self.calc_bid(ltc_height) == bid_data_height.
        # Must subtract here: when searching back for bid blocks, once a block
        # is found the search goes back at most 120 blocks, so fewer than 20
        # bids may be visible.
        blocks = 1600 - self.xnode0.getblockcount() - 1
        print('need to generate {} blocks to 1600'.format(blocks))
        self.combi_generate(blocks, node_index=1) # height 1599 cannot be mined
        blocks = (bid_data_height - self.calc_bid(self.xnode0.getblockcount())) * 5 - 5
        print('need to generate {} blocks to reach bid height {}'.format(blocks, bid_data_height))
        self.combi_generate(blocks,node_index=0)
        bid_319 = self.get_bid_info(348) #348 319
        print(bid_319)
        self.log.info('319 or 348 bid info:{}'.format(bid_319))
        # Apply the aging decay to the planted bids' amounts.
        dec_bid_amount_when_grow_old(bids, self.calc_bid(self.xnode0.getblockcount() + 1))
        dec_bid_amount_when_grow_old(bid_319, 348) #348 319
        print(bid_319)
        # Merge the bid sets that will compete for the next block.
        bids.extend(bid_319)
        assert_equal(len(bids), 20)
        sort_bids = sorted(bids, key=lambda x: x['amount'], reverse=True)
        self.log.info('bids after sorted')
        for i in sort_bids:
            print('>> ',i)
        # Only node1 can mine here: the previous block's producer was a node0
        # address (largest bid, still largest after decay), and one address
        # may not produce two consecutive blocks.
        while self.calc_bid(self.xnode0.getblockcount() + 1) < 340:
            self.log.info('not enought 340,xmr generate.xnode0 height {}'.format(self.xnode0.getblockcount() ))
            self.generate(0,1)
        self.generate2(1, 1)
        self.generate2(1, 1) # consecutive blocks are fine: node1 holds several bids
        self.generate(0, 1) # node0 may now produce a block again
        print(self.xnode0.getblockcount(), self.node0.getblockcount(), self.calc_bid(self.xnode0.getblockcount()))
def test_break_generate_when_recv_new_block(self):
| |
black_glazed_terracotta
* white_concrete
* orange_concrete
* magenta_concrete
* light_blue_concrete
* yellow_concrete
* lime_concrete
* pink_concrete
* gray_concrete
* light_gray_concrete
* cyan_concrete
* purple_concrete
* blue_concrete
* brown_concrete
* green_concrete
* red_concrete
* black_concrete
* white_concrete_powder
* orange_concrete_powder
* magenta_concrete_powder
* light_blue_concrete_powder
* yellow_concrete_powder
* lime_concrete_powder
* pink_concrete_powder
* gray_concrete_powder
* light_gray_concrete_powder
* cyan_concrete_powder
* purple_concrete_powder
* blue_concrete_powder
* brown_concrete_powder
* green_concrete_powder
* red_concrete_powder
* black_concrete_powder
* kelp
* kelp_plant
* dried_kelp_block
* turtle_egg
* dead_tube_coral_block
* dead_brain_coral_block
* dead_bubble_coral_block
* dead_fire_coral_block
* dead_horn_coral_block
* tube_coral_block
* brain_coral_block
* bubble_coral_block
* fire_coral_block
* horn_coral_block
* dead_tube_coral
* dead_brain_coral
* dead_bubble_coral
* dead_fire_coral
* dead_horn_coral
* tube_coral
* brain_coral
* bubble_coral
* fire_coral
* horn_coral
* dead_tube_coral_fan
* dead_brain_coral_fan
* dead_bubble_coral_fan
* dead_fire_coral_fan
* dead_horn_coral_fan
* tube_coral_fan
* brain_coral_fan
* bubble_coral_fan
* fire_coral_fan
* horn_coral_fan
* dead_tube_coral_wall_fan
* dead_brain_coral_wall_fan
* dead_bubble_coral_wall_fan
* dead_fire_coral_wall_fan
* dead_horn_coral_wall_fan
* tube_coral_wall_fan
* brain_coral_wall_fan
* bubble_coral_wall_fan
* fire_coral_wall_fan
* horn_coral_wall_fan
* sea_pickle
* blue_ice
* conduit
* bamboo_sapling
* bamboo
* potted_bamboo
* void_air
* cave_air
* bubble_column
* polished_granite_stairs
* smooth_red_sandstone_stairs
* mossy_stone_brick_stairs
* polished_diorite_stairs
* mossy_cobblestone_stairs
* end_stone_brick_stairs
* stone_stairs
* smooth_sandstone_stairs
* smooth_quartz_stairs
* granite_stairs
* andesite_stairs
* red_nether_brick_stairs
* polished_andesite_stairs
* diorite_stairs
* polished_granite_slab
* smooth_red_sandstone_slab
* mossy_stone_brick_slab
* polished_diorite_slab
* mossy_cobblestone_slab
* end_stone_brick_slab
* smooth_sandstone_slab
* smooth_quartz_slab
* granite_slab
* andesite_slab
* red_nether_brick_slab
* polished_andesite_slab
* diorite_slab
* brick_wall
* prismarine_wall
* red_sandstone_wall
* mossy_stone_brick_wall
* granite_wall
* stone_brick_wall
* nether_brick_wall
* andesite_wall
* red_nether_brick_wall
* sandstone_wall
* end_stone_brick_wall
* diorite_wall
* scaffolding
* loom
* barrel
* smoker
* blast_furnace
* cartography_table
* fletching_table
* grindstone
* lectern
* smithing_table
* stonecutter
* bell
* lantern
* soul_lantern
* campfire
* soul_campfire
* sweet_berry_bush
* warped_stem
* stripped_warped_stem
* warped_hyphae
* stripped_warped_hyphae
* warped_nylium
* warped_fungus
* warped_wart_block
* warped_roots
* nether_sprouts
* crimson_stem
* stripped_crimson_stem
* crimson_hyphae
* stripped_crimson_hyphae
* crimson_nylium
* crimson_fungus
* shroomlight
* weeping_vines
* weeping_vines_plant
* twisting_vines
* twisting_vines_plant
* crimson_roots
* crimson_planks
* warped_planks
* crimson_slab
* warped_slab
* crimson_pressure_plate
* warped_pressure_plate
* crimson_fence
* warped_fence
* crimson_trapdoor
* warped_trapdoor
* crimson_fence_gate
* warped_fence_gate
* crimson_stairs
* warped_stairs
* crimson_button
* warped_button
* crimson_door
* warped_door
* crimson_sign
* warped_sign
* crimson_wall_sign
* warped_wall_sign
* structure_block
* jigsaw
* composter
* target
* bee_nest
* beehive
* honey_block
* honeycomb_block
* netherite_block
* ancient_debris
* crying_obsidian
* respawn_anchor
* potted_crimson_fungus
* potted_warped_fungus
* potted_crimson_roots
* potted_warped_roots
* lodestone
* blackstone
* blackstone_stairs
* blackstone_wall
* blackstone_slab
* polished_blackstone
* polished_blackstone_bricks
* cracked_polished_blackstone_bricks
* chiseled_polished_blackstone
* polished_blackstone_brick_slab
* polished_blackstone_brick_stairs
* polished_blackstone_brick_wall
* gilded_blackstone
* polished_blackstone_stairs
* polished_blackstone_slab
* polished_blackstone_pressure_plate
* polished_blackstone_button
* polished_blackstone_wall
* chiseled_nether_bricks
* cracked_nether_bricks
* quartz_bricks
"""
air = "minecraft:air"
stone = "minecraft:stone"
granite = "minecraft:granite"
polished_granite = "minecraft:polished_granite"
diorite = "minecraft:diorite"
polished_diorite = "minecraft:polished_diorite"
andesite = "minecraft:andesite"
polished_andesite = "minecraft:polished_andesite"
grass_block = "minecraft:grass_block"
dirt = "minecraft:dirt"
coarse_dirt = "minecraft:coarse_dirt"
podzol = "minecraft:podzol"
cobblestone = "minecraft:cobblestone"
oak_planks = "minecraft:oak_planks"
spruce_planks = "minecraft:spruce_planks"
birch_planks = "minecraft:birch_planks"
jungle_planks = "minecraft:jungle_planks"
acacia_planks = "minecraft:acacia_planks"
dark_oak_planks = "minecraft:dark_oak_planks"
oak_sapling = "minecraft:oak_sapling"
spruce_sapling = "minecraft:spruce_sapling"
birch_sapling = "minecraft:birch_sapling"
jungle_sapling = "minecraft:jungle_sapling"
acacia_sapling = "minecraft:acacia_sapling"
dark_oak_sapling = "minecraft:dark_oak_sapling"
bedrock = "minecraft:bedrock"
water = "minecraft:water"
lava = "minecraft:lava"
sand = "minecraft:sand"
red_sand = "minecraft:red_sand"
gravel = "minecraft:gravel"
gold_ore = "minecraft:gold_ore"
iron_ore = "minecraft:iron_ore"
coal_ore = "minecraft:coal_ore"
nether_gold_ore = "minecraft:nether_gold_ore"
oak_log = "minecraft:oak_log"
spruce_log = "minecraft:spruce_log"
birch_log = "minecraft:birch_log"
jungle_log = "minecraft:jungle_log"
acacia_log = "minecraft:acacia_log"
dark_oak_log = "minecraft:dark_oak_log"
stripped_spruce_log = "minecraft:stripped_spruce_log"
stripped_birch_log = "minecraft:stripped_birch_log"
stripped_jungle_log = "minecraft:stripped_jungle_log"
stripped_acacia_log = "minecraft:stripped_acacia_log"
stripped_dark_oak_log = "minecraft:stripped_dark_oak_log"
stripped_oak_log = "minecraft:stripped_oak_log"
oak_wood = "minecraft:oak_wood"
spruce_wood = "minecraft:spruce_wood"
birch_wood = "minecraft:birch_wood"
jungle_wood = "minecraft:jungle_wood"
acacia_wood = "minecraft:acacia_wood"
dark_oak_wood = "minecraft:dark_oak_wood"
stripped_oak_wood = "minecraft:stripped_oak_wood"
stripped_spruce_wood = "minecraft:stripped_spruce_wood"
stripped_birch_wood = "minecraft:stripped_birch_wood"
stripped_jungle_wood = "minecraft:stripped_jungle_wood"
stripped_acacia_wood = "minecraft:stripped_acacia_wood"
stripped_dark_oak_wood = "minecraft:stripped_dark_oak_wood"
oak_leaves = "minecraft:oak_leaves"
spruce_leaves = "minecraft:spruce_leaves"
birch_leaves = "minecraft:birch_leaves"
jungle_leaves = "minecraft:jungle_leaves"
acacia_leaves = "minecraft:acacia_leaves"
dark_oak_leaves = "minecraft:dark_oak_leaves"
sponge = "minecraft:sponge"
wet_sponge = "minecraft:wet_sponge"
glass = "minecraft:glass"
lapis_ore = "minecraft:lapis_ore"
lapis_block = "minecraft:lapis_block"
dispenser = "minecraft:dispenser"
sandstone = "minecraft:sandstone"
chiseled_sandstone = "minecraft:chiseled_sandstone"
cut_sandstone = "minecraft:cut_sandstone"
note_block = "minecraft:note_block"
white_bed = "minecraft:white_bed"
orange_bed = "minecraft:orange_bed"
magenta_bed = "minecraft:magenta_bed"
light_blue_bed = "minecraft:light_blue_bed"
yellow_bed = "minecraft:yellow_bed"
lime_bed = "minecraft:lime_bed"
pink_bed = "minecraft:pink_bed"
gray_bed = "minecraft:gray_bed"
light_gray_bed = "minecraft:light_gray_bed"
cyan_bed = "minecraft:cyan_bed"
purple_bed = "minecraft:purple_bed"
blue_bed = "minecraft:blue_bed"
brown_bed = "minecraft:brown_bed"
green_bed = "minecraft:green_bed"
red_bed = "minecraft:red_bed"
black_bed = "minecraft:black_bed"
powered_rail = "minecraft:powered_rail"
detector_rail = "minecraft:detector_rail"
sticky_piston = "minecraft:sticky_piston"
cobweb = "minecraft:cobweb"
grass = "minecraft:grass"
fern = "minecraft:fern"
dead_bush = "minecraft:dead_bush"
seagrass = "minecraft:seagrass"
tall_seagrass = "minecraft:tall_seagrass"
piston = "minecraft:piston"
piston_head = "minecraft:piston_head"
white_wool = "minecraft:white_wool"
orange_wool = "minecraft:orange_wool"
magenta_wool = "minecraft:magenta_wool"
light_blue_wool = "minecraft:light_blue_wool"
yellow_wool = "minecraft:yellow_wool"
lime_wool = "minecraft:lime_wool"
pink_wool = "minecraft:pink_wool"
gray_wool = "minecraft:gray_wool"
light_gray_wool = "minecraft:light_gray_wool"
cyan_wool = "minecraft:cyan_wool"
purple_wool = "minecraft:purple_wool"
blue_wool = "minecraft:blue_wool"
brown_wool = "minecraft:brown_wool"
green_wool = "minecraft:green_wool"
red_wool = "minecraft:red_wool"
black_wool = "minecraft:black_wool"
moving_piston = "minecraft:moving_piston"
dandelion = "minecraft:dandelion"
poppy = "minecraft:poppy"
blue_orchid = "minecraft:blue_orchid"
allium = "minecraft:allium"
azure_bluet = "minecraft:azure_bluet"
red_tulip = "minecraft:red_tulip"
orange_tulip = "minecraft:orange_tulip"
white_tulip = "minecraft:white_tulip"
pink_tulip = "minecraft:pink_tulip"
oxeye_daisy = "minecraft:oxeye_daisy"
cornflower = "minecraft:cornflower"
wither_rose = "minecraft:wither_rose"
lily_of_the_valley = "minecraft:lily_of_the_valley"
brown_mushroom = "minecraft:brown_mushroom"
red_mushroom = "minecraft:red_mushroom"
gold_block = "minecraft:gold_block"
iron_block = "minecraft:iron_block"
bricks = "minecraft:bricks"
tnt = "minecraft:tnt"
bookshelf = "minecraft:bookshelf"
mossy_cobblestone = "minecraft:mossy_cobblestone"
obsidian = "minecraft:obsidian"
torch = "minecraft:torch"
wall_torch = "minecraft:wall_torch"
fire = "minecraft:fire"
soul_fire = "minecraft:soul_fire"
spawner = "minecraft:spawner"
oak_stairs = "minecraft:oak_stairs"
chest = "minecraft:chest"
redstone_wire = "minecraft:redstone_wire"
diamond_ore = "minecraft:diamond_ore"
diamond_block = "minecraft:diamond_block"
crafting_table = "minecraft:crafting_table"
wheat = "minecraft:wheat"
farmland = "minecraft:farmland"
furnace = "minecraft:furnace"
oak_sign = "minecraft:oak_sign"
spruce_sign = "minecraft:spruce_sign"
birch_sign = "minecraft:birch_sign"
acacia_sign = "minecraft:acacia_sign"
jungle_sign = "minecraft:jungle_sign"
dark_oak_sign = "minecraft:dark_oak_sign"
oak_door = "minecraft:oak_door"
ladder = "minecraft:ladder"
rail = "minecraft:rail"
cobblestone_stairs = "minecraft:cobblestone_stairs"
oak_wall_sign = "minecraft:oak_wall_sign"
spruce_wall_sign = "minecraft:spruce_wall_sign"
birch_wall_sign = "minecraft:birch_wall_sign"
acacia_wall_sign = "minecraft:acacia_wall_sign"
jungle_wall_sign = "minecraft:jungle_wall_sign"
dark_oak_wall_sign = "minecraft:dark_oak_wall_sign"
lever = "minecraft:lever"
stone_pressure_plate = "minecraft:stone_pressure_plate"
iron_door = "minecraft:iron_door"
oak_pressure_plate = "minecraft:oak_pressure_plate"
spruce_pressure_plate = "minecraft:spruce_pressure_plate"
birch_pressure_plate = "minecraft:birch_pressure_plate"
jungle_pressure_plate = "minecraft:jungle_pressure_plate"
acacia_pressure_plate = "minecraft:acacia_pressure_plate"
dark_oak_pressure_plate = "minecraft:dark_oak_pressure_plate"
redstone_ore = "minecraft:redstone_ore"
redstone_torch = "minecraft:redstone_torch"
redstone_wall_torch = "minecraft:redstone_wall_torch"
stone_button = "minecraft:stone_button"
snow = "minecraft:snow"
ice = "minecraft:ice"
snow_block = "minecraft:snow_block"
cactus = "minecraft:cactus"
clay = "minecraft:clay"
sugar_cane = "minecraft:sugar_cane"
jukebox = "minecraft:jukebox"
oak_fence = "minecraft:oak_fence"
pumpkin = "minecraft:pumpkin"
netherrack = "minecraft:netherrack"
soul_sand = "minecraft:soul_sand"
soul_soil = "minecraft:soul_soil"
basalt = "minecraft:basalt"
polished_basalt = "minecraft:polished_basalt"
soul_torch = "minecraft:soul_torch"
soul_wall_torch = "minecraft:soul_wall_torch"
glowstone = "minecraft:glowstone"
nether_portal = "minecraft:nether_portal"
carved_pumpkin = "minecraft:carved_pumpkin"
jack_o_lantern = "minecraft:jack_o_lantern"
cake = "minecraft:cake"
repeater = "minecraft:repeater"
white_stained_glass = "minecraft:white_stained_glass"
orange_stained_glass = "minecraft:orange_stained_glass"
magenta_stained_glass = "minecraft:magenta_stained_glass"
light_blue_stained_glass = "minecraft:light_blue_stained_glass"
yellow_stained_glass = "minecraft:yellow_stained_glass"
lime_stained_glass = "minecraft:lime_stained_glass"
pink_stained_glass = "minecraft:pink_stained_glass"
gray_stained_glass = "minecraft:gray_stained_glass"
light_gray_stained_glass = "minecraft:light_gray_stained_glass"
cyan_stained_glass = "minecraft:cyan_stained_glass"
purple_stained_glass = "minecraft:purple_stained_glass"
blue_stained_glass = "minecraft:blue_stained_glass"
brown_stained_glass = "minecraft:brown_stained_glass"
green_stained_glass = "minecraft:green_stained_glass"
red_stained_glass = "minecraft:red_stained_glass"
black_stained_glass = "minecraft:black_stained_glass"
oak_trapdoor = "minecraft:oak_trapdoor"
spruce_trapdoor = "minecraft:spruce_trapdoor"
birch_trapdoor = "minecraft:birch_trapdoor"
jungle_trapdoor = "minecraft:jungle_trapdoor"
acacia_trapdoor = "minecraft:acacia_trapdoor"
dark_oak_trapdoor = "minecraft:dark_oak_trapdoor"
stone_bricks = "minecraft:stone_bricks"
mossy_stone_bricks = "minecraft:mossy_stone_bricks"
cracked_stone_bricks = "minecraft:cracked_stone_bricks"
chiseled_stone_bricks = "minecraft:chiseled_stone_bricks"
infested_stone = "minecraft:infested_stone"
infested_cobblestone = "minecraft:infested_cobblestone"
infested_stone_bricks = "minecraft:infested_stone_bricks"
infested_mossy_stone_bricks = "minecraft:infested_mossy_stone_bricks"
infested_cracked_stone_bricks = "minecraft:infested_cracked_stone_bricks"
infested_chiseled_stone_bricks = "minecraft:infested_chiseled_stone_bricks"
brown_mushroom_block = "minecraft:brown_mushroom_block"
red_mushroom_block = "minecraft:red_mushroom_block"
mushroom_stem = "minecraft:mushroom_stem"
iron_bars = "minecraft:iron_bars"
chain = "minecraft:chain"
glass_pane = "minecraft:glass_pane"
melon = "minecraft:melon"
attached_pumpkin_stem = "minecraft:attached_pumpkin_stem"
attached_melon_stem = "minecraft:attached_melon_stem"
pumpkin_stem = "minecraft:pumpkin_stem"
melon_stem = "minecraft:melon_stem"
vine = "minecraft:vine"
oak_fence_gate = "minecraft:oak_fence_gate"
brick_stairs = "minecraft:brick_stairs"
stone_brick_stairs = "minecraft:stone_brick_stairs"
mycelium = "minecraft:mycelium"
lily_pad = "minecraft:lily_pad"
nether_bricks = "minecraft:nether_bricks"
nether_brick_fence = "minecraft:nether_brick_fence"
nether_brick_stairs = "minecraft:nether_brick_stairs"
nether_wart = "minecraft:nether_wart"
enchanting_table = "minecraft:enchanting_table"
brewing_stand = "minecraft:brewing_stand"
cauldron = "minecraft:cauldron"
end_portal = "minecraft:end_portal"
end_portal_frame = "minecraft:end_portal_frame"
end_stone = "minecraft:end_stone"
dragon_egg = "minecraft:dragon_egg"
redstone_lamp = "minecraft:redstone_lamp"
cocoa = "minecraft:cocoa"
sandstone_stairs = "minecraft:sandstone_stairs"
emerald_ore = "minecraft:emerald_ore"
ender_chest = "minecraft:ender_chest"
tripwire_hook = "minecraft:tripwire_hook"
tripwire = "minecraft:tripwire"
emerald_block = "minecraft:emerald_block"
spruce_stairs = "minecraft:spruce_stairs"
birch_stairs = "minecraft:birch_stairs"
jungle_stairs = "minecraft:jungle_stairs"
command_block = "minecraft:command_block"
beacon = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.